Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll | 9
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-mov-debug-locs.mir | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-detect-vec-redux.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-fma-combine-with-fpfusion.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-fma-combines.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-ld1.ll | 26
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-misched-basic-A53.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-rounding.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-storebytesmerge.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll | 100
-rw-r--r--  llvm/test/CodeGen/AArch64/bti-branch-relaxation.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/consthoist-gep.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/csel-zero-float.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/dag-combine-invaraints.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/load-zext-bitcast.ll | 525
-rw-r--r--  llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/recp-fastmath.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/shrink-wrap-const-pool-access.mir | 76
-rw-r--r--  llvm/test/CodeGen/AArch64/stack-tagging-ex-1.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/stack-tagging-ex-2.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/stack_guard_remat.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/svtcf-fmul-fdiv-combine.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/vector_merge_dep_check.ll | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/wineh-frame5.mir | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/wineh-frame6.mir | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/wineh-frame7.mir | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/wineh-frame8.mir | 2
-rw-r--r--  llvm/test/CodeGen/AArch64/wineh5.mir | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/wineh_shrinkwrap.mir | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/zext-shuffle.ll | 6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-unmerge-values.mir | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fract.f64.mir | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/memory-legalizer-atomic-fence.ll | 30
-rw-r--r--  llvm/test/CodeGen/AMDGPU/add-max.ll | 18
-rw-r--r--  llvm/test/CodeGen/AMDGPU/addsub64_carry.ll | 36
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll | 1260
-rw-r--r--  llvm/test/CodeGen/AMDGPU/atomic_optimizations_buffer.ll | 135
-rw-r--r--  llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll | 210
-rw-r--r--  llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll | 585
-rw-r--r--  llvm/test/CodeGen/AMDGPU/atomic_optimizations_raw_buffer.ll | 90
-rw-r--r--  llvm/test/CodeGen/AMDGPU/atomic_optimizations_struct_buffer.ll | 90
-rw-r--r--  llvm/test/CodeGen/AMDGPU/bf16.ll | 26
-rw-r--r--  llvm/test/CodeGen/AMDGPU/carryout-selection.ll | 614
-rw-r--r--  llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll | 3
-rw-r--r--  llvm/test/CodeGen/AMDGPU/ctpop16.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll | 25
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fadd-fma-fmul-combine.ll | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll | 35
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fpext.f16.ll | 10
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fptosi.f16.ll | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fptoui.f16.ll | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll | 128
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fptrunc.ll | 44
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fract.f64.ll | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fract.ll | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll | 115
-rw-r--r--  llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll | 81
-rw-r--r--  llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll | 81
-rw-r--r--  llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll | 115
-rw-r--r--  llvm/test/CodeGen/AMDGPU/inline-attr.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/insert-delay-alu-bug.ll | 20
-rw-r--r--  llvm/test/CodeGen/AMDGPU/lds-dma-workgroup-release.ll | 1
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll | 35
-rw-r--r--  llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll | 128
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memory-legalizer-barriers.ll | 20
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memory-legalizer-fence-mmra-global.ll | 48
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memory-legalizer-fence.ll | 57
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-volatile.ll | 11
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-workgroup.ll | 786
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memory-legalizer-global-volatile.ll | 11
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memory-legalizer-global-workgroup.ll | 632
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memory-legalizer-local-agent.ll | 330
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memory-legalizer-local-cluster.ll | 330
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memory-legalizer-local-system.ll | 330
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memory-legalizer-local-volatile.ll | 11
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memory-legalizer-local-workgroup.ll | 330
-rw-r--r--  llvm/test/CodeGen/AMDGPU/optimize-compare.mir | 82
-rw-r--r--  llvm/test/CodeGen/AMDGPU/prevent-fmul-hoist-ir.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/s_cmp_0.ll | 64
-rw-r--r--  llvm/test/CodeGen/AMDGPU/s_uaddo_usubo_pseudo.ll | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/schedule-barrier-latency.mir | 83
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sdiv64.ll | 146
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sitofp.f16.ll | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/srem.ll | 654
-rw-r--r--  llvm/test/CodeGen/AMDGPU/srem64.ll | 207
-rw-r--r--  llvm/test/CodeGen/AMDGPU/uaddo.ll | 54
-rw-r--r--  llvm/test/CodeGen/AMDGPU/udiv64.ll | 80
-rw-r--r--  llvm/test/CodeGen/AMDGPU/uitofp.f16.ll | 8
-rw-r--r--  llvm/test/CodeGen/AMDGPU/urem64.ll | 146
-rw-r--r--  llvm/test/CodeGen/AMDGPU/usubo.ll | 54
-rw-r--r--  llvm/test/CodeGen/AMDGPU/waitcnt-vscnt.ll | 20
-rw-r--r--  llvm/test/CodeGen/AMDGPU/wave32.ll | 190
-rw-r--r--  llvm/test/CodeGen/AMDGPU/workitem-intrinsic-opts.ll | 8
-rw-r--r--  llvm/test/CodeGen/ARM/llround-conv.ll | 9
-rw-r--r--  llvm/test/CodeGen/ARM/lround-conv.ll | 43
-rw-r--r--  llvm/test/CodeGen/BPF/BTF/ptr-named-2.ll | 59
-rw-r--r--  llvm/test/CodeGen/BPF/BTF/ptr-named.ll | 75
-rw-r--r--  llvm/test/CodeGen/DirectX/Metadata/resource-symbols.ll | 9
-rw-r--r--  llvm/test/CodeGen/DirectX/strip-llvm-errno-tbaa.ll | 19
-rw-r--r--  llvm/test/CodeGen/Hexagon/bitcast-i64-to-v64i1.ll | 33
-rw-r--r--  llvm/test/CodeGen/Hexagon/insert-big.ll | 47
-rw-r--r--  llvm/test/CodeGen/Hexagon/qfp-conv.ll | 35
-rw-r--r--  llvm/test/CodeGen/Hexagon/qfp-enabled.ll | 19
-rw-r--r--  llvm/test/CodeGen/Hexagon/qfp-remove-kill.mir | 95
-rw-r--r--  llvm/test/CodeGen/Hexagon/qfp-subreg-bug.mir | 33
-rw-r--r--  llvm/test/CodeGen/Hexagon/qfpopt-rem-conv-add.ll | 21
-rw-r--r--  llvm/test/CodeGen/Hexagon/swp-phi.ll | 2
-rw-r--r--  llvm/test/CodeGen/Hexagon/vect/qfp-mix.mir | 79
-rw-r--r--  llvm/test/CodeGen/Hexagon/vect/qfp-zeroinit.mir | 23
-rw-r--r--  llvm/test/CodeGen/Hexagon/vect/unique-vreg-def.ll | 32
-rw-r--r--  llvm/test/CodeGen/MIR2Vec/Inputs/mir2vec_dummy_2D_vocab.json | 18
-rw-r--r--  llvm/test/CodeGen/MIR2Vec/Inputs/mir2vec_dummy_3D_vocab.json | 38
-rw-r--r--  llvm/test/CodeGen/MIR2Vec/Inputs/mir2vec_inconsistent_dims.json | 11
-rw-r--r--  llvm/test/CodeGen/MIR2Vec/Inputs/mir2vec_zero_vocab.json | 11
-rw-r--r--  llvm/test/CodeGen/MIR2Vec/Inputs/reference_x86_vocab_print.txt | 291
-rw-r--r--  llvm/test/CodeGen/MIR2Vec/Inputs/reference_x86_vocab_wo=0.5_print.txt | 291
-rw-r--r--  llvm/test/CodeGen/MIR2Vec/if-else.mir | 144
-rw-r--r--  llvm/test/CodeGen/MIR2Vec/mir2vec-basic-symbolic.mir | 76
-rw-r--r--  llvm/test/CodeGen/MIR2Vec/vocab-error-handling.ll | 16
-rw-r--r--  llvm/test/CodeGen/NVPTX/fma-assoc.ll | 4
-rw-r--r--  llvm/test/CodeGen/PowerPC/all-atomics.ll | 562
-rw-r--r--  llvm/test/CodeGen/PowerPC/atomic-minmax.ll | 48
-rw-r--r--  llvm/test/CodeGen/PowerPC/atomics-regression.ll | 880
-rw-r--r--  llvm/test/CodeGen/PowerPC/atomics.ll | 12
-rw-r--r--  llvm/test/CodeGen/PowerPC/fmf-propagation.ll | 4
-rw-r--r--  llvm/test/CodeGen/PowerPC/i64_fp_round.ll | 18
-rw-r--r--  llvm/test/CodeGen/PowerPC/ppc-partword-atomic.ll | 8
-rw-r--r--  llvm/test/CodeGen/PowerPC/pr61882.ll | 4
-rw-r--r--  llvm/test/CodeGen/PowerPC/scalar-equal.ll | 4
-rw-r--r--  llvm/test/CodeGen/PowerPC/scalar-min-max-p10.ll | 2
-rw-r--r--  llvm/test/CodeGen/PowerPC/scalar-rounding-ops.ll | 445
-rw-r--r--  llvm/test/CodeGen/PowerPC/scalar_cmp.ll | 4
-rw-r--r--  llvm/test/CodeGen/PowerPC/sign-ext-atomics.ll | 6
-rw-r--r--  llvm/test/CodeGen/PowerPC/vector-llrint.ll | 1413
-rw-r--r--  llvm/test/CodeGen/PowerPC/vector-lrint.ll | 1588
-rw-r--r--  llvm/test/CodeGen/RISCV/atomic-fence.ll | 4
-rw-r--r--  llvm/test/CodeGen/RISCV/atomic-load-store.ll | 406
-rw-r--r--  llvm/test/CodeGen/RISCV/atomic-rmw-sub.ll | 102
-rw-r--r--  llvm/test/CodeGen/RISCV/atomic-rmw.ll | 9014
-rw-r--r--  llvm/test/CodeGen/RISCV/atomic-signext.ll | 2644
-rw-r--r--  llvm/test/CodeGen/RISCV/features-info.ll | 5
-rw-r--r--  llvm/test/CodeGen/RISCV/rv32zbs.ll | 50
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64zbs.ll | 34
-rw-r--r--  llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWBufferDynamicIdx.ll | 8
-rw-r--r--  llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWStructuredBufferDynamicIdx.ll | 5
-rw-r--r--  llvm/test/CodeGen/SPIRV/hlsl-resources/TypedBufferLoad.ll | 43
-rw-r--r--  llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer.ll | 6
-rw-r--r--  llvm/test/CodeGen/SystemZ/fp-sincos-01.ll | 2
-rw-r--r--  llvm/test/CodeGen/SystemZ/int-conv-14.ll | 45
-rw-r--r--  llvm/test/CodeGen/SystemZ/int-conv-15.ll | 45
-rw-r--r--  llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-vs-unpredicated-copy.mir | 146
-rw-r--r--  llvm/test/CodeGen/Thumb2/pacbti-m-outliner-5.ll | 2
-rw-r--r--  llvm/test/CodeGen/WebAssembly/memory-interleave.ll | 1608
-rw-r--r--  llvm/test/CodeGen/WebAssembly/simd-relaxed-fmax.ll | 60
-rw-r--r--  llvm/test/CodeGen/WebAssembly/simd-relaxed-fmin.ll | 59
-rw-r--r--  llvm/test/CodeGen/WebAssembly/simd-vector-trunc.ll | 39
-rw-r--r--  llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/2008-05-01-InvalidOrdCompare.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/2012-08-28-UnsafeMathCrash.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/avx-minmax.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/avx512-unsafe-fp-math.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/avx512fp16-combine-vfmulc-fadd.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc-fadd.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/bf16-fast-isel.ll | 66
-rw-r--r--  llvm/test/CodeGen/X86/bitcnt-big-integer.ll | 3021
-rw-r--r--  llvm/test/CodeGen/X86/dag-fmf-cse.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/fabs.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll | 789
-rw-r--r--  llvm/test/CodeGen/X86/fp-undef.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/fp128-select.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/fsxor-alignment.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/machine-trace-metrics-crash.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/neg_fp.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/negate-add-zero.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/recip-pic.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/sincos-opt.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/sincos.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll | 12
192 files changed, 25833 insertions, 8464 deletions
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir
index 97a0417..b040ff2 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memcpy.mir
@@ -56,7 +56,7 @@
}
- attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cyclone" "target-features"="+aes,+crypto,+fp-armv8,+neon,+sha2" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cyclone" "target-features"="+aes,+crypto,+fp-armv8,+neon,+sha2" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind }
attributes #2 = { optsize }
attributes #3 = { minsize }
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir
index fc4fbac..f24aeae 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memmove.mir
@@ -47,7 +47,7 @@
ret void
}
- attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cyclone" "target-features"="+aes,+crypto,+fp-armv8,+neon,+sha2" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cyclone" "target-features"="+aes,+crypto,+fp-armv8,+neon,+sha2" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind }
...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir
index b06cadf..e4d2ca3 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/inline-memset.mir
@@ -50,7 +50,7 @@
declare void @llvm.stackprotector(ptr, ptr) #2
- attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cyclone" "target-features"="+aes,+crypto,+fp-armv8,+neon,+sha2" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cyclone" "target-features"="+aes,+crypto,+fp-armv8,+neon,+sha2" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind }
...
diff --git a/llvm/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll b/llvm/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll
index 0c1776e..6e3682a 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll
@@ -37,7 +37,7 @@ for.body: ; preds = %for.body, %entry
; Function Attrs: nounwind readnone
declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1
-attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
!llvm.dbg.cu = !{!0}
diff --git a/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll b/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll
index f2ed57e..353e818 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll
@@ -325,7 +325,7 @@ entry:
declare void @hhh(double, double)
-attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "unsafe-fp-math"="true" "use-soft-float"="false" }
-attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "unsafe-fp-math"="true" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll b/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll
index 7e97116..8da0e11 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll
@@ -694,8 +694,8 @@ bb1:
; CHECK: .[[LABEL]]:
; CHECK: ret
-attributes #0 = { "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
!1 = !{!2, !2, i64 0}
!2 = !{!"int", !3, i64 0}
diff --git a/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll b/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll
index 296435a..937bfe4 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll
@@ -519,8 +519,8 @@ while.cond:
br label %while.cond
}
-attributes #0 = { nounwind readonly "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind readonly "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
index b215c51..0933e67 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
@@ -1371,11 +1371,10 @@ define noundef <8 x i16> @cmplx_mul_combined_re_im(<8 x i16> noundef %a, i64 %sc
; CHECK-SD-NEXT: lsr x9, x0, #16
; CHECK-SD-NEXT: adrp x8, .LCPI14_0
; CHECK-SD-NEXT: dup v4.8h, w0
-; CHECK-SD-NEXT: dup v1.8h, w9
-; CHECK-SD-NEXT: fmov s3, w9
-; CHECK-SD-NEXT: sqneg v2.8h, v1.8h
-; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI14_0]
-; CHECK-SD-NEXT: tbl v1.16b, { v2.16b, v3.16b }, v1.16b
+; CHECK-SD-NEXT: ldr q3, [x8, :lo12:.LCPI14_0]
+; CHECK-SD-NEXT: dup v2.8h, w9
+; CHECK-SD-NEXT: sqneg v1.8h, v2.8h
+; CHECK-SD-NEXT: tbl v1.16b, { v1.16b, v2.16b }, v3.16b
; CHECK-SD-NEXT: rev32 v2.8h, v0.8h
; CHECK-SD-NEXT: sqdmull v3.4s, v0.4h, v4.4h
; CHECK-SD-NEXT: sqdmull2 v0.4s, v0.8h, v4.8h
diff --git a/llvm/test/CodeGen/AArch64/aarch64-mov-debug-locs.mir b/llvm/test/CodeGen/AArch64/aarch64-mov-debug-locs.mir
index 45fa2be5..c05d661 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-mov-debug-locs.mir
+++ b/llvm/test/CodeGen/AArch64/aarch64-mov-debug-locs.mir
@@ -79,8 +79,8 @@
; Function Attrs: nounwind
declare void @llvm.stackprotector(ptr, ptr) #3
- attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
- attributes #1 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" }
+ attributes #1 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" }
attributes #2 = { nounwind readnone speculatable }
attributes #3 = { nounwind }
diff --git a/llvm/test/CodeGen/AArch64/arm64-detect-vec-redux.ll b/llvm/test/CodeGen/AArch64/arm64-detect-vec-redux.ll
index 4e86f52..071344d 100644
--- a/llvm/test/CodeGen/AArch64/arm64-detect-vec-redux.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-detect-vec-redux.ll
@@ -47,6 +47,6 @@ declare <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64>, <2 x i64>) #1
; Function Attrs: nounwind readnone
declare <2 x i32> @llvm.aarch64.neon.sqdmulh.v2i32(<2 x i32>, <2 x i32>) #1
-attributes #0 = { nounwind readnone "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind readnone "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/AArch64/arm64-fma-combine-with-fpfusion.ll b/llvm/test/CodeGen/AArch64/arm64-fma-combine-with-fpfusion.ll
index 9b3d539..0ddcdcc 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fma-combine-with-fpfusion.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fma-combine-with-fpfusion.ll
@@ -8,5 +8,5 @@ define float @mul_add(float %a, float %b, float %c) local_unnamed_addr #0 {
ret float %add
}
-attributes #0 = { norecurse nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { norecurse nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" }
diff --git a/llvm/test/CodeGen/AArch64/arm64-fma-combines.ll b/llvm/test/CodeGen/AArch64/arm64-fma-combines.ll
index e17a0a9..54f752e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fma-combines.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fma-combines.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -O=3 -mtriple=arm64-apple-ios -mcpu=cyclone -mattr=+fullfp16 -enable-unsafe-fp-math -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -O=3 -mtriple=arm64-apple-ios -mcpu=cyclone -mattr=+fullfp16 -verify-machineinstrs | FileCheck %s
define void @foo_2d(ptr %src) {
; CHECK-LABEL: %entry
diff --git a/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll b/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
index d2ce7e6..41f57bf 100644
--- a/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
@@ -84,7 +84,7 @@ bb3: ; preds = %bb3, %bb
; Function Attrs: nounwind readnone
declare i64 @llvm.objectsize.i64.p0(ptr, i1) #1
-attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
!1 = !{!2, !2, i64 0}
diff --git a/llvm/test/CodeGen/AArch64/arm64-ld1.ll b/llvm/test/CodeGen/AArch64/arm64-ld1.ll
index 0b22fa4..c2b2c1e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ld1.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ld1.ll
@@ -1654,24 +1654,14 @@ define %struct.__neon_float64x2x4_t @ld1_x4_v2f64(ptr %addr) {
}
define <8 x i8> @dup_ld1_from_stack(ptr %__ret) {
-; CHECK-SD-LABEL: dup_ld1_from_stack:
-; CHECK-SD: // %bb.0: // %entry
-; CHECK-SD-NEXT: sub sp, sp, #16
-; CHECK-SD-NEXT: .cfi_def_cfa_offset 16
-; CHECK-SD-NEXT: add x8, sp, #15
-; CHECK-SD-NEXT: ld1r.8b { v0 }, [x8]
-; CHECK-SD-NEXT: add sp, sp, #16
-; CHECK-SD-NEXT: ret
-;
-; CHECK-GI-LABEL: dup_ld1_from_stack:
-; CHECK-GI: // %bb.0: // %entry
-; CHECK-GI-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-GI-NEXT: .cfi_def_cfa_offset 16
-; CHECK-GI-NEXT: .cfi_offset w29, -16
-; CHECK-GI-NEXT: add x8, sp, #15
-; CHECK-GI-NEXT: ld1r.8b { v0 }, [x8]
-; CHECK-GI-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
-; CHECK-GI-NEXT: ret
+; CHECK-LABEL: dup_ld1_from_stack:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: add x8, sp, #15
+; CHECK-NEXT: ld1r.8b { v0 }, [x8]
+; CHECK-NEXT: add sp, sp, #16
+; CHECK-NEXT: ret
entry:
%item = alloca i8, align 1
%0 = load i8, ptr %item, align 1
diff --git a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A53.ll b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A53.ll
index 4cdc6cc..c6cf240 100644
--- a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A53.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A53.ll
@@ -107,7 +107,7 @@ define <4 x float> @neon4xfloat(<4 x float> %A, <4 x float> %B) {
; Function Attrs: nounwind
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #1
-attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll
index 82b34ef..bb1a6b0 100644
--- a/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-misched-basic-A57.ll
@@ -108,5 +108,5 @@ for.end: ; preds = %for.cond
; Function Attrs: nounwind
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #1
-attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AArch64/arm64-rounding.ll b/llvm/test/CodeGen/AArch64/arm64-rounding.ll
index d487aab..3ce35bf 100644
--- a/llvm/test/CodeGen/AArch64/arm64-rounding.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-rounding.ll
@@ -201,4 +201,4 @@ entry:
}
attributes #0 = { nounwind }
-attributes #1 = { nounwind "unsafe-fp-math"="true" }
+attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AArch64/arm64-storebytesmerge.ll b/llvm/test/CodeGen/AArch64/arm64-storebytesmerge.ll
index db65fdd..1486b3a 100644
--- a/llvm/test/CodeGen/AArch64/arm64-storebytesmerge.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-storebytesmerge.ll
@@ -36,6 +36,6 @@ for.end705.i: ; preds = %for.body453.i
declare void @f() local_unnamed_addr #1
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a57" "target-features"="+crc,+crypto,+fp-armv8,+neon" "unsafe-fp-math"="true" "use-soft-float"="false" }
-attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a57" "target-features"="+crc,+crypto,+fp-armv8,+neon" "unsafe-fp-math"="true" "use-soft-float"="false" }
+attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a57" "target-features"="+crc,+crypto,+fp-armv8,+neon" "use-soft-float"="false" }
+attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a57" "target-features"="+crc,+crypto,+fp-armv8,+neon" "use-soft-float"="false" }
attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll b/llvm/test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll
index fc59350..593d629 100644
--- a/llvm/test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll
@@ -18,7 +18,7 @@ entry:
ret i32 %1
}
-attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "unsafe-fp-math"="true" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
!llvm.ident = !{!0}
diff --git a/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll b/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll
index c4de177..d7a2a83 100644
--- a/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll
+++ b/llvm/test/CodeGen/AArch64/avoid-pre-trunc.ll
@@ -5,32 +5,30 @@ define <16 x i8> @lower_trunc_16xi8(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e, i16
; CHECK-LABEL: lower_trunc_16xi8:
; CHECK: // %bb.0:
; CHECK-NEXT: fmov s0, w0
-; CHECK-NEXT: ldr h1, [sp]
+; CHECK-NEXT: mov x8, sp
+; CHECK-NEXT: mov v0.b[1], w1
+; CHECK-NEXT: mov v0.b[2], w2
+; CHECK-NEXT: mov v0.b[3], w3
+; CHECK-NEXT: mov v0.b[4], w4
+; CHECK-NEXT: mov v0.b[5], w5
+; CHECK-NEXT: mov v0.b[6], w6
+; CHECK-NEXT: mov v0.b[7], w7
+; CHECK-NEXT: ld1 { v0.b }[8], [x8]
; CHECK-NEXT: add x8, sp, #8
-; CHECK-NEXT: ld1 { v1.h }[1], [x8]
+; CHECK-NEXT: ld1 { v0.b }[9], [x8]
; CHECK-NEXT: add x8, sp, #16
-; CHECK-NEXT: mov v0.h[1], w1
-; CHECK-NEXT: ld1 { v1.h }[2], [x8]
+; CHECK-NEXT: ld1 { v0.b }[10], [x8]
; CHECK-NEXT: add x8, sp, #24
-; CHECK-NEXT: mov v0.h[2], w2
-; CHECK-NEXT: ld1 { v1.h }[3], [x8]
+; CHECK-NEXT: ld1 { v0.b }[11], [x8]
; CHECK-NEXT: add x8, sp, #32
-; CHECK-NEXT: mov v0.h[3], w3
-; CHECK-NEXT: ld1 { v1.h }[4], [x8]
+; CHECK-NEXT: ld1 { v0.b }[12], [x8]
; CHECK-NEXT: add x8, sp, #40
-; CHECK-NEXT: ld1 { v1.h }[5], [x8]
+; CHECK-NEXT: ld1 { v0.b }[13], [x8]
; CHECK-NEXT: add x8, sp, #48
-; CHECK-NEXT: mov v0.h[4], w4
-; CHECK-NEXT: ld1 { v1.h }[6], [x8]
+; CHECK-NEXT: ld1 { v0.b }[14], [x8]
; CHECK-NEXT: add x8, sp, #56
-; CHECK-NEXT: mov v0.h[5], w5
-; CHECK-NEXT: ld1 { v1.h }[7], [x8]
-; CHECK-NEXT: mov v0.h[6], w6
-; CHECK-NEXT: add v2.8h, v1.8h, v1.8h
-; CHECK-NEXT: mov v0.h[7], w7
-; CHECK-NEXT: add v3.8h, v0.8h, v0.8h
-; CHECK-NEXT: uzp1 v0.16b, v0.16b, v1.16b
-; CHECK-NEXT: uzp1 v1.16b, v3.16b, v2.16b
+; CHECK-NEXT: ld1 { v0.b }[15], [x8]
+; CHECK-NEXT: add v1.16b, v0.16b, v0.16b
; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ret
%a1 = insertelement <16 x i16> poison, i16 %a, i16 0
@@ -59,18 +57,15 @@ define <16 x i8> @lower_trunc_16xi8(i16 %a, i16 %b, i16 %c, i16 %d, i16 %e, i16
define <8 x i16> @lower_trunc_8xi16(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h) {
; CHECK-LABEL: lower_trunc_8xi16:
; CHECK: // %bb.0:
-; CHECK-NEXT: fmov s0, w4
-; CHECK-NEXT: fmov s1, w0
-; CHECK-NEXT: mov v0.s[1], w5
-; CHECK-NEXT: mov v1.s[1], w1
-; CHECK-NEXT: mov v0.s[2], w6
-; CHECK-NEXT: mov v1.s[2], w2
-; CHECK-NEXT: mov v0.s[3], w7
-; CHECK-NEXT: mov v1.s[3], w3
-; CHECK-NEXT: add v2.4s, v0.4s, v0.4s
-; CHECK-NEXT: add v3.4s, v1.4s, v1.4s
-; CHECK-NEXT: uzp1 v0.8h, v1.8h, v0.8h
-; CHECK-NEXT: uzp1 v1.8h, v3.8h, v2.8h
+; CHECK-NEXT: fmov s0, w0
+; CHECK-NEXT: mov v0.h[1], w1
+; CHECK-NEXT: mov v0.h[2], w2
+; CHECK-NEXT: mov v0.h[3], w3
+; CHECK-NEXT: mov v0.h[4], w4
+; CHECK-NEXT: mov v0.h[5], w5
+; CHECK-NEXT: mov v0.h[6], w6
+; CHECK-NEXT: mov v0.h[7], w7
+; CHECK-NEXT: add v1.8h, v0.8h, v0.8h
; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ret
%a1 = insertelement <8 x i32> poison, i32 %a, i32 0
@@ -91,14 +86,11 @@ define <8 x i16> @lower_trunc_8xi16(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32
define <4 x i32> @lower_trunc_4xi32(i64 %a, i64 %b, i64 %c, i64 %d) {
; CHECK-LABEL: lower_trunc_4xi32:
; CHECK: // %bb.0:
-; CHECK-NEXT: fmov d0, x2
-; CHECK-NEXT: fmov d1, x0
-; CHECK-NEXT: mov v0.d[1], x3
-; CHECK-NEXT: mov v1.d[1], x1
-; CHECK-NEXT: add v2.2d, v0.2d, v0.2d
-; CHECK-NEXT: add v3.2d, v1.2d, v1.2d
-; CHECK-NEXT: uzp1 v0.4s, v1.4s, v0.4s
-; CHECK-NEXT: uzp1 v1.4s, v3.4s, v2.4s
+; CHECK-NEXT: fmov s0, w0
+; CHECK-NEXT: mov v0.s[1], w1
+; CHECK-NEXT: mov v0.s[2], w2
+; CHECK-NEXT: mov v0.s[3], w3
+; CHECK-NEXT: add v1.4s, v0.4s, v0.4s
; CHECK-NEXT: eor v0.16b, v0.16b, v1.16b
; CHECK-NEXT: ret
%a1 = insertelement <4 x i64> poison, i64 %a, i64 0
@@ -115,24 +107,20 @@ define <4 x i32> @lower_trunc_4xi32(i64 %a, i64 %b, i64 %c, i64 %d) {
define <8 x i32> @lower_trunc_8xi32(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64 %g, i64 %h) {
; CHECK-LABEL: lower_trunc_8xi32:
; CHECK: // %bb.0:
-; CHECK-NEXT: fmov d0, x2
-; CHECK-NEXT: fmov d1, x0
-; CHECK-NEXT: fmov d2, x6
-; CHECK-NEXT: fmov d3, x4
-; CHECK-NEXT: mov v0.d[1], x3
-; CHECK-NEXT: mov v1.d[1], x1
-; CHECK-NEXT: mov v2.d[1], x7
-; CHECK-NEXT: mov v3.d[1], x5
-; CHECK-NEXT: add v4.2d, v0.2d, v0.2d
-; CHECK-NEXT: add v5.2d, v1.2d, v1.2d
-; CHECK-NEXT: add v6.2d, v2.2d, v2.2d
-; CHECK-NEXT: add v7.2d, v3.2d, v3.2d
+; CHECK-NEXT: fmov d0, x6
+; CHECK-NEXT: fmov d1, x4
+; CHECK-NEXT: fmov d2, x2
+; CHECK-NEXT: fmov d3, x0
+; CHECK-NEXT: mov v0.d[1], x7
+; CHECK-NEXT: mov v1.d[1], x5
+; CHECK-NEXT: mov v2.d[1], x3
+; CHECK-NEXT: mov v3.d[1], x1
+; CHECK-NEXT: uzp1 v1.4s, v1.4s, v0.4s
; CHECK-NEXT: uzp1 v2.4s, v3.4s, v2.4s
-; CHECK-NEXT: uzp1 v0.4s, v1.4s, v0.4s
-; CHECK-NEXT: uzp1 v3.4s, v5.4s, v4.4s
-; CHECK-NEXT: uzp1 v1.4s, v7.4s, v6.4s
-; CHECK-NEXT: eor v0.16b, v0.16b, v3.16b
-; CHECK-NEXT: eor v1.16b, v2.16b, v1.16b
+; CHECK-NEXT: add v3.4s, v1.4s, v1.4s
+; CHECK-NEXT: add v0.4s, v2.4s, v2.4s
+; CHECK-NEXT: eor v1.16b, v1.16b, v3.16b
+; CHECK-NEXT: eor v0.16b, v2.16b, v0.16b
; CHECK-NEXT: ret
%a1 = insertelement <8 x i64> poison, i64 %a, i64 0
%b1 = insertelement <8 x i64> %a1, i64 %b, i64 1
diff --git a/llvm/test/CodeGen/AArch64/bti-branch-relaxation.ll b/llvm/test/CodeGen/AArch64/bti-branch-relaxation.ll
index 2e3b99f..c4bf7d2 100644
--- a/llvm/test/CodeGen/AArch64/bti-branch-relaxation.ll
+++ b/llvm/test/CodeGen/AArch64/bti-branch-relaxation.ll
@@ -61,4 +61,4 @@ declare dso_local void @e(...) local_unnamed_addr #0
declare dso_local i64 @llvm.aarch64.space(i32, i64) local_unnamed_addr #0
-attributes #0 = { nounwind "branch-target-enforcement" "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon,+v8.5a" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "branch-target-enforcement" "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon,+v8.5a" "use-soft-float"="false" }
diff --git a/llvm/test/CodeGen/AArch64/consthoist-gep.ll b/llvm/test/CodeGen/AArch64/consthoist-gep.ll
index 031ee35..7d2aaec 100644
--- a/llvm/test/CodeGen/AArch64/consthoist-gep.ll
+++ b/llvm/test/CodeGen/AArch64/consthoist-gep.ll
@@ -108,7 +108,7 @@ bb19: ; preds = %bb3, %bb
ret void
}
-attributes #0 = { norecurse nounwind optsize ssp "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { norecurse nounwind optsize ssp "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
!llvm.module.flags = !{!0, !1}
!llvm.ident = !{!2}
diff --git a/llvm/test/CodeGen/AArch64/csel-zero-float.ll b/llvm/test/CodeGen/AArch64/csel-zero-float.ll
index 6edde13..56a33cc 100644
--- a/llvm/test/CodeGen/AArch64/csel-zero-float.ll
+++ b/llvm/test/CodeGen/AArch64/csel-zero-float.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=aarch64-none-linux-gnu -enable-unsafe-fp-math < %s
+; RUN: llc -mtriple=aarch64-none-linux-gnu < %s
; There is no invocation to FileCheck as this
; caused a crash in "Post-RA pseudo instruction expansion"
diff --git a/llvm/test/CodeGen/AArch64/dag-combine-invaraints.ll b/llvm/test/CodeGen/AArch64/dag-combine-invaraints.ll
index 61df396..e561481 100644
--- a/llvm/test/CodeGen/AArch64/dag-combine-invaraints.ll
+++ b/llvm/test/CodeGen/AArch64/dag-combine-invaraints.ll
@@ -32,5 +32,5 @@ main_:
declare i32 @printf(ptr, ...) #1
-attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
diff --git a/llvm/test/CodeGen/AArch64/load-zext-bitcast.ll b/llvm/test/CodeGen/AArch64/load-zext-bitcast.ll
index 1a83930..9193025 100644
--- a/llvm/test/CodeGen/AArch64/load-zext-bitcast.ll
+++ b/llvm/test/CodeGen/AArch64/load-zext-bitcast.ll
@@ -2,8 +2,8 @@
; RUN: llc -mtriple=aarch64-linux-gnu -o - %s | FileCheck %s
; load zero-extended i32, bitcast to f64
-define double @_Z9load_u64_from_u32_testPj(ptr %n){
-; CHECK-LABEL: _Z9load_u64_from_u32_testPj:
+define double @load_u64_from_u32(ptr %n){
+; CHECK-LABEL: load_u64_from_u32:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ldr s0, [x0]
; CHECK-NEXT: ret
@@ -15,8 +15,8 @@ entry:
}
; load zero-extended i16, bitcast to f64
-define double @_Z9load_u64_from_u16_testPj(ptr %n){
-; CHECK-LABEL: _Z9load_u64_from_u16_testPj:
+define double @load_u64_from_u16(ptr %n){
+; CHECK-LABEL: load_u64_from_u16:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ldr h0, [x0]
; CHECK-NEXT: ret
@@ -28,8 +28,8 @@ entry:
}
; load zero-extended i8, bitcast to f64
-define double @_Z16load_u64_from_u8Ph(ptr %n){
-; CHECK-LABEL: _Z16load_u64_from_u8Ph:
+define double @load_u64_from_u8(ptr %n){
+; CHECK-LABEL: load_u64_from_u8:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ldr b0, [x0]
; CHECK-NEXT: ret
@@ -41,8 +41,8 @@ entry:
}
; load zero-extended i16, bitcast to f32
-define float @_Z17load_u32_from_u16Pt(ptr %n){
-; CHECK-LABEL: _Z17load_u32_from_u16Pt:
+define float @load_u32_from_u16(ptr %n){
+; CHECK-LABEL: load_u32_from_u16:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ldr h0, [x0]
; CHECK-NEXT: ret
@@ -54,8 +54,8 @@ entry:
}
; load zero-extended i8, bitcast to f32
-define float @_Z16load_u32_from_u8Ph(ptr %n){
-; CHECK-LABEL: _Z16load_u32_from_u8Ph:
+define float @load_u32_from_u8(ptr %n){
+; CHECK-LABEL: load_u32_from_u8:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ldr b0, [x0]
; CHECK-NEXT: ret
@@ -67,8 +67,8 @@ entry:
}
; load zero-extended i8, bitcast to f16
-define half @_Z16load_u16_from_u8Ph(ptr %n){
-; CHECK-LABEL: _Z16load_u16_from_u8Ph:
+define half @load_u16_from_u8(ptr %n){
+; CHECK-LABEL: load_u16_from_u8:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ldr b0, [x0]
; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
@@ -80,3 +80,504 @@ entry:
ret half %1
}
+
+define double @load_u64_from_u32_off1(ptr %n){
+; CHECK-LABEL: load_u64_from_u32_off1:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldur w8, [x0, #1]
+; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 1
+ %0 = load i32, ptr %p, align 4
+ %conv = zext i32 %0 to i64
+ %1 = bitcast i64 %conv to double
+ ret double %1
+}
+
+define double @load_u64_from_u16_off1(ptr %n){
+; CHECK-LABEL: load_u64_from_u16_off1:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldurh w8, [x0, #1]
+; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 1
+ %0 = load i16, ptr %p, align 2
+ %conv = zext i16 %0 to i64
+ %1 = bitcast i64 %conv to double
+ ret double %1
+}
+
+define double @load_u64_from_u8_off1(ptr %n){
+; CHECK-LABEL: load_u64_from_u8_off1:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldrb w8, [x0, #1]
+; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 1
+ %0 = load i8, ptr %p, align 1
+ %conv = zext i8 %0 to i64
+ %1 = bitcast i64 %conv to double
+ ret double %1
+}
+
+define float @load_u32_from_u16_off1(ptr %n){
+; CHECK-LABEL: load_u32_from_u16_off1:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldurh w8, [x0, #1]
+; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 1
+ %0 = load i16, ptr %p, align 2
+ %conv = zext i16 %0 to i32
+ %1 = bitcast i32 %conv to float
+ ret float %1
+}
+
+define float @load_u32_from_u8_off1(ptr %n){
+; CHECK-LABEL: load_u32_from_u8_off1:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldrb w8, [x0, #1]
+; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 1
+ %0 = load i8, ptr %p, align 1
+ %conv = zext i8 %0 to i32
+ %1 = bitcast i32 %conv to float
+ ret float %1
+}
+
+define half @load_u16_from_u8_off1(ptr %n){
+; CHECK-LABEL: load_u16_from_u8_off1:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldrb w8, [x0, #1]
+; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 1
+ %0 = load i8, ptr %p, align 1
+ %conv = zext i8 %0 to i16
+ %1 = bitcast i16 %conv to half
+ ret half %1
+}
+
+
+
+define double @load_u64_from_u32_off2(ptr %n){
+; CHECK-LABEL: load_u64_from_u32_off2:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldur w8, [x0, #2]
+; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 2
+ %0 = load i32, ptr %p, align 4
+ %conv = zext i32 %0 to i64
+ %1 = bitcast i64 %conv to double
+ ret double %1
+}
+
+define double @load_u64_from_u16_off2(ptr %n){
+; CHECK-LABEL: load_u64_from_u16_off2:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldrh w8, [x0, #2]
+; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 2
+ %0 = load i16, ptr %p, align 2
+ %conv = zext i16 %0 to i64
+ %1 = bitcast i64 %conv to double
+ ret double %1
+}
+
+define double @load_u64_from_u8_off2(ptr %n){
+; CHECK-LABEL: load_u64_from_u8_off2:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldrb w8, [x0, #2]
+; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 2
+ %0 = load i8, ptr %p, align 1
+ %conv = zext i8 %0 to i64
+ %1 = bitcast i64 %conv to double
+ ret double %1
+}
+
+define float @load_u32_from_u16_off2(ptr %n){
+; CHECK-LABEL: load_u32_from_u16_off2:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldr h0, [x0, #2]
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 2
+ %0 = load i16, ptr %p, align 2
+ %conv = zext i16 %0 to i32
+ %1 = bitcast i32 %conv to float
+ ret float %1
+}
+
+define float @load_u32_from_u8_off2(ptr %n){
+; CHECK-LABEL: load_u32_from_u8_off2:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldr b0, [x0, #1]
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 2
+ %0 = load i8, ptr %p, align 1
+ %conv = zext i8 %0 to i32
+ %1 = bitcast i32 %conv to float
+ ret float %1
+}
+
+define half @load_u16_from_u8_off2(ptr %n){
+; CHECK-LABEL: load_u16_from_u8_off2:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldr b0, [x0, #1]
+; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 2
+ %0 = load i8, ptr %p, align 1
+ %conv = zext i8 %0 to i16
+ %1 = bitcast i16 %conv to half
+ ret half %1
+}
+
+
+
+define double @load_u64_from_u32_off255(ptr %n){
+; CHECK-LABEL: load_u64_from_u32_off255:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldur w8, [x0, #255]
+; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 255
+ %0 = load i32, ptr %p, align 4
+ %conv = zext i32 %0 to i64
+ %1 = bitcast i64 %conv to double
+ ret double %1
+}
+
+define double @load_u64_from_u16_off255(ptr %n){
+; CHECK-LABEL: load_u64_from_u16_off255:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldurh w8, [x0, #255]
+; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 255
+ %0 = load i16, ptr %p, align 2
+ %conv = zext i16 %0 to i64
+ %1 = bitcast i64 %conv to double
+ ret double %1
+}
+
+define double @load_u64_from_u8_off255(ptr %n){
+; CHECK-LABEL: load_u64_from_u8_off255:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldrb w8, [x0, #255]
+; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 255
+ %0 = load i8, ptr %p, align 1
+ %conv = zext i8 %0 to i64
+ %1 = bitcast i64 %conv to double
+ ret double %1
+}
+
+define float @load_u32_from_u16_off255(ptr %n){
+; CHECK-LABEL: load_u32_from_u16_off255:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldurh w8, [x0, #255]
+; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 255
+ %0 = load i16, ptr %p, align 2
+ %conv = zext i16 %0 to i32
+ %1 = bitcast i32 %conv to float
+ ret float %1
+}
+
+define float @load_u32_from_u8_off255(ptr %n){
+; CHECK-LABEL: load_u32_from_u8_off255:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldrb w8, [x0, #255]
+; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 255
+ %0 = load i8, ptr %p, align 1
+ %conv = zext i8 %0 to i32
+ %1 = bitcast i32 %conv to float
+ ret float %1
+}
+
+define half @load_u16_from_u8_off255(ptr %n){
+; CHECK-LABEL: load_u16_from_u8_off255:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldrb w8, [x0, #255]
+; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 255
+ %0 = load i8, ptr %p, align 1
+ %conv = zext i8 %0 to i16
+ %1 = bitcast i16 %conv to half
+ ret half %1
+}
+
+
+define double @load_u64_from_u32_off256(ptr %n){
+; CHECK-LABEL: load_u64_from_u32_off256:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldr s0, [x0, #256]
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 256
+ %0 = load i32, ptr %p, align 4
+ %conv = zext i32 %0 to i64
+ %1 = bitcast i64 %conv to double
+ ret double %1
+}
+
+define double @load_u64_from_u16_off256(ptr %n){
+; CHECK-LABEL: load_u64_from_u16_off256:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldr h0, [x0, #128]
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 256
+ %0 = load i16, ptr %p, align 2
+ %conv = zext i16 %0 to i64
+ %1 = bitcast i64 %conv to double
+ ret double %1
+}
+
+define double @load_u64_from_u8_off256(ptr %n){
+; CHECK-LABEL: load_u64_from_u8_off256:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldr b0, [x0, #64]
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 256
+ %0 = load i8, ptr %p, align 1
+ %conv = zext i8 %0 to i64
+ %1 = bitcast i64 %conv to double
+ ret double %1
+}
+
+define float @load_u32_from_u16_off256(ptr %n){
+; CHECK-LABEL: load_u32_from_u16_off256:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldr h0, [x0, #256]
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 256
+ %0 = load i16, ptr %p, align 2
+ %conv = zext i16 %0 to i32
+ %1 = bitcast i32 %conv to float
+ ret float %1
+}
+
+define float @load_u32_from_u8_off256(ptr %n){
+; CHECK-LABEL: load_u32_from_u8_off256:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldr b0, [x0, #128]
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 256
+ %0 = load i8, ptr %p, align 1
+ %conv = zext i8 %0 to i32
+ %1 = bitcast i32 %conv to float
+ ret float %1
+}
+
+define half @load_u16_from_u8_off256(ptr %n){
+; CHECK-LABEL: load_u16_from_u8_off256:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldr b0, [x0, #128]
+; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 256
+ %0 = load i8, ptr %p, align 1
+ %conv = zext i8 %0 to i16
+ %1 = bitcast i16 %conv to half
+ ret half %1
+}
+
+
+
+define double @load_u64_from_u32_offn(ptr %n){
+; CHECK-LABEL: load_u64_from_u32_offn:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldr s0, [x0, #16380]
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 16380
+ %0 = load i32, ptr %p, align 4
+ %conv = zext i32 %0 to i64
+ %1 = bitcast i64 %conv to double
+ ret double %1
+}
+
+define double @load_u64_from_u16_offn(ptr %n){
+; CHECK-LABEL: load_u64_from_u16_offn:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov w8, #8190 // =0x1ffe
+; CHECK-NEXT: ldr h0, [x0, x8]
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 8190
+ %0 = load i16, ptr %p, align 2
+ %conv = zext i16 %0 to i64
+ %1 = bitcast i64 %conv to double
+ ret double %1
+}
+
+define double @load_u64_from_u8_offn(ptr %n){
+; CHECK-LABEL: load_u64_from_u8_offn:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldr b0, [x0, #4095]
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 4095
+ %0 = load i8, ptr %p, align 1
+ %conv = zext i8 %0 to i64
+ %1 = bitcast i64 %conv to double
+ ret double %1
+}
+
+define float @load_u32_from_u16_offn(ptr %n){
+; CHECK-LABEL: load_u32_from_u16_offn:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldr h0, [x0, #8190]
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 8190
+ %0 = load i16, ptr %p, align 2
+ %conv = zext i16 %0 to i32
+ %1 = bitcast i32 %conv to float
+ ret float %1
+}
+
+define float @load_u32_from_u8_offn(ptr %n){
+; CHECK-LABEL: load_u32_from_u8_offn:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldr b0, [x0, #4095]
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 4095
+ %0 = load i8, ptr %p, align 1
+ %conv = zext i8 %0 to i32
+ %1 = bitcast i32 %conv to float
+ ret float %1
+}
+
+define half @load_u16_from_u8_offn(ptr %n){
+; CHECK-LABEL: load_u16_from_u8_offn:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldr b0, [x0, #4095]
+; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 4095
+ %0 = load i8, ptr %p, align 1
+ %conv = zext i8 %0 to i16
+ %1 = bitcast i16 %conv to half
+ ret half %1
+}
+
+
+define double @load_u64_from_u32_offnp1(ptr %n){
+; CHECK-LABEL: load_u64_from_u32_offnp1:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: add x8, x0, #4, lsl #12 // =16384
+; CHECK-NEXT: ldr s0, [x8]
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 16384
+ %0 = load i32, ptr %p, align 4
+ %conv = zext i32 %0 to i64
+ %1 = bitcast i64 %conv to double
+ ret double %1
+}
+
+define double @load_u64_from_u16_offnp1(ptr %n){
+; CHECK-LABEL: load_u64_from_u16_offnp1:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldr h0, [x0, #4096]
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 8192
+ %0 = load i16, ptr %p, align 2
+ %conv = zext i16 %0 to i64
+ %1 = bitcast i64 %conv to double
+ ret double %1
+}
+
+define double @load_u64_from_u8_offnp1(ptr %n){
+; CHECK-LABEL: load_u64_from_u8_offnp1:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: ldr b0, [x0, #1024]
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 4096
+ %0 = load i8, ptr %p, align 1
+ %conv = zext i8 %0 to i64
+ %1 = bitcast i64 %conv to double
+ ret double %1
+}
+
+define float @load_u32_from_u16_offnp1(ptr %n){
+; CHECK-LABEL: load_u32_from_u16_offnp1:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: add x8, x0, #2, lsl #12 // =8192
+; CHECK-NEXT: ldr h0, [x8]
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 8192
+ %0 = load i16, ptr %p, align 2
+ %conv = zext i16 %0 to i32
+ %1 = bitcast i32 %conv to float
+ ret float %1
+}
+
+define float @load_u32_from_u8_offnp1(ptr %n){
+; CHECK-LABEL: load_u32_from_u8_offnp1:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: add x8, x0, #1, lsl #12 // =4096
+; CHECK-NEXT: ldr b0, [x8]
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 4096
+ %0 = load i8, ptr %p, align 1
+ %conv = zext i8 %0 to i32
+ %1 = bitcast i32 %conv to float
+ ret float %1
+}
+
+define half @load_u16_from_u8_offnp1(ptr %n){
+; CHECK-LABEL: load_u16_from_u8_offnp1:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: add x8, x0, #1, lsl #12 // =4096
+; CHECK-NEXT: ldr b0, [x8]
+; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
+; CHECK-NEXT: ret
+entry:
+ %p = getelementptr i8, ptr %n, i64 4096
+ %0 = load i8, ptr %p, align 1
+ %conv = zext i8 %0 to i16
+ %1 = bitcast i16 %conv to half
+ ret half %1
+}
diff --git a/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll b/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll
index c2ef2fa..00a8c30 100644
--- a/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll
+++ b/llvm/test/CodeGen/AArch64/partial-pipeline-execution.ll
@@ -74,7 +74,7 @@ for.body: ; preds = %for.body.preheader,
br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !10
}
-attributes #0 = { nofree norecurse nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="non-leaf" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nofree norecurse nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="non-leaf" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" }
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
diff --git a/llvm/test/CodeGen/AArch64/recp-fastmath.ll b/llvm/test/CodeGen/AArch64/recp-fastmath.ll
index 9f00621..fa1da33 100644
--- a/llvm/test/CodeGen/AArch64/recp-fastmath.ll
+++ b/llvm/test/CodeGen/AArch64/recp-fastmath.ll
@@ -164,5 +164,5 @@ define <4 x double> @d4recp1(<4 x double> %x) #1 {
; CHECK-NOT: frecps {{v[0-7]\.2d}}, {{v[0-7]\.2d}}, {{v[0-7]\.2d}}
}
-attributes #0 = { nounwind "unsafe-fp-math"="true" }
-attributes #1 = { nounwind "unsafe-fp-math"="true" "reciprocal-estimates"="div,vec-div" }
+attributes #0 = { nounwind }
+attributes #1 = { nounwind "reciprocal-estimates"="div,vec-div" }
diff --git a/llvm/test/CodeGen/AArch64/shrink-wrap-const-pool-access.mir b/llvm/test/CodeGen/AArch64/shrink-wrap-const-pool-access.mir
new file mode 100644
index 0000000..6f33a75
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/shrink-wrap-const-pool-access.mir
@@ -0,0 +1,76 @@
+# RUN: llc -mtriple=aarch64 -simplify-mir -run-pass=shrink-wrap -o - %s | FileCheck %s
+--- |
+ declare double @foo()
+
+ define double @shrink_wrap_load_from_const_pool(double %q) {
+ entry:
+ %0 = fcmp oeq double %q, 3.125500e+02
+ br i1 %0, label %common.ret, label %if.else
+
+ common.ret: ; preds = %if.else, %entry, %exit1
+ %common.ret.op = phi double [ %3, %exit1 ], [ 0.000000e+00, %entry ], [ 0.000000e+00, %if.else ]
+ ret double %common.ret.op
+
+ if.else: ; preds = %entry
+ %1 = call double @foo()
+ %2 = fcmp oeq double %1, 0.000000e+00
+ br i1 %2, label %exit1, label %common.ret
+
+ exit1: ; preds = %if.else
+ %3 = call double @foo()
+ br label %common.ret
+ }
+...
+# The following code loads from the constant pool. A constant-pool access
+# must not be treated as a stack access, so shrink wrapping must still
+# happen here.
+# CHECK-LABEL: name: shrink_wrap_load_from_const_pool
+# CHECK: savePoint:
+# CHECK: - point: '%bb.3'
+# CHECK: restorePoint:
+# CHECK: - point: '%bb.5'
+---
+name: shrink_wrap_load_from_const_pool
+tracksRegLiveness: true
+constants:
+ - id: 0
+ value: 'double 3.125500e+02'
+ alignment: 8
+body: |
+ bb.0.entry:
+ successors: %bb.4(0x50000000), %bb.2(0x30000000)
+ liveins: $d0
+
+ renamable $d1 = COPY $d0
+ renamable $x8 = ADRP target-flags(aarch64-page) %const.0
+ renamable $d2 = LDRDui killed renamable $x8, target-flags(aarch64-pageoff, aarch64-nc) %const.0 :: (load (s64) from constant-pool)
+ renamable $d0 = FMOVD0
+ nofpexcept FCMPDrr killed renamable $d1, killed renamable $d2, implicit-def $nzcv, implicit $fpcr
+ Bcc 1, %bb.2, implicit killed $nzcv
+
+ bb.4:
+ liveins: $d0
+
+ bb.1.common.ret:
+ liveins: $d0
+
+ RET_ReallyLR implicit $d0
+
+ bb.2.if.else:
+ successors: %bb.3(0x50000000), %bb.1(0x30000000)
+
+ ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ BL @foo, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $d0
+ ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ renamable $d1 = COPY $d0
+ renamable $d0 = FMOVD0
+ nofpexcept FCMPDri killed renamable $d1, implicit-def $nzcv, implicit $fpcr
+ Bcc 1, %bb.1, implicit killed $nzcv
+ B %bb.3
+
+ bb.3.exit1:
+ ADJCALLSTACKDOWN 0, 0, implicit-def dead $sp, implicit $sp
+ BL @foo, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp, implicit-def $d0
+ ADJCALLSTACKUP 0, 0, implicit-def dead $sp, implicit $sp
+ B %bb.1
+...
diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-ex-1.ll b/llvm/test/CodeGen/AArch64/stack-tagging-ex-1.ll
index 66ac04e..22abb8c 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-ex-1.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-ex-1.ll
@@ -64,6 +64,6 @@ declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1
; Function Attrs: argmemonly nounwind willreturn
declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1
-attributes #0 = { sanitize_memtag "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+mte,+neon,+v8.5a" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { sanitize_memtag "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+mte,+neon,+v8.5a" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind willreturn }
attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-ex-2.ll b/llvm/test/CodeGen/AArch64/stack-tagging-ex-2.ll
index e5725bc..d689a76 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-ex-2.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-ex-2.ll
@@ -158,10 +158,10 @@ eh.resume: ; preds = %lpad.body
resume { ptr, i32 } %eh.lpad-body
}
-attributes #0 = { noreturn sanitize_memtag "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+mte,+neon,+v8.5a" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { noreturn sanitize_memtag "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+mte,+neon,+v8.5a" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind willreturn }
attributes #2 = { nounwind readnone }
-attributes #3 = { norecurse sanitize_memtag "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+mte,+neon,+v8.5a" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #3 = { norecurse sanitize_memtag "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+mte,+neon,+v8.5a" "use-soft-float"="false" }
attributes #4 = { nounwind }
attributes #5 = { noreturn }
diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll b/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll
index 91adf82..7483622 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-untag-placement.ll
@@ -77,6 +77,6 @@ declare void @llvm.lifetime.start.p0(ptr nocapture) #1
declare void @llvm.lifetime.end.p0(ptr nocapture) #1
-attributes #0 = { sanitize_memtag "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+mte,+neon,+v8.5a" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { sanitize_memtag "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+mte,+neon,+v8.5a" "use-soft-float"="false" }
attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AArch64/stack_guard_remat.ll b/llvm/test/CodeGen/AArch64/stack_guard_remat.ll
index 523eda61..e41d82c 100644
--- a/llvm/test/CodeGen/AArch64/stack_guard_remat.ll
+++ b/llvm/test/CodeGen/AArch64/stack_guard_remat.ll
@@ -54,7 +54,7 @@ declare void @foo3(ptr)
; Function Attrs: nounwind
declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
-attributes #0 = { nounwind sspstrong "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind sspstrong "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
;--- pic.ll
!llvm.module.flags = !{!0}
diff --git a/llvm/test/CodeGen/AArch64/svtcf-fmul-fdiv-combine.ll b/llvm/test/CodeGen/AArch64/svtcf-fmul-fdiv-combine.ll
index f78fcea..b8dcd6f 100644
--- a/llvm/test/CodeGen/AArch64/svtcf-fmul-fdiv-combine.ll
+++ b/llvm/test/CodeGen/AArch64/svtcf-fmul-fdiv-combine.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -mtriple aarch64-none-linux-gnu -enable-unsafe-fp-math -mattr=+fullfp16 < %s | FileCheck %s
+; RUN: llc -mtriple aarch64-none-linux-gnu -mattr=+fullfp16 < %s | FileCheck %s
define half @scvtf_f16_2(i32 %state) {
; CHECK-LABEL: scvtf_f16_2:
diff --git a/llvm/test/CodeGen/AArch64/vector_merge_dep_check.ll b/llvm/test/CodeGen/AArch64/vector_merge_dep_check.ll
index 623ea22..89b3b89 100644
--- a/llvm/test/CodeGen/AArch64/vector_merge_dep_check.ll
+++ b/llvm/test/CodeGen/AArch64/vector_merge_dep_check.ll
@@ -24,7 +24,7 @@ define void @fn(ptr %argA, ptr %argB, ptr %a) #0 align 2 {
; CHECK: ret
-attributes #0 = { noinline norecurse nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "polly-optimized" "stack-protector-buffer-size"="8" "target-features"="+crc,+crypto,+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { noinline norecurse nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "polly-optimized" "stack-protector-buffer-size"="8" "target-features"="+crc,+crypto,+neon" "use-soft-float"="false" }
!llvm.ident = !{!0}
diff --git a/llvm/test/CodeGen/AArch64/wineh-frame5.mir b/llvm/test/CodeGen/AArch64/wineh-frame5.mir
index 97c5c85..32580f4 100644
--- a/llvm/test/CodeGen/AArch64/wineh-frame5.mir
+++ b/llvm/test/CodeGen/AArch64/wineh-frame5.mir
@@ -64,9 +64,9 @@
; Function Attrs: nounwind
declare void @llvm.stackprotector(ptr, ptr) #3
- attributes #0 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind }
- attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" }
attributes #3 = { nounwind }
...
diff --git a/llvm/test/CodeGen/AArch64/wineh-frame6.mir b/llvm/test/CodeGen/AArch64/wineh-frame6.mir
index 5ba7842..d76fae1 100644
--- a/llvm/test/CodeGen/AArch64/wineh-frame6.mir
+++ b/llvm/test/CodeGen/AArch64/wineh-frame6.mir
@@ -47,8 +47,8 @@
; Function Attrs: nounwind
declare void @llvm.stackprotector(ptr, ptr) #2
- attributes #0 = { noinline optnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
- attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { noinline optnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" }
+ attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" }
attributes #2 = { nounwind }
...
diff --git a/llvm/test/CodeGen/AArch64/wineh-frame7.mir b/llvm/test/CodeGen/AArch64/wineh-frame7.mir
index 1599098..d4e71d9 100644
--- a/llvm/test/CodeGen/AArch64/wineh-frame7.mir
+++ b/llvm/test/CodeGen/AArch64/wineh-frame7.mir
@@ -71,8 +71,8 @@
; Function Attrs: nounwind
declare void @llvm.stackprotector(ptr, ptr) #2
- attributes #0 = { noinline optnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
- attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { noinline optnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" }
+ attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" }
attributes #2 = { nounwind }
...
diff --git a/llvm/test/CodeGen/AArch64/wineh-frame8.mir b/llvm/test/CodeGen/AArch64/wineh-frame8.mir
index 9de99ac..56f92f2 100644
--- a/llvm/test/CodeGen/AArch64/wineh-frame8.mir
+++ b/llvm/test/CodeGen/AArch64/wineh-frame8.mir
@@ -29,7 +29,7 @@
ret i32 %add
}
- attributes #0 = { noinline nounwind optnone uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { noinline nounwind optnone uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" }
...
---
diff --git a/llvm/test/CodeGen/AArch64/wineh5.mir b/llvm/test/CodeGen/AArch64/wineh5.mir
index efdd4b0..1c09b78 100644
--- a/llvm/test/CodeGen/AArch64/wineh5.mir
+++ b/llvm/test/CodeGen/AArch64/wineh5.mir
@@ -73,8 +73,8 @@
; Function Attrs: nounwind
declare void @llvm.stackprotector(ptr, ptr) #2
- attributes #0 = { noinline optnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
- attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { noinline optnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" }
+ attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" }
attributes #2 = { nounwind }
!llvm.module.flags = !{!0}
diff --git a/llvm/test/CodeGen/AArch64/wineh_shrinkwrap.mir b/llvm/test/CodeGen/AArch64/wineh_shrinkwrap.mir
index 2f631c2..52d0dff 100644
--- a/llvm/test/CodeGen/AArch64/wineh_shrinkwrap.mir
+++ b/llvm/test/CodeGen/AArch64/wineh_shrinkwrap.mir
@@ -56,9 +56,9 @@
; Function Attrs: nounwind
declare void @llvm.stackprotector(ptr, ptr) #3
- attributes #0 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" }
attributes #1 = { argmemonly nounwind }
- attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" }
attributes #3 = { nounwind }
!llvm.module.flags = !{!0}
diff --git a/llvm/test/CodeGen/AArch64/zext-shuffle.ll b/llvm/test/CodeGen/AArch64/zext-shuffle.ll
index 20d2071..a0d4e18 100644
--- a/llvm/test/CodeGen/AArch64/zext-shuffle.ll
+++ b/llvm/test/CodeGen/AArch64/zext-shuffle.ll
@@ -674,10 +674,8 @@ define <4 x i32> @isUndefDeInterleave_t1_bad(<8 x i16> %a) {
define i16 @undeftop(<8 x i16> %0) {
; CHECK-LABEL: undeftop:
; CHECK: // %bb.0:
-; CHECK-NEXT: dup v0.8h, v0.h[4]
-; CHECK-NEXT: uaddl v0.4s, v0.4h, v0.4h
-; CHECK-NEXT: xtn v0.4h, v0.4s
-; CHECK-NEXT: umov w0, v0.h[0]
+; CHECK-NEXT: add v0.8h, v0.8h, v0.8h
+; CHECK-NEXT: umov w0, v0.h[4]
; CHECK-NEXT: ret
%2 = shufflevector <8 x i16> %0, <8 x i16> zeroinitializer, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 9, i32 7, i32 5, i32 3>
%3 = zext <8 x i16> %2 to <8 x i64>
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-unmerge-values.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-unmerge-values.mir
index d9ac9a7..de1bb47 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-unmerge-values.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-unmerge-values.mir
@@ -1,5 +1,5 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 -enable-unsafe-fp-math -run-pass=amdgpu-prelegalizer-combiner %s -o - | FileCheck -check-prefix=GFX10 %s
+# RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1010 -run-pass=amdgpu-prelegalizer-combiner %s -o - | FileCheck -check-prefix=GFX10 %s
# Test that we fold correct element from G_UNMERGE_VALUES into fma
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fract.f64.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fract.f64.mir
index 52b1beb..91f2f6f1 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fract.f64.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-fract.f64.mir
@@ -1,6 +1,6 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -enable-unsafe-fp-math -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefix=GFX10
-# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -enable-unsafe-fp-math -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefix=GFX11
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefix=GFX10
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefix=GFX11
---
name: fract_f64_neg
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll
index 5171403..7714c03 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i32.ll
@@ -140,7 +140,6 @@ define amdgpu_cs i32 @branch_divergent_ballot_eq_zero_non_compare(i32 %v) {
; CHECK-NEXT: v_and_b32_e32 v0, 1, v0
; CHECK-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; CHECK-NEXT: s_and_b32 s0, vcc_lo, exec_lo
-; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: s_cbranch_scc0 .LBB9_2
; CHECK-NEXT: ; %bb.1: ; %false
; CHECK-NEXT: s_mov_b32 s0, 33
@@ -345,7 +344,6 @@ define amdgpu_cs i32 @branch_divergent_ballot_eq_zero_and(i32 %v1, i32 %v2) {
; CHECK-NEXT: v_cmp_gt_u32_e32 vcc_lo, 12, v0
; CHECK-NEXT: v_cmp_lt_u32_e64 s0, 34, v1
; CHECK-NEXT: s_and_b32 s0, vcc_lo, s0
-; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: s_cbranch_scc0 .LBB17_2
; CHECK-NEXT: ; %bb.1: ; %false
; CHECK-NEXT: s_mov_b32 s0, 33
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll
index 7b01f13..7b81669 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ballot.i64.ll
@@ -143,7 +143,6 @@ define amdgpu_cs i32 @branch_divergent_ballot_eq_zero_non_compare(i32 %v) {
; CHECK-NEXT: v_and_b32_e32 v0, 1, v0
; CHECK-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; CHECK-NEXT: s_and_b64 s[0:1], vcc, exec
-; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT: s_cbranch_scc0 .LBB9_2
; CHECK-NEXT: ; %bb.1: ; %false
; CHECK-NEXT: s_mov_b32 s0, 33
@@ -348,7 +347,6 @@ define amdgpu_cs i32 @branch_divergent_ballot_eq_zero_and(i32 %v1, i32 %v2) {
; CHECK-NEXT: v_cmp_gt_u32_e32 vcc, 12, v0
; CHECK-NEXT: v_cmp_lt_u32_e64 s[0:1], 34, v1
; CHECK-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
-; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT: s_cbranch_scc0 .LBB17_2
; CHECK-NEXT: ; %bb.1: ; %false
; CHECK-NEXT: s_mov_b32 s0, 33
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/memory-legalizer-atomic-fence.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/memory-legalizer-atomic-fence.ll
index 002c03aa..e86f747 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/memory-legalizer-atomic-fence.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/memory-legalizer-atomic-fence.ll
@@ -551,7 +551,9 @@ define amdgpu_kernel void @workgroup_one_as_release() #0 {
;
; GFX10CU-LABEL: name: workgroup_one_as_release
; GFX10CU: bb.0.entry:
+ ; GFX10CU-NEXT: S_WAITCNT_soft 16240
; GFX10CU-NEXT: S_WAITCNT_lds_direct
+ ; GFX10CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: workgroup_one_as_release
@@ -562,6 +564,8 @@ define amdgpu_kernel void @workgroup_one_as_release() #0 {
;
; GFX11CU-LABEL: name: workgroup_one_as_release
; GFX11CU: bb.0.entry:
+ ; GFX11CU-NEXT: S_WAITCNT_soft 1015
+ ; GFX11CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
; GFX11CU-NEXT: S_ENDPGM 0
entry:
fence syncscope("workgroup-one-as") release
@@ -587,7 +591,9 @@ define amdgpu_kernel void @workgroup_one_as_acq_rel() #0 {
;
; GFX10CU-LABEL: name: workgroup_one_as_acq_rel
; GFX10CU: bb.0.entry:
+ ; GFX10CU-NEXT: S_WAITCNT_soft 16240
; GFX10CU-NEXT: S_WAITCNT_lds_direct
+ ; GFX10CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: workgroup_one_as_acq_rel
@@ -599,6 +605,8 @@ define amdgpu_kernel void @workgroup_one_as_acq_rel() #0 {
;
; GFX11CU-LABEL: name: workgroup_one_as_acq_rel
; GFX11CU: bb.0.entry:
+ ; GFX11CU-NEXT: S_WAITCNT_soft 1015
+ ; GFX11CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
; GFX11CU-NEXT: S_ENDPGM 0
entry:
fence syncscope("workgroup-one-as") acq_rel
@@ -624,7 +632,9 @@ define amdgpu_kernel void @workgroup_one_as_seq_cst() #0 {
;
; GFX10CU-LABEL: name: workgroup_one_as_seq_cst
; GFX10CU: bb.0.entry:
+ ; GFX10CU-NEXT: S_WAITCNT_soft 16240
; GFX10CU-NEXT: S_WAITCNT_lds_direct
+ ; GFX10CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: workgroup_one_as_seq_cst
@@ -636,6 +646,8 @@ define amdgpu_kernel void @workgroup_one_as_seq_cst() #0 {
;
; GFX11CU-LABEL: name: workgroup_one_as_seq_cst
; GFX11CU: bb.0.entry:
+ ; GFX11CU-NEXT: S_WAITCNT_soft 1015
+ ; GFX11CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
; GFX11CU-NEXT: S_ENDPGM 0
entry:
fence syncscope("workgroup-one-as") seq_cst
@@ -1305,8 +1317,9 @@ define amdgpu_kernel void @workgroup_release() #0 {
;
; GFX10CU-LABEL: name: workgroup_release
; GFX10CU: bb.0.entry:
- ; GFX10CU-NEXT: S_WAITCNT_soft 49279
+ ; GFX10CU-NEXT: S_WAITCNT_soft 112
; GFX10CU-NEXT: S_WAITCNT_lds_direct
+ ; GFX10CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: workgroup_release
@@ -1317,7 +1330,8 @@ define amdgpu_kernel void @workgroup_release() #0 {
;
; GFX11CU-LABEL: name: workgroup_release
; GFX11CU: bb.0.entry:
- ; GFX11CU-NEXT: S_WAITCNT_soft 64519
+ ; GFX11CU-NEXT: S_WAITCNT_soft 7
+ ; GFX11CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
; GFX11CU-NEXT: S_ENDPGM 0
entry:
fence syncscope("workgroup") release
@@ -1345,8 +1359,9 @@ define amdgpu_kernel void @workgroup_acq_rel() #0 {
;
; GFX10CU-LABEL: name: workgroup_acq_rel
; GFX10CU: bb.0.entry:
- ; GFX10CU-NEXT: S_WAITCNT_soft 49279
+ ; GFX10CU-NEXT: S_WAITCNT_soft 112
; GFX10CU-NEXT: S_WAITCNT_lds_direct
+ ; GFX10CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: workgroup_acq_rel
@@ -1358,7 +1373,8 @@ define amdgpu_kernel void @workgroup_acq_rel() #0 {
;
; GFX11CU-LABEL: name: workgroup_acq_rel
; GFX11CU: bb.0.entry:
- ; GFX11CU-NEXT: S_WAITCNT_soft 64519
+ ; GFX11CU-NEXT: S_WAITCNT_soft 7
+ ; GFX11CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
; GFX11CU-NEXT: S_ENDPGM 0
entry:
fence syncscope("workgroup") acq_rel
@@ -1386,8 +1402,9 @@ define amdgpu_kernel void @workgroup_seq_cst() #0 {
;
; GFX10CU-LABEL: name: workgroup_seq_cst
; GFX10CU: bb.0.entry:
- ; GFX10CU-NEXT: S_WAITCNT_soft 49279
+ ; GFX10CU-NEXT: S_WAITCNT_soft 112
; GFX10CU-NEXT: S_WAITCNT_lds_direct
+ ; GFX10CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
; GFX10CU-NEXT: S_ENDPGM 0
;
; GFX11WGP-LABEL: name: workgroup_seq_cst
@@ -1399,7 +1416,8 @@ define amdgpu_kernel void @workgroup_seq_cst() #0 {
;
; GFX11CU-LABEL: name: workgroup_seq_cst
; GFX11CU: bb.0.entry:
- ; GFX11CU-NEXT: S_WAITCNT_soft 64519
+ ; GFX11CU-NEXT: S_WAITCNT_soft 7
+ ; GFX11CU-NEXT: S_WAITCNT_VSCNT_soft undef $sgpr_null, 0
; GFX11CU-NEXT: S_ENDPGM 0
entry:
fence syncscope("workgroup") seq_cst
diff --git a/llvm/test/CodeGen/AMDGPU/add-max.ll b/llvm/test/CodeGen/AMDGPU/add-max.ll
index 00c6656..b3a7057 100644
--- a/llvm/test/CodeGen/AMDGPU/add-max.ll
+++ b/llvm/test/CodeGen/AMDGPU/add-max.ll
@@ -5,7 +5,7 @@
define amdgpu_ps float @add_max_u32_vvv(i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: add_max_u32_vvv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_max_u32_e64 v0, v0, v1, v2
+; GCN-NEXT: v_add_max_u32 v0, v0, v1, v2
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 %c)
@@ -16,7 +16,7 @@ define amdgpu_ps float @add_max_u32_vvv(i32 %a, i32 %b, i32 %c) {
define amdgpu_ps float @add_max_u32_svv(i32 inreg %a, i32 %b, i32 %c) {
; GCN-LABEL: add_max_u32_svv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_max_u32_e64 v0, s0, v0, v1
+; GCN-NEXT: v_add_max_u32 v0, s0, v0, v1
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 %c)
@@ -27,7 +27,7 @@ define amdgpu_ps float @add_max_u32_svv(i32 inreg %a, i32 %b, i32 %c) {
define amdgpu_ps float @add_max_u32_ssv(i32 inreg %a, i32 inreg %b, i32 %c) {
; SDAG-LABEL: add_max_u32_ssv:
; SDAG: ; %bb.0:
-; SDAG-NEXT: v_add_max_u32_e64 v0, s0, s1, v0
+; SDAG-NEXT: v_add_max_u32 v0, s0, s1, v0
; SDAG-NEXT: ; return to shader part epilog
;
; GISEL-LABEL: add_max_u32_ssv:
@@ -59,7 +59,7 @@ define amdgpu_ps float @add_max_u32_sss(i32 inreg %a, i32 inreg %b, i32 inreg %c
define amdgpu_ps float @add_max_u32_vsi(i32 %a, i32 inreg %b) {
; GCN-LABEL: add_max_u32_vsi:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_max_u32_e64 v0, v0, s0, 4
+; GCN-NEXT: v_add_max_u32 v0, v0, s0, 4
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 4)
@@ -70,7 +70,7 @@ define amdgpu_ps float @add_max_u32_vsi(i32 %a, i32 inreg %b) {
define amdgpu_ps float @add_max_u32_svl(i32 inreg %a, i32 %b) {
; GCN-LABEL: add_max_u32_svl:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_max_u32_e64 v0, s0, v0, 0x64
+; GCN-NEXT: v_add_max_u32 v0, s0, v0, 0x64
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umax.i32(i32 %add, i32 100)
@@ -81,7 +81,7 @@ define amdgpu_ps float @add_max_u32_svl(i32 inreg %a, i32 %b) {
define amdgpu_ps float @add_max_u32_slv(i32 inreg %a, i32 %b) {
; SDAG-LABEL: add_max_u32_slv:
; SDAG: ; %bb.0:
-; SDAG-NEXT: v_add_max_u32_e64 v0, 0x64, s0, v0
+; SDAG-NEXT: v_add_max_u32 v0, 0x64, s0, v0
; SDAG-NEXT: ; return to shader part epilog
;
; GISEL-LABEL: add_max_u32_slv:
@@ -99,7 +99,7 @@ define amdgpu_ps float @add_max_u32_slv(i32 inreg %a, i32 %b) {
define amdgpu_ps float @add_max_i32_vvv(i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: add_max_i32_vvv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_max_i32_e64 v0, v0, v1, v2
+; GCN-NEXT: v_add_max_i32 v0, v0, v1, v2
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.smax.i32(i32 %add, i32 %c)
@@ -110,7 +110,7 @@ define amdgpu_ps float @add_max_i32_vvv(i32 %a, i32 %b, i32 %c) {
define amdgpu_ps float @add_min_u32_vvv(i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: add_min_u32_vvv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_min_u32_e64 v0, v0, v1, v2
+; GCN-NEXT: v_add_min_u32 v0, v0, v1, v2
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.umin.i32(i32 %add, i32 %c)
@@ -121,7 +121,7 @@ define amdgpu_ps float @add_min_u32_vvv(i32 %a, i32 %b, i32 %c) {
define amdgpu_ps float @add_min_i32_vvv(i32 %a, i32 %b, i32 %c) {
; GCN-LABEL: add_min_i32_vvv:
; GCN: ; %bb.0:
-; GCN-NEXT: v_add_min_i32_e64 v0, v0, v1, v2
+; GCN-NEXT: v_add_min_i32 v0, v0, v1, v2
; GCN-NEXT: ; return to shader part epilog
%add = add i32 %a, %b
%max = call i32 @llvm.smin.i32(i32 %add, i32 %c)
diff --git a/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll b/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
index b72eba8..8088c1b 100644
--- a/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
+++ b/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
@@ -180,11 +180,7 @@ define amdgpu_ps %struct.uint96 @s_add64_32(i64 inreg %val64A, i64 inreg %val64B
; CHECK-LABEL: s_add64_32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_add_u32 s0, s0, s2
-; CHECK-NEXT: s_cselect_b64 s[6:7], -1, 0
-; CHECK-NEXT: s_cmp_lg_u64 s[6:7], 0
; CHECK-NEXT: s_addc_u32 s1, s1, s3
-; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
-; CHECK-NEXT: s_cmp_lg_u64 s[2:3], 0
; CHECK-NEXT: s_addc_u32 s2, s4, 0
; CHECK-NEXT: ; return to shader part epilog
%sum64 = add i64 %val64A, %val64B
@@ -199,14 +195,10 @@ define amdgpu_ps %struct.uint96 @s_add64_32(i64 inreg %val64A, i64 inreg %val64B
define amdgpu_ps <2 x i64> @s_uadd_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_uadd_v2i64:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_add_u32 s10, s2, s6
-; CHECK-NEXT: s_cselect_b64 s[8:9], -1, 0
-; CHECK-NEXT: s_cmp_lg_u64 s[8:9], 0
-; CHECK-NEXT: s_addc_u32 s8, s3, s7
+; CHECK-NEXT: s_add_u32 s6, s2, s6
+; CHECK-NEXT: s_addc_u32 s7, s3, s7
; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
; CHECK-NEXT: s_add_u32 s0, s0, s4
-; CHECK-NEXT: s_cselect_b64 s[6:7], -1, 0
-; CHECK-NEXT: s_cmp_lg_u64 s[6:7], 0
; CHECK-NEXT: s_addc_u32 s1, s1, s5
; CHECK-NEXT: v_mov_b32_e32 v2, s0
; CHECK-NEXT: v_mov_b32_e32 v3, s1
@@ -215,8 +207,8 @@ define amdgpu_ps <2 x i64> @s_uadd_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg
; CHECK-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v7
; CHECK-NEXT: v_readfirstlane_b32 s2, v6
-; CHECK-NEXT: v_mov_b32_e32 v4, s10
-; CHECK-NEXT: v_mov_b32_e32 v5, s8
+; CHECK-NEXT: v_mov_b32_e32 v4, s6
+; CHECK-NEXT: v_mov_b32_e32 v5, s7
; CHECK-NEXT: s_mov_b32 s1, s0
; CHECK-NEXT: s_mov_b32 s3, s2
; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
@@ -233,14 +225,10 @@ define amdgpu_ps <2 x i64> @s_uadd_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg
define amdgpu_ps <2 x i64> @s_usub_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_usub_v2i64:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_sub_u32 s10, s2, s6
-; CHECK-NEXT: s_cselect_b64 s[8:9], -1, 0
-; CHECK-NEXT: s_cmp_lg_u64 s[8:9], 0
-; CHECK-NEXT: s_subb_u32 s8, s3, s7
+; CHECK-NEXT: s_sub_u32 s6, s2, s6
+; CHECK-NEXT: s_subb_u32 s7, s3, s7
; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
; CHECK-NEXT: s_sub_u32 s0, s0, s4
-; CHECK-NEXT: s_cselect_b64 s[6:7], -1, 0
-; CHECK-NEXT: s_cmp_lg_u64 s[6:7], 0
; CHECK-NEXT: s_subb_u32 s1, s1, s5
; CHECK-NEXT: v_mov_b32_e32 v2, s0
; CHECK-NEXT: v_mov_b32_e32 v3, s1
@@ -249,8 +237,8 @@ define amdgpu_ps <2 x i64> @s_usub_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg
; CHECK-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v7
; CHECK-NEXT: v_readfirstlane_b32 s2, v6
-; CHECK-NEXT: v_mov_b32_e32 v4, s10
-; CHECK-NEXT: v_mov_b32_e32 v5, s8
+; CHECK-NEXT: v_mov_b32_e32 v4, s6
+; CHECK-NEXT: v_mov_b32_e32 v5, s7
; CHECK-NEXT: s_mov_b32 s1, s0
; CHECK-NEXT: s_mov_b32 s3, s2
; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
@@ -268,8 +256,6 @@ define amdgpu_ps i64 @s_uadd_i64(i64 inreg %val0, i64 inreg %val1, ptr %ptrval)
; CHECK-LABEL: s_uadd_i64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_add_u32 s0, s0, s2
-; CHECK-NEXT: s_cselect_b64 s[4:5], -1, 0
-; CHECK-NEXT: s_cmp_lg_u64 s[4:5], 0
; CHECK-NEXT: s_addc_u32 s1, s1, s3
; CHECK-NEXT: v_mov_b32_e32 v2, s0
; CHECK-NEXT: v_mov_b32_e32 v3, s1
@@ -292,8 +278,6 @@ define amdgpu_ps i64 @s_uadd_p1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_uadd_p1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_add_u32 s0, s0, 1
-; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
-; CHECK-NEXT: s_cmp_lg_u64 s[2:3], 0
; CHECK-NEXT: s_addc_u32 s1, s1, 0
; CHECK-NEXT: v_mov_b32_e32 v2, s0
; CHECK-NEXT: v_mov_b32_e32 v3, s1
@@ -339,8 +323,6 @@ define amdgpu_ps i64 @s_usub_p1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_usub_p1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_sub_u32 s0, s0, 1
-; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
-; CHECK-NEXT: s_cmp_lg_u64 s[2:3], 0
; CHECK-NEXT: s_subb_u32 s1, s1, 0
; CHECK-NEXT: v_mov_b32_e32 v2, s0
; CHECK-NEXT: v_mov_b32_e32 v3, s1
@@ -363,8 +345,6 @@ define amdgpu_ps i64 @s_usub_n1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_usub_n1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_sub_u32 s0, s0, -1
-; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
-; CHECK-NEXT: s_cmp_lg_u64 s[2:3], 0
; CHECK-NEXT: s_subb_u32 s1, s1, -1
; CHECK-NEXT: v_mov_b32_e32 v2, s0
; CHECK-NEXT: v_mov_b32_e32 v3, s1
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
index 948811e..51df8c3 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
@@ -7821,10 +7821,9 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
; GFX6-NEXT: s_addc_u32 s15, 0, s16
; GFX6-NEXT: s_add_u32 s16, s0, s1
; GFX6-NEXT: v_mov_b32_e32 v0, s16
-; GFX6-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX6-NEXT: v_mul_hi_u32 v0, s12, v0
+; GFX6-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX6-NEXT: s_or_b32 s0, s0, s1
-; GFX6-NEXT: s_cmp_lg_u32 s0, 0
; GFX6-NEXT: s_addc_u32 s14, s14, s15
; GFX6-NEXT: s_mul_i32 s0, s12, s14
; GFX6-NEXT: v_readfirstlane_b32 s1, v0
@@ -7855,7 +7854,6 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
; GFX6-NEXT: s_add_u32 s15, s16, s0
; GFX6-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX6-NEXT: s_or_b32 s0, s0, s1
-; GFX6-NEXT: s_cmp_lg_u32 s0, 0
; GFX6-NEXT: s_addc_u32 s14, s14, s12
; GFX6-NEXT: s_ashr_i32 s12, s7, 31
; GFX6-NEXT: s_add_u32 s0, s6, s12
@@ -7881,52 +7879,50 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
; GFX6-NEXT: v_readfirstlane_b32 s4, v0
; GFX6-NEXT: s_addc_u32 s4, s4, 0
; GFX6-NEXT: s_mul_i32 s14, s7, s14
-; GFX6-NEXT: s_add_u32 s14, s1, s14
-; GFX6-NEXT: v_mov_b32_e32 v0, s14
+; GFX6-NEXT: s_add_u32 s16, s1, s14
+; GFX6-NEXT: v_mov_b32_e32 v0, s16
; GFX6-NEXT: v_mul_hi_u32 v0, s10, v0
-; GFX6-NEXT: s_addc_u32 s15, 0, s4
+; GFX6-NEXT: s_addc_u32 s17, 0, s4
; GFX6-NEXT: s_mov_b32 s1, s5
-; GFX6-NEXT: s_mul_i32 s4, s10, s15
+; GFX6-NEXT: s_mul_i32 s4, s10, s17
; GFX6-NEXT: v_readfirstlane_b32 s5, v0
; GFX6-NEXT: s_add_i32 s4, s5, s4
-; GFX6-NEXT: s_mul_i32 s5, s11, s14
-; GFX6-NEXT: s_add_i32 s16, s4, s5
-; GFX6-NEXT: s_sub_i32 s17, s7, s16
-; GFX6-NEXT: s_mul_i32 s4, s10, s14
+; GFX6-NEXT: s_mul_i32 s5, s11, s16
+; GFX6-NEXT: s_add_i32 s18, s4, s5
+; GFX6-NEXT: s_sub_i32 s14, s7, s18
+; GFX6-NEXT: s_mul_i32 s4, s10, s16
; GFX6-NEXT: s_sub_u32 s6, s6, s4
; GFX6-NEXT: s_cselect_b64 s[4:5], -1, 0
-; GFX6-NEXT: s_or_b32 s18, s4, s5
-; GFX6-NEXT: s_cmp_lg_u32 s18, 0
-; GFX6-NEXT: s_subb_u32 s17, s17, s11
-; GFX6-NEXT: s_sub_u32 s19, s6, s10
-; GFX6-NEXT: s_cselect_b64 s[4:5], -1, 0
+; GFX6-NEXT: s_or_b32 s15, s4, s5
+; GFX6-NEXT: s_subb_u32 s19, s14, s11
+; GFX6-NEXT: s_sub_u32 s20, s6, s10
+; GFX6-NEXT: s_cselect_b64 s[14:15], -1, 0
+; GFX6-NEXT: s_or_b32 s14, s14, s15
+; GFX6-NEXT: s_subb_u32 s14, s19, 0
+; GFX6-NEXT: s_cmp_ge_u32 s14, s11
+; GFX6-NEXT: s_cselect_b32 s15, -1, 0
+; GFX6-NEXT: s_cmp_ge_u32 s20, s10
+; GFX6-NEXT: s_cselect_b32 s19, -1, 0
+; GFX6-NEXT: s_cmp_eq_u32 s14, s11
+; GFX6-NEXT: s_cselect_b32 s14, s19, s15
+; GFX6-NEXT: s_add_u32 s15, s16, 1
+; GFX6-NEXT: s_addc_u32 s19, s17, 0
+; GFX6-NEXT: s_add_u32 s20, s16, 2
+; GFX6-NEXT: s_addc_u32 s21, s17, 0
+; GFX6-NEXT: s_cmp_lg_u32 s14, 0
+; GFX6-NEXT: s_cselect_b32 s14, s20, s15
+; GFX6-NEXT: s_cselect_b32 s15, s21, s19
; GFX6-NEXT: s_or_b32 s4, s4, s5
-; GFX6-NEXT: s_cmp_lg_u32 s4, 0
-; GFX6-NEXT: s_subb_u32 s4, s17, 0
+; GFX6-NEXT: s_subb_u32 s4, s7, s18
; GFX6-NEXT: s_cmp_ge_u32 s4, s11
; GFX6-NEXT: s_cselect_b32 s5, -1, 0
-; GFX6-NEXT: s_cmp_ge_u32 s19, s10
-; GFX6-NEXT: s_cselect_b32 s17, -1, 0
-; GFX6-NEXT: s_cmp_eq_u32 s4, s11
-; GFX6-NEXT: s_cselect_b32 s4, s17, s5
-; GFX6-NEXT: s_add_u32 s5, s14, 1
-; GFX6-NEXT: s_addc_u32 s17, s15, 0
-; GFX6-NEXT: s_add_u32 s19, s14, 2
-; GFX6-NEXT: s_addc_u32 s20, s15, 0
-; GFX6-NEXT: s_cmp_lg_u32 s4, 0
-; GFX6-NEXT: s_cselect_b32 s4, s19, s5
-; GFX6-NEXT: s_cselect_b32 s5, s20, s17
-; GFX6-NEXT: s_cmp_lg_u32 s18, 0
-; GFX6-NEXT: s_subb_u32 s7, s7, s16
-; GFX6-NEXT: s_cmp_ge_u32 s7, s11
-; GFX6-NEXT: s_cselect_b32 s16, -1, 0
; GFX6-NEXT: s_cmp_ge_u32 s6, s10
; GFX6-NEXT: s_cselect_b32 s6, -1, 0
-; GFX6-NEXT: s_cmp_eq_u32 s7, s11
-; GFX6-NEXT: s_cselect_b32 s6, s6, s16
-; GFX6-NEXT: s_cmp_lg_u32 s6, 0
-; GFX6-NEXT: s_cselect_b32 s5, s5, s15
-; GFX6-NEXT: s_cselect_b32 s4, s4, s14
+; GFX6-NEXT: s_cmp_eq_u32 s4, s11
+; GFX6-NEXT: s_cselect_b32 s4, s6, s5
+; GFX6-NEXT: s_cmp_lg_u32 s4, 0
+; GFX6-NEXT: s_cselect_b32 s5, s15, s17
+; GFX6-NEXT: s_cselect_b32 s4, s14, s16
; GFX6-NEXT: s_xor_b64 s[6:7], s[12:13], s[8:9]
; GFX6-NEXT: s_xor_b64 s[4:5], s[4:5], s[6:7]
; GFX6-NEXT: s_sub_u32 s4, s4, s6
@@ -7949,8 +7945,8 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s8
; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s9
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX9-NEXT: s_sub_u32 s10, 0, s8
-; GFX9-NEXT: s_subb_u32 s11, 0, s9
+; GFX9-NEXT: s_sub_u32 s4, 0, s8
+; GFX9-NEXT: s_subb_u32 s5, 0, s9
; GFX9-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0
; GFX9-NEXT: v_rcp_f32_e32 v1, v0
; GFX9-NEXT: v_mov_b32_e32 v0, 0
@@ -7960,56 +7956,52 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
; GFX9-NEXT: v_madmk_f32 v1, v2, 0xcf800000, v1
; GFX9-NEXT: v_cvt_u32_f32_e32 v2, v2
; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1
-; GFX9-NEXT: v_readfirstlane_b32 s12, v2
-; GFX9-NEXT: v_readfirstlane_b32 s4, v1
-; GFX9-NEXT: s_mul_i32 s5, s10, s12
-; GFX9-NEXT: s_mul_hi_u32 s14, s10, s4
-; GFX9-NEXT: s_mul_i32 s13, s11, s4
-; GFX9-NEXT: s_add_i32 s5, s14, s5
-; GFX9-NEXT: s_mul_i32 s15, s10, s4
-; GFX9-NEXT: s_add_i32 s5, s5, s13
-; GFX9-NEXT: s_mul_hi_u32 s14, s4, s15
-; GFX9-NEXT: s_mul_i32 s16, s4, s5
-; GFX9-NEXT: s_mul_hi_u32 s13, s4, s5
+; GFX9-NEXT: v_readfirstlane_b32 s10, v2
+; GFX9-NEXT: v_readfirstlane_b32 s11, v1
+; GFX9-NEXT: s_mul_i32 s12, s4, s10
+; GFX9-NEXT: s_mul_hi_u32 s14, s4, s11
+; GFX9-NEXT: s_mul_i32 s13, s5, s11
+; GFX9-NEXT: s_add_i32 s12, s14, s12
+; GFX9-NEXT: s_mul_i32 s15, s4, s11
+; GFX9-NEXT: s_add_i32 s12, s12, s13
+; GFX9-NEXT: s_mul_hi_u32 s14, s11, s15
+; GFX9-NEXT: s_mul_i32 s16, s11, s12
+; GFX9-NEXT: s_mul_hi_u32 s13, s11, s12
; GFX9-NEXT: s_add_u32 s14, s14, s16
; GFX9-NEXT: s_addc_u32 s13, 0, s13
-; GFX9-NEXT: s_mul_hi_u32 s17, s12, s15
-; GFX9-NEXT: s_mul_i32 s15, s12, s15
+; GFX9-NEXT: s_mul_hi_u32 s17, s10, s15
+; GFX9-NEXT: s_mul_i32 s15, s10, s15
; GFX9-NEXT: s_add_u32 s14, s14, s15
-; GFX9-NEXT: s_mul_hi_u32 s16, s12, s5
+; GFX9-NEXT: s_mul_hi_u32 s16, s10, s12
; GFX9-NEXT: s_addc_u32 s13, s13, s17
; GFX9-NEXT: s_addc_u32 s14, s16, 0
-; GFX9-NEXT: s_mul_i32 s5, s12, s5
-; GFX9-NEXT: s_add_u32 s5, s13, s5
+; GFX9-NEXT: s_mul_i32 s12, s10, s12
+; GFX9-NEXT: s_add_u32 s12, s13, s12
; GFX9-NEXT: s_addc_u32 s13, 0, s14
-; GFX9-NEXT: s_add_u32 s14, s4, s5
-; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0
-; GFX9-NEXT: s_addc_u32 s12, s12, s13
-; GFX9-NEXT: s_mul_i32 s4, s10, s12
-; GFX9-NEXT: s_mul_hi_u32 s5, s10, s14
-; GFX9-NEXT: s_add_i32 s4, s5, s4
-; GFX9-NEXT: s_mul_i32 s11, s11, s14
-; GFX9-NEXT: s_add_i32 s4, s4, s11
-; GFX9-NEXT: s_mul_i32 s10, s10, s14
-; GFX9-NEXT: s_mul_hi_u32 s11, s12, s10
-; GFX9-NEXT: s_mul_i32 s13, s12, s10
-; GFX9-NEXT: s_mul_i32 s16, s14, s4
-; GFX9-NEXT: s_mul_hi_u32 s10, s14, s10
-; GFX9-NEXT: s_mul_hi_u32 s15, s14, s4
-; GFX9-NEXT: s_add_u32 s10, s10, s16
+; GFX9-NEXT: s_add_u32 s11, s11, s12
+; GFX9-NEXT: s_addc_u32 s10, s10, s13
+; GFX9-NEXT: s_mul_i32 s12, s4, s10
+; GFX9-NEXT: s_mul_hi_u32 s13, s4, s11
+; GFX9-NEXT: s_add_i32 s12, s13, s12
+; GFX9-NEXT: s_mul_i32 s5, s5, s11
+; GFX9-NEXT: s_add_i32 s12, s12, s5
+; GFX9-NEXT: s_mul_i32 s4, s4, s11
+; GFX9-NEXT: s_mul_hi_u32 s13, s10, s4
+; GFX9-NEXT: s_mul_i32 s14, s10, s4
+; GFX9-NEXT: s_mul_i32 s16, s11, s12
+; GFX9-NEXT: s_mul_hi_u32 s4, s11, s4
+; GFX9-NEXT: s_mul_hi_u32 s15, s11, s12
+; GFX9-NEXT: s_add_u32 s4, s4, s16
; GFX9-NEXT: s_addc_u32 s15, 0, s15
-; GFX9-NEXT: s_add_u32 s10, s10, s13
-; GFX9-NEXT: s_mul_hi_u32 s5, s12, s4
-; GFX9-NEXT: s_addc_u32 s10, s15, s11
+; GFX9-NEXT: s_add_u32 s4, s4, s14
+; GFX9-NEXT: s_mul_hi_u32 s5, s10, s12
+; GFX9-NEXT: s_addc_u32 s4, s15, s13
; GFX9-NEXT: s_addc_u32 s5, s5, 0
-; GFX9-NEXT: s_mul_i32 s4, s12, s4
-; GFX9-NEXT: s_add_u32 s4, s10, s4
-; GFX9-NEXT: s_addc_u32 s10, 0, s5
-; GFX9-NEXT: s_add_u32 s11, s14, s4
-; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0
-; GFX9-NEXT: s_addc_u32 s10, s12, s10
+; GFX9-NEXT: s_mul_i32 s12, s10, s12
+; GFX9-NEXT: s_add_u32 s4, s4, s12
+; GFX9-NEXT: s_addc_u32 s5, 0, s5
+; GFX9-NEXT: s_add_u32 s11, s11, s4
+; GFX9-NEXT: s_addc_u32 s10, s10, s5
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_ashr_i32 s4, s3, 31
; GFX9-NEXT: s_add_u32 s2, s2, s4
@@ -8028,38 +8020,35 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
; GFX9-NEXT: s_addc_u32 s11, s12, s15
; GFX9-NEXT: s_addc_u32 s12, s14, 0
; GFX9-NEXT: s_mul_i32 s10, s3, s10
-; GFX9-NEXT: s_add_u32 s14, s11, s10
-; GFX9-NEXT: s_addc_u32 s15, 0, s12
-; GFX9-NEXT: s_mul_i32 s10, s8, s15
-; GFX9-NEXT: s_mul_hi_u32 s11, s8, s14
+; GFX9-NEXT: s_add_u32 s13, s11, s10
+; GFX9-NEXT: s_addc_u32 s12, 0, s12
+; GFX9-NEXT: s_mul_i32 s10, s8, s12
+; GFX9-NEXT: s_mul_hi_u32 s11, s8, s13
; GFX9-NEXT: s_add_i32 s10, s11, s10
-; GFX9-NEXT: s_mul_i32 s11, s9, s14
-; GFX9-NEXT: s_add_i32 s16, s10, s11
-; GFX9-NEXT: s_sub_i32 s12, s3, s16
-; GFX9-NEXT: s_mul_i32 s10, s8, s14
+; GFX9-NEXT: s_mul_i32 s11, s9, s13
+; GFX9-NEXT: s_add_i32 s14, s10, s11
+; GFX9-NEXT: s_sub_i32 s15, s3, s14
+; GFX9-NEXT: s_mul_i32 s10, s8, s13
; GFX9-NEXT: s_sub_u32 s2, s2, s10
; GFX9-NEXT: s_cselect_b64 s[10:11], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[10:11], 0
-; GFX9-NEXT: s_subb_u32 s17, s12, s9
-; GFX9-NEXT: s_sub_u32 s18, s2, s8
-; GFX9-NEXT: s_cselect_b64 s[12:13], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[12:13], 0
-; GFX9-NEXT: s_subb_u32 s12, s17, 0
-; GFX9-NEXT: s_cmp_ge_u32 s12, s9
-; GFX9-NEXT: s_cselect_b32 s13, -1, 0
-; GFX9-NEXT: s_cmp_ge_u32 s18, s8
+; GFX9-NEXT: s_subb_u32 s15, s15, s9
+; GFX9-NEXT: s_sub_u32 s16, s2, s8
+; GFX9-NEXT: s_subb_u32 s15, s15, 0
+; GFX9-NEXT: s_cmp_ge_u32 s15, s9
; GFX9-NEXT: s_cselect_b32 s17, -1, 0
-; GFX9-NEXT: s_cmp_eq_u32 s12, s9
-; GFX9-NEXT: s_cselect_b32 s12, s17, s13
-; GFX9-NEXT: s_add_u32 s13, s14, 1
-; GFX9-NEXT: s_addc_u32 s17, s15, 0
-; GFX9-NEXT: s_add_u32 s18, s14, 2
-; GFX9-NEXT: s_addc_u32 s19, s15, 0
-; GFX9-NEXT: s_cmp_lg_u32 s12, 0
-; GFX9-NEXT: s_cselect_b32 s12, s18, s13
-; GFX9-NEXT: s_cselect_b32 s13, s19, s17
+; GFX9-NEXT: s_cmp_ge_u32 s16, s8
+; GFX9-NEXT: s_cselect_b32 s16, -1, 0
+; GFX9-NEXT: s_cmp_eq_u32 s15, s9
+; GFX9-NEXT: s_cselect_b32 s15, s16, s17
+; GFX9-NEXT: s_add_u32 s16, s13, 1
+; GFX9-NEXT: s_addc_u32 s17, s12, 0
+; GFX9-NEXT: s_add_u32 s18, s13, 2
+; GFX9-NEXT: s_addc_u32 s19, s12, 0
+; GFX9-NEXT: s_cmp_lg_u32 s15, 0
+; GFX9-NEXT: s_cselect_b32 s15, s18, s16
+; GFX9-NEXT: s_cselect_b32 s16, s19, s17
; GFX9-NEXT: s_cmp_lg_u64 s[10:11], 0
-; GFX9-NEXT: s_subb_u32 s3, s3, s16
+; GFX9-NEXT: s_subb_u32 s3, s3, s14
; GFX9-NEXT: s_cmp_ge_u32 s3, s9
; GFX9-NEXT: s_cselect_b32 s10, -1, 0
; GFX9-NEXT: s_cmp_ge_u32 s2, s8
@@ -8067,8 +8056,8 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
; GFX9-NEXT: s_cmp_eq_u32 s3, s9
; GFX9-NEXT: s_cselect_b32 s2, s2, s10
; GFX9-NEXT: s_cmp_lg_u32 s2, 0
-; GFX9-NEXT: s_cselect_b32 s3, s13, s15
-; GFX9-NEXT: s_cselect_b32 s2, s12, s14
+; GFX9-NEXT: s_cselect_b32 s3, s16, s12
+; GFX9-NEXT: s_cselect_b32 s2, s15, s13
; GFX9-NEXT: s_xor_b64 s[4:5], s[4:5], s[6:7]
; GFX9-NEXT: s_xor_b64 s[2:3], s[2:3], s[4:5]
; GFX9-NEXT: s_sub_u32 s2, s2, s4
@@ -8328,10 +8317,9 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX6-NEXT: s_addc_u32 s17, 0, s18
; GFX6-NEXT: s_add_u32 s18, s12, s13
; GFX6-NEXT: v_mov_b32_e32 v0, s18
-; GFX6-NEXT: s_cselect_b64 s[12:13], -1, 0
; GFX6-NEXT: v_mul_hi_u32 v0, s14, v0
+; GFX6-NEXT: s_cselect_b64 s[12:13], -1, 0
; GFX6-NEXT: s_or_b32 s12, s12, s13
-; GFX6-NEXT: s_cmp_lg_u32 s12, 0
; GFX6-NEXT: s_addc_u32 s16, s16, s17
; GFX6-NEXT: s_mul_i32 s12, s14, s16
; GFX6-NEXT: v_readfirstlane_b32 s13, v0
@@ -8362,7 +8350,6 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX6-NEXT: s_add_u32 s15, s18, s12
; GFX6-NEXT: s_cselect_b64 s[12:13], -1, 0
; GFX6-NEXT: s_or_b32 s12, s12, s13
-; GFX6-NEXT: s_cmp_lg_u32 s12, 0
; GFX6-NEXT: s_addc_u32 s14, s16, s14
; GFX6-NEXT: s_ashr_i32 s12, s9, 31
; GFX6-NEXT: s_add_u32 s8, s8, s12
@@ -8387,55 +8374,53 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX6-NEXT: v_readfirstlane_b32 s16, v0
; GFX6-NEXT: s_addc_u32 s16, s16, 0
; GFX6-NEXT: s_mul_i32 s14, s9, s14
-; GFX6-NEXT: s_add_u32 s17, s15, s14
-; GFX6-NEXT: v_mov_b32_e32 v0, s17
+; GFX6-NEXT: s_add_u32 s18, s15, s14
+; GFX6-NEXT: v_mov_b32_e32 v0, s18
; GFX6-NEXT: v_mul_hi_u32 v0, s6, v0
-; GFX6-NEXT: s_addc_u32 s16, 0, s16
-; GFX6-NEXT: s_mul_i32 s14, s6, s16
+; GFX6-NEXT: s_addc_u32 s19, 0, s16
+; GFX6-NEXT: s_mul_i32 s14, s6, s19
; GFX6-NEXT: v_readfirstlane_b32 s15, v0
; GFX6-NEXT: s_add_i32 s14, s15, s14
-; GFX6-NEXT: s_mul_i32 s15, s7, s17
-; GFX6-NEXT: s_add_i32 s18, s14, s15
-; GFX6-NEXT: s_sub_i32 s19, s9, s18
-; GFX6-NEXT: s_mul_i32 s14, s6, s17
+; GFX6-NEXT: s_mul_i32 s15, s7, s18
+; GFX6-NEXT: s_add_i32 s20, s14, s15
+; GFX6-NEXT: s_sub_i32 s16, s9, s20
+; GFX6-NEXT: s_mul_i32 s14, s6, s18
; GFX6-NEXT: s_sub_u32 s8, s8, s14
; GFX6-NEXT: s_cselect_b64 s[14:15], -1, 0
-; GFX6-NEXT: s_or_b32 s20, s14, s15
-; GFX6-NEXT: s_cmp_lg_u32 s20, 0
-; GFX6-NEXT: s_subb_u32 s19, s19, s7
-; GFX6-NEXT: s_sub_u32 s21, s8, s6
-; GFX6-NEXT: s_cselect_b64 s[14:15], -1, 0
+; GFX6-NEXT: s_or_b32 s17, s14, s15
+; GFX6-NEXT: s_subb_u32 s21, s16, s7
+; GFX6-NEXT: s_sub_u32 s22, s8, s6
+; GFX6-NEXT: s_cselect_b64 s[16:17], -1, 0
+; GFX6-NEXT: s_or_b32 s16, s16, s17
+; GFX6-NEXT: s_subb_u32 s16, s21, 0
+; GFX6-NEXT: s_cmp_ge_u32 s16, s7
+; GFX6-NEXT: s_cselect_b32 s17, -1, 0
+; GFX6-NEXT: s_cmp_ge_u32 s22, s6
+; GFX6-NEXT: s_cselect_b32 s21, -1, 0
+; GFX6-NEXT: s_cmp_eq_u32 s16, s7
+; GFX6-NEXT: s_cselect_b32 s16, s21, s17
+; GFX6-NEXT: s_add_u32 s17, s18, 1
+; GFX6-NEXT: s_addc_u32 s21, s19, 0
+; GFX6-NEXT: s_add_u32 s22, s18, 2
+; GFX6-NEXT: s_addc_u32 s23, s19, 0
+; GFX6-NEXT: s_cmp_lg_u32 s16, 0
+; GFX6-NEXT: s_cselect_b32 s16, s22, s17
+; GFX6-NEXT: s_cselect_b32 s17, s23, s21
; GFX6-NEXT: s_or_b32 s14, s14, s15
-; GFX6-NEXT: s_cmp_lg_u32 s14, 0
-; GFX6-NEXT: s_subb_u32 s14, s19, 0
-; GFX6-NEXT: s_cmp_ge_u32 s14, s7
-; GFX6-NEXT: s_cselect_b32 s15, -1, 0
-; GFX6-NEXT: s_cmp_ge_u32 s21, s6
-; GFX6-NEXT: s_cselect_b32 s19, -1, 0
-; GFX6-NEXT: s_cmp_eq_u32 s14, s7
-; GFX6-NEXT: s_cselect_b32 s14, s19, s15
-; GFX6-NEXT: s_add_u32 s15, s17, 1
-; GFX6-NEXT: s_addc_u32 s19, s16, 0
-; GFX6-NEXT: s_add_u32 s21, s17, 2
-; GFX6-NEXT: s_addc_u32 s22, s16, 0
-; GFX6-NEXT: s_cmp_lg_u32 s14, 0
-; GFX6-NEXT: s_cselect_b32 s14, s21, s15
-; GFX6-NEXT: s_cselect_b32 s15, s22, s19
-; GFX6-NEXT: s_cmp_lg_u32 s20, 0
-; GFX6-NEXT: s_subb_u32 s9, s9, s18
+; GFX6-NEXT: s_subb_u32 s9, s9, s20
; GFX6-NEXT: s_cmp_ge_u32 s9, s7
-; GFX6-NEXT: s_cselect_b32 s18, -1, 0
+; GFX6-NEXT: s_cselect_b32 s14, -1, 0
; GFX6-NEXT: s_cmp_ge_u32 s8, s6
; GFX6-NEXT: s_cselect_b32 s6, -1, 0
; GFX6-NEXT: s_cmp_eq_u32 s9, s7
-; GFX6-NEXT: s_cselect_b32 s6, s6, s18
+; GFX6-NEXT: s_cselect_b32 s6, s6, s14
; GFX6-NEXT: s_cmp_lg_u32 s6, 0
-; GFX6-NEXT: s_cselect_b32 s7, s15, s16
-; GFX6-NEXT: s_cselect_b32 s6, s14, s17
+; GFX6-NEXT: s_cselect_b32 s7, s17, s19
+; GFX6-NEXT: s_cselect_b32 s6, s16, s18
; GFX6-NEXT: s_xor_b64 s[2:3], s[12:13], s[2:3]
; GFX6-NEXT: s_xor_b64 s[6:7], s[6:7], s[2:3]
-; GFX6-NEXT: s_sub_u32 s14, s6, s2
-; GFX6-NEXT: s_subb_u32 s15, s7, s3
+; GFX6-NEXT: s_sub_u32 s16, s6, s2
+; GFX6-NEXT: s_subb_u32 s17, s7, s3
; GFX6-NEXT: s_ashr_i32 s6, s1, 31
; GFX6-NEXT: s_add_u32 s0, s0, s6
; GFX6-NEXT: s_mov_b32 s7, s6
@@ -8454,40 +8439,39 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT: v_mul_hi_u32 v2, s12, v0
-; GFX6-NEXT: v_readfirstlane_b32 s16, v1
+; GFX6-NEXT: v_readfirstlane_b32 s14, v1
; GFX6-NEXT: v_readfirstlane_b32 s2, v0
-; GFX6-NEXT: s_mul_i32 s1, s12, s16
+; GFX6-NEXT: s_mul_i32 s1, s12, s14
; GFX6-NEXT: v_readfirstlane_b32 s3, v2
; GFX6-NEXT: s_mul_i32 s0, s13, s2
; GFX6-NEXT: s_add_i32 s1, s3, s1
; GFX6-NEXT: s_add_i32 s3, s1, s0
-; GFX6-NEXT: s_mul_i32 s17, s12, s2
+; GFX6-NEXT: s_mul_i32 s15, s12, s2
; GFX6-NEXT: v_mul_hi_u32 v2, v0, s3
-; GFX6-NEXT: v_mul_hi_u32 v0, v0, s17
+; GFX6-NEXT: v_mul_hi_u32 v0, v0, s15
; GFX6-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; GFX6-NEXT: s_mul_i32 s4, s2, s3
; GFX6-NEXT: v_readfirstlane_b32 s5, v2
; GFX6-NEXT: v_readfirstlane_b32 s18, v0
-; GFX6-NEXT: v_mul_hi_u32 v0, v1, s17
+; GFX6-NEXT: v_mul_hi_u32 v0, v1, s15
; GFX6-NEXT: v_mul_hi_u32 v1, v1, s3
; GFX6-NEXT: s_add_u32 s4, s18, s4
; GFX6-NEXT: s_addc_u32 s5, 0, s5
-; GFX6-NEXT: s_mul_i32 s17, s16, s17
+; GFX6-NEXT: s_mul_i32 s15, s14, s15
; GFX6-NEXT: v_readfirstlane_b32 s18, v0
-; GFX6-NEXT: s_add_u32 s4, s4, s17
+; GFX6-NEXT: s_add_u32 s4, s4, s15
; GFX6-NEXT: s_addc_u32 s4, s5, s18
; GFX6-NEXT: v_readfirstlane_b32 s5, v1
; GFX6-NEXT: s_addc_u32 s5, s5, 0
-; GFX6-NEXT: s_mul_i32 s3, s16, s3
+; GFX6-NEXT: s_mul_i32 s3, s14, s3
; GFX6-NEXT: s_add_u32 s3, s4, s3
; GFX6-NEXT: s_addc_u32 s4, 0, s5
; GFX6-NEXT: s_add_u32 s5, s2, s3
; GFX6-NEXT: v_mov_b32_e32 v0, s5
-; GFX6-NEXT: s_cselect_b64 s[2:3], -1, 0
; GFX6-NEXT: v_mul_hi_u32 v0, s12, v0
+; GFX6-NEXT: s_cselect_b64 s[2:3], -1, 0
; GFX6-NEXT: s_or_b32 s2, s2, s3
-; GFX6-NEXT: s_cmp_lg_u32 s2, 0
-; GFX6-NEXT: s_addc_u32 s4, s16, s4
+; GFX6-NEXT: s_addc_u32 s4, s14, s4
; GFX6-NEXT: s_mul_i32 s2, s12, s4
; GFX6-NEXT: v_readfirstlane_b32 s3, v0
; GFX6-NEXT: s_add_i32 s2, s3, s2
@@ -8501,14 +8485,14 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX6-NEXT: v_mul_hi_u32 v1, s4, v0
; GFX6-NEXT: v_mul_hi_u32 v0, s5, v0
; GFX6-NEXT: s_mul_i32 s13, s5, s2
-; GFX6-NEXT: v_readfirstlane_b32 s17, v2
-; GFX6-NEXT: s_add_u32 s13, s17, s13
-; GFX6-NEXT: v_readfirstlane_b32 s16, v0
+; GFX6-NEXT: v_readfirstlane_b32 s15, v2
+; GFX6-NEXT: s_add_u32 s13, s15, s13
+; GFX6-NEXT: v_readfirstlane_b32 s14, v0
; GFX6-NEXT: s_mul_i32 s3, s4, s3
-; GFX6-NEXT: s_addc_u32 s16, 0, s16
+; GFX6-NEXT: s_addc_u32 s14, 0, s14
; GFX6-NEXT: v_readfirstlane_b32 s12, v3
; GFX6-NEXT: s_add_u32 s3, s13, s3
-; GFX6-NEXT: s_addc_u32 s3, s16, s12
+; GFX6-NEXT: s_addc_u32 s3, s14, s12
; GFX6-NEXT: v_readfirstlane_b32 s12, v1
; GFX6-NEXT: s_addc_u32 s12, s12, 0
; GFX6-NEXT: s_mul_i32 s2, s4, s2
@@ -8517,7 +8501,6 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX6-NEXT: s_add_u32 s13, s5, s2
; GFX6-NEXT: s_cselect_b64 s[2:3], -1, 0
; GFX6-NEXT: s_or_b32 s2, s2, s3
-; GFX6-NEXT: s_cmp_lg_u32 s2, 0
; GFX6-NEXT: s_addc_u32 s12, s4, s12
; GFX6-NEXT: s_ashr_i32 s4, s11, 31
; GFX6-NEXT: s_add_u32 s2, s10, s4
@@ -8529,72 +8512,70 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX6-NEXT: v_mov_b32_e32 v2, s13
; GFX6-NEXT: v_mul_hi_u32 v3, s10, v2
; GFX6-NEXT: s_mul_i32 s2, s10, s12
-; GFX6-NEXT: v_readfirstlane_b32 s16, v1
+; GFX6-NEXT: v_readfirstlane_b32 s14, v1
; GFX6-NEXT: v_mul_hi_u32 v1, s11, v2
-; GFX6-NEXT: v_readfirstlane_b32 s17, v3
+; GFX6-NEXT: v_readfirstlane_b32 s15, v3
; GFX6-NEXT: v_mul_hi_u32 v0, s11, v0
-; GFX6-NEXT: s_add_u32 s2, s17, s2
-; GFX6-NEXT: s_addc_u32 s16, 0, s16
+; GFX6-NEXT: s_add_u32 s2, s15, s2
+; GFX6-NEXT: s_addc_u32 s14, 0, s14
; GFX6-NEXT: s_mul_i32 s13, s11, s13
-; GFX6-NEXT: v_readfirstlane_b32 s17, v1
+; GFX6-NEXT: v_readfirstlane_b32 s15, v1
; GFX6-NEXT: s_add_u32 s2, s2, s13
-; GFX6-NEXT: s_addc_u32 s2, s16, s17
+; GFX6-NEXT: s_addc_u32 s2, s14, s15
; GFX6-NEXT: v_readfirstlane_b32 s13, v0
; GFX6-NEXT: s_addc_u32 s13, s13, 0
; GFX6-NEXT: s_mul_i32 s12, s11, s12
-; GFX6-NEXT: s_add_u32 s16, s2, s12
-; GFX6-NEXT: v_mov_b32_e32 v0, s16
+; GFX6-NEXT: s_add_u32 s18, s2, s12
+; GFX6-NEXT: v_mov_b32_e32 v0, s18
; GFX6-NEXT: v_mul_hi_u32 v0, s8, v0
-; GFX6-NEXT: s_addc_u32 s17, 0, s13
-; GFX6-NEXT: s_mul_i32 s12, s8, s17
+; GFX6-NEXT: s_addc_u32 s19, 0, s13
+; GFX6-NEXT: s_mul_i32 s12, s8, s19
; GFX6-NEXT: s_mov_b32 s3, 0xf000
; GFX6-NEXT: v_readfirstlane_b32 s13, v0
; GFX6-NEXT: s_add_i32 s12, s13, s12
-; GFX6-NEXT: s_mul_i32 s13, s9, s16
-; GFX6-NEXT: s_add_i32 s18, s12, s13
-; GFX6-NEXT: s_sub_i32 s19, s11, s18
-; GFX6-NEXT: s_mul_i32 s12, s8, s16
+; GFX6-NEXT: s_mul_i32 s13, s9, s18
+; GFX6-NEXT: s_add_i32 s20, s12, s13
+; GFX6-NEXT: s_sub_i32 s14, s11, s20
+; GFX6-NEXT: s_mul_i32 s12, s8, s18
; GFX6-NEXT: s_sub_u32 s10, s10, s12
; GFX6-NEXT: s_cselect_b64 s[12:13], -1, 0
-; GFX6-NEXT: s_or_b32 s20, s12, s13
-; GFX6-NEXT: s_cmp_lg_u32 s20, 0
-; GFX6-NEXT: s_subb_u32 s19, s19, s9
-; GFX6-NEXT: s_sub_u32 s21, s10, s8
-; GFX6-NEXT: s_cselect_b64 s[12:13], -1, 0
+; GFX6-NEXT: s_or_b32 s15, s12, s13
+; GFX6-NEXT: s_subb_u32 s21, s14, s9
+; GFX6-NEXT: s_sub_u32 s22, s10, s8
+; GFX6-NEXT: s_cselect_b64 s[14:15], -1, 0
+; GFX6-NEXT: s_or_b32 s14, s14, s15
+; GFX6-NEXT: s_subb_u32 s14, s21, 0
+; GFX6-NEXT: s_cmp_ge_u32 s14, s9
+; GFX6-NEXT: s_cselect_b32 s15, -1, 0
+; GFX6-NEXT: s_cmp_ge_u32 s22, s8
+; GFX6-NEXT: s_cselect_b32 s21, -1, 0
+; GFX6-NEXT: s_cmp_eq_u32 s14, s9
+; GFX6-NEXT: s_cselect_b32 s14, s21, s15
+; GFX6-NEXT: s_add_u32 s15, s18, 1
+; GFX6-NEXT: s_addc_u32 s21, s19, 0
+; GFX6-NEXT: s_add_u32 s22, s18, 2
+; GFX6-NEXT: s_addc_u32 s23, s19, 0
+; GFX6-NEXT: s_cmp_lg_u32 s14, 0
+; GFX6-NEXT: s_cselect_b32 s14, s22, s15
+; GFX6-NEXT: s_cselect_b32 s15, s23, s21
; GFX6-NEXT: s_or_b32 s12, s12, s13
-; GFX6-NEXT: s_cmp_lg_u32 s12, 0
-; GFX6-NEXT: s_subb_u32 s12, s19, 0
-; GFX6-NEXT: s_cmp_ge_u32 s12, s9
-; GFX6-NEXT: s_cselect_b32 s13, -1, 0
-; GFX6-NEXT: s_cmp_ge_u32 s21, s8
-; GFX6-NEXT: s_cselect_b32 s19, -1, 0
-; GFX6-NEXT: s_cmp_eq_u32 s12, s9
-; GFX6-NEXT: s_cselect_b32 s12, s19, s13
-; GFX6-NEXT: s_add_u32 s13, s16, 1
-; GFX6-NEXT: s_addc_u32 s19, s17, 0
-; GFX6-NEXT: s_add_u32 s21, s16, 2
-; GFX6-NEXT: s_addc_u32 s22, s17, 0
-; GFX6-NEXT: s_cmp_lg_u32 s12, 0
-; GFX6-NEXT: s_cselect_b32 s12, s21, s13
-; GFX6-NEXT: s_cselect_b32 s13, s22, s19
-; GFX6-NEXT: s_cmp_lg_u32 s20, 0
-; GFX6-NEXT: s_subb_u32 s11, s11, s18
+; GFX6-NEXT: s_subb_u32 s11, s11, s20
; GFX6-NEXT: s_cmp_ge_u32 s11, s9
-; GFX6-NEXT: s_cselect_b32 s18, -1, 0
+; GFX6-NEXT: s_cselect_b32 s12, -1, 0
; GFX6-NEXT: s_cmp_ge_u32 s10, s8
; GFX6-NEXT: s_cselect_b32 s8, -1, 0
; GFX6-NEXT: s_cmp_eq_u32 s11, s9
-; GFX6-NEXT: s_cselect_b32 s8, s8, s18
+; GFX6-NEXT: s_cselect_b32 s8, s8, s12
; GFX6-NEXT: s_cmp_lg_u32 s8, 0
-; GFX6-NEXT: s_cselect_b32 s9, s13, s17
-; GFX6-NEXT: s_cselect_b32 s8, s12, s16
+; GFX6-NEXT: s_cselect_b32 s9, s15, s19
+; GFX6-NEXT: s_cselect_b32 s8, s14, s18
; GFX6-NEXT: s_xor_b64 s[4:5], s[4:5], s[6:7]
; GFX6-NEXT: s_xor_b64 s[6:7], s[8:9], s[4:5]
; GFX6-NEXT: s_sub_u32 s4, s6, s4
; GFX6-NEXT: s_subb_u32 s5, s7, s5
; GFX6-NEXT: s_mov_b32 s2, -1
-; GFX6-NEXT: v_mov_b32_e32 v0, s14
-; GFX6-NEXT: v_mov_b32_e32 v1, s15
+; GFX6-NEXT: v_mov_b32_e32 v0, s16
+; GFX6-NEXT: v_mov_b32_e32 v1, s17
; GFX6-NEXT: v_mov_b32_e32 v2, s4
; GFX6-NEXT: v_mov_b32_e32 v3, s5
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
@@ -8614,8 +8595,8 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX9-NEXT: s_xor_b64 s[6:7], s[6:7], s[2:3]
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s6
; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s7
-; GFX9-NEXT: s_sub_u32 s14, 0, s6
-; GFX9-NEXT: s_subb_u32 s15, 0, s7
+; GFX9-NEXT: s_sub_u32 s12, 0, s6
+; GFX9-NEXT: s_subb_u32 s13, 0, s7
; GFX9-NEXT: v_mac_f32_e32 v0, 0x4f800000, v1
; GFX9-NEXT: v_rcp_f32_e32 v0, v0
; GFX9-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -8624,56 +8605,52 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX9-NEXT: v_mac_f32_e32 v0, 0xcf800000, v1
; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GFX9-NEXT: v_readfirstlane_b32 s16, v1
-; GFX9-NEXT: v_readfirstlane_b32 s12, v0
-; GFX9-NEXT: s_mul_i32 s13, s14, s16
-; GFX9-NEXT: s_mul_hi_u32 s18, s14, s12
-; GFX9-NEXT: s_mul_i32 s17, s15, s12
-; GFX9-NEXT: s_add_i32 s13, s18, s13
-; GFX9-NEXT: s_mul_i32 s19, s14, s12
-; GFX9-NEXT: s_add_i32 s13, s13, s17
-; GFX9-NEXT: s_mul_hi_u32 s18, s12, s19
-; GFX9-NEXT: s_mul_i32 s20, s12, s13
-; GFX9-NEXT: s_mul_hi_u32 s17, s12, s13
+; GFX9-NEXT: v_readfirstlane_b32 s14, v1
+; GFX9-NEXT: v_readfirstlane_b32 s15, v0
+; GFX9-NEXT: s_mul_i32 s16, s12, s14
+; GFX9-NEXT: s_mul_hi_u32 s18, s12, s15
+; GFX9-NEXT: s_mul_i32 s17, s13, s15
+; GFX9-NEXT: s_add_i32 s16, s18, s16
+; GFX9-NEXT: s_mul_i32 s19, s12, s15
+; GFX9-NEXT: s_add_i32 s16, s16, s17
+; GFX9-NEXT: s_mul_hi_u32 s18, s15, s19
+; GFX9-NEXT: s_mul_i32 s20, s15, s16
+; GFX9-NEXT: s_mul_hi_u32 s17, s15, s16
; GFX9-NEXT: s_add_u32 s18, s18, s20
; GFX9-NEXT: s_addc_u32 s17, 0, s17
-; GFX9-NEXT: s_mul_hi_u32 s20, s16, s19
-; GFX9-NEXT: s_mul_i32 s19, s16, s19
+; GFX9-NEXT: s_mul_hi_u32 s20, s14, s19
+; GFX9-NEXT: s_mul_i32 s19, s14, s19
; GFX9-NEXT: s_add_u32 s18, s18, s19
-; GFX9-NEXT: s_mul_hi_u32 s21, s16, s13
+; GFX9-NEXT: s_mul_hi_u32 s21, s14, s16
; GFX9-NEXT: s_addc_u32 s17, s17, s20
; GFX9-NEXT: s_addc_u32 s18, s21, 0
-; GFX9-NEXT: s_mul_i32 s13, s16, s13
-; GFX9-NEXT: s_add_u32 s13, s17, s13
+; GFX9-NEXT: s_mul_i32 s16, s14, s16
+; GFX9-NEXT: s_add_u32 s16, s17, s16
; GFX9-NEXT: s_addc_u32 s17, 0, s18
-; GFX9-NEXT: s_add_u32 s18, s12, s13
-; GFX9-NEXT: s_cselect_b64 s[12:13], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[12:13], 0
-; GFX9-NEXT: s_addc_u32 s16, s16, s17
-; GFX9-NEXT: s_mul_i32 s12, s14, s16
-; GFX9-NEXT: s_mul_hi_u32 s13, s14, s18
-; GFX9-NEXT: s_add_i32 s12, s13, s12
-; GFX9-NEXT: s_mul_i32 s15, s15, s18
-; GFX9-NEXT: s_add_i32 s12, s12, s15
-; GFX9-NEXT: s_mul_i32 s14, s14, s18
-; GFX9-NEXT: s_mul_hi_u32 s15, s16, s14
-; GFX9-NEXT: s_mul_i32 s17, s16, s14
-; GFX9-NEXT: s_mul_i32 s20, s18, s12
-; GFX9-NEXT: s_mul_hi_u32 s14, s18, s14
-; GFX9-NEXT: s_mul_hi_u32 s19, s18, s12
-; GFX9-NEXT: s_add_u32 s14, s14, s20
+; GFX9-NEXT: s_add_u32 s15, s15, s16
+; GFX9-NEXT: s_addc_u32 s14, s14, s17
+; GFX9-NEXT: s_mul_i32 s16, s12, s14
+; GFX9-NEXT: s_mul_hi_u32 s17, s12, s15
+; GFX9-NEXT: s_add_i32 s16, s17, s16
+; GFX9-NEXT: s_mul_i32 s13, s13, s15
+; GFX9-NEXT: s_add_i32 s16, s16, s13
+; GFX9-NEXT: s_mul_i32 s12, s12, s15
+; GFX9-NEXT: s_mul_hi_u32 s17, s14, s12
+; GFX9-NEXT: s_mul_i32 s18, s14, s12
+; GFX9-NEXT: s_mul_i32 s20, s15, s16
+; GFX9-NEXT: s_mul_hi_u32 s12, s15, s12
+; GFX9-NEXT: s_mul_hi_u32 s19, s15, s16
+; GFX9-NEXT: s_add_u32 s12, s12, s20
; GFX9-NEXT: s_addc_u32 s19, 0, s19
-; GFX9-NEXT: s_add_u32 s14, s14, s17
-; GFX9-NEXT: s_mul_hi_u32 s13, s16, s12
-; GFX9-NEXT: s_addc_u32 s14, s19, s15
+; GFX9-NEXT: s_add_u32 s12, s12, s18
+; GFX9-NEXT: s_mul_hi_u32 s13, s14, s16
+; GFX9-NEXT: s_addc_u32 s12, s19, s17
; GFX9-NEXT: s_addc_u32 s13, s13, 0
-; GFX9-NEXT: s_mul_i32 s12, s16, s12
-; GFX9-NEXT: s_add_u32 s12, s14, s12
-; GFX9-NEXT: s_addc_u32 s14, 0, s13
-; GFX9-NEXT: s_add_u32 s15, s18, s12
-; GFX9-NEXT: s_cselect_b64 s[12:13], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[12:13], 0
-; GFX9-NEXT: s_addc_u32 s14, s16, s14
+; GFX9-NEXT: s_mul_i32 s16, s14, s16
+; GFX9-NEXT: s_add_u32 s12, s12, s16
+; GFX9-NEXT: s_addc_u32 s13, 0, s13
+; GFX9-NEXT: s_add_u32 s15, s15, s12
+; GFX9-NEXT: s_addc_u32 s14, s14, s13
; GFX9-NEXT: s_ashr_i32 s12, s9, 31
; GFX9-NEXT: s_add_u32 s8, s8, s12
; GFX9-NEXT: s_mov_b32 s13, s12
@@ -8691,38 +8668,35 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX9-NEXT: s_addc_u32 s15, s16, s19
; GFX9-NEXT: s_addc_u32 s16, s18, 0
; GFX9-NEXT: s_mul_i32 s14, s9, s14
-; GFX9-NEXT: s_add_u32 s18, s15, s14
-; GFX9-NEXT: s_addc_u32 s19, 0, s16
-; GFX9-NEXT: s_mul_i32 s14, s6, s19
-; GFX9-NEXT: s_mul_hi_u32 s15, s6, s18
+; GFX9-NEXT: s_add_u32 s17, s15, s14
+; GFX9-NEXT: s_addc_u32 s16, 0, s16
+; GFX9-NEXT: s_mul_i32 s14, s6, s16
+; GFX9-NEXT: s_mul_hi_u32 s15, s6, s17
; GFX9-NEXT: s_add_i32 s14, s15, s14
-; GFX9-NEXT: s_mul_i32 s15, s7, s18
-; GFX9-NEXT: s_add_i32 s20, s14, s15
-; GFX9-NEXT: s_sub_i32 s16, s9, s20
-; GFX9-NEXT: s_mul_i32 s14, s6, s18
+; GFX9-NEXT: s_mul_i32 s15, s7, s17
+; GFX9-NEXT: s_add_i32 s18, s14, s15
+; GFX9-NEXT: s_sub_i32 s19, s9, s18
+; GFX9-NEXT: s_mul_i32 s14, s6, s17
; GFX9-NEXT: s_sub_u32 s8, s8, s14
; GFX9-NEXT: s_cselect_b64 s[14:15], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[14:15], 0
-; GFX9-NEXT: s_subb_u32 s21, s16, s7
-; GFX9-NEXT: s_sub_u32 s22, s8, s6
-; GFX9-NEXT: s_cselect_b64 s[16:17], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[16:17], 0
-; GFX9-NEXT: s_subb_u32 s16, s21, 0
-; GFX9-NEXT: s_cmp_ge_u32 s16, s7
-; GFX9-NEXT: s_cselect_b32 s17, -1, 0
-; GFX9-NEXT: s_cmp_ge_u32 s22, s6
+; GFX9-NEXT: s_subb_u32 s19, s19, s7
+; GFX9-NEXT: s_sub_u32 s20, s8, s6
+; GFX9-NEXT: s_subb_u32 s19, s19, 0
+; GFX9-NEXT: s_cmp_ge_u32 s19, s7
; GFX9-NEXT: s_cselect_b32 s21, -1, 0
-; GFX9-NEXT: s_cmp_eq_u32 s16, s7
-; GFX9-NEXT: s_cselect_b32 s16, s21, s17
-; GFX9-NEXT: s_add_u32 s17, s18, 1
-; GFX9-NEXT: s_addc_u32 s21, s19, 0
-; GFX9-NEXT: s_add_u32 s22, s18, 2
-; GFX9-NEXT: s_addc_u32 s23, s19, 0
-; GFX9-NEXT: s_cmp_lg_u32 s16, 0
-; GFX9-NEXT: s_cselect_b32 s16, s22, s17
-; GFX9-NEXT: s_cselect_b32 s17, s23, s21
+; GFX9-NEXT: s_cmp_ge_u32 s20, s6
+; GFX9-NEXT: s_cselect_b32 s20, -1, 0
+; GFX9-NEXT: s_cmp_eq_u32 s19, s7
+; GFX9-NEXT: s_cselect_b32 s19, s20, s21
+; GFX9-NEXT: s_add_u32 s20, s17, 1
+; GFX9-NEXT: s_addc_u32 s21, s16, 0
+; GFX9-NEXT: s_add_u32 s22, s17, 2
+; GFX9-NEXT: s_addc_u32 s23, s16, 0
+; GFX9-NEXT: s_cmp_lg_u32 s19, 0
+; GFX9-NEXT: s_cselect_b32 s19, s22, s20
+; GFX9-NEXT: s_cselect_b32 s20, s23, s21
; GFX9-NEXT: s_cmp_lg_u64 s[14:15], 0
-; GFX9-NEXT: s_subb_u32 s9, s9, s20
+; GFX9-NEXT: s_subb_u32 s9, s9, s18
; GFX9-NEXT: s_cmp_ge_u32 s9, s7
; GFX9-NEXT: s_cselect_b32 s14, -1, 0
; GFX9-NEXT: s_cmp_ge_u32 s8, s6
@@ -8730,12 +8704,12 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX9-NEXT: s_cmp_eq_u32 s9, s7
; GFX9-NEXT: s_cselect_b32 s6, s6, s14
; GFX9-NEXT: s_cmp_lg_u32 s6, 0
-; GFX9-NEXT: s_cselect_b32 s7, s17, s19
-; GFX9-NEXT: s_cselect_b32 s6, s16, s18
+; GFX9-NEXT: s_cselect_b32 s7, s20, s16
+; GFX9-NEXT: s_cselect_b32 s6, s19, s17
; GFX9-NEXT: s_xor_b64 s[2:3], s[12:13], s[2:3]
; GFX9-NEXT: s_xor_b64 s[6:7], s[6:7], s[2:3]
-; GFX9-NEXT: s_sub_u32 s14, s6, s2
-; GFX9-NEXT: s_subb_u32 s15, s7, s3
+; GFX9-NEXT: s_sub_u32 s12, s6, s2
+; GFX9-NEXT: s_subb_u32 s13, s7, s3
; GFX9-NEXT: s_ashr_i32 s2, s1, 31
; GFX9-NEXT: s_add_u32 s0, s0, s2
; GFX9-NEXT: s_mov_b32 s3, s2
@@ -8744,8 +8718,8 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s6
; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s7
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
-; GFX9-NEXT: s_sub_u32 s8, 0, s6
-; GFX9-NEXT: s_subb_u32 s9, 0, s7
+; GFX9-NEXT: s_sub_u32 s4, 0, s6
+; GFX9-NEXT: s_subb_u32 s5, 0, s7
; GFX9-NEXT: v_mac_f32_e32 v0, 0x4f800000, v1
; GFX9-NEXT: v_rcp_f32_e32 v1, v0
; GFX9-NEXT: v_mov_b32_e32 v0, 0
@@ -8755,105 +8729,98 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX9-NEXT: v_mac_f32_e32 v1, 0xcf800000, v2
; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT: v_cvt_u32_f32_e32 v2, v2
-; GFX9-NEXT: v_readfirstlane_b32 s4, v1
-; GFX9-NEXT: v_readfirstlane_b32 s13, v2
-; GFX9-NEXT: s_mul_hi_u32 s12, s8, s4
-; GFX9-NEXT: s_mul_i32 s16, s8, s13
-; GFX9-NEXT: s_mul_i32 s5, s9, s4
-; GFX9-NEXT: s_add_i32 s12, s12, s16
-; GFX9-NEXT: s_add_i32 s12, s12, s5
-; GFX9-NEXT: s_mul_i32 s17, s8, s4
-; GFX9-NEXT: s_mul_i32 s16, s4, s12
-; GFX9-NEXT: s_mul_hi_u32 s18, s4, s17
-; GFX9-NEXT: s_mul_hi_u32 s5, s4, s12
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s15, v2
+; GFX9-NEXT: s_mul_hi_u32 s14, s4, s8
+; GFX9-NEXT: s_mul_i32 s16, s4, s15
+; GFX9-NEXT: s_mul_i32 s9, s5, s8
+; GFX9-NEXT: s_add_i32 s14, s14, s16
+; GFX9-NEXT: s_add_i32 s14, s14, s9
+; GFX9-NEXT: s_mul_i32 s17, s4, s8
+; GFX9-NEXT: s_mul_i32 s16, s8, s14
+; GFX9-NEXT: s_mul_hi_u32 s18, s8, s17
+; GFX9-NEXT: s_mul_hi_u32 s9, s8, s14
; GFX9-NEXT: s_add_u32 s16, s18, s16
-; GFX9-NEXT: s_addc_u32 s5, 0, s5
-; GFX9-NEXT: s_mul_hi_u32 s19, s13, s17
-; GFX9-NEXT: s_mul_i32 s17, s13, s17
+; GFX9-NEXT: s_addc_u32 s9, 0, s9
+; GFX9-NEXT: s_mul_hi_u32 s19, s15, s17
+; GFX9-NEXT: s_mul_i32 s17, s15, s17
; GFX9-NEXT: s_add_u32 s16, s16, s17
-; GFX9-NEXT: s_mul_hi_u32 s18, s13, s12
-; GFX9-NEXT: s_addc_u32 s5, s5, s19
+; GFX9-NEXT: s_mul_hi_u32 s18, s15, s14
+; GFX9-NEXT: s_addc_u32 s9, s9, s19
; GFX9-NEXT: s_addc_u32 s16, s18, 0
-; GFX9-NEXT: s_mul_i32 s12, s13, s12
-; GFX9-NEXT: s_add_u32 s5, s5, s12
-; GFX9-NEXT: s_addc_u32 s12, 0, s16
-; GFX9-NEXT: s_add_u32 s16, s4, s5
-; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0
-; GFX9-NEXT: s_addc_u32 s12, s13, s12
-; GFX9-NEXT: s_mul_i32 s4, s8, s12
-; GFX9-NEXT: s_mul_hi_u32 s5, s8, s16
-; GFX9-NEXT: s_add_i32 s4, s5, s4
-; GFX9-NEXT: s_mul_i32 s9, s9, s16
-; GFX9-NEXT: s_add_i32 s4, s4, s9
-; GFX9-NEXT: s_mul_i32 s8, s8, s16
-; GFX9-NEXT: s_mul_hi_u32 s9, s12, s8
-; GFX9-NEXT: s_mul_i32 s13, s12, s8
-; GFX9-NEXT: s_mul_i32 s18, s16, s4
-; GFX9-NEXT: s_mul_hi_u32 s8, s16, s8
-; GFX9-NEXT: s_mul_hi_u32 s17, s16, s4
-; GFX9-NEXT: s_add_u32 s8, s8, s18
+; GFX9-NEXT: s_mul_i32 s14, s15, s14
+; GFX9-NEXT: s_add_u32 s9, s9, s14
+; GFX9-NEXT: s_addc_u32 s14, 0, s16
+; GFX9-NEXT: s_add_u32 s8, s8, s9
+; GFX9-NEXT: s_addc_u32 s9, s15, s14
+; GFX9-NEXT: s_mul_i32 s14, s4, s9
+; GFX9-NEXT: s_mul_hi_u32 s15, s4, s8
+; GFX9-NEXT: s_add_i32 s14, s15, s14
+; GFX9-NEXT: s_mul_i32 s5, s5, s8
+; GFX9-NEXT: s_add_i32 s14, s14, s5
+; GFX9-NEXT: s_mul_i32 s4, s4, s8
+; GFX9-NEXT: s_mul_hi_u32 s15, s9, s4
+; GFX9-NEXT: s_mul_i32 s16, s9, s4
+; GFX9-NEXT: s_mul_i32 s18, s8, s14
+; GFX9-NEXT: s_mul_hi_u32 s4, s8, s4
+; GFX9-NEXT: s_mul_hi_u32 s17, s8, s14
+; GFX9-NEXT: s_add_u32 s4, s4, s18
; GFX9-NEXT: s_addc_u32 s17, 0, s17
-; GFX9-NEXT: s_add_u32 s8, s8, s13
-; GFX9-NEXT: s_mul_hi_u32 s5, s12, s4
-; GFX9-NEXT: s_addc_u32 s8, s17, s9
+; GFX9-NEXT: s_add_u32 s4, s4, s16
+; GFX9-NEXT: s_mul_hi_u32 s5, s9, s14
+; GFX9-NEXT: s_addc_u32 s4, s17, s15
; GFX9-NEXT: s_addc_u32 s5, s5, 0
-; GFX9-NEXT: s_mul_i32 s4, s12, s4
-; GFX9-NEXT: s_add_u32 s4, s8, s4
-; GFX9-NEXT: s_addc_u32 s8, 0, s5
-; GFX9-NEXT: s_add_u32 s13, s16, s4
-; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0
-; GFX9-NEXT: s_addc_u32 s12, s12, s8
+; GFX9-NEXT: s_mul_i32 s14, s9, s14
+; GFX9-NEXT: s_add_u32 s4, s4, s14
+; GFX9-NEXT: s_addc_u32 s5, 0, s5
+; GFX9-NEXT: s_add_u32 s14, s8, s4
+; GFX9-NEXT: s_addc_u32 s15, s9, s5
; GFX9-NEXT: s_ashr_i32 s4, s11, 31
; GFX9-NEXT: s_add_u32 s8, s10, s4
; GFX9-NEXT: s_mov_b32 s5, s4
; GFX9-NEXT: s_addc_u32 s9, s11, s4
; GFX9-NEXT: s_xor_b64 s[8:9], s[8:9], s[4:5]
-; GFX9-NEXT: s_mul_i32 s11, s8, s12
-; GFX9-NEXT: s_mul_hi_u32 s16, s8, s13
-; GFX9-NEXT: s_mul_hi_u32 s10, s8, s12
+; GFX9-NEXT: s_mul_i32 s11, s8, s15
+; GFX9-NEXT: s_mul_hi_u32 s16, s8, s14
+; GFX9-NEXT: s_mul_hi_u32 s10, s8, s15
; GFX9-NEXT: s_add_u32 s11, s16, s11
; GFX9-NEXT: s_addc_u32 s10, 0, s10
-; GFX9-NEXT: s_mul_hi_u32 s17, s9, s13
-; GFX9-NEXT: s_mul_i32 s13, s9, s13
-; GFX9-NEXT: s_add_u32 s11, s11, s13
-; GFX9-NEXT: s_mul_hi_u32 s16, s9, s12
+; GFX9-NEXT: s_mul_hi_u32 s17, s9, s14
+; GFX9-NEXT: s_mul_i32 s14, s9, s14
+; GFX9-NEXT: s_add_u32 s11, s11, s14
+; GFX9-NEXT: s_mul_hi_u32 s16, s9, s15
; GFX9-NEXT: s_addc_u32 s10, s10, s17
; GFX9-NEXT: s_addc_u32 s11, s16, 0
-; GFX9-NEXT: s_mul_i32 s12, s9, s12
-; GFX9-NEXT: s_add_u32 s16, s10, s12
-; GFX9-NEXT: s_addc_u32 s17, 0, s11
-; GFX9-NEXT: s_mul_i32 s10, s6, s17
-; GFX9-NEXT: s_mul_hi_u32 s11, s6, s16
+; GFX9-NEXT: s_mul_i32 s14, s9, s15
+; GFX9-NEXT: s_add_u32 s14, s10, s14
+; GFX9-NEXT: s_addc_u32 s15, 0, s11
+; GFX9-NEXT: s_mul_i32 s10, s6, s15
+; GFX9-NEXT: s_mul_hi_u32 s11, s6, s14
; GFX9-NEXT: s_add_i32 s10, s11, s10
-; GFX9-NEXT: s_mul_i32 s11, s7, s16
-; GFX9-NEXT: s_add_i32 s18, s10, s11
-; GFX9-NEXT: s_sub_i32 s12, s9, s18
-; GFX9-NEXT: s_mul_i32 s10, s6, s16
+; GFX9-NEXT: s_mul_i32 s11, s7, s14
+; GFX9-NEXT: s_add_i32 s16, s10, s11
+; GFX9-NEXT: s_sub_i32 s17, s9, s16
+; GFX9-NEXT: s_mul_i32 s10, s6, s14
; GFX9-NEXT: s_sub_u32 s8, s8, s10
; GFX9-NEXT: s_cselect_b64 s[10:11], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[10:11], 0
-; GFX9-NEXT: s_subb_u32 s19, s12, s7
-; GFX9-NEXT: s_sub_u32 s20, s8, s6
-; GFX9-NEXT: s_cselect_b64 s[12:13], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[12:13], 0
-; GFX9-NEXT: s_subb_u32 s12, s19, 0
-; GFX9-NEXT: s_cmp_ge_u32 s12, s7
-; GFX9-NEXT: s_cselect_b32 s13, -1, 0
-; GFX9-NEXT: s_cmp_ge_u32 s20, s6
+; GFX9-NEXT: s_subb_u32 s17, s17, s7
+; GFX9-NEXT: s_sub_u32 s18, s8, s6
+; GFX9-NEXT: s_subb_u32 s17, s17, 0
+; GFX9-NEXT: s_cmp_ge_u32 s17, s7
; GFX9-NEXT: s_cselect_b32 s19, -1, 0
-; GFX9-NEXT: s_cmp_eq_u32 s12, s7
-; GFX9-NEXT: s_cselect_b32 s12, s19, s13
-; GFX9-NEXT: s_add_u32 s13, s16, 1
-; GFX9-NEXT: s_addc_u32 s19, s17, 0
-; GFX9-NEXT: s_add_u32 s20, s16, 2
-; GFX9-NEXT: s_addc_u32 s21, s17, 0
-; GFX9-NEXT: s_cmp_lg_u32 s12, 0
-; GFX9-NEXT: s_cselect_b32 s12, s20, s13
-; GFX9-NEXT: s_cselect_b32 s13, s21, s19
+; GFX9-NEXT: s_cmp_ge_u32 s18, s6
+; GFX9-NEXT: s_cselect_b32 s18, -1, 0
+; GFX9-NEXT: s_cmp_eq_u32 s17, s7
+; GFX9-NEXT: s_cselect_b32 s17, s18, s19
+; GFX9-NEXT: s_add_u32 s18, s14, 1
+; GFX9-NEXT: s_addc_u32 s19, s15, 0
+; GFX9-NEXT: s_add_u32 s20, s14, 2
+; GFX9-NEXT: s_addc_u32 s21, s15, 0
+; GFX9-NEXT: s_cmp_lg_u32 s17, 0
+; GFX9-NEXT: s_cselect_b32 s17, s20, s18
+; GFX9-NEXT: s_cselect_b32 s18, s21, s19
; GFX9-NEXT: s_cmp_lg_u64 s[10:11], 0
-; GFX9-NEXT: s_subb_u32 s9, s9, s18
+; GFX9-NEXT: s_subb_u32 s9, s9, s16
; GFX9-NEXT: s_cmp_ge_u32 s9, s7
; GFX9-NEXT: s_cselect_b32 s10, -1, 0
; GFX9-NEXT: s_cmp_ge_u32 s8, s6
@@ -8861,14 +8828,14 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX9-NEXT: s_cmp_eq_u32 s9, s7
; GFX9-NEXT: s_cselect_b32 s6, s6, s10
; GFX9-NEXT: s_cmp_lg_u32 s6, 0
-; GFX9-NEXT: s_cselect_b32 s7, s13, s17
-; GFX9-NEXT: s_cselect_b32 s6, s12, s16
+; GFX9-NEXT: s_cselect_b32 s7, s18, s15
+; GFX9-NEXT: s_cselect_b32 s6, s17, s14
; GFX9-NEXT: s_xor_b64 s[2:3], s[4:5], s[2:3]
; GFX9-NEXT: s_xor_b64 s[4:5], s[6:7], s[2:3]
; GFX9-NEXT: s_sub_u32 s2, s4, s2
; GFX9-NEXT: s_subb_u32 s3, s5, s3
-; GFX9-NEXT: v_mov_b32_e32 v1, s14
-; GFX9-NEXT: v_mov_b32_e32 v2, s15
+; GFX9-NEXT: v_mov_b32_e32 v1, s12
+; GFX9-NEXT: v_mov_b32_e32 v2, s13
; GFX9-NEXT: v_mov_b32_e32 v3, s2
; GFX9-NEXT: v_mov_b32_e32 v4, s3
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
@@ -9089,10 +9056,9 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
; GFX6-NEXT: s_addc_u32 s13, 0, s14
; GFX6-NEXT: s_add_u32 s14, s0, s1
; GFX6-NEXT: v_mov_b32_e32 v0, s14
-; GFX6-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX6-NEXT: v_mul_hi_u32 v0, s10, v0
+; GFX6-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX6-NEXT: s_or_b32 s0, s0, s1
-; GFX6-NEXT: s_cmp_lg_u32 s0, 0
; GFX6-NEXT: s_addc_u32 s12, s12, s13
; GFX6-NEXT: s_mul_i32 s0, s10, s12
; GFX6-NEXT: v_readfirstlane_b32 s1, v0
@@ -9123,7 +9089,6 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
; GFX6-NEXT: s_add_u32 s13, s14, s0
; GFX6-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX6-NEXT: s_or_b32 s0, s0, s1
-; GFX6-NEXT: s_cmp_lg_u32 s0, 0
; GFX6-NEXT: s_addc_u32 s12, s12, s10
; GFX6-NEXT: s_ashr_i32 s10, s7, 31
; GFX6-NEXT: s_add_u32 s0, s6, s10
@@ -9158,46 +9123,43 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
; GFX6-NEXT: v_readfirstlane_b32 s5, v0
; GFX6-NEXT: s_add_i32 s4, s5, s4
; GFX6-NEXT: s_mul_i32 s5, s9, s12
-; GFX6-NEXT: s_add_i32 s13, s4, s5
-; GFX6-NEXT: s_sub_i32 s14, s7, s13
+; GFX6-NEXT: s_add_i32 s14, s4, s5
+; GFX6-NEXT: s_sub_i32 s13, s7, s14
; GFX6-NEXT: s_mul_i32 s4, s8, s12
; GFX6-NEXT: s_sub_u32 s6, s6, s4
; GFX6-NEXT: s_cselect_b64 s[4:5], -1, 0
; GFX6-NEXT: s_or_b32 s12, s4, s5
-; GFX6-NEXT: s_cmp_lg_u32 s12, 0
-; GFX6-NEXT: s_subb_u32 s14, s14, s9
-; GFX6-NEXT: s_sub_u32 s15, s6, s8
-; GFX6-NEXT: s_cselect_b64 s[4:5], -1, 0
+; GFX6-NEXT: s_subb_u32 s15, s13, s9
+; GFX6-NEXT: s_sub_u32 s16, s6, s8
+; GFX6-NEXT: s_cselect_b64 s[12:13], -1, 0
+; GFX6-NEXT: s_or_b32 s17, s12, s13
+; GFX6-NEXT: s_subb_u32 s17, s15, 0
+; GFX6-NEXT: s_cmp_ge_u32 s17, s9
+; GFX6-NEXT: s_cselect_b32 s18, -1, 0
+; GFX6-NEXT: s_cmp_ge_u32 s16, s8
+; GFX6-NEXT: s_cselect_b32 s19, -1, 0
+; GFX6-NEXT: s_cmp_eq_u32 s17, s9
+; GFX6-NEXT: s_cselect_b32 s18, s19, s18
+; GFX6-NEXT: s_or_b32 s12, s12, s13
+; GFX6-NEXT: s_subb_u32 s15, s15, s9
+; GFX6-NEXT: s_sub_u32 s19, s16, s8
+; GFX6-NEXT: s_cselect_b64 s[12:13], -1, 0
+; GFX6-NEXT: s_or_b32 s12, s12, s13
+; GFX6-NEXT: s_subb_u32 s12, s15, 0
+; GFX6-NEXT: s_cmp_lg_u32 s18, 0
+; GFX6-NEXT: s_cselect_b32 s13, s19, s16
+; GFX6-NEXT: s_cselect_b32 s12, s12, s17
; GFX6-NEXT: s_or_b32 s4, s4, s5
-; GFX6-NEXT: s_cmp_lg_u32 s4, 0
-; GFX6-NEXT: s_subb_u32 s16, s14, 0
-; GFX6-NEXT: s_cmp_ge_u32 s16, s9
+; GFX6-NEXT: s_subb_u32 s4, s7, s14
+; GFX6-NEXT: s_cmp_ge_u32 s4, s9
; GFX6-NEXT: s_cselect_b32 s5, -1, 0
-; GFX6-NEXT: s_cmp_ge_u32 s15, s8
-; GFX6-NEXT: s_cselect_b32 s17, -1, 0
-; GFX6-NEXT: s_cmp_eq_u32 s16, s9
-; GFX6-NEXT: s_cselect_b32 s17, s17, s5
-; GFX6-NEXT: s_cmp_lg_u32 s4, 0
-; GFX6-NEXT: s_subb_u32 s14, s14, s9
-; GFX6-NEXT: s_sub_u32 s18, s15, s8
-; GFX6-NEXT: s_cselect_b64 s[4:5], -1, 0
-; GFX6-NEXT: s_or_b32 s4, s4, s5
-; GFX6-NEXT: s_cmp_lg_u32 s4, 0
-; GFX6-NEXT: s_subb_u32 s4, s14, 0
-; GFX6-NEXT: s_cmp_lg_u32 s17, 0
-; GFX6-NEXT: s_cselect_b32 s14, s18, s15
-; GFX6-NEXT: s_cselect_b32 s4, s4, s16
-; GFX6-NEXT: s_cmp_lg_u32 s12, 0
-; GFX6-NEXT: s_subb_u32 s5, s7, s13
-; GFX6-NEXT: s_cmp_ge_u32 s5, s9
-; GFX6-NEXT: s_cselect_b32 s7, -1, 0
; GFX6-NEXT: s_cmp_ge_u32 s6, s8
-; GFX6-NEXT: s_cselect_b32 s8, -1, 0
-; GFX6-NEXT: s_cmp_eq_u32 s5, s9
-; GFX6-NEXT: s_cselect_b32 s7, s8, s7
-; GFX6-NEXT: s_cmp_lg_u32 s7, 0
-; GFX6-NEXT: s_cselect_b32 s5, s4, s5
-; GFX6-NEXT: s_cselect_b32 s4, s14, s6
+; GFX6-NEXT: s_cselect_b32 s7, -1, 0
+; GFX6-NEXT: s_cmp_eq_u32 s4, s9
+; GFX6-NEXT: s_cselect_b32 s5, s7, s5
+; GFX6-NEXT: s_cmp_lg_u32 s5, 0
+; GFX6-NEXT: s_cselect_b32 s5, s12, s4
+; GFX6-NEXT: s_cselect_b32 s4, s13, s6
; GFX6-NEXT: s_xor_b64 s[4:5], s[4:5], s[10:11]
; GFX6-NEXT: s_sub_u32 s4, s4, s10
; GFX6-NEXT: s_subb_u32 s5, s5, s10
@@ -9219,8 +9181,8 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s6
; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s7
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX9-NEXT: s_sub_u32 s8, 0, s6
-; GFX9-NEXT: s_subb_u32 s9, 0, s7
+; GFX9-NEXT: s_sub_u32 s4, 0, s6
+; GFX9-NEXT: s_subb_u32 s5, 0, s7
; GFX9-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0
; GFX9-NEXT: v_rcp_f32_e32 v1, v0
; GFX9-NEXT: v_mov_b32_e32 v0, 0
@@ -9230,56 +9192,52 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
; GFX9-NEXT: v_madmk_f32 v1, v2, 0xcf800000, v1
; GFX9-NEXT: v_cvt_u32_f32_e32 v2, v2
; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1
-; GFX9-NEXT: v_readfirstlane_b32 s10, v2
-; GFX9-NEXT: v_readfirstlane_b32 s4, v1
-; GFX9-NEXT: s_mul_i32 s5, s8, s10
-; GFX9-NEXT: s_mul_hi_u32 s12, s8, s4
-; GFX9-NEXT: s_mul_i32 s11, s9, s4
-; GFX9-NEXT: s_add_i32 s5, s12, s5
-; GFX9-NEXT: s_mul_i32 s13, s8, s4
-; GFX9-NEXT: s_add_i32 s5, s5, s11
-; GFX9-NEXT: s_mul_hi_u32 s12, s4, s13
-; GFX9-NEXT: s_mul_i32 s14, s4, s5
-; GFX9-NEXT: s_mul_hi_u32 s11, s4, s5
+; GFX9-NEXT: v_readfirstlane_b32 s8, v2
+; GFX9-NEXT: v_readfirstlane_b32 s9, v1
+; GFX9-NEXT: s_mul_i32 s10, s4, s8
+; GFX9-NEXT: s_mul_hi_u32 s12, s4, s9
+; GFX9-NEXT: s_mul_i32 s11, s5, s9
+; GFX9-NEXT: s_add_i32 s10, s12, s10
+; GFX9-NEXT: s_mul_i32 s13, s4, s9
+; GFX9-NEXT: s_add_i32 s10, s10, s11
+; GFX9-NEXT: s_mul_hi_u32 s12, s9, s13
+; GFX9-NEXT: s_mul_i32 s14, s9, s10
+; GFX9-NEXT: s_mul_hi_u32 s11, s9, s10
; GFX9-NEXT: s_add_u32 s12, s12, s14
; GFX9-NEXT: s_addc_u32 s11, 0, s11
-; GFX9-NEXT: s_mul_hi_u32 s15, s10, s13
-; GFX9-NEXT: s_mul_i32 s13, s10, s13
+; GFX9-NEXT: s_mul_hi_u32 s15, s8, s13
+; GFX9-NEXT: s_mul_i32 s13, s8, s13
; GFX9-NEXT: s_add_u32 s12, s12, s13
-; GFX9-NEXT: s_mul_hi_u32 s14, s10, s5
+; GFX9-NEXT: s_mul_hi_u32 s14, s8, s10
; GFX9-NEXT: s_addc_u32 s11, s11, s15
; GFX9-NEXT: s_addc_u32 s12, s14, 0
-; GFX9-NEXT: s_mul_i32 s5, s10, s5
-; GFX9-NEXT: s_add_u32 s5, s11, s5
+; GFX9-NEXT: s_mul_i32 s10, s8, s10
+; GFX9-NEXT: s_add_u32 s10, s11, s10
; GFX9-NEXT: s_addc_u32 s11, 0, s12
-; GFX9-NEXT: s_add_u32 s12, s4, s5
-; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0
-; GFX9-NEXT: s_addc_u32 s10, s10, s11
-; GFX9-NEXT: s_mul_i32 s4, s8, s10
-; GFX9-NEXT: s_mul_hi_u32 s5, s8, s12
-; GFX9-NEXT: s_add_i32 s4, s5, s4
-; GFX9-NEXT: s_mul_i32 s9, s9, s12
-; GFX9-NEXT: s_add_i32 s4, s4, s9
-; GFX9-NEXT: s_mul_i32 s8, s8, s12
-; GFX9-NEXT: s_mul_hi_u32 s9, s10, s8
-; GFX9-NEXT: s_mul_i32 s11, s10, s8
-; GFX9-NEXT: s_mul_i32 s14, s12, s4
-; GFX9-NEXT: s_mul_hi_u32 s8, s12, s8
-; GFX9-NEXT: s_mul_hi_u32 s13, s12, s4
-; GFX9-NEXT: s_add_u32 s8, s8, s14
+; GFX9-NEXT: s_add_u32 s9, s9, s10
+; GFX9-NEXT: s_addc_u32 s8, s8, s11
+; GFX9-NEXT: s_mul_i32 s10, s4, s8
+; GFX9-NEXT: s_mul_hi_u32 s11, s4, s9
+; GFX9-NEXT: s_add_i32 s10, s11, s10
+; GFX9-NEXT: s_mul_i32 s5, s5, s9
+; GFX9-NEXT: s_add_i32 s10, s10, s5
+; GFX9-NEXT: s_mul_i32 s4, s4, s9
+; GFX9-NEXT: s_mul_hi_u32 s11, s8, s4
+; GFX9-NEXT: s_mul_i32 s12, s8, s4
+; GFX9-NEXT: s_mul_i32 s14, s9, s10
+; GFX9-NEXT: s_mul_hi_u32 s4, s9, s4
+; GFX9-NEXT: s_mul_hi_u32 s13, s9, s10
+; GFX9-NEXT: s_add_u32 s4, s4, s14
; GFX9-NEXT: s_addc_u32 s13, 0, s13
-; GFX9-NEXT: s_add_u32 s8, s8, s11
-; GFX9-NEXT: s_mul_hi_u32 s5, s10, s4
-; GFX9-NEXT: s_addc_u32 s8, s13, s9
+; GFX9-NEXT: s_add_u32 s4, s4, s12
+; GFX9-NEXT: s_mul_hi_u32 s5, s8, s10
+; GFX9-NEXT: s_addc_u32 s4, s13, s11
; GFX9-NEXT: s_addc_u32 s5, s5, 0
-; GFX9-NEXT: s_mul_i32 s4, s10, s4
-; GFX9-NEXT: s_add_u32 s4, s8, s4
-; GFX9-NEXT: s_addc_u32 s8, 0, s5
-; GFX9-NEXT: s_add_u32 s9, s12, s4
-; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0
-; GFX9-NEXT: s_addc_u32 s8, s10, s8
+; GFX9-NEXT: s_mul_i32 s10, s8, s10
+; GFX9-NEXT: s_add_u32 s4, s4, s10
+; GFX9-NEXT: s_addc_u32 s5, 0, s5
+; GFX9-NEXT: s_add_u32 s9, s9, s4
+; GFX9-NEXT: s_addc_u32 s8, s8, s5
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_ashr_i32 s4, s3, 31
; GFX9-NEXT: s_add_u32 s2, s2, s4
@@ -9309,11 +9267,9 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
; GFX9-NEXT: s_mul_i32 s8, s6, s8
; GFX9-NEXT: s_sub_u32 s2, s2, s8
; GFX9-NEXT: s_cselect_b64 s[8:9], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[8:9], 0
; GFX9-NEXT: s_subb_u32 s13, s10, s7
; GFX9-NEXT: s_sub_u32 s14, s2, s6
; GFX9-NEXT: s_cselect_b64 s[10:11], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[10:11], 0
; GFX9-NEXT: s_subb_u32 s15, s13, 0
; GFX9-NEXT: s_cmp_ge_u32 s15, s7
; GFX9-NEXT: s_cselect_b32 s16, -1, 0
@@ -9322,13 +9278,11 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
; GFX9-NEXT: s_cmp_eq_u32 s15, s7
; GFX9-NEXT: s_cselect_b32 s16, s17, s16
; GFX9-NEXT: s_cmp_lg_u64 s[10:11], 0
-; GFX9-NEXT: s_subb_u32 s13, s13, s7
-; GFX9-NEXT: s_sub_u32 s17, s14, s6
-; GFX9-NEXT: s_cselect_b64 s[10:11], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[10:11], 0
-; GFX9-NEXT: s_subb_u32 s10, s13, 0
+; GFX9-NEXT: s_subb_u32 s10, s13, s7
+; GFX9-NEXT: s_sub_u32 s11, s14, s6
+; GFX9-NEXT: s_subb_u32 s10, s10, 0
; GFX9-NEXT: s_cmp_lg_u32 s16, 0
-; GFX9-NEXT: s_cselect_b32 s11, s17, s14
+; GFX9-NEXT: s_cselect_b32 s11, s11, s14
; GFX9-NEXT: s_cselect_b32 s10, s10, s15
; GFX9-NEXT: s_cmp_lg_u64 s[8:9], 0
; GFX9-NEXT: s_subb_u32 s3, s3, s12
@@ -9490,10 +9444,9 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX6-NEXT: s_addc_u32 s15, 0, s16
; GFX6-NEXT: s_add_u32 s16, s6, s7
; GFX6-NEXT: v_mov_b32_e32 v0, s16
-; GFX6-NEXT: s_cselect_b64 s[6:7], -1, 0
; GFX6-NEXT: v_mul_hi_u32 v0, s12, v0
+; GFX6-NEXT: s_cselect_b64 s[6:7], -1, 0
; GFX6-NEXT: s_or_b32 s6, s6, s7
-; GFX6-NEXT: s_cmp_lg_u32 s6, 0
; GFX6-NEXT: s_addc_u32 s14, s14, s15
; GFX6-NEXT: s_mul_i32 s6, s12, s14
; GFX6-NEXT: v_readfirstlane_b32 s7, v0
@@ -9524,7 +9477,6 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX6-NEXT: s_add_u32 s13, s16, s6
; GFX6-NEXT: s_cselect_b64 s[6:7], -1, 0
; GFX6-NEXT: s_or_b32 s6, s6, s7
-; GFX6-NEXT: s_cmp_lg_u32 s6, 0
; GFX6-NEXT: s_addc_u32 s12, s14, s12
; GFX6-NEXT: s_ashr_i32 s6, s9, 31
; GFX6-NEXT: s_add_u32 s8, s8, s6
@@ -9557,49 +9509,46 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX6-NEXT: v_readfirstlane_b32 s14, v0
; GFX6-NEXT: s_add_i32 s13, s14, s13
; GFX6-NEXT: s_mul_i32 s14, s3, s12
-; GFX6-NEXT: s_add_i32 s14, s13, s14
-; GFX6-NEXT: s_sub_i32 s15, s9, s14
+; GFX6-NEXT: s_add_i32 s16, s13, s14
+; GFX6-NEXT: s_sub_i32 s14, s9, s16
; GFX6-NEXT: s_mul_i32 s12, s2, s12
; GFX6-NEXT: s_sub_u32 s8, s8, s12
; GFX6-NEXT: s_cselect_b64 s[12:13], -1, 0
-; GFX6-NEXT: s_or_b32 s16, s12, s13
-; GFX6-NEXT: s_cmp_lg_u32 s16, 0
-; GFX6-NEXT: s_subb_u32 s15, s15, s3
-; GFX6-NEXT: s_sub_u32 s17, s8, s2
-; GFX6-NEXT: s_cselect_b64 s[12:13], -1, 0
-; GFX6-NEXT: s_or_b32 s12, s12, s13
-; GFX6-NEXT: s_cmp_lg_u32 s12, 0
-; GFX6-NEXT: s_subb_u32 s18, s15, 0
-; GFX6-NEXT: s_cmp_ge_u32 s18, s3
-; GFX6-NEXT: s_cselect_b32 s13, -1, 0
-; GFX6-NEXT: s_cmp_ge_u32 s17, s2
-; GFX6-NEXT: s_cselect_b32 s19, -1, 0
-; GFX6-NEXT: s_cmp_eq_u32 s18, s3
-; GFX6-NEXT: s_cselect_b32 s19, s19, s13
-; GFX6-NEXT: s_cmp_lg_u32 s12, 0
-; GFX6-NEXT: s_subb_u32 s15, s15, s3
-; GFX6-NEXT: s_sub_u32 s20, s17, s2
-; GFX6-NEXT: s_cselect_b64 s[12:13], -1, 0
+; GFX6-NEXT: s_or_b32 s15, s12, s13
+; GFX6-NEXT: s_subb_u32 s17, s14, s3
+; GFX6-NEXT: s_sub_u32 s18, s8, s2
+; GFX6-NEXT: s_cselect_b64 s[14:15], -1, 0
+; GFX6-NEXT: s_or_b32 s19, s14, s15
+; GFX6-NEXT: s_subb_u32 s19, s17, 0
+; GFX6-NEXT: s_cmp_ge_u32 s19, s3
+; GFX6-NEXT: s_cselect_b32 s20, -1, 0
+; GFX6-NEXT: s_cmp_ge_u32 s18, s2
+; GFX6-NEXT: s_cselect_b32 s21, -1, 0
+; GFX6-NEXT: s_cmp_eq_u32 s19, s3
+; GFX6-NEXT: s_cselect_b32 s20, s21, s20
+; GFX6-NEXT: s_or_b32 s14, s14, s15
+; GFX6-NEXT: s_subb_u32 s17, s17, s3
+; GFX6-NEXT: s_sub_u32 s21, s18, s2
+; GFX6-NEXT: s_cselect_b64 s[14:15], -1, 0
+; GFX6-NEXT: s_or_b32 s14, s14, s15
+; GFX6-NEXT: s_subb_u32 s14, s17, 0
+; GFX6-NEXT: s_cmp_lg_u32 s20, 0
+; GFX6-NEXT: s_cselect_b32 s15, s21, s18
+; GFX6-NEXT: s_cselect_b32 s14, s14, s19
; GFX6-NEXT: s_or_b32 s12, s12, s13
-; GFX6-NEXT: s_cmp_lg_u32 s12, 0
-; GFX6-NEXT: s_subb_u32 s12, s15, 0
-; GFX6-NEXT: s_cmp_lg_u32 s19, 0
-; GFX6-NEXT: s_cselect_b32 s13, s20, s17
-; GFX6-NEXT: s_cselect_b32 s12, s12, s18
-; GFX6-NEXT: s_cmp_lg_u32 s16, 0
-; GFX6-NEXT: s_subb_u32 s9, s9, s14
+; GFX6-NEXT: s_subb_u32 s9, s9, s16
; GFX6-NEXT: s_cmp_ge_u32 s9, s3
-; GFX6-NEXT: s_cselect_b32 s14, -1, 0
+; GFX6-NEXT: s_cselect_b32 s12, -1, 0
; GFX6-NEXT: s_cmp_ge_u32 s8, s2
; GFX6-NEXT: s_cselect_b32 s2, -1, 0
; GFX6-NEXT: s_cmp_eq_u32 s9, s3
-; GFX6-NEXT: s_cselect_b32 s2, s2, s14
+; GFX6-NEXT: s_cselect_b32 s2, s2, s12
; GFX6-NEXT: s_cmp_lg_u32 s2, 0
-; GFX6-NEXT: s_cselect_b32 s3, s12, s9
-; GFX6-NEXT: s_cselect_b32 s2, s13, s8
+; GFX6-NEXT: s_cselect_b32 s3, s14, s9
+; GFX6-NEXT: s_cselect_b32 s2, s15, s8
; GFX6-NEXT: s_xor_b64 s[2:3], s[2:3], s[6:7]
-; GFX6-NEXT: s_sub_u32 s12, s2, s6
-; GFX6-NEXT: s_subb_u32 s13, s3, s6
+; GFX6-NEXT: s_sub_u32 s14, s2, s6
+; GFX6-NEXT: s_subb_u32 s15, s3, s6
; GFX6-NEXT: s_ashr_i32 s2, s1, 31
; GFX6-NEXT: s_add_u32 s0, s0, s2
; GFX6-NEXT: s_mov_b32 s3, s2
@@ -9618,40 +9567,39 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX6-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX6-NEXT: v_mul_hi_u32 v2, s8, v0
-; GFX6-NEXT: v_readfirstlane_b32 s14, v1
+; GFX6-NEXT: v_readfirstlane_b32 s12, v1
; GFX6-NEXT: v_readfirstlane_b32 s2, v0
-; GFX6-NEXT: s_mul_i32 s1, s8, s14
+; GFX6-NEXT: s_mul_i32 s1, s8, s12
; GFX6-NEXT: v_readfirstlane_b32 s3, v2
; GFX6-NEXT: s_mul_i32 s0, s9, s2
; GFX6-NEXT: s_add_i32 s1, s3, s1
; GFX6-NEXT: s_add_i32 s3, s1, s0
-; GFX6-NEXT: s_mul_i32 s15, s8, s2
+; GFX6-NEXT: s_mul_i32 s13, s8, s2
; GFX6-NEXT: v_mul_hi_u32 v2, v0, s3
-; GFX6-NEXT: v_mul_hi_u32 v0, v0, s15
+; GFX6-NEXT: v_mul_hi_u32 v0, v0, s13
; GFX6-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; GFX6-NEXT: s_mul_i32 s4, s2, s3
; GFX6-NEXT: v_readfirstlane_b32 s5, v2
; GFX6-NEXT: v_readfirstlane_b32 s16, v0
-; GFX6-NEXT: v_mul_hi_u32 v0, v1, s15
+; GFX6-NEXT: v_mul_hi_u32 v0, v1, s13
; GFX6-NEXT: v_mul_hi_u32 v1, v1, s3
; GFX6-NEXT: s_add_u32 s4, s16, s4
; GFX6-NEXT: s_addc_u32 s5, 0, s5
-; GFX6-NEXT: s_mul_i32 s15, s14, s15
+; GFX6-NEXT: s_mul_i32 s13, s12, s13
; GFX6-NEXT: v_readfirstlane_b32 s16, v0
-; GFX6-NEXT: s_add_u32 s4, s4, s15
+; GFX6-NEXT: s_add_u32 s4, s4, s13
; GFX6-NEXT: s_addc_u32 s4, s5, s16
; GFX6-NEXT: v_readfirstlane_b32 s5, v1
; GFX6-NEXT: s_addc_u32 s5, s5, 0
-; GFX6-NEXT: s_mul_i32 s3, s14, s3
+; GFX6-NEXT: s_mul_i32 s3, s12, s3
; GFX6-NEXT: s_add_u32 s3, s4, s3
; GFX6-NEXT: s_addc_u32 s4, 0, s5
; GFX6-NEXT: s_add_u32 s5, s2, s3
; GFX6-NEXT: v_mov_b32_e32 v0, s5
-; GFX6-NEXT: s_cselect_b64 s[2:3], -1, 0
; GFX6-NEXT: v_mul_hi_u32 v0, s8, v0
+; GFX6-NEXT: s_cselect_b64 s[2:3], -1, 0
; GFX6-NEXT: s_or_b32 s2, s2, s3
-; GFX6-NEXT: s_cmp_lg_u32 s2, 0
-; GFX6-NEXT: s_addc_u32 s4, s14, s4
+; GFX6-NEXT: s_addc_u32 s4, s12, s4
; GFX6-NEXT: s_mul_i32 s2, s8, s4
; GFX6-NEXT: v_readfirstlane_b32 s3, v0
; GFX6-NEXT: s_add_i32 s2, s3, s2
@@ -9665,102 +9613,98 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX6-NEXT: v_mul_hi_u32 v1, s4, v0
; GFX6-NEXT: v_mul_hi_u32 v0, s5, v0
; GFX6-NEXT: s_mul_i32 s9, s5, s2
-; GFX6-NEXT: v_readfirstlane_b32 s15, v2
-; GFX6-NEXT: s_add_u32 s9, s15, s9
-; GFX6-NEXT: v_readfirstlane_b32 s14, v0
+; GFX6-NEXT: v_readfirstlane_b32 s13, v2
+; GFX6-NEXT: s_add_u32 s9, s13, s9
+; GFX6-NEXT: v_readfirstlane_b32 s12, v0
; GFX6-NEXT: s_mul_i32 s3, s4, s3
-; GFX6-NEXT: s_addc_u32 s14, 0, s14
+; GFX6-NEXT: s_addc_u32 s12, 0, s12
; GFX6-NEXT: v_readfirstlane_b32 s8, v3
; GFX6-NEXT: s_add_u32 s3, s9, s3
-; GFX6-NEXT: s_addc_u32 s3, s14, s8
+; GFX6-NEXT: s_addc_u32 s3, s12, s8
; GFX6-NEXT: v_readfirstlane_b32 s8, v1
; GFX6-NEXT: s_addc_u32 s8, s8, 0
; GFX6-NEXT: s_mul_i32 s2, s4, s2
; GFX6-NEXT: s_add_u32 s2, s3, s2
; GFX6-NEXT: s_addc_u32 s8, 0, s8
-; GFX6-NEXT: s_add_u32 s14, s5, s2
+; GFX6-NEXT: s_add_u32 s12, s5, s2
; GFX6-NEXT: s_cselect_b64 s[2:3], -1, 0
; GFX6-NEXT: s_or_b32 s2, s2, s3
-; GFX6-NEXT: s_cmp_lg_u32 s2, 0
-; GFX6-NEXT: s_addc_u32 s15, s4, s8
+; GFX6-NEXT: s_addc_u32 s13, s4, s8
; GFX6-NEXT: s_ashr_i32 s4, s11, 31
; GFX6-NEXT: s_add_u32 s2, s10, s4
; GFX6-NEXT: s_mov_b32 s5, s4
; GFX6-NEXT: s_addc_u32 s3, s11, s4
; GFX6-NEXT: s_xor_b64 s[8:9], s[2:3], s[4:5]
-; GFX6-NEXT: v_mov_b32_e32 v0, s15
+; GFX6-NEXT: v_mov_b32_e32 v0, s13
; GFX6-NEXT: v_mul_hi_u32 v1, s8, v0
-; GFX6-NEXT: v_mov_b32_e32 v2, s14
+; GFX6-NEXT: v_mov_b32_e32 v2, s12
; GFX6-NEXT: v_mul_hi_u32 v3, s8, v2
-; GFX6-NEXT: s_mul_i32 s2, s8, s15
+; GFX6-NEXT: s_mul_i32 s2, s8, s13
; GFX6-NEXT: v_readfirstlane_b32 s10, v1
; GFX6-NEXT: v_mul_hi_u32 v1, s9, v2
; GFX6-NEXT: v_readfirstlane_b32 s11, v3
; GFX6-NEXT: v_mul_hi_u32 v0, s9, v0
; GFX6-NEXT: s_add_u32 s2, s11, s2
; GFX6-NEXT: s_addc_u32 s10, 0, s10
-; GFX6-NEXT: s_mul_i32 s11, s9, s14
-; GFX6-NEXT: v_readfirstlane_b32 s14, v1
+; GFX6-NEXT: s_mul_i32 s11, s9, s12
+; GFX6-NEXT: v_readfirstlane_b32 s12, v1
; GFX6-NEXT: s_add_u32 s2, s2, s11
-; GFX6-NEXT: s_addc_u32 s2, s10, s14
+; GFX6-NEXT: s_addc_u32 s2, s10, s12
; GFX6-NEXT: v_readfirstlane_b32 s10, v0
; GFX6-NEXT: s_addc_u32 s10, s10, 0
-; GFX6-NEXT: s_mul_i32 s11, s9, s15
+; GFX6-NEXT: s_mul_i32 s11, s9, s13
; GFX6-NEXT: s_add_u32 s11, s2, s11
; GFX6-NEXT: v_mov_b32_e32 v0, s11
; GFX6-NEXT: v_mul_hi_u32 v0, s6, v0
; GFX6-NEXT: s_addc_u32 s10, 0, s10
; GFX6-NEXT: s_mul_i32 s10, s6, s10
; GFX6-NEXT: s_mov_b32 s3, 0xf000
-; GFX6-NEXT: v_readfirstlane_b32 s14, v0
-; GFX6-NEXT: s_add_i32 s10, s14, s10
-; GFX6-NEXT: s_mul_i32 s14, s7, s11
-; GFX6-NEXT: s_add_i32 s14, s10, s14
-; GFX6-NEXT: s_sub_i32 s15, s9, s14
+; GFX6-NEXT: v_readfirstlane_b32 s12, v0
+; GFX6-NEXT: s_add_i32 s10, s12, s10
+; GFX6-NEXT: s_mul_i32 s12, s7, s11
+; GFX6-NEXT: s_add_i32 s16, s10, s12
+; GFX6-NEXT: s_sub_i32 s12, s9, s16
; GFX6-NEXT: s_mul_i32 s10, s6, s11
; GFX6-NEXT: s_sub_u32 s8, s8, s10
; GFX6-NEXT: s_cselect_b64 s[10:11], -1, 0
-; GFX6-NEXT: s_or_b32 s16, s10, s11
-; GFX6-NEXT: s_cmp_lg_u32 s16, 0
-; GFX6-NEXT: s_subb_u32 s15, s15, s7
-; GFX6-NEXT: s_sub_u32 s17, s8, s6
-; GFX6-NEXT: s_cselect_b64 s[10:11], -1, 0
-; GFX6-NEXT: s_or_b32 s10, s10, s11
-; GFX6-NEXT: s_cmp_lg_u32 s10, 0
-; GFX6-NEXT: s_subb_u32 s18, s15, 0
-; GFX6-NEXT: s_cmp_ge_u32 s18, s7
-; GFX6-NEXT: s_cselect_b32 s11, -1, 0
-; GFX6-NEXT: s_cmp_ge_u32 s17, s6
-; GFX6-NEXT: s_cselect_b32 s19, -1, 0
-; GFX6-NEXT: s_cmp_eq_u32 s18, s7
-; GFX6-NEXT: s_cselect_b32 s19, s19, s11
-; GFX6-NEXT: s_cmp_lg_u32 s10, 0
-; GFX6-NEXT: s_subb_u32 s15, s15, s7
-; GFX6-NEXT: s_sub_u32 s20, s17, s6
-; GFX6-NEXT: s_cselect_b64 s[10:11], -1, 0
+; GFX6-NEXT: s_or_b32 s13, s10, s11
+; GFX6-NEXT: s_subb_u32 s17, s12, s7
+; GFX6-NEXT: s_sub_u32 s18, s8, s6
+; GFX6-NEXT: s_cselect_b64 s[12:13], -1, 0
+; GFX6-NEXT: s_or_b32 s19, s12, s13
+; GFX6-NEXT: s_subb_u32 s19, s17, 0
+; GFX6-NEXT: s_cmp_ge_u32 s19, s7
+; GFX6-NEXT: s_cselect_b32 s20, -1, 0
+; GFX6-NEXT: s_cmp_ge_u32 s18, s6
+; GFX6-NEXT: s_cselect_b32 s21, -1, 0
+; GFX6-NEXT: s_cmp_eq_u32 s19, s7
+; GFX6-NEXT: s_cselect_b32 s20, s21, s20
+; GFX6-NEXT: s_or_b32 s12, s12, s13
+; GFX6-NEXT: s_subb_u32 s17, s17, s7
+; GFX6-NEXT: s_sub_u32 s21, s18, s6
+; GFX6-NEXT: s_cselect_b64 s[12:13], -1, 0
+; GFX6-NEXT: s_or_b32 s12, s12, s13
+; GFX6-NEXT: s_subb_u32 s12, s17, 0
+; GFX6-NEXT: s_cmp_lg_u32 s20, 0
+; GFX6-NEXT: s_cselect_b32 s13, s21, s18
+; GFX6-NEXT: s_cselect_b32 s12, s12, s19
; GFX6-NEXT: s_or_b32 s10, s10, s11
-; GFX6-NEXT: s_cmp_lg_u32 s10, 0
-; GFX6-NEXT: s_subb_u32 s10, s15, 0
-; GFX6-NEXT: s_cmp_lg_u32 s19, 0
-; GFX6-NEXT: s_cselect_b32 s11, s20, s17
-; GFX6-NEXT: s_cselect_b32 s10, s10, s18
-; GFX6-NEXT: s_cmp_lg_u32 s16, 0
-; GFX6-NEXT: s_subb_u32 s9, s9, s14
+; GFX6-NEXT: s_subb_u32 s9, s9, s16
; GFX6-NEXT: s_cmp_ge_u32 s9, s7
-; GFX6-NEXT: s_cselect_b32 s14, -1, 0
+; GFX6-NEXT: s_cselect_b32 s10, -1, 0
; GFX6-NEXT: s_cmp_ge_u32 s8, s6
; GFX6-NEXT: s_cselect_b32 s6, -1, 0
; GFX6-NEXT: s_cmp_eq_u32 s9, s7
-; GFX6-NEXT: s_cselect_b32 s6, s6, s14
+; GFX6-NEXT: s_cselect_b32 s6, s6, s10
; GFX6-NEXT: s_cmp_lg_u32 s6, 0
-; GFX6-NEXT: s_cselect_b32 s7, s10, s9
-; GFX6-NEXT: s_cselect_b32 s6, s11, s8
+; GFX6-NEXT: s_cselect_b32 s7, s12, s9
+; GFX6-NEXT: s_cselect_b32 s6, s13, s8
; GFX6-NEXT: s_xor_b64 s[6:7], s[6:7], s[4:5]
; GFX6-NEXT: s_sub_u32 s5, s6, s4
; GFX6-NEXT: s_subb_u32 s4, s7, s4
; GFX6-NEXT: s_mov_b32 s2, -1
-; GFX6-NEXT: v_mov_b32_e32 v0, s12
-; GFX6-NEXT: v_mov_b32_e32 v1, s13
+; GFX6-NEXT: v_mov_b32_e32 v0, s14
+; GFX6-NEXT: v_mov_b32_e32 v1, s15
; GFX6-NEXT: v_mov_b32_e32 v2, s5
; GFX6-NEXT: v_mov_b32_e32 v3, s4
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
@@ -9780,8 +9724,8 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX9-NEXT: s_xor_b64 s[2:3], s[2:3], s[6:7]
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s2
; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s3
-; GFX9-NEXT: s_sub_u32 s12, 0, s2
-; GFX9-NEXT: s_subb_u32 s13, 0, s3
+; GFX9-NEXT: s_sub_u32 s6, 0, s2
+; GFX9-NEXT: s_subb_u32 s7, 0, s3
; GFX9-NEXT: v_mac_f32_e32 v0, 0x4f800000, v1
; GFX9-NEXT: v_rcp_f32_e32 v0, v0
; GFX9-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -9790,56 +9734,52 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX9-NEXT: v_mac_f32_e32 v0, 0xcf800000, v1
; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GFX9-NEXT: v_readfirstlane_b32 s14, v1
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: s_mul_i32 s7, s12, s14
-; GFX9-NEXT: s_mul_hi_u32 s16, s12, s6
-; GFX9-NEXT: s_mul_i32 s15, s13, s6
-; GFX9-NEXT: s_add_i32 s7, s16, s7
-; GFX9-NEXT: s_mul_i32 s17, s12, s6
-; GFX9-NEXT: s_add_i32 s7, s7, s15
-; GFX9-NEXT: s_mul_hi_u32 s16, s6, s17
-; GFX9-NEXT: s_mul_i32 s18, s6, s7
-; GFX9-NEXT: s_mul_hi_u32 s15, s6, s7
+; GFX9-NEXT: v_readfirstlane_b32 s12, v1
+; GFX9-NEXT: v_readfirstlane_b32 s13, v0
+; GFX9-NEXT: s_mul_i32 s14, s6, s12
+; GFX9-NEXT: s_mul_hi_u32 s16, s6, s13
+; GFX9-NEXT: s_mul_i32 s15, s7, s13
+; GFX9-NEXT: s_add_i32 s14, s16, s14
+; GFX9-NEXT: s_mul_i32 s17, s6, s13
+; GFX9-NEXT: s_add_i32 s14, s14, s15
+; GFX9-NEXT: s_mul_hi_u32 s16, s13, s17
+; GFX9-NEXT: s_mul_i32 s18, s13, s14
+; GFX9-NEXT: s_mul_hi_u32 s15, s13, s14
; GFX9-NEXT: s_add_u32 s16, s16, s18
; GFX9-NEXT: s_addc_u32 s15, 0, s15
-; GFX9-NEXT: s_mul_hi_u32 s18, s14, s17
-; GFX9-NEXT: s_mul_i32 s17, s14, s17
+; GFX9-NEXT: s_mul_hi_u32 s18, s12, s17
+; GFX9-NEXT: s_mul_i32 s17, s12, s17
; GFX9-NEXT: s_add_u32 s16, s16, s17
-; GFX9-NEXT: s_mul_hi_u32 s19, s14, s7
+; GFX9-NEXT: s_mul_hi_u32 s19, s12, s14
; GFX9-NEXT: s_addc_u32 s15, s15, s18
; GFX9-NEXT: s_addc_u32 s16, s19, 0
-; GFX9-NEXT: s_mul_i32 s7, s14, s7
-; GFX9-NEXT: s_add_u32 s7, s15, s7
+; GFX9-NEXT: s_mul_i32 s14, s12, s14
+; GFX9-NEXT: s_add_u32 s14, s15, s14
; GFX9-NEXT: s_addc_u32 s15, 0, s16
-; GFX9-NEXT: s_add_u32 s16, s6, s7
-; GFX9-NEXT: s_cselect_b64 s[6:7], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[6:7], 0
-; GFX9-NEXT: s_addc_u32 s14, s14, s15
-; GFX9-NEXT: s_mul_i32 s6, s12, s14
-; GFX9-NEXT: s_mul_hi_u32 s7, s12, s16
-; GFX9-NEXT: s_add_i32 s6, s7, s6
-; GFX9-NEXT: s_mul_i32 s13, s13, s16
-; GFX9-NEXT: s_add_i32 s6, s6, s13
-; GFX9-NEXT: s_mul_i32 s12, s12, s16
-; GFX9-NEXT: s_mul_hi_u32 s13, s14, s12
-; GFX9-NEXT: s_mul_i32 s15, s14, s12
-; GFX9-NEXT: s_mul_i32 s18, s16, s6
-; GFX9-NEXT: s_mul_hi_u32 s12, s16, s12
-; GFX9-NEXT: s_mul_hi_u32 s17, s16, s6
-; GFX9-NEXT: s_add_u32 s12, s12, s18
+; GFX9-NEXT: s_add_u32 s13, s13, s14
+; GFX9-NEXT: s_addc_u32 s12, s12, s15
+; GFX9-NEXT: s_mul_i32 s14, s6, s12
+; GFX9-NEXT: s_mul_hi_u32 s15, s6, s13
+; GFX9-NEXT: s_add_i32 s14, s15, s14
+; GFX9-NEXT: s_mul_i32 s7, s7, s13
+; GFX9-NEXT: s_add_i32 s14, s14, s7
+; GFX9-NEXT: s_mul_i32 s6, s6, s13
+; GFX9-NEXT: s_mul_hi_u32 s15, s12, s6
+; GFX9-NEXT: s_mul_i32 s16, s12, s6
+; GFX9-NEXT: s_mul_i32 s18, s13, s14
+; GFX9-NEXT: s_mul_hi_u32 s6, s13, s6
+; GFX9-NEXT: s_mul_hi_u32 s17, s13, s14
+; GFX9-NEXT: s_add_u32 s6, s6, s18
; GFX9-NEXT: s_addc_u32 s17, 0, s17
-; GFX9-NEXT: s_add_u32 s12, s12, s15
-; GFX9-NEXT: s_mul_hi_u32 s7, s14, s6
-; GFX9-NEXT: s_addc_u32 s12, s17, s13
+; GFX9-NEXT: s_add_u32 s6, s6, s16
+; GFX9-NEXT: s_mul_hi_u32 s7, s12, s14
+; GFX9-NEXT: s_addc_u32 s6, s17, s15
; GFX9-NEXT: s_addc_u32 s7, s7, 0
-; GFX9-NEXT: s_mul_i32 s6, s14, s6
-; GFX9-NEXT: s_add_u32 s6, s12, s6
-; GFX9-NEXT: s_addc_u32 s12, 0, s7
-; GFX9-NEXT: s_add_u32 s13, s16, s6
-; GFX9-NEXT: s_cselect_b64 s[6:7], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[6:7], 0
-; GFX9-NEXT: s_addc_u32 s12, s14, s12
+; GFX9-NEXT: s_mul_i32 s14, s12, s14
+; GFX9-NEXT: s_add_u32 s6, s6, s14
+; GFX9-NEXT: s_addc_u32 s7, 0, s7
+; GFX9-NEXT: s_add_u32 s13, s13, s6
+; GFX9-NEXT: s_addc_u32 s12, s12, s7
; GFX9-NEXT: s_ashr_i32 s6, s9, 31
; GFX9-NEXT: s_add_u32 s8, s8, s6
; GFX9-NEXT: s_mov_b32 s7, s6
@@ -9868,11 +9808,9 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX9-NEXT: s_mul_i32 s12, s2, s12
; GFX9-NEXT: s_sub_u32 s8, s8, s12
; GFX9-NEXT: s_cselect_b64 s[12:13], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[12:13], 0
; GFX9-NEXT: s_subb_u32 s17, s14, s3
; GFX9-NEXT: s_sub_u32 s18, s8, s2
; GFX9-NEXT: s_cselect_b64 s[14:15], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[14:15], 0
; GFX9-NEXT: s_subb_u32 s19, s17, 0
; GFX9-NEXT: s_cmp_ge_u32 s19, s3
; GFX9-NEXT: s_cselect_b32 s20, -1, 0
@@ -9881,13 +9819,11 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX9-NEXT: s_cmp_eq_u32 s19, s3
; GFX9-NEXT: s_cselect_b32 s20, s21, s20
; GFX9-NEXT: s_cmp_lg_u64 s[14:15], 0
-; GFX9-NEXT: s_subb_u32 s17, s17, s3
-; GFX9-NEXT: s_sub_u32 s21, s18, s2
-; GFX9-NEXT: s_cselect_b64 s[14:15], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[14:15], 0
-; GFX9-NEXT: s_subb_u32 s14, s17, 0
+; GFX9-NEXT: s_subb_u32 s14, s17, s3
+; GFX9-NEXT: s_sub_u32 s15, s18, s2
+; GFX9-NEXT: s_subb_u32 s14, s14, 0
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cselect_b32 s15, s21, s18
+; GFX9-NEXT: s_cselect_b32 s15, s15, s18
; GFX9-NEXT: s_cselect_b32 s14, s14, s19
; GFX9-NEXT: s_cmp_lg_u64 s[12:13], 0
; GFX9-NEXT: s_subb_u32 s9, s9, s16
@@ -9911,8 +9847,8 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s2
; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s3
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
-; GFX9-NEXT: s_sub_u32 s6, 0, s2
-; GFX9-NEXT: s_subb_u32 s7, 0, s3
+; GFX9-NEXT: s_sub_u32 s4, 0, s2
+; GFX9-NEXT: s_subb_u32 s5, 0, s3
; GFX9-NEXT: v_mac_f32_e32 v0, 0x4f800000, v1
; GFX9-NEXT: v_rcp_f32_e32 v1, v0
; GFX9-NEXT: v_mov_b32_e32 v0, 0
@@ -9922,74 +9858,70 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX9-NEXT: v_mac_f32_e32 v1, 0xcf800000, v2
; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT: v_cvt_u32_f32_e32 v2, v2
-; GFX9-NEXT: v_readfirstlane_b32 s4, v1
+; GFX9-NEXT: v_readfirstlane_b32 s6, v1
; GFX9-NEXT: v_readfirstlane_b32 s9, v2
-; GFX9-NEXT: s_mul_hi_u32 s8, s6, s4
-; GFX9-NEXT: s_mul_i32 s14, s6, s9
-; GFX9-NEXT: s_mul_i32 s5, s7, s4
+; GFX9-NEXT: s_mul_hi_u32 s8, s4, s6
+; GFX9-NEXT: s_mul_i32 s14, s4, s9
+; GFX9-NEXT: s_mul_i32 s7, s5, s6
; GFX9-NEXT: s_add_i32 s8, s8, s14
-; GFX9-NEXT: s_add_i32 s8, s8, s5
-; GFX9-NEXT: s_mul_i32 s15, s6, s4
-; GFX9-NEXT: s_mul_i32 s14, s4, s8
-; GFX9-NEXT: s_mul_hi_u32 s16, s4, s15
-; GFX9-NEXT: s_mul_hi_u32 s5, s4, s8
+; GFX9-NEXT: s_add_i32 s8, s8, s7
+; GFX9-NEXT: s_mul_i32 s15, s4, s6
+; GFX9-NEXT: s_mul_i32 s14, s6, s8
+; GFX9-NEXT: s_mul_hi_u32 s16, s6, s15
+; GFX9-NEXT: s_mul_hi_u32 s7, s6, s8
; GFX9-NEXT: s_add_u32 s14, s16, s14
-; GFX9-NEXT: s_addc_u32 s5, 0, s5
+; GFX9-NEXT: s_addc_u32 s7, 0, s7
; GFX9-NEXT: s_mul_hi_u32 s17, s9, s15
; GFX9-NEXT: s_mul_i32 s15, s9, s15
; GFX9-NEXT: s_add_u32 s14, s14, s15
; GFX9-NEXT: s_mul_hi_u32 s16, s9, s8
-; GFX9-NEXT: s_addc_u32 s5, s5, s17
+; GFX9-NEXT: s_addc_u32 s7, s7, s17
; GFX9-NEXT: s_addc_u32 s14, s16, 0
; GFX9-NEXT: s_mul_i32 s8, s9, s8
-; GFX9-NEXT: s_add_u32 s5, s5, s8
+; GFX9-NEXT: s_add_u32 s7, s7, s8
; GFX9-NEXT: s_addc_u32 s8, 0, s14
-; GFX9-NEXT: s_add_u32 s14, s4, s5
-; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0
-; GFX9-NEXT: s_addc_u32 s8, s9, s8
-; GFX9-NEXT: s_mul_i32 s4, s6, s8
-; GFX9-NEXT: s_mul_hi_u32 s5, s6, s14
-; GFX9-NEXT: s_add_i32 s4, s5, s4
-; GFX9-NEXT: s_mul_i32 s7, s7, s14
-; GFX9-NEXT: s_add_i32 s4, s4, s7
-; GFX9-NEXT: s_mul_i32 s6, s6, s14
-; GFX9-NEXT: s_mul_hi_u32 s7, s8, s6
-; GFX9-NEXT: s_mul_i32 s9, s8, s6
-; GFX9-NEXT: s_mul_i32 s16, s14, s4
-; GFX9-NEXT: s_mul_hi_u32 s6, s14, s6
-; GFX9-NEXT: s_mul_hi_u32 s15, s14, s4
-; GFX9-NEXT: s_add_u32 s6, s6, s16
+; GFX9-NEXT: s_add_u32 s6, s6, s7
+; GFX9-NEXT: s_addc_u32 s7, s9, s8
+; GFX9-NEXT: s_mul_i32 s8, s4, s7
+; GFX9-NEXT: s_mul_hi_u32 s9, s4, s6
+; GFX9-NEXT: s_add_i32 s8, s9, s8
+; GFX9-NEXT: s_mul_i32 s5, s5, s6
+; GFX9-NEXT: s_add_i32 s8, s8, s5
+; GFX9-NEXT: s_mul_i32 s4, s4, s6
+; GFX9-NEXT: s_mul_hi_u32 s9, s7, s4
+; GFX9-NEXT: s_mul_i32 s14, s7, s4
+; GFX9-NEXT: s_mul_i32 s16, s6, s8
+; GFX9-NEXT: s_mul_hi_u32 s4, s6, s4
+; GFX9-NEXT: s_mul_hi_u32 s15, s6, s8
+; GFX9-NEXT: s_add_u32 s4, s4, s16
; GFX9-NEXT: s_addc_u32 s15, 0, s15
-; GFX9-NEXT: s_add_u32 s6, s6, s9
-; GFX9-NEXT: s_mul_hi_u32 s5, s8, s4
-; GFX9-NEXT: s_addc_u32 s6, s15, s7
+; GFX9-NEXT: s_add_u32 s4, s4, s14
+; GFX9-NEXT: s_mul_hi_u32 s5, s7, s8
+; GFX9-NEXT: s_addc_u32 s4, s15, s9
; GFX9-NEXT: s_addc_u32 s5, s5, 0
-; GFX9-NEXT: s_mul_i32 s4, s8, s4
-; GFX9-NEXT: s_add_u32 s4, s6, s4
-; GFX9-NEXT: s_addc_u32 s6, 0, s5
-; GFX9-NEXT: s_add_u32 s9, s14, s4
-; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0
-; GFX9-NEXT: s_addc_u32 s8, s8, s6
+; GFX9-NEXT: s_mul_i32 s8, s7, s8
+; GFX9-NEXT: s_add_u32 s4, s4, s8
+; GFX9-NEXT: s_addc_u32 s5, 0, s5
+; GFX9-NEXT: s_add_u32 s8, s6, s4
+; GFX9-NEXT: s_addc_u32 s9, s7, s5
; GFX9-NEXT: s_ashr_i32 s4, s11, 31
; GFX9-NEXT: s_add_u32 s6, s10, s4
; GFX9-NEXT: s_mov_b32 s5, s4
; GFX9-NEXT: s_addc_u32 s7, s11, s4
; GFX9-NEXT: s_xor_b64 s[6:7], s[6:7], s[4:5]
-; GFX9-NEXT: s_mul_i32 s11, s6, s8
-; GFX9-NEXT: s_mul_hi_u32 s14, s6, s9
-; GFX9-NEXT: s_mul_hi_u32 s10, s6, s8
+; GFX9-NEXT: s_mul_i32 s11, s6, s9
+; GFX9-NEXT: s_mul_hi_u32 s14, s6, s8
+; GFX9-NEXT: s_mul_hi_u32 s10, s6, s9
; GFX9-NEXT: s_add_u32 s11, s14, s11
; GFX9-NEXT: s_addc_u32 s10, 0, s10
-; GFX9-NEXT: s_mul_hi_u32 s15, s7, s9
-; GFX9-NEXT: s_mul_i32 s9, s7, s9
-; GFX9-NEXT: s_add_u32 s9, s11, s9
-; GFX9-NEXT: s_mul_hi_u32 s14, s7, s8
-; GFX9-NEXT: s_addc_u32 s9, s10, s15
-; GFX9-NEXT: s_addc_u32 s10, s14, 0
+; GFX9-NEXT: s_mul_hi_u32 s15, s7, s8
; GFX9-NEXT: s_mul_i32 s8, s7, s8
-; GFX9-NEXT: s_add_u32 s8, s9, s8
+; GFX9-NEXT: s_add_u32 s8, s11, s8
+; GFX9-NEXT: s_mul_hi_u32 s14, s7, s9
+; GFX9-NEXT: s_addc_u32 s8, s10, s15
+; GFX9-NEXT: s_addc_u32 s10, s14, 0
+; GFX9-NEXT: s_mul_i32 s9, s7, s9
+; GFX9-NEXT: s_add_u32 s8, s8, s9
; GFX9-NEXT: s_addc_u32 s9, 0, s10
; GFX9-NEXT: s_mul_i32 s9, s2, s9
; GFX9-NEXT: s_mul_hi_u32 s10, s2, s8
@@ -10000,11 +9932,9 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX9-NEXT: s_mul_i32 s8, s2, s8
; GFX9-NEXT: s_sub_u32 s6, s6, s8
; GFX9-NEXT: s_cselect_b64 s[8:9], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[8:9], 0
; GFX9-NEXT: s_subb_u32 s15, s10, s3
; GFX9-NEXT: s_sub_u32 s16, s6, s2
; GFX9-NEXT: s_cselect_b64 s[10:11], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[10:11], 0
; GFX9-NEXT: s_subb_u32 s17, s15, 0
; GFX9-NEXT: s_cmp_ge_u32 s17, s3
; GFX9-NEXT: s_cselect_b32 s18, -1, 0
@@ -10013,13 +9943,11 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
; GFX9-NEXT: s_cmp_eq_u32 s17, s3
; GFX9-NEXT: s_cselect_b32 s18, s19, s18
; GFX9-NEXT: s_cmp_lg_u64 s[10:11], 0
-; GFX9-NEXT: s_subb_u32 s15, s15, s3
-; GFX9-NEXT: s_sub_u32 s19, s16, s2
-; GFX9-NEXT: s_cselect_b64 s[10:11], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[10:11], 0
-; GFX9-NEXT: s_subb_u32 s10, s15, 0
+; GFX9-NEXT: s_subb_u32 s10, s15, s3
+; GFX9-NEXT: s_sub_u32 s11, s16, s2
+; GFX9-NEXT: s_subb_u32 s10, s10, 0
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cselect_b32 s11, s19, s16
+; GFX9-NEXT: s_cselect_b32 s11, s11, s16
; GFX9-NEXT: s_cselect_b32 s10, s10, s17
; GFX9-NEXT: s_cmp_lg_u64 s[8:9], 0
; GFX9-NEXT: s_subb_u32 s7, s7, s14
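
The hunks above repeatedly drop an s_cmp_lg_u32/s_cmp_lg_u64 that re-tested a materialized carry bit immediately before an s_addc_u32 or s_subb_u32: the updated checks either consume SCC straight from the producing s_add_u32/s_sub_u32, or rely on s_or_b32, which itself sets SCC to "result != 0". As a hedged illustration of the underlying semantics (a sketch, not code from the test; the function and parameter names are invented), the 64-bit add that these s_add_u32/s_addc_u32 pairs implement decomposes as:

#include <stdint.h>

/* Sketch: a 64-bit add split into 32-bit halves with an explicit carry,
   mirroring s_add_u32 (low half, produces carry-out in SCC) followed by
   s_addc_u32 (high half, consumes SCC). Illustrative names only. */
static uint64_t add64_split(uint32_t alo, uint32_t ahi,
                            uint32_t blo, uint32_t bhi) {
    uint32_t lo = alo + blo;          /* s_add_u32: wraps mod 2^32 */
    uint32_t carry = lo < alo;        /* carry-out of the low half */
    uint32_t hi = ahi + bhi + carry;  /* s_addc_u32: adds the carry in */
    return ((uint64_t)hi << 32) | lo;
}

Re-deriving that carry with an extra compare is only needed when an SCC-clobbering instruction is scheduled between producer and consumer, which the updated schedules appear to avoid.
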
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_buffer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_buffer.ll
index 394727c..01f4414 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_buffer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_buffer.ll
@@ -612,12 +612,11 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX8-NEXT: s_mov_b32 m0, s3
-; GFX8-NEXT: v_readlane_b32 s8, v0, s3
-; GFX8-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX8-NEXT: v_readlane_b32 s6, v0, s3
; GFX8-NEXT: v_writelane_b32 v1, s2, m0
-; GFX8-NEXT: s_add_i32 s2, s2, s8
+; GFX8-NEXT: s_add_i32 s2, s2, s6
+; GFX8-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX8-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX8-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX8-NEXT: s_cbranch_scc1 .LBB2_1
; GFX8-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -653,12 +652,11 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX9-NEXT: s_mov_b32 m0, s3
-; GFX9-NEXT: v_readlane_b32 s8, v0, s3
-; GFX9-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX9-NEXT: v_readlane_b32 s6, v0, s3
; GFX9-NEXT: v_writelane_b32 v1, s2, m0
-; GFX9-NEXT: s_add_i32 s2, s2, s8
+; GFX9-NEXT: s_add_i32 s2, s2, s6
+; GFX9-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: s_cbranch_scc1 .LBB2_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -693,11 +691,10 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX10W64-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10W64-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX10W64-NEXT: v_readlane_b32 s8, v0, s3
-; GFX10W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX10W64-NEXT: v_writelane_b32 v1, s2, s3
-; GFX10W64-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
+; GFX10W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX10W64-NEXT: s_add_i32 s2, s2, s8
-; GFX10W64-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX10W64-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
; GFX10W64-NEXT: s_cbranch_scc1 .LBB2_1
; GFX10W64-NEXT: ; %bb.2: ; %ComputeEnd
; GFX10W64-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -733,11 +730,10 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX10W32-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10W32-NEXT: s_ff1_i32_b32 s2, s1
; GFX10W32-NEXT: v_readlane_b32 s3, v0, s2
-; GFX10W32-NEXT: s_lshl_b32 s6, 1, s2
; GFX10W32-NEXT: v_writelane_b32 v1, s0, s2
-; GFX10W32-NEXT: s_andn2_b32 s1, s1, s6
+; GFX10W32-NEXT: s_lshl_b32 s2, 1, s2
; GFX10W32-NEXT: s_add_i32 s0, s0, s3
-; GFX10W32-NEXT: s_cmp_lg_u32 s1, 0
+; GFX10W32-NEXT: s_andn2_b32 s1, s1, s2
; GFX10W32-NEXT: s_cbranch_scc1 .LBB2_1
; GFX10W32-NEXT: ; %bb.2: ; %ComputeEnd
; GFX10W32-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -774,11 +770,10 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX11W64-NEXT: s_ctz_i32_b64 s3, s[0:1]
; GFX11W64-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11W64-NEXT: v_readlane_b32 s8, v1, s3
-; GFX11W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX11W64-NEXT: v_writelane_b32 v0, s2, s3
-; GFX11W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
+; GFX11W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX11W64-NEXT: s_add_i32 s2, s2, s8
-; GFX11W64-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX11W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
; GFX11W64-NEXT: s_cbranch_scc1 .LBB2_1
; GFX11W64-NEXT: ; %bb.2: ; %ComputeEnd
; GFX11W64-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -818,11 +813,10 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX11W32-NEXT: s_ctz_i32_b32 s2, s1
; GFX11W32-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11W32-NEXT: v_readlane_b32 s3, v1, s2
-; GFX11W32-NEXT: s_lshl_b32 s6, 1, s2
; GFX11W32-NEXT: v_writelane_b32 v0, s0, s2
-; GFX11W32-NEXT: s_and_not1_b32 s1, s1, s6
+; GFX11W32-NEXT: s_lshl_b32 s2, 1, s2
; GFX11W32-NEXT: s_add_i32 s0, s0, s3
-; GFX11W32-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11W32-NEXT: s_and_not1_b32 s1, s1, s2
; GFX11W32-NEXT: s_cbranch_scc1 .LBB2_1
; GFX11W32-NEXT: ; %bb.2: ; %ComputeEnd
; GFX11W32-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -859,11 +853,10 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX12W64-NEXT: s_ctz_i32_b64 s3, s[0:1]
; GFX12W64-NEXT: s_wait_alu 0xfffe
; GFX12W64-NEXT: v_readlane_b32 s8, v1, s3
-; GFX12W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX12W64-NEXT: v_writelane_b32 v0, s2, s3
-; GFX12W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
+; GFX12W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX12W64-NEXT: s_add_co_i32 s2, s2, s8
-; GFX12W64-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX12W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
; GFX12W64-NEXT: s_cbranch_scc1 .LBB2_1
; GFX12W64-NEXT: ; %bb.2: ; %ComputeEnd
; GFX12W64-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -901,15 +894,15 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX12W32-NEXT: ; implicit-def: $vgpr0
; GFX12W32-NEXT: .LBB2_1: ; %ComputeLoop
; GFX12W32-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12W32-NEXT: s_wait_alu 0xfffe
; GFX12W32-NEXT: s_ctz_i32_b32 s2, s1
; GFX12W32-NEXT: s_wait_alu 0xfffe
; GFX12W32-NEXT: v_readlane_b32 s3, v1, s2
-; GFX12W32-NEXT: s_lshl_b32 s6, 1, s2
; GFX12W32-NEXT: v_writelane_b32 v0, s0, s2
-; GFX12W32-NEXT: s_and_not1_b32 s1, s1, s6
+; GFX12W32-NEXT: s_lshl_b32 s2, 1, s2
; GFX12W32-NEXT: s_add_co_i32 s0, s0, s3
; GFX12W32-NEXT: s_wait_alu 0xfffe
-; GFX12W32-NEXT: s_cmp_lg_u32 s1, 0
+; GFX12W32-NEXT: s_and_not1_b32 s1, s1, s2
; GFX12W32-NEXT: s_cbranch_scc1 .LBB2_1
; GFX12W32-NEXT: ; %bb.2: ; %ComputeEnd
; GFX12W32-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -999,12 +992,11 @@ define amdgpu_kernel void @struct_add_i32_varying_vdata(ptr addrspace(1) %out, p
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX8-NEXT: s_mov_b32 m0, s3
-; GFX8-NEXT: v_readlane_b32 s8, v0, s3
-; GFX8-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX8-NEXT: v_readlane_b32 s6, v0, s3
; GFX8-NEXT: v_writelane_b32 v1, s2, m0
-; GFX8-NEXT: s_add_i32 s2, s2, s8
+; GFX8-NEXT: s_add_i32 s2, s2, s6
+; GFX8-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX8-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX8-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX8-NEXT: s_cbranch_scc1 .LBB3_1
; GFX8-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -1042,12 +1034,11 @@ define amdgpu_kernel void @struct_add_i32_varying_vdata(ptr addrspace(1) %out, p
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX9-NEXT: s_mov_b32 m0, s3
-; GFX9-NEXT: v_readlane_b32 s8, v0, s3
-; GFX9-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX9-NEXT: v_readlane_b32 s6, v0, s3
; GFX9-NEXT: v_writelane_b32 v1, s2, m0
-; GFX9-NEXT: s_add_i32 s2, s2, s8
+; GFX9-NEXT: s_add_i32 s2, s2, s6
+; GFX9-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: s_cbranch_scc1 .LBB3_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -1084,11 +1075,10 @@ define amdgpu_kernel void @struct_add_i32_varying_vdata(ptr addrspace(1) %out, p
; GFX10W64-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10W64-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX10W64-NEXT: v_readlane_b32 s8, v0, s3
-; GFX10W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX10W64-NEXT: v_writelane_b32 v1, s2, s3
-; GFX10W64-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
+; GFX10W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX10W64-NEXT: s_add_i32 s2, s2, s8
-; GFX10W64-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX10W64-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
; GFX10W64-NEXT: s_cbranch_scc1 .LBB3_1
; GFX10W64-NEXT: ; %bb.2: ; %ComputeEnd
; GFX10W64-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -1127,11 +1117,10 @@ define amdgpu_kernel void @struct_add_i32_varying_vdata(ptr addrspace(1) %out, p
; GFX10W32-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10W32-NEXT: s_ff1_i32_b32 s2, s1
; GFX10W32-NEXT: v_readlane_b32 s3, v0, s2
-; GFX10W32-NEXT: s_lshl_b32 s6, 1, s2
; GFX10W32-NEXT: v_writelane_b32 v1, s0, s2
-; GFX10W32-NEXT: s_andn2_b32 s1, s1, s6
+; GFX10W32-NEXT: s_lshl_b32 s2, 1, s2
; GFX10W32-NEXT: s_add_i32 s0, s0, s3
-; GFX10W32-NEXT: s_cmp_lg_u32 s1, 0
+; GFX10W32-NEXT: s_andn2_b32 s1, s1, s2
; GFX10W32-NEXT: s_cbranch_scc1 .LBB3_1
; GFX10W32-NEXT: ; %bb.2: ; %ComputeEnd
; GFX10W32-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -1171,11 +1160,10 @@ define amdgpu_kernel void @struct_add_i32_varying_vdata(ptr addrspace(1) %out, p
; GFX11W64-NEXT: s_ctz_i32_b64 s3, s[0:1]
; GFX11W64-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11W64-NEXT: v_readlane_b32 s8, v1, s3
-; GFX11W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX11W64-NEXT: v_writelane_b32 v0, s2, s3
-; GFX11W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
+; GFX11W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX11W64-NEXT: s_add_i32 s2, s2, s8
-; GFX11W64-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX11W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
; GFX11W64-NEXT: s_cbranch_scc1 .LBB3_1
; GFX11W64-NEXT: ; %bb.2: ; %ComputeEnd
; GFX11W64-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -1218,11 +1206,10 @@ define amdgpu_kernel void @struct_add_i32_varying_vdata(ptr addrspace(1) %out, p
; GFX11W32-NEXT: s_ctz_i32_b32 s2, s1
; GFX11W32-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11W32-NEXT: v_readlane_b32 s3, v1, s2
-; GFX11W32-NEXT: s_lshl_b32 s6, 1, s2
; GFX11W32-NEXT: v_writelane_b32 v0, s0, s2
-; GFX11W32-NEXT: s_and_not1_b32 s1, s1, s6
+; GFX11W32-NEXT: s_lshl_b32 s2, 1, s2
; GFX11W32-NEXT: s_add_i32 s0, s0, s3
-; GFX11W32-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11W32-NEXT: s_and_not1_b32 s1, s1, s2
; GFX11W32-NEXT: s_cbranch_scc1 .LBB3_1
; GFX11W32-NEXT: ; %bb.2: ; %ComputeEnd
; GFX11W32-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -1261,11 +1248,10 @@ define amdgpu_kernel void @struct_add_i32_varying_vdata(ptr addrspace(1) %out, p
; GFX12W64-NEXT: s_ctz_i32_b64 s3, s[0:1]
; GFX12W64-NEXT: s_wait_alu 0xfffe
; GFX12W64-NEXT: v_readlane_b32 s8, v1, s3
-; GFX12W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX12W64-NEXT: v_writelane_b32 v0, s2, s3
-; GFX12W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
+; GFX12W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX12W64-NEXT: s_add_co_i32 s2, s2, s8
-; GFX12W64-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX12W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
; GFX12W64-NEXT: s_cbranch_scc1 .LBB3_1
; GFX12W64-NEXT: ; %bb.2: ; %ComputeEnd
; GFX12W64-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -1306,15 +1292,15 @@ define amdgpu_kernel void @struct_add_i32_varying_vdata(ptr addrspace(1) %out, p
; GFX12W32-NEXT: ; implicit-def: $vgpr0
; GFX12W32-NEXT: .LBB3_1: ; %ComputeLoop
; GFX12W32-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12W32-NEXT: s_wait_alu 0xfffe
; GFX12W32-NEXT: s_ctz_i32_b32 s2, s1
; GFX12W32-NEXT: s_wait_alu 0xfffe
; GFX12W32-NEXT: v_readlane_b32 s3, v1, s2
-; GFX12W32-NEXT: s_lshl_b32 s6, 1, s2
; GFX12W32-NEXT: v_writelane_b32 v0, s0, s2
-; GFX12W32-NEXT: s_and_not1_b32 s1, s1, s6
+; GFX12W32-NEXT: s_lshl_b32 s2, 1, s2
; GFX12W32-NEXT: s_add_co_i32 s0, s0, s3
; GFX12W32-NEXT: s_wait_alu 0xfffe
-; GFX12W32-NEXT: s_cmp_lg_u32 s1, 0
+; GFX12W32-NEXT: s_and_not1_b32 s1, s1, s2
; GFX12W32-NEXT: s_cbranch_scc1 .LBB3_1
; GFX12W32-NEXT: ; %bb.2: ; %ComputeEnd
; GFX12W32-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -2073,12 +2059,11 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX8-NEXT: s_mov_b32 m0, s3
-; GFX8-NEXT: v_readlane_b32 s8, v0, s3
-; GFX8-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX8-NEXT: v_readlane_b32 s6, v0, s3
; GFX8-NEXT: v_writelane_b32 v1, s2, m0
-; GFX8-NEXT: s_add_i32 s2, s2, s8
+; GFX8-NEXT: s_add_i32 s2, s2, s6
+; GFX8-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX8-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX8-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX8-NEXT: s_cbranch_scc1 .LBB7_1
; GFX8-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -2114,12 +2099,11 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX9-NEXT: s_mov_b32 m0, s3
-; GFX9-NEXT: v_readlane_b32 s8, v0, s3
-; GFX9-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX9-NEXT: v_readlane_b32 s6, v0, s3
; GFX9-NEXT: v_writelane_b32 v1, s2, m0
-; GFX9-NEXT: s_add_i32 s2, s2, s8
+; GFX9-NEXT: s_add_i32 s2, s2, s6
+; GFX9-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: s_cbranch_scc1 .LBB7_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -2154,11 +2138,10 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX10W64-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10W64-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX10W64-NEXT: v_readlane_b32 s8, v0, s3
-; GFX10W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX10W64-NEXT: v_writelane_b32 v1, s2, s3
-; GFX10W64-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
+; GFX10W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX10W64-NEXT: s_add_i32 s2, s2, s8
-; GFX10W64-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX10W64-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
; GFX10W64-NEXT: s_cbranch_scc1 .LBB7_1
; GFX10W64-NEXT: ; %bb.2: ; %ComputeEnd
; GFX10W64-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -2194,11 +2177,10 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX10W32-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10W32-NEXT: s_ff1_i32_b32 s2, s1
; GFX10W32-NEXT: v_readlane_b32 s3, v0, s2
-; GFX10W32-NEXT: s_lshl_b32 s6, 1, s2
; GFX10W32-NEXT: v_writelane_b32 v1, s0, s2
-; GFX10W32-NEXT: s_andn2_b32 s1, s1, s6
+; GFX10W32-NEXT: s_lshl_b32 s2, 1, s2
; GFX10W32-NEXT: s_add_i32 s0, s0, s3
-; GFX10W32-NEXT: s_cmp_lg_u32 s1, 0
+; GFX10W32-NEXT: s_andn2_b32 s1, s1, s2
; GFX10W32-NEXT: s_cbranch_scc1 .LBB7_1
; GFX10W32-NEXT: ; %bb.2: ; %ComputeEnd
; GFX10W32-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -2235,11 +2217,10 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX11W64-NEXT: s_ctz_i32_b64 s3, s[0:1]
; GFX11W64-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11W64-NEXT: v_readlane_b32 s8, v1, s3
-; GFX11W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX11W64-NEXT: v_writelane_b32 v0, s2, s3
-; GFX11W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
+; GFX11W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX11W64-NEXT: s_add_i32 s2, s2, s8
-; GFX11W64-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX11W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
; GFX11W64-NEXT: s_cbranch_scc1 .LBB7_1
; GFX11W64-NEXT: ; %bb.2: ; %ComputeEnd
; GFX11W64-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -2279,11 +2260,10 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX11W32-NEXT: s_ctz_i32_b32 s2, s1
; GFX11W32-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11W32-NEXT: v_readlane_b32 s3, v1, s2
-; GFX11W32-NEXT: s_lshl_b32 s6, 1, s2
; GFX11W32-NEXT: v_writelane_b32 v0, s0, s2
-; GFX11W32-NEXT: s_and_not1_b32 s1, s1, s6
+; GFX11W32-NEXT: s_lshl_b32 s2, 1, s2
; GFX11W32-NEXT: s_add_i32 s0, s0, s3
-; GFX11W32-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11W32-NEXT: s_and_not1_b32 s1, s1, s2
; GFX11W32-NEXT: s_cbranch_scc1 .LBB7_1
; GFX11W32-NEXT: ; %bb.2: ; %ComputeEnd
; GFX11W32-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -2321,11 +2301,10 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX12W64-NEXT: s_ctz_i32_b64 s3, s[0:1]
; GFX12W64-NEXT: s_wait_alu 0xfffe
; GFX12W64-NEXT: v_readlane_b32 s8, v1, s3
-; GFX12W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX12W64-NEXT: v_writelane_b32 v0, s2, s3
-; GFX12W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
+; GFX12W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX12W64-NEXT: s_add_co_i32 s2, s2, s8
-; GFX12W64-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX12W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
; GFX12W64-NEXT: s_cbranch_scc1 .LBB7_1
; GFX12W64-NEXT: ; %bb.2: ; %ComputeEnd
; GFX12W64-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -2363,15 +2342,15 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX12W32-NEXT: ; implicit-def: $vgpr0
; GFX12W32-NEXT: .LBB7_1: ; %ComputeLoop
; GFX12W32-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12W32-NEXT: s_wait_alu 0xfffe
; GFX12W32-NEXT: s_ctz_i32_b32 s2, s1
; GFX12W32-NEXT: s_wait_alu 0xfffe
; GFX12W32-NEXT: v_readlane_b32 s3, v1, s2
-; GFX12W32-NEXT: s_lshl_b32 s6, 1, s2
; GFX12W32-NEXT: v_writelane_b32 v0, s0, s2
-; GFX12W32-NEXT: s_and_not1_b32 s1, s1, s6
+; GFX12W32-NEXT: s_lshl_b32 s2, 1, s2
; GFX12W32-NEXT: s_add_co_i32 s0, s0, s3
; GFX12W32-NEXT: s_wait_alu 0xfffe
-; GFX12W32-NEXT: s_cmp_lg_u32 s1, 0
+; GFX12W32-NEXT: s_and_not1_b32 s1, s1, s2
; GFX12W32-NEXT: s_cbranch_scc1 .LBB7_1
; GFX12W32-NEXT: ; %bb.2: ; %ComputeEnd
; GFX12W32-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
index 258bc295..9db6d70 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
@@ -717,12 +717,11 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX8_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8_ITERATIVE-NEXT: s_ff1_i32_b64 s2, s[0:1]
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, s2
-; GFX8_ITERATIVE-NEXT: v_readlane_b32 s7, v0, s2
-; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s2
+; GFX8_ITERATIVE-NEXT: v_readlane_b32 s3, v0, s2
; GFX8_ITERATIVE-NEXT: v_writelane_b32 v1, s6, m0
-; GFX8_ITERATIVE-NEXT: s_add_i32 s6, s6, s7
+; GFX8_ITERATIVE-NEXT: s_add_i32 s6, s6, s3
+; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB2_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
@@ -762,12 +761,11 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX9_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9_ITERATIVE-NEXT: s_ff1_i32_b64 s2, s[0:1]
; GFX9_ITERATIVE-NEXT: s_mov_b32 m0, s2
-; GFX9_ITERATIVE-NEXT: v_readlane_b32 s7, v0, s2
-; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s2
+; GFX9_ITERATIVE-NEXT: v_readlane_b32 s3, v0, s2
; GFX9_ITERATIVE-NEXT: v_writelane_b32 v1, s6, m0
-; GFX9_ITERATIVE-NEXT: s_add_i32 s6, s6, s7
+; GFX9_ITERATIVE-NEXT: s_add_i32 s6, s6, s3
+; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB2_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
@@ -805,13 +803,12 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1064_ITERATIVE-NEXT: ; implicit-def: $vgpr1
; GFX1064_ITERATIVE-NEXT: .LBB2_1: ; %ComputeLoop
; GFX1064_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s7, s[0:1]
-; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s7
-; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s7
-; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v1, s6, s7
+; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s2, s[0:1]
+; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s7, v0, s2
+; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v1, s6, s2
+; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s2
+; GFX1064_ITERATIVE-NEXT: s_add_i32 s6, s6, s7
; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064_ITERATIVE-NEXT: s_add_i32 s6, s6, s8
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB2_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
@@ -853,11 +850,10 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1032_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032_ITERATIVE-NEXT: s_ff1_i32_b32 s1, s0
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s2, v0, s1
-; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s1
; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v1, s6, s1
-; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s0, s0, s3
+; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032_ITERATIVE-NEXT: s_add_i32 s6, s6, s2
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s0, 0
+; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s0, s0, s1
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB2_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
@@ -897,14 +893,13 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1164_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX1164_ITERATIVE-NEXT: .LBB2_1: ; %ComputeLoop
; GFX1164_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164_ITERATIVE-NEXT: s_ctz_i32_b64 s7, s[0:1]
+; GFX1164_ITERATIVE-NEXT: s_ctz_i32_b64 s2, s[0:1]
; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s8, v1, s7
-; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s7
-; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v0, s6, s7
+; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s7, v1, s2
+; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v0, s6, s2
+; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s2
+; GFX1164_ITERATIVE-NEXT: s_add_i32 s6, s6, s7
; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164_ITERATIVE-NEXT: s_add_i32 s6, s6, s8
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB2_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
@@ -949,11 +944,10 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1132_ITERATIVE-NEXT: s_ctz_i32_b32 s1, s0
; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s2, v1, s1
-; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s1
; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v0, s6, s1
-; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s0, s0, s3
+; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s1, 1, s1
; GFX1132_ITERATIVE-NEXT: s_add_i32 s6, s6, s2
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s0, 0
+; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s0, s0, s1
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB2_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
@@ -993,14 +987,14 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1264_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX1264_ITERATIVE-NEXT: .LBB2_1: ; %ComputeLoop
; GFX1264_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1264_ITERATIVE-NEXT: s_ctz_i32_b64 s7, s[0:1]
+; GFX1264_ITERATIVE-NEXT: s_ctz_i32_b64 s2, s[0:1]
+; GFX1264_ITERATIVE-NEXT: s_wait_alu 0xfffe
+; GFX1264_ITERATIVE-NEXT: v_readlane_b32 s7, v1, s2
+; GFX1264_ITERATIVE-NEXT: v_writelane_b32 v0, s6, s2
+; GFX1264_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s2
+; GFX1264_ITERATIVE-NEXT: s_add_co_i32 s6, s6, s7
; GFX1264_ITERATIVE-NEXT: s_wait_alu 0xfffe
-; GFX1264_ITERATIVE-NEXT: v_readlane_b32 s8, v1, s7
-; GFX1264_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s7
-; GFX1264_ITERATIVE-NEXT: v_writelane_b32 v0, s6, s7
; GFX1264_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1264_ITERATIVE-NEXT: s_add_co_i32 s6, s6, s8
-; GFX1264_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1264_ITERATIVE-NEXT: s_cbranch_scc1 .LBB2_1
; GFX1264_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1264_ITERATIVE-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
@@ -1028,6 +1022,7 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1264_ITERATIVE-NEXT: s_wait_kmcnt 0x0
; GFX1264_ITERATIVE-NEXT: v_readfirstlane_b32 s2, v1
; GFX1264_ITERATIVE-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1264_ITERATIVE-NEXT: s_wait_alu 0xf1ff
; GFX1264_ITERATIVE-NEXT: v_add_nc_u32_e32 v0, s2, v0
; GFX1264_ITERATIVE-NEXT: s_mov_b32 s2, -1
; GFX1264_ITERATIVE-NEXT: buffer_store_b32 v0, off, s[0:3], null
@@ -1041,15 +1036,15 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1232_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX1232_ITERATIVE-NEXT: .LBB2_1: ; %ComputeLoop
; GFX1232_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1232_ITERATIVE-NEXT: s_wait_alu 0xfffe
; GFX1232_ITERATIVE-NEXT: s_ctz_i32_b32 s1, s0
; GFX1232_ITERATIVE-NEXT: s_wait_alu 0xfffe
; GFX1232_ITERATIVE-NEXT: v_readlane_b32 s2, v1, s1
-; GFX1232_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s1
; GFX1232_ITERATIVE-NEXT: v_writelane_b32 v0, s6, s1
-; GFX1232_ITERATIVE-NEXT: s_and_not1_b32 s0, s0, s3
+; GFX1232_ITERATIVE-NEXT: s_lshl_b32 s1, 1, s1
; GFX1232_ITERATIVE-NEXT: s_add_co_i32 s6, s6, s2
; GFX1232_ITERATIVE-NEXT: s_wait_alu 0xfffe
-; GFX1232_ITERATIVE-NEXT: s_cmp_lg_u32 s0, 0
+; GFX1232_ITERATIVE-NEXT: s_and_not1_b32 s0, s0, s1
; GFX1232_ITERATIVE-NEXT: s_cbranch_scc1 .LBB2_1
; GFX1232_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1232_ITERATIVE-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
@@ -2363,7 +2358,6 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX8_ITERATIVE-NEXT: s_addc_u32 s7, s7, s3
; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB5_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
@@ -2416,7 +2410,6 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX9_ITERATIVE-NEXT: s_addc_u32 s7, s7, s3
; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB5_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
@@ -2462,13 +2455,12 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s2, s[0:1]
; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s3, v0, s2
; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s8, v3, s2
-; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v1, s6, s2
; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v2, s7, s2
+; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v1, s6, s2
; GFX1064_ITERATIVE-NEXT: s_add_u32 s6, s6, s3
; GFX1064_ITERATIVE-NEXT: s_addc_u32 s7, s7, s8
; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB5_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
@@ -2515,13 +2507,12 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1032_ITERATIVE-NEXT: s_ff1_i32_b32 s1, s0
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s2, v0, s1
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s3, v3, s1
-; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v1, s6, s1
; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v2, s7, s1
+; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v1, s6, s1
; GFX1032_ITERATIVE-NEXT: s_add_u32 s6, s6, s2
; GFX1032_ITERATIVE-NEXT: s_addc_u32 s7, s7, s3
; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s0, s0, s1
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB5_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
@@ -2569,14 +2560,13 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s3, v2, s2
; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s8, v3, s2
-; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v0, s6, s2
; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v1, s7, s2
+; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v0, s6, s2
; GFX1164_ITERATIVE-NEXT: s_add_u32 s6, s6, s3
; GFX1164_ITERATIVE-NEXT: s_addc_u32 s7, s7, s8
; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s2
-; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB5_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
@@ -2626,14 +2616,13 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s2, v2, s1
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s3, v3, s1
-; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v0, s6, s1
; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v1, s7, s1
+; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v0, s6, s1
; GFX1132_ITERATIVE-NEXT: s_add_u32 s6, s6, s2
; GFX1132_ITERATIVE-NEXT: s_addc_u32 s7, s7, s3
; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s0, s0, s1
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB5_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
@@ -2677,16 +2666,16 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1264_ITERATIVE-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1264_ITERATIVE-NEXT: .LBB5_1: ; %ComputeLoop
; GFX1264_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1264_ITERATIVE-NEXT: s_ctz_i32_b64 s10, s[0:1]
+; GFX1264_ITERATIVE-NEXT: s_ctz_i32_b64 s8, s[0:1]
+; GFX1264_ITERATIVE-NEXT: s_wait_alu 0xfffe
+; GFX1264_ITERATIVE-NEXT: v_readlane_b32 s3, v3, s8
+; GFX1264_ITERATIVE-NEXT: v_readlane_b32 s2, v2, s8
+; GFX1264_ITERATIVE-NEXT: v_writelane_b32 v1, s7, s8
+; GFX1264_ITERATIVE-NEXT: v_writelane_b32 v0, s6, s8
+; GFX1264_ITERATIVE-NEXT: s_lshl_b64 s[8:9], 1, s8
; GFX1264_ITERATIVE-NEXT: s_wait_alu 0xfffe
-; GFX1264_ITERATIVE-NEXT: v_readlane_b32 s3, v3, s10
-; GFX1264_ITERATIVE-NEXT: v_readlane_b32 s2, v2, s10
-; GFX1264_ITERATIVE-NEXT: s_lshl_b64 s[8:9], 1, s10
-; GFX1264_ITERATIVE-NEXT: v_writelane_b32 v1, s7, s10
-; GFX1264_ITERATIVE-NEXT: v_writelane_b32 v0, s6, s10
; GFX1264_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[8:9]
; GFX1264_ITERATIVE-NEXT: s_add_nc_u64 s[6:7], s[6:7], s[2:3]
-; GFX1264_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1264_ITERATIVE-NEXT: s_cbranch_scc1 .LBB5_1
; GFX1264_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1264_ITERATIVE-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
@@ -2731,17 +2720,17 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1232_ITERATIVE-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1232_ITERATIVE-NEXT: .LBB5_1: ; %ComputeLoop
; GFX1232_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1232_ITERATIVE-NEXT: s_wait_alu 0xfffe
; GFX1232_ITERATIVE-NEXT: s_ctz_i32_b32 s1, s0
; GFX1232_ITERATIVE-NEXT: s_wait_alu 0xfffe
; GFX1232_ITERATIVE-NEXT: v_readlane_b32 s3, v3, s1
; GFX1232_ITERATIVE-NEXT: v_readlane_b32 s2, v2, s1
-; GFX1232_ITERATIVE-NEXT: s_lshl_b32 s8, 1, s1
; GFX1232_ITERATIVE-NEXT: v_writelane_b32 v1, s7, s1
; GFX1232_ITERATIVE-NEXT: v_writelane_b32 v0, s6, s1
-; GFX1232_ITERATIVE-NEXT: s_and_not1_b32 s0, s0, s8
-; GFX1232_ITERATIVE-NEXT: s_add_nc_u64 s[6:7], s[6:7], s[2:3]
+; GFX1232_ITERATIVE-NEXT: s_lshl_b32 s1, 1, s1
; GFX1232_ITERATIVE-NEXT: s_wait_alu 0xfffe
-; GFX1232_ITERATIVE-NEXT: s_cmp_lg_u32 s0, 0
+; GFX1232_ITERATIVE-NEXT: s_and_not1_b32 s0, s0, s1
+; GFX1232_ITERATIVE-NEXT: s_add_nc_u64 s[6:7], s[6:7], s[2:3]
; GFX1232_ITERATIVE-NEXT: s_cbranch_scc1 .LBB5_1
; GFX1232_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1232_ITERATIVE-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
@@ -4490,12 +4479,11 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX8_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8_ITERATIVE-NEXT: s_ff1_i32_b64 s2, s[0:1]
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, s2
-; GFX8_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s2
-; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s2
+; GFX8_ITERATIVE-NEXT: v_readlane_b32 s3, v0, s2
; GFX8_ITERATIVE-NEXT: v_writelane_b32 v2, s12, m0
-; GFX8_ITERATIVE-NEXT: s_add_i32 s12, s12, s6
+; GFX8_ITERATIVE-NEXT: s_add_i32 s12, s12, s3
+; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB8_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
@@ -4550,12 +4538,11 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX9_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9_ITERATIVE-NEXT: s_ff1_i32_b64 s2, s[0:1]
; GFX9_ITERATIVE-NEXT: s_mov_b32 m0, s2
-; GFX9_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s2
-; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s2
+; GFX9_ITERATIVE-NEXT: v_readlane_b32 s3, v0, s2
; GFX9_ITERATIVE-NEXT: v_writelane_b32 v2, s12, m0
-; GFX9_ITERATIVE-NEXT: s_add_i32 s12, s12, s6
+; GFX9_ITERATIVE-NEXT: s_add_i32 s12, s12, s3
+; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB8_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
@@ -4608,13 +4595,12 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1064_ITERATIVE-NEXT: ; implicit-def: $vgpr2
; GFX1064_ITERATIVE-NEXT: .LBB8_1: ; %ComputeLoop
; GFX1064_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s6, s[0:1]
-; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s7, v0, s6
-; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s6
-; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v2, s12, s6
+; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s2, s[0:1]
+; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s2
+; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v2, s12, s2
+; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s2
+; GFX1064_ITERATIVE-NEXT: s_add_i32 s12, s12, s6
; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064_ITERATIVE-NEXT: s_add_i32 s12, s12, s7
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB8_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
@@ -4670,11 +4656,10 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1032_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032_ITERATIVE-NEXT: s_ff1_i32_b32 s1, s0
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s2, v0, s1
-; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s1
; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v2, s8, s1
-; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s0, s0, s3
+; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032_ITERATIVE-NEXT: s_add_i32 s8, s8, s2
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s0, 0
+; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s0, s0, s1
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB8_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
@@ -4728,14 +4713,13 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1164_ITERATIVE-NEXT: ; implicit-def: $vgpr2
; GFX1164_ITERATIVE-NEXT: .LBB8_1: ; %ComputeLoop
; GFX1164_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164_ITERATIVE-NEXT: s_ctz_i32_b64 s6, s[0:1]
+; GFX1164_ITERATIVE-NEXT: s_ctz_i32_b64 s2, s[0:1]
; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s7, v0, s6
-; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s6
-; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v2, s12, s6
+; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s2
+; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v2, s12, s2
+; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s2
+; GFX1164_ITERATIVE-NEXT: s_add_i32 s12, s12, s6
; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164_ITERATIVE-NEXT: s_add_i32 s12, s12, s7
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB8_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
@@ -4799,11 +4783,10 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1132_ITERATIVE-NEXT: s_ctz_i32_b32 s1, s0
; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s2, v0, s1
-; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s1
; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v2, s8, s1
-; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s0, s0, s3
+; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s1, 1, s1
; GFX1132_ITERATIVE-NEXT: s_add_i32 s8, s8, s2
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s0, 0
+; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s0, s0, s1
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB8_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
@@ -4861,14 +4844,14 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1264_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX1264_ITERATIVE-NEXT: .LBB8_1: ; %ComputeLoop
; GFX1264_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1264_ITERATIVE-NEXT: s_ctz_i32_b64 s7, s[0:1]
+; GFX1264_ITERATIVE-NEXT: s_ctz_i32_b64 s2, s[0:1]
+; GFX1264_ITERATIVE-NEXT: s_wait_alu 0xfffe
+; GFX1264_ITERATIVE-NEXT: v_readlane_b32 s7, v1, s2
+; GFX1264_ITERATIVE-NEXT: v_writelane_b32 v0, s6, s2
+; GFX1264_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s2
+; GFX1264_ITERATIVE-NEXT: s_add_co_i32 s6, s6, s7
; GFX1264_ITERATIVE-NEXT: s_wait_alu 0xfffe
-; GFX1264_ITERATIVE-NEXT: v_readlane_b32 s8, v1, s7
-; GFX1264_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s7
-; GFX1264_ITERATIVE-NEXT: v_writelane_b32 v0, s6, s7
; GFX1264_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1264_ITERATIVE-NEXT: s_add_co_i32 s6, s6, s8
-; GFX1264_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1264_ITERATIVE-NEXT: s_cbranch_scc1 .LBB8_1
; GFX1264_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1264_ITERATIVE-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
@@ -4896,6 +4879,7 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1264_ITERATIVE-NEXT: s_wait_kmcnt 0x0
; GFX1264_ITERATIVE-NEXT: v_readfirstlane_b32 s2, v1
; GFX1264_ITERATIVE-NEXT: s_mov_b32 s3, 0x31016000
+; GFX1264_ITERATIVE-NEXT: s_wait_alu 0xf1ff
; GFX1264_ITERATIVE-NEXT: v_sub_nc_u32_e32 v0, s2, v0
; GFX1264_ITERATIVE-NEXT: s_mov_b32 s2, -1
; GFX1264_ITERATIVE-NEXT: buffer_store_b32 v0, off, s[0:3], null
@@ -4909,15 +4893,15 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1232_ITERATIVE-NEXT: ; implicit-def: $vgpr0
; GFX1232_ITERATIVE-NEXT: .LBB8_1: ; %ComputeLoop
; GFX1232_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1232_ITERATIVE-NEXT: s_wait_alu 0xfffe
; GFX1232_ITERATIVE-NEXT: s_ctz_i32_b32 s1, s0
; GFX1232_ITERATIVE-NEXT: s_wait_alu 0xfffe
; GFX1232_ITERATIVE-NEXT: v_readlane_b32 s2, v1, s1
-; GFX1232_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s1
; GFX1232_ITERATIVE-NEXT: v_writelane_b32 v0, s6, s1
-; GFX1232_ITERATIVE-NEXT: s_and_not1_b32 s0, s0, s3
+; GFX1232_ITERATIVE-NEXT: s_lshl_b32 s1, 1, s1
; GFX1232_ITERATIVE-NEXT: s_add_co_i32 s6, s6, s2
; GFX1232_ITERATIVE-NEXT: s_wait_alu 0xfffe
-; GFX1232_ITERATIVE-NEXT: s_cmp_lg_u32 s0, 0
+; GFX1232_ITERATIVE-NEXT: s_and_not1_b32 s0, s0, s1
; GFX1232_ITERATIVE-NEXT: s_cbranch_scc1 .LBB8_1
; GFX1232_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1232_ITERATIVE-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
@@ -6673,7 +6657,6 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX8_ITERATIVE-NEXT: s_addc_u32 s9, s9, s3
; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB11_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
@@ -6746,7 +6729,6 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX9_ITERATIVE-NEXT: s_addc_u32 s9, s9, s3
; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB11_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
@@ -6812,13 +6794,12 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s2, s[0:1]
; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s3, v0, s2
; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s6, v1, s2
-; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v4, s8, s2
; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v5, s9, s2
+; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v4, s8, s2
; GFX1064_ITERATIVE-NEXT: s_add_u32 s8, s8, s3
; GFX1064_ITERATIVE-NEXT: s_addc_u32 s9, s9, s6
; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB11_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
@@ -6883,13 +6864,12 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1032_ITERATIVE-NEXT: s_ff1_i32_b32 s1, s0
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s2, v0, s1
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s3, v1, s1
-; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v4, s8, s1
; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v5, s9, s1
+; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v4, s8, s1
; GFX1032_ITERATIVE-NEXT: s_add_u32 s8, s8, s2
; GFX1032_ITERATIVE-NEXT: s_addc_u32 s9, s9, s3
; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s0, s0, s1
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB11_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
@@ -6955,14 +6935,13 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s3, v0, s2
; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s6, v1, s2
-; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v4, s8, s2
; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v5, s9, s2
+; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v4, s8, s2
; GFX1164_ITERATIVE-NEXT: s_add_u32 s8, s8, s3
; GFX1164_ITERATIVE-NEXT: s_addc_u32 s9, s9, s6
; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[2:3], 1, s2
-; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB11_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
@@ -7036,14 +7015,13 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s2, v0, s1
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s3, v1, s1
-; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v4, s8, s1
; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v5, s9, s1
+; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v4, s8, s1
; GFX1132_ITERATIVE-NEXT: s_add_u32 s8, s8, s2
; GFX1132_ITERATIVE-NEXT: s_addc_u32 s9, s9, s3
; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s0, s0, s1
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB11_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
@@ -7109,16 +7087,16 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1264_ITERATIVE-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1264_ITERATIVE-NEXT: .LBB11_1: ; %ComputeLoop
; GFX1264_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1264_ITERATIVE-NEXT: s_ctz_i32_b64 s10, s[0:1]
+; GFX1264_ITERATIVE-NEXT: s_ctz_i32_b64 s8, s[0:1]
+; GFX1264_ITERATIVE-NEXT: s_wait_alu 0xfffe
+; GFX1264_ITERATIVE-NEXT: v_readlane_b32 s3, v3, s8
+; GFX1264_ITERATIVE-NEXT: v_readlane_b32 s2, v2, s8
+; GFX1264_ITERATIVE-NEXT: v_writelane_b32 v1, s7, s8
+; GFX1264_ITERATIVE-NEXT: v_writelane_b32 v0, s6, s8
+; GFX1264_ITERATIVE-NEXT: s_lshl_b64 s[8:9], 1, s8
; GFX1264_ITERATIVE-NEXT: s_wait_alu 0xfffe
-; GFX1264_ITERATIVE-NEXT: v_readlane_b32 s3, v3, s10
-; GFX1264_ITERATIVE-NEXT: v_readlane_b32 s2, v2, s10
-; GFX1264_ITERATIVE-NEXT: s_lshl_b64 s[8:9], 1, s10
-; GFX1264_ITERATIVE-NEXT: v_writelane_b32 v1, s7, s10
-; GFX1264_ITERATIVE-NEXT: v_writelane_b32 v0, s6, s10
; GFX1264_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[8:9]
; GFX1264_ITERATIVE-NEXT: s_add_nc_u64 s[6:7], s[6:7], s[2:3]
-; GFX1264_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1264_ITERATIVE-NEXT: s_cbranch_scc1 .LBB11_1
; GFX1264_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1264_ITERATIVE-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
@@ -7163,17 +7141,17 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out, ptr addrspace(
; GFX1232_ITERATIVE-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1232_ITERATIVE-NEXT: .LBB11_1: ; %ComputeLoop
; GFX1232_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1232_ITERATIVE-NEXT: s_wait_alu 0xfffe
; GFX1232_ITERATIVE-NEXT: s_ctz_i32_b32 s1, s0
; GFX1232_ITERATIVE-NEXT: s_wait_alu 0xfffe
; GFX1232_ITERATIVE-NEXT: v_readlane_b32 s3, v3, s1
; GFX1232_ITERATIVE-NEXT: v_readlane_b32 s2, v2, s1
-; GFX1232_ITERATIVE-NEXT: s_lshl_b32 s8, 1, s1
; GFX1232_ITERATIVE-NEXT: v_writelane_b32 v1, s7, s1
; GFX1232_ITERATIVE-NEXT: v_writelane_b32 v0, s6, s1
-; GFX1232_ITERATIVE-NEXT: s_and_not1_b32 s0, s0, s8
-; GFX1232_ITERATIVE-NEXT: s_add_nc_u64 s[6:7], s[6:7], s[2:3]
+; GFX1232_ITERATIVE-NEXT: s_lshl_b32 s1, 1, s1
; GFX1232_ITERATIVE-NEXT: s_wait_alu 0xfffe
-; GFX1232_ITERATIVE-NEXT: s_cmp_lg_u32 s0, 0
+; GFX1232_ITERATIVE-NEXT: s_and_not1_b32 s0, s0, s1
+; GFX1232_ITERATIVE-NEXT: s_add_nc_u64 s[6:7], s[6:7], s[2:3]
; GFX1232_ITERATIVE-NEXT: s_cbranch_scc1 .LBB11_1
; GFX1232_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1232_ITERATIVE-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
index 23c5f4f..6167a84 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_local_pointer.ll
@@ -499,12 +499,11 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, s3
-; GFX8_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX8_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX8_ITERATIVE-NEXT: v_writelane_b32 v1, s2, m0
-; GFX8_ITERATIVE-NEXT: s_add_i32 s2, s2, s8
+; GFX8_ITERATIVE-NEXT: s_add_i32 s2, s2, s6
+; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB2_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -540,12 +539,11 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX9_ITERATIVE-NEXT: s_mov_b32 m0, s3
-; GFX9_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX9_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX9_ITERATIVE-NEXT: v_writelane_b32 v1, s2, m0
-; GFX9_ITERATIVE-NEXT: s_add_i32 s2, s2, s8
+; GFX9_ITERATIVE-NEXT: s_add_i32 s2, s2, s6
+; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB2_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -580,11 +578,10 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out) {
; GFX1064_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v1, s2, s3
-; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
+; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1064_ITERATIVE-NEXT: s_add_i32 s2, s2, s8
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB2_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -621,11 +618,10 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out) {
; GFX1032_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032_ITERATIVE-NEXT: s_ff1_i32_b32 s2, s1
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s3, v0, s2
-; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s6, 1, s2
; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s2
-; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s1, s1, s6
+; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s2, 1, s2
; GFX1032_ITERATIVE-NEXT: s_add_i32 s0, s0, s3
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s1, s1, s2
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB2_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -663,11 +659,10 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out) {
; GFX1164_ITERATIVE-NEXT: s_ctz_i32_b64 s3, s[0:1]
; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s8, v1, s3
-; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v0, s2, s3
-; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
+; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1164_ITERATIVE-NEXT: s_add_i32 s2, s2, s8
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB2_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -707,11 +702,10 @@ define amdgpu_kernel void @add_i32_varying(ptr addrspace(1) %out) {
; GFX1132_ITERATIVE-NEXT: s_ctz_i32_b32 s2, s1
; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s3, v1, s2
-; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s6, 1, s2
; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s2
-; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s1, s1, s6
+; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s2, 1, s2
; GFX1132_ITERATIVE-NEXT: s_add_i32 s0, s0, s3
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s1, s1, s2
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB2_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -1088,11 +1082,10 @@ define amdgpu_kernel void @add_i32_varying_nouse() {
; GFX8_ITERATIVE-NEXT: .LBB3_1: ; %ComputeLoop
; GFX8_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
-; GFX8_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
+; GFX8_ITERATIVE-NEXT: v_readlane_b32 s4, v0, s3
+; GFX8_ITERATIVE-NEXT: s_add_i32 s2, s2, s4
; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[4:5], 1, s3
-; GFX8_ITERATIVE-NEXT: s_add_i32 s2, s2, s6
; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[4:5]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB3_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -1117,11 +1110,10 @@ define amdgpu_kernel void @add_i32_varying_nouse() {
; GFX9_ITERATIVE-NEXT: .LBB3_1: ; %ComputeLoop
; GFX9_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
-; GFX9_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
+; GFX9_ITERATIVE-NEXT: v_readlane_b32 s4, v0, s3
+; GFX9_ITERATIVE-NEXT: s_add_i32 s2, s2, s4
; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[4:5], 1, s3
-; GFX9_ITERATIVE-NEXT: s_add_i32 s2, s2, s6
; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[4:5]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB3_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -1147,9 +1139,8 @@ define amdgpu_kernel void @add_i32_varying_nouse() {
; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[4:5], 1, s3
-; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[4:5]
; GFX1064_ITERATIVE-NEXT: s_add_i32 s2, s2, s6
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[4:5]
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB3_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -1176,9 +1167,8 @@ define amdgpu_kernel void @add_i32_varying_nouse() {
; GFX1032_ITERATIVE-NEXT: s_ff1_i32_b32 s2, s1
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s3, v0, s2
; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s2, 1, s2
-; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s1, s1, s2
; GFX1032_ITERATIVE-NEXT: s_add_i32 s0, s0, s3
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s1, s1, s2
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB3_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -1206,10 +1196,8 @@ define amdgpu_kernel void @add_i32_varying_nouse() {
; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[4:5], 1, s3
-; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[4:5]
; GFX1164_ITERATIVE-NEXT: s_add_i32 s2, s2, s6
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[4:5]
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB3_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -1239,10 +1227,8 @@ define amdgpu_kernel void @add_i32_varying_nouse() {
; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s3, v0, s2
; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s2, 1, s2
-; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s1, s1, s2
; GFX1132_ITERATIVE-NEXT: s_add_i32 s0, s0, s3
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s1, s1, s2
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB3_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -2022,7 +2008,6 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_addc_u32 s1, s1, s7
; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s6
; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB6_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -2071,7 +2056,6 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: s_addc_u32 s1, s1, s7
; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s6
; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB6_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -2112,13 +2096,12 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out) {
; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s6, s[2:3]
; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s7, v0, s6
; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s8, v3, s6
-; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s6
; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v2, s1, s6
+; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s6
; GFX1064_ITERATIVE-NEXT: s_add_u32 s0, s0, s7
; GFX1064_ITERATIVE-NEXT: s_addc_u32 s1, s1, s8
; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s6
; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -2160,13 +2143,12 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out) {
; GFX1032_ITERATIVE-NEXT: s_ff1_i32_b32 s3, s2
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s3
-; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s3
; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v2, s1, s3
+; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s3
; GFX1032_ITERATIVE-NEXT: s_add_u32 s0, s0, s6
; GFX1032_ITERATIVE-NEXT: s_addc_u32 s1, s1, s7
; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s3
; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s2, s2, s3
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s2, 0
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -2209,14 +2191,13 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out) {
; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s7, v2, s6
; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s8, v3, s6
-; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s6
; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v1, s1, s6
+; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s6
; GFX1164_ITERATIVE-NEXT: s_add_u32 s0, s0, s7
; GFX1164_ITERATIVE-NEXT: s_addc_u32 s1, s1, s8
; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s6
-; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[6:7]
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -2261,14 +2242,13 @@ define amdgpu_kernel void @add_i64_varying(ptr addrspace(1) %out) {
; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s6, v2, s3
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s3
-; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s3
; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v1, s1, s3
+; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s3
; GFX1132_ITERATIVE-NEXT: s_add_u32 s0, s0, s6
; GFX1132_ITERATIVE-NEXT: s_addc_u32 s1, s1, s7
; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s3
-; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s2, s2, s3
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s2, 0
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -2881,7 +2861,6 @@ define amdgpu_kernel void @add_i64_varying_nouse() {
; GFX8_ITERATIVE-NEXT: s_addc_u32 s1, s1, s5
; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[4:5], 1, s4
; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB7_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -2914,7 +2893,6 @@ define amdgpu_kernel void @add_i64_varying_nouse() {
; GFX9_ITERATIVE-NEXT: s_addc_u32 s1, s1, s5
; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[4:5], 1, s4
; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB7_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -2946,7 +2924,6 @@ define amdgpu_kernel void @add_i64_varying_nouse() {
; GFX1064_ITERATIVE-NEXT: s_addc_u32 s1, s1, s6
; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[4:5], 1, s4
; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[4:5]
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB7_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -2979,7 +2956,6 @@ define amdgpu_kernel void @add_i64_varying_nouse() {
; GFX1032_ITERATIVE-NEXT: s_addc_u32 s1, s1, s5
; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s3
; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s2, s2, s3
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s2, 0
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB7_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -3013,8 +2989,6 @@ define amdgpu_kernel void @add_i64_varying_nouse() {
; GFX1164_ITERATIVE-NEXT: s_addc_u32 s1, s1, s6
; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[4:5], 1, s4
; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[4:5]
-; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB7_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -3048,9 +3022,8 @@ define amdgpu_kernel void @add_i64_varying_nouse() {
; GFX1132_ITERATIVE-NEXT: s_add_u32 s0, s0, s4
; GFX1132_ITERATIVE-NEXT: s_addc_u32 s1, s1, s5
; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s3
-; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s2, s2, s3
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s2, 0
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB7_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -3906,12 +3879,11 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, s3
-; GFX8_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX8_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX8_ITERATIVE-NEXT: v_writelane_b32 v1, s2, m0
-; GFX8_ITERATIVE-NEXT: s_add_i32 s2, s2, s8
+; GFX8_ITERATIVE-NEXT: s_add_i32 s2, s2, s6
+; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB10_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -3947,12 +3919,11 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX9_ITERATIVE-NEXT: s_mov_b32 m0, s3
-; GFX9_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX9_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX9_ITERATIVE-NEXT: v_writelane_b32 v1, s2, m0
-; GFX9_ITERATIVE-NEXT: s_add_i32 s2, s2, s8
+; GFX9_ITERATIVE-NEXT: s_add_i32 s2, s2, s6
+; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB10_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -3987,11 +3958,10 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out) {
; GFX1064_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v1, s2, s3
-; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
+; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1064_ITERATIVE-NEXT: s_add_i32 s2, s2, s8
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB10_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -4028,11 +3998,10 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out) {
; GFX1032_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032_ITERATIVE-NEXT: s_ff1_i32_b32 s2, s1
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s3, v0, s2
-; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s6, 1, s2
; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s2
-; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s1, s1, s6
+; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s2, 1, s2
; GFX1032_ITERATIVE-NEXT: s_add_i32 s0, s0, s3
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s1, s1, s2
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB10_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -4070,11 +4039,10 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out) {
; GFX1164_ITERATIVE-NEXT: s_ctz_i32_b64 s3, s[0:1]
; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s8, v1, s3
-; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v0, s2, s3
-; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
+; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1164_ITERATIVE-NEXT: s_add_i32 s2, s2, s8
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB10_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -4114,11 +4082,10 @@ define amdgpu_kernel void @sub_i32_varying(ptr addrspace(1) %out) {
; GFX1132_ITERATIVE-NEXT: s_ctz_i32_b32 s2, s1
; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s3, v1, s2
-; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s6, 1, s2
; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s2
-; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s1, s1, s6
+; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s2, 1, s2
; GFX1132_ITERATIVE-NEXT: s_add_i32 s0, s0, s3
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s1, s1, s2
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB10_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -4495,11 +4462,10 @@ define amdgpu_kernel void @sub_i32_varying_nouse() {
; GFX8_ITERATIVE-NEXT: .LBB11_1: ; %ComputeLoop
; GFX8_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
-; GFX8_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
+; GFX8_ITERATIVE-NEXT: v_readlane_b32 s4, v0, s3
+; GFX8_ITERATIVE-NEXT: s_add_i32 s2, s2, s4
; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[4:5], 1, s3
-; GFX8_ITERATIVE-NEXT: s_add_i32 s2, s2, s6
; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[4:5]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB11_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -4524,11 +4490,10 @@ define amdgpu_kernel void @sub_i32_varying_nouse() {
; GFX9_ITERATIVE-NEXT: .LBB11_1: ; %ComputeLoop
; GFX9_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
-; GFX9_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
+; GFX9_ITERATIVE-NEXT: v_readlane_b32 s4, v0, s3
+; GFX9_ITERATIVE-NEXT: s_add_i32 s2, s2, s4
; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[4:5], 1, s3
-; GFX9_ITERATIVE-NEXT: s_add_i32 s2, s2, s6
; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[4:5]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB11_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -4554,9 +4519,8 @@ define amdgpu_kernel void @sub_i32_varying_nouse() {
; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[4:5], 1, s3
-; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[4:5]
; GFX1064_ITERATIVE-NEXT: s_add_i32 s2, s2, s6
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[4:5]
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB11_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -4583,9 +4547,8 @@ define amdgpu_kernel void @sub_i32_varying_nouse() {
; GFX1032_ITERATIVE-NEXT: s_ff1_i32_b32 s2, s1
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s3, v0, s2
; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s2, 1, s2
-; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s1, s1, s2
; GFX1032_ITERATIVE-NEXT: s_add_i32 s0, s0, s3
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s1, s1, s2
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB11_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -4613,10 +4576,8 @@ define amdgpu_kernel void @sub_i32_varying_nouse() {
; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[4:5], 1, s3
-; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[4:5]
; GFX1164_ITERATIVE-NEXT: s_add_i32 s2, s2, s6
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[4:5]
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB11_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -4646,10 +4607,8 @@ define amdgpu_kernel void @sub_i32_varying_nouse() {
; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s3, v0, s2
; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s2, 1, s2
-; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s1, s1, s2
; GFX1132_ITERATIVE-NEXT: s_add_i32 s0, s0, s3
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s1, s1, s2
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB11_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -5452,7 +5411,6 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_addc_u32 s1, s1, s7
; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s6
; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB14_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -5501,7 +5459,6 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: s_addc_u32 s1, s1, s7
; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s6
; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB14_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -5542,13 +5499,12 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out) {
; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s6, s[2:3]
; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s7, v0, s6
; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s8, v3, s6
-; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s6
; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v2, s1, s6
+; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s6
; GFX1064_ITERATIVE-NEXT: s_add_u32 s0, s0, s7
; GFX1064_ITERATIVE-NEXT: s_addc_u32 s1, s1, s8
; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s6
; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB14_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -5590,13 +5546,12 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out) {
; GFX1032_ITERATIVE-NEXT: s_ff1_i32_b32 s3, s2
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s3
-; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s3
; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v2, s1, s3
+; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s3
; GFX1032_ITERATIVE-NEXT: s_add_u32 s0, s0, s6
; GFX1032_ITERATIVE-NEXT: s_addc_u32 s1, s1, s7
; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s3
; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s2, s2, s3
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s2, 0
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB14_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -5639,14 +5594,13 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out) {
; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s7, v2, s6
; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s8, v3, s6
-; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s6
; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v1, s1, s6
+; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s6
; GFX1164_ITERATIVE-NEXT: s_add_u32 s0, s0, s7
; GFX1164_ITERATIVE-NEXT: s_addc_u32 s1, s1, s8
; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s6
-; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[6:7]
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB14_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -5691,14 +5645,13 @@ define amdgpu_kernel void @sub_i64_varying(ptr addrspace(1) %out) {
; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s6, v2, s3
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s3
-; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s3
; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v1, s1, s3
+; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s3
; GFX1132_ITERATIVE-NEXT: s_add_u32 s0, s0, s6
; GFX1132_ITERATIVE-NEXT: s_addc_u32 s1, s1, s7
; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s3
-; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s2, s2, s3
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s2, 0
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB14_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -6313,12 +6266,11 @@ define amdgpu_kernel void @and_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, s3
-; GFX8_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX8_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX8_ITERATIVE-NEXT: v_writelane_b32 v1, s2, m0
-; GFX8_ITERATIVE-NEXT: s_and_b32 s2, s2, s8
+; GFX8_ITERATIVE-NEXT: s_and_b32 s2, s2, s6
+; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB15_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -6354,12 +6306,11 @@ define amdgpu_kernel void @and_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX9_ITERATIVE-NEXT: s_mov_b32 m0, s3
-; GFX9_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX9_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX9_ITERATIVE-NEXT: v_writelane_b32 v1, s2, m0
-; GFX9_ITERATIVE-NEXT: s_and_b32 s2, s2, s8
+; GFX9_ITERATIVE-NEXT: s_and_b32 s2, s2, s6
+; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB15_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -6394,11 +6345,10 @@ define amdgpu_kernel void @and_i32_varying(ptr addrspace(1) %out) {
; GFX1064_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v1, s2, s3
-; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
+; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1064_ITERATIVE-NEXT: s_and_b32 s2, s2, s8
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB15_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -6435,11 +6385,10 @@ define amdgpu_kernel void @and_i32_varying(ptr addrspace(1) %out) {
; GFX1032_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032_ITERATIVE-NEXT: s_ff1_i32_b32 s2, s1
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s3, v0, s2
-; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s6, 1, s2
; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s2
-; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s1, s1, s6
+; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s2, 1, s2
; GFX1032_ITERATIVE-NEXT: s_and_b32 s0, s0, s3
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s1, s1, s2
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB15_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -6477,11 +6426,10 @@ define amdgpu_kernel void @and_i32_varying(ptr addrspace(1) %out) {
; GFX1164_ITERATIVE-NEXT: s_ctz_i32_b64 s3, s[0:1]
; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s8, v1, s3
-; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v0, s2, s3
-; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
+; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1164_ITERATIVE-NEXT: s_and_b32 s2, s2, s8
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB15_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -6521,11 +6469,10 @@ define amdgpu_kernel void @and_i32_varying(ptr addrspace(1) %out) {
; GFX1132_ITERATIVE-NEXT: s_ctz_i32_b32 s2, s1
; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s3, v1, s2
-; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s6, 1, s2
; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s2
-; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s1, s1, s6
+; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s2, 1, s2
; GFX1132_ITERATIVE-NEXT: s_and_b32 s0, s0, s3
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s1, s1, s2
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB15_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -6926,12 +6873,11 @@ define amdgpu_kernel void @and_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, s8
; GFX8_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s8
; GFX8_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s8
-; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[8:9], 1, s8
; GFX8_ITERATIVE-NEXT: v_writelane_b32 v2, s1, m0
; GFX8_ITERATIVE-NEXT: v_writelane_b32 v1, s0, m0
; GFX8_ITERATIVE-NEXT: s_and_b64 s[0:1], s[0:1], s[6:7]
-; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[8:9]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s8
+; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB16_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -6973,12 +6919,11 @@ define amdgpu_kernel void @and_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: s_mov_b32 m0, s8
; GFX9_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s8
; GFX9_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s8
-; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[8:9], 1, s8
; GFX9_ITERATIVE-NEXT: v_writelane_b32 v2, s1, m0
; GFX9_ITERATIVE-NEXT: v_writelane_b32 v1, s0, m0
; GFX9_ITERATIVE-NEXT: s_and_b64 s[0:1], s[0:1], s[6:7]
-; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[8:9]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s8
+; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB16_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -7015,15 +6960,14 @@ define amdgpu_kernel void @and_i64_varying(ptr addrspace(1) %out) {
; GFX1064_ITERATIVE-NEXT: ; implicit-def: $vgpr1_vgpr2
; GFX1064_ITERATIVE-NEXT: .LBB16_1: ; %ComputeLoop
; GFX1064_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s10, s[2:3]
-; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s10
-; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s10
-; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[8:9], 1, s10
-; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v2, s1, s10
-; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s10
-; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[8:9]
+; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s8, s[2:3]
+; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s8
+; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s8
+; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v2, s1, s8
+; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s8
+; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[8:9], 1, s8
; GFX1064_ITERATIVE-NEXT: s_and_b64 s[0:1], s[0:1], s[6:7]
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[8:9]
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB16_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -7065,12 +7009,11 @@ define amdgpu_kernel void @and_i64_varying(ptr addrspace(1) %out) {
; GFX1032_ITERATIVE-NEXT: s_ff1_i32_b32 s3, s2
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s3
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
-; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s8, 1, s3
; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v2, s1, s3
; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s3
-; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s2, s2, s8
+; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s3
; GFX1032_ITERATIVE-NEXT: s_and_b64 s[0:1], s[0:1], s[6:7]
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s2, s2, s3
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB16_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -7109,16 +7052,15 @@ define amdgpu_kernel void @and_i64_varying(ptr addrspace(1) %out) {
; GFX1164_ITERATIVE-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1164_ITERATIVE-NEXT: .LBB16_1: ; %ComputeLoop
; GFX1164_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164_ITERATIVE-NEXT: s_ctz_i32_b64 s10, s[2:3]
+; GFX1164_ITERATIVE-NEXT: s_ctz_i32_b64 s8, s[2:3]
; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s10
-; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s6, v2, s10
-; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[8:9], 1, s10
-; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v1, s1, s10
-; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s10
-; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[8:9]
+; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s8
+; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s6, v2, s8
+; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v1, s1, s8
+; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s8
+; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[8:9], 1, s8
; GFX1164_ITERATIVE-NEXT: s_and_b64 s[0:1], s[0:1], s[6:7]
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[8:9]
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB16_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -7163,12 +7105,11 @@ define amdgpu_kernel void @and_i64_varying(ptr addrspace(1) %out) {
; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s3
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s6, v2, s3
-; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s8, 1, s3
; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v1, s1, s3
; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s3
-; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s2, s2, s8
+; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s3
; GFX1132_ITERATIVE-NEXT: s_and_b64 s[0:1], s[0:1], s[6:7]
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s2, s2, s3
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB16_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -7672,12 +7613,11 @@ define amdgpu_kernel void @or_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, s3
-; GFX8_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX8_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX8_ITERATIVE-NEXT: v_writelane_b32 v1, s2, m0
-; GFX8_ITERATIVE-NEXT: s_or_b32 s2, s2, s8
+; GFX8_ITERATIVE-NEXT: s_or_b32 s2, s2, s6
+; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB17_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -7713,12 +7653,11 @@ define amdgpu_kernel void @or_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX9_ITERATIVE-NEXT: s_mov_b32 m0, s3
-; GFX9_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX9_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX9_ITERATIVE-NEXT: v_writelane_b32 v1, s2, m0
-; GFX9_ITERATIVE-NEXT: s_or_b32 s2, s2, s8
+; GFX9_ITERATIVE-NEXT: s_or_b32 s2, s2, s6
+; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB17_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -7753,11 +7692,10 @@ define amdgpu_kernel void @or_i32_varying(ptr addrspace(1) %out) {
; GFX1064_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v1, s2, s3
-; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
+; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1064_ITERATIVE-NEXT: s_or_b32 s2, s2, s8
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB17_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -7794,11 +7732,10 @@ define amdgpu_kernel void @or_i32_varying(ptr addrspace(1) %out) {
; GFX1032_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032_ITERATIVE-NEXT: s_ff1_i32_b32 s2, s1
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s3, v0, s2
-; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s6, 1, s2
; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s2
-; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s1, s1, s6
+; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s2, 1, s2
; GFX1032_ITERATIVE-NEXT: s_or_b32 s0, s0, s3
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s1, s1, s2
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB17_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -7836,11 +7773,10 @@ define amdgpu_kernel void @or_i32_varying(ptr addrspace(1) %out) {
; GFX1164_ITERATIVE-NEXT: s_ctz_i32_b64 s3, s[0:1]
; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s8, v1, s3
-; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v0, s2, s3
-; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
+; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1164_ITERATIVE-NEXT: s_or_b32 s2, s2, s8
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB17_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -7880,11 +7816,10 @@ define amdgpu_kernel void @or_i32_varying(ptr addrspace(1) %out) {
; GFX1132_ITERATIVE-NEXT: s_ctz_i32_b32 s2, s1
; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s3, v1, s2
-; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s6, 1, s2
; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s2
-; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s1, s1, s6
+; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s2, 1, s2
; GFX1132_ITERATIVE-NEXT: s_or_b32 s0, s0, s3
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s1, s1, s2
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB17_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -8284,12 +8219,11 @@ define amdgpu_kernel void @or_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, s8
; GFX8_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s8
; GFX8_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s8
-; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[8:9], 1, s8
; GFX8_ITERATIVE-NEXT: v_writelane_b32 v2, s1, m0
; GFX8_ITERATIVE-NEXT: v_writelane_b32 v1, s0, m0
; GFX8_ITERATIVE-NEXT: s_or_b64 s[0:1], s[0:1], s[6:7]
-; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[8:9]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s8
+; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB18_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -8331,12 +8265,11 @@ define amdgpu_kernel void @or_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: s_mov_b32 m0, s8
; GFX9_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s8
; GFX9_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s8
-; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[8:9], 1, s8
; GFX9_ITERATIVE-NEXT: v_writelane_b32 v2, s1, m0
; GFX9_ITERATIVE-NEXT: v_writelane_b32 v1, s0, m0
; GFX9_ITERATIVE-NEXT: s_or_b64 s[0:1], s[0:1], s[6:7]
-; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[8:9]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s8
+; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB18_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -8373,15 +8306,14 @@ define amdgpu_kernel void @or_i64_varying(ptr addrspace(1) %out) {
; GFX1064_ITERATIVE-NEXT: ; implicit-def: $vgpr1_vgpr2
; GFX1064_ITERATIVE-NEXT: .LBB18_1: ; %ComputeLoop
; GFX1064_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s10, s[2:3]
-; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s10
-; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s10
-; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[8:9], 1, s10
-; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v2, s1, s10
-; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s10
-; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[8:9]
+; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s8, s[2:3]
+; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s8
+; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s8
+; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v2, s1, s8
+; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s8
+; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[8:9], 1, s8
; GFX1064_ITERATIVE-NEXT: s_or_b64 s[0:1], s[0:1], s[6:7]
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[8:9]
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB18_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -8423,12 +8355,11 @@ define amdgpu_kernel void @or_i64_varying(ptr addrspace(1) %out) {
; GFX1032_ITERATIVE-NEXT: s_ff1_i32_b32 s3, s2
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s3
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
-; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s8, 1, s3
; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v2, s1, s3
; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s3
-; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s2, s2, s8
+; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s3
; GFX1032_ITERATIVE-NEXT: s_or_b64 s[0:1], s[0:1], s[6:7]
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s2, s2, s3
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB18_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -8467,16 +8398,15 @@ define amdgpu_kernel void @or_i64_varying(ptr addrspace(1) %out) {
; GFX1164_ITERATIVE-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1164_ITERATIVE-NEXT: .LBB18_1: ; %ComputeLoop
; GFX1164_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164_ITERATIVE-NEXT: s_ctz_i32_b64 s10, s[2:3]
+; GFX1164_ITERATIVE-NEXT: s_ctz_i32_b64 s8, s[2:3]
; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s10
-; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s6, v2, s10
-; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[8:9], 1, s10
-; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v1, s1, s10
-; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s10
-; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[8:9]
+; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s8
+; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s6, v2, s8
+; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v1, s1, s8
+; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s8
+; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[8:9], 1, s8
; GFX1164_ITERATIVE-NEXT: s_or_b64 s[0:1], s[0:1], s[6:7]
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[8:9]
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB18_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -8521,12 +8451,11 @@ define amdgpu_kernel void @or_i64_varying(ptr addrspace(1) %out) {
; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s3
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s6, v2, s3
-; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s8, 1, s3
; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v1, s1, s3
; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s3
-; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s2, s2, s8
+; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s3
; GFX1132_ITERATIVE-NEXT: s_or_b64 s[0:1], s[0:1], s[6:7]
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s2, s2, s3
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB18_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -9030,12 +8959,11 @@ define amdgpu_kernel void @xor_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, s3
-; GFX8_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX8_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX8_ITERATIVE-NEXT: v_writelane_b32 v1, s2, m0
-; GFX8_ITERATIVE-NEXT: s_xor_b32 s2, s2, s8
+; GFX8_ITERATIVE-NEXT: s_xor_b32 s2, s2, s6
+; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB19_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -9071,12 +8999,11 @@ define amdgpu_kernel void @xor_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX9_ITERATIVE-NEXT: s_mov_b32 m0, s3
-; GFX9_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX9_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX9_ITERATIVE-NEXT: v_writelane_b32 v1, s2, m0
-; GFX9_ITERATIVE-NEXT: s_xor_b32 s2, s2, s8
+; GFX9_ITERATIVE-NEXT: s_xor_b32 s2, s2, s6
+; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB19_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -9111,11 +9038,10 @@ define amdgpu_kernel void @xor_i32_varying(ptr addrspace(1) %out) {
; GFX1064_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v1, s2, s3
-; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
+; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1064_ITERATIVE-NEXT: s_xor_b32 s2, s2, s8
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB19_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -9152,11 +9078,10 @@ define amdgpu_kernel void @xor_i32_varying(ptr addrspace(1) %out) {
; GFX1032_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032_ITERATIVE-NEXT: s_ff1_i32_b32 s2, s1
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s3, v0, s2
-; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s6, 1, s2
; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s2
-; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s1, s1, s6
+; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s2, 1, s2
; GFX1032_ITERATIVE-NEXT: s_xor_b32 s0, s0, s3
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s1, s1, s2
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB19_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -9194,11 +9119,10 @@ define amdgpu_kernel void @xor_i32_varying(ptr addrspace(1) %out) {
; GFX1164_ITERATIVE-NEXT: s_ctz_i32_b64 s3, s[0:1]
; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s8, v1, s3
-; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v0, s2, s3
-; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
+; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1164_ITERATIVE-NEXT: s_xor_b32 s2, s2, s8
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB19_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -9238,11 +9162,10 @@ define amdgpu_kernel void @xor_i32_varying(ptr addrspace(1) %out) {
; GFX1132_ITERATIVE-NEXT: s_ctz_i32_b32 s2, s1
; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s3, v1, s2
-; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s6, 1, s2
; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s2
-; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s1, s1, s6
+; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s2, 1, s2
; GFX1132_ITERATIVE-NEXT: s_xor_b32 s0, s0, s3
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s1, s1, s2
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB19_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -9642,12 +9565,11 @@ define amdgpu_kernel void @xor_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, s8
; GFX8_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s8
; GFX8_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s8
-; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[8:9], 1, s8
; GFX8_ITERATIVE-NEXT: v_writelane_b32 v2, s1, m0
; GFX8_ITERATIVE-NEXT: v_writelane_b32 v1, s0, m0
; GFX8_ITERATIVE-NEXT: s_xor_b64 s[0:1], s[0:1], s[6:7]
-; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[8:9]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s8
+; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB20_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -9689,12 +9611,11 @@ define amdgpu_kernel void @xor_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: s_mov_b32 m0, s8
; GFX9_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s8
; GFX9_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s8
-; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[8:9], 1, s8
; GFX9_ITERATIVE-NEXT: v_writelane_b32 v2, s1, m0
; GFX9_ITERATIVE-NEXT: v_writelane_b32 v1, s0, m0
; GFX9_ITERATIVE-NEXT: s_xor_b64 s[0:1], s[0:1], s[6:7]
-; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[8:9]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s8
+; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB20_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -9731,15 +9652,14 @@ define amdgpu_kernel void @xor_i64_varying(ptr addrspace(1) %out) {
; GFX1064_ITERATIVE-NEXT: ; implicit-def: $vgpr1_vgpr2
; GFX1064_ITERATIVE-NEXT: .LBB20_1: ; %ComputeLoop
; GFX1064_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s10, s[2:3]
-; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s10
-; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s10
-; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[8:9], 1, s10
-; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v2, s1, s10
-; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s10
-; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[8:9]
+; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s8, s[2:3]
+; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s8
+; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s8
+; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v2, s1, s8
+; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s8
+; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[8:9], 1, s8
; GFX1064_ITERATIVE-NEXT: s_xor_b64 s[0:1], s[0:1], s[6:7]
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[8:9]
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB20_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -9781,12 +9701,11 @@ define amdgpu_kernel void @xor_i64_varying(ptr addrspace(1) %out) {
; GFX1032_ITERATIVE-NEXT: s_ff1_i32_b32 s3, s2
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s3
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
-; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s8, 1, s3
; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v2, s1, s3
; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s3
-; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s2, s2, s8
+; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s3
; GFX1032_ITERATIVE-NEXT: s_xor_b64 s[0:1], s[0:1], s[6:7]
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s2, s2, s3
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB20_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -9825,16 +9744,15 @@ define amdgpu_kernel void @xor_i64_varying(ptr addrspace(1) %out) {
; GFX1164_ITERATIVE-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1164_ITERATIVE-NEXT: .LBB20_1: ; %ComputeLoop
; GFX1164_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164_ITERATIVE-NEXT: s_ctz_i32_b64 s10, s[2:3]
+; GFX1164_ITERATIVE-NEXT: s_ctz_i32_b64 s8, s[2:3]
; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
-; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s10
-; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s6, v2, s10
-; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[8:9], 1, s10
-; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v1, s1, s10
-; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s10
-; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[8:9]
+; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s8
+; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s6, v2, s8
+; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v1, s1, s8
+; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s8
+; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[8:9], 1, s8
; GFX1164_ITERATIVE-NEXT: s_xor_b64 s[0:1], s[0:1], s[6:7]
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
+; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[8:9]
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB20_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -9879,12 +9797,11 @@ define amdgpu_kernel void @xor_i64_varying(ptr addrspace(1) %out) {
; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s7, v3, s3
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s6, v2, s3
-; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s8, 1, s3
; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v1, s1, s3
; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s3
-; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s2, s2, s8
+; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s3
; GFX1132_ITERATIVE-NEXT: s_xor_b64 s[0:1], s[0:1], s[6:7]
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s2, s2, s3
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB20_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -10388,12 +10305,11 @@ define amdgpu_kernel void @max_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, s3
-; GFX8_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX8_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX8_ITERATIVE-NEXT: v_writelane_b32 v1, s2, m0
-; GFX8_ITERATIVE-NEXT: s_max_i32 s2, s2, s8
+; GFX8_ITERATIVE-NEXT: s_max_i32 s2, s2, s6
+; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB21_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -10429,12 +10345,11 @@ define amdgpu_kernel void @max_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX9_ITERATIVE-NEXT: s_mov_b32 m0, s3
-; GFX9_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX9_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX9_ITERATIVE-NEXT: v_writelane_b32 v1, s2, m0
-; GFX9_ITERATIVE-NEXT: s_max_i32 s2, s2, s8
+; GFX9_ITERATIVE-NEXT: s_max_i32 s2, s2, s6
+; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB21_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -10469,11 +10384,10 @@ define amdgpu_kernel void @max_i32_varying(ptr addrspace(1) %out) {
; GFX1064_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v1, s2, s3
-; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
+; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1064_ITERATIVE-NEXT: s_max_i32 s2, s2, s8
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB21_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -10510,11 +10424,10 @@ define amdgpu_kernel void @max_i32_varying(ptr addrspace(1) %out) {
; GFX1032_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032_ITERATIVE-NEXT: s_ff1_i32_b32 s2, s1
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s3, v0, s2
-; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s6, 1, s2
; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s2
-; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s1, s1, s6
+; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s2, 1, s2
; GFX1032_ITERATIVE-NEXT: s_max_i32 s0, s0, s3
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s1, s1, s2
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB21_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -10552,11 +10465,10 @@ define amdgpu_kernel void @max_i32_varying(ptr addrspace(1) %out) {
; GFX1164_ITERATIVE-NEXT: s_ctz_i32_b64 s3, s[0:1]
; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s8, v1, s3
-; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v0, s2, s3
-; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
+; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1164_ITERATIVE-NEXT: s_max_i32 s2, s2, s8
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB21_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -10596,11 +10508,10 @@ define amdgpu_kernel void @max_i32_varying(ptr addrspace(1) %out) {
; GFX1132_ITERATIVE-NEXT: s_ctz_i32_b32 s2, s1
; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s3, v1, s2
-; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s6, 1, s2
; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s2
-; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s1, s1, s6
+; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s2, 1, s2
; GFX1132_ITERATIVE-NEXT: s_max_i32 s0, s0, s3
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s1, s1, s2
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB21_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -11255,7 +11166,6 @@ define amdgpu_kernel void @max_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s10
; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s8
; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB23_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -11311,7 +11221,6 @@ define amdgpu_kernel void @max_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s10
; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s8
; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB23_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -11363,7 +11272,6 @@ define amdgpu_kernel void @max_i64_varying(ptr addrspace(1) %out) {
; GFX1064_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s6
; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s10
; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB23_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -11415,7 +11323,6 @@ define amdgpu_kernel void @max_i64_varying(ptr addrspace(1) %out) {
; GFX1032_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s6
; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s3
; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s2, s2, s3
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s2, 0
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB23_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -11468,9 +11375,8 @@ define amdgpu_kernel void @max_i64_varying(ptr addrspace(1) %out) {
; GFX1164_ITERATIVE-NEXT: s_cselect_b32 s1, s1, s7
; GFX1164_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s6
; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s10
-; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[6:7]
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB23_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -11525,9 +11431,8 @@ define amdgpu_kernel void @max_i64_varying(ptr addrspace(1) %out) {
; GFX1132_ITERATIVE-NEXT: s_cselect_b32 s1, s1, s7
; GFX1132_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s6
; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s3
-; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s2, s2, s3
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s2, 0
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB23_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -12214,12 +12119,11 @@ define amdgpu_kernel void @min_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, s3
-; GFX8_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX8_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX8_ITERATIVE-NEXT: v_writelane_b32 v1, s2, m0
-; GFX8_ITERATIVE-NEXT: s_min_i32 s2, s2, s8
+; GFX8_ITERATIVE-NEXT: s_min_i32 s2, s2, s6
+; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB24_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -12255,12 +12159,11 @@ define amdgpu_kernel void @min_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX9_ITERATIVE-NEXT: s_mov_b32 m0, s3
-; GFX9_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX9_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX9_ITERATIVE-NEXT: v_writelane_b32 v1, s2, m0
-; GFX9_ITERATIVE-NEXT: s_min_i32 s2, s2, s8
+; GFX9_ITERATIVE-NEXT: s_min_i32 s2, s2, s6
+; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB24_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -12295,11 +12198,10 @@ define amdgpu_kernel void @min_i32_varying(ptr addrspace(1) %out) {
; GFX1064_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v1, s2, s3
-; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
+; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1064_ITERATIVE-NEXT: s_min_i32 s2, s2, s8
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB24_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -12336,11 +12238,10 @@ define amdgpu_kernel void @min_i32_varying(ptr addrspace(1) %out) {
; GFX1032_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032_ITERATIVE-NEXT: s_ff1_i32_b32 s2, s1
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s3, v0, s2
-; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s6, 1, s2
; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s2
-; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s1, s1, s6
+; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s2, 1, s2
; GFX1032_ITERATIVE-NEXT: s_min_i32 s0, s0, s3
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s1, s1, s2
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB24_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -12378,11 +12279,10 @@ define amdgpu_kernel void @min_i32_varying(ptr addrspace(1) %out) {
; GFX1164_ITERATIVE-NEXT: s_ctz_i32_b64 s3, s[0:1]
; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s8, v1, s3
-; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v0, s2, s3
-; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
+; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1164_ITERATIVE-NEXT: s_min_i32 s2, s2, s8
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB24_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -12422,11 +12322,10 @@ define amdgpu_kernel void @min_i32_varying(ptr addrspace(1) %out) {
; GFX1132_ITERATIVE-NEXT: s_ctz_i32_b32 s2, s1
; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s3, v1, s2
-; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s6, 1, s2
; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s2
-; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s1, s1, s6
+; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s2, 1, s2
; GFX1132_ITERATIVE-NEXT: s_min_i32 s0, s0, s3
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s1, s1, s2
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB24_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -13081,7 +12980,6 @@ define amdgpu_kernel void @min_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s10
; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s8
; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB26_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -13137,7 +13035,6 @@ define amdgpu_kernel void @min_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s10
; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s8
; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB26_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -13189,7 +13086,6 @@ define amdgpu_kernel void @min_i64_varying(ptr addrspace(1) %out) {
; GFX1064_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s6
; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s10
; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB26_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -13241,7 +13137,6 @@ define amdgpu_kernel void @min_i64_varying(ptr addrspace(1) %out) {
; GFX1032_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s6
; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s3
; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s2, s2, s3
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s2, 0
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB26_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -13294,9 +13189,8 @@ define amdgpu_kernel void @min_i64_varying(ptr addrspace(1) %out) {
; GFX1164_ITERATIVE-NEXT: s_cselect_b32 s1, s1, s7
; GFX1164_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s6
; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s10
-; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[6:7]
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB26_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -13351,9 +13245,8 @@ define amdgpu_kernel void @min_i64_varying(ptr addrspace(1) %out) {
; GFX1132_ITERATIVE-NEXT: s_cselect_b32 s1, s1, s7
; GFX1132_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s6
; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s3
-; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s2, s2, s3
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s2, 0
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB26_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -14040,12 +13933,11 @@ define amdgpu_kernel void @umax_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, s3
-; GFX8_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX8_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX8_ITERATIVE-NEXT: v_writelane_b32 v1, s2, m0
-; GFX8_ITERATIVE-NEXT: s_max_u32 s2, s2, s8
+; GFX8_ITERATIVE-NEXT: s_max_u32 s2, s2, s6
+; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB27_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -14081,12 +13973,11 @@ define amdgpu_kernel void @umax_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX9_ITERATIVE-NEXT: s_mov_b32 m0, s3
-; GFX9_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX9_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX9_ITERATIVE-NEXT: v_writelane_b32 v1, s2, m0
-; GFX9_ITERATIVE-NEXT: s_max_u32 s2, s2, s8
+; GFX9_ITERATIVE-NEXT: s_max_u32 s2, s2, s6
+; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB27_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -14121,11 +14012,10 @@ define amdgpu_kernel void @umax_i32_varying(ptr addrspace(1) %out) {
; GFX1064_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v1, s2, s3
-; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
+; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1064_ITERATIVE-NEXT: s_max_u32 s2, s2, s8
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB27_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -14162,11 +14052,10 @@ define amdgpu_kernel void @umax_i32_varying(ptr addrspace(1) %out) {
; GFX1032_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032_ITERATIVE-NEXT: s_ff1_i32_b32 s2, s1
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s3, v0, s2
-; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s6, 1, s2
; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s2
-; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s1, s1, s6
+; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s2, 1, s2
; GFX1032_ITERATIVE-NEXT: s_max_u32 s0, s0, s3
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s1, s1, s2
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB27_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -14204,11 +14093,10 @@ define amdgpu_kernel void @umax_i32_varying(ptr addrspace(1) %out) {
; GFX1164_ITERATIVE-NEXT: s_ctz_i32_b64 s3, s[0:1]
; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s8, v1, s3
-; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v0, s2, s3
-; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
+; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1164_ITERATIVE-NEXT: s_max_u32 s2, s2, s8
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB27_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -14248,11 +14136,10 @@ define amdgpu_kernel void @umax_i32_varying(ptr addrspace(1) %out) {
; GFX1132_ITERATIVE-NEXT: s_ctz_i32_b32 s2, s1
; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s3, v1, s2
-; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s6, 1, s2
; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s2
-; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s1, s1, s6
+; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s2, 1, s2
; GFX1132_ITERATIVE-NEXT: s_max_u32 s0, s0, s3
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s1, s1, s2
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB27_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -14901,7 +14788,6 @@ define amdgpu_kernel void @umax_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s10
; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s8
; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB29_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -14956,7 +14842,6 @@ define amdgpu_kernel void @umax_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s10
; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s8
; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB29_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -15007,7 +14892,6 @@ define amdgpu_kernel void @umax_i64_varying(ptr addrspace(1) %out) {
; GFX1064_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s6
; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s10
; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB29_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -15058,7 +14942,6 @@ define amdgpu_kernel void @umax_i64_varying(ptr addrspace(1) %out) {
; GFX1032_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s6
; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s3
; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s2, s2, s3
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s2, 0
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB29_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -15112,8 +14995,6 @@ define amdgpu_kernel void @umax_i64_varying(ptr addrspace(1) %out) {
; GFX1164_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s6
; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s10
; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[6:7]
-; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB29_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -15169,8 +15050,6 @@ define amdgpu_kernel void @umax_i64_varying(ptr addrspace(1) %out) {
; GFX1132_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s6
; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s3
; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s2, s2, s3
-; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s2, 0
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB29_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -15853,12 +15732,11 @@ define amdgpu_kernel void @umin_i32_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX8_ITERATIVE-NEXT: s_mov_b32 m0, s3
-; GFX8_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX8_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX8_ITERATIVE-NEXT: v_writelane_b32 v1, s2, m0
-; GFX8_ITERATIVE-NEXT: s_min_u32 s2, s2, s8
+; GFX8_ITERATIVE-NEXT: s_min_u32 s2, s2, s6
+; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB30_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -15894,12 +15772,11 @@ define amdgpu_kernel void @umin_i32_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX9_ITERATIVE-NEXT: s_mov_b32 m0, s3
-; GFX9_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX9_ITERATIVE-NEXT: v_readlane_b32 s6, v0, s3
; GFX9_ITERATIVE-NEXT: v_writelane_b32 v1, s2, m0
-; GFX9_ITERATIVE-NEXT: s_min_u32 s2, s2, s8
+; GFX9_ITERATIVE-NEXT: s_min_u32 s2, s2, s6
+; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB30_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -15934,11 +15811,10 @@ define amdgpu_kernel void @umin_i32_varying(ptr addrspace(1) %out) {
; GFX1064_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064_ITERATIVE-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX1064_ITERATIVE-NEXT: v_readlane_b32 s8, v0, s3
-; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1064_ITERATIVE-NEXT: v_writelane_b32 v1, s2, s3
-; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
+; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1064_ITERATIVE-NEXT: s_min_u32 s2, s2, s8
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB30_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -15975,11 +15851,10 @@ define amdgpu_kernel void @umin_i32_varying(ptr addrspace(1) %out) {
; GFX1032_ITERATIVE-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032_ITERATIVE-NEXT: s_ff1_i32_b32 s2, s1
; GFX1032_ITERATIVE-NEXT: v_readlane_b32 s3, v0, s2
-; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s6, 1, s2
; GFX1032_ITERATIVE-NEXT: v_writelane_b32 v1, s0, s2
-; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s1, s1, s6
+; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s2, 1, s2
; GFX1032_ITERATIVE-NEXT: s_min_u32 s0, s0, s3
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s1, s1, s2
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB30_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -16017,11 +15892,10 @@ define amdgpu_kernel void @umin_i32_varying(ptr addrspace(1) %out) {
; GFX1164_ITERATIVE-NEXT: s_ctz_i32_b64 s3, s[0:1]
; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1164_ITERATIVE-NEXT: v_readlane_b32 s8, v1, s3
-; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1164_ITERATIVE-NEXT: v_writelane_b32 v0, s2, s3
-; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
+; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX1164_ITERATIVE-NEXT: s_min_u32 s2, s2, s8
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB30_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -16061,11 +15935,10 @@ define amdgpu_kernel void @umin_i32_varying(ptr addrspace(1) %out) {
; GFX1132_ITERATIVE-NEXT: s_ctz_i32_b32 s2, s1
; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX1132_ITERATIVE-NEXT: v_readlane_b32 s3, v1, s2
-; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s6, 1, s2
; GFX1132_ITERATIVE-NEXT: v_writelane_b32 v0, s0, s2
-; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s1, s1, s6
+; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s2, 1, s2
; GFX1132_ITERATIVE-NEXT: s_min_u32 s0, s0, s3
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s1, s1, s2
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB30_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -16715,7 +16588,6 @@ define amdgpu_kernel void @umin_i64_varying(ptr addrspace(1) %out) {
; GFX8_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s10
; GFX8_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s8
; GFX8_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
-; GFX8_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX8_ITERATIVE-NEXT: s_cbranch_scc1 .LBB32_1
; GFX8_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -16770,7 +16642,6 @@ define amdgpu_kernel void @umin_i64_varying(ptr addrspace(1) %out) {
; GFX9_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s10
; GFX9_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s8
; GFX9_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
-; GFX9_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX9_ITERATIVE-NEXT: s_cbranch_scc1 .LBB32_1
; GFX9_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -16821,7 +16692,6 @@ define amdgpu_kernel void @umin_i64_varying(ptr addrspace(1) %out) {
; GFX1064_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s6
; GFX1064_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s10
; GFX1064_ITERATIVE-NEXT: s_andn2_b64 s[2:3], s[2:3], s[6:7]
-; GFX1064_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX1064_ITERATIVE-NEXT: s_cbranch_scc1 .LBB32_1
; GFX1064_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -16872,7 +16742,6 @@ define amdgpu_kernel void @umin_i64_varying(ptr addrspace(1) %out) {
; GFX1032_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s6
; GFX1032_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s3
; GFX1032_ITERATIVE-NEXT: s_andn2_b32 s2, s2, s3
-; GFX1032_ITERATIVE-NEXT: s_cmp_lg_u32 s2, 0
; GFX1032_ITERATIVE-NEXT: s_cbranch_scc1 .LBB32_1
; GFX1032_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -16926,8 +16795,6 @@ define amdgpu_kernel void @umin_i64_varying(ptr addrspace(1) %out) {
; GFX1164_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s6
; GFX1164_ITERATIVE-NEXT: s_lshl_b64 s[6:7], 1, s10
; GFX1164_ITERATIVE-NEXT: s_and_not1_b64 s[2:3], s[2:3], s[6:7]
-; GFX1164_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1164_ITERATIVE-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX1164_ITERATIVE-NEXT: s_cbranch_scc1 .LBB32_1
; GFX1164_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -16983,8 +16850,6 @@ define amdgpu_kernel void @umin_i64_varying(ptr addrspace(1) %out) {
; GFX1132_ITERATIVE-NEXT: s_cselect_b32 s0, s0, s6
; GFX1132_ITERATIVE-NEXT: s_lshl_b32 s3, 1, s3
; GFX1132_ITERATIVE-NEXT: s_and_not1_b32 s2, s2, s3
-; GFX1132_ITERATIVE-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1132_ITERATIVE-NEXT: s_cmp_lg_u32 s2, 0
; GFX1132_ITERATIVE-NEXT: s_cbranch_scc1 .LBB32_1
; GFX1132_ITERATIVE-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132_ITERATIVE-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_raw_buffer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_raw_buffer.ll
index e4def28..9afc0c6 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_raw_buffer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_raw_buffer.ll
@@ -611,12 +611,11 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX8-NEXT: s_mov_b32 m0, s3
-; GFX8-NEXT: v_readlane_b32 s8, v0, s3
-; GFX8-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX8-NEXT: v_readlane_b32 s6, v0, s3
; GFX8-NEXT: v_writelane_b32 v1, s2, m0
-; GFX8-NEXT: s_add_i32 s2, s2, s8
+; GFX8-NEXT: s_add_i32 s2, s2, s6
+; GFX8-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX8-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX8-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX8-NEXT: s_cbranch_scc1 .LBB2_1
; GFX8-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -652,12 +651,11 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX9-NEXT: s_mov_b32 m0, s3
-; GFX9-NEXT: v_readlane_b32 s8, v0, s3
-; GFX9-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX9-NEXT: v_readlane_b32 s6, v0, s3
; GFX9-NEXT: v_writelane_b32 v1, s2, m0
-; GFX9-NEXT: s_add_i32 s2, s2, s8
+; GFX9-NEXT: s_add_i32 s2, s2, s6
+; GFX9-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: s_cbranch_scc1 .LBB2_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -692,11 +690,10 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX10W64-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10W64-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX10W64-NEXT: v_readlane_b32 s8, v0, s3
-; GFX10W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX10W64-NEXT: v_writelane_b32 v1, s2, s3
-; GFX10W64-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
+; GFX10W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX10W64-NEXT: s_add_i32 s2, s2, s8
-; GFX10W64-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX10W64-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
; GFX10W64-NEXT: s_cbranch_scc1 .LBB2_1
; GFX10W64-NEXT: ; %bb.2: ; %ComputeEnd
; GFX10W64-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -732,11 +729,10 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX10W32-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10W32-NEXT: s_ff1_i32_b32 s2, s1
; GFX10W32-NEXT: v_readlane_b32 s3, v0, s2
-; GFX10W32-NEXT: s_lshl_b32 s6, 1, s2
; GFX10W32-NEXT: v_writelane_b32 v1, s0, s2
-; GFX10W32-NEXT: s_andn2_b32 s1, s1, s6
+; GFX10W32-NEXT: s_lshl_b32 s2, 1, s2
; GFX10W32-NEXT: s_add_i32 s0, s0, s3
-; GFX10W32-NEXT: s_cmp_lg_u32 s1, 0
+; GFX10W32-NEXT: s_andn2_b32 s1, s1, s2
; GFX10W32-NEXT: s_cbranch_scc1 .LBB2_1
; GFX10W32-NEXT: ; %bb.2: ; %ComputeEnd
; GFX10W32-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -773,11 +769,10 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX11W64-NEXT: s_ctz_i32_b64 s3, s[0:1]
; GFX11W64-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11W64-NEXT: v_readlane_b32 s8, v1, s3
-; GFX11W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX11W64-NEXT: v_writelane_b32 v0, s2, s3
-; GFX11W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
+; GFX11W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX11W64-NEXT: s_add_i32 s2, s2, s8
-; GFX11W64-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX11W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
; GFX11W64-NEXT: s_cbranch_scc1 .LBB2_1
; GFX11W64-NEXT: ; %bb.2: ; %ComputeEnd
; GFX11W64-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -817,11 +812,10 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX11W32-NEXT: s_ctz_i32_b32 s2, s1
; GFX11W32-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11W32-NEXT: v_readlane_b32 s3, v1, s2
-; GFX11W32-NEXT: s_lshl_b32 s6, 1, s2
; GFX11W32-NEXT: v_writelane_b32 v0, s0, s2
-; GFX11W32-NEXT: s_and_not1_b32 s1, s1, s6
+; GFX11W32-NEXT: s_lshl_b32 s2, 1, s2
; GFX11W32-NEXT: s_add_i32 s0, s0, s3
-; GFX11W32-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11W32-NEXT: s_and_not1_b32 s1, s1, s2
; GFX11W32-NEXT: s_cbranch_scc1 .LBB2_1
; GFX11W32-NEXT: ; %bb.2: ; %ComputeEnd
; GFX11W32-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -858,11 +852,10 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX12W64-NEXT: s_ctz_i32_b64 s3, s[0:1]
; GFX12W64-NEXT: s_wait_alu 0xfffe
; GFX12W64-NEXT: v_readlane_b32 s8, v1, s3
-; GFX12W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX12W64-NEXT: v_writelane_b32 v0, s2, s3
-; GFX12W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
+; GFX12W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX12W64-NEXT: s_add_co_i32 s2, s2, s8
-; GFX12W64-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX12W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
; GFX12W64-NEXT: s_cbranch_scc1 .LBB2_1
; GFX12W64-NEXT: ; %bb.2: ; %ComputeEnd
; GFX12W64-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -900,15 +893,15 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX12W32-NEXT: ; implicit-def: $vgpr0
; GFX12W32-NEXT: .LBB2_1: ; %ComputeLoop
; GFX12W32-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12W32-NEXT: s_wait_alu 0xfffe
; GFX12W32-NEXT: s_ctz_i32_b32 s2, s1
; GFX12W32-NEXT: s_wait_alu 0xfffe
; GFX12W32-NEXT: v_readlane_b32 s3, v1, s2
-; GFX12W32-NEXT: s_lshl_b32 s6, 1, s2
; GFX12W32-NEXT: v_writelane_b32 v0, s0, s2
-; GFX12W32-NEXT: s_and_not1_b32 s1, s1, s6
+; GFX12W32-NEXT: s_lshl_b32 s2, 1, s2
; GFX12W32-NEXT: s_add_co_i32 s0, s0, s3
; GFX12W32-NEXT: s_wait_alu 0xfffe
-; GFX12W32-NEXT: s_cmp_lg_u32 s1, 0
+; GFX12W32-NEXT: s_and_not1_b32 s1, s1, s2
; GFX12W32-NEXT: s_cbranch_scc1 .LBB2_1
; GFX12W32-NEXT: ; %bb.2: ; %ComputeEnd
; GFX12W32-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -1665,12 +1658,11 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX8-NEXT: s_mov_b32 m0, s3
-; GFX8-NEXT: v_readlane_b32 s8, v0, s3
-; GFX8-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX8-NEXT: v_readlane_b32 s6, v0, s3
; GFX8-NEXT: v_writelane_b32 v1, s2, m0
-; GFX8-NEXT: s_add_i32 s2, s2, s8
+; GFX8-NEXT: s_add_i32 s2, s2, s6
+; GFX8-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX8-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX8-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX8-NEXT: s_cbranch_scc1 .LBB6_1
; GFX8-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -1706,12 +1698,11 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX9-NEXT: s_mov_b32 m0, s3
-; GFX9-NEXT: v_readlane_b32 s8, v0, s3
-; GFX9-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX9-NEXT: v_readlane_b32 s6, v0, s3
; GFX9-NEXT: v_writelane_b32 v1, s2, m0
-; GFX9-NEXT: s_add_i32 s2, s2, s8
+; GFX9-NEXT: s_add_i32 s2, s2, s6
+; GFX9-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: s_cbranch_scc1 .LBB6_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -1746,11 +1737,10 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX10W64-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10W64-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX10W64-NEXT: v_readlane_b32 s8, v0, s3
-; GFX10W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX10W64-NEXT: v_writelane_b32 v1, s2, s3
-; GFX10W64-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
+; GFX10W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX10W64-NEXT: s_add_i32 s2, s2, s8
-; GFX10W64-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX10W64-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
; GFX10W64-NEXT: s_cbranch_scc1 .LBB6_1
; GFX10W64-NEXT: ; %bb.2: ; %ComputeEnd
; GFX10W64-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -1786,11 +1776,10 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX10W32-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10W32-NEXT: s_ff1_i32_b32 s2, s1
; GFX10W32-NEXT: v_readlane_b32 s3, v0, s2
-; GFX10W32-NEXT: s_lshl_b32 s6, 1, s2
; GFX10W32-NEXT: v_writelane_b32 v1, s0, s2
-; GFX10W32-NEXT: s_andn2_b32 s1, s1, s6
+; GFX10W32-NEXT: s_lshl_b32 s2, 1, s2
; GFX10W32-NEXT: s_add_i32 s0, s0, s3
-; GFX10W32-NEXT: s_cmp_lg_u32 s1, 0
+; GFX10W32-NEXT: s_andn2_b32 s1, s1, s2
; GFX10W32-NEXT: s_cbranch_scc1 .LBB6_1
; GFX10W32-NEXT: ; %bb.2: ; %ComputeEnd
; GFX10W32-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -1827,11 +1816,10 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX11W64-NEXT: s_ctz_i32_b64 s3, s[0:1]
; GFX11W64-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11W64-NEXT: v_readlane_b32 s8, v1, s3
-; GFX11W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX11W64-NEXT: v_writelane_b32 v0, s2, s3
-; GFX11W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
+; GFX11W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX11W64-NEXT: s_add_i32 s2, s2, s8
-; GFX11W64-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX11W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
; GFX11W64-NEXT: s_cbranch_scc1 .LBB6_1
; GFX11W64-NEXT: ; %bb.2: ; %ComputeEnd
; GFX11W64-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -1871,11 +1859,10 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX11W32-NEXT: s_ctz_i32_b32 s2, s1
; GFX11W32-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11W32-NEXT: v_readlane_b32 s3, v1, s2
-; GFX11W32-NEXT: s_lshl_b32 s6, 1, s2
; GFX11W32-NEXT: v_writelane_b32 v0, s0, s2
-; GFX11W32-NEXT: s_and_not1_b32 s1, s1, s6
+; GFX11W32-NEXT: s_lshl_b32 s2, 1, s2
; GFX11W32-NEXT: s_add_i32 s0, s0, s3
-; GFX11W32-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11W32-NEXT: s_and_not1_b32 s1, s1, s2
; GFX11W32-NEXT: s_cbranch_scc1 .LBB6_1
; GFX11W32-NEXT: ; %bb.2: ; %ComputeEnd
; GFX11W32-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -1913,11 +1900,10 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX12W64-NEXT: s_ctz_i32_b64 s3, s[0:1]
; GFX12W64-NEXT: s_wait_alu 0xfffe
; GFX12W64-NEXT: v_readlane_b32 s8, v1, s3
-; GFX12W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX12W64-NEXT: v_writelane_b32 v0, s2, s3
-; GFX12W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
+; GFX12W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX12W64-NEXT: s_add_co_i32 s2, s2, s8
-; GFX12W64-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX12W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
; GFX12W64-NEXT: s_cbranch_scc1 .LBB6_1
; GFX12W64-NEXT: ; %bb.2: ; %ComputeEnd
; GFX12W64-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -1955,15 +1941,15 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX12W32-NEXT: ; implicit-def: $vgpr0
; GFX12W32-NEXT: .LBB6_1: ; %ComputeLoop
; GFX12W32-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12W32-NEXT: s_wait_alu 0xfffe
; GFX12W32-NEXT: s_ctz_i32_b32 s2, s1
; GFX12W32-NEXT: s_wait_alu 0xfffe
; GFX12W32-NEXT: v_readlane_b32 s3, v1, s2
-; GFX12W32-NEXT: s_lshl_b32 s6, 1, s2
; GFX12W32-NEXT: v_writelane_b32 v0, s0, s2
-; GFX12W32-NEXT: s_and_not1_b32 s1, s1, s6
+; GFX12W32-NEXT: s_lshl_b32 s2, 1, s2
; GFX12W32-NEXT: s_add_co_i32 s0, s0, s3
; GFX12W32-NEXT: s_wait_alu 0xfffe
-; GFX12W32-NEXT: s_cmp_lg_u32 s1, 0
+; GFX12W32-NEXT: s_and_not1_b32 s1, s1, s2
; GFX12W32-NEXT: s_cbranch_scc1 .LBB6_1
; GFX12W32-NEXT: ; %bb.2: ; %ComputeEnd
; GFX12W32-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_struct_buffer.ll b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_struct_buffer.ll
index 39a3c9a..10fd34f 100644
--- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_struct_buffer.ll
+++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_struct_buffer.ll
@@ -628,12 +628,11 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX8-NEXT: s_mov_b32 m0, s3
-; GFX8-NEXT: v_readlane_b32 s8, v0, s3
-; GFX8-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX8-NEXT: v_readlane_b32 s6, v0, s3
; GFX8-NEXT: v_writelane_b32 v1, s2, m0
-; GFX8-NEXT: s_add_i32 s2, s2, s8
+; GFX8-NEXT: s_add_i32 s2, s2, s6
+; GFX8-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX8-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX8-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX8-NEXT: s_cbranch_scc1 .LBB2_1
; GFX8-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -670,12 +669,11 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX9-NEXT: s_mov_b32 m0, s3
-; GFX9-NEXT: v_readlane_b32 s8, v0, s3
-; GFX9-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX9-NEXT: v_readlane_b32 s6, v0, s3
; GFX9-NEXT: v_writelane_b32 v1, s2, m0
-; GFX9-NEXT: s_add_i32 s2, s2, s8
+; GFX9-NEXT: s_add_i32 s2, s2, s6
+; GFX9-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: s_cbranch_scc1 .LBB2_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -711,11 +709,10 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX10W64-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10W64-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX10W64-NEXT: v_readlane_b32 s8, v0, s3
-; GFX10W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX10W64-NEXT: v_writelane_b32 v1, s2, s3
-; GFX10W64-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
+; GFX10W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX10W64-NEXT: s_add_i32 s2, s2, s8
-; GFX10W64-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX10W64-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
; GFX10W64-NEXT: s_cbranch_scc1 .LBB2_1
; GFX10W64-NEXT: ; %bb.2: ; %ComputeEnd
; GFX10W64-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -752,11 +749,10 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX10W32-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10W32-NEXT: s_ff1_i32_b32 s2, s1
; GFX10W32-NEXT: v_readlane_b32 s3, v0, s2
-; GFX10W32-NEXT: s_lshl_b32 s6, 1, s2
; GFX10W32-NEXT: v_writelane_b32 v1, s0, s2
-; GFX10W32-NEXT: s_andn2_b32 s1, s1, s6
+; GFX10W32-NEXT: s_lshl_b32 s2, 1, s2
; GFX10W32-NEXT: s_add_i32 s0, s0, s3
-; GFX10W32-NEXT: s_cmp_lg_u32 s1, 0
+; GFX10W32-NEXT: s_andn2_b32 s1, s1, s2
; GFX10W32-NEXT: s_cbranch_scc1 .LBB2_1
; GFX10W32-NEXT: ; %bb.2: ; %ComputeEnd
; GFX10W32-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -794,11 +790,10 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX11W64-NEXT: s_ctz_i32_b64 s3, s[0:1]
; GFX11W64-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11W64-NEXT: v_readlane_b32 s8, v1, s3
-; GFX11W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX11W64-NEXT: v_writelane_b32 v0, s2, s3
-; GFX11W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
+; GFX11W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX11W64-NEXT: s_add_i32 s2, s2, s8
-; GFX11W64-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX11W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
; GFX11W64-NEXT: s_cbranch_scc1 .LBB2_1
; GFX11W64-NEXT: ; %bb.2: ; %ComputeEnd
; GFX11W64-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -839,11 +834,10 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX11W32-NEXT: s_ctz_i32_b32 s2, s1
; GFX11W32-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11W32-NEXT: v_readlane_b32 s3, v1, s2
-; GFX11W32-NEXT: s_lshl_b32 s6, 1, s2
; GFX11W32-NEXT: v_writelane_b32 v0, s0, s2
-; GFX11W32-NEXT: s_and_not1_b32 s1, s1, s6
+; GFX11W32-NEXT: s_lshl_b32 s2, 1, s2
; GFX11W32-NEXT: s_add_i32 s0, s0, s3
-; GFX11W32-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11W32-NEXT: s_and_not1_b32 s1, s1, s2
; GFX11W32-NEXT: s_cbranch_scc1 .LBB2_1
; GFX11W32-NEXT: ; %bb.2: ; %ComputeEnd
; GFX11W32-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -880,11 +874,10 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX12W64-NEXT: s_ctz_i32_b64 s3, s[0:1]
; GFX12W64-NEXT: s_wait_alu 0xfffe
; GFX12W64-NEXT: v_readlane_b32 s8, v1, s3
-; GFX12W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX12W64-NEXT: v_writelane_b32 v0, s2, s3
-; GFX12W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
+; GFX12W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX12W64-NEXT: s_add_co_i32 s2, s2, s8
-; GFX12W64-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX12W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
; GFX12W64-NEXT: s_cbranch_scc1 .LBB2_1
; GFX12W64-NEXT: ; %bb.2: ; %ComputeEnd
; GFX12W64-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -923,15 +916,15 @@ define amdgpu_kernel void @add_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX12W32-NEXT: ; implicit-def: $vgpr0
; GFX12W32-NEXT: .LBB2_1: ; %ComputeLoop
; GFX12W32-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12W32-NEXT: s_wait_alu 0xfffe
; GFX12W32-NEXT: s_ctz_i32_b32 s2, s1
; GFX12W32-NEXT: s_wait_alu 0xfffe
; GFX12W32-NEXT: v_readlane_b32 s3, v1, s2
-; GFX12W32-NEXT: s_lshl_b32 s6, 1, s2
; GFX12W32-NEXT: v_writelane_b32 v0, s0, s2
-; GFX12W32-NEXT: s_and_not1_b32 s1, s1, s6
+; GFX12W32-NEXT: s_lshl_b32 s2, 1, s2
; GFX12W32-NEXT: s_add_co_i32 s0, s0, s3
; GFX12W32-NEXT: s_wait_alu 0xfffe
-; GFX12W32-NEXT: s_cmp_lg_u32 s1, 0
+; GFX12W32-NEXT: s_and_not1_b32 s1, s1, s2
; GFX12W32-NEXT: s_cbranch_scc1 .LBB2_1
; GFX12W32-NEXT: ; %bb.2: ; %ComputeEnd
; GFX12W32-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -1833,12 +1826,11 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX8-NEXT: s_mov_b32 m0, s3
-; GFX8-NEXT: v_readlane_b32 s8, v0, s3
-; GFX8-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX8-NEXT: v_readlane_b32 s6, v0, s3
; GFX8-NEXT: v_writelane_b32 v1, s2, m0
-; GFX8-NEXT: s_add_i32 s2, s2, s8
+; GFX8-NEXT: s_add_i32 s2, s2, s6
+; GFX8-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX8-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX8-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX8-NEXT: s_cbranch_scc1 .LBB7_1
; GFX8-NEXT: ; %bb.2: ; %ComputeEnd
; GFX8-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -1875,12 +1867,11 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX9-NEXT: s_mov_b32 m0, s3
-; GFX9-NEXT: v_readlane_b32 s8, v0, s3
-; GFX9-NEXT: s_lshl_b64 s[6:7], 1, s3
+; GFX9-NEXT: v_readlane_b32 s6, v0, s3
; GFX9-NEXT: v_writelane_b32 v1, s2, m0
-; GFX9-NEXT: s_add_i32 s2, s2, s8
+; GFX9-NEXT: s_add_i32 s2, s2, s6
+; GFX9-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: s_cbranch_scc1 .LBB7_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -1916,11 +1907,10 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX10W64-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10W64-NEXT: s_ff1_i32_b64 s3, s[0:1]
; GFX10W64-NEXT: v_readlane_b32 s8, v0, s3
-; GFX10W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX10W64-NEXT: v_writelane_b32 v1, s2, s3
-; GFX10W64-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
+; GFX10W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX10W64-NEXT: s_add_i32 s2, s2, s8
-; GFX10W64-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX10W64-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
; GFX10W64-NEXT: s_cbranch_scc1 .LBB7_1
; GFX10W64-NEXT: ; %bb.2: ; %ComputeEnd
; GFX10W64-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -1957,11 +1947,10 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX10W32-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10W32-NEXT: s_ff1_i32_b32 s2, s1
; GFX10W32-NEXT: v_readlane_b32 s3, v0, s2
-; GFX10W32-NEXT: s_lshl_b32 s6, 1, s2
; GFX10W32-NEXT: v_writelane_b32 v1, s0, s2
-; GFX10W32-NEXT: s_andn2_b32 s1, s1, s6
+; GFX10W32-NEXT: s_lshl_b32 s2, 1, s2
; GFX10W32-NEXT: s_add_i32 s0, s0, s3
-; GFX10W32-NEXT: s_cmp_lg_u32 s1, 0
+; GFX10W32-NEXT: s_andn2_b32 s1, s1, s2
; GFX10W32-NEXT: s_cbranch_scc1 .LBB7_1
; GFX10W32-NEXT: ; %bb.2: ; %ComputeEnd
; GFX10W32-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -1999,11 +1988,10 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX11W64-NEXT: s_ctz_i32_b64 s3, s[0:1]
; GFX11W64-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11W64-NEXT: v_readlane_b32 s8, v1, s3
-; GFX11W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX11W64-NEXT: v_writelane_b32 v0, s2, s3
-; GFX11W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
+; GFX11W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX11W64-NEXT: s_add_i32 s2, s2, s8
-; GFX11W64-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX11W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
; GFX11W64-NEXT: s_cbranch_scc1 .LBB7_1
; GFX11W64-NEXT: ; %bb.2: ; %ComputeEnd
; GFX11W64-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -2044,11 +2032,10 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX11W32-NEXT: s_ctz_i32_b32 s2, s1
; GFX11W32-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11W32-NEXT: v_readlane_b32 s3, v1, s2
-; GFX11W32-NEXT: s_lshl_b32 s6, 1, s2
; GFX11W32-NEXT: v_writelane_b32 v0, s0, s2
-; GFX11W32-NEXT: s_and_not1_b32 s1, s1, s6
+; GFX11W32-NEXT: s_lshl_b32 s2, 1, s2
; GFX11W32-NEXT: s_add_i32 s0, s0, s3
-; GFX11W32-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11W32-NEXT: s_and_not1_b32 s1, s1, s2
; GFX11W32-NEXT: s_cbranch_scc1 .LBB7_1
; GFX11W32-NEXT: ; %bb.2: ; %ComputeEnd
; GFX11W32-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -2086,11 +2073,10 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX12W64-NEXT: s_ctz_i32_b64 s3, s[0:1]
; GFX12W64-NEXT: s_wait_alu 0xfffe
; GFX12W64-NEXT: v_readlane_b32 s8, v1, s3
-; GFX12W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX12W64-NEXT: v_writelane_b32 v0, s2, s3
-; GFX12W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
+; GFX12W64-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX12W64-NEXT: s_add_co_i32 s2, s2, s8
-; GFX12W64-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX12W64-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[6:7]
; GFX12W64-NEXT: s_cbranch_scc1 .LBB7_1
; GFX12W64-NEXT: ; %bb.2: ; %ComputeEnd
; GFX12W64-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
@@ -2129,15 +2115,15 @@ define amdgpu_kernel void @sub_i32_varying_vdata(ptr addrspace(1) %out, ptr addr
; GFX12W32-NEXT: ; implicit-def: $vgpr0
; GFX12W32-NEXT: .LBB7_1: ; %ComputeLoop
; GFX12W32-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX12W32-NEXT: s_wait_alu 0xfffe
; GFX12W32-NEXT: s_ctz_i32_b32 s2, s1
; GFX12W32-NEXT: s_wait_alu 0xfffe
; GFX12W32-NEXT: v_readlane_b32 s3, v1, s2
-; GFX12W32-NEXT: s_lshl_b32 s6, 1, s2
; GFX12W32-NEXT: v_writelane_b32 v0, s0, s2
-; GFX12W32-NEXT: s_and_not1_b32 s1, s1, s6
+; GFX12W32-NEXT: s_lshl_b32 s2, 1, s2
; GFX12W32-NEXT: s_add_co_i32 s0, s0, s3
; GFX12W32-NEXT: s_wait_alu 0xfffe
-; GFX12W32-NEXT: s_cmp_lg_u32 s1, 0
+; GFX12W32-NEXT: s_and_not1_b32 s1, s1, s2
; GFX12W32-NEXT: s_cbranch_scc1 .LBB7_1
; GFX12W32-NEXT: ; %bb.2: ; %ComputeEnd
; GFX12W32-NEXT: v_mbcnt_lo_u32_b32 v1, exec_lo, 0
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index 7ee0015f..711d57b 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -39137,7 +39137,7 @@ define bfloat @v_sitofp_i64_to_bf16(i64 %x) {
; GFX1250-NEXT: v_ashrrev_i32_e32 v2, 31, v2
; GFX1250-NEXT: v_add_nc_u32_e32 v2, 32, v2
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_add_min_u32_e64 v2, v3, -1, v2
+; GFX1250-NEXT: v_add_min_u32 v2, v3, -1, v2
; GFX1250-NEXT: v_lshlrev_b64_e32 v[0:1], v2, v[0:1]
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1250-NEXT: v_min_u32_e32 v0, 1, v0
@@ -39487,8 +39487,8 @@ define <2 x bfloat> @v_sitofp_v2i64_to_v2bf16(<2 x i64> %x) {
; GFX1250-NEXT: v_dual_ashrrev_i32 v5, 31, v5 :: v_dual_ashrrev_i32 v4, 31, v4
; GFX1250-NEXT: v_dual_add_nc_u32 v5, 32, v5 :: v_dual_add_nc_u32 v4, 32, v4
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX1250-NEXT: v_add_min_u32_e64 v5, v7, -1, v5
-; GFX1250-NEXT: v_add_min_u32_e64 v4, v6, -1, v4
+; GFX1250-NEXT: v_add_min_u32 v5, v7, -1, v5
+; GFX1250-NEXT: v_add_min_u32 v4, v6, -1, v4
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX1250-NEXT: v_lshlrev_b64_e32 v[0:1], v5, v[0:1]
; GFX1250-NEXT: v_lshlrev_b64_e32 v[2:3], v4, v[2:3]
@@ -39979,9 +39979,9 @@ define <3 x bfloat> @v_sitofp_v3i64_to_v3bf16(<3 x i64> %x) {
; GFX1250TRUE16-NEXT: v_dual_add_nc_u32 v7, 32, v7 :: v_dual_add_nc_u32 v6, 32, v6
; GFX1250TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX1250TRUE16-NEXT: v_ashrrev_i32_e32 v8, 31, v8
-; GFX1250TRUE16-NEXT: v_add_min_u32_e64 v7, v10, -1, v7
+; GFX1250TRUE16-NEXT: v_add_min_u32 v7, v10, -1, v7
; GFX1250TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX1250TRUE16-NEXT: v_add_min_u32_e64 v6, v9, -1, v6
+; GFX1250TRUE16-NEXT: v_add_min_u32 v6, v9, -1, v6
; GFX1250TRUE16-NEXT: v_lshlrev_b64_e32 v[2:3], v7, v[2:3]
; GFX1250TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX1250TRUE16-NEXT: v_lshlrev_b64_e32 v[4:5], v6, v[4:5]
@@ -39991,7 +39991,7 @@ define <3 x bfloat> @v_sitofp_v3i64_to_v3bf16(<3 x i64> %x) {
; GFX1250TRUE16-NEXT: v_min_u32_e32 v4, 1, v4
; GFX1250TRUE16-NEXT: v_or_b32_e32 v2, v3, v2
; GFX1250TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX1250TRUE16-NEXT: v_add_min_u32_e64 v8, v11, -1, v8
+; GFX1250TRUE16-NEXT: v_add_min_u32 v8, v11, -1, v8
; GFX1250TRUE16-NEXT: v_dual_sub_nc_u32 v3, 32, v6 :: v_dual_bitop2_b32 v4, v5, v4 bitop3:0x54
; GFX1250TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX1250TRUE16-NEXT: v_cvt_f32_i32_e32 v2, v2
@@ -40027,8 +40027,8 @@ define <3 x bfloat> @v_sitofp_v3i64_to_v3bf16(<3 x i64> %x) {
; GFX1250FAKE16-NEXT: v_dual_ashrrev_i32 v6, 31, v6 :: v_dual_ashrrev_i32 v7, 31, v7
; GFX1250FAKE16-NEXT: v_dual_add_nc_u32 v6, 32, v6 :: v_dual_add_nc_u32 v7, 32, v7
; GFX1250FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX1250FAKE16-NEXT: v_add_min_u32_e64 v6, v10, -1, v6
-; GFX1250FAKE16-NEXT: v_add_min_u32_e64 v7, v11, -1, v7
+; GFX1250FAKE16-NEXT: v_add_min_u32 v6, v10, -1, v6
+; GFX1250FAKE16-NEXT: v_add_min_u32 v7, v11, -1, v7
; GFX1250FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX1250FAKE16-NEXT: v_lshlrev_b64_e32 v[2:3], v6, v[2:3]
; GFX1250FAKE16-NEXT: v_lshlrev_b64_e32 v[0:1], v7, v[0:1]
@@ -40038,7 +40038,7 @@ define <3 x bfloat> @v_sitofp_v3i64_to_v3bf16(<3 x i64> %x) {
; GFX1250FAKE16-NEXT: v_min_u32_e32 v0, 1, v0
; GFX1250FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX1250FAKE16-NEXT: v_or_b32_e32 v2, v3, v2
-; GFX1250FAKE16-NEXT: v_add_min_u32_e64 v8, v9, -1, v8
+; GFX1250FAKE16-NEXT: v_add_min_u32 v8, v9, -1, v8
; GFX1250FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX1250FAKE16-NEXT: v_dual_sub_nc_u32 v3, 32, v6 :: v_dual_bitop2_b32 v0, v1, v0 bitop3:0x54
; GFX1250FAKE16-NEXT: v_cvt_f32_i32_e32 v2, v2
@@ -40656,18 +40656,18 @@ define <4 x bfloat> @v_sitofp_v4i64_to_v4bf16(<4 x i64> %x) {
; GFX1250-NEXT: v_dual_add_nc_u32 v9, 32, v9 :: v_dual_add_nc_u32 v8, 32, v8
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX1250-NEXT: v_dual_ashrrev_i32 v10, 31, v10 :: v_dual_bitop2_b32 v11, v0, v1 bitop3:0x14
-; GFX1250-NEXT: v_add_min_u32_e64 v9, v13, -1, v9
+; GFX1250-NEXT: v_add_min_u32 v9, v13, -1, v9
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX1250-NEXT: v_add_min_u32_e64 v8, v12, -1, v8
+; GFX1250-NEXT: v_add_min_u32 v8, v12, -1, v8
; GFX1250-NEXT: v_dual_ashrrev_i32 v11, 31, v11 :: v_dual_add_nc_u32 v10, 32, v10
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX1250-NEXT: v_lshlrev_b64_e32 v[4:5], v9, v[4:5]
; GFX1250-NEXT: v_lshlrev_b64_e32 v[6:7], v8, v[6:7]
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX1250-NEXT: v_add_nc_u32_e32 v11, 32, v11
-; GFX1250-NEXT: v_add_min_u32_e64 v10, v14, -1, v10
+; GFX1250-NEXT: v_add_min_u32 v10, v14, -1, v10
; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX1250-NEXT: v_add_min_u32_e64 v11, v15, -1, v11
+; GFX1250-NEXT: v_add_min_u32 v11, v15, -1, v11
; GFX1250-NEXT: v_lshlrev_b64_e32 v[2:3], v10, v[2:3]
; GFX1250-NEXT: v_min_u32_e32 v6, 1, v6
; GFX1250-NEXT: v_min_u32_e32 v4, 1, v4
diff --git a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
index 4a6fa4f..b96de17 100644
--- a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
+++ b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
@@ -704,7 +704,6 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; CISI-NEXT: s_add_u32 s4, s4, s6
; CISI-NEXT: s_cselect_b64 s[12:13], -1, 0
; CISI-NEXT: s_or_b32 s6, s12, s13
-; CISI-NEXT: s_cmp_lg_u32 s6, 0
; CISI-NEXT: s_addc_u32 s5, s5, s7
; CISI-NEXT: s_mov_b32 s8, s0
; CISI-NEXT: s_mov_b32 s9, s1
@@ -725,16 +724,14 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v2, s2
-; VI-NEXT: s_add_u32 s2, s4, s6
; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: s_add_u32 s0, s4, s6
; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: s_addc_u32 s1, s5, s7
+; VI-NEXT: v_mov_b32_e32 v4, s0
+; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; VI-NEXT: s_cmp_lg_u64 s[0:1], 0
-; VI-NEXT: s_addc_u32 s0, s5, s7
-; VI-NEXT: v_mov_b32_e32 v4, s2
-; VI-NEXT: v_mov_b32_e32 v5, s0
-; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: v_mov_b32_e32 v3, s3
; VI-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
@@ -746,12 +743,10 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX9-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_add_u32 s2, s12, s14
-; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT: s_addc_u32 s0, s13, s15
-; GFX9-NEXT: v_mov_b32_e32 v0, s2
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: s_add_u32 s0, s12, s14
+; GFX9-NEXT: s_addc_u32 s1, s13, s15
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[0:1]
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
@@ -764,10 +759,8 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1010-NEXT: v_mov_b32_e32 v2, 0
; GFX1010-NEXT: s_waitcnt lgkmcnt(0)
; GFX1010-NEXT: s_add_u32 s0, s12, s14
-; GFX1010-NEXT: s_cselect_b32 s1, -1, 0
-; GFX1010-NEXT: v_mov_b32_e32 v0, s0
-; GFX1010-NEXT: s_cmp_lg_u32 s1, 0
; GFX1010-NEXT: s_addc_u32 s1, s13, s15
+; GFX1010-NEXT: v_mov_b32_e32 v0, s0
; GFX1010-NEXT: s_cselect_b32 s0, -1, 0
; GFX1010-NEXT: v_mov_b32_e32 v1, s1
; GFX1010-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0
@@ -781,10 +774,8 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1030W32-NEXT: v_mov_b32_e32 v2, 0
; GFX1030W32-NEXT: s_waitcnt lgkmcnt(0)
; GFX1030W32-NEXT: s_add_u32 s4, s4, s6
-; GFX1030W32-NEXT: s_cselect_b32 s6, -1, 0
-; GFX1030W32-NEXT: v_mov_b32_e32 v0, s4
-; GFX1030W32-NEXT: s_cmp_lg_u32 s6, 0
; GFX1030W32-NEXT: s_addc_u32 s5, s5, s7
+; GFX1030W32-NEXT: v_mov_b32_e32 v0, s4
; GFX1030W32-NEXT: s_cselect_b32 s4, -1, 0
; GFX1030W32-NEXT: v_mov_b32_e32 v1, s5
; GFX1030W32-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
@@ -798,10 +789,8 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1030W64-NEXT: v_mov_b32_e32 v2, 0
; GFX1030W64-NEXT: s_waitcnt lgkmcnt(0)
; GFX1030W64-NEXT: s_add_u32 s4, s4, s6
-; GFX1030W64-NEXT: s_cselect_b64 s[8:9], -1, 0
-; GFX1030W64-NEXT: v_mov_b32_e32 v0, s4
-; GFX1030W64-NEXT: s_cmp_lg_u64 s[8:9], 0
; GFX1030W64-NEXT: s_addc_u32 s5, s5, s7
+; GFX1030W64-NEXT: v_mov_b32_e32 v0, s4
; GFX1030W64-NEXT: v_mov_b32_e32 v1, s5
; GFX1030W64-NEXT: s_cselect_b64 s[4:5], -1, 0
; GFX1030W64-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[4:5]
@@ -814,10 +803,8 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX11-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_add_u32 s4, s4, s6
-; GFX11-NEXT: s_cselect_b32 s6, -1, 0
-; GFX11-NEXT: v_mov_b32_e32 v0, s4
-; GFX11-NEXT: s_cmp_lg_u32 s6, 0
; GFX11-NEXT: s_addc_u32 s5, s5, s7
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
; GFX11-NEXT: s_cselect_b32 s4, -1, 0
; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s5
; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
@@ -831,10 +818,8 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_add_co_u32 s0, s12, s14
-; GFX1250-NEXT: s_cselect_b32 s1, -1, 0
-; GFX1250-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v0, s0
-; GFX1250-NEXT: s_cmp_lg_u32 s1, 0
; GFX1250-NEXT: s_add_co_ci_u32 s1, s13, s15
+; GFX1250-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v0, s0
; GFX1250-NEXT: s_cselect_b32 s0, -1, 0
; GFX1250-NEXT: v_mov_b32_e32 v1, s1
; GFX1250-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0
@@ -1691,7 +1676,6 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; CISI-NEXT: s_sub_u32 s4, s4, s6
; CISI-NEXT: s_cselect_b64 s[12:13], -1, 0
; CISI-NEXT: s_or_b32 s6, s12, s13
-; CISI-NEXT: s_cmp_lg_u32 s6, 0
; CISI-NEXT: s_subb_u32 s5, s5, s7
; CISI-NEXT: s_mov_b32 s8, s0
; CISI-NEXT: s_mov_b32 s9, s1
@@ -1712,16 +1696,14 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v2, s2
-; VI-NEXT: s_sub_u32 s2, s4, s6
; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: s_sub_u32 s0, s4, s6
; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: s_subb_u32 s1, s5, s7
+; VI-NEXT: v_mov_b32_e32 v4, s0
+; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; VI-NEXT: s_cmp_lg_u64 s[0:1], 0
-; VI-NEXT: s_subb_u32 s0, s5, s7
-; VI-NEXT: v_mov_b32_e32 v4, s2
-; VI-NEXT: v_mov_b32_e32 v5, s0
-; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: v_mov_b32_e32 v3, s3
; VI-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
@@ -1733,12 +1715,10 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX9-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_sub_u32 s2, s12, s14
-; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT: s_subb_u32 s0, s13, s15
-; GFX9-NEXT: v_mov_b32_e32 v0, s2
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: s_sub_u32 s0, s12, s14
+; GFX9-NEXT: s_subb_u32 s1, s13, s15
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[0:1]
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
@@ -1751,10 +1731,8 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1010-NEXT: v_mov_b32_e32 v2, 0
; GFX1010-NEXT: s_waitcnt lgkmcnt(0)
; GFX1010-NEXT: s_sub_u32 s0, s12, s14
-; GFX1010-NEXT: s_cselect_b32 s1, -1, 0
-; GFX1010-NEXT: v_mov_b32_e32 v0, s0
-; GFX1010-NEXT: s_cmp_lg_u32 s1, 0
; GFX1010-NEXT: s_subb_u32 s1, s13, s15
+; GFX1010-NEXT: v_mov_b32_e32 v0, s0
; GFX1010-NEXT: s_cselect_b32 s0, -1, 0
; GFX1010-NEXT: v_mov_b32_e32 v1, s1
; GFX1010-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0
@@ -1768,10 +1746,8 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1030W32-NEXT: v_mov_b32_e32 v2, 0
; GFX1030W32-NEXT: s_waitcnt lgkmcnt(0)
; GFX1030W32-NEXT: s_sub_u32 s4, s4, s6
-; GFX1030W32-NEXT: s_cselect_b32 s6, -1, 0
-; GFX1030W32-NEXT: v_mov_b32_e32 v0, s4
-; GFX1030W32-NEXT: s_cmp_lg_u32 s6, 0
; GFX1030W32-NEXT: s_subb_u32 s5, s5, s7
+; GFX1030W32-NEXT: v_mov_b32_e32 v0, s4
; GFX1030W32-NEXT: s_cselect_b32 s4, -1, 0
; GFX1030W32-NEXT: v_mov_b32_e32 v1, s5
; GFX1030W32-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
@@ -1785,10 +1761,8 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1030W64-NEXT: v_mov_b32_e32 v2, 0
; GFX1030W64-NEXT: s_waitcnt lgkmcnt(0)
; GFX1030W64-NEXT: s_sub_u32 s4, s4, s6
-; GFX1030W64-NEXT: s_cselect_b64 s[8:9], -1, 0
-; GFX1030W64-NEXT: v_mov_b32_e32 v0, s4
-; GFX1030W64-NEXT: s_cmp_lg_u64 s[8:9], 0
; GFX1030W64-NEXT: s_subb_u32 s5, s5, s7
+; GFX1030W64-NEXT: v_mov_b32_e32 v0, s4
; GFX1030W64-NEXT: v_mov_b32_e32 v1, s5
; GFX1030W64-NEXT: s_cselect_b64 s[4:5], -1, 0
; GFX1030W64-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[4:5]
@@ -1801,10 +1775,8 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX11-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_sub_u32 s4, s4, s6
-; GFX11-NEXT: s_cselect_b32 s6, -1, 0
-; GFX11-NEXT: v_mov_b32_e32 v0, s4
-; GFX11-NEXT: s_cmp_lg_u32 s6, 0
; GFX11-NEXT: s_subb_u32 s5, s5, s7
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
; GFX11-NEXT: s_cselect_b32 s4, -1, 0
; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s5
; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
@@ -1818,10 +1790,8 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_sub_co_u32 s0, s12, s14
-; GFX1250-NEXT: s_cselect_b32 s1, -1, 0
-; GFX1250-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v0, s0
-; GFX1250-NEXT: s_cmp_lg_u32 s1, 0
; GFX1250-NEXT: s_sub_co_ci_u32 s1, s13, s15
+; GFX1250-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v0, s0
; GFX1250-NEXT: s_cselect_b32 s0, -1, 0
; GFX1250-NEXT: v_mov_b32_e32 v1, s1
; GFX1250-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0
@@ -2218,49 +2188,46 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; VI-NEXT: s_addc_u32 s6, s7, s9
; VI-NEXT: s_addc_u32 s8, s8, 0
; VI-NEXT: v_readfirstlane_b32 s7, v0
-; VI-NEXT: s_add_u32 s12, s6, s7
-; VI-NEXT: v_mov_b32_e32 v0, s12
+; VI-NEXT: s_add_u32 s10, s6, s7
+; VI-NEXT: v_mov_b32_e32 v0, s10
; VI-NEXT: v_mad_u64_u32 v[0:1], s[6:7], s4, v0, 0
-; VI-NEXT: s_addc_u32 s13, 0, s8
-; VI-NEXT: s_mul_i32 s8, s4, s13
+; VI-NEXT: s_addc_u32 s11, 0, s8
+; VI-NEXT: s_mul_i32 s8, s4, s11
; VI-NEXT: v_readfirstlane_b32 s9, v1
; VI-NEXT: s_add_i32 s8, s9, s8
-; VI-NEXT: s_mul_i32 s9, s5, s12
-; VI-NEXT: s_add_i32 s14, s8, s9
-; VI-NEXT: s_sub_i32 s10, s3, s14
+; VI-NEXT: s_mul_i32 s9, s5, s10
+; VI-NEXT: s_add_i32 s12, s8, s9
+; VI-NEXT: s_sub_i32 s13, s3, s12
; VI-NEXT: v_readfirstlane_b32 s8, v0
-; VI-NEXT: s_sub_u32 s15, s2, s8
+; VI-NEXT: s_sub_u32 s14, s2, s8
; VI-NEXT: s_cselect_b64 s[8:9], -1, 0
-; VI-NEXT: s_cmp_lg_u64 s[8:9], 0
-; VI-NEXT: s_subb_u32 s16, s10, s5
-; VI-NEXT: s_sub_u32 s17, s15, s4
-; VI-NEXT: s_cselect_b64 s[10:11], -1, 0
-; VI-NEXT: s_cmp_lg_u64 s[10:11], 0
-; VI-NEXT: s_subb_u32 s10, s16, 0
-; VI-NEXT: s_cmp_ge_u32 s10, s5
-; VI-NEXT: s_cselect_b32 s11, -1, 0
-; VI-NEXT: s_cmp_ge_u32 s17, s4
+; VI-NEXT: s_subb_u32 s13, s13, s5
+; VI-NEXT: s_sub_u32 s15, s14, s4
+; VI-NEXT: s_subb_u32 s13, s13, 0
+; VI-NEXT: s_cmp_ge_u32 s13, s5
; VI-NEXT: s_cselect_b32 s16, -1, 0
-; VI-NEXT: s_cmp_eq_u32 s10, s5
-; VI-NEXT: s_cselect_b32 s10, s16, s11
-; VI-NEXT: s_add_u32 s11, s12, 1
-; VI-NEXT: s_addc_u32 s16, s13, 0
-; VI-NEXT: s_add_u32 s17, s12, 2
-; VI-NEXT: s_addc_u32 s18, s13, 0
-; VI-NEXT: s_cmp_lg_u32 s10, 0
-; VI-NEXT: s_cselect_b32 s10, s17, s11
-; VI-NEXT: s_cselect_b32 s11, s18, s16
+; VI-NEXT: s_cmp_ge_u32 s15, s4
+; VI-NEXT: s_cselect_b32 s15, -1, 0
+; VI-NEXT: s_cmp_eq_u32 s13, s5
+; VI-NEXT: s_cselect_b32 s13, s15, s16
+; VI-NEXT: s_add_u32 s15, s10, 1
+; VI-NEXT: s_addc_u32 s16, s11, 0
+; VI-NEXT: s_add_u32 s17, s10, 2
+; VI-NEXT: s_addc_u32 s18, s11, 0
+; VI-NEXT: s_cmp_lg_u32 s13, 0
+; VI-NEXT: s_cselect_b32 s13, s17, s15
+; VI-NEXT: s_cselect_b32 s15, s18, s16
; VI-NEXT: s_cmp_lg_u64 s[8:9], 0
-; VI-NEXT: s_subb_u32 s3, s3, s14
+; VI-NEXT: s_subb_u32 s3, s3, s12
; VI-NEXT: s_cmp_ge_u32 s3, s5
; VI-NEXT: s_cselect_b32 s8, -1, 0
-; VI-NEXT: s_cmp_ge_u32 s15, s4
+; VI-NEXT: s_cmp_ge_u32 s14, s4
; VI-NEXT: s_cselect_b32 s9, -1, 0
; VI-NEXT: s_cmp_eq_u32 s3, s5
; VI-NEXT: s_cselect_b32 s3, s9, s8
; VI-NEXT: s_cmp_lg_u32 s3, 0
-; VI-NEXT: s_cselect_b32 s9, s11, s13
-; VI-NEXT: s_cselect_b32 s8, s10, s12
+; VI-NEXT: s_cselect_b32 s9, s15, s11
+; VI-NEXT: s_cselect_b32 s8, s13, s10
; VI-NEXT: s_cbranch_execnz .LBB16_4
; VI-NEXT: .LBB16_2:
; VI-NEXT: v_cvt_f32_u32_e32 v0, s4
@@ -2311,8 +2278,8 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s6
; GFX9-NEXT: v_cvt_f32_u32_e32 v1, s7
-; GFX9-NEXT: s_sub_u32 s10, 0, s6
-; GFX9-NEXT: s_subb_u32 s11, 0, s7
+; GFX9-NEXT: s_sub_u32 s8, 0, s6
+; GFX9-NEXT: s_subb_u32 s9, 0, s7
; GFX9-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0
; GFX9-NEXT: v_rcp_f32_e32 v0, v0
; GFX9-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -2321,109 +2288,102 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX9-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0
; GFX9-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GFX9-NEXT: v_readfirstlane_b32 s12, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v0
-; GFX9-NEXT: s_mul_i32 s9, s10, s12
-; GFX9-NEXT: s_mul_hi_u32 s14, s10, s8
-; GFX9-NEXT: s_mul_i32 s13, s11, s8
-; GFX9-NEXT: s_add_i32 s9, s14, s9
-; GFX9-NEXT: s_add_i32 s9, s9, s13
-; GFX9-NEXT: s_mul_i32 s15, s10, s8
-; GFX9-NEXT: s_mul_i32 s14, s8, s9
-; GFX9-NEXT: s_mul_hi_u32 s16, s8, s15
-; GFX9-NEXT: s_mul_hi_u32 s13, s8, s9
+; GFX9-NEXT: v_readfirstlane_b32 s10, v1
+; GFX9-NEXT: v_readfirstlane_b32 s11, v0
+; GFX9-NEXT: s_mul_i32 s12, s8, s10
+; GFX9-NEXT: s_mul_hi_u32 s14, s8, s11
+; GFX9-NEXT: s_mul_i32 s13, s9, s11
+; GFX9-NEXT: s_add_i32 s12, s14, s12
+; GFX9-NEXT: s_add_i32 s12, s12, s13
+; GFX9-NEXT: s_mul_i32 s15, s8, s11
+; GFX9-NEXT: s_mul_i32 s14, s11, s12
+; GFX9-NEXT: s_mul_hi_u32 s16, s11, s15
+; GFX9-NEXT: s_mul_hi_u32 s13, s11, s12
; GFX9-NEXT: s_add_u32 s14, s16, s14
; GFX9-NEXT: s_addc_u32 s13, 0, s13
-; GFX9-NEXT: s_mul_hi_u32 s17, s12, s15
-; GFX9-NEXT: s_mul_i32 s15, s12, s15
+; GFX9-NEXT: s_mul_hi_u32 s17, s10, s15
+; GFX9-NEXT: s_mul_i32 s15, s10, s15
; GFX9-NEXT: s_add_u32 s14, s14, s15
-; GFX9-NEXT: s_mul_hi_u32 s16, s12, s9
+; GFX9-NEXT: s_mul_hi_u32 s16, s10, s12
; GFX9-NEXT: s_addc_u32 s13, s13, s17
; GFX9-NEXT: s_addc_u32 s14, s16, 0
-; GFX9-NEXT: s_mul_i32 s9, s12, s9
-; GFX9-NEXT: s_add_u32 s9, s13, s9
+; GFX9-NEXT: s_mul_i32 s12, s10, s12
+; GFX9-NEXT: s_add_u32 s12, s13, s12
; GFX9-NEXT: s_addc_u32 s13, 0, s14
-; GFX9-NEXT: s_add_u32 s14, s8, s9
-; GFX9-NEXT: s_cselect_b64 s[8:9], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[8:9], 0
-; GFX9-NEXT: s_addc_u32 s12, s12, s13
-; GFX9-NEXT: s_mul_i32 s8, s10, s12
-; GFX9-NEXT: s_mul_hi_u32 s9, s10, s14
-; GFX9-NEXT: s_add_i32 s8, s9, s8
-; GFX9-NEXT: s_mul_i32 s11, s11, s14
-; GFX9-NEXT: s_add_i32 s8, s8, s11
-; GFX9-NEXT: s_mul_i32 s10, s10, s14
-; GFX9-NEXT: s_mul_hi_u32 s11, s12, s10
-; GFX9-NEXT: s_mul_i32 s13, s12, s10
-; GFX9-NEXT: s_mul_i32 s16, s14, s8
-; GFX9-NEXT: s_mul_hi_u32 s10, s14, s10
-; GFX9-NEXT: s_mul_hi_u32 s15, s14, s8
-; GFX9-NEXT: s_add_u32 s10, s10, s16
+; GFX9-NEXT: s_add_u32 s11, s11, s12
+; GFX9-NEXT: s_addc_u32 s10, s10, s13
+; GFX9-NEXT: s_mul_i32 s12, s8, s10
+; GFX9-NEXT: s_mul_hi_u32 s13, s8, s11
+; GFX9-NEXT: s_add_i32 s12, s13, s12
+; GFX9-NEXT: s_mul_i32 s9, s9, s11
+; GFX9-NEXT: s_add_i32 s12, s12, s9
+; GFX9-NEXT: s_mul_i32 s8, s8, s11
+; GFX9-NEXT: s_mul_hi_u32 s13, s10, s8
+; GFX9-NEXT: s_mul_i32 s14, s10, s8
+; GFX9-NEXT: s_mul_i32 s16, s11, s12
+; GFX9-NEXT: s_mul_hi_u32 s8, s11, s8
+; GFX9-NEXT: s_mul_hi_u32 s15, s11, s12
+; GFX9-NEXT: s_add_u32 s8, s8, s16
; GFX9-NEXT: s_addc_u32 s15, 0, s15
-; GFX9-NEXT: s_add_u32 s10, s10, s13
-; GFX9-NEXT: s_mul_hi_u32 s9, s12, s8
-; GFX9-NEXT: s_addc_u32 s10, s15, s11
+; GFX9-NEXT: s_add_u32 s8, s8, s14
+; GFX9-NEXT: s_mul_hi_u32 s9, s10, s12
+; GFX9-NEXT: s_addc_u32 s8, s15, s13
; GFX9-NEXT: s_addc_u32 s9, s9, 0
-; GFX9-NEXT: s_mul_i32 s8, s12, s8
-; GFX9-NEXT: s_add_u32 s8, s10, s8
-; GFX9-NEXT: s_addc_u32 s10, 0, s9
-; GFX9-NEXT: s_add_u32 s11, s14, s8
-; GFX9-NEXT: s_cselect_b64 s[8:9], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[8:9], 0
-; GFX9-NEXT: s_addc_u32 s8, s12, s10
-; GFX9-NEXT: s_mul_i32 s10, s2, s8
-; GFX9-NEXT: s_mul_hi_u32 s12, s2, s11
-; GFX9-NEXT: s_mul_hi_u32 s9, s2, s8
-; GFX9-NEXT: s_add_u32 s10, s12, s10
+; GFX9-NEXT: s_mul_i32 s12, s10, s12
+; GFX9-NEXT: s_add_u32 s8, s8, s12
; GFX9-NEXT: s_addc_u32 s9, 0, s9
-; GFX9-NEXT: s_mul_hi_u32 s13, s3, s11
-; GFX9-NEXT: s_mul_i32 s11, s3, s11
-; GFX9-NEXT: s_add_u32 s10, s10, s11
-; GFX9-NEXT: s_mul_hi_u32 s12, s3, s8
-; GFX9-NEXT: s_addc_u32 s9, s9, s13
-; GFX9-NEXT: s_addc_u32 s10, s12, 0
+; GFX9-NEXT: s_add_u32 s8, s11, s8
+; GFX9-NEXT: s_addc_u32 s9, s10, s9
+; GFX9-NEXT: s_mul_i32 s11, s2, s9
+; GFX9-NEXT: s_mul_hi_u32 s12, s2, s8
+; GFX9-NEXT: s_mul_hi_u32 s10, s2, s9
+; GFX9-NEXT: s_add_u32 s11, s12, s11
+; GFX9-NEXT: s_addc_u32 s10, 0, s10
+; GFX9-NEXT: s_mul_hi_u32 s13, s3, s8
; GFX9-NEXT: s_mul_i32 s8, s3, s8
-; GFX9-NEXT: s_add_u32 s12, s9, s8
-; GFX9-NEXT: s_addc_u32 s13, 0, s10
-; GFX9-NEXT: s_mul_i32 s8, s6, s13
-; GFX9-NEXT: s_mul_hi_u32 s9, s6, s12
+; GFX9-NEXT: s_add_u32 s8, s11, s8
+; GFX9-NEXT: s_mul_hi_u32 s12, s3, s9
+; GFX9-NEXT: s_addc_u32 s8, s10, s13
+; GFX9-NEXT: s_addc_u32 s10, s12, 0
+; GFX9-NEXT: s_mul_i32 s9, s3, s9
+; GFX9-NEXT: s_add_u32 s11, s8, s9
+; GFX9-NEXT: s_addc_u32 s10, 0, s10
+; GFX9-NEXT: s_mul_i32 s8, s6, s10
+; GFX9-NEXT: s_mul_hi_u32 s9, s6, s11
; GFX9-NEXT: s_add_i32 s8, s9, s8
-; GFX9-NEXT: s_mul_i32 s9, s7, s12
-; GFX9-NEXT: s_add_i32 s14, s8, s9
-; GFX9-NEXT: s_sub_i32 s10, s3, s14
-; GFX9-NEXT: s_mul_i32 s8, s6, s12
-; GFX9-NEXT: s_sub_u32 s15, s2, s8
+; GFX9-NEXT: s_mul_i32 s9, s7, s11
+; GFX9-NEXT: s_add_i32 s12, s8, s9
+; GFX9-NEXT: s_sub_i32 s13, s3, s12
+; GFX9-NEXT: s_mul_i32 s8, s6, s11
+; GFX9-NEXT: s_sub_u32 s14, s2, s8
; GFX9-NEXT: s_cselect_b64 s[8:9], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[8:9], 0
-; GFX9-NEXT: s_subb_u32 s16, s10, s7
-; GFX9-NEXT: s_sub_u32 s17, s15, s6
-; GFX9-NEXT: s_cselect_b64 s[10:11], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[10:11], 0
-; GFX9-NEXT: s_subb_u32 s10, s16, 0
-; GFX9-NEXT: s_cmp_ge_u32 s10, s7
-; GFX9-NEXT: s_cselect_b32 s11, -1, 0
-; GFX9-NEXT: s_cmp_ge_u32 s17, s6
+; GFX9-NEXT: s_subb_u32 s13, s13, s7
+; GFX9-NEXT: s_sub_u32 s15, s14, s6
+; GFX9-NEXT: s_subb_u32 s13, s13, 0
+; GFX9-NEXT: s_cmp_ge_u32 s13, s7
; GFX9-NEXT: s_cselect_b32 s16, -1, 0
-; GFX9-NEXT: s_cmp_eq_u32 s10, s7
-; GFX9-NEXT: s_cselect_b32 s10, s16, s11
-; GFX9-NEXT: s_add_u32 s11, s12, 1
-; GFX9-NEXT: s_addc_u32 s16, s13, 0
-; GFX9-NEXT: s_add_u32 s17, s12, 2
-; GFX9-NEXT: s_addc_u32 s18, s13, 0
-; GFX9-NEXT: s_cmp_lg_u32 s10, 0
-; GFX9-NEXT: s_cselect_b32 s10, s17, s11
-; GFX9-NEXT: s_cselect_b32 s11, s18, s16
+; GFX9-NEXT: s_cmp_ge_u32 s15, s6
+; GFX9-NEXT: s_cselect_b32 s15, -1, 0
+; GFX9-NEXT: s_cmp_eq_u32 s13, s7
+; GFX9-NEXT: s_cselect_b32 s13, s15, s16
+; GFX9-NEXT: s_add_u32 s15, s11, 1
+; GFX9-NEXT: s_addc_u32 s16, s10, 0
+; GFX9-NEXT: s_add_u32 s17, s11, 2
+; GFX9-NEXT: s_addc_u32 s18, s10, 0
+; GFX9-NEXT: s_cmp_lg_u32 s13, 0
+; GFX9-NEXT: s_cselect_b32 s13, s17, s15
+; GFX9-NEXT: s_cselect_b32 s15, s18, s16
; GFX9-NEXT: s_cmp_lg_u64 s[8:9], 0
-; GFX9-NEXT: s_subb_u32 s3, s3, s14
+; GFX9-NEXT: s_subb_u32 s3, s3, s12
; GFX9-NEXT: s_cmp_ge_u32 s3, s7
; GFX9-NEXT: s_cselect_b32 s8, -1, 0
-; GFX9-NEXT: s_cmp_ge_u32 s15, s6
+; GFX9-NEXT: s_cmp_ge_u32 s14, s6
; GFX9-NEXT: s_cselect_b32 s9, -1, 0
; GFX9-NEXT: s_cmp_eq_u32 s3, s7
; GFX9-NEXT: s_cselect_b32 s3, s9, s8
; GFX9-NEXT: s_cmp_lg_u32 s3, 0
-; GFX9-NEXT: s_cselect_b32 s9, s11, s13
-; GFX9-NEXT: s_cselect_b32 s8, s10, s12
+; GFX9-NEXT: s_cselect_b32 s9, s15, s10
+; GFX9-NEXT: s_cselect_b32 s8, s13, s11
; GFX9-NEXT: s_cbranch_execnz .LBB16_3
; GFX9-NEXT: .LBB16_2:
; GFX9-NEXT: v_cvt_f32_u32_e32 v0, s6
@@ -2503,44 +2463,40 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1010-NEXT: s_add_u32 s11, s12, s11
; GFX1010-NEXT: s_addc_u32 s12, 0, s13
; GFX1010-NEXT: s_add_u32 s8, s8, s11
-; GFX1010-NEXT: s_cselect_b32 s11, -1, 0
-; GFX1010-NEXT: s_mul_hi_u32 s13, s9, s8
-; GFX1010-NEXT: s_cmp_lg_u32 s11, 0
-; GFX1010-NEXT: s_mul_i32 s11, s9, s8
; GFX1010-NEXT: s_addc_u32 s5, s5, s12
-; GFX1010-NEXT: s_mul_i32 s10, s10, s8
+; GFX1010-NEXT: s_mul_hi_u32 s11, s9, s8
+; GFX1010-NEXT: s_mul_i32 s12, s9, s8
; GFX1010-NEXT: s_mul_i32 s9, s9, s5
-; GFX1010-NEXT: s_mul_hi_u32 s12, s8, s11
-; GFX1010-NEXT: s_add_i32 s9, s13, s9
-; GFX1010-NEXT: s_mul_hi_u32 s13, s5, s11
+; GFX1010-NEXT: s_mul_i32 s10, s10, s8
+; GFX1010-NEXT: s_add_i32 s9, s11, s9
+; GFX1010-NEXT: s_mul_i32 s11, s5, s12
; GFX1010-NEXT: s_add_i32 s9, s9, s10
-; GFX1010-NEXT: s_mul_i32 s10, s5, s11
+; GFX1010-NEXT: s_mul_hi_u32 s10, s8, s12
; GFX1010-NEXT: s_mul_i32 s15, s8, s9
; GFX1010-NEXT: s_mul_hi_u32 s14, s8, s9
-; GFX1010-NEXT: s_add_u32 s12, s12, s15
+; GFX1010-NEXT: s_add_u32 s10, s10, s15
+; GFX1010-NEXT: s_mul_hi_u32 s13, s5, s12
; GFX1010-NEXT: s_addc_u32 s14, 0, s14
-; GFX1010-NEXT: s_mul_hi_u32 s11, s5, s9
-; GFX1010-NEXT: s_add_u32 s10, s12, s10
+; GFX1010-NEXT: s_mul_hi_u32 s12, s5, s9
+; GFX1010-NEXT: s_add_u32 s10, s10, s11
; GFX1010-NEXT: s_mul_i32 s9, s5, s9
; GFX1010-NEXT: s_addc_u32 s10, s14, s13
-; GFX1010-NEXT: s_addc_u32 s11, s11, 0
+; GFX1010-NEXT: s_addc_u32 s11, s12, 0
; GFX1010-NEXT: s_add_u32 s9, s10, s9
; GFX1010-NEXT: s_addc_u32 s10, 0, s11
; GFX1010-NEXT: s_add_u32 s8, s8, s9
-; GFX1010-NEXT: s_cselect_b32 s9, -1, 0
-; GFX1010-NEXT: s_mul_hi_u32 s11, s2, s8
-; GFX1010-NEXT: s_cmp_lg_u32 s9, 0
-; GFX1010-NEXT: s_mul_hi_u32 s9, s3, s8
; GFX1010-NEXT: s_addc_u32 s5, s5, s10
-; GFX1010-NEXT: s_mul_i32 s8, s3, s8
+; GFX1010-NEXT: s_mul_hi_u32 s9, s2, s8
; GFX1010-NEXT: s_mul_i32 s12, s2, s5
-; GFX1010-NEXT: s_mul_hi_u32 s10, s2, s5
-; GFX1010-NEXT: s_add_u32 s11, s11, s12
-; GFX1010-NEXT: s_addc_u32 s10, 0, s10
+; GFX1010-NEXT: s_mul_hi_u32 s11, s2, s5
+; GFX1010-NEXT: s_mul_hi_u32 s10, s3, s8
+; GFX1010-NEXT: s_mul_i32 s8, s3, s8
+; GFX1010-NEXT: s_add_u32 s9, s9, s12
+; GFX1010-NEXT: s_addc_u32 s11, 0, s11
; GFX1010-NEXT: s_mul_hi_u32 s13, s3, s5
-; GFX1010-NEXT: s_add_u32 s8, s11, s8
+; GFX1010-NEXT: s_add_u32 s8, s9, s8
; GFX1010-NEXT: s_mul_i32 s5, s3, s5
-; GFX1010-NEXT: s_addc_u32 s8, s10, s9
+; GFX1010-NEXT: s_addc_u32 s8, s11, s10
; GFX1010-NEXT: s_addc_u32 s9, s13, 0
; GFX1010-NEXT: s_add_u32 s5, s8, s5
; GFX1010-NEXT: s_addc_u32 s8, 0, s9
@@ -2553,11 +2509,8 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1010-NEXT: s_sub_i32 s11, s3, s9
; GFX1010-NEXT: s_sub_u32 s10, s2, s10
; GFX1010-NEXT: s_cselect_b32 s12, -1, 0
-; GFX1010-NEXT: s_cmp_lg_u32 s12, 0
; GFX1010-NEXT: s_subb_u32 s11, s11, s7
; GFX1010-NEXT: s_sub_u32 s13, s10, s6
-; GFX1010-NEXT: s_cselect_b32 s14, -1, 0
-; GFX1010-NEXT: s_cmp_lg_u32 s14, 0
; GFX1010-NEXT: s_subb_u32 s11, s11, 0
; GFX1010-NEXT: s_cmp_ge_u32 s11, s7
; GFX1010-NEXT: s_cselect_b32 s14, -1, 0
@@ -2663,44 +2616,40 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1030W32-NEXT: s_add_u32 s11, s12, s11
; GFX1030W32-NEXT: s_addc_u32 s12, 0, s13
; GFX1030W32-NEXT: s_add_u32 s8, s8, s11
-; GFX1030W32-NEXT: s_cselect_b32 s11, -1, 0
-; GFX1030W32-NEXT: s_mul_hi_u32 s13, s9, s8
-; GFX1030W32-NEXT: s_cmp_lg_u32 s11, 0
-; GFX1030W32-NEXT: s_mul_i32 s11, s9, s8
; GFX1030W32-NEXT: s_addc_u32 s7, s7, s12
-; GFX1030W32-NEXT: s_mul_i32 s10, s10, s8
+; GFX1030W32-NEXT: s_mul_hi_u32 s11, s9, s8
+; GFX1030W32-NEXT: s_mul_i32 s12, s9, s8
; GFX1030W32-NEXT: s_mul_i32 s9, s9, s7
-; GFX1030W32-NEXT: s_mul_hi_u32 s12, s8, s11
-; GFX1030W32-NEXT: s_add_i32 s9, s13, s9
-; GFX1030W32-NEXT: s_mul_hi_u32 s13, s7, s11
+; GFX1030W32-NEXT: s_mul_i32 s10, s10, s8
+; GFX1030W32-NEXT: s_add_i32 s9, s11, s9
+; GFX1030W32-NEXT: s_mul_i32 s11, s7, s12
; GFX1030W32-NEXT: s_add_i32 s9, s9, s10
-; GFX1030W32-NEXT: s_mul_i32 s10, s7, s11
+; GFX1030W32-NEXT: s_mul_hi_u32 s10, s8, s12
; GFX1030W32-NEXT: s_mul_i32 s15, s8, s9
; GFX1030W32-NEXT: s_mul_hi_u32 s14, s8, s9
-; GFX1030W32-NEXT: s_add_u32 s12, s12, s15
+; GFX1030W32-NEXT: s_add_u32 s10, s10, s15
+; GFX1030W32-NEXT: s_mul_hi_u32 s13, s7, s12
; GFX1030W32-NEXT: s_addc_u32 s14, 0, s14
-; GFX1030W32-NEXT: s_mul_hi_u32 s11, s7, s9
-; GFX1030W32-NEXT: s_add_u32 s10, s12, s10
+; GFX1030W32-NEXT: s_mul_hi_u32 s12, s7, s9
+; GFX1030W32-NEXT: s_add_u32 s10, s10, s11
; GFX1030W32-NEXT: s_mul_i32 s9, s7, s9
; GFX1030W32-NEXT: s_addc_u32 s10, s14, s13
-; GFX1030W32-NEXT: s_addc_u32 s11, s11, 0
+; GFX1030W32-NEXT: s_addc_u32 s11, s12, 0
; GFX1030W32-NEXT: s_add_u32 s9, s10, s9
; GFX1030W32-NEXT: s_addc_u32 s10, 0, s11
; GFX1030W32-NEXT: s_add_u32 s8, s8, s9
-; GFX1030W32-NEXT: s_cselect_b32 s9, -1, 0
-; GFX1030W32-NEXT: s_mul_hi_u32 s11, s2, s8
-; GFX1030W32-NEXT: s_cmp_lg_u32 s9, 0
-; GFX1030W32-NEXT: s_mul_hi_u32 s9, s3, s8
; GFX1030W32-NEXT: s_addc_u32 s7, s7, s10
-; GFX1030W32-NEXT: s_mul_i32 s8, s3, s8
+; GFX1030W32-NEXT: s_mul_hi_u32 s9, s2, s8
; GFX1030W32-NEXT: s_mul_i32 s12, s2, s7
-; GFX1030W32-NEXT: s_mul_hi_u32 s10, s2, s7
-; GFX1030W32-NEXT: s_add_u32 s11, s11, s12
-; GFX1030W32-NEXT: s_addc_u32 s10, 0, s10
+; GFX1030W32-NEXT: s_mul_hi_u32 s11, s2, s7
+; GFX1030W32-NEXT: s_mul_hi_u32 s10, s3, s8
+; GFX1030W32-NEXT: s_mul_i32 s8, s3, s8
+; GFX1030W32-NEXT: s_add_u32 s9, s9, s12
+; GFX1030W32-NEXT: s_addc_u32 s11, 0, s11
; GFX1030W32-NEXT: s_mul_hi_u32 s13, s3, s7
-; GFX1030W32-NEXT: s_add_u32 s8, s11, s8
+; GFX1030W32-NEXT: s_add_u32 s8, s9, s8
; GFX1030W32-NEXT: s_mul_i32 s7, s3, s7
-; GFX1030W32-NEXT: s_addc_u32 s8, s10, s9
+; GFX1030W32-NEXT: s_addc_u32 s8, s11, s10
; GFX1030W32-NEXT: s_addc_u32 s9, s13, 0
; GFX1030W32-NEXT: s_add_u32 s7, s8, s7
; GFX1030W32-NEXT: s_addc_u32 s8, 0, s9
@@ -2713,11 +2662,8 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1030W32-NEXT: s_sub_i32 s11, s3, s9
; GFX1030W32-NEXT: s_sub_u32 s10, s2, s10
; GFX1030W32-NEXT: s_cselect_b32 s12, -1, 0
-; GFX1030W32-NEXT: s_cmp_lg_u32 s12, 0
; GFX1030W32-NEXT: s_subb_u32 s11, s11, s5
; GFX1030W32-NEXT: s_sub_u32 s13, s10, s4
-; GFX1030W32-NEXT: s_cselect_b32 s14, -1, 0
-; GFX1030W32-NEXT: s_cmp_lg_u32 s14, 0
; GFX1030W32-NEXT: s_subb_u32 s11, s11, 0
; GFX1030W32-NEXT: s_cmp_ge_u32 s11, s5
; GFX1030W32-NEXT: s_cselect_b32 s14, -1, 0
@@ -2790,8 +2736,8 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1030W64-NEXT: ; %bb.1:
; GFX1030W64-NEXT: v_cvt_f32_u32_e32 v0, s4
; GFX1030W64-NEXT: v_cvt_f32_u32_e32 v1, s5
-; GFX1030W64-NEXT: s_sub_u32 s9, 0, s4
-; GFX1030W64-NEXT: s_subb_u32 s10, 0, s5
+; GFX1030W64-NEXT: s_sub_u32 s8, 0, s4
+; GFX1030W64-NEXT: s_subb_u32 s9, 0, s5
; GFX1030W64-NEXT: v_fmamk_f32 v0, v1, 0x4f800000, v0
; GFX1030W64-NEXT: v_rcp_f32_e32 v0, v0
; GFX1030W64-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -2800,109 +2746,102 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1030W64-NEXT: v_fmamk_f32 v0, v1, 0xcf800000, v0
; GFX1030W64-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX1030W64-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GFX1030W64-NEXT: v_readfirstlane_b32 s8, v1
-; GFX1030W64-NEXT: v_readfirstlane_b32 s6, v0
-; GFX1030W64-NEXT: s_mul_i32 s7, s9, s8
-; GFX1030W64-NEXT: s_mul_hi_u32 s12, s9, s6
-; GFX1030W64-NEXT: s_mul_i32 s11, s10, s6
-; GFX1030W64-NEXT: s_add_i32 s7, s12, s7
-; GFX1030W64-NEXT: s_mul_i32 s13, s9, s6
-; GFX1030W64-NEXT: s_add_i32 s7, s7, s11
-; GFX1030W64-NEXT: s_mul_hi_u32 s12, s6, s13
-; GFX1030W64-NEXT: s_mul_i32 s15, s6, s7
-; GFX1030W64-NEXT: s_mul_hi_u32 s14, s8, s13
-; GFX1030W64-NEXT: s_mul_i32 s11, s8, s13
-; GFX1030W64-NEXT: s_mul_hi_u32 s13, s6, s7
+; GFX1030W64-NEXT: v_readfirstlane_b32 s6, v1
+; GFX1030W64-NEXT: v_readfirstlane_b32 s7, v0
+; GFX1030W64-NEXT: s_mul_i32 s10, s8, s6
+; GFX1030W64-NEXT: s_mul_hi_u32 s12, s8, s7
+; GFX1030W64-NEXT: s_mul_i32 s11, s9, s7
+; GFX1030W64-NEXT: s_add_i32 s10, s12, s10
+; GFX1030W64-NEXT: s_mul_i32 s13, s8, s7
+; GFX1030W64-NEXT: s_add_i32 s10, s10, s11
+; GFX1030W64-NEXT: s_mul_hi_u32 s12, s7, s13
+; GFX1030W64-NEXT: s_mul_i32 s15, s7, s10
+; GFX1030W64-NEXT: s_mul_hi_u32 s14, s6, s13
+; GFX1030W64-NEXT: s_mul_i32 s11, s6, s13
+; GFX1030W64-NEXT: s_mul_hi_u32 s13, s7, s10
; GFX1030W64-NEXT: s_add_u32 s12, s12, s15
; GFX1030W64-NEXT: s_addc_u32 s13, 0, s13
-; GFX1030W64-NEXT: s_mul_hi_u32 s16, s8, s7
+; GFX1030W64-NEXT: s_mul_hi_u32 s16, s6, s10
; GFX1030W64-NEXT: s_add_u32 s11, s12, s11
-; GFX1030W64-NEXT: s_mul_i32 s7, s8, s7
+; GFX1030W64-NEXT: s_mul_i32 s10, s6, s10
; GFX1030W64-NEXT: s_addc_u32 s11, s13, s14
; GFX1030W64-NEXT: s_addc_u32 s12, s16, 0
-; GFX1030W64-NEXT: s_add_u32 s7, s11, s7
+; GFX1030W64-NEXT: s_add_u32 s10, s11, s10
; GFX1030W64-NEXT: s_addc_u32 s11, 0, s12
-; GFX1030W64-NEXT: s_add_u32 s12, s6, s7
-; GFX1030W64-NEXT: s_cselect_b64 s[6:7], -1, 0
-; GFX1030W64-NEXT: s_mul_hi_u32 s13, s9, s12
-; GFX1030W64-NEXT: s_cmp_lg_u64 s[6:7], 0
-; GFX1030W64-NEXT: s_mul_i32 s6, s9, s12
-; GFX1030W64-NEXT: s_addc_u32 s8, s8, s11
-; GFX1030W64-NEXT: s_mul_i32 s10, s10, s12
-; GFX1030W64-NEXT: s_mul_i32 s9, s9, s8
-; GFX1030W64-NEXT: s_mul_hi_u32 s7, s12, s6
-; GFX1030W64-NEXT: s_add_i32 s9, s13, s9
-; GFX1030W64-NEXT: s_mul_hi_u32 s11, s8, s6
-; GFX1030W64-NEXT: s_add_i32 s9, s9, s10
-; GFX1030W64-NEXT: s_mul_i32 s6, s8, s6
-; GFX1030W64-NEXT: s_mul_i32 s14, s12, s9
-; GFX1030W64-NEXT: s_mul_hi_u32 s13, s12, s9
-; GFX1030W64-NEXT: s_add_u32 s7, s7, s14
+; GFX1030W64-NEXT: s_add_u32 s7, s7, s10
+; GFX1030W64-NEXT: s_addc_u32 s6, s6, s11
+; GFX1030W64-NEXT: s_mul_hi_u32 s10, s8, s7
+; GFX1030W64-NEXT: s_mul_i32 s11, s8, s7
+; GFX1030W64-NEXT: s_mul_i32 s8, s8, s6
+; GFX1030W64-NEXT: s_mul_i32 s9, s9, s7
+; GFX1030W64-NEXT: s_add_i32 s8, s10, s8
+; GFX1030W64-NEXT: s_mul_i32 s10, s6, s11
+; GFX1030W64-NEXT: s_add_i32 s8, s8, s9
+; GFX1030W64-NEXT: s_mul_hi_u32 s9, s7, s11
+; GFX1030W64-NEXT: s_mul_i32 s14, s7, s8
+; GFX1030W64-NEXT: s_mul_hi_u32 s13, s7, s8
+; GFX1030W64-NEXT: s_add_u32 s9, s9, s14
+; GFX1030W64-NEXT: s_mul_hi_u32 s12, s6, s11
; GFX1030W64-NEXT: s_addc_u32 s13, 0, s13
-; GFX1030W64-NEXT: s_mul_hi_u32 s10, s8, s9
-; GFX1030W64-NEXT: s_add_u32 s6, s7, s6
-; GFX1030W64-NEXT: s_mul_i32 s9, s8, s9
-; GFX1030W64-NEXT: s_addc_u32 s6, s13, s11
-; GFX1030W64-NEXT: s_addc_u32 s7, s10, 0
-; GFX1030W64-NEXT: s_add_u32 s6, s6, s9
-; GFX1030W64-NEXT: s_addc_u32 s9, 0, s7
-; GFX1030W64-NEXT: s_add_u32 s10, s12, s6
-; GFX1030W64-NEXT: s_cselect_b64 s[6:7], -1, 0
-; GFX1030W64-NEXT: s_mul_hi_u32 s11, s2, s10
-; GFX1030W64-NEXT: s_cmp_lg_u64 s[6:7], 0
-; GFX1030W64-NEXT: s_mul_hi_u32 s6, s3, s10
-; GFX1030W64-NEXT: s_addc_u32 s7, s8, s9
-; GFX1030W64-NEXT: s_mul_i32 s8, s3, s10
-; GFX1030W64-NEXT: s_mul_i32 s10, s2, s7
-; GFX1030W64-NEXT: s_mul_hi_u32 s9, s2, s7
-; GFX1030W64-NEXT: s_add_u32 s10, s11, s10
-; GFX1030W64-NEXT: s_addc_u32 s9, 0, s9
-; GFX1030W64-NEXT: s_mul_hi_u32 s12, s3, s7
-; GFX1030W64-NEXT: s_add_u32 s8, s10, s8
+; GFX1030W64-NEXT: s_mul_hi_u32 s11, s6, s8
+; GFX1030W64-NEXT: s_add_u32 s9, s9, s10
+; GFX1030W64-NEXT: s_mul_i32 s8, s6, s8
+; GFX1030W64-NEXT: s_addc_u32 s9, s13, s12
+; GFX1030W64-NEXT: s_addc_u32 s10, s11, 0
+; GFX1030W64-NEXT: s_add_u32 s8, s9, s8
+; GFX1030W64-NEXT: s_addc_u32 s9, 0, s10
+; GFX1030W64-NEXT: s_add_u32 s7, s7, s8
+; GFX1030W64-NEXT: s_addc_u32 s6, s6, s9
+; GFX1030W64-NEXT: s_mul_hi_u32 s8, s2, s7
+; GFX1030W64-NEXT: s_mul_i32 s11, s2, s6
+; GFX1030W64-NEXT: s_mul_hi_u32 s10, s2, s6
+; GFX1030W64-NEXT: s_mul_hi_u32 s9, s3, s7
; GFX1030W64-NEXT: s_mul_i32 s7, s3, s7
-; GFX1030W64-NEXT: s_addc_u32 s6, s9, s6
+; GFX1030W64-NEXT: s_add_u32 s8, s8, s11
+; GFX1030W64-NEXT: s_addc_u32 s10, 0, s10
+; GFX1030W64-NEXT: s_mul_hi_u32 s12, s3, s6
+; GFX1030W64-NEXT: s_add_u32 s7, s8, s7
+; GFX1030W64-NEXT: s_mul_i32 s6, s3, s6
+; GFX1030W64-NEXT: s_addc_u32 s7, s10, s9
; GFX1030W64-NEXT: s_addc_u32 s8, s12, 0
-; GFX1030W64-NEXT: s_add_u32 s10, s6, s7
+; GFX1030W64-NEXT: s_add_u32 s10, s7, s6
; GFX1030W64-NEXT: s_addc_u32 s11, 0, s8
; GFX1030W64-NEXT: s_mul_hi_u32 s6, s4, s10
; GFX1030W64-NEXT: s_mul_i32 s7, s4, s11
; GFX1030W64-NEXT: s_mul_i32 s8, s5, s10
; GFX1030W64-NEXT: s_add_i32 s6, s6, s7
-; GFX1030W64-NEXT: s_add_i32 s12, s6, s8
+; GFX1030W64-NEXT: s_add_i32 s8, s6, s8
; GFX1030W64-NEXT: s_mul_i32 s6, s4, s10
-; GFX1030W64-NEXT: s_sub_i32 s8, s3, s12
-; GFX1030W64-NEXT: s_sub_u32 s13, s2, s6
+; GFX1030W64-NEXT: s_sub_i32 s9, s3, s8
+; GFX1030W64-NEXT: s_sub_u32 s12, s2, s6
; GFX1030W64-NEXT: s_cselect_b64 s[6:7], -1, 0
-; GFX1030W64-NEXT: s_cmp_lg_u64 s[6:7], 0
-; GFX1030W64-NEXT: s_subb_u32 s14, s8, s5
-; GFX1030W64-NEXT: s_sub_u32 s15, s13, s4
-; GFX1030W64-NEXT: s_cselect_b64 s[8:9], -1, 0
-; GFX1030W64-NEXT: s_cmp_lg_u64 s[8:9], 0
-; GFX1030W64-NEXT: s_subb_u32 s8, s14, 0
-; GFX1030W64-NEXT: s_cmp_ge_u32 s8, s5
-; GFX1030W64-NEXT: s_cselect_b32 s9, -1, 0
-; GFX1030W64-NEXT: s_cmp_ge_u32 s15, s4
+; GFX1030W64-NEXT: s_subb_u32 s9, s9, s5
+; GFX1030W64-NEXT: s_sub_u32 s13, s12, s4
+; GFX1030W64-NEXT: s_subb_u32 s9, s9, 0
+; GFX1030W64-NEXT: s_cmp_ge_u32 s9, s5
; GFX1030W64-NEXT: s_cselect_b32 s14, -1, 0
-; GFX1030W64-NEXT: s_cmp_eq_u32 s8, s5
-; GFX1030W64-NEXT: s_cselect_b32 s8, s14, s9
-; GFX1030W64-NEXT: s_add_u32 s9, s10, 1
+; GFX1030W64-NEXT: s_cmp_ge_u32 s13, s4
+; GFX1030W64-NEXT: s_cselect_b32 s13, -1, 0
+; GFX1030W64-NEXT: s_cmp_eq_u32 s9, s5
+; GFX1030W64-NEXT: s_cselect_b32 s9, s13, s14
+; GFX1030W64-NEXT: s_add_u32 s13, s10, 1
; GFX1030W64-NEXT: s_addc_u32 s14, s11, 0
; GFX1030W64-NEXT: s_add_u32 s15, s10, 2
; GFX1030W64-NEXT: s_addc_u32 s16, s11, 0
-; GFX1030W64-NEXT: s_cmp_lg_u32 s8, 0
-; GFX1030W64-NEXT: s_cselect_b32 s15, s15, s9
+; GFX1030W64-NEXT: s_cmp_lg_u32 s9, 0
+; GFX1030W64-NEXT: s_cselect_b32 s13, s15, s13
; GFX1030W64-NEXT: s_cselect_b32 s14, s16, s14
; GFX1030W64-NEXT: s_cmp_lg_u64 s[6:7], 0
-; GFX1030W64-NEXT: s_subb_u32 s3, s3, s12
+; GFX1030W64-NEXT: s_subb_u32 s3, s3, s8
; GFX1030W64-NEXT: s_cmp_ge_u32 s3, s5
; GFX1030W64-NEXT: s_cselect_b32 s6, -1, 0
-; GFX1030W64-NEXT: s_cmp_ge_u32 s13, s4
+; GFX1030W64-NEXT: s_cmp_ge_u32 s12, s4
; GFX1030W64-NEXT: s_cselect_b32 s7, -1, 0
; GFX1030W64-NEXT: s_cmp_eq_u32 s3, s5
; GFX1030W64-NEXT: s_cselect_b32 s3, s7, s6
; GFX1030W64-NEXT: s_cmp_lg_u32 s3, 0
; GFX1030W64-NEXT: s_cselect_b32 s7, s14, s11
-; GFX1030W64-NEXT: s_cselect_b32 s6, s15, s10
+; GFX1030W64-NEXT: s_cselect_b32 s6, s13, s10
; GFX1030W64-NEXT: s_cbranch_execnz .LBB16_3
; GFX1030W64-NEXT: .LBB16_2:
; GFX1030W64-NEXT: v_cvt_f32_u32_e32 v0, s4
@@ -2988,44 +2927,40 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX11-NEXT: s_add_u32 s11, s12, s11
; GFX11-NEXT: s_addc_u32 s12, 0, s13
; GFX11-NEXT: s_add_u32 s8, s8, s11
-; GFX11-NEXT: s_cselect_b32 s11, -1, 0
-; GFX11-NEXT: s_mul_hi_u32 s13, s9, s8
-; GFX11-NEXT: s_cmp_lg_u32 s11, 0
-; GFX11-NEXT: s_mul_i32 s11, s9, s8
; GFX11-NEXT: s_addc_u32 s7, s7, s12
-; GFX11-NEXT: s_mul_i32 s10, s10, s8
+; GFX11-NEXT: s_mul_hi_u32 s11, s9, s8
+; GFX11-NEXT: s_mul_i32 s12, s9, s8
; GFX11-NEXT: s_mul_i32 s9, s9, s7
-; GFX11-NEXT: s_mul_hi_u32 s12, s8, s11
-; GFX11-NEXT: s_add_i32 s9, s13, s9
-; GFX11-NEXT: s_mul_hi_u32 s13, s7, s11
+; GFX11-NEXT: s_mul_i32 s10, s10, s8
+; GFX11-NEXT: s_add_i32 s9, s11, s9
+; GFX11-NEXT: s_mul_i32 s11, s7, s12
; GFX11-NEXT: s_add_i32 s9, s9, s10
-; GFX11-NEXT: s_mul_i32 s10, s7, s11
+; GFX11-NEXT: s_mul_hi_u32 s10, s8, s12
; GFX11-NEXT: s_mul_i32 s15, s8, s9
; GFX11-NEXT: s_mul_hi_u32 s14, s8, s9
-; GFX11-NEXT: s_add_u32 s12, s12, s15
+; GFX11-NEXT: s_add_u32 s10, s10, s15
+; GFX11-NEXT: s_mul_hi_u32 s13, s7, s12
; GFX11-NEXT: s_addc_u32 s14, 0, s14
-; GFX11-NEXT: s_mul_hi_u32 s11, s7, s9
-; GFX11-NEXT: s_add_u32 s10, s12, s10
+; GFX11-NEXT: s_mul_hi_u32 s12, s7, s9
+; GFX11-NEXT: s_add_u32 s10, s10, s11
; GFX11-NEXT: s_mul_i32 s9, s7, s9
; GFX11-NEXT: s_addc_u32 s10, s14, s13
-; GFX11-NEXT: s_addc_u32 s11, s11, 0
+; GFX11-NEXT: s_addc_u32 s11, s12, 0
; GFX11-NEXT: s_add_u32 s9, s10, s9
; GFX11-NEXT: s_addc_u32 s10, 0, s11
; GFX11-NEXT: s_add_u32 s8, s8, s9
-; GFX11-NEXT: s_cselect_b32 s9, -1, 0
-; GFX11-NEXT: s_mul_hi_u32 s11, s2, s8
-; GFX11-NEXT: s_cmp_lg_u32 s9, 0
-; GFX11-NEXT: s_mul_hi_u32 s9, s3, s8
; GFX11-NEXT: s_addc_u32 s7, s7, s10
-; GFX11-NEXT: s_mul_i32 s8, s3, s8
+; GFX11-NEXT: s_mul_hi_u32 s9, s2, s8
; GFX11-NEXT: s_mul_i32 s12, s2, s7
-; GFX11-NEXT: s_mul_hi_u32 s10, s2, s7
-; GFX11-NEXT: s_add_u32 s11, s11, s12
-; GFX11-NEXT: s_addc_u32 s10, 0, s10
+; GFX11-NEXT: s_mul_hi_u32 s11, s2, s7
+; GFX11-NEXT: s_mul_hi_u32 s10, s3, s8
+; GFX11-NEXT: s_mul_i32 s8, s3, s8
+; GFX11-NEXT: s_add_u32 s9, s9, s12
+; GFX11-NEXT: s_addc_u32 s11, 0, s11
; GFX11-NEXT: s_mul_hi_u32 s13, s3, s7
-; GFX11-NEXT: s_add_u32 s8, s11, s8
+; GFX11-NEXT: s_add_u32 s8, s9, s8
; GFX11-NEXT: s_mul_i32 s7, s3, s7
-; GFX11-NEXT: s_addc_u32 s8, s10, s9
+; GFX11-NEXT: s_addc_u32 s8, s11, s10
; GFX11-NEXT: s_addc_u32 s9, s13, 0
; GFX11-NEXT: s_add_u32 s7, s8, s7
; GFX11-NEXT: s_addc_u32 s8, 0, s9
@@ -3035,17 +2970,14 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX11-NEXT: s_add_i32 s9, s9, s10
; GFX11-NEXT: s_mul_i32 s10, s4, s7
; GFX11-NEXT: s_add_i32 s9, s9, s11
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_sub_i32 s11, s3, s9
; GFX11-NEXT: s_sub_u32 s10, s2, s10
; GFX11-NEXT: s_cselect_b32 s12, -1, 0
-; GFX11-NEXT: s_cmp_lg_u32 s12, 0
; GFX11-NEXT: s_subb_u32 s11, s11, s5
; GFX11-NEXT: s_sub_u32 s13, s10, s4
-; GFX11-NEXT: s_cselect_b32 s14, -1, 0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_cmp_lg_u32 s14, 0
; GFX11-NEXT: s_subb_u32 s11, s11, 0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_cmp_ge_u32 s11, s5
; GFX11-NEXT: s_cselect_b32 s14, -1, 0
; GFX11-NEXT: s_cmp_ge_u32 s13, s4
@@ -3118,9 +3050,8 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_or_b64 s[4:5], s[2:3], s[6:7]
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_and_b64 s[4:5], s[4:5], 0xffffffff00000000
-; GFX1250-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX1250-NEXT: s_cbranch_scc0 .LBB16_4
; GFX1250-NEXT: ; %bb.1:
; GFX1250-NEXT: s_cvt_f32_u32 s4, s6
@@ -3155,12 +3086,9 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: s_add_nc_u64 s[12:13], s[4:5], s[12:13]
; GFX1250-NEXT: s_add_co_u32 s8, s8, s12
-; GFX1250-NEXT: s_cselect_b32 s4, -1, 0
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: s_cmp_lg_u32 s4, 0
; GFX1250-NEXT: s_add_co_ci_u32 s9, s9, s13
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: s_mul_u64 s[10:11], s[10:11], s[8:9]
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_mul_hi_u32 s13, s8, s11
; GFX1250-NEXT: s_mul_i32 s12, s8, s11
; GFX1250-NEXT: s_mul_hi_u32 s4, s8, s10
@@ -3175,19 +3103,17 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: s_add_nc_u64 s[10:11], s[4:5], s[10:11]
; GFX1250-NEXT: s_add_co_u32 s8, s8, s10
-; GFX1250-NEXT: s_cselect_b32 s10, -1, 0
-; GFX1250-NEXT: s_mul_hi_u32 s4, s2, s8
-; GFX1250-NEXT: s_cmp_lg_u32 s10, 0
-; GFX1250-NEXT: s_mul_hi_u32 s12, s3, s8
; GFX1250-NEXT: s_add_co_ci_u32 s10, s9, s11
-; GFX1250-NEXT: s_mul_i32 s11, s3, s8
+; GFX1250-NEXT: s_mul_hi_u32 s4, s2, s8
+; GFX1250-NEXT: s_mul_hi_u32 s11, s3, s8
+; GFX1250-NEXT: s_mul_i32 s12, s3, s8
; GFX1250-NEXT: s_mul_hi_u32 s9, s2, s10
; GFX1250-NEXT: s_mul_i32 s8, s2, s10
; GFX1250-NEXT: s_mul_hi_u32 s13, s3, s10
; GFX1250-NEXT: s_add_nc_u64 s[8:9], s[4:5], s[8:9]
; GFX1250-NEXT: s_mul_i32 s10, s3, s10
-; GFX1250-NEXT: s_add_co_u32 s4, s8, s11
-; GFX1250-NEXT: s_add_co_ci_u32 s4, s9, s12
+; GFX1250-NEXT: s_add_co_u32 s4, s8, s12
+; GFX1250-NEXT: s_add_co_ci_u32 s4, s9, s11
; GFX1250-NEXT: s_add_co_ci_u32 s11, s13, 0
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: s_add_nc_u64 s[8:9], s[4:5], s[10:11]
@@ -3202,10 +3128,8 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1250-NEXT: s_cmp_lg_u32 s8, 0
; GFX1250-NEXT: s_sub_co_ci_u32 s12, s12, s7
; GFX1250-NEXT: s_sub_co_u32 s13, s4, s6
-; GFX1250-NEXT: s_cselect_b32 s14, -1, 0
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT: s_cmp_lg_u32 s14, 0
; GFX1250-NEXT: s_sub_co_ci_u32 s12, s12, 0
+; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-NEXT: s_cmp_ge_u32 s12, s7
; GFX1250-NEXT: s_cselect_b32 s14, -1, 0
; GFX1250-NEXT: s_cmp_ge_u32 s13, s6
diff --git a/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll b/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
index 4b151b9..07e6a76 100644
--- a/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
+++ b/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
@@ -714,9 +714,8 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i16_with_select(ptr addrspace(1) no
; VI-NEXT: s_lshl_b32 s2, s2, 8
; VI-NEXT: s_or_b32 s2, s2, s3
; VI-NEXT: s_lshl_b32 s3, s2, 16
-; VI-NEXT: s_and_b32 s2, s2, 0xffff
; VI-NEXT: s_flbit_i32_b32 s3, s3
-; VI-NEXT: s_cmp_lg_u32 s2, 0
+; VI-NEXT: s_and_b32 s2, s2, 0xffff
; VI-NEXT: s_cselect_b32 s2, s3, 32
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
diff --git a/llvm/test/CodeGen/AMDGPU/ctpop16.ll b/llvm/test/CodeGen/AMDGPU/ctpop16.ll
index cefcbdd..fca57be 100644
--- a/llvm/test/CodeGen/AMDGPU/ctpop16.ll
+++ b/llvm/test/CodeGen/AMDGPU/ctpop16.ll
@@ -1491,7 +1491,6 @@ define amdgpu_kernel void @ctpop_i16_in_br(ptr addrspace(1) %out, ptr addrspace(
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s4, s6, 16
-; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: s_cbranch_scc0 .LBB14_4
; SI-NEXT: ; %bb.1: ; %else
; SI-NEXT: s_mov_b32 s11, 0xf000
@@ -1521,7 +1520,6 @@ define amdgpu_kernel void @ctpop_i16_in_br(ptr addrspace(1) %out, ptr addrspace(
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s4, s6, 16
-; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: s_cbranch_scc0 .LBB14_4
; VI-NEXT: ; %bb.1: ; %else
; VI-NEXT: s_mov_b32 s11, 0xf000
diff --git a/llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll b/llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll
index d8a5e7fa..dbdea8e 100644
--- a/llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll
+++ b/llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll
@@ -14,7 +14,6 @@ define i32 @s_add_co_select_user() {
; GFX7-NEXT: s_add_u32 s7, s6, s6
; GFX7-NEXT: s_cselect_b64 s[4:5], -1, 0
; GFX7-NEXT: s_or_b32 s4, s4, s5
-; GFX7-NEXT: s_cmp_lg_u32 s4, 0
; GFX7-NEXT: s_addc_u32 s8, s6, 0
; GFX7-NEXT: s_cselect_b64 s[4:5], -1, 0
; GFX7-NEXT: s_and_b64 s[4:5], s[4:5], exec
@@ -31,8 +30,6 @@ define i32 @s_add_co_select_user() {
; GFX9-NEXT: s_load_dword s6, s[4:5], 0x0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_add_u32 s7, s6, s6
-; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX9-NEXT: s_addc_u32 s8, s6, 0
; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0
; GFX9-NEXT: s_and_b64 s[4:5], s[4:5], exec
@@ -49,8 +46,6 @@ define i32 @s_add_co_select_user() {
; GFX10-NEXT: s_load_dword s4, s[4:5], 0x0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_add_u32 s5, s4, s4
-; GFX10-NEXT: s_cselect_b32 s6, -1, 0
-; GFX10-NEXT: s_cmp_lg_u32 s6, 0
; GFX10-NEXT: s_addc_u32 s6, s4, 0
; GFX10-NEXT: s_cselect_b32 s7, -1, 0
; GFX10-NEXT: s_and_b32 s7, s7, exec_lo
@@ -67,16 +62,13 @@ define i32 @s_add_co_select_user() {
; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_add_u32 s1, s0, s0
-; GFX11-NEXT: s_cselect_b32 s2, -1, 0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_addc_u32 s2, s0, 0
; GFX11-NEXT: s_cselect_b32 s3, -1, 0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: s_and_b32 s3, s3, exec_lo
; GFX11-NEXT: s_cselect_b32 s2, s2, 0
; GFX11-NEXT: s_cmp_gt_u32 s0, 31
; GFX11-NEXT: s_cselect_b32 s0, s1, s2
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
bb:
@@ -104,7 +96,6 @@ define amdgpu_kernel void @s_add_co_br_user(i32 %i) {
; GFX7-NEXT: s_add_u32 s0, s2, s2
; GFX7-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX7-NEXT: s_or_b32 s0, s0, s1
-; GFX7-NEXT: s_cmp_lg_u32 s0, 0
; GFX7-NEXT: s_addc_u32 s0, s2, 0
; GFX7-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX7-NEXT: s_andn2_b64 vcc, exec, s[0:1]
@@ -125,12 +116,10 @@ define amdgpu_kernel void @s_add_co_br_user(i32 %i) {
;
; GFX9-LABEL: s_add_co_br_user:
; GFX9: ; %bb.0: ; %bb
-; GFX9-NEXT: s_load_dword s2, s[8:9], 0x0
+; GFX9-NEXT: s_load_dword s0, s[8:9], 0x0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_add_u32 s0, s2, s2
-; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT: s_addc_u32 s0, s2, 0
+; GFX9-NEXT: s_add_u32 s1, s0, s0
+; GFX9-NEXT: s_addc_u32 s0, s0, 0
; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX9-NEXT: s_andn2_b64 vcc, exec, s[0:1]
; GFX9-NEXT: s_cbranch_vccnz .LBB1_2
@@ -153,8 +142,6 @@ define amdgpu_kernel void @s_add_co_br_user(i32 %i) {
; GFX10-NEXT: s_load_dword s0, s[8:9], 0x0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_add_u32 s1, s0, s0
-; GFX10-NEXT: s_cselect_b32 s1, -1, 0
-; GFX10-NEXT: s_cmp_lg_u32 s1, 0
; GFX10-NEXT: s_addc_u32 s0, s0, 0
; GFX10-NEXT: s_cselect_b32 s0, -1, 0
; GFX10-NEXT: s_andn2_b32 vcc_lo, exec_lo, s0
@@ -178,11 +165,9 @@ define amdgpu_kernel void @s_add_co_br_user(i32 %i) {
; GFX11-NEXT: s_load_b32 s0, s[4:5], 0x0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_add_u32 s1, s0, s0
-; GFX11-NEXT: s_cselect_b32 s1, -1, 0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_addc_u32 s0, s0, 0
; GFX11-NEXT: s_cselect_b32 s0, -1, 0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB1_2
; GFX11-NEXT: ; %bb.1: ; %bb0
diff --git a/llvm/test/CodeGen/AMDGPU/fadd-fma-fmul-combine.ll b/llvm/test/CodeGen/AMDGPU/fadd-fma-fmul-combine.ll
index 13206ad..f45070c 100644
--- a/llvm/test/CodeGen/AMDGPU/fadd-fma-fmul-combine.ll
+++ b/llvm/test/CodeGen/AMDGPU/fadd-fma-fmul-combine.ll
@@ -1,9 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn -mattr=+fast-fmaf,+mad-mac-f32-insts -denormal-fp-math-f32=preserve-sign -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-FLUSH %s
-; RUN: llc -mtriple=amdgcn -mattr=-fast-fmaf,+mad-mac-f32-insts -denormal-fp-math-f32=preserve-sign -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-FLUSH %s
-; RUN: llc -mtriple=amdgcn -mattr=+fast-fmaf,+mad-mac-f32-insts -denormal-fp-math-f32=ieee -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-FASTFMA %s
-; RUN: llc -mtriple=amdgcn -mattr=-fast-fmaf,+mad-mac-f32-insts -denormal-fp-math-f32=ieee -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-SLOWFMA %s
+; RUN: llc -mtriple=amdgcn -mattr=+fast-fmaf,+mad-mac-f32-insts -denormal-fp-math-f32=preserve-sign < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-FLUSH %s
+; RUN: llc -mtriple=amdgcn -mattr=-fast-fmaf,+mad-mac-f32-insts -denormal-fp-math-f32=preserve-sign < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-FLUSH %s
+; RUN: llc -mtriple=amdgcn -mattr=+fast-fmaf,+mad-mac-f32-insts -denormal-fp-math-f32=ieee < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-FASTFMA %s
+; RUN: llc -mtriple=amdgcn -mattr=-fast-fmaf,+mad-mac-f32-insts -denormal-fp-math-f32=ieee < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-SLOWFMA %s
; FIXME: This should also fold when fma is actually fast if an FMA
; exists in the original program.
diff --git a/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll b/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
index 62847b1..9a17538 100644
--- a/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fcopysign.f16.ll
@@ -1117,7 +1117,6 @@ define amdgpu_ps i16 @s_copysign_out_f16_mag_f64_sign_f16(double inreg %mag, hal
; SI: ; %bb.0:
; SI-NEXT: s_and_b32 s3, s1, 0x1ff
; SI-NEXT: s_or_b32 s0, s3, s0
-; SI-NEXT: s_cmp_lg_u32 s0, 0
; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; SI-NEXT: s_lshr_b32 s0, s1, 8
@@ -1169,7 +1168,6 @@ define amdgpu_ps i16 @s_copysign_out_f16_mag_f64_sign_f16(double inreg %mag, hal
; VI: ; %bb.0:
; VI-NEXT: s_and_b32 s3, s1, 0x1ff
; VI-NEXT: s_or_b32 s0, s3, s0
-; VI-NEXT: s_cmp_lg_u32 s0, 0
; VI-NEXT: s_cselect_b64 s[4:5], -1, 0
; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; VI-NEXT: s_lshr_b32 s0, s1, 8
@@ -1217,7 +1215,6 @@ define amdgpu_ps i16 @s_copysign_out_f16_mag_f64_sign_f16(double inreg %mag, hal
; GFX9: ; %bb.0:
; GFX9-NEXT: s_and_b32 s3, s1, 0x1ff
; GFX9-NEXT: s_or_b32 s0, s3, s0
-; GFX9-NEXT: s_cmp_lg_u32 s0, 0
; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0
; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; GFX9-NEXT: s_lshr_b32 s0, s1, 8
@@ -1264,11 +1261,9 @@ define amdgpu_ps i16 @s_copysign_out_f16_mag_f64_sign_f16(double inreg %mag, hal
; GFX11-TRUE16-LABEL: s_copysign_out_f16_mag_f64_sign_f16:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_and_b32 s3, s1, 0x1ff
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_or_b32 s0, s3, s0
-; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s0, 0
; GFX11-TRUE16-NEXT: s_cselect_b32 s0, -1, 0
-; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
; GFX11-TRUE16-NEXT: s_bfe_u32 s0, s1, 0xb0014
; GFX11-TRUE16-NEXT: s_lshr_b32 s1, s1, 8
@@ -1320,11 +1315,9 @@ define amdgpu_ps i16 @s_copysign_out_f16_mag_f64_sign_f16(double inreg %mag, hal
; GFX11-FAKE16-LABEL: s_copysign_out_f16_mag_f64_sign_f16:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_and_b32 s3, s1, 0x1ff
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_or_b32 s0, s3, s0
-; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s0, 0
; GFX11-FAKE16-NEXT: s_cselect_b32 s0, -1, 0
-; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
; GFX11-FAKE16-NEXT: s_bfe_u32 s0, s1, 0xb0014
; GFX11-FAKE16-NEXT: s_lshr_b32 s1, s1, 8
@@ -4023,7 +4016,6 @@ define amdgpu_ps i32 @s_copysign_out_v2f16_mag_v2f64_sign_v2f16(<2 x double> inr
; SI-NEXT: s_and_b32 s6, s4, 0xffe
; SI-NEXT: s_and_b32 s4, s1, 0x1ff
; SI-NEXT: s_or_b32 s0, s4, s0
-; SI-NEXT: s_cmp_lg_u32 s0, 0
; SI-NEXT: v_cvt_f16_f32_e32 v0, s5
; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
; SI-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[4:5]
@@ -4066,7 +4058,6 @@ define amdgpu_ps i32 @s_copysign_out_v2f16_mag_v2f64_sign_v2f16(<2 x double> inr
; SI-NEXT: s_and_b32 s5, s0, 0xffe
; SI-NEXT: s_and_b32 s0, s3, 0x1ff
; SI-NEXT: s_or_b32 s0, s0, s2
-; SI-NEXT: s_cmp_lg_u32 s0, 0
; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
; SI-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
; SI-NEXT: v_readfirstlane_b32 s0, v2
@@ -4120,10 +4111,9 @@ define amdgpu_ps i32 @s_copysign_out_v2f16_mag_v2f64_sign_v2f16(<2 x double> inr
; VI-LABEL: s_copysign_out_v2f16_mag_v2f64_sign_v2f16:
; VI: ; %bb.0:
; VI-NEXT: s_lshr_b32 s5, s3, 8
-; VI-NEXT: s_and_b32 s6, s3, 0x1ff
; VI-NEXT: s_and_b32 s5, s5, 0xffe
+; VI-NEXT: s_and_b32 s6, s3, 0x1ff
; VI-NEXT: s_or_b32 s2, s6, s2
-; VI-NEXT: s_cmp_lg_u32 s2, 0
; VI-NEXT: s_cselect_b64 s[6:7], -1, 0
; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[6:7]
; VI-NEXT: s_bfe_u32 s3, s3, 0xb0014
@@ -4163,7 +4153,6 @@ define amdgpu_ps i32 @s_copysign_out_v2f16_mag_v2f64_sign_v2f16(<2 x double> inr
; VI-NEXT: s_and_b32 s7, s2, 0xffe
; VI-NEXT: s_and_b32 s2, s1, 0x1ff
; VI-NEXT: s_or_b32 s0, s2, s0
-; VI-NEXT: s_cmp_lg_u32 s0, 0
; VI-NEXT: s_cselect_b64 s[2:3], -1, 0
; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[2:3]
; VI-NEXT: s_bfe_u32 s1, s1, 0xb0014
@@ -4209,10 +4198,9 @@ define amdgpu_ps i32 @s_copysign_out_v2f16_mag_v2f64_sign_v2f16(<2 x double> inr
; GFX9-LABEL: s_copysign_out_v2f16_mag_v2f64_sign_v2f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_lshr_b32 s5, s3, 8
-; GFX9-NEXT: s_and_b32 s6, s3, 0x1ff
; GFX9-NEXT: s_and_b32 s5, s5, 0xffe
+; GFX9-NEXT: s_and_b32 s6, s3, 0x1ff
; GFX9-NEXT: s_or_b32 s2, s6, s2
-; GFX9-NEXT: s_cmp_lg_u32 s2, 0
; GFX9-NEXT: s_cselect_b64 s[6:7], -1, 0
; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[6:7]
; GFX9-NEXT: s_bfe_u32 s6, s3, 0xb0014
@@ -4254,7 +4242,6 @@ define amdgpu_ps i32 @s_copysign_out_v2f16_mag_v2f64_sign_v2f16(<2 x double> inr
; GFX9-NEXT: s_and_b32 s6, s2, 0xffe
; GFX9-NEXT: s_and_b32 s2, s1, 0x1ff
; GFX9-NEXT: s_or_b32 s0, s2, s0
-; GFX9-NEXT: s_cmp_lg_u32 s0, 0
; GFX9-NEXT: s_cselect_b64 s[2:3], -1, 0
; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[2:3]
; GFX9-NEXT: v_readfirstlane_b32 s0, v0
@@ -4301,11 +4288,10 @@ define amdgpu_ps i32 @s_copysign_out_v2f16_mag_v2f64_sign_v2f16(<2 x double> inr
;
; GFX11-LABEL: s_copysign_out_v2f16_mag_v2f64_sign_v2f16:
; GFX11: ; %bb.0:
-; GFX11-NEXT: s_and_b32 s5, s3, 0x1ff
-; GFX11-NEXT: s_lshr_b32 s6, s3, 8
-; GFX11-NEXT: s_or_b32 s2, s5, s2
-; GFX11-NEXT: s_and_b32 s5, s6, 0xffe
-; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_lshr_b32 s5, s3, 8
+; GFX11-NEXT: s_and_b32 s6, s3, 0x1ff
+; GFX11-NEXT: s_and_b32 s5, s5, 0xffe
+; GFX11-NEXT: s_or_b32 s2, s6, s2
; GFX11-NEXT: s_cselect_b32 s2, -1, 0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
@@ -4348,13 +4334,12 @@ define amdgpu_ps i32 @s_copysign_out_v2f16_mag_v2f64_sign_v2f16(<2 x double> inr
; GFX11-NEXT: s_cmpk_eq_i32 s2, 0x40f
; GFX11-NEXT: s_cselect_b32 s2, s5, s6
; GFX11-NEXT: s_lshr_b32 s3, s3, 16
-; GFX11-NEXT: s_and_b32 s6, s1, 0x1ff
; GFX11-NEXT: s_lshr_b32 s5, s1, 8
; GFX11-NEXT: s_and_b32 s3, s3, 0x8000
-; GFX11-NEXT: s_or_b32 s0, s6, s0
+; GFX11-NEXT: s_and_b32 s6, s1, 0x1ff
; GFX11-NEXT: s_and_b32 s5, s5, 0xffe
; GFX11-NEXT: s_or_b32 s2, s3, s2
-; GFX11-NEXT: s_cmp_lg_u32 s0, 0
+; GFX11-NEXT: s_or_b32 s0, s6, s0
; GFX11-NEXT: s_cselect_b32 s0, -1, 0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
diff --git a/llvm/test/CodeGen/AMDGPU/fpext.f16.ll b/llvm/test/CodeGen/AMDGPU/fpext.f16.ll
index d41e2c6..8df7564 100644
--- a/llvm/test/CodeGen/AMDGPU/fpext.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fpext.f16.ll
@@ -1,9 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -enable-unsafe-fp-math < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope --check-prefixes=SI %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope --check-prefixes=GFX89,VI %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope --check-prefixes=GFX89,GFX9 %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope --check-prefixes=GFX11-TRUE16 %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope --check-prefixes=GFX11-FAKE16 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope --check-prefixes=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope --check-prefixes=GFX89,VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx900 -mattr=-flat-for-global < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope --check-prefixes=GFX89,GFX9 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -mattr=-flat-for-global < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope --check-prefixes=GFX11-TRUE16 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -mattr=-flat-for-global < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope --check-prefixes=GFX11-FAKE16 %s
define amdgpu_kernel void @fpext_f16_to_f32(
; SI-LABEL: fpext_f16_to_f32:
diff --git a/llvm/test/CodeGen/AMDGPU/fptosi.f16.ll b/llvm/test/CodeGen/AMDGPU/fptosi.f16.ll
index a43292d..a043d53 100644
--- a/llvm/test/CodeGen/AMDGPU/fptosi.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptosi.f16.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=SI %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=VI %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GFX11-TRUE16 %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GFX11-FAKE16 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn < %s | FileCheck -check-prefixes=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global < %s | FileCheck -check-prefixes=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=GFX11-TRUE16 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=GFX11-FAKE16 %s

define amdgpu_kernel void @fptosi_f16_to_i16(
diff --git a/llvm/test/CodeGen/AMDGPU/fptoui.f16.ll b/llvm/test/CodeGen/AMDGPU/fptoui.f16.ll
index 96cb621..af1ab37 100644
--- a/llvm/test/CodeGen/AMDGPU/fptoui.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptoui.f16.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=SI %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=VI %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GFX11-TRUE16 %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GFX11-FAKE16 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefixes=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global < %s | FileCheck -check-prefixes=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=GFX11-TRUE16 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=GFX11-FAKE16 %s

define amdgpu_kernel void @fptoui_f16_to_i16(
diff --git a/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll b/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
index b0dd187..c28b25c7 100644
--- a/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
@@ -599,10 +599,8 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(
; SI-GISEL-NEXT: s_addk_i32 s3, 0xfc10
; SI-GISEL-NEXT: s_and_b32 s6, s6, 0xffe
; SI-GISEL-NEXT: s_or_b32 s4, s7, s4
-; SI-GISEL-NEXT: s_cmp_lg_u32 s4, 0
; SI-GISEL-NEXT: s_cselect_b32 s4, 1, 0
; SI-GISEL-NEXT: s_or_b32 s4, s6, s4
-; SI-GISEL-NEXT: s_cmp_lg_u32 s4, 0
; SI-GISEL-NEXT: s_cselect_b32 s6, 1, 0
; SI-GISEL-NEXT: s_lshl_b32 s6, s6, 9
; SI-GISEL-NEXT: s_lshl_b32 s7, s3, 12
@@ -711,10 +709,8 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(
; VI-GISEL-NEXT: s_addk_i32 s4, 0xfc10
; VI-GISEL-NEXT: s_and_b32 s5, s5, 0xffe
; VI-GISEL-NEXT: s_or_b32 s2, s6, s2
-; VI-GISEL-NEXT: s_cmp_lg_u32 s2, 0
; VI-GISEL-NEXT: s_cselect_b32 s2, 1, 0
; VI-GISEL-NEXT: s_or_b32 s2, s5, s2
-; VI-GISEL-NEXT: s_cmp_lg_u32 s2, 0
; VI-GISEL-NEXT: s_cselect_b32 s5, 1, 0
; VI-GISEL-NEXT: s_sub_i32 s7, 1, s4
; VI-GISEL-NEXT: s_lshl_b32 s6, s4, 12
@@ -824,10 +820,8 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(
; GFX9-GISEL-NEXT: s_addk_i32 s4, 0xfc10
; GFX9-GISEL-NEXT: s_and_b32 s5, s5, 0xffe
; GFX9-GISEL-NEXT: s_or_b32 s2, s6, s2
-; GFX9-GISEL-NEXT: s_cmp_lg_u32 s2, 0
; GFX9-GISEL-NEXT: s_cselect_b32 s2, 1, 0
; GFX9-GISEL-NEXT: s_or_b32 s2, s5, s2
-; GFX9-GISEL-NEXT: s_cmp_lg_u32 s2, 0
; GFX9-GISEL-NEXT: s_cselect_b32 s5, 1, 0
; GFX9-GISEL-NEXT: s_sub_i32 s7, 1, s4
; GFX9-GISEL-NEXT: s_lshl_b32 s6, s4, 12
@@ -937,10 +931,8 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(
; GFX950-GISEL-NEXT: s_addk_i32 s4, 0xfc10
; GFX950-GISEL-NEXT: s_and_b32 s5, s5, 0xffe
; GFX950-GISEL-NEXT: s_or_b32 s2, s6, s2
-; GFX950-GISEL-NEXT: s_cmp_lg_u32 s2, 0
; GFX950-GISEL-NEXT: s_cselect_b32 s2, 1, 0
; GFX950-GISEL-NEXT: s_or_b32 s2, s5, s2
-; GFX950-GISEL-NEXT: s_cmp_lg_u32 s2, 0
; GFX950-GISEL-NEXT: s_cselect_b32 s5, 1, 0
; GFX950-GISEL-NEXT: s_sub_i32 s7, 1, s4
; GFX950-GISEL-NEXT: s_lshl_b32 s6, s4, 12
@@ -1118,17 +1110,15 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(
; GFX11-GISEL-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-GISEL-TRUE16-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
; GFX11-GISEL-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-GISEL-TRUE16-NEXT: s_and_b32 s6, s3, 0x1ff
; GFX11-GISEL-TRUE16-NEXT: s_bfe_u32 s4, s3, 0xb0014
; GFX11-GISEL-TRUE16-NEXT: s_lshr_b32 s5, s3, 8
-; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s2, s6, s2
+; GFX11-GISEL-TRUE16-NEXT: s_and_b32 s6, s3, 0x1ff
; GFX11-GISEL-TRUE16-NEXT: s_addk_i32 s4, 0xfc10
; GFX11-GISEL-TRUE16-NEXT: s_and_b32 s5, s5, 0xffe
-; GFX11-GISEL-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s2, s6, s2
; GFX11-GISEL-TRUE16-NEXT: s_cselect_b32 s2, 1, 0
-; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s2, s5, s2
-; GFX11-GISEL-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-GISEL-TRUE16-NEXT: s_cselect_b32 s5, 1, 0
; GFX11-GISEL-TRUE16-NEXT: s_sub_i32 s6, 1, s4
; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s8, s2, 0x1000
@@ -1175,17 +1165,15 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(
; GFX11-GISEL-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-GISEL-FAKE16-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
; GFX11-GISEL-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-GISEL-FAKE16-NEXT: s_and_b32 s6, s3, 0x1ff
; GFX11-GISEL-FAKE16-NEXT: s_bfe_u32 s4, s3, 0xb0014
; GFX11-GISEL-FAKE16-NEXT: s_lshr_b32 s5, s3, 8
-; GFX11-GISEL-FAKE16-NEXT: s_or_b32 s2, s6, s2
+; GFX11-GISEL-FAKE16-NEXT: s_and_b32 s6, s3, 0x1ff
; GFX11-GISEL-FAKE16-NEXT: s_addk_i32 s4, 0xfc10
; GFX11-GISEL-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
-; GFX11-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-GISEL-FAKE16-NEXT: s_or_b32 s2, s6, s2
; GFX11-GISEL-FAKE16-NEXT: s_cselect_b32 s2, 1, 0
-; GFX11-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-GISEL-FAKE16-NEXT: s_or_b32 s2, s5, s2
-; GFX11-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-GISEL-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
; GFX11-GISEL-FAKE16-NEXT: s_sub_i32 s6, 1, s4
; GFX11-GISEL-FAKE16-NEXT: s_or_b32 s8, s2, 0x1000
@@ -1366,17 +1354,15 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(
; GFX1250-GISEL-TRUE16-NEXT: s_wait_kmcnt 0x0
; GFX1250-GISEL-TRUE16-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
; GFX1250-GISEL-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-TRUE16-NEXT: s_and_b32 s6, s3, 0x1ff
; GFX1250-GISEL-TRUE16-NEXT: s_bfe_u32 s4, s3, 0xb0014
; GFX1250-GISEL-TRUE16-NEXT: s_lshr_b32 s5, s3, 8
-; GFX1250-GISEL-TRUE16-NEXT: s_or_b32 s2, s6, s2
+; GFX1250-GISEL-TRUE16-NEXT: s_and_b32 s6, s3, 0x1ff
; GFX1250-GISEL-TRUE16-NEXT: s_addk_co_i32 s4, 0xfc10
; GFX1250-GISEL-TRUE16-NEXT: s_and_b32 s5, s5, 0xffe
-; GFX1250-GISEL-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1250-GISEL-TRUE16-NEXT: s_or_b32 s2, s6, s2
; GFX1250-GISEL-TRUE16-NEXT: s_cselect_b32 s2, 1, 0
-; GFX1250-GISEL-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-TRUE16-NEXT: s_or_b32 s2, s5, s2
-; GFX1250-GISEL-TRUE16-NEXT: s_cmp_lg_u32 s2, 0
; GFX1250-GISEL-TRUE16-NEXT: s_cselect_b32 s5, 1, 0
; GFX1250-GISEL-TRUE16-NEXT: s_sub_co_i32 s6, 1, s4
; GFX1250-GISEL-TRUE16-NEXT: s_or_b32 s8, s2, 0x1000
@@ -1423,17 +1409,15 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(
; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
; GFX1250-GISEL-FAKE16-NEXT: s_load_b64 s[2:3], s[2:3], 0x0
; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s6, s3, 0x1ff
; GFX1250-GISEL-FAKE16-NEXT: s_bfe_u32 s4, s3, 0xb0014
; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s5, s3, 8
-; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s6, s2
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s6, s3, 0x1ff
; GFX1250-GISEL-FAKE16-NEXT: s_addk_co_i32 s4, 0xfc10
; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
-; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s6, s2
; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s2, 1, 0
-; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s5, s2
-; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s2, 0
; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
; GFX1250-GISEL-FAKE16-NEXT: s_sub_co_i32 s6, 1, s4
; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s8, s2, 0x1000
@@ -2154,10 +2138,8 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16(
; SI-GISEL-NEXT: s_addk_i32 s3, 0xfc10
; SI-GISEL-NEXT: s_and_b32 s8, s8, 0xffe
; SI-GISEL-NEXT: s_or_b32 s4, s9, s4
-; SI-GISEL-NEXT: s_cmp_lg_u32 s4, 0
; SI-GISEL-NEXT: s_cselect_b32 s4, 1, 0
; SI-GISEL-NEXT: s_or_b32 s4, s8, s4
-; SI-GISEL-NEXT: s_cmp_lg_u32 s4, 0
; SI-GISEL-NEXT: s_cselect_b32 s8, 1, 0
; SI-GISEL-NEXT: s_lshl_b32 s8, s8, 9
; SI-GISEL-NEXT: s_lshl_b32 s9, s3, 12
@@ -2193,12 +2175,10 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16(
; SI-GISEL-NEXT: s_and_b32 s4, s4, 0x8000
; SI-GISEL-NEXT: s_addk_i32 s5, 0xfc10
; SI-GISEL-NEXT: s_and_b32 s8, s8, 0xffe
-; SI-GISEL-NEXT: s_or_b32 s6, s9, s6
; SI-GISEL-NEXT: s_or_b32 s3, s4, s3
-; SI-GISEL-NEXT: s_cmp_lg_u32 s6, 0
+; SI-GISEL-NEXT: s_or_b32 s4, s9, s6
; SI-GISEL-NEXT: s_cselect_b32 s4, 1, 0
; SI-GISEL-NEXT: s_or_b32 s4, s8, s4
-; SI-GISEL-NEXT: s_cmp_lg_u32 s4, 0
; SI-GISEL-NEXT: s_cselect_b32 s6, 1, 0
; SI-GISEL-NEXT: s_lshl_b32 s6, s6, 9
; SI-GISEL-NEXT: s_lshl_b32 s8, s5, 12
@@ -2355,10 +2335,8 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16(
; VI-GISEL-NEXT: s_addk_i32 s2, 0xfc10
; VI-GISEL-NEXT: s_and_b32 s3, s3, 0xffe
; VI-GISEL-NEXT: s_or_b32 s4, s8, s4
-; VI-GISEL-NEXT: s_cmp_lg_u32 s4, 0
; VI-GISEL-NEXT: s_cselect_b32 s4, 1, 0
; VI-GISEL-NEXT: s_or_b32 s3, s3, s4
-; VI-GISEL-NEXT: s_cmp_lg_u32 s3, 0
; VI-GISEL-NEXT: s_cselect_b32 s4, 1, 0
; VI-GISEL-NEXT: s_sub_i32 s9, 1, s2
; VI-GISEL-NEXT: s_lshl_b32 s8, s2, 12
@@ -2392,14 +2370,12 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16(
; VI-GISEL-NEXT: s_or_b32 s2, s3, s2
; VI-GISEL-NEXT: s_bfe_u32 s3, s7, 0xb0014
; VI-GISEL-NEXT: s_lshr_b32 s4, s7, 8
-; VI-GISEL-NEXT: s_and_b32 s5, s7, 0x1ff
; VI-GISEL-NEXT: s_addk_i32 s3, 0xfc10
; VI-GISEL-NEXT: s_and_b32 s4, s4, 0xffe
+; VI-GISEL-NEXT: s_and_b32 s5, s7, 0x1ff
; VI-GISEL-NEXT: s_or_b32 s5, s5, s6
-; VI-GISEL-NEXT: s_cmp_lg_u32 s5, 0
; VI-GISEL-NEXT: s_cselect_b32 s5, 1, 0
; VI-GISEL-NEXT: s_or_b32 s4, s4, s5
-; VI-GISEL-NEXT: s_cmp_lg_u32 s4, 0
; VI-GISEL-NEXT: s_cselect_b32 s5, 1, 0
; VI-GISEL-NEXT: s_sub_i32 s8, 1, s3
; VI-GISEL-NEXT: s_lshl_b32 s6, s3, 12
@@ -2555,10 +2531,8 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16(
; GFX9-GISEL-NEXT: s_addk_i32 s2, 0xfc10
; GFX9-GISEL-NEXT: s_and_b32 s3, s3, 0xffe
; GFX9-GISEL-NEXT: s_or_b32 s4, s8, s4
-; GFX9-GISEL-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-GISEL-NEXT: s_cselect_b32 s4, 1, 0
; GFX9-GISEL-NEXT: s_or_b32 s3, s3, s4
-; GFX9-GISEL-NEXT: s_cmp_lg_u32 s3, 0
; GFX9-GISEL-NEXT: s_cselect_b32 s4, 1, 0
; GFX9-GISEL-NEXT: s_sub_i32 s9, 1, s2
; GFX9-GISEL-NEXT: s_lshl_b32 s8, s2, 12
@@ -2592,14 +2566,12 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16(
; GFX9-GISEL-NEXT: s_or_b32 s2, s3, s2
; GFX9-GISEL-NEXT: s_bfe_u32 s3, s7, 0xb0014
; GFX9-GISEL-NEXT: s_lshr_b32 s4, s7, 8
-; GFX9-GISEL-NEXT: s_and_b32 s5, s7, 0x1ff
; GFX9-GISEL-NEXT: s_addk_i32 s3, 0xfc10
; GFX9-GISEL-NEXT: s_and_b32 s4, s4, 0xffe
+; GFX9-GISEL-NEXT: s_and_b32 s5, s7, 0x1ff
; GFX9-GISEL-NEXT: s_or_b32 s5, s5, s6
-; GFX9-GISEL-NEXT: s_cmp_lg_u32 s5, 0
; GFX9-GISEL-NEXT: s_cselect_b32 s5, 1, 0
; GFX9-GISEL-NEXT: s_or_b32 s4, s4, s5
-; GFX9-GISEL-NEXT: s_cmp_lg_u32 s4, 0
; GFX9-GISEL-NEXT: s_cselect_b32 s5, 1, 0
; GFX9-GISEL-NEXT: s_sub_i32 s8, 1, s3
; GFX9-GISEL-NEXT: s_lshl_b32 s6, s3, 12
@@ -2752,10 +2724,8 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16(
; GFX950-GISEL-NEXT: s_addk_i32 s2, 0xfc10
; GFX950-GISEL-NEXT: s_and_b32 s3, s3, 0xffe
; GFX950-GISEL-NEXT: s_or_b32 s4, s8, s4
-; GFX950-GISEL-NEXT: s_cmp_lg_u32 s4, 0
; GFX950-GISEL-NEXT: s_cselect_b32 s4, 1, 0
; GFX950-GISEL-NEXT: s_or_b32 s3, s3, s4
-; GFX950-GISEL-NEXT: s_cmp_lg_u32 s3, 0
; GFX950-GISEL-NEXT: s_cselect_b32 s4, 1, 0
; GFX950-GISEL-NEXT: s_sub_i32 s9, 1, s2
; GFX950-GISEL-NEXT: s_lshl_b32 s8, s2, 12
@@ -2789,14 +2759,12 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16(
; GFX950-GISEL-NEXT: s_or_b32 s2, s3, s2
; GFX950-GISEL-NEXT: s_bfe_u32 s3, s7, 0xb0014
; GFX950-GISEL-NEXT: s_lshr_b32 s4, s7, 8
-; GFX950-GISEL-NEXT: s_and_b32 s5, s7, 0x1ff
; GFX950-GISEL-NEXT: s_addk_i32 s3, 0xfc10
; GFX950-GISEL-NEXT: s_and_b32 s4, s4, 0xffe
+; GFX950-GISEL-NEXT: s_and_b32 s5, s7, 0x1ff
; GFX950-GISEL-NEXT: s_or_b32 s5, s5, s6
-; GFX950-GISEL-NEXT: s_cmp_lg_u32 s5, 0
; GFX950-GISEL-NEXT: s_cselect_b32 s5, 1, 0
; GFX950-GISEL-NEXT: s_or_b32 s4, s4, s5
-; GFX950-GISEL-NEXT: s_cmp_lg_u32 s4, 0
; GFX950-GISEL-NEXT: s_cselect_b32 s5, 1, 0
; GFX950-GISEL-NEXT: s_sub_i32 s8, 1, s3
; GFX950-GISEL-NEXT: s_lshl_b32 s6, s3, 12
@@ -3073,17 +3041,15 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16(
; GFX11-GISEL-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-GISEL-TRUE16-NEXT: s_load_b128 s[4:7], s[2:3], 0x0
; GFX11-GISEL-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-GISEL-TRUE16-NEXT: s_and_b32 s8, s5, 0x1ff
; GFX11-GISEL-TRUE16-NEXT: s_bfe_u32 s2, s5, 0xb0014
; GFX11-GISEL-TRUE16-NEXT: s_lshr_b32 s3, s5, 8
-; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s4, s8, s4
+; GFX11-GISEL-TRUE16-NEXT: s_and_b32 s8, s5, 0x1ff
; GFX11-GISEL-TRUE16-NEXT: s_addk_i32 s2, 0xfc10
; GFX11-GISEL-TRUE16-NEXT: s_and_b32 s3, s3, 0xffe
-; GFX11-GISEL-TRUE16-NEXT: s_cmp_lg_u32 s4, 0
+; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s4, s8, s4
; GFX11-GISEL-TRUE16-NEXT: s_cselect_b32 s4, 1, 0
-; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s3, s3, s4
-; GFX11-GISEL-TRUE16-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-GISEL-TRUE16-NEXT: s_cselect_b32 s4, 1, 0
; GFX11-GISEL-TRUE16-NEXT: s_sub_i32 s8, 1, s2
; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s10, s3, 0x1000
@@ -3115,19 +3081,17 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16(
; GFX11-GISEL-TRUE16-NEXT: s_cmpk_eq_i32 s2, 0x40f
; GFX11-GISEL-TRUE16-NEXT: s_cselect_b32 s2, s4, s3
; GFX11-GISEL-TRUE16-NEXT: s_lshr_b32 s3, s5, 16
-; GFX11-GISEL-TRUE16-NEXT: s_and_b32 s8, s7, 0x1ff
; GFX11-GISEL-TRUE16-NEXT: s_bfe_u32 s4, s7, 0xb0014
; GFX11-GISEL-TRUE16-NEXT: s_lshr_b32 s5, s7, 8
; GFX11-GISEL-TRUE16-NEXT: s_and_b32 s3, s3, 0x8000
-; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s6, s8, s6
+; GFX11-GISEL-TRUE16-NEXT: s_and_b32 s8, s7, 0x1ff
; GFX11-GISEL-TRUE16-NEXT: s_addk_i32 s4, 0xfc10
; GFX11-GISEL-TRUE16-NEXT: s_and_b32 s5, s5, 0xffe
; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s2, s3, s2
-; GFX11-GISEL-TRUE16-NEXT: s_cmp_lg_u32 s6, 0
+; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s3, s8, s6
; GFX11-GISEL-TRUE16-NEXT: s_cselect_b32 s3, 1, 0
-; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s3, s5, s3
-; GFX11-GISEL-TRUE16-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-GISEL-TRUE16-NEXT: s_cselect_b32 s5, 1, 0
; GFX11-GISEL-TRUE16-NEXT: s_sub_i32 s6, 1, s4
; GFX11-GISEL-TRUE16-NEXT: s_or_b32 s9, s3, 0x1000
@@ -3176,17 +3140,15 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16(
; GFX11-GISEL-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-GISEL-FAKE16-NEXT: s_load_b128 s[4:7], s[2:3], 0x0
; GFX11-GISEL-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-GISEL-FAKE16-NEXT: s_and_b32 s8, s5, 0x1ff
; GFX11-GISEL-FAKE16-NEXT: s_bfe_u32 s2, s5, 0xb0014
; GFX11-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s5, 8
-; GFX11-GISEL-FAKE16-NEXT: s_or_b32 s4, s8, s4
+; GFX11-GISEL-FAKE16-NEXT: s_and_b32 s8, s5, 0x1ff
; GFX11-GISEL-FAKE16-NEXT: s_addk_i32 s2, 0xfc10
; GFX11-GISEL-FAKE16-NEXT: s_and_b32 s3, s3, 0xffe
-; GFX11-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
+; GFX11-GISEL-FAKE16-NEXT: s_or_b32 s4, s8, s4
; GFX11-GISEL-FAKE16-NEXT: s_cselect_b32 s4, 1, 0
-; GFX11-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-GISEL-FAKE16-NEXT: s_or_b32 s3, s3, s4
-; GFX11-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-GISEL-FAKE16-NEXT: s_cselect_b32 s4, 1, 0
; GFX11-GISEL-FAKE16-NEXT: s_sub_i32 s8, 1, s2
; GFX11-GISEL-FAKE16-NEXT: s_or_b32 s10, s3, 0x1000
@@ -3218,19 +3180,17 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16(
; GFX11-GISEL-FAKE16-NEXT: s_cmpk_eq_i32 s2, 0x40f
; GFX11-GISEL-FAKE16-NEXT: s_cselect_b32 s2, s4, s3
; GFX11-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s5, 16
-; GFX11-GISEL-FAKE16-NEXT: s_and_b32 s8, s7, 0x1ff
; GFX11-GISEL-FAKE16-NEXT: s_bfe_u32 s4, s7, 0xb0014
; GFX11-GISEL-FAKE16-NEXT: s_lshr_b32 s5, s7, 8
; GFX11-GISEL-FAKE16-NEXT: s_and_b32 s3, s3, 0x8000
-; GFX11-GISEL-FAKE16-NEXT: s_or_b32 s6, s8, s6
+; GFX11-GISEL-FAKE16-NEXT: s_and_b32 s8, s7, 0x1ff
; GFX11-GISEL-FAKE16-NEXT: s_addk_i32 s4, 0xfc10
; GFX11-GISEL-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
; GFX11-GISEL-FAKE16-NEXT: s_or_b32 s2, s3, s2
-; GFX11-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s6, 0
+; GFX11-GISEL-FAKE16-NEXT: s_or_b32 s3, s8, s6
; GFX11-GISEL-FAKE16-NEXT: s_cselect_b32 s3, 1, 0
-; GFX11-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-GISEL-FAKE16-NEXT: s_or_b32 s3, s5, s3
-; GFX11-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
; GFX11-GISEL-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
; GFX11-GISEL-FAKE16-NEXT: s_sub_i32 s6, 1, s4
; GFX11-GISEL-FAKE16-NEXT: s_or_b32 s9, s3, 0x1000
@@ -3511,17 +3471,15 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16(
; GFX1250-GISEL-TRUE16-NEXT: s_wait_kmcnt 0x0
; GFX1250-GISEL-TRUE16-NEXT: s_load_b128 s[4:7], s[2:3], 0x0
; GFX1250-GISEL-TRUE16-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-TRUE16-NEXT: s_and_b32 s8, s5, 0x1ff
; GFX1250-GISEL-TRUE16-NEXT: s_bfe_u32 s2, s5, 0xb0014
; GFX1250-GISEL-TRUE16-NEXT: s_lshr_b32 s3, s5, 8
-; GFX1250-GISEL-TRUE16-NEXT: s_or_b32 s4, s8, s4
+; GFX1250-GISEL-TRUE16-NEXT: s_and_b32 s8, s5, 0x1ff
; GFX1250-GISEL-TRUE16-NEXT: s_addk_co_i32 s2, 0xfc10
; GFX1250-GISEL-TRUE16-NEXT: s_and_b32 s3, s3, 0xffe
-; GFX1250-GISEL-TRUE16-NEXT: s_cmp_lg_u32 s4, 0
+; GFX1250-GISEL-TRUE16-NEXT: s_or_b32 s4, s8, s4
; GFX1250-GISEL-TRUE16-NEXT: s_cselect_b32 s4, 1, 0
-; GFX1250-GISEL-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-TRUE16-NEXT: s_or_b32 s3, s3, s4
-; GFX1250-GISEL-TRUE16-NEXT: s_cmp_lg_u32 s3, 0
; GFX1250-GISEL-TRUE16-NEXT: s_cselect_b32 s4, 1, 0
; GFX1250-GISEL-TRUE16-NEXT: s_sub_co_i32 s8, 1, s2
; GFX1250-GISEL-TRUE16-NEXT: s_or_b32 s10, s3, 0x1000
@@ -3553,19 +3511,17 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16(
; GFX1250-GISEL-TRUE16-NEXT: s_cmp_eq_u32 s2, 0x40f
; GFX1250-GISEL-TRUE16-NEXT: s_cselect_b32 s2, s4, s3
; GFX1250-GISEL-TRUE16-NEXT: s_lshr_b32 s3, s5, 16
-; GFX1250-GISEL-TRUE16-NEXT: s_and_b32 s8, s7, 0x1ff
; GFX1250-GISEL-TRUE16-NEXT: s_bfe_u32 s4, s7, 0xb0014
; GFX1250-GISEL-TRUE16-NEXT: s_lshr_b32 s5, s7, 8
; GFX1250-GISEL-TRUE16-NEXT: s_and_b32 s3, s3, 0x8000
-; GFX1250-GISEL-TRUE16-NEXT: s_or_b32 s6, s8, s6
+; GFX1250-GISEL-TRUE16-NEXT: s_and_b32 s8, s7, 0x1ff
; GFX1250-GISEL-TRUE16-NEXT: s_addk_co_i32 s4, 0xfc10
; GFX1250-GISEL-TRUE16-NEXT: s_and_b32 s5, s5, 0xffe
; GFX1250-GISEL-TRUE16-NEXT: s_or_b32 s2, s3, s2
-; GFX1250-GISEL-TRUE16-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1250-GISEL-TRUE16-NEXT: s_or_b32 s3, s8, s6
; GFX1250-GISEL-TRUE16-NEXT: s_cselect_b32 s3, 1, 0
-; GFX1250-GISEL-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-TRUE16-NEXT: s_or_b32 s3, s5, s3
-; GFX1250-GISEL-TRUE16-NEXT: s_cmp_lg_u32 s3, 0
; GFX1250-GISEL-TRUE16-NEXT: s_cselect_b32 s5, 1, 0
; GFX1250-GISEL-TRUE16-NEXT: s_sub_co_i32 s6, 1, s4
; GFX1250-GISEL-TRUE16-NEXT: s_or_b32 s9, s3, 0x1000
@@ -3614,17 +3570,15 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16(
; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
; GFX1250-GISEL-FAKE16-NEXT: s_load_b128 s[4:7], s[2:3], 0x0
; GFX1250-GISEL-FAKE16-NEXT: s_wait_kmcnt 0x0
-; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s8, s5, 0x1ff
; GFX1250-GISEL-FAKE16-NEXT: s_bfe_u32 s2, s5, 0xb0014
; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s5, 8
-; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s4, s8, s4
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s8, s5, 0x1ff
; GFX1250-GISEL-FAKE16-NEXT: s_addk_co_i32 s2, 0xfc10
; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s3, s3, 0xffe
-; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s4, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s4, s8, s4
; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s4, 1, 0
-; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s3, s3, s4
-; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s4, 1, 0
; GFX1250-GISEL-FAKE16-NEXT: s_sub_co_i32 s8, 1, s2
; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s10, s3, 0x1000
@@ -3656,19 +3610,17 @@ define amdgpu_kernel void @fptrunc_v2f64_to_v2f16(
; GFX1250-GISEL-FAKE16-NEXT: s_cmp_eq_u32 s2, 0x40f
; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s2, s4, s3
; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s3, s5, 16
-; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s8, s7, 0x1ff
; GFX1250-GISEL-FAKE16-NEXT: s_bfe_u32 s4, s7, 0xb0014
; GFX1250-GISEL-FAKE16-NEXT: s_lshr_b32 s5, s7, 8
; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s3, s3, 0x8000
-; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s6, s8, s6
+; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s8, s7, 0x1ff
; GFX1250-GISEL-FAKE16-NEXT: s_addk_co_i32 s4, 0xfc10
; GFX1250-GISEL-FAKE16-NEXT: s_and_b32 s5, s5, 0xffe
; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s2, s3, s2
-; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s3, s8, s6
; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s3, 1, 0
-; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1250-GISEL-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s3, s5, s3
-; GFX1250-GISEL-FAKE16-NEXT: s_cmp_lg_u32 s3, 0
; GFX1250-GISEL-FAKE16-NEXT: s_cselect_b32 s5, 1, 0
; GFX1250-GISEL-FAKE16-NEXT: s_sub_co_i32 s6, 1, s4
; GFX1250-GISEL-FAKE16-NEXT: s_or_b32 s9, s3, 0x1000
diff --git a/llvm/test/CodeGen/AMDGPU/fptrunc.ll b/llvm/test/CodeGen/AMDGPU/fptrunc.ll
index 5d31177..b6b26a4 100644
--- a/llvm/test/CodeGen/AMDGPU/fptrunc.ll
+++ b/llvm/test/CodeGen/AMDGPU/fptrunc.ll
@@ -2,14 +2,14 @@
; RUN: llc -mtriple=amdgcn < %s | FileCheck -check-prefixes=SI %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga -global-isel=0 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=VI-SDAG,VI-SAFE-SDAG %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga -global-isel=1 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=VI-GISEL,VI-SAFE-GISEL %s
-; RUN: llc -mtriple=amdgcn -mcpu=tonga -global-isel=0 -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=VI-SDAG,VI-UNSAFE-SDAG %s
+; RUN: llc -mtriple=amdgcn -mcpu=tonga -global-isel=0 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=VI-SDAG,VI-UNSAFE-SDAG %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -global-isel=0 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=GFX10-SDAG,GFX10-SAFE-SDAG %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -global-isel=1 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=GFX10-GISEL,GFX10-SAFE-GISEL %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -global-isel=0 -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GFX10-SDAG,GFX10-UNSAFE-SDAG %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -global-isel=0 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=GFX10-SDAG,GFX10-UNSAFE-SDAG %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=0 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=GFX11-SDAG,GFX11-SAFE-SDAG %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=1 -mattr=-flat-for-global < %s | FileCheck -check-prefixes=GFX11-GISEL,GFX11-SAFE-GISEL %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=0 -mattr=-flat-for-global,+real-true16 -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GFX11-SDAG,GFX11-UNSAFE-DAG-TRUE16 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=0 -mattr=-flat-for-global,-real-true16 -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GFX11-SDAG,GFX11-UNSAFE-DAG-FAKE16 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=0 -mattr=-flat-for-global,+real-true16 < %s | FileCheck -check-prefixes=GFX11-SDAG,GFX11-UNSAFE-DAG-TRUE16 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=0 -mattr=-flat-for-global,-real-true16 < %s | FileCheck -check-prefixes=GFX11-SDAG,GFX11-UNSAFE-DAG-FAKE16 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=1 -mattr=-flat-for-global,+real-true16 < %s | FileCheck -check-prefixes=GFX11-GISEL,GFX11-UNSAFE-GISEL-TRUE16 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -global-isel=1 -mattr=-flat-for-global,-real-true16 < %s | FileCheck -check-prefixes=GFX11-GISEL,GFX11-UNSAFE-GISEL-FAKE16 %s

@@ -182,7 +182,6 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
; SI-NEXT: s_and_b32 s1, s7, 0x1ff
; SI-NEXT: s_and_b32 s8, s0, 0xffe
; SI-NEXT: s_or_b32 s0, s1, s6
-; SI-NEXT: s_cmp_lg_u32 s0, 0
; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; SI-NEXT: s_bfe_u32 s0, s7, 0xb0014
@@ -237,7 +236,6 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
; VI-SDAG-NEXT: s_and_b32 s8, s4, 0xffe
; VI-SDAG-NEXT: s_and_b32 s4, s7, 0x1ff
; VI-SDAG-NEXT: s_or_b32 s4, s4, s6
-; VI-SDAG-NEXT: s_cmp_lg_u32 s4, 0
; VI-SDAG-NEXT: s_mov_b32 s1, s5
; VI-SDAG-NEXT: s_cselect_b64 s[4:5], -1, 0
; VI-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
@@ -290,10 +288,8 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
; VI-GISEL-NEXT: s_addk_i32 s4, 0xfc10
; VI-GISEL-NEXT: s_and_b32 s5, s5, 0xffe
; VI-GISEL-NEXT: s_or_b32 s2, s6, s2
-; VI-GISEL-NEXT: s_cmp_lg_u32 s2, 0
; VI-GISEL-NEXT: s_cselect_b32 s2, 1, 0
; VI-GISEL-NEXT: s_or_b32 s2, s5, s2
-; VI-GISEL-NEXT: s_cmp_lg_u32 s2, 0
; VI-GISEL-NEXT: s_cselect_b32 s5, 1, 0
; VI-GISEL-NEXT: s_sub_i32 s7, 1, s4
; VI-GISEL-NEXT: s_lshl_b32 s6, s4, 12
@@ -335,11 +331,10 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
; GFX10-SDAG: ; %bb.0:
; GFX10-SDAG-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX10-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
-; GFX10-SDAG-NEXT: s_lshr_b32 s5, s3, 8
-; GFX10-SDAG-NEXT: s_or_b32 s2, s4, s2
-; GFX10-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
-; GFX10-SDAG-NEXT: s_cmp_lg_u32 s2, 0
+; GFX10-SDAG-NEXT: s_lshr_b32 s4, s3, 8
+; GFX10-SDAG-NEXT: s_and_b32 s5, s3, 0x1ff
+; GFX10-SDAG-NEXT: s_and_b32 s4, s4, 0xffe
+; GFX10-SDAG-NEXT: s_or_b32 s2, s5, s2
; GFX10-SDAG-NEXT: s_cselect_b32 s2, -1, 0
; GFX10-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
; GFX10-SDAG-NEXT: s_bfe_u32 s2, s3, 0xb0014
@@ -387,16 +382,14 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
; GFX10-GISEL: ; %bb.0:
; GFX10-GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GFX10-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-GISEL-NEXT: s_and_b32 s6, s3, 0x1ff
; GFX10-GISEL-NEXT: s_bfe_u32 s4, s3, 0xb0014
; GFX10-GISEL-NEXT: s_lshr_b32 s5, s3, 8
-; GFX10-GISEL-NEXT: s_or_b32 s2, s6, s2
+; GFX10-GISEL-NEXT: s_and_b32 s6, s3, 0x1ff
; GFX10-GISEL-NEXT: s_addk_i32 s4, 0xfc10
; GFX10-GISEL-NEXT: s_and_b32 s5, s5, 0xffe
-; GFX10-GISEL-NEXT: s_cmp_lg_u32 s2, 0
+; GFX10-GISEL-NEXT: s_or_b32 s2, s6, s2
; GFX10-GISEL-NEXT: s_cselect_b32 s2, 1, 0
; GFX10-GISEL-NEXT: s_or_b32 s2, s5, s2
-; GFX10-GISEL-NEXT: s_cmp_lg_u32 s2, 0
; GFX10-GISEL-NEXT: s_cselect_b32 s5, 1, 0
; GFX10-GISEL-NEXT: s_sub_i32 s6, 1, s4
; GFX10-GISEL-NEXT: s_or_b32 s8, s2, 0x1000
@@ -438,11 +431,10 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
; GFX11-SDAG: ; %bb.0:
; GFX11-SDAG-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-SDAG-NEXT: s_and_b32 s4, s3, 0x1ff
-; GFX11-SDAG-NEXT: s_lshr_b32 s5, s3, 8
-; GFX11-SDAG-NEXT: s_or_b32 s2, s4, s2
-; GFX11-SDAG-NEXT: s_and_b32 s4, s5, 0xffe
-; GFX11-SDAG-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-SDAG-NEXT: s_lshr_b32 s4, s3, 8
+; GFX11-SDAG-NEXT: s_and_b32 s5, s3, 0x1ff
+; GFX11-SDAG-NEXT: s_and_b32 s4, s4, 0xffe
+; GFX11-SDAG-NEXT: s_or_b32 s2, s5, s2
; GFX11-SDAG-NEXT: s_cselect_b32 s2, -1, 0
; GFX11-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
@@ -498,17 +490,15 @@ define amdgpu_kernel void @fptrunc_f64_to_f16(ptr addrspace(1) %out, double %in)
; GFX11-GISEL: ; %bb.0:
; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-GISEL-NEXT: s_and_b32 s6, s3, 0x1ff
; GFX11-GISEL-NEXT: s_bfe_u32 s4, s3, 0xb0014
; GFX11-GISEL-NEXT: s_lshr_b32 s5, s3, 8
-; GFX11-GISEL-NEXT: s_or_b32 s2, s6, s2
+; GFX11-GISEL-NEXT: s_and_b32 s6, s3, 0x1ff
; GFX11-GISEL-NEXT: s_addk_i32 s4, 0xfc10
; GFX11-GISEL-NEXT: s_and_b32 s5, s5, 0xffe
-; GFX11-GISEL-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-GISEL-NEXT: s_or_b32 s2, s6, s2
; GFX11-GISEL-NEXT: s_cselect_b32 s2, 1, 0
-; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-GISEL-NEXT: s_or_b32 s2, s5, s2
-; GFX11-GISEL-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-GISEL-NEXT: s_cselect_b32 s5, 1, 0
; GFX11-GISEL-NEXT: s_sub_i32 s6, 1, s4
; GFX11-GISEL-NEXT: s_or_b32 s8, s2, 0x1000
diff --git a/llvm/test/CodeGen/AMDGPU/fract.f64.ll b/llvm/test/CodeGen/AMDGPU/fract.f64.ll
index f09c1c6..cc2e78d 100644
--- a/llvm/test/CodeGen/AMDGPU/fract.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/fract.f64.ll
@@ -2,8 +2,8 @@
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=bonaire < %s | FileCheck --check-prefixes=GCN,CI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck --check-prefixes=GCN,CI,FUNC %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -enable-unsafe-fp-math < %s | FileCheck --check-prefixes=GCN,SI,FUNC %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck --check-prefixes=GCN,CI,FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn < %s | FileCheck --check-prefixes=GCN,SI,FUNC %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck --check-prefixes=GCN,CI,FUNC %s

declare double @llvm.fabs.f64(double) #0
declare double @llvm.floor.f64(double) #0
diff --git a/llvm/test/CodeGen/AMDGPU/fract.ll b/llvm/test/CodeGen/AMDGPU/fract.ll
index 8ef0fcf..723fd93 100644
--- a/llvm/test/CodeGen/AMDGPU/fract.ll
+++ b/llvm/test/CodeGen/AMDGPU/fract.ll
@@ -1,8 +1,8 @@
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn < %s | FileCheck --check-prefix=GCN %s
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=bonaire < %s | FileCheck --check-prefix=GCN %s
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck --check-prefix=GCN %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -enable-unsafe-fp-math < %s | FileCheck --check-prefix=GCN %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck --check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn < %s | FileCheck --check-prefix=GCN %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global < %s | FileCheck --check-prefix=GCN %s

declare float @llvm.fabs.f32(float) #0
declare float @llvm.floor.f32(float) #0
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll
index 37756d1..31f277f 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll
@@ -472,7 +472,6 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX9-NEXT: v_readlane_b32 s4, v0, s2
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: v_add_f32_e32 v2, s4, v2
; GFX9-NEXT: s_cbranch_scc1 .LBB1_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
@@ -536,11 +535,10 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1064-NEXT: .LBB1_1: ; %ComputeLoop
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_ff1_i32_b64 s2, s[0:1]
-; GFX1064-NEXT: v_readlane_b32 s4, v0, s2
+; GFX1064-NEXT: v_readlane_b32 s3, v0, s2
+; GFX1064-NEXT: v_add_f32_e32 v2, s3, v2
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX1064-NEXT: v_add_f32_e32 v2, s4, v2
; GFX1064-NEXT: s_cbranch_scc1 .LBB1_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -606,7 +604,6 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1032-NEXT: v_readlane_b32 s2, v0, s1
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: v_add_f32_e32 v2, s2, v2
; GFX1032-NEXT: s_cbranch_scc1 .LBB1_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
@@ -660,12 +657,11 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164-NEXT: s_ctz_i32_b64 s2, s[0:1]
-; GFX1164-NEXT: v_readlane_b32 s4, v0, s2
+; GFX1164-NEXT: v_readlane_b32 s3, v0, s2
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_add_f32_e32 v1, s3, v1
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s2
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX1164-NEXT: v_add_f32_e32 v1, s4, v1
; GFX1164-NEXT: s_cbranch_scc1 .LBB1_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -710,9 +706,8 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1132-NEXT: s_ctz_i32_b32 s1, s0
; GFX1132-NEXT: v_readlane_b32 s2, v0, s1
; GFX1132-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: v_add_f32_e32 v1, s2, v1
; GFX1132-NEXT: s_cbranch_scc1 .LBB1_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
@@ -1690,7 +1685,6 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX9-NEXT: v_readlane_b32 s4, v0, s2
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: v_add_f32_e32 v2, s4, v2
; GFX9-NEXT: s_cbranch_scc1 .LBB3_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
@@ -1754,11 +1748,10 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX1064-NEXT: .LBB3_1: ; %ComputeLoop
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_ff1_i32_b64 s2, s[0:1]
-; GFX1064-NEXT: v_readlane_b32 s4, v0, s2
+; GFX1064-NEXT: v_readlane_b32 s3, v0, s2
+; GFX1064-NEXT: v_add_f32_e32 v2, s3, v2
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX1064-NEXT: v_add_f32_e32 v2, s4, v2
; GFX1064-NEXT: s_cbranch_scc1 .LBB3_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -1824,7 +1817,6 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX1032-NEXT: v_readlane_b32 s2, v0, s1
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: v_add_f32_e32 v2, s2, v2
; GFX1032-NEXT: s_cbranch_scc1 .LBB3_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
@@ -1878,12 +1870,11 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164-NEXT: s_ctz_i32_b64 s2, s[0:1]
-; GFX1164-NEXT: v_readlane_b32 s4, v0, s2
+; GFX1164-NEXT: v_readlane_b32 s3, v0, s2
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_add_f32_e32 v1, s3, v1
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s2
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX1164-NEXT: v_add_f32_e32 v1, s4, v1
; GFX1164-NEXT: s_cbranch_scc1 .LBB3_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -1928,9 +1919,8 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX1132-NEXT: s_ctz_i32_b32 s1, s0
; GFX1132-NEXT: v_readlane_b32 s2, v0, s1
; GFX1132-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: v_add_f32_e32 v1, s2, v1
; GFX1132-NEXT: s_cbranch_scc1 .LBB3_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
@@ -2968,7 +2958,6 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX9-NEXT: v_readlane_b32 s4, v0, s2
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: v_add_f32_e32 v2, s4, v2
; GFX9-NEXT: s_cbranch_scc1 .LBB5_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
@@ -3032,11 +3021,10 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1064-NEXT: .LBB5_1: ; %ComputeLoop
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_ff1_i32_b64 s2, s[0:1]
-; GFX1064-NEXT: v_readlane_b32 s4, v0, s2
+; GFX1064-NEXT: v_readlane_b32 s3, v0, s2
+; GFX1064-NEXT: v_add_f32_e32 v2, s3, v2
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX1064-NEXT: v_add_f32_e32 v2, s4, v2
; GFX1064-NEXT: s_cbranch_scc1 .LBB5_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -3102,7 +3090,6 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1032-NEXT: v_readlane_b32 s2, v0, s1
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: v_add_f32_e32 v2, s2, v2
; GFX1032-NEXT: s_cbranch_scc1 .LBB5_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
@@ -3156,12 +3143,11 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164-NEXT: s_ctz_i32_b64 s2, s[0:1]
-; GFX1164-NEXT: v_readlane_b32 s4, v0, s2
+; GFX1164-NEXT: v_readlane_b32 s3, v0, s2
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_add_f32_e32 v1, s3, v1
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s2
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX1164-NEXT: v_add_f32_e32 v1, s4, v1
; GFX1164-NEXT: s_cbranch_scc1 .LBB5_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -3206,9 +3192,8 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1132-NEXT: s_ctz_i32_b32 s1, s0
; GFX1132-NEXT: v_readlane_b32 s2, v0, s1
; GFX1132-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: v_add_f32_e32 v1, s2, v1
; GFX1132-NEXT: s_cbranch_scc1 .LBB5_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
@@ -3742,7 +3727,6 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX9-NEXT: v_readlane_b32 s4, v0, s2
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: v_add_f32_e32 v2, s4, v2
; GFX9-NEXT: s_cbranch_scc1 .LBB6_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
@@ -3806,11 +3790,10 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1064-NEXT: .LBB6_1: ; %ComputeLoop
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_ff1_i32_b64 s2, s[0:1]
-; GFX1064-NEXT: v_readlane_b32 s4, v0, s2
+; GFX1064-NEXT: v_readlane_b32 s3, v0, s2
+; GFX1064-NEXT: v_add_f32_e32 v2, s3, v2
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX1064-NEXT: v_add_f32_e32 v2, s4, v2
; GFX1064-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -3876,7 +3859,6 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1032-NEXT: v_readlane_b32 s2, v0, s1
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: v_add_f32_e32 v2, s2, v2
; GFX1032-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
@@ -3930,12 +3912,11 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164-NEXT: s_ctz_i32_b64 s2, s[0:1]
-; GFX1164-NEXT: v_readlane_b32 s4, v0, s2
+; GFX1164-NEXT: v_readlane_b32 s3, v0, s2
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_add_f32_e32 v1, s3, v1
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s2
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX1164-NEXT: v_add_f32_e32 v1, s4, v1
; GFX1164-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -3980,9 +3961,8 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1132-NEXT: s_ctz_i32_b32 s1, s0
; GFX1132-NEXT: v_readlane_b32 s2, v0, s1
; GFX1132-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: v_add_f32_e32 v1, s2, v1
; GFX1132-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
@@ -5019,7 +4999,6 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_default_scop
; GFX9-NEXT: v_readlane_b32 s4, v0, s2
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: v_add_f32_e32 v2, s4, v2
; GFX9-NEXT: s_cbranch_scc1 .LBB8_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
@@ -5083,11 +5062,10 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_default_scop
; GFX1064-NEXT: .LBB8_1: ; %ComputeLoop
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_ff1_i32_b64 s2, s[0:1]
-; GFX1064-NEXT: v_readlane_b32 s4, v0, s2
+; GFX1064-NEXT: v_readlane_b32 s3, v0, s2
+; GFX1064-NEXT: v_add_f32_e32 v2, s3, v2
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX1064-NEXT: v_add_f32_e32 v2, s4, v2
; GFX1064-NEXT: s_cbranch_scc1 .LBB8_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -5153,7 +5131,6 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_default_scop
; GFX1032-NEXT: v_readlane_b32 s2, v0, s1
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: v_add_f32_e32 v2, s2, v2
; GFX1032-NEXT: s_cbranch_scc1 .LBB8_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
@@ -5207,12 +5184,11 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_default_scop
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164-NEXT: s_ctz_i32_b64 s2, s[0:1]
-; GFX1164-NEXT: v_readlane_b32 s4, v0, s2
+; GFX1164-NEXT: v_readlane_b32 s3, v0, s2
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_add_f32_e32 v2, s3, v2
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s2
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX1164-NEXT: v_add_f32_e32 v2, s4, v2
; GFX1164-NEXT: s_cbranch_scc1 .LBB8_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -5270,9 +5246,8 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_default_scop
; GFX1132-NEXT: s_ctz_i32_b32 s1, s0
; GFX1132-NEXT: v_readlane_b32 s2, v0, s1
; GFX1132-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: v_add_f32_e32 v2, s2, v2
; GFX1132-NEXT: s_cbranch_scc1 .LBB8_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
@@ -6284,7 +6259,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: s_cbranch_scc1 .LBB10_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -6354,7 +6328,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1064-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064-NEXT: s_cbranch_scc1 .LBB10_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -6424,7 +6397,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
; GFX1032-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: s_cbranch_scc1 .LBB10_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -6485,8 +6457,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1164-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1164-NEXT: s_cbranch_scc1 .LBB10_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -6550,7 +6520,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
; GFX1132-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: s_cbranch_scc1 .LBB10_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -7717,7 +7686,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: s_cbranch_scc1 .LBB12_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -7787,7 +7755,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX1064-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064-NEXT: s_cbranch_scc1 .LBB12_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -7857,7 +7824,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
; GFX1032-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: s_cbranch_scc1 .LBB12_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -7918,8 +7884,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX1164-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1164-NEXT: s_cbranch_scc1 .LBB12_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -7983,7 +7947,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
; GFX1132-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: s_cbranch_scc1 .LBB12_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -9150,7 +9113,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: s_cbranch_scc1 .LBB14_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -9220,7 +9182,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1064-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064-NEXT: s_cbranch_scc1 .LBB14_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -9290,7 +9251,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
; GFX1032-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: s_cbranch_scc1 .LBB14_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -9351,8 +9311,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1164-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1164-NEXT: s_cbranch_scc1 .LBB14_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -9416,7 +9374,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
; GFX1132-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: s_cbranch_scc1 .LBB14_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -10065,7 +10022,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: s_cbranch_scc1 .LBB15_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -10135,7 +10091,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1064-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064-NEXT: s_cbranch_scc1 .LBB15_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -10205,7 +10160,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
; GFX1032-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: s_cbranch_scc1 .LBB15_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -10266,8 +10220,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1164-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1164-NEXT: s_cbranch_scc1 .LBB15_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -10331,7 +10283,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
; GFX1132-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: s_cbranch_scc1 .LBB15_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -11498,7 +11449,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_defau
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: s_cbranch_scc1 .LBB17_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -11568,7 +11518,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_defau
; GFX1064-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064-NEXT: s_cbranch_scc1 .LBB17_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -11638,7 +11587,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_defau
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
; GFX1032-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: s_cbranch_scc1 .LBB17_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -11699,8 +11647,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_defau
; GFX1164-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1164-NEXT: s_cbranch_scc1 .LBB17_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -11764,7 +11710,6 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_defau
; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
; GFX1132-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: s_cbranch_scc1 .LBB17_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll
index 6351bb3..4581efc 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll
@@ -381,13 +381,12 @@ define amdgpu_kernel void @global_atomic_fmax_uni_address_div_value_agent_scope_
; GFX9-NEXT: .LBB1_1: ; %ComputeLoop
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_ff1_i32_b64 s2, s[0:1]
-; GFX9-NEXT: v_readlane_b32 s4, v0, s2
+; GFX9-NEXT: v_readlane_b32 s3, v0, s2
+; GFX9-NEXT: v_max_f32_e64 v1, s3, s3
+; GFX9-NEXT: v_max_f32_e32 v2, v2, v2
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s2
-; GFX9-NEXT: v_max_f32_e32 v1, v2, v2
-; GFX9-NEXT: v_max_f32_e64 v2, s4, s4
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT: v_max_f32_e32 v2, v1, v2
+; GFX9-NEXT: v_max_f32_e32 v2, v2, v1
; GFX9-NEXT: s_cbranch_scc1 .LBB1_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -457,7 +456,6 @@ define amdgpu_kernel void @global_atomic_fmax_uni_address_div_value_agent_scope_
; GFX1064-NEXT: v_max_f32_e64 v2, s3, s3
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064-NEXT: v_max_f32_e32 v1, v1, v2
; GFX1064-NEXT: s_cbranch_scc1 .LBB1_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
@@ -513,7 +511,6 @@ define amdgpu_kernel void @global_atomic_fmax_uni_address_div_value_agent_scope_
; GFX1032-NEXT: v_readlane_b32 s2, v0, s1
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: v_max_f32_e64 v2, s2, s2
; GFX1032-NEXT: v_max_f32_e32 v1, v1, v2
; GFX1032-NEXT: s_cbranch_scc1 .LBB1_1
@@ -562,8 +559,7 @@ define amdgpu_kernel void @global_atomic_fmax_uni_address_div_value_agent_scope_
; GFX1164-NEXT: v_max_f32_e64 v2, s3, s3
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164-NEXT: v_max_f32_e32 v1, v1, v2
; GFX1164-NEXT: s_cbranch_scc1 .LBB1_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
@@ -610,11 +606,9 @@ define amdgpu_kernel void @global_atomic_fmax_uni_address_div_value_agent_scope_
; GFX1132-NEXT: v_max_f32_e32 v1, v1, v1
; GFX1132-NEXT: v_readlane_b32 s2, v0, s1
; GFX1132-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: v_max_f32_e64 v2, s2, s2
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-NEXT: v_max_f32_e32 v1, v1, v2
; GFX1132-NEXT: s_cbranch_scc1 .LBB1_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
@@ -1420,13 +1414,12 @@ define amdgpu_kernel void @global_atomic_fmax_uni_address_div_value_one_as_scope
; GFX9-NEXT: .LBB3_1: ; %ComputeLoop
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_ff1_i32_b64 s2, s[0:1]
-; GFX9-NEXT: v_readlane_b32 s4, v0, s2
+; GFX9-NEXT: v_readlane_b32 s3, v0, s2
+; GFX9-NEXT: v_max_f32_e64 v1, s3, s3
+; GFX9-NEXT: v_max_f32_e32 v2, v2, v2
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s2
-; GFX9-NEXT: v_max_f32_e32 v1, v2, v2
-; GFX9-NEXT: v_max_f32_e64 v2, s4, s4
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT: v_max_f32_e32 v2, v1, v2
+; GFX9-NEXT: v_max_f32_e32 v2, v2, v1
; GFX9-NEXT: s_cbranch_scc1 .LBB3_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -1496,7 +1489,6 @@ define amdgpu_kernel void @global_atomic_fmax_uni_address_div_value_one_as_scope
; GFX1064-NEXT: v_max_f32_e64 v2, s3, s3
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064-NEXT: v_max_f32_e32 v1, v1, v2
; GFX1064-NEXT: s_cbranch_scc1 .LBB3_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
@@ -1552,7 +1544,6 @@ define amdgpu_kernel void @global_atomic_fmax_uni_address_div_value_one_as_scope
; GFX1032-NEXT: v_readlane_b32 s2, v0, s1
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: v_max_f32_e64 v2, s2, s2
; GFX1032-NEXT: v_max_f32_e32 v1, v1, v2
; GFX1032-NEXT: s_cbranch_scc1 .LBB3_1
@@ -1601,8 +1592,7 @@ define amdgpu_kernel void @global_atomic_fmax_uni_address_div_value_one_as_scope
; GFX1164-NEXT: v_max_f32_e64 v2, s3, s3
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164-NEXT: v_max_f32_e32 v1, v1, v2
; GFX1164-NEXT: s_cbranch_scc1 .LBB3_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
@@ -1649,11 +1639,9 @@ define amdgpu_kernel void @global_atomic_fmax_uni_address_div_value_one_as_scope
; GFX1132-NEXT: v_max_f32_e32 v1, v1, v1
; GFX1132-NEXT: v_readlane_b32 s2, v0, s1
; GFX1132-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: v_max_f32_e64 v2, s2, s2
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-NEXT: v_max_f32_e32 v1, v1, v2
; GFX1132-NEXT: s_cbranch_scc1 .LBB3_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
@@ -2459,13 +2447,12 @@ define amdgpu_kernel void @global_atomic_fmax_uni_address_div_value_default_scop
; GFX9-NEXT: .LBB5_1: ; %ComputeLoop
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_ff1_i32_b64 s2, s[0:1]
-; GFX9-NEXT: v_readlane_b32 s4, v0, s2
+; GFX9-NEXT: v_readlane_b32 s3, v0, s2
+; GFX9-NEXT: v_max_f32_e64 v1, s3, s3
+; GFX9-NEXT: v_max_f32_e32 v2, v2, v2
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s2
-; GFX9-NEXT: v_max_f32_e32 v1, v2, v2
-; GFX9-NEXT: v_max_f32_e64 v2, s4, s4
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT: v_max_f32_e32 v2, v1, v2
+; GFX9-NEXT: v_max_f32_e32 v2, v2, v1
; GFX9-NEXT: s_cbranch_scc1 .LBB5_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -2535,7 +2522,6 @@ define amdgpu_kernel void @global_atomic_fmax_uni_address_div_value_default_scop
; GFX1064-NEXT: v_max_f32_e64 v2, s3, s3
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064-NEXT: v_max_f32_e32 v1, v1, v2
; GFX1064-NEXT: s_cbranch_scc1 .LBB5_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
@@ -2591,7 +2577,6 @@ define amdgpu_kernel void @global_atomic_fmax_uni_address_div_value_default_scop
; GFX1032-NEXT: v_readlane_b32 s2, v0, s1
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: v_max_f32_e64 v2, s2, s2
; GFX1032-NEXT: v_max_f32_e32 v1, v1, v2
; GFX1032-NEXT: s_cbranch_scc1 .LBB5_1
@@ -2640,8 +2625,7 @@ define amdgpu_kernel void @global_atomic_fmax_uni_address_div_value_default_scop
; GFX1164-NEXT: v_max_f32_e64 v2, s3, s3
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164-NEXT: v_max_f32_e32 v1, v1, v2
; GFX1164-NEXT: s_cbranch_scc1 .LBB5_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
@@ -2688,11 +2672,9 @@ define amdgpu_kernel void @global_atomic_fmax_uni_address_div_value_default_scop
; GFX1132-NEXT: v_max_f32_e32 v1, v1, v1
; GFX1132-NEXT: v_readlane_b32 s2, v0, s1
; GFX1132-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: v_max_f32_e64 v2, s2, s2
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-NEXT: v_max_f32_e32 v1, v1, v2
; GFX1132-NEXT: s_cbranch_scc1 .LBB5_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
@@ -3591,7 +3573,6 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_agent
; GFX9-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: v_max_f64 v[4:5], v[2:3], v[4:5]
; GFX9-NEXT: s_cbranch_scc1 .LBB7_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
@@ -3665,7 +3646,6 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_agent
; GFX1064-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064-NEXT: v_max_f64 v[2:3], v[2:3], v[4:5]
; GFX1064-NEXT: s_cbranch_scc1 .LBB7_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
@@ -3724,7 +3704,6 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_agent
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
; GFX1032-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: v_max_f64 v[2:3], v[2:3], v[4:5]
; GFX1032-NEXT: s_cbranch_scc1 .LBB7_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
@@ -3774,8 +3753,7 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_agent
; GFX1164-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164-NEXT: v_max_f64 v[4:5], v[2:3], v[4:5]
; GFX1164-NEXT: s_cbranch_scc1 .LBB7_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
@@ -3841,10 +3819,9 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_agent
; GFX1132-NEXT: v_readlane_b32 s3, v1, s1
; GFX1132-NEXT: v_readlane_b32 s2, v0, s1
; GFX1132-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
; GFX1132-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: v_max_f64 v[4:5], v[2:3], v[4:5]
; GFX1132-NEXT: s_cbranch_scc1 .LBB7_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
@@ -4859,7 +4836,6 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_one_a
; GFX9-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: v_max_f64 v[4:5], v[2:3], v[4:5]
; GFX9-NEXT: s_cbranch_scc1 .LBB9_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
@@ -4933,7 +4909,6 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_one_a
; GFX1064-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064-NEXT: v_max_f64 v[2:3], v[2:3], v[4:5]
; GFX1064-NEXT: s_cbranch_scc1 .LBB9_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
@@ -4992,7 +4967,6 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_one_a
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
; GFX1032-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: v_max_f64 v[2:3], v[2:3], v[4:5]
; GFX1032-NEXT: s_cbranch_scc1 .LBB9_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
@@ -5042,8 +5016,7 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_one_a
; GFX1164-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164-NEXT: v_max_f64 v[4:5], v[2:3], v[4:5]
; GFX1164-NEXT: s_cbranch_scc1 .LBB9_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
@@ -5109,10 +5082,9 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_one_a
; GFX1132-NEXT: v_readlane_b32 s3, v1, s1
; GFX1132-NEXT: v_readlane_b32 s2, v0, s1
; GFX1132-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
; GFX1132-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: v_max_f64 v[4:5], v[2:3], v[4:5]
; GFX1132-NEXT: s_cbranch_scc1 .LBB9_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
@@ -6127,7 +6099,6 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_defau
; GFX9-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: v_max_f64 v[4:5], v[2:3], v[4:5]
; GFX9-NEXT: s_cbranch_scc1 .LBB11_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
@@ -6201,7 +6172,6 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_defau
; GFX1064-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064-NEXT: v_max_f64 v[2:3], v[2:3], v[4:5]
; GFX1064-NEXT: s_cbranch_scc1 .LBB11_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
@@ -6260,7 +6230,6 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_defau
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
; GFX1032-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: v_max_f64 v[2:3], v[2:3], v[4:5]
; GFX1032-NEXT: s_cbranch_scc1 .LBB11_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
@@ -6310,8 +6279,7 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_defau
; GFX1164-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164-NEXT: v_max_f64 v[4:5], v[2:3], v[4:5]
; GFX1164-NEXT: s_cbranch_scc1 .LBB11_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
@@ -6377,10 +6345,9 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_defau
; GFX1132-NEXT: v_readlane_b32 s3, v1, s1
; GFX1132-NEXT: v_readlane_b32 s2, v0, s1
; GFX1132-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
; GFX1132-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: v_max_f64 v[4:5], v[2:3], v[4:5]
; GFX1132-NEXT: s_cbranch_scc1 .LBB11_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll
index a9ac008..bd570d9 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll
@@ -381,13 +381,12 @@ define amdgpu_kernel void @global_atomic_fmin_uni_address_div_value_agent_scope_
; GFX9-NEXT: .LBB1_1: ; %ComputeLoop
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_ff1_i32_b64 s2, s[0:1]
-; GFX9-NEXT: v_readlane_b32 s4, v0, s2
+; GFX9-NEXT: v_readlane_b32 s3, v0, s2
+; GFX9-NEXT: v_max_f32_e64 v1, s3, s3
+; GFX9-NEXT: v_max_f32_e32 v2, v2, v2
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s2
-; GFX9-NEXT: v_max_f32_e32 v1, v2, v2
-; GFX9-NEXT: v_max_f32_e64 v2, s4, s4
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT: v_min_f32_e32 v2, v1, v2
+; GFX9-NEXT: v_min_f32_e32 v2, v2, v1
; GFX9-NEXT: s_cbranch_scc1 .LBB1_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -457,7 +456,6 @@ define amdgpu_kernel void @global_atomic_fmin_uni_address_div_value_agent_scope_
; GFX1064-NEXT: v_max_f32_e64 v2, s3, s3
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064-NEXT: v_min_f32_e32 v1, v1, v2
; GFX1064-NEXT: s_cbranch_scc1 .LBB1_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
@@ -513,7 +511,6 @@ define amdgpu_kernel void @global_atomic_fmin_uni_address_div_value_agent_scope_
; GFX1032-NEXT: v_readlane_b32 s2, v0, s1
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: v_max_f32_e64 v2, s2, s2
; GFX1032-NEXT: v_min_f32_e32 v1, v1, v2
; GFX1032-NEXT: s_cbranch_scc1 .LBB1_1
@@ -562,8 +559,7 @@ define amdgpu_kernel void @global_atomic_fmin_uni_address_div_value_agent_scope_
; GFX1164-NEXT: v_max_f32_e64 v2, s3, s3
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164-NEXT: v_min_f32_e32 v1, v1, v2
; GFX1164-NEXT: s_cbranch_scc1 .LBB1_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
@@ -610,11 +606,9 @@ define amdgpu_kernel void @global_atomic_fmin_uni_address_div_value_agent_scope_
; GFX1132-NEXT: v_max_f32_e32 v1, v1, v1
; GFX1132-NEXT: v_readlane_b32 s2, v0, s1
; GFX1132-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: v_max_f32_e64 v2, s2, s2
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-NEXT: v_min_f32_e32 v1, v1, v2
; GFX1132-NEXT: s_cbranch_scc1 .LBB1_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
@@ -1420,13 +1414,12 @@ define amdgpu_kernel void @global_atomic_fmin_uni_address_div_value_one_as_scope
; GFX9-NEXT: .LBB3_1: ; %ComputeLoop
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_ff1_i32_b64 s2, s[0:1]
-; GFX9-NEXT: v_readlane_b32 s4, v0, s2
+; GFX9-NEXT: v_readlane_b32 s3, v0, s2
+; GFX9-NEXT: v_max_f32_e64 v1, s3, s3
+; GFX9-NEXT: v_max_f32_e32 v2, v2, v2
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s2
-; GFX9-NEXT: v_max_f32_e32 v1, v2, v2
-; GFX9-NEXT: v_max_f32_e64 v2, s4, s4
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT: v_min_f32_e32 v2, v1, v2
+; GFX9-NEXT: v_min_f32_e32 v2, v2, v1
; GFX9-NEXT: s_cbranch_scc1 .LBB3_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -1496,7 +1489,6 @@ define amdgpu_kernel void @global_atomic_fmin_uni_address_div_value_one_as_scope
; GFX1064-NEXT: v_max_f32_e64 v2, s3, s3
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064-NEXT: v_min_f32_e32 v1, v1, v2
; GFX1064-NEXT: s_cbranch_scc1 .LBB3_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
@@ -1552,7 +1544,6 @@ define amdgpu_kernel void @global_atomic_fmin_uni_address_div_value_one_as_scope
; GFX1032-NEXT: v_readlane_b32 s2, v0, s1
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: v_max_f32_e64 v2, s2, s2
; GFX1032-NEXT: v_min_f32_e32 v1, v1, v2
; GFX1032-NEXT: s_cbranch_scc1 .LBB3_1
@@ -1601,8 +1592,7 @@ define amdgpu_kernel void @global_atomic_fmin_uni_address_div_value_one_as_scope
; GFX1164-NEXT: v_max_f32_e64 v2, s3, s3
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164-NEXT: v_min_f32_e32 v1, v1, v2
; GFX1164-NEXT: s_cbranch_scc1 .LBB3_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
@@ -1649,11 +1639,9 @@ define amdgpu_kernel void @global_atomic_fmin_uni_address_div_value_one_as_scope
; GFX1132-NEXT: v_max_f32_e32 v1, v1, v1
; GFX1132-NEXT: v_readlane_b32 s2, v0, s1
; GFX1132-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: v_max_f32_e64 v2, s2, s2
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-NEXT: v_min_f32_e32 v1, v1, v2
; GFX1132-NEXT: s_cbranch_scc1 .LBB3_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
@@ -2459,13 +2447,12 @@ define amdgpu_kernel void @global_atomic_fmin_uni_address_div_value_default_scop
; GFX9-NEXT: .LBB5_1: ; %ComputeLoop
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_ff1_i32_b64 s2, s[0:1]
-; GFX9-NEXT: v_readlane_b32 s4, v0, s2
+; GFX9-NEXT: v_readlane_b32 s3, v0, s2
+; GFX9-NEXT: v_max_f32_e64 v1, s3, s3
+; GFX9-NEXT: v_max_f32_e32 v2, v2, v2
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s2
-; GFX9-NEXT: v_max_f32_e32 v1, v2, v2
-; GFX9-NEXT: v_max_f32_e64 v2, s4, s4
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT: v_min_f32_e32 v2, v1, v2
+; GFX9-NEXT: v_min_f32_e32 v2, v2, v1
; GFX9-NEXT: s_cbranch_scc1 .LBB5_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -2535,7 +2522,6 @@ define amdgpu_kernel void @global_atomic_fmin_uni_address_div_value_default_scop
; GFX1064-NEXT: v_max_f32_e64 v2, s3, s3
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064-NEXT: v_min_f32_e32 v1, v1, v2
; GFX1064-NEXT: s_cbranch_scc1 .LBB5_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
@@ -2591,7 +2577,6 @@ define amdgpu_kernel void @global_atomic_fmin_uni_address_div_value_default_scop
; GFX1032-NEXT: v_readlane_b32 s2, v0, s1
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: v_max_f32_e64 v2, s2, s2
; GFX1032-NEXT: v_min_f32_e32 v1, v1, v2
; GFX1032-NEXT: s_cbranch_scc1 .LBB5_1
@@ -2640,8 +2625,7 @@ define amdgpu_kernel void @global_atomic_fmin_uni_address_div_value_default_scop
; GFX1164-NEXT: v_max_f32_e64 v2, s3, s3
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164-NEXT: v_min_f32_e32 v1, v1, v2
; GFX1164-NEXT: s_cbranch_scc1 .LBB5_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
@@ -2688,11 +2672,9 @@ define amdgpu_kernel void @global_atomic_fmin_uni_address_div_value_default_scop
; GFX1132-NEXT: v_max_f32_e32 v1, v1, v1
; GFX1132-NEXT: v_readlane_b32 s2, v0, s1
; GFX1132-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: v_max_f32_e64 v2, s2, s2
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-NEXT: v_min_f32_e32 v1, v1, v2
; GFX1132-NEXT: s_cbranch_scc1 .LBB5_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
@@ -3591,7 +3573,6 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_agent
; GFX9-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: v_min_f64 v[4:5], v[2:3], v[4:5]
; GFX9-NEXT: s_cbranch_scc1 .LBB7_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
@@ -3665,7 +3646,6 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_agent
; GFX1064-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064-NEXT: v_min_f64 v[2:3], v[2:3], v[4:5]
; GFX1064-NEXT: s_cbranch_scc1 .LBB7_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
@@ -3724,7 +3704,6 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_agent
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
; GFX1032-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: v_min_f64 v[2:3], v[2:3], v[4:5]
; GFX1032-NEXT: s_cbranch_scc1 .LBB7_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
@@ -3774,8 +3753,7 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_agent
; GFX1164-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164-NEXT: v_min_f64 v[4:5], v[2:3], v[4:5]
; GFX1164-NEXT: s_cbranch_scc1 .LBB7_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
@@ -3841,10 +3819,9 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_agent
; GFX1132-NEXT: v_readlane_b32 s3, v1, s1
; GFX1132-NEXT: v_readlane_b32 s2, v0, s1
; GFX1132-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
; GFX1132-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: v_min_f64 v[4:5], v[2:3], v[4:5]
; GFX1132-NEXT: s_cbranch_scc1 .LBB7_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
@@ -4859,7 +4836,6 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_one_a
; GFX9-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: v_min_f64 v[4:5], v[2:3], v[4:5]
; GFX9-NEXT: s_cbranch_scc1 .LBB9_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
@@ -4933,7 +4909,6 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_one_a
; GFX1064-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064-NEXT: v_min_f64 v[2:3], v[2:3], v[4:5]
; GFX1064-NEXT: s_cbranch_scc1 .LBB9_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
@@ -4992,7 +4967,6 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_one_a
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
; GFX1032-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: v_min_f64 v[2:3], v[2:3], v[4:5]
; GFX1032-NEXT: s_cbranch_scc1 .LBB9_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
@@ -5042,8 +5016,7 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_one_a
; GFX1164-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164-NEXT: v_min_f64 v[4:5], v[2:3], v[4:5]
; GFX1164-NEXT: s_cbranch_scc1 .LBB9_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
@@ -5109,10 +5082,9 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_one_a
; GFX1132-NEXT: v_readlane_b32 s3, v1, s1
; GFX1132-NEXT: v_readlane_b32 s2, v0, s1
; GFX1132-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
; GFX1132-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: v_min_f64 v[4:5], v[2:3], v[4:5]
; GFX1132-NEXT: s_cbranch_scc1 .LBB9_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
@@ -6127,7 +6099,6 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_defau
; GFX9-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: v_min_f64 v[4:5], v[2:3], v[4:5]
; GFX9-NEXT: s_cbranch_scc1 .LBB11_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
@@ -6201,7 +6172,6 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_defau
; GFX1064-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064-NEXT: v_min_f64 v[2:3], v[2:3], v[4:5]
; GFX1064-NEXT: s_cbranch_scc1 .LBB11_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
@@ -6260,7 +6230,6 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_defau
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
; GFX1032-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: v_min_f64 v[2:3], v[2:3], v[4:5]
; GFX1032-NEXT: s_cbranch_scc1 .LBB11_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
@@ -6310,8 +6279,7 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_defau
; GFX1164-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164-NEXT: v_min_f64 v[4:5], v[2:3], v[4:5]
; GFX1164-NEXT: s_cbranch_scc1 .LBB11_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
@@ -6377,10 +6345,9 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_defau
; GFX1132-NEXT: v_readlane_b32 s3, v1, s1
; GFX1132-NEXT: v_readlane_b32 s2, v0, s1
; GFX1132-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
; GFX1132-NEXT: v_max_f64 v[4:5], s[2:3], s[2:3]
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: v_min_f64 v[4:5], v[2:3], v[4:5]
; GFX1132-NEXT: s_cbranch_scc1 .LBB11_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll
index 6311143..1f2d70c 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll
@@ -532,7 +532,6 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX9-NEXT: v_readlane_b32 s4, v0, s2
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: v_add_f32_e32 v2, s4, v2
; GFX9-NEXT: s_cbranch_scc1 .LBB1_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
@@ -596,11 +595,10 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1064-NEXT: .LBB1_1: ; %ComputeLoop
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_ff1_i32_b64 s2, s[0:1]
-; GFX1064-NEXT: v_readlane_b32 s4, v0, s2
+; GFX1064-NEXT: v_readlane_b32 s3, v0, s2
+; GFX1064-NEXT: v_add_f32_e32 v2, s3, v2
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX1064-NEXT: v_add_f32_e32 v2, s4, v2
; GFX1064-NEXT: s_cbranch_scc1 .LBB1_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -666,7 +664,6 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1032-NEXT: v_readlane_b32 s2, v0, s1
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: v_add_f32_e32 v2, s2, v2
; GFX1032-NEXT: s_cbranch_scc1 .LBB1_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
@@ -720,12 +717,11 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164-NEXT: s_ctz_i32_b64 s2, s[0:1]
-; GFX1164-NEXT: v_readlane_b32 s4, v0, s2
+; GFX1164-NEXT: v_readlane_b32 s3, v0, s2
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_add_f32_e32 v2, s3, v2
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s2
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX1164-NEXT: v_add_f32_e32 v2, s4, v2
; GFX1164-NEXT: s_cbranch_scc1 .LBB1_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -783,9 +779,8 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1132-NEXT: s_ctz_i32_b32 s1, s0
; GFX1132-NEXT: v_readlane_b32 s2, v0, s1
; GFX1132-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: v_add_f32_e32 v2, s2, v2
; GFX1132-NEXT: s_cbranch_scc1 .LBB1_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
@@ -1862,7 +1857,6 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX9-NEXT: v_readlane_b32 s4, v0, s2
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: v_add_f32_e32 v2, s4, v2
; GFX9-NEXT: s_cbranch_scc1 .LBB3_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
@@ -1926,11 +1920,10 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX1064-NEXT: .LBB3_1: ; %ComputeLoop
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_ff1_i32_b64 s2, s[0:1]
-; GFX1064-NEXT: v_readlane_b32 s4, v0, s2
+; GFX1064-NEXT: v_readlane_b32 s3, v0, s2
+; GFX1064-NEXT: v_add_f32_e32 v2, s3, v2
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX1064-NEXT: v_add_f32_e32 v2, s4, v2
; GFX1064-NEXT: s_cbranch_scc1 .LBB3_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -1996,7 +1989,6 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX1032-NEXT: v_readlane_b32 s2, v0, s1
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: v_add_f32_e32 v2, s2, v2
; GFX1032-NEXT: s_cbranch_scc1 .LBB3_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
@@ -2050,12 +2042,11 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164-NEXT: s_ctz_i32_b64 s2, s[0:1]
-; GFX1164-NEXT: v_readlane_b32 s4, v0, s2
+; GFX1164-NEXT: v_readlane_b32 s3, v0, s2
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_add_f32_e32 v2, s3, v2
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s2
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX1164-NEXT: v_add_f32_e32 v2, s4, v2
; GFX1164-NEXT: s_cbranch_scc1 .LBB3_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -2113,9 +2104,8 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX1132-NEXT: s_ctz_i32_b32 s1, s0
; GFX1132-NEXT: v_readlane_b32 s2, v0, s1
; GFX1132-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: v_add_f32_e32 v2, s2, v2
; GFX1132-NEXT: s_cbranch_scc1 .LBB3_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
@@ -3192,7 +3182,6 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX9-NEXT: v_readlane_b32 s4, v0, s2
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: v_add_f32_e32 v2, s4, v2
; GFX9-NEXT: s_cbranch_scc1 .LBB5_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
@@ -3256,11 +3245,10 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1064-NEXT: .LBB5_1: ; %ComputeLoop
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_ff1_i32_b64 s2, s[0:1]
-; GFX1064-NEXT: v_readlane_b32 s4, v0, s2
+; GFX1064-NEXT: v_readlane_b32 s3, v0, s2
+; GFX1064-NEXT: v_add_f32_e32 v2, s3, v2
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX1064-NEXT: v_add_f32_e32 v2, s4, v2
; GFX1064-NEXT: s_cbranch_scc1 .LBB5_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -3326,7 +3314,6 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1032-NEXT: v_readlane_b32 s2, v0, s1
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: v_add_f32_e32 v2, s2, v2
; GFX1032-NEXT: s_cbranch_scc1 .LBB5_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
@@ -3380,12 +3367,11 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164-NEXT: s_ctz_i32_b64 s2, s[0:1]
-; GFX1164-NEXT: v_readlane_b32 s4, v0, s2
+; GFX1164-NEXT: v_readlane_b32 s3, v0, s2
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_add_f32_e32 v2, s3, v2
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s2
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX1164-NEXT: v_add_f32_e32 v2, s4, v2
; GFX1164-NEXT: s_cbranch_scc1 .LBB5_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -3443,9 +3429,8 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1132-NEXT: s_ctz_i32_b32 s1, s0
; GFX1132-NEXT: v_readlane_b32 s2, v0, s1
; GFX1132-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: v_add_f32_e32 v2, s2, v2
; GFX1132-NEXT: s_cbranch_scc1 .LBB5_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
@@ -4018,7 +4003,6 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX9-NEXT: v_readlane_b32 s4, v0, s2
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: v_add_f32_e32 v2, s4, v2
; GFX9-NEXT: s_cbranch_scc1 .LBB6_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
@@ -4082,11 +4066,10 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1064-NEXT: .LBB6_1: ; %ComputeLoop
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_ff1_i32_b64 s2, s[0:1]
-; GFX1064-NEXT: v_readlane_b32 s4, v0, s2
+; GFX1064-NEXT: v_readlane_b32 s3, v0, s2
+; GFX1064-NEXT: v_add_f32_e32 v2, s3, v2
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX1064-NEXT: v_add_f32_e32 v2, s4, v2
; GFX1064-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -4152,7 +4135,6 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1032-NEXT: v_readlane_b32 s2, v0, s1
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: v_add_f32_e32 v2, s2, v2
; GFX1032-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
@@ -4206,12 +4188,11 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164-NEXT: s_ctz_i32_b64 s2, s[0:1]
-; GFX1164-NEXT: v_readlane_b32 s4, v0, s2
+; GFX1164-NEXT: v_readlane_b32 s3, v0, s2
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_add_f32_e32 v2, s3, v2
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s2
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX1164-NEXT: v_add_f32_e32 v2, s4, v2
; GFX1164-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -4269,9 +4250,8 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1132-NEXT: s_ctz_i32_b32 s1, s0
; GFX1132-NEXT: v_readlane_b32 s2, v0, s1
; GFX1132-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: v_add_f32_e32 v2, s2, v2
; GFX1132-NEXT: s_cbranch_scc1 .LBB6_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
@@ -5347,7 +5327,6 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_default_scop
; GFX9-NEXT: v_readlane_b32 s4, v0, s2
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: v_add_f32_e32 v2, s4, v2
; GFX9-NEXT: s_cbranch_scc1 .LBB8_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
@@ -5411,11 +5390,10 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_default_scop
; GFX1064-NEXT: .LBB8_1: ; %ComputeLoop
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_ff1_i32_b64 s2, s[0:1]
-; GFX1064-NEXT: v_readlane_b32 s4, v0, s2
+; GFX1064-NEXT: v_readlane_b32 s3, v0, s2
+; GFX1064-NEXT: v_add_f32_e32 v2, s3, v2
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s2
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX1064-NEXT: v_add_f32_e32 v2, s4, v2
; GFX1064-NEXT: s_cbranch_scc1 .LBB8_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -5481,7 +5459,6 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_default_scop
; GFX1032-NEXT: v_readlane_b32 s2, v0, s1
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: v_add_f32_e32 v2, s2, v2
; GFX1032-NEXT: s_cbranch_scc1 .LBB8_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
@@ -5535,12 +5512,11 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_default_scop
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164-NEXT: s_ctz_i32_b64 s2, s[0:1]
-; GFX1164-NEXT: v_readlane_b32 s4, v0, s2
+; GFX1164-NEXT: v_readlane_b32 s3, v0, s2
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1164-NEXT: v_add_f32_e32 v2, s3, v2
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s2
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX1164-NEXT: v_add_f32_e32 v2, s4, v2
; GFX1164-NEXT: s_cbranch_scc1 .LBB8_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -5598,9 +5574,8 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_default_scop
; GFX1132-NEXT: s_ctz_i32_b32 s1, s0
; GFX1132-NEXT: v_readlane_b32 s2, v0, s1
; GFX1132-NEXT: s_lshl_b32 s1, 1, s1
-; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: v_add_f32_e32 v2, s2, v2
; GFX1132-NEXT: s_cbranch_scc1 .LBB8_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
@@ -6612,7 +6587,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: s_cbranch_scc1 .LBB10_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -6682,7 +6656,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1064-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064-NEXT: s_cbranch_scc1 .LBB10_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -6752,7 +6725,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
; GFX1032-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: s_cbranch_scc1 .LBB10_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -6813,8 +6785,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1164-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1164-NEXT: s_cbranch_scc1 .LBB10_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -6878,7 +6848,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
; GFX1132-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: s_cbranch_scc1 .LBB10_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -8044,7 +8013,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: s_cbranch_scc1 .LBB12_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -8114,7 +8082,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX1064-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064-NEXT: s_cbranch_scc1 .LBB12_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -8184,7 +8151,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
; GFX1032-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: s_cbranch_scc1 .LBB12_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -8245,8 +8211,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX1164-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1164-NEXT: s_cbranch_scc1 .LBB12_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -8310,7 +8274,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
; GFX1132-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: s_cbranch_scc1 .LBB12_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -9477,7 +9440,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: s_cbranch_scc1 .LBB14_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -9547,7 +9509,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1064-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064-NEXT: s_cbranch_scc1 .LBB14_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -9617,7 +9578,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
; GFX1032-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: s_cbranch_scc1 .LBB14_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -9678,8 +9638,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1164-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1164-NEXT: s_cbranch_scc1 .LBB14_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -9743,7 +9701,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
; GFX1132-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: s_cbranch_scc1 .LBB14_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -10392,7 +10349,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: s_cbranch_scc1 .LBB15_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -10462,7 +10418,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1064-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064-NEXT: s_cbranch_scc1 .LBB15_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -10532,7 +10487,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
; GFX1032-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: s_cbranch_scc1 .LBB15_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -10593,8 +10547,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1164-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1164-NEXT: s_cbranch_scc1 .LBB15_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -10658,7 +10610,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
; GFX1132-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: s_cbranch_scc1 .LBB15_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -11824,7 +11775,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_defau
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX9-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX9-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: s_cbranch_scc1 .LBB17_1
; GFX9-NEXT: ; %bb.2: ; %ComputeEnd
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -11894,7 +11844,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_defau
; GFX1064-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX1064-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1064-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1064-NEXT: s_cbranch_scc1 .LBB17_1
; GFX1064-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -11964,7 +11913,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_defau
; GFX1032-NEXT: s_lshl_b32 s1, 1, s1
; GFX1032-NEXT: s_andn2_b32 s0, s0, s1
; GFX1032-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
-; GFX1032-NEXT: s_cmp_lg_u32 s0, 0
; GFX1032-NEXT: s_cbranch_scc1 .LBB17_1
; GFX1032-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -12025,8 +11973,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_defau
; GFX1164-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
; GFX1164-NEXT: s_lshl_b64 s[2:3], 1, s4
; GFX1164-NEXT: s_and_not1_b64 s[0:1], s[0:1], s[2:3]
-; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1164-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX1164-NEXT: s_cbranch_scc1 .LBB17_1
; GFX1164-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
@@ -12090,7 +12036,6 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_defau
; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX1132-NEXT: s_and_not1_b32 s0, s0, s1
; GFX1132-NEXT: v_add_f64 v[4:5], v[4:5], s[2:3]
-; GFX1132-NEXT: s_cmp_lg_u32 s0, 0
; GFX1132-NEXT: s_cbranch_scc1 .LBB17_1
; GFX1132-NEXT: ; %bb.2: ; %ComputeEnd
; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
diff --git a/llvm/test/CodeGen/AMDGPU/inline-attr.ll b/llvm/test/CodeGen/AMDGPU/inline-attr.ll
index 4ae0ba0..4e93eca 100644
--- a/llvm/test/CodeGen/AMDGPU/inline-attr.ll
+++ b/llvm/test/CodeGen/AMDGPU/inline-attr.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 5
-; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -O3 -enable-unsafe-fp-math %s | FileCheck --check-prefixes=GCN,UNSAFE %s
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -O3 %s | FileCheck --check-prefixes=GCN,UNSAFE %s
; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -O3 -enable-no-nans-fp-math %s | FileCheck --check-prefixes=GCN,NONANS %s
; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -O3 -enable-no-infs-fp-math %s | FileCheck --check-prefixes=GCN,NOINFS %s
diff --git a/llvm/test/CodeGen/AMDGPU/insert-delay-alu-bug.ll b/llvm/test/CodeGen/AMDGPU/insert-delay-alu-bug.ll
index eee232a..c3f3917 100644
--- a/llvm/test/CodeGen/AMDGPU/insert-delay-alu-bug.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert-delay-alu-bug.ll
@@ -136,19 +136,17 @@ define amdgpu_kernel void @f2(i32 %arg, i32 %arg1, i32 %arg2, i1 %arg3, i32 %arg
; GFX11-NEXT: .LBB2_6: ; %bb18
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_and_b32 s1, 0xffff, s1
-; GFX11-NEXT: v_readfirstlane_b32 s13, v0
-; GFX11-NEXT: s_cmp_lg_u32 s1, 0
-; GFX11-NEXT: s_cselect_b32 s1, -1, 0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v2, 0, 1, s1
-; GFX11-NEXT: s_and_b32 s1, s8, s1
-; GFX11-NEXT: s_and_b32 s1, s1, exec_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: s_cselect_b32 s13, -1, 0
+; GFX11-NEXT: v_readfirstlane_b32 s1, v0
+; GFX11-NEXT: v_cndmask_b32_e64 v2, 0, 1, s13
+; GFX11-NEXT: s_and_b32 s13, s8, s13
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: s_and_b32 s13, s13, exec_lo
; GFX11-NEXT: v_readfirstlane_b32 s19, v2
-; GFX11-NEXT: s_cselect_b32 s1, s19, s13
-; GFX11-NEXT: s_and_b32 s13, 0xffff, s0
+; GFX11-NEXT: s_cselect_b32 s1, s19, s1
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_b32 s1, s1, 1
-; GFX11-NEXT: s_cmp_lg_u32 s13, 0
+; GFX11-NEXT: s_and_b32 s13, 0xffff, s0
; GFX11-NEXT: s_cselect_b32 s13, -1, 0
; GFX11-NEXT: s_and_b32 s20, s9, exec_lo
; GFX11-NEXT: v_cndmask_b32_e64 v2, 0, 1, s13
diff --git a/llvm/test/CodeGen/AMDGPU/lds-dma-workgroup-release.ll b/llvm/test/CodeGen/AMDGPU/lds-dma-workgroup-release.ll
index b91963f..d23509b 100644
--- a/llvm/test/CodeGen/AMDGPU/lds-dma-workgroup-release.ll
+++ b/llvm/test/CodeGen/AMDGPU/lds-dma-workgroup-release.ll
@@ -150,7 +150,6 @@ define amdgpu_kernel void @barrier_release(<4 x i32> inreg %rsrc,
; GFX10CU-NEXT: buffer_load_dword v0, s[8:11], 0 offen lds
; GFX10CU-NEXT: v_mov_b32_e32 v0, s13
; GFX10CU-NEXT: s_waitcnt vmcnt(0)
-; GFX10CU-NEXT: s_waitcnt_depctr 0xffe3
; GFX10CU-NEXT: s_barrier
; GFX10CU-NEXT: ds_read_b32 v0, v0
; GFX10CU-NEXT: s_waitcnt lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll
index e8b8d05..e8eccb0 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.update.dpp.ll
@@ -147,14 +147,13 @@ define weak_odr amdgpu_kernel void @dpp_test1(ptr %arg) local_unnamed_addr {
; GFX8-OPT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX8-OPT-NEXT: v_mov_b32_e32 v2, 0
; GFX8-OPT-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-OPT-NEXT: s_barrier
-; GFX8-OPT-NEXT: v_add_u32_e32 v1, vcc, v1, v1
-; GFX8-OPT-NEXT: s_nop 1
-; GFX8-OPT-NEXT: v_mov_b32_dpp v2, v1 quad_perm:[1,0,3,2] row_mask:0xf bank_mask:0xf
-; GFX8-OPT-NEXT: v_add_u32_e32 v2, vcc, v2, v1
-; GFX8-OPT-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-OPT-NEXT: v_add_u32_e32 v4, vcc, v1, v1
+; GFX8-OPT-NEXT: v_mov_b32_e32 v3, s1
; GFX8-OPT-NEXT: v_add_u32_e32 v0, vcc, s0, v0
-; GFX8-OPT-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-OPT-NEXT: v_mov_b32_dpp v2, v4 quad_perm:[1,0,3,2] row_mask:0xf bank_mask:0xf
+; GFX8-OPT-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GFX8-OPT-NEXT: v_add_u32_e32 v2, vcc, v2, v4
+; GFX8-OPT-NEXT: s_barrier
; GFX8-OPT-NEXT: flat_store_dword v[0:1], v2
; GFX8-OPT-NEXT: s_endpgm
;
@@ -194,14 +193,14 @@ define weak_odr amdgpu_kernel void @dpp_test1(ptr %arg) local_unnamed_addr {
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: ds_read_b32 v1, v0
-; GFX10-NEXT: s_barrier
-; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: v_add_co_u32 v0, s0, s0, v0
-; GFX10-NEXT: v_add_nc_u32_e32 v1, v1, v1
-; GFX10-NEXT: v_mov_b32_dpp v2, v1 quad_perm:[1,0,3,2] row_mask:0xf bank_mask:0xf
-; GFX10-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX10-NEXT: v_add_nc_u32_e32 v3, v1, v1
; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s0, s1, 0, s0
+; GFX10-NEXT: v_mov_b32_dpp v2, v3 quad_perm:[1,0,3,2] row_mask:0xf bank_mask:0xf
+; GFX10-NEXT: v_add_nc_u32_e32 v2, v2, v3
+; GFX10-NEXT: s_barrier
+; GFX10-NEXT: buffer_gl0_inv
; GFX10-NEXT: flat_store_dword v[0:1], v2
; GFX10-NEXT: s_endpgm
;
@@ -213,15 +212,15 @@ define weak_odr amdgpu_kernel void @dpp_test1(ptr %arg) local_unnamed_addr {
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_and_b32_e32 v0, 0xffc, v0
; GFX11-NEXT: ds_load_b32 v1, v0
-; GFX11-NEXT: s_barrier
-; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_add_co_u32 v0, s0, s0, v0
-; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v1
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mov_b32_dpp v2, v1 quad_perm:[1,0,3,2] row_mask:0xf bank_mask:0xf
-; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v1
+; GFX11-NEXT: v_add_nc_u32_e32 v3, v1, v1
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s1, 0, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_dpp v2, v3 quad_perm:[1,0,3,2] row_mask:0xf bank_mask:0xf
+; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v3
+; GFX11-NEXT: s_barrier
+; GFX11-NEXT: buffer_gl0_inv
; GFX11-NEXT: flat_store_b32 v[0:1], v2
; GFX11-NEXT: s_endpgm
bb:
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
index 8748aff..6dc9199 100644
--- a/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-atomicrmw-fadd.ll
@@ -8265,12 +8265,10 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_readlane_b32 s6, v1, s3
-; GFX12-NEXT: s_lshl_b32 s7, 1, s3
; GFX12-NEXT: v_writelane_b32 v0, s0, s3
+; GFX12-NEXT: s_lshl_b32 s3, 1, s3
; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 s1, s1, s7
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_cmp_lg_u32 s1, 0
+; GFX12-NEXT: s_and_not1_b32 s1, s1, s3
; GFX12-NEXT: s_add_f32 s0, s0, s6
; GFX12-NEXT: s_cbranch_scc1 .LBB28_5
; GFX12-NEXT: ; %bb.6: ; %ComputeEnd
@@ -8351,14 +8349,13 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX942-NEXT: .LBB28_5: ; %ComputeLoop
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_ff1_i32_b64 s3, s[0:1]
-; GFX942-NEXT: s_lshl_b64 s[6:7], 1, s3
-; GFX942-NEXT: v_readfirstlane_b32 s8, v1
-; GFX942-NEXT: v_readlane_b32 s9, v2, s3
+; GFX942-NEXT: v_readfirstlane_b32 s6, v1
; GFX942-NEXT: s_mov_b32 m0, s3
+; GFX942-NEXT: v_readlane_b32 s8, v2, s3
+; GFX942-NEXT: v_writelane_b32 v0, s6, m0
+; GFX942-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX942-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX942-NEXT: v_writelane_b32 v0, s8, m0
-; GFX942-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX942-NEXT: v_add_f32_e32 v1, s9, v1
+; GFX942-NEXT: v_add_f32_e32 v1, s8, v1
; GFX942-NEXT: s_cbranch_scc1 .LBB28_5
; GFX942-NEXT: ; %bb.6: ; %ComputeEnd
; GFX942-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -8440,15 +8437,14 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX11-NEXT: .LBB28_5: ; %ComputeLoop
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_ctz_i32_b32 s1, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_readfirstlane_b32 s3, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_readlane_b32 s6, v2, s1
-; GFX11-NEXT: s_lshl_b32 s7, 1, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 s0, s0, s7
; GFX11-NEXT: v_writelane_b32 v0, s3, s1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_add_f32_e32 v1, s6, v1
-; GFX11-NEXT: s_cmp_lg_u32 s0, 0
+; GFX11-NEXT: s_lshl_b32 s1, 1, s1
+; GFX11-NEXT: s_and_not1_b32 s0, s0, s1
; GFX11-NEXT: s_cbranch_scc1 .LBB28_5
; GFX11-NEXT: ; %bb.6: ; %ComputeEnd
; GFX11-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -8528,11 +8524,10 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX10-NEXT: s_ff1_i32_b32 s1, s0
; GFX10-NEXT: v_readfirstlane_b32 s3, v1
; GFX10-NEXT: v_readlane_b32 s6, v2, s1
-; GFX10-NEXT: s_lshl_b32 s7, 1, s1
-; GFX10-NEXT: s_andn2_b32 s0, s0, s7
; GFX10-NEXT: v_writelane_b32 v0, s3, s1
; GFX10-NEXT: v_add_f32_e32 v1, s6, v1
-; GFX10-NEXT: s_cmp_lg_u32 s0, 0
+; GFX10-NEXT: s_lshl_b32 s1, 1, s1
+; GFX10-NEXT: s_andn2_b32 s0, s0, s1
; GFX10-NEXT: s_cbranch_scc1 .LBB28_5
; GFX10-NEXT: ; %bb.6: ; %ComputeEnd
; GFX10-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -8609,14 +8604,13 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX90A-NEXT: .LBB28_5: ; %ComputeLoop
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_ff1_i32_b64 s3, s[0:1]
-; GFX90A-NEXT: s_lshl_b64 s[6:7], 1, s3
-; GFX90A-NEXT: v_readfirstlane_b32 s8, v1
-; GFX90A-NEXT: v_readlane_b32 s9, v2, s3
+; GFX90A-NEXT: v_readfirstlane_b32 s6, v1
; GFX90A-NEXT: s_mov_b32 m0, s3
+; GFX90A-NEXT: v_readlane_b32 s8, v2, s3
+; GFX90A-NEXT: v_writelane_b32 v0, s6, m0
+; GFX90A-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX90A-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX90A-NEXT: v_writelane_b32 v0, s8, m0
-; GFX90A-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX90A-NEXT: v_add_f32_e32 v1, s9, v1
+; GFX90A-NEXT: v_add_f32_e32 v1, s8, v1
; GFX90A-NEXT: s_cbranch_scc1 .LBB28_5
; GFX90A-NEXT: ; %bb.6: ; %ComputeEnd
; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -8692,14 +8686,13 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX908-NEXT: .LBB28_5: ; %ComputeLoop
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_ff1_i32_b64 s3, s[0:1]
-; GFX908-NEXT: s_lshl_b64 s[6:7], 1, s3
-; GFX908-NEXT: v_readfirstlane_b32 s8, v1
-; GFX908-NEXT: v_readlane_b32 s9, v2, s3
+; GFX908-NEXT: v_readfirstlane_b32 s6, v1
; GFX908-NEXT: s_mov_b32 m0, s3
+; GFX908-NEXT: v_readlane_b32 s8, v2, s3
+; GFX908-NEXT: v_writelane_b32 v0, s6, m0
+; GFX908-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX908-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX908-NEXT: v_writelane_b32 v0, s8, m0
-; GFX908-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX908-NEXT: v_add_f32_e32 v1, s9, v1
+; GFX908-NEXT: v_add_f32_e32 v1, s8, v1
; GFX908-NEXT: s_cbranch_scc1 .LBB28_5
; GFX908-NEXT: ; %bb.6: ; %ComputeEnd
; GFX908-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -8776,14 +8769,13 @@ define amdgpu_kernel void @local_ds_fadd(ptr addrspace(1) %out, ptr addrspace(3)
; GFX8-NEXT: .LBB28_5: ; %ComputeLoop
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_ff1_i32_b64 s3, s[0:1]
-; GFX8-NEXT: s_lshl_b64 s[6:7], 1, s3
-; GFX8-NEXT: v_readfirstlane_b32 s8, v1
-; GFX8-NEXT: v_readlane_b32 s9, v2, s3
+; GFX8-NEXT: v_readfirstlane_b32 s6, v1
; GFX8-NEXT: s_mov_b32 m0, s3
+; GFX8-NEXT: v_readlane_b32 s8, v2, s3
+; GFX8-NEXT: v_writelane_b32 v0, s6, m0
+; GFX8-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX8-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX8-NEXT: v_writelane_b32 v0, s8, m0
-; GFX8-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX8-NEXT: v_add_f32_e32 v1, s9, v1
+; GFX8-NEXT: v_add_f32_e32 v1, s8, v1
; GFX8-NEXT: s_cbranch_scc1 .LBB28_5
; GFX8-NEXT: ; %bb.6: ; %ComputeEnd
; GFX8-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -9130,12 +9122,10 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX12-NEXT: s_wait_alu 0xfffe
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_readlane_b32 s6, v1, s3
-; GFX12-NEXT: s_lshl_b32 s7, 1, s3
; GFX12-NEXT: v_writelane_b32 v0, s0, s3
+; GFX12-NEXT: s_lshl_b32 s3, 1, s3
; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_and_not1_b32 s1, s1, s7
-; GFX12-NEXT: s_wait_alu 0xfffe
-; GFX12-NEXT: s_cmp_lg_u32 s1, 0
+; GFX12-NEXT: s_and_not1_b32 s1, s1, s3
; GFX12-NEXT: s_add_f32 s0, s0, s6
; GFX12-NEXT: s_cbranch_scc1 .LBB29_5
; GFX12-NEXT: ; %bb.6: ; %ComputeEnd
@@ -9212,14 +9202,13 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX942-NEXT: .LBB29_5: ; %ComputeLoop
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_ff1_i32_b64 s3, s[0:1]
-; GFX942-NEXT: s_lshl_b64 s[6:7], 1, s3
-; GFX942-NEXT: v_readfirstlane_b32 s8, v1
-; GFX942-NEXT: v_readlane_b32 s9, v2, s3
+; GFX942-NEXT: v_readfirstlane_b32 s6, v1
; GFX942-NEXT: s_mov_b32 m0, s3
+; GFX942-NEXT: v_readlane_b32 s8, v2, s3
+; GFX942-NEXT: v_writelane_b32 v0, s6, m0
+; GFX942-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX942-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX942-NEXT: v_writelane_b32 v0, s8, m0
-; GFX942-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX942-NEXT: v_add_f32_e32 v1, s9, v1
+; GFX942-NEXT: v_add_f32_e32 v1, s8, v1
; GFX942-NEXT: s_cbranch_scc1 .LBB29_5
; GFX942-NEXT: ; %bb.6: ; %ComputeEnd
; GFX942-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -9296,15 +9285,14 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX11-NEXT: .LBB29_5: ; %ComputeLoop
; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX11-NEXT: s_ctz_i32_b32 s1, s0
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_readfirstlane_b32 s3, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_readlane_b32 s6, v2, s1
-; GFX11-NEXT: s_lshl_b32 s7, 1, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_and_not1_b32 s0, s0, s7
; GFX11-NEXT: v_writelane_b32 v0, s3, s1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_add_f32_e32 v1, s6, v1
-; GFX11-NEXT: s_cmp_lg_u32 s0, 0
+; GFX11-NEXT: s_lshl_b32 s1, 1, s1
+; GFX11-NEXT: s_and_not1_b32 s0, s0, s1
; GFX11-NEXT: s_cbranch_scc1 .LBB29_5
; GFX11-NEXT: ; %bb.6: ; %ComputeEnd
; GFX11-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -9377,11 +9365,10 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX10-NEXT: s_ff1_i32_b32 s1, s0
; GFX10-NEXT: v_readfirstlane_b32 s3, v1
; GFX10-NEXT: v_readlane_b32 s6, v2, s1
-; GFX10-NEXT: s_lshl_b32 s7, 1, s1
-; GFX10-NEXT: s_andn2_b32 s0, s0, s7
; GFX10-NEXT: v_writelane_b32 v0, s3, s1
; GFX10-NEXT: v_add_f32_e32 v1, s6, v1
-; GFX10-NEXT: s_cmp_lg_u32 s0, 0
+; GFX10-NEXT: s_lshl_b32 s1, 1, s1
+; GFX10-NEXT: s_andn2_b32 s0, s0, s1
; GFX10-NEXT: s_cbranch_scc1 .LBB29_5
; GFX10-NEXT: ; %bb.6: ; %ComputeEnd
; GFX10-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -9453,14 +9440,13 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX90A-NEXT: .LBB29_5: ; %ComputeLoop
; GFX90A-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX90A-NEXT: s_ff1_i32_b64 s3, s[0:1]
-; GFX90A-NEXT: s_lshl_b64 s[6:7], 1, s3
-; GFX90A-NEXT: v_readfirstlane_b32 s8, v1
-; GFX90A-NEXT: v_readlane_b32 s9, v2, s3
+; GFX90A-NEXT: v_readfirstlane_b32 s6, v1
; GFX90A-NEXT: s_mov_b32 m0, s3
+; GFX90A-NEXT: v_readlane_b32 s8, v2, s3
+; GFX90A-NEXT: v_writelane_b32 v0, s6, m0
+; GFX90A-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX90A-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX90A-NEXT: v_writelane_b32 v0, s8, m0
-; GFX90A-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX90A-NEXT: v_add_f32_e32 v1, s9, v1
+; GFX90A-NEXT: v_add_f32_e32 v1, s8, v1
; GFX90A-NEXT: s_cbranch_scc1 .LBB29_5
; GFX90A-NEXT: ; %bb.6: ; %ComputeEnd
; GFX90A-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -9533,14 +9519,13 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX908-NEXT: .LBB29_5: ; %ComputeLoop
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_ff1_i32_b64 s3, s[0:1]
-; GFX908-NEXT: s_lshl_b64 s[6:7], 1, s3
-; GFX908-NEXT: v_readfirstlane_b32 s8, v1
-; GFX908-NEXT: v_readlane_b32 s9, v2, s3
+; GFX908-NEXT: v_readfirstlane_b32 s6, v1
; GFX908-NEXT: s_mov_b32 m0, s3
+; GFX908-NEXT: v_readlane_b32 s8, v2, s3
+; GFX908-NEXT: v_writelane_b32 v0, s6, m0
+; GFX908-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX908-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX908-NEXT: v_writelane_b32 v0, s8, m0
-; GFX908-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX908-NEXT: v_add_f32_e32 v1, s9, v1
+; GFX908-NEXT: v_add_f32_e32 v1, s8, v1
; GFX908-NEXT: s_cbranch_scc1 .LBB29_5
; GFX908-NEXT: ; %bb.6: ; %ComputeEnd
; GFX908-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
@@ -9614,14 +9599,13 @@ define amdgpu_kernel void @local_ds_fadd_one_as(ptr addrspace(1) %out, ptr addrs
; GFX8-NEXT: .LBB29_5: ; %ComputeLoop
; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX8-NEXT: s_ff1_i32_b64 s3, s[0:1]
-; GFX8-NEXT: s_lshl_b64 s[6:7], 1, s3
-; GFX8-NEXT: v_readfirstlane_b32 s8, v1
-; GFX8-NEXT: v_readlane_b32 s9, v2, s3
+; GFX8-NEXT: v_readfirstlane_b32 s6, v1
; GFX8-NEXT: s_mov_b32 m0, s3
+; GFX8-NEXT: v_readlane_b32 s8, v2, s3
+; GFX8-NEXT: v_writelane_b32 v0, s6, m0
+; GFX8-NEXT: s_lshl_b64 s[6:7], 1, s3
; GFX8-NEXT: s_andn2_b64 s[0:1], s[0:1], s[6:7]
-; GFX8-NEXT: v_writelane_b32 v0, s8, m0
-; GFX8-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX8-NEXT: v_add_f32_e32 v1, s9, v1
+; GFX8-NEXT: v_add_f32_e32 v1, s8, v1
; GFX8-NEXT: s_cbranch_scc1 .LBB29_5
; GFX8-NEXT: ; %bb.6: ; %ComputeEnd
; GFX8-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-barriers.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-barriers.ll
index 516c3946..282a7ae 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-barriers.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-barriers.ll
@@ -15,7 +15,6 @@ define amdgpu_kernel void @test_s_barrier() {
;
; GFX10-CU-LABEL: test_s_barrier:
; GFX10-CU: ; %bb.0: ; %entry
-; GFX10-CU-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-CU-NEXT: s_barrier
; GFX10-CU-NEXT: s_endpgm
;
@@ -26,7 +25,6 @@ define amdgpu_kernel void @test_s_barrier() {
;
; GFX11-CU-LABEL: test_s_barrier:
; GFX11-CU: ; %bb.0: ; %entry
-; GFX11-CU-NEXT: s_waitcnt_depctr 0xffe3
; GFX11-CU-NEXT: s_barrier
; GFX11-CU-NEXT: s_endpgm
;
@@ -38,7 +36,6 @@ define amdgpu_kernel void @test_s_barrier() {
;
; GFX12-CU-LABEL: test_s_barrier:
; GFX12-CU: ; %bb.0: ; %entry
-; GFX12-CU-NEXT: s_wait_alu 0xffe3
; GFX12-CU-NEXT: s_barrier_signal -1
; GFX12-CU-NEXT: s_barrier_wait -1
; GFX12-CU-NEXT: s_endpgm
@@ -63,8 +60,8 @@ define amdgpu_kernel void @test_s_barrier_workgroup_fence() {
;
; GFX10-CU-LABEL: test_s_barrier_workgroup_fence:
; GFX10-CU: ; %bb.0: ; %entry
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-CU-NEXT: s_waitcnt_depctr 0xffe3
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_barrier
; GFX10-CU-NEXT: s_endpgm
;
@@ -77,8 +74,8 @@ define amdgpu_kernel void @test_s_barrier_workgroup_fence() {
;
; GFX11-CU-LABEL: test_s_barrier_workgroup_fence:
; GFX11-CU: ; %bb.0: ; %entry
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-CU-NEXT: s_waitcnt_depctr 0xffe3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_barrier
; GFX11-CU-NEXT: s_endpgm
;
@@ -94,8 +91,10 @@ define amdgpu_kernel void @test_s_barrier_workgroup_fence() {
;
; GFX12-CU-LABEL: test_s_barrier_workgroup_fence:
; GFX12-CU: ; %bb.0: ; %entry
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
-; GFX12-CU-NEXT: s_wait_alu 0xffe3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: s_barrier_signal -1
; GFX12-CU-NEXT: s_barrier_wait -1
; GFX12-CU-NEXT: s_endpgm
@@ -125,7 +124,6 @@ define amdgpu_kernel void @test_s_barrier_agent_fence() {
; GFX10-CU: ; %bb.0: ; %entry
; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX10-CU-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-CU-NEXT: s_barrier
; GFX10-CU-NEXT: s_endpgm
;
@@ -140,7 +138,6 @@ define amdgpu_kernel void @test_s_barrier_agent_fence() {
; GFX11-CU: ; %bb.0: ; %entry
; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
-; GFX11-CU-NEXT: s_waitcnt_depctr 0xffe3
; GFX11-CU-NEXT: s_barrier
; GFX11-CU-NEXT: s_endpgm
;
@@ -160,7 +157,6 @@ define amdgpu_kernel void @test_s_barrier_agent_fence() {
; GFX12-CU-NEXT: s_wait_samplecnt 0x0
; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX12-CU-NEXT: s_wait_alu 0xffe3
; GFX12-CU-NEXT: s_barrier_signal -1
; GFX12-CU-NEXT: s_barrier_wait -1
; GFX12-CU-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-fence-mmra-global.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-fence-mmra-global.ll
index 6a76f43..7efbff9 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-fence-mmra-global.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-fence-mmra-global.ll
@@ -107,6 +107,8 @@ define amdgpu_kernel void @workgroup_release_fence() {
;
; GFX10-CU-LABEL: workgroup_release_fence:
; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: workgroup_release_fence:
@@ -139,6 +141,8 @@ define amdgpu_kernel void @workgroup_release_fence() {
;
; GFX11-CU-LABEL: workgroup_release_fence:
; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: workgroup_release_fence:
@@ -151,6 +155,10 @@ define amdgpu_kernel void @workgroup_release_fence() {
;
; GFX12-CU-LABEL: workgroup_release_fence:
; GFX12-CU: ; %bb.0: ; %entry
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: workgroup_release_fence:
@@ -181,6 +189,8 @@ define amdgpu_kernel void @workgroup_acq_rel_fence() {
;
; GFX10-CU-LABEL: workgroup_acq_rel_fence:
; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: workgroup_acq_rel_fence:
@@ -216,6 +226,8 @@ define amdgpu_kernel void @workgroup_acq_rel_fence() {
;
; GFX11-CU-LABEL: workgroup_acq_rel_fence:
; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: workgroup_acq_rel_fence:
@@ -229,6 +241,10 @@ define amdgpu_kernel void @workgroup_acq_rel_fence() {
;
; GFX12-CU-LABEL: workgroup_acq_rel_fence:
; GFX12-CU: ; %bb.0: ; %entry
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: workgroup_acq_rel_fence:
@@ -259,6 +275,8 @@ define amdgpu_kernel void @workgroup_seq_cst_fence() {
;
; GFX10-CU-LABEL: workgroup_seq_cst_fence:
; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: workgroup_seq_cst_fence:
@@ -294,6 +312,8 @@ define amdgpu_kernel void @workgroup_seq_cst_fence() {
;
; GFX11-CU-LABEL: workgroup_seq_cst_fence:
; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: workgroup_seq_cst_fence:
@@ -307,6 +327,10 @@ define amdgpu_kernel void @workgroup_seq_cst_fence() {
;
; GFX12-CU-LABEL: workgroup_seq_cst_fence:
; GFX12-CU: ; %bb.0: ; %entry
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: workgroup_seq_cst_fence:
@@ -412,6 +436,8 @@ define amdgpu_kernel void @workgroup_one_as_release_fence() {
;
; GFX10-CU-LABEL: workgroup_one_as_release_fence:
; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: workgroup_one_as_release_fence:
@@ -444,6 +470,8 @@ define amdgpu_kernel void @workgroup_one_as_release_fence() {
;
; GFX11-CU-LABEL: workgroup_one_as_release_fence:
; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: workgroup_one_as_release_fence:
@@ -456,6 +484,10 @@ define amdgpu_kernel void @workgroup_one_as_release_fence() {
;
; GFX12-CU-LABEL: workgroup_one_as_release_fence:
; GFX12-CU: ; %bb.0: ; %entry
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: workgroup_one_as_release_fence:
@@ -486,6 +518,8 @@ define amdgpu_kernel void @workgroup_one_as_acq_rel_fence() {
;
; GFX10-CU-LABEL: workgroup_one_as_acq_rel_fence:
; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: workgroup_one_as_acq_rel_fence:
@@ -521,6 +555,8 @@ define amdgpu_kernel void @workgroup_one_as_acq_rel_fence() {
;
; GFX11-CU-LABEL: workgroup_one_as_acq_rel_fence:
; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: workgroup_one_as_acq_rel_fence:
@@ -534,6 +570,10 @@ define amdgpu_kernel void @workgroup_one_as_acq_rel_fence() {
;
; GFX12-CU-LABEL: workgroup_one_as_acq_rel_fence:
; GFX12-CU: ; %bb.0: ; %entry
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: workgroup_one_as_acq_rel_fence:
@@ -564,6 +604,8 @@ define amdgpu_kernel void @workgroup_one_as_seq_cst_fence() {
;
; GFX10-CU-LABEL: workgroup_one_as_seq_cst_fence:
; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: workgroup_one_as_seq_cst_fence:
@@ -599,6 +641,8 @@ define amdgpu_kernel void @workgroup_one_as_seq_cst_fence() {
;
; GFX11-CU-LABEL: workgroup_one_as_seq_cst_fence:
; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: workgroup_one_as_seq_cst_fence:
@@ -612,6 +656,10 @@ define amdgpu_kernel void @workgroup_one_as_seq_cst_fence() {
;
; GFX12-CU-LABEL: workgroup_one_as_seq_cst_fence:
; GFX12-CU: ; %bb.0: ; %entry
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: workgroup_one_as_seq_cst_fence:
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-fence.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-fence.ll
index d288bfc..1cca64a 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-fence.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-fence.ll
@@ -1093,7 +1093,8 @@ define amdgpu_kernel void @workgroup_release_fence() {
;
; GFX10-CU-LABEL: workgroup_release_fence:
; GFX10-CU: ; %bb.0: ; %entry
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: workgroup_release_fence:
@@ -1129,7 +1130,8 @@ define amdgpu_kernel void @workgroup_release_fence() {
;
; GFX11-CU-LABEL: workgroup_release_fence:
; GFX11-CU: ; %bb.0: ; %entry
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: workgroup_release_fence:
@@ -1142,7 +1144,10 @@ define amdgpu_kernel void @workgroup_release_fence() {
;
; GFX12-CU-LABEL: workgroup_release_fence:
; GFX12-CU: ; %bb.0: ; %entry
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: workgroup_release_fence:
@@ -1175,7 +1180,8 @@ define amdgpu_kernel void @workgroup_acq_rel_fence() {
;
; GFX10-CU-LABEL: workgroup_acq_rel_fence:
; GFX10-CU: ; %bb.0: ; %entry
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: workgroup_acq_rel_fence:
@@ -1214,7 +1220,8 @@ define amdgpu_kernel void @workgroup_acq_rel_fence() {
;
; GFX11-CU-LABEL: workgroup_acq_rel_fence:
; GFX11-CU: ; %bb.0: ; %entry
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: workgroup_acq_rel_fence:
@@ -1228,7 +1235,10 @@ define amdgpu_kernel void @workgroup_acq_rel_fence() {
;
; GFX12-CU-LABEL: workgroup_acq_rel_fence:
; GFX12-CU: ; %bb.0: ; %entry
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: workgroup_acq_rel_fence:
@@ -1261,7 +1271,8 @@ define amdgpu_kernel void @workgroup_seq_cst_fence() {
;
; GFX10-CU-LABEL: workgroup_seq_cst_fence:
; GFX10-CU: ; %bb.0: ; %entry
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: workgroup_seq_cst_fence:
@@ -1300,7 +1311,8 @@ define amdgpu_kernel void @workgroup_seq_cst_fence() {
;
; GFX11-CU-LABEL: workgroup_seq_cst_fence:
; GFX11-CU: ; %bb.0: ; %entry
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: workgroup_seq_cst_fence:
@@ -1314,7 +1326,10 @@ define amdgpu_kernel void @workgroup_seq_cst_fence() {
;
; GFX12-CU-LABEL: workgroup_seq_cst_fence:
; GFX12-CU: ; %bb.0: ; %entry
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: workgroup_seq_cst_fence:
@@ -1420,6 +1435,8 @@ define amdgpu_kernel void @workgroup_one_as_release_fence() {
;
; GFX10-CU-LABEL: workgroup_one_as_release_fence:
; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: workgroup_one_as_release_fence:
@@ -1452,6 +1469,8 @@ define amdgpu_kernel void @workgroup_one_as_release_fence() {
;
; GFX11-CU-LABEL: workgroup_one_as_release_fence:
; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: workgroup_one_as_release_fence:
@@ -1464,6 +1483,10 @@ define amdgpu_kernel void @workgroup_one_as_release_fence() {
;
; GFX12-CU-LABEL: workgroup_one_as_release_fence:
; GFX12-CU: ; %bb.0: ; %entry
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: workgroup_one_as_release_fence:
@@ -1494,6 +1517,8 @@ define amdgpu_kernel void @workgroup_one_as_acq_rel_fence() {
;
; GFX10-CU-LABEL: workgroup_one_as_acq_rel_fence:
; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: workgroup_one_as_acq_rel_fence:
@@ -1529,6 +1554,8 @@ define amdgpu_kernel void @workgroup_one_as_acq_rel_fence() {
;
; GFX11-CU-LABEL: workgroup_one_as_acq_rel_fence:
; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: workgroup_one_as_acq_rel_fence:
@@ -1542,6 +1569,10 @@ define amdgpu_kernel void @workgroup_one_as_acq_rel_fence() {
;
; GFX12-CU-LABEL: workgroup_one_as_acq_rel_fence:
; GFX12-CU: ; %bb.0: ; %entry
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: workgroup_one_as_acq_rel_fence:
@@ -1572,6 +1603,8 @@ define amdgpu_kernel void @workgroup_one_as_seq_cst_fence() {
;
; GFX10-CU-LABEL: workgroup_one_as_seq_cst_fence:
; GFX10-CU: ; %bb.0: ; %entry
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: workgroup_one_as_seq_cst_fence:
@@ -1607,6 +1640,8 @@ define amdgpu_kernel void @workgroup_one_as_seq_cst_fence() {
;
; GFX11-CU-LABEL: workgroup_one_as_seq_cst_fence:
; GFX11-CU: ; %bb.0: ; %entry
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: workgroup_one_as_seq_cst_fence:
@@ -1620,6 +1655,10 @@ define amdgpu_kernel void @workgroup_one_as_seq_cst_fence() {
;
; GFX12-CU-LABEL: workgroup_one_as_seq_cst_fence:
; GFX12-CU: ; %bb.0: ; %entry
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: workgroup_one_as_seq_cst_fence:
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-volatile.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-volatile.ll
index d277441..2afa577 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-volatile.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-volatile.ll
@@ -1072,7 +1072,8 @@ define amdgpu_kernel void @flat_volatile_workgroup_release_store(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -1109,7 +1110,8 @@ define amdgpu_kernel void @flat_volatile_workgroup_release_store(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -1136,7 +1138,10 @@ define amdgpu_kernel void @flat_volatile_workgroup_release_store(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-workgroup.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-workgroup.ll
index 3826953..d384aec 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-workgroup.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-flat-workgroup.ll
@@ -656,12 +656,12 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_load(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_load_dword v2, v[0:1]
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -765,12 +765,12 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_load(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s3
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_load_b32 v2, v[0:1]
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -800,12 +800,14 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_load(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s3
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_load_b32 v2, v[0:1]
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -1193,7 +1195,8 @@ define amdgpu_kernel void @flat_workgroup_release_store(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -1278,7 +1281,8 @@ define amdgpu_kernel void @flat_workgroup_release_store(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -1305,7 +1309,10 @@ define amdgpu_kernel void @flat_workgroup_release_store(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -1372,7 +1379,8 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_store(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -1457,7 +1465,8 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_store(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -1484,7 +1493,10 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_store(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -1891,7 +1903,8 @@ define amdgpu_kernel void @flat_workgroup_release_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -1976,7 +1989,8 @@ define amdgpu_kernel void @flat_workgroup_release_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -2003,7 +2017,10 @@ define amdgpu_kernel void @flat_workgroup_release_atomicrmw(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -2074,9 +2091,11 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_workgroup_acq_rel_atomicrmw:
@@ -2170,9 +2189,11 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: flat_workgroup_acq_rel_atomicrmw:
@@ -2200,9 +2221,12 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_atomicrmw(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: flat_workgroup_acq_rel_atomicrmw:
@@ -2273,9 +2297,11 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_workgroup_seq_cst_atomicrmw:
@@ -2369,9 +2395,11 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: flat_workgroup_seq_cst_atomicrmw:
@@ -2399,9 +2427,12 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_atomicrmw(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: flat_workgroup_seq_cst_atomicrmw:
@@ -2697,12 +2728,12 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s6
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -2813,12 +2844,12 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_ret_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s2
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -2850,12 +2881,14 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_ret_atomicrmw(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s2
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 th:TH_ATOMIC_RETURN
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -2935,12 +2968,12 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s6
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -3051,12 +3084,12 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_ret_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s2
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -3088,12 +3121,14 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_ret_atomicrmw(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s2
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 th:TH_ATOMIC_RETURN
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -3731,7 +3766,8 @@ define amdgpu_kernel void @flat_workgroup_release_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_endpgm
;
@@ -3854,7 +3890,8 @@ define amdgpu_kernel void @flat_workgroup_release_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_endpgm
;
@@ -3889,7 +3926,10 @@ define amdgpu_kernel void @flat_workgroup_release_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX12-CU-NEXT: s_endpgm
;
@@ -4007,9 +4047,11 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_workgroup_acq_rel_monotonic_cmpxchg:
@@ -4141,9 +4183,11 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: flat_workgroup_acq_rel_monotonic_cmpxchg:
@@ -4179,9 +4223,12 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: flat_workgroup_acq_rel_monotonic_cmpxchg:
@@ -4299,9 +4346,11 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_workgroup_seq_cst_monotonic_cmpxchg:
@@ -4433,9 +4482,11 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: flat_workgroup_seq_cst_monotonic_cmpxchg:
@@ -4471,9 +4522,12 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: flat_workgroup_seq_cst_monotonic_cmpxchg:
@@ -5137,9 +5191,11 @@ define amdgpu_kernel void @flat_workgroup_release_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_workgroup_release_acquire_cmpxchg:
@@ -5271,9 +5327,11 @@ define amdgpu_kernel void @flat_workgroup_release_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: flat_workgroup_release_acquire_cmpxchg:
@@ -5309,9 +5367,12 @@ define amdgpu_kernel void @flat_workgroup_release_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: flat_workgroup_release_acquire_cmpxchg:
@@ -5429,9 +5490,11 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_workgroup_acq_rel_acquire_cmpxchg:
@@ -5563,9 +5626,11 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: flat_workgroup_acq_rel_acquire_cmpxchg:
@@ -5601,9 +5666,12 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: flat_workgroup_acq_rel_acquire_cmpxchg:
@@ -5721,9 +5789,11 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_workgroup_seq_cst_acquire_cmpxchg:
@@ -5855,9 +5925,11 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: flat_workgroup_seq_cst_acquire_cmpxchg:
@@ -5893,9 +5965,12 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: flat_workgroup_seq_cst_acquire_cmpxchg:
@@ -6013,9 +6088,11 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_workgroup_seq_cst_seq_cst_cmpxchg:
@@ -6147,9 +6224,11 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: flat_workgroup_seq_cst_seq_cst_cmpxchg:
@@ -6185,9 +6264,12 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: flat_workgroup_seq_cst_seq_cst_cmpxchg:
@@ -6923,7 +7005,8 @@ define amdgpu_kernel void @flat_workgroup_release_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
@@ -7070,7 +7153,8 @@ define amdgpu_kernel void @flat_workgroup_release_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
@@ -7113,7 +7197,10 @@ define amdgpu_kernel void @flat_workgroup_release_monotonic_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
@@ -7245,12 +7332,12 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -7399,12 +7486,12 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -7444,12 +7531,14 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_monotonic_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -7577,12 +7666,12 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -7731,12 +7820,12 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -7776,12 +7865,14 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_monotonic_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -8535,12 +8626,12 @@ define amdgpu_kernel void @flat_workgroup_release_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -8689,12 +8780,12 @@ define amdgpu_kernel void @flat_workgroup_release_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -8734,12 +8825,14 @@ define amdgpu_kernel void @flat_workgroup_release_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -8867,12 +8960,12 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -9021,12 +9114,12 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -9066,12 +9159,14 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -9199,12 +9294,12 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -9353,12 +9448,12 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -9398,12 +9493,14 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -9531,7 +9628,8 @@ define amdgpu_kernel void @flat_workgroup_monotonic_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -9685,7 +9783,8 @@ define amdgpu_kernel void @flat_workgroup_monotonic_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9730,7 +9829,10 @@ define amdgpu_kernel void @flat_workgroup_monotonic_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9863,7 +9965,8 @@ define amdgpu_kernel void @flat_workgroup_acquire_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -10017,7 +10120,8 @@ define amdgpu_kernel void @flat_workgroup_acquire_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -10062,7 +10166,10 @@ define amdgpu_kernel void @flat_workgroup_acquire_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -10195,12 +10302,12 @@ define amdgpu_kernel void @flat_workgroup_release_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -10349,12 +10456,12 @@ define amdgpu_kernel void @flat_workgroup_release_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -10394,12 +10501,14 @@ define amdgpu_kernel void @flat_workgroup_release_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -10527,12 +10636,12 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -10681,12 +10790,12 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -10726,12 +10835,14 @@ define amdgpu_kernel void @flat_workgroup_acq_rel_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -10859,12 +10970,12 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -11013,12 +11124,12 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -11058,12 +11169,14 @@ define amdgpu_kernel void @flat_workgroup_seq_cst_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -11732,10 +11845,13 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_load(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_load_dword v2, v[0:1]
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -11834,10 +11950,13 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_load(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_load_b32 v2, v[0:1]
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -11868,10 +11987,15 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_load(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_load_b32 v2, v[0:1]
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -12258,6 +12382,8 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_store(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -12339,6 +12465,8 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_store(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -12365,6 +12493,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_store(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -12430,6 +12562,8 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_store(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -12511,6 +12645,8 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_store(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -12537,6 +12673,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_store(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -12933,6 +13073,8 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_swap v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -13014,6 +13156,8 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -13040,6 +13184,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_atomicrmw(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -13107,7 +13255,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_swap v[0:1], v2
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_workgroup_one_as_acq_rel_atomicrmw:
@@ -13194,7 +13345,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: flat_workgroup_one_as_acq_rel_atomicrmw:
@@ -13222,7 +13376,12 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_atomicrmw(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: flat_workgroup_one_as_acq_rel_atomicrmw:
@@ -13290,7 +13449,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_swap v[0:1], v2
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_workgroup_one_as_seq_cst_atomicrmw:
@@ -13377,7 +13539,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: flat_workgroup_one_as_seq_cst_atomicrmw:
@@ -13405,7 +13570,12 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_atomicrmw(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s3
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_swap_b32 v[0:1], v2
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: flat_workgroup_one_as_seq_cst_atomicrmw:
@@ -13696,10 +13866,13 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s6
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -13805,10 +13978,13 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_ret_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s2
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -13841,10 +14017,15 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_ret_atomicrmw(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s2
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 th:TH_ATOMIC_RETURN
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -13923,10 +14104,13 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_ret_atomicrmw(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s6
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_swap v2, v[0:1], v2 glc
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -14032,10 +14216,13 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_ret_atomicrmw(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s2
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 glc
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -14068,10 +14255,15 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_ret_atomicrmw(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s2
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_swap_b32 v2, v[0:1], v2 th:TH_ATOMIC_RETURN
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -14699,6 +14891,8 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_endpgm
;
@@ -14818,6 +15012,8 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_endpgm
;
@@ -14852,6 +15048,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX12-CU-NEXT: s_endpgm
;
@@ -14966,7 +15166,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_workgroup_one_as_acq_rel_monotonic_cmpxchg:
@@ -15091,7 +15294,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: flat_workgroup_one_as_acq_rel_monotonic_cmpxchg:
@@ -15127,7 +15333,12 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: flat_workgroup_one_as_acq_rel_monotonic_cmpxchg:
@@ -15242,7 +15453,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_workgroup_one_as_seq_cst_monotonic_cmpxchg:
@@ -15367,7 +15581,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: flat_workgroup_one_as_seq_cst_monotonic_cmpxchg:
@@ -15403,7 +15620,12 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: flat_workgroup_one_as_seq_cst_monotonic_cmpxchg:
@@ -16046,7 +16268,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_workgroup_one_as_release_acquire_cmpxchg:
@@ -16171,7 +16396,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: flat_workgroup_one_as_release_acquire_cmpxchg:
@@ -16207,7 +16435,12 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: flat_workgroup_one_as_release_acquire_cmpxchg:
@@ -16322,7 +16555,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_workgroup_one_as_acq_rel_acquire_cmpxchg:
@@ -16447,7 +16683,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: flat_workgroup_one_as_acq_rel_acquire_cmpxchg:
@@ -16483,7 +16722,12 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: flat_workgroup_one_as_acq_rel_acquire_cmpxchg:
@@ -16598,7 +16842,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_workgroup_one_as_seq_cst_acquire_cmpxchg:
@@ -16723,7 +16970,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: flat_workgroup_one_as_seq_cst_acquire_cmpxchg:
@@ -16759,7 +17009,12 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: flat_workgroup_one_as_seq_cst_acquire_cmpxchg:
@@ -16874,6 +17129,8 @@ define amdgpu_kernel void @flat_workgroup_one_as_monotonic_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_endpgm
;
@@ -16999,6 +17256,8 @@ define amdgpu_kernel void @flat_workgroup_one_as_monotonic_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_endpgm
;
@@ -17035,6 +17294,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_monotonic_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX12-CU-NEXT: s_endpgm
;
@@ -17150,6 +17413,8 @@ define amdgpu_kernel void @flat_workgroup_one_as_acquire_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
; GFX10-CU-NEXT: s_endpgm
;
@@ -17275,6 +17540,8 @@ define amdgpu_kernel void @flat_workgroup_one_as_acquire_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX11-CU-NEXT: s_endpgm
;
@@ -17311,6 +17578,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_acquire_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
; GFX12-CU-NEXT: s_endpgm
;
@@ -17426,7 +17697,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_workgroup_one_as_release_seq_cst_cmpxchg:
@@ -17551,7 +17825,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: flat_workgroup_one_as_release_seq_cst_cmpxchg:
@@ -17587,7 +17864,12 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: flat_workgroup_one_as_release_seq_cst_cmpxchg:
@@ -17702,7 +17984,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_workgroup_one_as_acq_rel_seq_cst_cmpxchg:
@@ -17827,7 +18112,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: flat_workgroup_one_as_acq_rel_seq_cst_cmpxchg:
@@ -17863,7 +18151,12 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: flat_workgroup_one_as_acq_rel_seq_cst_cmpxchg:
@@ -17978,7 +18271,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v[0:1], v[2:3]
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: flat_workgroup_one_as_seq_cst_seq_cst_cmpxchg:
@@ -18103,7 +18399,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: flat_workgroup_one_as_seq_cst_seq_cst_cmpxchg:
@@ -18139,7 +18438,12 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v[0:1], v[2:3] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: flat_workgroup_one_as_seq_cst_seq_cst_cmpxchg:
@@ -18870,6 +19174,8 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
@@ -19013,6 +19319,8 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
@@ -19055,6 +19363,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_monotonic_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
@@ -19185,10 +19497,13 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -19332,10 +19647,13 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -19376,10 +19694,15 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_monotonic_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -19506,10 +19829,13 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -19653,10 +19979,13 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -19697,10 +20026,15 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_monotonic_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -20445,10 +20779,13 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -20592,10 +20929,13 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -20636,10 +20976,15 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -20766,10 +21111,13 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -20913,10 +21261,13 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -20957,10 +21308,15 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -21087,10 +21443,13 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -21234,10 +21593,13 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -21278,10 +21640,15 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -21408,6 +21775,8 @@ define amdgpu_kernel void @flat_workgroup_one_as_monotonic_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
@@ -21555,6 +21924,8 @@ define amdgpu_kernel void @flat_workgroup_one_as_monotonic_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
@@ -21599,6 +21970,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_monotonic_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
@@ -21729,6 +22104,8 @@ define amdgpu_kernel void @flat_workgroup_one_as_acquire_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
@@ -21876,6 +22253,8 @@ define amdgpu_kernel void @flat_workgroup_one_as_acquire_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
@@ -21920,6 +22299,10 @@ define amdgpu_kernel void @flat_workgroup_one_as_acquire_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
@@ -22050,10 +22433,13 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -22197,10 +22583,13 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -22241,10 +22630,15 @@ define amdgpu_kernel void @flat_workgroup_one_as_release_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -22371,10 +22765,13 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -22518,10 +22915,13 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -22562,10 +22962,15 @@ define amdgpu_kernel void @flat_workgroup_one_as_acq_rel_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
@@ -22692,10 +23097,13 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s7
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: flat_atomic_cmpswap v2, v[0:1], v[2:3] glc
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: flat_store_dword v[0:1], v2
; GFX10-CU-NEXT: s_endpgm
;
@@ -22839,10 +23247,13 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 glc
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX11-CU-NEXT: s_endpgm
;
@@ -22883,10 +23294,15 @@ define amdgpu_kernel void @flat_workgroup_one_as_seq_cst_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, v0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: flat_atomic_cmpswap_b32 v2, v[0:1], v[2:3] offset:16 th:TH_ATOMIC_RETURN
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: flat_store_b32 v[0:1], v2
; GFX12-CU-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-global-volatile.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-global-volatile.ll
index 3bf5ed8..c326edf 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-global-volatile.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-global-volatile.ll
@@ -959,7 +959,8 @@ define amdgpu_kernel void @global_volatile_workgroup_release_store(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, 0
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
; GFX10-CU-NEXT: s_endpgm
;
@@ -1001,7 +1002,8 @@ define amdgpu_kernel void @global_volatile_workgroup_release_store(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, 0
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_endpgm
;
@@ -1026,7 +1028,10 @@ define amdgpu_kernel void @global_volatile_workgroup_release_store(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, 0
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX12-CU-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-global-workgroup.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-global-workgroup.ll
index b755c5d..868b438 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-global-workgroup.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-global-workgroup.ll
@@ -667,7 +667,8 @@ define amdgpu_kernel void @global_workgroup_seq_cst_load(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, 0
; GFX10-CU-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x0
; GFX10-CU-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x8
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_load_dword v1, v0, s[6:7]
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -763,7 +764,8 @@ define amdgpu_kernel void @global_workgroup_seq_cst_load(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, 0
; GFX11-CU-NEXT: s_load_b64 s[2:3], s[4:5], 0x0
; GFX11-CU-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_load_b32 v1, v0, s[2:3]
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -790,7 +792,10 @@ define amdgpu_kernel void @global_workgroup_seq_cst_load(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, 0
; GFX12-CU-NEXT: s_load_b64 s[2:3], s[4:5], 0x0
; GFX12-CU-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: global_load_b32 v1, v0, s[2:3]
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
@@ -1204,7 +1209,8 @@ define amdgpu_kernel void @global_workgroup_release_store(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, 0
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
; GFX10-CU-NEXT: s_endpgm
;
@@ -1290,7 +1296,8 @@ define amdgpu_kernel void @global_workgroup_release_store(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, 0
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_endpgm
;
@@ -1315,7 +1322,10 @@ define amdgpu_kernel void @global_workgroup_release_store(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, 0
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX12-CU-NEXT: s_endpgm
;
@@ -1391,7 +1401,8 @@ define amdgpu_kernel void @global_workgroup_seq_cst_store(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, 0
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
; GFX10-CU-NEXT: s_endpgm
;
@@ -1477,7 +1488,8 @@ define amdgpu_kernel void @global_workgroup_seq_cst_store(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, 0
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_endpgm
;
@@ -1502,7 +1514,10 @@ define amdgpu_kernel void @global_workgroup_seq_cst_store(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, 0
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX12-CU-NEXT: s_endpgm
;
@@ -1918,7 +1933,8 @@ define amdgpu_kernel void @global_workgroup_release_atomicrmw(
; GFX10-CU-NEXT: s_load_dword s6, s[8:9], 0x8
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_swap v0, v1, s[4:5]
; GFX10-CU-NEXT: s_endpgm
;
@@ -2003,7 +2019,8 @@ define amdgpu_kernel void @global_workgroup_release_atomicrmw(
; GFX11-CU-NEXT: s_load_b32 s2, s[4:5], 0x8
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_endpgm
;
@@ -2028,7 +2045,10 @@ define amdgpu_kernel void @global_workgroup_release_atomicrmw(
; GFX12-CU-NEXT: s_load_b32 s2, s[4:5], 0x8
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX12-CU-NEXT: s_endpgm
;
@@ -2105,8 +2125,10 @@ define amdgpu_kernel void @global_workgroup_acq_rel_atomicrmw(
; GFX10-CU-NEXT: s_load_dword s6, s[8:9], 0x8
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_swap v0, v1, s[4:5]
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_workgroup_acq_rel_atomicrmw:
@@ -2196,8 +2218,10 @@ define amdgpu_kernel void @global_workgroup_acq_rel_atomicrmw(
; GFX11-CU-NEXT: s_load_b32 s2, s[4:5], 0x8
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: global_workgroup_acq_rel_atomicrmw:
@@ -2223,8 +2247,12 @@ define amdgpu_kernel void @global_workgroup_acq_rel_atomicrmw(
; GFX12-CU-NEXT: s_load_b32 s2, s[4:5], 0x8
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: global_workgroup_acq_rel_atomicrmw:
@@ -2301,8 +2329,10 @@ define amdgpu_kernel void @global_workgroup_seq_cst_atomicrmw(
; GFX10-CU-NEXT: s_load_dword s6, s[8:9], 0x8
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_swap v0, v1, s[4:5]
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_workgroup_seq_cst_atomicrmw:
@@ -2392,8 +2422,10 @@ define amdgpu_kernel void @global_workgroup_seq_cst_atomicrmw(
; GFX11-CU-NEXT: s_load_b32 s2, s[4:5], 0x8
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: global_workgroup_seq_cst_atomicrmw:
@@ -2419,8 +2451,12 @@ define amdgpu_kernel void @global_workgroup_seq_cst_atomicrmw(
; GFX12-CU-NEXT: s_load_b32 s2, s[4:5], 0x8
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: global_workgroup_seq_cst_atomicrmw:
@@ -2705,7 +2741,8 @@ define amdgpu_kernel void @global_workgroup_acq_rel_ret_atomicrmw(
; GFX10-CU-NEXT: s_load_dword s6, s[8:9], 0x8
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_swap v1, v0, v1, s[4:5] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -2807,7 +2844,8 @@ define amdgpu_kernel void @global_workgroup_acq_rel_ret_atomicrmw(
; GFX11-CU-NEXT: s_load_b32 s2, s[4:5], 0x8
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -2837,7 +2875,10 @@ define amdgpu_kernel void @global_workgroup_acq_rel_ret_atomicrmw(
; GFX12-CU-NEXT: s_load_b32 s2, s[4:5], 0x8
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -2926,7 +2967,8 @@ define amdgpu_kernel void @global_workgroup_seq_cst_ret_atomicrmw(
; GFX10-CU-NEXT: s_load_dword s6, s[8:9], 0x8
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_swap v1, v0, v1, s[4:5] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -3028,7 +3070,8 @@ define amdgpu_kernel void @global_workgroup_seq_cst_ret_atomicrmw(
; GFX11-CU-NEXT: s_load_b32 s2, s[4:5], 0x8
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -3058,7 +3101,10 @@ define amdgpu_kernel void @global_workgroup_seq_cst_ret_atomicrmw(
; GFX12-CU-NEXT: s_load_b32 s2, s[4:5], 0x8
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -3644,7 +3690,8 @@ define amdgpu_kernel void @global_workgroup_release_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v[1:2], s[4:5] offset:16
; GFX10-CU-NEXT: s_endpgm
;
@@ -3758,7 +3805,8 @@ define amdgpu_kernel void @global_workgroup_release_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
; GFX11-CU-NEXT: s_endpgm
;
@@ -3791,7 +3839,10 @@ define amdgpu_kernel void @global_workgroup_release_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
; GFX12-CU-NEXT: s_endpgm
;
@@ -3900,8 +3951,10 @@ define amdgpu_kernel void @global_workgroup_acq_rel_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v[1:2], s[4:5] offset:16
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_workgroup_acq_rel_monotonic_cmpxchg:
@@ -4020,8 +4073,10 @@ define amdgpu_kernel void @global_workgroup_acq_rel_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: global_workgroup_acq_rel_monotonic_cmpxchg:
@@ -4055,8 +4110,12 @@ define amdgpu_kernel void @global_workgroup_acq_rel_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: global_workgroup_acq_rel_monotonic_cmpxchg:
@@ -4165,8 +4224,10 @@ define amdgpu_kernel void @global_workgroup_seq_cst_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v[1:2], s[4:5] offset:16
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_workgroup_seq_cst_monotonic_cmpxchg:
@@ -4285,8 +4346,10 @@ define amdgpu_kernel void @global_workgroup_seq_cst_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: global_workgroup_seq_cst_monotonic_cmpxchg:
@@ -4320,8 +4383,12 @@ define amdgpu_kernel void @global_workgroup_seq_cst_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: global_workgroup_seq_cst_monotonic_cmpxchg:
@@ -4920,8 +4987,10 @@ define amdgpu_kernel void @global_workgroup_release_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v[1:2], s[4:5] offset:16
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_workgroup_release_acquire_cmpxchg:
@@ -5040,8 +5109,10 @@ define amdgpu_kernel void @global_workgroup_release_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: global_workgroup_release_acquire_cmpxchg:
@@ -5075,8 +5146,12 @@ define amdgpu_kernel void @global_workgroup_release_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: global_workgroup_release_acquire_cmpxchg:
@@ -5185,8 +5260,10 @@ define amdgpu_kernel void @global_workgroup_acq_rel_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v[1:2], s[4:5] offset:16
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_workgroup_acq_rel_acquire_cmpxchg:
@@ -5305,8 +5382,10 @@ define amdgpu_kernel void @global_workgroup_acq_rel_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: global_workgroup_acq_rel_acquire_cmpxchg:
@@ -5340,8 +5419,12 @@ define amdgpu_kernel void @global_workgroup_acq_rel_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: global_workgroup_acq_rel_acquire_cmpxchg:
@@ -5450,8 +5533,10 @@ define amdgpu_kernel void @global_workgroup_seq_cst_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v[1:2], s[4:5] offset:16
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_workgroup_seq_cst_acquire_cmpxchg:
@@ -5570,8 +5655,10 @@ define amdgpu_kernel void @global_workgroup_seq_cst_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: global_workgroup_seq_cst_acquire_cmpxchg:
@@ -5605,8 +5692,12 @@ define amdgpu_kernel void @global_workgroup_seq_cst_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: global_workgroup_seq_cst_acquire_cmpxchg:
@@ -5715,7 +5806,8 @@ define amdgpu_kernel void @global_workgroup_monotonic_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v[1:2], s[4:5] offset:16
; GFX10-CU-NEXT: s_endpgm
;
@@ -5835,7 +5927,8 @@ define amdgpu_kernel void @global_workgroup_monotonic_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
; GFX11-CU-NEXT: s_endpgm
;
@@ -5870,7 +5963,10 @@ define amdgpu_kernel void @global_workgroup_monotonic_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
; GFX12-CU-NEXT: s_endpgm
;
@@ -5980,7 +6076,8 @@ define amdgpu_kernel void @global_workgroup_acquire_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v[1:2], s[4:5] offset:16
; GFX10-CU-NEXT: s_endpgm
;
@@ -6100,7 +6197,8 @@ define amdgpu_kernel void @global_workgroup_acquire_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
; GFX11-CU-NEXT: s_endpgm
;
@@ -6135,7 +6233,10 @@ define amdgpu_kernel void @global_workgroup_acquire_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
; GFX12-CU-NEXT: s_endpgm
;
@@ -6245,8 +6346,10 @@ define amdgpu_kernel void @global_workgroup_release_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v[1:2], s[4:5] offset:16
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_workgroup_release_seq_cst_cmpxchg:
@@ -6365,8 +6468,10 @@ define amdgpu_kernel void @global_workgroup_release_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: global_workgroup_release_seq_cst_cmpxchg:
@@ -6400,8 +6505,12 @@ define amdgpu_kernel void @global_workgroup_release_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: global_workgroup_release_seq_cst_cmpxchg:
@@ -6510,8 +6619,10 @@ define amdgpu_kernel void @global_workgroup_acq_rel_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v[1:2], s[4:5] offset:16
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_workgroup_acq_rel_seq_cst_cmpxchg:
@@ -6630,8 +6741,10 @@ define amdgpu_kernel void @global_workgroup_acq_rel_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: global_workgroup_acq_rel_seq_cst_cmpxchg:
@@ -6665,8 +6778,12 @@ define amdgpu_kernel void @global_workgroup_acq_rel_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: global_workgroup_acq_rel_seq_cst_cmpxchg:
@@ -6775,8 +6892,10 @@ define amdgpu_kernel void @global_workgroup_seq_cst_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v[1:2], s[4:5] offset:16
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_workgroup_seq_cst_seq_cst_cmpxchg:
@@ -6895,8 +7014,10 @@ define amdgpu_kernel void @global_workgroup_seq_cst_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: global_workgroup_seq_cst_seq_cst_cmpxchg:
@@ -6930,8 +7051,12 @@ define amdgpu_kernel void @global_workgroup_seq_cst_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: global_workgroup_seq_cst_seq_cst_cmpxchg:
@@ -7588,7 +7713,8 @@ define amdgpu_kernel void @global_workgroup_release_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[4:5] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -7717,7 +7843,8 @@ define amdgpu_kernel void @global_workgroup_release_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -7754,7 +7881,10 @@ define amdgpu_kernel void @global_workgroup_release_monotonic_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -7877,7 +8007,8 @@ define amdgpu_kernel void @global_workgroup_acq_rel_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[4:5] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -8009,7 +8140,8 @@ define amdgpu_kernel void @global_workgroup_acq_rel_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -8047,7 +8179,10 @@ define amdgpu_kernel void @global_workgroup_acq_rel_monotonic_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -8170,7 +8305,8 @@ define amdgpu_kernel void @global_workgroup_seq_cst_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[4:5] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -8302,7 +8438,8 @@ define amdgpu_kernel void @global_workgroup_seq_cst_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -8340,7 +8477,10 @@ define amdgpu_kernel void @global_workgroup_seq_cst_monotonic_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -9009,7 +9149,8 @@ define amdgpu_kernel void @global_workgroup_release_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[4:5] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -9141,7 +9282,8 @@ define amdgpu_kernel void @global_workgroup_release_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -9179,7 +9321,10 @@ define amdgpu_kernel void @global_workgroup_release_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -9302,7 +9447,8 @@ define amdgpu_kernel void @global_workgroup_acq_rel_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[4:5] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -9434,7 +9580,8 @@ define amdgpu_kernel void @global_workgroup_acq_rel_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -9472,7 +9619,10 @@ define amdgpu_kernel void @global_workgroup_acq_rel_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -9595,7 +9745,8 @@ define amdgpu_kernel void @global_workgroup_seq_cst_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[4:5] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -9727,7 +9878,8 @@ define amdgpu_kernel void @global_workgroup_seq_cst_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -9765,7 +9917,10 @@ define amdgpu_kernel void @global_workgroup_seq_cst_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -9888,7 +10043,8 @@ define amdgpu_kernel void @global_workgroup_monotonic_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[4:5] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -10020,7 +10176,8 @@ define amdgpu_kernel void @global_workgroup_monotonic_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -10058,7 +10215,10 @@ define amdgpu_kernel void @global_workgroup_monotonic_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -10181,7 +10341,8 @@ define amdgpu_kernel void @global_workgroup_acquire_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[4:5] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -10313,7 +10474,8 @@ define amdgpu_kernel void @global_workgroup_acquire_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -10351,7 +10513,10 @@ define amdgpu_kernel void @global_workgroup_acquire_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -10474,7 +10639,8 @@ define amdgpu_kernel void @global_workgroup_release_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[4:5] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -10606,7 +10772,8 @@ define amdgpu_kernel void @global_workgroup_release_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -10644,7 +10811,10 @@ define amdgpu_kernel void @global_workgroup_release_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -10767,7 +10937,8 @@ define amdgpu_kernel void @global_workgroup_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[4:5] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -10899,7 +11070,8 @@ define amdgpu_kernel void @global_workgroup_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -10937,7 +11109,10 @@ define amdgpu_kernel void @global_workgroup_acq_rel_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -11060,7 +11235,8 @@ define amdgpu_kernel void @global_workgroup_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[4:5] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -11192,7 +11368,8 @@ define amdgpu_kernel void @global_workgroup_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -11230,7 +11407,10 @@ define amdgpu_kernel void @global_workgroup_seq_cst_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -11914,7 +12094,8 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_load(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, 0
; GFX10-CU-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x0
; GFX10-CU-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x8
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_load_dword v1, v0, s[6:7]
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -12009,7 +12190,8 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_load(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, 0
; GFX11-CU-NEXT: s_load_b64 s[2:3], s[4:5], 0x0
; GFX11-CU-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_load_b32 v1, v0, s[2:3]
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -12036,6 +12218,10 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_load(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, 0
; GFX12-CU-NEXT: s_load_b64 s[2:3], s[4:5], 0x0
; GFX12-CU-NEXT: s_load_b64 s[0:1], s[4:5], 0x8
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: global_load_b32 v1, v0, s[2:3]
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
@@ -12447,6 +12633,8 @@ define amdgpu_kernel void @global_workgroup_one_as_release_store(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, 0
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
; GFX10-CU-NEXT: s_endpgm
;
@@ -12529,6 +12717,8 @@ define amdgpu_kernel void @global_workgroup_one_as_release_store(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, 0
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_endpgm
;
@@ -12553,6 +12743,10 @@ define amdgpu_kernel void @global_workgroup_one_as_release_store(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, 0
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX12-CU-NEXT: s_endpgm
;
@@ -12626,6 +12820,8 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_store(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, 0
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
; GFX10-CU-NEXT: s_endpgm
;
@@ -12708,6 +12904,8 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_store(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, 0
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_endpgm
;
@@ -12732,6 +12930,10 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_store(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, 0
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX12-CU-NEXT: s_endpgm
;
@@ -13145,6 +13347,8 @@ define amdgpu_kernel void @global_workgroup_one_as_release_atomicrmw(
; GFX10-CU-NEXT: s_load_dword s6, s[8:9], 0x8
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_swap v0, v1, s[4:5]
; GFX10-CU-NEXT: s_endpgm
;
@@ -13226,6 +13430,8 @@ define amdgpu_kernel void @global_workgroup_one_as_release_atomicrmw(
; GFX11-CU-NEXT: s_load_b32 s2, s[4:5], 0x8
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX11-CU-NEXT: s_endpgm
;
@@ -13250,6 +13456,10 @@ define amdgpu_kernel void @global_workgroup_one_as_release_atomicrmw(
; GFX12-CU-NEXT: s_load_b32 s2, s[4:5], 0x8
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
; GFX12-CU-NEXT: s_endpgm
;
@@ -13324,7 +13534,10 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_atomicrmw(
; GFX10-CU-NEXT: s_load_dword s6, s[8:9], 0x8
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_swap v0, v1, s[4:5]
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_workgroup_one_as_acq_rel_atomicrmw:
@@ -13411,7 +13624,10 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_atomicrmw(
; GFX11-CU-NEXT: s_load_b32 s2, s[4:5], 0x8
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: global_workgroup_one_as_acq_rel_atomicrmw:
@@ -13437,7 +13653,12 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_atomicrmw(
; GFX12-CU-NEXT: s_load_b32 s2, s[4:5], 0x8
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: global_workgroup_one_as_acq_rel_atomicrmw:
@@ -13512,7 +13733,10 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_atomicrmw(
; GFX10-CU-NEXT: s_load_dword s6, s[8:9], 0x8
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_swap v0, v1, s[4:5]
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_workgroup_one_as_seq_cst_atomicrmw:
@@ -13599,7 +13823,10 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_atomicrmw(
; GFX11-CU-NEXT: s_load_b32 s2, s[4:5], 0x8
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: global_workgroup_one_as_seq_cst_atomicrmw:
@@ -13625,7 +13852,12 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_atomicrmw(
; GFX12-CU-NEXT: s_load_b32 s2, s[4:5], 0x8
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_swap_b32 v0, v1, s[0:1]
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: global_workgroup_one_as_seq_cst_atomicrmw:
@@ -13908,6 +14140,8 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_ret_atomicrmw(
; GFX10-CU-NEXT: s_load_dword s6, s[8:9], 0x8
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_swap v1, v0, v1, s[4:5] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -14006,6 +14240,8 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_ret_atomicrmw(
; GFX11-CU-NEXT: s_load_b32 s2, s[4:5], 0x8
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -14035,6 +14271,10 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_ret_atomicrmw(
; GFX12-CU-NEXT: s_load_b32 s2, s[4:5], 0x8
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -14121,6 +14361,8 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_ret_atomicrmw(
; GFX10-CU-NEXT: s_load_dword s6, s[8:9], 0x8
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_swap v1, v0, v1, s[4:5] glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -14219,6 +14461,8 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_ret_atomicrmw(
; GFX11-CU-NEXT: s_load_b32 s2, s[4:5], 0x8
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -14248,6 +14492,10 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_ret_atomicrmw(
; GFX12-CU-NEXT: s_load_b32 s2, s[4:5], 0x8
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_swap_b32 v1, v0, v1, s[0:1] th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -14831,6 +15079,8 @@ define amdgpu_kernel void @global_workgroup_one_as_release_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v[1:2], s[4:5] offset:16
; GFX10-CU-NEXT: s_endpgm
;
@@ -14941,6 +15191,8 @@ define amdgpu_kernel void @global_workgroup_one_as_release_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
; GFX11-CU-NEXT: s_endpgm
;
@@ -14973,6 +15225,10 @@ define amdgpu_kernel void @global_workgroup_one_as_release_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
; GFX12-CU-NEXT: s_endpgm
;
@@ -15079,7 +15335,10 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v[1:2], s[4:5] offset:16
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_workgroup_one_as_acq_rel_monotonic_cmpxchg:
@@ -15195,7 +15454,10 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: global_workgroup_one_as_acq_rel_monotonic_cmpxchg:
@@ -15229,7 +15491,12 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: global_workgroup_one_as_acq_rel_monotonic_cmpxchg:
@@ -15336,7 +15603,10 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v[1:2], s[4:5] offset:16
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_workgroup_one_as_seq_cst_monotonic_cmpxchg:
@@ -15452,7 +15722,10 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: global_workgroup_one_as_seq_cst_monotonic_cmpxchg:
@@ -15486,7 +15759,12 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: global_workgroup_one_as_seq_cst_monotonic_cmpxchg:
@@ -16083,7 +16361,10 @@ define amdgpu_kernel void @global_workgroup_one_as_release_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v[1:2], s[4:5] offset:16
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_workgroup_one_as_release_acquire_cmpxchg:
@@ -16199,7 +16480,10 @@ define amdgpu_kernel void @global_workgroup_one_as_release_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: global_workgroup_one_as_release_acquire_cmpxchg:
@@ -16233,7 +16517,12 @@ define amdgpu_kernel void @global_workgroup_one_as_release_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: global_workgroup_one_as_release_acquire_cmpxchg:
@@ -16340,7 +16629,10 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v[1:2], s[4:5] offset:16
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_workgroup_one_as_acq_rel_acquire_cmpxchg:
@@ -16456,7 +16748,10 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: global_workgroup_one_as_acq_rel_acquire_cmpxchg:
@@ -16490,7 +16785,12 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: global_workgroup_one_as_acq_rel_acquire_cmpxchg:
@@ -16597,7 +16897,10 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v[1:2], s[4:5] offset:16
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_workgroup_one_as_seq_cst_acquire_cmpxchg:
@@ -16713,7 +17016,10 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: global_workgroup_one_as_seq_cst_acquire_cmpxchg:
@@ -16747,7 +17053,12 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: global_workgroup_one_as_seq_cst_acquire_cmpxchg:
@@ -16854,6 +17165,8 @@ define amdgpu_kernel void @global_workgroup_one_as_monotonic_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v[1:2], s[4:5] offset:16
; GFX10-CU-NEXT: s_endpgm
;
@@ -16970,6 +17283,8 @@ define amdgpu_kernel void @global_workgroup_one_as_monotonic_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
; GFX11-CU-NEXT: s_endpgm
;
@@ -17004,6 +17319,10 @@ define amdgpu_kernel void @global_workgroup_one_as_monotonic_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
; GFX12-CU-NEXT: s_endpgm
;
@@ -17111,6 +17430,8 @@ define amdgpu_kernel void @global_workgroup_one_as_acquire_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v[1:2], s[4:5] offset:16
; GFX10-CU-NEXT: s_endpgm
;
@@ -17227,6 +17548,8 @@ define amdgpu_kernel void @global_workgroup_one_as_acquire_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
; GFX11-CU-NEXT: s_endpgm
;
@@ -17261,6 +17584,10 @@ define amdgpu_kernel void @global_workgroup_one_as_acquire_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
; GFX12-CU-NEXT: s_endpgm
;
@@ -17368,7 +17695,10 @@ define amdgpu_kernel void @global_workgroup_one_as_release_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v[1:2], s[4:5] offset:16
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_workgroup_one_as_release_seq_cst_cmpxchg:
@@ -17484,7 +17814,10 @@ define amdgpu_kernel void @global_workgroup_one_as_release_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: global_workgroup_one_as_release_seq_cst_cmpxchg:
@@ -17518,7 +17851,12 @@ define amdgpu_kernel void @global_workgroup_one_as_release_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: global_workgroup_one_as_release_seq_cst_cmpxchg:
@@ -17625,7 +17963,10 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v[1:2], s[4:5] offset:16
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_workgroup_one_as_acq_rel_seq_cst_cmpxchg:
@@ -17741,7 +18082,10 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: global_workgroup_one_as_acq_rel_seq_cst_cmpxchg:
@@ -17775,7 +18119,12 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: global_workgroup_one_as_acq_rel_seq_cst_cmpxchg:
@@ -17882,7 +18231,10 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v0, v[1:2], s[4:5] offset:16
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: s_endpgm
;
; SKIP-CACHE-INV-LABEL: global_workgroup_one_as_seq_cst_seq_cst_cmpxchg:
@@ -17998,7 +18350,10 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: s_endpgm
;
; GFX12-WGP-LABEL: global_workgroup_one_as_seq_cst_seq_cst_cmpxchg:
@@ -18032,7 +18387,12 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v0, v[1:2], s[0:1] offset:16
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: s_endpgm
;
; GFX1250-LABEL: global_workgroup_one_as_seq_cst_seq_cst_cmpxchg:
@@ -18687,6 +19047,8 @@ define amdgpu_kernel void @global_workgroup_one_as_release_monotonic_ret_cmpxchg
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[4:5] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -18812,6 +19174,8 @@ define amdgpu_kernel void @global_workgroup_one_as_release_monotonic_ret_cmpxchg
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -18848,6 +19212,10 @@ define amdgpu_kernel void @global_workgroup_one_as_release_monotonic_ret_cmpxchg
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -18968,6 +19336,8 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_monotonic_ret_cmpxchg
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[4:5] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -19096,6 +19466,8 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_monotonic_ret_cmpxchg
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -19133,6 +19505,10 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_monotonic_ret_cmpxchg
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -19253,6 +19629,8 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_monotonic_ret_cmpxchg
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[4:5] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -19381,6 +19759,8 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_monotonic_ret_cmpxchg
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -19418,6 +19798,10 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_monotonic_ret_cmpxchg
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -20084,6 +20468,8 @@ define amdgpu_kernel void @global_workgroup_one_as_release_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[4:5] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -20212,6 +20598,8 @@ define amdgpu_kernel void @global_workgroup_one_as_release_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -20249,6 +20637,10 @@ define amdgpu_kernel void @global_workgroup_one_as_release_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -20369,6 +20761,8 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[4:5] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -20497,6 +20891,8 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -20534,6 +20930,10 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -20654,6 +21054,8 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[4:5] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -20782,6 +21184,8 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -20819,6 +21223,10 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -20939,6 +21347,8 @@ define amdgpu_kernel void @global_workgroup_one_as_monotonic_seq_cst_ret_cmpxchg
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[4:5] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -21067,6 +21477,8 @@ define amdgpu_kernel void @global_workgroup_one_as_monotonic_seq_cst_ret_cmpxchg
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -21104,6 +21516,10 @@ define amdgpu_kernel void @global_workgroup_one_as_monotonic_seq_cst_ret_cmpxchg
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -21224,6 +21640,8 @@ define amdgpu_kernel void @global_workgroup_one_as_acquire_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[4:5] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -21352,6 +21770,8 @@ define amdgpu_kernel void @global_workgroup_one_as_acquire_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -21389,6 +21809,10 @@ define amdgpu_kernel void @global_workgroup_one_as_acquire_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -21509,6 +21933,8 @@ define amdgpu_kernel void @global_workgroup_one_as_release_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[4:5] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -21637,6 +22063,8 @@ define amdgpu_kernel void @global_workgroup_one_as_release_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -21674,6 +22102,10 @@ define amdgpu_kernel void @global_workgroup_one_as_release_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -21794,6 +22226,8 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[4:5] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -21922,6 +22356,8 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -21959,6 +22395,10 @@ define amdgpu_kernel void @global_workgroup_one_as_acq_rel_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -22079,6 +22519,8 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v3, s6
; GFX10-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX10-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: global_atomic_cmpswap v1, v0, v[1:2], s[4:5] offset:16 glc
; GFX10-CU-NEXT: s_waitcnt vmcnt(0)
; GFX10-CU-NEXT: global_store_dword v0, v1, s[4:5]
@@ -22207,6 +22649,8 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX11-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX11-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 glc
; GFX11-CU-NEXT: s_waitcnt vmcnt(0)
; GFX11-CU-NEXT: global_store_b32 v0, v1, s[0:1]
@@ -22244,6 +22688,10 @@ define amdgpu_kernel void @global_workgroup_one_as_seq_cst_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v3, s2
; GFX12-CU-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX12-CU-NEXT: v_mov_b32_e32 v2, v3
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
; GFX12-CU-NEXT: global_atomic_cmpswap_b32 v1, v0, v[1:2], s[0:1] offset:16 th:TH_ATOMIC_RETURN
; GFX12-CU-NEXT: s_wait_loadcnt 0x0
; GFX12-CU-NEXT: global_store_b32 v0, v1, s[0:1]
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-local-agent.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-local-agent.ll
index 986b48b..712109d 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-local-agent.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-local-agent.ll
@@ -622,7 +622,8 @@ define amdgpu_kernel void @local_agent_seq_cst_load(
; GFX10-CU-NEXT: s_load_dword s4, s[8:9], 0x4
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_read_b32 v1, v0
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -719,7 +720,8 @@ define amdgpu_kernel void @local_agent_seq_cst_load(
; GFX11-CU-NEXT: s_load_b32 s0, s[4:5], 0x4
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_load_b32 v1, v0
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -749,7 +751,10 @@ define amdgpu_kernel void @local_agent_seq_cst_load(
; GFX12-CU-NEXT: s_load_b32 s0, s[4:5], 0x4
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_load_b32 v1, v0
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -1121,7 +1126,8 @@ define amdgpu_kernel void @local_agent_release_store(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_write_b32 v0, v1
; GFX10-CU-NEXT: s_endpgm
;
@@ -1200,7 +1206,8 @@ define amdgpu_kernel void @local_agent_release_store(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_store_b32 v0, v1
; GFX11-CU-NEXT: s_endpgm
;
@@ -1225,7 +1232,10 @@ define amdgpu_kernel void @local_agent_release_store(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_store_b32 v0, v1
; GFX12-CU-NEXT: s_endpgm
;
@@ -1291,7 +1301,8 @@ define amdgpu_kernel void @local_agent_seq_cst_store(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_write_b32 v0, v1
; GFX10-CU-NEXT: s_endpgm
;
@@ -1370,7 +1381,8 @@ define amdgpu_kernel void @local_agent_seq_cst_store(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_store_b32 v0, v1
; GFX11-CU-NEXT: s_endpgm
;
@@ -1395,7 +1407,10 @@ define amdgpu_kernel void @local_agent_seq_cst_store(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_store_b32 v0, v1
; GFX12-CU-NEXT: s_endpgm
;
@@ -1778,7 +1793,8 @@ define amdgpu_kernel void @local_agent_release_atomicrmw(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_wrxchg_rtn_b32 v0, v0, v1
; GFX10-CU-NEXT: s_endpgm
;
@@ -1857,7 +1873,8 @@ define amdgpu_kernel void @local_agent_release_atomicrmw(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX11-CU-NEXT: s_endpgm
;
@@ -1882,7 +1899,10 @@ define amdgpu_kernel void @local_agent_release_atomicrmw(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX12-CU-NEXT: s_endpgm
;
@@ -1952,7 +1972,8 @@ define amdgpu_kernel void @local_agent_acq_rel_atomicrmw(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_wrxchg_rtn_b32 v0, v0, v1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -2039,7 +2060,8 @@ define amdgpu_kernel void @local_agent_acq_rel_atomicrmw(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -2067,7 +2089,10 @@ define amdgpu_kernel void @local_agent_acq_rel_atomicrmw(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -2139,7 +2164,8 @@ define amdgpu_kernel void @local_agent_seq_cst_atomicrmw(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_wrxchg_rtn_b32 v0, v0, v1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -2226,7 +2252,8 @@ define amdgpu_kernel void @local_agent_seq_cst_atomicrmw(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -2254,7 +2281,10 @@ define amdgpu_kernel void @local_agent_seq_cst_atomicrmw(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -2535,7 +2565,8 @@ define amdgpu_kernel void @local_agent_acq_rel_ret_atomicrmw(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_wrxchg_rtn_b32 v1, v0, v1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -2639,7 +2670,8 @@ define amdgpu_kernel void @local_agent_acq_rel_ret_atomicrmw(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_storexchg_rtn_b32 v1, v0, v1
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -2671,7 +2703,10 @@ define amdgpu_kernel void @local_agent_acq_rel_ret_atomicrmw(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_storexchg_rtn_b32 v1, v0, v1
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -2756,7 +2791,8 @@ define amdgpu_kernel void @local_agent_seq_cst_ret_atomicrmw(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_wrxchg_rtn_b32 v1, v0, v1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -2860,7 +2896,8 @@ define amdgpu_kernel void @local_agent_seq_cst_ret_atomicrmw(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_storexchg_rtn_b32 v1, v0, v1
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -2892,7 +2929,10 @@ define amdgpu_kernel void @local_agent_seq_cst_ret_atomicrmw(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_storexchg_rtn_b32 v1, v0, v1
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -3348,7 +3388,8 @@ define amdgpu_kernel void @local_agent_release_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_endpgm
;
@@ -3441,7 +3482,8 @@ define amdgpu_kernel void @local_agent_release_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_endpgm
;
@@ -3470,7 +3512,10 @@ define amdgpu_kernel void @local_agent_release_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_endpgm
;
@@ -3551,7 +3596,8 @@ define amdgpu_kernel void @local_agent_acq_rel_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -3652,7 +3698,8 @@ define amdgpu_kernel void @local_agent_acq_rel_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -3684,7 +3731,10 @@ define amdgpu_kernel void @local_agent_acq_rel_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -3767,7 +3817,8 @@ define amdgpu_kernel void @local_agent_seq_cst_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -3868,7 +3919,8 @@ define amdgpu_kernel void @local_agent_seq_cst_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -3900,7 +3952,10 @@ define amdgpu_kernel void @local_agent_seq_cst_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -4375,7 +4430,8 @@ define amdgpu_kernel void @local_agent_release_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -4476,7 +4532,8 @@ define amdgpu_kernel void @local_agent_release_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -4508,7 +4565,10 @@ define amdgpu_kernel void @local_agent_release_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -4591,7 +4651,8 @@ define amdgpu_kernel void @local_agent_acq_rel_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -4692,7 +4753,8 @@ define amdgpu_kernel void @local_agent_acq_rel_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -4724,7 +4786,10 @@ define amdgpu_kernel void @local_agent_acq_rel_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -4807,7 +4872,8 @@ define amdgpu_kernel void @local_agent_seq_cst_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -4908,7 +4974,8 @@ define amdgpu_kernel void @local_agent_seq_cst_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -4940,7 +5007,10 @@ define amdgpu_kernel void @local_agent_seq_cst_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -5023,7 +5093,8 @@ define amdgpu_kernel void @local_agent_monotonic_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -5124,7 +5195,8 @@ define amdgpu_kernel void @local_agent_monotonic_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -5156,7 +5228,10 @@ define amdgpu_kernel void @local_agent_monotonic_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -5239,7 +5314,8 @@ define amdgpu_kernel void @local_agent_acquire_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -5340,7 +5416,8 @@ define amdgpu_kernel void @local_agent_acquire_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -5372,7 +5449,10 @@ define amdgpu_kernel void @local_agent_acquire_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -5455,7 +5535,8 @@ define amdgpu_kernel void @local_agent_release_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -5556,7 +5637,8 @@ define amdgpu_kernel void @local_agent_release_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -5588,7 +5670,10 @@ define amdgpu_kernel void @local_agent_release_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -5671,7 +5756,8 @@ define amdgpu_kernel void @local_agent_acq_rel_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -5772,7 +5858,8 @@ define amdgpu_kernel void @local_agent_acq_rel_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -5804,7 +5891,10 @@ define amdgpu_kernel void @local_agent_acq_rel_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -5887,7 +5977,8 @@ define amdgpu_kernel void @local_agent_seq_cst_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -5988,7 +6079,8 @@ define amdgpu_kernel void @local_agent_seq_cst_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -6020,7 +6112,10 @@ define amdgpu_kernel void @local_agent_seq_cst_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -6567,7 +6662,8 @@ define amdgpu_kernel void @local_agent_release_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -6682,7 +6778,8 @@ define amdgpu_kernel void @local_agent_release_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -6717,7 +6814,10 @@ define amdgpu_kernel void @local_agent_release_monotonic_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: s_wait_dscnt 0x0
@@ -6814,7 +6914,8 @@ define amdgpu_kernel void @local_agent_acq_rel_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -6932,7 +7033,8 @@ define amdgpu_kernel void @local_agent_acq_rel_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -6968,7 +7070,10 @@ define amdgpu_kernel void @local_agent_acq_rel_monotonic_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -7065,7 +7170,8 @@ define amdgpu_kernel void @local_agent_seq_cst_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -7183,7 +7289,8 @@ define amdgpu_kernel void @local_agent_seq_cst_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -7219,7 +7326,10 @@ define amdgpu_kernel void @local_agent_seq_cst_monotonic_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -7778,7 +7888,8 @@ define amdgpu_kernel void @local_agent_release_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -7896,7 +8007,8 @@ define amdgpu_kernel void @local_agent_release_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -7932,7 +8044,10 @@ define amdgpu_kernel void @local_agent_release_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8029,7 +8144,8 @@ define amdgpu_kernel void @local_agent_acq_rel_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -8147,7 +8263,8 @@ define amdgpu_kernel void @local_agent_acq_rel_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8183,7 +8300,10 @@ define amdgpu_kernel void @local_agent_acq_rel_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8280,7 +8400,8 @@ define amdgpu_kernel void @local_agent_seq_cst_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -8398,7 +8519,8 @@ define amdgpu_kernel void @local_agent_seq_cst_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8434,7 +8556,10 @@ define amdgpu_kernel void @local_agent_seq_cst_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8531,7 +8656,8 @@ define amdgpu_kernel void @local_agent_monotonic_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -8649,7 +8775,8 @@ define amdgpu_kernel void @local_agent_monotonic_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8685,7 +8812,10 @@ define amdgpu_kernel void @local_agent_monotonic_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8782,7 +8912,8 @@ define amdgpu_kernel void @local_agent_acquire_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -8900,7 +9031,8 @@ define amdgpu_kernel void @local_agent_acquire_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8936,7 +9068,10 @@ define amdgpu_kernel void @local_agent_acquire_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9033,7 +9168,8 @@ define amdgpu_kernel void @local_agent_release_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -9151,7 +9287,8 @@ define amdgpu_kernel void @local_agent_release_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9187,7 +9324,10 @@ define amdgpu_kernel void @local_agent_release_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9284,7 +9424,8 @@ define amdgpu_kernel void @local_agent_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -9402,7 +9543,8 @@ define amdgpu_kernel void @local_agent_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9438,7 +9580,10 @@ define amdgpu_kernel void @local_agent_acq_rel_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9535,7 +9680,8 @@ define amdgpu_kernel void @local_agent_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -9653,7 +9799,8 @@ define amdgpu_kernel void @local_agent_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9689,7 +9836,10 @@ define amdgpu_kernel void @local_agent_seq_cst_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-local-cluster.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-local-cluster.ll
index 8926893..6d1e4e6 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-local-cluster.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-local-cluster.ll
@@ -622,7 +622,8 @@ define amdgpu_kernel void @local_cluster_seq_cst_load(
; GFX10-CU-NEXT: s_load_dword s4, s[8:9], 0x4
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_read_b32 v1, v0
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -719,7 +720,8 @@ define amdgpu_kernel void @local_cluster_seq_cst_load(
; GFX11-CU-NEXT: s_load_b32 s0, s[4:5], 0x4
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_load_b32 v1, v0
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -749,7 +751,10 @@ define amdgpu_kernel void @local_cluster_seq_cst_load(
; GFX12-CU-NEXT: s_load_b32 s0, s[4:5], 0x4
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_load_b32 v1, v0
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -1121,7 +1126,8 @@ define amdgpu_kernel void @local_cluster_release_store(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_write_b32 v0, v1
; GFX10-CU-NEXT: s_endpgm
;
@@ -1200,7 +1206,8 @@ define amdgpu_kernel void @local_cluster_release_store(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_store_b32 v0, v1
; GFX11-CU-NEXT: s_endpgm
;
@@ -1225,7 +1232,10 @@ define amdgpu_kernel void @local_cluster_release_store(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_store_b32 v0, v1
; GFX12-CU-NEXT: s_endpgm
;
@@ -1291,7 +1301,8 @@ define amdgpu_kernel void @local_cluster_seq_cst_store(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_write_b32 v0, v1
; GFX10-CU-NEXT: s_endpgm
;
@@ -1370,7 +1381,8 @@ define amdgpu_kernel void @local_cluster_seq_cst_store(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_store_b32 v0, v1
; GFX11-CU-NEXT: s_endpgm
;
@@ -1395,7 +1407,10 @@ define amdgpu_kernel void @local_cluster_seq_cst_store(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_store_b32 v0, v1
; GFX12-CU-NEXT: s_endpgm
;
@@ -1778,7 +1793,8 @@ define amdgpu_kernel void @local_cluster_release_atomicrmw(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_wrxchg_rtn_b32 v0, v0, v1
; GFX10-CU-NEXT: s_endpgm
;
@@ -1857,7 +1873,8 @@ define amdgpu_kernel void @local_cluster_release_atomicrmw(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX11-CU-NEXT: s_endpgm
;
@@ -1882,7 +1899,10 @@ define amdgpu_kernel void @local_cluster_release_atomicrmw(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX12-CU-NEXT: s_endpgm
;
@@ -1952,7 +1972,8 @@ define amdgpu_kernel void @local_cluster_acq_rel_atomicrmw(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_wrxchg_rtn_b32 v0, v0, v1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -2039,7 +2060,8 @@ define amdgpu_kernel void @local_cluster_acq_rel_atomicrmw(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -2067,7 +2089,10 @@ define amdgpu_kernel void @local_cluster_acq_rel_atomicrmw(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -2139,7 +2164,8 @@ define amdgpu_kernel void @local_cluster_seq_cst_atomicrmw(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_wrxchg_rtn_b32 v0, v0, v1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -2226,7 +2252,8 @@ define amdgpu_kernel void @local_cluster_seq_cst_atomicrmw(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -2254,7 +2281,10 @@ define amdgpu_kernel void @local_cluster_seq_cst_atomicrmw(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -2535,7 +2565,8 @@ define amdgpu_kernel void @local_cluster_acq_rel_ret_atomicrmw(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_wrxchg_rtn_b32 v1, v0, v1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -2639,7 +2670,8 @@ define amdgpu_kernel void @local_cluster_acq_rel_ret_atomicrmw(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_storexchg_rtn_b32 v1, v0, v1
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -2671,7 +2703,10 @@ define amdgpu_kernel void @local_cluster_acq_rel_ret_atomicrmw(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_storexchg_rtn_b32 v1, v0, v1
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -2756,7 +2791,8 @@ define amdgpu_kernel void @local_cluster_seq_cst_ret_atomicrmw(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_wrxchg_rtn_b32 v1, v0, v1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -2860,7 +2896,8 @@ define amdgpu_kernel void @local_cluster_seq_cst_ret_atomicrmw(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_storexchg_rtn_b32 v1, v0, v1
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -2892,7 +2929,10 @@ define amdgpu_kernel void @local_cluster_seq_cst_ret_atomicrmw(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_storexchg_rtn_b32 v1, v0, v1
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -3348,7 +3388,8 @@ define amdgpu_kernel void @local_cluster_release_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_endpgm
;
@@ -3441,7 +3482,8 @@ define amdgpu_kernel void @local_cluster_release_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_endpgm
;
@@ -3470,7 +3512,10 @@ define amdgpu_kernel void @local_cluster_release_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_endpgm
;
@@ -3551,7 +3596,8 @@ define amdgpu_kernel void @local_cluster_acq_rel_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -3652,7 +3698,8 @@ define amdgpu_kernel void @local_cluster_acq_rel_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -3684,7 +3731,10 @@ define amdgpu_kernel void @local_cluster_acq_rel_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -3767,7 +3817,8 @@ define amdgpu_kernel void @local_cluster_seq_cst_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -3868,7 +3919,8 @@ define amdgpu_kernel void @local_cluster_seq_cst_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -3900,7 +3952,10 @@ define amdgpu_kernel void @local_cluster_seq_cst_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -4375,7 +4430,8 @@ define amdgpu_kernel void @local_cluster_release_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -4476,7 +4532,8 @@ define amdgpu_kernel void @local_cluster_release_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -4508,7 +4565,10 @@ define amdgpu_kernel void @local_cluster_release_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -4591,7 +4651,8 @@ define amdgpu_kernel void @local_cluster_acq_rel_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -4692,7 +4753,8 @@ define amdgpu_kernel void @local_cluster_acq_rel_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -4724,7 +4786,10 @@ define amdgpu_kernel void @local_cluster_acq_rel_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -4807,7 +4872,8 @@ define amdgpu_kernel void @local_cluster_seq_cst_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -4908,7 +4974,8 @@ define amdgpu_kernel void @local_cluster_seq_cst_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -4940,7 +5007,10 @@ define amdgpu_kernel void @local_cluster_seq_cst_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -5023,7 +5093,8 @@ define amdgpu_kernel void @local_cluster_monotonic_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -5124,7 +5195,8 @@ define amdgpu_kernel void @local_cluster_monotonic_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -5156,7 +5228,10 @@ define amdgpu_kernel void @local_cluster_monotonic_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -5239,7 +5314,8 @@ define amdgpu_kernel void @local_cluster_acquire_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -5340,7 +5416,8 @@ define amdgpu_kernel void @local_cluster_acquire_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -5372,7 +5449,10 @@ define amdgpu_kernel void @local_cluster_acquire_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -5455,7 +5535,8 @@ define amdgpu_kernel void @local_cluster_release_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -5556,7 +5637,8 @@ define amdgpu_kernel void @local_cluster_release_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -5588,7 +5670,10 @@ define amdgpu_kernel void @local_cluster_release_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -5671,7 +5756,8 @@ define amdgpu_kernel void @local_cluster_acq_rel_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -5772,7 +5858,8 @@ define amdgpu_kernel void @local_cluster_acq_rel_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -5804,7 +5891,10 @@ define amdgpu_kernel void @local_cluster_acq_rel_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -5887,7 +5977,8 @@ define amdgpu_kernel void @local_cluster_seq_cst_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -5988,7 +6079,8 @@ define amdgpu_kernel void @local_cluster_seq_cst_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -6020,7 +6112,10 @@ define amdgpu_kernel void @local_cluster_seq_cst_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -6567,7 +6662,8 @@ define amdgpu_kernel void @local_cluster_release_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -6682,7 +6778,8 @@ define amdgpu_kernel void @local_cluster_release_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -6717,7 +6814,10 @@ define amdgpu_kernel void @local_cluster_release_monotonic_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: s_wait_dscnt 0x0
@@ -6814,7 +6914,8 @@ define amdgpu_kernel void @local_cluster_acq_rel_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -6932,7 +7033,8 @@ define amdgpu_kernel void @local_cluster_acq_rel_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -6968,7 +7070,10 @@ define amdgpu_kernel void @local_cluster_acq_rel_monotonic_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -7065,7 +7170,8 @@ define amdgpu_kernel void @local_cluster_seq_cst_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -7183,7 +7289,8 @@ define amdgpu_kernel void @local_cluster_seq_cst_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -7219,7 +7326,10 @@ define amdgpu_kernel void @local_cluster_seq_cst_monotonic_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -7778,7 +7888,8 @@ define amdgpu_kernel void @local_cluster_release_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -7896,7 +8007,8 @@ define amdgpu_kernel void @local_cluster_release_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -7932,7 +8044,10 @@ define amdgpu_kernel void @local_cluster_release_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8029,7 +8144,8 @@ define amdgpu_kernel void @local_cluster_acq_rel_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -8147,7 +8263,8 @@ define amdgpu_kernel void @local_cluster_acq_rel_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8183,7 +8300,10 @@ define amdgpu_kernel void @local_cluster_acq_rel_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8280,7 +8400,8 @@ define amdgpu_kernel void @local_cluster_seq_cst_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -8398,7 +8519,8 @@ define amdgpu_kernel void @local_cluster_seq_cst_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8434,7 +8556,10 @@ define amdgpu_kernel void @local_cluster_seq_cst_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8531,7 +8656,8 @@ define amdgpu_kernel void @local_cluster_monotonic_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -8649,7 +8775,8 @@ define amdgpu_kernel void @local_cluster_monotonic_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8685,7 +8812,10 @@ define amdgpu_kernel void @local_cluster_monotonic_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8782,7 +8912,8 @@ define amdgpu_kernel void @local_cluster_acquire_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -8900,7 +9031,8 @@ define amdgpu_kernel void @local_cluster_acquire_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8936,7 +9068,10 @@ define amdgpu_kernel void @local_cluster_acquire_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9033,7 +9168,8 @@ define amdgpu_kernel void @local_cluster_release_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -9151,7 +9287,8 @@ define amdgpu_kernel void @local_cluster_release_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9187,7 +9324,10 @@ define amdgpu_kernel void @local_cluster_release_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9284,7 +9424,8 @@ define amdgpu_kernel void @local_cluster_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -9402,7 +9543,8 @@ define amdgpu_kernel void @local_cluster_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9438,7 +9580,10 @@ define amdgpu_kernel void @local_cluster_acq_rel_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9535,7 +9680,8 @@ define amdgpu_kernel void @local_cluster_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -9653,7 +9799,8 @@ define amdgpu_kernel void @local_cluster_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9689,7 +9836,10 @@ define amdgpu_kernel void @local_cluster_seq_cst_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-local-system.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-local-system.ll
index 81bbe0a..577d2ca 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-local-system.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-local-system.ll
@@ -622,7 +622,8 @@ define amdgpu_kernel void @local_system_seq_cst_load(
; GFX10-CU-NEXT: s_load_dword s4, s[8:9], 0x4
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_read_b32 v1, v0
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -719,7 +720,8 @@ define amdgpu_kernel void @local_system_seq_cst_load(
; GFX11-CU-NEXT: s_load_b32 s0, s[4:5], 0x4
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_load_b32 v1, v0
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -749,7 +751,10 @@ define amdgpu_kernel void @local_system_seq_cst_load(
; GFX12-CU-NEXT: s_load_b32 s0, s[4:5], 0x4
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_load_b32 v1, v0
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -1121,7 +1126,8 @@ define amdgpu_kernel void @local_system_release_store(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_write_b32 v0, v1
; GFX10-CU-NEXT: s_endpgm
;
@@ -1200,7 +1206,8 @@ define amdgpu_kernel void @local_system_release_store(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_store_b32 v0, v1
; GFX11-CU-NEXT: s_endpgm
;
@@ -1225,7 +1232,10 @@ define amdgpu_kernel void @local_system_release_store(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_store_b32 v0, v1
; GFX12-CU-NEXT: s_endpgm
;
@@ -1291,7 +1301,8 @@ define amdgpu_kernel void @local_system_seq_cst_store(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_write_b32 v0, v1
; GFX10-CU-NEXT: s_endpgm
;
@@ -1370,7 +1381,8 @@ define amdgpu_kernel void @local_system_seq_cst_store(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_store_b32 v0, v1
; GFX11-CU-NEXT: s_endpgm
;
@@ -1395,7 +1407,10 @@ define amdgpu_kernel void @local_system_seq_cst_store(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_store_b32 v0, v1
; GFX12-CU-NEXT: s_endpgm
;
@@ -1778,7 +1793,8 @@ define amdgpu_kernel void @local_system_release_atomicrmw(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_wrxchg_rtn_b32 v0, v0, v1
; GFX10-CU-NEXT: s_endpgm
;
@@ -1857,7 +1873,8 @@ define amdgpu_kernel void @local_system_release_atomicrmw(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX11-CU-NEXT: s_endpgm
;
@@ -1882,7 +1899,10 @@ define amdgpu_kernel void @local_system_release_atomicrmw(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX12-CU-NEXT: s_endpgm
;
@@ -1952,7 +1972,8 @@ define amdgpu_kernel void @local_system_acq_rel_atomicrmw(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_wrxchg_rtn_b32 v0, v0, v1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -2039,7 +2060,8 @@ define amdgpu_kernel void @local_system_acq_rel_atomicrmw(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -2067,7 +2089,10 @@ define amdgpu_kernel void @local_system_acq_rel_atomicrmw(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -2139,7 +2164,8 @@ define amdgpu_kernel void @local_system_seq_cst_atomicrmw(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_wrxchg_rtn_b32 v0, v0, v1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -2226,7 +2252,8 @@ define amdgpu_kernel void @local_system_seq_cst_atomicrmw(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -2254,7 +2281,10 @@ define amdgpu_kernel void @local_system_seq_cst_atomicrmw(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -2535,7 +2565,8 @@ define amdgpu_kernel void @local_system_acq_rel_ret_atomicrmw(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_wrxchg_rtn_b32 v1, v0, v1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -2639,7 +2670,8 @@ define amdgpu_kernel void @local_system_acq_rel_ret_atomicrmw(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_storexchg_rtn_b32 v1, v0, v1
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -2671,7 +2703,10 @@ define amdgpu_kernel void @local_system_acq_rel_ret_atomicrmw(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_storexchg_rtn_b32 v1, v0, v1
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -2756,7 +2791,8 @@ define amdgpu_kernel void @local_system_seq_cst_ret_atomicrmw(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_wrxchg_rtn_b32 v1, v0, v1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -2860,7 +2896,8 @@ define amdgpu_kernel void @local_system_seq_cst_ret_atomicrmw(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_storexchg_rtn_b32 v1, v0, v1
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -2892,7 +2929,10 @@ define amdgpu_kernel void @local_system_seq_cst_ret_atomicrmw(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_storexchg_rtn_b32 v1, v0, v1
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -3348,7 +3388,8 @@ define amdgpu_kernel void @local_system_release_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_endpgm
;
@@ -3441,7 +3482,8 @@ define amdgpu_kernel void @local_system_release_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_endpgm
;
@@ -3470,7 +3512,10 @@ define amdgpu_kernel void @local_system_release_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_endpgm
;
@@ -3551,7 +3596,8 @@ define amdgpu_kernel void @local_system_acq_rel_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -3652,7 +3698,8 @@ define amdgpu_kernel void @local_system_acq_rel_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -3684,7 +3731,10 @@ define amdgpu_kernel void @local_system_acq_rel_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -3767,7 +3817,8 @@ define amdgpu_kernel void @local_system_seq_cst_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -3868,7 +3919,8 @@ define amdgpu_kernel void @local_system_seq_cst_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -3900,7 +3952,10 @@ define amdgpu_kernel void @local_system_seq_cst_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -4375,7 +4430,8 @@ define amdgpu_kernel void @local_system_release_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -4476,7 +4532,8 @@ define amdgpu_kernel void @local_system_release_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -4508,7 +4565,10 @@ define amdgpu_kernel void @local_system_release_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -4591,7 +4651,8 @@ define amdgpu_kernel void @local_system_acq_rel_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -4692,7 +4753,8 @@ define amdgpu_kernel void @local_system_acq_rel_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -4724,7 +4786,10 @@ define amdgpu_kernel void @local_system_acq_rel_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -4807,7 +4872,8 @@ define amdgpu_kernel void @local_system_seq_cst_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -4908,7 +4974,8 @@ define amdgpu_kernel void @local_system_seq_cst_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -4940,7 +5007,10 @@ define amdgpu_kernel void @local_system_seq_cst_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -5023,7 +5093,8 @@ define amdgpu_kernel void @local_system_monotonic_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -5124,7 +5195,8 @@ define amdgpu_kernel void @local_system_monotonic_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -5156,7 +5228,10 @@ define amdgpu_kernel void @local_system_monotonic_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -5239,7 +5314,8 @@ define amdgpu_kernel void @local_system_acquire_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -5340,7 +5416,8 @@ define amdgpu_kernel void @local_system_acquire_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -5372,7 +5449,10 @@ define amdgpu_kernel void @local_system_acquire_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -5455,7 +5535,8 @@ define amdgpu_kernel void @local_system_release_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -5556,7 +5637,8 @@ define amdgpu_kernel void @local_system_release_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -5588,7 +5670,10 @@ define amdgpu_kernel void @local_system_release_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -5671,7 +5756,8 @@ define amdgpu_kernel void @local_system_acq_rel_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -5772,7 +5858,8 @@ define amdgpu_kernel void @local_system_acq_rel_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -5804,7 +5891,10 @@ define amdgpu_kernel void @local_system_acq_rel_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -5887,7 +5977,8 @@ define amdgpu_kernel void @local_system_seq_cst_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -5988,7 +6079,8 @@ define amdgpu_kernel void @local_system_seq_cst_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -6020,7 +6112,10 @@ define amdgpu_kernel void @local_system_seq_cst_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -6567,7 +6662,8 @@ define amdgpu_kernel void @local_system_release_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -6682,7 +6778,8 @@ define amdgpu_kernel void @local_system_release_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -6717,7 +6814,10 @@ define amdgpu_kernel void @local_system_release_monotonic_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: s_wait_dscnt 0x0
@@ -6814,7 +6914,8 @@ define amdgpu_kernel void @local_system_acq_rel_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -6932,7 +7033,8 @@ define amdgpu_kernel void @local_system_acq_rel_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -6968,7 +7070,10 @@ define amdgpu_kernel void @local_system_acq_rel_monotonic_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -7065,7 +7170,8 @@ define amdgpu_kernel void @local_system_seq_cst_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -7183,7 +7289,8 @@ define amdgpu_kernel void @local_system_seq_cst_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -7219,7 +7326,10 @@ define amdgpu_kernel void @local_system_seq_cst_monotonic_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -7778,7 +7888,8 @@ define amdgpu_kernel void @local_system_release_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -7896,7 +8007,8 @@ define amdgpu_kernel void @local_system_release_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -7932,7 +8044,10 @@ define amdgpu_kernel void @local_system_release_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8029,7 +8144,8 @@ define amdgpu_kernel void @local_system_acq_rel_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -8147,7 +8263,8 @@ define amdgpu_kernel void @local_system_acq_rel_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8183,7 +8300,10 @@ define amdgpu_kernel void @local_system_acq_rel_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8280,7 +8400,8 @@ define amdgpu_kernel void @local_system_seq_cst_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -8398,7 +8519,8 @@ define amdgpu_kernel void @local_system_seq_cst_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8434,7 +8556,10 @@ define amdgpu_kernel void @local_system_seq_cst_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8531,7 +8656,8 @@ define amdgpu_kernel void @local_system_monotonic_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -8649,7 +8775,8 @@ define amdgpu_kernel void @local_system_monotonic_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8685,7 +8812,10 @@ define amdgpu_kernel void @local_system_monotonic_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8782,7 +8912,8 @@ define amdgpu_kernel void @local_system_acquire_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -8900,7 +9031,8 @@ define amdgpu_kernel void @local_system_acquire_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8936,7 +9068,10 @@ define amdgpu_kernel void @local_system_acquire_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9033,7 +9168,8 @@ define amdgpu_kernel void @local_system_release_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -9151,7 +9287,8 @@ define amdgpu_kernel void @local_system_release_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9187,7 +9324,10 @@ define amdgpu_kernel void @local_system_release_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9284,7 +9424,8 @@ define amdgpu_kernel void @local_system_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -9402,7 +9543,8 @@ define amdgpu_kernel void @local_system_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9438,7 +9580,10 @@ define amdgpu_kernel void @local_system_acq_rel_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9535,7 +9680,8 @@ define amdgpu_kernel void @local_system_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -9653,7 +9799,8 @@ define amdgpu_kernel void @local_system_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9689,7 +9836,10 @@ define amdgpu_kernel void @local_system_seq_cst_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-local-volatile.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-local-volatile.ll
index 980141a..d686e7a 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-local-volatile.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-local-volatile.ll
@@ -819,7 +819,8 @@ define amdgpu_kernel void @local_volatile_workgroup_release_store(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_write_b32 v0, v1
; GFX10-CU-NEXT: s_endpgm
;
@@ -854,7 +855,8 @@ define amdgpu_kernel void @local_volatile_workgroup_release_store(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_store_b32 v0, v1
; GFX11-CU-NEXT: s_endpgm
;
@@ -879,7 +881,10 @@ define amdgpu_kernel void @local_volatile_workgroup_release_store(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_store_b32 v0, v1
; GFX12-CU-NEXT: s_endpgm
;
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-local-workgroup.ll b/llvm/test/CodeGen/AMDGPU/memory-legalizer-local-workgroup.ll
index 6a233a2..ab4d783 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-local-workgroup.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-local-workgroup.ll
@@ -622,7 +622,8 @@ define amdgpu_kernel void @local_workgroup_seq_cst_load(
; GFX10-CU-NEXT: s_load_dword s4, s[8:9], 0x4
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_read_b32 v1, v0
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -719,7 +720,8 @@ define amdgpu_kernel void @local_workgroup_seq_cst_load(
; GFX11-CU-NEXT: s_load_b32 s0, s[4:5], 0x4
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_load_b32 v1, v0
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -749,7 +751,10 @@ define amdgpu_kernel void @local_workgroup_seq_cst_load(
; GFX12-CU-NEXT: s_load_b32 s0, s[4:5], 0x4
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_load_b32 v1, v0
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -1121,7 +1126,8 @@ define amdgpu_kernel void @local_workgroup_release_store(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_write_b32 v0, v1
; GFX10-CU-NEXT: s_endpgm
;
@@ -1200,7 +1206,8 @@ define amdgpu_kernel void @local_workgroup_release_store(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_store_b32 v0, v1
; GFX11-CU-NEXT: s_endpgm
;
@@ -1225,7 +1232,10 @@ define amdgpu_kernel void @local_workgroup_release_store(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_store_b32 v0, v1
; GFX12-CU-NEXT: s_endpgm
;
@@ -1291,7 +1301,8 @@ define amdgpu_kernel void @local_workgroup_seq_cst_store(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_write_b32 v0, v1
; GFX10-CU-NEXT: s_endpgm
;
@@ -1370,7 +1381,8 @@ define amdgpu_kernel void @local_workgroup_seq_cst_store(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_store_b32 v0, v1
; GFX11-CU-NEXT: s_endpgm
;
@@ -1395,7 +1407,10 @@ define amdgpu_kernel void @local_workgroup_seq_cst_store(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_store_b32 v0, v1
; GFX12-CU-NEXT: s_endpgm
;
@@ -1778,7 +1793,8 @@ define amdgpu_kernel void @local_workgroup_release_atomicrmw(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_wrxchg_rtn_b32 v0, v0, v1
; GFX10-CU-NEXT: s_endpgm
;
@@ -1857,7 +1873,8 @@ define amdgpu_kernel void @local_workgroup_release_atomicrmw(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX11-CU-NEXT: s_endpgm
;
@@ -1882,7 +1899,10 @@ define amdgpu_kernel void @local_workgroup_release_atomicrmw(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX12-CU-NEXT: s_endpgm
;
@@ -1952,7 +1972,8 @@ define amdgpu_kernel void @local_workgroup_acq_rel_atomicrmw(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_wrxchg_rtn_b32 v0, v0, v1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -2039,7 +2060,8 @@ define amdgpu_kernel void @local_workgroup_acq_rel_atomicrmw(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -2067,7 +2089,10 @@ define amdgpu_kernel void @local_workgroup_acq_rel_atomicrmw(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -2139,7 +2164,8 @@ define amdgpu_kernel void @local_workgroup_seq_cst_atomicrmw(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_wrxchg_rtn_b32 v0, v0, v1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -2226,7 +2252,8 @@ define amdgpu_kernel void @local_workgroup_seq_cst_atomicrmw(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -2254,7 +2281,10 @@ define amdgpu_kernel void @local_workgroup_seq_cst_atomicrmw(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_storexchg_rtn_b32 v0, v0, v1
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -2535,7 +2565,8 @@ define amdgpu_kernel void @local_workgroup_acq_rel_ret_atomicrmw(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_wrxchg_rtn_b32 v1, v0, v1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -2639,7 +2670,8 @@ define amdgpu_kernel void @local_workgroup_acq_rel_ret_atomicrmw(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_storexchg_rtn_b32 v1, v0, v1
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -2671,7 +2703,10 @@ define amdgpu_kernel void @local_workgroup_acq_rel_ret_atomicrmw(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_storexchg_rtn_b32 v1, v0, v1
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -2756,7 +2791,8 @@ define amdgpu_kernel void @local_workgroup_seq_cst_ret_atomicrmw(
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_wrxchg_rtn_b32 v1, v0, v1
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -2860,7 +2896,8 @@ define amdgpu_kernel void @local_workgroup_seq_cst_ret_atomicrmw(
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_storexchg_rtn_b32 v1, v0, v1
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -2892,7 +2929,10 @@ define amdgpu_kernel void @local_workgroup_seq_cst_ret_atomicrmw(
; GFX12-CU-NEXT: s_wait_kmcnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_storexchg_rtn_b32 v1, v0, v1
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -3348,7 +3388,8 @@ define amdgpu_kernel void @local_workgroup_release_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_endpgm
;
@@ -3441,7 +3482,8 @@ define amdgpu_kernel void @local_workgroup_release_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_endpgm
;
@@ -3470,7 +3512,10 @@ define amdgpu_kernel void @local_workgroup_release_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_endpgm
;
@@ -3551,7 +3596,8 @@ define amdgpu_kernel void @local_workgroup_acq_rel_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -3652,7 +3698,8 @@ define amdgpu_kernel void @local_workgroup_acq_rel_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -3684,7 +3731,10 @@ define amdgpu_kernel void @local_workgroup_acq_rel_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -3767,7 +3817,8 @@ define amdgpu_kernel void @local_workgroup_seq_cst_monotonic_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -3868,7 +3919,8 @@ define amdgpu_kernel void @local_workgroup_seq_cst_monotonic_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -3900,7 +3952,10 @@ define amdgpu_kernel void @local_workgroup_seq_cst_monotonic_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -4375,7 +4430,8 @@ define amdgpu_kernel void @local_workgroup_release_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -4476,7 +4532,8 @@ define amdgpu_kernel void @local_workgroup_release_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -4508,7 +4565,10 @@ define amdgpu_kernel void @local_workgroup_release_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -4591,7 +4651,8 @@ define amdgpu_kernel void @local_workgroup_acq_rel_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -4692,7 +4753,8 @@ define amdgpu_kernel void @local_workgroup_acq_rel_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -4724,7 +4786,10 @@ define amdgpu_kernel void @local_workgroup_acq_rel_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -4807,7 +4872,8 @@ define amdgpu_kernel void @local_workgroup_seq_cst_acquire_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -4908,7 +4974,8 @@ define amdgpu_kernel void @local_workgroup_seq_cst_acquire_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -4940,7 +5007,10 @@ define amdgpu_kernel void @local_workgroup_seq_cst_acquire_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -5023,7 +5093,8 @@ define amdgpu_kernel void @local_workgroup_monotonic_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -5124,7 +5195,8 @@ define amdgpu_kernel void @local_workgroup_monotonic_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -5156,7 +5228,10 @@ define amdgpu_kernel void @local_workgroup_monotonic_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -5239,7 +5314,8 @@ define amdgpu_kernel void @local_workgroup_acquire_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -5340,7 +5416,8 @@ define amdgpu_kernel void @local_workgroup_acquire_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -5372,7 +5449,10 @@ define amdgpu_kernel void @local_workgroup_acquire_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -5455,7 +5535,8 @@ define amdgpu_kernel void @local_workgroup_release_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -5556,7 +5637,8 @@ define amdgpu_kernel void @local_workgroup_release_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -5588,7 +5670,10 @@ define amdgpu_kernel void @local_workgroup_release_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -5671,7 +5756,8 @@ define amdgpu_kernel void @local_workgroup_acq_rel_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -5772,7 +5858,8 @@ define amdgpu_kernel void @local_workgroup_acq_rel_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -5804,7 +5891,10 @@ define amdgpu_kernel void @local_workgroup_acq_rel_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -5887,7 +5977,8 @@ define amdgpu_kernel void @local_workgroup_seq_cst_seq_cst_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s5
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s4
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_b32 v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: s_endpgm
@@ -5988,7 +6079,8 @@ define amdgpu_kernel void @local_workgroup_seq_cst_seq_cst_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: s_endpgm
@@ -6020,7 +6112,10 @@ define amdgpu_kernel void @local_workgroup_seq_cst_seq_cst_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s1
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s0
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_b32 v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: s_endpgm
@@ -6567,7 +6662,8 @@ define amdgpu_kernel void @local_workgroup_release_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -6682,7 +6778,8 @@ define amdgpu_kernel void @local_workgroup_release_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
@@ -6717,7 +6814,10 @@ define amdgpu_kernel void @local_workgroup_release_monotonic_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: s_wait_dscnt 0x0
@@ -6814,7 +6914,8 @@ define amdgpu_kernel void @local_workgroup_acq_rel_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -6932,7 +7033,8 @@ define amdgpu_kernel void @local_workgroup_acq_rel_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -6968,7 +7070,10 @@ define amdgpu_kernel void @local_workgroup_acq_rel_monotonic_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -7065,7 +7170,8 @@ define amdgpu_kernel void @local_workgroup_seq_cst_monotonic_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -7183,7 +7289,8 @@ define amdgpu_kernel void @local_workgroup_seq_cst_monotonic_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -7219,7 +7326,10 @@ define amdgpu_kernel void @local_workgroup_seq_cst_monotonic_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -7778,7 +7888,8 @@ define amdgpu_kernel void @local_workgroup_release_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -7896,7 +8007,8 @@ define amdgpu_kernel void @local_workgroup_release_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -7932,7 +8044,10 @@ define amdgpu_kernel void @local_workgroup_release_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8029,7 +8144,8 @@ define amdgpu_kernel void @local_workgroup_acq_rel_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -8147,7 +8263,8 @@ define amdgpu_kernel void @local_workgroup_acq_rel_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8183,7 +8300,10 @@ define amdgpu_kernel void @local_workgroup_acq_rel_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8280,7 +8400,8 @@ define amdgpu_kernel void @local_workgroup_seq_cst_acquire_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -8398,7 +8519,8 @@ define amdgpu_kernel void @local_workgroup_seq_cst_acquire_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8434,7 +8556,10 @@ define amdgpu_kernel void @local_workgroup_seq_cst_acquire_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8531,7 +8656,8 @@ define amdgpu_kernel void @local_workgroup_monotonic_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -8649,7 +8775,8 @@ define amdgpu_kernel void @local_workgroup_monotonic_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8685,7 +8812,10 @@ define amdgpu_kernel void @local_workgroup_monotonic_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8782,7 +8912,8 @@ define amdgpu_kernel void @local_workgroup_acquire_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -8900,7 +9031,8 @@ define amdgpu_kernel void @local_workgroup_acquire_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -8936,7 +9068,10 @@ define amdgpu_kernel void @local_workgroup_acquire_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9033,7 +9168,8 @@ define amdgpu_kernel void @local_workgroup_release_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -9151,7 +9287,8 @@ define amdgpu_kernel void @local_workgroup_release_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9187,7 +9324,10 @@ define amdgpu_kernel void @local_workgroup_release_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9284,7 +9424,8 @@ define amdgpu_kernel void @local_workgroup_acq_rel_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -9402,7 +9543,8 @@ define amdgpu_kernel void @local_workgroup_acq_rel_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9438,7 +9580,10 @@ define amdgpu_kernel void @local_workgroup_acq_rel_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9535,7 +9680,8 @@ define amdgpu_kernel void @local_workgroup_seq_cst_seq_cst_ret_cmpxchg(
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
; GFX10-CU-NEXT: v_mov_b32_e32 v1, s6
; GFX10-CU-NEXT: v_mov_b32_e32 v2, s5
-; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX10-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-CU-NEXT: ds_cmpst_rtn_b32 v1, v0, v1, v2 offset:16
; GFX10-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-CU-NEXT: v_mov_b32_e32 v0, s4
@@ -9653,7 +9799,8 @@ define amdgpu_kernel void @local_workgroup_seq_cst_seq_cst_ret_cmpxchg(
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX11-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX11-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX11-CU-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX11-CU-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-CU-NEXT: v_mov_b32_e32 v0, s0
@@ -9689,7 +9836,10 @@ define amdgpu_kernel void @local_workgroup_seq_cst_seq_cst_ret_cmpxchg(
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
; GFX12-CU-NEXT: v_mov_b32_e32 v1, s2
; GFX12-CU-NEXT: v_mov_b32_e32 v2, s1
-; GFX12-CU-NEXT: s_wait_dscnt 0x0
+; GFX12-CU-NEXT: s_wait_bvhcnt 0x0
+; GFX12-CU-NEXT: s_wait_samplecnt 0x0
+; GFX12-CU-NEXT: s_wait_storecnt 0x0
+; GFX12-CU-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-CU-NEXT: ds_cmpstore_rtn_b32 v1, v0, v1, v2 offset:16
; GFX12-CU-NEXT: s_wait_dscnt 0x0
; GFX12-CU-NEXT: v_mov_b32_e32 v0, s0
diff --git a/llvm/test/CodeGen/AMDGPU/optimize-compare.mir b/llvm/test/CodeGen/AMDGPU/optimize-compare.mir
index c1cf06e..fba42c4 100644
--- a/llvm/test/CodeGen/AMDGPU/optimize-compare.mir
+++ b/llvm/test/CodeGen/AMDGPU/optimize-compare.mir
@@ -388,9 +388,8 @@ body: |
; GCN-NEXT: liveins: $sgpr0, $vgpr0_vgpr1
; GCN-NEXT: {{ $}}
; GCN-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GCN-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 1, killed [[COPY]], implicit-def $scc
- ; GCN-NEXT: S_NOP 0, implicit killed $scc
- ; GCN-NEXT: S_CMP_EQ_U32 killed [[S_AND_B32_]], 1, implicit-def $scc
+ ; GCN-NEXT: S_BITCMP1_B32 killed [[COPY]], 0, implicit-def $scc
+ ; GCN-NEXT: S_NOP 0, implicit $scc
; GCN-NEXT: S_CBRANCH_SCC0 %bb.2, implicit $scc
; GCN-NEXT: S_BRANCH %bb.1
; GCN-NEXT: {{ $}}
@@ -417,6 +416,80 @@ body: |
S_ENDPGM 0
...
+---
+name: xor_1_cmp_lg_0_killed_scc
+body: |
+ ; GCN-LABEL: name: xor_1_cmp_lg_0_killed_scc
+ ; GCN: bb.0:
+ ; GCN-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; GCN-NEXT: liveins: $sgpr0, $vgpr0_vgpr1
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GCN-NEXT: [[S_XOR_B32_:%[0-9]+]]:sreg_32 = S_XOR_B32 1, killed [[COPY]], implicit-def $scc
+ ; GCN-NEXT: S_NOP 0, implicit $scc
+ ; GCN-NEXT: S_CBRANCH_SCC0 %bb.2, implicit $scc
+ ; GCN-NEXT: S_BRANCH %bb.1
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: bb.1:
+ ; GCN-NEXT: successors: %bb.2(0x80000000)
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: bb.2:
+ ; GCN-NEXT: S_ENDPGM 0
+ bb.0:
+ successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ liveins: $sgpr0, $vgpr0_vgpr1
+
+ %0:sreg_32 = COPY $sgpr0
+ %1:sreg_32 = S_XOR_B32 1, killed %0, implicit-def $scc
+ S_NOP 0, implicit killed $scc
+ S_CMP_LG_U32 killed %1:sreg_32, 0, implicit-def $scc
+ S_CBRANCH_SCC0 %bb.2, implicit $scc
+ S_BRANCH %bb.1
+
+ bb.1:
+ successors: %bb.2(0x80000000)
+
+ bb.2:
+ S_ENDPGM 0
+
+...
+---
+name: absdiff_1_cmp_lg_0_killed_scc
+body: |
+ ; GCN-LABEL: name: absdiff_1_cmp_lg_0_killed_scc
+ ; GCN: bb.0:
+ ; GCN-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ ; GCN-NEXT: liveins: $sgpr0, $vgpr0_vgpr1
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
+ ; GCN-NEXT: [[S_ABSDIFF_I32_:%[0-9]+]]:sreg_32 = S_ABSDIFF_I32 1, killed [[COPY]], implicit-def $scc
+ ; GCN-NEXT: S_NOP 0, implicit $scc
+ ; GCN-NEXT: S_CBRANCH_SCC0 %bb.2, implicit $scc
+ ; GCN-NEXT: S_BRANCH %bb.1
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: bb.1:
+ ; GCN-NEXT: successors: %bb.2(0x80000000)
+ ; GCN-NEXT: {{ $}}
+ ; GCN-NEXT: bb.2:
+ ; GCN-NEXT: S_ENDPGM 0
+ bb.0:
+ successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ liveins: $sgpr0, $vgpr0_vgpr1
+
+ %0:sreg_32 = COPY $sgpr0
+ %1:sreg_32 = S_ABSDIFF_I32 1, killed %0, implicit-def $scc
+ S_NOP 0, implicit killed $scc
+ S_CMP_LG_U32 killed %1:sreg_32, 0, implicit-def $scc
+ S_CBRANCH_SCC0 %bb.2, implicit $scc
+ S_BRANCH %bb.1
+
+ bb.1:
+ successors: %bb.2(0x80000000)
+
+ bb.2:
+ S_ENDPGM 0
+
+...
---
name: and_1_cmp_eq_1_clobbered_scc
@@ -2070,8 +2143,7 @@ body: |
; GCN-NEXT: liveins: $sgpr0, $vgpr0_vgpr1
; GCN-NEXT: {{ $}}
; GCN-NEXT: [[COPY:%[0-9]+]]:sreg_32 = COPY $sgpr0
- ; GCN-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 3, killed [[COPY]], implicit-def dead $scc
- ; GCN-NEXT: S_CMP_LG_U32 killed [[S_AND_B32_]], 0, implicit-def $scc
+ ; GCN-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 3, killed [[COPY]], implicit-def $scc
; GCN-NEXT: S_CBRANCH_SCC0 %bb.2, implicit $scc
; GCN-NEXT: S_BRANCH %bb.1
; GCN-NEXT: {{ $}}
diff --git a/llvm/test/CodeGen/AMDGPU/prevent-fmul-hoist-ir.ll b/llvm/test/CodeGen/AMDGPU/prevent-fmul-hoist-ir.ll
index ef3e04c..6ce614b 100644
--- a/llvm/test/CodeGen/AMDGPU/prevent-fmul-hoist-ir.ll
+++ b/llvm/test/CodeGen/AMDGPU/prevent-fmul-hoist-ir.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -S -passes='simplifycfg<hoist-common-insts>' -mtriple=amdgcn-- --fp-contract=fast -mcpu=gfx1030 < %s | FileCheck -check-prefix=GFX -check-prefix=FP-CONTRACT-FAST %s
-; RUN: opt -S -passes='simplifycfg<hoist-common-insts>' -mtriple=amdgcn-- --fp-contract=off --enable-unsafe-fp-math -mcpu=gfx1030 < %s | FileCheck -check-prefix=GFX -check-prefix=UNSAFE-FP-MATH %s
+; RUN: opt -S -passes='simplifycfg<hoist-common-insts>' -mtriple=amdgcn-- --fp-contract=off -mcpu=gfx1030 < %s | FileCheck -check-prefix=GFX -check-prefix=UNSAFE-FP-MATH %s
; RUN: opt -S -passes='simplifycfg<hoist-common-insts>' -mtriple=amdgcn-- --fp-contract=off -mcpu=gfx1030 < %s | FileCheck -check-prefix=GFX -check-prefix=NO-UNSAFE-FP-MATH %s
define double @is_profitable_f64_contract(ptr dereferenceable(8) %ptr_x, ptr dereferenceable(8) %ptr_y, ptr dereferenceable(8) %ptr_a) #0 {
diff --git a/llvm/test/CodeGen/AMDGPU/s_cmp_0.ll b/llvm/test/CodeGen/AMDGPU/s_cmp_0.ll
index f53aaaa..dd5f838 100644
--- a/llvm/test/CodeGen/AMDGPU/s_cmp_0.ll
+++ b/llvm/test/CodeGen/AMDGPU/s_cmp_0.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck %s
declare i32 @llvm.ctpop.i32(i32)
declare i64 @llvm.ctpop.i64(i64)
@@ -10,7 +10,6 @@ define amdgpu_ps i32 @shl32(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: shl32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_lshl_b32 s0, s0, s1
-; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
@@ -25,7 +24,6 @@ define amdgpu_ps i32 @shl64(i64 inreg %val0, i64 inreg %val1) {
; CHECK-LABEL: shl64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_lshl_b64 s[0:1], s[0:1], s2
-; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
@@ -40,7 +38,6 @@ define amdgpu_ps i32 @lshr32(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: lshr32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_lshr_b32 s0, s0, s1
-; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
@@ -55,7 +52,6 @@ define amdgpu_ps i32 @lshr64(i64 inreg %val0, i64 inreg %val1) {
; CHECK-LABEL: lshr64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_lshr_b64 s[0:1], s[0:1], s2
-; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
@@ -70,7 +66,6 @@ define amdgpu_ps i32 @ashr32(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: ashr32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_ashr_i32 s0, s0, s1
-; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
@@ -85,7 +80,6 @@ define amdgpu_ps i32 @ashr64(i64 inreg %val0, i64 inreg %val1) {
; CHECK-LABEL: ashr64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_ashr_i64 s[0:1], s[0:1], s2
-; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
@@ -100,7 +94,6 @@ define amdgpu_ps i32 @abs32(i32 inreg %val0) {
; CHECK-LABEL: abs32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_abs_i32 s0, s0
-; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s0
; CHECK-NEXT: ;;#ASMEND
@@ -121,7 +114,6 @@ define amdgpu_ps i32 @and32(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: and32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_and_b32 s0, s0, s1
-; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
@@ -136,7 +128,6 @@ define amdgpu_ps i32 @and64(i64 inreg %val0, i64 inreg %val1) {
; CHECK-LABEL: and64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_and_b64 s[0:1], s[0:1], s[2:3]
-; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
@@ -151,7 +142,6 @@ define amdgpu_ps i32 @or32(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: or32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_or_b32 s0, s0, s1
-; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
@@ -166,7 +156,6 @@ define amdgpu_ps i32 @or64(i64 inreg %val0, i64 inreg %val1) {
; CHECK-LABEL: or64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
-; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
@@ -181,7 +170,6 @@ define amdgpu_ps i32 @xor32(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: xor32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_xor_b32 s0, s0, s1
-; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
@@ -196,7 +184,6 @@ define amdgpu_ps i32 @xor64(i64 inreg %val0, i64 inreg %val1) {
; CHECK-LABEL: xor64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3]
-; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
@@ -211,7 +198,6 @@ define amdgpu_ps i32 @nand32(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: nand32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_nand_b32 s0, s0, s1
-; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s0
; CHECK-NEXT: ;;#ASMEND
@@ -231,7 +217,6 @@ define amdgpu_ps i32 @nand64(i64 inreg %val0, i64 inreg %val1) {
; CHECK-LABEL: nand64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_nand_b64 s[0:1], s[0:1], s[2:3]
-; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s[0:1]
; CHECK-NEXT: ;;#ASMEND
@@ -251,7 +236,6 @@ define amdgpu_ps i32 @nor32(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: nor32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_nor_b32 s0, s0, s1
-; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s0
; CHECK-NEXT: ;;#ASMEND
@@ -271,7 +255,6 @@ define amdgpu_ps i32 @nor64(i64 inreg %val0, i64 inreg %val1) {
; CHECK-LABEL: nor64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_nor_b64 s[0:1], s[0:1], s[2:3]
-; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s[0:1]
; CHECK-NEXT: ;;#ASMEND
@@ -291,7 +274,6 @@ define amdgpu_ps i32 @xnor32(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: xnor32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_xnor_b32 s0, s0, s1
-; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s0
; CHECK-NEXT: ;;#ASMEND
@@ -311,7 +293,6 @@ define amdgpu_ps i32 @xnor64(i64 inreg %val0, i64 inreg %val1) {
; CHECK-LABEL: xnor64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_xnor_b64 s[0:1], s[0:1], s[2:3]
-; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s[0:1]
; CHECK-NEXT: ;;#ASMEND
@@ -331,7 +312,6 @@ define amdgpu_ps i32 @andn232(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: andn232:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_andn2_b32 s0, s0, s1
-; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
@@ -347,7 +327,6 @@ define amdgpu_ps i32 @nandn264(i64 inreg %val0, i64 inreg %val1) {
; CHECK-LABEL: nandn264:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_andn2_b64 s[0:1], s[0:1], s[2:3]
-; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
@@ -363,7 +342,6 @@ define amdgpu_ps i32 @orn232(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: orn232:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_orn2_b32 s0, s0, s1
-; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
@@ -379,7 +357,6 @@ define amdgpu_ps i32 @orn264(i64 inreg %val0, i64 inreg %val1) {
; CHECK-LABEL: orn264:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_orn2_b64 s[0:1], s[0:1], s[2:3]
-; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
@@ -395,7 +372,6 @@ define amdgpu_ps i32 @bfe_i32(i32 inreg %val0) {
; CHECK-LABEL: bfe_i32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_bfe_i32 s0, s0, 0x80010
-; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
@@ -433,7 +409,6 @@ define amdgpu_ps i32 @bfe_u32(i32 inreg %val0) {
; CHECK-LABEL: bfe_u32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_bfe_u32 s0, s0, 0x80010
-; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
@@ -513,7 +488,6 @@ define amdgpu_ps i32 @bcnt132(i32 inreg %val0) {
; CHECK-LABEL: bcnt132:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_bcnt1_i32_b32 s0, s0
-; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s0
; CHECK-NEXT: ;;#ASMEND
@@ -552,7 +526,6 @@ define amdgpu_ps i32 @quadmask32(i32 inreg %val0) {
; CHECK-LABEL: quadmask32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_quadmask_b32 s0, s0
-; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s0
; CHECK-NEXT: ;;#ASMEND
@@ -571,7 +544,6 @@ define amdgpu_ps i32 @quadmask64(i64 inreg %val0) {
; CHECK-LABEL: quadmask64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_quadmask_b64 s[0:1], s[0:1]
-; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s[0:1]
; CHECK-NEXT: ;;#ASMEND
@@ -590,7 +562,6 @@ define amdgpu_ps i32 @not32(i32 inreg %val0) {
; CHECK-LABEL: not32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_not_b32 s0, s0
-; CHECK-NEXT: s_cmp_lg_u32 s0, 0
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s0
; CHECK-NEXT: ;;#ASMEND
@@ -609,7 +580,6 @@ define amdgpu_ps i32 @not64(i64 inreg %val0) {
; CHECK-LABEL: not64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_not_b64 s[0:1], s[0:1]
-; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT: ;;#ASMSTART
; CHECK-NEXT: ; use s[0:1]
; CHECK-NEXT: ;;#ASMEND
@@ -623,3 +593,35 @@ define amdgpu_ps i32 @not64(i64 inreg %val0) {
%zext = zext i1 %cmp to i32
ret i32 %zext
}
+
+
+; --------------------------------------------------------------------------------
+; Negative tests
+; --------------------------------------------------------------------------------
+
+@1 = extern_weak dso_local addrspace(4) constant i32
+
+define amdgpu_ps i32 @si_pc_add_rel_offset_must_not_optimize() {
+; CHECK-LABEL: si_pc_add_rel_offset_must_not_optimize:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_getpc_b64 s[0:1]
+; CHECK-NEXT: s_add_u32 s0, s0, __unnamed_1@rel32@lo+4
+; CHECK-NEXT: s_addc_u32 s1, s1, __unnamed_1@rel32@hi+12
+; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
+; CHECK-NEXT: s_cbranch_scc0 .LBB35_2
+; CHECK-NEXT: ; %bb.1: ; %endif
+; CHECK-NEXT: s_mov_b32 s0, 1
+; CHECK-NEXT: s_branch .LBB35_3
+; CHECK-NEXT: .LBB35_2: ; %if
+; CHECK-NEXT: s_mov_b32 s0, 0
+; CHECK-NEXT: s_branch .LBB35_3
+; CHECK-NEXT: .LBB35_3:
+ %cmp = icmp ne ptr addrspace(4) @1, null
+ br i1 %cmp, label %endif, label %if
+
+if:
+ ret i32 0
+
+endif:
+ ret i32 1
+}
diff --git a/llvm/test/CodeGen/AMDGPU/s_uaddo_usubo_pseudo.ll b/llvm/test/CodeGen/AMDGPU/s_uaddo_usubo_pseudo.ll
index a828ee0..7552f6b 100644
--- a/llvm/test/CodeGen/AMDGPU/s_uaddo_usubo_pseudo.ll
+++ b/llvm/test/CodeGen/AMDGPU/s_uaddo_usubo_pseudo.ll
@@ -12,8 +12,6 @@ define amdgpu_ps i32 @s_uaddo_pseudo(i32 inreg %val0) {
; CHECK-LABEL: s_uaddo_pseudo:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_add_u32 s0, s0, 1
-; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
-; CHECK-NEXT: s_cmp_lg_u64 s[0:1], 0
; CHECK-NEXT: s_addc_u32 s0, 1, 0
; CHECK-NEXT: ; return to shader part epilog
%pair = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %val0, i32 1)
@@ -32,8 +30,6 @@ define amdgpu_ps i32 @s_usubo_pseudo(i32 inreg %val0, i32 inreg %val1) {
; CHECK-LABEL: s_usubo_pseudo:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_sub_u32 s0, s0, 1
-; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
-; CHECK-NEXT: s_cmp_lg_u64 s[2:3], 0
; CHECK-NEXT: s_subb_u32 s0, s1, 0
; CHECK-NEXT: ; return to shader part epilog
%pair = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %val0, i32 1)
diff --git a/llvm/test/CodeGen/AMDGPU/schedule-barrier-latency.mir b/llvm/test/CodeGen/AMDGPU/schedule-barrier-latency.mir
new file mode 100644
index 0000000..93f7bcc
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/schedule-barrier-latency.mir
@@ -0,0 +1,83 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -passes=postmisched -o - %s | FileCheck %s
+
+# Ensure WMMA operations stay before the final atomic fence and barrier group.
+# This allows the latency of the WMMA operations to be hidden by the barrier wait.
+---
+name: test
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0, $sgpr12, $vgpr36, $vgpr37, $vgpr38, $vgpr39, $vgpr40, $vgpr1_vgpr2_vgpr3_vgpr4, $vgpr5_vgpr6_vgpr7_vgpr8, $vgpr9_vgpr10_vgpr11_vgpr12, $vgpr13_vgpr14_vgpr15_vgpr16, $vgpr17_vgpr18_vgpr19_vgpr20, $vgpr21_vgpr22_vgpr23_vgpr24, $vgpr25_vgpr26_vgpr27_vgpr28, $vgpr29_vgpr30_vgpr31_vgpr32
+
+ ; CHECK-LABEL: name: test
+ ; CHECK: liveins: $sgpr0, $sgpr12, $vgpr36, $vgpr37, $vgpr38, $vgpr39, $vgpr40, $vgpr1_vgpr2_vgpr3_vgpr4, $vgpr5_vgpr6_vgpr7_vgpr8, $vgpr9_vgpr10_vgpr11_vgpr12, $vgpr13_vgpr14_vgpr15_vgpr16, $vgpr17_vgpr18_vgpr19_vgpr20, $vgpr21_vgpr22_vgpr23_vgpr24, $vgpr25_vgpr26_vgpr27_vgpr28, $vgpr29_vgpr30_vgpr31_vgpr32
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: ATOMIC_FENCE 5, 2
+ ; CHECK-NEXT: S_BARRIER
+ ; CHECK-NEXT: ATOMIC_FENCE 4, 2
+    ; CHECK-NEXT: BUNDLE implicit-def $vgpr41_vgpr42_vgpr43_vgpr44, implicit-def $vgpr41, implicit-def $vgpr41_lo16, implicit-def $vgpr41_hi16, implicit-def $vgpr42, implicit-def $vgpr42_lo16, implicit-def $vgpr42_hi16, implicit-def $vgpr43, implicit-def $vgpr43_lo16, implicit-def $vgpr43_hi16, implicit-def $vgpr44, implicit-def $vgpr44_lo16, implicit-def $vgpr44_hi16, implicit-def $vgpr41_vgpr42, implicit-def $vgpr41_vgpr42_vgpr43, implicit-def $vgpr42_vgpr43, implicit-def $vgpr42_vgpr43_vgpr44, implicit-def $vgpr43_vgpr44, implicit-def $vgpr45_vgpr46_vgpr47_vgpr48, implicit-def $vgpr45, implicit-def $vgpr45_lo16, implicit-def $vgpr45_hi16, implicit-def $vgpr46, implicit-def $vgpr46_lo16, implicit-def $vgpr46_hi16, implicit-def $vgpr47, implicit-def $vgpr47_lo16, implicit-def $vgpr47_hi16, implicit-def $vgpr48, implicit-def $vgpr48_lo16, implicit-def $vgpr48_hi16, implicit-def $vgpr45_vgpr46, implicit-def $vgpr45_vgpr46_vgpr47, implicit-def $vgpr46_vgpr47, implicit-def $vgpr46_vgpr47_vgpr48, implicit-def $vgpr47_vgpr48, implicit-def $vgpr49_vgpr50_vgpr51_vgpr52, implicit-def $vgpr49, implicit-def $vgpr49_lo16, implicit-def $vgpr49_hi16, implicit-def $vgpr50, implicit-def $vgpr50_lo16, implicit-def $vgpr50_hi16, implicit-def $vgpr51, implicit-def $vgpr51_lo16, implicit-def $vgpr51_hi16, implicit-def $vgpr52, implicit-def $vgpr52_lo16, implicit-def $vgpr52_hi16, implicit-def $vgpr49_vgpr50, implicit-def $vgpr49_vgpr50_vgpr51, implicit-def $vgpr50_vgpr51, implicit-def $vgpr50_vgpr51_vgpr52, implicit-def $vgpr51_vgpr52, implicit-def $vgpr53_vgpr54_vgpr55_vgpr56, implicit-def $vgpr53, implicit-def $vgpr53_lo16, implicit-def $vgpr53_hi16, implicit-def $vgpr54, implicit-def $vgpr54_lo16, implicit-def $vgpr54_hi16, implicit-def $vgpr55, implicit-def $vgpr55_lo16, implicit-def $vgpr55_hi16, implicit-def $vgpr56, implicit-def $vgpr56_lo16, implicit-def $vgpr56_hi16, implicit-def $vgpr53_vgpr54, implicit-def $vgpr53_vgpr54_vgpr55, implicit-def $vgpr54_vgpr55, implicit-def $vgpr54_vgpr55_vgpr56, implicit-def $vgpr55_vgpr56, implicit-def $vgpr57_vgpr58_vgpr59_vgpr60, implicit-def $vgpr57, implicit-def $vgpr57_lo16, implicit-def $vgpr57_hi16, implicit-def $vgpr58, implicit-def $vgpr58_lo16, implicit-def $vgpr58_hi16, implicit-def $vgpr59, implicit-def $vgpr59_lo16, implicit-def $vgpr59_hi16, implicit-def $vgpr60, implicit-def $vgpr60_lo16, implicit-def $vgpr60_hi16, implicit-def $vgpr57_vgpr58, implicit-def $vgpr57_vgpr58_vgpr59, implicit-def $vgpr58_vgpr59, implicit-def $vgpr58_vgpr59_vgpr60, implicit-def $vgpr59_vgpr60, implicit-def $vgpr61_vgpr62_vgpr63_vgpr64, implicit-def $vgpr61, implicit-def $vgpr61_lo16, implicit-def $vgpr61_hi16, implicit-def $vgpr62, implicit-def $vgpr62_lo16, implicit-def $vgpr62_hi16, implicit-def $vgpr63, implicit-def $vgpr63_lo16, implicit-def $vgpr63_hi16, implicit-def $vgpr64, implicit-def $vgpr64_lo16, implicit-def $vgpr64_hi16, implicit-def $vgpr61_vgpr62, implicit-def $vgpr61_vgpr62_vgpr63, implicit-def $vgpr62_vgpr63, implicit-def $vgpr62_vgpr63_vgpr64, implicit-def $vgpr63_vgpr64, implicit-def $vgpr65_vgpr66_vgpr67_vgpr68, implicit-def $vgpr65, implicit-def $vgpr65_lo16, implicit-def $vgpr65_hi16, implicit-def $vgpr66, implicit-def $vgpr66_lo16, implicit-def $vgpr66_hi16, implicit-def $vgpr67, implicit-def $vgpr67_lo16, implicit-def $vgpr67_hi16, implicit-def $vgpr68, implicit-def $vgpr68_lo16, implicit-def $vgpr68_hi16, implicit-def $vgpr65_vgpr66, implicit-def $vgpr65_vgpr66_vgpr67, implicit-def $vgpr66_vgpr67, implicit-def $vgpr66_vgpr67_vgpr68, implicit-def $vgpr67_vgpr68, implicit-def $vgpr69_vgpr70_vgpr71_vgpr72, implicit-def $vgpr69, implicit-def $vgpr69_lo16, implicit-def $vgpr69_hi16, implicit-def $vgpr70, implicit-def $vgpr70_lo16, implicit-def $vgpr70_hi16, implicit-def $vgpr71, implicit-def $vgpr71_lo16, implicit-def $vgpr71_hi16, implicit-def $vgpr72, implicit-def $vgpr72_lo16, implicit-def $vgpr72_hi16, implicit-def $vgpr69_vgpr70, implicit-def $vgpr69_vgpr70_vgpr71, implicit-def $vgpr70_vgpr71, implicit-def $vgpr70_vgpr71_vgpr72, implicit-def $vgpr71_vgpr72, implicit-def $vgpr73_vgpr74_vgpr75_vgpr76, implicit-def $vgpr73, implicit-def $vgpr73_lo16, implicit-def $vgpr73_hi16, implicit-def $vgpr74, implicit-def $vgpr74_lo16, implicit-def $vgpr74_hi16, implicit-def $vgpr75, implicit-def $vgpr75_lo16, implicit-def $vgpr75_hi16, implicit-def $vgpr76, implicit-def $vgpr76_lo16, implicit-def $vgpr76_hi16, implicit-def $vgpr73_vgpr74, implicit-def $vgpr73_vgpr74_vgpr75, implicit-def $vgpr74_vgpr75, implicit-def $vgpr74_vgpr75_vgpr76, implicit-def $vgpr75_vgpr76, implicit-def $vgpr77_vgpr78_vgpr79_vgpr80, implicit-def $vgpr77, implicit-def $vgpr77_lo16, implicit-def $vgpr77_hi16, implicit-def $vgpr78, implicit-def $vgpr78_lo16, implicit-def $vgpr78_hi16, implicit-def $vgpr79, implicit-def $vgpr79_lo16, implicit-def $vgpr79_hi16, implicit-def $vgpr80, implicit-def $vgpr80_lo16, implicit-def $vgpr80_hi16, implicit-def $vgpr77_vgpr78, implicit-def $vgpr77_vgpr78_vgpr79, implicit-def $vgpr78_vgpr79, implicit-def $vgpr78_vgpr79_vgpr80, implicit-def $vgpr79_vgpr80, implicit-def $vgpr81_vgpr82_vgpr83_vgpr84, implicit-def $vgpr81, implicit-def $vgpr81_lo16, implicit-def $vgpr81_hi16, implicit-def $vgpr82, implicit-def $vgpr82_lo16, implicit-def $vgpr82_hi16, implicit-def $vgpr83, implicit-def $vgpr83_lo16, implicit-def $vgpr83_hi16, implicit-def $vgpr84, implicit-def $vgpr84_lo16, implicit-def $vgpr84_hi16, implicit-def $vgpr81_vgpr82, implicit-def $vgpr81_vgpr82_vgpr83, implicit-def $vgpr82_vgpr83, implicit-def $vgpr82_vgpr83_vgpr84, implicit-def $vgpr83_vgpr84, implicit-def $vgpr85_vgpr86_vgpr87_vgpr88, implicit-def $vgpr85, implicit-def $vgpr85_lo16, implicit-def $vgpr85_hi16, implicit-def $vgpr86, implicit-def $vgpr86_lo16, implicit-def $vgpr86_hi16, implicit-def $vgpr87, implicit-def $vgpr87_lo16, implicit-def $vgpr87_hi16, implicit-def $vgpr88, implicit-def $vgpr88_lo16, implicit-def $vgpr88_hi16, implicit-def $vgpr85_vgpr86, implicit-def $vgpr85_vgpr86_vgpr87, implicit-def $vgpr86_vgpr87, implicit-def $vgpr86_vgpr87_vgpr88, implicit-def $vgpr87_vgpr88, implicit killed $vgpr36, implicit $exec, implicit killed $vgpr37, implicit killed $vgpr38 {
+ ; CHECK-NEXT: $vgpr41_vgpr42_vgpr43_vgpr44 = DS_READ_B128_gfx9 $vgpr36, 0, 0, implicit $exec :: (load (s128), addrspace 3)
+ ; CHECK-NEXT: $vgpr45_vgpr46_vgpr47_vgpr48 = DS_READ2_B64_gfx9 $vgpr36, 2, 3, 0, implicit $exec :: (load (s128), addrspace 3)
+ ; CHECK-NEXT: $vgpr49_vgpr50_vgpr51_vgpr52 = DS_READ_B128_gfx9 $vgpr37, 0, 0, implicit $exec :: (load (s128), addrspace 3)
+ ; CHECK-NEXT: $vgpr53_vgpr54_vgpr55_vgpr56 = DS_READ2_B64_gfx9 $vgpr37, 2, 3, 0, implicit $exec :: (load (s128), addrspace 3)
+ ; CHECK-NEXT: $vgpr57_vgpr58_vgpr59_vgpr60 = DS_READ_B128_gfx9 $vgpr37, 768, 0, implicit $exec :: (load (s128), addrspace 3)
+ ; CHECK-NEXT: $vgpr61_vgpr62_vgpr63_vgpr64 = DS_READ2_B64_gfx9 killed $vgpr37, 98, 99, 0, implicit $exec :: (load (s128), addrspace 3)
+ ; CHECK-NEXT: $vgpr65_vgpr66_vgpr67_vgpr68 = DS_READ_B128_gfx9 $vgpr36, 768, 0, implicit $exec :: (load (s128), addrspace 3)
+ ; CHECK-NEXT: $vgpr69_vgpr70_vgpr71_vgpr72 = DS_READ2_B64_gfx9 $vgpr36, 98, 99, 0, implicit $exec :: (load (s128), addrspace 3)
+ ; CHECK-NEXT: $vgpr73_vgpr74_vgpr75_vgpr76 = DS_READ_B128_gfx9 $vgpr36, 1536, 0, implicit $exec :: (load (s128), addrspace 3)
+ ; CHECK-NEXT: $vgpr77_vgpr78_vgpr79_vgpr80 = DS_READ2_B64_gfx9 $vgpr36, 194, 195, 0, implicit $exec :: (load (s128), addrspace 3)
+ ; CHECK-NEXT: $vgpr81_vgpr82_vgpr83_vgpr84 = DS_READ_B128_gfx9 killed $vgpr36, 2304, 0, implicit $exec :: (load (s128), addrspace 3)
+ ; CHECK-NEXT: $vgpr85_vgpr86_vgpr87_vgpr88 = DS_READ2_B64_gfx9 killed $vgpr38, 0, 1, 0, implicit $exec :: (load (s128), addrspace 3)
+ ; CHECK-NEXT: }
+ ; CHECK-NEXT: $sgpr1 = S_ADD_I32 $sgpr0, 16, implicit-def dead $scc
+ ; CHECK-NEXT: $vgpr39 = V_ADD_U32_e32 32, killed $vgpr39, implicit $exec
+ ; CHECK-NEXT: $vgpr40 = V_ADD_U32_e32 32, killed $vgpr40, implicit $exec
+ ; CHECK-NEXT: S_CMP_LT_U32 killed $sgpr0, killed $sgpr12, implicit-def $scc
+ ; CHECK-NEXT: $sgpr0 = S_MOV_B32 killed $sgpr1
+ ; CHECK-NEXT: early-clobber $vgpr29_vgpr30_vgpr31_vgpr32 = V_WMMA_F32_16X16X16_F16_twoaddr_w64 8, $vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48, 8, $vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56, 8, killed $vgpr29_vgpr30_vgpr31_vgpr32, 0, 0, implicit $exec
+ ; CHECK-NEXT: early-clobber $vgpr25_vgpr26_vgpr27_vgpr28 = V_WMMA_F32_16X16X16_F16_twoaddr_w64 8, killed $vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48, 8, $vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64, 8, killed $vgpr25_vgpr26_vgpr27_vgpr28, 0, 0, implicit $exec
+ ; CHECK-NEXT: early-clobber $vgpr21_vgpr22_vgpr23_vgpr24 = V_WMMA_F32_16X16X16_F16_twoaddr_w64 8, $vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72, 8, $vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56, 8, killed $vgpr21_vgpr22_vgpr23_vgpr24, 0, 0, implicit $exec
+ ; CHECK-NEXT: early-clobber $vgpr17_vgpr18_vgpr19_vgpr20 = V_WMMA_F32_16X16X16_F16_twoaddr_w64 8, killed $vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72, 8, $vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64, 8, killed $vgpr17_vgpr18_vgpr19_vgpr20, 0, 0, implicit $exec
+ ; CHECK-NEXT: early-clobber $vgpr13_vgpr14_vgpr15_vgpr16 = V_WMMA_F32_16X16X16_F16_twoaddr_w64 8, $vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80, 8, $vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56, 8, killed $vgpr13_vgpr14_vgpr15_vgpr16, 0, 0, implicit $exec
+ ; CHECK-NEXT: early-clobber $vgpr9_vgpr10_vgpr11_vgpr12 = V_WMMA_F32_16X16X16_F16_twoaddr_w64 8, killed $vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80, 8, $vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64, 8, killed $vgpr9_vgpr10_vgpr11_vgpr12, 0, 0, implicit $exec
+ ; CHECK-NEXT: early-clobber $vgpr5_vgpr6_vgpr7_vgpr8 = V_WMMA_F32_16X16X16_F16_twoaddr_w64 8, $vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88, 8, killed $vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56, 8, killed $vgpr5_vgpr6_vgpr7_vgpr8, 0, 0, implicit $exec
+ ; CHECK-NEXT: early-clobber $vgpr1_vgpr2_vgpr3_vgpr4 = V_WMMA_F32_16X16X16_F16_twoaddr_w64 8, killed $vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88, 8, killed $vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64, 8, killed $vgpr1_vgpr2_vgpr3_vgpr4, 0, 0, implicit $exec
+ ; CHECK-NEXT: ATOMIC_FENCE 5, 2
+ ; CHECK-NEXT: S_BARRIER
+ ; CHECK-NEXT: ATOMIC_FENCE 4, 2
+ ATOMIC_FENCE 5, 2
+ S_BARRIER
+ ATOMIC_FENCE 4, 2
+    BUNDLE implicit-def $vgpr41_vgpr42_vgpr43_vgpr44, implicit-def $vgpr41, implicit-def $vgpr41_lo16, implicit-def $vgpr41_hi16, implicit-def $vgpr42, implicit-def $vgpr42_lo16, implicit-def $vgpr42_hi16, implicit-def $vgpr43, implicit-def $vgpr43_lo16, implicit-def $vgpr43_hi16, implicit-def $vgpr44, implicit-def $vgpr44_lo16, implicit-def $vgpr44_hi16, implicit-def $vgpr41_vgpr42, implicit-def $vgpr41_vgpr42_vgpr43, implicit-def $vgpr42_vgpr43, implicit-def $vgpr42_vgpr43_vgpr44, implicit-def $vgpr43_vgpr44, implicit-def $vgpr45_vgpr46_vgpr47_vgpr48, implicit-def $vgpr45, implicit-def $vgpr45_lo16, implicit-def $vgpr45_hi16, implicit-def $vgpr46, implicit-def $vgpr46_lo16, implicit-def $vgpr46_hi16, implicit-def $vgpr47, implicit-def $vgpr47_lo16, implicit-def $vgpr47_hi16, implicit-def $vgpr48, implicit-def $vgpr48_lo16, implicit-def $vgpr48_hi16, implicit-def $vgpr45_vgpr46, implicit-def $vgpr45_vgpr46_vgpr47, implicit-def $vgpr46_vgpr47, implicit-def $vgpr46_vgpr47_vgpr48, implicit-def $vgpr47_vgpr48, implicit-def $vgpr49_vgpr50_vgpr51_vgpr52, implicit-def $vgpr49, implicit-def $vgpr49_lo16, implicit-def $vgpr49_hi16, implicit-def $vgpr50, implicit-def $vgpr50_lo16, implicit-def $vgpr50_hi16, implicit-def $vgpr51, implicit-def $vgpr51_lo16, implicit-def $vgpr51_hi16, implicit-def $vgpr52, implicit-def $vgpr52_lo16, implicit-def $vgpr52_hi16, implicit-def $vgpr49_vgpr50, implicit-def $vgpr49_vgpr50_vgpr51, implicit-def $vgpr50_vgpr51, implicit-def $vgpr50_vgpr51_vgpr52, implicit-def $vgpr51_vgpr52, implicit-def $vgpr53_vgpr54_vgpr55_vgpr56, implicit-def $vgpr53, implicit-def $vgpr53_lo16, implicit-def $vgpr53_hi16, implicit-def $vgpr54, implicit-def $vgpr54_lo16, implicit-def $vgpr54_hi16, implicit-def $vgpr55, implicit-def $vgpr55_lo16, implicit-def $vgpr55_hi16, implicit-def $vgpr56, implicit-def $vgpr56_lo16, implicit-def $vgpr56_hi16, implicit-def $vgpr53_vgpr54, implicit-def $vgpr53_vgpr54_vgpr55, implicit-def $vgpr54_vgpr55, implicit-def $vgpr54_vgpr55_vgpr56, implicit-def $vgpr55_vgpr56, implicit-def $vgpr57_vgpr58_vgpr59_vgpr60, implicit-def $vgpr57, implicit-def $vgpr57_lo16, implicit-def $vgpr57_hi16, implicit-def $vgpr58, implicit-def $vgpr58_lo16, implicit-def $vgpr58_hi16, implicit-def $vgpr59, implicit-def $vgpr59_lo16, implicit-def $vgpr59_hi16, implicit-def $vgpr60, implicit-def $vgpr60_lo16, implicit-def $vgpr60_hi16, implicit-def $vgpr57_vgpr58, implicit-def $vgpr57_vgpr58_vgpr59, implicit-def $vgpr58_vgpr59, implicit-def $vgpr58_vgpr59_vgpr60, implicit-def $vgpr59_vgpr60, implicit-def $vgpr61_vgpr62_vgpr63_vgpr64, implicit-def $vgpr61, implicit-def $vgpr61_lo16, implicit-def $vgpr61_hi16, implicit-def $vgpr62, implicit-def $vgpr62_lo16, implicit-def $vgpr62_hi16, implicit-def $vgpr63, implicit-def $vgpr63_lo16, implicit-def $vgpr63_hi16, implicit-def $vgpr64, implicit-def $vgpr64_lo16, implicit-def $vgpr64_hi16, implicit-def $vgpr61_vgpr62, implicit-def $vgpr61_vgpr62_vgpr63, implicit-def $vgpr62_vgpr63, implicit-def $vgpr62_vgpr63_vgpr64, implicit-def $vgpr63_vgpr64, implicit-def $vgpr65_vgpr66_vgpr67_vgpr68, implicit-def $vgpr65, implicit-def $vgpr65_lo16, implicit-def $vgpr65_hi16, implicit-def $vgpr66, implicit-def $vgpr66_lo16, implicit-def $vgpr66_hi16, implicit-def $vgpr67, implicit-def $vgpr67_lo16, implicit-def $vgpr67_hi16, implicit-def $vgpr68, implicit-def $vgpr68_lo16, implicit-def $vgpr68_hi16, implicit-def $vgpr65_vgpr66, implicit-def $vgpr65_vgpr66_vgpr67, implicit-def $vgpr66_vgpr67, implicit-def $vgpr66_vgpr67_vgpr68, implicit-def $vgpr67_vgpr68, implicit-def $vgpr69_vgpr70_vgpr71_vgpr72, implicit-def $vgpr69, implicit-def $vgpr69_lo16, implicit-def $vgpr69_hi16, implicit-def $vgpr70, implicit-def $vgpr70_lo16, implicit-def $vgpr70_hi16, implicit-def $vgpr71, implicit-def $vgpr71_lo16, implicit-def $vgpr71_hi16, implicit-def $vgpr72, implicit-def $vgpr72_lo16, implicit-def $vgpr72_hi16, implicit-def $vgpr69_vgpr70, implicit-def $vgpr69_vgpr70_vgpr71, implicit-def $vgpr70_vgpr71, implicit-def $vgpr70_vgpr71_vgpr72, implicit-def $vgpr71_vgpr72, implicit-def $vgpr73_vgpr74_vgpr75_vgpr76, implicit-def $vgpr73, implicit-def $vgpr73_lo16, implicit-def $vgpr73_hi16, implicit-def $vgpr74, implicit-def $vgpr74_lo16, implicit-def $vgpr74_hi16, implicit-def $vgpr75, implicit-def $vgpr75_lo16, implicit-def $vgpr75_hi16, implicit-def $vgpr76, implicit-def $vgpr76_lo16, implicit-def $vgpr76_hi16, implicit-def $vgpr73_vgpr74, implicit-def $vgpr73_vgpr74_vgpr75, implicit-def $vgpr74_vgpr75, implicit-def $vgpr74_vgpr75_vgpr76, implicit-def $vgpr75_vgpr76, implicit-def $vgpr77_vgpr78_vgpr79_vgpr80, implicit-def $vgpr77, implicit-def $vgpr77_lo16, implicit-def $vgpr77_hi16, implicit-def $vgpr78, implicit-def $vgpr78_lo16, implicit-def $vgpr78_hi16, implicit-def $vgpr79, implicit-def $vgpr79_lo16, implicit-def $vgpr79_hi16, implicit-def $vgpr80, implicit-def $vgpr80_lo16, implicit-def $vgpr80_hi16, implicit-def $vgpr77_vgpr78, implicit-def $vgpr77_vgpr78_vgpr79, implicit-def $vgpr78_vgpr79, implicit-def $vgpr78_vgpr79_vgpr80, implicit-def $vgpr79_vgpr80, implicit-def $vgpr81_vgpr82_vgpr83_vgpr84, implicit-def $vgpr81, implicit-def $vgpr81_lo16, implicit-def $vgpr81_hi16, implicit-def $vgpr82, implicit-def $vgpr82_lo16, implicit-def $vgpr82_hi16, implicit-def $vgpr83, implicit-def $vgpr83_lo16, implicit-def $vgpr83_hi16, implicit-def $vgpr84, implicit-def $vgpr84_lo16, implicit-def $vgpr84_hi16, implicit-def $vgpr81_vgpr82, implicit-def $vgpr81_vgpr82_vgpr83, implicit-def $vgpr82_vgpr83, implicit-def $vgpr82_vgpr83_vgpr84, implicit-def $vgpr83_vgpr84, implicit-def $vgpr85_vgpr86_vgpr87_vgpr88, implicit-def $vgpr85, implicit-def $vgpr85_lo16, implicit-def $vgpr85_hi16, implicit-def $vgpr86, implicit-def $vgpr86_lo16, implicit-def $vgpr86_hi16, implicit-def $vgpr87, implicit-def $vgpr87_lo16, implicit-def $vgpr87_hi16, implicit-def $vgpr88, implicit-def $vgpr88_lo16, implicit-def $vgpr88_hi16, implicit-def $vgpr85_vgpr86, implicit-def $vgpr85_vgpr86_vgpr87, implicit-def $vgpr86_vgpr87, implicit-def $vgpr86_vgpr87_vgpr88, implicit-def $vgpr87_vgpr88, implicit $vgpr36, implicit $exec, implicit $vgpr37, implicit $vgpr38 {
+ $vgpr41_vgpr42_vgpr43_vgpr44 = DS_READ_B128_gfx9 $vgpr36, 0, 0, implicit $exec :: (load (s128), addrspace 3)
+ $vgpr45_vgpr46_vgpr47_vgpr48 = DS_READ2_B64_gfx9 $vgpr36, 2, 3, 0, implicit $exec :: (load (s128), addrspace 3)
+ $vgpr49_vgpr50_vgpr51_vgpr52 = DS_READ_B128_gfx9 $vgpr37, 0, 0, implicit $exec :: (load (s128), addrspace 3)
+ $vgpr53_vgpr54_vgpr55_vgpr56 = DS_READ2_B64_gfx9 $vgpr37, 2, 3, 0, implicit $exec :: (load (s128), addrspace 3)
+ $vgpr57_vgpr58_vgpr59_vgpr60 = DS_READ_B128_gfx9 $vgpr37, 768, 0, implicit $exec :: (load (s128), addrspace 3)
+ $vgpr61_vgpr62_vgpr63_vgpr64 = DS_READ2_B64_gfx9 $vgpr37, 98, 99, 0, implicit $exec :: (load (s128), addrspace 3)
+ $vgpr65_vgpr66_vgpr67_vgpr68 = DS_READ_B128_gfx9 $vgpr36, 768, 0, implicit $exec :: (load (s128), addrspace 3)
+ $vgpr69_vgpr70_vgpr71_vgpr72 = DS_READ2_B64_gfx9 $vgpr36, 98, 99, 0, implicit $exec :: (load (s128), addrspace 3)
+ $vgpr73_vgpr74_vgpr75_vgpr76 = DS_READ_B128_gfx9 $vgpr36, 1536, 0, implicit $exec :: (load (s128), addrspace 3)
+ $vgpr77_vgpr78_vgpr79_vgpr80 = DS_READ2_B64_gfx9 $vgpr36, 194, 195, 0, implicit $exec :: (load (s128), addrspace 3)
+ $vgpr81_vgpr82_vgpr83_vgpr84 = DS_READ_B128_gfx9 $vgpr36, 2304, 0, implicit $exec :: (load (s128), addrspace 3)
+ $vgpr85_vgpr86_vgpr87_vgpr88 = DS_READ2_B64_gfx9 $vgpr38, 0, 1, 0, implicit $exec :: (load (s128), addrspace 3)
+ }
+ $sgpr1 = S_ADD_I32 $sgpr0, 16, implicit-def dead $scc
+ $vgpr39 = V_ADD_U32_e32 32, killed $vgpr39, implicit $exec
+ $vgpr40 = V_ADD_U32_e32 32, killed $vgpr40, implicit $exec
+ S_CMP_LT_U32 killed $sgpr0, $sgpr12, implicit-def $scc
+ early-clobber $vgpr29_vgpr30_vgpr31_vgpr32 = V_WMMA_F32_16X16X16_F16_twoaddr_w64 8, $vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48, 8, $vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56, 8, killed $vgpr29_vgpr30_vgpr31_vgpr32, 0, 0, implicit $exec
+ early-clobber $vgpr25_vgpr26_vgpr27_vgpr28 = V_WMMA_F32_16X16X16_F16_twoaddr_w64 8, killed $vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48, 8, $vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64, 8, killed $vgpr25_vgpr26_vgpr27_vgpr28, 0, 0, implicit $exec
+ early-clobber $vgpr21_vgpr22_vgpr23_vgpr24 = V_WMMA_F32_16X16X16_F16_twoaddr_w64 8, $vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72, 8, $vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56, 8, killed $vgpr21_vgpr22_vgpr23_vgpr24, 0, 0, implicit $exec
+ early-clobber $vgpr17_vgpr18_vgpr19_vgpr20 = V_WMMA_F32_16X16X16_F16_twoaddr_w64 8, killed $vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72, 8, $vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64, 8, killed $vgpr17_vgpr18_vgpr19_vgpr20, 0, 0, implicit $exec
+ early-clobber $vgpr13_vgpr14_vgpr15_vgpr16 = V_WMMA_F32_16X16X16_F16_twoaddr_w64 8, $vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80, 8, $vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56, 8, killed $vgpr13_vgpr14_vgpr15_vgpr16, 0, 0, implicit $exec
+ early-clobber $vgpr9_vgpr10_vgpr11_vgpr12 = V_WMMA_F32_16X16X16_F16_twoaddr_w64 8, killed $vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80, 8, $vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64, 8, killed $vgpr9_vgpr10_vgpr11_vgpr12, 0, 0, implicit $exec
+ early-clobber $vgpr5_vgpr6_vgpr7_vgpr8 = V_WMMA_F32_16X16X16_F16_twoaddr_w64 8, $vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88, 8, killed $vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56, 8, killed $vgpr5_vgpr6_vgpr7_vgpr8, 0, 0, implicit $exec
+ early-clobber $vgpr1_vgpr2_vgpr3_vgpr4 = V_WMMA_F32_16X16X16_F16_twoaddr_w64 8, killed $vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88, 8, killed $vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64, 8, killed $vgpr1_vgpr2_vgpr3_vgpr4, 0, 0, implicit $exec
+ $sgpr0 = S_MOV_B32 killed $sgpr1
+ ATOMIC_FENCE 5, 2
+ S_BARRIER
+ ATOMIC_FENCE 4, 2
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index 5f6d622..71f5a94 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -56,10 +56,9 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GCN-NEXT: s_addc_u32 s15, 0, s16
; GCN-NEXT: s_add_u32 s16, s0, s1
; GCN-NEXT: v_mov_b32_e32 v0, s16
-; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: v_mul_hi_u32 v0, s12, v0
+; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_or_b32 s0, s0, s1
-; GCN-NEXT: s_cmp_lg_u32 s0, 0
; GCN-NEXT: s_addc_u32 s14, s14, s15
; GCN-NEXT: s_mul_i32 s0, s12, s14
; GCN-NEXT: v_readfirstlane_b32 s1, v0
@@ -90,7 +89,6 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GCN-NEXT: s_add_u32 s15, s16, s0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_or_b32 s0, s0, s1
-; GCN-NEXT: s_cmp_lg_u32 s0, 0
; GCN-NEXT: s_addc_u32 s14, s14, s12
; GCN-NEXT: s_ashr_i32 s12, s7, 31
; GCN-NEXT: s_add_u32 s0, s6, s12
@@ -116,52 +114,50 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GCN-NEXT: v_readfirstlane_b32 s4, v0
; GCN-NEXT: s_addc_u32 s4, s4, 0
; GCN-NEXT: s_mul_i32 s14, s7, s14
-; GCN-NEXT: s_add_u32 s14, s1, s14
-; GCN-NEXT: v_mov_b32_e32 v0, s14
+; GCN-NEXT: s_add_u32 s16, s1, s14
+; GCN-NEXT: v_mov_b32_e32 v0, s16
; GCN-NEXT: v_mul_hi_u32 v0, s10, v0
-; GCN-NEXT: s_addc_u32 s15, 0, s4
+; GCN-NEXT: s_addc_u32 s17, 0, s4
; GCN-NEXT: s_mov_b32 s1, s5
-; GCN-NEXT: s_mul_i32 s4, s10, s15
+; GCN-NEXT: s_mul_i32 s4, s10, s17
; GCN-NEXT: v_readfirstlane_b32 s5, v0
; GCN-NEXT: s_add_i32 s4, s5, s4
-; GCN-NEXT: s_mul_i32 s5, s11, s14
-; GCN-NEXT: s_add_i32 s16, s4, s5
-; GCN-NEXT: s_sub_i32 s17, s7, s16
-; GCN-NEXT: s_mul_i32 s4, s10, s14
+; GCN-NEXT: s_mul_i32 s5, s11, s16
+; GCN-NEXT: s_add_i32 s18, s4, s5
+; GCN-NEXT: s_sub_i32 s14, s7, s18
+; GCN-NEXT: s_mul_i32 s4, s10, s16
; GCN-NEXT: s_sub_u32 s6, s6, s4
; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0
-; GCN-NEXT: s_or_b32 s18, s4, s5
-; GCN-NEXT: s_cmp_lg_u32 s18, 0
-; GCN-NEXT: s_subb_u32 s17, s17, s11
-; GCN-NEXT: s_sub_u32 s19, s6, s10
-; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0
+; GCN-NEXT: s_or_b32 s15, s4, s5
+; GCN-NEXT: s_subb_u32 s19, s14, s11
+; GCN-NEXT: s_sub_u32 s20, s6, s10
+; GCN-NEXT: s_cselect_b64 s[14:15], -1, 0
+; GCN-NEXT: s_or_b32 s14, s14, s15
+; GCN-NEXT: s_subb_u32 s14, s19, 0
+; GCN-NEXT: s_cmp_ge_u32 s14, s11
+; GCN-NEXT: s_cselect_b32 s15, -1, 0
+; GCN-NEXT: s_cmp_ge_u32 s20, s10
+; GCN-NEXT: s_cselect_b32 s19, -1, 0
+; GCN-NEXT: s_cmp_eq_u32 s14, s11
+; GCN-NEXT: s_cselect_b32 s14, s19, s15
+; GCN-NEXT: s_add_u32 s15, s16, 1
+; GCN-NEXT: s_addc_u32 s19, s17, 0
+; GCN-NEXT: s_add_u32 s20, s16, 2
+; GCN-NEXT: s_addc_u32 s21, s17, 0
+; GCN-NEXT: s_cmp_lg_u32 s14, 0
+; GCN-NEXT: s_cselect_b32 s14, s20, s15
+; GCN-NEXT: s_cselect_b32 s15, s21, s19
; GCN-NEXT: s_or_b32 s4, s4, s5
-; GCN-NEXT: s_cmp_lg_u32 s4, 0
-; GCN-NEXT: s_subb_u32 s4, s17, 0
+; GCN-NEXT: s_subb_u32 s4, s7, s18
; GCN-NEXT: s_cmp_ge_u32 s4, s11
; GCN-NEXT: s_cselect_b32 s5, -1, 0
-; GCN-NEXT: s_cmp_ge_u32 s19, s10
-; GCN-NEXT: s_cselect_b32 s17, -1, 0
-; GCN-NEXT: s_cmp_eq_u32 s4, s11
-; GCN-NEXT: s_cselect_b32 s4, s17, s5
-; GCN-NEXT: s_add_u32 s5, s14, 1
-; GCN-NEXT: s_addc_u32 s17, s15, 0
-; GCN-NEXT: s_add_u32 s19, s14, 2
-; GCN-NEXT: s_addc_u32 s20, s15, 0
-; GCN-NEXT: s_cmp_lg_u32 s4, 0
-; GCN-NEXT: s_cselect_b32 s4, s19, s5
-; GCN-NEXT: s_cselect_b32 s5, s20, s17
-; GCN-NEXT: s_cmp_lg_u32 s18, 0
-; GCN-NEXT: s_subb_u32 s7, s7, s16
-; GCN-NEXT: s_cmp_ge_u32 s7, s11
-; GCN-NEXT: s_cselect_b32 s16, -1, 0
; GCN-NEXT: s_cmp_ge_u32 s6, s10
; GCN-NEXT: s_cselect_b32 s6, -1, 0
-; GCN-NEXT: s_cmp_eq_u32 s7, s11
-; GCN-NEXT: s_cselect_b32 s6, s6, s16
-; GCN-NEXT: s_cmp_lg_u32 s6, 0
-; GCN-NEXT: s_cselect_b32 s5, s5, s15
-; GCN-NEXT: s_cselect_b32 s4, s4, s14
+; GCN-NEXT: s_cmp_eq_u32 s4, s11
+; GCN-NEXT: s_cselect_b32 s4, s6, s5
+; GCN-NEXT: s_cmp_lg_u32 s4, 0
+; GCN-NEXT: s_cselect_b32 s5, s15, s17
+; GCN-NEXT: s_cselect_b32 s4, s14, s16
; GCN-NEXT: s_xor_b64 s[6:7], s[12:13], s[8:9]
; GCN-NEXT: s_xor_b64 s[4:5], s[4:5], s[6:7]
; GCN-NEXT: s_sub_u32 s4, s4, s6
@@ -208,7 +204,6 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GCN-IR-NEXT: s_add_u32 s18, s16, 1
; GCN-IR-NEXT: s_cselect_b64 s[10:11], -1, 0
; GCN-IR-NEXT: s_or_b32 s10, s10, s11
-; GCN-IR-NEXT: s_cmp_lg_u32 s10, 0
; GCN-IR-NEXT: s_addc_u32 s10, s17, 0
; GCN-IR-NEXT: s_cselect_b64 s[10:11], -1, 0
; GCN-IR-NEXT: s_sub_i32 s16, 63, s16
@@ -242,7 +237,6 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GCN-IR-NEXT: s_add_u32 s14, s14, 1
; GCN-IR-NEXT: s_cselect_b64 s[20:21], -1, 0
; GCN-IR-NEXT: s_or_b32 s20, s20, s21
-; GCN-IR-NEXT: s_cmp_lg_u32 s20, 0
; GCN-IR-NEXT: s_addc_u32 s15, s15, 0
; GCN-IR-NEXT: s_cselect_b64 s[20:21], -1, 0
; GCN-IR-NEXT: s_mov_b64 s[12:13], s[8:9]
@@ -1195,10 +1189,9 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-NEXT: s_addc_u32 s12, 0, s13
; GCN-NEXT: s_add_u32 s13, s8, s9
; GCN-NEXT: v_mov_b32_e32 v0, s13
-; GCN-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-NEXT: v_mul_hi_u32 v0, s2, v0
+; GCN-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-NEXT: s_or_b32 s8, s8, s9
-; GCN-NEXT: s_cmp_lg_u32 s8, 0
; GCN-NEXT: s_addc_u32 s11, s11, s12
; GCN-NEXT: s_mul_i32 s8, s2, s11
; GCN-NEXT: v_readfirstlane_b32 s9, v0
@@ -1229,7 +1222,6 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-NEXT: s_add_u32 s2, s13, s2
; GCN-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-NEXT: s_or_b32 s8, s8, s9
-; GCN-NEXT: s_cmp_lg_u32 s8, 0
; GCN-NEXT: s_addc_u32 s8, s11, s10
; GCN-NEXT: v_mul_hi_u32 v1, s2, 24
; GCN-NEXT: v_mul_hi_u32 v0, s8, 24
@@ -1238,48 +1230,46 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-NEXT: v_readfirstlane_b32 s10, v1
; GCN-NEXT: v_readfirstlane_b32 s9, v0
; GCN-NEXT: s_add_u32 s8, s10, s8
-; GCN-NEXT: s_addc_u32 s10, 0, s9
-; GCN-NEXT: v_mov_b32_e32 v0, s10
+; GCN-NEXT: s_addc_u32 s12, 0, s9
+; GCN-NEXT: v_mov_b32_e32 v0, s12
; GCN-NEXT: v_mul_hi_u32 v0, s6, v0
-; GCN-NEXT: s_mul_i32 s8, s7, s10
+; GCN-NEXT: s_mul_i32 s8, s7, s12
; GCN-NEXT: v_readfirstlane_b32 s9, v0
-; GCN-NEXT: s_add_i32 s11, s9, s8
-; GCN-NEXT: s_sub_i32 s12, 0, s11
-; GCN-NEXT: s_mul_i32 s8, s6, s10
-; GCN-NEXT: s_sub_u32 s13, 24, s8
-; GCN-NEXT: s_cselect_b64 s[8:9], -1, 0
-; GCN-NEXT: s_or_b32 s14, s8, s9
-; GCN-NEXT: s_cmp_lg_u32 s14, 0
-; GCN-NEXT: s_subb_u32 s12, s12, s7
-; GCN-NEXT: s_sub_u32 s15, s13, s6
+; GCN-NEXT: s_add_i32 s13, s9, s8
+; GCN-NEXT: s_sub_i32 s10, 0, s13
+; GCN-NEXT: s_mul_i32 s8, s6, s12
+; GCN-NEXT: s_sub_u32 s14, 24, s8
; GCN-NEXT: s_cselect_b64 s[8:9], -1, 0
+; GCN-NEXT: s_or_b32 s11, s8, s9
+; GCN-NEXT: s_subb_u32 s15, s10, s7
+; GCN-NEXT: s_sub_u32 s16, s14, s6
+; GCN-NEXT: s_cselect_b64 s[10:11], -1, 0
+; GCN-NEXT: s_or_b32 s10, s10, s11
+; GCN-NEXT: s_subb_u32 s10, s15, 0
+; GCN-NEXT: s_cmp_ge_u32 s10, s7
+; GCN-NEXT: s_cselect_b32 s11, -1, 0
+; GCN-NEXT: s_cmp_ge_u32 s16, s6
+; GCN-NEXT: s_cselect_b32 s15, -1, 0
+; GCN-NEXT: s_cmp_eq_u32 s10, s7
+; GCN-NEXT: s_cselect_b32 s10, s15, s11
+; GCN-NEXT: s_add_u32 s11, s12, 1
+; GCN-NEXT: s_addc_u32 s15, 0, 0
+; GCN-NEXT: s_add_u32 s16, s12, 2
+; GCN-NEXT: s_addc_u32 s17, 0, 0
+; GCN-NEXT: s_cmp_lg_u32 s10, 0
+; GCN-NEXT: s_cselect_b32 s10, s16, s11
+; GCN-NEXT: s_cselect_b32 s11, s17, s15
; GCN-NEXT: s_or_b32 s8, s8, s9
-; GCN-NEXT: s_cmp_lg_u32 s8, 0
-; GCN-NEXT: s_subb_u32 s8, s12, 0
+; GCN-NEXT: s_subb_u32 s8, 0, s13
; GCN-NEXT: s_cmp_ge_u32 s8, s7
; GCN-NEXT: s_cselect_b32 s9, -1, 0
-; GCN-NEXT: s_cmp_ge_u32 s15, s6
-; GCN-NEXT: s_cselect_b32 s12, -1, 0
-; GCN-NEXT: s_cmp_eq_u32 s8, s7
-; GCN-NEXT: s_cselect_b32 s8, s12, s9
-; GCN-NEXT: s_add_u32 s9, s10, 1
-; GCN-NEXT: s_addc_u32 s12, 0, 0
-; GCN-NEXT: s_add_u32 s15, s10, 2
-; GCN-NEXT: s_addc_u32 s16, 0, 0
-; GCN-NEXT: s_cmp_lg_u32 s8, 0
-; GCN-NEXT: s_cselect_b32 s8, s15, s9
-; GCN-NEXT: s_cselect_b32 s9, s16, s12
-; GCN-NEXT: s_cmp_lg_u32 s14, 0
-; GCN-NEXT: s_subb_u32 s11, 0, s11
-; GCN-NEXT: s_cmp_ge_u32 s11, s7
-; GCN-NEXT: s_cselect_b32 s12, -1, 0
-; GCN-NEXT: s_cmp_ge_u32 s13, s6
+; GCN-NEXT: s_cmp_ge_u32 s14, s6
; GCN-NEXT: s_cselect_b32 s6, -1, 0
-; GCN-NEXT: s_cmp_eq_u32 s11, s7
-; GCN-NEXT: s_cselect_b32 s6, s6, s12
+; GCN-NEXT: s_cmp_eq_u32 s8, s7
+; GCN-NEXT: s_cselect_b32 s6, s6, s9
; GCN-NEXT: s_cmp_lg_u32 s6, 0
-; GCN-NEXT: s_cselect_b32 s7, s9, 0
-; GCN-NEXT: s_cselect_b32 s6, s8, s10
+; GCN-NEXT: s_cselect_b32 s7, s11, 0
+; GCN-NEXT: s_cselect_b32 s6, s10, s12
; GCN-NEXT: s_xor_b64 s[6:7], s[6:7], s[4:5]
; GCN-NEXT: s_sub_u32 s6, s6, s4
; GCN-NEXT: s_subb_u32 s7, s7, s4
@@ -1315,7 +1305,6 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_add_u32 s12, s10, 1
; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-IR-NEXT: s_or_b32 s8, s8, s9
-; GCN-IR-NEXT: s_cmp_lg_u32 s8, 0
; GCN-IR-NEXT: s_addc_u32 s8, s11, 0
; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-IR-NEXT: s_sub_i32 s10, 63, s10
@@ -1348,7 +1337,6 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_add_u32 s16, s16, 1
; GCN-IR-NEXT: s_cselect_b64 s[18:19], -1, 0
; GCN-IR-NEXT: s_or_b32 s18, s18, s19
-; GCN-IR-NEXT: s_cmp_lg_u32 s18, 0
; GCN-IR-NEXT: s_addc_u32 s17, s17, 0
; GCN-IR-NEXT: s_cselect_b64 s[18:19], -1, 0
; GCN-IR-NEXT: s_mov_b64 s[10:11], s[6:7]
diff --git a/llvm/test/CodeGen/AMDGPU/sitofp.f16.ll b/llvm/test/CodeGen/AMDGPU/sitofp.f16.ll
index 09596e9..7ddd90e 100644
--- a/llvm/test/CodeGen/AMDGPU/sitofp.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/sitofp.f16.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=SI %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=VI %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global,+real-true16 -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GFX11-TRUE16 %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global,-real-true16 -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GFX11-FAKE16 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefixes=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global < %s | FileCheck -check-prefixes=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global,+real-true16 < %s | FileCheck -check-prefixes=GFX11-TRUE16 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global,-real-true16 < %s | FileCheck -check-prefixes=GFX11-FAKE16 %s
define amdgpu_kernel void @sitofp_i16_to_f16(
; SI-LABEL: sitofp_i16_to_f16:
diff --git a/llvm/test/CodeGen/AMDGPU/srem.ll b/llvm/test/CodeGen/AMDGPU/srem.ll
index bbd1793..e12e31b 100644
--- a/llvm/test/CodeGen/AMDGPU/srem.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem.ll
@@ -1513,7 +1513,7 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
; GCN-NEXT: v_cvt_f32_u32_e32 v0, s8
; GCN-NEXT: v_cvt_f32_u32_e32 v1, s9
; GCN-NEXT: s_sub_u32 s3, 0, s8
-; GCN-NEXT: s_subb_u32 s12, 0, s9
+; GCN-NEXT: s_subb_u32 s10, 0, s9
; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0
; GCN-NEXT: v_rcp_f32_e32 v0, v0
; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -1522,56 +1522,52 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0
; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1
; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT: v_readfirstlane_b32 s13, v1
-; GCN-NEXT: v_readfirstlane_b32 s10, v0
-; GCN-NEXT: s_mul_i32 s11, s3, s13
-; GCN-NEXT: s_mul_hi_u32 s15, s3, s10
-; GCN-NEXT: s_mul_i32 s14, s12, s10
-; GCN-NEXT: s_add_i32 s11, s15, s11
-; GCN-NEXT: s_add_i32 s11, s11, s14
-; GCN-NEXT: s_mul_i32 s16, s3, s10
-; GCN-NEXT: s_mul_i32 s15, s10, s11
-; GCN-NEXT: s_mul_hi_u32 s17, s10, s16
-; GCN-NEXT: s_mul_hi_u32 s14, s10, s11
+; GCN-NEXT: v_readfirstlane_b32 s11, v1
+; GCN-NEXT: v_readfirstlane_b32 s12, v0
+; GCN-NEXT: s_mul_i32 s13, s3, s11
+; GCN-NEXT: s_mul_hi_u32 s15, s3, s12
+; GCN-NEXT: s_mul_i32 s14, s10, s12
+; GCN-NEXT: s_add_i32 s13, s15, s13
+; GCN-NEXT: s_add_i32 s13, s13, s14
+; GCN-NEXT: s_mul_i32 s16, s3, s12
+; GCN-NEXT: s_mul_i32 s15, s12, s13
+; GCN-NEXT: s_mul_hi_u32 s17, s12, s16
+; GCN-NEXT: s_mul_hi_u32 s14, s12, s13
; GCN-NEXT: s_add_u32 s15, s17, s15
; GCN-NEXT: s_addc_u32 s14, 0, s14
-; GCN-NEXT: s_mul_hi_u32 s18, s13, s16
-; GCN-NEXT: s_mul_i32 s16, s13, s16
+; GCN-NEXT: s_mul_hi_u32 s18, s11, s16
+; GCN-NEXT: s_mul_i32 s16, s11, s16
; GCN-NEXT: s_add_u32 s15, s15, s16
-; GCN-NEXT: s_mul_hi_u32 s17, s13, s11
+; GCN-NEXT: s_mul_hi_u32 s17, s11, s13
; GCN-NEXT: s_addc_u32 s14, s14, s18
; GCN-NEXT: s_addc_u32 s15, s17, 0
-; GCN-NEXT: s_mul_i32 s11, s13, s11
-; GCN-NEXT: s_add_u32 s11, s14, s11
+; GCN-NEXT: s_mul_i32 s13, s11, s13
+; GCN-NEXT: s_add_u32 s13, s14, s13
; GCN-NEXT: s_addc_u32 s14, 0, s15
-; GCN-NEXT: s_add_u32 s15, s10, s11
-; GCN-NEXT: s_cselect_b64 s[10:11], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[10:11], 0
-; GCN-NEXT: s_addc_u32 s13, s13, s14
-; GCN-NEXT: s_mul_i32 s10, s3, s13
-; GCN-NEXT: s_mul_hi_u32 s11, s3, s15
-; GCN-NEXT: s_add_i32 s10, s11, s10
-; GCN-NEXT: s_mul_i32 s12, s12, s15
-; GCN-NEXT: s_add_i32 s10, s10, s12
-; GCN-NEXT: s_mul_i32 s3, s3, s15
-; GCN-NEXT: s_mul_hi_u32 s12, s13, s3
-; GCN-NEXT: s_mul_i32 s14, s13, s3
-; GCN-NEXT: s_mul_i32 s17, s15, s10
-; GCN-NEXT: s_mul_hi_u32 s3, s15, s3
-; GCN-NEXT: s_mul_hi_u32 s16, s15, s10
+; GCN-NEXT: s_add_u32 s12, s12, s13
+; GCN-NEXT: s_addc_u32 s11, s11, s14
+; GCN-NEXT: s_mul_i32 s13, s3, s11
+; GCN-NEXT: s_mul_hi_u32 s14, s3, s12
+; GCN-NEXT: s_add_i32 s13, s14, s13
+; GCN-NEXT: s_mul_i32 s10, s10, s12
+; GCN-NEXT: s_add_i32 s13, s13, s10
+; GCN-NEXT: s_mul_i32 s3, s3, s12
+; GCN-NEXT: s_mul_hi_u32 s14, s11, s3
+; GCN-NEXT: s_mul_i32 s15, s11, s3
+; GCN-NEXT: s_mul_i32 s17, s12, s13
+; GCN-NEXT: s_mul_hi_u32 s3, s12, s3
+; GCN-NEXT: s_mul_hi_u32 s16, s12, s13
; GCN-NEXT: s_add_u32 s3, s3, s17
; GCN-NEXT: s_addc_u32 s16, 0, s16
-; GCN-NEXT: s_add_u32 s3, s3, s14
-; GCN-NEXT: s_mul_hi_u32 s11, s13, s10
-; GCN-NEXT: s_addc_u32 s3, s16, s12
-; GCN-NEXT: s_addc_u32 s11, s11, 0
-; GCN-NEXT: s_mul_i32 s10, s13, s10
-; GCN-NEXT: s_add_u32 s3, s3, s10
-; GCN-NEXT: s_addc_u32 s12, 0, s11
-; GCN-NEXT: s_add_u32 s3, s15, s3
-; GCN-NEXT: s_cselect_b64 s[10:11], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[10:11], 0
-; GCN-NEXT: s_addc_u32 s14, s13, s12
+; GCN-NEXT: s_add_u32 s3, s3, s15
+; GCN-NEXT: s_mul_hi_u32 s10, s11, s13
+; GCN-NEXT: s_addc_u32 s3, s16, s14
+; GCN-NEXT: s_addc_u32 s10, s10, 0
+; GCN-NEXT: s_mul_i32 s13, s11, s13
+; GCN-NEXT: s_add_u32 s3, s3, s13
+; GCN-NEXT: s_addc_u32 s10, 0, s10
+; GCN-NEXT: s_add_u32 s3, s12, s3
+; GCN-NEXT: s_addc_u32 s14, s11, s10
; GCN-NEXT: s_ashr_i32 s10, s5, 31
; GCN-NEXT: s_add_u32 s12, s4, s10
; GCN-NEXT: s_mov_b32 s11, s10
@@ -1600,11 +1596,9 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
; GCN-NEXT: s_mul_i32 s3, s8, s3
; GCN-NEXT: s_sub_u32 s3, s12, s3
; GCN-NEXT: s_cselect_b64 s[14:15], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[14:15], 0
; GCN-NEXT: s_subb_u32 s12, s16, s9
; GCN-NEXT: s_sub_u32 s18, s3, s8
; GCN-NEXT: s_cselect_b64 s[16:17], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[16:17], 0
; GCN-NEXT: s_subb_u32 s19, s12, 0
; GCN-NEXT: s_cmp_ge_u32 s19, s9
; GCN-NEXT: s_cselect_b32 s20, -1, 0
@@ -1614,12 +1608,10 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
; GCN-NEXT: s_cselect_b32 s20, s21, s20
; GCN-NEXT: s_cmp_lg_u64 s[16:17], 0
; GCN-NEXT: s_subb_u32 s12, s12, s9
-; GCN-NEXT: s_sub_u32 s21, s18, s8
-; GCN-NEXT: s_cselect_b64 s[16:17], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[16:17], 0
+; GCN-NEXT: s_sub_u32 s16, s18, s8
; GCN-NEXT: s_subb_u32 s12, s12, 0
; GCN-NEXT: s_cmp_lg_u32 s20, 0
-; GCN-NEXT: s_cselect_b32 s16, s21, s18
+; GCN-NEXT: s_cselect_b32 s16, s16, s18
; GCN-NEXT: s_cselect_b32 s12, s12, s19
; GCN-NEXT: s_cmp_lg_u64 s[14:15], 0
; GCN-NEXT: s_subb_u32 s5, s13, s5
@@ -1931,11 +1923,9 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
; TONGA-NEXT: v_readfirstlane_b32 s14, v0
; TONGA-NEXT: s_sub_u32 s12, s12, s14
; TONGA-NEXT: s_cselect_b64 s[14:15], -1, 0
-; TONGA-NEXT: s_cmp_lg_u64 s[14:15], 0
; TONGA-NEXT: s_subb_u32 s3, s3, s7
; TONGA-NEXT: s_sub_u32 s18, s12, s6
; TONGA-NEXT: s_cselect_b64 s[16:17], -1, 0
-; TONGA-NEXT: s_cmp_lg_u64 s[16:17], 0
; TONGA-NEXT: s_subb_u32 s19, s3, 0
; TONGA-NEXT: s_cmp_ge_u32 s19, s7
; TONGA-NEXT: s_cselect_b32 s20, -1, 0
@@ -1945,12 +1935,10 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
; TONGA-NEXT: s_cselect_b32 s20, s21, s20
; TONGA-NEXT: s_cmp_lg_u64 s[16:17], 0
; TONGA-NEXT: s_subb_u32 s3, s3, s7
-; TONGA-NEXT: s_sub_u32 s21, s18, s6
-; TONGA-NEXT: s_cselect_b64 s[16:17], -1, 0
-; TONGA-NEXT: s_cmp_lg_u64 s[16:17], 0
+; TONGA-NEXT: s_sub_u32 s16, s18, s6
; TONGA-NEXT: s_subb_u32 s3, s3, 0
; TONGA-NEXT: s_cmp_lg_u32 s20, 0
-; TONGA-NEXT: s_cselect_b32 s16, s21, s18
+; TONGA-NEXT: s_cselect_b32 s16, s16, s18
; TONGA-NEXT: s_cselect_b32 s3, s3, s19
; TONGA-NEXT: s_cmp_lg_u64 s[14:15], 0
; TONGA-NEXT: s_subb_u32 s5, s13, s5
@@ -2730,7 +2718,7 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_cvt_f32_u32_e32 v0, s6
; GCN-NEXT: v_cvt_f32_u32_e32 v1, s7
; GCN-NEXT: s_sub_u32 s9, 0, s6
-; GCN-NEXT: s_subb_u32 s16, 0, s7
+; GCN-NEXT: s_subb_u32 s14, 0, s7
; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0
; GCN-NEXT: v_rcp_f32_e32 v0, v0
; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -2739,56 +2727,52 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0
; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1
; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT: v_readfirstlane_b32 s17, v1
-; GCN-NEXT: v_readfirstlane_b32 s14, v0
-; GCN-NEXT: s_mul_i32 s15, s9, s17
-; GCN-NEXT: s_mul_hi_u32 s19, s9, s14
-; GCN-NEXT: s_mul_i32 s18, s16, s14
-; GCN-NEXT: s_add_i32 s15, s19, s15
-; GCN-NEXT: s_add_i32 s15, s15, s18
-; GCN-NEXT: s_mul_i32 s20, s9, s14
-; GCN-NEXT: s_mul_i32 s19, s14, s15
-; GCN-NEXT: s_mul_hi_u32 s21, s14, s20
-; GCN-NEXT: s_mul_hi_u32 s18, s14, s15
+; GCN-NEXT: v_readfirstlane_b32 s15, v1
+; GCN-NEXT: v_readfirstlane_b32 s16, v0
+; GCN-NEXT: s_mul_i32 s17, s9, s15
+; GCN-NEXT: s_mul_hi_u32 s19, s9, s16
+; GCN-NEXT: s_mul_i32 s18, s14, s16
+; GCN-NEXT: s_add_i32 s17, s19, s17
+; GCN-NEXT: s_add_i32 s17, s17, s18
+; GCN-NEXT: s_mul_i32 s20, s9, s16
+; GCN-NEXT: s_mul_i32 s19, s16, s17
+; GCN-NEXT: s_mul_hi_u32 s21, s16, s20
+; GCN-NEXT: s_mul_hi_u32 s18, s16, s17
; GCN-NEXT: s_add_u32 s19, s21, s19
; GCN-NEXT: s_addc_u32 s18, 0, s18
-; GCN-NEXT: s_mul_hi_u32 s22, s17, s20
-; GCN-NEXT: s_mul_i32 s20, s17, s20
+; GCN-NEXT: s_mul_hi_u32 s22, s15, s20
+; GCN-NEXT: s_mul_i32 s20, s15, s20
; GCN-NEXT: s_add_u32 s19, s19, s20
-; GCN-NEXT: s_mul_hi_u32 s21, s17, s15
+; GCN-NEXT: s_mul_hi_u32 s21, s15, s17
; GCN-NEXT: s_addc_u32 s18, s18, s22
; GCN-NEXT: s_addc_u32 s19, s21, 0
-; GCN-NEXT: s_mul_i32 s15, s17, s15
-; GCN-NEXT: s_add_u32 s15, s18, s15
+; GCN-NEXT: s_mul_i32 s17, s15, s17
+; GCN-NEXT: s_add_u32 s17, s18, s17
; GCN-NEXT: s_addc_u32 s18, 0, s19
-; GCN-NEXT: s_add_u32 s19, s14, s15
-; GCN-NEXT: s_cselect_b64 s[14:15], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[14:15], 0
-; GCN-NEXT: s_addc_u32 s17, s17, s18
-; GCN-NEXT: s_mul_i32 s14, s9, s17
-; GCN-NEXT: s_mul_hi_u32 s15, s9, s19
-; GCN-NEXT: s_add_i32 s14, s15, s14
-; GCN-NEXT: s_mul_i32 s16, s16, s19
-; GCN-NEXT: s_add_i32 s14, s14, s16
-; GCN-NEXT: s_mul_i32 s9, s9, s19
-; GCN-NEXT: s_mul_hi_u32 s16, s17, s9
-; GCN-NEXT: s_mul_i32 s18, s17, s9
-; GCN-NEXT: s_mul_i32 s21, s19, s14
-; GCN-NEXT: s_mul_hi_u32 s9, s19, s9
-; GCN-NEXT: s_mul_hi_u32 s20, s19, s14
+; GCN-NEXT: s_add_u32 s16, s16, s17
+; GCN-NEXT: s_addc_u32 s15, s15, s18
+; GCN-NEXT: s_mul_i32 s17, s9, s15
+; GCN-NEXT: s_mul_hi_u32 s18, s9, s16
+; GCN-NEXT: s_add_i32 s17, s18, s17
+; GCN-NEXT: s_mul_i32 s14, s14, s16
+; GCN-NEXT: s_add_i32 s17, s17, s14
+; GCN-NEXT: s_mul_i32 s9, s9, s16
+; GCN-NEXT: s_mul_hi_u32 s18, s15, s9
+; GCN-NEXT: s_mul_i32 s19, s15, s9
+; GCN-NEXT: s_mul_i32 s21, s16, s17
+; GCN-NEXT: s_mul_hi_u32 s9, s16, s9
+; GCN-NEXT: s_mul_hi_u32 s20, s16, s17
; GCN-NEXT: s_add_u32 s9, s9, s21
; GCN-NEXT: s_addc_u32 s20, 0, s20
-; GCN-NEXT: s_add_u32 s9, s9, s18
-; GCN-NEXT: s_mul_hi_u32 s15, s17, s14
-; GCN-NEXT: s_addc_u32 s9, s20, s16
-; GCN-NEXT: s_addc_u32 s15, s15, 0
-; GCN-NEXT: s_mul_i32 s14, s17, s14
-; GCN-NEXT: s_add_u32 s9, s9, s14
-; GCN-NEXT: s_addc_u32 s16, 0, s15
-; GCN-NEXT: s_add_u32 s9, s19, s9
-; GCN-NEXT: s_cselect_b64 s[14:15], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[14:15], 0
-; GCN-NEXT: s_addc_u32 s18, s17, s16
+; GCN-NEXT: s_add_u32 s9, s9, s19
+; GCN-NEXT: s_mul_hi_u32 s14, s15, s17
+; GCN-NEXT: s_addc_u32 s9, s20, s18
+; GCN-NEXT: s_addc_u32 s14, s14, 0
+; GCN-NEXT: s_mul_i32 s17, s15, s17
+; GCN-NEXT: s_add_u32 s9, s9, s17
+; GCN-NEXT: s_addc_u32 s14, 0, s14
+; GCN-NEXT: s_add_u32 s9, s16, s9
+; GCN-NEXT: s_addc_u32 s18, s15, s14
; GCN-NEXT: s_ashr_i32 s14, s11, 31
; GCN-NEXT: s_add_u32 s16, s10, s14
; GCN-NEXT: s_mov_b32 s15, s14
@@ -2817,11 +2801,9 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: s_mul_i32 s9, s6, s9
; GCN-NEXT: s_sub_u32 s9, s16, s9
; GCN-NEXT: s_cselect_b64 s[18:19], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[18:19], 0
; GCN-NEXT: s_subb_u32 s16, s20, s7
; GCN-NEXT: s_sub_u32 s22, s9, s6
; GCN-NEXT: s_cselect_b64 s[20:21], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[20:21], 0
; GCN-NEXT: s_subb_u32 s23, s16, 0
; GCN-NEXT: s_cmp_ge_u32 s23, s7
; GCN-NEXT: s_cselect_b32 s24, -1, 0
@@ -2831,12 +2813,10 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: s_cselect_b32 s24, s25, s24
; GCN-NEXT: s_cmp_lg_u64 s[20:21], 0
; GCN-NEXT: s_subb_u32 s16, s16, s7
-; GCN-NEXT: s_sub_u32 s25, s22, s6
-; GCN-NEXT: s_cselect_b64 s[20:21], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[20:21], 0
+; GCN-NEXT: s_sub_u32 s20, s22, s6
; GCN-NEXT: s_subb_u32 s16, s16, 0
; GCN-NEXT: s_cmp_lg_u32 s24, 0
-; GCN-NEXT: s_cselect_b32 s20, s25, s22
+; GCN-NEXT: s_cselect_b32 s20, s20, s22
; GCN-NEXT: s_cselect_b32 s16, s16, s23
; GCN-NEXT: s_cmp_lg_u64 s[18:19], 0
; GCN-NEXT: s_subb_u32 s11, s17, s11
@@ -2887,7 +2867,7 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_cvt_f32_u32_e32 v0, s10
; GCN-NEXT: v_cvt_f32_u32_e32 v1, s11
; GCN-NEXT: s_sub_u32 s3, 0, s10
-; GCN-NEXT: s_subb_u32 s14, 0, s11
+; GCN-NEXT: s_subb_u32 s12, 0, s11
; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0
; GCN-NEXT: v_rcp_f32_e32 v0, v0
; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -2896,56 +2876,52 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0
; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1
; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT: v_readfirstlane_b32 s15, v1
-; GCN-NEXT: v_readfirstlane_b32 s12, v0
-; GCN-NEXT: s_mul_i32 s13, s3, s15
-; GCN-NEXT: s_mul_hi_u32 s17, s3, s12
-; GCN-NEXT: s_mul_i32 s16, s14, s12
-; GCN-NEXT: s_add_i32 s13, s17, s13
-; GCN-NEXT: s_add_i32 s13, s13, s16
-; GCN-NEXT: s_mul_i32 s18, s3, s12
-; GCN-NEXT: s_mul_i32 s17, s12, s13
-; GCN-NEXT: s_mul_hi_u32 s19, s12, s18
-; GCN-NEXT: s_mul_hi_u32 s16, s12, s13
+; GCN-NEXT: v_readfirstlane_b32 s13, v1
+; GCN-NEXT: v_readfirstlane_b32 s14, v0
+; GCN-NEXT: s_mul_i32 s15, s3, s13
+; GCN-NEXT: s_mul_hi_u32 s17, s3, s14
+; GCN-NEXT: s_mul_i32 s16, s12, s14
+; GCN-NEXT: s_add_i32 s15, s17, s15
+; GCN-NEXT: s_add_i32 s15, s15, s16
+; GCN-NEXT: s_mul_i32 s18, s3, s14
+; GCN-NEXT: s_mul_i32 s17, s14, s15
+; GCN-NEXT: s_mul_hi_u32 s19, s14, s18
+; GCN-NEXT: s_mul_hi_u32 s16, s14, s15
; GCN-NEXT: s_add_u32 s17, s19, s17
; GCN-NEXT: s_addc_u32 s16, 0, s16
-; GCN-NEXT: s_mul_hi_u32 s20, s15, s18
-; GCN-NEXT: s_mul_i32 s18, s15, s18
+; GCN-NEXT: s_mul_hi_u32 s20, s13, s18
+; GCN-NEXT: s_mul_i32 s18, s13, s18
; GCN-NEXT: s_add_u32 s17, s17, s18
-; GCN-NEXT: s_mul_hi_u32 s19, s15, s13
+; GCN-NEXT: s_mul_hi_u32 s19, s13, s15
; GCN-NEXT: s_addc_u32 s16, s16, s20
; GCN-NEXT: s_addc_u32 s17, s19, 0
-; GCN-NEXT: s_mul_i32 s13, s15, s13
-; GCN-NEXT: s_add_u32 s13, s16, s13
+; GCN-NEXT: s_mul_i32 s15, s13, s15
+; GCN-NEXT: s_add_u32 s15, s16, s15
; GCN-NEXT: s_addc_u32 s16, 0, s17
-; GCN-NEXT: s_add_u32 s17, s12, s13
-; GCN-NEXT: s_cselect_b64 s[12:13], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[12:13], 0
-; GCN-NEXT: s_addc_u32 s15, s15, s16
-; GCN-NEXT: s_mul_i32 s12, s3, s15
-; GCN-NEXT: s_mul_hi_u32 s13, s3, s17
-; GCN-NEXT: s_add_i32 s12, s13, s12
-; GCN-NEXT: s_mul_i32 s14, s14, s17
-; GCN-NEXT: s_add_i32 s12, s12, s14
-; GCN-NEXT: s_mul_i32 s3, s3, s17
-; GCN-NEXT: s_mul_hi_u32 s14, s15, s3
-; GCN-NEXT: s_mul_i32 s16, s15, s3
-; GCN-NEXT: s_mul_i32 s19, s17, s12
-; GCN-NEXT: s_mul_hi_u32 s3, s17, s3
-; GCN-NEXT: s_mul_hi_u32 s18, s17, s12
+; GCN-NEXT: s_add_u32 s14, s14, s15
+; GCN-NEXT: s_addc_u32 s13, s13, s16
+; GCN-NEXT: s_mul_i32 s15, s3, s13
+; GCN-NEXT: s_mul_hi_u32 s16, s3, s14
+; GCN-NEXT: s_add_i32 s15, s16, s15
+; GCN-NEXT: s_mul_i32 s12, s12, s14
+; GCN-NEXT: s_add_i32 s15, s15, s12
+; GCN-NEXT: s_mul_i32 s3, s3, s14
+; GCN-NEXT: s_mul_hi_u32 s16, s13, s3
+; GCN-NEXT: s_mul_i32 s17, s13, s3
+; GCN-NEXT: s_mul_i32 s19, s14, s15
+; GCN-NEXT: s_mul_hi_u32 s3, s14, s3
+; GCN-NEXT: s_mul_hi_u32 s18, s14, s15
; GCN-NEXT: s_add_u32 s3, s3, s19
; GCN-NEXT: s_addc_u32 s18, 0, s18
-; GCN-NEXT: s_add_u32 s3, s3, s16
-; GCN-NEXT: s_mul_hi_u32 s13, s15, s12
-; GCN-NEXT: s_addc_u32 s3, s18, s14
-; GCN-NEXT: s_addc_u32 s13, s13, 0
-; GCN-NEXT: s_mul_i32 s12, s15, s12
-; GCN-NEXT: s_add_u32 s3, s3, s12
-; GCN-NEXT: s_addc_u32 s14, 0, s13
-; GCN-NEXT: s_add_u32 s3, s17, s3
-; GCN-NEXT: s_cselect_b64 s[12:13], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[12:13], 0
-; GCN-NEXT: s_addc_u32 s16, s15, s14
+; GCN-NEXT: s_add_u32 s3, s3, s17
+; GCN-NEXT: s_mul_hi_u32 s12, s13, s15
+; GCN-NEXT: s_addc_u32 s3, s18, s16
+; GCN-NEXT: s_addc_u32 s12, s12, 0
+; GCN-NEXT: s_mul_i32 s15, s13, s15
+; GCN-NEXT: s_add_u32 s3, s3, s15
+; GCN-NEXT: s_addc_u32 s12, 0, s12
+; GCN-NEXT: s_add_u32 s3, s14, s3
+; GCN-NEXT: s_addc_u32 s16, s13, s12
; GCN-NEXT: s_ashr_i32 s12, s5, 31
; GCN-NEXT: s_add_u32 s14, s4, s12
; GCN-NEXT: s_mov_b32 s13, s12
@@ -2974,11 +2950,9 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: s_mul_i32 s3, s10, s3
; GCN-NEXT: s_sub_u32 s3, s14, s3
; GCN-NEXT: s_cselect_b64 s[16:17], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[16:17], 0
; GCN-NEXT: s_subb_u32 s14, s18, s11
; GCN-NEXT: s_sub_u32 s20, s3, s10
; GCN-NEXT: s_cselect_b64 s[18:19], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[18:19], 0
; GCN-NEXT: s_subb_u32 s21, s14, 0
; GCN-NEXT: s_cmp_ge_u32 s21, s11
; GCN-NEXT: s_cselect_b32 s22, -1, 0
@@ -2988,12 +2962,10 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: s_cselect_b32 s22, s23, s22
; GCN-NEXT: s_cmp_lg_u64 s[18:19], 0
; GCN-NEXT: s_subb_u32 s14, s14, s11
-; GCN-NEXT: s_sub_u32 s23, s20, s10
-; GCN-NEXT: s_cselect_b64 s[18:19], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[18:19], 0
+; GCN-NEXT: s_sub_u32 s18, s20, s10
; GCN-NEXT: s_subb_u32 s14, s14, 0
; GCN-NEXT: s_cmp_lg_u32 s22, 0
-; GCN-NEXT: s_cselect_b32 s18, s23, s20
+; GCN-NEXT: s_cselect_b32 s18, s18, s20
; GCN-NEXT: s_cselect_b32 s14, s14, s21
; GCN-NEXT: s_cmp_lg_u64 s[16:17], 0
; GCN-NEXT: s_subb_u32 s5, s15, s5
@@ -3463,11 +3435,9 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_readfirstlane_b32 s14, v0
; TONGA-NEXT: s_sub_u32 s12, s12, s14
; TONGA-NEXT: s_cselect_b64 s[14:15], -1, 0
-; TONGA-NEXT: s_cmp_lg_u64 s[14:15], 0
; TONGA-NEXT: s_subb_u32 s1, s1, s7
; TONGA-NEXT: s_sub_u32 s18, s12, s6
; TONGA-NEXT: s_cselect_b64 s[16:17], -1, 0
-; TONGA-NEXT: s_cmp_lg_u64 s[16:17], 0
; TONGA-NEXT: s_subb_u32 s19, s1, 0
; TONGA-NEXT: s_cmp_ge_u32 s19, s7
; TONGA-NEXT: s_cselect_b32 s20, -1, 0
@@ -3477,12 +3447,10 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: s_cselect_b32 s20, s21, s20
; TONGA-NEXT: s_cmp_lg_u64 s[16:17], 0
; TONGA-NEXT: s_subb_u32 s1, s1, s7
-; TONGA-NEXT: s_sub_u32 s21, s18, s6
-; TONGA-NEXT: s_cselect_b64 s[16:17], -1, 0
-; TONGA-NEXT: s_cmp_lg_u64 s[16:17], 0
+; TONGA-NEXT: s_sub_u32 s16, s18, s6
; TONGA-NEXT: s_subb_u32 s1, s1, 0
; TONGA-NEXT: s_cmp_lg_u32 s20, 0
-; TONGA-NEXT: s_cselect_b32 s16, s21, s18
+; TONGA-NEXT: s_cselect_b32 s16, s16, s18
; TONGA-NEXT: s_cselect_b32 s1, s1, s19
; TONGA-NEXT: s_cmp_lg_u64 s[14:15], 0
; TONGA-NEXT: s_subb_u32 s3, s13, s3
@@ -4934,7 +4902,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_cvt_f32_u32_e32 v0, s6
; GCN-NEXT: v_cvt_f32_u32_e32 v1, s7
; GCN-NEXT: s_sub_u32 s17, 0, s6
-; GCN-NEXT: s_subb_u32 s24, 0, s7
+; GCN-NEXT: s_subb_u32 s22, 0, s7
; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0
; GCN-NEXT: v_rcp_f32_e32 v0, v0
; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -4943,56 +4911,52 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0
; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1
; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT: v_readfirstlane_b32 s25, v1
-; GCN-NEXT: v_readfirstlane_b32 s22, v0
-; GCN-NEXT: s_mul_i32 s23, s17, s25
-; GCN-NEXT: s_mul_hi_u32 s27, s17, s22
-; GCN-NEXT: s_mul_i32 s26, s24, s22
-; GCN-NEXT: s_add_i32 s23, s27, s23
-; GCN-NEXT: s_add_i32 s23, s23, s26
-; GCN-NEXT: s_mul_i32 s28, s17, s22
-; GCN-NEXT: s_mul_i32 s27, s22, s23
-; GCN-NEXT: s_mul_hi_u32 s29, s22, s28
-; GCN-NEXT: s_mul_hi_u32 s26, s22, s23
+; GCN-NEXT: v_readfirstlane_b32 s23, v1
+; GCN-NEXT: v_readfirstlane_b32 s24, v0
+; GCN-NEXT: s_mul_i32 s25, s17, s23
+; GCN-NEXT: s_mul_hi_u32 s27, s17, s24
+; GCN-NEXT: s_mul_i32 s26, s22, s24
+; GCN-NEXT: s_add_i32 s25, s27, s25
+; GCN-NEXT: s_add_i32 s25, s25, s26
+; GCN-NEXT: s_mul_i32 s28, s17, s24
+; GCN-NEXT: s_mul_i32 s27, s24, s25
+; GCN-NEXT: s_mul_hi_u32 s29, s24, s28
+; GCN-NEXT: s_mul_hi_u32 s26, s24, s25
; GCN-NEXT: s_add_u32 s27, s29, s27
; GCN-NEXT: s_addc_u32 s26, 0, s26
-; GCN-NEXT: s_mul_hi_u32 s30, s25, s28
-; GCN-NEXT: s_mul_i32 s28, s25, s28
+; GCN-NEXT: s_mul_hi_u32 s30, s23, s28
+; GCN-NEXT: s_mul_i32 s28, s23, s28
; GCN-NEXT: s_add_u32 s27, s27, s28
-; GCN-NEXT: s_mul_hi_u32 s29, s25, s23
+; GCN-NEXT: s_mul_hi_u32 s29, s23, s25
; GCN-NEXT: s_addc_u32 s26, s26, s30
; GCN-NEXT: s_addc_u32 s27, s29, 0
-; GCN-NEXT: s_mul_i32 s23, s25, s23
-; GCN-NEXT: s_add_u32 s23, s26, s23
+; GCN-NEXT: s_mul_i32 s25, s23, s25
+; GCN-NEXT: s_add_u32 s25, s26, s25
; GCN-NEXT: s_addc_u32 s26, 0, s27
-; GCN-NEXT: s_add_u32 s27, s22, s23
-; GCN-NEXT: s_cselect_b64 s[22:23], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[22:23], 0
-; GCN-NEXT: s_addc_u32 s25, s25, s26
-; GCN-NEXT: s_mul_i32 s22, s17, s25
-; GCN-NEXT: s_mul_hi_u32 s23, s17, s27
-; GCN-NEXT: s_add_i32 s22, s23, s22
-; GCN-NEXT: s_mul_i32 s24, s24, s27
-; GCN-NEXT: s_add_i32 s22, s22, s24
-; GCN-NEXT: s_mul_i32 s17, s17, s27
-; GCN-NEXT: s_mul_hi_u32 s24, s25, s17
-; GCN-NEXT: s_mul_i32 s26, s25, s17
-; GCN-NEXT: s_mul_i32 s29, s27, s22
-; GCN-NEXT: s_mul_hi_u32 s17, s27, s17
-; GCN-NEXT: s_mul_hi_u32 s28, s27, s22
+; GCN-NEXT: s_add_u32 s24, s24, s25
+; GCN-NEXT: s_addc_u32 s23, s23, s26
+; GCN-NEXT: s_mul_i32 s25, s17, s23
+; GCN-NEXT: s_mul_hi_u32 s26, s17, s24
+; GCN-NEXT: s_add_i32 s25, s26, s25
+; GCN-NEXT: s_mul_i32 s22, s22, s24
+; GCN-NEXT: s_add_i32 s25, s25, s22
+; GCN-NEXT: s_mul_i32 s17, s17, s24
+; GCN-NEXT: s_mul_hi_u32 s26, s23, s17
+; GCN-NEXT: s_mul_i32 s27, s23, s17
+; GCN-NEXT: s_mul_i32 s29, s24, s25
+; GCN-NEXT: s_mul_hi_u32 s17, s24, s17
+; GCN-NEXT: s_mul_hi_u32 s28, s24, s25
; GCN-NEXT: s_add_u32 s17, s17, s29
; GCN-NEXT: s_addc_u32 s28, 0, s28
-; GCN-NEXT: s_add_u32 s17, s17, s26
-; GCN-NEXT: s_mul_hi_u32 s23, s25, s22
-; GCN-NEXT: s_addc_u32 s17, s28, s24
-; GCN-NEXT: s_addc_u32 s23, s23, 0
-; GCN-NEXT: s_mul_i32 s22, s25, s22
-; GCN-NEXT: s_add_u32 s17, s17, s22
-; GCN-NEXT: s_addc_u32 s24, 0, s23
-; GCN-NEXT: s_add_u32 s17, s27, s17
-; GCN-NEXT: s_cselect_b64 s[22:23], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[22:23], 0
-; GCN-NEXT: s_addc_u32 s26, s25, s24
+; GCN-NEXT: s_add_u32 s17, s17, s27
+; GCN-NEXT: s_mul_hi_u32 s22, s23, s25
+; GCN-NEXT: s_addc_u32 s17, s28, s26
+; GCN-NEXT: s_addc_u32 s22, s22, 0
+; GCN-NEXT: s_mul_i32 s25, s23, s25
+; GCN-NEXT: s_add_u32 s17, s17, s25
+; GCN-NEXT: s_addc_u32 s22, 0, s22
+; GCN-NEXT: s_add_u32 s17, s24, s17
+; GCN-NEXT: s_addc_u32 s26, s23, s22
; GCN-NEXT: s_ashr_i32 s22, s19, 31
; GCN-NEXT: s_add_u32 s24, s18, s22
; GCN-NEXT: s_mov_b32 s23, s22
@@ -5021,11 +4985,9 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: s_mul_i32 s17, s6, s17
; GCN-NEXT: s_sub_u32 s17, s24, s17
; GCN-NEXT: s_cselect_b64 s[26:27], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[26:27], 0
; GCN-NEXT: s_subb_u32 s24, s28, s7
; GCN-NEXT: s_sub_u32 s30, s17, s6
; GCN-NEXT: s_cselect_b64 s[28:29], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[28:29], 0
; GCN-NEXT: s_subb_u32 s31, s24, 0
; GCN-NEXT: s_cmp_ge_u32 s31, s7
; GCN-NEXT: s_cselect_b32 s33, -1, 0
@@ -5035,12 +4997,10 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: s_cselect_b32 s33, s34, s33
; GCN-NEXT: s_cmp_lg_u64 s[28:29], 0
; GCN-NEXT: s_subb_u32 s24, s24, s7
-; GCN-NEXT: s_sub_u32 s34, s30, s6
-; GCN-NEXT: s_cselect_b64 s[28:29], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[28:29], 0
+; GCN-NEXT: s_sub_u32 s28, s30, s6
; GCN-NEXT: s_subb_u32 s24, s24, 0
; GCN-NEXT: s_cmp_lg_u32 s33, 0
-; GCN-NEXT: s_cselect_b32 s28, s34, s30
+; GCN-NEXT: s_cselect_b32 s28, s28, s30
; GCN-NEXT: s_cselect_b32 s24, s24, s31
; GCN-NEXT: s_cmp_lg_u64 s[26:27], 0
; GCN-NEXT: s_subb_u32 s19, s25, s19
@@ -5091,7 +5051,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_cvt_f32_u32_e32 v0, s18
; GCN-NEXT: v_cvt_f32_u32_e32 v1, s19
; GCN-NEXT: s_sub_u32 s13, 0, s18
-; GCN-NEXT: s_subb_u32 s22, 0, s19
+; GCN-NEXT: s_subb_u32 s20, 0, s19
; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0
; GCN-NEXT: v_rcp_f32_e32 v0, v0
; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -5100,56 +5060,52 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0
; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1
; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT: v_readfirstlane_b32 s23, v1
-; GCN-NEXT: v_readfirstlane_b32 s20, v0
-; GCN-NEXT: s_mul_i32 s21, s13, s23
-; GCN-NEXT: s_mul_hi_u32 s25, s13, s20
-; GCN-NEXT: s_mul_i32 s24, s22, s20
-; GCN-NEXT: s_add_i32 s21, s25, s21
-; GCN-NEXT: s_add_i32 s21, s21, s24
-; GCN-NEXT: s_mul_i32 s26, s13, s20
-; GCN-NEXT: s_mul_i32 s25, s20, s21
-; GCN-NEXT: s_mul_hi_u32 s27, s20, s26
-; GCN-NEXT: s_mul_hi_u32 s24, s20, s21
+; GCN-NEXT: v_readfirstlane_b32 s21, v1
+; GCN-NEXT: v_readfirstlane_b32 s22, v0
+; GCN-NEXT: s_mul_i32 s23, s13, s21
+; GCN-NEXT: s_mul_hi_u32 s25, s13, s22
+; GCN-NEXT: s_mul_i32 s24, s20, s22
+; GCN-NEXT: s_add_i32 s23, s25, s23
+; GCN-NEXT: s_add_i32 s23, s23, s24
+; GCN-NEXT: s_mul_i32 s26, s13, s22
+; GCN-NEXT: s_mul_i32 s25, s22, s23
+; GCN-NEXT: s_mul_hi_u32 s27, s22, s26
+; GCN-NEXT: s_mul_hi_u32 s24, s22, s23
; GCN-NEXT: s_add_u32 s25, s27, s25
; GCN-NEXT: s_addc_u32 s24, 0, s24
-; GCN-NEXT: s_mul_hi_u32 s28, s23, s26
-; GCN-NEXT: s_mul_i32 s26, s23, s26
+; GCN-NEXT: s_mul_hi_u32 s28, s21, s26
+; GCN-NEXT: s_mul_i32 s26, s21, s26
; GCN-NEXT: s_add_u32 s25, s25, s26
-; GCN-NEXT: s_mul_hi_u32 s27, s23, s21
+; GCN-NEXT: s_mul_hi_u32 s27, s21, s23
; GCN-NEXT: s_addc_u32 s24, s24, s28
; GCN-NEXT: s_addc_u32 s25, s27, 0
-; GCN-NEXT: s_mul_i32 s21, s23, s21
-; GCN-NEXT: s_add_u32 s21, s24, s21
+; GCN-NEXT: s_mul_i32 s23, s21, s23
+; GCN-NEXT: s_add_u32 s23, s24, s23
; GCN-NEXT: s_addc_u32 s24, 0, s25
-; GCN-NEXT: s_add_u32 s25, s20, s21
-; GCN-NEXT: s_cselect_b64 s[20:21], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[20:21], 0
-; GCN-NEXT: s_addc_u32 s23, s23, s24
-; GCN-NEXT: s_mul_i32 s20, s13, s23
-; GCN-NEXT: s_mul_hi_u32 s21, s13, s25
-; GCN-NEXT: s_add_i32 s20, s21, s20
-; GCN-NEXT: s_mul_i32 s22, s22, s25
-; GCN-NEXT: s_add_i32 s20, s20, s22
-; GCN-NEXT: s_mul_i32 s13, s13, s25
-; GCN-NEXT: s_mul_hi_u32 s22, s23, s13
-; GCN-NEXT: s_mul_i32 s24, s23, s13
-; GCN-NEXT: s_mul_i32 s27, s25, s20
-; GCN-NEXT: s_mul_hi_u32 s13, s25, s13
-; GCN-NEXT: s_mul_hi_u32 s26, s25, s20
+; GCN-NEXT: s_add_u32 s22, s22, s23
+; GCN-NEXT: s_addc_u32 s21, s21, s24
+; GCN-NEXT: s_mul_i32 s23, s13, s21
+; GCN-NEXT: s_mul_hi_u32 s24, s13, s22
+; GCN-NEXT: s_add_i32 s23, s24, s23
+; GCN-NEXT: s_mul_i32 s20, s20, s22
+; GCN-NEXT: s_add_i32 s23, s23, s20
+; GCN-NEXT: s_mul_i32 s13, s13, s22
+; GCN-NEXT: s_mul_hi_u32 s24, s21, s13
+; GCN-NEXT: s_mul_i32 s25, s21, s13
+; GCN-NEXT: s_mul_i32 s27, s22, s23
+; GCN-NEXT: s_mul_hi_u32 s13, s22, s13
+; GCN-NEXT: s_mul_hi_u32 s26, s22, s23
; GCN-NEXT: s_add_u32 s13, s13, s27
; GCN-NEXT: s_addc_u32 s26, 0, s26
-; GCN-NEXT: s_add_u32 s13, s13, s24
-; GCN-NEXT: s_mul_hi_u32 s21, s23, s20
-; GCN-NEXT: s_addc_u32 s13, s26, s22
-; GCN-NEXT: s_addc_u32 s21, s21, 0
-; GCN-NEXT: s_mul_i32 s20, s23, s20
-; GCN-NEXT: s_add_u32 s13, s13, s20
-; GCN-NEXT: s_addc_u32 s22, 0, s21
-; GCN-NEXT: s_add_u32 s13, s25, s13
-; GCN-NEXT: s_cselect_b64 s[20:21], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[20:21], 0
-; GCN-NEXT: s_addc_u32 s24, s23, s22
+; GCN-NEXT: s_add_u32 s13, s13, s25
+; GCN-NEXT: s_mul_hi_u32 s20, s21, s23
+; GCN-NEXT: s_addc_u32 s13, s26, s24
+; GCN-NEXT: s_addc_u32 s20, s20, 0
+; GCN-NEXT: s_mul_i32 s23, s21, s23
+; GCN-NEXT: s_add_u32 s13, s13, s23
+; GCN-NEXT: s_addc_u32 s20, 0, s20
+; GCN-NEXT: s_add_u32 s13, s22, s13
+; GCN-NEXT: s_addc_u32 s24, s21, s20
; GCN-NEXT: s_ashr_i32 s20, s15, 31
; GCN-NEXT: s_add_u32 s22, s14, s20
; GCN-NEXT: s_mov_b32 s21, s20
@@ -5178,11 +5134,9 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: s_mul_i32 s13, s18, s13
; GCN-NEXT: s_sub_u32 s13, s22, s13
; GCN-NEXT: s_cselect_b64 s[24:25], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[24:25], 0
; GCN-NEXT: s_subb_u32 s22, s26, s19
; GCN-NEXT: s_sub_u32 s28, s13, s18
; GCN-NEXT: s_cselect_b64 s[26:27], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[26:27], 0
; GCN-NEXT: s_subb_u32 s29, s22, 0
; GCN-NEXT: s_cmp_ge_u32 s29, s19
; GCN-NEXT: s_cselect_b32 s30, -1, 0
@@ -5192,12 +5146,10 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: s_cselect_b32 s30, s31, s30
; GCN-NEXT: s_cmp_lg_u64 s[26:27], 0
; GCN-NEXT: s_subb_u32 s22, s22, s19
-; GCN-NEXT: s_sub_u32 s31, s28, s18
-; GCN-NEXT: s_cselect_b64 s[26:27], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[26:27], 0
+; GCN-NEXT: s_sub_u32 s26, s28, s18
; GCN-NEXT: s_subb_u32 s22, s22, 0
; GCN-NEXT: s_cmp_lg_u32 s30, 0
-; GCN-NEXT: s_cselect_b32 s26, s31, s28
+; GCN-NEXT: s_cselect_b32 s26, s26, s28
; GCN-NEXT: s_cselect_b32 s22, s22, s29
; GCN-NEXT: s_cmp_lg_u64 s[24:25], 0
; GCN-NEXT: s_subb_u32 s15, s23, s15
@@ -5257,7 +5209,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_cvt_f32_u32_e32 v0, s14
; GCN-NEXT: v_cvt_f32_u32_e32 v1, s15
; GCN-NEXT: s_sub_u32 s9, 0, s14
-; GCN-NEXT: s_subb_u32 s18, 0, s15
+; GCN-NEXT: s_subb_u32 s16, 0, s15
; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0
; GCN-NEXT: v_rcp_f32_e32 v0, v0
; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -5266,56 +5218,52 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0
; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1
; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT: v_readfirstlane_b32 s19, v1
-; GCN-NEXT: v_readfirstlane_b32 s16, v0
-; GCN-NEXT: s_mul_i32 s17, s9, s19
-; GCN-NEXT: s_mul_hi_u32 s21, s9, s16
-; GCN-NEXT: s_mul_i32 s20, s18, s16
-; GCN-NEXT: s_add_i32 s17, s21, s17
-; GCN-NEXT: s_add_i32 s17, s17, s20
-; GCN-NEXT: s_mul_i32 s22, s9, s16
-; GCN-NEXT: s_mul_i32 s21, s16, s17
-; GCN-NEXT: s_mul_hi_u32 s23, s16, s22
-; GCN-NEXT: s_mul_hi_u32 s20, s16, s17
+; GCN-NEXT: v_readfirstlane_b32 s17, v1
+; GCN-NEXT: v_readfirstlane_b32 s18, v0
+; GCN-NEXT: s_mul_i32 s19, s9, s17
+; GCN-NEXT: s_mul_hi_u32 s21, s9, s18
+; GCN-NEXT: s_mul_i32 s20, s16, s18
+; GCN-NEXT: s_add_i32 s19, s21, s19
+; GCN-NEXT: s_add_i32 s19, s19, s20
+; GCN-NEXT: s_mul_i32 s22, s9, s18
+; GCN-NEXT: s_mul_i32 s21, s18, s19
+; GCN-NEXT: s_mul_hi_u32 s23, s18, s22
+; GCN-NEXT: s_mul_hi_u32 s20, s18, s19
; GCN-NEXT: s_add_u32 s21, s23, s21
; GCN-NEXT: s_addc_u32 s20, 0, s20
-; GCN-NEXT: s_mul_hi_u32 s24, s19, s22
-; GCN-NEXT: s_mul_i32 s22, s19, s22
+; GCN-NEXT: s_mul_hi_u32 s24, s17, s22
+; GCN-NEXT: s_mul_i32 s22, s17, s22
; GCN-NEXT: s_add_u32 s21, s21, s22
-; GCN-NEXT: s_mul_hi_u32 s23, s19, s17
+; GCN-NEXT: s_mul_hi_u32 s23, s17, s19
; GCN-NEXT: s_addc_u32 s20, s20, s24
; GCN-NEXT: s_addc_u32 s21, s23, 0
-; GCN-NEXT: s_mul_i32 s17, s19, s17
-; GCN-NEXT: s_add_u32 s17, s20, s17
+; GCN-NEXT: s_mul_i32 s19, s17, s19
+; GCN-NEXT: s_add_u32 s19, s20, s19
; GCN-NEXT: s_addc_u32 s20, 0, s21
-; GCN-NEXT: s_add_u32 s21, s16, s17
-; GCN-NEXT: s_cselect_b64 s[16:17], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[16:17], 0
-; GCN-NEXT: s_addc_u32 s19, s19, s20
-; GCN-NEXT: s_mul_i32 s16, s9, s19
-; GCN-NEXT: s_mul_hi_u32 s17, s9, s21
-; GCN-NEXT: s_add_i32 s16, s17, s16
-; GCN-NEXT: s_mul_i32 s18, s18, s21
-; GCN-NEXT: s_add_i32 s16, s16, s18
-; GCN-NEXT: s_mul_i32 s9, s9, s21
-; GCN-NEXT: s_mul_hi_u32 s18, s19, s9
-; GCN-NEXT: s_mul_i32 s20, s19, s9
-; GCN-NEXT: s_mul_i32 s23, s21, s16
-; GCN-NEXT: s_mul_hi_u32 s9, s21, s9
-; GCN-NEXT: s_mul_hi_u32 s22, s21, s16
+; GCN-NEXT: s_add_u32 s18, s18, s19
+; GCN-NEXT: s_addc_u32 s17, s17, s20
+; GCN-NEXT: s_mul_i32 s19, s9, s17
+; GCN-NEXT: s_mul_hi_u32 s20, s9, s18
+; GCN-NEXT: s_add_i32 s19, s20, s19
+; GCN-NEXT: s_mul_i32 s16, s16, s18
+; GCN-NEXT: s_add_i32 s19, s19, s16
+; GCN-NEXT: s_mul_i32 s9, s9, s18
+; GCN-NEXT: s_mul_hi_u32 s20, s17, s9
+; GCN-NEXT: s_mul_i32 s21, s17, s9
+; GCN-NEXT: s_mul_i32 s23, s18, s19
+; GCN-NEXT: s_mul_hi_u32 s9, s18, s9
+; GCN-NEXT: s_mul_hi_u32 s22, s18, s19
; GCN-NEXT: s_add_u32 s9, s9, s23
; GCN-NEXT: s_addc_u32 s22, 0, s22
-; GCN-NEXT: s_add_u32 s9, s9, s20
-; GCN-NEXT: s_mul_hi_u32 s17, s19, s16
-; GCN-NEXT: s_addc_u32 s9, s22, s18
-; GCN-NEXT: s_addc_u32 s17, s17, 0
-; GCN-NEXT: s_mul_i32 s16, s19, s16
-; GCN-NEXT: s_add_u32 s9, s9, s16
-; GCN-NEXT: s_addc_u32 s18, 0, s17
-; GCN-NEXT: s_add_u32 s9, s21, s9
-; GCN-NEXT: s_cselect_b64 s[16:17], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[16:17], 0
-; GCN-NEXT: s_addc_u32 s20, s19, s18
+; GCN-NEXT: s_add_u32 s9, s9, s21
+; GCN-NEXT: s_mul_hi_u32 s16, s17, s19
+; GCN-NEXT: s_addc_u32 s9, s22, s20
+; GCN-NEXT: s_addc_u32 s16, s16, 0
+; GCN-NEXT: s_mul_i32 s19, s17, s19
+; GCN-NEXT: s_add_u32 s9, s9, s19
+; GCN-NEXT: s_addc_u32 s16, 0, s16
+; GCN-NEXT: s_add_u32 s9, s18, s9
+; GCN-NEXT: s_addc_u32 s20, s17, s16
; GCN-NEXT: s_ashr_i32 s16, s11, 31
; GCN-NEXT: s_add_u32 s18, s10, s16
; GCN-NEXT: s_mov_b32 s17, s16
@@ -5344,11 +5292,9 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: s_mul_i32 s9, s14, s9
; GCN-NEXT: s_sub_u32 s9, s18, s9
; GCN-NEXT: s_cselect_b64 s[20:21], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[20:21], 0
; GCN-NEXT: s_subb_u32 s18, s22, s15
; GCN-NEXT: s_sub_u32 s24, s9, s14
; GCN-NEXT: s_cselect_b64 s[22:23], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[22:23], 0
; GCN-NEXT: s_subb_u32 s25, s18, 0
; GCN-NEXT: s_cmp_ge_u32 s25, s15
; GCN-NEXT: s_cselect_b32 s26, -1, 0
@@ -5358,12 +5304,10 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: s_cselect_b32 s26, s27, s26
; GCN-NEXT: s_cmp_lg_u64 s[22:23], 0
; GCN-NEXT: s_subb_u32 s18, s18, s15
-; GCN-NEXT: s_sub_u32 s27, s24, s14
-; GCN-NEXT: s_cselect_b64 s[22:23], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[22:23], 0
+; GCN-NEXT: s_sub_u32 s22, s24, s14
; GCN-NEXT: s_subb_u32 s18, s18, 0
; GCN-NEXT: s_cmp_lg_u32 s26, 0
-; GCN-NEXT: s_cselect_b32 s22, s27, s24
+; GCN-NEXT: s_cselect_b32 s22, s22, s24
; GCN-NEXT: s_cselect_b32 s18, s18, s25
; GCN-NEXT: s_cmp_lg_u64 s[20:21], 0
; GCN-NEXT: s_subb_u32 s11, s19, s11
@@ -5420,7 +5364,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_cvt_f32_u32_e32 v0, s10
; GCN-NEXT: v_cvt_f32_u32_e32 v1, s11
; GCN-NEXT: s_sub_u32 s3, 0, s10
-; GCN-NEXT: s_subb_u32 s14, 0, s11
+; GCN-NEXT: s_subb_u32 s12, 0, s11
; GCN-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0
; GCN-NEXT: v_rcp_f32_e32 v0, v0
; GCN-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -5429,56 +5373,52 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0
; GCN-NEXT: v_cvt_u32_f32_e32 v1, v1
; GCN-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT: v_readfirstlane_b32 s15, v1
-; GCN-NEXT: v_readfirstlane_b32 s12, v0
-; GCN-NEXT: s_mul_i32 s13, s3, s15
-; GCN-NEXT: s_mul_hi_u32 s17, s3, s12
-; GCN-NEXT: s_mul_i32 s16, s14, s12
-; GCN-NEXT: s_add_i32 s13, s17, s13
-; GCN-NEXT: s_add_i32 s13, s13, s16
-; GCN-NEXT: s_mul_i32 s18, s3, s12
-; GCN-NEXT: s_mul_i32 s17, s12, s13
-; GCN-NEXT: s_mul_hi_u32 s19, s12, s18
-; GCN-NEXT: s_mul_hi_u32 s16, s12, s13
+; GCN-NEXT: v_readfirstlane_b32 s13, v1
+; GCN-NEXT: v_readfirstlane_b32 s14, v0
+; GCN-NEXT: s_mul_i32 s15, s3, s13
+; GCN-NEXT: s_mul_hi_u32 s17, s3, s14
+; GCN-NEXT: s_mul_i32 s16, s12, s14
+; GCN-NEXT: s_add_i32 s15, s17, s15
+; GCN-NEXT: s_add_i32 s15, s15, s16
+; GCN-NEXT: s_mul_i32 s18, s3, s14
+; GCN-NEXT: s_mul_i32 s17, s14, s15
+; GCN-NEXT: s_mul_hi_u32 s19, s14, s18
+; GCN-NEXT: s_mul_hi_u32 s16, s14, s15
; GCN-NEXT: s_add_u32 s17, s19, s17
; GCN-NEXT: s_addc_u32 s16, 0, s16
-; GCN-NEXT: s_mul_hi_u32 s20, s15, s18
-; GCN-NEXT: s_mul_i32 s18, s15, s18
+; GCN-NEXT: s_mul_hi_u32 s20, s13, s18
+; GCN-NEXT: s_mul_i32 s18, s13, s18
; GCN-NEXT: s_add_u32 s17, s17, s18
-; GCN-NEXT: s_mul_hi_u32 s19, s15, s13
+; GCN-NEXT: s_mul_hi_u32 s19, s13, s15
; GCN-NEXT: s_addc_u32 s16, s16, s20
; GCN-NEXT: s_addc_u32 s17, s19, 0
-; GCN-NEXT: s_mul_i32 s13, s15, s13
-; GCN-NEXT: s_add_u32 s13, s16, s13
+; GCN-NEXT: s_mul_i32 s15, s13, s15
+; GCN-NEXT: s_add_u32 s15, s16, s15
; GCN-NEXT: s_addc_u32 s16, 0, s17
-; GCN-NEXT: s_add_u32 s17, s12, s13
-; GCN-NEXT: s_cselect_b64 s[12:13], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[12:13], 0
-; GCN-NEXT: s_addc_u32 s15, s15, s16
-; GCN-NEXT: s_mul_i32 s12, s3, s15
-; GCN-NEXT: s_mul_hi_u32 s13, s3, s17
-; GCN-NEXT: s_add_i32 s12, s13, s12
-; GCN-NEXT: s_mul_i32 s14, s14, s17
-; GCN-NEXT: s_add_i32 s12, s12, s14
-; GCN-NEXT: s_mul_i32 s3, s3, s17
-; GCN-NEXT: s_mul_hi_u32 s14, s15, s3
-; GCN-NEXT: s_mul_i32 s16, s15, s3
-; GCN-NEXT: s_mul_i32 s19, s17, s12
-; GCN-NEXT: s_mul_hi_u32 s3, s17, s3
-; GCN-NEXT: s_mul_hi_u32 s18, s17, s12
+; GCN-NEXT: s_add_u32 s14, s14, s15
+; GCN-NEXT: s_addc_u32 s13, s13, s16
+; GCN-NEXT: s_mul_i32 s15, s3, s13
+; GCN-NEXT: s_mul_hi_u32 s16, s3, s14
+; GCN-NEXT: s_add_i32 s15, s16, s15
+; GCN-NEXT: s_mul_i32 s12, s12, s14
+; GCN-NEXT: s_add_i32 s15, s15, s12
+; GCN-NEXT: s_mul_i32 s3, s3, s14
+; GCN-NEXT: s_mul_hi_u32 s16, s13, s3
+; GCN-NEXT: s_mul_i32 s17, s13, s3
+; GCN-NEXT: s_mul_i32 s19, s14, s15
+; GCN-NEXT: s_mul_hi_u32 s3, s14, s3
+; GCN-NEXT: s_mul_hi_u32 s18, s14, s15
; GCN-NEXT: s_add_u32 s3, s3, s19
; GCN-NEXT: s_addc_u32 s18, 0, s18
-; GCN-NEXT: s_add_u32 s3, s3, s16
-; GCN-NEXT: s_mul_hi_u32 s13, s15, s12
-; GCN-NEXT: s_addc_u32 s3, s18, s14
-; GCN-NEXT: s_addc_u32 s13, s13, 0
-; GCN-NEXT: s_mul_i32 s12, s15, s12
-; GCN-NEXT: s_add_u32 s3, s3, s12
-; GCN-NEXT: s_addc_u32 s14, 0, s13
-; GCN-NEXT: s_add_u32 s3, s17, s3
-; GCN-NEXT: s_cselect_b64 s[12:13], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[12:13], 0
-; GCN-NEXT: s_addc_u32 s16, s15, s14
+; GCN-NEXT: s_add_u32 s3, s3, s17
+; GCN-NEXT: s_mul_hi_u32 s12, s13, s15
+; GCN-NEXT: s_addc_u32 s3, s18, s16
+; GCN-NEXT: s_addc_u32 s12, s12, 0
+; GCN-NEXT: s_mul_i32 s15, s13, s15
+; GCN-NEXT: s_add_u32 s3, s3, s15
+; GCN-NEXT: s_addc_u32 s12, 0, s12
+; GCN-NEXT: s_add_u32 s3, s14, s3
+; GCN-NEXT: s_addc_u32 s16, s13, s12
; GCN-NEXT: s_ashr_i32 s12, s5, 31
; GCN-NEXT: s_add_u32 s14, s4, s12
; GCN-NEXT: s_mov_b32 s13, s12
@@ -5507,11 +5447,9 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: s_mul_i32 s3, s10, s3
; GCN-NEXT: s_sub_u32 s3, s14, s3
; GCN-NEXT: s_cselect_b64 s[16:17], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[16:17], 0
; GCN-NEXT: s_subb_u32 s14, s18, s11
; GCN-NEXT: s_sub_u32 s20, s3, s10
; GCN-NEXT: s_cselect_b64 s[18:19], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[18:19], 0
; GCN-NEXT: s_subb_u32 s21, s14, 0
; GCN-NEXT: s_cmp_ge_u32 s21, s11
; GCN-NEXT: s_cselect_b32 s22, -1, 0
@@ -5521,12 +5459,10 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: s_cselect_b32 s22, s23, s22
; GCN-NEXT: s_cmp_lg_u64 s[18:19], 0
; GCN-NEXT: s_subb_u32 s14, s14, s11
-; GCN-NEXT: s_sub_u32 s23, s20, s10
-; GCN-NEXT: s_cselect_b64 s[18:19], -1, 0
-; GCN-NEXT: s_cmp_lg_u64 s[18:19], 0
+; GCN-NEXT: s_sub_u32 s18, s20, s10
; GCN-NEXT: s_subb_u32 s14, s14, 0
; GCN-NEXT: s_cmp_lg_u32 s22, 0
-; GCN-NEXT: s_cselect_b32 s18, s23, s20
+; GCN-NEXT: s_cselect_b32 s18, s18, s20
; GCN-NEXT: s_cselect_b32 s14, s14, s21
; GCN-NEXT: s_cmp_lg_u64 s[16:17], 0
; GCN-NEXT: s_subb_u32 s5, s15, s5
@@ -6299,11 +6235,9 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_readfirstlane_b32 s14, v8
; TONGA-NEXT: s_sub_u32 s12, s12, s14
; TONGA-NEXT: s_cselect_b64 s[14:15], -1, 0
-; TONGA-NEXT: s_cmp_lg_u64 s[14:15], 0
; TONGA-NEXT: s_subb_u32 s1, s1, s7
; TONGA-NEXT: s_sub_u32 s18, s12, s6
; TONGA-NEXT: s_cselect_b64 s[16:17], -1, 0
-; TONGA-NEXT: s_cmp_lg_u64 s[16:17], 0
; TONGA-NEXT: s_subb_u32 s19, s1, 0
; TONGA-NEXT: s_cmp_ge_u32 s19, s7
; TONGA-NEXT: s_cselect_b32 s20, -1, 0
@@ -6313,12 +6247,10 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: s_cselect_b32 s20, s21, s20
; TONGA-NEXT: s_cmp_lg_u64 s[16:17], 0
; TONGA-NEXT: s_subb_u32 s1, s1, s7
-; TONGA-NEXT: s_sub_u32 s21, s18, s6
-; TONGA-NEXT: s_cselect_b64 s[16:17], -1, 0
-; TONGA-NEXT: s_cmp_lg_u64 s[16:17], 0
+; TONGA-NEXT: s_sub_u32 s16, s18, s6
; TONGA-NEXT: s_subb_u32 s1, s1, 0
; TONGA-NEXT: s_cmp_lg_u32 s20, 0
-; TONGA-NEXT: s_cselect_b32 s16, s21, s18
+; TONGA-NEXT: s_cselect_b32 s16, s16, s18
; TONGA-NEXT: s_cselect_b32 s1, s1, s19
; TONGA-NEXT: s_cmp_lg_u64 s[14:15], 0
; TONGA-NEXT: s_subb_u32 s3, s13, s3
diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index 33b0a5d..ea9bb04 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -51,10 +51,9 @@ define amdgpu_kernel void @s_test_srem(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GCN-NEXT: s_addc_u32 s13, 0, s14
; GCN-NEXT: s_add_u32 s14, s0, s1
; GCN-NEXT: v_mov_b32_e32 v0, s14
-; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: v_mul_hi_u32 v0, s10, v0
+; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_or_b32 s0, s0, s1
-; GCN-NEXT: s_cmp_lg_u32 s0, 0
; GCN-NEXT: s_addc_u32 s12, s12, s13
; GCN-NEXT: s_mul_i32 s0, s10, s12
; GCN-NEXT: v_readfirstlane_b32 s1, v0
@@ -85,7 +84,6 @@ define amdgpu_kernel void @s_test_srem(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GCN-NEXT: s_add_u32 s11, s14, s0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_or_b32 s0, s0, s1
-; GCN-NEXT: s_cmp_lg_u32 s0, 0
; GCN-NEXT: s_addc_u32 s1, s12, s10
; GCN-NEXT: v_mov_b32_e32 v0, s1
; GCN-NEXT: v_mul_hi_u32 v1, s6, v0
@@ -115,46 +113,43 @@ define amdgpu_kernel void @s_test_srem(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GCN-NEXT: v_readfirstlane_b32 s10, v0
; GCN-NEXT: s_add_i32 s5, s10, s5
; GCN-NEXT: s_mul_i32 s10, s9, s4
-; GCN-NEXT: s_add_i32 s10, s5, s10
-; GCN-NEXT: s_sub_i32 s11, s7, s10
+; GCN-NEXT: s_add_i32 s12, s5, s10
+; GCN-NEXT: s_sub_i32 s10, s7, s12
; GCN-NEXT: s_mul_i32 s4, s8, s4
; GCN-NEXT: s_sub_u32 s6, s6, s4
; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0
-; GCN-NEXT: s_or_b32 s12, s4, s5
-; GCN-NEXT: s_cmp_lg_u32 s12, 0
-; GCN-NEXT: s_subb_u32 s11, s11, s9
-; GCN-NEXT: s_sub_u32 s13, s6, s8
-; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0
+; GCN-NEXT: s_or_b32 s11, s4, s5
+; GCN-NEXT: s_subb_u32 s13, s10, s9
+; GCN-NEXT: s_sub_u32 s14, s6, s8
+; GCN-NEXT: s_cselect_b64 s[10:11], -1, 0
+; GCN-NEXT: s_or_b32 s15, s10, s11
+; GCN-NEXT: s_subb_u32 s15, s13, 0
+; GCN-NEXT: s_cmp_ge_u32 s15, s9
+; GCN-NEXT: s_cselect_b32 s16, -1, 0
+; GCN-NEXT: s_cmp_ge_u32 s14, s8
+; GCN-NEXT: s_cselect_b32 s17, -1, 0
+; GCN-NEXT: s_cmp_eq_u32 s15, s9
+; GCN-NEXT: s_cselect_b32 s16, s17, s16
+; GCN-NEXT: s_or_b32 s10, s10, s11
+; GCN-NEXT: s_subb_u32 s13, s13, s9
+; GCN-NEXT: s_sub_u32 s17, s14, s8
+; GCN-NEXT: s_cselect_b64 s[10:11], -1, 0
+; GCN-NEXT: s_or_b32 s10, s10, s11
+; GCN-NEXT: s_subb_u32 s10, s13, 0
+; GCN-NEXT: s_cmp_lg_u32 s16, 0
+; GCN-NEXT: s_cselect_b32 s11, s17, s14
+; GCN-NEXT: s_cselect_b32 s10, s10, s15
; GCN-NEXT: s_or_b32 s4, s4, s5
-; GCN-NEXT: s_cmp_lg_u32 s4, 0
-; GCN-NEXT: s_subb_u32 s14, s11, 0
-; GCN-NEXT: s_cmp_ge_u32 s14, s9
+; GCN-NEXT: s_subb_u32 s4, s7, s12
+; GCN-NEXT: s_cmp_ge_u32 s4, s9
; GCN-NEXT: s_cselect_b32 s5, -1, 0
-; GCN-NEXT: s_cmp_ge_u32 s13, s8
-; GCN-NEXT: s_cselect_b32 s15, -1, 0
-; GCN-NEXT: s_cmp_eq_u32 s14, s9
-; GCN-NEXT: s_cselect_b32 s15, s15, s5
-; GCN-NEXT: s_cmp_lg_u32 s4, 0
-; GCN-NEXT: s_subb_u32 s11, s11, s9
-; GCN-NEXT: s_sub_u32 s16, s13, s8
-; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0
-; GCN-NEXT: s_or_b32 s4, s4, s5
-; GCN-NEXT: s_cmp_lg_u32 s4, 0
-; GCN-NEXT: s_subb_u32 s4, s11, 0
-; GCN-NEXT: s_cmp_lg_u32 s15, 0
-; GCN-NEXT: s_cselect_b32 s5, s16, s13
-; GCN-NEXT: s_cselect_b32 s4, s4, s14
-; GCN-NEXT: s_cmp_lg_u32 s12, 0
-; GCN-NEXT: s_subb_u32 s7, s7, s10
-; GCN-NEXT: s_cmp_ge_u32 s7, s9
-; GCN-NEXT: s_cselect_b32 s10, -1, 0
; GCN-NEXT: s_cmp_ge_u32 s6, s8
-; GCN-NEXT: s_cselect_b32 s8, -1, 0
-; GCN-NEXT: s_cmp_eq_u32 s7, s9
-; GCN-NEXT: s_cselect_b32 s8, s8, s10
-; GCN-NEXT: s_cmp_lg_u32 s8, 0
-; GCN-NEXT: s_cselect_b32 s4, s4, s7
-; GCN-NEXT: s_cselect_b32 s5, s5, s6
+; GCN-NEXT: s_cselect_b32 s7, -1, 0
+; GCN-NEXT: s_cmp_eq_u32 s4, s9
+; GCN-NEXT: s_cselect_b32 s5, s7, s5
+; GCN-NEXT: s_cmp_lg_u32 s5, 0
+; GCN-NEXT: s_cselect_b32 s4, s10, s4
+; GCN-NEXT: s_cselect_b32 s5, s11, s6
; GCN-NEXT: v_mov_b32_e32 v0, s5
; GCN-NEXT: v_mov_b32_e32 v1, s4
; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -187,7 +182,6 @@ define amdgpu_kernel void @s_test_srem(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GCN-IR-NEXT: s_add_u32 s14, s12, 1
; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-IR-NEXT: s_or_b32 s8, s8, s9
-; GCN-IR-NEXT: s_cmp_lg_u32 s8, 0
; GCN-IR-NEXT: s_addc_u32 s8, s13, 0
; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-IR-NEXT: s_sub_i32 s12, 63, s12
@@ -221,7 +215,6 @@ define amdgpu_kernel void @s_test_srem(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GCN-IR-NEXT: s_add_u32 s16, s16, 1
; GCN-IR-NEXT: s_cselect_b64 s[18:19], -1, 0
; GCN-IR-NEXT: s_or_b32 s18, s18, s19
-; GCN-IR-NEXT: s_cmp_lg_u32 s18, 0
; GCN-IR-NEXT: s_addc_u32 s17, s17, 0
; GCN-IR-NEXT: s_cselect_b64 s[18:19], -1, 0
; GCN-IR-NEXT: s_mov_b64 s[10:11], s[4:5]
@@ -1016,10 +1009,9 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
; GCN-NEXT: s_addc_u32 s13, 0, s14
; GCN-NEXT: s_add_u32 s14, s8, s9
; GCN-NEXT: v_mov_b32_e32 v0, s14
-; GCN-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-NEXT: v_mul_hi_u32 v0, s10, v0
+; GCN-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-NEXT: s_or_b32 s8, s8, s9
-; GCN-NEXT: s_cmp_lg_u32 s8, 0
; GCN-NEXT: s_addc_u32 s12, s12, s13
; GCN-NEXT: s_mul_i32 s8, s10, s12
; GCN-NEXT: v_readfirstlane_b32 s9, v0
@@ -1050,7 +1042,6 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
; GCN-NEXT: s_add_u32 s11, s14, s8
; GCN-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-NEXT: s_or_b32 s8, s8, s9
-; GCN-NEXT: s_cmp_lg_u32 s8, 0
; GCN-NEXT: s_addc_u32 s10, s12, s10
; GCN-NEXT: s_ashr_i32 s8, s7, 31
; GCN-NEXT: s_add_u32 s6, s6, s8
@@ -1083,46 +1074,43 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
; GCN-NEXT: v_readfirstlane_b32 s12, v0
; GCN-NEXT: s_add_i32 s11, s12, s11
; GCN-NEXT: s_mul_i32 s12, s5, s10
-; GCN-NEXT: s_add_i32 s12, s11, s12
-; GCN-NEXT: s_sub_i32 s13, s7, s12
+; GCN-NEXT: s_add_i32 s14, s11, s12
+; GCN-NEXT: s_sub_i32 s12, s7, s14
; GCN-NEXT: s_mul_i32 s10, s4, s10
; GCN-NEXT: s_sub_u32 s6, s6, s10
; GCN-NEXT: s_cselect_b64 s[10:11], -1, 0
-; GCN-NEXT: s_or_b32 s14, s10, s11
-; GCN-NEXT: s_cmp_lg_u32 s14, 0
-; GCN-NEXT: s_subb_u32 s13, s13, s5
-; GCN-NEXT: s_sub_u32 s15, s6, s4
-; GCN-NEXT: s_cselect_b64 s[10:11], -1, 0
-; GCN-NEXT: s_or_b32 s10, s10, s11
-; GCN-NEXT: s_cmp_lg_u32 s10, 0
-; GCN-NEXT: s_subb_u32 s16, s13, 0
-; GCN-NEXT: s_cmp_ge_u32 s16, s5
-; GCN-NEXT: s_cselect_b32 s11, -1, 0
-; GCN-NEXT: s_cmp_ge_u32 s15, s4
-; GCN-NEXT: s_cselect_b32 s17, -1, 0
-; GCN-NEXT: s_cmp_eq_u32 s16, s5
-; GCN-NEXT: s_cselect_b32 s17, s17, s11
-; GCN-NEXT: s_cmp_lg_u32 s10, 0
-; GCN-NEXT: s_subb_u32 s13, s13, s5
-; GCN-NEXT: s_sub_u32 s18, s15, s4
-; GCN-NEXT: s_cselect_b64 s[10:11], -1, 0
+; GCN-NEXT: s_or_b32 s13, s10, s11
+; GCN-NEXT: s_subb_u32 s15, s12, s5
+; GCN-NEXT: s_sub_u32 s16, s6, s4
+; GCN-NEXT: s_cselect_b64 s[12:13], -1, 0
+; GCN-NEXT: s_or_b32 s17, s12, s13
+; GCN-NEXT: s_subb_u32 s17, s15, 0
+; GCN-NEXT: s_cmp_ge_u32 s17, s5
+; GCN-NEXT: s_cselect_b32 s18, -1, 0
+; GCN-NEXT: s_cmp_ge_u32 s16, s4
+; GCN-NEXT: s_cselect_b32 s19, -1, 0
+; GCN-NEXT: s_cmp_eq_u32 s17, s5
+; GCN-NEXT: s_cselect_b32 s18, s19, s18
+; GCN-NEXT: s_or_b32 s12, s12, s13
+; GCN-NEXT: s_subb_u32 s15, s15, s5
+; GCN-NEXT: s_sub_u32 s19, s16, s4
+; GCN-NEXT: s_cselect_b64 s[12:13], -1, 0
+; GCN-NEXT: s_or_b32 s12, s12, s13
+; GCN-NEXT: s_subb_u32 s12, s15, 0
+; GCN-NEXT: s_cmp_lg_u32 s18, 0
+; GCN-NEXT: s_cselect_b32 s13, s19, s16
+; GCN-NEXT: s_cselect_b32 s12, s12, s17
; GCN-NEXT: s_or_b32 s10, s10, s11
-; GCN-NEXT: s_cmp_lg_u32 s10, 0
-; GCN-NEXT: s_subb_u32 s10, s13, 0
-; GCN-NEXT: s_cmp_lg_u32 s17, 0
-; GCN-NEXT: s_cselect_b32 s11, s18, s15
-; GCN-NEXT: s_cselect_b32 s10, s10, s16
-; GCN-NEXT: s_cmp_lg_u32 s14, 0
-; GCN-NEXT: s_subb_u32 s7, s7, s12
+; GCN-NEXT: s_subb_u32 s7, s7, s14
; GCN-NEXT: s_cmp_ge_u32 s7, s5
-; GCN-NEXT: s_cselect_b32 s12, -1, 0
+; GCN-NEXT: s_cselect_b32 s10, -1, 0
; GCN-NEXT: s_cmp_ge_u32 s6, s4
; GCN-NEXT: s_cselect_b32 s4, -1, 0
; GCN-NEXT: s_cmp_eq_u32 s7, s5
-; GCN-NEXT: s_cselect_b32 s4, s4, s12
+; GCN-NEXT: s_cselect_b32 s4, s4, s10
; GCN-NEXT: s_cmp_lg_u32 s4, 0
-; GCN-NEXT: s_cselect_b32 s5, s10, s7
-; GCN-NEXT: s_cselect_b32 s4, s11, s6
+; GCN-NEXT: s_cselect_b32 s5, s12, s7
+; GCN-NEXT: s_cselect_b32 s4, s13, s6
; GCN-NEXT: s_xor_b64 s[4:5], s[4:5], s[8:9]
; GCN-NEXT: s_sub_u32 s4, s4, s8
; GCN-NEXT: s_subb_u32 s5, s5, s8
@@ -1170,7 +1158,6 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
; GCN-IR-NEXT: s_add_u32 s16, s14, 1
; GCN-IR-NEXT: s_cselect_b64 s[10:11], -1, 0
; GCN-IR-NEXT: s_or_b32 s10, s10, s11
-; GCN-IR-NEXT: s_cmp_lg_u32 s10, 0
; GCN-IR-NEXT: s_addc_u32 s10, s15, 0
; GCN-IR-NEXT: s_cselect_b64 s[10:11], -1, 0
; GCN-IR-NEXT: s_sub_i32 s14, 63, s14
@@ -1204,7 +1191,6 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
; GCN-IR-NEXT: s_add_u32 s18, s18, 1
; GCN-IR-NEXT: s_cselect_b64 s[20:21], -1, 0
; GCN-IR-NEXT: s_or_b32 s20, s20, s21
-; GCN-IR-NEXT: s_cmp_lg_u32 s20, 0
; GCN-IR-NEXT: s_addc_u32 s19, s19, 0
; GCN-IR-NEXT: s_cselect_b64 s[20:21], -1, 0
; GCN-IR-NEXT: s_mov_b64 s[12:13], s[2:3]
@@ -1369,10 +1355,9 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-NEXT: s_addc_u32 s10, 0, s11
; GCN-NEXT: s_add_u32 s11, s6, s7
; GCN-NEXT: v_mov_b32_e32 v0, s11
-; GCN-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN-NEXT: v_mul_hi_u32 v0, s2, v0
+; GCN-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN-NEXT: s_or_b32 s6, s6, s7
-; GCN-NEXT: s_cmp_lg_u32 s6, 0
; GCN-NEXT: s_addc_u32 s9, s9, s10
; GCN-NEXT: s_mul_i32 s6, s2, s9
; GCN-NEXT: v_readfirstlane_b32 s7, v0
@@ -1403,7 +1388,6 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-NEXT: s_add_u32 s2, s11, s2
; GCN-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN-NEXT: s_or_b32 s6, s6, s7
-; GCN-NEXT: s_cmp_lg_u32 s6, 0
; GCN-NEXT: s_addc_u32 s6, s9, s8
; GCN-NEXT: v_mul_hi_u32 v1, s2, 24
; GCN-NEXT: v_mul_hi_u32 v0, s6, 24
@@ -1418,45 +1402,42 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-NEXT: s_mul_i32 s7, s5, s6
; GCN-NEXT: s_mul_i32 s6, s4, s6
; GCN-NEXT: v_readfirstlane_b32 s8, v0
-; GCN-NEXT: s_add_i32 s8, s8, s7
-; GCN-NEXT: s_sub_i32 s9, 0, s8
-; GCN-NEXT: s_sub_u32 s10, 24, s6
-; GCN-NEXT: s_cselect_b64 s[6:7], -1, 0
-; GCN-NEXT: s_or_b32 s11, s6, s7
-; GCN-NEXT: s_cmp_lg_u32 s11, 0
-; GCN-NEXT: s_subb_u32 s9, s9, s5
-; GCN-NEXT: s_sub_u32 s12, s10, s4
+; GCN-NEXT: s_add_i32 s10, s8, s7
+; GCN-NEXT: s_sub_i32 s8, 0, s10
+; GCN-NEXT: s_sub_u32 s11, 24, s6
; GCN-NEXT: s_cselect_b64 s[6:7], -1, 0
+; GCN-NEXT: s_or_b32 s9, s6, s7
+; GCN-NEXT: s_subb_u32 s12, s8, s5
+; GCN-NEXT: s_sub_u32 s13, s11, s4
+; GCN-NEXT: s_cselect_b64 s[8:9], -1, 0
+; GCN-NEXT: s_or_b32 s14, s8, s9
+; GCN-NEXT: s_subb_u32 s14, s12, 0
+; GCN-NEXT: s_cmp_ge_u32 s14, s5
+; GCN-NEXT: s_cselect_b32 s15, -1, 0
+; GCN-NEXT: s_cmp_ge_u32 s13, s4
+; GCN-NEXT: s_cselect_b32 s16, -1, 0
+; GCN-NEXT: s_cmp_eq_u32 s14, s5
+; GCN-NEXT: s_cselect_b32 s15, s16, s15
+; GCN-NEXT: s_or_b32 s8, s8, s9
+; GCN-NEXT: s_subb_u32 s12, s12, s5
+; GCN-NEXT: s_sub_u32 s16, s13, s4
+; GCN-NEXT: s_cselect_b64 s[8:9], -1, 0
+; GCN-NEXT: s_or_b32 s8, s8, s9
+; GCN-NEXT: s_subb_u32 s8, s12, 0
+; GCN-NEXT: s_cmp_lg_u32 s15, 0
+; GCN-NEXT: s_cselect_b32 s9, s16, s13
+; GCN-NEXT: s_cselect_b32 s8, s8, s14
; GCN-NEXT: s_or_b32 s6, s6, s7
-; GCN-NEXT: s_cmp_lg_u32 s6, 0
-; GCN-NEXT: s_subb_u32 s13, s9, 0
-; GCN-NEXT: s_cmp_ge_u32 s13, s5
+; GCN-NEXT: s_subb_u32 s6, 0, s10
+; GCN-NEXT: s_cmp_ge_u32 s6, s5
; GCN-NEXT: s_cselect_b32 s7, -1, 0
-; GCN-NEXT: s_cmp_ge_u32 s12, s4
-; GCN-NEXT: s_cselect_b32 s14, -1, 0
-; GCN-NEXT: s_cmp_eq_u32 s13, s5
-; GCN-NEXT: s_cselect_b32 s14, s14, s7
-; GCN-NEXT: s_cmp_lg_u32 s6, 0
-; GCN-NEXT: s_subb_u32 s9, s9, s5
-; GCN-NEXT: s_sub_u32 s15, s12, s4
-; GCN-NEXT: s_cselect_b64 s[6:7], -1, 0
-; GCN-NEXT: s_or_b32 s6, s6, s7
-; GCN-NEXT: s_cmp_lg_u32 s6, 0
-; GCN-NEXT: s_subb_u32 s6, s9, 0
-; GCN-NEXT: s_cmp_lg_u32 s14, 0
-; GCN-NEXT: s_cselect_b32 s7, s15, s12
-; GCN-NEXT: s_cselect_b32 s6, s6, s13
-; GCN-NEXT: s_cmp_lg_u32 s11, 0
-; GCN-NEXT: s_subb_u32 s8, 0, s8
-; GCN-NEXT: s_cmp_ge_u32 s8, s5
-; GCN-NEXT: s_cselect_b32 s9, -1, 0
-; GCN-NEXT: s_cmp_ge_u32 s10, s4
+; GCN-NEXT: s_cmp_ge_u32 s11, s4
; GCN-NEXT: s_cselect_b32 s4, -1, 0
-; GCN-NEXT: s_cmp_eq_u32 s8, s5
-; GCN-NEXT: s_cselect_b32 s4, s4, s9
+; GCN-NEXT: s_cmp_eq_u32 s6, s5
+; GCN-NEXT: s_cselect_b32 s4, s4, s7
; GCN-NEXT: s_cmp_lg_u32 s4, 0
-; GCN-NEXT: s_cselect_b32 s4, s6, s8
-; GCN-NEXT: s_cselect_b32 s5, s7, s10
+; GCN-NEXT: s_cselect_b32 s4, s8, s6
+; GCN-NEXT: s_cselect_b32 s5, s9, s11
; GCN-NEXT: v_mov_b32_e32 v0, s5
; GCN-NEXT: v_mov_b32_e32 v1, s4
; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -1489,7 +1470,6 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_add_u32 s8, s2, 1
; GCN-IR-NEXT: s_cselect_b64 s[10:11], -1, 0
; GCN-IR-NEXT: s_or_b32 s9, s10, s11
-; GCN-IR-NEXT: s_cmp_lg_u32 s9, 0
; GCN-IR-NEXT: s_addc_u32 s3, s3, 0
; GCN-IR-NEXT: s_cselect_b64 s[10:11], -1, 0
; GCN-IR-NEXT: s_sub_i32 s2, 63, s2
@@ -1522,7 +1502,6 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_add_u32 s14, s14, 1
; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
; GCN-IR-NEXT: s_or_b32 s16, s16, s17
-; GCN-IR-NEXT: s_cmp_lg_u32 s16, 0
; GCN-IR-NEXT: s_addc_u32 s15, s15, 0
; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
; GCN-IR-NEXT: s_mov_b64 s[8:9], s[6:7]
diff --git a/llvm/test/CodeGen/AMDGPU/uaddo.ll b/llvm/test/CodeGen/AMDGPU/uaddo.ll
index bb5918b2..bdd22f25 100644
--- a/llvm/test/CodeGen/AMDGPU/uaddo.ll
+++ b/llvm/test/CodeGen/AMDGPU/uaddo.ll
@@ -18,7 +18,6 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
; SI-NEXT: s_or_b32 s0, s0, s1
-; SI-NEXT: s_cmp_lg_u32 s0, 0
; SI-NEXT: s_addc_u32 s3, s3, s9
; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
@@ -35,10 +34,8 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: s_add_u32 s2, s2, s4
-; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; VI-NEXT: s_cmp_lg_u64 s[0:1], 0
; VI-NEXT: s_addc_u32 s3, s3, s5
+; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
; VI-NEXT: v_mov_b32_e32 v3, s3
@@ -53,14 +50,12 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_add_u32 s6, s2, s6
-; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0
-; GFX9-NEXT: s_addc_u32 s4, s3, s7
+; GFX9-NEXT: s_add_u32 s4, s2, s6
+; GFX9-NEXT: s_addc_u32 s5, s3, s7
; GFX9-NEXT: s_cselect_b64 s[2:3], -1, 0
; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[2:3]
-; GFX9-NEXT: v_mov_b32_e32 v1, s4
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s6, v0
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s4, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX9-NEXT: s_endpgm
@@ -73,8 +68,6 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_add_u32 s2, s2, s6
-; GFX10-NEXT: s_cselect_b32 s4, -1, 0
-; GFX10-NEXT: s_cmp_lg_u32 s4, 0
; GFX10-NEXT: s_addc_u32 s3, s3, s7
; GFX10-NEXT: s_cselect_b32 s4, -1, 0
; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s4
@@ -91,14 +84,12 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; GFX11-NEXT: v_mov_b32_e32 v2, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_add_u32 s2, s2, s4
-; GFX11-NEXT: s_cselect_b32 s4, -1, 0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_cmp_lg_u32 s4, 0
; GFX11-NEXT: s_addc_u32 s3, s3, s5
; GFX11-NEXT: s_cselect_b32 s4, -1, 0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_u32 v0, s2, s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s3, 0, s2
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_endpgm
@@ -444,7 +435,6 @@ define amdgpu_kernel void @s_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; SI-NEXT: s_add_u32 s4, s4, s6
; SI-NEXT: s_cselect_b64 s[12:13], -1, 0
; SI-NEXT: s_or_b32 s6, s12, s13
-; SI-NEXT: s_cmp_lg_u32 s6, 0
; SI-NEXT: s_addc_u32 s5, s5, s7
; SI-NEXT: s_mov_b32 s8, s0
; SI-NEXT: s_mov_b32 s9, s1
@@ -465,16 +455,14 @@ define amdgpu_kernel void @s_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v2, s2
-; VI-NEXT: s_add_u32 s2, s4, s6
; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: s_add_u32 s0, s4, s6
; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: s_addc_u32 s1, s5, s7
+; VI-NEXT: v_mov_b32_e32 v4, s0
+; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; VI-NEXT: s_cmp_lg_u64 s[0:1], 0
-; VI-NEXT: s_addc_u32 s0, s5, s7
-; VI-NEXT: v_mov_b32_e32 v4, s2
-; VI-NEXT: v_mov_b32_e32 v5, s0
-; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: v_mov_b32_e32 v3, s3
; VI-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
@@ -486,12 +474,10 @@ define amdgpu_kernel void @s_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX9-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_add_u32 s2, s12, s14
-; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT: s_addc_u32 s0, s13, s15
-; GFX9-NEXT: v_mov_b32_e32 v0, s2
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: s_add_u32 s0, s12, s14
+; GFX9-NEXT: s_addc_u32 s1, s13, s15
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[0:1]
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
@@ -504,10 +490,8 @@ define amdgpu_kernel void @s_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_add_u32 s0, s12, s14
-; GFX10-NEXT: s_cselect_b32 s1, -1, 0
-; GFX10-NEXT: v_mov_b32_e32 v0, s0
-; GFX10-NEXT: s_cmp_lg_u32 s1, 0
; GFX10-NEXT: s_addc_u32 s1, s13, s15
+; GFX10-NEXT: v_mov_b32_e32 v0, s0
; GFX10-NEXT: s_cselect_b32 s0, -1, 0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
; GFX10-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0
@@ -520,10 +504,8 @@ define amdgpu_kernel void @s_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX11-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_add_u32 s4, s4, s6
-; GFX11-NEXT: s_cselect_b32 s6, -1, 0
-; GFX11-NEXT: v_mov_b32_e32 v0, s4
-; GFX11-NEXT: s_cmp_lg_u32 s6, 0
; GFX11-NEXT: s_addc_u32 s5, s5, s7
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
; GFX11-NEXT: s_cselect_b32 s4, -1, 0
; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s5
; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index 41199b0..fd461ac 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -148,7 +148,6 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y
; GCN-IR-NEXT: s_add_u32 s14, s12, 1
; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-IR-NEXT: s_or_b32 s8, s8, s9
-; GCN-IR-NEXT: s_cmp_lg_u32 s8, 0
; GCN-IR-NEXT: s_addc_u32 s8, s13, 0
; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-IR-NEXT: s_sub_i32 s12, 63, s12
@@ -182,7 +181,6 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y
; GCN-IR-NEXT: s_add_u32 s10, s10, 1
; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
; GCN-IR-NEXT: s_or_b32 s16, s16, s17
-; GCN-IR-NEXT: s_cmp_lg_u32 s16, 0
; GCN-IR-NEXT: s_addc_u32 s11, s11, 0
; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
; GCN-IR-NEXT: s_mov_b64 s[2:3], s[4:5]
@@ -831,10 +829,9 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-NEXT: s_addc_u32 s10, 0, s11
; GCN-NEXT: s_add_u32 s11, s4, s5
; GCN-NEXT: v_mov_b32_e32 v0, s11
-; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0
; GCN-NEXT: v_mul_hi_u32 v0, s6, v0
+; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0
; GCN-NEXT: s_or_b32 s4, s4, s5
-; GCN-NEXT: s_cmp_lg_u32 s4, 0
; GCN-NEXT: s_addc_u32 s9, s9, s10
; GCN-NEXT: s_mul_i32 s4, s6, s9
; GCN-NEXT: v_readfirstlane_b32 s5, v0
@@ -865,7 +862,6 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-NEXT: s_add_u32 s8, s11, s4
; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0
; GCN-NEXT: s_or_b32 s4, s4, s5
-; GCN-NEXT: s_cmp_lg_u32 s4, 0
; GCN-NEXT: s_addc_u32 s4, s9, s6
; GCN-NEXT: v_mul_hi_u32 v1, s8, 24
; GCN-NEXT: v_mul_hi_u32 v0, s4, 24
@@ -874,52 +870,50 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-NEXT: v_readfirstlane_b32 s8, v1
; GCN-NEXT: v_readfirstlane_b32 s5, v0
; GCN-NEXT: s_add_u32 s4, s8, s4
-; GCN-NEXT: s_addc_u32 s8, 0, s5
-; GCN-NEXT: v_mov_b32_e32 v0, s8
+; GCN-NEXT: s_addc_u32 s10, 0, s5
+; GCN-NEXT: v_mov_b32_e32 v0, s10
; GCN-NEXT: v_mul_hi_u32 v0, s2, v0
; GCN-NEXT: s_mov_b32 s4, s0
; GCN-NEXT: s_mov_b32 s5, s1
-; GCN-NEXT: s_mul_i32 s0, s3, s8
+; GCN-NEXT: s_mul_i32 s0, s3, s10
; GCN-NEXT: v_readfirstlane_b32 s1, v0
-; GCN-NEXT: s_add_i32 s9, s1, s0
-; GCN-NEXT: s_sub_i32 s10, 0, s9
-; GCN-NEXT: s_mul_i32 s0, s2, s8
-; GCN-NEXT: s_sub_u32 s11, 24, s0
-; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
-; GCN-NEXT: s_or_b32 s12, s0, s1
-; GCN-NEXT: s_cmp_lg_u32 s12, 0
-; GCN-NEXT: s_subb_u32 s10, s10, s3
-; GCN-NEXT: s_sub_u32 s13, s11, s2
+; GCN-NEXT: s_add_i32 s11, s1, s0
+; GCN-NEXT: s_sub_i32 s8, 0, s11
+; GCN-NEXT: s_mul_i32 s0, s2, s10
+; GCN-NEXT: s_sub_u32 s12, 24, s0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GCN-NEXT: s_or_b32 s9, s0, s1
+; GCN-NEXT: s_subb_u32 s13, s8, s3
+; GCN-NEXT: s_sub_u32 s14, s12, s2
+; GCN-NEXT: s_cselect_b64 s[8:9], -1, 0
+; GCN-NEXT: s_or_b32 s8, s8, s9
+; GCN-NEXT: s_subb_u32 s8, s13, 0
+; GCN-NEXT: s_cmp_ge_u32 s8, s3
+; GCN-NEXT: s_cselect_b32 s9, -1, 0
+; GCN-NEXT: s_cmp_ge_u32 s14, s2
+; GCN-NEXT: s_cselect_b32 s13, -1, 0
+; GCN-NEXT: s_cmp_eq_u32 s8, s3
+; GCN-NEXT: s_cselect_b32 s8, s13, s9
+; GCN-NEXT: s_add_u32 s9, s10, 1
+; GCN-NEXT: s_addc_u32 s13, 0, 0
+; GCN-NEXT: s_add_u32 s14, s10, 2
+; GCN-NEXT: s_addc_u32 s15, 0, 0
+; GCN-NEXT: s_cmp_lg_u32 s8, 0
+; GCN-NEXT: s_cselect_b32 s8, s14, s9
+; GCN-NEXT: s_cselect_b32 s9, s15, s13
; GCN-NEXT: s_or_b32 s0, s0, s1
-; GCN-NEXT: s_cmp_lg_u32 s0, 0
-; GCN-NEXT: s_subb_u32 s0, s10, 0
+; GCN-NEXT: s_subb_u32 s0, 0, s11
; GCN-NEXT: s_cmp_ge_u32 s0, s3
; GCN-NEXT: s_cselect_b32 s1, -1, 0
-; GCN-NEXT: s_cmp_ge_u32 s13, s2
-; GCN-NEXT: s_cselect_b32 s10, -1, 0
+; GCN-NEXT: s_cmp_ge_u32 s12, s2
+; GCN-NEXT: s_cselect_b32 s2, -1, 0
; GCN-NEXT: s_cmp_eq_u32 s0, s3
-; GCN-NEXT: s_cselect_b32 s0, s10, s1
-; GCN-NEXT: s_add_u32 s1, s8, 1
-; GCN-NEXT: s_addc_u32 s10, 0, 0
-; GCN-NEXT: s_add_u32 s13, s8, 2
-; GCN-NEXT: s_addc_u32 s14, 0, 0
+; GCN-NEXT: s_cselect_b32 s0, s2, s1
; GCN-NEXT: s_cmp_lg_u32 s0, 0
-; GCN-NEXT: s_cselect_b32 s0, s13, s1
-; GCN-NEXT: s_cselect_b32 s1, s14, s10
-; GCN-NEXT: s_cmp_lg_u32 s12, 0
-; GCN-NEXT: s_subb_u32 s9, 0, s9
-; GCN-NEXT: s_cmp_ge_u32 s9, s3
-; GCN-NEXT: s_cselect_b32 s10, -1, 0
-; GCN-NEXT: s_cmp_ge_u32 s11, s2
-; GCN-NEXT: s_cselect_b32 s2, -1, 0
-; GCN-NEXT: s_cmp_eq_u32 s9, s3
-; GCN-NEXT: s_cselect_b32 s2, s2, s10
-; GCN-NEXT: s_cmp_lg_u32 s2, 0
-; GCN-NEXT: s_cselect_b32 s1, s1, 0
-; GCN-NEXT: s_cselect_b32 s0, s0, s8
-; GCN-NEXT: v_mov_b32_e32 v0, s0
-; GCN-NEXT: v_mov_b32_e32 v1, s1
+; GCN-NEXT: s_cselect_b32 s0, s9, 0
+; GCN-NEXT: s_cselect_b32 s1, s8, s10
+; GCN-NEXT: v_mov_b32_e32 v0, s1
+; GCN-NEXT: v_mov_b32_e32 v1, s0
; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GCN-NEXT: s_endpgm
;
@@ -945,7 +939,6 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_add_u32 s10, s8, 1
; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN-IR-NEXT: s_or_b32 s6, s6, s7
-; GCN-IR-NEXT: s_cmp_lg_u32 s6, 0
; GCN-IR-NEXT: s_addc_u32 s6, s9, 0
; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN-IR-NEXT: s_sub_i32 s8, 63, s8
@@ -978,7 +971,6 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_add_u32 s14, s14, 1
; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
; GCN-IR-NEXT: s_or_b32 s16, s16, s17
-; GCN-IR-NEXT: s_cmp_lg_u32 s16, 0
; GCN-IR-NEXT: s_addc_u32 s15, s15, 0
; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
; GCN-IR-NEXT: s_mov_b64 s[8:9], s[4:5]
@@ -1317,7 +1309,6 @@ define amdgpu_kernel void @s_test_udiv_k_den_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_add_u32 s11, s8, 1
; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN-IR-NEXT: s_or_b32 s6, s6, s7
-; GCN-IR-NEXT: s_cmp_lg_u32 s6, 0
; GCN-IR-NEXT: s_addc_u32 s6, s9, 0
; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN-IR-NEXT: s_sub_i32 s8, 63, s8
@@ -1347,7 +1338,6 @@ define amdgpu_kernel void @s_test_udiv_k_den_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_add_u32 s10, s10, 1
; GCN-IR-NEXT: s_cselect_b64 s[12:13], -1, 0
; GCN-IR-NEXT: s_or_b32 s12, s12, s13
-; GCN-IR-NEXT: s_cmp_lg_u32 s12, 0
; GCN-IR-NEXT: s_addc_u32 s11, s11, 0
; GCN-IR-NEXT: s_cselect_b64 s[12:13], -1, 0
; GCN-IR-NEXT: s_mov_b64 s[8:9], s[4:5]
diff --git a/llvm/test/CodeGen/AMDGPU/uitofp.f16.ll b/llvm/test/CodeGen/AMDGPU/uitofp.f16.ll
index 9bcba6c..2d7ce10 100644
--- a/llvm/test/CodeGen/AMDGPU/uitofp.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/uitofp.f16.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=SI %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=VI %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global,+real-true16 -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GFX11-TRUE16 %s
-; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global,-real-true16 -enable-unsafe-fp-math < %s | FileCheck -check-prefixes=GFX11-FAKE16 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefixes=SI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global < %s | FileCheck -check-prefixes=VI %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global,+real-true16 < %s | FileCheck -check-prefixes=GFX11-TRUE16 %s
+; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global,-real-true16 < %s | FileCheck -check-prefixes=GFX11-FAKE16 %s
define amdgpu_kernel void @uitofp_i16_to_f16(
; SI-LABEL: uitofp_i16_to_f16:
diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index cdcc914..137dc1f 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -51,10 +51,9 @@ define amdgpu_kernel void @s_test_urem_i64(ptr addrspace(1) %out, i64 %x, i64 %y
; GCN-NEXT: s_addc_u32 s13, 0, s14
; GCN-NEXT: s_add_u32 s14, s0, s1
; GCN-NEXT: v_mov_b32_e32 v0, s14
-; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: v_mul_hi_u32 v0, s10, v0
+; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_or_b32 s0, s0, s1
-; GCN-NEXT: s_cmp_lg_u32 s0, 0
; GCN-NEXT: s_addc_u32 s12, s12, s13
; GCN-NEXT: s_mul_i32 s0, s10, s12
; GCN-NEXT: v_readfirstlane_b32 s1, v0
@@ -85,7 +84,6 @@ define amdgpu_kernel void @s_test_urem_i64(ptr addrspace(1) %out, i64 %x, i64 %y
; GCN-NEXT: s_add_u32 s11, s14, s0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_or_b32 s0, s0, s1
-; GCN-NEXT: s_cmp_lg_u32 s0, 0
; GCN-NEXT: s_addc_u32 s1, s12, s10
; GCN-NEXT: v_mov_b32_e32 v0, s1
; GCN-NEXT: v_mul_hi_u32 v1, s6, v0
@@ -115,46 +113,43 @@ define amdgpu_kernel void @s_test_urem_i64(ptr addrspace(1) %out, i64 %x, i64 %y
; GCN-NEXT: v_readfirstlane_b32 s10, v0
; GCN-NEXT: s_add_i32 s5, s10, s5
; GCN-NEXT: s_mul_i32 s10, s9, s4
-; GCN-NEXT: s_add_i32 s10, s5, s10
-; GCN-NEXT: s_sub_i32 s11, s7, s10
+; GCN-NEXT: s_add_i32 s12, s5, s10
+; GCN-NEXT: s_sub_i32 s10, s7, s12
; GCN-NEXT: s_mul_i32 s4, s8, s4
; GCN-NEXT: s_sub_u32 s6, s6, s4
; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0
-; GCN-NEXT: s_or_b32 s12, s4, s5
-; GCN-NEXT: s_cmp_lg_u32 s12, 0
-; GCN-NEXT: s_subb_u32 s11, s11, s9
-; GCN-NEXT: s_sub_u32 s13, s6, s8
-; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0
+; GCN-NEXT: s_or_b32 s11, s4, s5
+; GCN-NEXT: s_subb_u32 s13, s10, s9
+; GCN-NEXT: s_sub_u32 s14, s6, s8
+; GCN-NEXT: s_cselect_b64 s[10:11], -1, 0
+; GCN-NEXT: s_or_b32 s15, s10, s11
+; GCN-NEXT: s_subb_u32 s15, s13, 0
+; GCN-NEXT: s_cmp_ge_u32 s15, s9
+; GCN-NEXT: s_cselect_b32 s16, -1, 0
+; GCN-NEXT: s_cmp_ge_u32 s14, s8
+; GCN-NEXT: s_cselect_b32 s17, -1, 0
+; GCN-NEXT: s_cmp_eq_u32 s15, s9
+; GCN-NEXT: s_cselect_b32 s16, s17, s16
+; GCN-NEXT: s_or_b32 s10, s10, s11
+; GCN-NEXT: s_subb_u32 s13, s13, s9
+; GCN-NEXT: s_sub_u32 s17, s14, s8
+; GCN-NEXT: s_cselect_b64 s[10:11], -1, 0
+; GCN-NEXT: s_or_b32 s10, s10, s11
+; GCN-NEXT: s_subb_u32 s10, s13, 0
+; GCN-NEXT: s_cmp_lg_u32 s16, 0
+; GCN-NEXT: s_cselect_b32 s11, s17, s14
+; GCN-NEXT: s_cselect_b32 s10, s10, s15
; GCN-NEXT: s_or_b32 s4, s4, s5
-; GCN-NEXT: s_cmp_lg_u32 s4, 0
-; GCN-NEXT: s_subb_u32 s14, s11, 0
-; GCN-NEXT: s_cmp_ge_u32 s14, s9
+; GCN-NEXT: s_subb_u32 s4, s7, s12
+; GCN-NEXT: s_cmp_ge_u32 s4, s9
; GCN-NEXT: s_cselect_b32 s5, -1, 0
-; GCN-NEXT: s_cmp_ge_u32 s13, s8
-; GCN-NEXT: s_cselect_b32 s15, -1, 0
-; GCN-NEXT: s_cmp_eq_u32 s14, s9
-; GCN-NEXT: s_cselect_b32 s15, s15, s5
-; GCN-NEXT: s_cmp_lg_u32 s4, 0
-; GCN-NEXT: s_subb_u32 s11, s11, s9
-; GCN-NEXT: s_sub_u32 s16, s13, s8
-; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0
-; GCN-NEXT: s_or_b32 s4, s4, s5
-; GCN-NEXT: s_cmp_lg_u32 s4, 0
-; GCN-NEXT: s_subb_u32 s4, s11, 0
-; GCN-NEXT: s_cmp_lg_u32 s15, 0
-; GCN-NEXT: s_cselect_b32 s5, s16, s13
-; GCN-NEXT: s_cselect_b32 s4, s4, s14
-; GCN-NEXT: s_cmp_lg_u32 s12, 0
-; GCN-NEXT: s_subb_u32 s7, s7, s10
-; GCN-NEXT: s_cmp_ge_u32 s7, s9
-; GCN-NEXT: s_cselect_b32 s10, -1, 0
; GCN-NEXT: s_cmp_ge_u32 s6, s8
-; GCN-NEXT: s_cselect_b32 s8, -1, 0
-; GCN-NEXT: s_cmp_eq_u32 s7, s9
-; GCN-NEXT: s_cselect_b32 s8, s8, s10
-; GCN-NEXT: s_cmp_lg_u32 s8, 0
-; GCN-NEXT: s_cselect_b32 s4, s4, s7
-; GCN-NEXT: s_cselect_b32 s5, s5, s6
+; GCN-NEXT: s_cselect_b32 s7, -1, 0
+; GCN-NEXT: s_cmp_eq_u32 s4, s9
+; GCN-NEXT: s_cselect_b32 s5, s7, s5
+; GCN-NEXT: s_cmp_lg_u32 s5, 0
+; GCN-NEXT: s_cselect_b32 s4, s10, s4
+; GCN-NEXT: s_cselect_b32 s5, s11, s6
; GCN-NEXT: v_mov_b32_e32 v0, s5
; GCN-NEXT: v_mov_b32_e32 v1, s4
; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -187,7 +182,6 @@ define amdgpu_kernel void @s_test_urem_i64(ptr addrspace(1) %out, i64 %x, i64 %y
; GCN-IR-NEXT: s_add_u32 s14, s12, 1
; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-IR-NEXT: s_or_b32 s8, s8, s9
-; GCN-IR-NEXT: s_cmp_lg_u32 s8, 0
; GCN-IR-NEXT: s_addc_u32 s8, s13, 0
; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-IR-NEXT: s_sub_i32 s12, 63, s12
@@ -221,7 +215,6 @@ define amdgpu_kernel void @s_test_urem_i64(ptr addrspace(1) %out, i64 %x, i64 %y
; GCN-IR-NEXT: s_add_u32 s16, s16, 1
; GCN-IR-NEXT: s_cselect_b64 s[18:19], -1, 0
; GCN-IR-NEXT: s_or_b32 s18, s18, s19
-; GCN-IR-NEXT: s_cmp_lg_u32 s18, 0
; GCN-IR-NEXT: s_addc_u32 s17, s17, 0
; GCN-IR-NEXT: s_cselect_b64 s[18:19], -1, 0
; GCN-IR-NEXT: s_mov_b64 s[10:11], s[4:5]
@@ -853,10 +846,9 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-NEXT: s_addc_u32 s10, 0, s11
; GCN-NEXT: s_add_u32 s11, s4, s5
; GCN-NEXT: v_mov_b32_e32 v0, s11
-; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0
; GCN-NEXT: v_mul_hi_u32 v0, s6, v0
+; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0
; GCN-NEXT: s_or_b32 s4, s4, s5
-; GCN-NEXT: s_cmp_lg_u32 s4, 0
; GCN-NEXT: s_addc_u32 s9, s9, s10
; GCN-NEXT: s_mul_i32 s4, s6, s9
; GCN-NEXT: v_readfirstlane_b32 s5, v0
@@ -887,7 +879,6 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-NEXT: s_add_u32 s8, s11, s4
; GCN-NEXT: s_cselect_b64 s[4:5], -1, 0
; GCN-NEXT: s_or_b32 s4, s4, s5
-; GCN-NEXT: s_cmp_lg_u32 s4, 0
; GCN-NEXT: s_addc_u32 s4, s9, s6
; GCN-NEXT: v_mul_hi_u32 v1, s8, 24
; GCN-NEXT: v_mul_hi_u32 v0, s4, 24
@@ -903,46 +894,43 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-NEXT: s_mov_b32 s5, s1
; GCN-NEXT: s_mul_i32 s0, s3, s8
; GCN-NEXT: v_readfirstlane_b32 s1, v0
-; GCN-NEXT: s_add_i32 s9, s1, s0
-; GCN-NEXT: s_sub_i32 s10, 0, s9
+; GCN-NEXT: s_add_i32 s10, s1, s0
+; GCN-NEXT: s_sub_i32 s9, 0, s10
; GCN-NEXT: s_mul_i32 s0, s2, s8
-; GCN-NEXT: s_sub_u32 s8, 24, s0
-; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
-; GCN-NEXT: s_or_b32 s11, s0, s1
-; GCN-NEXT: s_cmp_lg_u32 s11, 0
-; GCN-NEXT: s_subb_u32 s10, s10, s3
-; GCN-NEXT: s_sub_u32 s12, s8, s2
+; GCN-NEXT: s_sub_u32 s11, 24, s0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GCN-NEXT: s_or_b32 s8, s0, s1
+; GCN-NEXT: s_subb_u32 s12, s9, s3
+; GCN-NEXT: s_sub_u32 s13, s11, s2
+; GCN-NEXT: s_cselect_b64 s[8:9], -1, 0
+; GCN-NEXT: s_or_b32 s14, s8, s9
+; GCN-NEXT: s_subb_u32 s14, s12, 0
+; GCN-NEXT: s_cmp_ge_u32 s14, s3
+; GCN-NEXT: s_cselect_b32 s15, -1, 0
+; GCN-NEXT: s_cmp_ge_u32 s13, s2
+; GCN-NEXT: s_cselect_b32 s16, -1, 0
+; GCN-NEXT: s_cmp_eq_u32 s14, s3
+; GCN-NEXT: s_cselect_b32 s15, s16, s15
+; GCN-NEXT: s_or_b32 s8, s8, s9
+; GCN-NEXT: s_subb_u32 s12, s12, s3
+; GCN-NEXT: s_sub_u32 s16, s13, s2
+; GCN-NEXT: s_cselect_b64 s[8:9], -1, 0
+; GCN-NEXT: s_or_b32 s8, s8, s9
+; GCN-NEXT: s_subb_u32 s8, s12, 0
+; GCN-NEXT: s_cmp_lg_u32 s15, 0
+; GCN-NEXT: s_cselect_b32 s9, s16, s13
+; GCN-NEXT: s_cselect_b32 s8, s8, s14
; GCN-NEXT: s_or_b32 s0, s0, s1
-; GCN-NEXT: s_cmp_lg_u32 s0, 0
-; GCN-NEXT: s_subb_u32 s13, s10, 0
-; GCN-NEXT: s_cmp_ge_u32 s13, s3
+; GCN-NEXT: s_subb_u32 s0, 0, s10
+; GCN-NEXT: s_cmp_ge_u32 s0, s3
; GCN-NEXT: s_cselect_b32 s1, -1, 0
-; GCN-NEXT: s_cmp_ge_u32 s12, s2
-; GCN-NEXT: s_cselect_b32 s14, -1, 0
-; GCN-NEXT: s_cmp_eq_u32 s13, s3
-; GCN-NEXT: s_cselect_b32 s14, s14, s1
-; GCN-NEXT: s_cmp_lg_u32 s0, 0
-; GCN-NEXT: s_subb_u32 s10, s10, s3
-; GCN-NEXT: s_sub_u32 s15, s12, s2
-; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
-; GCN-NEXT: s_or_b32 s0, s0, s1
-; GCN-NEXT: s_cmp_lg_u32 s0, 0
-; GCN-NEXT: s_subb_u32 s0, s10, 0
-; GCN-NEXT: s_cmp_lg_u32 s14, 0
-; GCN-NEXT: s_cselect_b32 s1, s15, s12
-; GCN-NEXT: s_cselect_b32 s0, s0, s13
-; GCN-NEXT: s_cmp_lg_u32 s11, 0
-; GCN-NEXT: s_subb_u32 s9, 0, s9
-; GCN-NEXT: s_cmp_ge_u32 s9, s3
-; GCN-NEXT: s_cselect_b32 s10, -1, 0
-; GCN-NEXT: s_cmp_ge_u32 s8, s2
+; GCN-NEXT: s_cmp_ge_u32 s11, s2
; GCN-NEXT: s_cselect_b32 s2, -1, 0
-; GCN-NEXT: s_cmp_eq_u32 s9, s3
-; GCN-NEXT: s_cselect_b32 s2, s2, s10
-; GCN-NEXT: s_cmp_lg_u32 s2, 0
-; GCN-NEXT: s_cselect_b32 s0, s0, s9
-; GCN-NEXT: s_cselect_b32 s1, s1, s8
+; GCN-NEXT: s_cmp_eq_u32 s0, s3
+; GCN-NEXT: s_cselect_b32 s1, s2, s1
+; GCN-NEXT: s_cmp_lg_u32 s1, 0
+; GCN-NEXT: s_cselect_b32 s0, s8, s0
+; GCN-NEXT: s_cselect_b32 s1, s9, s11
; GCN-NEXT: v_mov_b32_e32 v0, s1
; GCN-NEXT: v_mov_b32_e32 v1, s0
; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
@@ -970,7 +958,6 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_add_u32 s10, s8, 1
; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN-IR-NEXT: s_or_b32 s6, s6, s7
-; GCN-IR-NEXT: s_cmp_lg_u32 s6, 0
; GCN-IR-NEXT: s_addc_u32 s6, s9, 0
; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN-IR-NEXT: s_sub_i32 s8, 63, s8
@@ -1003,7 +990,6 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_add_u32 s14, s14, 1
; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
; GCN-IR-NEXT: s_or_b32 s16, s16, s17
-; GCN-IR-NEXT: s_cmp_lg_u32 s16, 0
; GCN-IR-NEXT: s_addc_u32 s15, s15, 0
; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
; GCN-IR-NEXT: s_mov_b64 s[8:9], s[4:5]
@@ -1093,7 +1079,6 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_add_u32 s11, s8, 1
; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN-IR-NEXT: s_or_b32 s6, s6, s7
-; GCN-IR-NEXT: s_cmp_lg_u32 s6, 0
; GCN-IR-NEXT: s_addc_u32 s6, s9, 0
; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN-IR-NEXT: s_sub_i32 s8, 63, s8
@@ -1123,7 +1108,6 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_add_u32 s12, s12, 1
; GCN-IR-NEXT: s_cselect_b64 s[14:15], -1, 0
; GCN-IR-NEXT: s_or_b32 s14, s14, s15
-; GCN-IR-NEXT: s_cmp_lg_u32 s14, 0
; GCN-IR-NEXT: s_addc_u32 s13, s13, 0
; GCN-IR-NEXT: s_cselect_b64 s[14:15], -1, 0
; GCN-IR-NEXT: s_mov_b64 s[10:11], s[4:5]
diff --git a/llvm/test/CodeGen/AMDGPU/usubo.ll b/llvm/test/CodeGen/AMDGPU/usubo.ll
index d67a7b1..e8db647 100644
--- a/llvm/test/CodeGen/AMDGPU/usubo.ll
+++ b/llvm/test/CodeGen/AMDGPU/usubo.ll
@@ -18,7 +18,6 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
; SI-NEXT: s_or_b32 s0, s0, s1
-; SI-NEXT: s_cmp_lg_u32 s0, 0
; SI-NEXT: s_subb_u32 s3, s3, s9
; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
@@ -35,10 +34,8 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: s_sub_u32 s2, s2, s4
-; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; VI-NEXT: s_cmp_lg_u64 s[0:1], 0
; VI-NEXT: s_subb_u32 s3, s3, s5
+; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
; VI-NEXT: v_mov_b32_e32 v3, s3
@@ -53,14 +50,12 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_sub_u32 s6, s2, s6
-; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0
-; GFX9-NEXT: s_subb_u32 s4, s3, s7
+; GFX9-NEXT: s_sub_u32 s4, s2, s6
+; GFX9-NEXT: s_subb_u32 s5, s3, s7
; GFX9-NEXT: s_cselect_b64 s[2:3], -1, 0
; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[2:3]
-; GFX9-NEXT: v_mov_b32_e32 v1, s4
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s6, v0
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s4, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX9-NEXT: s_endpgm
@@ -73,8 +68,6 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_sub_u32 s2, s2, s6
-; GFX10-NEXT: s_cselect_b32 s4, -1, 0
-; GFX10-NEXT: s_cmp_lg_u32 s4, 0
; GFX10-NEXT: s_subb_u32 s3, s3, s7
; GFX10-NEXT: s_cselect_b32 s4, -1, 0
; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s4
@@ -91,14 +84,12 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; GFX11-NEXT: v_mov_b32_e32 v2, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_sub_u32 s2, s2, s4
-; GFX11-NEXT: s_cselect_b32 s4, -1, 0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_cmp_lg_u32 s4, 0
; GFX11-NEXT: s_subb_u32 s3, s3, s5
; GFX11-NEXT: s_cselect_b32 s4, -1, 0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s4
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_u32 v0, s2, s2, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s3, 0, s2
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_endpgm
@@ -443,7 +434,6 @@ define amdgpu_kernel void @s_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; SI-NEXT: s_sub_u32 s4, s4, s6
; SI-NEXT: s_cselect_b64 s[12:13], -1, 0
; SI-NEXT: s_or_b32 s6, s12, s13
-; SI-NEXT: s_cmp_lg_u32 s6, 0
; SI-NEXT: s_subb_u32 s5, s5, s7
; SI-NEXT: s_mov_b32 s8, s0
; SI-NEXT: s_mov_b32 s9, s1
@@ -464,16 +454,14 @@ define amdgpu_kernel void @s_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v2, s2
-; VI-NEXT: s_sub_u32 s2, s4, s6
; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: s_sub_u32 s0, s4, s6
; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: s_subb_u32 s1, s5, s7
+; VI-NEXT: v_mov_b32_e32 v4, s0
+; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
-; VI-NEXT: s_cmp_lg_u64 s[0:1], 0
-; VI-NEXT: s_subb_u32 s0, s5, s7
-; VI-NEXT: v_mov_b32_e32 v4, s2
-; VI-NEXT: v_mov_b32_e32 v5, s0
-; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: v_mov_b32_e32 v3, s3
; VI-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
@@ -485,12 +473,10 @@ define amdgpu_kernel void @s_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX9-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_sub_u32 s2, s12, s14
-; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
-; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT: s_subb_u32 s0, s13, s15
-; GFX9-NEXT: v_mov_b32_e32 v0, s2
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: s_sub_u32 s0, s12, s14
+; GFX9-NEXT: s_subb_u32 s1, s13, s15
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[0:1]
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
@@ -503,10 +489,8 @@ define amdgpu_kernel void @s_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_sub_u32 s0, s12, s14
-; GFX10-NEXT: s_cselect_b32 s1, -1, 0
-; GFX10-NEXT: v_mov_b32_e32 v0, s0
-; GFX10-NEXT: s_cmp_lg_u32 s1, 0
; GFX10-NEXT: s_subb_u32 s1, s13, s15
+; GFX10-NEXT: v_mov_b32_e32 v0, s0
; GFX10-NEXT: s_cselect_b32 s0, -1, 0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
; GFX10-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0
@@ -519,10 +503,8 @@ define amdgpu_kernel void @s_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX11-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_sub_u32 s4, s4, s6
-; GFX11-NEXT: s_cselect_b32 s6, -1, 0
-; GFX11-NEXT: v_mov_b32_e32 v0, s4
-; GFX11-NEXT: s_cmp_lg_u32 s6, 0
; GFX11-NEXT: s_subb_u32 s5, s5, s7
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
; GFX11-NEXT: s_cselect_b32 s4, -1, 0
; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s5
; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
diff --git a/llvm/test/CodeGen/AMDGPU/waitcnt-vscnt.ll b/llvm/test/CodeGen/AMDGPU/waitcnt-vscnt.ll
index f3cb5a7..30f5277 100644
--- a/llvm/test/CodeGen/AMDGPU/waitcnt-vscnt.ll
+++ b/llvm/test/CodeGen/AMDGPU/waitcnt-vscnt.ll
@@ -26,17 +26,17 @@ define amdgpu_kernel void @barrier_vmcnt_global(ptr addrspace(1) %arg) {
; GFX9-LABEL: barrier_vmcnt_global:
; GFX9: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; GFX9-NEXT: v_add_u32_e32 v2, 1, v0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: global_load_dword v2, v1, s[0:1]
-; GFX9-NEXT: v_add_u32_e32 v1, 1, v0
-; GFX9-NEXT: v_mov_b32_e32 v0, 0
-; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v3, s1
+; GFX9-NEXT: global_load_dword v3, v1, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2]
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
-; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v3, v1, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v2, v1, vcc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_barrier
-; GFX9-NEXT: global_store_dword v[0:1], v2, off
+; GFX9-NEXT: global_store_dword v[0:1], v3, off
; GFX9-NEXT: s_endpgm
bb:
%tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
@@ -369,10 +369,9 @@ define amdgpu_kernel void @barrier_vmcnt_vscnt_flat_workgroup(ptr %arg) {
; GFX8-NEXT: flat_load_dword v3, v[2:3]
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 1, v0
; GFX8-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2]
-; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v4, v1, vcc
-; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX8-NEXT: s_barrier
; GFX8-NEXT: flat_store_dword v[0:1], v3
; GFX8-NEXT: s_endpgm
@@ -393,10 +392,9 @@ define amdgpu_kernel void @barrier_vmcnt_vscnt_flat_workgroup(ptr %arg) {
; GFX9-NEXT: flat_load_dword v3, v[2:3]
; GFX9-NEXT: v_add_u32_e32 v2, 1, v0
; GFX9-NEXT: v_lshrrev_b64 v[0:1], 30, v[1:2]
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v4, v1, vcc
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_barrier
; GFX9-NEXT: flat_store_dword v[0:1], v3
; GFX9-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/wave32.ll b/llvm/test/CodeGen/AMDGPU/wave32.ll
index 75db387..28c6b40 100644
--- a/llvm/test/CodeGen/AMDGPU/wave32.ll
+++ b/llvm/test/CodeGen/AMDGPU/wave32.ll
@@ -774,44 +774,40 @@ define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 {
; GFX1032-NEXT: s_add_u32 s11, s12, s11
; GFX1032-NEXT: s_addc_u32 s12, 0, s13
; GFX1032-NEXT: s_add_u32 s8, s8, s11
-; GFX1032-NEXT: s_cselect_b32 s11, -1, 0
-; GFX1032-NEXT: s_mul_hi_u32 s13, s9, s8
-; GFX1032-NEXT: s_cmp_lg_u32 s11, 0
-; GFX1032-NEXT: s_mul_i32 s11, s9, s8
; GFX1032-NEXT: s_addc_u32 s5, s5, s12
-; GFX1032-NEXT: s_mul_i32 s10, s10, s8
+; GFX1032-NEXT: s_mul_hi_u32 s11, s9, s8
+; GFX1032-NEXT: s_mul_i32 s12, s9, s8
; GFX1032-NEXT: s_mul_i32 s9, s9, s5
-; GFX1032-NEXT: s_mul_hi_u32 s12, s8, s11
-; GFX1032-NEXT: s_add_i32 s9, s13, s9
-; GFX1032-NEXT: s_mul_hi_u32 s13, s5, s11
+; GFX1032-NEXT: s_mul_i32 s10, s10, s8
+; GFX1032-NEXT: s_add_i32 s9, s11, s9
+; GFX1032-NEXT: s_mul_i32 s11, s5, s12
; GFX1032-NEXT: s_add_i32 s9, s9, s10
-; GFX1032-NEXT: s_mul_i32 s10, s5, s11
+; GFX1032-NEXT: s_mul_hi_u32 s10, s8, s12
; GFX1032-NEXT: s_mul_i32 s15, s8, s9
; GFX1032-NEXT: s_mul_hi_u32 s14, s8, s9
-; GFX1032-NEXT: s_add_u32 s12, s12, s15
+; GFX1032-NEXT: s_add_u32 s10, s10, s15
+; GFX1032-NEXT: s_mul_hi_u32 s13, s5, s12
; GFX1032-NEXT: s_addc_u32 s14, 0, s14
-; GFX1032-NEXT: s_mul_hi_u32 s11, s5, s9
-; GFX1032-NEXT: s_add_u32 s10, s12, s10
+; GFX1032-NEXT: s_mul_hi_u32 s12, s5, s9
+; GFX1032-NEXT: s_add_u32 s10, s10, s11
; GFX1032-NEXT: s_mul_i32 s9, s5, s9
; GFX1032-NEXT: s_addc_u32 s10, s14, s13
-; GFX1032-NEXT: s_addc_u32 s11, s11, 0
+; GFX1032-NEXT: s_addc_u32 s11, s12, 0
; GFX1032-NEXT: s_add_u32 s9, s10, s9
; GFX1032-NEXT: s_addc_u32 s10, 0, s11
; GFX1032-NEXT: s_add_u32 s8, s8, s9
-; GFX1032-NEXT: s_cselect_b32 s9, -1, 0
-; GFX1032-NEXT: s_mul_hi_u32 s11, s2, s8
-; GFX1032-NEXT: s_cmp_lg_u32 s9, 0
-; GFX1032-NEXT: s_mul_hi_u32 s9, s3, s8
; GFX1032-NEXT: s_addc_u32 s5, s5, s10
-; GFX1032-NEXT: s_mul_i32 s8, s3, s8
+; GFX1032-NEXT: s_mul_hi_u32 s9, s2, s8
; GFX1032-NEXT: s_mul_i32 s12, s2, s5
-; GFX1032-NEXT: s_mul_hi_u32 s10, s2, s5
-; GFX1032-NEXT: s_add_u32 s11, s11, s12
-; GFX1032-NEXT: s_addc_u32 s10, 0, s10
+; GFX1032-NEXT: s_mul_hi_u32 s11, s2, s5
+; GFX1032-NEXT: s_mul_hi_u32 s10, s3, s8
+; GFX1032-NEXT: s_mul_i32 s8, s3, s8
+; GFX1032-NEXT: s_add_u32 s9, s9, s12
+; GFX1032-NEXT: s_addc_u32 s11, 0, s11
; GFX1032-NEXT: s_mul_hi_u32 s13, s3, s5
-; GFX1032-NEXT: s_add_u32 s8, s11, s8
+; GFX1032-NEXT: s_add_u32 s8, s9, s8
; GFX1032-NEXT: s_mul_i32 s5, s3, s5
-; GFX1032-NEXT: s_addc_u32 s8, s10, s9
+; GFX1032-NEXT: s_addc_u32 s8, s11, s10
; GFX1032-NEXT: s_addc_u32 s9, s13, 0
; GFX1032-NEXT: s_add_u32 s5, s8, s5
; GFX1032-NEXT: s_addc_u32 s8, 0, s9
@@ -824,11 +820,8 @@ define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 {
; GFX1032-NEXT: s_sub_i32 s11, s3, s9
; GFX1032-NEXT: s_sub_u32 s10, s2, s10
; GFX1032-NEXT: s_cselect_b32 s12, -1, 0
-; GFX1032-NEXT: s_cmp_lg_u32 s12, 0
; GFX1032-NEXT: s_subb_u32 s11, s11, s1
; GFX1032-NEXT: s_sub_u32 s13, s10, s0
-; GFX1032-NEXT: s_cselect_b32 s14, -1, 0
-; GFX1032-NEXT: s_cmp_lg_u32 s14, 0
; GFX1032-NEXT: s_subb_u32 s11, s11, 0
; GFX1032-NEXT: s_cmp_ge_u32 s11, s1
; GFX1032-NEXT: s_cselect_b32 s14, -1, 0
@@ -901,8 +894,8 @@ define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 {
; GFX1064-NEXT: ; %bb.1:
; GFX1064-NEXT: v_cvt_f32_u32_e32 v0, s0
; GFX1064-NEXT: v_cvt_f32_u32_e32 v1, s1
-; GFX1064-NEXT: s_sub_u32 s9, 0, s0
-; GFX1064-NEXT: s_subb_u32 s10, 0, s1
+; GFX1064-NEXT: s_sub_u32 s8, 0, s0
+; GFX1064-NEXT: s_subb_u32 s9, 0, s1
; GFX1064-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0
; GFX1064-NEXT: v_rcp_f32_e32 v0, v0
; GFX1064-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -911,109 +904,102 @@ define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 {
; GFX1064-NEXT: v_madmk_f32 v0, v1, 0xcf800000, v0
; GFX1064-NEXT: v_cvt_u32_f32_e32 v1, v1
; GFX1064-NEXT: v_cvt_u32_f32_e32 v0, v0
-; GFX1064-NEXT: v_readfirstlane_b32 s8, v1
-; GFX1064-NEXT: v_readfirstlane_b32 s4, v0
-; GFX1064-NEXT: s_mul_i32 s5, s9, s8
-; GFX1064-NEXT: s_mul_hi_u32 s12, s9, s4
-; GFX1064-NEXT: s_mul_i32 s11, s10, s4
-; GFX1064-NEXT: s_add_i32 s5, s12, s5
-; GFX1064-NEXT: s_mul_i32 s13, s9, s4
-; GFX1064-NEXT: s_add_i32 s5, s5, s11
-; GFX1064-NEXT: s_mul_hi_u32 s12, s4, s13
-; GFX1064-NEXT: s_mul_i32 s15, s4, s5
-; GFX1064-NEXT: s_mul_hi_u32 s14, s8, s13
-; GFX1064-NEXT: s_mul_i32 s11, s8, s13
-; GFX1064-NEXT: s_mul_hi_u32 s13, s4, s5
+; GFX1064-NEXT: v_readfirstlane_b32 s4, v1
+; GFX1064-NEXT: v_readfirstlane_b32 s5, v0
+; GFX1064-NEXT: s_mul_i32 s10, s8, s4
+; GFX1064-NEXT: s_mul_hi_u32 s12, s8, s5
+; GFX1064-NEXT: s_mul_i32 s11, s9, s5
+; GFX1064-NEXT: s_add_i32 s10, s12, s10
+; GFX1064-NEXT: s_mul_i32 s13, s8, s5
+; GFX1064-NEXT: s_add_i32 s10, s10, s11
+; GFX1064-NEXT: s_mul_hi_u32 s12, s5, s13
+; GFX1064-NEXT: s_mul_i32 s15, s5, s10
+; GFX1064-NEXT: s_mul_hi_u32 s14, s4, s13
+; GFX1064-NEXT: s_mul_i32 s11, s4, s13
+; GFX1064-NEXT: s_mul_hi_u32 s13, s5, s10
; GFX1064-NEXT: s_add_u32 s12, s12, s15
; GFX1064-NEXT: s_addc_u32 s13, 0, s13
-; GFX1064-NEXT: s_mul_hi_u32 s16, s8, s5
+; GFX1064-NEXT: s_mul_hi_u32 s16, s4, s10
; GFX1064-NEXT: s_add_u32 s11, s12, s11
-; GFX1064-NEXT: s_mul_i32 s5, s8, s5
+; GFX1064-NEXT: s_mul_i32 s10, s4, s10
; GFX1064-NEXT: s_addc_u32 s11, s13, s14
; GFX1064-NEXT: s_addc_u32 s12, s16, 0
-; GFX1064-NEXT: s_add_u32 s5, s11, s5
+; GFX1064-NEXT: s_add_u32 s10, s11, s10
; GFX1064-NEXT: s_addc_u32 s11, 0, s12
-; GFX1064-NEXT: s_add_u32 s12, s4, s5
-; GFX1064-NEXT: s_cselect_b64 s[4:5], -1, 0
-; GFX1064-NEXT: s_mul_hi_u32 s13, s9, s12
-; GFX1064-NEXT: s_cmp_lg_u64 s[4:5], 0
-; GFX1064-NEXT: s_mul_i32 s4, s9, s12
-; GFX1064-NEXT: s_addc_u32 s8, s8, s11
-; GFX1064-NEXT: s_mul_i32 s10, s10, s12
-; GFX1064-NEXT: s_mul_i32 s9, s9, s8
-; GFX1064-NEXT: s_mul_hi_u32 s5, s12, s4
-; GFX1064-NEXT: s_add_i32 s9, s13, s9
-; GFX1064-NEXT: s_mul_hi_u32 s11, s8, s4
-; GFX1064-NEXT: s_add_i32 s9, s9, s10
-; GFX1064-NEXT: s_mul_i32 s4, s8, s4
-; GFX1064-NEXT: s_mul_i32 s14, s12, s9
-; GFX1064-NEXT: s_mul_hi_u32 s13, s12, s9
-; GFX1064-NEXT: s_add_u32 s5, s5, s14
+; GFX1064-NEXT: s_add_u32 s5, s5, s10
+; GFX1064-NEXT: s_addc_u32 s4, s4, s11
+; GFX1064-NEXT: s_mul_hi_u32 s10, s8, s5
+; GFX1064-NEXT: s_mul_i32 s11, s8, s5
+; GFX1064-NEXT: s_mul_i32 s8, s8, s4
+; GFX1064-NEXT: s_mul_i32 s9, s9, s5
+; GFX1064-NEXT: s_add_i32 s8, s10, s8
+; GFX1064-NEXT: s_mul_i32 s10, s4, s11
+; GFX1064-NEXT: s_add_i32 s8, s8, s9
+; GFX1064-NEXT: s_mul_hi_u32 s9, s5, s11
+; GFX1064-NEXT: s_mul_i32 s14, s5, s8
+; GFX1064-NEXT: s_mul_hi_u32 s13, s5, s8
+; GFX1064-NEXT: s_add_u32 s9, s9, s14
+; GFX1064-NEXT: s_mul_hi_u32 s12, s4, s11
; GFX1064-NEXT: s_addc_u32 s13, 0, s13
-; GFX1064-NEXT: s_mul_hi_u32 s10, s8, s9
-; GFX1064-NEXT: s_add_u32 s4, s5, s4
-; GFX1064-NEXT: s_mul_i32 s9, s8, s9
-; GFX1064-NEXT: s_addc_u32 s4, s13, s11
-; GFX1064-NEXT: s_addc_u32 s5, s10, 0
-; GFX1064-NEXT: s_add_u32 s4, s4, s9
-; GFX1064-NEXT: s_addc_u32 s9, 0, s5
-; GFX1064-NEXT: s_add_u32 s10, s12, s4
-; GFX1064-NEXT: s_cselect_b64 s[4:5], -1, 0
-; GFX1064-NEXT: s_mul_hi_u32 s11, s2, s10
-; GFX1064-NEXT: s_cmp_lg_u64 s[4:5], 0
-; GFX1064-NEXT: s_mul_hi_u32 s4, s3, s10
-; GFX1064-NEXT: s_addc_u32 s5, s8, s9
-; GFX1064-NEXT: s_mul_i32 s8, s3, s10
-; GFX1064-NEXT: s_mul_i32 s10, s2, s5
-; GFX1064-NEXT: s_mul_hi_u32 s9, s2, s5
-; GFX1064-NEXT: s_add_u32 s10, s11, s10
-; GFX1064-NEXT: s_addc_u32 s9, 0, s9
-; GFX1064-NEXT: s_mul_hi_u32 s12, s3, s5
-; GFX1064-NEXT: s_add_u32 s8, s10, s8
+; GFX1064-NEXT: s_mul_hi_u32 s11, s4, s8
+; GFX1064-NEXT: s_add_u32 s9, s9, s10
+; GFX1064-NEXT: s_mul_i32 s8, s4, s8
+; GFX1064-NEXT: s_addc_u32 s9, s13, s12
+; GFX1064-NEXT: s_addc_u32 s10, s11, 0
+; GFX1064-NEXT: s_add_u32 s8, s9, s8
+; GFX1064-NEXT: s_addc_u32 s9, 0, s10
+; GFX1064-NEXT: s_add_u32 s5, s5, s8
+; GFX1064-NEXT: s_addc_u32 s4, s4, s9
+; GFX1064-NEXT: s_mul_hi_u32 s8, s2, s5
+; GFX1064-NEXT: s_mul_i32 s11, s2, s4
+; GFX1064-NEXT: s_mul_hi_u32 s10, s2, s4
+; GFX1064-NEXT: s_mul_hi_u32 s9, s3, s5
; GFX1064-NEXT: s_mul_i32 s5, s3, s5
-; GFX1064-NEXT: s_addc_u32 s4, s9, s4
+; GFX1064-NEXT: s_add_u32 s8, s8, s11
+; GFX1064-NEXT: s_addc_u32 s10, 0, s10
+; GFX1064-NEXT: s_mul_hi_u32 s12, s3, s4
+; GFX1064-NEXT: s_add_u32 s5, s8, s5
+; GFX1064-NEXT: s_mul_i32 s4, s3, s4
+; GFX1064-NEXT: s_addc_u32 s5, s10, s9
; GFX1064-NEXT: s_addc_u32 s8, s12, 0
-; GFX1064-NEXT: s_add_u32 s10, s4, s5
+; GFX1064-NEXT: s_add_u32 s10, s5, s4
; GFX1064-NEXT: s_addc_u32 s11, 0, s8
; GFX1064-NEXT: s_mul_hi_u32 s4, s0, s10
; GFX1064-NEXT: s_mul_i32 s5, s0, s11
; GFX1064-NEXT: s_mul_i32 s8, s1, s10
; GFX1064-NEXT: s_add_i32 s4, s4, s5
-; GFX1064-NEXT: s_add_i32 s12, s4, s8
+; GFX1064-NEXT: s_add_i32 s8, s4, s8
; GFX1064-NEXT: s_mul_i32 s4, s0, s10
-; GFX1064-NEXT: s_sub_i32 s8, s3, s12
-; GFX1064-NEXT: s_sub_u32 s13, s2, s4
+; GFX1064-NEXT: s_sub_i32 s9, s3, s8
+; GFX1064-NEXT: s_sub_u32 s12, s2, s4
; GFX1064-NEXT: s_cselect_b64 s[4:5], -1, 0
-; GFX1064-NEXT: s_cmp_lg_u64 s[4:5], 0
-; GFX1064-NEXT: s_subb_u32 s14, s8, s1
-; GFX1064-NEXT: s_sub_u32 s15, s13, s0
-; GFX1064-NEXT: s_cselect_b64 s[8:9], -1, 0
-; GFX1064-NEXT: s_cmp_lg_u64 s[8:9], 0
-; GFX1064-NEXT: s_subb_u32 s8, s14, 0
-; GFX1064-NEXT: s_cmp_ge_u32 s8, s1
-; GFX1064-NEXT: s_cselect_b32 s9, -1, 0
-; GFX1064-NEXT: s_cmp_ge_u32 s15, s0
+; GFX1064-NEXT: s_subb_u32 s9, s9, s1
+; GFX1064-NEXT: s_sub_u32 s13, s12, s0
+; GFX1064-NEXT: s_subb_u32 s9, s9, 0
+; GFX1064-NEXT: s_cmp_ge_u32 s9, s1
; GFX1064-NEXT: s_cselect_b32 s14, -1, 0
-; GFX1064-NEXT: s_cmp_eq_u32 s8, s1
-; GFX1064-NEXT: s_cselect_b32 s8, s14, s9
-; GFX1064-NEXT: s_add_u32 s9, s10, 1
+; GFX1064-NEXT: s_cmp_ge_u32 s13, s0
+; GFX1064-NEXT: s_cselect_b32 s13, -1, 0
+; GFX1064-NEXT: s_cmp_eq_u32 s9, s1
+; GFX1064-NEXT: s_cselect_b32 s9, s13, s14
+; GFX1064-NEXT: s_add_u32 s13, s10, 1
; GFX1064-NEXT: s_addc_u32 s14, s11, 0
; GFX1064-NEXT: s_add_u32 s15, s10, 2
; GFX1064-NEXT: s_addc_u32 s16, s11, 0
-; GFX1064-NEXT: s_cmp_lg_u32 s8, 0
-; GFX1064-NEXT: s_cselect_b32 s15, s15, s9
+; GFX1064-NEXT: s_cmp_lg_u32 s9, 0
+; GFX1064-NEXT: s_cselect_b32 s13, s15, s13
; GFX1064-NEXT: s_cselect_b32 s14, s16, s14
; GFX1064-NEXT: s_cmp_lg_u64 s[4:5], 0
-; GFX1064-NEXT: s_subb_u32 s3, s3, s12
+; GFX1064-NEXT: s_subb_u32 s3, s3, s8
; GFX1064-NEXT: s_cmp_ge_u32 s3, s1
; GFX1064-NEXT: s_cselect_b32 s4, -1, 0
-; GFX1064-NEXT: s_cmp_ge_u32 s13, s0
+; GFX1064-NEXT: s_cmp_ge_u32 s12, s0
; GFX1064-NEXT: s_cselect_b32 s5, -1, 0
; GFX1064-NEXT: s_cmp_eq_u32 s3, s1
; GFX1064-NEXT: s_cselect_b32 s1, s5, s4
; GFX1064-NEXT: s_cmp_lg_u32 s1, 0
; GFX1064-NEXT: s_cselect_b32 s5, s14, s11
-; GFX1064-NEXT: s_cselect_b32 s4, s15, s10
+; GFX1064-NEXT: s_cselect_b32 s4, s13, s10
; GFX1064-NEXT: s_cbranch_execnz .LBB15_3
; GFX1064-NEXT: .LBB15_2:
; GFX1064-NEXT: v_cvt_f32_u32_e32 v0, s0
diff --git a/llvm/test/CodeGen/AMDGPU/workitem-intrinsic-opts.ll b/llvm/test/CodeGen/AMDGPU/workitem-intrinsic-opts.ll
index 64d055b..4445383 100644
--- a/llvm/test/CodeGen/AMDGPU/workitem-intrinsic-opts.ll
+++ b/llvm/test/CodeGen/AMDGPU/workitem-intrinsic-opts.ll
@@ -271,7 +271,6 @@ define i1 @workgroup_nonzero() {
; DAGISEL-GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; DAGISEL-GFX8-NEXT: s_or_b32 s4, s12, s13
; DAGISEL-GFX8-NEXT: s_or_b32 s4, s4, s14
-; DAGISEL-GFX8-NEXT: s_cmp_lg_u32 s4, 0
; DAGISEL-GFX8-NEXT: s_cselect_b64 s[4:5], -1, 0
; DAGISEL-GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; DAGISEL-GFX8-NEXT: s_setpc_b64 s[30:31]
@@ -281,7 +280,6 @@ define i1 @workgroup_nonzero() {
; DAGISEL-GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; DAGISEL-GFX942-NEXT: s_or_b32 s0, s12, s13
; DAGISEL-GFX942-NEXT: s_or_b32 s0, s0, s14
-; DAGISEL-GFX942-NEXT: s_cmp_lg_u32 s0, 0
; DAGISEL-GFX942-NEXT: s_cselect_b64 s[0:1], -1, 0
; DAGISEL-GFX942-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; DAGISEL-GFX942-NEXT: s_setpc_b64 s[30:31]
@@ -299,8 +297,6 @@ define i1 @workgroup_nonzero() {
; DAGISEL-GFX12-NEXT: s_or_b32 s0, ttmp9, s0
; DAGISEL-GFX12-NEXT: s_wait_alu 0xfffe
; DAGISEL-GFX12-NEXT: s_or_b32 s0, s0, s1
-; DAGISEL-GFX12-NEXT: s_wait_alu 0xfffe
-; DAGISEL-GFX12-NEXT: s_cmp_lg_u32 s0, 0
; DAGISEL-GFX12-NEXT: s_cselect_b32 s0, -1, 0
; DAGISEL-GFX12-NEXT: s_wait_alu 0xfffe
; DAGISEL-GFX12-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
@@ -311,7 +307,6 @@ define i1 @workgroup_nonzero() {
; GISEL-GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-GFX8-NEXT: s_or_b32 s4, s12, s13
; GISEL-GFX8-NEXT: s_or_b32 s4, s4, s14
-; GISEL-GFX8-NEXT: s_cmp_lg_u32 s4, 0
; GISEL-GFX8-NEXT: s_cselect_b32 s4, 1, 0
; GISEL-GFX8-NEXT: v_mov_b32_e32 v0, s4
; GISEL-GFX8-NEXT: s_setpc_b64 s[30:31]
@@ -321,7 +316,6 @@ define i1 @workgroup_nonzero() {
; GISEL-GFX942-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-GFX942-NEXT: s_or_b32 s0, s12, s13
; GISEL-GFX942-NEXT: s_or_b32 s0, s0, s14
-; GISEL-GFX942-NEXT: s_cmp_lg_u32 s0, 0
; GISEL-GFX942-NEXT: s_cselect_b32 s0, 1, 0
; GISEL-GFX942-NEXT: v_mov_b32_e32 v0, s0
; GISEL-GFX942-NEXT: s_setpc_b64 s[30:31]
@@ -339,8 +333,6 @@ define i1 @workgroup_nonzero() {
; GISEL-GFX12-NEXT: s_or_b32 s0, ttmp9, s0
; GISEL-GFX12-NEXT: s_wait_alu 0xfffe
; GISEL-GFX12-NEXT: s_or_b32 s0, s0, s1
-; GISEL-GFX12-NEXT: s_wait_alu 0xfffe
-; GISEL-GFX12-NEXT: s_cmp_lg_u32 s0, 0
; GISEL-GFX12-NEXT: s_cselect_b32 s0, 1, 0
; GISEL-GFX12-NEXT: s_wait_alu 0xfffe
; GISEL-GFX12-NEXT: v_mov_b32_e32 v0, s0
diff --git a/llvm/test/CodeGen/ARM/llround-conv.ll b/llvm/test/CodeGen/ARM/llround-conv.ll
index f734db8..20fe272 100644
--- a/llvm/test/CodeGen/ARM/llround-conv.ll
+++ b/llvm/test/CodeGen/ARM/llround-conv.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=armv7-none-eabi -float-abi=soft | FileCheck %s --check-prefixes=CHECK,CHECK-SOFT
; RUN: llc < %s -mtriple=armv7-none-eabihf -mattr=+vfp2 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-NOFP16
+; RUN: llc < %s -mtriple=armv8-none-eabihf -mattr=+fp-armv8 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-FPv8
; RUN: llc < %s -mtriple=armv8-none-eabihf -mattr=+fp-armv8,+fullfp16 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-FP16
define i64 @testmsxh_builtin(half %x) {
@@ -22,6 +23,14 @@ define i64 @testmsxh_builtin(half %x) {
; CHECK-NOFP16-NEXT: bl llroundf
; CHECK-NOFP16-NEXT: pop {r11, pc}
;
+; CHECK-FPv8-LABEL: testmsxh_builtin:
+; CHECK-FPv8: @ %bb.0: @ %entry
+; CHECK-FPv8-NEXT: .save {r11, lr}
+; CHECK-FPv8-NEXT: push {r11, lr}
+; CHECK-FPv8-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FPv8-NEXT: bl llroundf
+; CHECK-FPv8-NEXT: pop {r11, pc}
+;
; CHECK-FP16-LABEL: testmsxh_builtin:
; CHECK-FP16: @ %bb.0: @ %entry
; CHECK-FP16-NEXT: .save {r11, lr}
diff --git a/llvm/test/CodeGen/ARM/lround-conv.ll b/llvm/test/CodeGen/ARM/lround-conv.ll
index 03f7a0d..7466bcb 100644
--- a/llvm/test/CodeGen/ARM/lround-conv.ll
+++ b/llvm/test/CodeGen/ARM/lround-conv.ll
@@ -4,11 +4,39 @@
; RUN: llc < %s -mtriple=armv8-none-eabihf -mattr=+fp-armv8 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-FPv8
; RUN: llc < %s -mtriple=armv8-none-eabihf -mattr=+fp-armv8,+fullfp16 -float-abi=hard | FileCheck %s --check-prefixes=CHECK,CHECK-FP16
-;define i32 @testmswh_builtin(half %x) {
-;entry:
-; %0 = tail call i32 @llvm.lround.i32.f16(half %x)
-; ret i32 %0
-;}
+define i32 @testmswh_builtin(half %x) {
+; CHECK-SOFT-LABEL: testmswh_builtin:
+; CHECK-SOFT: @ %bb.0: @ %entry
+; CHECK-SOFT-NEXT: .save {r11, lr}
+; CHECK-SOFT-NEXT: push {r11, lr}
+; CHECK-SOFT-NEXT: bl __aeabi_h2f
+; CHECK-SOFT-NEXT: pop {r11, lr}
+; CHECK-SOFT-NEXT: b lroundf
+;
+; CHECK-NOFP16-LABEL: testmswh_builtin:
+; CHECK-NOFP16: @ %bb.0: @ %entry
+; CHECK-NOFP16-NEXT: .save {r11, lr}
+; CHECK-NOFP16-NEXT: push {r11, lr}
+; CHECK-NOFP16-NEXT: vmov r0, s0
+; CHECK-NOFP16-NEXT: bl __aeabi_h2f
+; CHECK-NOFP16-NEXT: vmov s0, r0
+; CHECK-NOFP16-NEXT: pop {r11, lr}
+; CHECK-NOFP16-NEXT: b lroundf
+;
+; CHECK-FPv8-LABEL: testmswh_builtin:
+; CHECK-FPv8: @ %bb.0: @ %entry
+; CHECK-FPv8-NEXT: vcvtb.f32.f16 s0, s0
+; CHECK-FPv8-NEXT: b lroundf
+;
+; CHECK-FP16-LABEL: testmswh_builtin:
+; CHECK-FP16: @ %bb.0: @ %entry
+; CHECK-FP16-NEXT: vcvta.s32.f16 s0, s0
+; CHECK-FP16-NEXT: vmov r0, s0
+; CHECK-FP16-NEXT: bx lr
+entry:
+ %0 = tail call i32 @llvm.lround.i32.f16(half %x)
+ ret i32 %0
+}
define i32 @testmsws_builtin(float %x) {
; CHECK-LABEL: testmsws_builtin:
@@ -40,8 +68,3 @@ entry:
ret i32 %0
}
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CHECK-FP16: {{.*}}
-; CHECK-FPv8: {{.*}}
-; CHECK-NOFP16: {{.*}}
-; CHECK-SOFT: {{.*}}
diff --git a/llvm/test/CodeGen/BPF/BTF/ptr-named-2.ll b/llvm/test/CodeGen/BPF/BTF/ptr-named-2.ll
new file mode 100644
index 0000000..df0cbeb
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/BTF/ptr-named-2.ll
@@ -0,0 +1,59 @@
+; RUN: llc -mtriple=bpfel -filetype=obj -o %t1 %s
+; RUN: llvm-objcopy --dump-section='.BTF'=%t2 %t1
+; RUN: %python %p/print_btf.py %t2 | FileCheck -check-prefixes=CHECK-BTF %s
+; RUN: llc -mtriple=bpfeb -filetype=obj -o %t1 %s
+; RUN: llvm-objcopy --dump-section='.BTF'=%t2 %t1
+; RUN: %python %p/print_btf.py %t2 | FileCheck -check-prefixes=CHECK-BTF %s
+;
+; This IR is hand-written.
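+; It defines a struct with pointer, volatile, const, and restrict pointer
+; members to exercise BTF emission for these type kinds.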
+
+; ModuleID = 'ptr-named-2.ll'
+source_filename = "ptr-named-2.ll"
+target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
+target triple = "bpfel-unknown-none"
+
+%struct.TypeExamples = type { i32*, i32, i32, i32* }
+
+@type_examples = internal global %struct.TypeExamples zeroinitializer, align 8, !dbg !0
+
+!llvm.dbg.cu = !{!1}
+!llvm.module.flags = !{!2, !3, !4}
+!llvm.ident = !{!21}
+
+; CHECK-BTF: [1] STRUCT 'TypeExamples' size=32 vlen=4
+; CHECK-BTF-NEXT: 'ptr' type_id=2 bits_offset=0
+; CHECK-BTF-NEXT: 'volatile' type_id=4 bits_offset=64
+; CHECK-BTF-NEXT: 'const' type_id=5 bits_offset=128
+; CHECK-BTF-NEXT: 'restrict_ptr' type_id=6 bits_offset=192
+; CHECK-BTF-NEXT: [2] PTR '(anon)' type_id=3
+; CHECK-BTF-NEXT: [3] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
+; CHECK-BTF-NEXT: [4] VOLATILE '(anon)' type_id=3
+; CHECK-BTF-NEXT: [5] CONST '(anon)' type_id=3
+; CHECK-BTF-NEXT: [6] RESTRICT '(anon)' type_id=7
+; CHECK-BTF-NEXT: [7] PTR '(anon)' type_id=3
+; CHECK-BTF-NEXT: [8] VAR 'type_examples' type_id=1, linkage=static
+; CHECK-BTF-NEXT: [9] DATASEC '.bss' size=0 vlen=1
+; CHECK-BTF-NEXT: type_id=8 offset=0 size=24
+
+!0 = !DIGlobalVariableExpression(var: !5, expr: !DIExpression())
+!1 = distinct !DICompileUnit(language: DW_LANG_C99, file: !6, isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !7, globals: !8, splitDebugInlining: false, nameTableKind: None)
+!2 = !{i32 2, !"Dwarf Version", i32 4}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = !{i32 1, !"wchar_size", i32 4}
+!5 = distinct !DIGlobalVariable(name: "type_examples", scope: !1, file: !6, line: 12, type: !9, isLocal: true, isDefinition: true)
+!6 = !DIFile(filename: "ptr-named-2.ll", directory: "/tmp")
+!7 = !{}
+!8 = !{!0}
+!9 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "TypeExamples", file: !6, line: 5, size: 256, elements: !10)
+!10 = !{!11, !12, !13, !14}
+!11 = !DIDerivedType(tag: DW_TAG_member, name: "ptr", scope: !9, file: !6, line: 6, baseType: !15, size: 64)
+!12 = !DIDerivedType(tag: DW_TAG_member, name: "volatile", scope: !9, file: !6, line: 7, baseType: !17, size: 64, offset: 64)
+!13 = !DIDerivedType(tag: DW_TAG_member, name: "const", scope: !9, file: !6, line: 8, baseType: !18, size: 64, offset: 128)
+!14 = !DIDerivedType(tag: DW_TAG_member, name: "restrict_ptr", scope: !9, file: !6, line: 9, baseType: !19, size: 64, offset: 192)
+!15 = !DIDerivedType(tag: DW_TAG_pointer_type, name: "*int", baseType: !16, size: 64)
+!16 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!17 = !DIDerivedType(tag: DW_TAG_volatile_type, name: "volatile int", baseType: !16)
+!18 = !DIDerivedType(tag: DW_TAG_const_type, name: "const int", baseType: !16)
+!19 = !DIDerivedType(tag: DW_TAG_restrict_type, name: "*int restrict", baseType: !20)
+!20 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !16, size: 64)
+!21 = !{!"my hand-written IR"}
diff --git a/llvm/test/CodeGen/BPF/BTF/ptr-named.ll b/llvm/test/CodeGen/BPF/BTF/ptr-named.ll
new file mode 100644
index 0000000..675c34e
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/BTF/ptr-named.ll
@@ -0,0 +1,75 @@
+; RUN: llc -mtriple=bpfel -filetype=obj -o %t1 %s
+; RUN: llvm-objcopy --dump-section='.BTF'=%t2 %t1
+; RUN: %python %p/print_btf.py %t2 | FileCheck -check-prefixes=CHECK-BTF %s
+; RUN: llc -mtriple=bpfeb -filetype=obj -o %t1 %s
+; RUN: llvm-objcopy --dump-section='.BTF'=%t2 %t1
+; RUN: %python %p/print_btf.py %t2 | FileCheck -check-prefixes=CHECK-BTF %s
+;
+; Source:
+; #![no_std]
+; #![no_main]
+;
+; pub struct MyType {
+; ptr: *const u32,
+; }
+;
+; impl MyType {
+; pub const fn new() -> Self {
+; let ptr = core::ptr::null();
+; Self { ptr }
+; }
+; }
+;
+; unsafe impl Sync for MyType {}
+;
+; #[unsafe(no_mangle)]
+; pub static X: MyType = MyType::new();
+;
+; #[cfg(not(test))]
+; #[panic_handler]
+; fn panic(_info: &core::panic::PanicInfo) -> ! {
+; loop {}
+; }
+; Compilation flag:
+; cargo +nightly rustc -Zbuild-std=core --target=bpfel-unknown-none -- --emit=llvm-bc
+; llvm-extract --glob=X $(find target/ -name "*.bc" | head -n 1) -o ptr-named.bc
+; llvm-dis ptr-named.bc -o ptr-named.ll
+
+; ModuleID = 'ptr-named.bc'
+source_filename = "1m2uqe50qkwxmo53ydydvou91"
+target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128"
+target triple = "bpfel"
+
+@X = constant [8 x i8] zeroinitializer, align 8, !dbg !0
+
+!llvm.module.flags = !{!11, !12, !13, !14}
+!llvm.ident = !{!15}
+!llvm.dbg.cu = !{!16}
+
+; CHECK-BTF: [1] STRUCT 'MyType' size=8 vlen=1
+; CHECK-BTF-NEXT: 'ptr' type_id=2 bits_offset=0
+; CHECK-BTF-NEXT: [2] PTR '(anon)' type_id=3
+; CHECK-BTF-NEXT: [3] INT 'u32' size=4 bits_offset=0 nr_bits=32 encoding=(none)
+; CHECK-BTF-NEXT: [4] VAR 'X' type_id=1, linkage=global
+; CHECK-BTF-NEXT: [5] DATASEC '.rodata' size=0 vlen=1
+; CHECK-BTF-NEXT: type_id=4 offset=0 size=8
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "X", scope: !2, file: !3, line: 19, type: !4, isLocal: false, isDefinition: true, align: 64)
+!2 = !DINamespace(name: "ptr_named", scope: null)
+!3 = !DIFile(filename: "ptr-named/src/main.rs", directory: "/tmp/ptr-named", checksumkind: CSK_MD5, checksum: "e37168304600b30cbb5ba168f0384932")
+!4 = !DICompositeType(tag: DW_TAG_structure_type, name: "MyType", scope: !2, file: !5, size: 64, align: 64, flags: DIFlagPublic, elements: !6, templateParams: !10, identifier: "7609fa40332dd486922f074276a171c3")
+!5 = !DIFile(filename: "<unknown>", directory: "")
+!6 = !{!7}
+!7 = !DIDerivedType(tag: DW_TAG_member, name: "ptr", scope: !4, file: !5, baseType: !8, size: 64, align: 64, flags: DIFlagPrivate)
+!8 = !DIDerivedType(tag: DW_TAG_pointer_type, name: "*const u32", baseType: !9, size: 64, align: 64, dwarfAddressSpace: 0)
+!9 = !DIBasicType(name: "u32", size: 32, encoding: DW_ATE_unsigned)
+!10 = !{}
+!11 = !{i32 8, !"PIC Level", i32 2}
+!12 = !{i32 7, !"PIE Level", i32 2}
+!13 = !{i32 7, !"Dwarf Version", i32 4}
+!14 = !{i32 2, !"Debug Info Version", i32 3}
+!15 = !{!"rustc version 1.92.0-nightly (c8905eaa6 2025-09-28)"}
+!16 = distinct !DICompileUnit(language: DW_LANG_Rust, file: !17, producer: "clang LLVM (rustc version 1.92.0-nightly (c8905eaa6 2025-09-28))", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, globals: !18, splitDebugInlining: false, nameTableKind: None)
+!17 = !DIFile(filename: "ptr-named/src/main.rs/@/1m2uqe50qkwxmo53ydydvou91", directory: "/tmp/ptr-named")
+!18 = !{!0}
diff --git a/llvm/test/CodeGen/DirectX/Metadata/resource-symbols.ll b/llvm/test/CodeGen/DirectX/Metadata/resource-symbols.ll
index 4f13f47..56798c8 100644
--- a/llvm/test/CodeGen/DirectX/Metadata/resource-symbols.ll
+++ b/llvm/test/CodeGen/DirectX/Metadata/resource-symbols.ll
@@ -28,6 +28,11 @@ define void @test() {
@llvm.dx.resource.handlefrombinding(i32 0, i32 10, i32 1, i32 0, ptr @SB.str)
; CHECK: %"StructuredBuffer<struct.S>" = type { %struct.S }
+ ; StructuredBuffer<float[3][2]>
+ %struct1 = call target("dx.RawBuffer", [3 x [2 x float]], 0, 0)
+ @llvm.dx.resource.handlefrombinding(i32 0, i32 12, i32 1, i32 0, ptr null)
+ ; CHECK: %"StructuredBuffer<float[3][2]>" = type { [3 x [2 x float]] }
+
; ByteAddressBuffer
%byteaddr = call target("dx.RawBuffer", i8, 0, 0)
@llvm.dx.resource.handlefrombinding(i32 0, i32 20, i32 1, i32 0, ptr null)
@@ -40,12 +45,14 @@ define void @test() {
; CHECK-NEXT: @[[T1:.*]] = external constant %"Buffer<int32_t>"
; CHECK-NEXT: @[[T2:.*]] = external constant %"Buffer<uint32_t3>"
; CHECK-NEXT: @[[S0:.*]] = external constant %"StructuredBuffer<struct.S>"
+; CHECK-NEXT: @[[S1:.*]] = external constant %"StructuredBuffer<float[3][2]>"
; CHECK-NEXT: @[[B0:.*]] = external constant %ByteAddressBuffer
; CHECK: !{i32 0, ptr @[[T0]], !"A"
; CHECK: !{i32 1, ptr @[[T1]], !""
; CHECK: !{i32 2, ptr @[[T2]], !""
; CHECK: !{i32 3, ptr @[[S0]], !"SB"
-; CHECK: !{i32 4, ptr @[[B0]], !""
+; CHECK: !{i32 4, ptr @[[S1]], !""
+; CHECK: !{i32 5, ptr @[[B0]], !""
attributes #0 = { nocallback nofree nosync nounwind willreturn memory(none) }
diff --git a/llvm/test/CodeGen/DirectX/strip-llvm-errno-tbaa.ll b/llvm/test/CodeGen/DirectX/strip-llvm-errno-tbaa.ll
new file mode 100644
index 0000000..9190d03
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/strip-llvm-errno-tbaa.ll
@@ -0,0 +1,19 @@
+; RUN: opt -S -dxil-prepare < %s | FileCheck %s
+
+; Ensures that dxil-prepare removes the llvm.errno.tbaa metadata.
+
+target triple = "dxil-unknown-shadermodel6.0-compute"
+
+define void @main() {
+entry:
+ ret void
+}
+
+; CHECK-NOT: !llvm.errno.tbaa
+; CHECK-NOT: {{^!}}
+
+!llvm.errno.tbaa = !{!0}
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}
diff --git a/llvm/test/CodeGen/Hexagon/bitcast-i64-to-v64i1.ll b/llvm/test/CodeGen/Hexagon/bitcast-i64-to-v64i1.ll
new file mode 100644
index 0000000..f7e5cdb
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/bitcast-i64-to-v64i1.ll
@@ -0,0 +1,33 @@
+; RUN: llc --mtriple=hexagon -mattr=+hvxv79,+hvx-length128b < %s | FileCheck %s
+; CHECK-DAG: r[[REGH:([0-9]+)]]:[[REGL:([0-9]+)]] = combine(##.LCPI0_0,#-1)
+; CHECK-DAG: [[VREG1:v([0-9]+)]] = vmem(r[[REGH]]+#0)
+; CHECK-DAG: [[REG1:(r[0-9]+)]] = memw(r{{[0-9]+}}+#4)
+; CHECK-DAG: [[VREG2:v([0-9]+)]] = vsplat([[REG1]])
+; CHECK-DAG: [[REG2:(r[0-9]+)]] = memw(r{{[0-9]+}}+#0)
+; CHECK-DAG: [[VREG3:v([0-9]+)]] = vsplat([[REG2]])
+; CHECK-DAG: [[VREG4:v([0-9]+)]] = vand([[VREG2]],[[VREG1]])
+; CHECK-DAG: [[VREG5:v([0-9]+)]] = vand([[VREG3]],[[VREG1]])
+; CHECK-DAG: [[QREG:q[0-9]+]] = vand([[VREG4]],r{{[0-9]+}})
+; CHECK-DAG: [[VREG6:v([0-9]+)]] = vand([[QREG]],r{{[0-9]+}})
+; CHECK-DAG: [[QREG1:q[0-9]+]] = vand([[VREG5]],r{{[0-9]+}})
+; CHECK-DAG: [[VREG7:v([0-9]+)]] = vand([[QREG1]],r{{[0-9]+}})
+; CHECK-DAG: v{{[0-9]+}}.b = vpacke(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+; CHECK-DAG: v{{[0-9]+}}.b = vpacke(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+; CHECK-DAG: [[VREG8:v([0-9]+)]] = vror(v{{[0-9]+}},r{{[0-9]+}})
+; CHECK-DAG: [[VREG9:v([0-9]+)]] = vor([[VREG8]],v{{[0-9]+}})
+; CHECK-DAG: q{{[0-9]+}} = vand([[VREG9]],r{{[0-9]+}})
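+; Load an i64, bitcast it to <64 x i1>, and store the two low bits as bytes.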
+define void @bitcast_i64_to_v64i1_full(ptr %in, ptr %out) {
+entry:
+ %load = load i64, ptr %in, align 4
+ %bitcast = bitcast i64 %load to <64 x i1>
+ %e0 = extractelement <64 x i1> %bitcast, i32 0
+ %e1 = extractelement <64 x i1> %bitcast, i32 1
+ %z0 = zext i1 %e0 to i8
+ %z1 = zext i1 %e1 to i8
+ %ptr0 = getelementptr i8, ptr %out, i32 0
+ %ptr1 = getelementptr i8, ptr %out, i32 1
+ store i8 %z0, ptr %ptr0, align 1
+ store i8 %z1, ptr %ptr1, align 1
+ ret void
+}
+
diff --git a/llvm/test/CodeGen/Hexagon/insert-big.ll b/llvm/test/CodeGen/Hexagon/insert-big.ll
new file mode 100644
index 0000000..8735a66
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/insert-big.ll
@@ -0,0 +1,47 @@
+; Check that llc does not abort, as it previously did due to incorrect MIR.
+; RUN: llc -O2 -mtriple=hexagon -insert-max-ifmap=1 < %s
+; RUN: llc -O2 -mtriple=hexagon -insert-max-ifmap=2 < %s
+; RUN: llc -O2 -mtriple=hexagon -insert-max-ifmap=3 < %s
+; RUN: llc -O2 -mtriple=hexagon -insert-max-ifmap=4 < %s
+; RUN: llc -O2 -mtriple=hexagon -insert-max-ifmap=5 < %s
+
+; Look for this symptom, in case llc does not check invalid IR.
+; CHECK-NOT: insert(%14,%5,#5,#5)
+
+; RUN: llc -O2 -mtriple=hexagon -insert-max-ifmap=1 -debug-only=hexinsert -stop-after hexinsert < %s 2>&1 | FileCheck %s
+; RUN: llc -O2 -mtriple=hexagon -insert-max-ifmap=2 -debug-only=hexinsert -stop-after hexinsert < %s 2>&1 | FileCheck %s
+; RUN: llc -O2 -mtriple=hexagon -insert-max-ifmap=3 -debug-only=hexinsert -stop-after hexinsert < %s 2>&1 | FileCheck %s
+; RUN: llc -O2 -mtriple=hexagon -insert-max-ifmap=4 -debug-only=hexinsert -stop-after hexinsert < %s 2>&1 | FileCheck %s
+; RUN: llc -O2 -mtriple=hexagon -insert-max-ifmap=5 -debug-only=hexinsert -stop-after hexinsert < %s 2>&1 | FileCheck %s
+
+; REQUIRES: asserts
+
+define i32 @f(i32 %0, i32 %1, i32 %2) {
+entry:
+ switch i32 %0, label %common.ret1 [
+ i32 8907, label %3
+ i32 4115, label %6
+ ]
+
+common.ret1:
+ %common.ret1.op = phi i32 [ %5, %3 ], [ %526, %6 ], [ 0, %entry ]
+ ret i32 %common.ret1.op
+
+3:
+ %4 = shl i32 %2, 5
+ %5 = and i32 %4, 992
+ br label %common.ret1
+
+6:
+ %7 = shl i32 %0, 10
+ %8 = and i32 %7, 7168
+ %9 = shl i32 %0, 5
+ %10 = and i32 %9, 992
+ %11 = or i32 %10, %8
+ %12 = and i32 %0, 1
+ %13 = or i32 %11, %12
+ %14 = shl i32 %1, 1
+ %15 = and i32 %14, 2031616
+ %526 = or i32 %13, %15
+ br label %common.ret1
+}
diff --git a/llvm/test/CodeGen/Hexagon/qfp-conv.ll b/llvm/test/CodeGen/Hexagon/qfp-conv.ll
new file mode 100644
index 0000000..d2d393e
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/qfp-conv.ll
@@ -0,0 +1,35 @@
+; RUN: llc -mtriple=hexagon -mattr=+hvxv68,+hvx,+hvx-length128b < %s | FileCheck %s
+
+; Test that the Qfloat optimization pass doesn't crash due to invalid
+; instructions.
+
+; CHECK: v{{[0-9]+}}.hf = v{{[0-9]:[0-9]}}.qf32
+
+define void @test(
+ <32 x i32>* %optr,
+ <64 x i32> %in64,
+ <32 x i32> %va,
+ <32 x i32> %vb
+) local_unnamed_addr #0 {
+entry:
+ br label %for.body
+
+for.body:
+ %optr.068 = phi <32 x i32>* [ %optr, %entry ], [ %incdec.ptr6, %for.body ]
+ %0 = tail call <32 x i32> @llvm.hexagon.V6.vconv.hf.qf32.128B(<64 x i32> %in64) #2
+ %1 = tail call <32 x i32> @llvm.hexagon.V6.vdealh.128B(<32 x i32> %0) #2
+ %2 = tail call <128 x i1> @llvm.hexagon.V6.vgth.128B(<32 x i32> %va, <32 x i32> %1) #2
+ %3 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1> %2, <32 x i32> %va, <32 x i32> %vb) #2
+ %4 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> %3, <32 x i32> %vb) #2
+ %5 = tail call <32 x i32> @llvm.hexagon.V6.vpackhub.sat.128B(<32 x i32> %va, <32 x i32> %4) #2
+ store <32 x i32> %5, <32 x i32>* %optr.068, align 1
+ %incdec.ptr6 = getelementptr inbounds <32 x i32>, <32 x i32>* %optr.068, i32 1
+ br label %for.body
+}
+
+declare <32 x i32> @llvm.hexagon.V6.vdealh.128B(<32 x i32>) #1
+declare <32 x i32> @llvm.hexagon.V6.vconv.hf.qf32.128B(<64 x i32>) #1
+declare <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32>, <32 x i32>) #1
+declare <32 x i32> @llvm.hexagon.V6.vpackhub.sat.128B(<32 x i32>, <32 x i32>) #1
+declare <128 x i1> @llvm.hexagon.V6.vgth.128B(<32 x i32>, <32 x i32>) #1
+declare <32 x i32> @llvm.hexagon.V6.vmux.128B(<128 x i1>, <32 x i32>, <32 x i32>) #1
diff --git a/llvm/test/CodeGen/Hexagon/qfp-enabled.ll b/llvm/test/CodeGen/Hexagon/qfp-enabled.ll
new file mode 100644
index 0000000..a5cc5fa
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/qfp-enabled.ll
@@ -0,0 +1,19 @@
+; Tests that the -disable-qfp-opt flag disables the QFP optimizer pass.
+
+; RUN: llc -march=hexagon -mcpu=hexagonv69 -mattr=+hvxv69,+hvx-length128b \
+; RUN:   < %s -o - | FileCheck %s --check-prefix=ENABLED
+; RUN: llc -march=hexagon -mcpu=hexagonv69 -mattr=+hvxv69,+hvx-length128b \
+; RUN:   -disable-qfp-opt < %s -o - | FileCheck %s --check-prefix=DISABLED
+
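+; With the pass disabled, the checks expect the qf32 sum to be converted back
+; to sf before feeding the second add; with it enabled, that conversion is
+; expected to be folded away so the second add consumes the qf32 value
+; directly (the mix form).
+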
+define dso_local <32 x i32> @conv1_qf32(<32 x i32> noundef %input1, <32 x i32> noundef %input2) local_unnamed_addr {
+entry:
+; DISABLED: [[V2:v[0-9]+]].qf32 = vadd(v0.sf,v1.sf)
+; DISABLED: [[V3:v[0-9]+]].sf = [[V2]].qf32
+; DISABLED: qf32 = vadd(v0.sf,[[V3]].sf)
+; ENABLED: [[V4:v[0-9]+]].qf32 = vadd(v0.sf,v1.sf)
+; ENABLED: qf32 = vadd([[V4]].qf32,v0.sf)
+ %0 = tail call <32 x i32> @llvm.hexagon.V6.vadd.sf.128B(<32 x i32> %input1, <32 x i32> %input2)
+ %1 = tail call <32 x i32> @llvm.hexagon.V6.vconv.sf.qf32.128B(<32 x i32> %0)
+ %2 = tail call <32 x i32> @llvm.hexagon.V6.vadd.sf.128B(<32 x i32> %input1, <32 x i32> %1)
+ ret <32 x i32> %2
+}
diff --git a/llvm/test/CodeGen/Hexagon/qfp-remove-kill.mir b/llvm/test/CodeGen/Hexagon/qfp-remove-kill.mir
new file mode 100644
index 0000000..d8dde7d
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/qfp-remove-kill.mir
@@ -0,0 +1,95 @@
+# RUN: llc -march=hexagon -mcpu=hexagonv68 -mattr=+hvxv68,+hvx-length128b \
+# RUN: -run-pass hexagon-qfp-optimizer -run-pass machineverifier %s -o - | FileCheck %s
+
+# Test that the killed RegState is removed from the DefMI operands;
+# the killed RegState should instead be set on the new MI's operands.
+# CHECK-LABEL: name: qfpAdd
+# CHECK: %{{[0-9]+}}:hvxvr = V6_vconv_sf_qf32 %[[REG1:([0-9]+)]]
+# CHECK-NEXT: %{{[0-9]+}}:hvxvr = V6_vconv_sf_qf32 %[[REG2:([0-9]+)]]
+# CHECK-NEXT: V6_vadd_qf32 killed %[[REG1]], killed %[[REG2]]
+# CHECK-NEXT: %{{[0-9]+}}:hvxvr = V6_vconv_sf_qf32 %[[REG3:([0-9]+)]]
+# CHECK-NEXT: %{{[0-9]+}}:hvxvr = V6_vconv_sf_qf32 %[[REG4:([0-9]+)]]
+# CHECK-NEXT: V6_vadd_qf32 killed %[[REG3]], killed %[[REG4]]
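+#
+# For example, %4 and %5 are killed at their vconv uses in the input; once the
+# new V6_vadd_qf32 uses %4 and %5 directly, those kill flags must move from
+# the conversions to the add, which is what the checks above verify.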
+
+---
+name: qfpAdd
+tracksRegLiveness: true
+
+body: |
+ bb.0:
+ liveins: $r0, $r1, $r2, $r3
+ %0:intregs = COPY $r0
+ %1:intregs = COPY $r1
+ %2:intregs = COPY $r2
+ %3:intregs = COPY $r3
+ %4:hvxvr = V6_vL32Ub_ai %0:intregs, 0
+ %5:hvxvr = V6_vL32Ub_ai %1:intregs, 0
+ %6:hvxvr = V6_vL32Ub_ai %2:intregs, 0
+ %7:hvxvr = V6_vL32Ub_ai %3:intregs, 0
+ %8:hvxvr = V6_vconv_sf_qf32 killed %4:hvxvr
+ %9:hvxvr = V6_vconv_sf_qf32 killed %5:hvxvr
+ %10:hvxvr = V6_vadd_sf %8:hvxvr, %9:hvxvr
+ %11:hvxvr = V6_vconv_sf_qf32 killed %6:hvxvr
+ %12:hvxvr = V6_vconv_sf_qf32 killed %7:hvxvr
+ %13:hvxvr = V6_vadd_sf killed %11:hvxvr, killed %12:hvxvr
+...
+
+
+# Test that the killed RegState is removed from the DefMI operands.
+# CHECK-LABEL: name: qfpAddMix
+# CHECK: %{{[0-9]+}}:hvxvr = V6_vconv_sf_qf32 %[[REG1:([0-9]+)]]
+# CHECK-NEXT: V6_vadd_qf32_mix killed %[[REG1]], %{{[0-9]+}}
+# CHECK: %{{[0-9]+}}:hvxvr = V6_vconv_sf_qf32 %[[REG2:([0-9]+)]]
+# CHECK-NEXT: V6_vadd_qf32_mix killed %[[REG2]], %{{[0-9]+}}
+
+---
+name: qfpAddMix
+tracksRegLiveness: true
+
+body: |
+ bb.0:
+ liveins: $r0, $r1, $r2
+ %0:intregs = COPY $r0
+ %1:intregs = COPY $r1
+ %2:intregs = COPY $r2
+ %3:hvxvr = V6_vL32Ub_ai %0:intregs, 0
+ %4:hvxvr = V6_vL32Ub_ai %1:intregs, 0
+ %5:hvxvr = V6_vL32Ub_ai %2:intregs, 0
+ %6:hvxvr = V6_vmpy_qf32_sf %4, %5
+ %7:hvxvr = V6_vconv_sf_qf32 killed %6:hvxvr
+ %8:hvxvr = V6_vadd_sf %3:hvxvr, %7:hvxvr
+ %9:hvxvr = V6_vmpy_qf32_sf %4, %5
+ %10:hvxvr = V6_vconv_sf_qf32 killed %9:hvxvr
+ %11:hvxvr = V6_vadd_sf %3:hvxvr, killed %10:hvxvr
+...
+
+
+# Test that V6_vadd_qf32_mix is generated when the qf32-to-sf conversion is
+# the first operand of the add, and that the kill flags are moved correctly.
+# CHECK-LABEL: name: qfpAddSwapMix
+# CHECK: %{{[0-9]+}}:hvxvr = V6_vconv_sf_qf32 %[[REG1:([0-9]+)]]
+# CHECK-NEXT: V6_vadd_qf32_mix killed %[[REG1]], %{{[0-9]+}}
+# CHECK: %{{[0-9]+}}:hvxvr = V6_vconv_sf_qf32 %[[REG2:([0-9]+)]]
+# CHECK-NEXT: V6_vadd_qf32_mix killed %[[REG2]], %{{[0-9]+}}
+
+---
+name: qfpAddSwapMix
+tracksRegLiveness: true
+
+body: |
+ bb.0:
+ liveins: $r0, $r1, $r2
+ %0:intregs = COPY $r0
+ %1:intregs = COPY $r1
+ %2:intregs = COPY $r2
+ %3:hvxvr = V6_vL32Ub_ai %0:intregs, 0
+ %4:hvxvr = V6_vL32Ub_ai %1:intregs, 0
+ %5:hvxvr = V6_vL32Ub_ai %2:intregs, 0
+ %6:hvxvr = V6_vmpy_qf32_sf %4, %5
+ %7:hvxvr = V6_vconv_sf_qf32 killed %6:hvxvr
+ %8:hvxvr = V6_vadd_sf %7:hvxvr, %3:hvxvr
+ %9:hvxvr = V6_vmpy_qf32_sf %4, %5
+ %10:hvxvr = V6_vconv_sf_qf32 killed %9:hvxvr
+ %11:hvxvr = V6_vadd_sf killed %10:hvxvr, %3:hvxvr
+...
diff --git a/llvm/test/CodeGen/Hexagon/qfp-subreg-bug.mir b/llvm/test/CodeGen/Hexagon/qfp-subreg-bug.mir
new file mode 100644
index 0000000..1d78203
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/qfp-subreg-bug.mir
@@ -0,0 +1,33 @@
+# RUN: llc -march=hexagon -mcpu=hexagonv69 -mattr=+hvxv69,+hvx-length128b -run-pass hexagon-qfp-optimizer %s -o - | FileCheck %s
+
+# CHECK: V6_vshuffvdd
+# CHECK: V6_vadd_sf
+# CHECK: V6_vadd_qf32_mix{{.*}}vsub_lo
+# CHECK: V6_vadd_qf32_mix{{.*}}vsub_hi
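+#
+# The checks verify that, when the conversions are folded into the mix adds,
+# the new instructions keep referencing the correct vsub_lo/vsub_hi
+# subregisters of the shuffled pair.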
+
+---
+name: qfp_subreg_fix
+alignment: 16
+tracksRegLiveness: true
+
+body: |
+ bb.0:
+ %10:intregs = IMPLICIT_DEF
+ %9:hvxvr = V6_vL32Ub_ai %10, 0 :: (load (s1024) from `ptr undef`, align 4)
+ %11:intregs = A2_tfrsi 15360
+ %12:hvxvr = V6_lvsplath %11
+ %13:hvxwr = V6_vmpy_qf32_hf %9, %12
+ %15:hvxvr = V6_vconv_sf_qf32 %13.vsub_lo
+ %17:hvxvr = V6_vconv_sf_qf32 %13.vsub_hi
+ %18:intregslow8 = A2_tfrsi -4
+ %19:hvxwr = V6_vshuffvdd %17, %15, %18
+ %21:hvxvr = V6_vadd_sf %19.vsub_hi, %19.vsub_hi
+ %22:hvxvr = V6_vconv_sf_qf32 %21
+ %24:hvxvr = V6_vadd_sf %19.vsub_lo, %19.vsub_lo
+ %25:hvxvr = V6_vconv_sf_qf32 %24
+ %26:hvxvr = V6_vadd_sf %25, %19.vsub_lo
+ %27:hvxvr = V6_vconv_sf_qf32 %26
+ %28:hvxvr = V6_vadd_sf %22, %19.vsub_hi
+ %29:hvxvr = V6_vconv_sf_qf32 %28
+
+...
diff --git a/llvm/test/CodeGen/Hexagon/qfpopt-rem-conv-add.ll b/llvm/test/CodeGen/Hexagon/qfpopt-rem-conv-add.ll
new file mode 100644
index 0000000..c16370c
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/qfpopt-rem-conv-add.ll
@@ -0,0 +1,21 @@
+; Tests that the generated vadd instruction takes a qf32 value as its first
+; operand instead of an sf value, with no intervening conversion instruction
+; of the form sf = qf32.
+
+; RUN: llc -mtriple=hexagon < %s -o - | FileCheck %s
+
+; CHECK: [[V2:v[0-9]+]] = vxor([[V2]],[[V2]])
+; CHECK: [[V0:v[0-9]+]].qf32 = vmpy([[V0]].sf,[[V2]].sf)
+; CHECK: [[V1:v[0-9]+]].qf32 = vmpy([[V1]].sf,[[V2]].sf)
+; CHECK: [[V4:v[0-9]+]].qf32 = vadd([[V0]].qf32,[[V2]].sf)
+; CHECK: [[V5:v[0-9]+]].qf32 = vadd([[V1]].qf32,[[V2]].sf)
+
+define void @_Z19compute_ripple_geluIDF16_EviPT_PKS0_(ptr %out_ptr, <64 x float> %conv14.ripple.vectorized) #0 {
+entry:
+ %mul16.ripple.vectorized = fmul <64 x float> %conv14.ripple.vectorized, zeroinitializer
+ %conv17.ripple.vectorized = fptrunc <64 x float> %mul16.ripple.vectorized to <64 x half>
+ store <64 x half> %conv17.ripple.vectorized, ptr %out_ptr, align 2
+ ret void
+}
+
+attributes #0 = { "target-features"="+hvx-length128b,+hvxv75,+v75,-long-calls,-small-data" }
diff --git a/llvm/test/CodeGen/Hexagon/swp-phi.ll b/llvm/test/CodeGen/Hexagon/swp-phi.ll
index 9b0e126..6ce2481 100644
--- a/llvm/test/CodeGen/Hexagon/swp-phi.ll
+++ b/llvm/test/CodeGen/Hexagon/swp-phi.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=hexagon -enable-unsafe-fp-math -enable-pipeliner \
+; RUN: llc -mtriple=hexagon -enable-pipeliner \
; RUN: -pipeliner-prune-deps=false -stats -o /dev/null < %s
; REQUIRES: asserts
diff --git a/llvm/test/CodeGen/Hexagon/vect/qfp-mix.mir b/llvm/test/CodeGen/Hexagon/vect/qfp-mix.mir
new file mode 100644
index 0000000..9a9e938
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/vect/qfp-mix.mir
@@ -0,0 +1,79 @@
+# RUN: llc -march=hexagon -mcpu=hexagonv68 -mattr=+hvxv68,+hvx-length128b \
+# RUN: -run-pass hexagon-qfp-optimizer %s -o - | FileCheck %s
+
+
+# Test that the operands are swapped for an add when the second operand is a
+# qf32-to-sf conversion: V6_vadd_qf32_mix only supports qf32 as its first
+# operand.
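+# That is, `%8 = V6_vadd_sf %3, conv(%6)` below is expected to be rewritten to
+# `V6_vadd_qf32_mix %6, %3`, with the conversion folded away.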
+# CHECK-LABEL: name: qfpAddMix
+# CHECK: %[[REG:([0-9]+)]]:hvxvr = V6_vmpy_qf32_sf
+# CHECK: V6_vadd_qf32_mix %[[REG]]
+
+---
+name: qfpAddMix
+tracksRegLiveness: true
+
+body: |
+ bb.0:
+ liveins: $r0, $r1, $r2, $r3
+ %0:intregs = COPY $r0
+ %1:intregs = COPY $r1
+ %2:intregs = COPY $r2
+ %3:hvxvr = V6_vL32Ub_ai %0:intregs, 0
+ %4:hvxvr = V6_vL32Ub_ai %1:intregs, 0
+ %5:hvxvr = V6_vL32Ub_ai %2:intregs, 0
+ %6:hvxvr = V6_vmpy_qf32_sf %4, %5
+ %7:hvxvr = V6_vconv_sf_qf32 %6:hvxvr
+ %8:hvxvr = V6_vadd_sf %3:hvxvr, %7:hvxvr
+...
+
+
+# Test that we do not generate V6_vsub_qf32_mix for the test below.
+# V6_vsub_qf32_mix only allows qf32 as the first operand; in the test, the
+# qf32 value is passed as the second operand. As sub is not commutative, we
+# must not generate the mix instruction.
+# CHECK-LABEL: name: qfpSubNoMix
+# CHECK-NOT: V6_vsub_qf32_mix
+
+---
+name: qfpSubNoMix
+tracksRegLiveness: true
+
+body: |
+ bb.0:
+ liveins: $r0, $r1, $r2, $r3
+ %0:intregs = COPY $r0
+ %1:intregs = COPY $r1
+ %2:intregs = COPY $r2
+ %3:hvxvr = V6_vL32Ub_ai %0:intregs, 0
+ %4:hvxvr = V6_vL32Ub_ai %1:intregs, 0
+ %5:hvxvr = V6_vL32Ub_ai %2:intregs, 0
+ %6:hvxvr = V6_vmpy_qf32_sf %4, %5
+ %7:hvxvr = V6_vconv_sf_qf32 %6:hvxvr
+ %8:hvxvr = V6_vsub_sf %3:hvxvr, %7:hvxvr
+...
+
+
+# Test that we do generate V6_vsub_qf32_mix for the test below.
+# V6_vsub_qf32_mix only allows qf32 as the first operand; in the test, the
+# qf32 value is passed as the first operand, so V6_vsub_qf32_mix must be
+# generated.
+# CHECK-LABEL: name: qfpSubMix
+# CHECK: V6_vsub_qf32_mix
+
+---
+name: qfpSubMix
+tracksRegLiveness: true
+
+body: |
+ bb.0:
+ liveins: $r0, $r1, $r2, $r3
+ %0:intregs = COPY $r0
+ %1:intregs = COPY $r1
+ %2:intregs = COPY $r2
+ %3:hvxvr = V6_vL32Ub_ai %0:intregs, 0
+ %4:hvxvr = V6_vL32Ub_ai %1:intregs, 0
+ %5:hvxvr = V6_vL32Ub_ai %2:intregs, 0
+ %6:hvxvr = V6_vmpy_qf32_sf %4, %5
+ %7:hvxvr = V6_vconv_sf_qf32 %6:hvxvr
+ %8:hvxvr = V6_vsub_sf %7:hvxvr, %3:hvxvr
+...
diff --git a/llvm/test/CodeGen/Hexagon/vect/qfp-zeroinit.mir b/llvm/test/CodeGen/Hexagon/vect/qfp-zeroinit.mir
new file mode 100644
index 0000000..f0b1d3c
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/vect/qfp-zeroinit.mir
@@ -0,0 +1,23 @@
+# RUN: llc -march=hexagon -mcpu=hexagonv68 -mattr=+hvxv68,+hvx-length128b -run-pass hexagon-qfp-optimizer %s -o - | FileCheck %s
+
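+# Check that an sf add is left as V6_vadd_sf when the zero vector produced by
+# V6_vd0 is one of its inputs: neither input comes from a qf32 conversion, so
+# the add/convert sequence should be left unchanged.
+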
+# CHECK-LABEL: name: qfpAdd32
+# CHECK: V6_vd0
+# CHECK-NEXT: V6_vL32Ub_ai
+# CHECK-NEXT: V6_vadd_sf
+# CHECK-NEXT: V6_vconv_sf_qf32
+# CHECK-NEXT: V6_vS32Ub_ai
+---
+name: qfpAdd32
+tracksRegLiveness: true
+
+body: |
+ bb.0:
+ liveins: $r0, $r1
+ %0:intregs = COPY $r0
+ %1:intregs = COPY $r1
+ %3:hvxvr = V6_vd0
+ %4:hvxvr = V6_vL32Ub_ai %0:intregs, 0
+ %5:hvxvr = V6_vadd_sf %3:hvxvr, %4:hvxvr
+ %6:hvxvr = V6_vconv_sf_qf32 %5:hvxvr
+ V6_vS32Ub_ai %1:intregs, 0, %6:hvxvr
+...
diff --git a/llvm/test/CodeGen/Hexagon/vect/unique-vreg-def.ll b/llvm/test/CodeGen/Hexagon/vect/unique-vreg-def.ll
new file mode 100644
index 0000000..2d46da7
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/vect/unique-vreg-def.ll
@@ -0,0 +1,32 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; REQUIRES: hexagon-registered-target
+
+; This test was asserting because getVRegDef() was called on a register with
+; multiple defs.
+; Checks that llc does not assert and that a vsub is generated.
+; CHECK: vsub
+
+target triple = "hexagon"
+
+@v = common dso_local local_unnamed_addr global <32 x i32> zeroinitializer, align 128
+
+; Function Attrs: nounwind
+define dso_local void @hvx_twoSum(<32 x i32>* nocapture noundef writeonly %s2lo) local_unnamed_addr #0 {
+entry:
+ %0 = load <32 x i32>, <32 x i32>* @v, align 128
+ %call = tail call inreg <32 x i32> @MY_Vsf_equals_Vqf32(<32 x i32> noundef %0) #3
+ %1 = tail call <32 x i32> @llvm.hexagon.V6.vsub.sf.128B(<32 x i32> %call, <32 x i32> %call)
+ store <32 x i32> %1, <32 x i32>* @v, align 128
+ store <32 x i32> %1, <32 x i32>* %s2lo, align 128
+ ret void
+}
+
+declare dso_local inreg <32 x i32> @MY_Vsf_equals_Vqf32(<32 x i32> noundef) local_unnamed_addr #1
+
+; Function Attrs: nofree nosync nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vsub.sf.128B(<32 x i32>, <32 x i32>) #2
+
+attributes #0 = { nounwind "frame-pointer"="all" "min-legal-vector-width"="1024" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv73" "target-features"="+hvx-length128b,+hvxv73,+v73,-long-calls" }
+attributes #1 = { "frame-pointer"="all" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv73" "target-features"="+hvx-length128b,+hvxv73,+v73,-long-calls" }
+attributes #2 = { nofree nosync nounwind readnone }
+attributes #3 = { nounwind }
diff --git a/llvm/test/CodeGen/MIR2Vec/Inputs/mir2vec_dummy_2D_vocab.json b/llvm/test/CodeGen/MIR2Vec/Inputs/mir2vec_dummy_2D_vocab.json
index 2894fff..da0d13d 100644
--- a/llvm/test/CodeGen/MIR2Vec/Inputs/mir2vec_dummy_2D_vocab.json
+++ b/llvm/test/CodeGen/MIR2Vec/Inputs/mir2vec_dummy_2D_vocab.json
@@ -1,5 +1,5 @@
{
- "entities" : {
+ "Opcodes" : {
"ABS_Fp":[1, 2],
"ADC":[3, 4],
"ADD":[5, 6],
@@ -7,5 +7,21 @@
"ADDPDrr":[9, 10],
"ADDPSrr":[11, 12],
"ADDSDrm":[13, 14]
+ },
+ "CommonOperands": {
+ "Immediate": [0.1, 0.1],
+ "MBB": [0.2, 0.2],
+ "FrameIndex": [0.3, 0.3],
+ "GlobalAddress": [0.4, 0.4]
+ },
+ "PhysicalRegisters": {
+ "GR32": [0.5, 0.5],
+ "GR64": [0.6, 0.6],
+ "XMM": [0.7, 0.7]
+ },
+ "VirtualRegisters": {
+ "GR32": [0.8, 0.8],
+ "GR64": [0.9, 0.9],
+ "XMM": [1.0, 1.0]
}
} \ No newline at end of file
diff --git a/llvm/test/CodeGen/MIR2Vec/Inputs/mir2vec_dummy_3D_vocab.json b/llvm/test/CodeGen/MIR2Vec/Inputs/mir2vec_dummy_3D_vocab.json
new file mode 100644
index 0000000..f4b14a4
--- /dev/null
+++ b/llvm/test/CodeGen/MIR2Vec/Inputs/mir2vec_dummy_3D_vocab.json
@@ -0,0 +1,38 @@
+{
+ "Opcodes": {
+ "KILL": [0.1, 0.2, 0.3],
+ "MOV": [0.4, 0.5, 0.6],
+ "LEA": [0.7, 0.8, 0.9],
+ "RET": [1.0, 1.1, 1.2],
+ "ADD": [1.3, 1.4, 1.5],
+ "SUB": [1.6, 1.7, 1.8],
+ "IMUL": [1.9, 2.0, 2.1],
+ "AND": [2.2, 2.3, 2.4],
+ "OR": [2.5, 2.6, 2.7],
+ "XOR": [2.8, 2.9, 3.0],
+ "CMP": [3.1, 3.2, 3.3],
+ "TEST": [3.4, 3.5, 3.6],
+ "JMP": [3.7, 3.8, 3.9],
+ "CALL": [4.0, 4.1, 4.2],
+ "PUSH": [4.3, 4.4, 4.5],
+ "POP": [4.6, 4.7, 4.8],
+ "NOP": [4.9, 5.0, 5.1],
+ "COPY": [5.2, 5.3, 5.4]
+ },
+ "CommonOperands": {
+ "Immediate": [0.1, 0.1, 0.1],
+ "MBB": [0.2, 0.2, 0.2],
+ "FrameIndex": [0.3, 0.3, 0.3],
+ "GlobalAddress": [0.4, 0.4, 0.4]
+ },
+ "PhysicalRegisters": {
+ "GR32": [0.5, 0.5, 0.5],
+ "GR64": [0.6, 0.6, 0.6],
+ "XMM": [0.7, 0.7, 0.7]
+ },
+ "VirtualRegisters": {
+ "GR32": [0.8, 0.8, 0.8],
+ "GR64": [0.9, 0.9, 0.9],
+ "XMM": [1.0, 1.0, 1.0]
+ }
+} \ No newline at end of file
diff --git a/llvm/test/CodeGen/MIR2Vec/Inputs/mir2vec_inconsistent_dims.json b/llvm/test/CodeGen/MIR2Vec/Inputs/mir2vec_inconsistent_dims.json
index bf04163..6274fb7 100644
--- a/llvm/test/CodeGen/MIR2Vec/Inputs/mir2vec_inconsistent_dims.json
+++ b/llvm/test/CodeGen/MIR2Vec/Inputs/mir2vec_inconsistent_dims.json
@@ -1,7 +1,16 @@
{
- "entities": {
+ "Opcodes": {
"ADD": [1.0, 2.0, 3.0],
"SUB": [1.5],
"MUL": [2.0, 3.0]
+ },
+ "CommonOperands": {
+ "Immediate": [1.0]
+ },
+ "PhysicalRegisters": {
+ "GR32": [1.0, 2.0]
+ },
+ "VirtualRegisters": {
+ "GR32": [1.0, 2.0, 3.0]
}
}
diff --git a/llvm/test/CodeGen/MIR2Vec/Inputs/mir2vec_zero_vocab.json b/llvm/test/CodeGen/MIR2Vec/Inputs/mir2vec_zero_vocab.json
index 63e8ccbd..7bfdf3b 100644
--- a/llvm/test/CodeGen/MIR2Vec/Inputs/mir2vec_zero_vocab.json
+++ b/llvm/test/CodeGen/MIR2Vec/Inputs/mir2vec_zero_vocab.json
@@ -1,5 +1,5 @@
{
- "entities": {
+ "Opcodes": {
"ADD": [],
"SUB": [],
"MUL": [],
@@ -8,5 +8,14 @@
"JMP": [],
"CALL": [],
"RET": []
+ },
+ "CommonOperands": {
+ "Immediate": []
+ },
+ "PhysicalRegisters": {
+ "GR32": []
+ },
+ "VirtualRegisters": {
+ "GR32": []
}
} \ No newline at end of file
diff --git a/llvm/test/CodeGen/MIR2Vec/Inputs/reference_x86_vocab_print.txt b/llvm/test/CodeGen/MIR2Vec/Inputs/reference_x86_vocab_print.txt
index 6327cff..d3c0da9 100644
--- a/llvm/test/CodeGen/MIR2Vec/Inputs/reference_x86_vocab_print.txt
+++ b/llvm/test/CodeGen/MIR2Vec/Inputs/reference_x86_vocab_print.txt
@@ -6880,3 +6880,294 @@ Key: XSHA: [ 0.00 0.00 ]
Key: XSTORE: [ 0.00 0.00 ]
Key: XSUSLDTRK: [ 0.00 0.00 ]
Key: XTEST: [ 0.00 0.00 ]
+Key: Immediate: [ 0.10 0.10 ]
+Key: CImmediate: [ 0.00 0.00 ]
+Key: FPImmediate: [ 0.00 0.00 ]
+Key: MBB: [ 0.20 0.20 ]
+Key: FrameIndex: [ 0.30 0.30 ]
+Key: ConstantPoolIndex: [ 0.00 0.00 ]
+Key: TargetIndex: [ 0.00 0.00 ]
+Key: JumpTableIndex: [ 0.00 0.00 ]
+Key: ExternalSymbol: [ 0.00 0.00 ]
+Key: GlobalAddress: [ 0.40 0.40 ]
+Key: BlockAddress: [ 0.00 0.00 ]
+Key: RegisterMask: [ 0.00 0.00 ]
+Key: RegisterLiveOut: [ 0.00 0.00 ]
+Key: Metadata: [ 0.00 0.00 ]
+Key: MCSymbol: [ 0.00 0.00 ]
+Key: CFIIndex: [ 0.00 0.00 ]
+Key: IntrinsicID: [ 0.00 0.00 ]
+Key: Predicate: [ 0.00 0.00 ]
+Key: ShuffleMask: [ 0.00 0.00 ]
+Key: PhyReg_GR8: [ 0.00 0.00 ]
+Key: PhyReg_GRH8: [ 0.00 0.00 ]
+Key: PhyReg_GR8_NOREX2: [ 0.00 0.00 ]
+Key: PhyReg_GR8_NOREX: [ 0.00 0.00 ]
+Key: PhyReg_GR8_ABCD_H: [ 0.00 0.00 ]
+Key: PhyReg_GR8_ABCD_L: [ 0.00 0.00 ]
+Key: PhyReg_GRH16: [ 0.00 0.00 ]
+Key: PhyReg_GR16: [ 0.00 0.00 ]
+Key: PhyReg_GR16_NOREX2: [ 0.00 0.00 ]
+Key: PhyReg_GR16_NOREX: [ 0.00 0.00 ]
+Key: PhyReg_VK1: [ 0.00 0.00 ]
+Key: PhyReg_VK16: [ 0.00 0.00 ]
+Key: PhyReg_VK2: [ 0.00 0.00 ]
+Key: PhyReg_VK4: [ 0.00 0.00 ]
+Key: PhyReg_VK8: [ 0.00 0.00 ]
+Key: PhyReg_VK16WM: [ 0.00 0.00 ]
+Key: PhyReg_VK1WM: [ 0.00 0.00 ]
+Key: PhyReg_VK2WM: [ 0.00 0.00 ]
+Key: PhyReg_VK4WM: [ 0.00 0.00 ]
+Key: PhyReg_VK8WM: [ 0.00 0.00 ]
+Key: PhyReg_SEGMENT_REG: [ 0.00 0.00 ]
+Key: PhyReg_GR16_ABCD: [ 0.00 0.00 ]
+Key: PhyReg_FPCCR: [ 0.00 0.00 ]
+Key: PhyReg_FR16X: [ 0.00 0.00 ]
+Key: PhyReg_FR16: [ 0.00 0.00 ]
+Key: PhyReg_VK16PAIR: [ 0.00 0.00 ]
+Key: PhyReg_VK1PAIR: [ 0.00 0.00 ]
+Key: PhyReg_VK2PAIR: [ 0.00 0.00 ]
+Key: PhyReg_VK4PAIR: [ 0.00 0.00 ]
+Key: PhyReg_VK8PAIR: [ 0.00 0.00 ]
+Key: PhyReg_VK1PAIR_with_sub_mask_0_in_VK1WM: [ 0.00 0.00 ]
+Key: PhyReg_LOW32_ADDR_ACCESS_RBP: [ 0.00 0.00 ]
+Key: PhyReg_LOW32_ADDR_ACCESS: [ 0.00 0.00 ]
+Key: PhyReg_LOW32_ADDR_ACCESS_RBP_with_sub_8bit: [ 0.00 0.00 ]
+Key: PhyReg_FR32X: [ 0.00 0.00 ]
+Key: PhyReg_GR32: [ 0.50 0.50 ]
+Key: PhyReg_GR32_NOSP: [ 0.00 0.00 ]
+Key: PhyReg_LOW32_ADDR_ACCESS_RBP_with_sub_16bit_in_GR16_NOREX2: [ 0.00 0.00 ]
+Key: PhyReg_DEBUG_REG: [ 0.00 0.00 ]
+Key: PhyReg_FR32: [ 0.00 0.00 ]
+Key: PhyReg_GR32_NOREX2: [ 0.00 0.00 ]
+Key: PhyReg_GR32_NOREX2_NOSP: [ 0.00 0.00 ]
+Key: PhyReg_LOW32_ADDR_ACCESS_RBP_with_sub_16bit_in_GR16_NOREX: [ 0.00 0.00 ]
+Key: PhyReg_GR32_NOREX: [ 0.00 0.00 ]
+Key: PhyReg_VK32: [ 0.00 0.00 ]
+Key: PhyReg_GR32_NOREX_NOSP: [ 0.00 0.00 ]
+Key: PhyReg_RFP32: [ 0.00 0.00 ]
+Key: PhyReg_VK32WM: [ 0.00 0.00 ]
+Key: PhyReg_GR32_ABCD: [ 0.00 0.00 ]
+Key: PhyReg_GR32_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR32_ABCD_and_GR32_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR32_AD: [ 0.00 0.00 ]
+Key: PhyReg_GR32_ArgRef: [ 0.00 0.00 ]
+Key: PhyReg_GR32_BPSP: [ 0.00 0.00 ]
+Key: PhyReg_GR32_BSI: [ 0.00 0.00 ]
+Key: PhyReg_GR32_CB: [ 0.00 0.00 ]
+Key: PhyReg_GR32_DC: [ 0.00 0.00 ]
+Key: PhyReg_GR32_DIBP: [ 0.00 0.00 ]
+Key: PhyReg_GR32_SIDI: [ 0.00 0.00 ]
+Key: PhyReg_LOW32_ADDR_ACCESS_RBP_with_sub_32bit: [ 0.00 0.00 ]
+Key: PhyReg_CCR: [ 0.00 0.00 ]
+Key: PhyReg_DFCCR: [ 0.00 0.00 ]
+Key: PhyReg_GR32_ABCD_and_GR32_BSI: [ 0.00 0.00 ]
+Key: PhyReg_GR32_AD_and_GR32_ArgRef: [ 0.00 0.00 ]
+Key: PhyReg_GR32_ArgRef_and_GR32_CB: [ 0.00 0.00 ]
+Key: PhyReg_GR32_BPSP_and_GR32_DIBP: [ 0.00 0.00 ]
+Key: PhyReg_GR32_BPSP_and_GR32_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR32_BSI_and_GR32_SIDI: [ 0.00 0.00 ]
+Key: PhyReg_GR32_DIBP_and_GR32_SIDI: [ 0.00 0.00 ]
+Key: PhyReg_LOW32_ADDR_ACCESS_RBP_with_sub_8bit_with_sub_32bit: [ 0.00 0.00 ]
+Key: PhyReg_LOW32_ADDR_ACCESS_with_sub_32bit: [ 0.00 0.00 ]
+Key: PhyReg_RFP64: [ 0.00 0.00 ]
+Key: PhyReg_GR64: [ 0.60 0.60 ]
+Key: PhyReg_FR64X: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_8bit: [ 0.00 0.00 ]
+Key: PhyReg_GR64_NOSP: [ 0.00 0.00 ]
+Key: PhyReg_GR64_NOREX2: [ 0.00 0.00 ]
+Key: PhyReg_CONTROL_REG: [ 0.00 0.00 ]
+Key: PhyReg_FR64: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_16bit_in_GR16_NOREX2: [ 0.00 0.00 ]
+Key: PhyReg_GR64_NOREX2_NOSP: [ 0.00 0.00 ]
+Key: PhyReg_GR64PLTSafe: [ 0.00 0.00 ]
+Key: PhyReg_GR64_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR64_NOREX: [ 0.00 0.00 ]
+Key: PhyReg_GR64_TCW64: [ 0.00 0.00 ]
+Key: PhyReg_GR64_TC_with_sub_8bit: [ 0.00 0.00 ]
+Key: PhyReg_GR64_NOREX2_NOSP_and_GR64_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR64_TCW64_with_sub_8bit: [ 0.00 0.00 ]
+Key: PhyReg_GR64_TC_and_GR64_TCW64: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_16bit_in_GR16_NOREX: [ 0.00 0.00 ]
+Key: PhyReg_VK64: [ 0.00 0.00 ]
+Key: PhyReg_VR64: [ 0.00 0.00 ]
+Key: PhyReg_GR64PLTSafe_and_GR64_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR64_NOREX2_NOSP_and_GR64_TCW64: [ 0.00 0.00 ]
+Key: PhyReg_GR64_NOREX_NOSP: [ 0.00 0.00 ]
+Key: PhyReg_GR64_NOREX_and_GR64_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR64_TCW64_and_GR64_TC_with_sub_8bit: [ 0.00 0.00 ]
+Key: PhyReg_VK64WM: [ 0.00 0.00 ]
+Key: PhyReg_GR64_TC_and_GR64_NOREX2_NOSP_and_GR64_TCW64: [ 0.00 0.00 ]
+Key: PhyReg_GR64_TC_and_GR64_with_sub_16bit_in_GR16_NOREX: [ 0.00 0.00 ]
+Key: PhyReg_GR64PLTSafe_and_GR64_TCW64: [ 0.00 0.00 ]
+Key: PhyReg_GR64_NOREX_and_GR64PLTSafe_and_GR64_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR64_NOREX_and_GR64_TCW64: [ 0.00 0.00 ]
+Key: PhyReg_GR64_ABCD: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_ABCD_and_GR32_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR64_AD: [ 0.00 0.00 ]
+Key: PhyReg_GR64_ArgRef: [ 0.00 0.00 ]
+Key: PhyReg_GR64_and_LOW32_ADDR_ACCESS_RBP: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_ArgRef: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_BPSP: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_BSI: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_CB: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_DIBP: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_SIDI: [ 0.00 0.00 ]
+Key: PhyReg_GR64_A: [ 0.00 0.00 ]
+Key: PhyReg_GR64_ArgRef_and_GR64_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR64_and_LOW32_ADDR_ACCESS: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_ABCD_and_GR32_BSI: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_AD_and_GR32_ArgRef: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_ArgRef_and_GR32_CB: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_BPSP_and_GR32_DIBP: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_BPSP_and_GR32_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_BSI_and_GR32_SIDI: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_DIBP_and_GR32_SIDI: [ 0.00 0.00 ]
+Key: PhyReg_RST: [ 0.00 0.00 ]
+Key: PhyReg_RFP80: [ 0.00 0.00 ]
+Key: PhyReg_RFP80_7: [ 0.00 0.00 ]
+Key: PhyReg_VR128X: [ 0.00 0.00 ]
+Key: PhyReg_VR128: [ 0.00 0.00 ]
+Key: PhyReg_VR256X: [ 0.00 0.00 ]
+Key: PhyReg_VR256: [ 0.00 0.00 ]
+Key: PhyReg_VR512: [ 0.00 0.00 ]
+Key: PhyReg_VR512_0_15: [ 0.00 0.00 ]
+Key: PhyReg_TILE: [ 0.00 0.00 ]
+Key: PhyReg_TILEPAIR: [ 0.00 0.00 ]
+Key: VirtReg_GR8: [ 0.00 0.00 ]
+Key: VirtReg_GRH8: [ 0.00 0.00 ]
+Key: VirtReg_GR8_NOREX2: [ 0.00 0.00 ]
+Key: VirtReg_GR8_NOREX: [ 0.00 0.00 ]
+Key: VirtReg_GR8_ABCD_H: [ 0.00 0.00 ]
+Key: VirtReg_GR8_ABCD_L: [ 0.00 0.00 ]
+Key: VirtReg_GRH16: [ 0.00 0.00 ]
+Key: VirtReg_GR16: [ 0.00 0.00 ]
+Key: VirtReg_GR16_NOREX2: [ 0.00 0.00 ]
+Key: VirtReg_GR16_NOREX: [ 0.00 0.00 ]
+Key: VirtReg_VK1: [ 0.00 0.00 ]
+Key: VirtReg_VK16: [ 0.00 0.00 ]
+Key: VirtReg_VK2: [ 0.00 0.00 ]
+Key: VirtReg_VK4: [ 0.00 0.00 ]
+Key: VirtReg_VK8: [ 0.00 0.00 ]
+Key: VirtReg_VK16WM: [ 0.00 0.00 ]
+Key: VirtReg_VK1WM: [ 0.00 0.00 ]
+Key: VirtReg_VK2WM: [ 0.00 0.00 ]
+Key: VirtReg_VK4WM: [ 0.00 0.00 ]
+Key: VirtReg_VK8WM: [ 0.00 0.00 ]
+Key: VirtReg_SEGMENT_REG: [ 0.00 0.00 ]
+Key: VirtReg_GR16_ABCD: [ 0.00 0.00 ]
+Key: VirtReg_FPCCR: [ 0.00 0.00 ]
+Key: VirtReg_FR16X: [ 0.00 0.00 ]
+Key: VirtReg_FR16: [ 0.00 0.00 ]
+Key: VirtReg_VK16PAIR: [ 0.00 0.00 ]
+Key: VirtReg_VK1PAIR: [ 0.00 0.00 ]
+Key: VirtReg_VK2PAIR: [ 0.00 0.00 ]
+Key: VirtReg_VK4PAIR: [ 0.00 0.00 ]
+Key: VirtReg_VK8PAIR: [ 0.00 0.00 ]
+Key: VirtReg_VK1PAIR_with_sub_mask_0_in_VK1WM: [ 0.00 0.00 ]
+Key: VirtReg_LOW32_ADDR_ACCESS_RBP: [ 0.00 0.00 ]
+Key: VirtReg_LOW32_ADDR_ACCESS: [ 0.00 0.00 ]
+Key: VirtReg_LOW32_ADDR_ACCESS_RBP_with_sub_8bit: [ 0.00 0.00 ]
+Key: VirtReg_FR32X: [ 0.00 0.00 ]
+Key: VirtReg_GR32: [ 0.80 0.80 ]
+Key: VirtReg_GR32_NOSP: [ 0.00 0.00 ]
+Key: VirtReg_LOW32_ADDR_ACCESS_RBP_with_sub_16bit_in_GR16_NOREX2: [ 0.00 0.00 ]
+Key: VirtReg_DEBUG_REG: [ 0.00 0.00 ]
+Key: VirtReg_FR32: [ 0.00 0.00 ]
+Key: VirtReg_GR32_NOREX2: [ 0.00 0.00 ]
+Key: VirtReg_GR32_NOREX2_NOSP: [ 0.00 0.00 ]
+Key: VirtReg_LOW32_ADDR_ACCESS_RBP_with_sub_16bit_in_GR16_NOREX: [ 0.00 0.00 ]
+Key: VirtReg_GR32_NOREX: [ 0.00 0.00 ]
+Key: VirtReg_VK32: [ 0.00 0.00 ]
+Key: VirtReg_GR32_NOREX_NOSP: [ 0.00 0.00 ]
+Key: VirtReg_RFP32: [ 0.00 0.00 ]
+Key: VirtReg_VK32WM: [ 0.00 0.00 ]
+Key: VirtReg_GR32_ABCD: [ 0.00 0.00 ]
+Key: VirtReg_GR32_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR32_ABCD_and_GR32_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR32_AD: [ 0.00 0.00 ]
+Key: VirtReg_GR32_ArgRef: [ 0.00 0.00 ]
+Key: VirtReg_GR32_BPSP: [ 0.00 0.00 ]
+Key: VirtReg_GR32_BSI: [ 0.00 0.00 ]
+Key: VirtReg_GR32_CB: [ 0.00 0.00 ]
+Key: VirtReg_GR32_DC: [ 0.00 0.00 ]
+Key: VirtReg_GR32_DIBP: [ 0.00 0.00 ]
+Key: VirtReg_GR32_SIDI: [ 0.00 0.00 ]
+Key: VirtReg_LOW32_ADDR_ACCESS_RBP_with_sub_32bit: [ 0.00 0.00 ]
+Key: VirtReg_CCR: [ 0.00 0.00 ]
+Key: VirtReg_DFCCR: [ 0.00 0.00 ]
+Key: VirtReg_GR32_ABCD_and_GR32_BSI: [ 0.00 0.00 ]
+Key: VirtReg_GR32_AD_and_GR32_ArgRef: [ 0.00 0.00 ]
+Key: VirtReg_GR32_ArgRef_and_GR32_CB: [ 0.00 0.00 ]
+Key: VirtReg_GR32_BPSP_and_GR32_DIBP: [ 0.00 0.00 ]
+Key: VirtReg_GR32_BPSP_and_GR32_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR32_BSI_and_GR32_SIDI: [ 0.00 0.00 ]
+Key: VirtReg_GR32_DIBP_and_GR32_SIDI: [ 0.00 0.00 ]
+Key: VirtReg_LOW32_ADDR_ACCESS_RBP_with_sub_8bit_with_sub_32bit: [ 0.00 0.00 ]
+Key: VirtReg_LOW32_ADDR_ACCESS_with_sub_32bit: [ 0.00 0.00 ]
+Key: VirtReg_RFP64: [ 0.00 0.00 ]
+Key: VirtReg_GR64: [ 0.90 0.90 ]
+Key: VirtReg_FR64X: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_8bit: [ 0.00 0.00 ]
+Key: VirtReg_GR64_NOSP: [ 0.00 0.00 ]
+Key: VirtReg_GR64_NOREX2: [ 0.00 0.00 ]
+Key: VirtReg_CONTROL_REG: [ 0.00 0.00 ]
+Key: VirtReg_FR64: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_16bit_in_GR16_NOREX2: [ 0.00 0.00 ]
+Key: VirtReg_GR64_NOREX2_NOSP: [ 0.00 0.00 ]
+Key: VirtReg_GR64PLTSafe: [ 0.00 0.00 ]
+Key: VirtReg_GR64_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR64_NOREX: [ 0.00 0.00 ]
+Key: VirtReg_GR64_TCW64: [ 0.00 0.00 ]
+Key: VirtReg_GR64_TC_with_sub_8bit: [ 0.00 0.00 ]
+Key: VirtReg_GR64_NOREX2_NOSP_and_GR64_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR64_TCW64_with_sub_8bit: [ 0.00 0.00 ]
+Key: VirtReg_GR64_TC_and_GR64_TCW64: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_16bit_in_GR16_NOREX: [ 0.00 0.00 ]
+Key: VirtReg_VK64: [ 0.00 0.00 ]
+Key: VirtReg_VR64: [ 0.00 0.00 ]
+Key: VirtReg_GR64PLTSafe_and_GR64_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR64_NOREX2_NOSP_and_GR64_TCW64: [ 0.00 0.00 ]
+Key: VirtReg_GR64_NOREX_NOSP: [ 0.00 0.00 ]
+Key: VirtReg_GR64_NOREX_and_GR64_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR64_TCW64_and_GR64_TC_with_sub_8bit: [ 0.00 0.00 ]
+Key: VirtReg_VK64WM: [ 0.00 0.00 ]
+Key: VirtReg_GR64_TC_and_GR64_NOREX2_NOSP_and_GR64_TCW64: [ 0.00 0.00 ]
+Key: VirtReg_GR64_TC_and_GR64_with_sub_16bit_in_GR16_NOREX: [ 0.00 0.00 ]
+Key: VirtReg_GR64PLTSafe_and_GR64_TCW64: [ 0.00 0.00 ]
+Key: VirtReg_GR64_NOREX_and_GR64PLTSafe_and_GR64_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR64_NOREX_and_GR64_TCW64: [ 0.00 0.00 ]
+Key: VirtReg_GR64_ABCD: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_ABCD_and_GR32_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR64_AD: [ 0.00 0.00 ]
+Key: VirtReg_GR64_ArgRef: [ 0.00 0.00 ]
+Key: VirtReg_GR64_and_LOW32_ADDR_ACCESS_RBP: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_ArgRef: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_BPSP: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_BSI: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_CB: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_DIBP: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_SIDI: [ 0.00 0.00 ]
+Key: VirtReg_GR64_A: [ 0.00 0.00 ]
+Key: VirtReg_GR64_ArgRef_and_GR64_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR64_and_LOW32_ADDR_ACCESS: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_ABCD_and_GR32_BSI: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_AD_and_GR32_ArgRef: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_ArgRef_and_GR32_CB: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_BPSP_and_GR32_DIBP: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_BPSP_and_GR32_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_BSI_and_GR32_SIDI: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_DIBP_and_GR32_SIDI: [ 0.00 0.00 ]
+Key: VirtReg_RST: [ 0.00 0.00 ]
+Key: VirtReg_RFP80: [ 0.00 0.00 ]
+Key: VirtReg_RFP80_7: [ 0.00 0.00 ]
+Key: VirtReg_VR128X: [ 0.00 0.00 ]
+Key: VirtReg_VR128: [ 0.00 0.00 ]
+Key: VirtReg_VR256X: [ 0.00 0.00 ]
+Key: VirtReg_VR256: [ 0.00 0.00 ]
+Key: VirtReg_VR512: [ 0.00 0.00 ]
+Key: VirtReg_VR512_0_15: [ 0.00 0.00 ]
+Key: VirtReg_TILE: [ 0.00 0.00 ]
+Key: VirtReg_TILEPAIR: [ 0.00 0.00 ]
diff --git a/llvm/test/CodeGen/MIR2Vec/Inputs/reference_x86_vocab_wo=0.5_print.txt b/llvm/test/CodeGen/MIR2Vec/Inputs/reference_x86_vocab_wo=0.5_print.txt
index 4409e6d..c6e5508 100644
--- a/llvm/test/CodeGen/MIR2Vec/Inputs/reference_x86_vocab_wo=0.5_print.txt
+++ b/llvm/test/CodeGen/MIR2Vec/Inputs/reference_x86_vocab_wo=0.5_print.txt
@@ -6880,3 +6880,294 @@ Key: XSHA: [ 0.00 0.00 ]
Key: XSTORE: [ 0.00 0.00 ]
Key: XSUSLDTRK: [ 0.00 0.00 ]
Key: XTEST: [ 0.00 0.00 ]
+Key: Immediate: [ 0.10 0.10 ]
+Key: CImmediate: [ 0.00 0.00 ]
+Key: FPImmediate: [ 0.00 0.00 ]
+Key: MBB: [ 0.20 0.20 ]
+Key: FrameIndex: [ 0.30 0.30 ]
+Key: ConstantPoolIndex: [ 0.00 0.00 ]
+Key: TargetIndex: [ 0.00 0.00 ]
+Key: JumpTableIndex: [ 0.00 0.00 ]
+Key: ExternalSymbol: [ 0.00 0.00 ]
+Key: GlobalAddress: [ 0.40 0.40 ]
+Key: BlockAddress: [ 0.00 0.00 ]
+Key: RegisterMask: [ 0.00 0.00 ]
+Key: RegisterLiveOut: [ 0.00 0.00 ]
+Key: Metadata: [ 0.00 0.00 ]
+Key: MCSymbol: [ 0.00 0.00 ]
+Key: CFIIndex: [ 0.00 0.00 ]
+Key: IntrinsicID: [ 0.00 0.00 ]
+Key: Predicate: [ 0.00 0.00 ]
+Key: ShuffleMask: [ 0.00 0.00 ]
+Key: PhyReg_GR8: [ 0.00 0.00 ]
+Key: PhyReg_GRH8: [ 0.00 0.00 ]
+Key: PhyReg_GR8_NOREX2: [ 0.00 0.00 ]
+Key: PhyReg_GR8_NOREX: [ 0.00 0.00 ]
+Key: PhyReg_GR8_ABCD_H: [ 0.00 0.00 ]
+Key: PhyReg_GR8_ABCD_L: [ 0.00 0.00 ]
+Key: PhyReg_GRH16: [ 0.00 0.00 ]
+Key: PhyReg_GR16: [ 0.00 0.00 ]
+Key: PhyReg_GR16_NOREX2: [ 0.00 0.00 ]
+Key: PhyReg_GR16_NOREX: [ 0.00 0.00 ]
+Key: PhyReg_VK1: [ 0.00 0.00 ]
+Key: PhyReg_VK16: [ 0.00 0.00 ]
+Key: PhyReg_VK2: [ 0.00 0.00 ]
+Key: PhyReg_VK4: [ 0.00 0.00 ]
+Key: PhyReg_VK8: [ 0.00 0.00 ]
+Key: PhyReg_VK16WM: [ 0.00 0.00 ]
+Key: PhyReg_VK1WM: [ 0.00 0.00 ]
+Key: PhyReg_VK2WM: [ 0.00 0.00 ]
+Key: PhyReg_VK4WM: [ 0.00 0.00 ]
+Key: PhyReg_VK8WM: [ 0.00 0.00 ]
+Key: PhyReg_SEGMENT_REG: [ 0.00 0.00 ]
+Key: PhyReg_GR16_ABCD: [ 0.00 0.00 ]
+Key: PhyReg_FPCCR: [ 0.00 0.00 ]
+Key: PhyReg_FR16X: [ 0.00 0.00 ]
+Key: PhyReg_FR16: [ 0.00 0.00 ]
+Key: PhyReg_VK16PAIR: [ 0.00 0.00 ]
+Key: PhyReg_VK1PAIR: [ 0.00 0.00 ]
+Key: PhyReg_VK2PAIR: [ 0.00 0.00 ]
+Key: PhyReg_VK4PAIR: [ 0.00 0.00 ]
+Key: PhyReg_VK8PAIR: [ 0.00 0.00 ]
+Key: PhyReg_VK1PAIR_with_sub_mask_0_in_VK1WM: [ 0.00 0.00 ]
+Key: PhyReg_LOW32_ADDR_ACCESS_RBP: [ 0.00 0.00 ]
+Key: PhyReg_LOW32_ADDR_ACCESS: [ 0.00 0.00 ]
+Key: PhyReg_LOW32_ADDR_ACCESS_RBP_with_sub_8bit: [ 0.00 0.00 ]
+Key: PhyReg_FR32X: [ 0.00 0.00 ]
+Key: PhyReg_GR32: [ 0.50 0.50 ]
+Key: PhyReg_GR32_NOSP: [ 0.00 0.00 ]
+Key: PhyReg_LOW32_ADDR_ACCESS_RBP_with_sub_16bit_in_GR16_NOREX2: [ 0.00 0.00 ]
+Key: PhyReg_DEBUG_REG: [ 0.00 0.00 ]
+Key: PhyReg_FR32: [ 0.00 0.00 ]
+Key: PhyReg_GR32_NOREX2: [ 0.00 0.00 ]
+Key: PhyReg_GR32_NOREX2_NOSP: [ 0.00 0.00 ]
+Key: PhyReg_LOW32_ADDR_ACCESS_RBP_with_sub_16bit_in_GR16_NOREX: [ 0.00 0.00 ]
+Key: PhyReg_GR32_NOREX: [ 0.00 0.00 ]
+Key: PhyReg_VK32: [ 0.00 0.00 ]
+Key: PhyReg_GR32_NOREX_NOSP: [ 0.00 0.00 ]
+Key: PhyReg_RFP32: [ 0.00 0.00 ]
+Key: PhyReg_VK32WM: [ 0.00 0.00 ]
+Key: PhyReg_GR32_ABCD: [ 0.00 0.00 ]
+Key: PhyReg_GR32_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR32_ABCD_and_GR32_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR32_AD: [ 0.00 0.00 ]
+Key: PhyReg_GR32_ArgRef: [ 0.00 0.00 ]
+Key: PhyReg_GR32_BPSP: [ 0.00 0.00 ]
+Key: PhyReg_GR32_BSI: [ 0.00 0.00 ]
+Key: PhyReg_GR32_CB: [ 0.00 0.00 ]
+Key: PhyReg_GR32_DC: [ 0.00 0.00 ]
+Key: PhyReg_GR32_DIBP: [ 0.00 0.00 ]
+Key: PhyReg_GR32_SIDI: [ 0.00 0.00 ]
+Key: PhyReg_LOW32_ADDR_ACCESS_RBP_with_sub_32bit: [ 0.00 0.00 ]
+Key: PhyReg_CCR: [ 0.00 0.00 ]
+Key: PhyReg_DFCCR: [ 0.00 0.00 ]
+Key: PhyReg_GR32_ABCD_and_GR32_BSI: [ 0.00 0.00 ]
+Key: PhyReg_GR32_AD_and_GR32_ArgRef: [ 0.00 0.00 ]
+Key: PhyReg_GR32_ArgRef_and_GR32_CB: [ 0.00 0.00 ]
+Key: PhyReg_GR32_BPSP_and_GR32_DIBP: [ 0.00 0.00 ]
+Key: PhyReg_GR32_BPSP_and_GR32_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR32_BSI_and_GR32_SIDI: [ 0.00 0.00 ]
+Key: PhyReg_GR32_DIBP_and_GR32_SIDI: [ 0.00 0.00 ]
+Key: PhyReg_LOW32_ADDR_ACCESS_RBP_with_sub_8bit_with_sub_32bit: [ 0.00 0.00 ]
+Key: PhyReg_LOW32_ADDR_ACCESS_with_sub_32bit: [ 0.00 0.00 ]
+Key: PhyReg_RFP64: [ 0.00 0.00 ]
+Key: PhyReg_GR64: [ 0.60 0.60 ]
+Key: PhyReg_FR64X: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_8bit: [ 0.00 0.00 ]
+Key: PhyReg_GR64_NOSP: [ 0.00 0.00 ]
+Key: PhyReg_GR64_NOREX2: [ 0.00 0.00 ]
+Key: PhyReg_CONTROL_REG: [ 0.00 0.00 ]
+Key: PhyReg_FR64: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_16bit_in_GR16_NOREX2: [ 0.00 0.00 ]
+Key: PhyReg_GR64_NOREX2_NOSP: [ 0.00 0.00 ]
+Key: PhyReg_GR64PLTSafe: [ 0.00 0.00 ]
+Key: PhyReg_GR64_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR64_NOREX: [ 0.00 0.00 ]
+Key: PhyReg_GR64_TCW64: [ 0.00 0.00 ]
+Key: PhyReg_GR64_TC_with_sub_8bit: [ 0.00 0.00 ]
+Key: PhyReg_GR64_NOREX2_NOSP_and_GR64_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR64_TCW64_with_sub_8bit: [ 0.00 0.00 ]
+Key: PhyReg_GR64_TC_and_GR64_TCW64: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_16bit_in_GR16_NOREX: [ 0.00 0.00 ]
+Key: PhyReg_VK64: [ 0.00 0.00 ]
+Key: PhyReg_VR64: [ 0.00 0.00 ]
+Key: PhyReg_GR64PLTSafe_and_GR64_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR64_NOREX2_NOSP_and_GR64_TCW64: [ 0.00 0.00 ]
+Key: PhyReg_GR64_NOREX_NOSP: [ 0.00 0.00 ]
+Key: PhyReg_GR64_NOREX_and_GR64_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR64_TCW64_and_GR64_TC_with_sub_8bit: [ 0.00 0.00 ]
+Key: PhyReg_VK64WM: [ 0.00 0.00 ]
+Key: PhyReg_GR64_TC_and_GR64_NOREX2_NOSP_and_GR64_TCW64: [ 0.00 0.00 ]
+Key: PhyReg_GR64_TC_and_GR64_with_sub_16bit_in_GR16_NOREX: [ 0.00 0.00 ]
+Key: PhyReg_GR64PLTSafe_and_GR64_TCW64: [ 0.00 0.00 ]
+Key: PhyReg_GR64_NOREX_and_GR64PLTSafe_and_GR64_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR64_NOREX_and_GR64_TCW64: [ 0.00 0.00 ]
+Key: PhyReg_GR64_ABCD: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_ABCD_and_GR32_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR64_AD: [ 0.00 0.00 ]
+Key: PhyReg_GR64_ArgRef: [ 0.00 0.00 ]
+Key: PhyReg_GR64_and_LOW32_ADDR_ACCESS_RBP: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_ArgRef: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_BPSP: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_BSI: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_CB: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_DIBP: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_SIDI: [ 0.00 0.00 ]
+Key: PhyReg_GR64_A: [ 0.00 0.00 ]
+Key: PhyReg_GR64_ArgRef_and_GR64_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR64_and_LOW32_ADDR_ACCESS: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_ABCD_and_GR32_BSI: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_AD_and_GR32_ArgRef: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_ArgRef_and_GR32_CB: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_BPSP_and_GR32_DIBP: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_BPSP_and_GR32_TC: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_BSI_and_GR32_SIDI: [ 0.00 0.00 ]
+Key: PhyReg_GR64_with_sub_32bit_in_GR32_DIBP_and_GR32_SIDI: [ 0.00 0.00 ]
+Key: PhyReg_RST: [ 0.00 0.00 ]
+Key: PhyReg_RFP80: [ 0.00 0.00 ]
+Key: PhyReg_RFP80_7: [ 0.00 0.00 ]
+Key: PhyReg_VR128X: [ 0.00 0.00 ]
+Key: PhyReg_VR128: [ 0.00 0.00 ]
+Key: PhyReg_VR256X: [ 0.00 0.00 ]
+Key: PhyReg_VR256: [ 0.00 0.00 ]
+Key: PhyReg_VR512: [ 0.00 0.00 ]
+Key: PhyReg_VR512_0_15: [ 0.00 0.00 ]
+Key: PhyReg_TILE: [ 0.00 0.00 ]
+Key: PhyReg_TILEPAIR: [ 0.00 0.00 ]
+Key: VirtReg_GR8: [ 0.00 0.00 ]
+Key: VirtReg_GRH8: [ 0.00 0.00 ]
+Key: VirtReg_GR8_NOREX2: [ 0.00 0.00 ]
+Key: VirtReg_GR8_NOREX: [ 0.00 0.00 ]
+Key: VirtReg_GR8_ABCD_H: [ 0.00 0.00 ]
+Key: VirtReg_GR8_ABCD_L: [ 0.00 0.00 ]
+Key: VirtReg_GRH16: [ 0.00 0.00 ]
+Key: VirtReg_GR16: [ 0.00 0.00 ]
+Key: VirtReg_GR16_NOREX2: [ 0.00 0.00 ]
+Key: VirtReg_GR16_NOREX: [ 0.00 0.00 ]
+Key: VirtReg_VK1: [ 0.00 0.00 ]
+Key: VirtReg_VK16: [ 0.00 0.00 ]
+Key: VirtReg_VK2: [ 0.00 0.00 ]
+Key: VirtReg_VK4: [ 0.00 0.00 ]
+Key: VirtReg_VK8: [ 0.00 0.00 ]
+Key: VirtReg_VK16WM: [ 0.00 0.00 ]
+Key: VirtReg_VK1WM: [ 0.00 0.00 ]
+Key: VirtReg_VK2WM: [ 0.00 0.00 ]
+Key: VirtReg_VK4WM: [ 0.00 0.00 ]
+Key: VirtReg_VK8WM: [ 0.00 0.00 ]
+Key: VirtReg_SEGMENT_REG: [ 0.00 0.00 ]
+Key: VirtReg_GR16_ABCD: [ 0.00 0.00 ]
+Key: VirtReg_FPCCR: [ 0.00 0.00 ]
+Key: VirtReg_FR16X: [ 0.00 0.00 ]
+Key: VirtReg_FR16: [ 0.00 0.00 ]
+Key: VirtReg_VK16PAIR: [ 0.00 0.00 ]
+Key: VirtReg_VK1PAIR: [ 0.00 0.00 ]
+Key: VirtReg_VK2PAIR: [ 0.00 0.00 ]
+Key: VirtReg_VK4PAIR: [ 0.00 0.00 ]
+Key: VirtReg_VK8PAIR: [ 0.00 0.00 ]
+Key: VirtReg_VK1PAIR_with_sub_mask_0_in_VK1WM: [ 0.00 0.00 ]
+Key: VirtReg_LOW32_ADDR_ACCESS_RBP: [ 0.00 0.00 ]
+Key: VirtReg_LOW32_ADDR_ACCESS: [ 0.00 0.00 ]
+Key: VirtReg_LOW32_ADDR_ACCESS_RBP_with_sub_8bit: [ 0.00 0.00 ]
+Key: VirtReg_FR32X: [ 0.00 0.00 ]
+Key: VirtReg_GR32: [ 0.80 0.80 ]
+Key: VirtReg_GR32_NOSP: [ 0.00 0.00 ]
+Key: VirtReg_LOW32_ADDR_ACCESS_RBP_with_sub_16bit_in_GR16_NOREX2: [ 0.00 0.00 ]
+Key: VirtReg_DEBUG_REG: [ 0.00 0.00 ]
+Key: VirtReg_FR32: [ 0.00 0.00 ]
+Key: VirtReg_GR32_NOREX2: [ 0.00 0.00 ]
+Key: VirtReg_GR32_NOREX2_NOSP: [ 0.00 0.00 ]
+Key: VirtReg_LOW32_ADDR_ACCESS_RBP_with_sub_16bit_in_GR16_NOREX: [ 0.00 0.00 ]
+Key: VirtReg_GR32_NOREX: [ 0.00 0.00 ]
+Key: VirtReg_VK32: [ 0.00 0.00 ]
+Key: VirtReg_GR32_NOREX_NOSP: [ 0.00 0.00 ]
+Key: VirtReg_RFP32: [ 0.00 0.00 ]
+Key: VirtReg_VK32WM: [ 0.00 0.00 ]
+Key: VirtReg_GR32_ABCD: [ 0.00 0.00 ]
+Key: VirtReg_GR32_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR32_ABCD_and_GR32_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR32_AD: [ 0.00 0.00 ]
+Key: VirtReg_GR32_ArgRef: [ 0.00 0.00 ]
+Key: VirtReg_GR32_BPSP: [ 0.00 0.00 ]
+Key: VirtReg_GR32_BSI: [ 0.00 0.00 ]
+Key: VirtReg_GR32_CB: [ 0.00 0.00 ]
+Key: VirtReg_GR32_DC: [ 0.00 0.00 ]
+Key: VirtReg_GR32_DIBP: [ 0.00 0.00 ]
+Key: VirtReg_GR32_SIDI: [ 0.00 0.00 ]
+Key: VirtReg_LOW32_ADDR_ACCESS_RBP_with_sub_32bit: [ 0.00 0.00 ]
+Key: VirtReg_CCR: [ 0.00 0.00 ]
+Key: VirtReg_DFCCR: [ 0.00 0.00 ]
+Key: VirtReg_GR32_ABCD_and_GR32_BSI: [ 0.00 0.00 ]
+Key: VirtReg_GR32_AD_and_GR32_ArgRef: [ 0.00 0.00 ]
+Key: VirtReg_GR32_ArgRef_and_GR32_CB: [ 0.00 0.00 ]
+Key: VirtReg_GR32_BPSP_and_GR32_DIBP: [ 0.00 0.00 ]
+Key: VirtReg_GR32_BPSP_and_GR32_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR32_BSI_and_GR32_SIDI: [ 0.00 0.00 ]
+Key: VirtReg_GR32_DIBP_and_GR32_SIDI: [ 0.00 0.00 ]
+Key: VirtReg_LOW32_ADDR_ACCESS_RBP_with_sub_8bit_with_sub_32bit: [ 0.00 0.00 ]
+Key: VirtReg_LOW32_ADDR_ACCESS_with_sub_32bit: [ 0.00 0.00 ]
+Key: VirtReg_RFP64: [ 0.00 0.00 ]
+Key: VirtReg_GR64: [ 0.90 0.90 ]
+Key: VirtReg_FR64X: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_8bit: [ 0.00 0.00 ]
+Key: VirtReg_GR64_NOSP: [ 0.00 0.00 ]
+Key: VirtReg_GR64_NOREX2: [ 0.00 0.00 ]
+Key: VirtReg_CONTROL_REG: [ 0.00 0.00 ]
+Key: VirtReg_FR64: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_16bit_in_GR16_NOREX2: [ 0.00 0.00 ]
+Key: VirtReg_GR64_NOREX2_NOSP: [ 0.00 0.00 ]
+Key: VirtReg_GR64PLTSafe: [ 0.00 0.00 ]
+Key: VirtReg_GR64_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR64_NOREX: [ 0.00 0.00 ]
+Key: VirtReg_GR64_TCW64: [ 0.00 0.00 ]
+Key: VirtReg_GR64_TC_with_sub_8bit: [ 0.00 0.00 ]
+Key: VirtReg_GR64_NOREX2_NOSP_and_GR64_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR64_TCW64_with_sub_8bit: [ 0.00 0.00 ]
+Key: VirtReg_GR64_TC_and_GR64_TCW64: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_16bit_in_GR16_NOREX: [ 0.00 0.00 ]
+Key: VirtReg_VK64: [ 0.00 0.00 ]
+Key: VirtReg_VR64: [ 0.00 0.00 ]
+Key: VirtReg_GR64PLTSafe_and_GR64_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR64_NOREX2_NOSP_and_GR64_TCW64: [ 0.00 0.00 ]
+Key: VirtReg_GR64_NOREX_NOSP: [ 0.00 0.00 ]
+Key: VirtReg_GR64_NOREX_and_GR64_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR64_TCW64_and_GR64_TC_with_sub_8bit: [ 0.00 0.00 ]
+Key: VirtReg_VK64WM: [ 0.00 0.00 ]
+Key: VirtReg_GR64_TC_and_GR64_NOREX2_NOSP_and_GR64_TCW64: [ 0.00 0.00 ]
+Key: VirtReg_GR64_TC_and_GR64_with_sub_16bit_in_GR16_NOREX: [ 0.00 0.00 ]
+Key: VirtReg_GR64PLTSafe_and_GR64_TCW64: [ 0.00 0.00 ]
+Key: VirtReg_GR64_NOREX_and_GR64PLTSafe_and_GR64_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR64_NOREX_and_GR64_TCW64: [ 0.00 0.00 ]
+Key: VirtReg_GR64_ABCD: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_ABCD_and_GR32_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR64_AD: [ 0.00 0.00 ]
+Key: VirtReg_GR64_ArgRef: [ 0.00 0.00 ]
+Key: VirtReg_GR64_and_LOW32_ADDR_ACCESS_RBP: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_ArgRef: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_BPSP: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_BSI: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_CB: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_DIBP: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_SIDI: [ 0.00 0.00 ]
+Key: VirtReg_GR64_A: [ 0.00 0.00 ]
+Key: VirtReg_GR64_ArgRef_and_GR64_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR64_and_LOW32_ADDR_ACCESS: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_ABCD_and_GR32_BSI: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_AD_and_GR32_ArgRef: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_ArgRef_and_GR32_CB: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_BPSP_and_GR32_DIBP: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_BPSP_and_GR32_TC: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_BSI_and_GR32_SIDI: [ 0.00 0.00 ]
+Key: VirtReg_GR64_with_sub_32bit_in_GR32_DIBP_and_GR32_SIDI: [ 0.00 0.00 ]
+Key: VirtReg_RST: [ 0.00 0.00 ]
+Key: VirtReg_RFP80: [ 0.00 0.00 ]
+Key: VirtReg_RFP80_7: [ 0.00 0.00 ]
+Key: VirtReg_VR128X: [ 0.00 0.00 ]
+Key: VirtReg_VR128: [ 0.00 0.00 ]
+Key: VirtReg_VR256X: [ 0.00 0.00 ]
+Key: VirtReg_VR256: [ 0.00 0.00 ]
+Key: VirtReg_VR512: [ 0.00 0.00 ]
+Key: VirtReg_VR512_0_15: [ 0.00 0.00 ]
+Key: VirtReg_TILE: [ 0.00 0.00 ]
+Key: VirtReg_TILEPAIR: [ 0.00 0.00 ]
diff --git a/llvm/test/CodeGen/MIR2Vec/if-else.mir b/llvm/test/CodeGen/MIR2Vec/if-else.mir
new file mode 100644
index 0000000..f2572f5
--- /dev/null
+++ b/llvm/test/CodeGen/MIR2Vec/if-else.mir
@@ -0,0 +1,144 @@
+# REQUIRES: x86-registered-target
+# RUN: llc -mtriple=x86_64-unknown-linux-gnu -run-pass=none -print-mir2vec -mir2vec-vocab-path=%S/Inputs/mir2vec_dummy_3D_vocab.json %s -o /dev/null 2>&1 | FileCheck %s
+
+--- |
+ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+
+ define dso_local i32 @abc(i32 noundef %a, i32 noundef %b) {
+ entry:
+ %retval = alloca i32, align 4
+ %a.addr = alloca i32, align 4
+ %b.addr = alloca i32, align 4
+ store i32 %a, ptr %a.addr, align 4
+ store i32 %b, ptr %b.addr, align 4
+ %0 = load i32, ptr %a.addr, align 4
+ %1 = load i32, ptr %b.addr, align 4
+ %cmp = icmp sgt i32 %0, %1
+ br i1 %cmp, label %if.then, label %if.else
+
+ if.then: ; preds = %entry
+ %2 = load i32, ptr %b.addr, align 4
+ store i32 %2, ptr %retval, align 4
+ br label %return
+
+ if.else: ; preds = %entry
+ %3 = load i32, ptr %a.addr, align 4
+ store i32 %3, ptr %retval, align 4
+ br label %return
+
+ return: ; preds = %if.else, %if.then
+ %4 = load i32, ptr %retval, align 4
+ ret i32 %4
+ }
+...
+---
+name: abc
+alignment: 16
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+failedISel: false
+tracksRegLiveness: true
+hasWinCFI: false
+noPhis: false
+isSSA: true
+noVRegs: false
+hasFakeUses: false
+callsEHReturn: false
+callsUnwindInit: false
+hasEHContTarget: false
+hasEHScopes: false
+hasEHFunclets: false
+isOutlined: false
+debugInstrRef: true
+failsVerification: false
+tracksDebugUserValues: false
+registers:
+ - { id: 0, class: gr32, preferred-register: '', flags: [ ] }
+ - { id: 1, class: gr32, preferred-register: '', flags: [ ] }
+ - { id: 2, class: gr32, preferred-register: '', flags: [ ] }
+ - { id: 3, class: gr32, preferred-register: '', flags: [ ] }
+ - { id: 4, class: gr32, preferred-register: '', flags: [ ] }
+ - { id: 5, class: gr32, preferred-register: '', flags: [ ] }
+liveins:
+ - { reg: '$edi', virtual-reg: '%0' }
+ - { reg: '$esi', virtual-reg: '%1' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 4
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ functionContext: ''
+ maxCallFrameSize: 4294967295
+ cvBytesOfCalleeSavedRegisters: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ hasTailCall: false
+ isCalleeSavedInfoValid: false
+ localFrameSize: 0
+fixedStack: []
+stack:
+ - { id: 0, name: retval, type: default, offset: 0, size: 4, alignment: 4,
+ stack-id: default, callee-saved-register: '', callee-saved-restored: true,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+ - { id: 1, name: a.addr, type: default, offset: 0, size: 4, alignment: 4,
+ stack-id: default, callee-saved-register: '', callee-saved-restored: true,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+ - { id: 2, name: b.addr, type: default, offset: 0, size: 4, alignment: 4,
+ stack-id: default, callee-saved-register: '', callee-saved-restored: true,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+entry_values: []
+callSites: []
+debugValueSubstitutions: []
+constants: []
+machineFunctionInfo:
+ amxProgModel: None
+body: |
+ bb.0.entry:
+ successors: %bb.1(0x40000000), %bb.2(0x40000000)
+ liveins: $edi, $esi
+
+ %1:gr32 = COPY $esi
+ %0:gr32 = COPY $edi
+ MOV32mr %stack.1.a.addr, 1, $noreg, 0, $noreg, %0 :: (store (s32) into %ir.a.addr)
+ MOV32mr %stack.2.b.addr, 1, $noreg, 0, $noreg, %1 :: (store (s32) into %ir.b.addr)
+ %2:gr32 = SUB32rr %0, %1, implicit-def $eflags
+ JCC_1 %bb.2, 14, implicit $eflags
+ JMP_1 %bb.1
+
+ bb.1.if.then:
+ successors: %bb.3(0x80000000)
+
+ %4:gr32 = MOV32rm %stack.2.b.addr, 1, $noreg, 0, $noreg :: (dereferenceable load (s32) from %ir.b.addr)
+ MOV32mr %stack.0.retval, 1, $noreg, 0, $noreg, killed %4 :: (store (s32) into %ir.retval)
+ JMP_1 %bb.3
+
+ bb.2.if.else:
+ successors: %bb.3(0x80000000)
+
+ %3:gr32 = MOV32rm %stack.1.a.addr, 1, $noreg, 0, $noreg :: (dereferenceable load (s32) from %ir.a.addr)
+ MOV32mr %stack.0.retval, 1, $noreg, 0, $noreg, killed %3 :: (store (s32) into %ir.retval)
+
+ bb.3.return:
+ %5:gr32 = MOV32rm %stack.0.retval, 1, $noreg, 0, $noreg :: (dereferenceable load (s32) from %ir.retval)
+ $eax = COPY %5
+ RET 0, $eax
+...
+
+# CHECK: Machine basic block vectors:
+# CHECK-NEXT: Machine basic block: abc:entry:
+# CHECK-NEXT: [ 23.60 24.20 24.80 ]
+# CHECK-NEXT: Machine basic block: abc:if.then:
+# CHECK-NEXT: [ 7.30 7.60 7.90 ]
+# CHECK-NEXT: Machine basic block: abc:if.else:
+# CHECK-NEXT: [ 3.40 3.60 3.80 ]
+# CHECK-NEXT: Machine basic block: abc:return:
+# CHECK-NEXT: [ 8.80 9.10 9.40 ]
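+
+# A worked cross-check for the if.then block, assuming each instruction vector
+# is the opcode embedding plus the sum of its operand embeddings from the
+# dummy vocab, with operands absent from the vocab (e.g. $noreg, $eflags)
+# contributing zero:
+#   MOV32rm: MOV [0.4 0.5 0.6] + GR32 vreg [0.8 0.8 0.8]
+#            + FrameIndex [0.3 0.3 0.3] + two Immediates [0.1 0.1 0.1]
+#                                                     = [1.70 1.80 1.90]
+#   MOV32mr: the same operand mix                     = [1.70 1.80 1.90]
+#   JMP_1:   JMP [3.7 3.8 3.9] + MBB [0.2 0.2 0.2]    = [3.90 4.00 4.10]
+#   Total                                             = [7.30 7.60 7.90]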
diff --git a/llvm/test/CodeGen/MIR2Vec/mir2vec-basic-symbolic.mir b/llvm/test/CodeGen/MIR2Vec/mir2vec-basic-symbolic.mir
new file mode 100644
index 0000000..0fdcc81
--- /dev/null
+++ b/llvm/test/CodeGen/MIR2Vec/mir2vec-basic-symbolic.mir
@@ -0,0 +1,76 @@
+# REQUIRES: x86-registered-target
+# RUN: llc -mtriple=x86_64-unknown-linux-gnu -run-pass=none -print-mir2vec -mir2vec-vocab-path=%S/Inputs/mir2vec_dummy_3D_vocab.json %s -o /dev/null 2>&1 | FileCheck %s
+
+--- |
+ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+
+ define dso_local noundef i32 @add_function(i32 noundef %a, i32 noundef %b) {
+ entry:
+ %sum = add nsw i32 %a, %b
+ %result = mul nsw i32 %sum, 2
+ ret i32 %result
+ }
+
+ define dso_local void @simple_function() {
+ entry:
+ ret void
+ }
+...
+---
+name: add_function
+alignment: 16
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr32 }
+ - { id: 1, class: gr32 }
+ - { id: 2, class: gr32 }
+ - { id: 3, class: gr32 }
+liveins:
+ - { reg: '$edi', virtual-reg: '%0' }
+ - { reg: '$esi', virtual-reg: '%1' }
+body: |
+ bb.0.entry:
+ liveins: $edi, $esi
+
+ %1:gr32 = COPY $esi
+ %0:gr32 = COPY $edi
+ %2:gr32 = nsw ADD32rr %0, %1, implicit-def dead $eflags
+ %3:gr32 = ADD32rr %2, %2, implicit-def dead $eflags
+ $eax = COPY %3
+ RET 0, $eax
+
+---
+name: simple_function
+alignment: 16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ RET 0
+
+# CHECK: MIR2Vec embeddings for machine function add_function:
+# CHECK: Function vector: [ 26.50 27.10 27.70 ]
+# CHECK-NEXT: Machine basic block vectors:
+# CHECK-NEXT: Machine basic block: add_function:entry:
+# CHECK-NEXT: [ 26.50 27.10 27.70 ]
+# CHECK-NEXT: Machine instruction vectors:
+# CHECK-NEXT: Machine instruction: %1:gr32 = COPY $esi
+# CHECK-NEXT: [ 6.00 6.10 6.20 ]
+# CHECK-NEXT: Machine instruction: %0:gr32 = COPY $edi
+# CHECK-NEXT: [ 6.00 6.10 6.20 ]
+# CHECK-NEXT: Machine instruction: %2:gr32 = nsw ADD32rr %0:gr32(tied-def 0), %1:gr32, implicit-def dead $eflags
+# CHECK-NEXT: [ 3.70 3.80 3.90 ]
+# CHECK-NEXT: Machine instruction: %3:gr32 = ADD32rr %2:gr32(tied-def 0), %2:gr32, implicit-def dead $eflags
+# CHECK-NEXT: [ 3.70 3.80 3.90 ]
+# CHECK-NEXT: Machine instruction: $eax = COPY %3:gr32
+# CHECK-NEXT: [ 6.00 6.10 6.20 ]
+# CHECK-NEXT: Machine instruction: RET 0, $eax
+# CHECK-NEXT: [ 1.10 1.20 1.30 ]
+
+# CHECK: MIR2Vec embeddings for machine function simple_function:
+# CHECK-NEXT: Function vector: [ 1.10 1.20 1.30 ]
+# CHECK-NEXT: Machine basic block vectors:
+# CHECK-NEXT: Machine basic block: simple_function:entry:
+# CHECK-NEXT: [ 1.10 1.20 1.30 ]
+# CHECK-NEXT: Machine instruction vectors:
+# CHECK-NEXT: Machine instruction: RET 0
+# CHECK-NEXT: [ 1.10 1.20 1.30 ]
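+
+# As a cross-check, assuming each instruction vector is the opcode embedding
+# plus its operand embeddings from the dummy vocab: `RET 0, $eax` decomposes
+# as RET [1.0 1.1 1.2] + Immediate [0.1 0.1 0.1] + an $eax entry that is
+# absent from this vocab (zero), giving [1.10 1.20 1.30] as checked above.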
diff --git a/llvm/test/CodeGen/MIR2Vec/vocab-error-handling.ll b/llvm/test/CodeGen/MIR2Vec/vocab-error-handling.ll
index 80b4048..13e908e 100644
--- a/llvm/test/CodeGen/MIR2Vec/vocab-error-handling.ll
+++ b/llvm/test/CodeGen/MIR2Vec/vocab-error-handling.ll
@@ -1,8 +1,8 @@
-; REQUIRES: x86_64-linux
-; RUN: llc -o /dev/null -print-mir2vec-vocab %s 2>&1 | FileCheck %s --check-prefix=CHECK-INVALID
-; RUN: llc -o /dev/null -print-mir2vec-vocab -mir2vec-vocab-path=%S/Inputs/mir2vec_zero_vocab.json %s 2>&1 | FileCheck %s --check-prefix=CHECK-ZERO-DIM
-; RUN: llc -o /dev/null -print-mir2vec-vocab -mir2vec-vocab-path=%S/Inputs/mir2vec_invalid_vocab.json %s 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ENTITIES
-; RUN: llc -o /dev/null -print-mir2vec-vocab -mir2vec-vocab-path=%S/Inputs/mir2vec_inconsistent_dims.json %s 2>&1 | FileCheck %s --check-prefix=CHECK-INCONSISTENT-DIMS
+; REQUIRES: x86-registered-target
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -o /dev/null -print-mir2vec-vocab %s 2>&1 | FileCheck %s --check-prefix=CHECK-INVALID
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -o /dev/null -print-mir2vec-vocab -mir2vec-vocab-path=%S/Inputs/mir2vec_zero_vocab.json %s 2>&1 | FileCheck %s --check-prefix=CHECK-ZERO-DIM
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -o /dev/null -print-mir2vec-vocab -mir2vec-vocab-path=%S/Inputs/mir2vec_invalid_vocab.json %s 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ENTITIES
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu -o /dev/null -print-mir2vec-vocab -mir2vec-vocab-path=%S/Inputs/mir2vec_inconsistent_dims.json %s 2>&1 | FileCheck %s --check-prefix=CHECK-INCONSISTENT-DIMS
define dso_local void @test() {
entry:
@@ -10,6 +10,6 @@ define dso_local void @test() {
}
; CHECK-INVALID: MIR2Vec Vocabulary Printer: Failed to get vocabulary - MIR2Vec vocabulary file path not specified; set it using --mir2vec-vocab-path
-; CHECK-ZERO-DIM: MIR2Vec Vocabulary Printer: Failed to get vocabulary - Dimension of 'entities' section of the vocabulary is zero
-; CHECK-NO-ENTITIES: MIR2Vec Vocabulary Printer: Failed to get vocabulary - Missing 'entities' section in vocabulary file
-; CHECK-INCONSISTENT-DIMS: MIR2Vec Vocabulary Printer: Failed to get vocabulary - All vectors in the 'entities' section of the vocabulary are not of the same dimension
+; CHECK-ZERO-DIM: MIR2Vec Vocabulary Printer: Failed to get vocabulary - Dimension of 'Opcodes' section of the vocabulary is zero
+; CHECK-NO-ENTITIES: MIR2Vec Vocabulary Printer: Failed to get vocabulary - Missing 'Opcodes' section in vocabulary file
+; CHECK-INCONSISTENT-DIMS: MIR2Vec Vocabulary Printer: Failed to get vocabulary - All vectors in the 'Opcodes' section of the vocabulary are not of the same dimension
diff --git a/llvm/test/CodeGen/NVPTX/fma-assoc.ll b/llvm/test/CodeGen/NVPTX/fma-assoc.ll
index 6693c90..db0eae7 100644
--- a/llvm/test/CodeGen/NVPTX/fma-assoc.ll
+++ b/llvm/test/CodeGen/NVPTX/fma-assoc.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_20 -fp-contract=fast | FileCheck %s -check-prefix=CHECK
-; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_20 -fp-contract=fast -enable-unsafe-fp-math | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-UNSAFE
+; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_20 -fp-contract=fast | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-UNSAFE
; RUN: %if ptxas %{ llc < %s -mtriple=nvptx64 -mcpu=sm_20 -fp-contract=fast | %ptxas-verify %}
-; RUN: %if ptxas %{ llc < %s -mtriple=nvptx64 -mcpu=sm_20 -fp-contract=fast -enable-unsafe-fp-math | %ptxas-verify %}
define ptx_device float @t1_f32(float %x, float %y, float %z,
; CHECK-UNSAFE-LABEL: t1_f32(
diff --git a/llvm/test/CodeGen/PowerPC/all-atomics.ll b/llvm/test/CodeGen/PowerPC/all-atomics.ll
index 7e892fc..93968b71 100644
--- a/llvm/test/CodeGen/PowerPC/all-atomics.ll
+++ b/llvm/test/CodeGen/PowerPC/all-atomics.ll
@@ -33,7 +33,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 5, 0, 4
; CHECK-NEXT: addi 5, 5, 1
; CHECK-NEXT: stbcx. 5, 0, 4
-; CHECK-NEXT: bne 0, .LBB0_1
+; CHECK-NEXT: bne- 0, .LBB0_1
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: addis 5, 2, uc@toc@ha
; CHECK-NEXT: lwsync
@@ -44,7 +44,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 6, 0, 5
; CHECK-NEXT: addi 6, 6, 1
; CHECK-NEXT: stbcx. 6, 0, 5
-; CHECK-NEXT: bne 0, .LBB0_3
+; CHECK-NEXT: bne- 0, .LBB0_3
; CHECK-NEXT: # %bb.4: # %entry
; CHECK-NEXT: addis 6, 2, ss@toc@ha
; CHECK-NEXT: lwsync
@@ -55,7 +55,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 7, 0, 6
; CHECK-NEXT: addi 7, 7, 1
; CHECK-NEXT: sthcx. 7, 0, 6
-; CHECK-NEXT: bne 0, .LBB0_5
+; CHECK-NEXT: bne- 0, .LBB0_5
; CHECK-NEXT: # %bb.6: # %entry
; CHECK-NEXT: addis 7, 2, us@toc@ha
; CHECK-NEXT: lwsync
@@ -66,7 +66,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 7, 0, 8
; CHECK-NEXT: addi 7, 7, 1
; CHECK-NEXT: sthcx. 7, 0, 8
-; CHECK-NEXT: bne 0, .LBB0_7
+; CHECK-NEXT: bne- 0, .LBB0_7
; CHECK-NEXT: # %bb.8: # %entry
; CHECK-NEXT: addis 7, 2, si@toc@ha
; CHECK-NEXT: lwsync
@@ -77,7 +77,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 7, 0, 9
; CHECK-NEXT: addi 7, 7, 1
; CHECK-NEXT: stwcx. 7, 0, 9
-; CHECK-NEXT: bne 0, .LBB0_9
+; CHECK-NEXT: bne- 0, .LBB0_9
; CHECK-NEXT: # %bb.10: # %entry
; CHECK-NEXT: addis 7, 2, ui@toc@ha
; CHECK-NEXT: lwsync
@@ -88,7 +88,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 7, 0, 10
; CHECK-NEXT: addi 7, 7, 1
; CHECK-NEXT: stwcx. 7, 0, 10
-; CHECK-NEXT: bne 0, .LBB0_11
+; CHECK-NEXT: bne- 0, .LBB0_11
; CHECK-NEXT: # %bb.12: # %entry
; CHECK-NEXT: addis 7, 2, sll@toc@ha
; CHECK-NEXT: lwsync
@@ -100,7 +100,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 12, 0, 11
; CHECK-NEXT: addi 12, 12, 1
; CHECK-NEXT: stdcx. 12, 0, 11
-; CHECK-NEXT: bne 0, .LBB0_13
+; CHECK-NEXT: bne- 0, .LBB0_13
; CHECK-NEXT: # %bb.14: # %entry
; CHECK-NEXT: addis 12, 2, ull@toc@ha
; CHECK-NEXT: lwsync
@@ -111,7 +111,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 30, 0, 12
; CHECK-NEXT: addi 0, 30, 1
; CHECK-NEXT: stdcx. 0, 0, 12
-; CHECK-NEXT: bne 0, .LBB0_15
+; CHECK-NEXT: bne- 0, .LBB0_15
; CHECK-NEXT: # %bb.16: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -120,7 +120,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 0, 0, 4
; CHECK-NEXT: sub 0, 0, 3
; CHECK-NEXT: stbcx. 0, 0, 4
-; CHECK-NEXT: bne 0, .LBB0_17
+; CHECK-NEXT: bne- 0, .LBB0_17
; CHECK-NEXT: # %bb.18: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -129,7 +129,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 0, 0, 5
; CHECK-NEXT: sub 0, 0, 3
; CHECK-NEXT: stbcx. 0, 0, 5
-; CHECK-NEXT: bne 0, .LBB0_19
+; CHECK-NEXT: bne- 0, .LBB0_19
; CHECK-NEXT: # %bb.20: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -138,7 +138,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 0, 0, 6
; CHECK-NEXT: sub 0, 0, 3
; CHECK-NEXT: sthcx. 0, 0, 6
-; CHECK-NEXT: bne 0, .LBB0_21
+; CHECK-NEXT: bne- 0, .LBB0_21
; CHECK-NEXT: # %bb.22: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -147,7 +147,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 0, 0, 8
; CHECK-NEXT: sub 0, 0, 3
; CHECK-NEXT: sthcx. 0, 0, 8
-; CHECK-NEXT: bne 0, .LBB0_23
+; CHECK-NEXT: bne- 0, .LBB0_23
; CHECK-NEXT: # %bb.24: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -156,7 +156,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 0, 0, 9
; CHECK-NEXT: sub 0, 0, 3
; CHECK-NEXT: stwcx. 0, 0, 9
-; CHECK-NEXT: bne 0, .LBB0_25
+; CHECK-NEXT: bne- 0, .LBB0_25
; CHECK-NEXT: # %bb.26: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -165,7 +165,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 0, 0, 10
; CHECK-NEXT: sub 0, 0, 3
; CHECK-NEXT: stwcx. 0, 0, 10
-; CHECK-NEXT: bne 0, .LBB0_27
+; CHECK-NEXT: bne- 0, .LBB0_27
; CHECK-NEXT: # %bb.28: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -174,7 +174,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 0, 0, 11
; CHECK-NEXT: sub 0, 0, 7
; CHECK-NEXT: stdcx. 0, 0, 11
-; CHECK-NEXT: bne 0, .LBB0_29
+; CHECK-NEXT: bne- 0, .LBB0_29
; CHECK-NEXT: # %bb.30: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -183,7 +183,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 0, 0, 12
; CHECK-NEXT: sub 0, 0, 7
; CHECK-NEXT: stdcx. 0, 0, 12
-; CHECK-NEXT: bne 0, .LBB0_31
+; CHECK-NEXT: bne- 0, .LBB0_31
; CHECK-NEXT: # %bb.32: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -192,7 +192,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 0, 0, 4
; CHECK-NEXT: ori 0, 0, 1
; CHECK-NEXT: stbcx. 0, 0, 4
-; CHECK-NEXT: bne 0, .LBB0_33
+; CHECK-NEXT: bne- 0, .LBB0_33
; CHECK-NEXT: # %bb.34: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -201,7 +201,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 0, 0, 5
; CHECK-NEXT: ori 0, 0, 1
; CHECK-NEXT: stbcx. 0, 0, 5
-; CHECK-NEXT: bne 0, .LBB0_35
+; CHECK-NEXT: bne- 0, .LBB0_35
; CHECK-NEXT: # %bb.36: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -210,7 +210,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 0, 0, 6
; CHECK-NEXT: ori 0, 0, 1
; CHECK-NEXT: sthcx. 0, 0, 6
-; CHECK-NEXT: bne 0, .LBB0_37
+; CHECK-NEXT: bne- 0, .LBB0_37
; CHECK-NEXT: # %bb.38: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -219,7 +219,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 0, 0, 8
; CHECK-NEXT: ori 0, 0, 1
; CHECK-NEXT: sthcx. 0, 0, 8
-; CHECK-NEXT: bne 0, .LBB0_39
+; CHECK-NEXT: bne- 0, .LBB0_39
; CHECK-NEXT: # %bb.40: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -228,7 +228,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 0, 0, 9
; CHECK-NEXT: ori 0, 0, 1
; CHECK-NEXT: stwcx. 0, 0, 9
-; CHECK-NEXT: bne 0, .LBB0_41
+; CHECK-NEXT: bne- 0, .LBB0_41
; CHECK-NEXT: # %bb.42: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -237,7 +237,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 0, 0, 10
; CHECK-NEXT: ori 0, 0, 1
; CHECK-NEXT: stwcx. 0, 0, 10
-; CHECK-NEXT: bne 0, .LBB0_43
+; CHECK-NEXT: bne- 0, .LBB0_43
; CHECK-NEXT: # %bb.44: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -246,7 +246,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 0, 0, 11
; CHECK-NEXT: ori 0, 0, 1
; CHECK-NEXT: stdcx. 0, 0, 11
-; CHECK-NEXT: bne 0, .LBB0_45
+; CHECK-NEXT: bne- 0, .LBB0_45
; CHECK-NEXT: # %bb.46: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -255,7 +255,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 0, 0, 12
; CHECK-NEXT: ori 0, 0, 1
; CHECK-NEXT: stdcx. 0, 0, 12
-; CHECK-NEXT: bne 0, .LBB0_47
+; CHECK-NEXT: bne- 0, .LBB0_47
; CHECK-NEXT: # %bb.48: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -264,7 +264,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 0, 0, 4
; CHECK-NEXT: xori 0, 0, 1
; CHECK-NEXT: stbcx. 0, 0, 4
-; CHECK-NEXT: bne 0, .LBB0_49
+; CHECK-NEXT: bne- 0, .LBB0_49
; CHECK-NEXT: # %bb.50: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -273,7 +273,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 0, 0, 5
; CHECK-NEXT: xori 0, 0, 1
; CHECK-NEXT: stbcx. 0, 0, 5
-; CHECK-NEXT: bne 0, .LBB0_51
+; CHECK-NEXT: bne- 0, .LBB0_51
; CHECK-NEXT: # %bb.52: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -282,7 +282,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 0, 0, 6
; CHECK-NEXT: xori 0, 0, 1
; CHECK-NEXT: sthcx. 0, 0, 6
-; CHECK-NEXT: bne 0, .LBB0_53
+; CHECK-NEXT: bne- 0, .LBB0_53
; CHECK-NEXT: # %bb.54: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -291,7 +291,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 0, 0, 8
; CHECK-NEXT: xori 0, 0, 1
; CHECK-NEXT: sthcx. 0, 0, 8
-; CHECK-NEXT: bne 0, .LBB0_55
+; CHECK-NEXT: bne- 0, .LBB0_55
; CHECK-NEXT: # %bb.56: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -300,7 +300,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 0, 0, 9
; CHECK-NEXT: xori 0, 0, 1
; CHECK-NEXT: stwcx. 0, 0, 9
-; CHECK-NEXT: bne 0, .LBB0_57
+; CHECK-NEXT: bne- 0, .LBB0_57
; CHECK-NEXT: # %bb.58: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -309,7 +309,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 0, 0, 10
; CHECK-NEXT: xori 0, 0, 1
; CHECK-NEXT: stwcx. 0, 0, 10
-; CHECK-NEXT: bne 0, .LBB0_59
+; CHECK-NEXT: bne- 0, .LBB0_59
; CHECK-NEXT: # %bb.60: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -318,7 +318,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 0, 0, 11
; CHECK-NEXT: xori 0, 0, 1
; CHECK-NEXT: stdcx. 0, 0, 11
-; CHECK-NEXT: bne 0, .LBB0_61
+; CHECK-NEXT: bne- 0, .LBB0_61
; CHECK-NEXT: # %bb.62: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -327,7 +327,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 0, 0, 12
; CHECK-NEXT: xori 0, 0, 1
; CHECK-NEXT: stdcx. 0, 0, 12
-; CHECK-NEXT: bne 0, .LBB0_63
+; CHECK-NEXT: bne- 0, .LBB0_63
; CHECK-NEXT: # %bb.64: # %entry
; CHECK-NEXT: addis 30, 2, u128@toc@ha
; CHECK-NEXT: lwsync
@@ -361,7 +361,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 0, 0, 4
; CHECK-NEXT: nand 0, 3, 0
; CHECK-NEXT: stbcx. 0, 0, 4
-; CHECK-NEXT: bne 0, .LBB0_69
+; CHECK-NEXT: bne- 0, .LBB0_69
; CHECK-NEXT: # %bb.70: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -370,7 +370,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 0, 0, 5
; CHECK-NEXT: nand 0, 3, 0
; CHECK-NEXT: stbcx. 0, 0, 5
-; CHECK-NEXT: bne 0, .LBB0_71
+; CHECK-NEXT: bne- 0, .LBB0_71
; CHECK-NEXT: # %bb.72: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -379,7 +379,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 0, 0, 6
; CHECK-NEXT: nand 0, 3, 0
; CHECK-NEXT: sthcx. 0, 0, 6
-; CHECK-NEXT: bne 0, .LBB0_73
+; CHECK-NEXT: bne- 0, .LBB0_73
; CHECK-NEXT: # %bb.74: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -388,7 +388,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 0, 0, 8
; CHECK-NEXT: nand 0, 3, 0
; CHECK-NEXT: sthcx. 0, 0, 8
-; CHECK-NEXT: bne 0, .LBB0_75
+; CHECK-NEXT: bne- 0, .LBB0_75
; CHECK-NEXT: # %bb.76: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -397,7 +397,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 0, 0, 9
; CHECK-NEXT: nand 0, 3, 0
; CHECK-NEXT: stwcx. 0, 0, 9
-; CHECK-NEXT: bne 0, .LBB0_77
+; CHECK-NEXT: bne- 0, .LBB0_77
; CHECK-NEXT: # %bb.78: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -406,7 +406,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 0, 0, 10
; CHECK-NEXT: nand 0, 3, 0
; CHECK-NEXT: stwcx. 0, 0, 10
-; CHECK-NEXT: bne 0, .LBB0_79
+; CHECK-NEXT: bne- 0, .LBB0_79
; CHECK-NEXT: # %bb.80: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -415,7 +415,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 0, 0, 11
; CHECK-NEXT: nand 0, 7, 0
; CHECK-NEXT: stdcx. 0, 0, 11
-; CHECK-NEXT: bne 0, .LBB0_81
+; CHECK-NEXT: bne- 0, .LBB0_81
; CHECK-NEXT: # %bb.82: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -424,7 +424,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 0, 0, 12
; CHECK-NEXT: nand 0, 7, 0
; CHECK-NEXT: stdcx. 0, 0, 12
-; CHECK-NEXT: bne 0, .LBB0_83
+; CHECK-NEXT: bne- 0, .LBB0_83
; CHECK-NEXT: # %bb.84: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -433,7 +433,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 0, 0, 4
; CHECK-NEXT: and 0, 3, 0
; CHECK-NEXT: stbcx. 0, 0, 4
-; CHECK-NEXT: bne 0, .LBB0_85
+; CHECK-NEXT: bne- 0, .LBB0_85
; CHECK-NEXT: # %bb.86: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -442,7 +442,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 4, 0, 5
; CHECK-NEXT: and 4, 3, 4
; CHECK-NEXT: stbcx. 4, 0, 5
-; CHECK-NEXT: bne 0, .LBB0_87
+; CHECK-NEXT: bne- 0, .LBB0_87
; CHECK-NEXT: # %bb.88: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -451,7 +451,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 4, 0, 6
; CHECK-NEXT: and 4, 3, 4
; CHECK-NEXT: sthcx. 4, 0, 6
-; CHECK-NEXT: bne 0, .LBB0_89
+; CHECK-NEXT: bne- 0, .LBB0_89
; CHECK-NEXT: # %bb.90: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -460,7 +460,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 4, 0, 8
; CHECK-NEXT: and 4, 3, 4
; CHECK-NEXT: sthcx. 4, 0, 8
-; CHECK-NEXT: bne 0, .LBB0_91
+; CHECK-NEXT: bne- 0, .LBB0_91
; CHECK-NEXT: # %bb.92: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -469,7 +469,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 4, 0, 9
; CHECK-NEXT: and 4, 3, 4
; CHECK-NEXT: stwcx. 4, 0, 9
-; CHECK-NEXT: bne 0, .LBB0_93
+; CHECK-NEXT: bne- 0, .LBB0_93
; CHECK-NEXT: # %bb.94: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -478,7 +478,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 4, 0, 10
; CHECK-NEXT: and 4, 3, 4
; CHECK-NEXT: stwcx. 4, 0, 10
-; CHECK-NEXT: bne 0, .LBB0_95
+; CHECK-NEXT: bne- 0, .LBB0_95
; CHECK-NEXT: # %bb.96: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -487,7 +487,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 3, 0, 11
; CHECK-NEXT: and 3, 7, 3
; CHECK-NEXT: stdcx. 3, 0, 11
-; CHECK-NEXT: bne 0, .LBB0_97
+; CHECK-NEXT: bne- 0, .LBB0_97
; CHECK-NEXT: # %bb.98: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sync
@@ -496,7 +496,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 3, 0, 12
; CHECK-NEXT: and 3, 7, 3
; CHECK-NEXT: stdcx. 3, 0, 12
-; CHECK-NEXT: bne 0, .LBB0_99
+; CHECK-NEXT: bne- 0, .LBB0_99
; CHECK-NEXT: # %bb.100: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: ld 30, -16(1) # 8-byte Folded Reload
@@ -545,7 +545,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 28
-; AIX32-NEXT: bne 0, L..BB0_1
+; AIX32-NEXT: bne- 0, L..BB0_1
; AIX32-NEXT: # %bb.2: # %entry
; AIX32-NEXT: lwz 3, L..C1(2) # @uc
; AIX32-NEXT: lwsync
@@ -564,7 +564,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 27
-; AIX32-NEXT: bne 0, L..BB0_3
+; AIX32-NEXT: bne- 0, L..BB0_3
; AIX32-NEXT: # %bb.4: # %entry
; AIX32-NEXT: lwz 3, L..C2(2) # @ss
; AIX32-NEXT: lwsync
@@ -584,7 +584,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 25
-; AIX32-NEXT: bne 0, L..BB0_5
+; AIX32-NEXT: bne- 0, L..BB0_5
; AIX32-NEXT: # %bb.6: # %entry
; AIX32-NEXT: lwz 3, L..C3(2) # @us
; AIX32-NEXT: lwsync
@@ -604,7 +604,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 23
-; AIX32-NEXT: bne 0, L..BB0_7
+; AIX32-NEXT: bne- 0, L..BB0_7
; AIX32-NEXT: # %bb.8: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: lwz 20, L..C4(2) # @si
@@ -614,7 +614,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 20
; AIX32-NEXT: addi 3, 3, 1
; AIX32-NEXT: stwcx. 3, 0, 20
-; AIX32-NEXT: bne 0, L..BB0_9
+; AIX32-NEXT: bne- 0, L..BB0_9
; AIX32-NEXT: # %bb.10: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: lwz 19, L..C5(2) # @ui
@@ -624,7 +624,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 19
; AIX32-NEXT: addi 3, 3, 1
; AIX32-NEXT: stwcx. 3, 0, 19
-; AIX32-NEXT: bne 0, L..BB0_11
+; AIX32-NEXT: bne- 0, L..BB0_11
; AIX32-NEXT: # %bb.12: # %entry
; AIX32-NEXT: lwz 31, L..C6(2) # @sll
; AIX32-NEXT: lwsync
@@ -652,7 +652,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 28
-; AIX32-NEXT: bne 0, L..BB0_13
+; AIX32-NEXT: bne- 0, L..BB0_13
; AIX32-NEXT: # %bb.14: # %entry
; AIX32-NEXT: li 3, 255
; AIX32-NEXT: lwsync
@@ -666,7 +666,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 27
-; AIX32-NEXT: bne 0, L..BB0_15
+; AIX32-NEXT: bne- 0, L..BB0_15
; AIX32-NEXT: # %bb.16: # %entry
; AIX32-NEXT: li 3, 0
; AIX32-NEXT: lwsync
@@ -681,7 +681,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 25
-; AIX32-NEXT: bne 0, L..BB0_17
+; AIX32-NEXT: bne- 0, L..BB0_17
; AIX32-NEXT: # %bb.18: # %entry
; AIX32-NEXT: li 3, 0
; AIX32-NEXT: lwsync
@@ -696,7 +696,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 23
-; AIX32-NEXT: bne 0, L..BB0_19
+; AIX32-NEXT: bne- 0, L..BB0_19
; AIX32-NEXT: # %bb.20: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: sync
@@ -705,7 +705,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 20
; AIX32-NEXT: sub 3, 3, 15
; AIX32-NEXT: stwcx. 3, 0, 20
-; AIX32-NEXT: bne 0, L..BB0_21
+; AIX32-NEXT: bne- 0, L..BB0_21
; AIX32-NEXT: # %bb.22: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: sync
@@ -714,7 +714,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 19
; AIX32-NEXT: sub 3, 3, 15
; AIX32-NEXT: stwcx. 3, 0, 19
-; AIX32-NEXT: bne 0, L..BB0_23
+; AIX32-NEXT: bne- 0, L..BB0_23
; AIX32-NEXT: # %bb.24: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: li 4, 0
@@ -740,7 +740,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 28
-; AIX32-NEXT: bne 0, L..BB0_25
+; AIX32-NEXT: bne- 0, L..BB0_25
; AIX32-NEXT: # %bb.26: # %entry
; AIX32-NEXT: li 3, 255
; AIX32-NEXT: lwsync
@@ -754,7 +754,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 27
-; AIX32-NEXT: bne 0, L..BB0_27
+; AIX32-NEXT: bne- 0, L..BB0_27
; AIX32-NEXT: # %bb.28: # %entry
; AIX32-NEXT: li 3, 0
; AIX32-NEXT: lwsync
@@ -769,7 +769,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 25
-; AIX32-NEXT: bne 0, L..BB0_29
+; AIX32-NEXT: bne- 0, L..BB0_29
; AIX32-NEXT: # %bb.30: # %entry
; AIX32-NEXT: li 3, 0
; AIX32-NEXT: lwsync
@@ -784,7 +784,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 23
-; AIX32-NEXT: bne 0, L..BB0_31
+; AIX32-NEXT: bne- 0, L..BB0_31
; AIX32-NEXT: # %bb.32: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: sync
@@ -793,7 +793,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 20
; AIX32-NEXT: ori 3, 3, 1
; AIX32-NEXT: stwcx. 3, 0, 20
-; AIX32-NEXT: bne 0, L..BB0_33
+; AIX32-NEXT: bne- 0, L..BB0_33
; AIX32-NEXT: # %bb.34: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: sync
@@ -802,7 +802,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 19
; AIX32-NEXT: ori 3, 3, 1
; AIX32-NEXT: stwcx. 3, 0, 19
-; AIX32-NEXT: bne 0, L..BB0_35
+; AIX32-NEXT: bne- 0, L..BB0_35
; AIX32-NEXT: # %bb.36: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: li 4, 0
@@ -828,7 +828,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 28
-; AIX32-NEXT: bne 0, L..BB0_37
+; AIX32-NEXT: bne- 0, L..BB0_37
; AIX32-NEXT: # %bb.38: # %entry
; AIX32-NEXT: li 3, 255
; AIX32-NEXT: lwsync
@@ -842,7 +842,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 27
-; AIX32-NEXT: bne 0, L..BB0_39
+; AIX32-NEXT: bne- 0, L..BB0_39
; AIX32-NEXT: # %bb.40: # %entry
; AIX32-NEXT: li 3, 0
; AIX32-NEXT: lwsync
@@ -857,7 +857,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 25
-; AIX32-NEXT: bne 0, L..BB0_41
+; AIX32-NEXT: bne- 0, L..BB0_41
; AIX32-NEXT: # %bb.42: # %entry
; AIX32-NEXT: li 3, 0
; AIX32-NEXT: lwsync
@@ -872,7 +872,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 23
-; AIX32-NEXT: bne 0, L..BB0_43
+; AIX32-NEXT: bne- 0, L..BB0_43
; AIX32-NEXT: # %bb.44: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: sync
@@ -881,7 +881,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 20
; AIX32-NEXT: xori 3, 3, 1
; AIX32-NEXT: stwcx. 3, 0, 20
-; AIX32-NEXT: bne 0, L..BB0_45
+; AIX32-NEXT: bne- 0, L..BB0_45
; AIX32-NEXT: # %bb.46: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: sync
@@ -890,7 +890,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 19
; AIX32-NEXT: xori 3, 3, 1
; AIX32-NEXT: stwcx. 3, 0, 19
-; AIX32-NEXT: bne 0, L..BB0_47
+; AIX32-NEXT: bne- 0, L..BB0_47
; AIX32-NEXT: # %bb.48: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: li 4, 0
@@ -986,7 +986,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 28
-; AIX32-NEXT: bne 0, L..BB0_53
+; AIX32-NEXT: bne- 0, L..BB0_53
; AIX32-NEXT: # %bb.54: # %atomicrmw.end
; AIX32-NEXT: li 3, 255
; AIX32-NEXT: lwsync
@@ -1001,7 +1001,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 27
-; AIX32-NEXT: bne 0, L..BB0_55
+; AIX32-NEXT: bne- 0, L..BB0_55
; AIX32-NEXT: # %bb.56: # %atomicrmw.end
; AIX32-NEXT: li 3, 0
; AIX32-NEXT: lwsync
@@ -1017,7 +1017,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 25
-; AIX32-NEXT: bne 0, L..BB0_57
+; AIX32-NEXT: bne- 0, L..BB0_57
; AIX32-NEXT: # %bb.58: # %atomicrmw.end
; AIX32-NEXT: li 3, 0
; AIX32-NEXT: lwsync
@@ -1033,7 +1033,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 23
-; AIX32-NEXT: bne 0, L..BB0_59
+; AIX32-NEXT: bne- 0, L..BB0_59
; AIX32-NEXT: # %bb.60: # %atomicrmw.end
; AIX32-NEXT: lwsync
; AIX32-NEXT: sync
@@ -1042,7 +1042,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 20
; AIX32-NEXT: nand 3, 29, 3
; AIX32-NEXT: stwcx. 3, 0, 20
-; AIX32-NEXT: bne 0, L..BB0_61
+; AIX32-NEXT: bne- 0, L..BB0_61
; AIX32-NEXT: # %bb.62: # %atomicrmw.end
; AIX32-NEXT: lwsync
; AIX32-NEXT: sync
@@ -1051,7 +1051,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 19
; AIX32-NEXT: nand 3, 29, 3
; AIX32-NEXT: stwcx. 3, 0, 19
-; AIX32-NEXT: bne 0, L..BB0_63
+; AIX32-NEXT: bne- 0, L..BB0_63
; AIX32-NEXT: # %bb.64: # %atomicrmw.end
; AIX32-NEXT: lwz 31, L..C6(2) # @sll
; AIX32-NEXT: lwsync
@@ -1079,7 +1079,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 28
-; AIX32-NEXT: bne 0, L..BB0_65
+; AIX32-NEXT: bne- 0, L..BB0_65
; AIX32-NEXT: # %bb.66: # %atomicrmw.end
; AIX32-NEXT: li 3, 255
; AIX32-NEXT: lwsync
@@ -1093,7 +1093,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 27
-; AIX32-NEXT: bne 0, L..BB0_67
+; AIX32-NEXT: bne- 0, L..BB0_67
; AIX32-NEXT: # %bb.68: # %atomicrmw.end
; AIX32-NEXT: li 3, 0
; AIX32-NEXT: lwsync
@@ -1108,7 +1108,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 25
-; AIX32-NEXT: bne 0, L..BB0_69
+; AIX32-NEXT: bne- 0, L..BB0_69
; AIX32-NEXT: # %bb.70: # %atomicrmw.end
; AIX32-NEXT: li 3, 0
; AIX32-NEXT: lwsync
@@ -1123,7 +1123,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 4, 5, 4
; AIX32-NEXT: stwcx. 4, 0, 23
-; AIX32-NEXT: bne 0, L..BB0_71
+; AIX32-NEXT: bne- 0, L..BB0_71
; AIX32-NEXT: # %bb.72: # %atomicrmw.end
; AIX32-NEXT: lwsync
; AIX32-NEXT: sync
@@ -1132,7 +1132,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 20
; AIX32-NEXT: and 3, 29, 3
; AIX32-NEXT: stwcx. 3, 0, 20
-; AIX32-NEXT: bne 0, L..BB0_73
+; AIX32-NEXT: bne- 0, L..BB0_73
; AIX32-NEXT: # %bb.74: # %atomicrmw.end
; AIX32-NEXT: lwsync
; AIX32-NEXT: sync
@@ -1141,7 +1141,7 @@ define dso_local void @test_op_ignore() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 19
; AIX32-NEXT: and 3, 29, 3
; AIX32-NEXT: stwcx. 3, 0, 19
-; AIX32-NEXT: bne 0, L..BB0_75
+; AIX32-NEXT: bne- 0, L..BB0_75
; AIX32-NEXT: # %bb.76: # %atomicrmw.end
; AIX32-NEXT: lwsync
; AIX32-NEXT: li 4, 0
@@ -1252,7 +1252,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 5, 0, 6
; CHECK-NEXT: addi 7, 5, 11
; CHECK-NEXT: stbcx. 7, 0, 6
-; CHECK-NEXT: bne 0, .LBB1_1
+; CHECK-NEXT: bne- 0, .LBB1_1
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 5, sc@toc@l(4)
@@ -1264,7 +1264,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 7, 0, 8
; CHECK-NEXT: addi 9, 7, 11
; CHECK-NEXT: stbcx. 9, 0, 8
-; CHECK-NEXT: bne 0, .LBB1_3
+; CHECK-NEXT: bne- 0, .LBB1_3
; CHECK-NEXT: # %bb.4: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 7, uc@toc@l(5)
@@ -1276,7 +1276,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 9, 0, 10
; CHECK-NEXT: addi 11, 9, 11
; CHECK-NEXT: sthcx. 11, 0, 10
-; CHECK-NEXT: bne 0, .LBB1_5
+; CHECK-NEXT: bne- 0, .LBB1_5
; CHECK-NEXT: # %bb.6: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sth 9, ss@toc@l(7)
@@ -1288,7 +1288,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 11, 0, 0
; CHECK-NEXT: addi 12, 11, 11
; CHECK-NEXT: sthcx. 12, 0, 0
-; CHECK-NEXT: bne 0, .LBB1_7
+; CHECK-NEXT: bne- 0, .LBB1_7
; CHECK-NEXT: # %bb.8: # %entry
; CHECK-NEXT: addis 12, 2, si@toc@ha
; CHECK-NEXT: lwsync
@@ -1300,7 +1300,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 11, 0, 29
; CHECK-NEXT: addi 30, 11, 11
; CHECK-NEXT: stwcx. 30, 0, 29
-; CHECK-NEXT: bne 0, .LBB1_9
+; CHECK-NEXT: bne- 0, .LBB1_9
; CHECK-NEXT: # %bb.10: # %entry
; CHECK-NEXT: addis 30, 2, ui@toc@ha
; CHECK-NEXT: lwsync
@@ -1312,7 +1312,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 11, 0, 27
; CHECK-NEXT: addi 28, 11, 11
; CHECK-NEXT: stwcx. 28, 0, 27
-; CHECK-NEXT: bne 0, .LBB1_11
+; CHECK-NEXT: bne- 0, .LBB1_11
; CHECK-NEXT: # %bb.12: # %entry
; CHECK-NEXT: addis 28, 2, sll@toc@ha
; CHECK-NEXT: lwsync
@@ -1325,7 +1325,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 26, 0, 25
; CHECK-NEXT: addi 24, 26, 11
; CHECK-NEXT: stdcx. 24, 0, 25
-; CHECK-NEXT: bne 0, .LBB1_13
+; CHECK-NEXT: bne- 0, .LBB1_13
; CHECK-NEXT: # %bb.14: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 26, sll@toc@l(28)
@@ -1337,7 +1337,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 23, 0, 24
; CHECK-NEXT: addi 22, 23, 11
; CHECK-NEXT: stdcx. 22, 0, 24
-; CHECK-NEXT: bne 0, .LBB1_15
+; CHECK-NEXT: bne- 0, .LBB1_15
; CHECK-NEXT: # %bb.16: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 23, ull@toc@l(26)
@@ -1347,7 +1347,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 23, 0, 6
; CHECK-NEXT: sub 22, 23, 3
; CHECK-NEXT: stbcx. 22, 0, 6
-; CHECK-NEXT: bne 0, .LBB1_17
+; CHECK-NEXT: bne- 0, .LBB1_17
; CHECK-NEXT: # %bb.18: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 23, sc@toc@l(4)
@@ -1357,7 +1357,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 23, 0, 8
; CHECK-NEXT: sub 22, 23, 3
; CHECK-NEXT: stbcx. 22, 0, 8
-; CHECK-NEXT: bne 0, .LBB1_19
+; CHECK-NEXT: bne- 0, .LBB1_19
; CHECK-NEXT: # %bb.20: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 23, uc@toc@l(5)
@@ -1367,7 +1367,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 23, 0, 10
; CHECK-NEXT: sub 22, 23, 3
; CHECK-NEXT: sthcx. 22, 0, 10
-; CHECK-NEXT: bne 0, .LBB1_21
+; CHECK-NEXT: bne- 0, .LBB1_21
; CHECK-NEXT: # %bb.22: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sth 23, ss@toc@l(7)
@@ -1377,7 +1377,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 23, 0, 0
; CHECK-NEXT: sub 22, 23, 3
; CHECK-NEXT: sthcx. 22, 0, 0
-; CHECK-NEXT: bne 0, .LBB1_23
+; CHECK-NEXT: bne- 0, .LBB1_23
; CHECK-NEXT: # %bb.24: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sth 23, us@toc@l(9)
@@ -1387,7 +1387,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 23, 0, 29
; CHECK-NEXT: sub 22, 23, 3
; CHECK-NEXT: stwcx. 22, 0, 29
-; CHECK-NEXT: bne 0, .LBB1_25
+; CHECK-NEXT: bne- 0, .LBB1_25
; CHECK-NEXT: # %bb.26: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw 23, si@toc@l(12)
@@ -1397,7 +1397,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 23, 0, 27
; CHECK-NEXT: sub 22, 23, 3
; CHECK-NEXT: stwcx. 22, 0, 27
-; CHECK-NEXT: bne 0, .LBB1_27
+; CHECK-NEXT: bne- 0, .LBB1_27
; CHECK-NEXT: # %bb.28: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw 23, ui@toc@l(30)
@@ -1407,7 +1407,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 23, 0, 25
; CHECK-NEXT: sub 22, 23, 11
; CHECK-NEXT: stdcx. 22, 0, 25
-; CHECK-NEXT: bne 0, .LBB1_29
+; CHECK-NEXT: bne- 0, .LBB1_29
; CHECK-NEXT: # %bb.30: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 23, sll@toc@l(28)
@@ -1417,7 +1417,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 23, 0, 24
; CHECK-NEXT: sub 22, 23, 11
; CHECK-NEXT: stdcx. 22, 0, 24
-; CHECK-NEXT: bne 0, .LBB1_31
+; CHECK-NEXT: bne- 0, .LBB1_31
; CHECK-NEXT: # %bb.32: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 23, ull@toc@l(26)
@@ -1427,7 +1427,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 23, 0, 6
; CHECK-NEXT: ori 22, 23, 11
; CHECK-NEXT: stbcx. 22, 0, 6
-; CHECK-NEXT: bne 0, .LBB1_33
+; CHECK-NEXT: bne- 0, .LBB1_33
; CHECK-NEXT: # %bb.34: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 23, sc@toc@l(4)
@@ -1437,7 +1437,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 23, 0, 8
; CHECK-NEXT: ori 22, 23, 11
; CHECK-NEXT: stbcx. 22, 0, 8
-; CHECK-NEXT: bne 0, .LBB1_35
+; CHECK-NEXT: bne- 0, .LBB1_35
; CHECK-NEXT: # %bb.36: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 23, uc@toc@l(5)
@@ -1447,7 +1447,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 23, 0, 10
; CHECK-NEXT: ori 22, 23, 11
; CHECK-NEXT: sthcx. 22, 0, 10
-; CHECK-NEXT: bne 0, .LBB1_37
+; CHECK-NEXT: bne- 0, .LBB1_37
; CHECK-NEXT: # %bb.38: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sth 23, ss@toc@l(7)
@@ -1457,7 +1457,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 23, 0, 0
; CHECK-NEXT: ori 22, 23, 11
; CHECK-NEXT: sthcx. 22, 0, 0
-; CHECK-NEXT: bne 0, .LBB1_39
+; CHECK-NEXT: bne- 0, .LBB1_39
; CHECK-NEXT: # %bb.40: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sth 23, us@toc@l(9)
@@ -1467,7 +1467,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 23, 0, 29
; CHECK-NEXT: ori 22, 23, 11
; CHECK-NEXT: stwcx. 22, 0, 29
-; CHECK-NEXT: bne 0, .LBB1_41
+; CHECK-NEXT: bne- 0, .LBB1_41
; CHECK-NEXT: # %bb.42: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw 23, si@toc@l(12)
@@ -1477,7 +1477,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 23, 0, 27
; CHECK-NEXT: ori 22, 23, 11
; CHECK-NEXT: stwcx. 22, 0, 27
-; CHECK-NEXT: bne 0, .LBB1_43
+; CHECK-NEXT: bne- 0, .LBB1_43
; CHECK-NEXT: # %bb.44: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw 23, ui@toc@l(30)
@@ -1487,7 +1487,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 23, 0, 25
; CHECK-NEXT: ori 22, 23, 11
; CHECK-NEXT: stdcx. 22, 0, 25
-; CHECK-NEXT: bne 0, .LBB1_45
+; CHECK-NEXT: bne- 0, .LBB1_45
; CHECK-NEXT: # %bb.46: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 23, sll@toc@l(28)
@@ -1497,7 +1497,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 23, 0, 24
; CHECK-NEXT: ori 22, 23, 11
; CHECK-NEXT: stdcx. 22, 0, 24
-; CHECK-NEXT: bne 0, .LBB1_47
+; CHECK-NEXT: bne- 0, .LBB1_47
; CHECK-NEXT: # %bb.48: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 23, ull@toc@l(26)
@@ -1507,7 +1507,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 23, 0, 6
; CHECK-NEXT: xori 22, 23, 11
; CHECK-NEXT: stbcx. 22, 0, 6
-; CHECK-NEXT: bne 0, .LBB1_49
+; CHECK-NEXT: bne- 0, .LBB1_49
; CHECK-NEXT: # %bb.50: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 23, sc@toc@l(4)
@@ -1517,7 +1517,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 23, 0, 8
; CHECK-NEXT: xori 22, 23, 11
; CHECK-NEXT: stbcx. 22, 0, 8
-; CHECK-NEXT: bne 0, .LBB1_51
+; CHECK-NEXT: bne- 0, .LBB1_51
; CHECK-NEXT: # %bb.52: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 23, uc@toc@l(5)
@@ -1527,7 +1527,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 23, 0, 10
; CHECK-NEXT: xori 22, 23, 11
; CHECK-NEXT: sthcx. 22, 0, 10
-; CHECK-NEXT: bne 0, .LBB1_53
+; CHECK-NEXT: bne- 0, .LBB1_53
; CHECK-NEXT: # %bb.54: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sth 23, ss@toc@l(7)
@@ -1537,7 +1537,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 23, 0, 0
; CHECK-NEXT: xori 22, 23, 11
; CHECK-NEXT: sthcx. 22, 0, 0
-; CHECK-NEXT: bne 0, .LBB1_55
+; CHECK-NEXT: bne- 0, .LBB1_55
; CHECK-NEXT: # %bb.56: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sth 23, us@toc@l(9)
@@ -1547,7 +1547,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 23, 0, 29
; CHECK-NEXT: xori 22, 23, 11
; CHECK-NEXT: stwcx. 22, 0, 29
-; CHECK-NEXT: bne 0, .LBB1_57
+; CHECK-NEXT: bne- 0, .LBB1_57
; CHECK-NEXT: # %bb.58: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw 23, si@toc@l(12)
@@ -1557,7 +1557,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 23, 0, 27
; CHECK-NEXT: xori 22, 23, 11
; CHECK-NEXT: stwcx. 22, 0, 27
-; CHECK-NEXT: bne 0, .LBB1_59
+; CHECK-NEXT: bne- 0, .LBB1_59
; CHECK-NEXT: # %bb.60: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw 23, ui@toc@l(30)
@@ -1567,7 +1567,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 23, 0, 25
; CHECK-NEXT: xori 22, 23, 11
; CHECK-NEXT: stdcx. 22, 0, 25
-; CHECK-NEXT: bne 0, .LBB1_61
+; CHECK-NEXT: bne- 0, .LBB1_61
; CHECK-NEXT: # %bb.62: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 23, sll@toc@l(28)
@@ -1577,7 +1577,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 23, 0, 24
; CHECK-NEXT: xori 22, 23, 11
; CHECK-NEXT: stdcx. 22, 0, 24
-; CHECK-NEXT: bne 0, .LBB1_63
+; CHECK-NEXT: bne- 0, .LBB1_63
; CHECK-NEXT: # %bb.64: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 23, ull@toc@l(26)
@@ -1587,7 +1587,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 23, 0, 6
; CHECK-NEXT: nand 22, 3, 23
; CHECK-NEXT: stbcx. 22, 0, 6
-; CHECK-NEXT: bne 0, .LBB1_65
+; CHECK-NEXT: bne- 0, .LBB1_65
; CHECK-NEXT: # %bb.66: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 23, sc@toc@l(4)
@@ -1597,7 +1597,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 23, 0, 8
; CHECK-NEXT: nand 22, 3, 23
; CHECK-NEXT: stbcx. 22, 0, 8
-; CHECK-NEXT: bne 0, .LBB1_67
+; CHECK-NEXT: bne- 0, .LBB1_67
; CHECK-NEXT: # %bb.68: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 23, uc@toc@l(5)
@@ -1607,7 +1607,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 23, 0, 10
; CHECK-NEXT: nand 22, 3, 23
; CHECK-NEXT: sthcx. 22, 0, 10
-; CHECK-NEXT: bne 0, .LBB1_69
+; CHECK-NEXT: bne- 0, .LBB1_69
; CHECK-NEXT: # %bb.70: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sth 23, ss@toc@l(7)
@@ -1617,7 +1617,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 23, 0, 0
; CHECK-NEXT: nand 22, 3, 23
; CHECK-NEXT: sthcx. 22, 0, 0
-; CHECK-NEXT: bne 0, .LBB1_71
+; CHECK-NEXT: bne- 0, .LBB1_71
; CHECK-NEXT: # %bb.72: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sth 23, us@toc@l(9)
@@ -1627,7 +1627,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 23, 0, 29
; CHECK-NEXT: nand 22, 3, 23
; CHECK-NEXT: stwcx. 22, 0, 29
-; CHECK-NEXT: bne 0, .LBB1_73
+; CHECK-NEXT: bne- 0, .LBB1_73
; CHECK-NEXT: # %bb.74: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw 23, si@toc@l(12)
@@ -1637,7 +1637,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 23, 0, 27
; CHECK-NEXT: nand 22, 3, 23
; CHECK-NEXT: stwcx. 22, 0, 27
-; CHECK-NEXT: bne 0, .LBB1_75
+; CHECK-NEXT: bne- 0, .LBB1_75
; CHECK-NEXT: # %bb.76: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw 23, ui@toc@l(30)
@@ -1647,7 +1647,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 23, 0, 25
; CHECK-NEXT: nand 22, 11, 23
; CHECK-NEXT: stdcx. 22, 0, 25
-; CHECK-NEXT: bne 0, .LBB1_77
+; CHECK-NEXT: bne- 0, .LBB1_77
; CHECK-NEXT: # %bb.78: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 23, sll@toc@l(28)
@@ -1657,7 +1657,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 23, 0, 24
; CHECK-NEXT: nand 22, 11, 23
; CHECK-NEXT: stdcx. 22, 0, 24
-; CHECK-NEXT: bne 0, .LBB1_79
+; CHECK-NEXT: bne- 0, .LBB1_79
; CHECK-NEXT: # %bb.80: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 23, ull@toc@l(26)
@@ -1667,7 +1667,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 23, 0, 6
; CHECK-NEXT: and 22, 3, 23
; CHECK-NEXT: stbcx. 22, 0, 6
-; CHECK-NEXT: bne 0, .LBB1_81
+; CHECK-NEXT: bne- 0, .LBB1_81
; CHECK-NEXT: # %bb.82: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 23, sc@toc@l(4)
@@ -1677,7 +1677,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 4, 0, 8
; CHECK-NEXT: and 6, 3, 4
; CHECK-NEXT: stbcx. 6, 0, 8
-; CHECK-NEXT: bne 0, .LBB1_83
+; CHECK-NEXT: bne- 0, .LBB1_83
; CHECK-NEXT: # %bb.84: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 4, uc@toc@l(5)
@@ -1687,7 +1687,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 4, 0, 10
; CHECK-NEXT: and 5, 3, 4
; CHECK-NEXT: sthcx. 5, 0, 10
-; CHECK-NEXT: bne 0, .LBB1_85
+; CHECK-NEXT: bne- 0, .LBB1_85
; CHECK-NEXT: # %bb.86: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sth 4, ss@toc@l(7)
@@ -1697,7 +1697,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 4, 0, 0
; CHECK-NEXT: and 5, 3, 4
; CHECK-NEXT: sthcx. 5, 0, 0
-; CHECK-NEXT: bne 0, .LBB1_87
+; CHECK-NEXT: bne- 0, .LBB1_87
; CHECK-NEXT: # %bb.88: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sth 4, us@toc@l(9)
@@ -1707,7 +1707,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 4, 0, 29
; CHECK-NEXT: and 5, 3, 4
; CHECK-NEXT: stwcx. 5, 0, 29
-; CHECK-NEXT: bne 0, .LBB1_89
+; CHECK-NEXT: bne- 0, .LBB1_89
; CHECK-NEXT: # %bb.90: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw 4, si@toc@l(12)
@@ -1717,7 +1717,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 4, 0, 27
; CHECK-NEXT: and 5, 3, 4
; CHECK-NEXT: stwcx. 5, 0, 27
-; CHECK-NEXT: bne 0, .LBB1_91
+; CHECK-NEXT: bne- 0, .LBB1_91
; CHECK-NEXT: # %bb.92: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw 4, ui@toc@l(30)
@@ -1727,7 +1727,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 3, 0, 25
; CHECK-NEXT: and 4, 11, 3
; CHECK-NEXT: stdcx. 4, 0, 25
-; CHECK-NEXT: bne 0, .LBB1_93
+; CHECK-NEXT: bne- 0, .LBB1_93
; CHECK-NEXT: # %bb.94: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 3, sll@toc@l(28)
@@ -1737,7 +1737,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 3, 0, 24
; CHECK-NEXT: and 4, 11, 3
; CHECK-NEXT: stdcx. 4, 0, 24
-; CHECK-NEXT: bne 0, .LBB1_95
+; CHECK-NEXT: bne- 0, .LBB1_95
; CHECK-NEXT: # %bb.96: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 3, ull@toc@l(26)
@@ -1794,7 +1794,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 25
-; AIX32-NEXT: bne 0, L..BB1_1
+; AIX32-NEXT: bne- 0, L..BB1_1
; AIX32-NEXT: # %bb.2: # %entry
; AIX32-NEXT: srw 3, 4, 26
; AIX32-NEXT: lwsync
@@ -1817,7 +1817,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 21
-; AIX32-NEXT: bne 0, L..BB1_3
+; AIX32-NEXT: bne- 0, L..BB1_3
; AIX32-NEXT: # %bb.4: # %entry
; AIX32-NEXT: srw 3, 4, 22
; AIX32-NEXT: lwz 23, L..C2(2) # @ss
@@ -1840,7 +1840,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 17
-; AIX32-NEXT: bne 0, L..BB1_5
+; AIX32-NEXT: bne- 0, L..BB1_5
; AIX32-NEXT: # %bb.6: # %entry
; AIX32-NEXT: srw 3, 4, 18
; AIX32-NEXT: lwz 19, L..C3(2) # @us
@@ -1863,7 +1863,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 14
-; AIX32-NEXT: bne 0, L..BB1_7
+; AIX32-NEXT: bne- 0, L..BB1_7
; AIX32-NEXT: # %bb.8: # %entry
; AIX32-NEXT: srw 3, 4, 15
; AIX32-NEXT: lwsync
@@ -1876,7 +1876,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 29
; AIX32-NEXT: addi 4, 3, 11
; AIX32-NEXT: stwcx. 4, 0, 29
-; AIX32-NEXT: bne 0, L..BB1_9
+; AIX32-NEXT: bne- 0, L..BB1_9
; AIX32-NEXT: # %bb.10: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: stw 3, 0(29)
@@ -1887,7 +1887,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 28
; AIX32-NEXT: addi 4, 3, 11
; AIX32-NEXT: stwcx. 4, 0, 28
-; AIX32-NEXT: bne 0, L..BB1_11
+; AIX32-NEXT: bne- 0, L..BB1_11
; AIX32-NEXT: # %bb.12: # %entry
; AIX32-NEXT: lwz 31, L..C6(2) # @sll
; AIX32-NEXT: lwsync
@@ -1920,7 +1920,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 25
-; AIX32-NEXT: bne 0, L..BB1_13
+; AIX32-NEXT: bne- 0, L..BB1_13
; AIX32-NEXT: # %bb.14: # %entry
; AIX32-NEXT: srw 3, 4, 26
; AIX32-NEXT: lwsync
@@ -1938,7 +1938,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 21
-; AIX32-NEXT: bne 0, L..BB1_15
+; AIX32-NEXT: bne- 0, L..BB1_15
; AIX32-NEXT: # %bb.16: # %entry
; AIX32-NEXT: srw 3, 4, 22
; AIX32-NEXT: lwsync
@@ -1957,7 +1957,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 17
-; AIX32-NEXT: bne 0, L..BB1_17
+; AIX32-NEXT: bne- 0, L..BB1_17
; AIX32-NEXT: # %bb.18: # %entry
; AIX32-NEXT: srw 3, 4, 18
; AIX32-NEXT: lwsync
@@ -1975,7 +1975,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 14
-; AIX32-NEXT: bne 0, L..BB1_19
+; AIX32-NEXT: bne- 0, L..BB1_19
; AIX32-NEXT: # %bb.20: # %entry
; AIX32-NEXT: srw 3, 4, 15
; AIX32-NEXT: lwsync
@@ -1987,7 +1987,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 29
; AIX32-NEXT: sub 4, 3, 7
; AIX32-NEXT: stwcx. 4, 0, 29
-; AIX32-NEXT: bne 0, L..BB1_21
+; AIX32-NEXT: bne- 0, L..BB1_21
; AIX32-NEXT: # %bb.22: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: stw 3, 0(29)
@@ -1997,7 +1997,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 28
; AIX32-NEXT: sub 4, 3, 7
; AIX32-NEXT: stwcx. 4, 0, 28
-; AIX32-NEXT: bne 0, L..BB1_23
+; AIX32-NEXT: bne- 0, L..BB1_23
; AIX32-NEXT: # %bb.24: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: li 4, 0
@@ -2028,7 +2028,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 25
-; AIX32-NEXT: bne 0, L..BB1_25
+; AIX32-NEXT: bne- 0, L..BB1_25
; AIX32-NEXT: # %bb.26: # %entry
; AIX32-NEXT: srw 3, 4, 26
; AIX32-NEXT: lwsync
@@ -2046,7 +2046,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 21
-; AIX32-NEXT: bne 0, L..BB1_27
+; AIX32-NEXT: bne- 0, L..BB1_27
; AIX32-NEXT: # %bb.28: # %entry
; AIX32-NEXT: srw 3, 4, 22
; AIX32-NEXT: lwsync
@@ -2064,7 +2064,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 17
-; AIX32-NEXT: bne 0, L..BB1_29
+; AIX32-NEXT: bne- 0, L..BB1_29
; AIX32-NEXT: # %bb.30: # %entry
; AIX32-NEXT: srw 3, 4, 18
; AIX32-NEXT: lwsync
@@ -2082,7 +2082,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 14
-; AIX32-NEXT: bne 0, L..BB1_31
+; AIX32-NEXT: bne- 0, L..BB1_31
; AIX32-NEXT: # %bb.32: # %entry
; AIX32-NEXT: srw 3, 4, 15
; AIX32-NEXT: lwsync
@@ -2094,7 +2094,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 29
; AIX32-NEXT: ori 4, 3, 11
; AIX32-NEXT: stwcx. 4, 0, 29
-; AIX32-NEXT: bne 0, L..BB1_33
+; AIX32-NEXT: bne- 0, L..BB1_33
; AIX32-NEXT: # %bb.34: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: stw 3, 0(29)
@@ -2104,7 +2104,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 28
; AIX32-NEXT: ori 4, 3, 11
; AIX32-NEXT: stwcx. 4, 0, 28
-; AIX32-NEXT: bne 0, L..BB1_35
+; AIX32-NEXT: bne- 0, L..BB1_35
; AIX32-NEXT: # %bb.36: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: li 4, 0
@@ -2135,7 +2135,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 25
-; AIX32-NEXT: bne 0, L..BB1_37
+; AIX32-NEXT: bne- 0, L..BB1_37
; AIX32-NEXT: # %bb.38: # %entry
; AIX32-NEXT: srw 3, 4, 26
; AIX32-NEXT: lwsync
@@ -2153,7 +2153,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 21
-; AIX32-NEXT: bne 0, L..BB1_39
+; AIX32-NEXT: bne- 0, L..BB1_39
; AIX32-NEXT: # %bb.40: # %entry
; AIX32-NEXT: srw 3, 4, 22
; AIX32-NEXT: lwsync
@@ -2171,7 +2171,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 17
-; AIX32-NEXT: bne 0, L..BB1_41
+; AIX32-NEXT: bne- 0, L..BB1_41
; AIX32-NEXT: # %bb.42: # %entry
; AIX32-NEXT: srw 3, 4, 18
; AIX32-NEXT: lwsync
@@ -2189,7 +2189,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 14
-; AIX32-NEXT: bne 0, L..BB1_43
+; AIX32-NEXT: bne- 0, L..BB1_43
; AIX32-NEXT: # %bb.44: # %entry
; AIX32-NEXT: srw 3, 4, 15
; AIX32-NEXT: lwsync
@@ -2201,7 +2201,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 29
; AIX32-NEXT: xori 4, 3, 11
; AIX32-NEXT: stwcx. 4, 0, 29
-; AIX32-NEXT: bne 0, L..BB1_45
+; AIX32-NEXT: bne- 0, L..BB1_45
; AIX32-NEXT: # %bb.46: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: stw 3, 0(29)
@@ -2211,7 +2211,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 28
; AIX32-NEXT: xori 4, 3, 11
; AIX32-NEXT: stwcx. 4, 0, 28
-; AIX32-NEXT: bne 0, L..BB1_47
+; AIX32-NEXT: bne- 0, L..BB1_47
; AIX32-NEXT: # %bb.48: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: li 4, 0
@@ -2242,7 +2242,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 25
-; AIX32-NEXT: bne 0, L..BB1_49
+; AIX32-NEXT: bne- 0, L..BB1_49
; AIX32-NEXT: # %bb.50: # %entry
; AIX32-NEXT: srw 3, 4, 26
; AIX32-NEXT: lwsync
@@ -2261,7 +2261,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 21
-; AIX32-NEXT: bne 0, L..BB1_51
+; AIX32-NEXT: bne- 0, L..BB1_51
; AIX32-NEXT: # %bb.52: # %entry
; AIX32-NEXT: srw 3, 4, 22
; AIX32-NEXT: lwsync
@@ -2279,7 +2279,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 17
-; AIX32-NEXT: bne 0, L..BB1_53
+; AIX32-NEXT: bne- 0, L..BB1_53
; AIX32-NEXT: # %bb.54: # %entry
; AIX32-NEXT: srw 3, 4, 18
; AIX32-NEXT: lwsync
@@ -2297,7 +2297,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 14
-; AIX32-NEXT: bne 0, L..BB1_55
+; AIX32-NEXT: bne- 0, L..BB1_55
; AIX32-NEXT: # %bb.56: # %entry
; AIX32-NEXT: srw 3, 4, 15
; AIX32-NEXT: lwsync
@@ -2309,7 +2309,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 29
; AIX32-NEXT: nand 4, 7, 3
; AIX32-NEXT: stwcx. 4, 0, 29
-; AIX32-NEXT: bne 0, L..BB1_57
+; AIX32-NEXT: bne- 0, L..BB1_57
; AIX32-NEXT: # %bb.58: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: stw 3, 0(29)
@@ -2319,7 +2319,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 28
; AIX32-NEXT: nand 4, 7, 3
; AIX32-NEXT: stwcx. 4, 0, 28
-; AIX32-NEXT: bne 0, L..BB1_59
+; AIX32-NEXT: bne- 0, L..BB1_59
; AIX32-NEXT: # %bb.60: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: li 4, 0
@@ -2350,7 +2350,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 25
-; AIX32-NEXT: bne 0, L..BB1_61
+; AIX32-NEXT: bne- 0, L..BB1_61
; AIX32-NEXT: # %bb.62: # %entry
; AIX32-NEXT: srw 3, 4, 26
; AIX32-NEXT: lwsync
@@ -2368,7 +2368,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 21
-; AIX32-NEXT: bne 0, L..BB1_63
+; AIX32-NEXT: bne- 0, L..BB1_63
; AIX32-NEXT: # %bb.64: # %entry
; AIX32-NEXT: srw 3, 4, 22
; AIX32-NEXT: lwsync
@@ -2387,7 +2387,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 17
-; AIX32-NEXT: bne 0, L..BB1_65
+; AIX32-NEXT: bne- 0, L..BB1_65
; AIX32-NEXT: # %bb.66: # %entry
; AIX32-NEXT: srw 3, 4, 18
; AIX32-NEXT: lwsync
@@ -2405,7 +2405,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: and 5, 5, 3
; AIX32-NEXT: or 5, 5, 6
; AIX32-NEXT: stwcx. 5, 0, 14
-; AIX32-NEXT: bne 0, L..BB1_67
+; AIX32-NEXT: bne- 0, L..BB1_67
; AIX32-NEXT: # %bb.68: # %entry
; AIX32-NEXT: srw 3, 4, 15
; AIX32-NEXT: lwsync
@@ -2417,7 +2417,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 29
; AIX32-NEXT: and 4, 7, 3
; AIX32-NEXT: stwcx. 4, 0, 29
-; AIX32-NEXT: bne 0, L..BB1_69
+; AIX32-NEXT: bne- 0, L..BB1_69
; AIX32-NEXT: # %bb.70: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: stw 3, 0(29)
@@ -2427,7 +2427,7 @@ define dso_local void @test_fetch_and_op() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 3, 0, 28
; AIX32-NEXT: and 4, 7, 3
; AIX32-NEXT: stwcx. 4, 0, 28
-; AIX32-NEXT: bne 0, L..BB1_71
+; AIX32-NEXT: bne- 0, L..BB1_71
; AIX32-NEXT: # %bb.72: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: li 4, 0
@@ -2599,7 +2599,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 8, 0, 7
; CHECK-NEXT: add 8, 6, 8
; CHECK-NEXT: stbcx. 8, 0, 7
-; CHECK-NEXT: bne 0, .LBB2_1
+; CHECK-NEXT: bne- 0, .LBB2_1
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 8, sc@toc@l(5)
@@ -2610,7 +2610,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 8, 0, 4
; CHECK-NEXT: add 8, 6, 8
; CHECK-NEXT: stbcx. 8, 0, 4
-; CHECK-NEXT: bne 0, .LBB2_3
+; CHECK-NEXT: bne- 0, .LBB2_3
; CHECK-NEXT: # %bb.4: # %entry
; CHECK-NEXT: addis 6, 2, ss@toc@ha
; CHECK-NEXT: lwsync
@@ -2623,7 +2623,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 10, 0, 9
; CHECK-NEXT: add 10, 8, 10
; CHECK-NEXT: sthcx. 10, 0, 9
-; CHECK-NEXT: bne 0, .LBB2_5
+; CHECK-NEXT: bne- 0, .LBB2_5
; CHECK-NEXT: # %bb.6: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: addis 8, 2, us@toc@ha
@@ -2636,7 +2636,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 12, 0, 11
; CHECK-NEXT: add 12, 10, 12
; CHECK-NEXT: sthcx. 12, 0, 11
-; CHECK-NEXT: bne 0, .LBB2_7
+; CHECK-NEXT: bne- 0, .LBB2_7
; CHECK-NEXT: # %bb.8: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: addis 10, 2, si@toc@ha
@@ -2649,7 +2649,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 30, 0, 0
; CHECK-NEXT: add 30, 12, 30
; CHECK-NEXT: stwcx. 30, 0, 0
-; CHECK-NEXT: bne 0, .LBB2_9
+; CHECK-NEXT: bne- 0, .LBB2_9
; CHECK-NEXT: # %bb.10: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: addis 12, 2, ui@toc@ha
@@ -2662,7 +2662,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 28, 0, 29
; CHECK-NEXT: add 28, 30, 28
; CHECK-NEXT: stwcx. 28, 0, 29
-; CHECK-NEXT: bne 0, .LBB2_11
+; CHECK-NEXT: bne- 0, .LBB2_11
; CHECK-NEXT: # %bb.12: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: addis 30, 2, sll@toc@ha
@@ -2675,7 +2675,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 26, 0, 27
; CHECK-NEXT: add 26, 28, 26
; CHECK-NEXT: stdcx. 26, 0, 27
-; CHECK-NEXT: bne 0, .LBB2_13
+; CHECK-NEXT: bne- 0, .LBB2_13
; CHECK-NEXT: # %bb.14: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: addis 28, 2, ull@toc@ha
@@ -2688,7 +2688,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 24, 0, 26
; CHECK-NEXT: add 24, 25, 24
; CHECK-NEXT: stdcx. 24, 0, 26
-; CHECK-NEXT: bne 0, .LBB2_15
+; CHECK-NEXT: bne- 0, .LBB2_15
; CHECK-NEXT: # %bb.16: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 24, ull@toc@l(28)
@@ -2699,7 +2699,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 24, 0, 7
; CHECK-NEXT: sub 24, 24, 25
; CHECK-NEXT: stbcx. 24, 0, 7
-; CHECK-NEXT: bne 0, .LBB2_17
+; CHECK-NEXT: bne- 0, .LBB2_17
; CHECK-NEXT: # %bb.18: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 24, sc@toc@l(5)
@@ -2710,7 +2710,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 24, 0, 4
; CHECK-NEXT: sub 24, 24, 25
; CHECK-NEXT: stbcx. 24, 0, 4
-; CHECK-NEXT: bne 0, .LBB2_19
+; CHECK-NEXT: bne- 0, .LBB2_19
; CHECK-NEXT: # %bb.20: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 24, uc@toc@l(3)
@@ -2721,7 +2721,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 24, 0, 9
; CHECK-NEXT: sub 24, 24, 25
; CHECK-NEXT: sthcx. 24, 0, 9
-; CHECK-NEXT: bne 0, .LBB2_21
+; CHECK-NEXT: bne- 0, .LBB2_21
; CHECK-NEXT: # %bb.22: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sth 24, ss@toc@l(6)
@@ -2732,7 +2732,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 24, 0, 11
; CHECK-NEXT: sub 24, 24, 25
; CHECK-NEXT: sthcx. 24, 0, 11
-; CHECK-NEXT: bne 0, .LBB2_23
+; CHECK-NEXT: bne- 0, .LBB2_23
; CHECK-NEXT: # %bb.24: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sth 24, us@toc@l(8)
@@ -2743,7 +2743,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 24, 0, 0
; CHECK-NEXT: sub 24, 24, 25
; CHECK-NEXT: stwcx. 24, 0, 0
-; CHECK-NEXT: bne 0, .LBB2_25
+; CHECK-NEXT: bne- 0, .LBB2_25
; CHECK-NEXT: # %bb.26: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw 24, si@toc@l(10)
@@ -2754,7 +2754,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 24, 0, 29
; CHECK-NEXT: sub 24, 24, 25
; CHECK-NEXT: stwcx. 24, 0, 29
-; CHECK-NEXT: bne 0, .LBB2_27
+; CHECK-NEXT: bne- 0, .LBB2_27
; CHECK-NEXT: # %bb.28: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw 24, ui@toc@l(12)
@@ -2765,7 +2765,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 24, 0, 27
; CHECK-NEXT: sub 24, 24, 25
; CHECK-NEXT: stdcx. 24, 0, 27
-; CHECK-NEXT: bne 0, .LBB2_29
+; CHECK-NEXT: bne- 0, .LBB2_29
; CHECK-NEXT: # %bb.30: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 24, sll@toc@l(30)
@@ -2776,7 +2776,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 24, 0, 26
; CHECK-NEXT: sub 24, 24, 25
; CHECK-NEXT: stdcx. 24, 0, 26
-; CHECK-NEXT: bne 0, .LBB2_31
+; CHECK-NEXT: bne- 0, .LBB2_31
; CHECK-NEXT: # %bb.32: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 24, ull@toc@l(28)
@@ -2787,7 +2787,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 24, 0, 7
; CHECK-NEXT: or 24, 25, 24
; CHECK-NEXT: stbcx. 24, 0, 7
-; CHECK-NEXT: bne 0, .LBB2_33
+; CHECK-NEXT: bne- 0, .LBB2_33
; CHECK-NEXT: # %bb.34: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 24, sc@toc@l(5)
@@ -2798,7 +2798,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 24, 0, 4
; CHECK-NEXT: or 24, 25, 24
; CHECK-NEXT: stbcx. 24, 0, 4
-; CHECK-NEXT: bne 0, .LBB2_35
+; CHECK-NEXT: bne- 0, .LBB2_35
; CHECK-NEXT: # %bb.36: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 24, uc@toc@l(3)
@@ -2809,7 +2809,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 24, 0, 9
; CHECK-NEXT: or 24, 25, 24
; CHECK-NEXT: sthcx. 24, 0, 9
-; CHECK-NEXT: bne 0, .LBB2_37
+; CHECK-NEXT: bne- 0, .LBB2_37
; CHECK-NEXT: # %bb.38: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sth 24, ss@toc@l(6)
@@ -2820,7 +2820,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 24, 0, 11
; CHECK-NEXT: or 24, 25, 24
; CHECK-NEXT: sthcx. 24, 0, 11
-; CHECK-NEXT: bne 0, .LBB2_39
+; CHECK-NEXT: bne- 0, .LBB2_39
; CHECK-NEXT: # %bb.40: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sth 24, us@toc@l(8)
@@ -2831,7 +2831,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 24, 0, 0
; CHECK-NEXT: or 24, 25, 24
; CHECK-NEXT: stwcx. 24, 0, 0
-; CHECK-NEXT: bne 0, .LBB2_41
+; CHECK-NEXT: bne- 0, .LBB2_41
; CHECK-NEXT: # %bb.42: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw 24, si@toc@l(10)
@@ -2842,7 +2842,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 24, 0, 29
; CHECK-NEXT: or 24, 25, 24
; CHECK-NEXT: stwcx. 24, 0, 29
-; CHECK-NEXT: bne 0, .LBB2_43
+; CHECK-NEXT: bne- 0, .LBB2_43
; CHECK-NEXT: # %bb.44: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw 24, ui@toc@l(12)
@@ -2853,7 +2853,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 24, 0, 27
; CHECK-NEXT: or 24, 25, 24
; CHECK-NEXT: stdcx. 24, 0, 27
-; CHECK-NEXT: bne 0, .LBB2_45
+; CHECK-NEXT: bne- 0, .LBB2_45
; CHECK-NEXT: # %bb.46: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 24, sll@toc@l(30)
@@ -2864,7 +2864,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 24, 0, 26
; CHECK-NEXT: or 24, 25, 24
; CHECK-NEXT: stdcx. 24, 0, 26
-; CHECK-NEXT: bne 0, .LBB2_47
+; CHECK-NEXT: bne- 0, .LBB2_47
; CHECK-NEXT: # %bb.48: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 24, ull@toc@l(28)
@@ -2875,7 +2875,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 24, 0, 7
; CHECK-NEXT: xor 24, 25, 24
; CHECK-NEXT: stbcx. 24, 0, 7
-; CHECK-NEXT: bne 0, .LBB2_49
+; CHECK-NEXT: bne- 0, .LBB2_49
; CHECK-NEXT: # %bb.50: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 24, sc@toc@l(5)
@@ -2886,7 +2886,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 24, 0, 4
; CHECK-NEXT: xor 24, 25, 24
; CHECK-NEXT: stbcx. 24, 0, 4
-; CHECK-NEXT: bne 0, .LBB2_51
+; CHECK-NEXT: bne- 0, .LBB2_51
; CHECK-NEXT: # %bb.52: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 24, uc@toc@l(3)
@@ -2897,7 +2897,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 24, 0, 9
; CHECK-NEXT: xor 24, 25, 24
; CHECK-NEXT: sthcx. 24, 0, 9
-; CHECK-NEXT: bne 0, .LBB2_53
+; CHECK-NEXT: bne- 0, .LBB2_53
; CHECK-NEXT: # %bb.54: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sth 24, ss@toc@l(6)
@@ -2908,7 +2908,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 24, 0, 11
; CHECK-NEXT: xor 24, 25, 24
; CHECK-NEXT: sthcx. 24, 0, 11
-; CHECK-NEXT: bne 0, .LBB2_55
+; CHECK-NEXT: bne- 0, .LBB2_55
; CHECK-NEXT: # %bb.56: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sth 24, us@toc@l(8)
@@ -2919,7 +2919,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 24, 0, 0
; CHECK-NEXT: xor 24, 25, 24
; CHECK-NEXT: stwcx. 24, 0, 0
-; CHECK-NEXT: bne 0, .LBB2_57
+; CHECK-NEXT: bne- 0, .LBB2_57
; CHECK-NEXT: # %bb.58: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw 24, si@toc@l(10)
@@ -2930,7 +2930,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 24, 0, 29
; CHECK-NEXT: xor 24, 25, 24
; CHECK-NEXT: stwcx. 24, 0, 29
-; CHECK-NEXT: bne 0, .LBB2_59
+; CHECK-NEXT: bne- 0, .LBB2_59
; CHECK-NEXT: # %bb.60: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw 24, ui@toc@l(12)
@@ -2941,7 +2941,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 24, 0, 27
; CHECK-NEXT: xor 24, 25, 24
; CHECK-NEXT: stdcx. 24, 0, 27
-; CHECK-NEXT: bne 0, .LBB2_61
+; CHECK-NEXT: bne- 0, .LBB2_61
; CHECK-NEXT: # %bb.62: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 24, sll@toc@l(30)
@@ -2952,7 +2952,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 24, 0, 26
; CHECK-NEXT: xor 24, 25, 24
; CHECK-NEXT: stdcx. 24, 0, 26
-; CHECK-NEXT: bne 0, .LBB2_63
+; CHECK-NEXT: bne- 0, .LBB2_63
; CHECK-NEXT: # %bb.64: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 24, ull@toc@l(28)
@@ -2963,7 +2963,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 24, 0, 7
; CHECK-NEXT: nand 24, 25, 24
; CHECK-NEXT: stbcx. 24, 0, 7
-; CHECK-NEXT: bne 0, .LBB2_65
+; CHECK-NEXT: bne- 0, .LBB2_65
; CHECK-NEXT: # %bb.66: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 24, sc@toc@l(5)
@@ -2974,7 +2974,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 24, 0, 4
; CHECK-NEXT: nand 24, 25, 24
; CHECK-NEXT: stbcx. 24, 0, 4
-; CHECK-NEXT: bne 0, .LBB2_67
+; CHECK-NEXT: bne- 0, .LBB2_67
; CHECK-NEXT: # %bb.68: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 24, uc@toc@l(3)
@@ -2985,7 +2985,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 24, 0, 9
; CHECK-NEXT: nand 24, 25, 24
; CHECK-NEXT: sthcx. 24, 0, 9
-; CHECK-NEXT: bne 0, .LBB2_69
+; CHECK-NEXT: bne- 0, .LBB2_69
; CHECK-NEXT: # %bb.70: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sth 24, ss@toc@l(6)
@@ -2996,7 +2996,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 24, 0, 11
; CHECK-NEXT: nand 24, 25, 24
; CHECK-NEXT: sthcx. 24, 0, 11
-; CHECK-NEXT: bne 0, .LBB2_71
+; CHECK-NEXT: bne- 0, .LBB2_71
; CHECK-NEXT: # %bb.72: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sth 24, us@toc@l(8)
@@ -3007,7 +3007,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 24, 0, 0
; CHECK-NEXT: nand 24, 25, 24
; CHECK-NEXT: stwcx. 24, 0, 0
-; CHECK-NEXT: bne 0, .LBB2_73
+; CHECK-NEXT: bne- 0, .LBB2_73
; CHECK-NEXT: # %bb.74: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw 24, si@toc@l(10)
@@ -3018,7 +3018,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 24, 0, 29
; CHECK-NEXT: nand 24, 25, 24
; CHECK-NEXT: stwcx. 24, 0, 29
-; CHECK-NEXT: bne 0, .LBB2_75
+; CHECK-NEXT: bne- 0, .LBB2_75
; CHECK-NEXT: # %bb.76: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw 24, ui@toc@l(12)
@@ -3029,7 +3029,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 24, 0, 27
; CHECK-NEXT: nand 24, 25, 24
; CHECK-NEXT: stdcx. 24, 0, 27
-; CHECK-NEXT: bne 0, .LBB2_77
+; CHECK-NEXT: bne- 0, .LBB2_77
; CHECK-NEXT: # %bb.78: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 24, sll@toc@l(30)
@@ -3040,7 +3040,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 24, 0, 26
; CHECK-NEXT: nand 24, 25, 24
; CHECK-NEXT: stdcx. 24, 0, 26
-; CHECK-NEXT: bne 0, .LBB2_79
+; CHECK-NEXT: bne- 0, .LBB2_79
; CHECK-NEXT: # %bb.80: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 24, ull@toc@l(28)
@@ -3085,7 +3085,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 24, 0, 7
; CHECK-NEXT: and 24, 25, 24
; CHECK-NEXT: stbcx. 24, 0, 7
-; CHECK-NEXT: bne 0, .LBB2_85
+; CHECK-NEXT: bne- 0, .LBB2_85
; CHECK-NEXT: # %bb.86: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 24, sc@toc@l(5)
@@ -3096,7 +3096,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lbarx 5, 0, 4
; CHECK-NEXT: and 5, 7, 5
; CHECK-NEXT: stbcx. 5, 0, 4
-; CHECK-NEXT: bne 0, .LBB2_87
+; CHECK-NEXT: bne- 0, .LBB2_87
; CHECK-NEXT: # %bb.88: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 5, uc@toc@l(3)
@@ -3106,7 +3106,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 4, 0, 9
; CHECK-NEXT: and 4, 5, 4
; CHECK-NEXT: sthcx. 4, 0, 9
-; CHECK-NEXT: bne 0, .LBB2_89
+; CHECK-NEXT: bne- 0, .LBB2_89
; CHECK-NEXT: # %bb.90: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sth 4, ss@toc@l(6)
@@ -3117,7 +3117,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lharx 5, 0, 11
; CHECK-NEXT: and 5, 4, 5
; CHECK-NEXT: sthcx. 5, 0, 11
-; CHECK-NEXT: bne 0, .LBB2_91
+; CHECK-NEXT: bne- 0, .LBB2_91
; CHECK-NEXT: # %bb.92: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sth 5, us@toc@l(8)
@@ -3128,7 +3128,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 5, 0, 0
; CHECK-NEXT: and 5, 4, 5
; CHECK-NEXT: stwcx. 5, 0, 0
-; CHECK-NEXT: bne 0, .LBB2_93
+; CHECK-NEXT: bne- 0, .LBB2_93
; CHECK-NEXT: # %bb.94: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw 5, si@toc@l(10)
@@ -3139,7 +3139,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: lwarx 5, 0, 29
; CHECK-NEXT: and 5, 4, 5
; CHECK-NEXT: stwcx. 5, 0, 29
-; CHECK-NEXT: bne 0, .LBB2_95
+; CHECK-NEXT: bne- 0, .LBB2_95
; CHECK-NEXT: # %bb.96: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw 5, ui@toc@l(12)
@@ -3150,7 +3150,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 5, 0, 27
; CHECK-NEXT: and 5, 4, 5
; CHECK-NEXT: stdcx. 5, 0, 27
-; CHECK-NEXT: bne 0, .LBB2_97
+; CHECK-NEXT: bne- 0, .LBB2_97
; CHECK-NEXT: # %bb.98: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 5, sll@toc@l(30)
@@ -3161,7 +3161,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; CHECK-NEXT: ldarx 4, 0, 26
; CHECK-NEXT: and 4, 3, 4
; CHECK-NEXT: stdcx. 4, 0, 26
-; CHECK-NEXT: bne 0, .LBB2_99
+; CHECK-NEXT: bne- 0, .LBB2_99
; CHECK-NEXT: # %bb.100: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 4, ull@toc@l(28)
@@ -3225,7 +3225,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 22
-; AIX32-NEXT: bne 0, L..BB2_1
+; AIX32-NEXT: bne- 0, L..BB2_1
; AIX32-NEXT: # %bb.2: # %entry
; AIX32-NEXT: srw 4, 6, 24
; AIX32-NEXT: lwsync
@@ -3248,7 +3248,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 19
-; AIX32-NEXT: bne 0, L..BB2_3
+; AIX32-NEXT: bne- 0, L..BB2_3
; AIX32-NEXT: # %bb.4: # %entry
; AIX32-NEXT: srw 4, 6, 21
; AIX32-NEXT: lwz 23, L..C2(2) # @ss
@@ -3273,7 +3273,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 16
-; AIX32-NEXT: bne 0, L..BB2_5
+; AIX32-NEXT: bne- 0, L..BB2_5
; AIX32-NEXT: # %bb.6: # %entry
; AIX32-NEXT: srw 4, 6, 18
; AIX32-NEXT: lwz 20, L..C3(2) # @us
@@ -3298,7 +3298,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 14
-; AIX32-NEXT: bne 0, L..BB2_7
+; AIX32-NEXT: bne- 0, L..BB2_7
; AIX32-NEXT: # %bb.8: # %entry
; AIX32-NEXT: srw 4, 6, 15
; AIX32-NEXT: lwsync
@@ -3313,7 +3313,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 4, 0, 13
; AIX32-NEXT: add 4, 3, 4
; AIX32-NEXT: stwcx. 4, 0, 13
-; AIX32-NEXT: bne 0, L..BB2_9
+; AIX32-NEXT: bne- 0, L..BB2_9
; AIX32-NEXT: # %bb.10: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: stw 4, 0(13)
@@ -3325,7 +3325,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 4, 0, 25
; AIX32-NEXT: add 4, 3, 4
; AIX32-NEXT: stwcx. 4, 0, 25
-; AIX32-NEXT: bne 0, L..BB2_11
+; AIX32-NEXT: bne- 0, L..BB2_11
; AIX32-NEXT: # %bb.12: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: lwz 31, L..C6(2) # @sll
@@ -3367,7 +3367,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 22
-; AIX32-NEXT: bne 0, L..BB2_13
+; AIX32-NEXT: bne- 0, L..BB2_13
; AIX32-NEXT: # %bb.14: # %entry
; AIX32-NEXT: srw 4, 6, 24
; AIX32-NEXT: lwsync
@@ -3387,7 +3387,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 19
-; AIX32-NEXT: bne 0, L..BB2_15
+; AIX32-NEXT: bne- 0, L..BB2_15
; AIX32-NEXT: # %bb.16: # %entry
; AIX32-NEXT: srw 4, 6, 21
; AIX32-NEXT: li 5, 0
@@ -3408,7 +3408,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 16
-; AIX32-NEXT: bne 0, L..BB2_17
+; AIX32-NEXT: bne- 0, L..BB2_17
; AIX32-NEXT: # %bb.18: # %entry
; AIX32-NEXT: srw 4, 6, 18
; AIX32-NEXT: lwsync
@@ -3429,7 +3429,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 14
-; AIX32-NEXT: bne 0, L..BB2_19
+; AIX32-NEXT: bne- 0, L..BB2_19
; AIX32-NEXT: # %bb.20: # %entry
; AIX32-NEXT: srw 4, 6, 15
; AIX32-NEXT: lwsync
@@ -3443,7 +3443,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 4, 0, 13
; AIX32-NEXT: sub 4, 4, 3
; AIX32-NEXT: stwcx. 4, 0, 13
-; AIX32-NEXT: bne 0, L..BB2_21
+; AIX32-NEXT: bne- 0, L..BB2_21
; AIX32-NEXT: # %bb.22: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: stw 4, 0(13)
@@ -3454,7 +3454,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 4, 0, 25
; AIX32-NEXT: sub 4, 4, 3
; AIX32-NEXT: stwcx. 4, 0, 25
-; AIX32-NEXT: bne 0, L..BB2_23
+; AIX32-NEXT: bne- 0, L..BB2_23
; AIX32-NEXT: # %bb.24: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: stw 4, 0(25)
@@ -3493,7 +3493,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 22
-; AIX32-NEXT: bne 0, L..BB2_25
+; AIX32-NEXT: bne- 0, L..BB2_25
; AIX32-NEXT: # %bb.26: # %entry
; AIX32-NEXT: srw 4, 6, 24
; AIX32-NEXT: lwsync
@@ -3513,7 +3513,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 19
-; AIX32-NEXT: bne 0, L..BB2_27
+; AIX32-NEXT: bne- 0, L..BB2_27
; AIX32-NEXT: # %bb.28: # %entry
; AIX32-NEXT: srw 4, 6, 21
; AIX32-NEXT: li 5, 0
@@ -3534,7 +3534,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 16
-; AIX32-NEXT: bne 0, L..BB2_29
+; AIX32-NEXT: bne- 0, L..BB2_29
; AIX32-NEXT: # %bb.30: # %entry
; AIX32-NEXT: srw 4, 6, 18
; AIX32-NEXT: lwsync
@@ -3555,7 +3555,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 14
-; AIX32-NEXT: bne 0, L..BB2_31
+; AIX32-NEXT: bne- 0, L..BB2_31
; AIX32-NEXT: # %bb.32: # %entry
; AIX32-NEXT: srw 4, 6, 15
; AIX32-NEXT: lwsync
@@ -3569,7 +3569,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 4, 0, 13
; AIX32-NEXT: or 4, 3, 4
; AIX32-NEXT: stwcx. 4, 0, 13
-; AIX32-NEXT: bne 0, L..BB2_33
+; AIX32-NEXT: bne- 0, L..BB2_33
; AIX32-NEXT: # %bb.34: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: stw 4, 0(13)
@@ -3580,7 +3580,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 4, 0, 25
; AIX32-NEXT: or 4, 3, 4
; AIX32-NEXT: stwcx. 4, 0, 25
-; AIX32-NEXT: bne 0, L..BB2_35
+; AIX32-NEXT: bne- 0, L..BB2_35
; AIX32-NEXT: # %bb.36: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: stw 4, 0(25)
@@ -3617,7 +3617,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 22
-; AIX32-NEXT: bne 0, L..BB2_37
+; AIX32-NEXT: bne- 0, L..BB2_37
; AIX32-NEXT: # %bb.38: # %entry
; AIX32-NEXT: srw 4, 6, 24
; AIX32-NEXT: lwsync
@@ -3637,7 +3637,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 19
-; AIX32-NEXT: bne 0, L..BB2_39
+; AIX32-NEXT: bne- 0, L..BB2_39
; AIX32-NEXT: # %bb.40: # %entry
; AIX32-NEXT: srw 4, 6, 21
; AIX32-NEXT: li 5, 0
@@ -3658,7 +3658,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 16
-; AIX32-NEXT: bne 0, L..BB2_41
+; AIX32-NEXT: bne- 0, L..BB2_41
; AIX32-NEXT: # %bb.42: # %entry
; AIX32-NEXT: srw 4, 6, 18
; AIX32-NEXT: lwsync
@@ -3679,7 +3679,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 14
-; AIX32-NEXT: bne 0, L..BB2_43
+; AIX32-NEXT: bne- 0, L..BB2_43
; AIX32-NEXT: # %bb.44: # %entry
; AIX32-NEXT: srw 4, 6, 15
; AIX32-NEXT: lwsync
@@ -3693,7 +3693,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 4, 0, 13
; AIX32-NEXT: xor 4, 3, 4
; AIX32-NEXT: stwcx. 4, 0, 13
-; AIX32-NEXT: bne 0, L..BB2_45
+; AIX32-NEXT: bne- 0, L..BB2_45
; AIX32-NEXT: # %bb.46: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: stw 4, 0(13)
@@ -3704,7 +3704,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 4, 0, 25
; AIX32-NEXT: xor 4, 3, 4
; AIX32-NEXT: stwcx. 4, 0, 25
-; AIX32-NEXT: bne 0, L..BB2_47
+; AIX32-NEXT: bne- 0, L..BB2_47
; AIX32-NEXT: # %bb.48: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: stw 4, 0(25)
@@ -3741,7 +3741,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 22
-; AIX32-NEXT: bne 0, L..BB2_49
+; AIX32-NEXT: bne- 0, L..BB2_49
; AIX32-NEXT: # %bb.50: # %entry
; AIX32-NEXT: srw 4, 6, 24
; AIX32-NEXT: lwsync
@@ -3761,7 +3761,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 19
-; AIX32-NEXT: bne 0, L..BB2_51
+; AIX32-NEXT: bne- 0, L..BB2_51
; AIX32-NEXT: # %bb.52: # %entry
; AIX32-NEXT: srw 4, 6, 21
; AIX32-NEXT: li 5, 0
@@ -3782,7 +3782,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 16
-; AIX32-NEXT: bne 0, L..BB2_53
+; AIX32-NEXT: bne- 0, L..BB2_53
; AIX32-NEXT: # %bb.54: # %entry
; AIX32-NEXT: srw 4, 6, 18
; AIX32-NEXT: lwsync
@@ -3803,7 +3803,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 14
-; AIX32-NEXT: bne 0, L..BB2_55
+; AIX32-NEXT: bne- 0, L..BB2_55
; AIX32-NEXT: # %bb.56: # %entry
; AIX32-NEXT: srw 4, 6, 15
; AIX32-NEXT: lwsync
@@ -3817,7 +3817,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 4, 0, 13
; AIX32-NEXT: nand 4, 3, 4
; AIX32-NEXT: stwcx. 4, 0, 13
-; AIX32-NEXT: bne 0, L..BB2_57
+; AIX32-NEXT: bne- 0, L..BB2_57
; AIX32-NEXT: # %bb.58: # %entry
; AIX32-NEXT: stw 23, 56(1) # 4-byte Folded Spill
; AIX32-NEXT: stw 27, 60(1) # 4-byte Folded Spill
@@ -3830,7 +3830,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 4, 0, 25
; AIX32-NEXT: nand 4, 3, 4
; AIX32-NEXT: stwcx. 4, 0, 25
-; AIX32-NEXT: bne 0, L..BB2_59
+; AIX32-NEXT: bne- 0, L..BB2_59
; AIX32-NEXT: # %bb.60: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: stw 4, 0(25)
@@ -3951,7 +3951,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 22
-; AIX32-NEXT: bne 0, L..BB2_65
+; AIX32-NEXT: bne- 0, L..BB2_65
; AIX32-NEXT: # %bb.66: # %atomicrmw.end
; AIX32-NEXT: srw 4, 6, 24
; AIX32-NEXT: lwsync
@@ -3973,7 +3973,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 19
-; AIX32-NEXT: bne 0, L..BB2_67
+; AIX32-NEXT: bne- 0, L..BB2_67
; AIX32-NEXT: # %bb.68: # %atomicrmw.end
; AIX32-NEXT: srw 4, 6, 21
; AIX32-NEXT: li 5, 0
@@ -3993,7 +3993,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 16
-; AIX32-NEXT: bne 0, L..BB2_69
+; AIX32-NEXT: bne- 0, L..BB2_69
; AIX32-NEXT: # %bb.70: # %atomicrmw.end
; AIX32-NEXT: srw 4, 6, 18
; AIX32-NEXT: lwsync
@@ -4014,7 +4014,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: and 7, 7, 5
; AIX32-NEXT: or 7, 7, 8
; AIX32-NEXT: stwcx. 7, 0, 14
-; AIX32-NEXT: bne 0, L..BB2_71
+; AIX32-NEXT: bne- 0, L..BB2_71
; AIX32-NEXT: # %bb.72: # %atomicrmw.end
; AIX32-NEXT: srw 4, 6, 15
; AIX32-NEXT: lwsync
@@ -4028,7 +4028,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 4, 0, 13
; AIX32-NEXT: and 4, 3, 4
; AIX32-NEXT: stwcx. 4, 0, 13
-; AIX32-NEXT: bne 0, L..BB2_73
+; AIX32-NEXT: bne- 0, L..BB2_73
; AIX32-NEXT: # %bb.74: # %atomicrmw.end
; AIX32-NEXT: lwsync
; AIX32-NEXT: stw 4, 0(13)
@@ -4039,7 +4039,7 @@ define dso_local void @test_op_and_fetch() local_unnamed_addr #0 {
; AIX32-NEXT: lwarx 4, 0, 25
; AIX32-NEXT: and 4, 3, 4
; AIX32-NEXT: stwcx. 4, 0, 25
-; AIX32-NEXT: bne 0, L..BB2_75
+; AIX32-NEXT: bne- 0, L..BB2_75
; AIX32-NEXT: # %bb.76: # %atomicrmw.end
; AIX32-NEXT: lwsync
; AIX32-NEXT: stw 4, 0(25)
@@ -5371,7 +5371,7 @@ define dso_local void @test_lock() local_unnamed_addr #0 {
; CHECK-NEXT: #
; CHECK-NEXT: lbarx 5, 0, 4
; CHECK-NEXT: stbcx. 7, 0, 4
-; CHECK-NEXT: bne 0, .LBB4_1
+; CHECK-NEXT: bne- 0, .LBB4_1
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: addis 4, 2, uc@toc@ha
; CHECK-NEXT: lwsync
@@ -5382,7 +5382,7 @@ define dso_local void @test_lock() local_unnamed_addr #0 {
; CHECK-NEXT: #
; CHECK-NEXT: lbarx 5, 0, 6
; CHECK-NEXT: stbcx. 7, 0, 6
-; CHECK-NEXT: bne 0, .LBB4_3
+; CHECK-NEXT: bne- 0, .LBB4_3
; CHECK-NEXT: # %bb.4: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stb 5, uc@toc@l(4)
@@ -5393,7 +5393,7 @@ define dso_local void @test_lock() local_unnamed_addr #0 {
; CHECK-NEXT: #
; CHECK-NEXT: lharx 6, 0, 8
; CHECK-NEXT: sthcx. 7, 0, 8
-; CHECK-NEXT: bne 0, .LBB4_5
+; CHECK-NEXT: bne- 0, .LBB4_5
; CHECK-NEXT: # %bb.6: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sth 6, ss@toc@l(5)
@@ -5404,7 +5404,7 @@ define dso_local void @test_lock() local_unnamed_addr #0 {
; CHECK-NEXT: #
; CHECK-NEXT: lharx 8, 0, 9
; CHECK-NEXT: sthcx. 7, 0, 9
-; CHECK-NEXT: bne 0, .LBB4_7
+; CHECK-NEXT: bne- 0, .LBB4_7
; CHECK-NEXT: # %bb.8: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: sth 8, us@toc@l(6)
@@ -5415,7 +5415,7 @@ define dso_local void @test_lock() local_unnamed_addr #0 {
; CHECK-NEXT: #
; CHECK-NEXT: lwarx 9, 0, 10
; CHECK-NEXT: stwcx. 7, 0, 10
-; CHECK-NEXT: bne 0, .LBB4_9
+; CHECK-NEXT: bne- 0, .LBB4_9
; CHECK-NEXT: # %bb.10: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw 9, si@toc@l(8)
@@ -5426,7 +5426,7 @@ define dso_local void @test_lock() local_unnamed_addr #0 {
; CHECK-NEXT: #
; CHECK-NEXT: lwarx 10, 0, 11
; CHECK-NEXT: stwcx. 7, 0, 11
-; CHECK-NEXT: bne 0, .LBB4_11
+; CHECK-NEXT: bne- 0, .LBB4_11
; CHECK-NEXT: # %bb.12: # %entry
; CHECK-NEXT: addis 7, 2, sll@toc@ha
; CHECK-NEXT: lwsync
@@ -5438,7 +5438,7 @@ define dso_local void @test_lock() local_unnamed_addr #0 {
; CHECK-NEXT: #
; CHECK-NEXT: ldarx 12, 0, 10
; CHECK-NEXT: stdcx. 11, 0, 10
-; CHECK-NEXT: bne 0, .LBB4_13
+; CHECK-NEXT: bne- 0, .LBB4_13
; CHECK-NEXT: # %bb.14: # %entry
; CHECK-NEXT: addis 10, 2, ull@toc@ha
; CHECK-NEXT: lwsync
@@ -5449,7 +5449,7 @@ define dso_local void @test_lock() local_unnamed_addr #0 {
; CHECK-NEXT: #
; CHECK-NEXT: ldarx 12, 0, 0
; CHECK-NEXT: stdcx. 11, 0, 0
-; CHECK-NEXT: bne 0, .LBB4_15
+; CHECK-NEXT: bne- 0, .LBB4_15
; CHECK-NEXT: # %bb.16: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: std 12, ull@toc@l(10)
@@ -5504,7 +5504,7 @@ define dso_local void @test_lock() local_unnamed_addr #0 {
; AIX32-NEXT: andc 9, 8, 6
; AIX32-NEXT: or 9, 7, 9
; AIX32-NEXT: stwcx. 9, 0, 5
-; AIX32-NEXT: bne 0, L..BB4_1
+; AIX32-NEXT: bne- 0, L..BB4_1
; AIX32-NEXT: # %bb.2: # %entry
; AIX32-NEXT: srw 4, 8, 4
; AIX32-NEXT: lwz 28, L..C1(2) # @uc
@@ -5525,7 +5525,7 @@ define dso_local void @test_lock() local_unnamed_addr #0 {
; AIX32-NEXT: andc 9, 8, 6
; AIX32-NEXT: or 9, 7, 9
; AIX32-NEXT: stwcx. 9, 0, 5
-; AIX32-NEXT: bne 0, L..BB4_3
+; AIX32-NEXT: bne- 0, L..BB4_3
; AIX32-NEXT: # %bb.4: # %entry
; AIX32-NEXT: srw 4, 8, 4
; AIX32-NEXT: lwz 27, L..C2(2) # @ss
@@ -5547,7 +5547,7 @@ define dso_local void @test_lock() local_unnamed_addr #0 {
; AIX32-NEXT: andc 9, 8, 6
; AIX32-NEXT: or 9, 7, 9
; AIX32-NEXT: stwcx. 9, 0, 5
-; AIX32-NEXT: bne 0, L..BB4_5
+; AIX32-NEXT: bne- 0, L..BB4_5
; AIX32-NEXT: # %bb.6: # %entry
; AIX32-NEXT: srw 4, 8, 4
; AIX32-NEXT: lwz 26, L..C3(2) # @us
@@ -5569,7 +5569,7 @@ define dso_local void @test_lock() local_unnamed_addr #0 {
; AIX32-NEXT: andc 9, 8, 6
; AIX32-NEXT: or 9, 7, 9
; AIX32-NEXT: stwcx. 9, 0, 5
-; AIX32-NEXT: bne 0, L..BB4_7
+; AIX32-NEXT: bne- 0, L..BB4_7
; AIX32-NEXT: # %bb.8: # %entry
; AIX32-NEXT: srw 4, 8, 4
; AIX32-NEXT: lwsync
@@ -5581,7 +5581,7 @@ define dso_local void @test_lock() local_unnamed_addr #0 {
; AIX32-NEXT: #
; AIX32-NEXT: lwarx 4, 0, 25
; AIX32-NEXT: stwcx. 3, 0, 25
-; AIX32-NEXT: bne 0, L..BB4_9
+; AIX32-NEXT: bne- 0, L..BB4_9
; AIX32-NEXT: # %bb.10: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: stw 4, 0(25)
@@ -5591,7 +5591,7 @@ define dso_local void @test_lock() local_unnamed_addr #0 {
; AIX32-NEXT: #
; AIX32-NEXT: lwarx 4, 0, 24
; AIX32-NEXT: stwcx. 3, 0, 24
-; AIX32-NEXT: bne 0, L..BB4_11
+; AIX32-NEXT: bne- 0, L..BB4_11
; AIX32-NEXT: # %bb.12: # %entry
; AIX32-NEXT: lwz 31, L..C6(2) # @sll
; AIX32-NEXT: lwsync
@@ -5695,7 +5695,7 @@ define dso_local void @test_atomic() local_unnamed_addr #0 {
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: stwcx. 3, 0, 6
-; CHECK-NEXT: bne 0, .LBB5_1
+; CHECK-NEXT: bne- 0, .LBB5_1
; CHECK-NEXT: .LBB5_3: # %entry
; CHECK-NEXT: stw 5, ui@toc@l(4)
; CHECK-NEXT: addis 5, 2, si@toc@ha
@@ -5709,7 +5709,7 @@ define dso_local void @test_atomic() local_unnamed_addr #0 {
; CHECK-NEXT: # %bb.5: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: stwcx. 3, 0, 7
-; CHECK-NEXT: bne 0, .LBB5_4
+; CHECK-NEXT: bne- 0, .LBB5_4
; CHECK-NEXT: .LBB5_6: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw 8, si@toc@l(5)
@@ -5721,7 +5721,7 @@ define dso_local void @test_atomic() local_unnamed_addr #0 {
; CHECK-NEXT: # %bb.8: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: stwcx. 3, 0, 6
-; CHECK-NEXT: bne 0, .LBB5_7
+; CHECK-NEXT: bne- 0, .LBB5_7
; CHECK-NEXT: .LBB5_9: # %entry
; CHECK-NEXT: lwsync
; CHECK-NEXT: stw 8, ui@toc@l(4)
@@ -5734,7 +5734,7 @@ define dso_local void @test_atomic() local_unnamed_addr #0 {
; CHECK-NEXT: # %bb.11: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: stwcx. 3, 0, 7
-; CHECK-NEXT: bne 0, .LBB5_10
+; CHECK-NEXT: bne- 0, .LBB5_10
; CHECK-NEXT: .LBB5_12: # %entry
; CHECK-NEXT: stw 4, si@toc@l(5)
; CHECK-NEXT: blr
@@ -5751,7 +5751,7 @@ define dso_local void @test_atomic() local_unnamed_addr #0 {
; AIX32-NEXT: # %bb.2: # %entry
; AIX32-NEXT: #
; AIX32-NEXT: stwcx. 3, 0, 4
-; AIX32-NEXT: bne 0, L..BB5_1
+; AIX32-NEXT: bne- 0, L..BB5_1
; AIX32-NEXT: L..BB5_3: # %entry
; AIX32-NEXT: stw 5, 0(4)
; AIX32-NEXT: lwz 5, L..C4(2) # @si
@@ -5764,7 +5764,7 @@ define dso_local void @test_atomic() local_unnamed_addr #0 {
; AIX32-NEXT: # %bb.5: # %entry
; AIX32-NEXT: #
; AIX32-NEXT: stwcx. 3, 0, 5
-; AIX32-NEXT: bne 0, L..BB5_4
+; AIX32-NEXT: bne- 0, L..BB5_4
; AIX32-NEXT: L..BB5_6: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: stw 6, 0(5)
@@ -5776,7 +5776,7 @@ define dso_local void @test_atomic() local_unnamed_addr #0 {
; AIX32-NEXT: # %bb.8: # %entry
; AIX32-NEXT: #
; AIX32-NEXT: stwcx. 3, 0, 4
-; AIX32-NEXT: bne 0, L..BB5_7
+; AIX32-NEXT: bne- 0, L..BB5_7
; AIX32-NEXT: L..BB5_9: # %entry
; AIX32-NEXT: lwsync
; AIX32-NEXT: stw 6, 0(4)
@@ -5789,7 +5789,7 @@ define dso_local void @test_atomic() local_unnamed_addr #0 {
; AIX32-NEXT: # %bb.11: # %entry
; AIX32-NEXT: #
; AIX32-NEXT: stwcx. 3, 0, 5
-; AIX32-NEXT: bne 0, L..BB5_10
+; AIX32-NEXT: bne- 0, L..BB5_10
; AIX32-NEXT: L..BB5_12: # %entry
; AIX32-NEXT: stw 4, 0(5)
; AIX32-NEXT: blr
@@ -5870,7 +5870,7 @@ define dso_local i64 @atommax8(ptr nocapture noundef %ptr, i64 noundef %val) loc
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: stdcx. 4, 0, 3
-; CHECK-NEXT: bne 0, .LBB7_1
+; CHECK-NEXT: bne- 0, .LBB7_1
; CHECK-NEXT: .LBB7_3: # %entry
; CHECK-NEXT: li 3, 55
; CHECK-NEXT: li 4, 66
@@ -5954,7 +5954,7 @@ define dso_local signext i32 @atommax4(ptr nocapture noundef %ptr, i32 noundef s
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: stwcx. 4, 0, 3
-; CHECK-NEXT: bne 0, .LBB8_1
+; CHECK-NEXT: bne- 0, .LBB8_1
; CHECK-NEXT: .LBB8_3: # %entry
; CHECK-NEXT: li 3, 55
; CHECK-NEXT: li 4, 66
@@ -5973,7 +5973,7 @@ define dso_local signext i32 @atommax4(ptr nocapture noundef %ptr, i32 noundef s
; AIX32-NEXT: # %bb.2: # %entry
; AIX32-NEXT: #
; AIX32-NEXT: stwcx. 4, 0, 3
-; AIX32-NEXT: bne 0, L..BB8_1
+; AIX32-NEXT: bne- 0, L..BB8_1
; AIX32-NEXT: L..BB8_3: # %entry
; AIX32-NEXT: li 3, 55
; AIX32-NEXT: li 4, 66
@@ -6000,7 +6000,7 @@ define dso_local signext i16 @atommax2(ptr nocapture noundef %ptr, i16 noundef s
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: sthcx. 4, 0, 3
-; CHECK-NEXT: bne 0, .LBB9_1
+; CHECK-NEXT: bne- 0, .LBB9_1
; CHECK-NEXT: .LBB9_3: # %entry
; CHECK-NEXT: li 3, 55
; CHECK-NEXT: li 4, 66
@@ -6033,7 +6033,7 @@ define dso_local signext i16 @atommax2(ptr nocapture noundef %ptr, i16 noundef s
; AIX32-NEXT: andc 10, 9, 7
; AIX32-NEXT: or 10, 8, 10
; AIX32-NEXT: stwcx. 10, 0, 3
-; AIX32-NEXT: bne 0, L..BB9_1
+; AIX32-NEXT: bne- 0, L..BB9_1
; AIX32-NEXT: L..BB9_3: # %entry
; AIX32-NEXT: srw 3, 9, 6
; AIX32-NEXT: lwsync
@@ -6063,7 +6063,7 @@ define dso_local zeroext i8 @atommax1(ptr nocapture noundef %ptr, i8 noundef zer
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: stbcx. 4, 0, 3
-; CHECK-NEXT: bne 0, .LBB10_1
+; CHECK-NEXT: bne- 0, .LBB10_1
; CHECK-NEXT: .LBB10_3: # %entry
; CHECK-NEXT: li 3, 55
; CHECK-NEXT: li 4, 66
@@ -6092,7 +6092,7 @@ define dso_local zeroext i8 @atommax1(ptr nocapture noundef %ptr, i8 noundef zer
; AIX32-NEXT: andc 10, 9, 7
; AIX32-NEXT: or 10, 8, 10
; AIX32-NEXT: stwcx. 10, 0, 3
-; AIX32-NEXT: bne 0, L..BB10_1
+; AIX32-NEXT: bne- 0, L..BB10_1
; AIX32-NEXT: L..BB10_3: # %entry
; AIX32-NEXT: srw 3, 9, 5
; AIX32-NEXT: lwsync
diff --git a/llvm/test/CodeGen/PowerPC/atomic-minmax.ll b/llvm/test/CodeGen/PowerPC/atomic-minmax.ll
index 747d9e5..44a4f16 100644
--- a/llvm/test/CodeGen/PowerPC/atomic-minmax.ll
+++ b/llvm/test/CodeGen/PowerPC/atomic-minmax.ll
@@ -14,7 +14,7 @@ define void @a32min(ptr nocapture dereferenceable(4) %minimum, i32 %val) #0 {
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: stwcx. 4, 0, 3
-; CHECK-NEXT: bne 0, .LBB0_1
+; CHECK-NEXT: bne- 0, .LBB0_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
@@ -34,7 +34,7 @@ define void @a32max(ptr nocapture dereferenceable(4) %minimum, i32 %val) #0 {
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: stwcx. 4, 0, 3
-; CHECK-NEXT: bne 0, .LBB1_1
+; CHECK-NEXT: bne- 0, .LBB1_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
@@ -54,7 +54,7 @@ define void @a32umin(ptr nocapture dereferenceable(4) %minimum, i32 %val) #0 {
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: stwcx. 4, 0, 3
-; CHECK-NEXT: bne 0, .LBB2_1
+; CHECK-NEXT: bne- 0, .LBB2_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
@@ -74,7 +74,7 @@ define void @a32umax(ptr nocapture dereferenceable(4) %minimum, i32 %val) #0 {
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: stwcx. 4, 0, 3
-; CHECK-NEXT: bne 0, .LBB3_1
+; CHECK-NEXT: bne- 0, .LBB3_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
@@ -96,7 +96,7 @@ define void @a16min(ptr nocapture dereferenceable(4) %minimum, i16 %val) #1 {
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: sthcx. 4, 0, 3
-; CHECK-NEXT: bne 0, .LBB4_1
+; CHECK-NEXT: bne- 0, .LBB4_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
@@ -118,7 +118,7 @@ define void @a16max(ptr nocapture dereferenceable(4) %minimum, i16 %val) #1 {
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: sthcx. 4, 0, 3
-; CHECK-NEXT: bne 0, .LBB5_1
+; CHECK-NEXT: bne- 0, .LBB5_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
@@ -138,7 +138,7 @@ define void @a16umin(ptr nocapture dereferenceable(4) %minimum, i16 %val) #1 {
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: sthcx. 4, 0, 3
-; CHECK-NEXT: bne 0, .LBB6_1
+; CHECK-NEXT: bne- 0, .LBB6_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
@@ -158,7 +158,7 @@ define void @a16umax(ptr nocapture dereferenceable(4) %minimum, i16 %val) #1 {
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: sthcx. 4, 0, 3
-; CHECK-NEXT: bne 0, .LBB7_1
+; CHECK-NEXT: bne- 0, .LBB7_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
@@ -180,7 +180,7 @@ define void @a8min(ptr nocapture dereferenceable(4) %minimum, i8 %val) #1 {
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: stbcx. 4, 0, 3
-; CHECK-NEXT: bne 0, .LBB8_1
+; CHECK-NEXT: bne- 0, .LBB8_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
@@ -202,7 +202,7 @@ define void @a8max(ptr nocapture dereferenceable(4) %minimum, i8 %val) #1 {
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: stbcx. 4, 0, 3
-; CHECK-NEXT: bne 0, .LBB9_1
+; CHECK-NEXT: bne- 0, .LBB9_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
@@ -222,7 +222,7 @@ define void @a8umin(ptr nocapture dereferenceable(4) %minimum, i8 %val) #1 {
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: stbcx. 4, 0, 3
-; CHECK-NEXT: bne 0, .LBB10_1
+; CHECK-NEXT: bne- 0, .LBB10_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
@@ -242,7 +242,7 @@ define void @a8umax(ptr nocapture dereferenceable(4) %minimum, i8 %val) #1 {
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: stbcx. 4, 0, 3
-; CHECK-NEXT: bne 0, .LBB11_1
+; CHECK-NEXT: bne- 0, .LBB11_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
@@ -262,7 +262,7 @@ define void @a64min(ptr nocapture dereferenceable(4) %minimum, i64 %val) #0 {
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: stdcx. 4, 0, 3
-; CHECK-NEXT: bne 0, .LBB12_1
+; CHECK-NEXT: bne- 0, .LBB12_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
@@ -282,7 +282,7 @@ define void @a64max(ptr nocapture dereferenceable(4) %minimum, i64 %val) #0 {
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: stdcx. 4, 0, 3
-; CHECK-NEXT: bne 0, .LBB13_1
+; CHECK-NEXT: bne- 0, .LBB13_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
@@ -302,7 +302,7 @@ define void @a64umin(ptr nocapture dereferenceable(4) %minimum, i64 %val) #0 {
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: stdcx. 4, 0, 3
-; CHECK-NEXT: bne 0, .LBB14_1
+; CHECK-NEXT: bne- 0, .LBB14_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
@@ -322,7 +322,7 @@ define void @a64umax(ptr nocapture dereferenceable(4) %minimum, i64 %val) #0 {
; CHECK-NEXT: # %bb.2: # %entry
; CHECK-NEXT: #
; CHECK-NEXT: stdcx. 4, 0, 3
-; CHECK-NEXT: bne 0, .LBB15_1
+; CHECK-NEXT: bne- 0, .LBB15_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
@@ -356,7 +356,7 @@ define void @ae16min(ptr nocapture dereferenceable(4) %minimum, i16 %val) #0 {
; CHECK-NEXT: andc 8, 8, 6
; CHECK-NEXT: or 8, 7, 8
; CHECK-NEXT: stwcx. 8, 0, 3
-; CHECK-NEXT: bne 0, .LBB16_1
+; CHECK-NEXT: bne- 0, .LBB16_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
@@ -390,7 +390,7 @@ define void @ae16max(ptr nocapture dereferenceable(4) %minimum, i16 %val) #0 {
; CHECK-NEXT: andc 8, 8, 6
; CHECK-NEXT: or 8, 7, 8
; CHECK-NEXT: stwcx. 8, 0, 3
-; CHECK-NEXT: bne 0, .LBB17_1
+; CHECK-NEXT: bne- 0, .LBB17_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
@@ -421,7 +421,7 @@ define void @ae16umin(ptr nocapture dereferenceable(4) %minimum, i16 %val) #0 {
; CHECK-NEXT: andc 7, 7, 5
; CHECK-NEXT: or 7, 6, 7
; CHECK-NEXT: stwcx. 7, 0, 3
-; CHECK-NEXT: bne 0, .LBB18_1
+; CHECK-NEXT: bne- 0, .LBB18_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
@@ -452,7 +452,7 @@ define void @ae16umax(ptr nocapture dereferenceable(4) %minimum, i16 %val) #0 {
; CHECK-NEXT: andc 7, 7, 5
; CHECK-NEXT: or 7, 6, 7
; CHECK-NEXT: stwcx. 7, 0, 3
-; CHECK-NEXT: bne 0, .LBB19_1
+; CHECK-NEXT: bne- 0, .LBB19_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
@@ -485,7 +485,7 @@ define void @ae8min(ptr nocapture dereferenceable(4) %minimum, i8 %val) #0 {
; CHECK-NEXT: andc 8, 8, 6
; CHECK-NEXT: or 8, 7, 8
; CHECK-NEXT: stwcx. 8, 0, 3
-; CHECK-NEXT: bne 0, .LBB20_1
+; CHECK-NEXT: bne- 0, .LBB20_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
@@ -518,7 +518,7 @@ define void @ae8max(ptr nocapture dereferenceable(4) %minimum, i8 %val) #0 {
; CHECK-NEXT: andc 8, 8, 6
; CHECK-NEXT: or 8, 7, 8
; CHECK-NEXT: stwcx. 8, 0, 3
-; CHECK-NEXT: bne 0, .LBB21_1
+; CHECK-NEXT: bne- 0, .LBB21_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
@@ -548,7 +548,7 @@ define void @ae8umin(ptr nocapture dereferenceable(4) %minimum, i8 %val) #0 {
; CHECK-NEXT: andc 7, 7, 5
; CHECK-NEXT: or 7, 6, 7
; CHECK-NEXT: stwcx. 7, 0, 3
-; CHECK-NEXT: bne 0, .LBB22_1
+; CHECK-NEXT: bne- 0, .LBB22_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
@@ -578,7 +578,7 @@ define void @ae8umax(ptr nocapture dereferenceable(4) %minimum, i8 %val) #0 {
; CHECK-NEXT: andc 7, 7, 5
; CHECK-NEXT: or 7, 6, 7
; CHECK-NEXT: stwcx. 7, 0, 3
-; CHECK-NEXT: bne 0, .LBB23_1
+; CHECK-NEXT: bne- 0, .LBB23_1
; CHECK-NEXT: # %bb.3: # %entry
; CHECK-NEXT: blr
entry:
diff --git a/llvm/test/CodeGen/PowerPC/atomics-regression.ll b/llvm/test/CodeGen/PowerPC/atomics-regression.ll
index 90990bb..cfc3a99 100644
--- a/llvm/test/CodeGen/PowerPC/atomics-regression.ll
+++ b/llvm/test/CodeGen/PowerPC/atomics-regression.ll
@@ -2291,7 +2291,7 @@ define i8 @test120(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB120_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB120_1
+; PPC64LE-NEXT: bne- 0, .LBB120_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -2306,7 +2306,7 @@ define i8 @test121(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB121_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: stbcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB121_1
+; PPC64LE-NEXT: bne- 0, .LBB121_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2321,7 +2321,7 @@ define i8 @test122(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB122_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB122_1
+; PPC64LE-NEXT: bne- 0, .LBB122_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -2336,7 +2336,7 @@ define i8 @test123(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB123_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB123_1
+; PPC64LE-NEXT: bne- 0, .LBB123_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -2352,7 +2352,7 @@ define i8 @test124(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB124_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB124_1
+; PPC64LE-NEXT: bne- 0, .LBB124_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -2367,7 +2367,7 @@ define i16 @test125(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB125_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB125_1
+; PPC64LE-NEXT: bne- 0, .LBB125_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -2382,7 +2382,7 @@ define i16 @test126(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB126_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: sthcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB126_1
+; PPC64LE-NEXT: bne- 0, .LBB126_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2397,7 +2397,7 @@ define i16 @test127(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB127_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB127_1
+; PPC64LE-NEXT: bne- 0, .LBB127_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -2412,7 +2412,7 @@ define i16 @test128(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB128_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB128_1
+; PPC64LE-NEXT: bne- 0, .LBB128_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -2428,7 +2428,7 @@ define i16 @test129(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB129_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB129_1
+; PPC64LE-NEXT: bne- 0, .LBB129_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -2443,7 +2443,7 @@ define i32 @test130(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB130_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB130_1
+; PPC64LE-NEXT: bne- 0, .LBB130_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -2458,7 +2458,7 @@ define i32 @test131(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB131_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: stwcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB131_1
+; PPC64LE-NEXT: bne- 0, .LBB131_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2473,7 +2473,7 @@ define i32 @test132(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB132_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB132_1
+; PPC64LE-NEXT: bne- 0, .LBB132_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -2488,7 +2488,7 @@ define i32 @test133(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB133_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB133_1
+; PPC64LE-NEXT: bne- 0, .LBB133_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -2504,7 +2504,7 @@ define i32 @test134(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB134_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB134_1
+; PPC64LE-NEXT: bne- 0, .LBB134_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -2519,7 +2519,7 @@ define i64 @test135(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB135_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB135_1
+; PPC64LE-NEXT: bne- 0, .LBB135_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -2534,7 +2534,7 @@ define i64 @test136(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB136_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: stdcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB136_1
+; PPC64LE-NEXT: bne- 0, .LBB136_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2549,7 +2549,7 @@ define i64 @test137(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB137_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB137_1
+; PPC64LE-NEXT: bne- 0, .LBB137_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -2564,7 +2564,7 @@ define i64 @test138(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB138_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB138_1
+; PPC64LE-NEXT: bne- 0, .LBB138_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -2580,7 +2580,7 @@ define i64 @test139(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB139_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB139_1
+; PPC64LE-NEXT: bne- 0, .LBB139_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -2596,7 +2596,7 @@ define i8 @test140(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB140_1
+; PPC64LE-NEXT: bne- 0, .LBB140_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -2612,7 +2612,7 @@ define i8 @test141(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: add 6, 4, 3
; PPC64LE-NEXT: stbcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB141_1
+; PPC64LE-NEXT: bne- 0, .LBB141_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2628,7 +2628,7 @@ define i8 @test142(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB142_1
+; PPC64LE-NEXT: bne- 0, .LBB142_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -2644,7 +2644,7 @@ define i8 @test143(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB143_1
+; PPC64LE-NEXT: bne- 0, .LBB143_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -2661,7 +2661,7 @@ define i8 @test144(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB144_1
+; PPC64LE-NEXT: bne- 0, .LBB144_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -2677,7 +2677,7 @@ define i16 @test145(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB145_1
+; PPC64LE-NEXT: bne- 0, .LBB145_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -2693,7 +2693,7 @@ define i16 @test146(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: add 6, 4, 3
; PPC64LE-NEXT: sthcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB146_1
+; PPC64LE-NEXT: bne- 0, .LBB146_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2709,7 +2709,7 @@ define i16 @test147(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB147_1
+; PPC64LE-NEXT: bne- 0, .LBB147_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -2725,7 +2725,7 @@ define i16 @test148(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB148_1
+; PPC64LE-NEXT: bne- 0, .LBB148_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -2742,7 +2742,7 @@ define i16 @test149(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB149_1
+; PPC64LE-NEXT: bne- 0, .LBB149_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -2758,7 +2758,7 @@ define i32 @test150(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB150_1
+; PPC64LE-NEXT: bne- 0, .LBB150_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -2774,7 +2774,7 @@ define i32 @test151(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: add 6, 4, 3
; PPC64LE-NEXT: stwcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB151_1
+; PPC64LE-NEXT: bne- 0, .LBB151_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2790,7 +2790,7 @@ define i32 @test152(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB152_1
+; PPC64LE-NEXT: bne- 0, .LBB152_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -2806,7 +2806,7 @@ define i32 @test153(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB153_1
+; PPC64LE-NEXT: bne- 0, .LBB153_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -2823,7 +2823,7 @@ define i32 @test154(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB154_1
+; PPC64LE-NEXT: bne- 0, .LBB154_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -2839,7 +2839,7 @@ define i64 @test155(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB155_1
+; PPC64LE-NEXT: bne- 0, .LBB155_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -2855,7 +2855,7 @@ define i64 @test156(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: add 6, 4, 3
; PPC64LE-NEXT: stdcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB156_1
+; PPC64LE-NEXT: bne- 0, .LBB156_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2871,7 +2871,7 @@ define i64 @test157(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB157_1
+; PPC64LE-NEXT: bne- 0, .LBB157_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -2887,7 +2887,7 @@ define i64 @test158(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB158_1
+; PPC64LE-NEXT: bne- 0, .LBB158_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -2904,7 +2904,7 @@ define i64 @test159(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB159_1
+; PPC64LE-NEXT: bne- 0, .LBB159_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -2920,7 +2920,7 @@ define i8 @test160(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB160_1
+; PPC64LE-NEXT: bne- 0, .LBB160_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -2936,7 +2936,7 @@ define i8 @test161(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: sub 6, 3, 4
; PPC64LE-NEXT: stbcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB161_1
+; PPC64LE-NEXT: bne- 0, .LBB161_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -2952,7 +2952,7 @@ define i8 @test162(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB162_1
+; PPC64LE-NEXT: bne- 0, .LBB162_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -2968,7 +2968,7 @@ define i8 @test163(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB163_1
+; PPC64LE-NEXT: bne- 0, .LBB163_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -2985,7 +2985,7 @@ define i8 @test164(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB164_1
+; PPC64LE-NEXT: bne- 0, .LBB164_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3001,7 +3001,7 @@ define i16 @test165(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB165_1
+; PPC64LE-NEXT: bne- 0, .LBB165_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3017,7 +3017,7 @@ define i16 @test166(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: sub 6, 3, 4
; PPC64LE-NEXT: sthcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB166_1
+; PPC64LE-NEXT: bne- 0, .LBB166_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3033,7 +3033,7 @@ define i16 @test167(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB167_1
+; PPC64LE-NEXT: bne- 0, .LBB167_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3049,7 +3049,7 @@ define i16 @test168(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB168_1
+; PPC64LE-NEXT: bne- 0, .LBB168_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3066,7 +3066,7 @@ define i16 @test169(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB169_1
+; PPC64LE-NEXT: bne- 0, .LBB169_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3082,7 +3082,7 @@ define i32 @test170(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB170_1
+; PPC64LE-NEXT: bne- 0, .LBB170_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3098,7 +3098,7 @@ define i32 @test171(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: sub 6, 3, 4
; PPC64LE-NEXT: stwcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB171_1
+; PPC64LE-NEXT: bne- 0, .LBB171_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3114,7 +3114,7 @@ define i32 @test172(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB172_1
+; PPC64LE-NEXT: bne- 0, .LBB172_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3130,7 +3130,7 @@ define i32 @test173(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB173_1
+; PPC64LE-NEXT: bne- 0, .LBB173_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3147,7 +3147,7 @@ define i32 @test174(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB174_1
+; PPC64LE-NEXT: bne- 0, .LBB174_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3163,7 +3163,7 @@ define i64 @test175(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB175_1
+; PPC64LE-NEXT: bne- 0, .LBB175_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3179,7 +3179,7 @@ define i64 @test176(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: sub 6, 3, 4
; PPC64LE-NEXT: stdcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB176_1
+; PPC64LE-NEXT: bne- 0, .LBB176_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3195,7 +3195,7 @@ define i64 @test177(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB177_1
+; PPC64LE-NEXT: bne- 0, .LBB177_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3211,7 +3211,7 @@ define i64 @test178(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB178_1
+; PPC64LE-NEXT: bne- 0, .LBB178_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3228,7 +3228,7 @@ define i64 @test179(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB179_1
+; PPC64LE-NEXT: bne- 0, .LBB179_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3244,7 +3244,7 @@ define i8 @test180(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB180_1
+; PPC64LE-NEXT: bne- 0, .LBB180_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3260,7 +3260,7 @@ define i8 @test181(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: and 6, 4, 3
; PPC64LE-NEXT: stbcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB181_1
+; PPC64LE-NEXT: bne- 0, .LBB181_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3276,7 +3276,7 @@ define i8 @test182(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB182_1
+; PPC64LE-NEXT: bne- 0, .LBB182_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3292,7 +3292,7 @@ define i8 @test183(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB183_1
+; PPC64LE-NEXT: bne- 0, .LBB183_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3309,7 +3309,7 @@ define i8 @test184(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB184_1
+; PPC64LE-NEXT: bne- 0, .LBB184_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3325,7 +3325,7 @@ define i16 @test185(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB185_1
+; PPC64LE-NEXT: bne- 0, .LBB185_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3341,7 +3341,7 @@ define i16 @test186(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: and 6, 4, 3
; PPC64LE-NEXT: sthcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB186_1
+; PPC64LE-NEXT: bne- 0, .LBB186_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3357,7 +3357,7 @@ define i16 @test187(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB187_1
+; PPC64LE-NEXT: bne- 0, .LBB187_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3373,7 +3373,7 @@ define i16 @test188(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB188_1
+; PPC64LE-NEXT: bne- 0, .LBB188_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3390,7 +3390,7 @@ define i16 @test189(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB189_1
+; PPC64LE-NEXT: bne- 0, .LBB189_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3406,7 +3406,7 @@ define i32 @test190(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB190_1
+; PPC64LE-NEXT: bne- 0, .LBB190_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3422,7 +3422,7 @@ define i32 @test191(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: and 6, 4, 3
; PPC64LE-NEXT: stwcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB191_1
+; PPC64LE-NEXT: bne- 0, .LBB191_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3438,7 +3438,7 @@ define i32 @test192(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB192_1
+; PPC64LE-NEXT: bne- 0, .LBB192_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3454,7 +3454,7 @@ define i32 @test193(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB193_1
+; PPC64LE-NEXT: bne- 0, .LBB193_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3471,7 +3471,7 @@ define i32 @test194(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB194_1
+; PPC64LE-NEXT: bne- 0, .LBB194_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3487,7 +3487,7 @@ define i64 @test195(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB195_1
+; PPC64LE-NEXT: bne- 0, .LBB195_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3503,7 +3503,7 @@ define i64 @test196(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: and 6, 4, 3
; PPC64LE-NEXT: stdcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB196_1
+; PPC64LE-NEXT: bne- 0, .LBB196_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3519,7 +3519,7 @@ define i64 @test197(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB197_1
+; PPC64LE-NEXT: bne- 0, .LBB197_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3535,7 +3535,7 @@ define i64 @test198(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB198_1
+; PPC64LE-NEXT: bne- 0, .LBB198_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3552,7 +3552,7 @@ define i64 @test199(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB199_1
+; PPC64LE-NEXT: bne- 0, .LBB199_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3568,7 +3568,7 @@ define i8 @test200(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB200_1
+; PPC64LE-NEXT: bne- 0, .LBB200_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3584,7 +3584,7 @@ define i8 @test201(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: nand 6, 4, 3
; PPC64LE-NEXT: stbcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB201_1
+; PPC64LE-NEXT: bne- 0, .LBB201_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3600,7 +3600,7 @@ define i8 @test202(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB202_1
+; PPC64LE-NEXT: bne- 0, .LBB202_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3616,7 +3616,7 @@ define i8 @test203(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB203_1
+; PPC64LE-NEXT: bne- 0, .LBB203_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3633,7 +3633,7 @@ define i8 @test204(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB204_1
+; PPC64LE-NEXT: bne- 0, .LBB204_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3649,7 +3649,7 @@ define i16 @test205(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB205_1
+; PPC64LE-NEXT: bne- 0, .LBB205_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3665,7 +3665,7 @@ define i16 @test206(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: nand 6, 4, 3
; PPC64LE-NEXT: sthcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB206_1
+; PPC64LE-NEXT: bne- 0, .LBB206_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3681,7 +3681,7 @@ define i16 @test207(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB207_1
+; PPC64LE-NEXT: bne- 0, .LBB207_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3697,7 +3697,7 @@ define i16 @test208(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB208_1
+; PPC64LE-NEXT: bne- 0, .LBB208_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3714,7 +3714,7 @@ define i16 @test209(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB209_1
+; PPC64LE-NEXT: bne- 0, .LBB209_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3730,7 +3730,7 @@ define i32 @test210(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB210_1
+; PPC64LE-NEXT: bne- 0, .LBB210_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3746,7 +3746,7 @@ define i32 @test211(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: nand 6, 4, 3
; PPC64LE-NEXT: stwcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB211_1
+; PPC64LE-NEXT: bne- 0, .LBB211_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3762,7 +3762,7 @@ define i32 @test212(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB212_1
+; PPC64LE-NEXT: bne- 0, .LBB212_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3778,7 +3778,7 @@ define i32 @test213(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB213_1
+; PPC64LE-NEXT: bne- 0, .LBB213_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3795,7 +3795,7 @@ define i32 @test214(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB214_1
+; PPC64LE-NEXT: bne- 0, .LBB214_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3811,7 +3811,7 @@ define i64 @test215(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB215_1
+; PPC64LE-NEXT: bne- 0, .LBB215_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3827,7 +3827,7 @@ define i64 @test216(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: nand 6, 4, 3
; PPC64LE-NEXT: stdcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB216_1
+; PPC64LE-NEXT: bne- 0, .LBB216_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3843,7 +3843,7 @@ define i64 @test217(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB217_1
+; PPC64LE-NEXT: bne- 0, .LBB217_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3859,7 +3859,7 @@ define i64 @test218(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB218_1
+; PPC64LE-NEXT: bne- 0, .LBB218_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3876,7 +3876,7 @@ define i64 @test219(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB219_1
+; PPC64LE-NEXT: bne- 0, .LBB219_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3892,7 +3892,7 @@ define i8 @test220(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB220_1
+; PPC64LE-NEXT: bne- 0, .LBB220_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3908,7 +3908,7 @@ define i8 @test221(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: or 6, 4, 3
; PPC64LE-NEXT: stbcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB221_1
+; PPC64LE-NEXT: bne- 0, .LBB221_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -3924,7 +3924,7 @@ define i8 @test222(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB222_1
+; PPC64LE-NEXT: bne- 0, .LBB222_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3940,7 +3940,7 @@ define i8 @test223(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB223_1
+; PPC64LE-NEXT: bne- 0, .LBB223_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3957,7 +3957,7 @@ define i8 @test224(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB224_1
+; PPC64LE-NEXT: bne- 0, .LBB224_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -3973,7 +3973,7 @@ define i16 @test225(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB225_1
+; PPC64LE-NEXT: bne- 0, .LBB225_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -3989,7 +3989,7 @@ define i16 @test226(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: or 6, 4, 3
; PPC64LE-NEXT: sthcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB226_1
+; PPC64LE-NEXT: bne- 0, .LBB226_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -4005,7 +4005,7 @@ define i16 @test227(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB227_1
+; PPC64LE-NEXT: bne- 0, .LBB227_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -4021,7 +4021,7 @@ define i16 @test228(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB228_1
+; PPC64LE-NEXT: bne- 0, .LBB228_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -4038,7 +4038,7 @@ define i16 @test229(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB229_1
+; PPC64LE-NEXT: bne- 0, .LBB229_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -4054,7 +4054,7 @@ define i32 @test230(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB230_1
+; PPC64LE-NEXT: bne- 0, .LBB230_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -4070,7 +4070,7 @@ define i32 @test231(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: or 6, 4, 3
; PPC64LE-NEXT: stwcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB231_1
+; PPC64LE-NEXT: bne- 0, .LBB231_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -4086,7 +4086,7 @@ define i32 @test232(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB232_1
+; PPC64LE-NEXT: bne- 0, .LBB232_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -4102,7 +4102,7 @@ define i32 @test233(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB233_1
+; PPC64LE-NEXT: bne- 0, .LBB233_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -4119,7 +4119,7 @@ define i32 @test234(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB234_1
+; PPC64LE-NEXT: bne- 0, .LBB234_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -4135,7 +4135,7 @@ define i64 @test235(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB235_1
+; PPC64LE-NEXT: bne- 0, .LBB235_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -4151,7 +4151,7 @@ define i64 @test236(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: or 6, 4, 3
; PPC64LE-NEXT: stdcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB236_1
+; PPC64LE-NEXT: bne- 0, .LBB236_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -4167,7 +4167,7 @@ define i64 @test237(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB237_1
+; PPC64LE-NEXT: bne- 0, .LBB237_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -4183,7 +4183,7 @@ define i64 @test238(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB238_1
+; PPC64LE-NEXT: bne- 0, .LBB238_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -4200,7 +4200,7 @@ define i64 @test239(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB239_1
+; PPC64LE-NEXT: bne- 0, .LBB239_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -4216,7 +4216,7 @@ define i8 @test240(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB240_1
+; PPC64LE-NEXT: bne- 0, .LBB240_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -4232,7 +4232,7 @@ define i8 @test241(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: xor 6, 4, 3
; PPC64LE-NEXT: stbcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB241_1
+; PPC64LE-NEXT: bne- 0, .LBB241_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -4248,7 +4248,7 @@ define i8 @test242(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB242_1
+; PPC64LE-NEXT: bne- 0, .LBB242_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -4264,7 +4264,7 @@ define i8 @test243(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB243_1
+; PPC64LE-NEXT: bne- 0, .LBB243_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -4281,7 +4281,7 @@ define i8 @test244(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB244_1
+; PPC64LE-NEXT: bne- 0, .LBB244_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -4297,7 +4297,7 @@ define i16 @test245(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB245_1
+; PPC64LE-NEXT: bne- 0, .LBB245_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -4313,7 +4313,7 @@ define i16 @test246(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: xor 6, 4, 3
; PPC64LE-NEXT: sthcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB246_1
+; PPC64LE-NEXT: bne- 0, .LBB246_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -4329,7 +4329,7 @@ define i16 @test247(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB247_1
+; PPC64LE-NEXT: bne- 0, .LBB247_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -4345,7 +4345,7 @@ define i16 @test248(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB248_1
+; PPC64LE-NEXT: bne- 0, .LBB248_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -4362,7 +4362,7 @@ define i16 @test249(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB249_1
+; PPC64LE-NEXT: bne- 0, .LBB249_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -4378,7 +4378,7 @@ define i32 @test250(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB250_1
+; PPC64LE-NEXT: bne- 0, .LBB250_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -4394,7 +4394,7 @@ define i32 @test251(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: xor 6, 4, 3
; PPC64LE-NEXT: stwcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB251_1
+; PPC64LE-NEXT: bne- 0, .LBB251_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -4410,7 +4410,7 @@ define i32 @test252(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB252_1
+; PPC64LE-NEXT: bne- 0, .LBB252_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -4426,7 +4426,7 @@ define i32 @test253(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB253_1
+; PPC64LE-NEXT: bne- 0, .LBB253_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -4443,7 +4443,7 @@ define i32 @test254(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB254_1
+; PPC64LE-NEXT: bne- 0, .LBB254_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -4459,7 +4459,7 @@ define i64 @test255(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB255_1
+; PPC64LE-NEXT: bne- 0, .LBB255_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -4475,7 +4475,7 @@ define i64 @test256(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: xor 6, 4, 3
; PPC64LE-NEXT: stdcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB256_1
+; PPC64LE-NEXT: bne- 0, .LBB256_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -4491,7 +4491,7 @@ define i64 @test257(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB257_1
+; PPC64LE-NEXT: bne- 0, .LBB257_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -4507,7 +4507,7 @@ define i64 @test258(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB258_1
+; PPC64LE-NEXT: bne- 0, .LBB258_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -4524,7 +4524,7 @@ define i64 @test259(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB259_1
+; PPC64LE-NEXT: bne- 0, .LBB259_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -4544,7 +4544,7 @@ define i8 @test260(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: bgt 0, .LBB260_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB260_1
+; PPC64LE-NEXT: bne- 0, .LBB260_1
; PPC64LE-NEXT: .LBB260_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
@@ -4563,7 +4563,7 @@ define i8 @test261(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: bgt 0, .LBB261_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB261_1
+; PPC64LE-NEXT: bne- 0, .LBB261_1
; PPC64LE-NEXT: .LBB261_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -4584,7 +4584,7 @@ define i8 @test262(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: bgt 0, .LBB262_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB262_1
+; PPC64LE-NEXT: bne- 0, .LBB262_1
; PPC64LE-NEXT: .LBB262_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
@@ -4604,7 +4604,7 @@ define i8 @test263(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: bgt 0, .LBB263_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB263_1
+; PPC64LE-NEXT: bne- 0, .LBB263_1
; PPC64LE-NEXT: .LBB263_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -4625,7 +4625,7 @@ define i8 @test264(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: bgt 0, .LBB264_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB264_1
+; PPC64LE-NEXT: bne- 0, .LBB264_1
; PPC64LE-NEXT: .LBB264_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -4645,7 +4645,7 @@ define i16 @test265(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: bgt 0, .LBB265_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB265_1
+; PPC64LE-NEXT: bne- 0, .LBB265_1
; PPC64LE-NEXT: .LBB265_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
@@ -4664,7 +4664,7 @@ define i16 @test266(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: bgt 0, .LBB266_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB266_1
+; PPC64LE-NEXT: bne- 0, .LBB266_1
; PPC64LE-NEXT: .LBB266_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -4685,7 +4685,7 @@ define i16 @test267(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: bgt 0, .LBB267_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB267_1
+; PPC64LE-NEXT: bne- 0, .LBB267_1
; PPC64LE-NEXT: .LBB267_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
@@ -4705,7 +4705,7 @@ define i16 @test268(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: bgt 0, .LBB268_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB268_1
+; PPC64LE-NEXT: bne- 0, .LBB268_1
; PPC64LE-NEXT: .LBB268_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -4726,7 +4726,7 @@ define i16 @test269(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: bgt 0, .LBB269_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB269_1
+; PPC64LE-NEXT: bne- 0, .LBB269_1
; PPC64LE-NEXT: .LBB269_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -4744,7 +4744,7 @@ define i32 @test270(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: bgt 0, .LBB270_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB270_1
+; PPC64LE-NEXT: bne- 0, .LBB270_1
; PPC64LE-NEXT: .LBB270_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -4762,7 +4762,7 @@ define i32 @test271(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: bgt 0, .LBB271_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB271_1
+; PPC64LE-NEXT: bne- 0, .LBB271_1
; PPC64LE-NEXT: .LBB271_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -4780,7 +4780,7 @@ define i32 @test272(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: bgt 0, .LBB272_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB272_1
+; PPC64LE-NEXT: bne- 0, .LBB272_1
; PPC64LE-NEXT: .LBB272_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -4798,7 +4798,7 @@ define i32 @test273(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: bgt 0, .LBB273_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB273_1
+; PPC64LE-NEXT: bne- 0, .LBB273_1
; PPC64LE-NEXT: .LBB273_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -4817,7 +4817,7 @@ define i32 @test274(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: bgt 0, .LBB274_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB274_1
+; PPC64LE-NEXT: bne- 0, .LBB274_1
; PPC64LE-NEXT: .LBB274_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -4835,7 +4835,7 @@ define i64 @test275(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: bgt 0, .LBB275_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB275_1
+; PPC64LE-NEXT: bne- 0, .LBB275_1
; PPC64LE-NEXT: .LBB275_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -4853,7 +4853,7 @@ define i64 @test276(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: bgt 0, .LBB276_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB276_1
+; PPC64LE-NEXT: bne- 0, .LBB276_1
; PPC64LE-NEXT: .LBB276_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -4871,7 +4871,7 @@ define i64 @test277(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: bgt 0, .LBB277_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB277_1
+; PPC64LE-NEXT: bne- 0, .LBB277_1
; PPC64LE-NEXT: .LBB277_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -4889,7 +4889,7 @@ define i64 @test278(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: bgt 0, .LBB278_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB278_1
+; PPC64LE-NEXT: bne- 0, .LBB278_1
; PPC64LE-NEXT: .LBB278_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -4908,7 +4908,7 @@ define i64 @test279(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: bgt 0, .LBB279_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB279_1
+; PPC64LE-NEXT: bne- 0, .LBB279_1
; PPC64LE-NEXT: .LBB279_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -4928,7 +4928,7 @@ define i8 @test280(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: blt 0, .LBB280_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB280_1
+; PPC64LE-NEXT: bne- 0, .LBB280_1
; PPC64LE-NEXT: .LBB280_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
@@ -4947,7 +4947,7 @@ define i8 @test281(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: blt 0, .LBB281_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB281_1
+; PPC64LE-NEXT: bne- 0, .LBB281_1
; PPC64LE-NEXT: .LBB281_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -4968,7 +4968,7 @@ define i8 @test282(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: blt 0, .LBB282_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB282_1
+; PPC64LE-NEXT: bne- 0, .LBB282_1
; PPC64LE-NEXT: .LBB282_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
@@ -4988,7 +4988,7 @@ define i8 @test283(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: blt 0, .LBB283_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB283_1
+; PPC64LE-NEXT: bne- 0, .LBB283_1
; PPC64LE-NEXT: .LBB283_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -5009,7 +5009,7 @@ define i8 @test284(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: blt 0, .LBB284_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB284_1
+; PPC64LE-NEXT: bne- 0, .LBB284_1
; PPC64LE-NEXT: .LBB284_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -5029,7 +5029,7 @@ define i16 @test285(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: blt 0, .LBB285_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB285_1
+; PPC64LE-NEXT: bne- 0, .LBB285_1
; PPC64LE-NEXT: .LBB285_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
@@ -5048,7 +5048,7 @@ define i16 @test286(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: blt 0, .LBB286_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB286_1
+; PPC64LE-NEXT: bne- 0, .LBB286_1
; PPC64LE-NEXT: .LBB286_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -5069,7 +5069,7 @@ define i16 @test287(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: blt 0, .LBB287_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB287_1
+; PPC64LE-NEXT: bne- 0, .LBB287_1
; PPC64LE-NEXT: .LBB287_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
@@ -5089,7 +5089,7 @@ define i16 @test288(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: blt 0, .LBB288_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB288_1
+; PPC64LE-NEXT: bne- 0, .LBB288_1
; PPC64LE-NEXT: .LBB288_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -5110,7 +5110,7 @@ define i16 @test289(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: blt 0, .LBB289_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB289_1
+; PPC64LE-NEXT: bne- 0, .LBB289_1
; PPC64LE-NEXT: .LBB289_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -5128,7 +5128,7 @@ define i32 @test290(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: blt 0, .LBB290_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB290_1
+; PPC64LE-NEXT: bne- 0, .LBB290_1
; PPC64LE-NEXT: .LBB290_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -5146,7 +5146,7 @@ define i32 @test291(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: blt 0, .LBB291_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB291_1
+; PPC64LE-NEXT: bne- 0, .LBB291_1
; PPC64LE-NEXT: .LBB291_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -5164,7 +5164,7 @@ define i32 @test292(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: blt 0, .LBB292_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB292_1
+; PPC64LE-NEXT: bne- 0, .LBB292_1
; PPC64LE-NEXT: .LBB292_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -5182,7 +5182,7 @@ define i32 @test293(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: blt 0, .LBB293_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB293_1
+; PPC64LE-NEXT: bne- 0, .LBB293_1
; PPC64LE-NEXT: .LBB293_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -5201,7 +5201,7 @@ define i32 @test294(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: blt 0, .LBB294_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB294_1
+; PPC64LE-NEXT: bne- 0, .LBB294_1
; PPC64LE-NEXT: .LBB294_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -5219,7 +5219,7 @@ define i64 @test295(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: blt 0, .LBB295_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB295_1
+; PPC64LE-NEXT: bne- 0, .LBB295_1
; PPC64LE-NEXT: .LBB295_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -5237,7 +5237,7 @@ define i64 @test296(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: blt 0, .LBB296_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB296_1
+; PPC64LE-NEXT: bne- 0, .LBB296_1
; PPC64LE-NEXT: .LBB296_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -5255,7 +5255,7 @@ define i64 @test297(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: blt 0, .LBB297_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB297_1
+; PPC64LE-NEXT: bne- 0, .LBB297_1
; PPC64LE-NEXT: .LBB297_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -5273,7 +5273,7 @@ define i64 @test298(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: blt 0, .LBB298_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB298_1
+; PPC64LE-NEXT: bne- 0, .LBB298_1
; PPC64LE-NEXT: .LBB298_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -5292,7 +5292,7 @@ define i64 @test299(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: blt 0, .LBB299_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB299_1
+; PPC64LE-NEXT: bne- 0, .LBB299_1
; PPC64LE-NEXT: .LBB299_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -5310,7 +5310,7 @@ define i8 @test300(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: bgt 0, .LBB300_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB300_1
+; PPC64LE-NEXT: bne- 0, .LBB300_1
; PPC64LE-NEXT: .LBB300_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -5328,7 +5328,7 @@ define i8 @test301(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: bgt 0, .LBB301_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB301_1
+; PPC64LE-NEXT: bne- 0, .LBB301_1
; PPC64LE-NEXT: .LBB301_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -5346,7 +5346,7 @@ define i8 @test302(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: bgt 0, .LBB302_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB302_1
+; PPC64LE-NEXT: bne- 0, .LBB302_1
; PPC64LE-NEXT: .LBB302_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -5364,7 +5364,7 @@ define i8 @test303(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: bgt 0, .LBB303_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB303_1
+; PPC64LE-NEXT: bne- 0, .LBB303_1
; PPC64LE-NEXT: .LBB303_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -5383,7 +5383,7 @@ define i8 @test304(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: bgt 0, .LBB304_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB304_1
+; PPC64LE-NEXT: bne- 0, .LBB304_1
; PPC64LE-NEXT: .LBB304_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -5401,7 +5401,7 @@ define i16 @test305(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: bgt 0, .LBB305_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB305_1
+; PPC64LE-NEXT: bne- 0, .LBB305_1
; PPC64LE-NEXT: .LBB305_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -5419,7 +5419,7 @@ define i16 @test306(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: bgt 0, .LBB306_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB306_1
+; PPC64LE-NEXT: bne- 0, .LBB306_1
; PPC64LE-NEXT: .LBB306_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -5437,7 +5437,7 @@ define i16 @test307(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: bgt 0, .LBB307_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB307_1
+; PPC64LE-NEXT: bne- 0, .LBB307_1
; PPC64LE-NEXT: .LBB307_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -5455,7 +5455,7 @@ define i16 @test308(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: bgt 0, .LBB308_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB308_1
+; PPC64LE-NEXT: bne- 0, .LBB308_1
; PPC64LE-NEXT: .LBB308_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -5474,7 +5474,7 @@ define i16 @test309(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: bgt 0, .LBB309_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB309_1
+; PPC64LE-NEXT: bne- 0, .LBB309_1
; PPC64LE-NEXT: .LBB309_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -5492,7 +5492,7 @@ define i32 @test310(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: bgt 0, .LBB310_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB310_1
+; PPC64LE-NEXT: bne- 0, .LBB310_1
; PPC64LE-NEXT: .LBB310_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -5510,7 +5510,7 @@ define i32 @test311(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: bgt 0, .LBB311_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB311_1
+; PPC64LE-NEXT: bne- 0, .LBB311_1
; PPC64LE-NEXT: .LBB311_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -5528,7 +5528,7 @@ define i32 @test312(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: bgt 0, .LBB312_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB312_1
+; PPC64LE-NEXT: bne- 0, .LBB312_1
; PPC64LE-NEXT: .LBB312_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -5546,7 +5546,7 @@ define i32 @test313(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: bgt 0, .LBB313_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB313_1
+; PPC64LE-NEXT: bne- 0, .LBB313_1
; PPC64LE-NEXT: .LBB313_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -5565,7 +5565,7 @@ define i32 @test314(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: bgt 0, .LBB314_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB314_1
+; PPC64LE-NEXT: bne- 0, .LBB314_1
; PPC64LE-NEXT: .LBB314_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -5583,7 +5583,7 @@ define i64 @test315(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: bgt 0, .LBB315_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB315_1
+; PPC64LE-NEXT: bne- 0, .LBB315_1
; PPC64LE-NEXT: .LBB315_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -5601,7 +5601,7 @@ define i64 @test316(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: bgt 0, .LBB316_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB316_1
+; PPC64LE-NEXT: bne- 0, .LBB316_1
; PPC64LE-NEXT: .LBB316_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -5619,7 +5619,7 @@ define i64 @test317(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: bgt 0, .LBB317_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB317_1
+; PPC64LE-NEXT: bne- 0, .LBB317_1
; PPC64LE-NEXT: .LBB317_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -5637,7 +5637,7 @@ define i64 @test318(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: bgt 0, .LBB318_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB318_1
+; PPC64LE-NEXT: bne- 0, .LBB318_1
; PPC64LE-NEXT: .LBB318_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -5656,7 +5656,7 @@ define i64 @test319(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: bgt 0, .LBB319_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB319_1
+; PPC64LE-NEXT: bne- 0, .LBB319_1
; PPC64LE-NEXT: .LBB319_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -5674,7 +5674,7 @@ define i8 @test320(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: blt 0, .LBB320_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB320_1
+; PPC64LE-NEXT: bne- 0, .LBB320_1
; PPC64LE-NEXT: .LBB320_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -5692,7 +5692,7 @@ define i8 @test321(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: blt 0, .LBB321_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB321_1
+; PPC64LE-NEXT: bne- 0, .LBB321_1
; PPC64LE-NEXT: .LBB321_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -5710,7 +5710,7 @@ define i8 @test322(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: blt 0, .LBB322_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB322_1
+; PPC64LE-NEXT: bne- 0, .LBB322_1
; PPC64LE-NEXT: .LBB322_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -5728,7 +5728,7 @@ define i8 @test323(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: blt 0, .LBB323_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB323_1
+; PPC64LE-NEXT: bne- 0, .LBB323_1
; PPC64LE-NEXT: .LBB323_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -5747,7 +5747,7 @@ define i8 @test324(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: blt 0, .LBB324_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB324_1
+; PPC64LE-NEXT: bne- 0, .LBB324_1
; PPC64LE-NEXT: .LBB324_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -5765,7 +5765,7 @@ define i16 @test325(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: blt 0, .LBB325_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB325_1
+; PPC64LE-NEXT: bne- 0, .LBB325_1
; PPC64LE-NEXT: .LBB325_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -5783,7 +5783,7 @@ define i16 @test326(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: blt 0, .LBB326_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB326_1
+; PPC64LE-NEXT: bne- 0, .LBB326_1
; PPC64LE-NEXT: .LBB326_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -5801,7 +5801,7 @@ define i16 @test327(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: blt 0, .LBB327_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB327_1
+; PPC64LE-NEXT: bne- 0, .LBB327_1
; PPC64LE-NEXT: .LBB327_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -5819,7 +5819,7 @@ define i16 @test328(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: blt 0, .LBB328_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB328_1
+; PPC64LE-NEXT: bne- 0, .LBB328_1
; PPC64LE-NEXT: .LBB328_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -5838,7 +5838,7 @@ define i16 @test329(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: blt 0, .LBB329_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB329_1
+; PPC64LE-NEXT: bne- 0, .LBB329_1
; PPC64LE-NEXT: .LBB329_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -5856,7 +5856,7 @@ define i32 @test330(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: blt 0, .LBB330_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB330_1
+; PPC64LE-NEXT: bne- 0, .LBB330_1
; PPC64LE-NEXT: .LBB330_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -5874,7 +5874,7 @@ define i32 @test331(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: blt 0, .LBB331_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB331_1
+; PPC64LE-NEXT: bne- 0, .LBB331_1
; PPC64LE-NEXT: .LBB331_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -5892,7 +5892,7 @@ define i32 @test332(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: blt 0, .LBB332_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB332_1
+; PPC64LE-NEXT: bne- 0, .LBB332_1
; PPC64LE-NEXT: .LBB332_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -5910,7 +5910,7 @@ define i32 @test333(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: blt 0, .LBB333_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB333_1
+; PPC64LE-NEXT: bne- 0, .LBB333_1
; PPC64LE-NEXT: .LBB333_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -5929,7 +5929,7 @@ define i32 @test334(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: blt 0, .LBB334_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB334_1
+; PPC64LE-NEXT: bne- 0, .LBB334_1
; PPC64LE-NEXT: .LBB334_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -5947,7 +5947,7 @@ define i64 @test335(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: blt 0, .LBB335_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB335_1
+; PPC64LE-NEXT: bne- 0, .LBB335_1
; PPC64LE-NEXT: .LBB335_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -5965,7 +5965,7 @@ define i64 @test336(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: blt 0, .LBB336_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB336_1
+; PPC64LE-NEXT: bne- 0, .LBB336_1
; PPC64LE-NEXT: .LBB336_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -5983,7 +5983,7 @@ define i64 @test337(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: blt 0, .LBB337_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB337_1
+; PPC64LE-NEXT: bne- 0, .LBB337_1
; PPC64LE-NEXT: .LBB337_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6001,7 +6001,7 @@ define i64 @test338(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: blt 0, .LBB338_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB338_1
+; PPC64LE-NEXT: bne- 0, .LBB338_1
; PPC64LE-NEXT: .LBB338_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6020,7 +6020,7 @@ define i64 @test339(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: blt 0, .LBB339_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB339_1
+; PPC64LE-NEXT: bne- 0, .LBB339_1
; PPC64LE-NEXT: .LBB339_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6035,7 +6035,7 @@ define i8 @test340(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB340_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB340_1
+; PPC64LE-NEXT: bne- 0, .LBB340_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6050,7 +6050,7 @@ define i8 @test341(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB341_1:
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: stbcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB341_1
+; PPC64LE-NEXT: bne- 0, .LBB341_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6065,7 +6065,7 @@ define i8 @test342(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB342_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB342_1
+; PPC64LE-NEXT: bne- 0, .LBB342_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6080,7 +6080,7 @@ define i8 @test343(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB343_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB343_1
+; PPC64LE-NEXT: bne- 0, .LBB343_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6096,7 +6096,7 @@ define i8 @test344(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: .LBB344_1:
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB344_1
+; PPC64LE-NEXT: bne- 0, .LBB344_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6111,7 +6111,7 @@ define i16 @test345(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB345_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB345_1
+; PPC64LE-NEXT: bne- 0, .LBB345_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6126,7 +6126,7 @@ define i16 @test346(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB346_1:
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: sthcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB346_1
+; PPC64LE-NEXT: bne- 0, .LBB346_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6141,7 +6141,7 @@ define i16 @test347(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB347_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB347_1
+; PPC64LE-NEXT: bne- 0, .LBB347_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6156,7 +6156,7 @@ define i16 @test348(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB348_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB348_1
+; PPC64LE-NEXT: bne- 0, .LBB348_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6172,7 +6172,7 @@ define i16 @test349(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: .LBB349_1:
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB349_1
+; PPC64LE-NEXT: bne- 0, .LBB349_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6187,7 +6187,7 @@ define i32 @test350(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB350_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB350_1
+; PPC64LE-NEXT: bne- 0, .LBB350_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6202,7 +6202,7 @@ define i32 @test351(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB351_1:
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: stwcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB351_1
+; PPC64LE-NEXT: bne- 0, .LBB351_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6217,7 +6217,7 @@ define i32 @test352(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB352_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB352_1
+; PPC64LE-NEXT: bne- 0, .LBB352_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6232,7 +6232,7 @@ define i32 @test353(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB353_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB353_1
+; PPC64LE-NEXT: bne- 0, .LBB353_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6248,7 +6248,7 @@ define i32 @test354(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: .LBB354_1:
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB354_1
+; PPC64LE-NEXT: bne- 0, .LBB354_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6263,7 +6263,7 @@ define i64 @test355(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB355_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB355_1
+; PPC64LE-NEXT: bne- 0, .LBB355_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6278,7 +6278,7 @@ define i64 @test356(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB356_1:
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: stdcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB356_1
+; PPC64LE-NEXT: bne- 0, .LBB356_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6293,7 +6293,7 @@ define i64 @test357(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB357_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB357_1
+; PPC64LE-NEXT: bne- 0, .LBB357_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6308,7 +6308,7 @@ define i64 @test358(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB358_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB358_1
+; PPC64LE-NEXT: bne- 0, .LBB358_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6324,7 +6324,7 @@ define i64 @test359(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: .LBB359_1:
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB359_1
+; PPC64LE-NEXT: bne- 0, .LBB359_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6340,7 +6340,7 @@ define i8 @test360(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB360_1
+; PPC64LE-NEXT: bne- 0, .LBB360_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6356,7 +6356,7 @@ define i8 @test361(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: add 6, 4, 3
; PPC64LE-NEXT: stbcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB361_1
+; PPC64LE-NEXT: bne- 0, .LBB361_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6372,7 +6372,7 @@ define i8 @test362(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB362_1
+; PPC64LE-NEXT: bne- 0, .LBB362_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6388,7 +6388,7 @@ define i8 @test363(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB363_1
+; PPC64LE-NEXT: bne- 0, .LBB363_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6405,7 +6405,7 @@ define i8 @test364(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB364_1
+; PPC64LE-NEXT: bne- 0, .LBB364_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6421,7 +6421,7 @@ define i16 @test365(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB365_1
+; PPC64LE-NEXT: bne- 0, .LBB365_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6437,7 +6437,7 @@ define i16 @test366(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: add 6, 4, 3
; PPC64LE-NEXT: sthcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB366_1
+; PPC64LE-NEXT: bne- 0, .LBB366_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6453,7 +6453,7 @@ define i16 @test367(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB367_1
+; PPC64LE-NEXT: bne- 0, .LBB367_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6469,7 +6469,7 @@ define i16 @test368(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB368_1
+; PPC64LE-NEXT: bne- 0, .LBB368_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6486,7 +6486,7 @@ define i16 @test369(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB369_1
+; PPC64LE-NEXT: bne- 0, .LBB369_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6502,7 +6502,7 @@ define i32 @test370(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB370_1
+; PPC64LE-NEXT: bne- 0, .LBB370_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6518,7 +6518,7 @@ define i32 @test371(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: add 6, 4, 3
; PPC64LE-NEXT: stwcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB371_1
+; PPC64LE-NEXT: bne- 0, .LBB371_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6534,7 +6534,7 @@ define i32 @test372(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB372_1
+; PPC64LE-NEXT: bne- 0, .LBB372_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6550,7 +6550,7 @@ define i32 @test373(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB373_1
+; PPC64LE-NEXT: bne- 0, .LBB373_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6567,7 +6567,7 @@ define i32 @test374(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB374_1
+; PPC64LE-NEXT: bne- 0, .LBB374_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6583,7 +6583,7 @@ define i64 @test375(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB375_1
+; PPC64LE-NEXT: bne- 0, .LBB375_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6599,7 +6599,7 @@ define i64 @test376(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: add 6, 4, 3
; PPC64LE-NEXT: stdcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB376_1
+; PPC64LE-NEXT: bne- 0, .LBB376_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6615,7 +6615,7 @@ define i64 @test377(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB377_1
+; PPC64LE-NEXT: bne- 0, .LBB377_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6631,7 +6631,7 @@ define i64 @test378(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB378_1
+; PPC64LE-NEXT: bne- 0, .LBB378_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6648,7 +6648,7 @@ define i64 @test379(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: add 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB379_1
+; PPC64LE-NEXT: bne- 0, .LBB379_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6664,7 +6664,7 @@ define i8 @test380(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB380_1
+; PPC64LE-NEXT: bne- 0, .LBB380_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6680,7 +6680,7 @@ define i8 @test381(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: sub 6, 3, 4
; PPC64LE-NEXT: stbcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB381_1
+; PPC64LE-NEXT: bne- 0, .LBB381_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6696,7 +6696,7 @@ define i8 @test382(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB382_1
+; PPC64LE-NEXT: bne- 0, .LBB382_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6712,7 +6712,7 @@ define i8 @test383(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB383_1
+; PPC64LE-NEXT: bne- 0, .LBB383_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6729,7 +6729,7 @@ define i8 @test384(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB384_1
+; PPC64LE-NEXT: bne- 0, .LBB384_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6745,7 +6745,7 @@ define i16 @test385(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB385_1
+; PPC64LE-NEXT: bne- 0, .LBB385_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6761,7 +6761,7 @@ define i16 @test386(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: sub 6, 3, 4
; PPC64LE-NEXT: sthcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB386_1
+; PPC64LE-NEXT: bne- 0, .LBB386_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6777,7 +6777,7 @@ define i16 @test387(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB387_1
+; PPC64LE-NEXT: bne- 0, .LBB387_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6793,7 +6793,7 @@ define i16 @test388(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB388_1
+; PPC64LE-NEXT: bne- 0, .LBB388_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6810,7 +6810,7 @@ define i16 @test389(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB389_1
+; PPC64LE-NEXT: bne- 0, .LBB389_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6826,7 +6826,7 @@ define i32 @test390(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB390_1
+; PPC64LE-NEXT: bne- 0, .LBB390_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6842,7 +6842,7 @@ define i32 @test391(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: sub 6, 3, 4
; PPC64LE-NEXT: stwcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB391_1
+; PPC64LE-NEXT: bne- 0, .LBB391_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6858,7 +6858,7 @@ define i32 @test392(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB392_1
+; PPC64LE-NEXT: bne- 0, .LBB392_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6874,7 +6874,7 @@ define i32 @test393(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB393_1
+; PPC64LE-NEXT: bne- 0, .LBB393_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6891,7 +6891,7 @@ define i32 @test394(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB394_1
+; PPC64LE-NEXT: bne- 0, .LBB394_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6907,7 +6907,7 @@ define i64 @test395(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB395_1
+; PPC64LE-NEXT: bne- 0, .LBB395_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6923,7 +6923,7 @@ define i64 @test396(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: sub 6, 3, 4
; PPC64LE-NEXT: stdcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB396_1
+; PPC64LE-NEXT: bne- 0, .LBB396_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -6939,7 +6939,7 @@ define i64 @test397(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB397_1
+; PPC64LE-NEXT: bne- 0, .LBB397_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -6955,7 +6955,7 @@ define i64 @test398(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB398_1
+; PPC64LE-NEXT: bne- 0, .LBB398_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6972,7 +6972,7 @@ define i64 @test399(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: sub 6, 5, 4
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB399_1
+; PPC64LE-NEXT: bne- 0, .LBB399_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -6988,7 +6988,7 @@ define i8 @test400(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB400_1
+; PPC64LE-NEXT: bne- 0, .LBB400_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7004,7 +7004,7 @@ define i8 @test401(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: and 6, 4, 3
; PPC64LE-NEXT: stbcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB401_1
+; PPC64LE-NEXT: bne- 0, .LBB401_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7020,7 +7020,7 @@ define i8 @test402(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB402_1
+; PPC64LE-NEXT: bne- 0, .LBB402_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7036,7 +7036,7 @@ define i8 @test403(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB403_1
+; PPC64LE-NEXT: bne- 0, .LBB403_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7053,7 +7053,7 @@ define i8 @test404(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB404_1
+; PPC64LE-NEXT: bne- 0, .LBB404_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7069,7 +7069,7 @@ define i16 @test405(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB405_1
+; PPC64LE-NEXT: bne- 0, .LBB405_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7085,7 +7085,7 @@ define i16 @test406(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: and 6, 4, 3
; PPC64LE-NEXT: sthcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB406_1
+; PPC64LE-NEXT: bne- 0, .LBB406_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7101,7 +7101,7 @@ define i16 @test407(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB407_1
+; PPC64LE-NEXT: bne- 0, .LBB407_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7117,7 +7117,7 @@ define i16 @test408(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB408_1
+; PPC64LE-NEXT: bne- 0, .LBB408_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7134,7 +7134,7 @@ define i16 @test409(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB409_1
+; PPC64LE-NEXT: bne- 0, .LBB409_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7150,7 +7150,7 @@ define i32 @test410(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB410_1
+; PPC64LE-NEXT: bne- 0, .LBB410_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7166,7 +7166,7 @@ define i32 @test411(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: and 6, 4, 3
; PPC64LE-NEXT: stwcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB411_1
+; PPC64LE-NEXT: bne- 0, .LBB411_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7182,7 +7182,7 @@ define i32 @test412(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB412_1
+; PPC64LE-NEXT: bne- 0, .LBB412_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7198,7 +7198,7 @@ define i32 @test413(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB413_1
+; PPC64LE-NEXT: bne- 0, .LBB413_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7215,7 +7215,7 @@ define i32 @test414(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB414_1
+; PPC64LE-NEXT: bne- 0, .LBB414_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7231,7 +7231,7 @@ define i64 @test415(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB415_1
+; PPC64LE-NEXT: bne- 0, .LBB415_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7247,7 +7247,7 @@ define i64 @test416(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: and 6, 4, 3
; PPC64LE-NEXT: stdcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB416_1
+; PPC64LE-NEXT: bne- 0, .LBB416_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7263,7 +7263,7 @@ define i64 @test417(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB417_1
+; PPC64LE-NEXT: bne- 0, .LBB417_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7279,7 +7279,7 @@ define i64 @test418(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB418_1
+; PPC64LE-NEXT: bne- 0, .LBB418_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7296,7 +7296,7 @@ define i64 @test419(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: and 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB419_1
+; PPC64LE-NEXT: bne- 0, .LBB419_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7312,7 +7312,7 @@ define i8 @test420(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB420_1
+; PPC64LE-NEXT: bne- 0, .LBB420_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7328,7 +7328,7 @@ define i8 @test421(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: nand 6, 4, 3
; PPC64LE-NEXT: stbcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB421_1
+; PPC64LE-NEXT: bne- 0, .LBB421_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7344,7 +7344,7 @@ define i8 @test422(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB422_1
+; PPC64LE-NEXT: bne- 0, .LBB422_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7360,7 +7360,7 @@ define i8 @test423(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB423_1
+; PPC64LE-NEXT: bne- 0, .LBB423_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7377,7 +7377,7 @@ define i8 @test424(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB424_1
+; PPC64LE-NEXT: bne- 0, .LBB424_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7393,7 +7393,7 @@ define i16 @test425(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB425_1
+; PPC64LE-NEXT: bne- 0, .LBB425_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7409,7 +7409,7 @@ define i16 @test426(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: nand 6, 4, 3
; PPC64LE-NEXT: sthcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB426_1
+; PPC64LE-NEXT: bne- 0, .LBB426_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7425,7 +7425,7 @@ define i16 @test427(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB427_1
+; PPC64LE-NEXT: bne- 0, .LBB427_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7441,7 +7441,7 @@ define i16 @test428(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB428_1
+; PPC64LE-NEXT: bne- 0, .LBB428_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7458,7 +7458,7 @@ define i16 @test429(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB429_1
+; PPC64LE-NEXT: bne- 0, .LBB429_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7474,7 +7474,7 @@ define i32 @test430(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB430_1
+; PPC64LE-NEXT: bne- 0, .LBB430_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7490,7 +7490,7 @@ define i32 @test431(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: nand 6, 4, 3
; PPC64LE-NEXT: stwcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB431_1
+; PPC64LE-NEXT: bne- 0, .LBB431_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7506,7 +7506,7 @@ define i32 @test432(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB432_1
+; PPC64LE-NEXT: bne- 0, .LBB432_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7522,7 +7522,7 @@ define i32 @test433(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB433_1
+; PPC64LE-NEXT: bne- 0, .LBB433_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7539,7 +7539,7 @@ define i32 @test434(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB434_1
+; PPC64LE-NEXT: bne- 0, .LBB434_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7555,7 +7555,7 @@ define i64 @test435(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB435_1
+; PPC64LE-NEXT: bne- 0, .LBB435_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7571,7 +7571,7 @@ define i64 @test436(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: nand 6, 4, 3
; PPC64LE-NEXT: stdcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB436_1
+; PPC64LE-NEXT: bne- 0, .LBB436_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7587,7 +7587,7 @@ define i64 @test437(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB437_1
+; PPC64LE-NEXT: bne- 0, .LBB437_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7603,7 +7603,7 @@ define i64 @test438(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB438_1
+; PPC64LE-NEXT: bne- 0, .LBB438_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7620,7 +7620,7 @@ define i64 @test439(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: nand 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB439_1
+; PPC64LE-NEXT: bne- 0, .LBB439_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7636,7 +7636,7 @@ define i8 @test440(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB440_1
+; PPC64LE-NEXT: bne- 0, .LBB440_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7652,7 +7652,7 @@ define i8 @test441(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: or 6, 4, 3
; PPC64LE-NEXT: stbcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB441_1
+; PPC64LE-NEXT: bne- 0, .LBB441_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7668,7 +7668,7 @@ define i8 @test442(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB442_1
+; PPC64LE-NEXT: bne- 0, .LBB442_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7684,7 +7684,7 @@ define i8 @test443(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB443_1
+; PPC64LE-NEXT: bne- 0, .LBB443_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7701,7 +7701,7 @@ define i8 @test444(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB444_1
+; PPC64LE-NEXT: bne- 0, .LBB444_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7717,7 +7717,7 @@ define i16 @test445(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB445_1
+; PPC64LE-NEXT: bne- 0, .LBB445_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7733,7 +7733,7 @@ define i16 @test446(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: or 6, 4, 3
; PPC64LE-NEXT: sthcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB446_1
+; PPC64LE-NEXT: bne- 0, .LBB446_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7749,7 +7749,7 @@ define i16 @test447(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB447_1
+; PPC64LE-NEXT: bne- 0, .LBB447_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7765,7 +7765,7 @@ define i16 @test448(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB448_1
+; PPC64LE-NEXT: bne- 0, .LBB448_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7782,7 +7782,7 @@ define i16 @test449(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB449_1
+; PPC64LE-NEXT: bne- 0, .LBB449_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7798,7 +7798,7 @@ define i32 @test450(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB450_1
+; PPC64LE-NEXT: bne- 0, .LBB450_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7814,7 +7814,7 @@ define i32 @test451(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: or 6, 4, 3
; PPC64LE-NEXT: stwcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB451_1
+; PPC64LE-NEXT: bne- 0, .LBB451_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7830,7 +7830,7 @@ define i32 @test452(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB452_1
+; PPC64LE-NEXT: bne- 0, .LBB452_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7846,7 +7846,7 @@ define i32 @test453(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB453_1
+; PPC64LE-NEXT: bne- 0, .LBB453_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7863,7 +7863,7 @@ define i32 @test454(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB454_1
+; PPC64LE-NEXT: bne- 0, .LBB454_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7879,7 +7879,7 @@ define i64 @test455(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB455_1
+; PPC64LE-NEXT: bne- 0, .LBB455_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7895,7 +7895,7 @@ define i64 @test456(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: or 6, 4, 3
; PPC64LE-NEXT: stdcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB456_1
+; PPC64LE-NEXT: bne- 0, .LBB456_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7911,7 +7911,7 @@ define i64 @test457(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB457_1
+; PPC64LE-NEXT: bne- 0, .LBB457_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7927,7 +7927,7 @@ define i64 @test458(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB458_1
+; PPC64LE-NEXT: bne- 0, .LBB458_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7944,7 +7944,7 @@ define i64 @test459(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: or 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB459_1
+; PPC64LE-NEXT: bne- 0, .LBB459_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -7960,7 +7960,7 @@ define i8 @test460(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB460_1
+; PPC64LE-NEXT: bne- 0, .LBB460_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -7976,7 +7976,7 @@ define i8 @test461(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 3, 0, 5
; PPC64LE-NEXT: xor 6, 4, 3
; PPC64LE-NEXT: stbcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB461_1
+; PPC64LE-NEXT: bne- 0, .LBB461_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -7992,7 +7992,7 @@ define i8 @test462(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB462_1
+; PPC64LE-NEXT: bne- 0, .LBB462_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -8008,7 +8008,7 @@ define i8 @test463(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB463_1
+; PPC64LE-NEXT: bne- 0, .LBB463_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -8025,7 +8025,7 @@ define i8 @test464(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: lbarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stbcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB464_1
+; PPC64LE-NEXT: bne- 0, .LBB464_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -8041,7 +8041,7 @@ define i16 @test465(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB465_1
+; PPC64LE-NEXT: bne- 0, .LBB465_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -8057,7 +8057,7 @@ define i16 @test466(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 3, 0, 5
; PPC64LE-NEXT: xor 6, 4, 3
; PPC64LE-NEXT: sthcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB466_1
+; PPC64LE-NEXT: bne- 0, .LBB466_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -8073,7 +8073,7 @@ define i16 @test467(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB467_1
+; PPC64LE-NEXT: bne- 0, .LBB467_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -8089,7 +8089,7 @@ define i16 @test468(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB468_1
+; PPC64LE-NEXT: bne- 0, .LBB468_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -8106,7 +8106,7 @@ define i16 @test469(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: lharx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: sthcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB469_1
+; PPC64LE-NEXT: bne- 0, .LBB469_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -8122,7 +8122,7 @@ define i32 @test470(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB470_1
+; PPC64LE-NEXT: bne- 0, .LBB470_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -8138,7 +8138,7 @@ define i32 @test471(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 3, 0, 5
; PPC64LE-NEXT: xor 6, 4, 3
; PPC64LE-NEXT: stwcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB471_1
+; PPC64LE-NEXT: bne- 0, .LBB471_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -8154,7 +8154,7 @@ define i32 @test472(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB472_1
+; PPC64LE-NEXT: bne- 0, .LBB472_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -8170,7 +8170,7 @@ define i32 @test473(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB473_1
+; PPC64LE-NEXT: bne- 0, .LBB473_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -8187,7 +8187,7 @@ define i32 @test474(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: lwarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stwcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB474_1
+; PPC64LE-NEXT: bne- 0, .LBB474_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -8203,7 +8203,7 @@ define i64 @test475(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB475_1
+; PPC64LE-NEXT: bne- 0, .LBB475_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -8219,7 +8219,7 @@ define i64 @test476(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 3, 0, 5
; PPC64LE-NEXT: xor 6, 4, 3
; PPC64LE-NEXT: stdcx. 6, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB476_1
+; PPC64LE-NEXT: bne- 0, .LBB476_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -8235,7 +8235,7 @@ define i64 @test477(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB477_1
+; PPC64LE-NEXT: bne- 0, .LBB477_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -8251,7 +8251,7 @@ define i64 @test478(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB478_1
+; PPC64LE-NEXT: bne- 0, .LBB478_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -8268,7 +8268,7 @@ define i64 @test479(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: ldarx 5, 0, 3
; PPC64LE-NEXT: xor 6, 4, 5
; PPC64LE-NEXT: stdcx. 6, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB479_1
+; PPC64LE-NEXT: bne- 0, .LBB479_1
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -8288,7 +8288,7 @@ define i8 @test480(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: bgt 0, .LBB480_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB480_1
+; PPC64LE-NEXT: bne- 0, .LBB480_1
; PPC64LE-NEXT: .LBB480_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
@@ -8307,7 +8307,7 @@ define i8 @test481(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: bgt 0, .LBB481_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB481_1
+; PPC64LE-NEXT: bne- 0, .LBB481_1
; PPC64LE-NEXT: .LBB481_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -8328,7 +8328,7 @@ define i8 @test482(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: bgt 0, .LBB482_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB482_1
+; PPC64LE-NEXT: bne- 0, .LBB482_1
; PPC64LE-NEXT: .LBB482_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
@@ -8348,7 +8348,7 @@ define i8 @test483(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: bgt 0, .LBB483_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB483_1
+; PPC64LE-NEXT: bne- 0, .LBB483_1
; PPC64LE-NEXT: .LBB483_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -8369,7 +8369,7 @@ define i8 @test484(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: bgt 0, .LBB484_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB484_1
+; PPC64LE-NEXT: bne- 0, .LBB484_1
; PPC64LE-NEXT: .LBB484_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -8389,7 +8389,7 @@ define i16 @test485(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: bgt 0, .LBB485_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB485_1
+; PPC64LE-NEXT: bne- 0, .LBB485_1
; PPC64LE-NEXT: .LBB485_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
@@ -8408,7 +8408,7 @@ define i16 @test486(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: bgt 0, .LBB486_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB486_1
+; PPC64LE-NEXT: bne- 0, .LBB486_1
; PPC64LE-NEXT: .LBB486_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -8429,7 +8429,7 @@ define i16 @test487(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: bgt 0, .LBB487_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB487_1
+; PPC64LE-NEXT: bne- 0, .LBB487_1
; PPC64LE-NEXT: .LBB487_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
@@ -8449,7 +8449,7 @@ define i16 @test488(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: bgt 0, .LBB488_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB488_1
+; PPC64LE-NEXT: bne- 0, .LBB488_1
; PPC64LE-NEXT: .LBB488_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -8470,7 +8470,7 @@ define i16 @test489(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: bgt 0, .LBB489_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB489_1
+; PPC64LE-NEXT: bne- 0, .LBB489_1
; PPC64LE-NEXT: .LBB489_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -8488,7 +8488,7 @@ define i32 @test490(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: bgt 0, .LBB490_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB490_1
+; PPC64LE-NEXT: bne- 0, .LBB490_1
; PPC64LE-NEXT: .LBB490_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -8506,7 +8506,7 @@ define i32 @test491(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: bgt 0, .LBB491_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB491_1
+; PPC64LE-NEXT: bne- 0, .LBB491_1
; PPC64LE-NEXT: .LBB491_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -8524,7 +8524,7 @@ define i32 @test492(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: bgt 0, .LBB492_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB492_1
+; PPC64LE-NEXT: bne- 0, .LBB492_1
; PPC64LE-NEXT: .LBB492_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -8542,7 +8542,7 @@ define i32 @test493(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: bgt 0, .LBB493_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB493_1
+; PPC64LE-NEXT: bne- 0, .LBB493_1
; PPC64LE-NEXT: .LBB493_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -8561,7 +8561,7 @@ define i32 @test494(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: bgt 0, .LBB494_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB494_1
+; PPC64LE-NEXT: bne- 0, .LBB494_1
; PPC64LE-NEXT: .LBB494_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -8579,7 +8579,7 @@ define i64 @test495(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: bgt 0, .LBB495_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB495_1
+; PPC64LE-NEXT: bne- 0, .LBB495_1
; PPC64LE-NEXT: .LBB495_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -8597,7 +8597,7 @@ define i64 @test496(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: bgt 0, .LBB496_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB496_1
+; PPC64LE-NEXT: bne- 0, .LBB496_1
; PPC64LE-NEXT: .LBB496_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -8615,7 +8615,7 @@ define i64 @test497(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: bgt 0, .LBB497_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB497_1
+; PPC64LE-NEXT: bne- 0, .LBB497_1
; PPC64LE-NEXT: .LBB497_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -8633,7 +8633,7 @@ define i64 @test498(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: bgt 0, .LBB498_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB498_1
+; PPC64LE-NEXT: bne- 0, .LBB498_1
; PPC64LE-NEXT: .LBB498_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -8652,7 +8652,7 @@ define i64 @test499(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: bgt 0, .LBB499_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB499_1
+; PPC64LE-NEXT: bne- 0, .LBB499_1
; PPC64LE-NEXT: .LBB499_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -8672,7 +8672,7 @@ define i8 @test500(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: blt 0, .LBB500_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB500_1
+; PPC64LE-NEXT: bne- 0, .LBB500_1
; PPC64LE-NEXT: .LBB500_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
@@ -8691,7 +8691,7 @@ define i8 @test501(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: blt 0, .LBB501_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB501_1
+; PPC64LE-NEXT: bne- 0, .LBB501_1
; PPC64LE-NEXT: .LBB501_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -8712,7 +8712,7 @@ define i8 @test502(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: blt 0, .LBB502_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB502_1
+; PPC64LE-NEXT: bne- 0, .LBB502_1
; PPC64LE-NEXT: .LBB502_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
@@ -8732,7 +8732,7 @@ define i8 @test503(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: blt 0, .LBB503_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB503_1
+; PPC64LE-NEXT: bne- 0, .LBB503_1
; PPC64LE-NEXT: .LBB503_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -8753,7 +8753,7 @@ define i8 @test504(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: blt 0, .LBB504_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB504_1
+; PPC64LE-NEXT: bne- 0, .LBB504_1
; PPC64LE-NEXT: .LBB504_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -8773,7 +8773,7 @@ define i16 @test505(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: blt 0, .LBB505_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB505_1
+; PPC64LE-NEXT: bne- 0, .LBB505_1
; PPC64LE-NEXT: .LBB505_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
@@ -8792,7 +8792,7 @@ define i16 @test506(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: blt 0, .LBB506_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB506_1
+; PPC64LE-NEXT: bne- 0, .LBB506_1
; PPC64LE-NEXT: .LBB506_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -8813,7 +8813,7 @@ define i16 @test507(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: blt 0, .LBB507_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB507_1
+; PPC64LE-NEXT: bne- 0, .LBB507_1
; PPC64LE-NEXT: .LBB507_3:
; PPC64LE-NEXT: mr 3, 4
; PPC64LE-NEXT: blr
@@ -8833,7 +8833,7 @@ define i16 @test508(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: blt 0, .LBB508_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB508_1
+; PPC64LE-NEXT: bne- 0, .LBB508_1
; PPC64LE-NEXT: .LBB508_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -8854,7 +8854,7 @@ define i16 @test509(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: blt 0, .LBB509_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 5, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB509_1
+; PPC64LE-NEXT: bne- 0, .LBB509_1
; PPC64LE-NEXT: .LBB509_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 4
@@ -8872,7 +8872,7 @@ define i32 @test510(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: blt 0, .LBB510_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB510_1
+; PPC64LE-NEXT: bne- 0, .LBB510_1
; PPC64LE-NEXT: .LBB510_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -8890,7 +8890,7 @@ define i32 @test511(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: blt 0, .LBB511_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB511_1
+; PPC64LE-NEXT: bne- 0, .LBB511_1
; PPC64LE-NEXT: .LBB511_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -8908,7 +8908,7 @@ define i32 @test512(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: blt 0, .LBB512_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB512_1
+; PPC64LE-NEXT: bne- 0, .LBB512_1
; PPC64LE-NEXT: .LBB512_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -8926,7 +8926,7 @@ define i32 @test513(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: blt 0, .LBB513_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB513_1
+; PPC64LE-NEXT: bne- 0, .LBB513_1
; PPC64LE-NEXT: .LBB513_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -8945,7 +8945,7 @@ define i32 @test514(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: blt 0, .LBB514_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB514_1
+; PPC64LE-NEXT: bne- 0, .LBB514_1
; PPC64LE-NEXT: .LBB514_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -8963,7 +8963,7 @@ define i64 @test515(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: blt 0, .LBB515_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB515_1
+; PPC64LE-NEXT: bne- 0, .LBB515_1
; PPC64LE-NEXT: .LBB515_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -8981,7 +8981,7 @@ define i64 @test516(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: blt 0, .LBB516_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB516_1
+; PPC64LE-NEXT: bne- 0, .LBB516_1
; PPC64LE-NEXT: .LBB516_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -8999,7 +8999,7 @@ define i64 @test517(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: blt 0, .LBB517_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB517_1
+; PPC64LE-NEXT: bne- 0, .LBB517_1
; PPC64LE-NEXT: .LBB517_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -9017,7 +9017,7 @@ define i64 @test518(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: blt 0, .LBB518_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB518_1
+; PPC64LE-NEXT: bne- 0, .LBB518_1
; PPC64LE-NEXT: .LBB518_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -9036,7 +9036,7 @@ define i64 @test519(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: blt 0, .LBB519_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB519_1
+; PPC64LE-NEXT: bne- 0, .LBB519_1
; PPC64LE-NEXT: .LBB519_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -9054,7 +9054,7 @@ define i8 @test520(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: bgt 0, .LBB520_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB520_1
+; PPC64LE-NEXT: bne- 0, .LBB520_1
; PPC64LE-NEXT: .LBB520_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -9072,7 +9072,7 @@ define i8 @test521(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: bgt 0, .LBB521_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB521_1
+; PPC64LE-NEXT: bne- 0, .LBB521_1
; PPC64LE-NEXT: .LBB521_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -9090,7 +9090,7 @@ define i8 @test522(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: bgt 0, .LBB522_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB522_1
+; PPC64LE-NEXT: bne- 0, .LBB522_1
; PPC64LE-NEXT: .LBB522_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -9108,7 +9108,7 @@ define i8 @test523(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: bgt 0, .LBB523_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB523_1
+; PPC64LE-NEXT: bne- 0, .LBB523_1
; PPC64LE-NEXT: .LBB523_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -9127,7 +9127,7 @@ define i8 @test524(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: bgt 0, .LBB524_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB524_1
+; PPC64LE-NEXT: bne- 0, .LBB524_1
; PPC64LE-NEXT: .LBB524_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -9145,7 +9145,7 @@ define i16 @test525(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: bgt 0, .LBB525_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB525_1
+; PPC64LE-NEXT: bne- 0, .LBB525_1
; PPC64LE-NEXT: .LBB525_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -9163,7 +9163,7 @@ define i16 @test526(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: bgt 0, .LBB526_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB526_1
+; PPC64LE-NEXT: bne- 0, .LBB526_1
; PPC64LE-NEXT: .LBB526_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -9181,7 +9181,7 @@ define i16 @test527(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: bgt 0, .LBB527_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB527_1
+; PPC64LE-NEXT: bne- 0, .LBB527_1
; PPC64LE-NEXT: .LBB527_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -9199,7 +9199,7 @@ define i16 @test528(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: bgt 0, .LBB528_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB528_1
+; PPC64LE-NEXT: bne- 0, .LBB528_1
; PPC64LE-NEXT: .LBB528_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -9218,7 +9218,7 @@ define i16 @test529(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: bgt 0, .LBB529_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB529_1
+; PPC64LE-NEXT: bne- 0, .LBB529_1
; PPC64LE-NEXT: .LBB529_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -9236,7 +9236,7 @@ define i32 @test530(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: bgt 0, .LBB530_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB530_1
+; PPC64LE-NEXT: bne- 0, .LBB530_1
; PPC64LE-NEXT: .LBB530_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -9254,7 +9254,7 @@ define i32 @test531(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: bgt 0, .LBB531_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB531_1
+; PPC64LE-NEXT: bne- 0, .LBB531_1
; PPC64LE-NEXT: .LBB531_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -9272,7 +9272,7 @@ define i32 @test532(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: bgt 0, .LBB532_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB532_1
+; PPC64LE-NEXT: bne- 0, .LBB532_1
; PPC64LE-NEXT: .LBB532_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -9290,7 +9290,7 @@ define i32 @test533(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: bgt 0, .LBB533_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB533_1
+; PPC64LE-NEXT: bne- 0, .LBB533_1
; PPC64LE-NEXT: .LBB533_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -9309,7 +9309,7 @@ define i32 @test534(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: bgt 0, .LBB534_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB534_1
+; PPC64LE-NEXT: bne- 0, .LBB534_1
; PPC64LE-NEXT: .LBB534_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -9327,7 +9327,7 @@ define i64 @test535(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: bgt 0, .LBB535_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB535_1
+; PPC64LE-NEXT: bne- 0, .LBB535_1
; PPC64LE-NEXT: .LBB535_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -9345,7 +9345,7 @@ define i64 @test536(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: bgt 0, .LBB536_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB536_1
+; PPC64LE-NEXT: bne- 0, .LBB536_1
; PPC64LE-NEXT: .LBB536_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -9363,7 +9363,7 @@ define i64 @test537(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: bgt 0, .LBB537_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB537_1
+; PPC64LE-NEXT: bne- 0, .LBB537_1
; PPC64LE-NEXT: .LBB537_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -9381,7 +9381,7 @@ define i64 @test538(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: bgt 0, .LBB538_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB538_1
+; PPC64LE-NEXT: bne- 0, .LBB538_1
; PPC64LE-NEXT: .LBB538_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -9400,7 +9400,7 @@ define i64 @test539(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: bgt 0, .LBB539_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB539_1
+; PPC64LE-NEXT: bne- 0, .LBB539_1
; PPC64LE-NEXT: .LBB539_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -9418,7 +9418,7 @@ define i8 @test540(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: blt 0, .LBB540_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB540_1
+; PPC64LE-NEXT: bne- 0, .LBB540_1
; PPC64LE-NEXT: .LBB540_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -9436,7 +9436,7 @@ define i8 @test541(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: blt 0, .LBB541_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB541_1
+; PPC64LE-NEXT: bne- 0, .LBB541_1
; PPC64LE-NEXT: .LBB541_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -9454,7 +9454,7 @@ define i8 @test542(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: blt 0, .LBB542_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB542_1
+; PPC64LE-NEXT: bne- 0, .LBB542_1
; PPC64LE-NEXT: .LBB542_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -9472,7 +9472,7 @@ define i8 @test543(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: blt 0, .LBB543_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB543_1
+; PPC64LE-NEXT: bne- 0, .LBB543_1
; PPC64LE-NEXT: .LBB543_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -9491,7 +9491,7 @@ define i8 @test544(ptr %ptr, i8 %val) {
; PPC64LE-NEXT: blt 0, .LBB544_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stbcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB544_1
+; PPC64LE-NEXT: bne- 0, .LBB544_1
; PPC64LE-NEXT: .LBB544_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -9509,7 +9509,7 @@ define i16 @test545(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: blt 0, .LBB545_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB545_1
+; PPC64LE-NEXT: bne- 0, .LBB545_1
; PPC64LE-NEXT: .LBB545_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -9527,7 +9527,7 @@ define i16 @test546(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: blt 0, .LBB546_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB546_1
+; PPC64LE-NEXT: bne- 0, .LBB546_1
; PPC64LE-NEXT: .LBB546_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -9545,7 +9545,7 @@ define i16 @test547(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: blt 0, .LBB547_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB547_1
+; PPC64LE-NEXT: bne- 0, .LBB547_1
; PPC64LE-NEXT: .LBB547_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -9563,7 +9563,7 @@ define i16 @test548(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: blt 0, .LBB548_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB548_1
+; PPC64LE-NEXT: bne- 0, .LBB548_1
; PPC64LE-NEXT: .LBB548_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -9582,7 +9582,7 @@ define i16 @test549(ptr %ptr, i16 %val) {
; PPC64LE-NEXT: blt 0, .LBB549_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: sthcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB549_1
+; PPC64LE-NEXT: bne- 0, .LBB549_1
; PPC64LE-NEXT: .LBB549_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -9600,7 +9600,7 @@ define i32 @test550(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: blt 0, .LBB550_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB550_1
+; PPC64LE-NEXT: bne- 0, .LBB550_1
; PPC64LE-NEXT: .LBB550_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -9618,7 +9618,7 @@ define i32 @test551(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: blt 0, .LBB551_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB551_1
+; PPC64LE-NEXT: bne- 0, .LBB551_1
; PPC64LE-NEXT: .LBB551_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -9636,7 +9636,7 @@ define i32 @test552(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: blt 0, .LBB552_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB552_1
+; PPC64LE-NEXT: bne- 0, .LBB552_1
; PPC64LE-NEXT: .LBB552_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -9654,7 +9654,7 @@ define i32 @test553(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: blt 0, .LBB553_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB553_1
+; PPC64LE-NEXT: bne- 0, .LBB553_1
; PPC64LE-NEXT: .LBB553_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -9673,7 +9673,7 @@ define i32 @test554(ptr %ptr, i32 %val) {
; PPC64LE-NEXT: blt 0, .LBB554_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stwcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB554_1
+; PPC64LE-NEXT: bne- 0, .LBB554_1
; PPC64LE-NEXT: .LBB554_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -9691,7 +9691,7 @@ define i64 @test555(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: blt 0, .LBB555_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB555_1
+; PPC64LE-NEXT: bne- 0, .LBB555_1
; PPC64LE-NEXT: .LBB555_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -9709,7 +9709,7 @@ define i64 @test556(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: blt 0, .LBB556_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 5
-; PPC64LE-NEXT: bne 0, .LBB556_1
+; PPC64LE-NEXT: bne- 0, .LBB556_1
; PPC64LE-NEXT: .LBB556_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: blr
@@ -9727,7 +9727,7 @@ define i64 @test557(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: blt 0, .LBB557_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB557_1
+; PPC64LE-NEXT: bne- 0, .LBB557_1
; PPC64LE-NEXT: .LBB557_3:
; PPC64LE-NEXT: mr 3, 5
; PPC64LE-NEXT: blr
@@ -9745,7 +9745,7 @@ define i64 @test558(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: blt 0, .LBB558_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB558_1
+; PPC64LE-NEXT: bne- 0, .LBB558_1
; PPC64LE-NEXT: .LBB558_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
@@ -9764,7 +9764,7 @@ define i64 @test559(ptr %ptr, i64 %val) {
; PPC64LE-NEXT: blt 0, .LBB559_3
; PPC64LE-NEXT: # %bb.2:
; PPC64LE-NEXT: stdcx. 4, 0, 3
-; PPC64LE-NEXT: bne 0, .LBB559_1
+; PPC64LE-NEXT: bne- 0, .LBB559_1
; PPC64LE-NEXT: .LBB559_3:
; PPC64LE-NEXT: lwsync
; PPC64LE-NEXT: mr 3, 5
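
The only change throughout these atomicrmw tests is the trailing "-" on the conditional branch: PowerPC extended mnemonics accept a "+"/"-" suffix as a static branch-prediction hint, and "bne-" marks the branch as unlikely taken. That matches the load-reserved/store-conditional pattern, where the backward branch retries the loop only when stwcx./stdcx. loses its reservation, which is the rare case. A minimal sketch of the kind of IR behind these loops (the function name here is made up, not from the test):

define i64 @rmw_add_sketch(ptr %ptr, i64 %val) {
  ; expands on PPC64LE to ldarx / add / stdcx. with a bne- retry branch
  %old = atomicrmw add ptr %ptr, i64 %val monotonic
  ret i64 %old
}
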
diff --git a/llvm/test/CodeGen/PowerPC/atomics.ll b/llvm/test/CodeGen/PowerPC/atomics.ll
index 183c8e1..ff1a722 100644
--- a/llvm/test/CodeGen/PowerPC/atomics.ll
+++ b/llvm/test/CodeGen/PowerPC/atomics.ll
@@ -341,7 +341,7 @@ define i8 @add_i8_monotonic(ptr %mem, i8 %operand) {
; PPC32-NEXT: and r8, r8, r6
; PPC32-NEXT: or r8, r8, r9
; PPC32-NEXT: stwcx. r8, 0, r5
-; PPC32-NEXT: bne cr0, .LBB12_1
+; PPC32-NEXT: bne- cr0, .LBB12_1
; PPC32-NEXT: # %bb.2:
; PPC32-NEXT: srw r3, r7, r3
; PPC32-NEXT: clrlwi r3, r3, 24
@@ -362,7 +362,7 @@ define i8 @add_i8_monotonic(ptr %mem, i8 %operand) {
; PPC64-NEXT: and r8, r8, r6
; PPC64-NEXT: or r8, r8, r9
; PPC64-NEXT: stwcx. r8, 0, r5
-; PPC64-NEXT: bne cr0, .LBB12_1
+; PPC64-NEXT: bne- cr0, .LBB12_1
; PPC64-NEXT: # %bb.2:
; PPC64-NEXT: srw r3, r7, r3
; PPC64-NEXT: clrlwi r3, r3, 24
@@ -388,7 +388,7 @@ define i16 @xor_i16_seq_cst(ptr %mem, i16 %operand) {
; PPC32-NEXT: and r8, r8, r6
; PPC32-NEXT: or r8, r8, r9
; PPC32-NEXT: stwcx. r8, 0, r3
-; PPC32-NEXT: bne cr0, .LBB13_1
+; PPC32-NEXT: bne- cr0, .LBB13_1
; PPC32-NEXT: # %bb.2:
; PPC32-NEXT: srw r3, r7, r5
; PPC32-NEXT: clrlwi r3, r3, 16
@@ -412,7 +412,7 @@ define i16 @xor_i16_seq_cst(ptr %mem, i16 %operand) {
; PPC64-NEXT: and r8, r8, r6
; PPC64-NEXT: or r8, r8, r9
; PPC64-NEXT: stwcx. r8, 0, r3
-; PPC64-NEXT: bne cr0, .LBB13_1
+; PPC64-NEXT: bne- cr0, .LBB13_1
; PPC64-NEXT: # %bb.2:
; PPC64-NEXT: srw r3, r7, r5
; PPC64-NEXT: clrlwi r3, r3, 16
@@ -428,7 +428,7 @@ define i32 @xchg_i32_acq_rel(ptr %mem, i32 %operand) {
; CHECK-NEXT: .LBB14_1:
; CHECK-NEXT: lwarx r5, 0, r3
; CHECK-NEXT: stwcx. r4, 0, r3
-; CHECK-NEXT: bne cr0, .LBB14_1
+; CHECK-NEXT: bne- cr0, .LBB14_1
; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: mr r3, r5
; CHECK-NEXT: lwsync
@@ -458,7 +458,7 @@ define i64 @and_i64_release(ptr %mem, i64 %operand) {
; PPC64-NEXT: ldarx r5, 0, r3
; PPC64-NEXT: and r6, r4, r5
; PPC64-NEXT: stdcx. r6, 0, r3
-; PPC64-NEXT: bne cr0, .LBB15_1
+; PPC64-NEXT: bne- cr0, .LBB15_1
; PPC64-NEXT: # %bb.2:
; PPC64-NEXT: mr r3, r5
; PPC64-NEXT: blr
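
The add_i8_monotonic and xor_i16_seq_cst hunks above also show the partword legalization: subtargets without the byte and halfword larx/stcx. forms (such as the PPC32 and PWR7 configurations in these tests) lower an i8 or i16 atomicrmw to a word-sized lwarx/stwcx. loop that shifts and masks the operand within its containing 32-bit word, which is what the and/or sequence around the stwcx. is doing. A sketch of the IR that triggers it, again with an invented name:

define i8 @partword_sketch(ptr %mem, i8 %operand) {
  ; without lbarx, legalized to a lwarx/stwcx. loop on the containing word
  %old = atomicrmw add ptr %mem, i8 %operand monotonic
  ret i8 %old
}
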
diff --git a/llvm/test/CodeGen/PowerPC/fmf-propagation.ll b/llvm/test/CodeGen/PowerPC/fmf-propagation.ll
index cad684e..baa127e 100644
--- a/llvm/test/CodeGen/PowerPC/fmf-propagation.ll
+++ b/llvm/test/CodeGen/PowerPC/fmf-propagation.ll
@@ -2,8 +2,8 @@
; REQUIRES: asserts
; RUN: llc < %s -mtriple=powerpc64le -debug-only=isel -o /dev/null 2>&1 | FileCheck %s --check-prefix=FMFDEBUG
; RUN: llc < %s -mtriple=powerpc64le | FileCheck %s --check-prefix=FMF
-; RUN: llc < %s -mtriple=powerpc64le -debug-only=isel -o /dev/null 2>&1 -enable-unsafe-fp-math -fp-contract=fast -enable-no-nans-fp-math | FileCheck %s --check-prefix=GLOBALDEBUG
-; RUN: llc < %s -mtriple=powerpc64le -enable-unsafe-fp-math -fp-contract=fast -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math | FileCheck %s --check-prefix=GLOBAL
+; RUN: llc < %s -mtriple=powerpc64le -debug-only=isel -o /dev/null 2>&1 -fp-contract=fast -enable-no-nans-fp-math | FileCheck %s --check-prefix=GLOBALDEBUG
+; RUN: llc < %s -mtriple=powerpc64le -fp-contract=fast -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math | FileCheck %s --check-prefix=GLOBAL
; Test FP transforms using instruction/node-level fast-math-flags.
; We're also checking debug output to verify that FMF is propagated to the newly created nodes.
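The two GLOBALDEBUG/GLOBAL runs now get their relaxed semantics from -fp-contract=fast plus the individual -enable-no-*-fp-math flags rather than the blanket -enable-unsafe-fp-math. A minimal sketch of the node-level flags the file itself exercises (illustrative function, not taken from the test):

define float @fmuladd_contract(float %a, float %b, float %c) {
  ; 'contract' on both operations permits fusing them into a single fma
  ; node during selection, with no global unsafe-fp-math setting.
  %m = fmul contract float %a, %b
  %r = fadd contract float %m, %c
  ret float %r
}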
diff --git a/llvm/test/CodeGen/PowerPC/i64_fp_round.ll b/llvm/test/CodeGen/PowerPC/i64_fp_round.ll
index f7df003..b1fb907 100644
--- a/llvm/test/CodeGen/PowerPC/i64_fp_round.ll
+++ b/llvm/test/CodeGen/PowerPC/i64_fp_round.ll
@@ -4,10 +4,9 @@
; for minor code generation differences.
; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-fpcvt < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-fpcvt -mattr=-isel < %s | FileCheck %s --check-prefix=CHECK-NO-ISEL
-; Also check that with -enable-unsafe-fp-math we do not get that extra
+; Also check that with fpexcept.ignore we do not get that extra
; code sequence. Simply verify that there is no "isel" present.
-; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-fpcvt -enable-unsafe-fp-math < %s | FileCheck %s -check-prefix=CHECK-UNSAFE
-; CHECK-UNSAFE-NOT: isel
+
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -15,9 +14,8 @@ define float @test(i64 %x) nounwind readnone {
; Verify that we get the code sequence needed to avoid double-rounding.
; Note that only parts of the sequence are checked for here, to allow
; for minor code generation differences.
-; Also check that with -enable-unsafe-fp-math we do not get that extra
+; Also check that with fpexcept.ignore we do not get that extra
; code sequence. Simply verify that there is no "isel" present.
-; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-fpcvt -enable-unsafe-fp-math < %s | FileCheck %s -check-prefix=CHECK-UNSAFE
; CHECK-LABEL: test:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: clrldi 4, 3, 53
@@ -51,18 +49,10 @@ define float @test(i64 %x) nounwind readnone {
; CHECK-NO-ISEL-NEXT: xscvsxddp 0, 0
; CHECK-NO-ISEL-NEXT: frsp 1, 0
; CHECK-NO-ISEL-NEXT: blr
-;
-; CHECK-UNSAFE-LABEL: test:
-; CHECK-UNSAFE: # %bb.0: # %entry
-; CHECK-UNSAFE-NEXT: std 3, -8(1)
-; CHECK-UNSAFE-NEXT: lfd 0, -8(1)
-; CHECK-UNSAFE-NEXT: xscvsxddp 0, 0
-; CHECK-UNSAFE-NEXT: frsp 1, 0
-; CHECK-UNSAFE-NEXT: blr
entry:
%conv = sitofp i64 %x to float
ret float %conv
}
-
+; TODO: Add sitofp afn test.
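The TODO calls for an afn-flagged sitofp test; in the meantime, a hedged sketch of the constrained form the updated comment refers to (assuming the usual constrained-intrinsic mangling, and that with fpexcept.ignore the extra "isel" sequence is expected to disappear, per the comment above):

define float @test_constrained(i64 %x) nounwind {
entry:
  ; fpexcept.ignore drops the exception bookkeeping, so the direct
  ; convert sequence can be used and no "isel" should appear.
  %conv = call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %x, metadata !"round.dynamic", metadata !"fpexcept.ignore")
  ret float %conv
}

declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)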
diff --git a/llvm/test/CodeGen/PowerPC/ppc-partword-atomic.ll b/llvm/test/CodeGen/PowerPC/ppc-partword-atomic.ll
index 9e3eea1..a6f5c61 100644
--- a/llvm/test/CodeGen/PowerPC/ppc-partword-atomic.ll
+++ b/llvm/test/CodeGen/PowerPC/ppc-partword-atomic.ll
@@ -25,7 +25,7 @@ define dso_local zeroext i32 @testI8(i8 zeroext %val) local_unnamed_addr #0 {
; PWR7-NEXT: andc 8, 7, 3
; PWR7-NEXT: or 8, 6, 8
; PWR7-NEXT: stwcx. 8, 0, 5
-; PWR7-NEXT: bne 0, .LBB0_1
+; PWR7-NEXT: bne- 0, .LBB0_1
; PWR7-NEXT: # %bb.2: # %entry
; PWR7-NEXT: srw 3, 7, 4
; PWR7-NEXT: addis 4, 2, global_int@toc@ha
@@ -44,7 +44,7 @@ define dso_local zeroext i32 @testI8(i8 zeroext %val) local_unnamed_addr #0 {
; PWR9-NEXT: #
; PWR9-NEXT: lbarx 4, 0, 5
; PWR9-NEXT: stbcx. 3, 0, 5
-; PWR9-NEXT: bne 0, .LBB0_1
+; PWR9-NEXT: bne- 0, .LBB0_1
; PWR9-NEXT: # %bb.2: # %entry
; PWR9-NEXT: addis 3, 2, global_int@toc@ha
; PWR9-NEXT: lwsync
@@ -78,7 +78,7 @@ define dso_local zeroext i32 @testI16(i16 zeroext %val) local_unnamed_addr #0 {
; PWR7-NEXT: andc 8, 7, 3
; PWR7-NEXT: or 8, 6, 8
; PWR7-NEXT: stwcx. 8, 0, 5
-; PWR7-NEXT: bne 0, .LBB1_1
+; PWR7-NEXT: bne- 0, .LBB1_1
; PWR7-NEXT: # %bb.2: # %entry
; PWR7-NEXT: srw 3, 7, 4
; PWR7-NEXT: addis 4, 2, global_int@toc@ha
@@ -97,7 +97,7 @@ define dso_local zeroext i32 @testI16(i16 zeroext %val) local_unnamed_addr #0 {
; PWR9-NEXT: #
; PWR9-NEXT: lharx 4, 0, 5
; PWR9-NEXT: sthcx. 3, 0, 5
-; PWR9-NEXT: bne 0, .LBB1_1
+; PWR9-NEXT: bne- 0, .LBB1_1
; PWR9-NEXT: # %bb.2: # %entry
; PWR9-NEXT: addis 3, 2, global_int@toc@ha
; PWR9-NEXT: lwsync
diff --git a/llvm/test/CodeGen/PowerPC/pr61882.ll b/llvm/test/CodeGen/PowerPC/pr61882.ll
index c649fe0..062d97c 100644
--- a/llvm/test/CodeGen/PowerPC/pr61882.ll
+++ b/llvm/test/CodeGen/PowerPC/pr61882.ll
@@ -27,7 +27,7 @@ define void @foo(ptr %a, i32 %x) {
; CHECK-NEXT: andc r8, r8, r6
; CHECK-NEXT: or r8, r7, r8
; CHECK-NEXT: stwcx. r8, 0, r3
-; CHECK-NEXT: bne cr0, .LBB0_1
+; CHECK-NEXT: bne- cr0, .LBB0_1
; CHECK-NEXT: .LBB0_3:
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
@@ -43,7 +43,7 @@ define void @foo(ptr %a, i32 %x) {
; PWR8-NEXT: bgt cr0, .LBB0_3
; PWR8-NEXT: # %bb.2:
; PWR8-NEXT: stbcx. r4, 0, r3
-; PWR8-NEXT: bne cr0, .LBB0_1
+; PWR8-NEXT: bne- cr0, .LBB0_1
; PWR8-NEXT: .LBB0_3:
; PWR8-NEXT: lwsync
; PWR8-NEXT: blr
diff --git a/llvm/test/CodeGen/PowerPC/scalar-equal.ll b/llvm/test/CodeGen/PowerPC/scalar-equal.ll
index 1832475..c0b11b4 100644
--- a/llvm/test/CodeGen/PowerPC/scalar-equal.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar-equal.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names --enable-unsafe-fp-math \
+; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names \
; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \
; RUN: --enable-no-nans-fp-math --enable-no-infs-fp-math \
; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
; RUN: --check-prefix=FAST-P8
-; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names --enable-unsafe-fp-math \
+; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names \
; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \
; RUN: --enable-no-nans-fp-math --enable-no-infs-fp-math \
; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
diff --git a/llvm/test/CodeGen/PowerPC/scalar-min-max-p10.ll b/llvm/test/CodeGen/PowerPC/scalar-min-max-p10.ll
index ca9baceb..5915bd3 100644
--- a/llvm/test/CodeGen/PowerPC/scalar-min-max-p10.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar-min-max-p10.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mcpu=pwr10 -ppc-asm-full-reg-names --enable-unsafe-fp-math \
+; RUN: llc -mcpu=pwr10 -ppc-asm-full-reg-names \
; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \
; RUN: --enable-no-nans-fp-math \
; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s
diff --git a/llvm/test/CodeGen/PowerPC/scalar-rounding-ops.ll b/llvm/test/CodeGen/PowerPC/scalar-rounding-ops.ll
index 2be370f..af48bf2 100644
--- a/llvm/test/CodeGen/PowerPC/scalar-rounding-ops.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar-rounding-ops.ll
@@ -5,9 +5,6 @@
; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
; RUN: -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs < %s | \
; RUN: FileCheck %s
-; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
-; RUN: -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs < %s \
-; RUN: --enable-unsafe-fp-math | FileCheck %s --check-prefix=FAST
define dso_local i64 @test_lrint(double %d) local_unnamed_addr {
; BE-LABEL: test_lrint:
; BE: # %bb.0: # %entry
@@ -36,17 +33,36 @@ define dso_local i64 @test_lrint(double %d) local_unnamed_addr {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: test_lrint:
-; FAST: # %bb.0: # %entry
-; FAST-NEXT: fctid f0, f1
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: blr
entry:
%0 = tail call i64 @llvm.lrint.i64.f64(double %d)
ret i64 %0
}
+define dso_local i64 @test_constrained_lrint(double %d) local_unnamed_addr {
+; BE-LABEL: test_constrained_lrint:
+; BE: # %bb.0: # %entry
+; BE-NEXT: mflr r0
+; BE-NEXT: stdu r1, -112(r1)
+; BE-NEXT: std r0, 128(r1)
+; BE-NEXT: .cfi_def_cfa_offset 112
+; BE-NEXT: .cfi_offset lr, 16
+; BE-NEXT: bl lrint
+; BE-NEXT: nop
+; BE-NEXT: addi r1, r1, 112
+; BE-NEXT: ld r0, 16(r1)
+; BE-NEXT: mtlr r0
+; BE-NEXT: blr
+;
+; CHECK-LABEL: test_constrained_lrint:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fctid f0, f1
+; CHECK-NEXT: mffprd r3, f0
+; CHECK-NEXT: blr
+entry:
+ %0 = tail call i64 @llvm.experimental.constrained.lrint(double %d, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret i64 %0
+}
+
declare i64 @llvm.lrint.i64.f64(double)
define dso_local i64 @test_lrintf(float %f) local_unnamed_addr {
@@ -77,17 +93,36 @@ define dso_local i64 @test_lrintf(float %f) local_unnamed_addr {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: test_lrintf:
-; FAST: # %bb.0: # %entry
-; FAST-NEXT: fctid f0, f1
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: blr
entry:
%0 = tail call i64 @llvm.lrint.i64.f32(float %f)
ret i64 %0
}
+define dso_local i64 @test_constrained_lrintf(float %f) local_unnamed_addr {
+; BE-LABEL: test_constrained_lrintf:
+; BE: # %bb.0: # %entry
+; BE-NEXT: mflr r0
+; BE-NEXT: stdu r1, -112(r1)
+; BE-NEXT: std r0, 128(r1)
+; BE-NEXT: .cfi_def_cfa_offset 112
+; BE-NEXT: .cfi_offset lr, 16
+; BE-NEXT: bl lrintf
+; BE-NEXT: nop
+; BE-NEXT: addi r1, r1, 112
+; BE-NEXT: ld r0, 16(r1)
+; BE-NEXT: mtlr r0
+; BE-NEXT: blr
+;
+; CHECK-LABEL: test_constrained_lrintf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fctid f0, f1
+; CHECK-NEXT: mffprd r3, f0
+; CHECK-NEXT: blr
+entry:
+ %0 = tail call i64 @llvm.experimental.constrained.lrint(float %f, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret i64 %0
+}
+
declare i64 @llvm.lrint.i64.f32(float)
define dso_local i64 @test_llrint(double %d) local_unnamed_addr {
@@ -118,17 +153,36 @@ define dso_local i64 @test_llrint(double %d) local_unnamed_addr {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: test_llrint:
-; FAST: # %bb.0: # %entry
-; FAST-NEXT: fctid f0, f1
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: blr
entry:
%0 = tail call i64 @llvm.llrint.i64.f64(double %d)
ret i64 %0
}
+define dso_local i64 @test_constrained_llrint(double %d) local_unnamed_addr {
+; BE-LABEL: test_constrained_llrint:
+; BE: # %bb.0: # %entry
+; BE-NEXT: mflr r0
+; BE-NEXT: stdu r1, -112(r1)
+; BE-NEXT: std r0, 128(r1)
+; BE-NEXT: .cfi_def_cfa_offset 112
+; BE-NEXT: .cfi_offset lr, 16
+; BE-NEXT: bl llrint
+; BE-NEXT: nop
+; BE-NEXT: addi r1, r1, 112
+; BE-NEXT: ld r0, 16(r1)
+; BE-NEXT: mtlr r0
+; BE-NEXT: blr
+;
+; CHECK-LABEL: test_constrained_llrint:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fctid f0, f1
+; CHECK-NEXT: mffprd r3, f0
+; CHECK-NEXT: blr
+entry:
+ %0 = tail call i64 @llvm.experimental.constrained.llrint(double %d, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret i64 %0
+}
+
declare i64 @llvm.llrint.i64.f64(double)
define dso_local i64 @test_llrintf(float %f) local_unnamed_addr {
@@ -159,17 +213,36 @@ define dso_local i64 @test_llrintf(float %f) local_unnamed_addr {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: test_llrintf:
-; FAST: # %bb.0: # %entry
-; FAST-NEXT: fctid f0, f1
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: blr
entry:
%0 = tail call i64 @llvm.llrint.i64.f32(float %f)
ret i64 %0
}
+define dso_local i64 @test_constrained_llrintf(float %f) local_unnamed_addr {
+; BE-LABEL: test_constrained_llrintf:
+; BE: # %bb.0: # %entry
+; BE-NEXT: mflr r0
+; BE-NEXT: stdu r1, -112(r1)
+; BE-NEXT: std r0, 128(r1)
+; BE-NEXT: .cfi_def_cfa_offset 112
+; BE-NEXT: .cfi_offset lr, 16
+; BE-NEXT: bl llrintf
+; BE-NEXT: nop
+; BE-NEXT: addi r1, r1, 112
+; BE-NEXT: ld r0, 16(r1)
+; BE-NEXT: mtlr r0
+; BE-NEXT: blr
+;
+; CHECK-LABEL: test_constrained_llrintf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fctid f0, f1
+; CHECK-NEXT: mffprd r3, f0
+; CHECK-NEXT: blr
+entry:
+ %0 = tail call i64 @llvm.experimental.constrained.llrint(float %f, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret i64 %0
+}
+
declare i64 @llvm.llrint.i64.f32(float)
define dso_local i64 @test_lround(double %d) local_unnamed_addr {
@@ -200,18 +273,37 @@ define dso_local i64 @test_lround(double %d) local_unnamed_addr {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: test_lround:
-; FAST: # %bb.0: # %entry
-; FAST-NEXT: xsrdpi f0, f1
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: blr
entry:
%0 = tail call i64 @llvm.lround.i64.f64(double %d)
ret i64 %0
}
+define dso_local i64 @test_constrained_lround(double %d) local_unnamed_addr {
+; BE-LABEL: test_constrained_lround:
+; BE: # %bb.0: # %entry
+; BE-NEXT: mflr r0
+; BE-NEXT: stdu r1, -112(r1)
+; BE-NEXT: std r0, 128(r1)
+; BE-NEXT: .cfi_def_cfa_offset 112
+; BE-NEXT: .cfi_offset lr, 16
+; BE-NEXT: bl lround
+; BE-NEXT: nop
+; BE-NEXT: addi r1, r1, 112
+; BE-NEXT: ld r0, 16(r1)
+; BE-NEXT: mtlr r0
+; BE-NEXT: blr
+;
+; CHECK-LABEL: test_constrained_lround:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xsrdpi f0, f1
+; CHECK-NEXT: fctid f0, f0
+; CHECK-NEXT: mffprd r3, f0
+; CHECK-NEXT: blr
+entry:
+ %0 = tail call i64 @llvm.experimental.constrained.lround(double %d, metadata !"fpexcept.ignore")
+ ret i64 %0
+}
+
declare i64 @llvm.lround.i64.f64(double)
define dso_local i32 @test_lroundi32f64(double %d) local_unnamed_addr {
@@ -242,18 +334,37 @@ define dso_local i32 @test_lroundi32f64(double %d) local_unnamed_addr {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: test_lroundi32f64:
-; FAST: # %bb.0: # %entry
-; FAST-NEXT: xsrdpi f0, f1
-; FAST-NEXT: fctiw f0, f0
-; FAST-NEXT: mffprwz r3, f0
-; FAST-NEXT: blr
entry:
%0 = tail call i32 @llvm.lround.i32.f64(double %d)
ret i32 %0
}
+define dso_local i32 @test_constrained_lroundi32f64(double %d) local_unnamed_addr {
+; BE-LABEL: test_constrained_lroundi32f64:
+; BE: # %bb.0: # %entry
+; BE-NEXT: mflr r0
+; BE-NEXT: stdu r1, -112(r1)
+; BE-NEXT: std r0, 128(r1)
+; BE-NEXT: .cfi_def_cfa_offset 112
+; BE-NEXT: .cfi_offset lr, 16
+; BE-NEXT: bl lround
+; BE-NEXT: nop
+; BE-NEXT: addi r1, r1, 112
+; BE-NEXT: ld r0, 16(r1)
+; BE-NEXT: mtlr r0
+; BE-NEXT: blr
+;
+; CHECK-LABEL: test_constrained_lroundi32f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xsrdpi f0, f1
+; CHECK-NEXT: fctiw f0, f0
+; CHECK-NEXT: mffprwz r3, f0
+; CHECK-NEXT: blr
+entry:
+ %0 = tail call i32 @llvm.experimental.constrained.lround(double %d, metadata !"fpexcept.ignore")
+ ret i32 %0
+}
+
declare i32 @llvm.lround.i32.f64(double)
define dso_local i64 @test_lroundf(float %f) local_unnamed_addr {
@@ -284,18 +395,37 @@ define dso_local i64 @test_lroundf(float %f) local_unnamed_addr {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: test_lroundf:
-; FAST: # %bb.0: # %entry
-; FAST-NEXT: xsrdpi f0, f1
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: blr
entry:
%0 = tail call i64 @llvm.lround.i64.f32(float %f)
ret i64 %0
}
+define dso_local i64 @test_constrained_lroundf(float %f) local_unnamed_addr {
+; BE-LABEL: test_constrained_lroundf:
+; BE: # %bb.0: # %entry
+; BE-NEXT: mflr r0
+; BE-NEXT: stdu r1, -112(r1)
+; BE-NEXT: std r0, 128(r1)
+; BE-NEXT: .cfi_def_cfa_offset 112
+; BE-NEXT: .cfi_offset lr, 16
+; BE-NEXT: bl lroundf
+; BE-NEXT: nop
+; BE-NEXT: addi r1, r1, 112
+; BE-NEXT: ld r0, 16(r1)
+; BE-NEXT: mtlr r0
+; BE-NEXT: blr
+;
+; CHECK-LABEL: test_constrained_lroundf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xsrdpi f0, f1
+; CHECK-NEXT: fctid f0, f0
+; CHECK-NEXT: mffprd r3, f0
+; CHECK-NEXT: blr
+entry:
+ %0 = tail call i64 @llvm.experimental.constrained.lround(float %f, metadata !"fpexcept.ignore")
+ ret i64 %0
+}
+
declare i64 @llvm.lround.i64.f32(float)
define dso_local i32 @test_lroundi32f32(float %d) local_unnamed_addr {
@@ -326,18 +456,37 @@ define dso_local i32 @test_lroundi32f32(float %d) local_unnamed_addr {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: test_lroundi32f32:
-; FAST: # %bb.0: # %entry
-; FAST-NEXT: xsrdpi f0, f1
-; FAST-NEXT: fctiw f0, f0
-; FAST-NEXT: mffprwz r3, f0
-; FAST-NEXT: blr
entry:
%0 = tail call i32 @llvm.lround.i32.f32(float %d)
ret i32 %0
}
+define dso_local i32 @test_constrained_lroundi32f32(float %f) local_unnamed_addr {
+; BE-LABEL: test_constrained_lroundi32f32:
+; BE: # %bb.0: # %entry
+; BE-NEXT: mflr r0
+; BE-NEXT: stdu r1, -112(r1)
+; BE-NEXT: std r0, 128(r1)
+; BE-NEXT: .cfi_def_cfa_offset 112
+; BE-NEXT: .cfi_offset lr, 16
+; BE-NEXT: bl lroundf
+; BE-NEXT: nop
+; BE-NEXT: addi r1, r1, 112
+; BE-NEXT: ld r0, 16(r1)
+; BE-NEXT: mtlr r0
+; BE-NEXT: blr
+;
+; CHECK-LABEL: test_constrained_lroundi32f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xsrdpi f0, f1
+; CHECK-NEXT: fctiw f0, f0
+; CHECK-NEXT: mffprwz r3, f0
+; CHECK-NEXT: blr
+entry:
+ %0 = tail call i32 @llvm.experimental.constrained.lround(float %f, metadata !"fpexcept.ignore")
+ ret i32 %0
+}
+
declare i32 @llvm.lround.i32.f32(float)
define dso_local i64 @test_llround(double %d) local_unnamed_addr {
@@ -368,18 +517,37 @@ define dso_local i64 @test_llround(double %d) local_unnamed_addr {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: test_llround:
-; FAST: # %bb.0: # %entry
-; FAST-NEXT: xsrdpi f0, f1
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: blr
entry:
%0 = tail call i64 @llvm.llround.i64.f64(double %d)
ret i64 %0
}
+define dso_local i64 @test_constrained_llround(double %d) local_unnamed_addr {
+; BE-LABEL: test_constrained_llround:
+; BE: # %bb.0: # %entry
+; BE-NEXT: mflr r0
+; BE-NEXT: stdu r1, -112(r1)
+; BE-NEXT: std r0, 128(r1)
+; BE-NEXT: .cfi_def_cfa_offset 112
+; BE-NEXT: .cfi_offset lr, 16
+; BE-NEXT: bl llround
+; BE-NEXT: nop
+; BE-NEXT: addi r1, r1, 112
+; BE-NEXT: ld r0, 16(r1)
+; BE-NEXT: mtlr r0
+; BE-NEXT: blr
+;
+; CHECK-LABEL: test_constrained_llround:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xsrdpi f0, f1
+; CHECK-NEXT: fctid f0, f0
+; CHECK-NEXT: mffprd r3, f0
+; CHECK-NEXT: blr
+entry:
+ %0 = tail call i64 @llvm.experimental.constrained.llround(double %d, metadata !"fpexcept.ignore")
+ ret i64 %0
+}
+
declare i64 @llvm.llround.i64.f64(double)
define dso_local i64 @test_llroundf(float %f) local_unnamed_addr {
@@ -410,18 +578,37 @@ define dso_local i64 @test_llroundf(float %f) local_unnamed_addr {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: test_llroundf:
-; FAST: # %bb.0: # %entry
-; FAST-NEXT: xsrdpi f0, f1
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: blr
entry:
%0 = tail call i64 @llvm.llround.i64.f32(float %f)
ret i64 %0
}
+define dso_local i64 @test_constrained_llroundf(float %f) local_unnamed_addr {
+; BE-LABEL: test_constrained_llroundf:
+; BE: # %bb.0: # %entry
+; BE-NEXT: mflr r0
+; BE-NEXT: stdu r1, -112(r1)
+; BE-NEXT: std r0, 128(r1)
+; BE-NEXT: .cfi_def_cfa_offset 112
+; BE-NEXT: .cfi_offset lr, 16
+; BE-NEXT: bl llroundf
+; BE-NEXT: nop
+; BE-NEXT: addi r1, r1, 112
+; BE-NEXT: ld r0, 16(r1)
+; BE-NEXT: mtlr r0
+; BE-NEXT: blr
+;
+; CHECK-LABEL: test_constrained_llroundf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xsrdpi f0, f1
+; CHECK-NEXT: fctid f0, f0
+; CHECK-NEXT: mffprd r3, f0
+; CHECK-NEXT: blr
+entry:
+ %0 = tail call i64 @llvm.experimental.constrained.llround(float %f, metadata !"fpexcept.ignore")
+ ret i64 %0
+}
+
declare i64 @llvm.llround.i64.f32(float)
define dso_local double @test_nearbyint(double %d) local_unnamed_addr {
@@ -452,16 +639,26 @@ define dso_local double @test_nearbyint(double %d) local_unnamed_addr {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: test_nearbyint:
-; FAST: # %bb.0: # %entry
-; FAST-NEXT: xsrdpic f1, f1
-; FAST-NEXT: blr
entry:
%0 = tail call double @llvm.nearbyint.f64(double %d)
ret double %0
}
+define dso_local double @test_constrained_nearbyint(double %d) local_unnamed_addr {
+; BE-LABEL: test_constrained_nearbyint:
+; BE: # %bb.0: # %entry
+; BE-NEXT: xsrdpic f1, f1
+; BE-NEXT: blr
+;
+; CHECK-LABEL: test_constrained_nearbyint:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xsrdpic f1, f1
+; CHECK-NEXT: blr
+entry:
+ %0 = tail call double @llvm.experimental.constrained.nearbyint(double %d, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret double %0
+}
+
declare double @llvm.nearbyint.f64(double)
define dso_local float @test_nearbyintf(float %f) local_unnamed_addr {
@@ -492,16 +689,26 @@ define dso_local float @test_nearbyintf(float %f) local_unnamed_addr {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: test_nearbyintf:
-; FAST: # %bb.0: # %entry
-; FAST-NEXT: xsrdpic f1, f1
-; FAST-NEXT: blr
entry:
%0 = tail call float @llvm.nearbyint.f32(float %f)
ret float %0
}
+define dso_local float @test_constrained_nearbyintf(float %f) local_unnamed_addr {
+; BE-LABEL: test_constrained_nearbyintf:
+; BE: # %bb.0: # %entry
+; BE-NEXT: xsrdpic f1, f1
+; BE-NEXT: blr
+;
+; CHECK-LABEL: test_constrained_nearbyintf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xsrdpic f1, f1
+; CHECK-NEXT: blr
+entry:
+ %0 = tail call float @llvm.experimental.constrained.nearbyint(float %f, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret float %0
+}
+
declare float @llvm.nearbyint.f32(float)
define dso_local double @test_round(double %d) local_unnamed_addr {
@@ -514,16 +721,26 @@ define dso_local double @test_round(double %d) local_unnamed_addr {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xsrdpi f1, f1
; CHECK-NEXT: blr
-;
-; FAST-LABEL: test_round:
-; FAST: # %bb.0: # %entry
-; FAST-NEXT: xsrdpi f1, f1
-; FAST-NEXT: blr
entry:
%0 = tail call double @llvm.round.f64(double %d)
ret double %0
}
+define dso_local double @test_constrained_round(double %d) local_unnamed_addr {
+; BE-LABEL: test_constrained_round:
+; BE: # %bb.0: # %entry
+; BE-NEXT: xsrdpi f1, f1
+; BE-NEXT: blr
+;
+; CHECK-LABEL: test_constrained_round:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xsrdpi f1, f1
+; CHECK-NEXT: blr
+entry:
+ %0 = tail call double @llvm.experimental.constrained.round(double %d, metadata !"fpexcept.ignore")
+ ret double %0
+}
+
declare double @llvm.round.f64(double)
define dso_local float @test_roundf(float %f) local_unnamed_addr {
@@ -536,16 +753,26 @@ define dso_local float @test_roundf(float %f) local_unnamed_addr {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xsrdpi f1, f1
; CHECK-NEXT: blr
-;
-; FAST-LABEL: test_roundf:
-; FAST: # %bb.0: # %entry
-; FAST-NEXT: xsrdpi f1, f1
-; FAST-NEXT: blr
entry:
%0 = tail call float @llvm.round.f32(float %f)
ret float %0
}
+define dso_local float @test_constrained_roundf(float %f) local_unnamed_addr {
+; BE-LABEL: test_constrained_roundf:
+; BE: # %bb.0: # %entry
+; BE-NEXT: xsrdpi f1, f1
+; BE-NEXT: blr
+;
+; CHECK-LABEL: test_constrained_roundf:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xsrdpi f1, f1
+; CHECK-NEXT: blr
+entry:
+ %0 = tail call float @llvm.experimental.constrained.round(float %f, metadata !"fpexcept.ignore")
+ ret float %0
+}
+
declare float @llvm.round.f32(float)
define dso_local double @test_trunc(double %d) local_unnamed_addr {
@@ -558,11 +785,6 @@ define dso_local double @test_trunc(double %d) local_unnamed_addr {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xsrdpiz f1, f1
; CHECK-NEXT: blr
-;
-; FAST-LABEL: test_trunc:
-; FAST: # %bb.0: # %entry
-; FAST-NEXT: xsrdpiz f1, f1
-; FAST-NEXT: blr
entry:
%0 = tail call double @llvm.trunc.f64(double %d)
ret double %0
@@ -580,11 +802,6 @@ define dso_local float @test_truncf(float %f) local_unnamed_addr {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xsrdpiz f1, f1
; CHECK-NEXT: blr
-;
-; FAST-LABEL: test_truncf:
-; FAST: # %bb.0: # %entry
-; FAST-NEXT: xsrdpiz f1, f1
-; FAST-NEXT: blr
entry:
%0 = tail call float @llvm.trunc.f32(float %f)
ret float %0
@@ -602,11 +819,6 @@ define dso_local double @test_floor(double %d) local_unnamed_addr {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xsrdpim f1, f1
; CHECK-NEXT: blr
-;
-; FAST-LABEL: test_floor:
-; FAST: # %bb.0: # %entry
-; FAST-NEXT: xsrdpim f1, f1
-; FAST-NEXT: blr
entry:
%0 = tail call double @llvm.floor.f64(double %d)
ret double %0
@@ -624,11 +836,6 @@ define dso_local float @test_floorf(float %f) local_unnamed_addr {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xsrdpim f1, f1
; CHECK-NEXT: blr
-;
-; FAST-LABEL: test_floorf:
-; FAST: # %bb.0: # %entry
-; FAST-NEXT: xsrdpim f1, f1
-; FAST-NEXT: blr
entry:
%0 = tail call float @llvm.floor.f32(float %f)
ret float %0
@@ -646,11 +853,6 @@ define dso_local double @test_ceil(double %d) local_unnamed_addr {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xsrdpip f1, f1
; CHECK-NEXT: blr
-;
-; FAST-LABEL: test_ceil:
-; FAST: # %bb.0: # %entry
-; FAST-NEXT: xsrdpip f1, f1
-; FAST-NEXT: blr
entry:
%0 = tail call double @llvm.ceil.f64(double %d)
ret double %0
@@ -668,11 +870,6 @@ define dso_local float @test_ceilf(float %f) local_unnamed_addr {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xsrdpip f1, f1
; CHECK-NEXT: blr
-;
-; FAST-LABEL: test_ceilf:
-; FAST: # %bb.0: # %entry
-; FAST-NEXT: xsrdpip f1, f1
-; FAST-NEXT: blr
entry:
%0 = tail call float @llvm.ceil.f32(float %f)
ret float %0
@@ -690,11 +887,6 @@ define dso_local double @test_rint(double %d) local_unnamed_addr {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xsrdpic f1, f1
; CHECK-NEXT: blr
-;
-; FAST-LABEL: test_rint:
-; FAST: # %bb.0: # %entry
-; FAST-NEXT: xsrdpic f1, f1
-; FAST-NEXT: blr
entry:
%0 = tail call double @llvm.rint.f64(double %d)
ret double %0
@@ -712,11 +904,6 @@ define dso_local float @test_rintf(float %f) local_unnamed_addr {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: xsrdpic f1, f1
; CHECK-NEXT: blr
-;
-; FAST-LABEL: test_rintf:
-; FAST: # %bb.0: # %entry
-; FAST-NEXT: xsrdpic f1, f1
-; FAST-NEXT: blr
entry:
%0 = tail call float @llvm.rint.f32(float %f)
ret float %0
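The added tests all follow one pattern: a constrained call with fpexcept.ignore lets the little-endian CHECK run select the direct hardware sequence (fctid/fctiw/xsrdpi*) that the deleted FAST run used to demonstrate, while the BE run still libcalls the integer-result forms (lrint, lround, and friends). For reference, the overloaded names above resolve to mangled declarations like the following (a sketch of the naming convention, not lines from the file; result and operand types are appended in that order, collapsing to one suffix when they must match):

declare i64 @llvm.experimental.constrained.lrint.i64.f64(double, metadata, metadata)
declare i64 @llvm.experimental.constrained.llround.i64.f32(float, metadata)
declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)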
diff --git a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
index fd0b494..881d1f4 100644
--- a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
@@ -1,10 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names --enable-unsafe-fp-math \
+; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names \
; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \
; RUN: --enable-no-nans-fp-math --enable-no-infs-fp-math \
; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
; RUN: --check-prefix=FAST-P8
-; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names --enable-unsafe-fp-math \
+; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names \
; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \
; RUN: --enable-no-nans-fp-math --enable-no-infs-fp-math \
; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
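Here, as in scalar-equal.ll above, dropping --enable-unsafe-fp-math is intended to leave the checked output unchanged: the remaining --enable-no-*-fp-math flags already supply the nnan/ninf/nsz assumptions the select-lowering checks depend on. For comparison, the per-instruction spelling of the same assumptions (illustrative function, not from the file):

define double @select_olt(double %a, double %b, double %x, double %y) {
  ; nnan/ninf/nsz on fcmp and select grant locally what the global
  ; flags grant module-wide.
  %cmp = fcmp nnan ninf nsz olt double %a, %b
  %sel = select nnan ninf nsz i1 %cmp, double %x, double %y
  ret double %sel
}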
diff --git a/llvm/test/CodeGen/PowerPC/sign-ext-atomics.ll b/llvm/test/CodeGen/PowerPC/sign-ext-atomics.ll
index 128d546..da4c192 100644
--- a/llvm/test/CodeGen/PowerPC/sign-ext-atomics.ll
+++ b/llvm/test/CodeGen/PowerPC/sign-ext-atomics.ll
@@ -16,7 +16,7 @@ define i16 @SEXTParam(i16 signext %0) #0 {
; CHECK-NEXT: # %bb.2: # %top
; CHECK-NEXT: #
; CHECK-NEXT: sthcx. 3, 0, 4
-; CHECK-NEXT: bne 0, .LBB0_1
+; CHECK-NEXT: bne- 0, .LBB0_1
; CHECK-NEXT: .LBB0_3: # %top
; CHECK-NEXT: lwsync
; CHECK-NEXT: lhz 3, -4(1)
@@ -49,7 +49,7 @@ define i16 @noSEXTParam(i16 %0) #0 {
; CHECK-NEXT: # %bb.2: # %top
; CHECK-NEXT: #
; CHECK-NEXT: sthcx. 3, 0, 4
-; CHECK-NEXT: bne 0, .LBB1_1
+; CHECK-NEXT: bne- 0, .LBB1_1
; CHECK-NEXT: .LBB1_3: # %top
; CHECK-NEXT: lwsync
; CHECK-NEXT: lhz 3, -4(1)
@@ -82,7 +82,7 @@ define i16 @noSEXTLoad(ptr %p) #0 {
; CHECK-NEXT: # %bb.2: # %top
; CHECK-NEXT: #
; CHECK-NEXT: sthcx. 3, 0, 4
-; CHECK-NEXT: bne 0, .LBB2_1
+; CHECK-NEXT: bne- 0, .LBB2_1
; CHECK-NEXT: .LBB2_3: # %top
; CHECK-NEXT: lwsync
; CHECK-NEXT: lhz 3, -4(1)
diff --git a/llvm/test/CodeGen/PowerPC/vector-llrint.ll b/llvm/test/CodeGen/PowerPC/vector-llrint.ll
index 8a9e48e..d2fb6ca 100644
--- a/llvm/test/CodeGen/PowerPC/vector-llrint.ll
+++ b/llvm/test/CodeGen/PowerPC/vector-llrint.ll
@@ -9,9 +9,6 @@
; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
; RUN: -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs < %s | \
; RUN: FileCheck %s
-; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
-; RUN: -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs < %s \
-; RUN: --enable-unsafe-fp-math | FileCheck %s --check-prefix=FAST
define <1 x i64> @llrint_v1i64_v1f16(<1 x half> %x) nounwind {
; BE-LABEL: llrint_v1i64_v1f16:
@@ -47,23 +44,6 @@ define <1 x i64> @llrint_v1i64_v1f16(<1 x half> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: llrint_v1i64_v1f16:
-; FAST: # %bb.0:
-; FAST-NEXT: mflr r0
-; FAST-NEXT: stdu r1, -32(r1)
-; FAST-NEXT: std r0, 48(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fctid f0, f1
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: addi r1, r1, 32
-; FAST-NEXT: ld r0, 16(r1)
-; FAST-NEXT: mtlr r0
-; FAST-NEXT: blr
%a = call <1 x i64> @llvm.llrint.v1i64.v1f16(<1 x half> %x)
ret <1 x i64> %a
}
@@ -147,41 +127,6 @@ define <2 x i64> @llrint_v1i64_v2f16(<2 x half> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: llrint_v1i64_v2f16:
-; FAST: # %bb.0:
-; FAST-NEXT: mflr r0
-; FAST-NEXT: stfd f30, -16(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f31, -8(r1) # 8-byte Folded Spill
-; FAST-NEXT: stdu r1, -48(r1)
-; FAST-NEXT: fmr f31, f1
-; FAST-NEXT: fmr f1, f2
-; FAST-NEXT: std r0, 64(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f30, f1
-; FAST-NEXT: fmr f1, f31
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fctid f0, f1
-; FAST-NEXT: fctid f1, f30
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v2, vs1, vs0
-; FAST-NEXT: addi r1, r1, 48
-; FAST-NEXT: ld r0, 16(r1)
-; FAST-NEXT: lfd f31, -8(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f30, -16(r1) # 8-byte Folded Reload
-; FAST-NEXT: mtlr r0
-; FAST-NEXT: blr
%a = call <2 x i64> @llvm.llrint.v2i64.v2f16(<2 x half> %x)
ret <2 x i64> %a
}
@@ -341,68 +286,6 @@ define <4 x i64> @llrint_v4i64_v4f16(<4 x half> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: llrint_v4i64_v4f16:
-; FAST: # %bb.0:
-; FAST-NEXT: mflr r0
-; FAST-NEXT: stfd f28, -32(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f29, -24(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f30, -16(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f31, -8(r1) # 8-byte Folded Spill
-; FAST-NEXT: stdu r1, -64(r1)
-; FAST-NEXT: fmr f29, f1
-; FAST-NEXT: fmr f1, f4
-; FAST-NEXT: std r0, 80(r1)
-; FAST-NEXT: fmr f31, f3
-; FAST-NEXT: fmr f30, f2
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f28, f1
-; FAST-NEXT: fmr f1, f31
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f31, f1
-; FAST-NEXT: fmr f1, f30
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f30, f1
-; FAST-NEXT: fmr f1, f29
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fctid f0, f30
-; FAST-NEXT: fctid f2, f31
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: fctid f1, f1
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f2
-; FAST-NEXT: mtfprd f2, r3
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v2, vs0, vs1
-; FAST-NEXT: fctid f0, f28
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v3, vs0, vs2
-; FAST-NEXT: addi r1, r1, 64
-; FAST-NEXT: ld r0, 16(r1)
-; FAST-NEXT: lfd f31, -8(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f30, -16(r1) # 8-byte Folded Reload
-; FAST-NEXT: mtlr r0
-; FAST-NEXT: lfd f29, -24(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f28, -32(r1) # 8-byte Folded Reload
-; FAST-NEXT: blr
%a = call <4 x i64> @llvm.llrint.v4i64.v4f16(<4 x half> %x)
ret <4 x i64> %a
}
@@ -714,122 +597,6 @@ define <8 x i64> @llrint_v8i64_v8f16(<8 x half> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: llrint_v8i64_v8f16:
-; FAST: # %bb.0:
-; FAST-NEXT: mflr r0
-; FAST-NEXT: stfd f24, -64(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f25, -56(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f26, -48(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f27, -40(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f28, -32(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f29, -24(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f30, -16(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f31, -8(r1) # 8-byte Folded Spill
-; FAST-NEXT: stdu r1, -96(r1)
-; FAST-NEXT: fmr f24, f1
-; FAST-NEXT: fmr f1, f8
-; FAST-NEXT: std r0, 112(r1)
-; FAST-NEXT: fmr f30, f7
-; FAST-NEXT: fmr f29, f6
-; FAST-NEXT: fmr f28, f5
-; FAST-NEXT: fmr f27, f4
-; FAST-NEXT: fmr f26, f3
-; FAST-NEXT: fmr f25, f2
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f31, f1
-; FAST-NEXT: fmr f1, f30
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f30, f1
-; FAST-NEXT: fmr f1, f29
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f29, f1
-; FAST-NEXT: fmr f1, f28
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f28, f1
-; FAST-NEXT: fmr f1, f27
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f27, f1
-; FAST-NEXT: fmr f1, f26
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f26, f1
-; FAST-NEXT: fmr f1, f25
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f25, f1
-; FAST-NEXT: fmr f1, f24
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fctid f0, f25
-; FAST-NEXT: fctid f2, f26
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: fctid f3, f27
-; FAST-NEXT: fctid f4, f28
-; FAST-NEXT: fctid f5, f29
-; FAST-NEXT: fctid f6, f30
-; FAST-NEXT: fctid f1, f1
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f2
-; FAST-NEXT: mtfprd f2, r3
-; FAST-NEXT: mffprd r3, f3
-; FAST-NEXT: mtfprd f3, r3
-; FAST-NEXT: mffprd r3, f4
-; FAST-NEXT: mtfprd f4, r3
-; FAST-NEXT: mffprd r3, f5
-; FAST-NEXT: mtfprd f5, r3
-; FAST-NEXT: mffprd r3, f6
-; FAST-NEXT: mtfprd f6, r3
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v3, vs3, vs2
-; FAST-NEXT: xxmrghd v4, vs5, vs4
-; FAST-NEXT: xxmrghd v2, vs0, vs1
-; FAST-NEXT: fctid f0, f31
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v5, vs0, vs6
-; FAST-NEXT: addi r1, r1, 96
-; FAST-NEXT: ld r0, 16(r1)
-; FAST-NEXT: lfd f31, -8(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f30, -16(r1) # 8-byte Folded Reload
-; FAST-NEXT: mtlr r0
-; FAST-NEXT: lfd f29, -24(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f28, -32(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f27, -40(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f26, -48(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f25, -56(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f24, -64(r1) # 8-byte Folded Reload
-; FAST-NEXT: blr
%a = call <8 x i64> @llvm.llrint.v8i64.v8f16(<8 x half> %x)
ret <8 x i64> %a
}
@@ -1439,228 +1206,6 @@ define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: llrint_v16i64_v16f16:
-; FAST: # %bb.0:
-; FAST-NEXT: mflr r0
-; FAST-NEXT: stfd f16, -128(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f17, -120(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f18, -112(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f19, -104(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f20, -96(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f21, -88(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f22, -80(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f23, -72(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f24, -64(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f25, -56(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f26, -48(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f27, -40(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f28, -32(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f29, -24(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f30, -16(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f31, -8(r1) # 8-byte Folded Spill
-; FAST-NEXT: stdu r1, -160(r1)
-; FAST-NEXT: fmr f26, f1
-; FAST-NEXT: lfs f1, 312(r1)
-; FAST-NEXT: std r0, 176(r1)
-; FAST-NEXT: fmr f28, f13
-; FAST-NEXT: fmr f27, f12
-; FAST-NEXT: fmr f24, f11
-; FAST-NEXT: fmr f21, f10
-; FAST-NEXT: fmr f19, f9
-; FAST-NEXT: fmr f18, f8
-; FAST-NEXT: fmr f17, f7
-; FAST-NEXT: fmr f16, f6
-; FAST-NEXT: fmr f20, f5
-; FAST-NEXT: fmr f22, f4
-; FAST-NEXT: fmr f23, f3
-; FAST-NEXT: fmr f25, f2
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f31, f1
-; FAST-NEXT: lfs f1, 304(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f30, f1
-; FAST-NEXT: lfs f1, 296(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f29, f1
-; FAST-NEXT: fmr f1, f28
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f28, f1
-; FAST-NEXT: fmr f1, f27
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f27, f1
-; FAST-NEXT: fmr f1, f24
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f24, f1
-; FAST-NEXT: fmr f1, f21
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f21, f1
-; FAST-NEXT: fmr f1, f19
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f19, f1
-; FAST-NEXT: fmr f1, f18
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f18, f1
-; FAST-NEXT: fmr f1, f17
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f17, f1
-; FAST-NEXT: fmr f1, f16
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f16, f1
-; FAST-NEXT: fmr f1, f20
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f20, f1
-; FAST-NEXT: fmr f1, f22
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f22, f1
-; FAST-NEXT: fmr f1, f23
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f23, f1
-; FAST-NEXT: fmr f1, f25
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f25, f1
-; FAST-NEXT: fmr f1, f26
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fctid f0, f25
-; FAST-NEXT: fctid f2, f23
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: fctid f3, f22
-; FAST-NEXT: fctid f4, f20
-; FAST-NEXT: fctid f5, f16
-; FAST-NEXT: fctid f6, f17
-; FAST-NEXT: fctid f7, f18
-; FAST-NEXT: fctid f8, f19
-; FAST-NEXT: fctid f9, f21
-; FAST-NEXT: fctid f10, f24
-; FAST-NEXT: fctid f1, f1
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f2
-; FAST-NEXT: mtfprd f2, r3
-; FAST-NEXT: mffprd r3, f3
-; FAST-NEXT: mtfprd f3, r3
-; FAST-NEXT: mffprd r3, f4
-; FAST-NEXT: mtfprd f4, r3
-; FAST-NEXT: mffprd r3, f5
-; FAST-NEXT: mtfprd f5, r3
-; FAST-NEXT: mffprd r3, f6
-; FAST-NEXT: mtfprd f6, r3
-; FAST-NEXT: mffprd r3, f7
-; FAST-NEXT: mtfprd f7, r3
-; FAST-NEXT: mffprd r3, f8
-; FAST-NEXT: mtfprd f8, r3
-; FAST-NEXT: mffprd r3, f9
-; FAST-NEXT: mtfprd f9, r3
-; FAST-NEXT: mffprd r3, f10
-; FAST-NEXT: mtfprd f10, r3
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v3, vs3, vs2
-; FAST-NEXT: xxmrghd v4, vs5, vs4
-; FAST-NEXT: xxmrghd v5, vs7, vs6
-; FAST-NEXT: xxmrghd v6, vs9, vs8
-; FAST-NEXT: xxmrghd v2, vs0, vs1
-; FAST-NEXT: fctid f0, f27
-; FAST-NEXT: fctid f1, f29
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v7, vs0, vs10
-; FAST-NEXT: fctid f0, f28
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v8, vs1, vs0
-; FAST-NEXT: fctid f0, f30
-; FAST-NEXT: fctid f1, f31
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v9, vs1, vs0
-; FAST-NEXT: addi r1, r1, 160
-; FAST-NEXT: ld r0, 16(r1)
-; FAST-NEXT: lfd f31, -8(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f30, -16(r1) # 8-byte Folded Reload
-; FAST-NEXT: mtlr r0
-; FAST-NEXT: lfd f29, -24(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f28, -32(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f27, -40(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f26, -48(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f25, -56(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f24, -64(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f23, -72(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f22, -80(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f21, -88(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f20, -96(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f19, -104(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f18, -112(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f17, -120(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f16, -128(r1) # 8-byte Folded Reload
-; FAST-NEXT: blr
%a = call <16 x i64> @llvm.llrint.v16i64.v16f16(<16 x half> %x)
ret <16 x i64> %a
}
@@ -2839,523 +2384,6 @@ define <32 x i64> @llrint_v32i64_v32f16(<32 x half> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: llrint_v32i64_v32f16:
-; FAST: # %bb.0:
-; FAST-NEXT: mflr r0
-; FAST-NEXT: stdu r1, -480(r1)
-; FAST-NEXT: li r4, 128
-; FAST-NEXT: std r0, 496(r1)
-; FAST-NEXT: std r30, 320(r1) # 8-byte Folded Spill
-; FAST-NEXT: mr r30, r3
-; FAST-NEXT: stfd f14, 336(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f15, 344(r1) # 8-byte Folded Spill
-; FAST-NEXT: fmr f14, f5
-; FAST-NEXT: stfd f16, 352(r1) # 8-byte Folded Spill
-; FAST-NEXT: stxvd2x v20, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 144
-; FAST-NEXT: fmr f16, f4
-; FAST-NEXT: stfd f17, 360(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f18, 368(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f19, 376(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f20, 384(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f21, 392(r1) # 8-byte Folded Spill
-; FAST-NEXT: stxvd2x v21, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 160
-; FAST-NEXT: stfd f22, 400(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f23, 408(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f24, 416(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f25, 424(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f26, 432(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f27, 440(r1) # 8-byte Folded Spill
-; FAST-NEXT: stxvd2x v22, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 176
-; FAST-NEXT: xxlor v22, f3, f3
-; FAST-NEXT: stfd f28, 448(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f29, 456(r1) # 8-byte Folded Spill
-; FAST-NEXT: fmr f29, f9
-; FAST-NEXT: stfd f30, 464(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f31, 472(r1) # 8-byte Folded Spill
-; FAST-NEXT: stxvd2x v23, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 192
-; FAST-NEXT: xxlor v23, f2, f2
-; FAST-NEXT: stxvd2x v24, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 208
-; FAST-NEXT: stxvd2x v25, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 224
-; FAST-NEXT: xxlor v25, f13, f13
-; FAST-NEXT: stxvd2x v26, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 240
-; FAST-NEXT: xxlor v26, f12, f12
-; FAST-NEXT: stxvd2x v27, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 256
-; FAST-NEXT: xxlor v27, f11, f11
-; FAST-NEXT: stxvd2x v28, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 272
-; FAST-NEXT: xxlor v28, f10, f10
-; FAST-NEXT: stxvd2x v29, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 288
-; FAST-NEXT: xxlor v29, f8, f8
-; FAST-NEXT: stxvd2x v30, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 304
-; FAST-NEXT: xxlor v30, f7, f7
-; FAST-NEXT: stxvd2x v31, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 44
-; FAST-NEXT: xxlor v31, f6, f6
-; FAST-NEXT: stxsspx f1, r1, r4 # 4-byte Folded Spill
-; FAST-NEXT: lfs f1, 768(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: li r3, 120
-; FAST-NEXT: stxsdx f1, r1, r3 # 8-byte Folded Spill
-; FAST-NEXT: lfs f1, 760(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: li r3, 112
-; FAST-NEXT: stxsdx f1, r1, r3 # 8-byte Folded Spill
-; FAST-NEXT: lfs f1, 752(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: li r3, 104
-; FAST-NEXT: stxsdx f1, r1, r3 # 8-byte Folded Spill
-; FAST-NEXT: lfs f1, 744(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: li r3, 96
-; FAST-NEXT: stxsdx f1, r1, r3 # 8-byte Folded Spill
-; FAST-NEXT: lfs f1, 736(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: li r3, 88
-; FAST-NEXT: stxsdx f1, r1, r3 # 8-byte Folded Spill
-; FAST-NEXT: lfs f1, 728(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: li r3, 80
-; FAST-NEXT: stxsdx f1, r1, r3 # 8-byte Folded Spill
-; FAST-NEXT: lfs f1, 720(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: li r3, 72
-; FAST-NEXT: stxsdx f1, r1, r3 # 8-byte Folded Spill
-; FAST-NEXT: lfs f1, 712(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: li r3, 64
-; FAST-NEXT: stxsdx f1, r1, r3 # 8-byte Folded Spill
-; FAST-NEXT: lfs f1, 704(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: li r3, 56
-; FAST-NEXT: stxsdx f1, r1, r3 # 8-byte Folded Spill
-; FAST-NEXT: lfs f1, 696(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: li r3, 48
-; FAST-NEXT: stxsdx f1, r1, r3 # 8-byte Folded Spill
-; FAST-NEXT: lfs f1, 688(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: xxlor v21, f1, f1
-; FAST-NEXT: lfs f1, 680(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: xxlor v20, f1, f1
-; FAST-NEXT: lfs f1, 672(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: xxlor v24, f1, f1
-; FAST-NEXT: lfs f1, 664(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f31, f1
-; FAST-NEXT: lfs f1, 656(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f30, f1
-; FAST-NEXT: lfs f1, 648(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f28, f1
-; FAST-NEXT: lfs f1, 640(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f27, f1
-; FAST-NEXT: lfs f1, 632(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f26, f1
-; FAST-NEXT: lfs f1, 624(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f25, f1
-; FAST-NEXT: xxlor f1, v25, v25
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f24, f1
-; FAST-NEXT: xxlor f1, v26, v26
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f23, f1
-; FAST-NEXT: xxlor f1, v27, v27
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f22, f1
-; FAST-NEXT: xxlor f1, v28, v28
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f21, f1
-; FAST-NEXT: fmr f1, f29
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f20, f1
-; FAST-NEXT: xxlor f1, v29, v29
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f19, f1
-; FAST-NEXT: xxlor f1, v30, v30
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f18, f1
-; FAST-NEXT: xxlor f1, v31, v31
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f29, f1
-; FAST-NEXT: fmr f1, f14
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f14, f1
-; FAST-NEXT: fmr f1, f16
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f16, f1
-; FAST-NEXT: xxlor f1, v22, v22
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f17, f1
-; FAST-NEXT: xxlor f1, v23, v23
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: li r3, 44
-; FAST-NEXT: fmr f15, f1
-; FAST-NEXT: lxsspx f1, r1, r3 # 4-byte Folded Reload
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fctid f3, f15
-; FAST-NEXT: fctid f4, f17
-; FAST-NEXT: mffprd r3, f3
-; FAST-NEXT: fctid f5, f16
-; FAST-NEXT: fctid f6, f14
-; FAST-NEXT: fctid f7, f18
-; FAST-NEXT: fctid f8, f19
-; FAST-NEXT: fctid f13, f1
-; FAST-NEXT: fctid f9, f20
-; FAST-NEXT: fctid f10, f22
-; FAST-NEXT: fctid f11, f24
-; FAST-NEXT: fctid f12, f25
-; FAST-NEXT: fctid f2, f23
-; FAST-NEXT: fctid f0, f21
-; FAST-NEXT: mtvsrd v2, r3
-; FAST-NEXT: mffprd r3, f4
-; FAST-NEXT: mtvsrd v3, r3
-; FAST-NEXT: mffprd r3, f5
-; FAST-NEXT: mtfprd f5, r3
-; FAST-NEXT: mffprd r3, f6
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: mffprd r3, f7
-; FAST-NEXT: mtfprd f6, r3
-; FAST-NEXT: mffprd r3, f8
-; FAST-NEXT: mtfprd f7, r3
-; FAST-NEXT: mffprd r3, f9
-; FAST-NEXT: mtfprd f3, r3
-; FAST-NEXT: mffprd r3, f10
-; FAST-NEXT: mtfprd f4, r3
-; FAST-NEXT: mffprd r3, f11
-; FAST-NEXT: fctid f11, f31
-; FAST-NEXT: lfd f31, 56(r1) # 8-byte Folded Reload
-; FAST-NEXT: mtfprd f8, r3
-; FAST-NEXT: mffprd r3, f12
-; FAST-NEXT: xxlor f12, v24, v24
-; FAST-NEXT: fctid f31, f31
-; FAST-NEXT: fctid f12, f12
-; FAST-NEXT: mtfprd f9, r3
-; FAST-NEXT: mffprd r3, f13
-; FAST-NEXT: lfd f13, 48(r1) # 8-byte Folded Reload
-; FAST-NEXT: mtfprd f10, r3
-; FAST-NEXT: fctid f13, f13
-; FAST-NEXT: xxmrghd v3, vs5, v3
-; FAST-NEXT: fctid f5, f26
-; FAST-NEXT: mffprd r3, f5
-; FAST-NEXT: mtfprd f5, r3
-; FAST-NEXT: xxmrghd v4, vs7, vs6
-; FAST-NEXT: fctid f6, f27
-; FAST-NEXT: fctid f7, f28
-; FAST-NEXT: mffprd r3, f6
-; FAST-NEXT: lfd f28, 96(r1) # 8-byte Folded Reload
-; FAST-NEXT: fctid f28, f28
-; FAST-NEXT: mtfprd f6, r3
-; FAST-NEXT: mffprd r3, f7
-; FAST-NEXT: mtfprd f7, r3
-; FAST-NEXT: xxmrghd v2, v2, vs10
-; FAST-NEXT: fctid f10, f30
-; FAST-NEXT: mffprd r3, f10
-; FAST-NEXT: lfd f30, 80(r1) # 8-byte Folded Reload
-; FAST-NEXT: fctid f30, f30
-; FAST-NEXT: mtfprd f10, r3
-; FAST-NEXT: mffprd r3, f11
-; FAST-NEXT: mtfprd f11, r3
-; FAST-NEXT: mffprd r3, f12
-; FAST-NEXT: mtfprd f12, r3
-; FAST-NEXT: xxmrghd v5, vs12, vs11
-; FAST-NEXT: xxlor f11, v20, v20
-; FAST-NEXT: xxlor f12, v21, v21
-; FAST-NEXT: fctid f11, f11
-; FAST-NEXT: fctid f12, f12
-; FAST-NEXT: mffprd r3, f11
-; FAST-NEXT: mtfprd f11, r3
-; FAST-NEXT: mffprd r3, f12
-; FAST-NEXT: mtfprd f12, r3
-; FAST-NEXT: mffprd r3, f13
-; FAST-NEXT: mtfprd f13, r3
-; FAST-NEXT: mffprd r3, f31
-; FAST-NEXT: lfd f31, 64(r1) # 8-byte Folded Reload
-; FAST-NEXT: fctid f31, f31
-; FAST-NEXT: mtvsrd v0, r3
-; FAST-NEXT: mffprd r3, f31
-; FAST-NEXT: lfd f31, 72(r1) # 8-byte Folded Reload
-; FAST-NEXT: mtvsrd v1, r3
-; FAST-NEXT: mffprd r3, f30
-; FAST-NEXT: lfd f30, 88(r1) # 8-byte Folded Reload
-; FAST-NEXT: fctid f31, f31
-; FAST-NEXT: mtvsrd v6, r3
-; FAST-NEXT: mffprd r3, f28
-; FAST-NEXT: lfd f28, 104(r1) # 8-byte Folded Reload
-; FAST-NEXT: fctid f30, f30
-; FAST-NEXT: fctid f28, f28
-; FAST-NEXT: mtvsrd v7, r3
-; FAST-NEXT: mffprd r3, f28
-; FAST-NEXT: lfd f28, 112(r1) # 8-byte Folded Reload
-; FAST-NEXT: fctid f28, f28
-; FAST-NEXT: mtvsrd v8, r3
-; FAST-NEXT: mffprd r3, f28
-; FAST-NEXT: lfd f28, 120(r1) # 8-byte Folded Reload
-; FAST-NEXT: fctid f28, f28
-; FAST-NEXT: xxmrghd v10, vs12, vs11
-; FAST-NEXT: xxmrghd v0, v0, vs13
-; FAST-NEXT: xxswapd vs12, v0
-; FAST-NEXT: xxmrghd v0, vs9, vs8
-; FAST-NEXT: xxmrghd v7, v8, v7
-; FAST-NEXT: mtvsrd v8, r3
-; FAST-NEXT: mffprd r3, f28
-; FAST-NEXT: mtvsrd v9, r3
-; FAST-NEXT: mffprd r3, f30
-; FAST-NEXT: xxswapd v7, v7
-; FAST-NEXT: xxmrghd v8, v9, v8
-; FAST-NEXT: mtvsrd v9, r3
-; FAST-NEXT: mffprd r3, f31
-; FAST-NEXT: xxswapd v8, v8
-; FAST-NEXT: xxmrghd v6, v9, v6
-; FAST-NEXT: mtvsrd v9, r3
-; FAST-NEXT: li r3, 240
-; FAST-NEXT: stxvd2x v8, r30, r3
-; FAST-NEXT: li r3, 224
-; FAST-NEXT: stxvd2x v7, r30, r3
-; FAST-NEXT: li r3, 208
-; FAST-NEXT: xxswapd vs11, v6
-; FAST-NEXT: xxmrghd v6, vs10, vs7
-; FAST-NEXT: stxvd2x vs11, r30, r3
-; FAST-NEXT: li r3, 192
-; FAST-NEXT: xxmrghd v1, v9, v1
-; FAST-NEXT: xxswapd vs11, v1
-; FAST-NEXT: xxmrghd v1, vs6, vs5
-; FAST-NEXT: xxswapd vs5, v10
-; FAST-NEXT: xxswapd vs6, v5
-; FAST-NEXT: stxvd2x vs11, r30, r3
-; FAST-NEXT: li r3, 176
-; FAST-NEXT: stxvd2x vs12, r30, r3
-; FAST-NEXT: li r3, 160
-; FAST-NEXT: stxvd2x vs5, r30, r3
-; FAST-NEXT: li r3, 144
-; FAST-NEXT: stxvd2x vs6, r30, r3
-; FAST-NEXT: mffprd r3, f2
-; FAST-NEXT: mtfprd f7, r3
-; FAST-NEXT: li r3, 128
-; FAST-NEXT: xxswapd vs5, v6
-; FAST-NEXT: stxvd2x vs5, r30, r3
-; FAST-NEXT: li r3, 112
-; FAST-NEXT: xxswapd vs2, v1
-; FAST-NEXT: xxswapd vs6, v0
-; FAST-NEXT: stxvd2x vs2, r30, r3
-; FAST-NEXT: li r3, 96
-; FAST-NEXT: fctid f2, f29
-; FAST-NEXT: stxvd2x vs6, r30, r3
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f2
-; FAST-NEXT: mtfprd f2, r3
-; FAST-NEXT: li r3, 80
-; FAST-NEXT: xxmrghd v5, vs7, vs4
-; FAST-NEXT: xxswapd vs4, v2
-; FAST-NEXT: xxmrghd v0, vs0, vs3
-; FAST-NEXT: xxswapd vs0, v5
-; FAST-NEXT: xxswapd vs3, v3
-; FAST-NEXT: stxvd2x vs0, r30, r3
-; FAST-NEXT: li r3, 64
-; FAST-NEXT: xxswapd vs0, v0
-; FAST-NEXT: stxvd2x vs0, r30, r3
-; FAST-NEXT: li r3, 48
-; FAST-NEXT: xxmrghd v5, vs2, vs1
-; FAST-NEXT: xxswapd vs1, v4
-; FAST-NEXT: stxvd2x vs1, r30, r3
-; FAST-NEXT: li r3, 32
-; FAST-NEXT: xxswapd vs2, v5
-; FAST-NEXT: stxvd2x vs2, r30, r3
-; FAST-NEXT: li r3, 16
-; FAST-NEXT: stxvd2x vs3, r30, r3
-; FAST-NEXT: li r3, 304
-; FAST-NEXT: stxvd2x vs4, 0, r30
-; FAST-NEXT: lfd f31, 472(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f30, 464(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f29, 456(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f28, 448(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f27, 440(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f26, 432(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f25, 424(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f24, 416(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f23, 408(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f22, 400(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f21, 392(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f20, 384(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f19, 376(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f18, 368(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f17, 360(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f16, 352(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f15, 344(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f14, 336(r1) # 8-byte Folded Reload
-; FAST-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 288
-; FAST-NEXT: ld r30, 320(r1) # 8-byte Folded Reload
-; FAST-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 272
-; FAST-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 256
-; FAST-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 240
-; FAST-NEXT: lxvd2x v27, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 224
-; FAST-NEXT: lxvd2x v26, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 208
-; FAST-NEXT: lxvd2x v25, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 192
-; FAST-NEXT: lxvd2x v24, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 176
-; FAST-NEXT: lxvd2x v23, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 160
-; FAST-NEXT: lxvd2x v22, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 144
-; FAST-NEXT: lxvd2x v21, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 128
-; FAST-NEXT: lxvd2x v20, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: addi r1, r1, 480
-; FAST-NEXT: ld r0, 16(r1)
-; FAST-NEXT: mtlr r0
-; FAST-NEXT: blr
%a = call <32 x i64> @llvm.llrint.v32i64.v32f16(<32 x half> %x)
ret <32 x i64> %a
}
@@ -3385,12 +2413,6 @@ define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: llrint_v1i64_v1f32:
-; FAST: # %bb.0:
-; FAST-NEXT: fctid f0, f1
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: blr
%a = call <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float> %x)
ret <1 x i64> %a
}
@@ -3444,21 +2466,6 @@ define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: llrint_v2i64_v2f32:
-; FAST: # %bb.0:
-; FAST-NEXT: xxsldwi vs0, v2, v2, 3
-; FAST-NEXT: xxswapd vs1, v2
-; FAST-NEXT: xscvspdpn f0, vs0
-; FAST-NEXT: xscvspdpn f1, vs1
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: fctid f1, f1
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v2, vs1, vs0
-; FAST-NEXT: blr
%a = call <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float> %x)
ret <2 x i64> %a
}
@@ -3537,32 +2544,6 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: llrint_v4i64_v4f32:
-; FAST: # %bb.0:
-; FAST-NEXT: xxsldwi vs0, v2, v2, 3
-; FAST-NEXT: xxswapd vs1, v2
-; FAST-NEXT: xscvspdpn f0, vs0
-; FAST-NEXT: xxsldwi vs2, v2, v2, 1
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: xscvspdpn f0, vs1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v4, vs0, vs1
-; FAST-NEXT: xscvspdpn f0, v2
-; FAST-NEXT: vmr v2, v4
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: xscvspdpn f0, vs2
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v3, vs1, vs0
-; FAST-NEXT: blr
%a = call <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float> %x)
ret <4 x i64> %a
}
@@ -3695,54 +2676,6 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: llrint_v8i64_v8f32:
-; FAST: # %bb.0:
-; FAST-NEXT: xxsldwi vs0, v2, v2, 3
-; FAST-NEXT: xxswapd vs1, v2
-; FAST-NEXT: xscvspdpn f0, vs0
-; FAST-NEXT: xxsldwi vs2, v2, v2, 1
-; FAST-NEXT: xxsldwi vs3, v3, v3, 3
-; FAST-NEXT: xxswapd vs4, v3
-; FAST-NEXT: xxsldwi vs5, v3, v3, 1
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: xscvspdpn f0, vs1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v0, vs0, vs1
-; FAST-NEXT: xscvspdpn f0, v2
-; FAST-NEXT: vmr v2, v0
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: xscvspdpn f0, vs2
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v1, vs1, vs0
-; FAST-NEXT: xscvspdpn f0, vs3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: xscvspdpn f0, vs4
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v4, vs0, vs1
-; FAST-NEXT: xscvspdpn f0, v3
-; FAST-NEXT: vmr v3, v1
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: xscvspdpn f0, vs5
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v5, vs1, vs0
-; FAST-NEXT: blr
%a = call <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float> %x)
ret <8 x i64> %a
}
@@ -3983,98 +2916,6 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: llrint_v16i64_v16f32:
-; FAST: # %bb.0:
-; FAST-NEXT: xxsldwi vs0, v2, v2, 3
-; FAST-NEXT: xxswapd vs1, v2
-; FAST-NEXT: xscvspdpn f0, vs0
-; FAST-NEXT: xxsldwi vs2, v2, v2, 1
-; FAST-NEXT: xxsldwi vs3, v3, v3, 3
-; FAST-NEXT: xxswapd vs4, v3
-; FAST-NEXT: xxsldwi vs5, v3, v3, 1
-; FAST-NEXT: xxsldwi vs6, v4, v4, 3
-; FAST-NEXT: xxswapd vs7, v4
-; FAST-NEXT: xxsldwi vs8, v4, v4, 1
-; FAST-NEXT: xxsldwi vs9, v5, v5, 3
-; FAST-NEXT: xxswapd vs10, v5
-; FAST-NEXT: xxsldwi vs11, v5, v5, 1
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: xscvspdpn f0, vs1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v0, vs0, vs1
-; FAST-NEXT: xscvspdpn f0, v2
-; FAST-NEXT: vmr v2, v0
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: xscvspdpn f0, vs2
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v1, vs1, vs0
-; FAST-NEXT: xscvspdpn f0, vs3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: xscvspdpn f0, vs4
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v10, vs0, vs1
-; FAST-NEXT: xscvspdpn f0, v3
-; FAST-NEXT: vmr v3, v1
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: xscvspdpn f0, vs5
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v11, vs1, vs0
-; FAST-NEXT: xscvspdpn f0, vs6
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: xscvspdpn f0, vs7
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v6, vs0, vs1
-; FAST-NEXT: xscvspdpn f0, v4
-; FAST-NEXT: xscvspdpn f1, vs8
-; FAST-NEXT: vmr v4, v10
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: fctid f1, f1
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v7, vs0, vs1
-; FAST-NEXT: xscvspdpn f0, vs9
-; FAST-NEXT: xscvspdpn f1, vs10
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: fctid f1, f1
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v8, vs1, vs0
-; FAST-NEXT: xscvspdpn f0, v5
-; FAST-NEXT: xscvspdpn f1, vs11
-; FAST-NEXT: vmr v5, v11
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: fctid f1, f1
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v9, vs0, vs1
-; FAST-NEXT: blr
%a = call <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float> %x)
ret <16 x i64> %a
}
@@ -4104,12 +2945,6 @@ define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: llrint_v1i64_v1f64:
-; FAST: # %bb.0:
-; FAST-NEXT: fctid f0, f1
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: blr
%a = call <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double> %x)
ret <1 x i64> %a
}
@@ -4164,19 +2999,6 @@ define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: llrint_v2i64_v2f64:
-; FAST: # %bb.0:
-; FAST-NEXT: xxlor f1, v2, v2
-; FAST-NEXT: xxswapd vs0, v2
-; FAST-NEXT: fctid f1, f1
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v2, vs1, vs0
-; FAST-NEXT: blr
%a = call <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double> %x)
ret <2 x i64> %a
}
@@ -4261,28 +3083,6 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: llrint_v4i64_v4f64:
-; FAST: # %bb.0:
-; FAST-NEXT: xxswapd vs0, v2
-; FAST-NEXT: xxlor f2, v2, v2
-; FAST-NEXT: xxswapd vs1, v3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: fctid f2, f2
-; FAST-NEXT: fctid f1, f1
-; FAST-NEXT: mffprd r4, f0
-; FAST-NEXT: xxlor f0, v3, v3
-; FAST-NEXT: mffprd r3, f2
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mtfprd f2, r4
-; FAST-NEXT: mffprd r5, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v2, vs0, vs2
-; FAST-NEXT: mtfprd f0, r5
-; FAST-NEXT: xxmrghd v3, vs0, vs1
-; FAST-NEXT: blr
%a = call <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double> %x)
ret <4 x i64> %a
}
@@ -4427,46 +3227,6 @@ define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: llrint_v8i64_v8f64:
-; FAST: # %bb.0:
-; FAST-NEXT: xxswapd vs0, v2
-; FAST-NEXT: xxswapd vs1, v3
-; FAST-NEXT: xxlor f4, v2, v2
-; FAST-NEXT: xxswapd vs2, v4
-; FAST-NEXT: xxswapd vs3, v5
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: fctid f4, f4
-; FAST-NEXT: mffprd r4, f0
-; FAST-NEXT: xxlor f0, v3, v3
-; FAST-NEXT: mffprd r3, f4
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r5, f0
-; FAST-NEXT: fctid f0, f1
-; FAST-NEXT: mtfprd f1, r4
-; FAST-NEXT: mffprd r6, f0
-; FAST-NEXT: xxlor f0, v4, v4
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mtfprd f4, r6
-; FAST-NEXT: mffprd r7, f0
-; FAST-NEXT: fctid f0, f2
-; FAST-NEXT: mtfprd f2, r5
-; FAST-NEXT: mtfprd f5, r7
-; FAST-NEXT: mffprd r8, f0
-; FAST-NEXT: xxlor f0, v5, v5
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mtfprd f6, r8
-; FAST-NEXT: mffprd r9, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v3, vs2, vs4
-; FAST-NEXT: xxmrghd v4, vs5, vs6
-; FAST-NEXT: xxmrghd v2, vs0, vs1
-; FAST-NEXT: fctid f1, f3
-; FAST-NEXT: mtfprd f0, r9
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v5, vs0, vs1
-; FAST-NEXT: blr
%a = call <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double> %x)
ret <8 x i64> %a
}
@@ -4496,18 +3256,6 @@ define <1 x i64> @llrint_v1i64_v1f128(<1 x fp128> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: llrint_v1i64_v1f128:
-; FAST: # %bb.0:
-; FAST-NEXT: mflr r0
-; FAST-NEXT: stdu r1, -32(r1)
-; FAST-NEXT: std r0, 48(r1)
-; FAST-NEXT: bl llrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: addi r1, r1, 32
-; FAST-NEXT: ld r0, 16(r1)
-; FAST-NEXT: mtlr r0
-; FAST-NEXT: blr
%a = call <1 x i64> @llvm.llrint.v1i64.v1f128(<1 x fp128> %x)
ret <1 x i64> %a
}
@@ -4565,33 +3313,6 @@ define <2 x i64> @llrint_v2i64_v2f128(<2 x fp128> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: llrint_v2i64_v2f128:
-; FAST: # %bb.0:
-; FAST-NEXT: mflr r0
-; FAST-NEXT: stdu r1, -80(r1)
-; FAST-NEXT: li r3, 48
-; FAST-NEXT: std r0, 96(r1)
-; FAST-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 64
-; FAST-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: vmr v31, v3
-; FAST-NEXT: bl llrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: vmr v2, v31
-; FAST-NEXT: mtvsrd v30, r3
-; FAST-NEXT: bl llrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: li r3, 64
-; FAST-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 48
-; FAST-NEXT: xxmrghd v2, vs0, v30
-; FAST-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: addi r1, r1, 80
-; FAST-NEXT: ld r0, 16(r1)
-; FAST-NEXT: mtlr r0
-; FAST-NEXT: blr
%a = call <2 x i64> @llvm.llrint.v2i64.v2f128(<2 x fp128> %x)
ret <2 x i64> %a
}
@@ -4689,53 +3410,6 @@ define <4 x i64> @llrint_v4i64_v4f128(<4 x fp128> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: llrint_v4i64_v4f128:
-; FAST: # %bb.0:
-; FAST-NEXT: mflr r0
-; FAST-NEXT: stdu r1, -112(r1)
-; FAST-NEXT: li r3, 48
-; FAST-NEXT: std r0, 128(r1)
-; FAST-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 64
-; FAST-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 80
-; FAST-NEXT: vmr v29, v3
-; FAST-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 96
-; FAST-NEXT: vmr v30, v4
-; FAST-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: vmr v31, v5
-; FAST-NEXT: bl llrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: vmr v2, v29
-; FAST-NEXT: mtvsrd v28, r3
-; FAST-NEXT: bl llrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: vmr v2, v30
-; FAST-NEXT: xxmrghd v29, vs0, v28
-; FAST-NEXT: bl llrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: vmr v2, v31
-; FAST-NEXT: mtvsrd v30, r3
-; FAST-NEXT: bl llrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: li r3, 96
-; FAST-NEXT: vmr v2, v29
-; FAST-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 80
-; FAST-NEXT: xxmrghd v3, vs0, v30
-; FAST-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 64
-; FAST-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 48
-; FAST-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: addi r1, r1, 112
-; FAST-NEXT: ld r0, 16(r1)
-; FAST-NEXT: mtlr r0
-; FAST-NEXT: blr
%a = call <4 x i64> @llvm.llrint.v4i64.v4f128(<4 x fp128> %x)
ret <4 x i64> %a
}
@@ -4913,93 +3587,6 @@ define <8 x i64> @llrint_v8i64_v8f128(<8 x fp128> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: llrint_v8i64_v8f128:
-; FAST: # %bb.0:
-; FAST-NEXT: mflr r0
-; FAST-NEXT: stdu r1, -176(r1)
-; FAST-NEXT: li r3, 48
-; FAST-NEXT: std r0, 192(r1)
-; FAST-NEXT: stxvd2x v24, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 64
-; FAST-NEXT: stxvd2x v25, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 80
-; FAST-NEXT: vmr v25, v3
-; FAST-NEXT: stxvd2x v26, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 96
-; FAST-NEXT: vmr v26, v4
-; FAST-NEXT: stxvd2x v27, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 112
-; FAST-NEXT: vmr v27, v5
-; FAST-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 128
-; FAST-NEXT: vmr v28, v6
-; FAST-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 144
-; FAST-NEXT: vmr v29, v7
-; FAST-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 160
-; FAST-NEXT: vmr v30, v8
-; FAST-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: vmr v31, v9
-; FAST-NEXT: bl llrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: vmr v2, v25
-; FAST-NEXT: mtvsrd v24, r3
-; FAST-NEXT: bl llrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: vmr v2, v26
-; FAST-NEXT: xxmrghd v25, vs0, v24
-; FAST-NEXT: bl llrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: vmr v2, v27
-; FAST-NEXT: mtvsrd v26, r3
-; FAST-NEXT: bl llrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: vmr v2, v28
-; FAST-NEXT: xxmrghd v27, vs0, v26
-; FAST-NEXT: bl llrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: vmr v2, v29
-; FAST-NEXT: mtvsrd v28, r3
-; FAST-NEXT: bl llrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: vmr v2, v30
-; FAST-NEXT: xxmrghd v29, vs0, v28
-; FAST-NEXT: bl llrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: vmr v2, v31
-; FAST-NEXT: mtvsrd v30, r3
-; FAST-NEXT: bl llrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: li r3, 160
-; FAST-NEXT: vmr v4, v29
-; FAST-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 144
-; FAST-NEXT: vmr v3, v27
-; FAST-NEXT: vmr v2, v25
-; FAST-NEXT: xxmrghd v5, vs0, v30
-; FAST-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 128
-; FAST-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 112
-; FAST-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 96
-; FAST-NEXT: lxvd2x v27, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 80
-; FAST-NEXT: lxvd2x v26, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 64
-; FAST-NEXT: lxvd2x v25, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 48
-; FAST-NEXT: lxvd2x v24, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: addi r1, r1, 176
-; FAST-NEXT: ld r0, 16(r1)
-; FAST-NEXT: mtlr r0
-; FAST-NEXT: blr
%a = call <8 x i64> @llvm.llrint.v8i64.v8f128(<8 x fp128> %x)
ret <8 x i64> %a
}
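
The hunks above drop the FAST (`--enable-unsafe-fp-math`) expectations from vector-llrint.ll; the hunks below do the same for vector-lrint.ll. For reference, a minimal self-contained use of the intrinsic family these tests exercise — a sketch only, reusing the v2f64 variant that already appears in the test, not part of the patch:

; Sketch (not part of the patch): lowering of llvm.llrint on pwr8 can be
; inspected with, e.g.:
;   llc -mcpu=pwr8 -ppc-asm-full-reg-names -mtriple=powerpc64le-unknown-unknown < example.ll
define <2 x i64> @example(<2 x double> %x) nounwind {
  ; Rounds each double lane to the nearest integer (current rounding mode)
  ; and returns the results as i64 lanes.
  %r = call <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double> %x)
  ret <2 x i64> %r
}
declare <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double>)
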
diff --git a/llvm/test/CodeGen/PowerPC/vector-lrint.ll b/llvm/test/CodeGen/PowerPC/vector-lrint.ll
index f437536..af5704b 100644
--- a/llvm/test/CodeGen/PowerPC/vector-lrint.ll
+++ b/llvm/test/CodeGen/PowerPC/vector-lrint.ll
@@ -9,10 +9,6 @@
; RUN: sed 's/iXLen/i32/g' %s | llc -mcpu=pwr8 -ppc-asm-full-reg-names \
; RUN: -ppc-vsr-nums-as-vr -mtriple=powerpc64le-unknown-unknown \
; RUN: -verify-machineinstrs | FileCheck %s
-; RUN: sed 's/iXLen/i32/g' %s | llc -mcpu=pwr8 -ppc-asm-full-reg-names \
-; RUN: -ppc-vsr-nums-as-vr -mtriple=powerpc64le-unknown-unknown \
-; RUN: -verify-machineinstrs --enable-unsafe-fp-math | \
-; RUN: FileCheck %s --check-prefixes=FAST
; FIXME: crash "Input type needs to be promoted!"
; SKIP: sed 's/iXLen/i64/g' %s | llc -ppc-asm-full-reg-names \
; SKIP: -ppc-vsr-nums-as-vr -mtriple=powerpc-unknown-unknown \
@@ -23,10 +19,6 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mcpu=pwr8 -ppc-asm-full-reg-names \
; RUN: -ppc-vsr-nums-as-vr -mtriple=powerpc64le-unknown-unknown \
; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
-; RUN: sed 's/iXLen/i64/g' %s | llc -mcpu=pwr8 -ppc-asm-full-reg-names \
-; RUN: -ppc-vsr-nums-as-vr -mtriple=powerpc64le-unknown-unknown \
-; RUN: -verify-machineinstrs --enable-unsafe-fp-math | \
-; RUN: FileCheck %s --check-prefixes=FAST
define <1 x i64> @lrint_v1f16(<1 x half> %x) nounwind {
; BE-LABEL: lrint_v1f16:
@@ -62,23 +54,6 @@ define <1 x i64> @lrint_v1f16(<1 x half> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: lrint_v1f16:
-; FAST: # %bb.0:
-; FAST-NEXT: mflr r0
-; FAST-NEXT: stdu r1, -32(r1)
-; FAST-NEXT: std r0, 48(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fctid f0, f1
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: addi r1, r1, 32
-; FAST-NEXT: ld r0, 16(r1)
-; FAST-NEXT: mtlr r0
-; FAST-NEXT: blr
%a = call <1 x i64> @llvm.lrint.v1i64.v1f16(<1 x half> %x)
ret <1 x i64> %a
}
@@ -162,41 +137,6 @@ define <2 x i64> @lrint_v2f16(<2 x half> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: lrint_v2f16:
-; FAST: # %bb.0:
-; FAST-NEXT: mflr r0
-; FAST-NEXT: stfd f30, -16(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f31, -8(r1) # 8-byte Folded Spill
-; FAST-NEXT: stdu r1, -48(r1)
-; FAST-NEXT: fmr f31, f1
-; FAST-NEXT: fmr f1, f2
-; FAST-NEXT: std r0, 64(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f30, f1
-; FAST-NEXT: fmr f1, f31
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fctid f0, f1
-; FAST-NEXT: fctid f1, f30
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v2, vs1, vs0
-; FAST-NEXT: addi r1, r1, 48
-; FAST-NEXT: ld r0, 16(r1)
-; FAST-NEXT: lfd f31, -8(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f30, -16(r1) # 8-byte Folded Reload
-; FAST-NEXT: mtlr r0
-; FAST-NEXT: blr
%a = call <2 x i64> @llvm.lrint.v2i64.v2f16(<2 x half> %x)
ret <2 x i64> %a
}
@@ -356,68 +296,6 @@ define <4 x i64> @lrint_v4f16(<4 x half> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: lrint_v4f16:
-; FAST: # %bb.0:
-; FAST-NEXT: mflr r0
-; FAST-NEXT: stfd f28, -32(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f29, -24(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f30, -16(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f31, -8(r1) # 8-byte Folded Spill
-; FAST-NEXT: stdu r1, -64(r1)
-; FAST-NEXT: fmr f29, f1
-; FAST-NEXT: fmr f1, f4
-; FAST-NEXT: std r0, 80(r1)
-; FAST-NEXT: fmr f31, f3
-; FAST-NEXT: fmr f30, f2
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f28, f1
-; FAST-NEXT: fmr f1, f31
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f31, f1
-; FAST-NEXT: fmr f1, f30
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f30, f1
-; FAST-NEXT: fmr f1, f29
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fctid f0, f30
-; FAST-NEXT: fctid f2, f31
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: fctid f1, f1
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f2
-; FAST-NEXT: mtfprd f2, r3
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v2, vs0, vs1
-; FAST-NEXT: fctid f0, f28
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v3, vs0, vs2
-; FAST-NEXT: addi r1, r1, 64
-; FAST-NEXT: ld r0, 16(r1)
-; FAST-NEXT: lfd f31, -8(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f30, -16(r1) # 8-byte Folded Reload
-; FAST-NEXT: mtlr r0
-; FAST-NEXT: lfd f29, -24(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f28, -32(r1) # 8-byte Folded Reload
-; FAST-NEXT: blr
%a = call <4 x i64> @llvm.lrint.v4i64.v4f16(<4 x half> %x)
ret <4 x i64> %a
}
@@ -729,122 +607,6 @@ define <8 x i64> @lrint_v8f16(<8 x half> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: lrint_v8f16:
-; FAST: # %bb.0:
-; FAST-NEXT: mflr r0
-; FAST-NEXT: stfd f24, -64(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f25, -56(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f26, -48(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f27, -40(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f28, -32(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f29, -24(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f30, -16(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f31, -8(r1) # 8-byte Folded Spill
-; FAST-NEXT: stdu r1, -96(r1)
-; FAST-NEXT: fmr f24, f1
-; FAST-NEXT: fmr f1, f8
-; FAST-NEXT: std r0, 112(r1)
-; FAST-NEXT: fmr f30, f7
-; FAST-NEXT: fmr f29, f6
-; FAST-NEXT: fmr f28, f5
-; FAST-NEXT: fmr f27, f4
-; FAST-NEXT: fmr f26, f3
-; FAST-NEXT: fmr f25, f2
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f31, f1
-; FAST-NEXT: fmr f1, f30
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f30, f1
-; FAST-NEXT: fmr f1, f29
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f29, f1
-; FAST-NEXT: fmr f1, f28
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f28, f1
-; FAST-NEXT: fmr f1, f27
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f27, f1
-; FAST-NEXT: fmr f1, f26
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f26, f1
-; FAST-NEXT: fmr f1, f25
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f25, f1
-; FAST-NEXT: fmr f1, f24
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fctid f0, f25
-; FAST-NEXT: fctid f2, f26
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: fctid f3, f27
-; FAST-NEXT: fctid f4, f28
-; FAST-NEXT: fctid f5, f29
-; FAST-NEXT: fctid f6, f30
-; FAST-NEXT: fctid f1, f1
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f2
-; FAST-NEXT: mtfprd f2, r3
-; FAST-NEXT: mffprd r3, f3
-; FAST-NEXT: mtfprd f3, r3
-; FAST-NEXT: mffprd r3, f4
-; FAST-NEXT: mtfprd f4, r3
-; FAST-NEXT: mffprd r3, f5
-; FAST-NEXT: mtfprd f5, r3
-; FAST-NEXT: mffprd r3, f6
-; FAST-NEXT: mtfprd f6, r3
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v3, vs3, vs2
-; FAST-NEXT: xxmrghd v4, vs5, vs4
-; FAST-NEXT: xxmrghd v2, vs0, vs1
-; FAST-NEXT: fctid f0, f31
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v5, vs0, vs6
-; FAST-NEXT: addi r1, r1, 96
-; FAST-NEXT: ld r0, 16(r1)
-; FAST-NEXT: lfd f31, -8(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f30, -16(r1) # 8-byte Folded Reload
-; FAST-NEXT: mtlr r0
-; FAST-NEXT: lfd f29, -24(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f28, -32(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f27, -40(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f26, -48(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f25, -56(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f24, -64(r1) # 8-byte Folded Reload
-; FAST-NEXT: blr
%a = call <8 x i64> @llvm.lrint.v8i64.v8f16(<8 x half> %x)
ret <8 x i64> %a
}
@@ -1454,228 +1216,6 @@ define <16 x i64> @lrint_v16i64_v16f16(<16 x half> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: lrint_v16i64_v16f16:
-; FAST: # %bb.0:
-; FAST-NEXT: mflr r0
-; FAST-NEXT: stfd f16, -128(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f17, -120(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f18, -112(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f19, -104(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f20, -96(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f21, -88(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f22, -80(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f23, -72(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f24, -64(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f25, -56(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f26, -48(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f27, -40(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f28, -32(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f29, -24(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f30, -16(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f31, -8(r1) # 8-byte Folded Spill
-; FAST-NEXT: stdu r1, -160(r1)
-; FAST-NEXT: fmr f26, f1
-; FAST-NEXT: lfs f1, 312(r1)
-; FAST-NEXT: std r0, 176(r1)
-; FAST-NEXT: fmr f28, f13
-; FAST-NEXT: fmr f27, f12
-; FAST-NEXT: fmr f24, f11
-; FAST-NEXT: fmr f21, f10
-; FAST-NEXT: fmr f19, f9
-; FAST-NEXT: fmr f18, f8
-; FAST-NEXT: fmr f17, f7
-; FAST-NEXT: fmr f16, f6
-; FAST-NEXT: fmr f20, f5
-; FAST-NEXT: fmr f22, f4
-; FAST-NEXT: fmr f23, f3
-; FAST-NEXT: fmr f25, f2
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f31, f1
-; FAST-NEXT: lfs f1, 304(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f30, f1
-; FAST-NEXT: lfs f1, 296(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f29, f1
-; FAST-NEXT: fmr f1, f28
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f28, f1
-; FAST-NEXT: fmr f1, f27
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f27, f1
-; FAST-NEXT: fmr f1, f24
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f24, f1
-; FAST-NEXT: fmr f1, f21
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f21, f1
-; FAST-NEXT: fmr f1, f19
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f19, f1
-; FAST-NEXT: fmr f1, f18
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f18, f1
-; FAST-NEXT: fmr f1, f17
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f17, f1
-; FAST-NEXT: fmr f1, f16
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f16, f1
-; FAST-NEXT: fmr f1, f20
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f20, f1
-; FAST-NEXT: fmr f1, f22
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f22, f1
-; FAST-NEXT: fmr f1, f23
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f23, f1
-; FAST-NEXT: fmr f1, f25
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f25, f1
-; FAST-NEXT: fmr f1, f26
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fctid f0, f25
-; FAST-NEXT: fctid f2, f23
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: fctid f3, f22
-; FAST-NEXT: fctid f4, f20
-; FAST-NEXT: fctid f5, f16
-; FAST-NEXT: fctid f6, f17
-; FAST-NEXT: fctid f7, f18
-; FAST-NEXT: fctid f8, f19
-; FAST-NEXT: fctid f9, f21
-; FAST-NEXT: fctid f10, f24
-; FAST-NEXT: fctid f1, f1
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f2
-; FAST-NEXT: mtfprd f2, r3
-; FAST-NEXT: mffprd r3, f3
-; FAST-NEXT: mtfprd f3, r3
-; FAST-NEXT: mffprd r3, f4
-; FAST-NEXT: mtfprd f4, r3
-; FAST-NEXT: mffprd r3, f5
-; FAST-NEXT: mtfprd f5, r3
-; FAST-NEXT: mffprd r3, f6
-; FAST-NEXT: mtfprd f6, r3
-; FAST-NEXT: mffprd r3, f7
-; FAST-NEXT: mtfprd f7, r3
-; FAST-NEXT: mffprd r3, f8
-; FAST-NEXT: mtfprd f8, r3
-; FAST-NEXT: mffprd r3, f9
-; FAST-NEXT: mtfprd f9, r3
-; FAST-NEXT: mffprd r3, f10
-; FAST-NEXT: mtfprd f10, r3
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v3, vs3, vs2
-; FAST-NEXT: xxmrghd v4, vs5, vs4
-; FAST-NEXT: xxmrghd v5, vs7, vs6
-; FAST-NEXT: xxmrghd v6, vs9, vs8
-; FAST-NEXT: xxmrghd v2, vs0, vs1
-; FAST-NEXT: fctid f0, f27
-; FAST-NEXT: fctid f1, f29
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v7, vs0, vs10
-; FAST-NEXT: fctid f0, f28
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v8, vs1, vs0
-; FAST-NEXT: fctid f0, f30
-; FAST-NEXT: fctid f1, f31
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v9, vs1, vs0
-; FAST-NEXT: addi r1, r1, 160
-; FAST-NEXT: ld r0, 16(r1)
-; FAST-NEXT: lfd f31, -8(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f30, -16(r1) # 8-byte Folded Reload
-; FAST-NEXT: mtlr r0
-; FAST-NEXT: lfd f29, -24(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f28, -32(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f27, -40(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f26, -48(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f25, -56(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f24, -64(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f23, -72(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f22, -80(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f21, -88(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f20, -96(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f19, -104(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f18, -112(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f17, -120(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f16, -128(r1) # 8-byte Folded Reload
-; FAST-NEXT: blr
%a = call <16 x i64> @llvm.lrint.v16i64.v16f16(<16 x half> %x)
ret <16 x i64> %a
}
@@ -2854,523 +2394,6 @@ define <32 x i64> @lrint_v32i64_v32f16(<32 x half> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: lrint_v32i64_v32f16:
-; FAST: # %bb.0:
-; FAST-NEXT: mflr r0
-; FAST-NEXT: stdu r1, -480(r1)
-; FAST-NEXT: li r4, 128
-; FAST-NEXT: std r0, 496(r1)
-; FAST-NEXT: std r30, 320(r1) # 8-byte Folded Spill
-; FAST-NEXT: mr r30, r3
-; FAST-NEXT: stfd f14, 336(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f15, 344(r1) # 8-byte Folded Spill
-; FAST-NEXT: fmr f14, f5
-; FAST-NEXT: stfd f16, 352(r1) # 8-byte Folded Spill
-; FAST-NEXT: stxvd2x v20, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 144
-; FAST-NEXT: fmr f16, f4
-; FAST-NEXT: stfd f17, 360(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f18, 368(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f19, 376(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f20, 384(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f21, 392(r1) # 8-byte Folded Spill
-; FAST-NEXT: stxvd2x v21, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 160
-; FAST-NEXT: stfd f22, 400(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f23, 408(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f24, 416(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f25, 424(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f26, 432(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f27, 440(r1) # 8-byte Folded Spill
-; FAST-NEXT: stxvd2x v22, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 176
-; FAST-NEXT: xxlor v22, f3, f3
-; FAST-NEXT: stfd f28, 448(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f29, 456(r1) # 8-byte Folded Spill
-; FAST-NEXT: fmr f29, f9
-; FAST-NEXT: stfd f30, 464(r1) # 8-byte Folded Spill
-; FAST-NEXT: stfd f31, 472(r1) # 8-byte Folded Spill
-; FAST-NEXT: stxvd2x v23, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 192
-; FAST-NEXT: xxlor v23, f2, f2
-; FAST-NEXT: stxvd2x v24, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 208
-; FAST-NEXT: stxvd2x v25, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 224
-; FAST-NEXT: xxlor v25, f13, f13
-; FAST-NEXT: stxvd2x v26, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 240
-; FAST-NEXT: xxlor v26, f12, f12
-; FAST-NEXT: stxvd2x v27, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 256
-; FAST-NEXT: xxlor v27, f11, f11
-; FAST-NEXT: stxvd2x v28, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 272
-; FAST-NEXT: xxlor v28, f10, f10
-; FAST-NEXT: stxvd2x v29, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 288
-; FAST-NEXT: xxlor v29, f8, f8
-; FAST-NEXT: stxvd2x v30, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 304
-; FAST-NEXT: xxlor v30, f7, f7
-; FAST-NEXT: stxvd2x v31, r1, r4 # 16-byte Folded Spill
-; FAST-NEXT: li r4, 44
-; FAST-NEXT: xxlor v31, f6, f6
-; FAST-NEXT: stxsspx f1, r1, r4 # 4-byte Folded Spill
-; FAST-NEXT: lfs f1, 768(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: li r3, 120
-; FAST-NEXT: stxsdx f1, r1, r3 # 8-byte Folded Spill
-; FAST-NEXT: lfs f1, 760(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: li r3, 112
-; FAST-NEXT: stxsdx f1, r1, r3 # 8-byte Folded Spill
-; FAST-NEXT: lfs f1, 752(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: li r3, 104
-; FAST-NEXT: stxsdx f1, r1, r3 # 8-byte Folded Spill
-; FAST-NEXT: lfs f1, 744(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: li r3, 96
-; FAST-NEXT: stxsdx f1, r1, r3 # 8-byte Folded Spill
-; FAST-NEXT: lfs f1, 736(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: li r3, 88
-; FAST-NEXT: stxsdx f1, r1, r3 # 8-byte Folded Spill
-; FAST-NEXT: lfs f1, 728(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: li r3, 80
-; FAST-NEXT: stxsdx f1, r1, r3 # 8-byte Folded Spill
-; FAST-NEXT: lfs f1, 720(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: li r3, 72
-; FAST-NEXT: stxsdx f1, r1, r3 # 8-byte Folded Spill
-; FAST-NEXT: lfs f1, 712(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: li r3, 64
-; FAST-NEXT: stxsdx f1, r1, r3 # 8-byte Folded Spill
-; FAST-NEXT: lfs f1, 704(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: li r3, 56
-; FAST-NEXT: stxsdx f1, r1, r3 # 8-byte Folded Spill
-; FAST-NEXT: lfs f1, 696(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: li r3, 48
-; FAST-NEXT: stxsdx f1, r1, r3 # 8-byte Folded Spill
-; FAST-NEXT: lfs f1, 688(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: xxlor v21, f1, f1
-; FAST-NEXT: lfs f1, 680(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: xxlor v20, f1, f1
-; FAST-NEXT: lfs f1, 672(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: xxlor v24, f1, f1
-; FAST-NEXT: lfs f1, 664(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f31, f1
-; FAST-NEXT: lfs f1, 656(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f30, f1
-; FAST-NEXT: lfs f1, 648(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f28, f1
-; FAST-NEXT: lfs f1, 640(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f27, f1
-; FAST-NEXT: lfs f1, 632(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f26, f1
-; FAST-NEXT: lfs f1, 624(r1)
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f25, f1
-; FAST-NEXT: xxlor f1, v25, v25
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f24, f1
-; FAST-NEXT: xxlor f1, v26, v26
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f23, f1
-; FAST-NEXT: xxlor f1, v27, v27
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f22, f1
-; FAST-NEXT: xxlor f1, v28, v28
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f21, f1
-; FAST-NEXT: fmr f1, f29
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f20, f1
-; FAST-NEXT: xxlor f1, v29, v29
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f19, f1
-; FAST-NEXT: xxlor f1, v30, v30
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f18, f1
-; FAST-NEXT: xxlor f1, v31, v31
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f29, f1
-; FAST-NEXT: fmr f1, f14
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f14, f1
-; FAST-NEXT: fmr f1, f16
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f16, f1
-; FAST-NEXT: xxlor f1, v22, v22
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fmr f17, f1
-; FAST-NEXT: xxlor f1, v23, v23
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: li r3, 44
-; FAST-NEXT: fmr f15, f1
-; FAST-NEXT: lxsspx f1, r1, r3 # 4-byte Folded Reload
-; FAST-NEXT: bl __truncsfhf2
-; FAST-NEXT: nop
-; FAST-NEXT: clrldi r3, r3, 48
-; FAST-NEXT: bl __extendhfsf2
-; FAST-NEXT: nop
-; FAST-NEXT: fctid f3, f15
-; FAST-NEXT: fctid f4, f17
-; FAST-NEXT: mffprd r3, f3
-; FAST-NEXT: fctid f5, f16
-; FAST-NEXT: fctid f6, f14
-; FAST-NEXT: fctid f7, f18
-; FAST-NEXT: fctid f8, f19
-; FAST-NEXT: fctid f13, f1
-; FAST-NEXT: fctid f9, f20
-; FAST-NEXT: fctid f10, f22
-; FAST-NEXT: fctid f11, f24
-; FAST-NEXT: fctid f12, f25
-; FAST-NEXT: fctid f2, f23
-; FAST-NEXT: fctid f0, f21
-; FAST-NEXT: mtvsrd v2, r3
-; FAST-NEXT: mffprd r3, f4
-; FAST-NEXT: mtvsrd v3, r3
-; FAST-NEXT: mffprd r3, f5
-; FAST-NEXT: mtfprd f5, r3
-; FAST-NEXT: mffprd r3, f6
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: mffprd r3, f7
-; FAST-NEXT: mtfprd f6, r3
-; FAST-NEXT: mffprd r3, f8
-; FAST-NEXT: mtfprd f7, r3
-; FAST-NEXT: mffprd r3, f9
-; FAST-NEXT: mtfprd f3, r3
-; FAST-NEXT: mffprd r3, f10
-; FAST-NEXT: mtfprd f4, r3
-; FAST-NEXT: mffprd r3, f11
-; FAST-NEXT: fctid f11, f31
-; FAST-NEXT: lfd f31, 56(r1) # 8-byte Folded Reload
-; FAST-NEXT: mtfprd f8, r3
-; FAST-NEXT: mffprd r3, f12
-; FAST-NEXT: xxlor f12, v24, v24
-; FAST-NEXT: fctid f31, f31
-; FAST-NEXT: fctid f12, f12
-; FAST-NEXT: mtfprd f9, r3
-; FAST-NEXT: mffprd r3, f13
-; FAST-NEXT: lfd f13, 48(r1) # 8-byte Folded Reload
-; FAST-NEXT: mtfprd f10, r3
-; FAST-NEXT: fctid f13, f13
-; FAST-NEXT: xxmrghd v3, vs5, v3
-; FAST-NEXT: fctid f5, f26
-; FAST-NEXT: mffprd r3, f5
-; FAST-NEXT: mtfprd f5, r3
-; FAST-NEXT: xxmrghd v4, vs7, vs6
-; FAST-NEXT: fctid f6, f27
-; FAST-NEXT: fctid f7, f28
-; FAST-NEXT: mffprd r3, f6
-; FAST-NEXT: lfd f28, 96(r1) # 8-byte Folded Reload
-; FAST-NEXT: fctid f28, f28
-; FAST-NEXT: mtfprd f6, r3
-; FAST-NEXT: mffprd r3, f7
-; FAST-NEXT: mtfprd f7, r3
-; FAST-NEXT: xxmrghd v2, v2, vs10
-; FAST-NEXT: fctid f10, f30
-; FAST-NEXT: mffprd r3, f10
-; FAST-NEXT: lfd f30, 80(r1) # 8-byte Folded Reload
-; FAST-NEXT: fctid f30, f30
-; FAST-NEXT: mtfprd f10, r3
-; FAST-NEXT: mffprd r3, f11
-; FAST-NEXT: mtfprd f11, r3
-; FAST-NEXT: mffprd r3, f12
-; FAST-NEXT: mtfprd f12, r3
-; FAST-NEXT: xxmrghd v5, vs12, vs11
-; FAST-NEXT: xxlor f11, v20, v20
-; FAST-NEXT: xxlor f12, v21, v21
-; FAST-NEXT: fctid f11, f11
-; FAST-NEXT: fctid f12, f12
-; FAST-NEXT: mffprd r3, f11
-; FAST-NEXT: mtfprd f11, r3
-; FAST-NEXT: mffprd r3, f12
-; FAST-NEXT: mtfprd f12, r3
-; FAST-NEXT: mffprd r3, f13
-; FAST-NEXT: mtfprd f13, r3
-; FAST-NEXT: mffprd r3, f31
-; FAST-NEXT: lfd f31, 64(r1) # 8-byte Folded Reload
-; FAST-NEXT: fctid f31, f31
-; FAST-NEXT: mtvsrd v0, r3
-; FAST-NEXT: mffprd r3, f31
-; FAST-NEXT: lfd f31, 72(r1) # 8-byte Folded Reload
-; FAST-NEXT: mtvsrd v1, r3
-; FAST-NEXT: mffprd r3, f30
-; FAST-NEXT: lfd f30, 88(r1) # 8-byte Folded Reload
-; FAST-NEXT: fctid f31, f31
-; FAST-NEXT: mtvsrd v6, r3
-; FAST-NEXT: mffprd r3, f28
-; FAST-NEXT: lfd f28, 104(r1) # 8-byte Folded Reload
-; FAST-NEXT: fctid f30, f30
-; FAST-NEXT: fctid f28, f28
-; FAST-NEXT: mtvsrd v7, r3
-; FAST-NEXT: mffprd r3, f28
-; FAST-NEXT: lfd f28, 112(r1) # 8-byte Folded Reload
-; FAST-NEXT: fctid f28, f28
-; FAST-NEXT: mtvsrd v8, r3
-; FAST-NEXT: mffprd r3, f28
-; FAST-NEXT: lfd f28, 120(r1) # 8-byte Folded Reload
-; FAST-NEXT: fctid f28, f28
-; FAST-NEXT: xxmrghd v10, vs12, vs11
-; FAST-NEXT: xxmrghd v0, v0, vs13
-; FAST-NEXT: xxswapd vs12, v0
-; FAST-NEXT: xxmrghd v0, vs9, vs8
-; FAST-NEXT: xxmrghd v7, v8, v7
-; FAST-NEXT: mtvsrd v8, r3
-; FAST-NEXT: mffprd r3, f28
-; FAST-NEXT: mtvsrd v9, r3
-; FAST-NEXT: mffprd r3, f30
-; FAST-NEXT: xxswapd v7, v7
-; FAST-NEXT: xxmrghd v8, v9, v8
-; FAST-NEXT: mtvsrd v9, r3
-; FAST-NEXT: mffprd r3, f31
-; FAST-NEXT: xxswapd v8, v8
-; FAST-NEXT: xxmrghd v6, v9, v6
-; FAST-NEXT: mtvsrd v9, r3
-; FAST-NEXT: li r3, 240
-; FAST-NEXT: stxvd2x v8, r30, r3
-; FAST-NEXT: li r3, 224
-; FAST-NEXT: stxvd2x v7, r30, r3
-; FAST-NEXT: li r3, 208
-; FAST-NEXT: xxswapd vs11, v6
-; FAST-NEXT: xxmrghd v6, vs10, vs7
-; FAST-NEXT: stxvd2x vs11, r30, r3
-; FAST-NEXT: li r3, 192
-; FAST-NEXT: xxmrghd v1, v9, v1
-; FAST-NEXT: xxswapd vs11, v1
-; FAST-NEXT: xxmrghd v1, vs6, vs5
-; FAST-NEXT: xxswapd vs5, v10
-; FAST-NEXT: xxswapd vs6, v5
-; FAST-NEXT: stxvd2x vs11, r30, r3
-; FAST-NEXT: li r3, 176
-; FAST-NEXT: stxvd2x vs12, r30, r3
-; FAST-NEXT: li r3, 160
-; FAST-NEXT: stxvd2x vs5, r30, r3
-; FAST-NEXT: li r3, 144
-; FAST-NEXT: stxvd2x vs6, r30, r3
-; FAST-NEXT: mffprd r3, f2
-; FAST-NEXT: mtfprd f7, r3
-; FAST-NEXT: li r3, 128
-; FAST-NEXT: xxswapd vs5, v6
-; FAST-NEXT: stxvd2x vs5, r30, r3
-; FAST-NEXT: li r3, 112
-; FAST-NEXT: xxswapd vs2, v1
-; FAST-NEXT: xxswapd vs6, v0
-; FAST-NEXT: stxvd2x vs2, r30, r3
-; FAST-NEXT: li r3, 96
-; FAST-NEXT: fctid f2, f29
-; FAST-NEXT: stxvd2x vs6, r30, r3
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f2
-; FAST-NEXT: mtfprd f2, r3
-; FAST-NEXT: li r3, 80
-; FAST-NEXT: xxmrghd v5, vs7, vs4
-; FAST-NEXT: xxswapd vs4, v2
-; FAST-NEXT: xxmrghd v0, vs0, vs3
-; FAST-NEXT: xxswapd vs0, v5
-; FAST-NEXT: xxswapd vs3, v3
-; FAST-NEXT: stxvd2x vs0, r30, r3
-; FAST-NEXT: li r3, 64
-; FAST-NEXT: xxswapd vs0, v0
-; FAST-NEXT: stxvd2x vs0, r30, r3
-; FAST-NEXT: li r3, 48
-; FAST-NEXT: xxmrghd v5, vs2, vs1
-; FAST-NEXT: xxswapd vs1, v4
-; FAST-NEXT: stxvd2x vs1, r30, r3
-; FAST-NEXT: li r3, 32
-; FAST-NEXT: xxswapd vs2, v5
-; FAST-NEXT: stxvd2x vs2, r30, r3
-; FAST-NEXT: li r3, 16
-; FAST-NEXT: stxvd2x vs3, r30, r3
-; FAST-NEXT: li r3, 304
-; FAST-NEXT: stxvd2x vs4, 0, r30
-; FAST-NEXT: lfd f31, 472(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f30, 464(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f29, 456(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f28, 448(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f27, 440(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f26, 432(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f25, 424(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f24, 416(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f23, 408(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f22, 400(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f21, 392(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f20, 384(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f19, 376(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f18, 368(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f17, 360(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f16, 352(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f15, 344(r1) # 8-byte Folded Reload
-; FAST-NEXT: lfd f14, 336(r1) # 8-byte Folded Reload
-; FAST-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 288
-; FAST-NEXT: ld r30, 320(r1) # 8-byte Folded Reload
-; FAST-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 272
-; FAST-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 256
-; FAST-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 240
-; FAST-NEXT: lxvd2x v27, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 224
-; FAST-NEXT: lxvd2x v26, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 208
-; FAST-NEXT: lxvd2x v25, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 192
-; FAST-NEXT: lxvd2x v24, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 176
-; FAST-NEXT: lxvd2x v23, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 160
-; FAST-NEXT: lxvd2x v22, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 144
-; FAST-NEXT: lxvd2x v21, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 128
-; FAST-NEXT: lxvd2x v20, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: addi r1, r1, 480
-; FAST-NEXT: ld r0, 16(r1)
-; FAST-NEXT: mtlr r0
-; FAST-NEXT: blr
%a = call <32 x i64> @llvm.lrint.v32i64.v32f16(<32 x half> %x)
ret <32 x i64> %a
}
@@ -3400,12 +2423,6 @@ define <1 x i64> @lrint_v1f32(<1 x float> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: lrint_v1f32:
-; FAST: # %bb.0:
-; FAST-NEXT: fctid f0, f1
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: blr
%a = call <1 x i64> @llvm.lrint.v1i64.v1f32(<1 x float> %x)
ret <1 x i64> %a
}
@@ -3459,21 +2476,6 @@ define <2 x i64> @lrint_v2f32(<2 x float> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: lrint_v2f32:
-; FAST: # %bb.0:
-; FAST-NEXT: xxsldwi vs0, v2, v2, 3
-; FAST-NEXT: xxswapd vs1, v2
-; FAST-NEXT: xscvspdpn f0, vs0
-; FAST-NEXT: xscvspdpn f1, vs1
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: fctid f1, f1
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v2, vs1, vs0
-; FAST-NEXT: blr
%a = call <2 x i64> @llvm.lrint.v2i64.v2f32(<2 x float> %x)
ret <2 x i64> %a
}
@@ -3552,32 +2554,6 @@ define <4 x i64> @lrint_v4f32(<4 x float> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: lrint_v4f32:
-; FAST: # %bb.0:
-; FAST-NEXT: xxsldwi vs0, v2, v2, 3
-; FAST-NEXT: xxswapd vs1, v2
-; FAST-NEXT: xscvspdpn f0, vs0
-; FAST-NEXT: xxsldwi vs2, v2, v2, 1
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: xscvspdpn f0, vs1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v4, vs0, vs1
-; FAST-NEXT: xscvspdpn f0, v2
-; FAST-NEXT: vmr v2, v4
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: xscvspdpn f0, vs2
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v3, vs1, vs0
-; FAST-NEXT: blr
%a = call <4 x i64> @llvm.lrint.v4i64.v4f32(<4 x float> %x)
ret <4 x i64> %a
}
@@ -3710,54 +2686,6 @@ define <8 x i64> @lrint_v8f32(<8 x float> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: lrint_v8f32:
-; FAST: # %bb.0:
-; FAST-NEXT: xxsldwi vs0, v2, v2, 3
-; FAST-NEXT: xxswapd vs1, v2
-; FAST-NEXT: xscvspdpn f0, vs0
-; FAST-NEXT: xxsldwi vs2, v2, v2, 1
-; FAST-NEXT: xxsldwi vs3, v3, v3, 3
-; FAST-NEXT: xxswapd vs4, v3
-; FAST-NEXT: xxsldwi vs5, v3, v3, 1
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: xscvspdpn f0, vs1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v0, vs0, vs1
-; FAST-NEXT: xscvspdpn f0, v2
-; FAST-NEXT: vmr v2, v0
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: xscvspdpn f0, vs2
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v1, vs1, vs0
-; FAST-NEXT: xscvspdpn f0, vs3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: xscvspdpn f0, vs4
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v4, vs0, vs1
-; FAST-NEXT: xscvspdpn f0, v3
-; FAST-NEXT: vmr v3, v1
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: xscvspdpn f0, vs5
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v5, vs1, vs0
-; FAST-NEXT: blr
%a = call <8 x i64> @llvm.lrint.v8i64.v8f32(<8 x float> %x)
ret <8 x i64> %a
}
@@ -3998,98 +2926,6 @@ define <16 x i64> @lrint_v16i64_v16f32(<16 x float> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: lrint_v16i64_v16f32:
-; FAST: # %bb.0:
-; FAST-NEXT: xxsldwi vs0, v2, v2, 3
-; FAST-NEXT: xxswapd vs1, v2
-; FAST-NEXT: xscvspdpn f0, vs0
-; FAST-NEXT: xxsldwi vs2, v2, v2, 1
-; FAST-NEXT: xxsldwi vs3, v3, v3, 3
-; FAST-NEXT: xxswapd vs4, v3
-; FAST-NEXT: xxsldwi vs5, v3, v3, 1
-; FAST-NEXT: xxsldwi vs6, v4, v4, 3
-; FAST-NEXT: xxswapd vs7, v4
-; FAST-NEXT: xxsldwi vs8, v4, v4, 1
-; FAST-NEXT: xxsldwi vs9, v5, v5, 3
-; FAST-NEXT: xxswapd vs10, v5
-; FAST-NEXT: xxsldwi vs11, v5, v5, 1
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: xscvspdpn f0, vs1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v0, vs0, vs1
-; FAST-NEXT: xscvspdpn f0, v2
-; FAST-NEXT: vmr v2, v0
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: xscvspdpn f0, vs2
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v1, vs1, vs0
-; FAST-NEXT: xscvspdpn f0, vs3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: xscvspdpn f0, vs4
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v10, vs0, vs1
-; FAST-NEXT: xscvspdpn f0, v3
-; FAST-NEXT: vmr v3, v1
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: xscvspdpn f0, vs5
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v11, vs1, vs0
-; FAST-NEXT: xscvspdpn f0, vs6
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: xscvspdpn f0, vs7
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v6, vs0, vs1
-; FAST-NEXT: xscvspdpn f0, v4
-; FAST-NEXT: xscvspdpn f1, vs8
-; FAST-NEXT: vmr v4, v10
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: fctid f1, f1
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v7, vs0, vs1
-; FAST-NEXT: xscvspdpn f0, vs9
-; FAST-NEXT: xscvspdpn f1, vs10
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: fctid f1, f1
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v8, vs1, vs0
-; FAST-NEXT: xscvspdpn f0, v5
-; FAST-NEXT: xscvspdpn f1, vs11
-; FAST-NEXT: vmr v5, v11
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: fctid f1, f1
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v9, vs0, vs1
-; FAST-NEXT: blr
%a = call <16 x i64> @llvm.lrint.v16i64.v16f32(<16 x float> %x)
ret <16 x i64> %a
}
@@ -4119,12 +2955,6 @@ define <1 x i64> @lrint_v1f64(<1 x double> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: lrint_v1f64:
-; FAST: # %bb.0:
-; FAST-NEXT: fctid f0, f1
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: blr
%a = call <1 x i64> @llvm.lrint.v1i64.v1f64(<1 x double> %x)
ret <1 x i64> %a
}
@@ -4179,19 +3009,6 @@ define <2 x i64> @lrint_v2f64(<2 x double> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: lrint_v2f64:
-; FAST: # %bb.0:
-; FAST-NEXT: xxlor f1, v2, v2
-; FAST-NEXT: xxswapd vs0, v2
-; FAST-NEXT: fctid f1, f1
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: mffprd r3, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v2, vs1, vs0
-; FAST-NEXT: blr
%a = call <2 x i64> @llvm.lrint.v2i64.v2f64(<2 x double> %x)
ret <2 x i64> %a
}
@@ -4276,28 +3093,6 @@ define <4 x i64> @lrint_v4f64(<4 x double> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: lrint_v4f64:
-; FAST: # %bb.0:
-; FAST-NEXT: xxswapd vs0, v2
-; FAST-NEXT: xxlor f2, v2, v2
-; FAST-NEXT: xxswapd vs1, v3
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: fctid f2, f2
-; FAST-NEXT: fctid f1, f1
-; FAST-NEXT: mffprd r4, f0
-; FAST-NEXT: xxlor f0, v3, v3
-; FAST-NEXT: mffprd r3, f2
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mtfprd f2, r4
-; FAST-NEXT: mffprd r5, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v2, vs0, vs2
-; FAST-NEXT: mtfprd f0, r5
-; FAST-NEXT: xxmrghd v3, vs0, vs1
-; FAST-NEXT: blr
%a = call <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double> %x)
ret <4 x i64> %a
}
@@ -4442,46 +3237,6 @@ define <8 x i64> @lrint_v8f64(<8 x double> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: lrint_v8f64:
-; FAST: # %bb.0:
-; FAST-NEXT: xxswapd vs0, v2
-; FAST-NEXT: xxswapd vs1, v3
-; FAST-NEXT: xxlor f4, v2, v2
-; FAST-NEXT: xxswapd vs2, v4
-; FAST-NEXT: xxswapd vs3, v5
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: fctid f4, f4
-; FAST-NEXT: mffprd r4, f0
-; FAST-NEXT: xxlor f0, v3, v3
-; FAST-NEXT: mffprd r3, f4
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mffprd r5, f0
-; FAST-NEXT: fctid f0, f1
-; FAST-NEXT: mtfprd f1, r4
-; FAST-NEXT: mffprd r6, f0
-; FAST-NEXT: xxlor f0, v4, v4
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mtfprd f4, r6
-; FAST-NEXT: mffprd r7, f0
-; FAST-NEXT: fctid f0, f2
-; FAST-NEXT: mtfprd f2, r5
-; FAST-NEXT: mtfprd f5, r7
-; FAST-NEXT: mffprd r8, f0
-; FAST-NEXT: xxlor f0, v5, v5
-; FAST-NEXT: fctid f0, f0
-; FAST-NEXT: mtfprd f6, r8
-; FAST-NEXT: mffprd r9, f0
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: xxmrghd v3, vs2, vs4
-; FAST-NEXT: xxmrghd v4, vs5, vs6
-; FAST-NEXT: xxmrghd v2, vs0, vs1
-; FAST-NEXT: fctid f1, f3
-; FAST-NEXT: mtfprd f0, r9
-; FAST-NEXT: mffprd r3, f1
-; FAST-NEXT: mtfprd f1, r3
-; FAST-NEXT: xxmrghd v5, vs0, vs1
-; FAST-NEXT: blr
%a = call <8 x i64> @llvm.lrint.v8i64.v8f64(<8 x double> %x)
ret <8 x i64> %a
}
@@ -4511,18 +3266,6 @@ define <1 x i64> @lrint_v1f128(<1 x fp128> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: lrint_v1f128:
-; FAST: # %bb.0:
-; FAST-NEXT: mflr r0
-; FAST-NEXT: stdu r1, -32(r1)
-; FAST-NEXT: std r0, 48(r1)
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: addi r1, r1, 32
-; FAST-NEXT: ld r0, 16(r1)
-; FAST-NEXT: mtlr r0
-; FAST-NEXT: blr
%a = call <1 x i64> @llvm.lrint.v1i64.v1f128(<1 x fp128> %x)
ret <1 x i64> %a
}
@@ -4580,33 +3323,6 @@ define <2 x i64> @lrint_v2f128(<2 x fp128> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: lrint_v2f128:
-; FAST: # %bb.0:
-; FAST-NEXT: mflr r0
-; FAST-NEXT: stdu r1, -80(r1)
-; FAST-NEXT: li r3, 48
-; FAST-NEXT: std r0, 96(r1)
-; FAST-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 64
-; FAST-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: vmr v31, v3
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: vmr v2, v31
-; FAST-NEXT: mtvsrd v30, r3
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: li r3, 64
-; FAST-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 48
-; FAST-NEXT: xxmrghd v2, vs0, v30
-; FAST-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: addi r1, r1, 80
-; FAST-NEXT: ld r0, 16(r1)
-; FAST-NEXT: mtlr r0
-; FAST-NEXT: blr
%a = call <2 x i64> @llvm.lrint.v2i64.v2f128(<2 x fp128> %x)
ret <2 x i64> %a
}
@@ -4704,53 +3420,6 @@ define <4 x i64> @lrint_v4f128(<4 x fp128> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: lrint_v4f128:
-; FAST: # %bb.0:
-; FAST-NEXT: mflr r0
-; FAST-NEXT: stdu r1, -112(r1)
-; FAST-NEXT: li r3, 48
-; FAST-NEXT: std r0, 128(r1)
-; FAST-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 64
-; FAST-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 80
-; FAST-NEXT: vmr v29, v3
-; FAST-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 96
-; FAST-NEXT: vmr v30, v4
-; FAST-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: vmr v31, v5
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: vmr v2, v29
-; FAST-NEXT: mtvsrd v28, r3
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: vmr v2, v30
-; FAST-NEXT: xxmrghd v29, vs0, v28
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: vmr v2, v31
-; FAST-NEXT: mtvsrd v30, r3
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: li r3, 96
-; FAST-NEXT: vmr v2, v29
-; FAST-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 80
-; FAST-NEXT: xxmrghd v3, vs0, v30
-; FAST-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 64
-; FAST-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 48
-; FAST-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: addi r1, r1, 112
-; FAST-NEXT: ld r0, 16(r1)
-; FAST-NEXT: mtlr r0
-; FAST-NEXT: blr
%a = call <4 x i64> @llvm.lrint.v4i64.v4f128(<4 x fp128> %x)
ret <4 x i64> %a
}
@@ -4928,93 +3597,6 @@ define <8 x i64> @lrint_v8f128(<8 x fp128> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: lrint_v8f128:
-; FAST: # %bb.0:
-; FAST-NEXT: mflr r0
-; FAST-NEXT: stdu r1, -176(r1)
-; FAST-NEXT: li r3, 48
-; FAST-NEXT: std r0, 192(r1)
-; FAST-NEXT: stxvd2x v24, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 64
-; FAST-NEXT: stxvd2x v25, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 80
-; FAST-NEXT: vmr v25, v3
-; FAST-NEXT: stxvd2x v26, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 96
-; FAST-NEXT: vmr v26, v4
-; FAST-NEXT: stxvd2x v27, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 112
-; FAST-NEXT: vmr v27, v5
-; FAST-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 128
-; FAST-NEXT: vmr v28, v6
-; FAST-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 144
-; FAST-NEXT: vmr v29, v7
-; FAST-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 160
-; FAST-NEXT: vmr v30, v8
-; FAST-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: vmr v31, v9
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: vmr v2, v25
-; FAST-NEXT: mtvsrd v24, r3
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: vmr v2, v26
-; FAST-NEXT: xxmrghd v25, vs0, v24
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: vmr v2, v27
-; FAST-NEXT: mtvsrd v26, r3
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: vmr v2, v28
-; FAST-NEXT: xxmrghd v27, vs0, v26
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: vmr v2, v29
-; FAST-NEXT: mtvsrd v28, r3
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: vmr v2, v30
-; FAST-NEXT: xxmrghd v29, vs0, v28
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: vmr v2, v31
-; FAST-NEXT: mtvsrd v30, r3
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: li r3, 160
-; FAST-NEXT: vmr v4, v29
-; FAST-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 144
-; FAST-NEXT: vmr v3, v27
-; FAST-NEXT: vmr v2, v25
-; FAST-NEXT: xxmrghd v5, vs0, v30
-; FAST-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 128
-; FAST-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 112
-; FAST-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 96
-; FAST-NEXT: lxvd2x v27, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 80
-; FAST-NEXT: lxvd2x v26, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 64
-; FAST-NEXT: lxvd2x v25, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 48
-; FAST-NEXT: lxvd2x v24, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: addi r1, r1, 176
-; FAST-NEXT: ld r0, 16(r1)
-; FAST-NEXT: mtlr r0
-; FAST-NEXT: blr
%a = call <8 x i64> @llvm.lrint.v8i64.v8f128(<8 x fp128> %x)
ret <8 x i64> %a
}
@@ -5355,176 +3937,6 @@ define <16 x i64> @lrint_v16i64_v16f128(<16 x fp128> %x) nounwind {
; CHECK-NEXT: ld r0, 16(r1)
; CHECK-NEXT: mtlr r0
; CHECK-NEXT: blr
-;
-; FAST-LABEL: lrint_v16i64_v16f128:
-; FAST: # %bb.0:
-; FAST-NEXT: mflr r0
-; FAST-NEXT: stdu r1, -304(r1)
-; FAST-NEXT: li r3, 112
-; FAST-NEXT: std r0, 320(r1)
-; FAST-NEXT: stxvd2x v20, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 128
-; FAST-NEXT: stxvd2x v21, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 144
-; FAST-NEXT: vmr v21, v4
-; FAST-NEXT: stxvd2x v22, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 160
-; FAST-NEXT: vmr v22, v6
-; FAST-NEXT: stxvd2x v23, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 176
-; FAST-NEXT: vmr v23, v8
-; FAST-NEXT: stxvd2x v24, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 192
-; FAST-NEXT: vmr v24, v9
-; FAST-NEXT: stxvd2x v25, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 208
-; FAST-NEXT: vmr v25, v7
-; FAST-NEXT: stxvd2x v26, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 224
-; FAST-NEXT: vmr v26, v10
-; FAST-NEXT: stxvd2x v27, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 240
-; FAST-NEXT: vmr v27, v5
-; FAST-NEXT: stxvd2x v28, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 256
-; FAST-NEXT: vmr v28, v11
-; FAST-NEXT: stxvd2x v29, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 272
-; FAST-NEXT: vmr v29, v12
-; FAST-NEXT: stxvd2x v30, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 288
-; FAST-NEXT: vmr v30, v3
-; FAST-NEXT: stxvd2x v31, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 64
-; FAST-NEXT: stxvd2x v13, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: addi r3, r1, 576
-; FAST-NEXT: lxvd2x vs0, 0, r3
-; FAST-NEXT: addi r3, r1, 560
-; FAST-NEXT: lxvd2x vs1, 0, r3
-; FAST-NEXT: addi r3, r1, 544
-; FAST-NEXT: lxvd2x vs2, 0, r3
-; FAST-NEXT: li r3, 96
-; FAST-NEXT: xxswapd vs0, vs0
-; FAST-NEXT: stxvd2x vs0, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 80
-; FAST-NEXT: xxswapd vs0, vs1
-; FAST-NEXT: stxvd2x vs0, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: li r3, 48
-; FAST-NEXT: xxswapd vs0, vs2
-; FAST-NEXT: stxvd2x vs0, r1, r3 # 16-byte Folded Spill
-; FAST-NEXT: addi r3, r1, 528
-; FAST-NEXT: lxvd2x vs0, 0, r3
-; FAST-NEXT: xxswapd v31, vs0
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: vmr v2, v30
-; FAST-NEXT: mtvsrd v20, r3
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: vmr v2, v21
-; FAST-NEXT: xxmrghd v30, vs0, v20
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: vmr v2, v27
-; FAST-NEXT: mtvsrd v21, r3
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: vmr v2, v22
-; FAST-NEXT: xxmrghd v27, vs0, v21
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: vmr v2, v25
-; FAST-NEXT: mtvsrd v22, r3
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: vmr v2, v23
-; FAST-NEXT: xxmrghd v25, vs0, v22
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: vmr v2, v24
-; FAST-NEXT: mtvsrd v23, r3
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: vmr v2, v26
-; FAST-NEXT: xxmrghd v24, vs0, v23
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: vmr v2, v28
-; FAST-NEXT: mtvsrd v26, r3
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: vmr v2, v29
-; FAST-NEXT: xxmrghd v28, vs0, v26
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtvsrd v29, r3
-; FAST-NEXT: li r3, 64
-; FAST-NEXT: lxvd2x v2, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: vmr v2, v31
-; FAST-NEXT: xxmrghd v29, vs0, v29
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtvsrd v31, r3
-; FAST-NEXT: li r3, 48
-; FAST-NEXT: lxvd2x v2, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: li r3, 80
-; FAST-NEXT: lxvd2x v2, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: xxmrghd v31, vs0, v31
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtvsrd v26, r3
-; FAST-NEXT: li r3, 96
-; FAST-NEXT: lxvd2x v2, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: bl lrintf128
-; FAST-NEXT: nop
-; FAST-NEXT: mtfprd f0, r3
-; FAST-NEXT: li r3, 288
-; FAST-NEXT: vmr v8, v31
-; FAST-NEXT: lxvd2x v31, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 272
-; FAST-NEXT: vmr v2, v30
-; FAST-NEXT: vmr v7, v29
-; FAST-NEXT: vmr v6, v28
-; FAST-NEXT: vmr v3, v27
-; FAST-NEXT: lxvd2x v30, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 256
-; FAST-NEXT: vmr v4, v25
-; FAST-NEXT: vmr v5, v24
-; FAST-NEXT: lxvd2x v29, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 240
-; FAST-NEXT: lxvd2x v28, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 224
-; FAST-NEXT: lxvd2x v27, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 208
-; FAST-NEXT: xxmrghd v9, vs0, v26
-; FAST-NEXT: lxvd2x v26, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 192
-; FAST-NEXT: lxvd2x v25, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 176
-; FAST-NEXT: lxvd2x v24, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 160
-; FAST-NEXT: lxvd2x v23, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 144
-; FAST-NEXT: lxvd2x v22, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 128
-; FAST-NEXT: lxvd2x v21, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: li r3, 112
-; FAST-NEXT: lxvd2x v20, r1, r3 # 16-byte Folded Reload
-; FAST-NEXT: addi r1, r1, 304
-; FAST-NEXT: ld r0, 16(r1)
-; FAST-NEXT: mtlr r0
-; FAST-NEXT: blr
%a = call <16 x i64> @llvm.lrint.v16i64.v16f128(<16 x fp128> %x)
ret <16 x i64> %a
}
diff --git a/llvm/test/CodeGen/RISCV/atomic-fence.ll b/llvm/test/CodeGen/RISCV/atomic-fence.ll
index 7103345..77148f6 100644
--- a/llvm/test/CodeGen/RISCV/atomic-fence.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-fence.ll
@@ -1,12 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck --check-prefixes=CHECK,WMO %s
+; RUN: llc -mtriple=riscv32 -mattr=+zalrsc -verify-machineinstrs < %s \
+; RUN: | FileCheck --check-prefixes=CHECK,WMO %s
; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck --check-prefixes=CHECK,WMO %s
; RUN: llc -mtriple=riscv32 -mattr=+a,+ztso -verify-machineinstrs < %s \
; RUN: | FileCheck --check-prefixes=CHECK,TSO %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck --check-prefixes=CHECK,WMO %s
+; RUN: llc -mtriple=riscv64 -mattr=+zalrsc -verify-machineinstrs < %s \
+; RUN: | FileCheck --check-prefixes=CHECK,WMO %s
; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck --check-prefixes=CHECK,WMO %s
; RUN: llc -mtriple=riscv64 -mattr=+a,+ztso -verify-machineinstrs < %s \
diff --git a/llvm/test/CodeGen/RISCV/atomic-load-store.ll b/llvm/test/CodeGen/RISCV/atomic-load-store.ll
index 7e3abc7..c6234de 100644
--- a/llvm/test/CodeGen/RISCV/atomic-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-load-store.ll
@@ -1,12 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv32 -mattr=+zalrsc -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32I-ZALRSC %s
; RUN: llc -mtriple=riscv32 -mattr=+a,+no-trailing-seq-cst-fence -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-WMO %s
; RUN: llc -mtriple=riscv32 -mattr=+a,+ztso,+no-trailing-seq-cst-fence -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-TSO %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
+; RUN: llc -mtriple=riscv64 -mattr=+zalrsc -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64I-ZALRSC %s
; RUN: llc -mtriple=riscv64 -mattr=+a,+no-trailing-seq-cst-fence -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-WMO %s
; RUN: llc -mtriple=riscv64 -mattr=+a,+ztso,+no-trailing-seq-cst-fence -verify-machineinstrs < %s \
@@ -44,6 +48,11 @@ define i8 @atomic_load_i8_unordered(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_load_i8_unordered:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: lb a0, 0(a0)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomic_load_i8_unordered:
; RV32IA: # %bb.0:
; RV32IA-NEXT: lb a0, 0(a0)
@@ -59,6 +68,11 @@ define i8 @atomic_load_i8_unordered(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_load_i8_unordered:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: lb a0, 0(a0)
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomic_load_i8_unordered:
; RV64IA: # %bb.0:
; RV64IA-NEXT: lb a0, 0(a0)
@@ -78,6 +92,11 @@ define i8 @atomic_load_i8_monotonic(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_load_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: lb a0, 0(a0)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomic_load_i8_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: lb a0, 0(a0)
@@ -93,6 +112,11 @@ define i8 @atomic_load_i8_monotonic(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_load_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: lb a0, 0(a0)
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomic_load_i8_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: lb a0, 0(a0)
@@ -112,6 +136,12 @@ define i8 @atomic_load_i8_acquire(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_load_i8_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: lb a0, 0(a0)
+; RV32I-ZALRSC-NEXT: fence r, rw
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomic_load_i8_acquire:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: lb a0, 0(a0)
@@ -133,6 +163,12 @@ define i8 @atomic_load_i8_acquire(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_load_i8_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: lb a0, 0(a0)
+; RV64I-ZALRSC-NEXT: fence r, rw
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomic_load_i8_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: lb a0, 0(a0)
@@ -200,6 +236,13 @@ define i8 @atomic_load_i8_seq_cst(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_load_i8_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: fence rw, rw
+; RV32I-ZALRSC-NEXT: lb a0, 0(a0)
+; RV32I-ZALRSC-NEXT: fence r, rw
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomic_load_i8_seq_cst:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: fence rw, rw
@@ -223,6 +266,13 @@ define i8 @atomic_load_i8_seq_cst(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_load_i8_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: fence rw, rw
+; RV64I-ZALRSC-NEXT: lb a0, 0(a0)
+; RV64I-ZALRSC-NEXT: fence r, rw
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomic_load_i8_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: fence rw, rw
@@ -286,6 +336,11 @@ define i16 @atomic_load_i16_unordered(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_load_i16_unordered:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: lh a0, 0(a0)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomic_load_i16_unordered:
; RV32IA: # %bb.0:
; RV32IA-NEXT: lh a0, 0(a0)
@@ -301,6 +356,11 @@ define i16 @atomic_load_i16_unordered(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_load_i16_unordered:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: lh a0, 0(a0)
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomic_load_i16_unordered:
; RV64IA: # %bb.0:
; RV64IA-NEXT: lh a0, 0(a0)
@@ -320,6 +380,11 @@ define i16 @atomic_load_i16_monotonic(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_load_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: lh a0, 0(a0)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomic_load_i16_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: lh a0, 0(a0)
@@ -335,6 +400,11 @@ define i16 @atomic_load_i16_monotonic(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_load_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: lh a0, 0(a0)
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomic_load_i16_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: lh a0, 0(a0)
@@ -354,6 +424,12 @@ define i16 @atomic_load_i16_acquire(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_load_i16_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: lh a0, 0(a0)
+; RV32I-ZALRSC-NEXT: fence r, rw
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomic_load_i16_acquire:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: lh a0, 0(a0)
@@ -375,6 +451,12 @@ define i16 @atomic_load_i16_acquire(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_load_i16_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: lh a0, 0(a0)
+; RV64I-ZALRSC-NEXT: fence r, rw
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomic_load_i16_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: lh a0, 0(a0)
@@ -442,6 +524,13 @@ define i16 @atomic_load_i16_seq_cst(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_load_i16_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: fence rw, rw
+; RV32I-ZALRSC-NEXT: lh a0, 0(a0)
+; RV32I-ZALRSC-NEXT: fence r, rw
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomic_load_i16_seq_cst:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: fence rw, rw
@@ -465,6 +554,13 @@ define i16 @atomic_load_i16_seq_cst(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_load_i16_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: fence rw, rw
+; RV64I-ZALRSC-NEXT: lh a0, 0(a0)
+; RV64I-ZALRSC-NEXT: fence r, rw
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomic_load_i16_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: fence rw, rw
@@ -528,6 +624,11 @@ define i32 @atomic_load_i32_unordered(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_load_i32_unordered:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: lw a0, 0(a0)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomic_load_i32_unordered:
; RV32IA: # %bb.0:
; RV32IA-NEXT: lw a0, 0(a0)
@@ -543,6 +644,11 @@ define i32 @atomic_load_i32_unordered(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_load_i32_unordered:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: lw a0, 0(a0)
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomic_load_i32_unordered:
; RV64IA: # %bb.0:
; RV64IA-NEXT: lw a0, 0(a0)
@@ -562,6 +668,11 @@ define i32 @atomic_load_i32_monotonic(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_load_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: lw a0, 0(a0)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomic_load_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: lw a0, 0(a0)
@@ -577,6 +688,11 @@ define i32 @atomic_load_i32_monotonic(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_load_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: lw a0, 0(a0)
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomic_load_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: lw a0, 0(a0)
@@ -596,6 +712,12 @@ define i32 @atomic_load_i32_acquire(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_load_i32_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: lw a0, 0(a0)
+; RV32I-ZALRSC-NEXT: fence r, rw
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomic_load_i32_acquire:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: lw a0, 0(a0)
@@ -617,6 +739,12 @@ define i32 @atomic_load_i32_acquire(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_load_i32_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: lw a0, 0(a0)
+; RV64I-ZALRSC-NEXT: fence r, rw
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomic_load_i32_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: lw a0, 0(a0)
@@ -684,6 +812,13 @@ define i32 @atomic_load_i32_seq_cst(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_load_i32_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: fence rw, rw
+; RV32I-ZALRSC-NEXT: lw a0, 0(a0)
+; RV32I-ZALRSC-NEXT: fence r, rw
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomic_load_i32_seq_cst:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: fence rw, rw
@@ -707,6 +842,13 @@ define i32 @atomic_load_i32_seq_cst(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_load_i32_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: fence rw, rw
+; RV64I-ZALRSC-NEXT: lw a0, 0(a0)
+; RV64I-ZALRSC-NEXT: fence r, rw
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomic_load_i32_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: fence rw, rw
@@ -770,6 +912,16 @@ define i64 @atomic_load_i64_unordered(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_load_i64_unordered:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a1, 0
+; RV32I-ZALRSC-NEXT: call __atomic_load_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomic_load_i64_unordered:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -790,6 +942,11 @@ define i64 @atomic_load_i64_unordered(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_load_i64_unordered:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: ld a0, 0(a0)
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomic_load_i64_unordered:
; RV64IA: # %bb.0:
; RV64IA-NEXT: ld a0, 0(a0)
@@ -809,6 +966,16 @@ define i64 @atomic_load_i64_monotonic(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_load_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a1, 0
+; RV32I-ZALRSC-NEXT: call __atomic_load_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomic_load_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -829,6 +996,11 @@ define i64 @atomic_load_i64_monotonic(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_load_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: ld a0, 0(a0)
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomic_load_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: ld a0, 0(a0)
@@ -848,6 +1020,16 @@ define i64 @atomic_load_i64_acquire(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_load_i64_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a1, 2
+; RV32I-ZALRSC-NEXT: call __atomic_load_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomic_load_i64_acquire:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -868,6 +1050,12 @@ define i64 @atomic_load_i64_acquire(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_load_i64_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: ld a0, 0(a0)
+; RV64I-ZALRSC-NEXT: fence r, rw
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomic_load_i64_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: ld a0, 0(a0)
@@ -914,6 +1102,16 @@ define i64 @atomic_load_i64_seq_cst(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_load_i64_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a1, 5
+; RV32I-ZALRSC-NEXT: call __atomic_load_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomic_load_i64_seq_cst:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -934,6 +1132,13 @@ define i64 @atomic_load_i64_seq_cst(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_load_i64_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: fence rw, rw
+; RV64I-ZALRSC-NEXT: ld a0, 0(a0)
+; RV64I-ZALRSC-NEXT: fence r, rw
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomic_load_i64_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: fence rw, rw
@@ -979,6 +1184,11 @@ define void @atomic_store_i8_unordered(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_store_i8_unordered:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: sb a1, 0(a0)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomic_store_i8_unordered:
; RV32IA: # %bb.0:
; RV32IA-NEXT: sb a1, 0(a0)
@@ -994,6 +1204,11 @@ define void @atomic_store_i8_unordered(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_store_i8_unordered:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sb a1, 0(a0)
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomic_store_i8_unordered:
; RV64IA: # %bb.0:
; RV64IA-NEXT: sb a1, 0(a0)
@@ -1013,6 +1228,11 @@ define void @atomic_store_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_store_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: sb a1, 0(a0)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomic_store_i8_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: sb a1, 0(a0)
@@ -1028,6 +1248,11 @@ define void @atomic_store_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_store_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sb a1, 0(a0)
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomic_store_i8_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: sb a1, 0(a0)
@@ -1047,6 +1272,12 @@ define void @atomic_store_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_store_i8_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: fence rw, w
+; RV32I-ZALRSC-NEXT: sb a1, 0(a0)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomic_store_i8_release:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: fence rw, w
@@ -1068,6 +1299,12 @@ define void @atomic_store_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_store_i8_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: fence rw, w
+; RV64I-ZALRSC-NEXT: sb a1, 0(a0)
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomic_store_i8_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: fence rw, w
@@ -1135,6 +1372,13 @@ define void @atomic_store_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_store_i8_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: fence rw, w
+; RV32I-ZALRSC-NEXT: sb a1, 0(a0)
+; RV32I-ZALRSC-NEXT: fence rw, rw
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomic_store_i8_seq_cst:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: fence rw, w
@@ -1157,6 +1401,13 @@ define void @atomic_store_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_store_i8_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: fence rw, w
+; RV64I-ZALRSC-NEXT: sb a1, 0(a0)
+; RV64I-ZALRSC-NEXT: fence rw, rw
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomic_store_i8_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: fence rw, w
@@ -1219,6 +1470,11 @@ define void @atomic_store_i16_unordered(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_store_i16_unordered:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: sh a1, 0(a0)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomic_store_i16_unordered:
; RV32IA: # %bb.0:
; RV32IA-NEXT: sh a1, 0(a0)
@@ -1234,6 +1490,11 @@ define void @atomic_store_i16_unordered(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_store_i16_unordered:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sh a1, 0(a0)
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomic_store_i16_unordered:
; RV64IA: # %bb.0:
; RV64IA-NEXT: sh a1, 0(a0)
@@ -1253,6 +1514,11 @@ define void @atomic_store_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_store_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: sh a1, 0(a0)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomic_store_i16_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: sh a1, 0(a0)
@@ -1268,6 +1534,11 @@ define void @atomic_store_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_store_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sh a1, 0(a0)
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomic_store_i16_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: sh a1, 0(a0)
@@ -1287,6 +1558,12 @@ define void @atomic_store_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_store_i16_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: fence rw, w
+; RV32I-ZALRSC-NEXT: sh a1, 0(a0)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomic_store_i16_release:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: fence rw, w
@@ -1308,6 +1585,12 @@ define void @atomic_store_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_store_i16_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: fence rw, w
+; RV64I-ZALRSC-NEXT: sh a1, 0(a0)
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomic_store_i16_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: fence rw, w
@@ -1375,6 +1658,13 @@ define void @atomic_store_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_store_i16_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: fence rw, w
+; RV32I-ZALRSC-NEXT: sh a1, 0(a0)
+; RV32I-ZALRSC-NEXT: fence rw, rw
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomic_store_i16_seq_cst:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: fence rw, w
@@ -1397,6 +1687,13 @@ define void @atomic_store_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_store_i16_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: fence rw, w
+; RV64I-ZALRSC-NEXT: sh a1, 0(a0)
+; RV64I-ZALRSC-NEXT: fence rw, rw
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomic_store_i16_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: fence rw, w
@@ -1459,6 +1756,11 @@ define void @atomic_store_i32_unordered(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_store_i32_unordered:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: sw a1, 0(a0)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomic_store_i32_unordered:
; RV32IA: # %bb.0:
; RV32IA-NEXT: sw a1, 0(a0)
@@ -1474,6 +1776,11 @@ define void @atomic_store_i32_unordered(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_store_i32_unordered:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sw a1, 0(a0)
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomic_store_i32_unordered:
; RV64IA: # %bb.0:
; RV64IA-NEXT: sw a1, 0(a0)
@@ -1493,6 +1800,11 @@ define void @atomic_store_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_store_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: sw a1, 0(a0)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomic_store_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: sw a1, 0(a0)
@@ -1508,6 +1820,11 @@ define void @atomic_store_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_store_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sw a1, 0(a0)
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomic_store_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: sw a1, 0(a0)
@@ -1527,6 +1844,12 @@ define void @atomic_store_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_store_i32_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: fence rw, w
+; RV32I-ZALRSC-NEXT: sw a1, 0(a0)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomic_store_i32_release:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: fence rw, w
@@ -1548,6 +1871,12 @@ define void @atomic_store_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_store_i32_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: fence rw, w
+; RV64I-ZALRSC-NEXT: sw a1, 0(a0)
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomic_store_i32_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: fence rw, w
@@ -1615,6 +1944,13 @@ define void @atomic_store_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_store_i32_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: fence rw, w
+; RV32I-ZALRSC-NEXT: sw a1, 0(a0)
+; RV32I-ZALRSC-NEXT: fence rw, rw
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomic_store_i32_seq_cst:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: fence rw, w
@@ -1637,6 +1973,13 @@ define void @atomic_store_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_store_i32_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: fence rw, w
+; RV64I-ZALRSC-NEXT: sw a1, 0(a0)
+; RV64I-ZALRSC-NEXT: fence rw, rw
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomic_store_i32_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: fence rw, w
@@ -1699,6 +2042,16 @@ define void @atomic_store_i64_unordered(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_store_i64_unordered:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 0
+; RV32I-ZALRSC-NEXT: call __atomic_store_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomic_store_i64_unordered:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -1719,6 +2072,11 @@ define void @atomic_store_i64_unordered(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_store_i64_unordered:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sd a1, 0(a0)
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomic_store_i64_unordered:
; RV64IA: # %bb.0:
; RV64IA-NEXT: sd a1, 0(a0)
@@ -1738,6 +2096,16 @@ define void @atomic_store_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_store_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 0
+; RV32I-ZALRSC-NEXT: call __atomic_store_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomic_store_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -1758,6 +2126,11 @@ define void @atomic_store_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_store_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sd a1, 0(a0)
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomic_store_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: sd a1, 0(a0)
@@ -1777,6 +2150,16 @@ define void @atomic_store_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_store_i64_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 3
+; RV32I-ZALRSC-NEXT: call __atomic_store_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomic_store_i64_release:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -1797,6 +2180,12 @@ define void @atomic_store_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_store_i64_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: fence rw, w
+; RV64I-ZALRSC-NEXT: sd a1, 0(a0)
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomic_store_i64_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: fence rw, w
@@ -1843,6 +2232,16 @@ define void @atomic_store_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_store_i64_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 5
+; RV32I-ZALRSC-NEXT: call __atomic_store_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomic_store_i64_seq_cst:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -1863,6 +2262,13 @@ define void @atomic_store_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomic_store_i64_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: fence rw, w
+; RV64I-ZALRSC-NEXT: sd a1, 0(a0)
+; RV64I-ZALRSC-NEXT: fence rw, rw
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomic_store_i64_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: fence rw, w
diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw-sub.ll b/llvm/test/CodeGen/RISCV/atomic-rmw-sub.ll
index 4dafd6a..d5238ab 100644
--- a/llvm/test/CodeGen/RISCV/atomic-rmw-sub.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw-sub.ll
@@ -3,10 +3,14 @@
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32IA %s
+; RUN: llc -mtriple=riscv32 -mattr=+zalrsc -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV32I-ZALRSC %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA %s
+; RUN: llc -mtriple=riscv64 -mattr=+zalrsc -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV64I-ZALRSC %s
define i32 @atomicrmw_sub_i32_constant(ptr %a) nounwind {
; RV32I-LABEL: atomicrmw_sub_i32_constant:
@@ -26,6 +30,18 @@ define i32 @atomicrmw_sub_i32_constant(ptr %a) nounwind {
; RV32IA-NEXT: amoadd.w.aqrl a0, a1, (a0)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i32_constant:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: li a2, 1
+; RV32I-ZALRSC-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a1, (a0)
+; RV32I-ZALRSC-NEXT: sub a3, a1, a2
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB0_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a1
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_sub_i32_constant:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -42,6 +58,18 @@ define i32 @atomicrmw_sub_i32_constant(ptr %a) nounwind {
; RV64IA-NEXT: li a1, -1
; RV64IA-NEXT: amoadd.w.aqrl a0, a1, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i32_constant:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: li a2, 1
+; RV64I-ZALRSC-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a1, (a0)
+; RV64I-ZALRSC-NEXT: sub a3, a1, a2
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB0_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw sub ptr %a, i32 1 seq_cst
ret i32 %1
}
@@ -71,6 +99,18 @@ define i64 @atomicrmw_sub_i64_constant(ptr %a) nounwind {
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i64_constant:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a1, 1
+; RV32I-ZALRSC-NEXT: li a3, 5
+; RV32I-ZALRSC-NEXT: li a2, 0
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_sub_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_sub_i64_constant:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -87,6 +127,18 @@ define i64 @atomicrmw_sub_i64_constant(ptr %a) nounwind {
; RV64IA-NEXT: li a1, -1
; RV64IA-NEXT: amoadd.d.aqrl a0, a1, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i64_constant:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: li a2, 1
+; RV64I-ZALRSC-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aqrl a1, (a0)
+; RV64I-ZALRSC-NEXT: sub a3, a1, a2
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB1_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw sub ptr %a, i64 1 seq_cst
ret i64 %1
}
@@ -109,6 +161,18 @@ define i32 @atomicrmw_sub_i32_neg(ptr %a, i32 %x, i32 %y) nounwind {
; RV32IA-NEXT: amoadd.w.aqrl a0, a2, (a0)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i32_neg:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: sub a2, a1, a2
+; RV32I-ZALRSC-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a1, (a0)
+; RV32I-ZALRSC-NEXT: sub a3, a1, a2
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB2_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a1
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_sub_i32_neg:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -125,6 +189,18 @@ define i32 @atomicrmw_sub_i32_neg(ptr %a, i32 %x, i32 %y) nounwind {
; RV64IA-NEXT: sub a2, a2, a1
; RV64IA-NEXT: amoadd.w.aqrl a0, a2, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i32_neg:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: subw a2, a1, a2
+; RV64I-ZALRSC-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a1, (a0)
+; RV64I-ZALRSC-NEXT: sub a3, a1, a2
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB2_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
%b = sub i32 %x, %y
%1 = atomicrmw sub ptr %a, i32 %b seq_cst
ret i32 %1
@@ -159,6 +235,20 @@ define i64 @atomicrmw_sub_i64_neg(ptr %a, i64 %x, i64 %y) nounwind {
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i64_neg:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sltu a5, a1, a3
+; RV32I-ZALRSC-NEXT: sub a2, a2, a4
+; RV32I-ZALRSC-NEXT: sub a2, a2, a5
+; RV32I-ZALRSC-NEXT: sub a1, a1, a3
+; RV32I-ZALRSC-NEXT: li a3, 5
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_sub_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_sub_i64_neg:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -175,6 +265,18 @@ define i64 @atomicrmw_sub_i64_neg(ptr %a, i64 %x, i64 %y) nounwind {
; RV64IA-NEXT: sub a2, a2, a1
; RV64IA-NEXT: amoadd.d.aqrl a0, a2, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i64_neg:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sub a2, a1, a2
+; RV64I-ZALRSC-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aqrl a1, (a0)
+; RV64I-ZALRSC-NEXT: sub a3, a1, a2
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB3_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
%b = sub i64 %x, %y
%1 = atomicrmw sub ptr %a, i64 %b seq_cst
ret i64 %1
diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
index 1213256..26feb83 100644
--- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
@@ -1,12 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv32 -mattr=+zalrsc -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV32I-ZALRSC %s
; RUN: llc -mtriple=riscv32 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-NOZACAS,RV32IA-WMO,RV32IA-WMO-NOZACAS %s
; RUN: llc -mtriple=riscv32 -mattr=+a,+ztso -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-NOZACAS,RV32IA-TSO,RV32IA-TSO-NOZACAS %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
+; RUN: llc -mtriple=riscv64 -mattr=+zalrsc -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV64I-ZALRSC %s
; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-NOZACAS,RV64IA-WMO,RV64IA-WMO-NOZACAS %s
; RUN: llc -mtriple=riscv64 -mattr=+a,+ztso -verify-machineinstrs < %s \
@@ -50,6 +54,26 @@ define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV32I-ZALRSC-NEXT: mv a5, a1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB0_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_xchg_i8_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -80,6 +104,26 @@ define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV64I-ZALRSC-NEXT: mv a5, a1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB0_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_xchg_i8_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -174,6 +218,26 @@ define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i8_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV32I-ZALRSC-NEXT: mv a5, a1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB1_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_i8_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -224,6 +288,26 @@ define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i8_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV64I-ZALRSC-NEXT: mv a5, a1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB1_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_i8_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -378,6 +462,26 @@ define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i8_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV32I-ZALRSC-NEXT: mv a5, a1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB2_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_i8_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -428,6 +532,26 @@ define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i8_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV64I-ZALRSC-NEXT: mv a5, a1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB2_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_i8_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -582,6 +706,26 @@ define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i8_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV32I-ZALRSC-NEXT: mv a5, a1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB3_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_i8_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -632,6 +776,26 @@ define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i8_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV64I-ZALRSC-NEXT: mv a5, a1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB3_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_i8_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -786,6 +950,26 @@ define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i8_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a4, (a2)
+; RV32I-ZALRSC-NEXT: mv a5, a1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB4_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_xchg_i8_seq_cst:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -816,6 +1000,26 @@ define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i8_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a4, (a2)
+; RV64I-ZALRSC-NEXT: mv a5, a1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB4_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_xchg_i8_seq_cst:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -914,6 +1118,22 @@ define i8 @atomicrmw_xchg_0_i8_monotonic(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_0_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a1, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a2, 255
+; RV32I-ZALRSC-NEXT: sll a2, a2, a0
+; RV32I-ZALRSC-NEXT: not a2, a2
+; RV32I-ZALRSC-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a1)
+; RV32I-ZALRSC-NEXT: and a4, a3, a2
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a1)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB5_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_xchg_0_i8_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a1, a0, -4
@@ -936,6 +1156,22 @@ define i8 @atomicrmw_xchg_0_i8_monotonic(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_0_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a2, 255
+; RV64I-ZALRSC-NEXT: sllw a2, a2, a0
+; RV64I-ZALRSC-NEXT: not a2, a2
+; RV64I-ZALRSC-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a1)
+; RV64I-ZALRSC-NEXT: and a4, a3, a2
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a1)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB5_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_xchg_0_i8_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a1, a0, -4
@@ -1004,6 +1240,22 @@ define i8 @atomicrmw_xchg_0_i8_acquire(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_0_i8_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a1, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a2, 255
+; RV32I-ZALRSC-NEXT: sll a2, a2, a0
+; RV32I-ZALRSC-NEXT: not a2, a2
+; RV32I-ZALRSC-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a1)
+; RV32I-ZALRSC-NEXT: and a4, a3, a2
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a1)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB6_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_0_i8_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -1037,6 +1289,22 @@ define i8 @atomicrmw_xchg_0_i8_acquire(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_0_i8_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a2, 255
+; RV64I-ZALRSC-NEXT: sllw a2, a2, a0
+; RV64I-ZALRSC-NEXT: not a2, a2
+; RV64I-ZALRSC-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a1)
+; RV64I-ZALRSC-NEXT: and a4, a3, a2
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a1)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB6_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_0_i8_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -1138,6 +1406,22 @@ define i8 @atomicrmw_xchg_0_i8_release(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_0_i8_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a1, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a2, 255
+; RV32I-ZALRSC-NEXT: sll a2, a2, a0
+; RV32I-ZALRSC-NEXT: not a2, a2
+; RV32I-ZALRSC-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a1)
+; RV32I-ZALRSC-NEXT: and a4, a3, a2
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB7_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_0_i8_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -1171,6 +1455,22 @@ define i8 @atomicrmw_xchg_0_i8_release(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_0_i8_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a2, 255
+; RV64I-ZALRSC-NEXT: sllw a2, a2, a0
+; RV64I-ZALRSC-NEXT: not a2, a2
+; RV64I-ZALRSC-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a1)
+; RV64I-ZALRSC-NEXT: and a4, a3, a2
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB7_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_0_i8_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -1272,6 +1572,22 @@ define i8 @atomicrmw_xchg_0_i8_acq_rel(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_0_i8_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a1, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a2, 255
+; RV32I-ZALRSC-NEXT: sll a2, a2, a0
+; RV32I-ZALRSC-NEXT: not a2, a2
+; RV32I-ZALRSC-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a1)
+; RV32I-ZALRSC-NEXT: and a4, a3, a2
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB8_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_0_i8_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -1305,6 +1621,22 @@ define i8 @atomicrmw_xchg_0_i8_acq_rel(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_0_i8_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a2, 255
+; RV64I-ZALRSC-NEXT: sllw a2, a2, a0
+; RV64I-ZALRSC-NEXT: not a2, a2
+; RV64I-ZALRSC-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a1)
+; RV64I-ZALRSC-NEXT: and a4, a3, a2
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB8_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_0_i8_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -1406,6 +1738,22 @@ define i8 @atomicrmw_xchg_0_i8_seq_cst(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_0_i8_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a1, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a2, 255
+; RV32I-ZALRSC-NEXT: sll a2, a2, a0
+; RV32I-ZALRSC-NEXT: not a2, a2
+; RV32I-ZALRSC-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a3, (a1)
+; RV32I-ZALRSC-NEXT: and a4, a3, a2
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB9_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_0_i8_seq_cst:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -1439,6 +1787,22 @@ define i8 @atomicrmw_xchg_0_i8_seq_cst(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_0_i8_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a2, 255
+; RV64I-ZALRSC-NEXT: sllw a2, a2, a0
+; RV64I-ZALRSC-NEXT: not a2, a2
+; RV64I-ZALRSC-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a3, (a1)
+; RV64I-ZALRSC-NEXT: and a4, a3, a2
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB9_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_0_i8_seq_cst:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -1540,6 +1904,21 @@ define i8 @atomicrmw_xchg_minus_1_i8_monotonic(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_minus_1_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a1, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a2, 255
+; RV32I-ZALRSC-NEXT: sll a2, a2, a0
+; RV32I-ZALRSC-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a1)
+; RV32I-ZALRSC-NEXT: or a4, a3, a2
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a1)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB10_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_xchg_minus_1_i8_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a1, a0, -4
@@ -1561,6 +1940,21 @@ define i8 @atomicrmw_xchg_minus_1_i8_monotonic(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_minus_1_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a2, 255
+; RV64I-ZALRSC-NEXT: sllw a2, a2, a0
+; RV64I-ZALRSC-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a1)
+; RV64I-ZALRSC-NEXT: or a4, a3, a2
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a1)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB10_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_xchg_minus_1_i8_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a1, a0, -4
@@ -1630,6 +2024,21 @@ define i8 @atomicrmw_xchg_minus_1_i8_acquire(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_minus_1_i8_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a1, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a2, 255
+; RV32I-ZALRSC-NEXT: sll a2, a2, a0
+; RV32I-ZALRSC-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a1)
+; RV32I-ZALRSC-NEXT: or a4, a3, a2
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a1)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB11_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_minus_1_i8_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -1661,6 +2070,21 @@ define i8 @atomicrmw_xchg_minus_1_i8_acquire(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_minus_1_i8_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a2, 255
+; RV64I-ZALRSC-NEXT: sllw a2, a2, a0
+; RV64I-ZALRSC-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a1)
+; RV64I-ZALRSC-NEXT: or a4, a3, a2
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a1)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB11_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_minus_1_i8_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -1760,6 +2184,21 @@ define i8 @atomicrmw_xchg_minus_1_i8_release(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_minus_1_i8_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a1, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a2, 255
+; RV32I-ZALRSC-NEXT: sll a2, a2, a0
+; RV32I-ZALRSC-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a1)
+; RV32I-ZALRSC-NEXT: or a4, a3, a2
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB12_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_minus_1_i8_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -1791,6 +2230,21 @@ define i8 @atomicrmw_xchg_minus_1_i8_release(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_minus_1_i8_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a2, 255
+; RV64I-ZALRSC-NEXT: sllw a2, a2, a0
+; RV64I-ZALRSC-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a1)
+; RV64I-ZALRSC-NEXT: or a4, a3, a2
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB12_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_minus_1_i8_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -1890,6 +2344,21 @@ define i8 @atomicrmw_xchg_minus_1_i8_acq_rel(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_minus_1_i8_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a1, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a2, 255
+; RV32I-ZALRSC-NEXT: sll a2, a2, a0
+; RV32I-ZALRSC-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a1)
+; RV32I-ZALRSC-NEXT: or a4, a3, a2
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB13_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_minus_1_i8_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -1921,6 +2390,21 @@ define i8 @atomicrmw_xchg_minus_1_i8_acq_rel(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_minus_1_i8_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a2, 255
+; RV64I-ZALRSC-NEXT: sllw a2, a2, a0
+; RV64I-ZALRSC-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a1)
+; RV64I-ZALRSC-NEXT: or a4, a3, a2
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB13_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_minus_1_i8_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -2020,6 +2504,21 @@ define i8 @atomicrmw_xchg_minus_1_i8_seq_cst(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_minus_1_i8_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a1, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a2, 255
+; RV32I-ZALRSC-NEXT: sll a2, a2, a0
+; RV32I-ZALRSC-NEXT: .LBB14_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a3, (a1)
+; RV32I-ZALRSC-NEXT: or a4, a3, a2
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB14_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_minus_1_i8_seq_cst:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -2051,6 +2550,21 @@ define i8 @atomicrmw_xchg_minus_1_i8_seq_cst(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_minus_1_i8_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a2, 255
+; RV64I-ZALRSC-NEXT: sllw a2, a2, a0
+; RV64I-ZALRSC-NEXT: .LBB14_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a3, (a1)
+; RV64I-ZALRSC-NEXT: or a4, a3, a2
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB14_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_minus_1_i8_seq_cst:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -2149,6 +2663,26 @@ define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB15_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV32I-ZALRSC-NEXT: add a5, a4, a1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB15_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_add_i8_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -2179,6 +2713,26 @@ define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB15_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV64I-ZALRSC-NEXT: add a5, a4, a1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB15_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_add_i8_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -2273,6 +2827,26 @@ define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i8_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB16_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV32I-ZALRSC-NEXT: add a5, a4, a1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB16_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_add_i8_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -2323,6 +2897,26 @@ define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i8_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB16_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV64I-ZALRSC-NEXT: add a5, a4, a1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB16_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_add_i8_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -2477,6 +3071,26 @@ define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i8_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB17_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV32I-ZALRSC-NEXT: add a5, a4, a1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB17_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_add_i8_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -2527,6 +3141,26 @@ define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i8_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB17_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV64I-ZALRSC-NEXT: add a5, a4, a1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB17_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_add_i8_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -2681,6 +3315,26 @@ define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i8_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB18_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV32I-ZALRSC-NEXT: add a5, a4, a1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB18_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_add_i8_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -2731,6 +3385,26 @@ define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i8_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB18_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV64I-ZALRSC-NEXT: add a5, a4, a1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB18_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_add_i8_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -2885,6 +3559,26 @@ define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i8_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB19_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a4, (a2)
+; RV32I-ZALRSC-NEXT: add a5, a4, a1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB19_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_add_i8_seq_cst:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -2915,6 +3609,26 @@ define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i8_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB19_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a4, (a2)
+; RV64I-ZALRSC-NEXT: add a5, a4, a1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB19_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_add_i8_seq_cst:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -3009,6 +3723,26 @@ define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB20_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV32I-ZALRSC-NEXT: sub a5, a4, a1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB20_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_sub_i8_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -3039,6 +3773,26 @@ define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB20_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV64I-ZALRSC-NEXT: sub a5, a4, a1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB20_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_sub_i8_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -3137,6 +3891,26 @@ define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i8_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV32I-ZALRSC-NEXT: sub a5, a4, a1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB21_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_sub_i8_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -3187,6 +3961,26 @@ define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i8_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV64I-ZALRSC-NEXT: sub a5, a4, a1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB21_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_sub_i8_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -3345,6 +4139,26 @@ define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i8_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV32I-ZALRSC-NEXT: sub a5, a4, a1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB22_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_sub_i8_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -3395,6 +4209,26 @@ define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i8_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV64I-ZALRSC-NEXT: sub a5, a4, a1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB22_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_sub_i8_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -3553,6 +4387,26 @@ define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i8_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB23_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV32I-ZALRSC-NEXT: sub a5, a4, a1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB23_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_sub_i8_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -3603,6 +4457,26 @@ define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i8_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB23_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV64I-ZALRSC-NEXT: sub a5, a4, a1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB23_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_sub_i8_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -3761,6 +4635,26 @@ define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i8_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB24_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a4, (a2)
+; RV32I-ZALRSC-NEXT: sub a5, a4, a1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB24_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_sub_i8_seq_cst:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -3791,6 +4685,26 @@ define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i8_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB24_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a4, (a2)
+; RV64I-ZALRSC-NEXT: sub a5, a4, a1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB24_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_sub_i8_seq_cst:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -3889,6 +4803,25 @@ define i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: not a3, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: or a1, a1, a3
+; RV32I-ZALRSC-NEXT: .LBB25_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: and a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB25_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_and_i8_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -3913,6 +4846,25 @@ define i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: not a3, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: or a1, a1, a3
+; RV64I-ZALRSC-NEXT: .LBB25_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: and a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB25_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_and_i8_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -3989,6 +4941,25 @@ define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i8_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: not a3, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: or a1, a1, a3
+; RV32I-ZALRSC-NEXT: .LBB26_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: and a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB26_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_and_i8_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -4027,6 +4998,25 @@ define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i8_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: not a3, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: or a1, a1, a3
+; RV64I-ZALRSC-NEXT: .LBB26_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: and a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB26_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_and_i8_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -4145,6 +5135,25 @@ define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i8_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: not a3, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: or a1, a1, a3
+; RV32I-ZALRSC-NEXT: .LBB27_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: and a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB27_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_and_i8_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -4183,6 +5192,25 @@ define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i8_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: not a3, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: or a1, a1, a3
+; RV64I-ZALRSC-NEXT: .LBB27_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: and a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB27_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_and_i8_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -4301,6 +5329,25 @@ define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i8_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: not a3, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: or a1, a1, a3
+; RV32I-ZALRSC-NEXT: .LBB28_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: and a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB28_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_and_i8_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -4339,6 +5386,25 @@ define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i8_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: not a3, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: or a1, a1, a3
+; RV64I-ZALRSC-NEXT: .LBB28_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: and a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB28_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_and_i8_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -4457,6 +5523,25 @@ define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i8_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: not a3, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: or a1, a1, a3
+; RV32I-ZALRSC-NEXT: .LBB29_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV32I-ZALRSC-NEXT: and a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB29_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_and_i8_seq_cst:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -4495,6 +5580,25 @@ define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i8_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: not a3, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: or a1, a1, a3
+; RV64I-ZALRSC-NEXT: .LBB29_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV64I-ZALRSC-NEXT: and a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB29_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_and_i8_seq_cst:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -4613,6 +5717,27 @@ define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB30_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV32I-ZALRSC-NEXT: and a5, a4, a1
+; RV32I-ZALRSC-NEXT: not a5, a5
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB30_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_nand_i8_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -4644,6 +5769,27 @@ define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB30_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV64I-ZALRSC-NEXT: and a5, a4, a1
+; RV64I-ZALRSC-NEXT: not a5, a5
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB30_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_nand_i8_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -4865,6 +6011,27 @@ define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i8_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB31_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV32I-ZALRSC-NEXT: and a5, a4, a1
+; RV32I-ZALRSC-NEXT: not a5, a5
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB31_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_nand_i8_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -4917,6 +6084,27 @@ define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i8_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB31_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV64I-ZALRSC-NEXT: and a5, a4, a1
+; RV64I-ZALRSC-NEXT: not a5, a5
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB31_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_nand_i8_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -5201,6 +6389,27 @@ define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i8_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB32_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV32I-ZALRSC-NEXT: and a5, a4, a1
+; RV32I-ZALRSC-NEXT: not a5, a5
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB32_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_nand_i8_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -5253,6 +6462,27 @@ define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i8_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB32_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV64I-ZALRSC-NEXT: and a5, a4, a1
+; RV64I-ZALRSC-NEXT: not a5, a5
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB32_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_nand_i8_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -5537,6 +6767,27 @@ define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i8_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB33_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV32I-ZALRSC-NEXT: and a5, a4, a1
+; RV32I-ZALRSC-NEXT: not a5, a5
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB33_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_nand_i8_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -5589,6 +6840,27 @@ define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i8_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB33_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV64I-ZALRSC-NEXT: and a5, a4, a1
+; RV64I-ZALRSC-NEXT: not a5, a5
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB33_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_nand_i8_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -5873,6 +7145,27 @@ define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i8_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB34_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a4, (a2)
+; RV32I-ZALRSC-NEXT: and a5, a4, a1
+; RV32I-ZALRSC-NEXT: not a5, a5
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB34_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_nand_i8_seq_cst:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -5904,6 +7197,27 @@ define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i8_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB34_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a4, (a2)
+; RV64I-ZALRSC-NEXT: and a5, a4, a1
+; RV64I-ZALRSC-NEXT: not a5, a5
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB34_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_nand_i8_seq_cst:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -6129,6 +7443,21 @@ define i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB35_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: or a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB35_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_or_i8_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -6149,6 +7478,21 @@ define i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB35_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: or a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB35_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_or_i8_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -6213,6 +7557,21 @@ define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i8_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB36_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: or a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB36_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_or_i8_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -6243,6 +7602,21 @@ define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i8_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB36_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: or a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB36_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_or_i8_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -6337,6 +7711,21 @@ define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i8_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB37_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: or a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB37_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_or_i8_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -6367,6 +7756,21 @@ define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i8_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB37_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: or a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB37_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_or_i8_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -6461,6 +7865,21 @@ define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i8_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB38_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: or a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB38_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_or_i8_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -6491,6 +7910,21 @@ define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i8_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB38_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: or a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB38_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_or_i8_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -6585,6 +8019,21 @@ define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i8_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB39_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV32I-ZALRSC-NEXT: or a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB39_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_or_i8_seq_cst:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -6615,6 +8064,21 @@ define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i8_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB39_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV64I-ZALRSC-NEXT: or a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB39_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_or_i8_seq_cst:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -6709,6 +8173,21 @@ define i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB40_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: xor a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB40_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_xor_i8_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -6729,6 +8208,21 @@ define i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB40_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: xor a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB40_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_xor_i8_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -6793,6 +8287,21 @@ define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i8_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB41_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: xor a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB41_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xor_i8_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -6823,6 +8332,21 @@ define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i8_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB41_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: xor a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB41_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xor_i8_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -6917,6 +8441,21 @@ define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i8_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB42_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: xor a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB42_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xor_i8_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -6947,6 +8486,21 @@ define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i8_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB42_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: xor a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB42_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xor_i8_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -7041,6 +8595,21 @@ define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i8_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB43_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: xor a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB43_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xor_i8_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -7071,6 +8640,21 @@ define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i8_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB43_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: xor a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB43_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xor_i8_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -7165,6 +8749,21 @@ define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i8_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB44_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV32I-ZALRSC-NEXT: xor a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB44_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xor_i8_seq_cst:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -7195,6 +8794,21 @@ define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i8_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB44_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV64I-ZALRSC-NEXT: xor a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB44_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xor_i8_seq_cst:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -7321,6 +8935,35 @@ define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: slli a1, a1, 24
+; RV32I-ZALRSC-NEXT: andi a4, a0, 24
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: srai a1, a1, 24
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: xori a4, a4, 24
+; RV32I-ZALRSC-NEXT: .LBB45_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a7, a1, .LBB45_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB45_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB45_3: # in Loop: Header=BB45_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB45_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_max_i8_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -7392,6 +9035,35 @@ define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: slli a1, a1, 56
+; RV64I-ZALRSC-NEXT: andi a4, a0, 24
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: srai a1, a1, 56
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: xori a4, a4, 56
+; RV64I-ZALRSC-NEXT: .LBB45_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a7, a1, .LBB45_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB45_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB45_3: # in Loop: Header=BB45_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB45_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_max_i8_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -7545,6 +9217,35 @@ define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i8_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: slli a1, a1, 24
+; RV32I-ZALRSC-NEXT: andi a4, a0, 24
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: srai a1, a1, 24
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: xori a4, a4, 24
+; RV32I-ZALRSC-NEXT: .LBB46_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a7, a1, .LBB46_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB46_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB46_3: # in Loop: Header=BB46_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB46_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_max_i8_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -7645,6 +9346,35 @@ define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i8_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: slli a1, a1, 56
+; RV64I-ZALRSC-NEXT: andi a4, a0, 24
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: srai a1, a1, 56
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: xori a4, a4, 56
+; RV64I-ZALRSC-NEXT: .LBB46_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a7, a1, .LBB46_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB46_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB46_3: # in Loop: Header=BB46_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB46_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_max_i8_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -7885,6 +9615,35 @@ define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i8_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: slli a1, a1, 24
+; RV32I-ZALRSC-NEXT: andi a4, a0, 24
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: srai a1, a1, 24
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: xori a4, a4, 24
+; RV32I-ZALRSC-NEXT: .LBB47_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a7, a1, .LBB47_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB47_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB47_3: # in Loop: Header=BB47_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB47_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_max_i8_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -7985,6 +9744,35 @@ define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i8_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: slli a1, a1, 56
+; RV64I-ZALRSC-NEXT: andi a4, a0, 24
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: srai a1, a1, 56
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: xori a4, a4, 56
+; RV64I-ZALRSC-NEXT: .LBB47_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a7, a1, .LBB47_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB47_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB47_3: # in Loop: Header=BB47_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB47_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_max_i8_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -8225,6 +10013,35 @@ define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i8_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: slli a1, a1, 24
+; RV32I-ZALRSC-NEXT: andi a4, a0, 24
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: srai a1, a1, 24
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: xori a4, a4, 24
+; RV32I-ZALRSC-NEXT: .LBB48_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a7, a1, .LBB48_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB48_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB48_3: # in Loop: Header=BB48_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB48_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_max_i8_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -8325,6 +10142,35 @@ define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i8_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: slli a1, a1, 56
+; RV64I-ZALRSC-NEXT: andi a4, a0, 24
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: srai a1, a1, 56
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: xori a4, a4, 56
+; RV64I-ZALRSC-NEXT: .LBB48_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a7, a1, .LBB48_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB48_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB48_3: # in Loop: Header=BB48_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB48_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_max_i8_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -8565,6 +10411,35 @@ define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i8_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: slli a1, a1, 24
+; RV32I-ZALRSC-NEXT: andi a4, a0, 24
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: srai a1, a1, 24
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: xori a4, a4, 24
+; RV32I-ZALRSC-NEXT: .LBB49_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a7, a1, .LBB49_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB49_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB49_3: # in Loop: Header=BB49_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB49_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_max_i8_seq_cst:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -8636,6 +10511,35 @@ define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i8_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: slli a1, a1, 56
+; RV64I-ZALRSC-NEXT: andi a4, a0, 24
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: srai a1, a1, 56
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: xori a4, a4, 56
+; RV64I-ZALRSC-NEXT: .LBB49_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a7, a1, .LBB49_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB49_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB49_3: # in Loop: Header=BB49_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB49_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_max_i8_seq_cst:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -8789,6 +10693,35 @@ define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: slli a1, a1, 24
+; RV32I-ZALRSC-NEXT: andi a4, a0, 24
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: srai a1, a1, 24
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: xori a4, a4, 24
+; RV32I-ZALRSC-NEXT: .LBB50_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a1, a7, .LBB50_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB50_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB50_3: # in Loop: Header=BB50_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB50_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_min_i8_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -8860,6 +10793,35 @@ define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: slli a1, a1, 56
+; RV64I-ZALRSC-NEXT: andi a4, a0, 24
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: srai a1, a1, 56
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: xori a4, a4, 56
+; RV64I-ZALRSC-NEXT: .LBB50_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a1, a7, .LBB50_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB50_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB50_3: # in Loop: Header=BB50_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB50_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_min_i8_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -9013,6 +10975,35 @@ define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i8_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: slli a1, a1, 24
+; RV32I-ZALRSC-NEXT: andi a4, a0, 24
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: srai a1, a1, 24
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: xori a4, a4, 24
+; RV32I-ZALRSC-NEXT: .LBB51_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a1, a7, .LBB51_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB51_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB51_3: # in Loop: Header=BB51_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB51_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_min_i8_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -9113,6 +11104,35 @@ define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i8_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: slli a1, a1, 56
+; RV64I-ZALRSC-NEXT: andi a4, a0, 24
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: srai a1, a1, 56
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: xori a4, a4, 56
+; RV64I-ZALRSC-NEXT: .LBB51_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a1, a7, .LBB51_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB51_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB51_3: # in Loop: Header=BB51_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB51_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_min_i8_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -9353,6 +11373,35 @@ define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i8_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: slli a1, a1, 24
+; RV32I-ZALRSC-NEXT: andi a4, a0, 24
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: srai a1, a1, 24
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: xori a4, a4, 24
+; RV32I-ZALRSC-NEXT: .LBB52_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a1, a7, .LBB52_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB52_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB52_3: # in Loop: Header=BB52_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB52_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_min_i8_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -9453,6 +11502,35 @@ define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i8_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: slli a1, a1, 56
+; RV64I-ZALRSC-NEXT: andi a4, a0, 24
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: srai a1, a1, 56
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: xori a4, a4, 56
+; RV64I-ZALRSC-NEXT: .LBB52_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a1, a7, .LBB52_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB52_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB52_3: # in Loop: Header=BB52_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB52_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_min_i8_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -9693,6 +11771,35 @@ define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i8_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: slli a1, a1, 24
+; RV32I-ZALRSC-NEXT: andi a4, a0, 24
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: srai a1, a1, 24
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: xori a4, a4, 24
+; RV32I-ZALRSC-NEXT: .LBB53_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a1, a7, .LBB53_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB53_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB53_3: # in Loop: Header=BB53_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB53_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_min_i8_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -9793,6 +11900,35 @@ define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i8_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: slli a1, a1, 56
+; RV64I-ZALRSC-NEXT: andi a4, a0, 24
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: srai a1, a1, 56
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: xori a4, a4, 56
+; RV64I-ZALRSC-NEXT: .LBB53_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a1, a7, .LBB53_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB53_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB53_3: # in Loop: Header=BB53_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB53_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_min_i8_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -10033,6 +12169,35 @@ define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i8_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: slli a1, a1, 24
+; RV32I-ZALRSC-NEXT: andi a4, a0, 24
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: srai a1, a1, 24
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: xori a4, a4, 24
+; RV32I-ZALRSC-NEXT: .LBB54_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a1, a7, .LBB54_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB54_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB54_3: # in Loop: Header=BB54_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB54_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_min_i8_seq_cst:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -10104,6 +12269,35 @@ define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i8_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: slli a1, a1, 56
+; RV64I-ZALRSC-NEXT: andi a4, a0, 24
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: srai a1, a1, 56
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: xori a4, a4, 56
+; RV64I-ZALRSC-NEXT: .LBB54_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a1, a7, .LBB54_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB54_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB54_3: # in Loop: Header=BB54_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB54_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_min_i8_seq_cst:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -10255,6 +12449,30 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB55_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a4, a3
+; RV32I-ZALRSC-NEXT: mv a5, a4
+; RV32I-ZALRSC-NEXT: bgeu a6, a1, .LBB55_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB55_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB55_3: # in Loop: Header=BB55_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB55_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_umax_i8_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -10319,6 +12537,30 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB55_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a4, a3
+; RV64I-ZALRSC-NEXT: mv a5, a4
+; RV64I-ZALRSC-NEXT: bgeu a6, a1, .LBB55_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB55_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB55_3: # in Loop: Header=BB55_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB55_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_umax_i8_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -10455,6 +12697,30 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i8_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB56_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a4, a3
+; RV32I-ZALRSC-NEXT: mv a5, a4
+; RV32I-ZALRSC-NEXT: bgeu a6, a1, .LBB56_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB56_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB56_3: # in Loop: Header=BB56_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB56_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_umax_i8_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -10543,6 +12809,30 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i8_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB56_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a4, a3
+; RV64I-ZALRSC-NEXT: mv a5, a4
+; RV64I-ZALRSC-NEXT: bgeu a6, a1, .LBB56_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB56_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB56_3: # in Loop: Header=BB56_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB56_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_umax_i8_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -10751,6 +13041,30 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i8_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB57_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a4, a3
+; RV32I-ZALRSC-NEXT: mv a5, a4
+; RV32I-ZALRSC-NEXT: bgeu a6, a1, .LBB57_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB57_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB57_3: # in Loop: Header=BB57_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB57_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_umax_i8_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -10839,6 +13153,30 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i8_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB57_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a4, a3
+; RV64I-ZALRSC-NEXT: mv a5, a4
+; RV64I-ZALRSC-NEXT: bgeu a6, a1, .LBB57_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB57_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB57_3: # in Loop: Header=BB57_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB57_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_umax_i8_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -11047,6 +13385,30 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i8_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB58_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a4, a3
+; RV32I-ZALRSC-NEXT: mv a5, a4
+; RV32I-ZALRSC-NEXT: bgeu a6, a1, .LBB58_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB58_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB58_3: # in Loop: Header=BB58_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB58_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_umax_i8_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -11135,6 +13497,30 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i8_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB58_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a4, a3
+; RV64I-ZALRSC-NEXT: mv a5, a4
+; RV64I-ZALRSC-NEXT: bgeu a6, a1, .LBB58_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB58_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB58_3: # in Loop: Header=BB58_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB58_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_umax_i8_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -11343,6 +13729,30 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i8_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB59_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a4, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a4, a3
+; RV32I-ZALRSC-NEXT: mv a5, a4
+; RV32I-ZALRSC-NEXT: bgeu a6, a1, .LBB59_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB59_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB59_3: # in Loop: Header=BB59_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB59_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_umax_i8_seq_cst:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -11407,6 +13817,30 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i8_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB59_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a4, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a4, a3
+; RV64I-ZALRSC-NEXT: mv a5, a4
+; RV64I-ZALRSC-NEXT: bgeu a6, a1, .LBB59_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB59_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB59_3: # in Loop: Header=BB59_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB59_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_umax_i8_seq_cst:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -11543,6 +13977,30 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB60_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a4, a3
+; RV32I-ZALRSC-NEXT: mv a5, a4
+; RV32I-ZALRSC-NEXT: bgeu a1, a6, .LBB60_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB60_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB60_3: # in Loop: Header=BB60_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB60_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_umin_i8_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -11607,6 +14065,30 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB60_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a4, a3
+; RV64I-ZALRSC-NEXT: mv a5, a4
+; RV64I-ZALRSC-NEXT: bgeu a1, a6, .LBB60_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB60_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB60_3: # in Loop: Header=BB60_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB60_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_umin_i8_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -11743,6 +14225,30 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i8_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB61_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a4, a3
+; RV32I-ZALRSC-NEXT: mv a5, a4
+; RV32I-ZALRSC-NEXT: bgeu a1, a6, .LBB61_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB61_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB61_3: # in Loop: Header=BB61_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB61_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_umin_i8_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -11831,6 +14337,30 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i8_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB61_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a4, a3
+; RV64I-ZALRSC-NEXT: mv a5, a4
+; RV64I-ZALRSC-NEXT: bgeu a1, a6, .LBB61_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB61_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB61_3: # in Loop: Header=BB61_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB61_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_umin_i8_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -12039,6 +14569,30 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i8_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB62_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a4, a3
+; RV32I-ZALRSC-NEXT: mv a5, a4
+; RV32I-ZALRSC-NEXT: bgeu a1, a6, .LBB62_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB62_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB62_3: # in Loop: Header=BB62_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB62_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_umin_i8_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -12127,6 +14681,30 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i8_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB62_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a4, a3
+; RV64I-ZALRSC-NEXT: mv a5, a4
+; RV64I-ZALRSC-NEXT: bgeu a1, a6, .LBB62_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB62_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB62_3: # in Loop: Header=BB62_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB62_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_umin_i8_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -12335,6 +14913,30 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i8_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB63_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a4, a3
+; RV32I-ZALRSC-NEXT: mv a5, a4
+; RV32I-ZALRSC-NEXT: bgeu a1, a6, .LBB63_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB63_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB63_3: # in Loop: Header=BB63_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB63_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_umin_i8_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -12423,6 +15025,30 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i8_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB63_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a4, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a4, a3
+; RV64I-ZALRSC-NEXT: mv a5, a4
+; RV64I-ZALRSC-NEXT: bgeu a1, a6, .LBB63_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB63_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB63_3: # in Loop: Header=BB63_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB63_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_umin_i8_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -12631,6 +15257,30 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i8_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB64_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a4, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a4, a3
+; RV32I-ZALRSC-NEXT: mv a5, a4
+; RV32I-ZALRSC-NEXT: bgeu a1, a6, .LBB64_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB64_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB64_3: # in Loop: Header=BB64_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB64_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_umin_i8_seq_cst:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -12695,6 +15345,30 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i8_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB64_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a4, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a4, a3
+; RV64I-ZALRSC-NEXT: mv a5, a4
+; RV64I-ZALRSC-NEXT: bgeu a1, a6, .LBB64_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB64_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB64_3: # in Loop: Header=BB64_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB64_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_umin_i8_seq_cst:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -12801,6 +15475,27 @@ define i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB65_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: mv a5, a1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB65_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_xchg_i16_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -12832,6 +15527,27 @@ define i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB65_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: mv a5, a1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB65_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_xchg_i16_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -12929,6 +15645,27 @@ define i16 @atomicrmw_xchg_i16_acquire(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i16_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB66_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: mv a5, a1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB66_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_i16_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -12981,6 +15718,27 @@ define i16 @atomicrmw_xchg_i16_acquire(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i16_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB66_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: mv a5, a1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB66_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_i16_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -13141,6 +15899,27 @@ define i16 @atomicrmw_xchg_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i16_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB67_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: mv a5, a1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB67_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_i16_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -13193,6 +15972,27 @@ define i16 @atomicrmw_xchg_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i16_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB67_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: mv a5, a1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB67_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_i16_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -13353,6 +16153,27 @@ define i16 @atomicrmw_xchg_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i16_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB68_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: mv a5, a1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB68_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_i16_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -13405,6 +16226,27 @@ define i16 @atomicrmw_xchg_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i16_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB68_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: mv a5, a1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB68_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_i16_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -13565,6 +16407,27 @@ define i16 @atomicrmw_xchg_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i16_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB69_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV32I-ZALRSC-NEXT: mv a5, a1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB69_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_xchg_i16_seq_cst:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -13596,6 +16459,27 @@ define i16 @atomicrmw_xchg_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i16_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB69_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV64I-ZALRSC-NEXT: mv a5, a1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB69_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_xchg_i16_seq_cst:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -13697,6 +16581,23 @@ define i16 @atomicrmw_xchg_0_i16_monotonic(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_0_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a1, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a2, 16
+; RV32I-ZALRSC-NEXT: addi a2, a2, -1
+; RV32I-ZALRSC-NEXT: sll a2, a2, a0
+; RV32I-ZALRSC-NEXT: not a2, a2
+; RV32I-ZALRSC-NEXT: .LBB70_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a1)
+; RV32I-ZALRSC-NEXT: and a4, a3, a2
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a1)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB70_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_xchg_0_i16_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a1, a0, -4
@@ -13720,6 +16621,23 @@ define i16 @atomicrmw_xchg_0_i16_monotonic(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_0_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a2, 16
+; RV64I-ZALRSC-NEXT: addi a2, a2, -1
+; RV64I-ZALRSC-NEXT: sllw a2, a2, a0
+; RV64I-ZALRSC-NEXT: not a2, a2
+; RV64I-ZALRSC-NEXT: .LBB70_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a1)
+; RV64I-ZALRSC-NEXT: and a4, a3, a2
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a1)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB70_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_xchg_0_i16_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a1, a0, -4
@@ -13791,6 +16709,23 @@ define i16 @atomicrmw_xchg_0_i16_acquire(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_0_i16_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a1, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a2, 16
+; RV32I-ZALRSC-NEXT: addi a2, a2, -1
+; RV32I-ZALRSC-NEXT: sll a2, a2, a0
+; RV32I-ZALRSC-NEXT: not a2, a2
+; RV32I-ZALRSC-NEXT: .LBB71_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a1)
+; RV32I-ZALRSC-NEXT: and a4, a3, a2
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a1)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB71_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_0_i16_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -13826,6 +16761,23 @@ define i16 @atomicrmw_xchg_0_i16_acquire(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_0_i16_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a2, 16
+; RV64I-ZALRSC-NEXT: addi a2, a2, -1
+; RV64I-ZALRSC-NEXT: sllw a2, a2, a0
+; RV64I-ZALRSC-NEXT: not a2, a2
+; RV64I-ZALRSC-NEXT: .LBB71_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a1)
+; RV64I-ZALRSC-NEXT: and a4, a3, a2
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a1)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB71_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_0_i16_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -13933,6 +16885,23 @@ define i16 @atomicrmw_xchg_0_i16_release(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_0_i16_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a1, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a2, 16
+; RV32I-ZALRSC-NEXT: addi a2, a2, -1
+; RV32I-ZALRSC-NEXT: sll a2, a2, a0
+; RV32I-ZALRSC-NEXT: not a2, a2
+; RV32I-ZALRSC-NEXT: .LBB72_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a1)
+; RV32I-ZALRSC-NEXT: and a4, a3, a2
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB72_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_0_i16_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -13968,6 +16937,23 @@ define i16 @atomicrmw_xchg_0_i16_release(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_0_i16_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a2, 16
+; RV64I-ZALRSC-NEXT: addi a2, a2, -1
+; RV64I-ZALRSC-NEXT: sllw a2, a2, a0
+; RV64I-ZALRSC-NEXT: not a2, a2
+; RV64I-ZALRSC-NEXT: .LBB72_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a1)
+; RV64I-ZALRSC-NEXT: and a4, a3, a2
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB72_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_0_i16_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -14075,6 +17061,23 @@ define i16 @atomicrmw_xchg_0_i16_acq_rel(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_0_i16_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a1, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a2, 16
+; RV32I-ZALRSC-NEXT: addi a2, a2, -1
+; RV32I-ZALRSC-NEXT: sll a2, a2, a0
+; RV32I-ZALRSC-NEXT: not a2, a2
+; RV32I-ZALRSC-NEXT: .LBB73_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a1)
+; RV32I-ZALRSC-NEXT: and a4, a3, a2
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB73_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_0_i16_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -14110,6 +17113,23 @@ define i16 @atomicrmw_xchg_0_i16_acq_rel(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_0_i16_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a2, 16
+; RV64I-ZALRSC-NEXT: addi a2, a2, -1
+; RV64I-ZALRSC-NEXT: sllw a2, a2, a0
+; RV64I-ZALRSC-NEXT: not a2, a2
+; RV64I-ZALRSC-NEXT: .LBB73_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a1)
+; RV64I-ZALRSC-NEXT: and a4, a3, a2
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB73_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_0_i16_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -14217,6 +17237,23 @@ define i16 @atomicrmw_xchg_0_i16_seq_cst(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_0_i16_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a1, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a2, 16
+; RV32I-ZALRSC-NEXT: addi a2, a2, -1
+; RV32I-ZALRSC-NEXT: sll a2, a2, a0
+; RV32I-ZALRSC-NEXT: not a2, a2
+; RV32I-ZALRSC-NEXT: .LBB74_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a3, (a1)
+; RV32I-ZALRSC-NEXT: and a4, a3, a2
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB74_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_0_i16_seq_cst:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -14252,6 +17289,23 @@ define i16 @atomicrmw_xchg_0_i16_seq_cst(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_0_i16_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a2, 16
+; RV64I-ZALRSC-NEXT: addi a2, a2, -1
+; RV64I-ZALRSC-NEXT: sllw a2, a2, a0
+; RV64I-ZALRSC-NEXT: not a2, a2
+; RV64I-ZALRSC-NEXT: .LBB74_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a3, (a1)
+; RV64I-ZALRSC-NEXT: and a4, a3, a2
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB74_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_0_i16_seq_cst:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -14360,6 +17414,22 @@ define i16 @atomicrmw_xchg_minus_1_i16_monotonic(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_minus_1_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a1, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a2, 16
+; RV32I-ZALRSC-NEXT: addi a2, a2, -1
+; RV32I-ZALRSC-NEXT: sll a2, a2, a0
+; RV32I-ZALRSC-NEXT: .LBB75_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a1)
+; RV32I-ZALRSC-NEXT: or a4, a3, a2
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a1)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB75_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_xchg_minus_1_i16_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a1, a0, -4
@@ -14383,6 +17453,22 @@ define i16 @atomicrmw_xchg_minus_1_i16_monotonic(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_minus_1_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a2, 16
+; RV64I-ZALRSC-NEXT: addi a2, a2, -1
+; RV64I-ZALRSC-NEXT: sllw a2, a2, a0
+; RV64I-ZALRSC-NEXT: .LBB75_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a1)
+; RV64I-ZALRSC-NEXT: or a4, a3, a2
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a1)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB75_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_xchg_minus_1_i16_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a1, a0, -4
@@ -14456,6 +17542,22 @@ define i16 @atomicrmw_xchg_minus_1_i16_acquire(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_minus_1_i16_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a1, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a2, 16
+; RV32I-ZALRSC-NEXT: addi a2, a2, -1
+; RV32I-ZALRSC-NEXT: sll a2, a2, a0
+; RV32I-ZALRSC-NEXT: .LBB76_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a1)
+; RV32I-ZALRSC-NEXT: or a4, a3, a2
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a1)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB76_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_minus_1_i16_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -14490,6 +17592,22 @@ define i16 @atomicrmw_xchg_minus_1_i16_acquire(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_minus_1_i16_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a2, 16
+; RV64I-ZALRSC-NEXT: addi a2, a2, -1
+; RV64I-ZALRSC-NEXT: sllw a2, a2, a0
+; RV64I-ZALRSC-NEXT: .LBB76_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a1)
+; RV64I-ZALRSC-NEXT: or a4, a3, a2
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a1)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB76_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_minus_1_i16_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -14596,6 +17714,22 @@ define i16 @atomicrmw_xchg_minus_1_i16_release(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_minus_1_i16_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a1, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a2, 16
+; RV32I-ZALRSC-NEXT: addi a2, a2, -1
+; RV32I-ZALRSC-NEXT: sll a2, a2, a0
+; RV32I-ZALRSC-NEXT: .LBB77_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a1)
+; RV32I-ZALRSC-NEXT: or a4, a3, a2
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB77_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_minus_1_i16_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -14630,6 +17764,22 @@ define i16 @atomicrmw_xchg_minus_1_i16_release(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_minus_1_i16_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a2, 16
+; RV64I-ZALRSC-NEXT: addi a2, a2, -1
+; RV64I-ZALRSC-NEXT: sllw a2, a2, a0
+; RV64I-ZALRSC-NEXT: .LBB77_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a1)
+; RV64I-ZALRSC-NEXT: or a4, a3, a2
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB77_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_minus_1_i16_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -14736,6 +17886,22 @@ define i16 @atomicrmw_xchg_minus_1_i16_acq_rel(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_minus_1_i16_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a1, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a2, 16
+; RV32I-ZALRSC-NEXT: addi a2, a2, -1
+; RV32I-ZALRSC-NEXT: sll a2, a2, a0
+; RV32I-ZALRSC-NEXT: .LBB78_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a1)
+; RV32I-ZALRSC-NEXT: or a4, a3, a2
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB78_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_minus_1_i16_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -14770,6 +17936,22 @@ define i16 @atomicrmw_xchg_minus_1_i16_acq_rel(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_minus_1_i16_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a2, 16
+; RV64I-ZALRSC-NEXT: addi a2, a2, -1
+; RV64I-ZALRSC-NEXT: sllw a2, a2, a0
+; RV64I-ZALRSC-NEXT: .LBB78_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a1)
+; RV64I-ZALRSC-NEXT: or a4, a3, a2
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB78_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_minus_1_i16_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -14876,6 +18058,22 @@ define i16 @atomicrmw_xchg_minus_1_i16_seq_cst(ptr %a) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_minus_1_i16_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a1, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a2, 16
+; RV32I-ZALRSC-NEXT: addi a2, a2, -1
+; RV32I-ZALRSC-NEXT: sll a2, a2, a0
+; RV32I-ZALRSC-NEXT: .LBB79_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a3, (a1)
+; RV32I-ZALRSC-NEXT: or a4, a3, a2
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB79_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_minus_1_i16_seq_cst:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -14910,6 +18108,22 @@ define i16 @atomicrmw_xchg_minus_1_i16_seq_cst(ptr %a) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_minus_1_i16_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a2, 16
+; RV64I-ZALRSC-NEXT: addi a2, a2, -1
+; RV64I-ZALRSC-NEXT: sllw a2, a2, a0
+; RV64I-ZALRSC-NEXT: .LBB79_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a3, (a1)
+; RV64I-ZALRSC-NEXT: or a4, a3, a2
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a1)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB79_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xchg_minus_1_i16_seq_cst:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a1, a0, -4
@@ -15014,6 +18228,27 @@ define i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB80_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: add a5, a3, a1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB80_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_add_i16_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -15045,6 +18280,27 @@ define i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB80_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: add a5, a3, a1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB80_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_add_i16_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -15142,6 +18398,27 @@ define i16 @atomicrmw_add_i16_acquire(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i16_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB81_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: add a5, a3, a1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB81_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_add_i16_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -15194,6 +18471,27 @@ define i16 @atomicrmw_add_i16_acquire(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i16_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB81_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: add a5, a3, a1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB81_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_add_i16_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -15354,6 +18652,27 @@ define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i16_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB82_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: add a5, a3, a1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB82_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_add_i16_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -15406,6 +18725,27 @@ define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i16_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB82_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: add a5, a3, a1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB82_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_add_i16_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -15566,6 +18906,27 @@ define i16 @atomicrmw_add_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i16_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB83_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: add a5, a3, a1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB83_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_add_i16_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -15618,6 +18979,27 @@ define i16 @atomicrmw_add_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i16_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB83_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: add a5, a3, a1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB83_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_add_i16_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -15778,6 +19160,27 @@ define i16 @atomicrmw_add_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i16_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB84_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV32I-ZALRSC-NEXT: add a5, a3, a1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB84_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_add_i16_seq_cst:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -15809,6 +19212,27 @@ define i16 @atomicrmw_add_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i16_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB84_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV64I-ZALRSC-NEXT: add a5, a3, a1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB84_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_add_i16_seq_cst:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -15906,6 +19330,27 @@ define i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB85_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: sub a5, a3, a1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB85_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_sub_i16_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -15937,6 +19382,27 @@ define i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB85_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: sub a5, a3, a1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB85_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_sub_i16_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -16038,6 +19504,27 @@ define i16 @atomicrmw_sub_i16_acquire(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i16_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB86_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: sub a5, a3, a1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB86_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_sub_i16_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -16090,6 +19577,27 @@ define i16 @atomicrmw_sub_i16_acquire(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i16_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB86_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: sub a5, a3, a1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB86_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_sub_i16_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -16254,6 +19762,27 @@ define i16 @atomicrmw_sub_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i16_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB87_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: sub a5, a3, a1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB87_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_sub_i16_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -16306,6 +19835,27 @@ define i16 @atomicrmw_sub_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i16_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB87_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: sub a5, a3, a1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB87_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_sub_i16_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -16470,6 +20020,27 @@ define i16 @atomicrmw_sub_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i16_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB88_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: sub a5, a3, a1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB88_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_sub_i16_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -16522,6 +20093,27 @@ define i16 @atomicrmw_sub_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i16_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB88_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: sub a5, a3, a1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB88_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_sub_i16_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -16686,6 +20278,27 @@ define i16 @atomicrmw_sub_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i16_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB89_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV32I-ZALRSC-NEXT: sub a5, a3, a1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB89_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_sub_i16_seq_cst:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -16717,6 +20330,27 @@ define i16 @atomicrmw_sub_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i16_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB89_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV64I-ZALRSC-NEXT: sub a5, a3, a1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB89_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_sub_i16_seq_cst:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -16818,6 +20452,26 @@ define i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: not a3, a4
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: or a1, a1, a3
+; RV32I-ZALRSC-NEXT: .LBB90_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: and a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB90_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_and_i16_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -16843,6 +20497,26 @@ define i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: not a3, a4
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: or a1, a1, a3
+; RV64I-ZALRSC-NEXT: .LBB90_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: and a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB90_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_and_i16_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -16922,6 +20596,26 @@ define i16 @atomicrmw_and_i16_acquire(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i16_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: not a3, a4
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: or a1, a1, a3
+; RV32I-ZALRSC-NEXT: .LBB91_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: and a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB91_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_and_i16_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -16962,6 +20656,26 @@ define i16 @atomicrmw_and_i16_acquire(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i16_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: not a3, a4
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: or a1, a1, a3
+; RV64I-ZALRSC-NEXT: .LBB91_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: and a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB91_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_and_i16_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -17086,6 +20800,26 @@ define i16 @atomicrmw_and_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i16_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: not a3, a4
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: or a1, a1, a3
+; RV32I-ZALRSC-NEXT: .LBB92_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: and a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB92_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_and_i16_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -17126,6 +20860,26 @@ define i16 @atomicrmw_and_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i16_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: not a3, a4
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: or a1, a1, a3
+; RV64I-ZALRSC-NEXT: .LBB92_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: and a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB92_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_and_i16_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -17250,6 +21004,26 @@ define i16 @atomicrmw_and_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i16_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: not a3, a4
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: or a1, a1, a3
+; RV32I-ZALRSC-NEXT: .LBB93_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: and a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB93_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_and_i16_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -17290,6 +21064,26 @@ define i16 @atomicrmw_and_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i16_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: not a3, a4
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: or a1, a1, a3
+; RV64I-ZALRSC-NEXT: .LBB93_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: and a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB93_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_and_i16_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -17414,6 +21208,26 @@ define i16 @atomicrmw_and_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i16_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: not a3, a4
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: or a1, a1, a3
+; RV32I-ZALRSC-NEXT: .LBB94_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV32I-ZALRSC-NEXT: and a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB94_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_and_i16_seq_cst:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -17454,6 +21268,26 @@ define i16 @atomicrmw_and_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i16_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: not a3, a4
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: or a1, a1, a3
+; RV64I-ZALRSC-NEXT: .LBB94_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV64I-ZALRSC-NEXT: and a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB94_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_and_i16_seq_cst:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -17578,6 +21412,28 @@ define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB95_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: and a5, a3, a1
+; RV32I-ZALRSC-NEXT: not a5, a5
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB95_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_nand_i16_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -17610,6 +21466,28 @@ define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB95_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: and a5, a3, a1
+; RV64I-ZALRSC-NEXT: not a5, a5
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB95_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_nand_i16_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -17838,6 +21716,28 @@ define i16 @atomicrmw_nand_i16_acquire(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i16_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB96_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: and a5, a3, a1
+; RV32I-ZALRSC-NEXT: not a5, a5
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB96_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_nand_i16_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -17892,6 +21792,28 @@ define i16 @atomicrmw_nand_i16_acquire(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i16_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB96_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: and a5, a3, a1
+; RV64I-ZALRSC-NEXT: not a5, a5
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB96_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_nand_i16_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -18186,6 +22108,28 @@ define i16 @atomicrmw_nand_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i16_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB97_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: and a5, a3, a1
+; RV32I-ZALRSC-NEXT: not a5, a5
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB97_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_nand_i16_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -18240,6 +22184,28 @@ define i16 @atomicrmw_nand_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i16_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB97_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: and a5, a3, a1
+; RV64I-ZALRSC-NEXT: not a5, a5
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB97_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_nand_i16_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -18534,6 +22500,28 @@ define i16 @atomicrmw_nand_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i16_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB98_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: and a5, a3, a1
+; RV32I-ZALRSC-NEXT: not a5, a5
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB98_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_nand_i16_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -18588,6 +22576,28 @@ define i16 @atomicrmw_nand_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i16_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB98_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: and a5, a3, a1
+; RV64I-ZALRSC-NEXT: not a5, a5
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB98_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_nand_i16_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -18882,6 +22892,28 @@ define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i16_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB99_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV32I-ZALRSC-NEXT: and a5, a3, a1
+; RV32I-ZALRSC-NEXT: not a5, a5
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB99_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_nand_i16_seq_cst:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -18914,6 +22946,28 @@ define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i16_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB99_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV64I-ZALRSC-NEXT: and a5, a3, a1
+; RV64I-ZALRSC-NEXT: not a5, a5
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB99_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_nand_i16_seq_cst:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -19146,6 +23200,22 @@ define i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: srli a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB100_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: or a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB100_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_or_i16_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -19167,6 +23237,22 @@ define i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: srli a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB100_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: or a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB100_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_or_i16_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -19234,6 +23320,22 @@ define i16 @atomicrmw_or_i16_acquire(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i16_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: srli a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB101_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: or a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB101_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_or_i16_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -19266,6 +23368,22 @@ define i16 @atomicrmw_or_i16_acquire(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i16_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: srli a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB101_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: or a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB101_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_or_i16_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -19366,6 +23484,22 @@ define i16 @atomicrmw_or_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i16_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: srli a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB102_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: or a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB102_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_or_i16_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -19398,6 +23532,22 @@ define i16 @atomicrmw_or_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i16_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: srli a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB102_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: or a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB102_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_or_i16_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -19498,6 +23648,22 @@ define i16 @atomicrmw_or_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i16_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: srli a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB103_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: or a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB103_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_or_i16_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -19530,6 +23696,22 @@ define i16 @atomicrmw_or_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i16_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: srli a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB103_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: or a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB103_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_or_i16_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -19630,6 +23812,22 @@ define i16 @atomicrmw_or_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i16_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: srli a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB104_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV32I-ZALRSC-NEXT: or a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB104_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_or_i16_seq_cst:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -19662,6 +23860,22 @@ define i16 @atomicrmw_or_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i16_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: srli a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB104_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV64I-ZALRSC-NEXT: or a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB104_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_or_i16_seq_cst:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -19762,6 +23976,22 @@ define i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: srli a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB105_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: xor a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB105_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_xor_i16_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -19783,6 +24013,22 @@ define i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: srli a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB105_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: xor a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB105_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_xor_i16_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -19850,6 +24096,22 @@ define i16 @atomicrmw_xor_i16_acquire(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i16_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: srli a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB106_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: xor a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB106_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xor_i16_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -19882,6 +24144,22 @@ define i16 @atomicrmw_xor_i16_acquire(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i16_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: srli a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB106_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: xor a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB106_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xor_i16_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -19982,6 +24260,22 @@ define i16 @atomicrmw_xor_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i16_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: srli a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB107_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: xor a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB107_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xor_i16_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -20014,6 +24308,22 @@ define i16 @atomicrmw_xor_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i16_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: srli a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB107_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: xor a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB107_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xor_i16_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -20114,6 +24424,22 @@ define i16 @atomicrmw_xor_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i16_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: srli a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB108_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: xor a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB108_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xor_i16_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -20146,6 +24472,22 @@ define i16 @atomicrmw_xor_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i16_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: srli a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB108_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: xor a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB108_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xor_i16_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -20246,6 +24588,22 @@ define i16 @atomicrmw_xor_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i16_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: srli a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB109_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV32I-ZALRSC-NEXT: xor a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB109_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_xor_i16_seq_cst:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -20278,6 +24636,22 @@ define i16 @atomicrmw_xor_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i16_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: srli a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB109_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV64I-ZALRSC-NEXT: xor a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB109_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_xor_i16_seq_cst:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -20410,6 +24784,37 @@ define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: li a4, 16
+; RV32I-ZALRSC-NEXT: andi a5, a0, 24
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: srai a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: sub a4, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB110_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a7, a1, .LBB110_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB110_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB110_3: # in Loop: Header=BB110_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB110_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_max_i16_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -20483,6 +24888,37 @@ define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: li a4, 48
+; RV64I-ZALRSC-NEXT: andi a5, a0, 24
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: srai a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: sub a4, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB110_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a7, a1, .LBB110_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB110_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB110_3: # in Loop: Header=BB110_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB110_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_max_i16_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -20642,6 +25078,37 @@ define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i16_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: li a4, 16
+; RV32I-ZALRSC-NEXT: andi a5, a0, 24
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: srai a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: sub a4, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB111_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a7, a1, .LBB111_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB111_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB111_3: # in Loop: Header=BB111_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB111_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_max_i16_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -20746,6 +25213,37 @@ define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i16_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: li a4, 48
+; RV64I-ZALRSC-NEXT: andi a5, a0, 24
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: srai a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: sub a4, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB111_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a7, a1, .LBB111_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB111_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB111_3: # in Loop: Header=BB111_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB111_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_max_i16_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -20998,6 +25496,37 @@ define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i16_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: li a4, 16
+; RV32I-ZALRSC-NEXT: andi a5, a0, 24
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: srai a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: sub a4, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB112_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a7, a1, .LBB112_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB112_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB112_3: # in Loop: Header=BB112_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB112_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_max_i16_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -21102,6 +25631,37 @@ define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i16_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: li a4, 48
+; RV64I-ZALRSC-NEXT: andi a5, a0, 24
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: srai a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: sub a4, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB112_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a7, a1, .LBB112_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB112_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB112_3: # in Loop: Header=BB112_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB112_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_max_i16_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -21354,6 +25914,37 @@ define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i16_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: li a4, 16
+; RV32I-ZALRSC-NEXT: andi a5, a0, 24
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: srai a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: sub a4, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB113_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a7, a1, .LBB113_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB113_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB113_3: # in Loop: Header=BB113_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB113_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_max_i16_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -21458,6 +26049,37 @@ define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i16_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: li a4, 48
+; RV64I-ZALRSC-NEXT: andi a5, a0, 24
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: srai a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: sub a4, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB113_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a7, a1, .LBB113_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB113_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB113_3: # in Loop: Header=BB113_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB113_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_max_i16_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -21710,6 +26332,37 @@ define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i16_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: li a4, 16
+; RV32I-ZALRSC-NEXT: andi a5, a0, 24
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: srai a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: sub a4, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB114_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a7, a1, .LBB114_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB114_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB114_3: # in Loop: Header=BB114_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB114_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_max_i16_seq_cst:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -21783,6 +26436,37 @@ define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i16_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: li a4, 48
+; RV64I-ZALRSC-NEXT: andi a5, a0, 24
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: srai a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: sub a4, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB114_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a7, a1, .LBB114_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB114_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB114_3: # in Loop: Header=BB114_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB114_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_max_i16_seq_cst:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -21942,6 +26626,37 @@ define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: li a4, 16
+; RV32I-ZALRSC-NEXT: andi a5, a0, 24
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: srai a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: sub a4, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB115_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a1, a7, .LBB115_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB115_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB115_3: # in Loop: Header=BB115_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB115_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_min_i16_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -22015,6 +26730,37 @@ define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: li a4, 48
+; RV64I-ZALRSC-NEXT: andi a5, a0, 24
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: srai a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: sub a4, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB115_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a1, a7, .LBB115_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB115_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB115_3: # in Loop: Header=BB115_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB115_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_min_i16_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -22174,6 +26920,37 @@ define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i16_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: li a4, 16
+; RV32I-ZALRSC-NEXT: andi a5, a0, 24
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: srai a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: sub a4, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB116_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a1, a7, .LBB116_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB116_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB116_3: # in Loop: Header=BB116_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB116_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_min_i16_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -22278,6 +27055,37 @@ define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i16_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: li a4, 48
+; RV64I-ZALRSC-NEXT: andi a5, a0, 24
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: srai a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: sub a4, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB116_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a1, a7, .LBB116_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB116_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB116_3: # in Loop: Header=BB116_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB116_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_min_i16_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -22530,6 +27338,37 @@ define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i16_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: li a4, 16
+; RV32I-ZALRSC-NEXT: andi a5, a0, 24
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: srai a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: sub a4, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB117_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a1, a7, .LBB117_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB117_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB117_3: # in Loop: Header=BB117_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB117_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_min_i16_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -22634,6 +27473,37 @@ define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i16_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: li a4, 48
+; RV64I-ZALRSC-NEXT: andi a5, a0, 24
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: srai a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: sub a4, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB117_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a1, a7, .LBB117_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB117_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB117_3: # in Loop: Header=BB117_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB117_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_min_i16_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -22886,6 +27756,37 @@ define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i16_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: li a4, 16
+; RV32I-ZALRSC-NEXT: andi a5, a0, 24
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: srai a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: sub a4, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB118_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a1, a7, .LBB118_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB118_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB118_3: # in Loop: Header=BB118_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB118_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_min_i16_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -22990,6 +27891,37 @@ define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i16_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: li a4, 48
+; RV64I-ZALRSC-NEXT: andi a5, a0, 24
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: srai a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: sub a4, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB118_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a1, a7, .LBB118_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB118_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB118_3: # in Loop: Header=BB118_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB118_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_min_i16_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -23242,6 +28174,37 @@ define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i16_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: li a4, 16
+; RV32I-ZALRSC-NEXT: andi a5, a0, 24
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: srai a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: sub a4, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB119_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a1, a7, .LBB119_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB119_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB119_3: # in Loop: Header=BB119_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB119_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_min_i16_seq_cst:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -23315,6 +28278,37 @@ define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i16_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: li a4, 48
+; RV64I-ZALRSC-NEXT: andi a5, a0, 24
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: srai a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: sub a4, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB119_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a1, a7, .LBB119_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB119_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB119_3: # in Loop: Header=BB119_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB119_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_min_i16_seq_cst:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -23476,6 +28470,31 @@ define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB120_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a3, a4
+; RV32I-ZALRSC-NEXT: mv a5, a3
+; RV32I-ZALRSC-NEXT: bgeu a6, a1, .LBB120_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB120_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: .LBB120_3: # in Loop: Header=BB120_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB120_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_umax_i16_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -23545,6 +28564,31 @@ define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB120_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a3, a4
+; RV64I-ZALRSC-NEXT: mv a5, a3
+; RV64I-ZALRSC-NEXT: bgeu a6, a1, .LBB120_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB120_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: .LBB120_3: # in Loop: Header=BB120_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB120_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_umax_i16_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -23688,6 +28732,31 @@ define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i16_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB121_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a3, a4
+; RV32I-ZALRSC-NEXT: mv a5, a3
+; RV32I-ZALRSC-NEXT: bgeu a6, a1, .LBB121_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB121_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: .LBB121_3: # in Loop: Header=BB121_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB121_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_umax_i16_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -23782,6 +28851,31 @@ define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i16_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB121_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a3, a4
+; RV64I-ZALRSC-NEXT: mv a5, a3
+; RV64I-ZALRSC-NEXT: bgeu a6, a1, .LBB121_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB121_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: .LBB121_3: # in Loop: Header=BB121_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB121_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_umax_i16_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -24000,6 +29094,31 @@ define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i16_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB122_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a3, a4
+; RV32I-ZALRSC-NEXT: mv a5, a3
+; RV32I-ZALRSC-NEXT: bgeu a6, a1, .LBB122_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB122_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: .LBB122_3: # in Loop: Header=BB122_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB122_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_umax_i16_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -24094,6 +29213,31 @@ define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i16_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB122_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a3, a4
+; RV64I-ZALRSC-NEXT: mv a5, a3
+; RV64I-ZALRSC-NEXT: bgeu a6, a1, .LBB122_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB122_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: .LBB122_3: # in Loop: Header=BB122_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB122_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_umax_i16_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -24312,6 +29456,31 @@ define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i16_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB123_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a3, a4
+; RV32I-ZALRSC-NEXT: mv a5, a3
+; RV32I-ZALRSC-NEXT: bgeu a6, a1, .LBB123_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB123_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: .LBB123_3: # in Loop: Header=BB123_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB123_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_umax_i16_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -24406,6 +29575,31 @@ define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i16_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB123_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a3, a4
+; RV64I-ZALRSC-NEXT: mv a5, a3
+; RV64I-ZALRSC-NEXT: bgeu a6, a1, .LBB123_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB123_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: .LBB123_3: # in Loop: Header=BB123_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB123_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_umax_i16_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -24624,6 +29818,31 @@ define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i16_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB124_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a3, a4
+; RV32I-ZALRSC-NEXT: mv a5, a3
+; RV32I-ZALRSC-NEXT: bgeu a6, a1, .LBB124_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB124_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: .LBB124_3: # in Loop: Header=BB124_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB124_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_umax_i16_seq_cst:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -24693,6 +29912,31 @@ define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i16_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB124_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a3, a4
+; RV64I-ZALRSC-NEXT: mv a5, a3
+; RV64I-ZALRSC-NEXT: bgeu a6, a1, .LBB124_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB124_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: .LBB124_3: # in Loop: Header=BB124_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB124_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_umax_i16_seq_cst:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -24836,6 +30080,31 @@ define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB125_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a3, a4
+; RV32I-ZALRSC-NEXT: mv a5, a3
+; RV32I-ZALRSC-NEXT: bgeu a1, a6, .LBB125_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB125_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: .LBB125_3: # in Loop: Header=BB125_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB125_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_umin_i16_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -24905,6 +30174,31 @@ define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB125_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a3, a4
+; RV64I-ZALRSC-NEXT: mv a5, a3
+; RV64I-ZALRSC-NEXT: bgeu a1, a6, .LBB125_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB125_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: .LBB125_3: # in Loop: Header=BB125_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB125_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_umin_i16_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -25048,6 +30342,31 @@ define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i16_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB126_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a3, a4
+; RV32I-ZALRSC-NEXT: mv a5, a3
+; RV32I-ZALRSC-NEXT: bgeu a1, a6, .LBB126_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB126_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: .LBB126_3: # in Loop: Header=BB126_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB126_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_umin_i16_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -25142,6 +30461,31 @@ define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i16_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB126_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a3, a4
+; RV64I-ZALRSC-NEXT: mv a5, a3
+; RV64I-ZALRSC-NEXT: bgeu a1, a6, .LBB126_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB126_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: .LBB126_3: # in Loop: Header=BB126_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB126_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_umin_i16_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -25360,6 +30704,31 @@ define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i16_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB127_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a3, a4
+; RV32I-ZALRSC-NEXT: mv a5, a3
+; RV32I-ZALRSC-NEXT: bgeu a1, a6, .LBB127_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB127_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: .LBB127_3: # in Loop: Header=BB127_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB127_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_umin_i16_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -25454,6 +30823,31 @@ define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i16_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB127_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a3, a4
+; RV64I-ZALRSC-NEXT: mv a5, a3
+; RV64I-ZALRSC-NEXT: bgeu a1, a6, .LBB127_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB127_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: .LBB127_3: # in Loop: Header=BB127_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB127_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_umin_i16_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -25672,6 +31066,31 @@ define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i16_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB128_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a3, a4
+; RV32I-ZALRSC-NEXT: mv a5, a3
+; RV32I-ZALRSC-NEXT: bgeu a1, a6, .LBB128_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB128_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: .LBB128_3: # in Loop: Header=BB128_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB128_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_umin_i16_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -25766,6 +31185,31 @@ define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i16_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB128_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a3, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a3, a4
+; RV64I-ZALRSC-NEXT: mv a5, a3
+; RV64I-ZALRSC-NEXT: bgeu a1, a6, .LBB128_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB128_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: .LBB128_3: # in Loop: Header=BB128_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB128_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_umin_i16_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: andi a2, a0, -4
@@ -25984,6 +31428,31 @@ define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i16_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB129_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a3, a4
+; RV32I-ZALRSC-NEXT: mv a5, a3
+; RV32I-ZALRSC-NEXT: bgeu a1, a6, .LBB129_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB129_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: .LBB129_3: # in Loop: Header=BB129_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB129_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_umin_i16_seq_cst:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -26053,6 +31522,31 @@ define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i16_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB129_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a3, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a3, a4
+; RV64I-ZALRSC-NEXT: mv a5, a3
+; RV64I-ZALRSC-NEXT: bgeu a1, a6, .LBB129_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB129_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: .LBB129_3: # in Loop: Header=BB129_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB129_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_umin_i16_seq_cst:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: andi a2, a0, -4
@@ -26162,6 +31656,17 @@ define i32 @atomicrmw_xchg_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB130_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB130_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_xchg_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: amoswap.w a0, a1, (a0)
@@ -26177,6 +31682,17 @@ define i32 @atomicrmw_xchg_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB130_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB130_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomicrmw_xchg_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoswap.w a0, a1, (a0)
@@ -26196,6 +31712,17 @@ define i32 @atomicrmw_xchg_i32_acquire(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i32_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB131_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB131_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_xchg_i32_acquire:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amoswap.w.aq a0, a1, (a0)
@@ -26216,6 +31743,17 @@ define i32 @atomicrmw_xchg_i32_acquire(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i32_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB131_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB131_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_xchg_i32_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoswap.w.aq a0, a1, (a0)
@@ -26240,6 +31778,17 @@ define i32 @atomicrmw_xchg_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i32_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB132_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB132_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_xchg_i32_release:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amoswap.w.rl a0, a1, (a0)
@@ -26260,6 +31809,17 @@ define i32 @atomicrmw_xchg_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i32_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB132_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB132_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_xchg_i32_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoswap.w.rl a0, a1, (a0)
@@ -26284,6 +31844,17 @@ define i32 @atomicrmw_xchg_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i32_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB133_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB133_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_xchg_i32_acq_rel:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amoswap.w.aqrl a0, a1, (a0)
@@ -26304,6 +31875,17 @@ define i32 @atomicrmw_xchg_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i32_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB133_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB133_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_xchg_i32_acq_rel:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoswap.w.aqrl a0, a1, (a0)
@@ -26328,6 +31910,17 @@ define i32 @atomicrmw_xchg_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i32_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB134_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB134_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_xchg_i32_seq_cst:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amoswap.w.aqrl a0, a1, (a0)
@@ -26348,6 +31941,17 @@ define i32 @atomicrmw_xchg_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i32_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB134_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB134_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_xchg_i32_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoswap.w.aqrl a0, a1, (a0)
@@ -26372,6 +31976,17 @@ define i32 @atomicrmw_add_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB135_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: add a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB135_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_add_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: amoadd.w a0, a1, (a0)
@@ -26387,6 +32002,17 @@ define i32 @atomicrmw_add_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB135_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV64I-ZALRSC-NEXT: add a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB135_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomicrmw_add_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoadd.w a0, a1, (a0)
@@ -26406,6 +32032,17 @@ define i32 @atomicrmw_add_i32_acquire(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i32_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB136_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV32I-ZALRSC-NEXT: add a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB136_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_add_i32_acquire:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amoadd.w.aq a0, a1, (a0)
@@ -26426,6 +32063,17 @@ define i32 @atomicrmw_add_i32_acquire(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i32_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB136_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: add a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB136_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_add_i32_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoadd.w.aq a0, a1, (a0)
@@ -26450,6 +32098,17 @@ define i32 @atomicrmw_add_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i32_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB137_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: add a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB137_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_add_i32_release:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amoadd.w.rl a0, a1, (a0)
@@ -26470,6 +32129,17 @@ define i32 @atomicrmw_add_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i32_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB137_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV64I-ZALRSC-NEXT: add a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB137_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_add_i32_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoadd.w.rl a0, a1, (a0)
@@ -26494,6 +32164,17 @@ define i32 @atomicrmw_add_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i32_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB138_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV32I-ZALRSC-NEXT: add a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB138_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_add_i32_acq_rel:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amoadd.w.aqrl a0, a1, (a0)
@@ -26514,6 +32195,17 @@ define i32 @atomicrmw_add_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i32_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB138_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: add a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB138_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_add_i32_acq_rel:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoadd.w.aqrl a0, a1, (a0)
@@ -26538,6 +32230,17 @@ define i32 @atomicrmw_add_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i32_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB139_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a2, (a0)
+; RV32I-ZALRSC-NEXT: add a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB139_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_add_i32_seq_cst:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amoadd.w.aqrl a0, a1, (a0)
@@ -26558,6 +32261,17 @@ define i32 @atomicrmw_add_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i32_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB139_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a2, (a0)
+; RV64I-ZALRSC-NEXT: add a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB139_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_add_i32_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoadd.w.aqrl a0, a1, (a0)
@@ -26582,6 +32296,17 @@ define i32 @atomicrmw_sub_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB140_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: sub a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB140_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_sub_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: neg a1, a1
@@ -26598,6 +32323,17 @@ define i32 @atomicrmw_sub_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB140_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV64I-ZALRSC-NEXT: sub a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB140_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomicrmw_sub_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: neg a1, a1
@@ -26618,6 +32354,17 @@ define i32 @atomicrmw_sub_i32_acquire(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i32_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB141_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV32I-ZALRSC-NEXT: sub a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB141_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_sub_i32_acquire:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: neg a1, a1
@@ -26640,6 +32387,17 @@ define i32 @atomicrmw_sub_i32_acquire(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i32_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB141_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: sub a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB141_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_sub_i32_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: neg a1, a1
@@ -26666,6 +32424,17 @@ define i32 @atomicrmw_sub_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i32_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB142_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: sub a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB142_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_sub_i32_release:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: neg a1, a1
@@ -26688,6 +32457,17 @@ define i32 @atomicrmw_sub_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i32_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB142_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV64I-ZALRSC-NEXT: sub a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB142_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_sub_i32_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: neg a1, a1
@@ -26714,6 +32494,17 @@ define i32 @atomicrmw_sub_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i32_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB143_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV32I-ZALRSC-NEXT: sub a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB143_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_sub_i32_acq_rel:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: neg a1, a1
@@ -26736,6 +32527,17 @@ define i32 @atomicrmw_sub_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i32_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB143_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: sub a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB143_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_sub_i32_acq_rel:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: neg a1, a1
@@ -26762,6 +32564,17 @@ define i32 @atomicrmw_sub_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i32_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB144_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a2, (a0)
+; RV32I-ZALRSC-NEXT: sub a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB144_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_sub_i32_seq_cst:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: neg a1, a1
@@ -26784,6 +32597,17 @@ define i32 @atomicrmw_sub_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i32_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB144_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a2, (a0)
+; RV64I-ZALRSC-NEXT: sub a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB144_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_sub_i32_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: neg a1, a1
@@ -26810,6 +32634,17 @@ define i32 @atomicrmw_and_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB145_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: and a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB145_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_and_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: amoand.w a0, a1, (a0)
@@ -26825,6 +32660,17 @@ define i32 @atomicrmw_and_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB145_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB145_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomicrmw_and_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoand.w a0, a1, (a0)
@@ -26844,6 +32690,17 @@ define i32 @atomicrmw_and_i32_acquire(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i32_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB146_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV32I-ZALRSC-NEXT: and a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB146_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_and_i32_acquire:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amoand.w.aq a0, a1, (a0)
@@ -26864,6 +32721,17 @@ define i32 @atomicrmw_and_i32_acquire(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i32_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB146_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB146_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_and_i32_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoand.w.aq a0, a1, (a0)
@@ -26888,6 +32756,17 @@ define i32 @atomicrmw_and_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i32_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB147_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: and a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB147_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_and_i32_release:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amoand.w.rl a0, a1, (a0)
@@ -26908,6 +32787,17 @@ define i32 @atomicrmw_and_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i32_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB147_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB147_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_and_i32_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoand.w.rl a0, a1, (a0)
@@ -26932,6 +32822,17 @@ define i32 @atomicrmw_and_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i32_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB148_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV32I-ZALRSC-NEXT: and a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB148_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_and_i32_acq_rel:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amoand.w.aqrl a0, a1, (a0)
@@ -26952,6 +32853,17 @@ define i32 @atomicrmw_and_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i32_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB148_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB148_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_and_i32_acq_rel:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoand.w.aqrl a0, a1, (a0)
@@ -26976,6 +32888,17 @@ define i32 @atomicrmw_and_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i32_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB149_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a2, (a0)
+; RV32I-ZALRSC-NEXT: and a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB149_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_and_i32_seq_cst:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amoand.w.aqrl a0, a1, (a0)
@@ -26996,6 +32919,17 @@ define i32 @atomicrmw_and_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i32_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB149_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB149_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_and_i32_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoand.w.aqrl a0, a1, (a0)
@@ -27020,6 +32954,18 @@ define i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB150_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: and a3, a2, a1
+; RV32I-ZALRSC-NEXT: not a3, a3
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB150_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_nand_i32_monotonic:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: .LBB150_1: # =>This Inner Loop Header: Depth=1
@@ -27042,6 +32988,18 @@ define i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB150_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: not a3, a3
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB150_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_nand_i32_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: .LBB150_1: # =>This Inner Loop Header: Depth=1
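(Nand is the one binary operation with no corresponding AMO instruction even under the full A extension, which is why the NOZACAS configurations above also fall back to an LR/SC loop; the Zalrsc expansion differs from the other binops only by the extra not after the and. A sketch of the presumed input IR, signature taken from the hunk header and the body assumed:

  define i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind {
    %1 = atomicrmw nand ptr %a, i32 %b monotonic
    ret i32 %1
  }
)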
@@ -27200,6 +33158,18 @@ define i32 @atomicrmw_nand_i32_acquire(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i32_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB151_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV32I-ZALRSC-NEXT: and a3, a2, a1
+; RV32I-ZALRSC-NEXT: not a3, a3
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB151_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_nand_i32_acquire:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: .LBB151_1: # =>This Inner Loop Header: Depth=1
@@ -27234,6 +33204,18 @@ define i32 @atomicrmw_nand_i32_acquire(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i32_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB151_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: not a3, a3
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB151_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_nand_i32_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: .LBB151_1: # =>This Inner Loop Header: Depth=1
@@ -27432,6 +33414,18 @@ define i32 @atomicrmw_nand_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i32_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB152_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: and a3, a2, a1
+; RV32I-ZALRSC-NEXT: not a3, a3
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB152_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_nand_i32_release:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: .LBB152_1: # =>This Inner Loop Header: Depth=1
@@ -27466,6 +33460,18 @@ define i32 @atomicrmw_nand_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i32_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB152_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: not a3, a3
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB152_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_nand_i32_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: .LBB152_1: # =>This Inner Loop Header: Depth=1
@@ -27664,6 +33670,18 @@ define i32 @atomicrmw_nand_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i32_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB153_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV32I-ZALRSC-NEXT: and a3, a2, a1
+; RV32I-ZALRSC-NEXT: not a3, a3
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB153_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-NOZACAS-LABEL: atomicrmw_nand_i32_acq_rel:
; RV32IA-WMO-NOZACAS: # %bb.0:
; RV32IA-WMO-NOZACAS-NEXT: .LBB153_1: # =>This Inner Loop Header: Depth=1
@@ -27698,6 +33716,18 @@ define i32 @atomicrmw_nand_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i32_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB153_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: not a3, a3
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB153_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_nand_i32_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: .LBB153_1: # =>This Inner Loop Header: Depth=1
@@ -27896,6 +33926,18 @@ define i32 @atomicrmw_nand_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i32_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB154_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a2, (a0)
+; RV32I-ZALRSC-NEXT: and a3, a2, a1
+; RV32I-ZALRSC-NEXT: not a3, a3
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB154_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-NOZACAS-LABEL: atomicrmw_nand_i32_seq_cst:
; RV32IA-NOZACAS: # %bb.0:
; RV32IA-NOZACAS-NEXT: .LBB154_1: # =>This Inner Loop Header: Depth=1
@@ -27918,6 +33960,18 @@ define i32 @atomicrmw_nand_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i32_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB154_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: not a3, a3
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB154_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_nand_i32_seq_cst:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: .LBB154_1: # =>This Inner Loop Header: Depth=1
@@ -28112,6 +34166,17 @@ define i32 @atomicrmw_or_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB155_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: or a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB155_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_or_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: amoor.w a0, a1, (a0)
@@ -28127,6 +34192,17 @@ define i32 @atomicrmw_or_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB155_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV64I-ZALRSC-NEXT: or a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB155_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomicrmw_or_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoor.w a0, a1, (a0)
@@ -28146,6 +34222,17 @@ define i32 @atomicrmw_or_i32_acquire(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i32_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB156_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV32I-ZALRSC-NEXT: or a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB156_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_or_i32_acquire:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amoor.w.aq a0, a1, (a0)
@@ -28166,6 +34253,17 @@ define i32 @atomicrmw_or_i32_acquire(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i32_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB156_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: or a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB156_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_or_i32_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoor.w.aq a0, a1, (a0)
@@ -28190,6 +34288,17 @@ define i32 @atomicrmw_or_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i32_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB157_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: or a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB157_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_or_i32_release:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amoor.w.rl a0, a1, (a0)
@@ -28210,6 +34319,17 @@ define i32 @atomicrmw_or_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i32_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB157_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV64I-ZALRSC-NEXT: or a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB157_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_or_i32_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoor.w.rl a0, a1, (a0)
@@ -28234,6 +34354,17 @@ define i32 @atomicrmw_or_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i32_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB158_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV32I-ZALRSC-NEXT: or a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB158_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_or_i32_acq_rel:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amoor.w.aqrl a0, a1, (a0)
@@ -28254,6 +34385,17 @@ define i32 @atomicrmw_or_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i32_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB158_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: or a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB158_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_or_i32_acq_rel:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoor.w.aqrl a0, a1, (a0)
@@ -28278,6 +34420,17 @@ define i32 @atomicrmw_or_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i32_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB159_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a2, (a0)
+; RV32I-ZALRSC-NEXT: or a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB159_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_or_i32_seq_cst:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amoor.w.aqrl a0, a1, (a0)
@@ -28298,6 +34451,17 @@ define i32 @atomicrmw_or_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i32_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB159_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a2, (a0)
+; RV64I-ZALRSC-NEXT: or a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB159_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_or_i32_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoor.w.aqrl a0, a1, (a0)
@@ -28322,6 +34486,17 @@ define i32 @atomicrmw_xor_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB160_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: xor a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB160_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_xor_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: amoxor.w a0, a1, (a0)
@@ -28337,6 +34512,17 @@ define i32 @atomicrmw_xor_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB160_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV64I-ZALRSC-NEXT: xor a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB160_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomicrmw_xor_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoxor.w a0, a1, (a0)
@@ -28356,6 +34542,17 @@ define i32 @atomicrmw_xor_i32_acquire(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i32_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB161_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV32I-ZALRSC-NEXT: xor a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB161_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_xor_i32_acquire:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amoxor.w.aq a0, a1, (a0)
@@ -28376,6 +34573,17 @@ define i32 @atomicrmw_xor_i32_acquire(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i32_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB161_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: xor a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB161_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_xor_i32_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoxor.w.aq a0, a1, (a0)
@@ -28400,6 +34608,17 @@ define i32 @atomicrmw_xor_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i32_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB162_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: xor a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB162_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_xor_i32_release:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amoxor.w.rl a0, a1, (a0)
@@ -28420,6 +34639,17 @@ define i32 @atomicrmw_xor_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i32_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB162_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV64I-ZALRSC-NEXT: xor a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB162_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_xor_i32_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoxor.w.rl a0, a1, (a0)
@@ -28444,6 +34674,17 @@ define i32 @atomicrmw_xor_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i32_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB163_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV32I-ZALRSC-NEXT: xor a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB163_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_xor_i32_acq_rel:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amoxor.w.aqrl a0, a1, (a0)
@@ -28464,6 +34705,17 @@ define i32 @atomicrmw_xor_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i32_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB163_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: xor a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB163_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_xor_i32_acq_rel:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoxor.w.aqrl a0, a1, (a0)
@@ -28488,6 +34740,17 @@ define i32 @atomicrmw_xor_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i32_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB164_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a2, (a0)
+; RV32I-ZALRSC-NEXT: xor a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB164_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_xor_i32_seq_cst:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amoxor.w.aqrl a0, a1, (a0)
@@ -28508,6 +34771,17 @@ define i32 @atomicrmw_xor_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i32_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB164_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a2, (a0)
+; RV64I-ZALRSC-NEXT: xor a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB164_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_xor_i32_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoxor.w.aqrl a0, a1, (a0)
@@ -28558,6 +34832,21 @@ define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB165_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bge a3, a1, .LBB165_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB165_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB165_3: # in Loop: Header=BB165_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB165_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_max_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: amomax.w a0, a1, (a0)
@@ -28602,6 +34891,22 @@ define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB165_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bge a3, a2, .LBB165_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB165_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB165_3: # in Loop: Header=BB165_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB165_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomicrmw_max_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amomax.w a0, a1, (a0)
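(For min/max the expansion places a compare-and-branch inside the LR/SC loop (bge/bgeu picking between the loaded value and the operand), and on RV64 the i32 operand is sign-extended once with sext.w up front so the comparison is against the sign-extended word that lr.w produces. Presumed input IR for the checks above; the body is not part of this hunk and is an assumption:

  define i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
    %1 = atomicrmw max ptr %a, i32 %b monotonic
    ret i32 %1
  }
)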
@@ -28647,6 +34952,21 @@ define i32 @atomicrmw_max_i32_acquire(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i32_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB166_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bge a3, a1, .LBB166_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB166_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB166_3: # in Loop: Header=BB166_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB166_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_max_i32_acquire:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amomax.w.aq a0, a1, (a0)
@@ -28696,6 +35016,22 @@ define i32 @atomicrmw_max_i32_acquire(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i32_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB166_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bge a3, a2, .LBB166_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB166_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB166_3: # in Loop: Header=BB166_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB166_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_max_i32_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomax.w.aq a0, a1, (a0)
@@ -28746,6 +35082,21 @@ define i32 @atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i32_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB167_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bge a3, a1, .LBB167_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB167_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB167_3: # in Loop: Header=BB167_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB167_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_max_i32_release:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amomax.w.rl a0, a1, (a0)
@@ -28795,6 +35146,22 @@ define i32 @atomicrmw_max_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i32_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB167_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bge a3, a2, .LBB167_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB167_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB167_3: # in Loop: Header=BB167_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB167_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_max_i32_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomax.w.rl a0, a1, (a0)
@@ -28845,6 +35212,21 @@ define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i32_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB168_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bge a3, a1, .LBB168_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB168_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB168_3: # in Loop: Header=BB168_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB168_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_max_i32_acq_rel:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amomax.w.aqrl a0, a1, (a0)
@@ -28894,6 +35276,22 @@ define i32 @atomicrmw_max_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i32_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB168_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bge a3, a2, .LBB168_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB168_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB168_3: # in Loop: Header=BB168_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB168_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_max_i32_acq_rel:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomax.w.aqrl a0, a1, (a0)
@@ -28944,6 +35342,21 @@ define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i32_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB169_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bge a3, a1, .LBB169_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB169_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB169_3: # in Loop: Header=BB169_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB169_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_max_i32_seq_cst:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amomax.w.aqrl a0, a1, (a0)
@@ -28993,6 +35406,22 @@ define i32 @atomicrmw_max_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i32_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB169_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bge a3, a2, .LBB169_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB169_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB169_3: # in Loop: Header=BB169_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB169_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_max_i32_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomax.w.aqrl a0, a1, (a0)
@@ -29043,6 +35472,21 @@ define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB170_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bge a1, a3, .LBB170_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB170_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB170_3: # in Loop: Header=BB170_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB170_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_min_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: amomin.w a0, a1, (a0)
@@ -29087,6 +35531,22 @@ define i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB170_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bge a2, a3, .LBB170_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB170_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB170_3: # in Loop: Header=BB170_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB170_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomicrmw_min_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amomin.w a0, a1, (a0)
@@ -29132,6 +35592,21 @@ define i32 @atomicrmw_min_i32_acquire(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i32_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB171_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bge a1, a3, .LBB171_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB171_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB171_3: # in Loop: Header=BB171_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB171_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_min_i32_acquire:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amomin.w.aq a0, a1, (a0)
@@ -29181,6 +35656,22 @@ define i32 @atomicrmw_min_i32_acquire(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i32_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB171_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bge a2, a3, .LBB171_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB171_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB171_3: # in Loop: Header=BB171_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB171_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_min_i32_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomin.w.aq a0, a1, (a0)
@@ -29231,6 +35722,21 @@ define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i32_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB172_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bge a1, a3, .LBB172_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB172_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB172_3: # in Loop: Header=BB172_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB172_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_min_i32_release:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amomin.w.rl a0, a1, (a0)
@@ -29280,6 +35786,22 @@ define i32 @atomicrmw_min_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i32_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB172_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bge a2, a3, .LBB172_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB172_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB172_3: # in Loop: Header=BB172_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB172_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_min_i32_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomin.w.rl a0, a1, (a0)
@@ -29330,6 +35852,21 @@ define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i32_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB173_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bge a1, a3, .LBB173_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB173_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB173_3: # in Loop: Header=BB173_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB173_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_min_i32_acq_rel:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amomin.w.aqrl a0, a1, (a0)
@@ -29379,6 +35916,22 @@ define i32 @atomicrmw_min_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i32_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB173_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bge a2, a3, .LBB173_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB173_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB173_3: # in Loop: Header=BB173_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB173_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_min_i32_acq_rel:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomin.w.aqrl a0, a1, (a0)
@@ -29429,6 +35982,21 @@ define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i32_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB174_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bge a1, a3, .LBB174_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB174_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB174_3: # in Loop: Header=BB174_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB174_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_min_i32_seq_cst:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amomin.w.aqrl a0, a1, (a0)
@@ -29478,6 +36046,22 @@ define i32 @atomicrmw_min_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i32_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB174_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bge a2, a3, .LBB174_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB174_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB174_3: # in Loop: Header=BB174_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB174_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_min_i32_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomin.w.aqrl a0, a1, (a0)
@@ -29528,6 +36112,21 @@ define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB175_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bgeu a3, a1, .LBB175_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB175_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB175_3: # in Loop: Header=BB175_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB175_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_umax_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: amomaxu.w a0, a1, (a0)
@@ -29572,6 +36171,22 @@ define i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB175_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bgeu a3, a2, .LBB175_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB175_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB175_3: # in Loop: Header=BB175_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB175_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomicrmw_umax_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amomaxu.w a0, a1, (a0)
@@ -29617,6 +36232,21 @@ define i32 @atomicrmw_umax_i32_acquire(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i32_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB176_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bgeu a3, a1, .LBB176_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB176_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB176_3: # in Loop: Header=BB176_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB176_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_umax_i32_acquire:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amomaxu.w.aq a0, a1, (a0)
@@ -29666,6 +36296,22 @@ define i32 @atomicrmw_umax_i32_acquire(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i32_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB176_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bgeu a3, a2, .LBB176_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB176_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB176_3: # in Loop: Header=BB176_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB176_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_umax_i32_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomaxu.w.aq a0, a1, (a0)
@@ -29716,6 +36362,21 @@ define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i32_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB177_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bgeu a3, a1, .LBB177_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB177_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB177_3: # in Loop: Header=BB177_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB177_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_umax_i32_release:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amomaxu.w.rl a0, a1, (a0)
@@ -29765,6 +36426,22 @@ define i32 @atomicrmw_umax_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i32_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB177_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bgeu a3, a2, .LBB177_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB177_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB177_3: # in Loop: Header=BB177_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB177_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_umax_i32_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomaxu.w.rl a0, a1, (a0)
@@ -29815,6 +36492,21 @@ define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i32_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB178_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bgeu a3, a1, .LBB178_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB178_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB178_3: # in Loop: Header=BB178_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB178_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_umax_i32_acq_rel:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amomaxu.w.aqrl a0, a1, (a0)
@@ -29864,6 +36556,22 @@ define i32 @atomicrmw_umax_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i32_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB178_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bgeu a3, a2, .LBB178_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB178_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB178_3: # in Loop: Header=BB178_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB178_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_umax_i32_acq_rel:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomaxu.w.aqrl a0, a1, (a0)
@@ -29914,6 +36622,21 @@ define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i32_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB179_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bgeu a3, a1, .LBB179_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB179_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB179_3: # in Loop: Header=BB179_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB179_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_umax_i32_seq_cst:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amomaxu.w.aqrl a0, a1, (a0)
@@ -29963,6 +36686,22 @@ define i32 @atomicrmw_umax_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i32_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB179_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bgeu a3, a2, .LBB179_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB179_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB179_3: # in Loop: Header=BB179_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB179_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_umax_i32_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomaxu.w.aqrl a0, a1, (a0)
@@ -30013,6 +36752,21 @@ define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB180_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bgeu a1, a3, .LBB180_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB180_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB180_3: # in Loop: Header=BB180_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB180_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
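+; NOTE: umin reuses the umax loop with the bgeu operands swapped, so the
+; loaded value is kept whenever the incoming operand is >= it.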
; RV32IA-LABEL: atomicrmw_umin_i32_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: amominu.w a0, a1, (a0)
@@ -30057,6 +36811,22 @@ define i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB180_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bgeu a2, a3, .LBB180_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB180_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB180_3: # in Loop: Header=BB180_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB180_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomicrmw_umin_i32_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amominu.w a0, a1, (a0)
@@ -30102,6 +36872,21 @@ define i32 @atomicrmw_umin_i32_acquire(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i32_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB181_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bgeu a1, a3, .LBB181_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB181_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB181_3: # in Loop: Header=BB181_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB181_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_umin_i32_acquire:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amominu.w.aq a0, a1, (a0)
@@ -30151,6 +36936,22 @@ define i32 @atomicrmw_umin_i32_acquire(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i32_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB181_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bgeu a2, a3, .LBB181_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB181_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB181_3: # in Loop: Header=BB181_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB181_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_umin_i32_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amominu.w.aq a0, a1, (a0)
@@ -30201,6 +37002,21 @@ define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i32_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB182_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bgeu a1, a3, .LBB182_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB182_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB182_3: # in Loop: Header=BB182_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB182_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_umin_i32_release:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amominu.w.rl a0, a1, (a0)
@@ -30250,6 +37066,22 @@ define i32 @atomicrmw_umin_i32_release(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i32_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB182_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bgeu a2, a3, .LBB182_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB182_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB182_3: # in Loop: Header=BB182_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB182_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_umin_i32_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amominu.w.rl a0, a1, (a0)
@@ -30300,6 +37132,21 @@ define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i32_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB183_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aq a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bgeu a1, a3, .LBB183_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB183_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB183_3: # in Loop: Header=BB183_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB183_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_umin_i32_acq_rel:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amominu.w.aqrl a0, a1, (a0)
@@ -30349,6 +37196,22 @@ define i32 @atomicrmw_umin_i32_acq_rel(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i32_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB183_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aq a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bgeu a2, a3, .LBB183_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB183_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB183_3: # in Loop: Header=BB183_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB183_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_umin_i32_acq_rel:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amominu.w.aqrl a0, a1, (a0)
@@ -30399,6 +37262,21 @@ define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i32_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB184_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bgeu a1, a3, .LBB184_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB184_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB184_3: # in Loop: Header=BB184_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB184_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-WMO-LABEL: atomicrmw_umin_i32_seq_cst:
; RV32IA-WMO: # %bb.0:
; RV32IA-WMO-NEXT: amominu.w.aqrl a0, a1, (a0)
@@ -30448,6 +37326,22 @@ define i32 @atomicrmw_umin_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i32_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB184_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bgeu a2, a3, .LBB184_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB184_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB184_3: # in Loop: Header=BB184_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB184_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_umin_i32_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amominu.w.aqrl a0, a1, (a0)
@@ -30472,6 +37366,16 @@ define i64 @atomicrmw_xchg_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 0
+; RV32I-ZALRSC-NEXT: call __atomic_exchange_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
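+; NOTE: RV32 has no 64-bit LR/SC, so i64 atomicrmw still lowers to the
+; __atomic_*_8 libcalls even with Zalrsc; the ordering is passed in a3
+; using the __ATOMIC_* memory-order constants (0 = relaxed, 2 = acquire,
+; 3 = release, 4 = acq_rel, 5 = seq_cst).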
; RV32IA-LABEL: atomicrmw_xchg_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -30492,6 +37396,17 @@ define i64 @atomicrmw_xchg_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB185_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB185_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
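+; NOTE: xchg needs no ALU op in the loop body: the new value is simply
+; moved into the scratch register between lr.d and sc.d. The .aq/.rl
+; suffixes below follow the IR ordering (acquire -> lr.d.aq, release ->
+; sc.d.rl, acq_rel -> both, seq_cst -> lr.d.aqrl + sc.d.rl).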
; RV64IA-LABEL: atomicrmw_xchg_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoswap.d a0, a1, (a0)
@@ -30511,6 +37426,16 @@ define i64 @atomicrmw_xchg_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i64_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 2
+; RV32I-ZALRSC-NEXT: call __atomic_exchange_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_xchg_i64_acquire:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -30531,6 +37456,17 @@ define i64 @atomicrmw_xchg_i64_acquire(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i64_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB186_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB186_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_xchg_i64_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoswap.d.aq a0, a1, (a0)
@@ -30555,6 +37491,16 @@ define i64 @atomicrmw_xchg_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i64_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 3
+; RV32I-ZALRSC-NEXT: call __atomic_exchange_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_xchg_i64_release:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -30575,6 +37521,17 @@ define i64 @atomicrmw_xchg_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i64_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB187_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB187_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_xchg_i64_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoswap.d.rl a0, a1, (a0)
@@ -30599,6 +37556,16 @@ define i64 @atomicrmw_xchg_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i64_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 4
+; RV32I-ZALRSC-NEXT: call __atomic_exchange_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_xchg_i64_acq_rel:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -30619,6 +37586,17 @@ define i64 @atomicrmw_xchg_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i64_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB188_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB188_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_xchg_i64_acq_rel:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoswap.d.aqrl a0, a1, (a0)
@@ -30643,6 +37621,16 @@ define i64 @atomicrmw_xchg_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i64_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 5
+; RV32I-ZALRSC-NEXT: call __atomic_exchange_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_xchg_i64_seq_cst:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -30663,6 +37651,17 @@ define i64 @atomicrmw_xchg_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i64_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB189_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aqrl a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB189_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_xchg_i64_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoswap.d.aqrl a0, a1, (a0)
@@ -30687,6 +37686,16 @@ define i64 @atomicrmw_add_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 0
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_add_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_add_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -30707,6 +37716,17 @@ define i64 @atomicrmw_add_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB190_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: add a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB190_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomicrmw_add_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoadd.d a0, a1, (a0)
@@ -30726,6 +37746,16 @@ define i64 @atomicrmw_add_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i64_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 2
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_add_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_add_i64_acquire:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -30746,6 +37776,17 @@ define i64 @atomicrmw_add_i64_acquire(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i64_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB191_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: add a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB191_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_add_i64_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoadd.d.aq a0, a1, (a0)
@@ -30770,6 +37811,16 @@ define i64 @atomicrmw_add_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i64_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 3
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_add_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_add_i64_release:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -30790,6 +37841,17 @@ define i64 @atomicrmw_add_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i64_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB192_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: add a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB192_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_add_i64_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoadd.d.rl a0, a1, (a0)
@@ -30814,6 +37876,16 @@ define i64 @atomicrmw_add_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i64_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 4
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_add_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_add_i64_acq_rel:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -30834,6 +37906,17 @@ define i64 @atomicrmw_add_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i64_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB193_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: add a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB193_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_add_i64_acq_rel:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoadd.d.aqrl a0, a1, (a0)
@@ -30858,6 +37941,16 @@ define i64 @atomicrmw_add_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i64_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 5
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_add_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_add_i64_seq_cst:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -30878,6 +37971,17 @@ define i64 @atomicrmw_add_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i64_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB194_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aqrl a2, (a0)
+; RV64I-ZALRSC-NEXT: add a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB194_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_add_i64_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoadd.d.aqrl a0, a1, (a0)
@@ -30902,6 +38006,16 @@ define i64 @atomicrmw_sub_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 0
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_sub_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_sub_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -30922,6 +38036,17 @@ define i64 @atomicrmw_sub_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB195_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: sub a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB195_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomicrmw_sub_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: neg a1, a1
@@ -30942,6 +38067,16 @@ define i64 @atomicrmw_sub_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i64_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 2
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_sub_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_sub_i64_acquire:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -30962,6 +38097,17 @@ define i64 @atomicrmw_sub_i64_acquire(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i64_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB196_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: sub a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB196_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_sub_i64_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: neg a1, a1
@@ -30988,6 +38134,16 @@ define i64 @atomicrmw_sub_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i64_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 3
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_sub_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_sub_i64_release:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -31008,6 +38164,17 @@ define i64 @atomicrmw_sub_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i64_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB197_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: sub a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB197_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_sub_i64_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: neg a1, a1
@@ -31034,6 +38201,16 @@ define i64 @atomicrmw_sub_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i64_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 4
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_sub_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_sub_i64_acq_rel:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -31054,6 +38231,17 @@ define i64 @atomicrmw_sub_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i64_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB198_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: sub a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB198_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_sub_i64_acq_rel:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: neg a1, a1
@@ -31080,6 +38268,16 @@ define i64 @atomicrmw_sub_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i64_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 5
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_sub_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_sub_i64_seq_cst:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -31100,6 +38298,17 @@ define i64 @atomicrmw_sub_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i64_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB199_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aqrl a2, (a0)
+; RV64I-ZALRSC-NEXT: sub a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB199_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_sub_i64_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: neg a1, a1
@@ -31126,6 +38335,16 @@ define i64 @atomicrmw_and_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 0
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_and_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_and_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -31146,6 +38365,17 @@ define i64 @atomicrmw_and_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB200_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB200_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomicrmw_and_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoand.d a0, a1, (a0)
@@ -31165,6 +38395,16 @@ define i64 @atomicrmw_and_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i64_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 2
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_and_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_and_i64_acquire:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -31185,6 +38425,17 @@ define i64 @atomicrmw_and_i64_acquire(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i64_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB201_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB201_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_and_i64_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoand.d.aq a0, a1, (a0)
@@ -31209,6 +38460,16 @@ define i64 @atomicrmw_and_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i64_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 3
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_and_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_and_i64_release:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -31229,6 +38490,17 @@ define i64 @atomicrmw_and_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i64_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB202_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB202_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_and_i64_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoand.d.rl a0, a1, (a0)
@@ -31253,6 +38525,16 @@ define i64 @atomicrmw_and_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i64_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 4
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_and_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_and_i64_acq_rel:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -31273,6 +38555,17 @@ define i64 @atomicrmw_and_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i64_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB203_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB203_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_and_i64_acq_rel:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoand.d.aqrl a0, a1, (a0)
@@ -31297,6 +38590,16 @@ define i64 @atomicrmw_and_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i64_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 5
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_and_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_and_i64_seq_cst:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -31317,6 +38620,17 @@ define i64 @atomicrmw_and_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i64_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB204_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aqrl a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB204_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_and_i64_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoand.d.aqrl a0, a1, (a0)
@@ -31341,6 +38655,16 @@ define i64 @atomicrmw_nand_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 0
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_nand_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_nand_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -31361,6 +38685,18 @@ define i64 @atomicrmw_nand_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB205_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: not a3, a3
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB205_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
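+; NOTE: There is no AMO instruction for nand, so it remains an LR/SC loop
+; (and followed by not) even on RV64IA, as the NOZACAS checks below show.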
; RV64IA-NOZACAS-LABEL: atomicrmw_nand_i64_monotonic:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: .LBB205_1: # =>This Inner Loop Header: Depth=1
@@ -31453,6 +38789,16 @@ define i64 @atomicrmw_nand_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i64_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 2
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_nand_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_nand_i64_acquire:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -31473,6 +38819,18 @@ define i64 @atomicrmw_nand_i64_acquire(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i64_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB206_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: not a3, a3
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB206_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_nand_i64_acquire:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: .LBB206_1: # =>This Inner Loop Header: Depth=1
@@ -31591,6 +38949,16 @@ define i64 @atomicrmw_nand_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i64_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 3
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_nand_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_nand_i64_release:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -31611,6 +38979,18 @@ define i64 @atomicrmw_nand_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i64_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB207_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: not a3, a3
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB207_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_nand_i64_release:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: .LBB207_1: # =>This Inner Loop Header: Depth=1
@@ -31729,6 +39109,16 @@ define i64 @atomicrmw_nand_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i64_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 4
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_nand_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_nand_i64_acq_rel:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -31749,6 +39139,18 @@ define i64 @atomicrmw_nand_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i64_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB208_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: not a3, a3
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB208_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-NOZACAS-LABEL: atomicrmw_nand_i64_acq_rel:
; RV64IA-WMO-NOZACAS: # %bb.0:
; RV64IA-WMO-NOZACAS-NEXT: .LBB208_1: # =>This Inner Loop Header: Depth=1
@@ -31867,6 +39269,16 @@ define i64 @atomicrmw_nand_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i64_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 5
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_nand_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_nand_i64_seq_cst:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -31887,6 +39299,18 @@ define i64 @atomicrmw_nand_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i64_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB209_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aqrl a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: not a3, a3
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB209_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-NOZACAS-LABEL: atomicrmw_nand_i64_seq_cst:
; RV64IA-NOZACAS: # %bb.0:
; RV64IA-NOZACAS-NEXT: .LBB209_1: # =>This Inner Loop Header: Depth=1
@@ -31997,6 +39421,16 @@ define i64 @atomicrmw_or_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 0
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_or_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_or_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -32017,6 +39451,17 @@ define i64 @atomicrmw_or_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB210_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: or a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB210_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomicrmw_or_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoor.d a0, a1, (a0)
@@ -32036,6 +39481,16 @@ define i64 @atomicrmw_or_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i64_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 2
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_or_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_or_i64_acquire:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -32056,6 +39511,17 @@ define i64 @atomicrmw_or_i64_acquire(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i64_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB211_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: or a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB211_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_or_i64_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoor.d.aq a0, a1, (a0)
@@ -32080,6 +39546,16 @@ define i64 @atomicrmw_or_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i64_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 3
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_or_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_or_i64_release:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -32100,6 +39576,17 @@ define i64 @atomicrmw_or_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i64_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB212_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: or a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB212_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_or_i64_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoor.d.rl a0, a1, (a0)
@@ -32124,6 +39611,16 @@ define i64 @atomicrmw_or_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i64_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 4
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_or_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_or_i64_acq_rel:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -32144,6 +39641,17 @@ define i64 @atomicrmw_or_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i64_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB213_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: or a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB213_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_or_i64_acq_rel:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoor.d.aqrl a0, a1, (a0)
@@ -32168,6 +39676,16 @@ define i64 @atomicrmw_or_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i64_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 5
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_or_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_or_i64_seq_cst:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -32188,6 +39706,17 @@ define i64 @atomicrmw_or_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i64_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB214_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aqrl a2, (a0)
+; RV64I-ZALRSC-NEXT: or a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB214_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_or_i64_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoor.d.aqrl a0, a1, (a0)
@@ -32212,6 +39741,16 @@ define i64 @atomicrmw_xor_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 0
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_xor_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_xor_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -32232,6 +39771,17 @@ define i64 @atomicrmw_xor_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB215_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: xor a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB215_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomicrmw_xor_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoxor.d a0, a1, (a0)
@@ -32251,6 +39801,16 @@ define i64 @atomicrmw_xor_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i64_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 2
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_xor_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_xor_i64_acquire:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -32271,6 +39831,17 @@ define i64 @atomicrmw_xor_i64_acquire(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i64_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB216_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: xor a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB216_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_xor_i64_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoxor.d.aq a0, a1, (a0)
@@ -32295,6 +39866,16 @@ define i64 @atomicrmw_xor_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i64_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 3
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_xor_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_xor_i64_release:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -32315,6 +39896,17 @@ define i64 @atomicrmw_xor_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i64_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB217_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: xor a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB217_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_xor_i64_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoxor.d.rl a0, a1, (a0)
@@ -32339,6 +39931,16 @@ define i64 @atomicrmw_xor_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i64_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 4
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_xor_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_xor_i64_acq_rel:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -32359,6 +39961,17 @@ define i64 @atomicrmw_xor_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i64_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB218_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: xor a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB218_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_xor_i64_acq_rel:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoxor.d.aqrl a0, a1, (a0)
@@ -32383,6 +39996,16 @@ define i64 @atomicrmw_xor_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i64_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 5
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_xor_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_xor_i64_seq_cst:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -16
@@ -32403,6 +40026,17 @@ define i64 @atomicrmw_xor_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i64_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB219_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aqrl a2, (a0)
+; RV64I-ZALRSC-NEXT: xor a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB219_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_xor_i64_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amoxor.d.aqrl a0, a1, (a0)
@@ -32471,6 +40105,60 @@ define i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB220_2
+; RV32I-ZALRSC-NEXT: .LBB220_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB220_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: li a4, 0
+; RV32I-ZALRSC-NEXT: li a5, 0
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB220_7
+; RV32I-ZALRSC-NEXT: .LBB220_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB220_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB220_2 Depth=1
+; RV32I-ZALRSC-NEXT: slt a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB220_5
+; RV32I-ZALRSC-NEXT: .LBB220_4: # in Loop: Header=BB220_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB220_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB220_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB220_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB220_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB220_1
+; RV32I-ZALRSC-NEXT: .LBB220_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
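+; NOTE: Signed 64-bit min/max cannot be expressed in a 32-bit LR/SC loop,
+; so RV32 expands them to a cmpxchg loop: the two-word compare uses slt on
+; the high halves (sltu on the low halves when the high halves are equal)
+; and retries via __atomic_compare_exchange_8 until the exchange succeeds.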
; RV32IA-LABEL: atomicrmw_max_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
@@ -32561,6 +40249,21 @@ define i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB220_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bge a3, a1, .LBB220_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB220_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB220_3: # in Loop: Header=BB220_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB220_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
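
On RV64 the same operation fits in a single inline LR/SC loop: sc.d writes 0 to its destination register when the store succeeds and a nonzero value when the reservation was lost, which is why `bnez a3` retries. At the source level these tests correspond to `atomicrmw max`; Clang's __atomic_fetch_max builtin is one way to produce it (a sketch, with an illustrative function name):

#include <stdint.h>

/* Lowers to `atomicrmw max ptr %p, i64 %b monotonic`; with only Zalrsc
 * available that becomes the lr.d/sc.d retry loop checked above. */
int64_t fetch_max_relaxed(int64_t *p, int64_t b) {
    return __atomic_fetch_max(p, b, __ATOMIC_RELAXED);
}
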
; RV64IA-LABEL: atomicrmw_max_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amomax.d a0, a1, (a0)
@@ -32624,6 +40327,60 @@ define i64 @atomicrmw_max_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i64_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB221_2
+; RV32I-ZALRSC-NEXT: .LBB221_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB221_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: li a4, 2
+; RV32I-ZALRSC-NEXT: li a5, 2
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB221_7
+; RV32I-ZALRSC-NEXT: .LBB221_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB221_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB221_2 Depth=1
+; RV32I-ZALRSC-NEXT: slt a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB221_5
+; RV32I-ZALRSC-NEXT: .LBB221_4: # in Loop: Header=BB221_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB221_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB221_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB221_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB221_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB221_1
+; RV32I-ZALRSC-NEXT: .LBB221_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_max_i64_acquire:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
@@ -32714,6 +40471,21 @@ define i64 @atomicrmw_max_i64_acquire(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i64_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB221_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bge a3, a1, .LBB221_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB221_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB221_3: # in Loop: Header=BB221_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB221_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_max_i64_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomax.d.aq a0, a1, (a0)
@@ -32782,6 +40554,60 @@ define i64 @atomicrmw_max_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i64_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB222_2
+; RV32I-ZALRSC-NEXT: .LBB222_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB222_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: li a4, 3
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: li a5, 0
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB222_7
+; RV32I-ZALRSC-NEXT: .LBB222_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB222_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB222_2 Depth=1
+; RV32I-ZALRSC-NEXT: slt a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB222_5
+; RV32I-ZALRSC-NEXT: .LBB222_4: # in Loop: Header=BB222_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB222_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB222_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB222_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB222_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB222_1
+; RV32I-ZALRSC-NEXT: .LBB222_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_max_i64_release:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
@@ -32872,6 +40698,21 @@ define i64 @atomicrmw_max_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i64_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB222_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bge a3, a1, .LBB222_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB222_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB222_3: # in Loop: Header=BB222_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB222_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_max_i64_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomax.d.rl a0, a1, (a0)
@@ -32940,6 +40781,60 @@ define i64 @atomicrmw_max_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i64_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB223_2
+; RV32I-ZALRSC-NEXT: .LBB223_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB223_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: li a4, 4
+; RV32I-ZALRSC-NEXT: li a5, 2
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB223_7
+; RV32I-ZALRSC-NEXT: .LBB223_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB223_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB223_2 Depth=1
+; RV32I-ZALRSC-NEXT: slt a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB223_5
+; RV32I-ZALRSC-NEXT: .LBB223_4: # in Loop: Header=BB223_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB223_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB223_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB223_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB223_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB223_1
+; RV32I-ZALRSC-NEXT: .LBB223_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_max_i64_acq_rel:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
@@ -33030,6 +40925,21 @@ define i64 @atomicrmw_max_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i64_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB223_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bge a3, a1, .LBB223_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB223_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB223_3: # in Loop: Header=BB223_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB223_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_max_i64_acq_rel:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomax.d.aqrl a0, a1, (a0)
@@ -33098,6 +41008,60 @@ define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i64_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB224_2
+; RV32I-ZALRSC-NEXT: .LBB224_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB224_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: li a4, 5
+; RV32I-ZALRSC-NEXT: li a5, 5
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB224_7
+; RV32I-ZALRSC-NEXT: .LBB224_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB224_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB224_2 Depth=1
+; RV32I-ZALRSC-NEXT: slt a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB224_5
+; RV32I-ZALRSC-NEXT: .LBB224_4: # in Loop: Header=BB224_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB224_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB224_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB224_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB224_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB224_1
+; RV32I-ZALRSC-NEXT: .LBB224_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
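
Across the five max orderings, the RV32 blocks differ only in the li a4/a5 immediates: those are the standard __ATOMIC_* ordering codes, passed to __atomic_compare_exchange_8 as the success (a4) and failure (a5) orderings. For reference (the enum names here are illustrative; the values are fixed by the builtin ABI):

/* Ordering codes visible in the li immediates above: */
enum {
    ORD_RELAXED = 0,  /* monotonic: li a4, 0 ; li a5, 0 */
    ORD_ACQUIRE = 2,  /* acquire:   li a4, 2 ; li a5, 2 */
    ORD_RELEASE = 3,  /* release:   li a4, 3 ; li a5, 0 (failure is relaxed) */
    ORD_ACQ_REL = 4,  /* acq_rel:   li a4, 4 ; li a5, 2 (failure is acquire) */
    ORD_SEQ_CST = 5,  /* seq_cst:   li a4, 5 ; li a5, 5 */
};
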
; RV32IA-LABEL: atomicrmw_max_i64_seq_cst:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
@@ -33188,6 +41152,21 @@ define i64 @atomicrmw_max_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i64_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB224_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aqrl a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bge a3, a1, .LBB224_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB224_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB224_3: # in Loop: Header=BB224_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB224_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
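
The RV64 blocks encode the same orderings in the lr/sc aq and rl suffixes instead: monotonic uses lr.d/sc.d, acquire lr.d.aq/sc.d, release lr.d/sc.d.rl, acq_rel lr.d.aq/sc.d.rl, and seq_cst lr.d.aqrl/sc.d.rl. Source-level equivalents for the four ordered variants, again via the Clang builtin (illustrative names):

#include <stdint.h>

int64_t max_acq(int64_t *p, int64_t b) { return __atomic_fetch_max(p, b, __ATOMIC_ACQUIRE); }
int64_t max_rel(int64_t *p, int64_t b) { return __atomic_fetch_max(p, b, __ATOMIC_RELEASE); }
int64_t max_ar(int64_t *p, int64_t b)  { return __atomic_fetch_max(p, b, __ATOMIC_ACQ_REL); }
int64_t max_sc(int64_t *p, int64_t b)  { return __atomic_fetch_max(p, b, __ATOMIC_SEQ_CST); }
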
; RV64IA-WMO-LABEL: atomicrmw_max_i64_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomax.d.aqrl a0, a1, (a0)
@@ -33256,6 +41235,60 @@ define i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB225_2
+; RV32I-ZALRSC-NEXT: .LBB225_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB225_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: li a4, 0
+; RV32I-ZALRSC-NEXT: li a5, 0
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB225_7
+; RV32I-ZALRSC-NEXT: .LBB225_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB225_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB225_2 Depth=1
+; RV32I-ZALRSC-NEXT: slt a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB225_5
+; RV32I-ZALRSC-NEXT: .LBB225_4: # in Loop: Header=BB225_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB225_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB225_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: beqz a0, .LBB225_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB225_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB225_1
+; RV32I-ZALRSC-NEXT: .LBB225_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
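
The min expansion is the max loop with the select inverted: the slt/sltu compare is unchanged, but the branch in the .LBB225_5 block is beqz rather than bnez, so the new value is b exactly when b < old. As a sketch under the same assumptions as rmw_max_i64 above:

#include <stdint.h>

/* Same CAS loop as the max sketch; only the select direction flips. */
int64_t rmw_min_i64(int64_t *p, int64_t b) {
    int64_t old = *p;
    for (;;) {
        int64_t desired = (b < old) ? b : old;   /* beqz vs. bnez above */
        if (__atomic_compare_exchange_n(p, &old, desired, 0,
                                        __ATOMIC_RELAXED, __ATOMIC_RELAXED))
            return old;
    }
}
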
; RV32IA-LABEL: atomicrmw_min_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
@@ -33346,6 +41379,21 @@ define i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB225_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bge a1, a3, .LBB225_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB225_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB225_3: # in Loop: Header=BB225_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB225_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomicrmw_min_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amomin.d a0, a1, (a0)
@@ -33409,6 +41457,60 @@ define i64 @atomicrmw_min_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i64_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB226_2
+; RV32I-ZALRSC-NEXT: .LBB226_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB226_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: li a4, 2
+; RV32I-ZALRSC-NEXT: li a5, 2
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB226_7
+; RV32I-ZALRSC-NEXT: .LBB226_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB226_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB226_2 Depth=1
+; RV32I-ZALRSC-NEXT: slt a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB226_5
+; RV32I-ZALRSC-NEXT: .LBB226_4: # in Loop: Header=BB226_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB226_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB226_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: beqz a0, .LBB226_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB226_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB226_1
+; RV32I-ZALRSC-NEXT: .LBB226_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_min_i64_acquire:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
@@ -33499,6 +41601,21 @@ define i64 @atomicrmw_min_i64_acquire(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i64_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB226_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bge a1, a3, .LBB226_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB226_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB226_3: # in Loop: Header=BB226_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB226_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_min_i64_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomin.d.aq a0, a1, (a0)
@@ -33567,6 +41684,60 @@ define i64 @atomicrmw_min_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i64_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB227_2
+; RV32I-ZALRSC-NEXT: .LBB227_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB227_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: li a4, 3
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: li a5, 0
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB227_7
+; RV32I-ZALRSC-NEXT: .LBB227_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB227_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB227_2 Depth=1
+; RV32I-ZALRSC-NEXT: slt a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB227_5
+; RV32I-ZALRSC-NEXT: .LBB227_4: # in Loop: Header=BB227_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB227_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB227_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: beqz a0, .LBB227_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB227_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB227_1
+; RV32I-ZALRSC-NEXT: .LBB227_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_min_i64_release:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
@@ -33657,6 +41828,21 @@ define i64 @atomicrmw_min_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i64_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB227_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bge a1, a3, .LBB227_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB227_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB227_3: # in Loop: Header=BB227_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB227_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_min_i64_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomin.d.rl a0, a1, (a0)
@@ -33725,6 +41911,60 @@ define i64 @atomicrmw_min_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i64_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB228_2
+; RV32I-ZALRSC-NEXT: .LBB228_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB228_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: li a4, 4
+; RV32I-ZALRSC-NEXT: li a5, 2
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB228_7
+; RV32I-ZALRSC-NEXT: .LBB228_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB228_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB228_2 Depth=1
+; RV32I-ZALRSC-NEXT: slt a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB228_5
+; RV32I-ZALRSC-NEXT: .LBB228_4: # in Loop: Header=BB228_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB228_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB228_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: beqz a0, .LBB228_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB228_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB228_1
+; RV32I-ZALRSC-NEXT: .LBB228_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_min_i64_acq_rel:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
@@ -33815,6 +42055,21 @@ define i64 @atomicrmw_min_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i64_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB228_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bge a1, a3, .LBB228_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB228_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB228_3: # in Loop: Header=BB228_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB228_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_min_i64_acq_rel:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomin.d.aqrl a0, a1, (a0)
@@ -33883,6 +42138,60 @@ define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i64_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB229_2
+; RV32I-ZALRSC-NEXT: .LBB229_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB229_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: li a4, 5
+; RV32I-ZALRSC-NEXT: li a5, 5
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB229_7
+; RV32I-ZALRSC-NEXT: .LBB229_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB229_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB229_2 Depth=1
+; RV32I-ZALRSC-NEXT: slt a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB229_5
+; RV32I-ZALRSC-NEXT: .LBB229_4: # in Loop: Header=BB229_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB229_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB229_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: beqz a0, .LBB229_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB229_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB229_1
+; RV32I-ZALRSC-NEXT: .LBB229_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_min_i64_seq_cst:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
@@ -33973,6 +42282,21 @@ define i64 @atomicrmw_min_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i64_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB229_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aqrl a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bge a1, a3, .LBB229_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB229_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB229_3: # in Loop: Header=BB229_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB229_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_min_i64_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomin.d.aqrl a0, a1, (a0)
@@ -34041,6 +42365,60 @@ define i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB230_2
+; RV32I-ZALRSC-NEXT: .LBB230_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB230_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: li a4, 0
+; RV32I-ZALRSC-NEXT: li a5, 0
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB230_7
+; RV32I-ZALRSC-NEXT: .LBB230_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB230_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB230_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB230_5
+; RV32I-ZALRSC-NEXT: .LBB230_4: # in Loop: Header=BB230_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB230_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB230_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB230_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB230_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB230_1
+; RV32I-ZALRSC-NEXT: .LBB230_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_umax_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
@@ -34131,6 +42509,21 @@ define i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB230_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bgeu a3, a1, .LBB230_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB230_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB230_3: # in Loop: Header=BB230_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB230_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-LABEL: atomicrmw_umax_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amomaxu.d a0, a1, (a0)
@@ -34194,6 +42587,60 @@ define i64 @atomicrmw_umax_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i64_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB231_2
+; RV32I-ZALRSC-NEXT: .LBB231_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB231_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: li a4, 2
+; RV32I-ZALRSC-NEXT: li a5, 2
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB231_7
+; RV32I-ZALRSC-NEXT: .LBB231_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB231_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB231_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB231_5
+; RV32I-ZALRSC-NEXT: .LBB231_4: # in Loop: Header=BB231_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB231_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB231_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB231_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB231_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB231_1
+; RV32I-ZALRSC-NEXT: .LBB231_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_umax_i64_acquire:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
@@ -34284,6 +42731,21 @@ define i64 @atomicrmw_umax_i64_acquire(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i64_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB231_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bgeu a3, a1, .LBB231_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB231_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB231_3: # in Loop: Header=BB231_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB231_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_umax_i64_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomaxu.d.aq a0, a1, (a0)
@@ -34352,6 +42814,60 @@ define i64 @atomicrmw_umax_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i64_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB232_2
+; RV32I-ZALRSC-NEXT: .LBB232_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB232_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: li a4, 3
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: li a5, 0
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB232_7
+; RV32I-ZALRSC-NEXT: .LBB232_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB232_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB232_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB232_5
+; RV32I-ZALRSC-NEXT: .LBB232_4: # in Loop: Header=BB232_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB232_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB232_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB232_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB232_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB232_1
+; RV32I-ZALRSC-NEXT: .LBB232_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_umax_i64_release:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
@@ -34442,6 +42958,21 @@ define i64 @atomicrmw_umax_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i64_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB232_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bgeu a3, a1, .LBB232_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB232_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB232_3: # in Loop: Header=BB232_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB232_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_umax_i64_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomaxu.d.rl a0, a1, (a0)
@@ -34510,6 +43041,60 @@ define i64 @atomicrmw_umax_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i64_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB233_2
+; RV32I-ZALRSC-NEXT: .LBB233_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB233_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: li a4, 4
+; RV32I-ZALRSC-NEXT: li a5, 2
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB233_7
+; RV32I-ZALRSC-NEXT: .LBB233_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB233_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB233_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB233_5
+; RV32I-ZALRSC-NEXT: .LBB233_4: # in Loop: Header=BB233_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB233_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB233_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB233_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB233_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB233_1
+; RV32I-ZALRSC-NEXT: .LBB233_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_umax_i64_acq_rel:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
@@ -34600,6 +43185,21 @@ define i64 @atomicrmw_umax_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i64_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB233_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bgeu a3, a1, .LBB233_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB233_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB233_3: # in Loop: Header=BB233_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB233_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_umax_i64_acq_rel:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomaxu.d.aqrl a0, a1, (a0)
@@ -34668,6 +43268,60 @@ define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i64_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB234_2
+; RV32I-ZALRSC-NEXT: .LBB234_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB234_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: li a4, 5
+; RV32I-ZALRSC-NEXT: li a5, 5
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB234_7
+; RV32I-ZALRSC-NEXT: .LBB234_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB234_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB234_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB234_5
+; RV32I-ZALRSC-NEXT: .LBB234_4: # in Loop: Header=BB234_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB234_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB234_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB234_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB234_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB234_1
+; RV32I-ZALRSC-NEXT: .LBB234_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_umax_i64_seq_cst:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
@@ -34758,6 +43412,21 @@ define i64 @atomicrmw_umax_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i64_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB234_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aqrl a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bgeu a3, a1, .LBB234_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB234_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB234_3: # in Loop: Header=BB234_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB234_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_umax_i64_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amomaxu.d.aqrl a0, a1, (a0)
@@ -34826,6 +43495,60 @@ define i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB235_2
+; RV32I-ZALRSC-NEXT: .LBB235_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB235_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: li a4, 0
+; RV32I-ZALRSC-NEXT: li a5, 0
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB235_7
+; RV32I-ZALRSC-NEXT: .LBB235_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB235_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB235_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB235_5
+; RV32I-ZALRSC-NEXT: .LBB235_4: # in Loop: Header=BB235_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB235_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB235_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: beqz a0, .LBB235_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB235_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB235_1
+; RV32I-ZALRSC-NEXT: .LBB235_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_umin_i64_monotonic:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
@@ -34916,6 +43639,21 @@ define i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB235_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bgeu a1, a3, .LBB235_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB235_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB235_3: # in Loop: Header=BB235_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB235_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
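
The umax/umin variants are structurally identical to max/min; the only change is unsigned comparison: sltu replaces slt in the RV32 high-word compare, and bgeu replaces bge in the RV64 loop. With the Clang builtins, the unsigned operation falls out of the operand type (sketch, illustrative name):

#include <stdint.h>

/* Unsigned operands select `atomicrmw umin`, hence the bgeu loop and the
 * amominu.d form in the RV64IA blocks. */
uint64_t fetch_umin_relaxed(uint64_t *p, uint64_t b) {
    return __atomic_fetch_min(p, b, __ATOMIC_RELAXED);
}
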
; RV64IA-LABEL: atomicrmw_umin_i64_monotonic:
; RV64IA: # %bb.0:
; RV64IA-NEXT: amominu.d a0, a1, (a0)
@@ -34979,6 +43717,60 @@ define i64 @atomicrmw_umin_i64_acquire(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i64_acquire:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB236_2
+; RV32I-ZALRSC-NEXT: .LBB236_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB236_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: li a4, 2
+; RV32I-ZALRSC-NEXT: li a5, 2
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB236_7
+; RV32I-ZALRSC-NEXT: .LBB236_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB236_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB236_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB236_5
+; RV32I-ZALRSC-NEXT: .LBB236_4: # in Loop: Header=BB236_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB236_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB236_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: beqz a0, .LBB236_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB236_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB236_1
+; RV32I-ZALRSC-NEXT: .LBB236_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_umin_i64_acquire:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
@@ -35069,6 +43861,21 @@ define i64 @atomicrmw_umin_i64_acquire(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i64_acquire:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB236_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bgeu a1, a3, .LBB236_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB236_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB236_3: # in Loop: Header=BB236_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB236_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_umin_i64_acquire:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amominu.d.aq a0, a1, (a0)
@@ -35137,6 +43944,60 @@ define i64 @atomicrmw_umin_i64_release(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i64_release:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB237_2
+; RV32I-ZALRSC-NEXT: .LBB237_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB237_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: li a4, 3
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: li a5, 0
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB237_7
+; RV32I-ZALRSC-NEXT: .LBB237_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB237_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB237_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB237_5
+; RV32I-ZALRSC-NEXT: .LBB237_4: # in Loop: Header=BB237_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB237_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB237_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: beqz a0, .LBB237_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB237_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB237_1
+; RV32I-ZALRSC-NEXT: .LBB237_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_umin_i64_release:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
@@ -35227,6 +44088,21 @@ define i64 @atomicrmw_umin_i64_release(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i64_release:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB237_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bgeu a1, a3, .LBB237_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB237_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB237_3: # in Loop: Header=BB237_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB237_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_umin_i64_release:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amominu.d.rl a0, a1, (a0)
@@ -35295,6 +44171,60 @@ define i64 @atomicrmw_umin_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i64_acq_rel:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB238_2
+; RV32I-ZALRSC-NEXT: .LBB238_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB238_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: li a4, 4
+; RV32I-ZALRSC-NEXT: li a5, 2
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB238_7
+; RV32I-ZALRSC-NEXT: .LBB238_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB238_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB238_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB238_5
+; RV32I-ZALRSC-NEXT: .LBB238_4: # in Loop: Header=BB238_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB238_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB238_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: beqz a0, .LBB238_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB238_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB238_1
+; RV32I-ZALRSC-NEXT: .LBB238_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_umin_i64_acq_rel:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
@@ -35385,6 +44315,21 @@ define i64 @atomicrmw_umin_i64_acq_rel(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i64_acq_rel:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB238_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aq a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bgeu a1, a3, .LBB238_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB238_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB238_3: # in Loop: Header=BB238_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB238_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_umin_i64_acq_rel:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amominu.d.aqrl a0, a1, (a0)
@@ -35453,6 +44398,60 @@ define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i64_seq_cst:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB239_2
+; RV32I-ZALRSC-NEXT: .LBB239_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB239_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: li a4, 5
+; RV32I-ZALRSC-NEXT: li a5, 5
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB239_7
+; RV32I-ZALRSC-NEXT: .LBB239_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB239_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB239_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB239_5
+; RV32I-ZALRSC-NEXT: .LBB239_4: # in Loop: Header=BB239_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB239_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB239_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: beqz a0, .LBB239_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB239_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB239_1
+; RV32I-ZALRSC-NEXT: .LBB239_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
; RV32IA-LABEL: atomicrmw_umin_i64_seq_cst:
; RV32IA: # %bb.0:
; RV32IA-NEXT: addi sp, sp, -32
@@ -35543,6 +44542,21 @@ define i64 @atomicrmw_umin_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i64_seq_cst:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB239_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d.aqrl a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bgeu a1, a3, .LBB239_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB239_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB239_3: # in Loop: Header=BB239_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d.rl a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB239_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
+;
; RV64IA-WMO-LABEL: atomicrmw_umin_i64_seq_cst:
; RV64IA-WMO: # %bb.0:
; RV64IA-WMO-NEXT: amominu.d.aqrl a0, a1, (a0)
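Before the diff moves on to atomic-signext.ll, note the asymmetry the umin_i64 hunks above capture: Zalrsc gives RV64 a native lr.d/sc.d pair, so the i64 tests lower to a short inline retry loop whose ordering rides on the instructions themselves (.aq, .rl, .aqrl), while RV32 has no 64-bit LR/SC and falls back to a compare-exchange loop around the __atomic_compare_exchange_8 libcall, with the li a4/a5 pairs passing the C-ABI success/failure ordering arguments (3 = release, 4/2 = acq_rel/acquire, 5/5 = seq_cst). A minimal sketch, using only standard atomicrmw IR, of input that exercises both paths:

define i64 @sketch_umin_i64(ptr %p, i64 %v) nounwind {
  %old = atomicrmw umin ptr %p, i64 %v seq_cst
  ret i64 %old
}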
diff --git a/llvm/test/CodeGen/RISCV/atomic-signext.ll b/llvm/test/CodeGen/RISCV/atomic-signext.ll
index 7d29ac9..7fe5fa7 100644
--- a/llvm/test/CodeGen/RISCV/atomic-signext.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-signext.ll
@@ -5,12 +5,16 @@
; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-NOZACAS %s
; RUN: llc -mtriple=riscv32 -mattr=+a,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32IA,RV32IA-ZACAS %s
+; RUN: llc -mtriple=riscv32 -mattr=+zalrsc -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV32I-ZALRSC %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+a -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-NOZACAS %s
; RUN: llc -mtriple=riscv64 -mattr=+a,+zacas -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64IA,RV64IA-ZACAS %s
+; RUN: llc -mtriple=riscv64 -mattr=+zalrsc -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefixes=RV64I-ZALRSC %s
define signext i8 @atomic_load_i8_unordered(ptr %a) nounwind {
; RV32I-LABEL: atomic_load_i8_unordered:
@@ -30,6 +34,11 @@ define signext i8 @atomic_load_i8_unordered(ptr %a) nounwind {
; RV32IA-NEXT: lb a0, 0(a0)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_load_i8_unordered:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: lb a0, 0(a0)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomic_load_i8_unordered:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -46,6 +55,11 @@ define signext i8 @atomic_load_i8_unordered(ptr %a) nounwind {
; RV64IA: # %bb.0:
; RV64IA-NEXT: lb a0, 0(a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomic_load_i8_unordered:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: lb a0, 0(a0)
+; RV64I-ZALRSC-NEXT: ret
%1 = load atomic i8, ptr %a unordered, align 1
ret i8 %1
}
@@ -68,6 +82,11 @@ define signext i16 @atomic_load_i16_unordered(ptr %a) nounwind {
; RV32IA-NEXT: lh a0, 0(a0)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_load_i16_unordered:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: lh a0, 0(a0)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomic_load_i16_unordered:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -84,6 +103,11 @@ define signext i16 @atomic_load_i16_unordered(ptr %a) nounwind {
; RV64IA: # %bb.0:
; RV64IA-NEXT: lh a0, 0(a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomic_load_i16_unordered:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: lh a0, 0(a0)
+; RV64I-ZALRSC-NEXT: ret
%1 = load atomic i16, ptr %a unordered, align 2
ret i16 %1
}
@@ -104,6 +128,11 @@ define signext i32 @atomic_load_i32_unordered(ptr %a) nounwind {
; RV32IA-NEXT: lw a0, 0(a0)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomic_load_i32_unordered:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: lw a0, 0(a0)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomic_load_i32_unordered:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -119,6 +148,11 @@ define signext i32 @atomic_load_i32_unordered(ptr %a) nounwind {
; RV64IA: # %bb.0:
; RV64IA-NEXT: lw a0, 0(a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomic_load_i32_unordered:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: lw a0, 0(a0)
+; RV64I-ZALRSC-NEXT: ret
%1 = load atomic i32, ptr %a unordered, align 4
ret i32 %1
}
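All three unordered-load tests above collapse to a plain lb/lh/lw: unordered guarantees only that the access itself is atomic, so no fences or LR/SC are needed and the Zalrsc output matches the A-extension output exactly. A stronger ordering would change that; as a contrast (this function is a sketch, not part of the patch), an acquire load of the form below is expected to keep the lw but gain a trailing fence under RISC-V's weak memory ordering:

define i32 @sketch_load_acquire(ptr %p) nounwind {
  %v = load atomic i32, ptr %p acquire, align 4
  ret i32 %v
}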
@@ -159,6 +193,28 @@ define signext i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: srai a0, a0, 24
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV32I-ZALRSC-NEXT: mv a5, a1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB3_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 24
+; RV32I-ZALRSC-NEXT: srai a0, a0, 24
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_xchg_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -192,6 +248,28 @@ define signext i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: slli a0, a0, 56
; RV64IA-NEXT: srai a0, a0, 56
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV64I-ZALRSC-NEXT: mv a5, a1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB3_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 56
+; RV64I-ZALRSC-NEXT: srai a0, a0, 56
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw xchg ptr %a, i8 %b monotonic
ret i8 %1
}
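This xchg test establishes the core pattern every sub-word ZALRSC check below reuses. Zalrsc provides only word-sized lr.w/sc.w, so the lowering aligns the pointer down to its containing word (andi a2, a0, -4), turns the byte offset into a bit shift (slli a0, a0, 3), builds a lane mask (255 shifted into place), shifts the zero-extended operand likewise, and retries an LR/SC loop that splices the new lane into the old word. The splice is a two-XOR merge rather than an and/andn pair; a standalone sketch of the identity, with names of my choosing:

; merged takes new's bits where mask is 1 and keeps old's where it is 0,
; because (old ^ new) & mask selects exactly the differing bits in the lane.
define i32 @sketch_masked_merge(i32 %old, i32 %new, i32 %mask) {
  %diff   = xor i32 %old, %new
  %sel    = and i32 %diff, %mask
  %merged = xor i32 %old, %sel
  ret i32 %merged
}

The trailing srl/slli/srai triple in each test then extracts the pre-update lane and sign-extends it to satisfy the signext return.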
@@ -231,6 +309,28 @@ define signext i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: srai a0, a0, 24
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV32I-ZALRSC-NEXT: add a5, a4, a1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB4_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 24
+; RV32I-ZALRSC-NEXT: srai a0, a0, 24
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_add_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -264,6 +364,28 @@ define signext i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: slli a0, a0, 56
; RV64IA-NEXT: srai a0, a0, 56
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV64I-ZALRSC-NEXT: add a5, a4, a1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB4_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 56
+; RV64I-ZALRSC-NEXT: srai a0, a0, 56
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw add ptr %a, i8 %b monotonic
ret i8 %1
}
@@ -303,6 +425,28 @@ define signext i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: srai a0, a0, 24
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV32I-ZALRSC-NEXT: sub a5, a4, a1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB5_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 24
+; RV32I-ZALRSC-NEXT: srai a0, a0, 24
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_sub_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -336,6 +480,28 @@ define signext i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: slli a0, a0, 56
; RV64IA-NEXT: srai a0, a0, 56
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV64I-ZALRSC-NEXT: sub a5, a4, a1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB5_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 56
+; RV64I-ZALRSC-NEXT: srai a0, a0, 56
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw sub ptr %a, i8 %b monotonic
ret i8 %1
}
@@ -369,6 +535,27 @@ define signext i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: srai a0, a0, 24
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: not a3, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: or a1, a1, a3
+; RV32I-ZALRSC-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: and a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB6_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 24
+; RV32I-ZALRSC-NEXT: srai a0, a0, 24
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_and_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -396,6 +583,27 @@ define signext i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: slli a0, a0, 56
; RV64IA-NEXT: srai a0, a0, 56
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: not a3, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: or a1, a1, a3
+; RV64I-ZALRSC-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: and a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB6_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 56
+; RV64I-ZALRSC-NEXT: srai a0, a0, 56
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw and ptr %a, i8 %b monotonic
ret i8 %1
}
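atomicrmw and gets away with a shorter loop than xchg/add/sub: instead of merging after the fact, the lowering ORs the inverted lane mask into the shifted operand up front (the not a3, a3 / or a1, a1, a3 above), so the word-wide and inside the loop leaves the neighbouring bytes ANDed with all-ones, i.e. untouched. A sketch of that operand preparation, same illustrative naming as before:

define i32 @sketch_and_operand(i32 %b_shifted, i32 %mask) {
  %notmask = xor i32 %mask, -1          ; ones everywhere outside the lane
  %op      = or i32 %b_shifted, %notmask
  ret i32 %op
}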
@@ -436,6 +644,29 @@ define signext i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: srai a0, a0, 24
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV32I-ZALRSC-NEXT: and a5, a4, a1
+; RV32I-ZALRSC-NEXT: not a5, a5
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB7_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 24
+; RV32I-ZALRSC-NEXT: srai a0, a0, 24
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_nand_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -470,6 +701,29 @@ define signext i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: slli a0, a0, 56
; RV64IA-NEXT: srai a0, a0, 56
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV64I-ZALRSC-NEXT: and a5, a4, a1
+; RV64I-ZALRSC-NEXT: not a5, a5
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB7_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 56
+; RV64I-ZALRSC-NEXT: srai a0, a0, 56
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw nand ptr %a, i8 %b monotonic
ret i8 %1
}
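nand, by contrast, has no identity operand to exploit, so it keeps the full merge loop with one extra not between the and and the splice. Per iteration, the loop body above computes, in sketch form:

define i32 @sketch_nand_iteration(i32 %old, i32 %b_shifted, i32 %mask) {
  %t      = and i32 %old, %b_shifted
  %n      = xor i32 %t, -1              ; the extra "not a5, a5"
  %diff   = xor i32 %old, %n
  %sel    = and i32 %diff, %mask
  %merged = xor i32 %old, %sel          ; ~(old & b) in the lane, old elsewhere
  ret i32 %merged
}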
@@ -499,6 +753,23 @@ define signext i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: srai a0, a0, 24
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: or a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB8_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 24
+; RV32I-ZALRSC-NEXT: srai a0, a0, 24
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_or_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -522,6 +793,23 @@ define signext i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: slli a0, a0, 56
; RV64IA-NEXT: srai a0, a0, 56
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: or a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB8_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 56
+; RV64I-ZALRSC-NEXT: srai a0, a0, 56
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw or ptr %a, i8 %b monotonic
ret i8 %1
}
@@ -551,6 +839,23 @@ define signext i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: srai a0, a0, 24
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: xor a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB9_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 24
+; RV32I-ZALRSC-NEXT: srai a0, a0, 24
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_xor_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -574,6 +879,23 @@ define signext i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: slli a0, a0, 56
; RV64IA-NEXT: srai a0, a0, 56
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: xor a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB9_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 56
+; RV64I-ZALRSC-NEXT: srai a0, a0, 56
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw xor ptr %a, i8 %b monotonic
ret i8 %1
}
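or and xor are the cheapest cases: x | 0 = x and x ^ 0 = x, so once the zero-extended operand sits in its lane the word-wide operation is already lane-safe, and the loop needs neither a mask register nor a merge. The whole operand setup reduces to this sketch:

define i32 @sketch_or_xor_operand(i8 %b, i32 %shift) {
  %z  = zext i8 %b to i32      ; zext.b a1, a1 (an alias for andi with 255)
  %op = shl i32 %z, %shift     ; sll a1, a1, a0
  ret i32 %op
}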
@@ -653,6 +975,37 @@ define signext i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: srai a0, a0, 24
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: slli a1, a1, 24
+; RV32I-ZALRSC-NEXT: andi a4, a0, 24
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: srai a1, a1, 24
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: xori a4, a4, 24
+; RV32I-ZALRSC-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a7, a1, .LBB10_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB10_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB10_3: # in Loop: Header=BB10_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB10_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 24
+; RV32I-ZALRSC-NEXT: srai a0, a0, 24
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_max_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
@@ -726,6 +1079,37 @@ define signext i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: slli a0, a0, 56
; RV64IA-NEXT: srai a0, a0, 56
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: slli a1, a1, 56
+; RV64I-ZALRSC-NEXT: andi a4, a0, 24
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: srai a1, a1, 56
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: xori a4, a4, 56
+; RV64I-ZALRSC-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a7, a1, .LBB10_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB10_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB10_3: # in Loop: Header=BB10_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB10_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 56
+; RV64I-ZALRSC-NEXT: srai a0, a0, 56
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw max ptr %a, i8 %b monotonic
ret i8 %1
}
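The signed max/min loops add one more trick: the loaded lane must be sign-extended before the bge compare, and the in-register extension costs a shift pair whose amount is 24 minus the lane's bit offset. Because the offset is a multiple of 8 below 32, xori a4, a4, 24 computes that subtraction in one instruction. Both sides of the compare then carry the same 2^offset scaling, so the signed branch orders the original byte values correctly. A sketch of the re-extension, assuming %sh in {0, 8, 16, 24}:

define i32 @sketch_sext_lane(i32 %masked, i32 %sh) {
  %amt = xor i32 %sh, 24         ; equals 24 - %sh for these offsets
  %up  = shl i32 %masked, %amt   ; park the lane's sign bit at bit 31
  %dn  = ashr i32 %up, %amt      ; lane value * 2^%sh, sign bits above
  ret i32 %dn
}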
@@ -805,6 +1189,37 @@ define signext i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: srai a0, a0, 24
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: slli a1, a1, 24
+; RV32I-ZALRSC-NEXT: andi a4, a0, 24
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: srai a1, a1, 24
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: xori a4, a4, 24
+; RV32I-ZALRSC-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a1, a7, .LBB11_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB11_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB11_3: # in Loop: Header=BB11_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB11_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 24
+; RV32I-ZALRSC-NEXT: srai a0, a0, 24
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_min_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
@@ -878,6 +1293,37 @@ define signext i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: slli a0, a0, 56
; RV64IA-NEXT: srai a0, a0, 56
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: slli a1, a1, 56
+; RV64I-ZALRSC-NEXT: andi a4, a0, 24
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: srai a1, a1, 56
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: xori a4, a4, 56
+; RV64I-ZALRSC-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a1, a7, .LBB11_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB11_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB11_3: # in Loop: Header=BB11_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB11_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 56
+; RV64I-ZALRSC-NEXT: srai a0, a0, 56
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw min ptr %a, i8 %b monotonic
ret i8 %1
}
@@ -950,6 +1396,32 @@ define signext i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: srai a0, a0, 24
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a4, a3
+; RV32I-ZALRSC-NEXT: mv a5, a4
+; RV32I-ZALRSC-NEXT: bgeu a6, a1, .LBB12_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB12_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB12_3: # in Loop: Header=BB12_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB12_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 24
+; RV32I-ZALRSC-NEXT: srai a0, a0, 24
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_umax_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
@@ -1016,6 +1488,32 @@ define signext i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: slli a0, a0, 56
; RV64IA-NEXT: srai a0, a0, 56
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a4, a3
+; RV64I-ZALRSC-NEXT: mv a5, a4
+; RV64I-ZALRSC-NEXT: bgeu a6, a1, .LBB12_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB12_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB12_3: # in Loop: Header=BB12_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB12_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 56
+; RV64I-ZALRSC-NEXT: srai a0, a0, 56
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw umax ptr %a, i8 %b monotonic
ret i8 %1
}
@@ -1088,6 +1586,32 @@ define signext i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: srai a0, a0, 24
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i8_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a3, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a4, a3
+; RV32I-ZALRSC-NEXT: mv a5, a4
+; RV32I-ZALRSC-NEXT: bgeu a1, a6, .LBB13_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB13_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a4, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a3
+; RV32I-ZALRSC-NEXT: xor a5, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB13_3: # in Loop: Header=BB13_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB13_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 24
+; RV32I-ZALRSC-NEXT: srai a0, a0, 24
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_umin_i8_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
@@ -1154,6 +1678,32 @@ define signext i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: slli a0, a0, 56
; RV64IA-NEXT: srai a0, a0, 56
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i8_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a3, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a4, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a4, a3
+; RV64I-ZALRSC-NEXT: mv a5, a4
+; RV64I-ZALRSC-NEXT: bgeu a1, a6, .LBB13_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB13_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a4, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a3
+; RV64I-ZALRSC-NEXT: xor a5, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB13_3: # in Loop: Header=BB13_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB13_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 56
+; RV64I-ZALRSC-NEXT: srai a0, a0, 56
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw umin ptr %a, i8 %b monotonic
ret i8 %1
}
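The unsigned variants skip the re-extension entirely: the masked lane and the shifted zero-extended operand are both the original byte values scaled by 2^offset with zeros elsewhere, so a single unsigned word compare orders them directly, and the merge runs only when the stored value has to change. In sketch form, the umax keep-or-replace decision is:

define i1 @sketch_umax_keep(i32 %old, i32 %mask, i32 %b_shifted) {
  %lane = and i32 %old, %mask               ; and a6, a4, a3
  %keep = icmp uge i32 %lane, %b_shifted    ; bgeu a6, a1 skips the merge
  ret i1 %keep
}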
@@ -1194,6 +1744,29 @@ define signext i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32IA-NEXT: srai a0, a0, 16
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB14_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: mv a5, a1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB14_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 16
+; RV32I-ZALRSC-NEXT: srai a0, a0, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_xchg_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -1228,6 +1801,29 @@ define signext i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64IA-NEXT: slli a0, a0, 48
; RV64IA-NEXT: srai a0, a0, 48
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB14_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: mv a5, a1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB14_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 48
+; RV64I-ZALRSC-NEXT: srai a0, a0, 48
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw xchg ptr %a, i16 %b monotonic
ret i16 %1
}
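From here the i16 tests repeat the i8 shapes with two mechanical substitutions: the final sign extension uses 16/48-bit shift pairs instead of 24/56-bit ones, and the lane mask 0xffff is built with lui a3, 16 / addi a3, a3, -1, since andi's 12-bit sign-extended immediate cannot encode it (the byte mask 255 fits, which is why zext.b stayed a single instruction above). A sketch of the mask path:

define i32 @sketch_lane_mask_i16(i32 %sh) {
  %m = shl i32 65535, %sh      ; sll a4, a3, a0 once lui/addi materialise 65535
  ret i32 %m
}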
@@ -1268,6 +1864,29 @@ define signext i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32IA-NEXT: srai a0, a0, 16
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB15_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: add a5, a3, a1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB15_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 16
+; RV32I-ZALRSC-NEXT: srai a0, a0, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_add_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -1302,6 +1921,29 @@ define signext i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64IA-NEXT: slli a0, a0, 48
; RV64IA-NEXT: srai a0, a0, 48
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB15_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: add a5, a3, a1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB15_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 48
+; RV64I-ZALRSC-NEXT: srai a0, a0, 48
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw add ptr %a, i16 %b monotonic
ret i16 %1
}
@@ -1342,6 +1984,29 @@ define signext i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32IA-NEXT: srai a0, a0, 16
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB16_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: sub a5, a3, a1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB16_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 16
+; RV32I-ZALRSC-NEXT: srai a0, a0, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_sub_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -1376,6 +2041,29 @@ define signext i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64IA-NEXT: slli a0, a0, 48
; RV64IA-NEXT: srai a0, a0, 48
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB16_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: sub a5, a3, a1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB16_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 48
+; RV64I-ZALRSC-NEXT: srai a0, a0, 48
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw sub ptr %a, i16 %b monotonic
ret i16 %1
}
@@ -1410,6 +2098,28 @@ define signext i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32IA-NEXT: srai a0, a0, 16
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: not a3, a4
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: or a1, a1, a3
+; RV32I-ZALRSC-NEXT: .LBB17_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: and a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB17_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 16
+; RV32I-ZALRSC-NEXT: srai a0, a0, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_and_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -1438,6 +2148,28 @@ define signext i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64IA-NEXT: slli a0, a0, 48
; RV64IA-NEXT: srai a0, a0, 48
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: not a3, a4
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: or a1, a1, a3
+; RV64I-ZALRSC-NEXT: .LBB17_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: and a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB17_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 48
+; RV64I-ZALRSC-NEXT: srai a0, a0, 48
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw and ptr %a, i16 %b monotonic
ret i16 %1
}
@@ -1479,6 +2211,30 @@ define signext i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32IA-NEXT: srai a0, a0, 16
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB18_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: and a5, a3, a1
+; RV32I-ZALRSC-NEXT: not a5, a5
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB18_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 16
+; RV32I-ZALRSC-NEXT: srai a0, a0, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_nand_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -1514,6 +2270,30 @@ define signext i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64IA-NEXT: slli a0, a0, 48
; RV64IA-NEXT: srai a0, a0, 48
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB18_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: and a5, a3, a1
+; RV64I-ZALRSC-NEXT: not a5, a5
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB18_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 48
+; RV64I-ZALRSC-NEXT: srai a0, a0, 48
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw nand ptr %a, i16 %b monotonic
ret i16 %1
}
@@ -1544,6 +2324,24 @@ define signext i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32IA-NEXT: srai a0, a0, 16
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: srli a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB19_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: or a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB19_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 16
+; RV32I-ZALRSC-NEXT: srai a0, a0, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_or_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -1568,6 +2366,24 @@ define signext i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64IA-NEXT: slli a0, a0, 48
; RV64IA-NEXT: srai a0, a0, 48
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: srli a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB19_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: or a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB19_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 48
+; RV64I-ZALRSC-NEXT: srai a0, a0, 48
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw or ptr %a, i16 %b monotonic
ret i16 %1
}
@@ -1598,6 +2414,24 @@ define signext i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32IA-NEXT: srai a0, a0, 16
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: srli a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB20_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: xor a4, a3, a1
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB20_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 16
+; RV32I-ZALRSC-NEXT: srai a0, a0, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_xor_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -1622,6 +2456,24 @@ define signext i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64IA-NEXT: slli a0, a0, 48
; RV64IA-NEXT: srai a0, a0, 48
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: srli a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB20_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: xor a4, a3, a1
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a2)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB20_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 48
+; RV64I-ZALRSC-NEXT: srai a0, a0, 48
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw xor ptr %a, i16 %b monotonic
ret i16 %1
}
@@ -1703,6 +2555,39 @@ define signext i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32IA-NEXT: srai a0, a0, 16
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: li a4, 16
+; RV32I-ZALRSC-NEXT: andi a5, a0, 24
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: srai a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: sub a4, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a7, a1, .LBB21_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB21_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB21_3: # in Loop: Header=BB21_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB21_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 16
+; RV32I-ZALRSC-NEXT: srai a0, a0, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_max_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
@@ -1778,6 +2663,39 @@ define signext i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64IA-NEXT: slli a0, a0, 48
; RV64IA-NEXT: srai a0, a0, 48
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: li a4, 48
+; RV64I-ZALRSC-NEXT: andi a5, a0, 24
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: srai a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: sub a4, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a7, a1, .LBB21_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB21_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB21_3: # in Loop: Header=BB21_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB21_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 48
+; RV64I-ZALRSC-NEXT: srai a0, a0, 48
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw max ptr %a, i16 %b monotonic
ret i16 %1
}
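One halfword-specific detail in the signed max/min: the re-extension amount is now 16 minus the lane offset (48 on RV64, where lr.w still loads a 32-bit container into a 64-bit register), and it is computed with an explicit li/sub pair rather than the xori shortcut used for bytes. The effective computation, sketched for the RV32 case with %sh in {0, 16}:

define i32 @sketch_sext_lane_i16(i32 %masked, i32 %sh) {
  %amt = sub i32 16, %sh       ; li a4, 16 ; sub a4, a4, a5
  %up  = shl i32 %masked, %amt
  %dn  = ashr i32 %up, %amt    ; halfword lane, sign-extended, still * 2^%sh
  ret i32 %dn
}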
@@ -1859,6 +2777,39 @@ define signext i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32IA-NEXT: srai a0, a0, 16
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: slli a1, a1, 16
+; RV32I-ZALRSC-NEXT: li a4, 16
+; RV32I-ZALRSC-NEXT: andi a5, a0, 24
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: srai a1, a1, 16
+; RV32I-ZALRSC-NEXT: sll a3, a3, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: sub a4, a4, a5
+; RV32I-ZALRSC-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV32I-ZALRSC-NEXT: and a7, a5, a3
+; RV32I-ZALRSC-NEXT: mv a6, a5
+; RV32I-ZALRSC-NEXT: sll a7, a7, a4
+; RV32I-ZALRSC-NEXT: sra a7, a7, a4
+; RV32I-ZALRSC-NEXT: bge a1, a7, .LBB22_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB22_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a1
+; RV32I-ZALRSC-NEXT: and a6, a6, a3
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: .LBB22_3: # in Loop: Header=BB22_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB22_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 16
+; RV32I-ZALRSC-NEXT: srai a0, a0, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_min_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
@@ -1934,6 +2885,39 @@ define signext i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64IA-NEXT: slli a0, a0, 48
; RV64IA-NEXT: srai a0, a0, 48
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: slli a1, a1, 48
+; RV64I-ZALRSC-NEXT: li a4, 48
+; RV64I-ZALRSC-NEXT: andi a5, a0, 24
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: srai a1, a1, 48
+; RV64I-ZALRSC-NEXT: sllw a3, a3, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: sub a4, a4, a5
+; RV64I-ZALRSC-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a5, (a2)
+; RV64I-ZALRSC-NEXT: and a7, a5, a3
+; RV64I-ZALRSC-NEXT: mv a6, a5
+; RV64I-ZALRSC-NEXT: sll a7, a7, a4
+; RV64I-ZALRSC-NEXT: sra a7, a7, a4
+; RV64I-ZALRSC-NEXT: bge a1, a7, .LBB22_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB22_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a1
+; RV64I-ZALRSC-NEXT: and a6, a6, a3
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: .LBB22_3: # in Loop: Header=BB22_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a6, a6, (a2)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB22_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 48
+; RV64I-ZALRSC-NEXT: srai a0, a0, 48
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw min ptr %a, i16 %b monotonic
ret i16 %1
}
@@ -2011,6 +2995,33 @@ define signext i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32IA-NEXT: srai a0, a0, 16
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB23_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a3, a4
+; RV32I-ZALRSC-NEXT: mv a5, a3
+; RV32I-ZALRSC-NEXT: bgeu a6, a1, .LBB23_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB23_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: .LBB23_3: # in Loop: Header=BB23_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB23_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 16
+; RV32I-ZALRSC-NEXT: srai a0, a0, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_umax_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
@@ -2082,6 +3093,33 @@ define signext i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64IA-NEXT: slli a0, a0, 48
; RV64IA-NEXT: srai a0, a0, 48
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB23_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a3, a4
+; RV64I-ZALRSC-NEXT: mv a5, a3
+; RV64I-ZALRSC-NEXT: bgeu a6, a1, .LBB23_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB23_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: .LBB23_3: # in Loop: Header=BB23_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB23_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 48
+; RV64I-ZALRSC-NEXT: srai a0, a0, 48
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw umax ptr %a, i16 %b monotonic
ret i16 %1
}
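; Note: the unsigned i16 variants skip the in-register sign extension: the
; lane mask already zero-extends both the loaded field and the shifted
; operand, so a single bgeu on the masked word decides the compare (operand
; order swapped between umax and umin).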
@@ -2159,6 +3197,33 @@ define signext i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV32IA-NEXT: srai a0, a0, 16
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i16_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a3, 16
+; RV32I-ZALRSC-NEXT: addi a3, a3, -1
+; RV32I-ZALRSC-NEXT: sll a4, a3, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a3
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: .LBB24_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV32I-ZALRSC-NEXT: and a6, a3, a4
+; RV32I-ZALRSC-NEXT: mv a5, a3
+; RV32I-ZALRSC-NEXT: bgeu a1, a6, .LBB24_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB24_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a3, a1
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a3, a5
+; RV32I-ZALRSC-NEXT: .LBB24_3: # in Loop: Header=BB24_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB24_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: srl a0, a3, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 16
+; RV32I-ZALRSC-NEXT: srai a0, a0, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_umin_i16_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
@@ -2230,6 +3295,33 @@ define signext i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
; RV64IA-NEXT: slli a0, a0, 48
; RV64IA-NEXT: srai a0, a0, 48
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i16_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a3, 16
+; RV64I-ZALRSC-NEXT: addi a3, a3, -1
+; RV64I-ZALRSC-NEXT: sllw a4, a3, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a3
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: .LBB24_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a2)
+; RV64I-ZALRSC-NEXT: and a6, a3, a4
+; RV64I-ZALRSC-NEXT: mv a5, a3
+; RV64I-ZALRSC-NEXT: bgeu a1, a6, .LBB24_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB24_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a3, a1
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a3, a5
+; RV64I-ZALRSC-NEXT: .LBB24_3: # in Loop: Header=BB24_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a2)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB24_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: srlw a0, a3, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 48
+; RV64I-ZALRSC-NEXT: srai a0, a0, 48
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw umin ptr %a, i16 %b monotonic
ret i16 %1
}
@@ -2250,6 +3342,17 @@ define signext i32 @atomicrmw_xchg_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32IA-NEXT: amoswap.w a0, a1, (a0)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB25_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB25_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_xchg_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -2265,6 +3368,17 @@ define signext i32 @atomicrmw_xchg_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoswap.w a0, a1, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB25_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB25_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw xchg ptr %a, i32 %b monotonic
ret i32 %1
}
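; Note: word-sized operations use the minimal Zalrsc pattern: lr.w loads the
; old value, the new value is computed (for xchg, simply copied), and sc.w
; writes it back, leaving 0 in the destination register on success; bnez on
; the sc.w result retries until the reservation holds.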
@@ -2285,6 +3399,17 @@ define signext i32 @atomicrmw_add_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32IA-NEXT: amoadd.w a0, a1, (a0)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB26_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: add a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB26_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_add_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -2300,6 +3425,17 @@ define signext i32 @atomicrmw_add_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoadd.w a0, a1, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB26_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV64I-ZALRSC-NEXT: add a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB26_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw add ptr %a, i32 %b monotonic
ret i32 %1
}
@@ -2321,6 +3457,17 @@ define signext i32 @atomicrmw_sub_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32IA-NEXT: amoadd.w a0, a1, (a0)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB27_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: sub a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB27_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_sub_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -2337,6 +3484,17 @@ define signext i32 @atomicrmw_sub_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64IA-NEXT: neg a1, a1
; RV64IA-NEXT: amoadd.w a0, a1, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB27_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV64I-ZALRSC-NEXT: sub a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB27_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw sub ptr %a, i32 %b monotonic
ret i32 %1
}
@@ -2357,6 +3515,17 @@ define signext i32 @atomicrmw_and_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32IA-NEXT: amoand.w a0, a1, (a0)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB28_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: and a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB28_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_and_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -2372,6 +3541,17 @@ define signext i32 @atomicrmw_and_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoand.w a0, a1, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB28_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB28_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw and ptr %a, i32 %b monotonic
ret i32 %1
}
@@ -2413,6 +3593,18 @@ define signext i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32IA-ZACAS-NEXT: # %bb.2: # %atomicrmw.end
; RV32IA-ZACAS-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB29_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: and a3, a2, a1
+; RV32I-ZALRSC-NEXT: not a3, a3
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB29_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_nand_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -2449,6 +3641,18 @@ define signext i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64IA-ZACAS-NEXT: bne a0, a3, .LBB29_1
; RV64IA-ZACAS-NEXT: # %bb.2: # %atomicrmw.end
; RV64IA-ZACAS-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB29_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: not a3, a3
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB29_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw nand ptr %a, i32 %b monotonic
ret i32 %1
}
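; Note: no amonand instruction exists even in the full A extension, so the
; RV32IA/RV64IA configurations also loop here; the Zalrsc expansion merely
; adds a not after the and inside the LR/SC body.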
@@ -2469,6 +3673,17 @@ define signext i32 @atomicrmw_or_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32IA-NEXT: amoor.w a0, a1, (a0)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB30_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: or a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB30_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_or_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -2484,6 +3699,17 @@ define signext i32 @atomicrmw_or_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoor.w a0, a1, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB30_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV64I-ZALRSC-NEXT: or a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB30_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw or ptr %a, i32 %b monotonic
ret i32 %1
}
@@ -2504,6 +3730,17 @@ define signext i32 @atomicrmw_xor_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32IA-NEXT: amoxor.w a0, a1, (a0)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB31_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: xor a3, a2, a1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB31_1
+; RV32I-ZALRSC-NEXT: # %bb.2:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_xor_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -2519,6 +3756,17 @@ define signext i32 @atomicrmw_xor_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoxor.w a0, a1, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB31_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV64I-ZALRSC-NEXT: xor a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB31_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw xor ptr %a, i32 %b monotonic
ret i32 %1
}
@@ -2565,6 +3813,21 @@ define signext i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32IA-NEXT: amomax.w a0, a1, (a0)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB32_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bge a3, a1, .LBB32_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB32_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB32_3: # in Loop: Header=BB32_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB32_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_max_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
@@ -2608,6 +3871,22 @@ define signext i32 @atomicrmw_max_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64IA: # %bb.0:
; RV64IA-NEXT: amomax.w a0, a1, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB32_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bge a3, a2, .LBB32_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB32_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB32_3: # in Loop: Header=BB32_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB32_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw max ptr %a, i32 %b monotonic
ret i32 %1
}
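; Note: word-sized min/max keeps the loaded value in a scratch register and
; conditionally overwrites it with the operand before the sc.w. On RV64 the
; incoming i32 is first sign-extended with sext.w so the 64-bit signed
; compare lines up with the lr.w result, which is already sign-extended.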
@@ -2654,6 +3933,21 @@ define signext i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32IA-NEXT: amomin.w a0, a1, (a0)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB33_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bge a1, a3, .LBB33_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB33_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB33_3: # in Loop: Header=BB33_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB33_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_min_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
@@ -2697,6 +3991,22 @@ define signext i32 @atomicrmw_min_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64IA: # %bb.0:
; RV64IA-NEXT: amomin.w a0, a1, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB33_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bge a2, a3, .LBB33_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB33_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB33_3: # in Loop: Header=BB33_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB33_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw min ptr %a, i32 %b monotonic
ret i32 %1
}
@@ -2743,6 +4053,21 @@ define signext i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32IA-NEXT: amomaxu.w a0, a1, (a0)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB34_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bgeu a3, a1, .LBB34_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB34_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB34_3: # in Loop: Header=BB34_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB34_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_umax_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
@@ -2786,6 +4111,22 @@ define signext i32 @atomicrmw_umax_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64IA: # %bb.0:
; RV64IA-NEXT: amomaxu.w a0, a1, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB34_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bgeu a3, a2, .LBB34_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB34_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB34_3: # in Loop: Header=BB34_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB34_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw umax ptr %a, i32 %b monotonic
ret i32 %1
}
@@ -2832,6 +4173,21 @@ define signext i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV32IA-NEXT: amominu.w a0, a1, (a0)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i32_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB35_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a0)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: bgeu a1, a3, .LBB35_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB35_1 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a1
+; RV32I-ZALRSC-NEXT: .LBB35_3: # in Loop: Header=BB35_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB35_1
+; RV32I-ZALRSC-NEXT: # %bb.4:
+; RV32I-ZALRSC-NEXT: mv a0, a2
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_umin_i32_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -48
@@ -2875,6 +4231,22 @@ define signext i32 @atomicrmw_umin_i32_monotonic(ptr %a, i32 %b) nounwind {
; RV64IA: # %bb.0:
; RV64IA-NEXT: amominu.w a0, a1, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i32_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: sext.w a2, a1
+; RV64I-ZALRSC-NEXT: .LBB35_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bgeu a2, a3, .LBB35_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB35_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB35_3: # in Loop: Header=BB35_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB35_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a1
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw umin ptr %a, i32 %b monotonic
ret i32 %1
}
@@ -2900,6 +4272,16 @@ define signext i64 @atomicrmw_xchg_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 0
+; RV32I-ZALRSC-NEXT: call __atomic_exchange_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_xchg_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -2914,6 +4296,17 @@ define signext i64 @atomicrmw_xchg_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoswap.d a0, a1, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB36_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB36_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw xchg ptr %a, i64 %b monotonic
ret i64 %1
}
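; Note: RV32 provides no 64-bit LR/SC, so i64 atomicrmw on RV32I-ZALRSC
; lowers to the matching libatomic call (__atomic_exchange_8 here); RV64
; keeps the i32 loop shape with lr.d/sc.d substituted for lr.w/sc.w.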
@@ -2939,6 +4332,16 @@ define signext i64 @atomicrmw_add_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 0
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_add_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_add_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -2953,6 +4356,17 @@ define signext i64 @atomicrmw_add_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoadd.d a0, a1, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB37_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: add a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB37_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw add ptr %a, i64 %b monotonic
ret i64 %1
}
@@ -2978,6 +4392,16 @@ define signext i64 @atomicrmw_sub_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 0
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_sub_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_sub_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -2993,6 +4417,17 @@ define signext i64 @atomicrmw_sub_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64IA-NEXT: neg a1, a1
; RV64IA-NEXT: amoadd.d a0, a1, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB38_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: sub a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB38_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw sub ptr %a, i64 %b monotonic
ret i64 %1
}
@@ -3018,6 +4453,16 @@ define signext i64 @atomicrmw_and_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 0
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_and_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_and_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -3032,6 +4477,17 @@ define signext i64 @atomicrmw_and_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoand.d a0, a1, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB39_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB39_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw and ptr %a, i64 %b monotonic
ret i64 %1
}
@@ -3057,6 +4513,16 @@ define signext i64 @atomicrmw_nand_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 0
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_nand_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_nand_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -3092,6 +4558,18 @@ define signext i64 @atomicrmw_nand_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64IA-ZACAS-NEXT: bne a0, a3, .LBB40_1
; RV64IA-ZACAS-NEXT: # %bb.2: # %atomicrmw.end
; RV64IA-ZACAS-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB40_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a2, a1
+; RV64I-ZALRSC-NEXT: not a3, a3
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB40_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw nand ptr %a, i64 %b monotonic
ret i64 %1
}
@@ -3117,6 +4595,16 @@ define signext i64 @atomicrmw_or_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 0
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_or_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_or_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -3131,6 +4619,17 @@ define signext i64 @atomicrmw_or_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoor.d a0, a1, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB41_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: or a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB41_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw or ptr %a, i64 %b monotonic
ret i64 %1
}
@@ -3156,6 +4655,16 @@ define signext i64 @atomicrmw_xor_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, 16
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -16
+; RV32I-ZALRSC-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: li a3, 0
+; RV32I-ZALRSC-NEXT: call __atomic_fetch_xor_8
+; RV32I-ZALRSC-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_xor_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -3170,6 +4679,17 @@ define signext i64 @atomicrmw_xor_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64IA: # %bb.0:
; RV64IA-NEXT: amoxor.d a0, a1, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB42_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: xor a3, a2, a1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB42_1
+; RV64I-ZALRSC-NEXT: # %bb.2:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw xor ptr %a, i64 %b monotonic
ret i64 %1
}
@@ -3283,6 +4803,60 @@ define signext i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, 32
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB43_2
+; RV32I-ZALRSC-NEXT: .LBB43_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB43_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: li a4, 0
+; RV32I-ZALRSC-NEXT: li a5, 0
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB43_7
+; RV32I-ZALRSC-NEXT: .LBB43_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB43_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB43_2 Depth=1
+; RV32I-ZALRSC-NEXT: slt a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB43_5
+; RV32I-ZALRSC-NEXT: .LBB43_4: # in Loop: Header=BB43_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB43_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB43_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB43_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB43_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB43_1
+; RV32I-ZALRSC-NEXT: .LBB43_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_max_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
@@ -3323,6 +4897,21 @@ define signext i64 @atomicrmw_max_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64IA: # %bb.0:
; RV64IA-NEXT: amomax.d a0, a1, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB43_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bge a3, a1, .LBB43_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB43_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB43_3: # in Loop: Header=BB43_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB43_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw max ptr %a, i64 %b monotonic
ret i64 %1
}
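; Note: there is no fetch-min/max libcall to fall back on, so on RV32 the
; i64 min/max family becomes a compare-and-swap loop around
; __atomic_compare_exchange_8: the 64-bit compare is split into a high-word
; slt/sltu plus a low-word sltu when the high words are equal, and the loop
; repeats until the CAS succeeds.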
@@ -3436,6 +5025,60 @@ define signext i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, 32
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB44_2
+; RV32I-ZALRSC-NEXT: .LBB44_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB44_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: li a4, 0
+; RV32I-ZALRSC-NEXT: li a5, 0
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB44_7
+; RV32I-ZALRSC-NEXT: .LBB44_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB44_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB44_2 Depth=1
+; RV32I-ZALRSC-NEXT: slt a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB44_5
+; RV32I-ZALRSC-NEXT: .LBB44_4: # in Loop: Header=BB44_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB44_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB44_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: beqz a0, .LBB44_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB44_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB44_1
+; RV32I-ZALRSC-NEXT: .LBB44_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_min_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
@@ -3476,6 +5119,21 @@ define signext i64 @atomicrmw_min_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64IA: # %bb.0:
; RV64IA-NEXT: amomin.d a0, a1, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB44_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bge a1, a3, .LBB44_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB44_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB44_3: # in Loop: Header=BB44_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB44_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw min ptr %a, i64 %b monotonic
ret i64 %1
}
@@ -3589,6 +5247,60 @@ define signext i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, 32
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB45_2
+; RV32I-ZALRSC-NEXT: .LBB45_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB45_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: li a4, 0
+; RV32I-ZALRSC-NEXT: li a5, 0
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB45_7
+; RV32I-ZALRSC-NEXT: .LBB45_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB45_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB45_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB45_5
+; RV32I-ZALRSC-NEXT: .LBB45_4: # in Loop: Header=BB45_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB45_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB45_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB45_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB45_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB45_1
+; RV32I-ZALRSC-NEXT: .LBB45_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_umax_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
@@ -3629,6 +5341,21 @@ define signext i64 @atomicrmw_umax_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64IA: # %bb.0:
; RV64IA-NEXT: amomaxu.d a0, a1, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB45_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bgeu a3, a1, .LBB45_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB45_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB45_3: # in Loop: Header=BB45_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB45_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw umax ptr %a, i64 %b monotonic
ret i64 %1
}
@@ -3742,6 +5469,60 @@ define signext i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV32IA-NEXT: addi sp, sp, 32
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i64_monotonic:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: addi sp, sp, -32
+; RV32I-ZALRSC-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
+; RV32I-ZALRSC-NEXT: mv s0, a2
+; RV32I-ZALRSC-NEXT: mv s1, a0
+; RV32I-ZALRSC-NEXT: lw a4, 0(a0)
+; RV32I-ZALRSC-NEXT: lw a5, 4(a0)
+; RV32I-ZALRSC-NEXT: mv s2, a1
+; RV32I-ZALRSC-NEXT: j .LBB46_2
+; RV32I-ZALRSC-NEXT: .LBB46_1: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB46_2 Depth=1
+; RV32I-ZALRSC-NEXT: sw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: sw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: addi a1, sp, 8
+; RV32I-ZALRSC-NEXT: mv a0, s1
+; RV32I-ZALRSC-NEXT: li a4, 0
+; RV32I-ZALRSC-NEXT: li a5, 0
+; RV32I-ZALRSC-NEXT: call __atomic_compare_exchange_8
+; RV32I-ZALRSC-NEXT: lw a4, 8(sp)
+; RV32I-ZALRSC-NEXT: lw a5, 12(sp)
+; RV32I-ZALRSC-NEXT: bnez a0, .LBB46_7
+; RV32I-ZALRSC-NEXT: .LBB46_2: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: beq a5, s0, .LBB46_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB46_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s0, a5
+; RV32I-ZALRSC-NEXT: j .LBB46_5
+; RV32I-ZALRSC-NEXT: .LBB46_4: # in Loop: Header=BB46_2 Depth=1
+; RV32I-ZALRSC-NEXT: sltu a0, s2, a4
+; RV32I-ZALRSC-NEXT: .LBB46_5: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB46_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, a4
+; RV32I-ZALRSC-NEXT: mv a3, a5
+; RV32I-ZALRSC-NEXT: beqz a0, .LBB46_1
+; RV32I-ZALRSC-NEXT: # %bb.6: # %atomicrmw.start
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB46_2 Depth=1
+; RV32I-ZALRSC-NEXT: mv a2, s2
+; RV32I-ZALRSC-NEXT: mv a3, s0
+; RV32I-ZALRSC-NEXT: j .LBB46_1
+; RV32I-ZALRSC-NEXT: .LBB46_7: # %atomicrmw.end
+; RV32I-ZALRSC-NEXT: mv a0, a4
+; RV32I-ZALRSC-NEXT: mv a1, a5
+; RV32I-ZALRSC-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
+; RV32I-ZALRSC-NEXT: addi sp, sp, 32
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_umin_i64_monotonic:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
@@ -3782,6 +5563,21 @@ define signext i64 @atomicrmw_umin_i64_monotonic(ptr %a, i64 %b) nounwind {
; RV64IA: # %bb.0:
; RV64IA-NEXT: amominu.d a0, a1, (a0)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i64_monotonic:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB46_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.d a2, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: bgeu a1, a3, .LBB46_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB46_1 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: .LBB46_3: # in Loop: Header=BB46_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.d a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB46_1
+; RV64I-ZALRSC-NEXT: # %bb.4:
+; RV64I-ZALRSC-NEXT: mv a0, a2
+; RV64I-ZALRSC-NEXT: ret
%1 = atomicrmw umin ptr %a, i64 %b monotonic
ret i64 %1
}
@@ -3827,6 +5623,32 @@ define signext i8 @cmpxchg_i8_monotonic_monotonic_val0(ptr %ptr, i8 signext %cmp
; RV32IA-NEXT: srai a0, a0, 24
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: cmpxchg_i8_monotonic_monotonic_val0:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a3, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a4, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: zext.b a2, a2
+; RV32I-ZALRSC-NEXT: sll a4, a4, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: sll a2, a2, a0
+; RV32I-ZALRSC-NEXT: .LBB47_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a5, (a3)
+; RV32I-ZALRSC-NEXT: and a6, a5, a4
+; RV32I-ZALRSC-NEXT: bne a6, a1, .LBB47_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB47_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a5, a2
+; RV32I-ZALRSC-NEXT: and a6, a6, a4
+; RV32I-ZALRSC-NEXT: xor a6, a5, a6
+; RV32I-ZALRSC-NEXT: sc.w a6, a6, (a3)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB47_1
+; RV32I-ZALRSC-NEXT: .LBB47_3:
+; RV32I-ZALRSC-NEXT: srl a0, a5, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 24
+; RV32I-ZALRSC-NEXT: srai a0, a0, 24
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: cmpxchg_i8_monotonic_monotonic_val0:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -3866,6 +5688,32 @@ define signext i8 @cmpxchg_i8_monotonic_monotonic_val0(ptr %ptr, i8 signext %cmp
; RV64IA-NEXT: slli a0, a0, 56
; RV64IA-NEXT: srai a0, a0, 56
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: cmpxchg_i8_monotonic_monotonic_val0:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a3, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a4, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: zext.b a2, a2
+; RV64I-ZALRSC-NEXT: sllw a4, a4, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: sllw a2, a2, a0
+; RV64I-ZALRSC-NEXT: .LBB47_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a5, (a3)
+; RV64I-ZALRSC-NEXT: and a6, a5, a4
+; RV64I-ZALRSC-NEXT: bne a6, a1, .LBB47_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB47_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a5, a2
+; RV64I-ZALRSC-NEXT: and a6, a6, a4
+; RV64I-ZALRSC-NEXT: xor a6, a5, a6
+; RV64I-ZALRSC-NEXT: sc.w a6, a6, (a3)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB47_1
+; RV64I-ZALRSC-NEXT: .LBB47_3:
+; RV64I-ZALRSC-NEXT: srlw a0, a5, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 56
+; RV64I-ZALRSC-NEXT: srai a0, a0, 56
+; RV64I-ZALRSC-NEXT: ret
%1 = cmpxchg ptr %ptr, i8 %cmp, i8 %val monotonic monotonic
%2 = extractvalue { i8, i1 } %1, 0
ret i8 %2
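; Note: sub-word cmpxchg reuses the masked-word scheme: if the masked field
; differs from the expected value, the bne exits the loop without ever
; issuing the sc.w; otherwise the new byte is merged in and the
; store-conditional retried on reservation loss.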
@@ -3911,6 +5759,32 @@ define i1 @cmpxchg_i8_monotonic_monotonic_val1(ptr %ptr, i8 signext %cmp, i8 sig
; RV32IA-NEXT: seqz a0, a1
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: cmpxchg_i8_monotonic_monotonic_val1:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a3, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: li a4, 255
+; RV32I-ZALRSC-NEXT: zext.b a1, a1
+; RV32I-ZALRSC-NEXT: zext.b a2, a2
+; RV32I-ZALRSC-NEXT: sll a4, a4, a0
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: sll a0, a2, a0
+; RV32I-ZALRSC-NEXT: .LBB48_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a3)
+; RV32I-ZALRSC-NEXT: and a5, a2, a4
+; RV32I-ZALRSC-NEXT: bne a5, a1, .LBB48_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB48_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a5, a2, a0
+; RV32I-ZALRSC-NEXT: and a5, a5, a4
+; RV32I-ZALRSC-NEXT: xor a5, a2, a5
+; RV32I-ZALRSC-NEXT: sc.w a5, a5, (a3)
+; RV32I-ZALRSC-NEXT: bnez a5, .LBB48_1
+; RV32I-ZALRSC-NEXT: .LBB48_3:
+; RV32I-ZALRSC-NEXT: and a2, a2, a4
+; RV32I-ZALRSC-NEXT: xor a1, a1, a2
+; RV32I-ZALRSC-NEXT: seqz a0, a1
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: cmpxchg_i8_monotonic_monotonic_val1:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -3949,6 +5823,32 @@ define i1 @cmpxchg_i8_monotonic_monotonic_val1(ptr %ptr, i8 signext %cmp, i8 sig
; RV64IA-NEXT: xor a1, a1, a2
; RV64IA-NEXT: seqz a0, a1
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: cmpxchg_i8_monotonic_monotonic_val1:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a3, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: li a4, 255
+; RV64I-ZALRSC-NEXT: zext.b a1, a1
+; RV64I-ZALRSC-NEXT: zext.b a2, a2
+; RV64I-ZALRSC-NEXT: sllw a4, a4, a0
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: sllw a0, a2, a0
+; RV64I-ZALRSC-NEXT: .LBB48_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a2, (a3)
+; RV64I-ZALRSC-NEXT: and a5, a2, a4
+; RV64I-ZALRSC-NEXT: bne a5, a1, .LBB48_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB48_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a5, a2, a0
+; RV64I-ZALRSC-NEXT: and a5, a5, a4
+; RV64I-ZALRSC-NEXT: xor a5, a2, a5
+; RV64I-ZALRSC-NEXT: sc.w a5, a5, (a3)
+; RV64I-ZALRSC-NEXT: bnez a5, .LBB48_1
+; RV64I-ZALRSC-NEXT: .LBB48_3:
+; RV64I-ZALRSC-NEXT: and a2, a2, a4
+; RV64I-ZALRSC-NEXT: xor a1, a1, a2
+; RV64I-ZALRSC-NEXT: seqz a0, a1
+; RV64I-ZALRSC-NEXT: ret
%1 = cmpxchg ptr %ptr, i8 %cmp, i8 %val monotonic monotonic
%2 = extractvalue { i8, i1 } %1, 1
ret i1 %2
@@ -3996,6 +5896,33 @@ define signext i16 @cmpxchg_i16_monotonic_monotonic_val0(ptr %ptr, i16 signext %
; RV32IA-NEXT: srai a0, a0, 16
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: cmpxchg_i16_monotonic_monotonic_val0:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a3, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a4, 16
+; RV32I-ZALRSC-NEXT: addi a4, a4, -1
+; RV32I-ZALRSC-NEXT: sll a5, a4, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a4
+; RV32I-ZALRSC-NEXT: and a2, a2, a4
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: sll a2, a2, a0
+; RV32I-ZALRSC-NEXT: .LBB49_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a4, (a3)
+; RV32I-ZALRSC-NEXT: and a6, a4, a5
+; RV32I-ZALRSC-NEXT: bne a6, a1, .LBB49_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB49_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a6, a4, a2
+; RV32I-ZALRSC-NEXT: and a6, a6, a5
+; RV32I-ZALRSC-NEXT: xor a6, a4, a6
+; RV32I-ZALRSC-NEXT: sc.w a6, a6, (a3)
+; RV32I-ZALRSC-NEXT: bnez a6, .LBB49_1
+; RV32I-ZALRSC-NEXT: .LBB49_3:
+; RV32I-ZALRSC-NEXT: srl a0, a4, a0
+; RV32I-ZALRSC-NEXT: slli a0, a0, 16
+; RV32I-ZALRSC-NEXT: srai a0, a0, 16
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: cmpxchg_i16_monotonic_monotonic_val0:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -4036,6 +5963,33 @@ define signext i16 @cmpxchg_i16_monotonic_monotonic_val0(ptr %ptr, i16 signext %
; RV64IA-NEXT: slli a0, a0, 48
; RV64IA-NEXT: srai a0, a0, 48
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: cmpxchg_i16_monotonic_monotonic_val0:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a3, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a4, 16
+; RV64I-ZALRSC-NEXT: addi a4, a4, -1
+; RV64I-ZALRSC-NEXT: sllw a5, a4, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a4
+; RV64I-ZALRSC-NEXT: and a2, a2, a4
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: sllw a2, a2, a0
+; RV64I-ZALRSC-NEXT: .LBB49_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a4, (a3)
+; RV64I-ZALRSC-NEXT: and a6, a4, a5
+; RV64I-ZALRSC-NEXT: bne a6, a1, .LBB49_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB49_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a6, a4, a2
+; RV64I-ZALRSC-NEXT: and a6, a6, a5
+; RV64I-ZALRSC-NEXT: xor a6, a4, a6
+; RV64I-ZALRSC-NEXT: sc.w a6, a6, (a3)
+; RV64I-ZALRSC-NEXT: bnez a6, .LBB49_1
+; RV64I-ZALRSC-NEXT: .LBB49_3:
+; RV64I-ZALRSC-NEXT: srlw a0, a4, a0
+; RV64I-ZALRSC-NEXT: slli a0, a0, 48
+; RV64I-ZALRSC-NEXT: srai a0, a0, 48
+; RV64I-ZALRSC-NEXT: ret
%1 = cmpxchg ptr %ptr, i16 %cmp, i16 %val monotonic monotonic
%2 = extractvalue { i16, i1 } %1, 0
ret i16 %2
@@ -4082,6 +6036,33 @@ define i1 @cmpxchg_i16_monotonic_monotonic_val1(ptr %ptr, i16 signext %cmp, i16
; RV32IA-NEXT: seqz a0, a1
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: cmpxchg_i16_monotonic_monotonic_val1:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a3, a0, -4
+; RV32I-ZALRSC-NEXT: slli a0, a0, 3
+; RV32I-ZALRSC-NEXT: lui a4, 16
+; RV32I-ZALRSC-NEXT: addi a4, a4, -1
+; RV32I-ZALRSC-NEXT: sll a5, a4, a0
+; RV32I-ZALRSC-NEXT: and a1, a1, a4
+; RV32I-ZALRSC-NEXT: and a2, a2, a4
+; RV32I-ZALRSC-NEXT: sll a1, a1, a0
+; RV32I-ZALRSC-NEXT: sll a0, a2, a0
+; RV32I-ZALRSC-NEXT: .LBB50_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a2, (a3)
+; RV32I-ZALRSC-NEXT: and a4, a2, a5
+; RV32I-ZALRSC-NEXT: bne a4, a1, .LBB50_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB50_1 Depth=1
+; RV32I-ZALRSC-NEXT: xor a4, a2, a0
+; RV32I-ZALRSC-NEXT: and a4, a4, a5
+; RV32I-ZALRSC-NEXT: xor a4, a2, a4
+; RV32I-ZALRSC-NEXT: sc.w a4, a4, (a3)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB50_1
+; RV32I-ZALRSC-NEXT: .LBB50_3:
+; RV32I-ZALRSC-NEXT: and a2, a2, a5
+; RV32I-ZALRSC-NEXT: xor a1, a1, a2
+; RV32I-ZALRSC-NEXT: seqz a0, a1
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: cmpxchg_i16_monotonic_monotonic_val1:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -4121,6 +6102,33 @@ define i1 @cmpxchg_i16_monotonic_monotonic_val1(ptr %ptr, i16 signext %cmp, i16
; RV64IA-NEXT: xor a1, a1, a2
; RV64IA-NEXT: seqz a0, a1
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: cmpxchg_i16_monotonic_monotonic_val1:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a3, a0, -4
+; RV64I-ZALRSC-NEXT: slli a0, a0, 3
+; RV64I-ZALRSC-NEXT: lui a4, 16
+; RV64I-ZALRSC-NEXT: addi a4, a4, -1
+; RV64I-ZALRSC-NEXT: sllw a5, a4, a0
+; RV64I-ZALRSC-NEXT: and a1, a1, a4
+; RV64I-ZALRSC-NEXT: and a2, a2, a4
+; RV64I-ZALRSC-NEXT: sllw a1, a1, a0
+; RV64I-ZALRSC-NEXT: sllw a0, a2, a0
+; RV64I-ZALRSC-NEXT: .LBB50_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a2, (a3)
+; RV64I-ZALRSC-NEXT: and a4, a2, a5
+; RV64I-ZALRSC-NEXT: bne a4, a1, .LBB50_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB50_1 Depth=1
+; RV64I-ZALRSC-NEXT: xor a4, a2, a0
+; RV64I-ZALRSC-NEXT: and a4, a4, a5
+; RV64I-ZALRSC-NEXT: xor a4, a2, a4
+; RV64I-ZALRSC-NEXT: sc.w a4, a4, (a3)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB50_1
+; RV64I-ZALRSC-NEXT: .LBB50_3:
+; RV64I-ZALRSC-NEXT: and a2, a2, a5
+; RV64I-ZALRSC-NEXT: xor a1, a1, a2
+; RV64I-ZALRSC-NEXT: seqz a0, a1
+; RV64I-ZALRSC-NEXT: ret
%1 = cmpxchg ptr %ptr, i16 %cmp, i16 %val monotonic monotonic
%2 = extractvalue { i16, i1 } %1, 1
ret i1 %2
@@ -4159,6 +6167,18 @@ define signext i32 @cmpxchg_i32_monotonic_monotonic_val0(ptr %ptr, i32 signext %
; RV32IA-ZACAS-NEXT: mv a0, a1
; RV32IA-ZACAS-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: cmpxchg_i32_monotonic_monotonic_val0:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB51_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a0)
+; RV32I-ZALRSC-NEXT: bne a3, a1, .LBB51_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB51_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a4, a2, (a0)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB51_1
+; RV32I-ZALRSC-NEXT: .LBB51_3:
+; RV32I-ZALRSC-NEXT: mv a0, a3
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: cmpxchg_i32_monotonic_monotonic_val0:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -4190,6 +6210,18 @@ define signext i32 @cmpxchg_i32_monotonic_monotonic_val0(ptr %ptr, i32 signext %
; RV64IA-ZACAS-NEXT: amocas.w a1, a2, (a0)
; RV64IA-ZACAS-NEXT: mv a0, a1
; RV64IA-ZACAS-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: cmpxchg_i32_monotonic_monotonic_val0:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB51_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a0)
+; RV64I-ZALRSC-NEXT: bne a3, a1, .LBB51_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB51_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a4, a2, (a0)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB51_1
+; RV64I-ZALRSC-NEXT: .LBB51_3:
+; RV64I-ZALRSC-NEXT: mv a0, a3
+; RV64I-ZALRSC-NEXT: ret
%1 = cmpxchg ptr %ptr, i32 %cmp, i32 %val monotonic monotonic
%2 = extractvalue { i32, i1 } %1, 0
ret i32 %2
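; Note: word-sized cmpxchg needs no masking and expands to a direct LR/SC
; compare-exchange; the _val1 variants recover the success bit by xor-ing
; the loaded value with the expected one and applying seqz.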
@@ -4230,6 +6262,19 @@ define i1 @cmpxchg_i32_monotonic_monotonic_val1(ptr %ptr, i32 signext %cmp, i32
; RV32IA-ZACAS-NEXT: seqz a0, a1
; RV32IA-ZACAS-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: cmpxchg_i32_monotonic_monotonic_val1:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: .LBB52_1: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a3, (a0)
+; RV32I-ZALRSC-NEXT: bne a3, a1, .LBB52_3
+; RV32I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB52_1 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a4, a2, (a0)
+; RV32I-ZALRSC-NEXT: bnez a4, .LBB52_1
+; RV32I-ZALRSC-NEXT: .LBB52_3:
+; RV32I-ZALRSC-NEXT: xor a1, a3, a1
+; RV32I-ZALRSC-NEXT: seqz a0, a1
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: cmpxchg_i32_monotonic_monotonic_val1:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@@ -4263,6 +6308,19 @@ define i1 @cmpxchg_i32_monotonic_monotonic_val1(ptr %ptr, i32 signext %cmp, i32
; RV64IA-ZACAS-NEXT: xor a1, a3, a1
; RV64IA-ZACAS-NEXT: seqz a0, a1
; RV64IA-ZACAS-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: cmpxchg_i32_monotonic_monotonic_val1:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: .LBB52_1: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a3, (a0)
+; RV64I-ZALRSC-NEXT: bne a3, a1, .LBB52_3
+; RV64I-ZALRSC-NEXT: # %bb.2: # in Loop: Header=BB52_1 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a4, a2, (a0)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB52_1
+; RV64I-ZALRSC-NEXT: .LBB52_3:
+; RV64I-ZALRSC-NEXT: xor a1, a3, a1
+; RV64I-ZALRSC-NEXT: seqz a0, a1
+; RV64I-ZALRSC-NEXT: ret
%1 = cmpxchg ptr %ptr, i32 %cmp, i32 %val monotonic monotonic
%2 = extractvalue { i32, i1 } %1, 1
ret i1 %2
@@ -4304,6 +6362,27 @@ define signext i32 @atomicrmw_xchg_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV32IA-NEXT: sw a2, 0(a1)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xchg_i32_monotonic_crossbb:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a1, 1
+; RV32I-ZALRSC-NEXT: mv a1, a0
+; RV32I-ZALRSC-NEXT: beqz a2, .LBB53_2
+; RV32I-ZALRSC-NEXT: # %bb.1: # %then
+; RV32I-ZALRSC-NEXT: li a2, 1
+; RV32I-ZALRSC-NEXT: .LBB53_3: # %then
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a0, (a1)
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a1)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB53_3
+; RV32I-ZALRSC-NEXT: # %bb.4: # %then
+; RV32I-ZALRSC-NEXT: ret
+; RV32I-ZALRSC-NEXT: .LBB53_2: # %else
+; RV32I-ZALRSC-NEXT: lw a0, 0(a1)
+; RV32I-ZALRSC-NEXT: li a2, 1
+; RV32I-ZALRSC-NEXT: sw a2, 0(a1)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_xchg_i32_monotonic_crossbb:
; RV64I: # %bb.0:
; RV64I-NEXT: andi a1, a1, 1
@@ -4339,6 +6418,28 @@ define signext i32 @atomicrmw_xchg_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV64IA-NEXT: li a2, 1
; RV64IA-NEXT: sw a2, 0(a1)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_xchg_i32_monotonic_crossbb:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a1, 1
+; RV64I-ZALRSC-NEXT: beqz a1, .LBB53_2
+; RV64I-ZALRSC-NEXT: # %bb.1: # %then
+; RV64I-ZALRSC-NEXT: li a2, 1
+; RV64I-ZALRSC-NEXT: .LBB53_3: # %then
+; RV64I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB53_3
+; RV64I-ZALRSC-NEXT: # %bb.4: # %then
+; RV64I-ZALRSC-NEXT: sext.w a0, a1
+; RV64I-ZALRSC-NEXT: ret
+; RV64I-ZALRSC-NEXT: .LBB53_2: # %else
+; RV64I-ZALRSC-NEXT: lw a1, 0(a0)
+; RV64I-ZALRSC-NEXT: li a2, 1
+; RV64I-ZALRSC-NEXT: sw a2, 0(a0)
+; RV64I-ZALRSC-NEXT: sext.w a0, a1
+; RV64I-ZALRSC-NEXT: ret
br i1 %c, label %then, label %else
then:
@@ -4391,6 +6492,27 @@ define signext i32 @atomicrmw_add_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV32IA-NEXT: sw a2, 0(a1)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_add_i32_monotonic_crossbb:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a1, 1
+; RV32I-ZALRSC-NEXT: mv a1, a0
+; RV32I-ZALRSC-NEXT: beqz a2, .LBB54_2
+; RV32I-ZALRSC-NEXT: # %bb.1: # %then
+; RV32I-ZALRSC-NEXT: li a2, 1
+; RV32I-ZALRSC-NEXT: .LBB54_3: # %then
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a0, (a1)
+; RV32I-ZALRSC-NEXT: add a3, a0, a2
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a1)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB54_3
+; RV32I-ZALRSC-NEXT: # %bb.4: # %then
+; RV32I-ZALRSC-NEXT: ret
+; RV32I-ZALRSC-NEXT: .LBB54_2: # %else
+; RV32I-ZALRSC-NEXT: lw a0, 0(a1)
+; RV32I-ZALRSC-NEXT: addi a2, a0, 1
+; RV32I-ZALRSC-NEXT: sw a2, 0(a1)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_add_i32_monotonic_crossbb:
; RV64I: # %bb.0:
; RV64I-NEXT: andi a1, a1, 1
@@ -4426,6 +6548,28 @@ define signext i32 @atomicrmw_add_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV64IA-NEXT: addi a2, a0, 1
; RV64IA-NEXT: sw a2, 0(a1)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_add_i32_monotonic_crossbb:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a1, 1
+; RV64I-ZALRSC-NEXT: beqz a1, .LBB54_2
+; RV64I-ZALRSC-NEXT: # %bb.1: # %then
+; RV64I-ZALRSC-NEXT: li a2, 1
+; RV64I-ZALRSC-NEXT: .LBB54_3: # %then
+; RV64I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a1, (a0)
+; RV64I-ZALRSC-NEXT: add a3, a1, a2
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB54_3
+; RV64I-ZALRSC-NEXT: # %bb.4: # %then
+; RV64I-ZALRSC-NEXT: sext.w a0, a1
+; RV64I-ZALRSC-NEXT: ret
+; RV64I-ZALRSC-NEXT: .LBB54_2: # %else
+; RV64I-ZALRSC-NEXT: lw a1, 0(a0)
+; RV64I-ZALRSC-NEXT: addi a2, a1, 1
+; RV64I-ZALRSC-NEXT: sw a2, 0(a0)
+; RV64I-ZALRSC-NEXT: sext.w a0, a1
+; RV64I-ZALRSC-NEXT: ret
br i1 %c, label %then, label %else
then:
@@ -4479,6 +6623,27 @@ define signext i32 @atomicrmw_sub_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV32IA-NEXT: sw a2, 0(a1)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_sub_i32_monotonic_crossbb:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a1, 1
+; RV32I-ZALRSC-NEXT: mv a1, a0
+; RV32I-ZALRSC-NEXT: beqz a2, .LBB55_2
+; RV32I-ZALRSC-NEXT: # %bb.1: # %then
+; RV32I-ZALRSC-NEXT: li a2, 1
+; RV32I-ZALRSC-NEXT: .LBB55_3: # %then
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a0, (a1)
+; RV32I-ZALRSC-NEXT: sub a3, a0, a2
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a1)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB55_3
+; RV32I-ZALRSC-NEXT: # %bb.4: # %then
+; RV32I-ZALRSC-NEXT: ret
+; RV32I-ZALRSC-NEXT: .LBB55_2: # %else
+; RV32I-ZALRSC-NEXT: lw a0, 0(a1)
+; RV32I-ZALRSC-NEXT: addi a2, a0, -1
+; RV32I-ZALRSC-NEXT: sw a2, 0(a1)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_sub_i32_monotonic_crossbb:
; RV64I: # %bb.0:
; RV64I-NEXT: andi a1, a1, 1
@@ -4514,6 +6679,28 @@ define signext i32 @atomicrmw_sub_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV64IA-NEXT: addi a2, a0, -1
; RV64IA-NEXT: sw a2, 0(a1)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_sub_i32_monotonic_crossbb:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a1, 1
+; RV64I-ZALRSC-NEXT: beqz a1, .LBB55_2
+; RV64I-ZALRSC-NEXT: # %bb.1: # %then
+; RV64I-ZALRSC-NEXT: li a2, 1
+; RV64I-ZALRSC-NEXT: .LBB55_3: # %then
+; RV64I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a1, (a0)
+; RV64I-ZALRSC-NEXT: sub a3, a1, a2
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB55_3
+; RV64I-ZALRSC-NEXT: # %bb.4: # %then
+; RV64I-ZALRSC-NEXT: sext.w a0, a1
+; RV64I-ZALRSC-NEXT: ret
+; RV64I-ZALRSC-NEXT: .LBB55_2: # %else
+; RV64I-ZALRSC-NEXT: lw a1, 0(a0)
+; RV64I-ZALRSC-NEXT: addi a2, a1, -1
+; RV64I-ZALRSC-NEXT: sw a2, 0(a0)
+; RV64I-ZALRSC-NEXT: sext.w a0, a1
+; RV64I-ZALRSC-NEXT: ret
br i1 %c, label %then, label %else
then:
@@ -4567,6 +6754,27 @@ define signext i32 @atomicrmw_and_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV32IA-NEXT: sw a2, 0(a1)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_and_i32_monotonic_crossbb:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a1, 1
+; RV32I-ZALRSC-NEXT: mv a1, a0
+; RV32I-ZALRSC-NEXT: beqz a2, .LBB56_2
+; RV32I-ZALRSC-NEXT: # %bb.1: # %then
+; RV32I-ZALRSC-NEXT: li a2, 1
+; RV32I-ZALRSC-NEXT: .LBB56_3: # %then
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a0, (a1)
+; RV32I-ZALRSC-NEXT: and a3, a0, a2
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a1)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB56_3
+; RV32I-ZALRSC-NEXT: # %bb.4: # %then
+; RV32I-ZALRSC-NEXT: ret
+; RV32I-ZALRSC-NEXT: .LBB56_2: # %else
+; RV32I-ZALRSC-NEXT: lw a0, 0(a1)
+; RV32I-ZALRSC-NEXT: andi a2, a0, 1
+; RV32I-ZALRSC-NEXT: sw a2, 0(a1)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_and_i32_monotonic_crossbb:
; RV64I: # %bb.0:
; RV64I-NEXT: andi a1, a1, 1
@@ -4602,6 +6810,28 @@ define signext i32 @atomicrmw_and_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV64IA-NEXT: andi a2, a0, 1
; RV64IA-NEXT: sw a2, 0(a1)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_and_i32_monotonic_crossbb:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a1, 1
+; RV64I-ZALRSC-NEXT: beqz a1, .LBB56_2
+; RV64I-ZALRSC-NEXT: # %bb.1: # %then
+; RV64I-ZALRSC-NEXT: li a2, 1
+; RV64I-ZALRSC-NEXT: .LBB56_3: # %then
+; RV64I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a1, (a0)
+; RV64I-ZALRSC-NEXT: and a3, a1, a2
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB56_3
+; RV64I-ZALRSC-NEXT: # %bb.4: # %then
+; RV64I-ZALRSC-NEXT: sext.w a0, a1
+; RV64I-ZALRSC-NEXT: ret
+; RV64I-ZALRSC-NEXT: .LBB56_2: # %else
+; RV64I-ZALRSC-NEXT: lw a1, 0(a0)
+; RV64I-ZALRSC-NEXT: andi a2, a1, 1
+; RV64I-ZALRSC-NEXT: sw a2, 0(a0)
+; RV64I-ZALRSC-NEXT: sext.w a0, a1
+; RV64I-ZALRSC-NEXT: ret
br i1 %c, label %then, label %else
then:
@@ -4685,6 +6915,28 @@ define signext i32 @atomicrmw_nand_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV32IA-ZACAS-NEXT: mv a0, a1
; RV32IA-ZACAS-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_nand_i32_monotonic_crossbb:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a1, 1
+; RV32I-ZALRSC-NEXT: mv a1, a0
+; RV32I-ZALRSC-NEXT: beqz a2, .LBB57_2
+; RV32I-ZALRSC-NEXT: # %bb.1: # %then
+; RV32I-ZALRSC-NEXT: li a2, 1
+; RV32I-ZALRSC-NEXT: .LBB57_3: # %then
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a0, (a1)
+; RV32I-ZALRSC-NEXT: and a3, a0, a2
+; RV32I-ZALRSC-NEXT: not a3, a3
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a1)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB57_3
+; RV32I-ZALRSC-NEXT: # %bb.4: # %then
+; RV32I-ZALRSC-NEXT: ret
+; RV32I-ZALRSC-NEXT: .LBB57_2: # %else
+; RV32I-ZALRSC-NEXT: lw a0, 0(a1)
+; RV32I-ZALRSC-NEXT: andi a2, a0, 1
+; RV32I-ZALRSC-NEXT: sw a2, 0(a1)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_nand_i32_monotonic_crossbb:
; RV64I: # %bb.0:
; RV64I-NEXT: andi a1, a1, 1
@@ -4750,6 +7002,28 @@ define signext i32 @atomicrmw_nand_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV64IA-ZACAS-NEXT: sw a2, 0(a0)
; RV64IA-ZACAS-NEXT: mv a0, a1
; RV64IA-ZACAS-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_nand_i32_monotonic_crossbb:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a1, 1
+; RV64I-ZALRSC-NEXT: mv a1, a0
+; RV64I-ZALRSC-NEXT: beqz a2, .LBB57_2
+; RV64I-ZALRSC-NEXT: # %bb.1: # %then
+; RV64I-ZALRSC-NEXT: li a2, 1
+; RV64I-ZALRSC-NEXT: .LBB57_3: # %then
+; RV64I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a0, (a1)
+; RV64I-ZALRSC-NEXT: and a3, a0, a2
+; RV64I-ZALRSC-NEXT: not a3, a3
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a1)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB57_3
+; RV64I-ZALRSC-NEXT: # %bb.4: # %then
+; RV64I-ZALRSC-NEXT: ret
+; RV64I-ZALRSC-NEXT: .LBB57_2: # %else
+; RV64I-ZALRSC-NEXT: lw a0, 0(a1)
+; RV64I-ZALRSC-NEXT: andi a2, a0, 1
+; RV64I-ZALRSC-NEXT: sw a2, 0(a1)
+; RV64I-ZALRSC-NEXT: ret
br i1 %c, label %then, label %else
then:
@@ -4803,6 +7077,27 @@ define signext i32 @atomicrmw_or_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind {
; RV32IA-NEXT: sw a2, 0(a1)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_or_i32_monotonic_crossbb:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a1, 1
+; RV32I-ZALRSC-NEXT: mv a1, a0
+; RV32I-ZALRSC-NEXT: beqz a2, .LBB58_2
+; RV32I-ZALRSC-NEXT: # %bb.1: # %then
+; RV32I-ZALRSC-NEXT: li a2, 1
+; RV32I-ZALRSC-NEXT: .LBB58_3: # %then
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a0, (a1)
+; RV32I-ZALRSC-NEXT: or a3, a0, a2
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a1)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB58_3
+; RV32I-ZALRSC-NEXT: # %bb.4: # %then
+; RV32I-ZALRSC-NEXT: ret
+; RV32I-ZALRSC-NEXT: .LBB58_2: # %else
+; RV32I-ZALRSC-NEXT: lw a0, 0(a1)
+; RV32I-ZALRSC-NEXT: ori a2, a0, 1
+; RV32I-ZALRSC-NEXT: sw a2, 0(a1)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_or_i32_monotonic_crossbb:
; RV64I: # %bb.0:
; RV64I-NEXT: andi a1, a1, 1
@@ -4838,6 +7133,28 @@ define signext i32 @atomicrmw_or_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind {
; RV64IA-NEXT: ori a2, a0, 1
; RV64IA-NEXT: sw a2, 0(a1)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_or_i32_monotonic_crossbb:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a1, 1
+; RV64I-ZALRSC-NEXT: beqz a1, .LBB58_2
+; RV64I-ZALRSC-NEXT: # %bb.1: # %then
+; RV64I-ZALRSC-NEXT: li a2, 1
+; RV64I-ZALRSC-NEXT: .LBB58_3: # %then
+; RV64I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a1, (a0)
+; RV64I-ZALRSC-NEXT: or a3, a1, a2
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB58_3
+; RV64I-ZALRSC-NEXT: # %bb.4: # %then
+; RV64I-ZALRSC-NEXT: sext.w a0, a1
+; RV64I-ZALRSC-NEXT: ret
+; RV64I-ZALRSC-NEXT: .LBB58_2: # %else
+; RV64I-ZALRSC-NEXT: lw a1, 0(a0)
+; RV64I-ZALRSC-NEXT: ori a2, a1, 1
+; RV64I-ZALRSC-NEXT: sw a2, 0(a0)
+; RV64I-ZALRSC-NEXT: sext.w a0, a1
+; RV64I-ZALRSC-NEXT: ret
br i1 %c, label %then, label %else
then:
@@ -4891,6 +7208,27 @@ define signext i32 @atomicrmw_xor_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV32IA-NEXT: sw a2, 0(a1)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_xor_i32_monotonic_crossbb:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a1, 1
+; RV32I-ZALRSC-NEXT: mv a1, a0
+; RV32I-ZALRSC-NEXT: beqz a2, .LBB59_2
+; RV32I-ZALRSC-NEXT: # %bb.1: # %then
+; RV32I-ZALRSC-NEXT: li a2, 1
+; RV32I-ZALRSC-NEXT: .LBB59_3: # %then
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a0, (a1)
+; RV32I-ZALRSC-NEXT: xor a3, a0, a2
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a1)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB59_3
+; RV32I-ZALRSC-NEXT: # %bb.4: # %then
+; RV32I-ZALRSC-NEXT: ret
+; RV32I-ZALRSC-NEXT: .LBB59_2: # %else
+; RV32I-ZALRSC-NEXT: lw a0, 0(a1)
+; RV32I-ZALRSC-NEXT: xori a2, a0, 1
+; RV32I-ZALRSC-NEXT: sw a2, 0(a1)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_xor_i32_monotonic_crossbb:
; RV64I: # %bb.0:
; RV64I-NEXT: andi a1, a1, 1
@@ -4926,6 +7264,28 @@ define signext i32 @atomicrmw_xor_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV64IA-NEXT: xori a2, a0, 1
; RV64IA-NEXT: sw a2, 0(a1)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_xor_i32_monotonic_crossbb:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a1, 1
+; RV64I-ZALRSC-NEXT: beqz a1, .LBB59_2
+; RV64I-ZALRSC-NEXT: # %bb.1: # %then
+; RV64I-ZALRSC-NEXT: li a2, 1
+; RV64I-ZALRSC-NEXT: .LBB59_3: # %then
+; RV64I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a1, (a0)
+; RV64I-ZALRSC-NEXT: xor a3, a1, a2
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB59_3
+; RV64I-ZALRSC-NEXT: # %bb.4: # %then
+; RV64I-ZALRSC-NEXT: sext.w a0, a1
+; RV64I-ZALRSC-NEXT: ret
+; RV64I-ZALRSC-NEXT: .LBB59_2: # %else
+; RV64I-ZALRSC-NEXT: lw a1, 0(a0)
+; RV64I-ZALRSC-NEXT: xori a2, a1, 1
+; RV64I-ZALRSC-NEXT: sw a2, 0(a0)
+; RV64I-ZALRSC-NEXT: sext.w a0, a1
+; RV64I-ZALRSC-NEXT: ret
br i1 %c, label %then, label %else
then:
@@ -5007,6 +7367,37 @@ define signext i32 @atomicrmw_max_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV32IA-NEXT: sw a2, 0(a1)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_max_i32_monotonic_crossbb:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a1, 1
+; RV32I-ZALRSC-NEXT: mv a1, a0
+; RV32I-ZALRSC-NEXT: beqz a2, .LBB60_2
+; RV32I-ZALRSC-NEXT: # %bb.1: # %then
+; RV32I-ZALRSC-NEXT: li a2, 1
+; RV32I-ZALRSC-NEXT: .LBB60_5: # %then
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a0, (a1)
+; RV32I-ZALRSC-NEXT: mv a3, a0
+; RV32I-ZALRSC-NEXT: bge a3, a2, .LBB60_7
+; RV32I-ZALRSC-NEXT: # %bb.6: # %then
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB60_5 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: .LBB60_7: # %then
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB60_5 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a1)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB60_5
+; RV32I-ZALRSC-NEXT: # %bb.8: # %then
+; RV32I-ZALRSC-NEXT: ret
+; RV32I-ZALRSC-NEXT: .LBB60_2: # %else
+; RV32I-ZALRSC-NEXT: lw a0, 0(a1)
+; RV32I-ZALRSC-NEXT: mv a2, a0
+; RV32I-ZALRSC-NEXT: bgtz a0, .LBB60_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %else
+; RV32I-ZALRSC-NEXT: li a2, 1
+; RV32I-ZALRSC-NEXT: .LBB60_4: # %else
+; RV32I-ZALRSC-NEXT: sw a2, 0(a1)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_max_i32_monotonic_crossbb:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
@@ -5070,6 +7461,37 @@ define signext i32 @atomicrmw_max_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV64IA-NEXT: .LBB60_4: # %else
; RV64IA-NEXT: sw a2, 0(a1)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_max_i32_monotonic_crossbb:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a1, 1
+; RV64I-ZALRSC-NEXT: mv a1, a0
+; RV64I-ZALRSC-NEXT: beqz a2, .LBB60_2
+; RV64I-ZALRSC-NEXT: # %bb.1: # %then
+; RV64I-ZALRSC-NEXT: li a2, 1
+; RV64I-ZALRSC-NEXT: .LBB60_5: # %then
+; RV64I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a0, (a1)
+; RV64I-ZALRSC-NEXT: mv a3, a0
+; RV64I-ZALRSC-NEXT: bge a3, a2, .LBB60_7
+; RV64I-ZALRSC-NEXT: # %bb.6: # %then
+; RV64I-ZALRSC-NEXT: # in Loop: Header=BB60_5 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB60_7: # %then
+; RV64I-ZALRSC-NEXT: # in Loop: Header=BB60_5 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a1)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB60_5
+; RV64I-ZALRSC-NEXT: # %bb.8: # %then
+; RV64I-ZALRSC-NEXT: ret
+; RV64I-ZALRSC-NEXT: .LBB60_2: # %else
+; RV64I-ZALRSC-NEXT: lw a0, 0(a1)
+; RV64I-ZALRSC-NEXT: mv a2, a0
+; RV64I-ZALRSC-NEXT: bgtz a0, .LBB60_4
+; RV64I-ZALRSC-NEXT: # %bb.3: # %else
+; RV64I-ZALRSC-NEXT: li a2, 1
+; RV64I-ZALRSC-NEXT: .LBB60_4: # %else
+; RV64I-ZALRSC-NEXT: sw a2, 0(a1)
+; RV64I-ZALRSC-NEXT: ret
br i1 %c, label %then, label %else
then:
@@ -5155,6 +7577,37 @@ define signext i32 @atomicrmw_min_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV32IA-NEXT: sw a2, 0(a1)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_min_i32_monotonic_crossbb:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a1, 1
+; RV32I-ZALRSC-NEXT: mv a1, a0
+; RV32I-ZALRSC-NEXT: beqz a2, .LBB61_2
+; RV32I-ZALRSC-NEXT: # %bb.1: # %then
+; RV32I-ZALRSC-NEXT: li a2, 1
+; RV32I-ZALRSC-NEXT: .LBB61_5: # %then
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a0, (a1)
+; RV32I-ZALRSC-NEXT: mv a3, a0
+; RV32I-ZALRSC-NEXT: bge a2, a3, .LBB61_7
+; RV32I-ZALRSC-NEXT: # %bb.6: # %then
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB61_5 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: .LBB61_7: # %then
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB61_5 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a1)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB61_5
+; RV32I-ZALRSC-NEXT: # %bb.8: # %then
+; RV32I-ZALRSC-NEXT: ret
+; RV32I-ZALRSC-NEXT: .LBB61_2: # %else
+; RV32I-ZALRSC-NEXT: lw a0, 0(a1)
+; RV32I-ZALRSC-NEXT: mv a2, a0
+; RV32I-ZALRSC-NEXT: blez a0, .LBB61_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %else
+; RV32I-ZALRSC-NEXT: li a2, 1
+; RV32I-ZALRSC-NEXT: .LBB61_4: # %else
+; RV32I-ZALRSC-NEXT: sw a2, 0(a1)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_min_i32_monotonic_crossbb:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
@@ -5220,6 +7673,37 @@ define signext i32 @atomicrmw_min_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV64IA-NEXT: .LBB61_4: # %else
; RV64IA-NEXT: sw a2, 0(a1)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_min_i32_monotonic_crossbb:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a1, 1
+; RV64I-ZALRSC-NEXT: mv a1, a0
+; RV64I-ZALRSC-NEXT: beqz a2, .LBB61_2
+; RV64I-ZALRSC-NEXT: # %bb.1: # %then
+; RV64I-ZALRSC-NEXT: li a2, 1
+; RV64I-ZALRSC-NEXT: .LBB61_5: # %then
+; RV64I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a0, (a1)
+; RV64I-ZALRSC-NEXT: mv a3, a0
+; RV64I-ZALRSC-NEXT: bge a2, a3, .LBB61_7
+; RV64I-ZALRSC-NEXT: # %bb.6: # %then
+; RV64I-ZALRSC-NEXT: # in Loop: Header=BB61_5 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB61_7: # %then
+; RV64I-ZALRSC-NEXT: # in Loop: Header=BB61_5 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a1)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB61_5
+; RV64I-ZALRSC-NEXT: # %bb.8: # %then
+; RV64I-ZALRSC-NEXT: ret
+; RV64I-ZALRSC-NEXT: .LBB61_2: # %else
+; RV64I-ZALRSC-NEXT: lw a0, 0(a1)
+; RV64I-ZALRSC-NEXT: mv a2, a0
+; RV64I-ZALRSC-NEXT: blez a0, .LBB61_4
+; RV64I-ZALRSC-NEXT: # %bb.3: # %else
+; RV64I-ZALRSC-NEXT: li a2, 1
+; RV64I-ZALRSC-NEXT: .LBB61_4: # %else
+; RV64I-ZALRSC-NEXT: sw a2, 0(a1)
+; RV64I-ZALRSC-NEXT: ret
br i1 %c, label %then, label %else
then:
@@ -5290,6 +7774,34 @@ define signext i32 @atomicrmw_umax_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV32IA-NEXT: sw a2, 0(a1)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umax_i32_monotonic_crossbb:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a1, 1
+; RV32I-ZALRSC-NEXT: mv a1, a0
+; RV32I-ZALRSC-NEXT: beqz a2, .LBB62_2
+; RV32I-ZALRSC-NEXT: # %bb.1: # %then
+; RV32I-ZALRSC-NEXT: li a2, 1
+; RV32I-ZALRSC-NEXT: .LBB62_3: # %then
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a0, (a1)
+; RV32I-ZALRSC-NEXT: mv a3, a0
+; RV32I-ZALRSC-NEXT: bgeu a3, a2, .LBB62_5
+; RV32I-ZALRSC-NEXT: # %bb.4: # %then
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB62_3 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: .LBB62_5: # %then
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB62_3 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a1)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB62_3
+; RV32I-ZALRSC-NEXT: # %bb.6: # %then
+; RV32I-ZALRSC-NEXT: ret
+; RV32I-ZALRSC-NEXT: .LBB62_2: # %else
+; RV32I-ZALRSC-NEXT: lw a0, 0(a1)
+; RV32I-ZALRSC-NEXT: seqz a2, a0
+; RV32I-ZALRSC-NEXT: add a2, a0, a2
+; RV32I-ZALRSC-NEXT: sw a2, 0(a1)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_umax_i32_monotonic_crossbb:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
@@ -5347,6 +7859,35 @@ define signext i32 @atomicrmw_umax_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV64IA-NEXT: add a2, a0, a2
; RV64IA-NEXT: sw a2, 0(a1)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_umax_i32_monotonic_crossbb:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a1, a1, 1
+; RV64I-ZALRSC-NEXT: beqz a1, .LBB62_2
+; RV64I-ZALRSC-NEXT: # %bb.1: # %then
+; RV64I-ZALRSC-NEXT: li a2, 1
+; RV64I-ZALRSC-NEXT: .LBB62_3: # %then
+; RV64I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a1, (a0)
+; RV64I-ZALRSC-NEXT: mv a3, a1
+; RV64I-ZALRSC-NEXT: bgeu a3, a2, .LBB62_5
+; RV64I-ZALRSC-NEXT: # %bb.4: # %then
+; RV64I-ZALRSC-NEXT: # in Loop: Header=BB62_3 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB62_5: # %then
+; RV64I-ZALRSC-NEXT: # in Loop: Header=BB62_3 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a0)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB62_3
+; RV64I-ZALRSC-NEXT: # %bb.6: # %then
+; RV64I-ZALRSC-NEXT: sext.w a0, a1
+; RV64I-ZALRSC-NEXT: ret
+; RV64I-ZALRSC-NEXT: .LBB62_2: # %else
+; RV64I-ZALRSC-NEXT: lw a1, 0(a0)
+; RV64I-ZALRSC-NEXT: seqz a2, a1
+; RV64I-ZALRSC-NEXT: add a2, a1, a2
+; RV64I-ZALRSC-NEXT: sw a2, 0(a0)
+; RV64I-ZALRSC-NEXT: sext.w a0, a1
+; RV64I-ZALRSC-NEXT: ret
br i1 %c, label %then, label %else
then:
@@ -5434,6 +7975,38 @@ define signext i32 @atomicrmw_umin_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV32IA-NEXT: sw a2, 0(a1)
; RV32IA-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: atomicrmw_umin_i32_monotonic_crossbb:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: andi a2, a1, 1
+; RV32I-ZALRSC-NEXT: mv a1, a0
+; RV32I-ZALRSC-NEXT: beqz a2, .LBB63_2
+; RV32I-ZALRSC-NEXT: # %bb.1: # %then
+; RV32I-ZALRSC-NEXT: li a2, 1
+; RV32I-ZALRSC-NEXT: .LBB63_5: # %then
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w a0, (a1)
+; RV32I-ZALRSC-NEXT: mv a3, a0
+; RV32I-ZALRSC-NEXT: bgeu a2, a3, .LBB63_7
+; RV32I-ZALRSC-NEXT: # %bb.6: # %then
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB63_5 Depth=1
+; RV32I-ZALRSC-NEXT: mv a3, a2
+; RV32I-ZALRSC-NEXT: .LBB63_7: # %then
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB63_5 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w a3, a3, (a1)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB63_5
+; RV32I-ZALRSC-NEXT: # %bb.8: # %then
+; RV32I-ZALRSC-NEXT: ret
+; RV32I-ZALRSC-NEXT: .LBB63_2: # %else
+; RV32I-ZALRSC-NEXT: lw a0, 0(a1)
+; RV32I-ZALRSC-NEXT: li a3, 1
+; RV32I-ZALRSC-NEXT: mv a2, a0
+; RV32I-ZALRSC-NEXT: bltu a0, a3, .LBB63_4
+; RV32I-ZALRSC-NEXT: # %bb.3: # %else
+; RV32I-ZALRSC-NEXT: li a2, 1
+; RV32I-ZALRSC-NEXT: .LBB63_4: # %else
+; RV32I-ZALRSC-NEXT: sw a2, 0(a1)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: atomicrmw_umin_i32_monotonic_crossbb:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
@@ -5501,6 +8074,38 @@ define signext i32 @atomicrmw_umin_i32_monotonic_crossbb(ptr %a, i1 %c) nounwind
; RV64IA-NEXT: .LBB63_4: # %else
; RV64IA-NEXT: sw a2, 0(a1)
; RV64IA-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: atomicrmw_umin_i32_monotonic_crossbb:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: andi a2, a1, 1
+; RV64I-ZALRSC-NEXT: mv a1, a0
+; RV64I-ZALRSC-NEXT: beqz a2, .LBB63_2
+; RV64I-ZALRSC-NEXT: # %bb.1: # %then
+; RV64I-ZALRSC-NEXT: li a2, 1
+; RV64I-ZALRSC-NEXT: .LBB63_5: # %then
+; RV64I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w a0, (a1)
+; RV64I-ZALRSC-NEXT: mv a3, a0
+; RV64I-ZALRSC-NEXT: bgeu a2, a3, .LBB63_7
+; RV64I-ZALRSC-NEXT: # %bb.6: # %then
+; RV64I-ZALRSC-NEXT: # in Loop: Header=BB63_5 Depth=1
+; RV64I-ZALRSC-NEXT: mv a3, a2
+; RV64I-ZALRSC-NEXT: .LBB63_7: # %then
+; RV64I-ZALRSC-NEXT: # in Loop: Header=BB63_5 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w a3, a3, (a1)
+; RV64I-ZALRSC-NEXT: bnez a3, .LBB63_5
+; RV64I-ZALRSC-NEXT: # %bb.8: # %then
+; RV64I-ZALRSC-NEXT: ret
+; RV64I-ZALRSC-NEXT: .LBB63_2: # %else
+; RV64I-ZALRSC-NEXT: lw a0, 0(a1)
+; RV64I-ZALRSC-NEXT: li a3, 1
+; RV64I-ZALRSC-NEXT: mv a2, a0
+; RV64I-ZALRSC-NEXT: bltu a0, a3, .LBB63_4
+; RV64I-ZALRSC-NEXT: # %bb.3: # %else
+; RV64I-ZALRSC-NEXT: li a2, 1
+; RV64I-ZALRSC-NEXT: .LBB63_4: # %else
+; RV64I-ZALRSC-NEXT: sw a2, 0(a1)
+; RV64I-ZALRSC-NEXT: ret
br i1 %c, label %then, label %else
then:
@@ -5570,6 +8175,25 @@ define signext i32 @cmpxchg_i32_monotonic_crossbb(ptr %ptr, i32 signext %cmp, i3
; RV32IA-ZACAS-NEXT: lw a0, 0(a0)
; RV32IA-ZACAS-NEXT: ret
;
+; RV32I-ZALRSC-LABEL: cmpxchg_i32_monotonic_crossbb:
+; RV32I-ZALRSC: # %bb.0:
+; RV32I-ZALRSC-NEXT: mv a4, a0
+; RV32I-ZALRSC-NEXT: beqz a3, .LBB64_2
+; RV32I-ZALRSC-NEXT: # %bb.1: # %then
+; RV32I-ZALRSC-NEXT: .LBB64_3: # %then
+; RV32I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV32I-ZALRSC-NEXT: lr.w.aqrl a0, (a4)
+; RV32I-ZALRSC-NEXT: bne a0, a1, .LBB64_5
+; RV32I-ZALRSC-NEXT: # %bb.4: # %then
+; RV32I-ZALRSC-NEXT: # in Loop: Header=BB64_3 Depth=1
+; RV32I-ZALRSC-NEXT: sc.w.rl a3, a2, (a4)
+; RV32I-ZALRSC-NEXT: bnez a3, .LBB64_3
+; RV32I-ZALRSC-NEXT: .LBB64_5: # %then
+; RV32I-ZALRSC-NEXT: ret
+; RV32I-ZALRSC-NEXT: .LBB64_2: # %else
+; RV32I-ZALRSC-NEXT: lw a0, 0(a4)
+; RV32I-ZALRSC-NEXT: ret
+;
; RV64I-LABEL: cmpxchg_i32_monotonic_crossbb:
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a3, .LBB64_2
@@ -5620,6 +8244,26 @@ define signext i32 @cmpxchg_i32_monotonic_crossbb(ptr %ptr, i32 signext %cmp, i3
; RV64IA-ZACAS-NEXT: .LBB64_2: # %else
; RV64IA-ZACAS-NEXT: lw a0, 0(a0)
; RV64IA-ZACAS-NEXT: ret
+;
+; RV64I-ZALRSC-LABEL: cmpxchg_i32_monotonic_crossbb:
+; RV64I-ZALRSC: # %bb.0:
+; RV64I-ZALRSC-NEXT: beqz a3, .LBB64_2
+; RV64I-ZALRSC-NEXT: # %bb.1: # %then
+; RV64I-ZALRSC-NEXT: .LBB64_3: # %then
+; RV64I-ZALRSC-NEXT: # =>This Inner Loop Header: Depth=1
+; RV64I-ZALRSC-NEXT: lr.w.aqrl a3, (a0)
+; RV64I-ZALRSC-NEXT: bne a3, a1, .LBB64_5
+; RV64I-ZALRSC-NEXT: # %bb.4: # %then
+; RV64I-ZALRSC-NEXT: # in Loop: Header=BB64_3 Depth=1
+; RV64I-ZALRSC-NEXT: sc.w.rl a4, a2, (a0)
+; RV64I-ZALRSC-NEXT: bnez a4, .LBB64_3
+; RV64I-ZALRSC-NEXT: .LBB64_5: # %then
+; RV64I-ZALRSC-NEXT: sext.w a0, a3
+; RV64I-ZALRSC-NEXT: ret
+; RV64I-ZALRSC-NEXT: .LBB64_2: # %else
+; RV64I-ZALRSC-NEXT: lw a3, 0(a0)
+; RV64I-ZALRSC-NEXT: sext.w a0, a3
+; RV64I-ZALRSC-NEXT: ret
br i1 %c, label %then, label %else
then:
diff --git a/llvm/test/CodeGen/RISCV/features-info.ll b/llvm/test/CodeGen/RISCV/features-info.ll
index 693a40d..5e5f2b7 100644
--- a/llvm/test/CodeGen/RISCV/features-info.ll
+++ b/llvm/test/CodeGen/RISCV/features-info.ll
@@ -217,6 +217,11 @@
; CHECK-NEXT: xsfmm64t - 'XSfmm64t' (TE=64 configuration).
; CHECK-NEXT: xsfmmbase - 'XSfmmbase' (All non arithmetic instructions for all TEWs and sf.vtzero).
; CHECK-NEXT: xsfvcp - 'XSfvcp' (SiFive Custom Vector Coprocessor Interface Instructions).
+; CHECK-NEXT: xsfvfbfexp16e - 'XSfvfbfexp16e' (SiFive Vector Floating-Point Exponential Function Instruction, BFloat16).
+; CHECK-NEXT: xsfvfexp16e - 'XSfvfexp16e' (SiFive Vector Floating-Point Exponential Function Instruction, Half Precision).
+; CHECK-NEXT: xsfvfexp32e - 'XSfvfexp32e' (SiFive Vector Floating-Point Exponential Function Instruction, Single Precision).
+; CHECK-NEXT: xsfvfexpa - 'XSfvfexpa' (SiFive Vector Floating-Point Exponential Approximation Instruction).
+; CHECK-NEXT: xsfvfexpa64e - 'XSfvfexpa64e' (SiFive Vector Floating-Point Exponential Approximation Instruction with Double-Precision).
; CHECK-NEXT: xsfvfnrclipxfqf - 'XSfvfnrclipxfqf' (SiFive FP32-to-int8 Ranged Clip Instructions).
; CHECK-NEXT: xsfvfwmaccqqq - 'XSfvfwmaccqqq' (SiFive Matrix Multiply Accumulate Instruction (4-by-4)).
; CHECK-NEXT: xsfvqmaccdod - 'XSfvqmaccdod' (SiFive Int8 Matrix Multiplication Instructions (2-by-8 and 8-by-2)).
diff --git a/llvm/test/CodeGen/RISCV/rv32zbs.ll b/llvm/test/CodeGen/RISCV/rv32zbs.ll
index dcb70f8..f9527ef 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbs.ll
@@ -45,6 +45,32 @@ define i32 @bclr_i32_no_mask(i32 %a, i32 %b) nounwind {
ret i32 %and1
}
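+; The (1 << shamt) mask below has two users: a clear of %a and a set of %b.
+; The ZBS check lines verify isel still picks bclr and bset for the shared
+; mask instead of materializing the shift.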
+define i32 @bclr_i32_mask_multiple(i32 %a, i32 %b, i32 %shamt) nounwind {
+; RV32I-LABEL: bclr_i32_mask_multiple:
+; RV32I: # %bb.0:
+; RV32I-NEXT: li a3, 1
+; RV32I-NEXT: sll a2, a3, a2
+; RV32I-NEXT: not a3, a2
+; RV32I-NEXT: and a0, a3, a0
+; RV32I-NEXT: or a1, a1, a2
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: ret
+;
+; RV32ZBS-LABEL: bclr_i32_mask_multiple:
+; RV32ZBS: # %bb.0:
+; RV32ZBS-NEXT: bclr a0, a0, a2
+; RV32ZBS-NEXT: bset a1, a1, a2
+; RV32ZBS-NEXT: add a0, a0, a1
+; RV32ZBS-NEXT: ret
+ %shamt_masked = and i32 %shamt, 63
+ %shl = shl nuw i32 1, %shamt_masked
+ %neg = xor i32 %shl, -1
+ %and = and i32 %neg, %a
+ %or = or i32 %b, %shl
+ %c = add i32 %and, %or
+ ret i32 %c
+}
+
define i64 @bclr_i64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: bclr_i64:
; RV32I: # %bb.0:
@@ -301,17 +327,17 @@ define i64 @bext_i64(i64 %a, i64 %b) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: andi a3, a2, 63
; CHECK-NEXT: addi a4, a3, -32
-; CHECK-NEXT: bltz a4, .LBB12_2
+; CHECK-NEXT: bltz a4, .LBB13_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: srl a0, a1, a3
-; CHECK-NEXT: j .LBB12_3
-; CHECK-NEXT: .LBB12_2:
+; CHECK-NEXT: j .LBB13_3
+; CHECK-NEXT: .LBB13_2:
; CHECK-NEXT: srl a0, a0, a2
; CHECK-NEXT: slli a1, a1, 1
; CHECK-NEXT: not a2, a3
; CHECK-NEXT: sll a1, a1, a2
; CHECK-NEXT: or a0, a0, a1
-; CHECK-NEXT: .LBB12_3:
+; CHECK-NEXT: .LBB13_3:
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: li a1, 0
; CHECK-NEXT: ret
@@ -789,17 +815,17 @@ define i64 @bset_trailing_ones_i64_mask(i64 %a) nounwind {
; CHECK-NEXT: li a3, -1
; CHECK-NEXT: addi a1, a2, -32
; CHECK-NEXT: sll a0, a3, a0
-; CHECK-NEXT: bltz a1, .LBB43_2
+; CHECK-NEXT: bltz a1, .LBB44_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: sll a2, a3, a2
-; CHECK-NEXT: j .LBB43_3
-; CHECK-NEXT: .LBB43_2:
+; CHECK-NEXT: j .LBB44_3
+; CHECK-NEXT: .LBB44_2:
; CHECK-NEXT: not a2, a2
; CHECK-NEXT: lui a3, 524288
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: srl a2, a3, a2
; CHECK-NEXT: or a2, a0, a2
-; CHECK-NEXT: .LBB43_3:
+; CHECK-NEXT: .LBB44_3:
; CHECK-NEXT: srai a1, a1, 31
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: not a1, a2
@@ -817,17 +843,17 @@ define i64 @bset_trailing_ones_i64_no_mask(i64 %a) nounwind {
; CHECK-NEXT: li a1, -1
; CHECK-NEXT: addi a2, a0, -32
; CHECK-NEXT: sll a1, a1, a0
-; CHECK-NEXT: bltz a2, .LBB44_2
+; CHECK-NEXT: bltz a2, .LBB45_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: j .LBB44_3
-; CHECK-NEXT: .LBB44_2:
+; CHECK-NEXT: j .LBB45_3
+; CHECK-NEXT: .LBB45_2:
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: lui a3, 524288
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: srl a0, a3, a0
; CHECK-NEXT: or a0, a1, a0
-; CHECK-NEXT: .LBB44_3:
+; CHECK-NEXT: .LBB45_3:
; CHECK-NEXT: srai a2, a2, 31
; CHECK-NEXT: and a2, a2, a1
; CHECK-NEXT: not a1, a0
diff --git a/llvm/test/CodeGen/RISCV/rv64zbs.ll b/llvm/test/CodeGen/RISCV/rv64zbs.ll
index b4edcf6c..d42bc8e 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbs.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbs.ll
@@ -110,6 +110,32 @@ define i64 @bclr_i64_no_mask(i64 %a, i64 %b) nounwind {
ret i64 %and1
}
+define i64 @bclr_i64_mask_multiple(i64 %a, i64 %b, i64 %shamt) nounwind {
+; RV64I-LABEL: bclr_i64_mask_multiple:
+; RV64I: # %bb.0:
+; RV64I-NEXT: li a3, 1
+; RV64I-NEXT: sll a2, a3, a2
+; RV64I-NEXT: not a3, a2
+; RV64I-NEXT: and a0, a3, a0
+; RV64I-NEXT: or a1, a1, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ret
+;
+; RV64ZBS-LABEL: bclr_i64_mask_multiple:
+; RV64ZBS: # %bb.0:
+; RV64ZBS-NEXT: bclr a0, a0, a2
+; RV64ZBS-NEXT: bset a1, a1, a2
+; RV64ZBS-NEXT: add a0, a0, a1
+; RV64ZBS-NEXT: ret
+ %shamt_masked = and i64 %shamt, 63
+ %shl = shl nuw i64 1, %shamt_masked
+ %neg = xor i64 %shl, -1
+ %and = and i64 %neg, %a
+ %or = or i64 %b, %shl
+ %c = add i64 %and, %or
+ ret i64 %c
+}
+
define signext i32 @bset_i32(i32 signext %a, i32 signext %b) nounwind {
; RV64I-LABEL: bset_i32:
; RV64I: # %bb.0:
@@ -372,19 +398,19 @@ define void @bext_i32_trunc(i32 signext %0, i32 signext %1) {
; RV64I: # %bb.0:
; RV64I-NEXT: srlw a0, a0, a1
; RV64I-NEXT: andi a0, a0, 1
-; RV64I-NEXT: beqz a0, .LBB19_2
+; RV64I-NEXT: beqz a0, .LBB20_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: ret
-; RV64I-NEXT: .LBB19_2:
+; RV64I-NEXT: .LBB20_2:
; RV64I-NEXT: tail bar
;
; RV64ZBS-LABEL: bext_i32_trunc:
; RV64ZBS: # %bb.0:
; RV64ZBS-NEXT: bext a0, a0, a1
-; RV64ZBS-NEXT: beqz a0, .LBB19_2
+; RV64ZBS-NEXT: beqz a0, .LBB20_2
; RV64ZBS-NEXT: # %bb.1:
; RV64ZBS-NEXT: ret
-; RV64ZBS-NEXT: .LBB19_2:
+; RV64ZBS-NEXT: .LBB20_2:
; RV64ZBS-NEXT: tail bar
%3 = shl i32 1, %1
%4 = and i32 %3, %0
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWBufferDynamicIdx.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWBufferDynamicIdx.ll
index cce1eda..1aee688 100644
--- a/llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWBufferDynamicIdx.ll
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWBufferDynamicIdx.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - | FileCheck %s --match-full-lines
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - -filetype=obj | spirv-val %}
%"__cblayout_$Globals" = type <{ i32 }>
@@ -9,7 +10,6 @@
; CHECK: OpCapability Shader
; CHECK: OpCapability StorageTexelBufferArrayDynamicIndexingEXT
-
define void @main() local_unnamed_addr #0 {
entry:
%"$Globals.cb_h.i.i" = tail call target("spirv.VulkanBuffer", target("spirv.Layout", %"__cblayout_$Globals", 4, 0), 2, 0) @"llvm.spv.resource.handlefromimplicitbinding.tspirv.VulkanBuffer_tspirv.Layout_s___cblayout_$Globalss_4_0t_2_0t"(i32 1, i32 0, i32 1, i32 0, ptr nonnull @"$Globals.str")
@@ -19,4 +19,8 @@ entry:
%2 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.Image_i32_5_2_0_0_2_33t(target("spirv.Image", i32, 5, 2, 0, 0, 2, 33) %1, i32 98)
store i32 99, ptr addrspace(11) %2, align 4
ret void
-}
\ No newline at end of file
+}
+
+!hlsl.cbs = !{!0}
+
+!0 = !{ptr @"$Globals.cb", ptr addrspace(12) @i}
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWStructuredBufferDynamicIdx.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWStructuredBufferDynamicIdx.ll
index da69a2f..163fc9d 100644
--- a/llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWStructuredBufferDynamicIdx.ll
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/DynamicIdx/RWStructuredBufferDynamicIdx.ll
@@ -1,4 +1,5 @@
; RUN: llc -O0 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - | FileCheck %s --match-full-lines
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - -filetype=obj | spirv-val %}
%"__cblayout_$Globals" = type <{ i32 }>
@@ -19,3 +20,7 @@ entry:
store i32 98, ptr addrspace(11) %2, align 4
ret void
}
+
+!hlsl.cbs = !{!0}
+
+!0 = !{ptr @"$Globals.cb", ptr addrspace(12) @i}
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/TypedBufferLoad.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/TypedBufferLoad.ll
new file mode 100644
index 0000000..7c44b6d
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/TypedBufferLoad.ll
@@ -0,0 +1,43 @@
+; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv1.6-unknown-vulkan1.3-compute %s -o - -filetype=obj | spirv-val %}
+
+; When accessing read-only `Buffer` types, the SPIR-V backend should emit `OpImageFetch` instead of `OpImageRead`.
+; https://github.com/llvm/llvm-project/issues/162891
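+;
+; Of the three handles below, `rwbuff` is a read-write storage image and
+; `unknown` has unknown sampled-ness; loads through both keep OpImageRead.
+; Only `buff`, a read-only sampled buffer, should switch to OpImageFetch.
+; A rough HLSL analogue (illustrative only, not the generated source):
+;   RWBuffer<uint> rwbuff;
+;   Buffer<uint>   buff;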
+
+; CHECK-DAG: OpCapability SampledBuffer
+; CHECK-DAG: OpCapability ImageBuffer
+; CHECK-DAG: [[TypeInt:%[0-9]+]] = OpTypeInt 32 0
+; CHECK-DAG: [[TypeImageBuffer:%[0-9]+]] = OpTypeImage [[TypeInt]] Buffer 2 0 0 1 Unknown
+; CHECK-DAG: [[TypePtrImageBuffer:%[0-9]+]] = OpTypePointer UniformConstant [[TypeImageBuffer]]
+; CHECK-DAG: [[TypeVector:%[0-9]+]] = OpTypeVector [[TypeInt]] 4
+; CHECK-DAG: [[Index:%[0-9]+]] = OpConstant [[TypeInt]] 98
+; CHECK-DAG: [[Variable:%[0-9]+]] = OpVariable [[TypePtrImageBuffer]] UniformConstant
+@.str = private unnamed_addr constant [7 x i8] c"rwbuff\00", align 1
+@.str.2 = private unnamed_addr constant [5 x i8] c"buff\00", align 1
+@.str.4 = private unnamed_addr constant [8 x i8] c"unknown\00", align 1
+
+define void @main() local_unnamed_addr #0 {
+ %1 = tail call target("spirv.Image", i32, 5, 2, 0, 0, 2, 33) @llvm.spv.resource.handlefromimplicitbinding.tspirv.Image_i32_5_2_0_0_2_33t(i32 0, i32 0, i32 1, i32 0, ptr nonnull @.str)
+ %2 = tail call target("spirv.Image", i32, 5, 2, 0, 0, 1, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.Image_i32_5_2_0_0_1_0t(i32 1, i32 0, i32 1, i32 0, ptr nonnull @.str.2)
+ %3 = tail call target("spirv.Image", i32, 5, 2, 0, 0, 0, 0) @llvm.spv.resource.handlefromimplicitbinding.tspirv.Image_i32_5_2_0_0_0_0t(i32 2, i32 0, i32 1, i32 0, ptr nonnull @.str.4)
+ %4 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.Image_i32_5_2_0_0_1_0t(target("spirv.Image", i32, 5, 2, 0, 0, 1, 0) %2, i32 98)
+; CHECK: [[Load:%[0-9]+]] = OpLoad [[TypeImageBuffer]] [[Variable]]
+; CHECK: [[ImageFetch:%[0-9]+]] = OpImageFetch [[TypeVector]] [[Load]] [[Index]]
+; CHECK: {{.*}} = OpCompositeExtract [[TypeInt]] [[ImageFetch]] 0
+ %5 = load i32, ptr addrspace(11) %4, align 4
+ %6 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.Image_i32_5_2_0_0_2_33t(target("spirv.Image", i32, 5, 2, 0, 0, 2, 33) %1, i32 99)
+ store i32 %5, ptr addrspace(11) %6, align 4
+ %7 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.Image_i32_5_2_0_0_2_33t(target("spirv.Image", i32, 5, 2, 0, 0, 2, 33) %1, i32 96)
+; CHECK: {{%[0-9]+}} = OpLoad {{.*}}
+; CHECK: {{%[0-9]+}} = OpImageRead {{.*}}
+ %8 = load i32, ptr addrspace(11) %7, align 4
+ %9 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.Image_i32_5_2_0_0_2_33t(target("spirv.Image", i32, 5, 2, 0, 0, 2, 33) %1, i32 97)
+ store i32 %8, ptr addrspace(11) %9, align 4
+ %10 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.Image_i32_5_2_0_0_0_0t(target("spirv.Image", i32, 5, 2, 0, 0, 0, 0) %3, i32 94)
+; CHECK: {{%[0-9]+}} = OpLoad {{.*}}
+; CHECK: {{%[0-9]+}} = OpImageRead {{.*}}
+ %11 = load i32, ptr addrspace(11) %10, align 4
+ %12 = tail call noundef align 4 dereferenceable(4) ptr addrspace(11) @llvm.spv.resource.getpointer.p11.tspirv.Image_i32_5_2_0_0_2_33t(target("spirv.Image", i32, 5, 2, 0, 0, 2, 33) %1, i32 95)
+ store i32 %11, ptr addrspace(11) %12, align 4
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer.ll b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer.ll
index 4d32e66..6d41875 100644
--- a/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer.ll
+++ b/llvm/test/CodeGen/SPIRV/hlsl-resources/cbuffer.ll
@@ -1,5 +1,5 @@
; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv1.6-vulkan1.3-library %s -o - | FileCheck %s
-; Test that uses of cbuffer members inside ConstantExprs are handled correctly.
+; Test that uses of cbuffer members are handled correctly.
; CHECK-DAG: OpDecorate %[[MyCBuffer:[0-9]+]] DescriptorSet 0
; CHECK-DAG: OpDecorate %[[MyCBuffer]] Binding 0
@@ -37,10 +37,8 @@ entry:
; CHECK: %[[tmp_ptr:[0-9]+]] = OpAccessChain {{%[0-9]+}} %[[tmp]] %[[uint_0]] %[[uint_0]]
; CHECK: %[[v_ptr:.+]] = OpAccessChain %[[_ptr_Uniform_v4float]] %[[tmp]] %[[uint_0]] %[[uint_1]]
; CHECK: %[[s_ptr_gep:[0-9]+]] = OpInBoundsAccessChain %[[_ptr_Uniform_float]] %[[tmp_ptr]] %[[uint_0]] %[[uint_1]]
- %gep = getelementptr inbounds %MyStruct, ptr addrspace(12) @s, i32 0, i32 0, i32 1
-
; CHECK: %[[s_val:.+]] = OpLoad %[[float]] %[[s_ptr_gep]]
- %load_from_gep = load float, ptr addrspace(12) %gep, align 4
+ %load_from_gep = load float, ptr addrspace(12) getelementptr inbounds (%MyStruct, ptr addrspace(12) @s, i32 0, i32 0, i32 1), align 4
; CHECK: %[[v_val:.+]] = OpLoad %[[v4float]] %[[v_ptr]]
%load_v = load <4 x float>, ptr addrspace(12) @v, align 16
diff --git a/llvm/test/CodeGen/SystemZ/fp-sincos-01.ll b/llvm/test/CodeGen/SystemZ/fp-sincos-01.ll
index 4a38d7a..c87f113 100644
--- a/llvm/test/CodeGen/SystemZ/fp-sincos-01.ll
+++ b/llvm/test/CodeGen/SystemZ/fp-sincos-01.ll
@@ -1,7 +1,7 @@
; Test that combined sin/cos library call is emitted when appropriate
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s --check-prefix=CHECK-OPT
-; RUN: llc < %s -mtriple=s390x-linux-gnu -enable-unsafe-fp-math | FileCheck %s --check-prefix=CHECK-OPT
+; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s --check-prefix=CHECK-OPT
define float @f1(float %x) {
; CHECK-OPT-LABEL: f1:
diff --git a/llvm/test/CodeGen/SystemZ/int-conv-14.ll b/llvm/test/CodeGen/SystemZ/int-conv-14.ll
index 98dc88f..baab5ac 100644
--- a/llvm/test/CodeGen/SystemZ/int-conv-14.ll
+++ b/llvm/test/CodeGen/SystemZ/int-conv-14.ll
@@ -55,14 +55,15 @@ define i128 @f4(ptr %ptr) {
}
; Truncation to i64.
-define i64 @f5(i128 %a) {
+define i64 @f5(i128 %a, i128 %b) {
; CHECK-LABEL: f5:
; CHECK: # %bb.0:
-; CHECK-NEXT: vl %v0, 0(%r2), 3
-; CHECK-NEXT: vaq %v0, %v0, %v0
+; CHECK-NEXT: vl %v0, 0(%r3), 3
+; CHECK-NEXT: vl %v1, 0(%r2), 3
+; CHECK-NEXT: vaq %v0, %v1, %v0
; CHECK-NEXT: vlgvg %r2, %v0, 1
; CHECK-NEXT: br %r14
- %op = add i128 %a, %a
+ %op = add i128 %a, %b
%res = trunc i128 %op to i64
ret i64 %res
}
@@ -134,15 +135,16 @@ define i128 @f10(ptr %ptr) {
}
; Truncation to i32.
-define i32 @f11(i128 %a) {
+define i32 @f11(i128 %a, i128 %b) {
; CHECK-LABEL: f11:
; CHECK: # %bb.0:
-; CHECK-NEXT: vl %v0, 0(%r2), 3
-; CHECK-NEXT: vaq %v0, %v0, %v0
+; CHECK-NEXT: vl %v0, 0(%r3), 3
+; CHECK-NEXT: vl %v1, 0(%r2), 3
+; CHECK-NEXT: vaq %v0, %v1, %v0
; CHECK-NEXT: vlgvf %r2, %v0, 3
; CHECK-NEXT: # kill: def $r2l killed $r2l killed $r2d
; CHECK-NEXT: br %r14
- %op = add i128 %a, %a
+ %op = add i128 %a, %b
%res = trunc i128 %op to i32
ret i32 %res
}
@@ -215,15 +217,16 @@ define i128 @f16(ptr %ptr) {
}
; Truncation to i16.
-define i16 @f17(i128 %a) {
+define i16 @f17(i128 %a, i128 %b) {
; CHECK-LABEL: f17:
; CHECK: # %bb.0:
-; CHECK-NEXT: vl %v0, 0(%r2), 3
-; CHECK-NEXT: vaq %v0, %v0, %v0
+; CHECK-NEXT: vl %v0, 0(%r3), 3
+; CHECK-NEXT: vl %v1, 0(%r2), 3
+; CHECK-NEXT: vaq %v0, %v1, %v0
; CHECK-NEXT: vlgvf %r2, %v0, 3
; CHECK-NEXT: # kill: def $r2l killed $r2l killed $r2d
; CHECK-NEXT: br %r14
- %op = add i128 %a, %a
+ %op = add i128 %a, %b
%res = trunc i128 %op to i16
ret i16 %res
}
@@ -296,15 +299,16 @@ define i128 @f22(ptr %ptr) {
}
; Truncation to i8.
-define i8 @f23(i128 %a) {
+define i8 @f23(i128 %a, i128 %b) {
; CHECK-LABEL: f23:
; CHECK: # %bb.0:
-; CHECK-NEXT: vl %v0, 0(%r2), 3
-; CHECK-NEXT: vaq %v0, %v0, %v0
+; CHECK-NEXT: vl %v0, 0(%r3), 3
+; CHECK-NEXT: vl %v1, 0(%r2), 3
+; CHECK-NEXT: vaq %v0, %v1, %v0
; CHECK-NEXT: vlgvf %r2, %v0, 3
; CHECK-NEXT: # kill: def $r2l killed $r2l killed $r2d
; CHECK-NEXT: br %r14
- %op = add i128 %a, %a
+ %op = add i128 %a, %b
%res = trunc i128 %op to i8
ret i8 %res
}
@@ -385,15 +389,16 @@ define i128 @f28(ptr %ptr) {
}
; Truncation to i1.
-define i1 @f29(i128 %a) {
+define i1 @f29(i128 %a, i128 %b) {
; CHECK-LABEL: f29:
; CHECK: # %bb.0:
-; CHECK-NEXT: vl %v0, 0(%r2), 3
-; CHECK-NEXT: vaq %v0, %v0, %v0
+; CHECK-NEXT: vl %v0, 0(%r3), 3
+; CHECK-NEXT: vl %v1, 0(%r2), 3
+; CHECK-NEXT: vaq %v0, %v1, %v0
; CHECK-NEXT: vlgvf %r2, %v0, 3
; CHECK-NEXT: # kill: def $r2l killed $r2l killed $r2d
; CHECK-NEXT: br %r14
- %op = add i128 %a, %a
+ %op = add i128 %a, %b
%res = trunc i128 %op to i1
ret i1 %res
}
diff --git a/llvm/test/CodeGen/SystemZ/int-conv-15.ll b/llvm/test/CodeGen/SystemZ/int-conv-15.ll
index 0d8ee75..f2c9ee5 100644
--- a/llvm/test/CodeGen/SystemZ/int-conv-15.ll
+++ b/llvm/test/CodeGen/SystemZ/int-conv-15.ll
@@ -55,14 +55,15 @@ define i128 @f4(ptr %ptr) {
}
; Truncation to i64.
-define i64 @f5(i128 %a) {
+define i64 @f5(i128 %a, i128 %b) {
; CHECK-LABEL: f5:
; CHECK: # %bb.0:
-; CHECK-NEXT: vl %v0, 0(%r2), 3
-; CHECK-NEXT: vaq %v0, %v0, %v0
+; CHECK-NEXT: vl %v0, 0(%r3), 3
+; CHECK-NEXT: vl %v1, 0(%r2), 3
+; CHECK-NEXT: vaq %v0, %v1, %v0
; CHECK-NEXT: vlgvg %r2, %v0, 1
; CHECK-NEXT: br %r14
- %op = add i128 %a, %a
+ %op = add i128 %a, %b
%res = trunc i128 %op to i64
ret i64 %res
}
@@ -134,15 +135,16 @@ define i128 @f10(ptr %ptr) {
}
; Truncation to i32.
-define i32 @f11(i128 %a) {
+define i32 @f11(i128 %a, i128 %b) {
; CHECK-LABEL: f11:
; CHECK: # %bb.0:
-; CHECK-NEXT: vl %v0, 0(%r2), 3
-; CHECK-NEXT: vaq %v0, %v0, %v0
+; CHECK-NEXT: vl %v0, 0(%r3), 3
+; CHECK-NEXT: vl %v1, 0(%r2), 3
+; CHECK-NEXT: vaq %v0, %v1, %v0
; CHECK-NEXT: vlgvf %r2, %v0, 3
; CHECK-NEXT: # kill: def $r2l killed $r2l killed $r2d
; CHECK-NEXT: br %r14
- %op = add i128 %a, %a
+ %op = add i128 %a, %b
%res = trunc i128 %op to i32
ret i32 %res
}
@@ -215,15 +217,16 @@ define i128 @f16(ptr %ptr) {
}
; Truncation to i16.
-define i16 @f17(i128 %a) {
+define i16 @f17(i128 %a, i128 %b) {
; CHECK-LABEL: f17:
; CHECK: # %bb.0:
-; CHECK-NEXT: vl %v0, 0(%r2), 3
-; CHECK-NEXT: vaq %v0, %v0, %v0
+; CHECK-NEXT: vl %v0, 0(%r3), 3
+; CHECK-NEXT: vl %v1, 0(%r2), 3
+; CHECK-NEXT: vaq %v0, %v1, %v0
; CHECK-NEXT: vlgvf %r2, %v0, 3
; CHECK-NEXT: # kill: def $r2l killed $r2l killed $r2d
; CHECK-NEXT: br %r14
- %op = add i128 %a, %a
+ %op = add i128 %a, %b
%res = trunc i128 %op to i16
ret i16 %res
}
@@ -296,15 +299,16 @@ define i128 @f22(ptr %ptr) {
}
; Truncation to i8.
-define i8 @f23(i128 %a) {
+define i8 @f23(i128 %a, i128 %b) {
; CHECK-LABEL: f23:
; CHECK: # %bb.0:
-; CHECK-NEXT: vl %v0, 0(%r2), 3
-; CHECK-NEXT: vaq %v0, %v0, %v0
+; CHECK-NEXT: vl %v0, 0(%r3), 3
+; CHECK-NEXT: vl %v1, 0(%r2), 3
+; CHECK-NEXT: vaq %v0, %v1, %v0
; CHECK-NEXT: vlgvf %r2, %v0, 3
; CHECK-NEXT: # kill: def $r2l killed $r2l killed $r2d
; CHECK-NEXT: br %r14
- %op = add i128 %a, %a
+ %op = add i128 %a, %b
%res = trunc i128 %op to i8
ret i8 %res
}
@@ -383,15 +387,16 @@ define i128 @f28(ptr %ptr) {
}
; Truncation to i1.
-define i1 @f29(i128 %a) {
+define i1 @f29(i128 %a, i128 %b) {
; CHECK-LABEL: f29:
; CHECK: # %bb.0:
-; CHECK-NEXT: vl %v0, 0(%r2), 3
-; CHECK-NEXT: vaq %v0, %v0, %v0
+; CHECK-NEXT: vl %v0, 0(%r3), 3
+; CHECK-NEXT: vl %v1, 0(%r2), 3
+; CHECK-NEXT: vaq %v0, %v1, %v0
; CHECK-NEXT: vlgvf %r2, %v0, 3
; CHECK-NEXT: # kill: def $r2l killed $r2l killed $r2d
; CHECK-NEXT: br %r14
- %op = add i128 %a, %a
+ %op = add i128 %a, %b
%res = trunc i128 %op to i1
ret i1 %res
}
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-vs-unpredicated-copy.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-vs-unpredicated-copy.mir
new file mode 100644
index 0000000..5783133
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vctp-vs-unpredicated-copy.mir
@@ -0,0 +1,146 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6
+# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -verify-machineinstrs -o - | FileCheck %s
+
+# From bug #162644. The _wrong_ output of this test is to generate the
+# body of the tail-predicated loop like this:
+#
+# $q2 = MVE_VORR killed $q0, killed $q0, 0, $noreg, $noreg, undef $q2
+# renamable $r0, renamable $q3 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, renamable $lr :: (load unknown-size from %ir.13, align 4)
+# $q0 = MVE_VORR $q1, $q1, 0, $noreg, $noreg, undef $q0
+# renamable $q0 = MVE_VADDf32 killed renamable $q2, killed renamable $q3, 0, killed $noreg, renamable $lr, killed renamable $q0
+# $lr = MVE_LETP killed renamable $lr, %bb.1
+#
+# in which the second MVE_VORR, copying q1 into q0, is an invalid conversion of
+# the input MQPRCopy, because it won't copy the vector lanes disabled by
+# FPSCR.LTPSIZE, and those are needed in the output value of the loop.
+#
+# In the right output, that MQPRCopy is expanded into a pair of VMOVD
+# instructions copying d2,d3 into d0,d1, which are unaffected by LTPSIZE.
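+#
+# As a rough sketch (simplified assembly, not text checked by this test),
+# inside the tail-predicated loop the lane-masked copy
+#   vorr q0, q1, q1   @ does not write lanes disabled by FPSCR.LTPSIZE
+# must instead be emitted as two full d-register moves
+#   vmov d0, d2
+#   vmov d1, d3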
+
+--- |
+ target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
+ target triple = "thumbv8.1m.main-unknown-none-eabihf"
+
+ @inactive = dso_local local_unnamed_addr global <4 x float> zeroinitializer, align 16
+
+ define <4 x float> @test_func(ptr %0, i32 %1) {
+ %3 = load <4 x float>, ptr @inactive, align 16
+ %4 = add i32 %1, 3
+ %5 = call i32 @llvm.smin.i32(i32 %1, i32 4)
+ %6 = sub i32 %4, %5
+ %7 = lshr i32 %6, 2
+ %8 = add nuw nsw i32 %7, 1
+ %9 = call i32 @llvm.start.loop.iterations.i32(i32 %8)
+ br label %10
+
+ 10: ; preds = %10, %2
+ %11 = phi <4 x float> [ splat (float 0x3FB99999A0000000), %2 ], [ %17, %10 ]
+ %12 = phi i32 [ %1, %2 ], [ %19, %10 ]
+ %13 = phi ptr [ %0, %2 ], [ %18, %10 ]
+ %14 = phi i32 [ %9, %2 ], [ %20, %10 ]
+ %15 = tail call <4 x i1> @llvm.arm.mve.vctp32(i32 %12)
+ %16 = tail call <4 x float> @llvm.masked.load.v4f32.p0(ptr %13, i32 4, <4 x i1> %15, <4 x float> zeroinitializer)
+ %17 = tail call <4 x float> @llvm.arm.mve.add.predicated.v4f32.v4i1(<4 x float> %11, <4 x float> %16, <4 x i1> %15, <4 x float> %3)
+ %18 = getelementptr inbounds nuw i8, ptr %13, i32 16
+ %19 = add i32 %12, -4
+ %20 = call i32 @llvm.loop.decrement.reg.i32(i32 %14, i32 1)
+ %21 = icmp ne i32 %20, 0
+ br i1 %21, label %10, label %22
+
+ 22: ; preds = %10
+ ret <4 x float> %17
+ }
+...
+---
+name: test_func
+alignment: 4
+legalized: false
+tracksRegLiveness: true
+registers: []
+liveins:
+ - { reg: '$r0', virtual-reg: '' }
+ - { reg: '$r1', virtual-reg: '' }
+stack:
+ - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
+ stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+ - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
+ stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+body: |
+ ; CHECK-LABEL: name: test_func
+ ; CHECK: bb.0 (%ir-block.2):
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $lr, $r0, $r1, $r7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK-NEXT: frame-setup CFI_INSTRUCTION offset $r7, -8
+ ; CHECK-NEXT: $r2 = t2MOVi16 target-flags(arm-lo16) @inactive, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r2 = t2MOVTi16 killed $r2, target-flags(arm-hi16) @inactive, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q1 = MVE_VLDRWU32 killed renamable $r2, 0, 0, $noreg, $noreg :: (dereferenceable load (s128) from @inactive)
+ ; CHECK-NEXT: $r3 = t2MOVi16 52429, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $r3 = t2MOVTi16 killed $r3, 15820, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VDUP32 killed renamable $r3, 0, $noreg, $noreg, undef renamable $q0
+ ; CHECK-NEXT: $lr = MVE_DLSTP_32 killed renamable $r1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1 (%ir-block.10, align 4):
+ ; CHECK-NEXT: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK-NEXT: liveins: $lr, $d2, $d3, $q0, $r0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: $q2 = MVE_VORR killed $q0, killed $q0, 0, $noreg, $noreg, undef $q2
+ ; CHECK-NEXT: renamable $r0, renamable $q3 = MVE_VLDRWU32_post killed renamable $r0, 16, 0, $noreg, renamable $lr :: (load unknown-size from %ir.13, align 4)
+ ; CHECK-NEXT: $d0 = VMOVD $d2, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: $d1 = VMOVD $d3, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: renamable $q0 = MVE_VADDf32 killed renamable $q2, killed renamable $q3, 0, killed $noreg, renamable $lr, killed renamable $q0
+ ; CHECK-NEXT: $lr = MVE_LETP killed renamable $lr, %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2 (%ir-block.22):
+ ; CHECK-NEXT: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $q0
+ bb.0 (%ir-block.2):
+ successors: %bb.1(0x80000000)
+ liveins: $r0, $r1, $r7, $lr
+
+ frame-setup tPUSH 14 /* CC::al */, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
+ frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ frame-setup CFI_INSTRUCTION offset $lr, -4
+ frame-setup CFI_INSTRUCTION offset $r7, -8
+ $r2 = t2MOVi16 target-flags(arm-lo16) @inactive, 14 /* CC::al */, $noreg
+ tCMPi8 renamable $r1, 4, 14 /* CC::al */, $noreg, implicit-def $cpsr
+ $r2 = t2MOVTi16 killed $r2, target-flags(arm-hi16) @inactive, 14 /* CC::al */, $noreg
+ renamable $r3 = t2MOVi 1, 14 /* CC::al */, $noreg, $noreg
+ renamable $q1 = MVE_VLDRWU32 killed renamable $r2, 0, 0, $noreg, $noreg :: (dereferenceable load (s128) from @inactive)
+ $r2 = tMOVr $r1, 14 /* CC::al */, $noreg
+ t2IT 10, 8, implicit-def $itstate
+ renamable $r2 = tMOVi8 $noreg, 4, 10 /* CC::ge */, killed $cpsr, implicit killed renamable $r2, implicit killed $itstate
+ renamable $r2, dead $cpsr = tSUBrr renamable $r1, killed renamable $r2, 14 /* CC::al */, $noreg
+ renamable $r2, dead $cpsr = tADDi8 killed renamable $r2, 3, 14 /* CC::al */, $noreg
+ renamable $r2 = nuw nsw t2ADDrs killed renamable $r3, killed renamable $r2, 19, 14 /* CC::al */, $noreg, $noreg
+ $r3 = t2MOVi16 52429, 14 /* CC::al */, $noreg
+ $r3 = t2MOVTi16 killed $r3, 15820, 14 /* CC::al */, $noreg
+ renamable $q0 = MVE_VDUP32 killed renamable $r3, 0, $noreg, $noreg, undef renamable $q0
+ renamable $lr = t2DoLoopStartTP killed renamable $r2, renamable $r1
+
+ bb.1 (%ir-block.10, align 4):
+ successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ liveins: $lr, $q0, $q1, $r0, $r1
+
+ renamable $vpr = MVE_VCTP32 renamable $r1, 0, $noreg, $noreg
+ $q2 = MQPRCopy killed $q0
+ MVE_VPST 8, implicit $vpr
+ renamable $r0, renamable $q3 = MVE_VLDRWU32_post killed renamable $r0, 16, 1, renamable $vpr, renamable $lr :: (load unknown-size from %ir.13, align 4)
+ $q0 = MQPRCopy $q1
+ MVE_VPST 8, implicit $vpr
+ renamable $q0 = MVE_VADDf32 killed renamable $q2, killed renamable $q3, 1, killed renamable $vpr, renamable $lr, killed renamable $q0
+ renamable $r1, dead $cpsr = tSUBi8 killed renamable $r1, 4, 14 /* CC::al */, $noreg
+ renamable $lr = t2LoopEndDec killed renamable $lr, %bb.1, implicit-def dead $cpsr
+ tB %bb.2, 14 /* CC::al */, $noreg
+
+ bb.2 (%ir-block.22):
+ liveins: $q0
+
+ frame-destroy tPOP_RET 14 /* CC::al */, $noreg, def $r7, def $pc, implicit killed $q0
+...
diff --git a/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-5.ll b/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-5.ll
index 053d6a1..d741411 100644
--- a/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-5.ll
+++ b/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-5.ll
@@ -94,5 +94,5 @@ attributes #1 = { minsize nofree norecurse nounwind optsize }
!llvm.module.flags = !{!0, !1, !2}
!0 = !{i32 8, !"branch-target-enforcement", i32 0}
-!1 = !{i32 8, !"sign-return-address", i32 1}
+!1 = !{i32 8, !"sign-return-address", i32 2}
!2 = !{i32 8, !"sign-return-address-all", i32 0}
diff --git a/llvm/test/CodeGen/WebAssembly/memory-interleave.ll b/llvm/test/CodeGen/WebAssembly/memory-interleave.ll
index 94efe0f..104ec31 100644
--- a/llvm/test/CodeGen/WebAssembly/memory-interleave.ll
+++ b/llvm/test/CodeGen/WebAssembly/memory-interleave.ll
@@ -5,6 +5,7 @@ target datalayout = "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-n32:64-S128-ni:1:10:20
%struct.TwoInts = type { i32, i32 }
%struct.ThreeInts = type { i32, i32, i32 }
%struct.FourInts = type { i32, i32, i32, i32 }
+%struct.TwoShorts = type { i16, i16 }
%struct.ThreeShorts = type { i16, i16, i16 }
%struct.FourShorts = type { i16, i16, i16, i16 }
%struct.FiveShorts = type { i16, i16, i16, i16, i16 }
@@ -12,6 +13,8 @@ target datalayout = "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-n32:64-S128-ni:1:10:20
%struct.ThreeBytes = type { i8, i8, i8 }
%struct.FourBytes = type { i8, i8, i8, i8 }
%struct.EightBytes = type { i8, i8, i8, i8, i8, i8, i8, i8 }
+%struct.TwoFloats = type { float, float }
+%struct.FourFloats = type { float, float, float, float }
; CHECK-LABEL: two_ints_same_op:
; CHECK: loop
@@ -1536,3 +1539,1608 @@ define hidden void @scale_uv_row_down2_linear(ptr nocapture noundef readonly %0,
34: ; preds = %6, %4
ret void
}
+
+; CHECK-LABEL: two_floats_same_op:
+; CHECK-NOT: f32x4.mul
+define hidden void @two_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
+entry:
+ %cmp21.not = icmp eq i32 %N, 0
+ br i1 %cmp21.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %i.022 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw %struct.TwoFloats, ptr %a, i32 %i.022
+ %0 = load float, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds nuw %struct.TwoFloats, ptr %b, i32 %i.022
+ %1 = load float, ptr %arrayidx1, align 4
+ %mul = fmul float %0, %1
+ %arrayidx3 = getelementptr inbounds nuw %struct.TwoFloats, ptr %res, i32 %i.022
+ store float %mul, ptr %arrayidx3, align 4
+ %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4
+ %2 = load float, ptr %y, align 4
+ %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4
+ %3 = load float, ptr %y7, align 4
+ %mul8 = fmul float %2, %3
+ %y10 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 4
+ store float %mul8, ptr %y10, align 4
+ %inc = add nuw i32 %i.022, 1
+ %exitcond.not = icmp eq i32 %inc, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
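+; Two-float structs with differing ops (fadd, fsub): check that nothing is vectorized to f32x4.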
+; CHECK-LABEL: two_floats_vary_op:
+; CHECK-NOT: f32x4
+define hidden void @two_floats_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
+entry:
+ %cmp20.not = icmp eq i32 %N, 0
+ br i1 %cmp20.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %i.021 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw %struct.TwoFloats, ptr %a, i32 %i.021
+ %0 = load float, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds nuw %struct.TwoFloats, ptr %b, i32 %i.021
+ %1 = load float, ptr %arrayidx1, align 4
+ %add = fadd float %0, %1
+ %arrayidx3 = getelementptr inbounds nuw %struct.TwoFloats, ptr %res, i32 %i.021
+ store float %add, ptr %arrayidx3, align 4
+ %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4
+ %2 = load float, ptr %y, align 4
+ %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4
+ %3 = load float, ptr %y7, align 4
+ %sub = fsub float %2, %3
+ %y9 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 4
+ store float %sub, ptr %y9, align 4
+ %inc = add nuw i32 %i.021, 1
+ %exitcond.not = icmp eq i32 %inc, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
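+; i8 pairs widened to float and multiplied into TwoFloats: expect de-interleaving shuffles, sign-extends and converts feeding two f32x4.mul, then re-interleaving stores.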
+; CHECK-LABEL: two_bytes_two_floats_same_op:
+; CHECK: loop
+; CHECK: v128.load64_zero
+; CHECK: i8x16.shuffle {{.*}} 0, 2, 4, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: v128.load64_zero
+; CHECK: i8x16.shuffle {{.*}} 0, 2, 4, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.mul
+; CHECK: i8x16.shuffle {{.*}} 1, 3, 5, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: i8x16.shuffle {{.*}} 1, 3, 5, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.mul
+; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31
+; CHECK: v128.store
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23
+; CHECK: v128.store
+define hidden void @two_bytes_two_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
+entry:
+ %cmp24.not = icmp eq i32 %N, 0
+ br i1 %cmp24.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %i.025 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw %struct.TwoBytes, ptr %a, i32 %i.025
+ %0 = load i8, ptr %arrayidx, align 1
+ %conv = sitofp i8 %0 to float
+ %arrayidx1 = getelementptr inbounds nuw %struct.TwoBytes, ptr %b, i32 %i.025
+ %1 = load i8, ptr %arrayidx1, align 1
+ %conv3 = sitofp i8 %1 to float
+ %mul = fmul float %conv, %conv3
+ %arrayidx4 = getelementptr inbounds nuw %struct.TwoFloats, ptr %res, i32 %i.025
+ store float %mul, ptr %arrayidx4, align 4
+ %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 1
+ %2 = load i8, ptr %y, align 1
+ %conv7 = sitofp i8 %2 to float
+ %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 1
+ %3 = load i8, ptr %y9, align 1
+ %conv10 = sitofp i8 %3 to float
+ %mul11 = fmul float %conv7, %conv10
+ %y13 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4
+ store float %mul11, ptr %y13, align 4
+ %inc = add nuw i32 %i.025, 1
+ %exitcond.not = icmp eq i32 %inc, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
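+; Same i8-to-float widening, but with fadd on the first field and fsub on the second.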
+; CHECK-LABEL: two_bytes_two_floats_vary_op:
+; CHECK: v128.load64_zero
+; CHECK: i8x16.shuffle {{.*}} 0, 2, 4, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: v128.load64_zero
+; CHECK: i8x16.shuffle {{.*}} 0, 2, 4, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.add
+; CHECK: i8x16.shuffle {{.*}} 1, 3, 5, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: i8x16.shuffle {{.*}} 1, 3, 5, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.sub
+; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31
+; CHECK: v128.store
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23
+; CHECK: v128.store
+define hidden void @two_bytes_two_floats_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
+entry:
+ %cmp23.not = icmp eq i32 %N, 0
+ br i1 %cmp23.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %i.024 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw %struct.TwoBytes, ptr %a, i32 %i.024
+ %0 = load i8, ptr %arrayidx, align 1
+ %conv = sitofp i8 %0 to float
+ %arrayidx1 = getelementptr inbounds nuw %struct.TwoBytes, ptr %b, i32 %i.024
+ %1 = load i8, ptr %arrayidx1, align 1
+ %conv3 = sitofp i8 %1 to float
+ %add = fadd float %conv, %conv3
+ %arrayidx4 = getelementptr inbounds nuw %struct.TwoFloats, ptr %res, i32 %i.024
+ store float %add, ptr %arrayidx4, align 4
+ %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 1
+ %2 = load i8, ptr %y, align 1
+ %conv7 = sitofp i8 %2 to float
+ %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 1
+ %3 = load i8, ptr %y9, align 1
+ %conv10 = sitofp i8 %3 to float
+ %sub = fsub float %conv7, %conv10
+ %y12 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4
+ store float %sub, ptr %y12, align 4
+ %inc = add nuw i32 %i.024, 1
+ %exitcond.not = icmp eq i32 %inc, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
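+; Float pairs multiplied and narrowed to i8 pairs: the f32x4.mul results are rebuilt lane by lane with trunc_sat and replace_lane before a 64-bit lane store.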
+; CHECK-LABEL: two_floats_two_bytes_same_op:
+; CHECK: loop
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27
+; CHECK: f32x4.mul
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.splat
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
+; CHECK: f32x4.mul
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: v128.store64_lane
+define hidden void @two_floats_two_bytes_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
+entry:
+ %cmp22.not = icmp eq i32 %N, 0
+ br i1 %cmp22.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %i.023 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw %struct.TwoFloats, ptr %a, i32 %i.023
+ %0 = load float, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds nuw %struct.TwoFloats, ptr %b, i32 %i.023
+ %1 = load float, ptr %arrayidx1, align 4
+ %mul = fmul float %0, %1
+ %conv = fptosi float %mul to i8
+ %arrayidx3 = getelementptr inbounds nuw %struct.TwoBytes, ptr %res, i32 %i.023
+ store i8 %conv, ptr %arrayidx3, align 1
+ %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4
+ %2 = load float, ptr %y, align 4
+ %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4
+ %3 = load float, ptr %y7, align 4
+ %mul8 = fmul float %2, %3
+ %conv9 = fptosi float %mul8 to i8
+ %y11 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 1
+ store i8 %conv9, ptr %y11, align 1
+ %inc = add nuw i32 %i.023, 1
+ %exitcond.not = icmp eq i32 %inc, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
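+; fadd/fsub variant of the float-to-i8 narrowing above: narrowing still goes through per-lane extract, trunc_sat and replace_lane.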
+; CHECK-LABEL: two_floats_two_bytes_vary_op:
+; CHECK: loop
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27
+; CHECK: f32x4.add
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.splat
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
+; CHECK: f32x4.sub
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: v128.store64_lane
+define hidden void @two_floats_two_bytes_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
+entry:
+ %cmp21.not = icmp eq i32 %N, 0
+ br i1 %cmp21.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %i.022 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw %struct.TwoFloats, ptr %a, i32 %i.022
+ %0 = load float, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds nuw %struct.TwoFloats, ptr %b, i32 %i.022
+ %1 = load float, ptr %arrayidx1, align 4
+ %add = fadd float %0, %1
+ %conv = fptosi float %add to i8
+ %arrayidx3 = getelementptr inbounds nuw %struct.TwoBytes, ptr %res, i32 %i.022
+ store i8 %conv, ptr %arrayidx3, align 1
+ %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4
+ %2 = load float, ptr %y, align 4
+ %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4
+ %3 = load float, ptr %y7, align 4
+ %sub = fsub float %2, %3
+ %conv8 = fptosi float %sub to i8
+ %y10 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 1
+ store i8 %conv8, ptr %y10, align 1
+ %inc = add nuw i32 %i.022, 1
+ %exitcond.not = icmp eq i32 %inc, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
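+; i16 pairs widened to float and multiplied: de-interleave with shuffles, extend, convert, two f32x4.mul, then interleave the results back.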
+; CHECK-LABEL: two_shorts_two_floats_same_op:
+; CHECK: loop
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.mul
+; CHECK: i8x16.shuffle {{.*}} 2, 3, 6, 7, 10, 11, 14, 15, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: i8x16.shuffle {{.*}} 2, 3, 6, 7, 10, 11, 14, 15, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.mul
+; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31
+; CHECK: v128.store
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23
+; CHECK: v128.store
+define hidden void @two_shorts_two_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
+entry:
+ %cmp24.not = icmp eq i32 %N, 0
+ br i1 %cmp24.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %i.025 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw %struct.TwoShorts, ptr %a, i32 %i.025
+ %0 = load i16, ptr %arrayidx, align 2
+ %conv = sitofp i16 %0 to float
+ %arrayidx1 = getelementptr inbounds nuw %struct.TwoShorts, ptr %b, i32 %i.025
+ %1 = load i16, ptr %arrayidx1, align 2
+ %conv3 = sitofp i16 %1 to float
+ %mul = fmul float %conv, %conv3
+ %arrayidx4 = getelementptr inbounds nuw %struct.TwoFloats, ptr %res, i32 %i.025
+ store float %mul, ptr %arrayidx4, align 4
+ %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 2
+ %2 = load i16, ptr %y, align 2
+ %conv7 = sitofp i16 %2 to float
+ %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 2
+ %3 = load i16, ptr %y9, align 2
+ %conv10 = sitofp i16 %3 to float
+ %mul11 = fmul float %conv7, %conv10
+ %y13 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4
+ store float %mul11, ptr %y13, align 4
+ %inc = add nuw i32 %i.025, 1
+ %exitcond.not = icmp eq i32 %inc, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
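+; Same i16-to-float widening with fadd on the first field and fsub on the second.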
+; CHECK-LABEL: two_shorts_two_floats_vary_op:
+; CHECK: loop
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.add
+; CHECK: i8x16.shuffle {{.*}} 2, 3, 6, 7, 10, 11, 14, 15, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: i8x16.shuffle {{.*}} 2, 3, 6, 7, 10, 11, 14, 15, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.sub
+; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31
+; CHECK: v128.store
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23
+; CHECK: v128.store
+define hidden void @two_shorts_two_floats_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
+entry:
+ %cmp23.not = icmp eq i32 %N, 0
+ br i1 %cmp23.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %i.024 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw %struct.TwoShorts, ptr %a, i32 %i.024
+ %0 = load i16, ptr %arrayidx, align 2
+ %conv = sitofp i16 %0 to float
+ %arrayidx1 = getelementptr inbounds nuw %struct.TwoShorts, ptr %b, i32 %i.024
+ %1 = load i16, ptr %arrayidx1, align 2
+ %conv3 = sitofp i16 %1 to float
+ %add = fadd float %conv, %conv3
+ %arrayidx4 = getelementptr inbounds nuw %struct.TwoFloats, ptr %res, i32 %i.024
+ store float %add, ptr %arrayidx4, align 4
+ %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 2
+ %2 = load i16, ptr %y, align 2
+ %conv7 = sitofp i16 %2 to float
+ %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 2
+ %3 = load i16, ptr %y9, align 2
+ %conv10 = sitofp i16 %3 to float
+ %sub = fsub float %conv7, %conv10
+ %y12 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4
+ store float %sub, ptr %y12, align 4
+ %inc = add nuw i32 %i.024, 1
+ %exitcond.not = icmp eq i32 %inc, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
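+; Float pairs multiplied and narrowed to i16 pairs via trunc_sat and i16x8.replace_lane.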
+; CHECK-LABEL: two_floats_two_shorts_same_op:
+; CHECK: loop
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27
+; CHECK: f32x4.mul
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.splat
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
+; CHECK: f32x4.mul
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: v128.store
+define hidden void @two_floats_two_shorts_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
+entry:
+ %cmp22.not = icmp eq i32 %N, 0
+ br i1 %cmp22.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %i.023 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw %struct.TwoFloats, ptr %a, i32 %i.023
+ %0 = load float, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds nuw %struct.TwoFloats, ptr %b, i32 %i.023
+ %1 = load float, ptr %arrayidx1, align 4
+ %mul = fmul float %0, %1
+ %conv = fptosi float %mul to i16
+ %arrayidx3 = getelementptr inbounds nuw %struct.TwoShorts, ptr %res, i32 %i.023
+ store i16 %conv, ptr %arrayidx3, align 2
+ %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4
+ %2 = load float, ptr %y, align 4
+ %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4
+ %3 = load float, ptr %y7, align 4
+ %mul8 = fmul float %2, %3
+ %conv9 = fptosi float %mul8 to i16
+ %y11 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 2
+ store i16 %conv9, ptr %y11, align 2
+ %inc = add nuw i32 %i.023, 1
+ %exitcond.not = icmp eq i32 %inc, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
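+; fadd/fsub variant of the float-to-i16 narrowing above.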
+; CHECK-LABEL: two_floats_two_shorts_vary_op:
+; CHECK: loop
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27
+; CHECK: f32x4.add
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.splat
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
+; CHECK: f32x4.sub
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: v128.store
+define hidden void @two_floats_two_shorts_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
+entry:
+ %cmp21.not = icmp eq i32 %N, 0
+ br i1 %cmp21.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %i.022 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw %struct.TwoFloats, ptr %a, i32 %i.022
+ %0 = load float, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds nuw %struct.TwoFloats, ptr %b, i32 %i.022
+ %1 = load float, ptr %arrayidx1, align 4
+ %add = fadd float %0, %1
+ %conv = fptosi float %add to i16
+ %arrayidx3 = getelementptr inbounds nuw %struct.TwoShorts, ptr %res, i32 %i.022
+ store i16 %conv, ptr %arrayidx3, align 2
+ %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4
+ %2 = load float, ptr %y, align 4
+ %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4
+ %3 = load float, ptr %y7, align 4
+ %sub = fsub float %2, %3
+ %conv8 = fptosi float %sub to i16
+ %y10 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 2
+ store i16 %conv8, ptr %y10, align 2
+ %inc = add nuw i32 %i.022, 1
+ %exitcond.not = icmp eq i32 %inc, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
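+; Four-float structs with fmul on every field: this maps onto a plain v128.load/f32x4.mul/v128.store loop.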
+; CHECK-LABEL: four_floats_same_op:
+; CHECK: loop
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: f32x4.mul
+; CHECK: v128.store
+define hidden void @four_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
+entry:
+ %cmp45.not = icmp eq i32 %N, 0
+ br i1 %cmp45.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %i.046 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw %struct.FourFloats, ptr %a, i32 %i.046
+ %0 = load float, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds nuw %struct.FourFloats, ptr %b, i32 %i.046
+ %1 = load float, ptr %arrayidx1, align 4
+ %mul = fmul float %0, %1
+ %arrayidx3 = getelementptr inbounds nuw %struct.FourFloats, ptr %res, i32 %i.046
+ store float %mul, ptr %arrayidx3, align 4
+ %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4
+ %2 = load float, ptr %y, align 4
+ %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4
+ %3 = load float, ptr %y7, align 4
+ %mul8 = fmul float %2, %3
+ %y10 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 4
+ store float %mul8, ptr %y10, align 4
+ %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 8
+ %4 = load float, ptr %z, align 4
+ %z13 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 8
+ %5 = load float, ptr %z13, align 4
+ %mul14 = fmul float %4, %5
+ %z16 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 8
+ store float %mul14, ptr %z16, align 4
+ %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 12
+ %6 = load float, ptr %w, align 4
+ %w19 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 12
+ %7 = load float, ptr %w19, align 4
+ %mul20 = fmul float %6, %7
+ %w22 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 12
+ store float %mul20, ptr %w22, align 4
+ %inc = add nuw i32 %i.046, 1
+ %exitcond.not = icmp eq i32 %inc, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
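+; Four-float structs with four different ops (fadd, fsub, fmul, fdiv): check that nothing is vectorized to f32x4.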
+; CHECK-LABEL: four_floats_vary_op:
+; CHECK-NOT: f32x4
+define hidden void @four_floats_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
+entry:
+ %cmp42.not = icmp eq i32 %N, 0
+ br i1 %cmp42.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %i.043 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw %struct.FourFloats, ptr %a, i32 %i.043
+ %0 = load float, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds nuw %struct.FourFloats, ptr %b, i32 %i.043
+ %1 = load float, ptr %arrayidx1, align 4
+ %add = fadd float %0, %1
+ %arrayidx3 = getelementptr inbounds nuw %struct.FourFloats, ptr %res, i32 %i.043
+ store float %add, ptr %arrayidx3, align 4
+ %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4
+ %2 = load float, ptr %y, align 4
+ %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4
+ %3 = load float, ptr %y7, align 4
+ %sub = fsub float %2, %3
+ %y9 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 4
+ store float %sub, ptr %y9, align 4
+ %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 8
+ %4 = load float, ptr %z, align 4
+ %z12 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 8
+ %5 = load float, ptr %z12, align 4
+ %mul = fmul float %4, %5
+ %z14 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 8
+ store float %mul, ptr %z14, align 4
+ %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 12
+ %6 = load float, ptr %w, align 4
+ %w17 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 12
+ %7 = load float, ptr %w17, align 4
+ %div = fdiv float %6, %7
+ %w19 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 12
+ store float %div, ptr %w19, align 4
+ %inc = add nuw i32 %i.043, 1
+ %exitcond.not = icmp eq i32 %inc, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
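+; FourBytes widened to FourFloats with fmul throughout: each byte field is gathered across four structs by shuffle, extended and converted before an f32x4.mul.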
+; CHECK-LABEL: four_bytes_four_floats_same_op:
+; CHECK: loop
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 4, 8, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 4, 8, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.mul
+; CHECK: i8x16.shuffle {{.*}} 1, 5, 9, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: i8x16.shuffle {{.*}} 1, 5, 9, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.mul
+; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 2, 6, 10, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: i8x16.shuffle {{.*}} 2, 6, 10, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.mul
+; CHECK: i8x16.shuffle {{.*}} 3, 7, 11, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: i8x16.shuffle {{.*}} 3, 7, 11, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.mul
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: v128.store
+; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: v128.store
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: v128.store
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: v128.store
+define hidden void @four_bytes_four_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
+entry:
+ %cmp52.not = icmp eq i32 %N, 0
+ br i1 %cmp52.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %i.053 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw %struct.FourBytes, ptr %a, i32 %i.053
+ %0 = load i8, ptr %arrayidx, align 1
+ %conv = sitofp i8 %0 to float
+ %arrayidx1 = getelementptr inbounds nuw %struct.FourBytes, ptr %b, i32 %i.053
+ %1 = load i8, ptr %arrayidx1, align 1
+ %conv3 = sitofp i8 %1 to float
+ %mul = fmul float %conv, %conv3
+ %arrayidx4 = getelementptr inbounds nuw %struct.FourFloats, ptr %res, i32 %i.053
+ store float %mul, ptr %arrayidx4, align 4
+ %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 1
+ %2 = load i8, ptr %y, align 1
+ %conv7 = sitofp i8 %2 to float
+ %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 1
+ %3 = load i8, ptr %y9, align 1
+ %conv10 = sitofp i8 %3 to float
+ %mul11 = fmul float %conv7, %conv10
+ %y13 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4
+ store float %mul11, ptr %y13, align 4
+ %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 2
+ %4 = load i8, ptr %z, align 1
+ %conv15 = sitofp i8 %4 to float
+ %z17 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 2
+ %5 = load i8, ptr %z17, align 1
+ %conv18 = sitofp i8 %5 to float
+ %mul19 = fmul float %conv15, %conv18
+ %z21 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 8
+ store float %mul19, ptr %z21, align 4
+ %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 3
+ %6 = load i8, ptr %w, align 1
+ %conv23 = sitofp i8 %6 to float
+ %w25 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 3
+ %7 = load i8, ptr %w25, align 1
+ %conv26 = sitofp i8 %7 to float
+ %mul27 = fmul float %conv23, %conv26
+ %w29 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 12
+ store float %mul27, ptr %w29, align 4
+ %inc = add nuw i32 %i.053, 1
+ %exitcond.not = icmp eq i32 %inc, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
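+; Same byte-to-float widening, but with fmul, fadd, fdiv and fsub on the four fields respectively.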
+; CHECK-LABEL: four_bytes_four_floats_vary_op:
+; CHECK: loop
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 4, 8, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 4, 8, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.mul
+; CHECK: i8x16.shuffle {{.*}} 1, 5, 9, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: i8x16.shuffle {{.*}} 1, 5, 9, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.add
+; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 2, 6, 10, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: i8x16.shuffle {{.*}} 2, 6, 10, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.div
+; CHECK: i8x16.shuffle {{.*}} 3, 7, 11, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: i8x16.shuffle {{.*}} 3, 7, 11, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK: i16x8.extend_low_i8x16_s
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.sub
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: v128.store
+; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: v128.store
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: v128.store
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: v128.store
+define hidden void @four_bytes_four_floats_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
+entry:
+ %cmp49.not = icmp eq i32 %N, 0
+ br i1 %cmp49.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %i.050 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw %struct.FourBytes, ptr %a, i32 %i.050
+ %0 = load i8, ptr %arrayidx, align 1
+ %conv = sitofp i8 %0 to float
+ %arrayidx1 = getelementptr inbounds nuw %struct.FourBytes, ptr %b, i32 %i.050
+ %1 = load i8, ptr %arrayidx1, align 1
+ %conv3 = sitofp i8 %1 to float
+ %mul = fmul float %conv, %conv3
+ %arrayidx4 = getelementptr inbounds nuw %struct.FourFloats, ptr %res, i32 %i.050
+ store float %mul, ptr %arrayidx4, align 4
+ %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 1
+ %2 = load i8, ptr %y, align 1
+ %conv7 = sitofp i8 %2 to float
+ %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 1
+ %3 = load i8, ptr %y9, align 1
+ %conv10 = sitofp i8 %3 to float
+ %add = fadd float %conv7, %conv10
+ %y12 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4
+ store float %add, ptr %y12, align 4
+ %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 2
+ %4 = load i8, ptr %z, align 1
+ %conv14 = sitofp i8 %4 to float
+ %z16 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 2
+ %5 = load i8, ptr %z16, align 1
+ %conv17 = sitofp i8 %5 to float
+ %div = fdiv float %conv14, %conv17
+ %z19 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 8
+ store float %div, ptr %z19, align 4
+ %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 3
+ %6 = load i8, ptr %w, align 1
+ %conv21 = sitofp i8 %6 to float
+ %w23 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 3
+ %7 = load i8, ptr %w23, align 1
+ %conv24 = sitofp i8 %7 to float
+ %sub = fsub float %conv21, %conv24
+ %w26 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 12
+ store float %sub, ptr %w26, align 4
+ %inc = add nuw i32 %i.050, 1
+ %exitcond.not = icmp eq i32 %inc, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
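+; FourFloats multiplied and narrowed to FourBytes: the four f32x4.mul results are packed into one byte vector lane by lane with trunc_sat and replace_lane.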
+; CHECK-LABEL: four_floats_four_bytes_same_op:
+; CHECK: loop
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: f32x4.mul
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.splat
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: f32x4.mul
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: f32x4.mul
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: f32x4.mul
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: v128.store
+define hidden void @four_floats_four_bytes_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
+entry:
+ %cmp48.not = icmp eq i32 %N, 0
+ br i1 %cmp48.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %i.049 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw %struct.FourFloats, ptr %a, i32 %i.049
+ %0 = load float, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds nuw %struct.FourFloats, ptr %b, i32 %i.049
+ %1 = load float, ptr %arrayidx1, align 4
+ %mul = fmul float %0, %1
+ %conv = fptosi float %mul to i8
+ %arrayidx3 = getelementptr inbounds nuw %struct.FourBytes, ptr %res, i32 %i.049
+ store i8 %conv, ptr %arrayidx3, align 1
+ %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4
+ %2 = load float, ptr %y, align 4
+ %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4
+ %3 = load float, ptr %y7, align 4
+ %mul8 = fmul float %2, %3
+ %conv9 = fptosi float %mul8 to i8
+ %y11 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 1
+ store i8 %conv9, ptr %y11, align 1
+ %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 8
+ %4 = load float, ptr %z, align 4
+ %z14 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 8
+ %5 = load float, ptr %z14, align 4
+ %mul15 = fmul float %4, %5
+ %conv16 = fptosi float %mul15 to i8
+ %z18 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 2
+ store i8 %conv16, ptr %z18, align 1
+ %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 12
+ %6 = load float, ptr %w, align 4
+ %w21 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 12
+ %7 = load float, ptr %w21, align 4
+ %mul22 = fmul float %6, %7
+ %conv23 = fptosi float %mul22 to i8
+ %w25 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 3
+ store i8 %conv23, ptr %w25, align 1
+ %inc = add nuw i32 %i.049, 1
+ %exitcond.not = icmp eq i32 %inc, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
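+; fmul/fadd/fdiv/fsub variant of the float-to-byte narrowing above.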
+; CHECK-LABEL: four_floats_four_bytes_vary_op:
+; CHECK: loop
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: f32x4.mul
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.splat
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: f32x4.add
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: f32x4.div
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: f32x4.sub
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i8x16.replace_lane
+; CHECK: v128.store
+define hidden void @four_floats_four_bytes_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
+entry:
+ %cmp45.not = icmp eq i32 %N, 0
+ br i1 %cmp45.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %i.046 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw %struct.FourFloats, ptr %a, i32 %i.046
+ %0 = load float, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds nuw %struct.FourFloats, ptr %b, i32 %i.046
+ %1 = load float, ptr %arrayidx1, align 4
+ %mul = fmul float %0, %1
+ %conv = fptosi float %mul to i8
+ %arrayidx3 = getelementptr inbounds nuw %struct.FourBytes, ptr %res, i32 %i.046
+ store i8 %conv, ptr %arrayidx3, align 1
+ %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4
+ %2 = load float, ptr %y, align 4
+ %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4
+ %3 = load float, ptr %y7, align 4
+ %add = fadd float %2, %3
+ %conv8 = fptosi float %add to i8
+ %y10 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 1
+ store i8 %conv8, ptr %y10, align 1
+ %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 8
+ %4 = load float, ptr %z, align 4
+ %z13 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 8
+ %5 = load float, ptr %z13, align 4
+ %div = fdiv float %4, %5
+ %conv14 = fptosi float %div to i8
+ %z16 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 2
+ store i8 %conv14, ptr %z16, align 1
+ %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 12
+ %6 = load float, ptr %w, align 4
+ %w19 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 12
+ %7 = load float, ptr %w19, align 4
+ %sub = fsub float %6, %7
+ %conv20 = fptosi float %sub to i8
+ %w22 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 3
+ store i8 %conv20, ptr %w22, align 1
+ %inc = add nuw i32 %i.046, 1
+ %exitcond.not = icmp eq i32 %inc, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
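+; FourShorts widened to FourFloats with fmul throughout: shuffle-gather each i16 field, extend, convert, multiply, and interleave the stores.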
+; CHECK-LABEL: four_shorts_four_floats_same_op:
+; CHECK: loop
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 8, 9, 16, 17, 24, 25, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 8, 9, 16, 17, 24, 25, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.mul
+; CHECK: i8x16.shuffle {{.*}} 2, 3, 10, 11, 18, 19, 26, 27, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: i8x16.shuffle {{.*}} 2, 3, 10, 11, 18, 19, 26, 27, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.mul
+; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 12, 13, 20, 21, 28, 29, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 12, 13, 20, 21, 28, 29, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.mul
+; CHECK: i8x16.shuffle {{.*}} 6, 7, 14, 15, 22, 23, 30, 31, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: i8x16.shuffle {{.*}} 6, 7, 14, 15, 22, 23, 30, 31, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.mul
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: v128.store
+; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: v128.store
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: v128.store
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: v128.store
+define hidden void @four_shorts_four_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
+entry:
+ %cmp52.not = icmp eq i32 %N, 0
+ br i1 %cmp52.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %i.053 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw %struct.FourShorts, ptr %a, i32 %i.053
+ %0 = load i16, ptr %arrayidx, align 2
+ %conv = sitofp i16 %0 to float
+ %arrayidx1 = getelementptr inbounds nuw %struct.FourShorts, ptr %b, i32 %i.053
+ %1 = load i16, ptr %arrayidx1, align 2
+ %conv3 = sitofp i16 %1 to float
+ %mul = fmul float %conv, %conv3
+ %arrayidx4 = getelementptr inbounds nuw %struct.FourFloats, ptr %res, i32 %i.053
+ store float %mul, ptr %arrayidx4, align 4
+ %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 2
+ %2 = load i16, ptr %y, align 2
+ %conv7 = sitofp i16 %2 to float
+ %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 2
+ %3 = load i16, ptr %y9, align 2
+ %conv10 = sitofp i16 %3 to float
+ %mul11 = fmul float %conv7, %conv10
+ %y13 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4
+ store float %mul11, ptr %y13, align 4
+ %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4
+ %4 = load i16, ptr %z, align 2
+ %conv15 = sitofp i16 %4 to float
+ %z17 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4
+ %5 = load i16, ptr %z17, align 2
+ %conv18 = sitofp i16 %5 to float
+ %mul19 = fmul float %conv15, %conv18
+ %z21 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 8
+ store float %mul19, ptr %z21, align 4
+ %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 6
+ %6 = load i16, ptr %w, align 2
+ %conv23 = sitofp i16 %6 to float
+ %w25 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 6
+ %7 = load i16, ptr %w25, align 2
+ %conv26 = sitofp i16 %7 to float
+ %mul27 = fmul float %conv23, %conv26
+ %w29 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 12
+ store float %mul27, ptr %w29, align 4
+ %inc = add nuw i32 %i.053, 1
+ %exitcond.not = icmp eq i32 %inc, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
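+; Same i16-to-float widening with fmul, fadd, fdiv and fsub on the four fields respectively.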
+; CHECK-LABEL: four_shorts_four_floats_vary_op:
+; CHECK: loop
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 8, 9, 16, 17, 24, 25, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 8, 9, 16, 17, 24, 25, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.mul
+; CHECK: i8x16.shuffle {{.*}} 2, 3, 10, 11, 18, 19, 26, 27, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: i8x16.shuffle {{.*}} 2, 3, 10, 11, 18, 19, 26, 27, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.add
+; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 12, 13, 20, 21, 28, 29, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 12, 13, 20, 21, 28, 29, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.div
+; CHECK: i8x16.shuffle {{.*}} 6, 7, 14, 15, 22, 23, 30, 31, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: i8x16.shuffle {{.*}} 6, 7, 14, 15, 22, 23, 30, 31, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK: i32x4.extend_low_i16x8_s
+; CHECK: f32x4.convert_i32x4_s
+; CHECK: f32x4.sub
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: v128.store
+; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: v128.store
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: v128.store
+define hidden void @four_shorts_four_floats_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
+entry:
+ %cmp49.not = icmp eq i32 %N, 0
+ br i1 %cmp49.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %i.050 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw %struct.FourShorts, ptr %a, i32 %i.050
+ %0 = load i16, ptr %arrayidx, align 2
+ %conv = sitofp i16 %0 to float
+ %arrayidx1 = getelementptr inbounds nuw %struct.FourShorts, ptr %b, i32 %i.050
+ %1 = load i16, ptr %arrayidx1, align 2
+ %conv3 = sitofp i16 %1 to float
+ %mul = fmul float %conv, %conv3
+ %arrayidx4 = getelementptr inbounds nuw %struct.FourFloats, ptr %res, i32 %i.050
+ store float %mul, ptr %arrayidx4, align 4
+ %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 2
+ %2 = load i16, ptr %y, align 2
+ %conv7 = sitofp i16 %2 to float
+ %y9 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 2
+ %3 = load i16, ptr %y9, align 2
+ %conv10 = sitofp i16 %3 to float
+ %add = fadd float %conv7, %conv10
+ %y12 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 4
+ store float %add, ptr %y12, align 4
+ %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4
+ %4 = load i16, ptr %z, align 2
+ %conv14 = sitofp i16 %4 to float
+ %z16 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4
+ %5 = load i16, ptr %z16, align 2
+ %conv17 = sitofp i16 %5 to float
+ %div = fdiv float %conv14, %conv17
+ %z19 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 8
+ store float %div, ptr %z19, align 4
+ %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 6
+ %6 = load i16, ptr %w, align 2
+ %conv21 = sitofp i16 %6 to float
+ %w23 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 6
+ %7 = load i16, ptr %w23, align 2
+ %conv24 = sitofp i16 %7 to float
+ %sub = fsub float %conv21, %conv24
+ %w26 = getelementptr inbounds nuw i8, ptr %arrayidx4, i32 12
+ store float %sub, ptr %w26, align 4
+ %inc = add nuw i32 %i.050, 1
+ %exitcond.not = icmp eq i32 %inc, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
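+; FourFloats multiplied and narrowed to FourShorts: results are rebuilt as i16 lanes via trunc_sat and i16x8.replace_lane, feeding two v128.store.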
+; CHECK-LABEL: four_floats_four_shorts_same_op:
+; CHECK: loop
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: f32x4.mul
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.splat
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: f32x4.mul
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: f32x4.mul
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: f32x4.mul
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: v128.store
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.splat
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: v128.store
+define hidden void @four_floats_four_shorts_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
+entry:
+ %cmp48.not = icmp eq i32 %N, 0
+ br i1 %cmp48.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %i.049 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw %struct.FourFloats, ptr %a, i32 %i.049
+ %0 = load float, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds nuw %struct.FourFloats, ptr %b, i32 %i.049
+ %1 = load float, ptr %arrayidx1, align 4
+ %mul = fmul float %0, %1
+ %conv = fptosi float %mul to i16
+ %arrayidx3 = getelementptr inbounds nuw %struct.FourShorts, ptr %res, i32 %i.049
+ store i16 %conv, ptr %arrayidx3, align 2
+ %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4
+ %2 = load float, ptr %y, align 4
+ %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4
+ %3 = load float, ptr %y7, align 4
+ %mul8 = fmul float %2, %3
+ %conv9 = fptosi float %mul8 to i16
+ %y11 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 2
+ store i16 %conv9, ptr %y11, align 2
+ %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 8
+ %4 = load float, ptr %z, align 4
+ %z14 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 8
+ %5 = load float, ptr %z14, align 4
+ %mul15 = fmul float %4, %5
+ %conv16 = fptosi float %mul15 to i16
+ %z18 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 4
+ store i16 %conv16, ptr %z18, align 2
+ %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 12
+ %6 = load float, ptr %w, align 4
+ %w21 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 12
+ %7 = load float, ptr %w21, align 4
+ %mul22 = fmul float %6, %7
+ %conv23 = fptosi float %mul22 to i16
+ %w25 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 6
+ store i16 %conv23, ptr %w25, align 2
+ %inc = add nuw i32 %i.049, 1
+ %exitcond.not = icmp eq i32 %inc, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+; CHECK-LABEL: four_floats_four_shorts_vary_op:
+; CHECK: loop
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 16, 17, 18, 19, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: v128.load
+; CHECK: v128.load
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 16, 17, 18, 19
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: f32x4.mul
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.splat
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 4, 5, 6, 7, 20, 21, 22, 23, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: f32x4.add
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 8, 9, 10, 11, 24, 25, 26, 27, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 24, 25, 26, 27
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: f32x4.div
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 12, 13, 14, 15, 28, 29, 30, 31, 0, 1, 2, 3, 0, 1, 2, 3
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 0, 1, 2, 3, 12, 13, 14, 15, 28, 29, 30, 31
+; CHECK: i8x16.shuffle {{.*}} 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31
+; CHECK: f32x4.sub
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: v128.store
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.splat
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: f32x4.extract_lane
+; CHECK: i32.trunc_sat_f32_s
+; CHECK: i16x8.replace_lane
+; CHECK: v128.store
+define hidden void @four_floats_four_shorts_vary_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
+entry:
+ %cmp45.not = icmp eq i32 %N, 0
+ br i1 %cmp45.not, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ ret void
+
+for.body: ; preds = %entry, %for.body
+ %i.046 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw %struct.FourFloats, ptr %a, i32 %i.046
+ %0 = load float, ptr %arrayidx, align 4
+ %arrayidx1 = getelementptr inbounds nuw %struct.FourFloats, ptr %b, i32 %i.046
+ %1 = load float, ptr %arrayidx1, align 4
+ %mul = fmul float %0, %1
+ %conv = fptosi float %mul to i16
+ %arrayidx3 = getelementptr inbounds nuw %struct.FourShorts, ptr %res, i32 %i.046
+ store i16 %conv, ptr %arrayidx3, align 2
+ %y = getelementptr inbounds nuw i8, ptr %arrayidx, i32 4
+ %2 = load float, ptr %y, align 4
+ %y7 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 4
+ %3 = load float, ptr %y7, align 4
+ %add = fadd float %2, %3
+ %conv8 = fptosi float %add to i16
+ %y10 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 2
+ store i16 %conv8, ptr %y10, align 2
+ %z = getelementptr inbounds nuw i8, ptr %arrayidx, i32 8
+ %4 = load float, ptr %z, align 4
+ %z13 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 8
+ %5 = load float, ptr %z13, align 4
+ %div = fdiv float %4, %5
+ %conv14 = fptosi float %div to i16
+ %z16 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 4
+ store i16 %conv14, ptr %z16, align 2
+ %w = getelementptr inbounds nuw i8, ptr %arrayidx, i32 12
+ %6 = load float, ptr %w, align 4
+ %w19 = getelementptr inbounds nuw i8, ptr %arrayidx1, i32 12
+ %7 = load float, ptr %w19, align 4
+ %sub = fsub float %6, %7
+ %conv20 = fptosi float %sub to i16
+ %w22 = getelementptr inbounds nuw i8, ptr %arrayidx3, i32 6
+ store i16 %conv20, ptr %w22, align 2
+ %inc = add nuw i32 %i.046, 1
+ %exitcond.not = icmp eq i32 %inc, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
diff --git a/llvm/test/CodeGen/WebAssembly/simd-relaxed-fmax.ll b/llvm/test/CodeGen/WebAssembly/simd-relaxed-fmax.ll
new file mode 100644
index 0000000..45f4ddd
--- /dev/null
+++ b/llvm/test/CodeGen/WebAssembly/simd-relaxed-fmax.ll
@@ -0,0 +1,63 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+
+; RUN: llc < %s -mtriple=wasm32-unknown-unknown -mattr=+simd128,+relaxed-simd | FileCheck %s
+
+; Test that fmaxnum and fmaximumnum get transformed to relaxed_max
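+; Unlike the fully specified f32x4.max (NaN-propagating, with -0.0 ordered
+; below +0.0), relaxed_max leaves NaN and signed-zero inputs
+; implementation-defined, so engines can map it onto a single hardware max.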
+
+target triple = "wasm32"
+
+define <4 x float> @test_maxnum_f32x4(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test_maxnum_f32x4:
+; CHECK: .functype test_maxnum_f32x4 (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f32x4.relaxed_max
+; CHECK-NEXT: # fallthrough-return
+ %result = call <4 x float> @llvm.maxnum.v4f32(<4 x float> %a, <4 x float> %b)
+ ret <4 x float> %result
+}
+
+define <4 x float> @test_maximumnum_f32x4(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test_maximumnum_f32x4:
+; CHECK: .functype test_maximumnum_f32x4 (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f32x4.relaxed_max
+; CHECK-NEXT: # fallthrough-return
+ %result = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> %a, <4 x float> %b)
+ ret <4 x float> %result
+}
+
+define <2 x double> @test_maxnum_f64x2(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: test_maxnum_f64x2:
+; CHECK: .functype test_maxnum_f64x2 (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f64x2.relaxed_max
+; CHECK-NEXT: # fallthrough-return
+ %result = call <2 x double> @llvm.maxnum.v2f64(<2 x double> %a, <2 x double> %b)
+ ret <2 x double> %result
+}
+
+define <2 x double> @test_maximumnum_f64x2(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: test_maximumnum_f64x2:
+; CHECK:         .functype test_maximumnum_f64x2 (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f64x2.relaxed_max
+; CHECK-NEXT: # fallthrough-return
+ %result = call <2 x double> @llvm.maximumnum.v2f64(<2 x double> %a, <2 x double> %b)
+ ret <2 x double> %result
+}
+
+declare <4 x float> @llvm.maxnum.v4f32(<4 x float>, <4 x float>)
+declare <4 x float> @llvm.maximumnum.v4f32(<4 x float>, <4 x float>)
+declare <2 x double> @llvm.maxnum.v2f64(<2 x double>, <2 x double>)
+declare <2 x double> @llvm.maximumnum.v2f64(<2 x double>, <2 x double>)
diff --git a/llvm/test/CodeGen/WebAssembly/simd-relaxed-fmin.ll b/llvm/test/CodeGen/WebAssembly/simd-relaxed-fmin.ll
new file mode 100644
index 0000000..f3eec02
--- /dev/null
+++ b/llvm/test/CodeGen/WebAssembly/simd-relaxed-fmin.ll
@@ -0,0 +1,62 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=wasm32-unknown-unknown -mattr=+simd128,+relaxed-simd | FileCheck %s
+
+; Test that fminnum and fminimumnum get transformed to relaxed_min
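+; llvm.minnum and llvm.minimumnum differ in their guarantees for signaling
+; NaNs and signed zeros; relaxed_min leaves both cases implementation-defined,
+; so the two intrinsics can lower to the same instruction.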
+
+target triple = "wasm32"
+
+define <4 x float> @test_minnum_f32x4(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test_minnum_f32x4:
+; CHECK: .functype test_minnum_f32x4 (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f32x4.relaxed_min
+; CHECK-NEXT: # fallthrough-return
+ %result = call <4 x float> @llvm.minnum.v4f32(<4 x float> %a, <4 x float> %b)
+ ret <4 x float> %result
+}
+
+define <4 x float> @test_minimumnum_f32x4(<4 x float> %a, <4 x float> %b) {
+; CHECK-LABEL: test_minimumnum_f32x4:
+; CHECK: .functype test_minimumnum_f32x4 (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f32x4.relaxed_min
+; CHECK-NEXT: # fallthrough-return
+ %result = call <4 x float> @llvm.minimumnum.v4f32(<4 x float> %a, <4 x float> %b)
+ ret <4 x float> %result
+}
+
+define <2 x double> @test_minnum_f64x2(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: test_minnum_f64x2:
+; CHECK: .functype test_minnum_f64x2 (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f64x2.relaxed_min
+; CHECK-NEXT: # fallthrough-return
+ %result = call <2 x double> @llvm.minnum.v2f64(<2 x double> %a, <2 x double> %b)
+ ret <2 x double> %result
+}
+
+define <2 x double> @test_minimumnum_f64x2(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: test_minimumnum_f64x2:
+; CHECK: .functype test_minimumnum_f64x2 (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f64x2.relaxed_min
+; CHECK-NEXT: # fallthrough-return
+ %result = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %a, <2 x double> %b)
+ ret <2 x double> %result
+}
+
+declare <4 x float> @llvm.minnum.v4f32(<4 x float>, <4 x float>)
+declare <4 x float> @llvm.minimumnum.v4f32(<4 x float>, <4 x float>)
+declare <2 x double> @llvm.minnum.v2f64(<2 x double>, <2 x double>)
+declare <2 x double> @llvm.minimumnum.v2f64(<2 x double>, <2 x double>)
diff --git a/llvm/test/CodeGen/WebAssembly/simd-vector-trunc.ll b/llvm/test/CodeGen/WebAssembly/simd-vector-trunc.ll
index 123438d..f58456b 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-vector-trunc.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-vector-trunc.ll
@@ -94,6 +94,19 @@ entry:
ret <16 x i8> %0
}
+define <8 x i8> @trunc8i16_8i8(<8 x i16> %a) {
+; CHECK-LABEL: trunc8i16_8i8:
+; CHECK: .functype trunc8i16_8i8 (v128) -> (v128)
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: i8x16.shuffle 0, 2, 4, 6, 8, 10, 12, 14, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK-NEXT: # fallthrough-return
+entry:
+ %0 = trunc <8 x i16> %a to <8 x i8>
+ ret <8 x i8> %0
+}
+
define <8 x i16> @trunc8i64_8i16(<8 x i64> %a) {
; CHECK-LABEL: trunc8i64_8i16:
; CHECK: .functype trunc8i64_8i16 (v128, v128, v128, v128) -> (v128)
@@ -139,3 +152,29 @@ entry:
%0 = trunc <8 x i32> %a to <8 x i16>
ret <8 x i16> %0
}
+
+define <4 x i16> @trunc4i32_4i16(<4 x i32> %a) {
+; CHECK-LABEL: trunc4i32_4i16:
+; CHECK: .functype trunc4i32_4i16 (v128) -> (v128)
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: i8x16.shuffle 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 0, 1, 0, 1, 0, 1
+; CHECK-NEXT: # fallthrough-return
+entry:
+ %0 = trunc <4 x i32> %a to <4 x i16>
+ ret <4 x i16> %0
+}
+
+define <4 x i8> @trunc4i32_4i8(<4 x i32> %a) {
+; CHECK-LABEL: trunc4i32_4i8:
+; CHECK: .functype trunc4i32_4i8 (v128) -> (v128)
+; CHECK-NEXT: # %bb.0: # %entry
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: i8x16.shuffle 0, 4, 8, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+; CHECK-NEXT: # fallthrough-return
+entry:
+ %0 = trunc <4 x i32> %a to <4 x i8>
+ ret <4 x i8> %0
+}
diff --git a/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll b/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll
index bea11e9..940fe8c 100644
--- a/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll
+++ b/llvm/test/CodeGen/X86/2006-05-22-FPSetEQ.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -mtriple=i686-- -mattr=-sse | FileCheck %s -check-prefix=WITHNANS
-; RUN: llc < %s -mtriple=i686-- -mattr=-sse -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s -check-prefix=NONANS
+; RUN: llc < %s -mtriple=i686-- -mattr=-sse -enable-no-nans-fp-math | FileCheck %s -check-prefix=NONANS
; WITHNANS-LABEL: test:
; WITHNANS: setnp
diff --git a/llvm/test/CodeGen/X86/2008-05-01-InvalidOrdCompare.ll b/llvm/test/CodeGen/X86/2008-05-01-InvalidOrdCompare.ll
index 8411a40..ff7a99a 100644
--- a/llvm/test/CodeGen/X86/2008-05-01-InvalidOrdCompare.ll
+++ b/llvm/test/CodeGen/X86/2008-05-01-InvalidOrdCompare.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -enable-unsafe-fp-math -mtriple=i686-- | FileCheck %s
+; RUN: llc < %s -mtriple=i686-- | FileCheck %s
; rdar://5902801
declare void @test2()
diff --git a/llvm/test/CodeGen/X86/2012-08-28-UnsafeMathCrash.ll b/llvm/test/CodeGen/X86/2012-08-28-UnsafeMathCrash.ll
index 6ebbb2e..0e0e20f 100644
--- a/llvm/test/CodeGen/X86/2012-08-28-UnsafeMathCrash.ll
+++ b/llvm/test/CodeGen/X86/2012-08-28-UnsafeMathCrash.ll
@@ -1,4 +1,4 @@
-; RUN: llc < %s -enable-unsafe-fp-math
+; RUN: llc < %s
; <rdar://problem/12180135>
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
target triple = "i386-apple-macosx10.8.0"
diff --git a/llvm/test/CodeGen/X86/avx-minmax.ll b/llvm/test/CodeGen/X86/avx-minmax.ll
index 6da04c5..8e4b6c6 100644
--- a/llvm/test/CodeGen/X86/avx-minmax.ll
+++ b/llvm/test/CodeGen/X86/avx-minmax.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx -enable-no-nans-fp-math | FileCheck %s
define <2 x double> @maxpd(<2 x double> %x, <2 x double> %y) {
; CHECK-LABEL: maxpd:
diff --git a/llvm/test/CodeGen/X86/avx512-unsafe-fp-math.ll b/llvm/test/CodeGen/X86/avx512-unsafe-fp-math.ll
index f827998..eb9de8a 100644
--- a/llvm/test/CodeGen/X86/avx512-unsafe-fp-math.ll
+++ b/llvm/test/CodeGen/X86/avx512-unsafe-fp-math.ll
@@ -2,7 +2,6 @@
; RUN: llc < %s -mtriple=x86_64 -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mattr=+avx512f | FileCheck %s --check-prefix=CHECK_UNSAFE
; RUN: llc < %s -mtriple=x86_64 -enable-no-nans-fp-math -mattr=+avx512f | FileCheck %s
; RUN: llc < %s -mtriple=x86_64 -enable-no-signed-zeros-fp-math -mattr=+avx512f | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+avx512f | FileCheck %s
; RUN: llc < %s -mtriple=x86_64 -mattr=+avx512f | FileCheck %s
define <16 x float> @test_max_v16f32(ptr %a_ptr, <16 x float> %b) {
diff --git a/llvm/test/CodeGen/X86/avx512fp16-combine-vfmulc-fadd.ll b/llvm/test/CodeGen/X86/avx512fp16-combine-vfmulc-fadd.ll
index 5d9784a..1147d79 100644
--- a/llvm/test/CodeGen/X86/avx512fp16-combine-vfmulc-fadd.ll
+++ b/llvm/test/CodeGen/X86/avx512fp16-combine-vfmulc-fadd.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl --enable-unsafe-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl | FileCheck %s
define dso_local <32 x half> @test1(<32 x half> %acc.coerce, <32 x half> %lhs.coerce, <32 x half> %rhs.coerce) {
; CHECK-LABEL: test1:
diff --git a/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc-fadd.ll b/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc-fadd.ll
index b58bae9..1c4d9c6 100644
--- a/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc-fadd.ll
+++ b/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc-fadd.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl --enable-unsafe-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl | FileCheck %s
define dso_local <32 x half> @test1(<32 x half> %acc.coerce, <32 x half> %lhs.coerce.conj, <32 x half> %rhs.coerce) local_unnamed_addr #0 {
; CHECK-LABEL: test1:
diff --git a/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc.ll b/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc.ll
index 92bdebb..a8ff969 100644
--- a/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc.ll
+++ b/llvm/test/CodeGen/X86/avx512fp16-combine-xor-vfmulc.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl --enable-unsafe-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512fp16,avx512vl | FileCheck %s
define dso_local <32 x half> @test1(<32 x half> %lhs.coerce.conj, <32 x half> %rhs.coerce) local_unnamed_addr #0 {
; CHECK-LABEL: test1:
diff --git a/llvm/test/CodeGen/X86/bf16-fast-isel.ll b/llvm/test/CodeGen/X86/bf16-fast-isel.ll
new file mode 100644
index 0000000..c659e0e
--- /dev/null
+++ b/llvm/test/CodeGen/X86/bf16-fast-isel.ll
@@ -0,0 +1,66 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc --fast-isel < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+
+define i8 @test_direct_call(ptr %f) nounwind {
+; CHECK-LABEL: test_direct_call:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo@PLT
+; CHECK-NEXT: callq bar@PLT
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: retq
+entry:
+ %call = call bfloat @foo(ptr %f)
+ %call2 = call zeroext i8 @bar(bfloat %call)
+ ret i8 %call2
+}
+
+define i8 @test_fast_direct_call(ptr %f) nounwind {
+; CHECK-LABEL: test_fast_direct_call:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: callq foo_fast@PLT
+; CHECK-NEXT: callq bar@PLT
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: retq
+entry:
+ %call = call fastcc bfloat @foo_fast(ptr %f)
+ %call2 = call zeroext i8 @bar(bfloat %call)
+ ret i8 %call2
+}
+
+define i8 @test_indirect_call(ptr %fptr, ptr %f) nounwind {
+; CHECK-LABEL: test_indirect_call:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: movq %rsi, %rdi
+; CHECK-NEXT: callq foo@PLT
+; CHECK-NEXT: callq *%rbx
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
+entry:
+ %call = call bfloat @foo(ptr %f)
+ %call2 = call zeroext i8 %fptr(bfloat %call)
+ ret i8 %call2
+}
+
+define i8 @test_fast_indirect_call(ptr %fptr, ptr %f) nounwind {
+; CHECK-LABEL: test_fast_indirect_call:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: movq %rdi, %rbx
+; CHECK-NEXT: movq %rsi, %rdi
+; CHECK-NEXT: callq foo_fast@PLT
+; CHECK-NEXT: callq *%rbx
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: retq
+entry:
+ %call = call fastcc bfloat @foo_fast(ptr %f)
+ %call2 = call zeroext i8 %fptr(bfloat %call)
+ ret i8 %call2
+}
+
+declare bfloat @foo(ptr %f)
+declare zeroext i8 @bar(bfloat)
+declare fastcc bfloat @foo_fast(ptr %f)
diff --git a/llvm/test/CodeGen/X86/bitcnt-big-integer.ll b/llvm/test/CodeGen/X86/bitcnt-big-integer.ll
new file mode 100644
index 0000000..13149d7
--- /dev/null
+++ b/llvm/test/CodeGen/X86/bitcnt-big-integer.ll
@@ -0,0 +1,3032 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=CHECK,AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 -mattr=+avx512vpopcntdq | FileCheck %s --check-prefixes=CHECK,AVX512
+
+;
+; CTPOP
+;
+
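+; Wide ctpop is legalized to one popcntq per 64-bit chunk, with the partial
+; counts combined by 32-bit adds. The "xorl %reg, %reg" ahead of most popcntq
+; instructions breaks the false output dependency that popcnt carries on its
+; destination register on a number of Intel microarchitectures.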
+define i32 @test_ctpop_i128(i128 %a0) nounwind {
+; CHECK-LABEL: test_ctpop_i128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: popcntq %rsi, %rcx
+; CHECK-NEXT: popcntq %rdi, %rax
+; CHECK-NEXT: addl %ecx, %eax
+; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
+; CHECK-NEXT: retq
+ %cnt = call i128 @llvm.ctpop.i128(i128 %a0)
+ %res = trunc i128 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @load_ctpop_i128(ptr %p0) nounwind {
+; CHECK-LABEL: load_ctpop_i128:
+; CHECK: # %bb.0:
+; CHECK-NEXT: popcntq 8(%rdi), %rcx
+; CHECK-NEXT: popcntq (%rdi), %rax
+; CHECK-NEXT: addl %ecx, %eax
+; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
+; CHECK-NEXT: retq
+ %a0 = load i128, ptr %p0
+ %cnt = call i128 @llvm.ctpop.i128(i128 %a0)
+ %res = trunc i128 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @test_ctpop_i256(i256 %a0) nounwind {
+; CHECK-LABEL: test_ctpop_i256:
+; CHECK: # %bb.0:
+; CHECK-NEXT: popcntq %rcx, %rax
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: popcntq %rdx, %rcx
+; CHECK-NEXT: addl %eax, %ecx
+; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: popcntq %rsi, %rdx
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: popcntq %rdi, %rax
+; CHECK-NEXT: addl %edx, %eax
+; CHECK-NEXT: addl %ecx, %eax
+; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
+; CHECK-NEXT: retq
+ %cnt = call i256 @llvm.ctpop.i256(i256 %a0)
+ %res = trunc i256 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @load_ctpop_i256(ptr %p0) nounwind {
+; SSE-LABEL: load_ctpop_i256:
+; SSE: # %bb.0:
+; SSE-NEXT: popcntq 24(%rdi), %rcx
+; SSE-NEXT: popcntq 16(%rdi), %rdx
+; SSE-NEXT: popcntq 8(%rdi), %rsi
+; SSE-NEXT: popcntq (%rdi), %rax
+; SSE-NEXT: addl %ecx, %edx
+; SSE-NEXT: addl %esi, %eax
+; SSE-NEXT: addl %edx, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: load_ctpop_i256:
+; AVX2: # %bb.0:
+; AVX2-NEXT: popcntq 24(%rdi), %rax
+; AVX2-NEXT: popcntq 16(%rdi), %rcx
+; AVX2-NEXT: addl %eax, %ecx
+; AVX2-NEXT: popcntq 8(%rdi), %rdx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq (%rdi), %rax
+; AVX2-NEXT: addl %edx, %eax
+; AVX2-NEXT: addl %ecx, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_ctpop_i256:
+; AVX512: # %bb.0:
+; AVX512-NEXT: popcntq 24(%rdi), %rax
+; AVX512-NEXT: popcntq 16(%rdi), %rcx
+; AVX512-NEXT: addl %eax, %ecx
+; AVX512-NEXT: popcntq 8(%rdi), %rdx
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq (%rdi), %rax
+; AVX512-NEXT: addl %edx, %eax
+; AVX512-NEXT: addl %ecx, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: retq
+ %a0 = load i256, ptr %p0
+ %cnt = call i256 @llvm.ctpop.i256(i256 %a0)
+ %res = trunc i256 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @test_ctpop_i512(i512 %a0) nounwind {
+; CHECK-LABEL: test_ctpop_i512:
+; CHECK: # %bb.0:
+; CHECK-NEXT: popcntq {{[0-9]+}}(%rsp), %rax
+; CHECK-NEXT: popcntq {{[0-9]+}}(%rsp), %r10
+; CHECK-NEXT: addl %eax, %r10d
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: popcntq %r9, %rax
+; CHECK-NEXT: popcntq %r8, %r8
+; CHECK-NEXT: addl %eax, %r8d
+; CHECK-NEXT: addl %r10d, %r8d
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: popcntq %rcx, %rax
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: popcntq %rdx, %rcx
+; CHECK-NEXT: addl %eax, %ecx
+; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: popcntq %rsi, %rdx
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: popcntq %rdi, %rax
+; CHECK-NEXT: addl %edx, %eax
+; CHECK-NEXT: addl %ecx, %eax
+; CHECK-NEXT: addl %r8d, %eax
+; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
+; CHECK-NEXT: retq
+ %cnt = call i512 @llvm.ctpop.i512(i512 %a0)
+ %res = trunc i512 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @load_ctpop_i512(ptr %p0) nounwind {
+; SSE-LABEL: load_ctpop_i512:
+; SSE: # %bb.0:
+; SSE-NEXT: popcntq 56(%rdi), %rax
+; SSE-NEXT: popcntq 48(%rdi), %rcx
+; SSE-NEXT: popcntq 40(%rdi), %rdx
+; SSE-NEXT: popcntq 32(%rdi), %rsi
+; SSE-NEXT: addl %eax, %ecx
+; SSE-NEXT: addl %edx, %esi
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq 24(%rdi), %rax
+; SSE-NEXT: addl %ecx, %esi
+; SSE-NEXT: xorl %ecx, %ecx
+; SSE-NEXT: popcntq 16(%rdi), %rcx
+; SSE-NEXT: addl %eax, %ecx
+; SSE-NEXT: xorl %edx, %edx
+; SSE-NEXT: popcntq 8(%rdi), %rdx
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq (%rdi), %rax
+; SSE-NEXT: addl %edx, %eax
+; SSE-NEXT: addl %ecx, %eax
+; SSE-NEXT: addl %esi, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: load_ctpop_i512:
+; AVX2: # %bb.0:
+; AVX2-NEXT: popcntq 56(%rdi), %rax
+; AVX2-NEXT: popcntq 48(%rdi), %rcx
+; AVX2-NEXT: addl %eax, %ecx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq 40(%rdi), %rax
+; AVX2-NEXT: popcntq 32(%rdi), %rdx
+; AVX2-NEXT: addl %eax, %edx
+; AVX2-NEXT: addl %ecx, %edx
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: popcntq 24(%rdi), %rcx
+; AVX2-NEXT: popcntq 16(%rdi), %rsi
+; AVX2-NEXT: popcntq 8(%rdi), %r8
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq (%rdi), %rax
+; AVX2-NEXT: addl %ecx, %esi
+; AVX2-NEXT: addl %r8d, %eax
+; AVX2-NEXT: addl %esi, %eax
+; AVX2-NEXT: addl %edx, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_ctpop_i512:
+; AVX512: # %bb.0:
+; AVX512-NEXT: popcntq 56(%rdi), %rax
+; AVX512-NEXT: popcntq 48(%rdi), %rcx
+; AVX512-NEXT: addl %eax, %ecx
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq 40(%rdi), %rax
+; AVX512-NEXT: popcntq 32(%rdi), %rdx
+; AVX512-NEXT: addl %eax, %edx
+; AVX512-NEXT: addl %ecx, %edx
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq 24(%rdi), %rax
+; AVX512-NEXT: xorl %ecx, %ecx
+; AVX512-NEXT: popcntq 16(%rdi), %rcx
+; AVX512-NEXT: popcntq 8(%rdi), %rsi
+; AVX512-NEXT: addl %eax, %ecx
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq (%rdi), %rax
+; AVX512-NEXT: addl %esi, %eax
+; AVX512-NEXT: addl %ecx, %eax
+; AVX512-NEXT: addl %edx, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: retq
+ %a0 = load i512, ptr %p0
+ %cnt = call i512 @llvm.ctpop.i512(i512 %a0)
+ %res = trunc i512 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @test_ctpop_i1024(i1024 %a0) nounwind {
+; SSE-LABEL: test_ctpop_i1024:
+; SSE: # %bb.0:
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: popcntq {{[0-9]+}}(%rsp), %rax
+; SSE-NEXT: popcntq {{[0-9]+}}(%rsp), %r10
+; SSE-NEXT: popcntq {{[0-9]+}}(%rsp), %r11
+; SSE-NEXT: addl %eax, %r10d
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq {{[0-9]+}}(%rsp), %rax
+; SSE-NEXT: addl %r11d, %eax
+; SSE-NEXT: xorl %r11d, %r11d
+; SSE-NEXT: popcntq {{[0-9]+}}(%rsp), %r11
+; SSE-NEXT: xorl %ebx, %ebx
+; SSE-NEXT: popcntq {{[0-9]+}}(%rsp), %rbx
+; SSE-NEXT: addl %r10d, %eax
+; SSE-NEXT: addl %r11d, %ebx
+; SSE-NEXT: xorl %r11d, %r11d
+; SSE-NEXT: popcntq {{[0-9]+}}(%rsp), %r11
+; SSE-NEXT: xorl %r10d, %r10d
+; SSE-NEXT: popcntq {{[0-9]+}}(%rsp), %r10
+; SSE-NEXT: addl %r11d, %r10d
+; SSE-NEXT: addl %ebx, %r10d
+; SSE-NEXT: xorl %r11d, %r11d
+; SSE-NEXT: popcntq {{[0-9]+}}(%rsp), %r11
+; SSE-NEXT: xorl %ebx, %ebx
+; SSE-NEXT: popcntq {{[0-9]+}}(%rsp), %rbx
+; SSE-NEXT: addl %eax, %r10d
+; SSE-NEXT: addl %r11d, %ebx
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq %r9, %rax
+; SSE-NEXT: popcntq %r8, %r8
+; SSE-NEXT: addl %eax, %r8d
+; SSE-NEXT: addl %ebx, %r8d
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq %rcx, %rax
+; SSE-NEXT: xorl %ecx, %ecx
+; SSE-NEXT: popcntq %rdx, %rcx
+; SSE-NEXT: addl %eax, %ecx
+; SSE-NEXT: xorl %edx, %edx
+; SSE-NEXT: popcntq %rsi, %rdx
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq %rdi, %rax
+; SSE-NEXT: addl %edx, %eax
+; SSE-NEXT: addl %ecx, %eax
+; SSE-NEXT: addl %r8d, %eax
+; SSE-NEXT: addl %r10d, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: test_ctpop_i1024:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: popcntq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: popcntq {{[0-9]+}}(%rsp), %r10
+; AVX2-NEXT: addl %eax, %r10d
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: popcntq {{[0-9]+}}(%rsp), %r11
+; AVX2-NEXT: addl %eax, %r11d
+; AVX2-NEXT: addl %r10d, %r11d
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: xorl %ebx, %ebx
+; AVX2-NEXT: popcntq {{[0-9]+}}(%rsp), %rbx
+; AVX2-NEXT: xorl %r14d, %r14d
+; AVX2-NEXT: popcntq {{[0-9]+}}(%rsp), %r14
+; AVX2-NEXT: addl %eax, %ebx
+; AVX2-NEXT: xorl %r10d, %r10d
+; AVX2-NEXT: popcntq {{[0-9]+}}(%rsp), %r10
+; AVX2-NEXT: addl %r14d, %r10d
+; AVX2-NEXT: addl %ebx, %r10d
+; AVX2-NEXT: addl %r11d, %r10d
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: xorl %r11d, %r11d
+; AVX2-NEXT: popcntq {{[0-9]+}}(%rsp), %r11
+; AVX2-NEXT: addl %eax, %r11d
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq %r9, %rax
+; AVX2-NEXT: popcntq %r8, %r8
+; AVX2-NEXT: addl %eax, %r8d
+; AVX2-NEXT: addl %r11d, %r8d
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq %rcx, %rax
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: popcntq %rdx, %rcx
+; AVX2-NEXT: addl %eax, %ecx
+; AVX2-NEXT: xorl %edx, %edx
+; AVX2-NEXT: popcntq %rsi, %rdx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq %rdi, %rax
+; AVX2-NEXT: addl %edx, %eax
+; AVX2-NEXT: addl %ecx, %eax
+; AVX2-NEXT: addl %r8d, %eax
+; AVX2-NEXT: addl %r10d, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ctpop_i1024:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: popcntq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: popcntq {{[0-9]+}}(%rsp), %r10
+; AVX512-NEXT: addl %eax, %r10d
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: popcntq {{[0-9]+}}(%rsp), %r11
+; AVX512-NEXT: addl %eax, %r11d
+; AVX512-NEXT: addl %r10d, %r11d
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: xorl %ebx, %ebx
+; AVX512-NEXT: popcntq {{[0-9]+}}(%rsp), %rbx
+; AVX512-NEXT: xorl %r14d, %r14d
+; AVX512-NEXT: popcntq {{[0-9]+}}(%rsp), %r14
+; AVX512-NEXT: addl %eax, %ebx
+; AVX512-NEXT: xorl %r10d, %r10d
+; AVX512-NEXT: popcntq {{[0-9]+}}(%rsp), %r10
+; AVX512-NEXT: addl %r14d, %r10d
+; AVX512-NEXT: addl %ebx, %r10d
+; AVX512-NEXT: addl %r11d, %r10d
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: xorl %r11d, %r11d
+; AVX512-NEXT: popcntq {{[0-9]+}}(%rsp), %r11
+; AVX512-NEXT: addl %eax, %r11d
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq %r9, %rax
+; AVX512-NEXT: popcntq %r8, %r8
+; AVX512-NEXT: addl %eax, %r8d
+; AVX512-NEXT: addl %r11d, %r8d
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq %rcx, %rax
+; AVX512-NEXT: xorl %ecx, %ecx
+; AVX512-NEXT: popcntq %rdx, %rcx
+; AVX512-NEXT: addl %eax, %ecx
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: popcntq %rsi, %rdx
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq %rdi, %rax
+; AVX512-NEXT: addl %edx, %eax
+; AVX512-NEXT: addl %ecx, %eax
+; AVX512-NEXT: addl %r8d, %eax
+; AVX512-NEXT: addl %r10d, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: retq
+ %cnt = call i1024 @llvm.ctpop.i1024(i1024 %a0)
+ %res = trunc i1024 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @load_ctpop_i1024(ptr %p0) nounwind {
+; SSE-LABEL: load_ctpop_i1024:
+; SSE: # %bb.0:
+; SSE-NEXT: popcntq 120(%rdi), %rax
+; SSE-NEXT: popcntq 112(%rdi), %rcx
+; SSE-NEXT: popcntq 104(%rdi), %rdx
+; SSE-NEXT: popcntq 96(%rdi), %rsi
+; SSE-NEXT: addl %eax, %ecx
+; SSE-NEXT: addl %edx, %esi
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq 88(%rdi), %rax
+; SSE-NEXT: addl %ecx, %esi
+; SSE-NEXT: xorl %edx, %edx
+; SSE-NEXT: popcntq 80(%rdi), %rdx
+; SSE-NEXT: addl %eax, %edx
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq 72(%rdi), %rax
+; SSE-NEXT: xorl %ecx, %ecx
+; SSE-NEXT: popcntq 64(%rdi), %rcx
+; SSE-NEXT: addl %eax, %ecx
+; SSE-NEXT: addl %edx, %ecx
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq 56(%rdi), %rax
+; SSE-NEXT: addl %esi, %ecx
+; SSE-NEXT: xorl %edx, %edx
+; SSE-NEXT: popcntq 48(%rdi), %rdx
+; SSE-NEXT: addl %eax, %edx
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq 40(%rdi), %rax
+; SSE-NEXT: xorl %esi, %esi
+; SSE-NEXT: popcntq 32(%rdi), %rsi
+; SSE-NEXT: addl %eax, %esi
+; SSE-NEXT: addl %edx, %esi
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq 24(%rdi), %rax
+; SSE-NEXT: xorl %edx, %edx
+; SSE-NEXT: popcntq 16(%rdi), %rdx
+; SSE-NEXT: popcntq 8(%rdi), %r8
+; SSE-NEXT: addl %eax, %edx
+; SSE-NEXT: xorl %eax, %eax
+; SSE-NEXT: popcntq (%rdi), %rax
+; SSE-NEXT: addl %r8d, %eax
+; SSE-NEXT: addl %edx, %eax
+; SSE-NEXT: addl %esi, %eax
+; SSE-NEXT: addl %ecx, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: load_ctpop_i1024:
+; AVX2: # %bb.0:
+; AVX2-NEXT: popcntq 120(%rdi), %rax
+; AVX2-NEXT: popcntq 112(%rdi), %rcx
+; AVX2-NEXT: addl %eax, %ecx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq 104(%rdi), %rax
+; AVX2-NEXT: popcntq 96(%rdi), %rdx
+; AVX2-NEXT: addl %eax, %edx
+; AVX2-NEXT: addl %ecx, %edx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq 88(%rdi), %rax
+; AVX2-NEXT: popcntq 80(%rdi), %rsi
+; AVX2-NEXT: popcntq 72(%rdi), %r8
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: popcntq 64(%rdi), %rcx
+; AVX2-NEXT: addl %eax, %esi
+; AVX2-NEXT: addl %r8d, %ecx
+; AVX2-NEXT: addl %esi, %ecx
+; AVX2-NEXT: addl %edx, %ecx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq 56(%rdi), %rax
+; AVX2-NEXT: xorl %edx, %edx
+; AVX2-NEXT: popcntq 48(%rdi), %rdx
+; AVX2-NEXT: xorl %esi, %esi
+; AVX2-NEXT: popcntq 40(%rdi), %rsi
+; AVX2-NEXT: xorl %r8d, %r8d
+; AVX2-NEXT: popcntq 32(%rdi), %r8
+; AVX2-NEXT: addl %eax, %edx
+; AVX2-NEXT: addl %esi, %r8d
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq 24(%rdi), %rax
+; AVX2-NEXT: addl %edx, %r8d
+; AVX2-NEXT: xorl %edx, %edx
+; AVX2-NEXT: popcntq 16(%rdi), %rdx
+; AVX2-NEXT: addl %eax, %edx
+; AVX2-NEXT: xorl %esi, %esi
+; AVX2-NEXT: popcntq 8(%rdi), %rsi
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: popcntq (%rdi), %rax
+; AVX2-NEXT: addl %esi, %eax
+; AVX2-NEXT: addl %edx, %eax
+; AVX2-NEXT: addl %r8d, %eax
+; AVX2-NEXT: addl %ecx, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_ctpop_i1024:
+; AVX512: # %bb.0:
+; AVX512-NEXT: popcntq 120(%rdi), %rax
+; AVX512-NEXT: popcntq 112(%rdi), %rcx
+; AVX512-NEXT: addl %eax, %ecx
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq 104(%rdi), %rax
+; AVX512-NEXT: popcntq 96(%rdi), %rdx
+; AVX512-NEXT: addl %eax, %edx
+; AVX512-NEXT: addl %ecx, %edx
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq 88(%rdi), %rax
+; AVX512-NEXT: popcntq 80(%rdi), %rsi
+; AVX512-NEXT: popcntq 72(%rdi), %r8
+; AVX512-NEXT: addl %eax, %esi
+; AVX512-NEXT: xorl %ecx, %ecx
+; AVX512-NEXT: popcntq 64(%rdi), %rcx
+; AVX512-NEXT: addl %r8d, %ecx
+; AVX512-NEXT: addl %esi, %ecx
+; AVX512-NEXT: addl %edx, %ecx
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq 56(%rdi), %rax
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: popcntq 48(%rdi), %rdx
+; AVX512-NEXT: xorl %esi, %esi
+; AVX512-NEXT: popcntq 40(%rdi), %rsi
+; AVX512-NEXT: addl %eax, %edx
+; AVX512-NEXT: xorl %r8d, %r8d
+; AVX512-NEXT: popcntq 32(%rdi), %r8
+; AVX512-NEXT: addl %esi, %r8d
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq 24(%rdi), %rax
+; AVX512-NEXT: addl %edx, %r8d
+; AVX512-NEXT: xorl %edx, %edx
+; AVX512-NEXT: popcntq 16(%rdi), %rdx
+; AVX512-NEXT: addl %eax, %edx
+; AVX512-NEXT: xorl %esi, %esi
+; AVX512-NEXT: popcntq 8(%rdi), %rsi
+; AVX512-NEXT: xorl %eax, %eax
+; AVX512-NEXT: popcntq (%rdi), %rax
+; AVX512-NEXT: addl %esi, %eax
+; AVX512-NEXT: addl %edx, %eax
+; AVX512-NEXT: addl %r8d, %eax
+; AVX512-NEXT: addl %ecx, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: retq
+ %a0 = load i1024, ptr %p0
+ %cnt = call i1024 @llvm.ctpop.i1024(i1024 %a0)
+ %res = trunc i1024 %cnt to i32
+ ret i32 %res
+}
+
+;
+; CTLZ
+;
+
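+; Wide ctlz is computed chunkwise: take the leading-zero count of each 64-bit
+; piece, bias the lower pieces by 64, 128, ..., and cmov-select on whether the
+; higher pieces are zero. The SSE (x86-64-v2) runs have no LZCNT, so they use
+; bsrq plus "xorl $63" to turn the bit index into a zero count; the preloaded
+; "movl $127" relies on bsr leaving its destination unchanged for a zero
+; source, which shipping x86 hardware guarantees in practice. The xor-zeroing
+; before lzcntq in the AVX2 runs breaks a similar false output dependency.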
+define i32 @test_ctlz_i128(i128 %a0) nounwind {
+; SSE-LABEL: test_ctlz_i128:
+; SSE: # %bb.0:
+; SSE-NEXT: bsrq %rsi, %rcx
+; SSE-NEXT: xorl $63, %ecx
+; SSE-NEXT: movl $127, %eax
+; SSE-NEXT: bsrq %rdi, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %rsi, %rsi
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: test_ctlz_i128:
+; AVX2: # %bb.0:
+; AVX2-NEXT: lzcntq %rsi, %rcx
+; AVX2-NEXT: lzcntq %rdi, %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %rsi, %rsi
+; AVX2-NEXT: cmovnel %ecx, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ctlz_i128:
+; AVX512: # %bb.0:
+; AVX512-NEXT: lzcntq %rsi, %rcx
+; AVX512-NEXT: lzcntq %rdi, %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rsi, %rsi
+; AVX512-NEXT: cmovnel %ecx, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: retq
+ %cnt = call i128 @llvm.ctlz.i128(i128 %a0, i1 0)
+ %res = trunc i128 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @load_ctlz_i128(ptr %p0) nounwind {
+; SSE-LABEL: load_ctlz_i128:
+; SSE: # %bb.0:
+; SSE-NEXT: movq 8(%rdi), %rcx
+; SSE-NEXT: bsrq %rcx, %rdx
+; SSE-NEXT: xorl $63, %edx
+; SSE-NEXT: movl $127, %eax
+; SSE-NEXT: bsrq (%rdi), %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %rcx, %rcx
+; SSE-NEXT: cmovnel %edx, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: load_ctlz_i128:
+; AVX2: # %bb.0:
+; AVX2-NEXT: movq 8(%rdi), %rcx
+; AVX2-NEXT: lzcntq %rcx, %rdx
+; AVX2-NEXT: lzcntq (%rdi), %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %rcx, %rcx
+; AVX2-NEXT: cmovnel %edx, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_ctlz_i128:
+; AVX512: # %bb.0:
+; AVX512-NEXT: movq 8(%rdi), %rcx
+; AVX512-NEXT: lzcntq %rcx, %rdx
+; AVX512-NEXT: lzcntq (%rdi), %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rcx, %rcx
+; AVX512-NEXT: cmovnel %edx, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: retq
+ %a0 = load i128, ptr %p0
+ %cnt = call i128 @llvm.ctlz.i128(i128 %a0, i1 0)
+ %res = trunc i128 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @test_ctlz_i256(i256 %a0) nounwind {
+; SSE-LABEL: test_ctlz_i256:
+; SSE: # %bb.0:
+; SSE-NEXT: bsrq %rcx, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %rdx, %r8
+; SSE-NEXT: xorl $63, %r8d
+; SSE-NEXT: orl $64, %r8d
+; SSE-NEXT: testq %rcx, %rcx
+; SSE-NEXT: cmovnel %eax, %r8d
+; SSE-NEXT: bsrq %rsi, %r9
+; SSE-NEXT: xorl $63, %r9d
+; SSE-NEXT: movl $127, %eax
+; SSE-NEXT: bsrq %rdi, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %rsi, %rsi
+; SSE-NEXT: cmovnel %r9d, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %rcx, %rdx
+; SSE-NEXT: cmovnel %r8d, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: test_ctlz_i256:
+; AVX2: # %bb.0:
+; AVX2-NEXT: lzcntq %rcx, %rax
+; AVX2-NEXT: lzcntq %rdx, %r8
+; AVX2-NEXT: addl $64, %r8d
+; AVX2-NEXT: testq %rcx, %rcx
+; AVX2-NEXT: cmovnel %eax, %r8d
+; AVX2-NEXT: lzcntq %rsi, %r9
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq %rdi, %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %rsi, %rsi
+; AVX2-NEXT: cmovnel %r9d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %rcx, %rdx
+; AVX2-NEXT: cmovnel %r8d, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ctlz_i256:
+; AVX512: # %bb.0:
+; AVX512-NEXT: lzcntq %rcx, %rax
+; AVX512-NEXT: lzcntq %rdx, %r8
+; AVX512-NEXT: addl $64, %r8d
+; AVX512-NEXT: testq %rcx, %rcx
+; AVX512-NEXT: cmovnel %eax, %r8d
+; AVX512-NEXT: lzcntq %rsi, %r9
+; AVX512-NEXT: lzcntq %rdi, %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rsi, %rsi
+; AVX512-NEXT: cmovnel %r9d, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %rcx, %rdx
+; AVX512-NEXT: cmovnel %r8d, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: retq
+ %cnt = call i256 @llvm.ctlz.i256(i256 %a0, i1 0)
+ %res = trunc i256 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @load_ctlz_i256(ptr %p0) nounwind {
+; SSE-LABEL: load_ctlz_i256:
+; SSE: # %bb.0:
+; SSE-NEXT: movq 16(%rdi), %rcx
+; SSE-NEXT: movq 24(%rdi), %rdx
+; SSE-NEXT: bsrq %rdx, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %rcx, %rsi
+; SSE-NEXT: xorl $63, %esi
+; SSE-NEXT: orl $64, %esi
+; SSE-NEXT: testq %rdx, %rdx
+; SSE-NEXT: cmovnel %eax, %esi
+; SSE-NEXT: movq 8(%rdi), %r8
+; SSE-NEXT: bsrq %r8, %r9
+; SSE-NEXT: xorl $63, %r9d
+; SSE-NEXT: movl $127, %eax
+; SSE-NEXT: bsrq (%rdi), %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %r8, %r8
+; SSE-NEXT: cmovnel %r9d, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %rdx, %rcx
+; SSE-NEXT: cmovnel %esi, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: load_ctlz_i256:
+; AVX2: # %bb.0:
+; AVX2-NEXT: movq 16(%rdi), %rcx
+; AVX2-NEXT: movq 24(%rdi), %rdx
+; AVX2-NEXT: lzcntq %rdx, %rax
+; AVX2-NEXT: lzcntq %rcx, %rsi
+; AVX2-NEXT: addl $64, %esi
+; AVX2-NEXT: testq %rdx, %rdx
+; AVX2-NEXT: cmovnel %eax, %esi
+; AVX2-NEXT: movq 8(%rdi), %r8
+; AVX2-NEXT: lzcntq %r8, %r9
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq (%rdi), %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %r8, %r8
+; AVX2-NEXT: cmovnel %r9d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %rdx, %rcx
+; AVX2-NEXT: cmovnel %esi, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_ctlz_i256:
+; AVX512: # %bb.0:
+; AVX512-NEXT: movq 8(%rdi), %rcx
+; AVX512-NEXT: movq 16(%rdi), %rdx
+; AVX512-NEXT: movq 24(%rdi), %rsi
+; AVX512-NEXT: lzcntq %rsi, %rax
+; AVX512-NEXT: lzcntq %rdx, %r8
+; AVX512-NEXT: addl $64, %r8d
+; AVX512-NEXT: testq %rsi, %rsi
+; AVX512-NEXT: cmovnel %eax, %r8d
+; AVX512-NEXT: lzcntq %rcx, %r9
+; AVX512-NEXT: lzcntq (%rdi), %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rcx, %rcx
+; AVX512-NEXT: cmovnel %r9d, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %rsi, %rdx
+; AVX512-NEXT: cmovnel %r8d, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: retq
+ %a0 = load i256, ptr %p0
+ %cnt = call i256 @llvm.ctlz.i256(i256 %a0, i1 0)
+ %res = trunc i256 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @test_ctlz_i512(i512 %a0) nounwind {
+; SSE-LABEL: test_ctlz_i512:
+; SSE: # %bb.0:
+; SSE-NEXT: pushq %r15
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r11
+; SSE-NEXT: bsrq %r11, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %r10, %r14
+; SSE-NEXT: xorl $63, %r14d
+; SSE-NEXT: orl $64, %r14d
+; SSE-NEXT: testq %r11, %r11
+; SSE-NEXT: cmovnel %eax, %r14d
+; SSE-NEXT: bsrq %r9, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %r8, %rbx
+; SSE-NEXT: xorl $63, %ebx
+; SSE-NEXT: orl $64, %ebx
+; SSE-NEXT: testq %r9, %r9
+; SSE-NEXT: cmovnel %eax, %ebx
+; SSE-NEXT: subl $-128, %ebx
+; SSE-NEXT: movq %r10, %rax
+; SSE-NEXT: orq %r11, %rax
+; SSE-NEXT: cmovnel %r14d, %ebx
+; SSE-NEXT: bsrq %rcx, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %rdx, %r14
+; SSE-NEXT: xorl $63, %r14d
+; SSE-NEXT: orl $64, %r14d
+; SSE-NEXT: testq %rcx, %rcx
+; SSE-NEXT: cmovnel %eax, %r14d
+; SSE-NEXT: bsrq %rsi, %r15
+; SSE-NEXT: xorl $63, %r15d
+; SSE-NEXT: movl $127, %eax
+; SSE-NEXT: bsrq %rdi, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %rsi, %rsi
+; SSE-NEXT: cmovnel %r15d, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %rcx, %rdx
+; SSE-NEXT: cmovnel %r14d, %eax
+; SSE-NEXT: addl $256, %eax # imm = 0x100
+; SSE-NEXT: orq %r11, %r9
+; SSE-NEXT: orq %r10, %r8
+; SSE-NEXT: orq %r9, %r8
+; SSE-NEXT: cmovnel %ebx, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r14
+; SSE-NEXT: popq %r15
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: test_ctlz_i512:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r11
+; AVX2-NEXT: lzcntq %r11, %rax
+; AVX2-NEXT: xorl %r14d, %r14d
+; AVX2-NEXT: lzcntq %r10, %r14
+; AVX2-NEXT: addl $64, %r14d
+; AVX2-NEXT: testq %r11, %r11
+; AVX2-NEXT: cmovnel %eax, %r14d
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq %r9, %rax
+; AVX2-NEXT: xorl %ebx, %ebx
+; AVX2-NEXT: lzcntq %r8, %rbx
+; AVX2-NEXT: addl $64, %ebx
+; AVX2-NEXT: testq %r9, %r9
+; AVX2-NEXT: cmovnel %eax, %ebx
+; AVX2-NEXT: subl $-128, %ebx
+; AVX2-NEXT: movq %r10, %rax
+; AVX2-NEXT: orq %r11, %rax
+; AVX2-NEXT: cmovnel %r14d, %ebx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq %rcx, %rax
+; AVX2-NEXT: xorl %r14d, %r14d
+; AVX2-NEXT: lzcntq %rdx, %r14
+; AVX2-NEXT: addl $64, %r14d
+; AVX2-NEXT: testq %rcx, %rcx
+; AVX2-NEXT: cmovnel %eax, %r14d
+; AVX2-NEXT: xorl %r15d, %r15d
+; AVX2-NEXT: lzcntq %rsi, %r15
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq %rdi, %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %rsi, %rsi
+; AVX2-NEXT: cmovnel %r15d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %rcx, %rdx
+; AVX2-NEXT: cmovnel %r14d, %eax
+; AVX2-NEXT: addl $256, %eax # imm = 0x100
+; AVX2-NEXT: orq %r11, %r9
+; AVX2-NEXT: orq %r10, %r8
+; AVX2-NEXT: orq %r9, %r8
+; AVX2-NEXT: cmovnel %ebx, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ctlz_i512:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r11
+; AVX512-NEXT: lzcntq %r11, %rax
+; AVX512-NEXT: lzcntq %r10, %r14
+; AVX512-NEXT: addl $64, %r14d
+; AVX512-NEXT: testq %r11, %r11
+; AVX512-NEXT: cmovnel %eax, %r14d
+; AVX512-NEXT: lzcntq %r9, %rax
+; AVX512-NEXT: lzcntq %r8, %rbx
+; AVX512-NEXT: addl $64, %ebx
+; AVX512-NEXT: testq %r9, %r9
+; AVX512-NEXT: cmovnel %eax, %ebx
+; AVX512-NEXT: subl $-128, %ebx
+; AVX512-NEXT: movq %r10, %rax
+; AVX512-NEXT: orq %r11, %rax
+; AVX512-NEXT: cmovnel %r14d, %ebx
+; AVX512-NEXT: lzcntq %rcx, %rax
+; AVX512-NEXT: lzcntq %rdx, %r14
+; AVX512-NEXT: addl $64, %r14d
+; AVX512-NEXT: testq %rcx, %rcx
+; AVX512-NEXT: cmovnel %eax, %r14d
+; AVX512-NEXT: lzcntq %rsi, %r15
+; AVX512-NEXT: lzcntq %rdi, %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rsi, %rsi
+; AVX512-NEXT: cmovnel %r15d, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %rcx, %rdx
+; AVX512-NEXT: cmovnel %r14d, %eax
+; AVX512-NEXT: addl $256, %eax # imm = 0x100
+; AVX512-NEXT: orq %r11, %r9
+; AVX512-NEXT: orq %r10, %r8
+; AVX512-NEXT: orq %r9, %r8
+; AVX512-NEXT: cmovnel %ebx, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: retq
+ %cnt = call i512 @llvm.ctlz.i512(i512 %a0, i1 0)
+ %res = trunc i512 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @load_ctlz_i512(ptr %p0) nounwind {
+; SSE-LABEL: load_ctlz_i512:
+; SSE: # %bb.0:
+; SSE-NEXT: pushq %r15
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: movq 8(%rdi), %r10
+; SSE-NEXT: movq 16(%rdi), %r9
+; SSE-NEXT: movq 32(%rdi), %rcx
+; SSE-NEXT: movq 40(%rdi), %rdx
+; SSE-NEXT: movq 48(%rdi), %rsi
+; SSE-NEXT: movq 56(%rdi), %r8
+; SSE-NEXT: bsrq %r8, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %rsi, %r14
+; SSE-NEXT: xorl $63, %r14d
+; SSE-NEXT: orl $64, %r14d
+; SSE-NEXT: testq %r8, %r8
+; SSE-NEXT: cmovnel %eax, %r14d
+; SSE-NEXT: bsrq %rdx, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %rcx, %r11
+; SSE-NEXT: xorl $63, %r11d
+; SSE-NEXT: orl $64, %r11d
+; SSE-NEXT: testq %rdx, %rdx
+; SSE-NEXT: cmovnel %eax, %r11d
+; SSE-NEXT: movq 24(%rdi), %rbx
+; SSE-NEXT: subl $-128, %r11d
+; SSE-NEXT: movq %rsi, %rax
+; SSE-NEXT: orq %r8, %rax
+; SSE-NEXT: cmovnel %r14d, %r11d
+; SSE-NEXT: bsrq %rbx, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %r9, %r14
+; SSE-NEXT: xorl $63, %r14d
+; SSE-NEXT: orl $64, %r14d
+; SSE-NEXT: testq %rbx, %rbx
+; SSE-NEXT: cmovnel %eax, %r14d
+; SSE-NEXT: bsrq %r10, %r15
+; SSE-NEXT: xorl $63, %r15d
+; SSE-NEXT: movl $127, %eax
+; SSE-NEXT: bsrq (%rdi), %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %r10, %r10
+; SSE-NEXT: cmovnel %r15d, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %rbx, %r9
+; SSE-NEXT: cmovnel %r14d, %eax
+; SSE-NEXT: addl $256, %eax # imm = 0x100
+; SSE-NEXT: orq %r8, %rdx
+; SSE-NEXT: orq %rsi, %rcx
+; SSE-NEXT: orq %rdx, %rcx
+; SSE-NEXT: cmovnel %r11d, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r14
+; SSE-NEXT: popq %r15
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: load_ctlz_i512:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: movq 8(%rdi), %r10
+; AVX2-NEXT: movq 16(%rdi), %r9
+; AVX2-NEXT: movq 32(%rdi), %rcx
+; AVX2-NEXT: movq 40(%rdi), %rdx
+; AVX2-NEXT: movq 48(%rdi), %rsi
+; AVX2-NEXT: movq 56(%rdi), %r8
+; AVX2-NEXT: lzcntq %r8, %rax
+; AVX2-NEXT: xorl %ebx, %ebx
+; AVX2-NEXT: lzcntq %rsi, %rbx
+; AVX2-NEXT: addl $64, %ebx
+; AVX2-NEXT: testq %r8, %r8
+; AVX2-NEXT: cmovnel %eax, %ebx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq %rdx, %rax
+; AVX2-NEXT: lzcntq %rcx, %r11
+; AVX2-NEXT: addl $64, %r11d
+; AVX2-NEXT: testq %rdx, %rdx
+; AVX2-NEXT: cmovnel %eax, %r11d
+; AVX2-NEXT: subl $-128, %r11d
+; AVX2-NEXT: movq %rsi, %rax
+; AVX2-NEXT: orq %r8, %rax
+; AVX2-NEXT: cmovnel %ebx, %r11d
+; AVX2-NEXT: movq 24(%rdi), %rbx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq %rbx, %rax
+; AVX2-NEXT: xorl %r14d, %r14d
+; AVX2-NEXT: lzcntq %r9, %r14
+; AVX2-NEXT: addl $64, %r14d
+; AVX2-NEXT: testq %rbx, %rbx
+; AVX2-NEXT: cmovnel %eax, %r14d
+; AVX2-NEXT: xorl %r15d, %r15d
+; AVX2-NEXT: lzcntq %r10, %r15
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq (%rdi), %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %r10, %r10
+; AVX2-NEXT: cmovnel %r15d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %rbx, %r9
+; AVX2-NEXT: cmovnel %r14d, %eax
+; AVX2-NEXT: addl $256, %eax # imm = 0x100
+; AVX2-NEXT: orq %r8, %rdx
+; AVX2-NEXT: orq %rsi, %rcx
+; AVX2-NEXT: orq %rdx, %rcx
+; AVX2-NEXT: cmovnel %r11d, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_ctlz_i512:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: movq 8(%rdi), %r11
+; AVX512-NEXT: movq 16(%rdi), %r9
+; AVX512-NEXT: movq 24(%rdi), %r10
+; AVX512-NEXT: movq 32(%rdi), %rcx
+; AVX512-NEXT: movq 40(%rdi), %rdx
+; AVX512-NEXT: movq 48(%rdi), %rsi
+; AVX512-NEXT: movq 56(%rdi), %r8
+; AVX512-NEXT: lzcntq %r8, %rax
+; AVX512-NEXT: lzcntq %rsi, %r14
+; AVX512-NEXT: addl $64, %r14d
+; AVX512-NEXT: testq %r8, %r8
+; AVX512-NEXT: cmovnel %eax, %r14d
+; AVX512-NEXT: lzcntq %rdx, %rax
+; AVX512-NEXT: lzcntq %rcx, %rbx
+; AVX512-NEXT: addl $64, %ebx
+; AVX512-NEXT: testq %rdx, %rdx
+; AVX512-NEXT: cmovnel %eax, %ebx
+; AVX512-NEXT: subl $-128, %ebx
+; AVX512-NEXT: movq %rsi, %rax
+; AVX512-NEXT: orq %r8, %rax
+; AVX512-NEXT: cmovnel %r14d, %ebx
+; AVX512-NEXT: lzcntq %r10, %rax
+; AVX512-NEXT: lzcntq %r9, %r14
+; AVX512-NEXT: addl $64, %r14d
+; AVX512-NEXT: testq %r10, %r10
+; AVX512-NEXT: cmovnel %eax, %r14d
+; AVX512-NEXT: lzcntq (%rdi), %rax
+; AVX512-NEXT: lzcntq %r11, %rdi
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %r11, %r11
+; AVX512-NEXT: cmovnel %edi, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %r10, %r9
+; AVX512-NEXT: cmovnel %r14d, %eax
+; AVX512-NEXT: addl $256, %eax # imm = 0x100
+; AVX512-NEXT: orq %r8, %rdx
+; AVX512-NEXT: orq %rsi, %rcx
+; AVX512-NEXT: orq %rdx, %rcx
+; AVX512-NEXT: cmovnel %ebx, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: retq
+ %a0 = load i512, ptr %p0
+ %cnt = call i512 @llvm.ctlz.i512(i512 %a0, i1 0)
+ %res = trunc i512 %cnt to i32
+ ret i32 %res
+}
+
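+; For i1024 the SysV x86-64 ABI has only six integer argument registers, so
+; the value arrives split between %rdi..%r9 and the stack; that is why the
+; 1024-bit tests below mix register operands with {{[0-9]+}}(%rsp) loads and
+; need extra spills/reloads to cope with register pressure.
+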
+define i32 @test_ctlz_i1024(i1024 %a0) nounwind {
+; SSE-LABEL: test_ctlz_i1024:
+; SSE: # %bb.0:
+; SSE-NEXT: pushq %rbp
+; SSE-NEXT: pushq %r15
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %r13
+; SSE-NEXT: pushq %r12
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: movq %r9, %r11
+; SSE-NEXT: movq %r8, %r9
+; SSE-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: movq %rdx, %r12
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rbx
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r14
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r15
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; SSE-NEXT: bsrq %r8, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %r15, %rcx
+; SSE-NEXT: xorl $63, %ecx
+; SSE-NEXT: orl $64, %ecx
+; SSE-NEXT: testq %r8, %r8
+; SSE-NEXT: cmovnel %eax, %ecx
+; SSE-NEXT: bsrq %r14, %rdx
+; SSE-NEXT: xorl $63, %edx
+; SSE-NEXT: bsrq {{[0-9]+}}(%rsp), %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: orl $64, %eax
+; SSE-NEXT: testq %r14, %r14
+; SSE-NEXT: cmovnel %edx, %eax
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r13
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: movq %r15, %rdx
+; SSE-NEXT: orq %r8, %rdx
+; SSE-NEXT: movq %r8, %r14
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: bsrq %r13, %rcx
+; SSE-NEXT: xorl $63, %ecx
+; SSE-NEXT: bsrq %rbx, %rdx
+; SSE-NEXT: xorl $63, %edx
+; SSE-NEXT: orl $64, %edx
+; SSE-NEXT: testq %r13, %r13
+; SSE-NEXT: cmovnel %ecx, %edx
+; SSE-NEXT: bsrq %r10, %rcx
+; SSE-NEXT: xorl $63, %ecx
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; SSE-NEXT: bsrq %r8, %rbp
+; SSE-NEXT: xorl $63, %ebp
+; SSE-NEXT: orl $64, %ebp
+; SSE-NEXT: testq %r10, %r10
+; SSE-NEXT: cmovnel %ecx, %ebp
+; SSE-NEXT: subl $-128, %ebp
+; SSE-NEXT: movq %rbx, %rcx
+; SSE-NEXT: orq %r13, %rcx
+; SSE-NEXT: cmovnel %edx, %ebp
+; SSE-NEXT: addl $256, %ebp # imm = 0x100
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; SSE-NEXT: orq %r14, %rcx
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; SSE-NEXT: orq %r15, %rdx
+; SSE-NEXT: orq %rcx, %rdx
+; SSE-NEXT: cmovnel %eax, %ebp
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r14
+; SSE-NEXT: bsrq %r14, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r15
+; SSE-NEXT: bsrq %r15, %rcx
+; SSE-NEXT: xorl $63, %ecx
+; SSE-NEXT: orl $64, %ecx
+; SSE-NEXT: testq %r14, %r14
+; SSE-NEXT: cmovnel %eax, %ecx
+; SSE-NEXT: bsrq %r11, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %r9, %rdx
+; SSE-NEXT: xorl $63, %edx
+; SSE-NEXT: orl $64, %edx
+; SSE-NEXT: testq %r11, %r11
+; SSE-NEXT: cmovnel %eax, %edx
+; SSE-NEXT: subl $-128, %edx
+; SSE-NEXT: movq %r15, %rax
+; SSE-NEXT: orq %r14, %rax
+; SSE-NEXT: cmovnel %ecx, %edx
+; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
+; SSE-NEXT: bsrq %r15, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %r12, %rcx
+; SSE-NEXT: xorl $63, %ecx
+; SSE-NEXT: orl $64, %ecx
+; SSE-NEXT: testq %r15, %r15
+; SSE-NEXT: cmovnel %eax, %ecx
+; SSE-NEXT: movl $127, %eax
+; SSE-NEXT: bsrq %rdi, %rax
+; SSE-NEXT: bsrq %rsi, %rdi
+; SSE-NEXT: xorl $63, %edi
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %rsi, %rsi
+; SSE-NEXT: cmovnel %edi, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %r15, %r12
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: orq %r14, %r11
+; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r9
+; SSE-NEXT: addl $256, %eax # imm = 0x100
+; SSE-NEXT: orq %r11, %r9
+; SSE-NEXT: cmovnel %edx, %eax
+; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r13
+; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r10
+; SSE-NEXT: orq %r13, %r10
+; SSE-NEXT: orq {{[0-9]+}}(%rsp), %rbx
+; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r8
+; SSE-NEXT: orq %rbx, %r8
+; SSE-NEXT: addl $512, %eax # imm = 0x200
+; SSE-NEXT: orq %r10, %r8
+; SSE-NEXT: cmovnel %ebp, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r12
+; SSE-NEXT: popq %r13
+; SSE-NEXT: popq %r14
+; SSE-NEXT: popq %r15
+; SSE-NEXT: popq %rbp
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: test_ctlz_i1024:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: movq %r9, %r14
+; AVX2-NEXT: movq %r8, %r11
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r15
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rbx
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r12
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: lzcntq %r12, %rcx
+; AVX2-NEXT: xorl %r9d, %r9d
+; AVX2-NEXT: lzcntq %r8, %r9
+; AVX2-NEXT: addl $64, %r9d
+; AVX2-NEXT: testq %r12, %r12
+; AVX2-NEXT: cmovnel %ecx, %r9d
+; AVX2-NEXT: xorl %esi, %esi
+; AVX2-NEXT: lzcntq %r10, %rsi
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: lzcntq %rax, %rcx
+; AVX2-NEXT: addl $64, %ecx
+; AVX2-NEXT: testq %r10, %r10
+; AVX2-NEXT: cmovnel %esi, %ecx
+; AVX2-NEXT: subl $-128, %ecx
+; AVX2-NEXT: movq %r8, %rsi
+; AVX2-NEXT: orq %r12, %rsi
+; AVX2-NEXT: cmovnel %r9d, %ecx
+; AVX2-NEXT: xorl %edi, %edi
+; AVX2-NEXT: lzcntq %rbx, %rdi
+; AVX2-NEXT: xorl %esi, %esi
+; AVX2-NEXT: lzcntq %r15, %rsi
+; AVX2-NEXT: addl $64, %esi
+; AVX2-NEXT: testq %rbx, %rbx
+; AVX2-NEXT: cmovnel %edi, %esi
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r13
+; AVX2-NEXT: xorl %ebp, %ebp
+; AVX2-NEXT: lzcntq %r13, %rbp
+; AVX2-NEXT: addl $64, %ebp
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r9
+; AVX2-NEXT: xorl %edi, %edi
+; AVX2-NEXT: lzcntq %r9, %rdi
+; AVX2-NEXT: testq %r9, %r9
+; AVX2-NEXT: cmovnel %edi, %ebp
+; AVX2-NEXT: subl $-128, %ebp
+; AVX2-NEXT: movq %r15, %rdi
+; AVX2-NEXT: orq %rbx, %rdi
+; AVX2-NEXT: cmovnel %esi, %ebp
+; AVX2-NEXT: addl $256, %ebp # imm = 0x100
+; AVX2-NEXT: movq %r10, %rdi
+; AVX2-NEXT: orq %r12, %rdi
+; AVX2-NEXT: movq %rax, %rsi
+; AVX2-NEXT: orq %r8, %rsi
+; AVX2-NEXT: orq %rdi, %rsi
+; AVX2-NEXT: cmovnel %ecx, %ebp
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdi
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq %rdi, %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r12
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: lzcntq %r12, %rcx
+; AVX2-NEXT: testq %r12, %r12
+; AVX2-NEXT: cmovnel %ecx, %eax
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: lzcntq %r11, %rcx
+; AVX2-NEXT: addl $64, %ecx
+; AVX2-NEXT: xorl %esi, %esi
+; AVX2-NEXT: lzcntq %r14, %rsi
+; AVX2-NEXT: testq %r14, %r14
+; AVX2-NEXT: cmovnel %esi, %ecx
+; AVX2-NEXT: subl $-128, %ecx
+; AVX2-NEXT: movq %rdi, %rsi
+; AVX2-NEXT: orq %r12, %rsi
+; AVX2-NEXT: cmovnel %eax, %ecx
+; AVX2-NEXT: movq %rdx, %rdi
+; AVX2-NEXT: lzcntq %rdx, %rdx
+; AVX2-NEXT: addl $64, %edx
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq %r10, %rax
+; AVX2-NEXT: testq %r10, %r10
+; AVX2-NEXT: cmovnel %eax, %edx
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; AVX2-NEXT: lzcntq %rax, %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX2-NEXT: lzcntq %rsi, %r8
+; AVX2-NEXT: testq %rsi, %rsi
+; AVX2-NEXT: cmovnel %r8d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %r10, %rdi
+; AVX2-NEXT: cmovnel %edx, %eax
+; AVX2-NEXT: orq %r12, %r14
+; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r11
+; AVX2-NEXT: addl $256, %eax # imm = 0x100
+; AVX2-NEXT: orq %r14, %r11
+; AVX2-NEXT: cmovnel %ecx, %eax
+; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %rbx
+; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r9
+; AVX2-NEXT: orq %rbx, %r9
+; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r15
+; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r13
+; AVX2-NEXT: orq %r15, %r13
+; AVX2-NEXT: addl $512, %eax # imm = 0x200
+; AVX2-NEXT: orq %r9, %r13
+; AVX2-NEXT: cmovnel %ebp, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_ctlz_i1024:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: pushq %r13
+; AVX512-NEXT: pushq %r12
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: movq %r9, %r14
+; AVX512-NEXT: movq %r8, %r11
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r15
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rbx
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r12
+; AVX512-NEXT: lzcntq %r12, %rcx
+; AVX512-NEXT: lzcntq %r8, %r9
+; AVX512-NEXT: addl $64, %r9d
+; AVX512-NEXT: testq %r12, %r12
+; AVX512-NEXT: cmovnel %ecx, %r9d
+; AVX512-NEXT: lzcntq %r10, %rsi
+; AVX512-NEXT: lzcntq %rax, %rcx
+; AVX512-NEXT: addl $64, %ecx
+; AVX512-NEXT: testq %r10, %r10
+; AVX512-NEXT: cmovnel %esi, %ecx
+; AVX512-NEXT: subl $-128, %ecx
+; AVX512-NEXT: movq %r8, %rsi
+; AVX512-NEXT: orq %r12, %rsi
+; AVX512-NEXT: cmovnel %r9d, %ecx
+; AVX512-NEXT: lzcntq %rbx, %rdi
+; AVX512-NEXT: lzcntq %r15, %rsi
+; AVX512-NEXT: addl $64, %esi
+; AVX512-NEXT: testq %rbx, %rbx
+; AVX512-NEXT: cmovnel %edi, %esi
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r13
+; AVX512-NEXT: lzcntq %r13, %rbp
+; AVX512-NEXT: addl $64, %ebp
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r9
+; AVX512-NEXT: lzcntq %r9, %rdi
+; AVX512-NEXT: testq %r9, %r9
+; AVX512-NEXT: cmovnel %edi, %ebp
+; AVX512-NEXT: subl $-128, %ebp
+; AVX512-NEXT: movq %r15, %rdi
+; AVX512-NEXT: orq %rbx, %rdi
+; AVX512-NEXT: cmovnel %esi, %ebp
+; AVX512-NEXT: addl $256, %ebp # imm = 0x100
+; AVX512-NEXT: movq %r10, %rdi
+; AVX512-NEXT: orq %r12, %rdi
+; AVX512-NEXT: movq %rax, %rsi
+; AVX512-NEXT: orq %r8, %rsi
+; AVX512-NEXT: orq %rdi, %rsi
+; AVX512-NEXT: cmovnel %ecx, %ebp
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdi
+; AVX512-NEXT: lzcntq %rdi, %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r12
+; AVX512-NEXT: lzcntq %r12, %rcx
+; AVX512-NEXT: testq %r12, %r12
+; AVX512-NEXT: cmovnel %ecx, %eax
+; AVX512-NEXT: lzcntq %r11, %rcx
+; AVX512-NEXT: addl $64, %ecx
+; AVX512-NEXT: lzcntq %r14, %rsi
+; AVX512-NEXT: testq %r14, %r14
+; AVX512-NEXT: cmovnel %esi, %ecx
+; AVX512-NEXT: subl $-128, %ecx
+; AVX512-NEXT: movq %rdi, %rsi
+; AVX512-NEXT: orq %r12, %rsi
+; AVX512-NEXT: cmovnel %eax, %ecx
+; AVX512-NEXT: movq %rdx, %rdi
+; AVX512-NEXT: lzcntq %rdx, %rdx
+; AVX512-NEXT: addl $64, %edx
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
+; AVX512-NEXT: lzcntq %r10, %rax
+; AVX512-NEXT: testq %r10, %r10
+; AVX512-NEXT: cmovnel %eax, %edx
+; AVX512-NEXT: lzcntq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX512-NEXT: lzcntq %rsi, %r8
+; AVX512-NEXT: testq %rsi, %rsi
+; AVX512-NEXT: cmovnel %r8d, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %r10, %rdi
+; AVX512-NEXT: cmovnel %edx, %eax
+; AVX512-NEXT: orq %r12, %r14
+; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %r11
+; AVX512-NEXT: addl $256, %eax # imm = 0x100
+; AVX512-NEXT: orq %r14, %r11
+; AVX512-NEXT: cmovnel %ecx, %eax
+; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %rbx
+; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %r9
+; AVX512-NEXT: orq %rbx, %r9
+; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %r15
+; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %r13
+; AVX512-NEXT: orq %r15, %r13
+; AVX512-NEXT: addl $512, %eax # imm = 0x200
+; AVX512-NEXT: orq %r9, %r13
+; AVX512-NEXT: cmovnel %ebp, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: retq
+ %cnt = call i1024 @llvm.ctlz.i1024(i1024 %a0, i1 0)
+ %res = trunc i1024 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @load_ctlz_i1024(ptr %p0) nounwind {
+; SSE-LABEL: load_ctlz_i1024:
+; SSE: # %bb.0:
+; SSE-NEXT: pushq %rbp
+; SSE-NEXT: pushq %r15
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %r13
+; SSE-NEXT: pushq %r12
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: movq 40(%rdi), %rbp
+; SSE-NEXT: movq 64(%rdi), %rbx
+; SSE-NEXT: movq 72(%rdi), %r11
+; SSE-NEXT: movq 80(%rdi), %r12
+; SSE-NEXT: movq 88(%rdi), %r14
+; SSE-NEXT: movq 96(%rdi), %rsi
+; SSE-NEXT: movq 104(%rdi), %r9
+; SSE-NEXT: movq 112(%rdi), %r10
+; SSE-NEXT: movq 120(%rdi), %r8
+; SSE-NEXT: bsrq %r8, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %r10, %rcx
+; SSE-NEXT: xorl $63, %ecx
+; SSE-NEXT: orl $64, %ecx
+; SSE-NEXT: testq %r8, %r8
+; SSE-NEXT: cmovnel %eax, %ecx
+; SSE-NEXT: bsrq %r9, %rdx
+; SSE-NEXT: xorl $63, %edx
+; SSE-NEXT: bsrq %rsi, %rax
+; SSE-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: orl $64, %eax
+; SSE-NEXT: testq %r9, %r9
+; SSE-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: cmovnel %edx, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: movq %r10, %rdx
+; SSE-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: orq %r8, %rdx
+; SSE-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: bsrq %r14, %rcx
+; SSE-NEXT: xorl $63, %ecx
+; SSE-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: bsrq %r12, %rdx
+; SSE-NEXT: xorl $63, %edx
+; SSE-NEXT: orl $64, %edx
+; SSE-NEXT: testq %r14, %r14
+; SSE-NEXT: cmovnel %ecx, %edx
+; SSE-NEXT: bsrq %r11, %rcx
+; SSE-NEXT: xorl $63, %ecx
+; SSE-NEXT: bsrq %rbx, %r15
+; SSE-NEXT: xorl $63, %r15d
+; SSE-NEXT: orl $64, %r15d
+; SSE-NEXT: testq %r11, %r11
+; SSE-NEXT: cmovnel %ecx, %r15d
+; SSE-NEXT: subl $-128, %r15d
+; SSE-NEXT: movq %r12, %rcx
+; SSE-NEXT: orq %r14, %rcx
+; SSE-NEXT: cmovnel %edx, %r15d
+; SSE-NEXT: movq 48(%rdi), %r12
+; SSE-NEXT: addl $256, %r15d # imm = 0x100
+; SSE-NEXT: movq %r9, %rcx
+; SSE-NEXT: orq %r8, %rcx
+; SSE-NEXT: movq %rsi, %rdx
+; SSE-NEXT: orq %r10, %rdx
+; SSE-NEXT: orq %rcx, %rdx
+; SSE-NEXT: movq 56(%rdi), %r13
+; SSE-NEXT: cmovnel %eax, %r15d
+; SSE-NEXT: bsrq %r13, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: bsrq %r12, %rcx
+; SSE-NEXT: xorl $63, %ecx
+; SSE-NEXT: orl $64, %ecx
+; SSE-NEXT: testq %r13, %r13
+; SSE-NEXT: cmovnel %eax, %ecx
+; SSE-NEXT: movq %rbp, %r10
+; SSE-NEXT: bsrq %rbp, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: movq 32(%rdi), %r8
+; SSE-NEXT: bsrq %r8, %rbp
+; SSE-NEXT: xorl $63, %ebp
+; SSE-NEXT: orl $64, %ebp
+; SSE-NEXT: testq %r10, %r10
+; SSE-NEXT: cmovnel %eax, %ebp
+; SSE-NEXT: subl $-128, %ebp
+; SSE-NEXT: movq %r12, %rax
+; SSE-NEXT: orq %r13, %rax
+; SSE-NEXT: cmovnel %ecx, %ebp
+; SSE-NEXT: movq 24(%rdi), %r9
+; SSE-NEXT: bsrq %r9, %rax
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: movq 16(%rdi), %rsi
+; SSE-NEXT: bsrq %rsi, %rcx
+; SSE-NEXT: xorl $63, %ecx
+; SSE-NEXT: orl $64, %ecx
+; SSE-NEXT: testq %r9, %r9
+; SSE-NEXT: cmovnel %eax, %ecx
+; SSE-NEXT: movl $127, %eax
+; SSE-NEXT: bsrq (%rdi), %rax
+; SSE-NEXT: movq 8(%rdi), %rdi
+; SSE-NEXT: bsrq %rdi, %rdx
+; SSE-NEXT: xorl $63, %edx
+; SSE-NEXT: xorl $63, %eax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %rdi, %rdi
+; SSE-NEXT: cmovnel %edx, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %r9, %rsi
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: orq %r13, %r10
+; SSE-NEXT: orq %r12, %r8
+; SSE-NEXT: addl $256, %eax # imm = 0x100
+; SSE-NEXT: orq %r10, %r8
+; SSE-NEXT: cmovnel %ebp, %eax
+; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
+; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; SSE-NEXT: orq %r14, %r11
+; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; SSE-NEXT: orq %rcx, %rbx
+; SSE-NEXT: addl $512, %eax # imm = 0x200
+; SSE-NEXT: orq %r11, %rbx
+; SSE-NEXT: cmovnel %r15d, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r12
+; SSE-NEXT: popq %r13
+; SSE-NEXT: popq %r14
+; SSE-NEXT: popq %r15
+; SSE-NEXT: popq %rbp
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: load_ctlz_i1024:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: movq 48(%rdi), %r9
+; AVX2-NEXT: movq 56(%rdi), %rbp
+; AVX2-NEXT: movq 64(%rdi), %r11
+; AVX2-NEXT: movq 72(%rdi), %r10
+; AVX2-NEXT: movq 80(%rdi), %r14
+; AVX2-NEXT: movq 88(%rdi), %rbx
+; AVX2-NEXT: movq 96(%rdi), %rdx
+; AVX2-NEXT: movq 104(%rdi), %r8
+; AVX2-NEXT: movq 112(%rdi), %rsi
+; AVX2-NEXT: movq 120(%rdi), %r15
+; AVX2-NEXT: lzcntq %r15, %rax
+; AVX2-NEXT: lzcntq %rsi, %rcx
+; AVX2-NEXT: addl $64, %ecx
+; AVX2-NEXT: testq %r15, %r15
+; AVX2-NEXT: cmovnel %eax, %ecx
+; AVX2-NEXT: xorl %r12d, %r12d
+; AVX2-NEXT: lzcntq %r8, %r12
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq %rdx, %rax
+; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %r8, %r8
+; AVX2-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: cmovnel %r12d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: movq %rsi, %r12
+; AVX2-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: orq %r15, %r12
+; AVX2-NEXT: cmovnel %ecx, %eax
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: lzcntq %rbx, %rcx
+; AVX2-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: xorl %r13d, %r13d
+; AVX2-NEXT: lzcntq %r14, %r13
+; AVX2-NEXT: addl $64, %r13d
+; AVX2-NEXT: testq %rbx, %rbx
+; AVX2-NEXT: cmovnel %ecx, %r13d
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: lzcntq %r10, %rcx
+; AVX2-NEXT: xorl %r12d, %r12d
+; AVX2-NEXT: lzcntq %r11, %r12
+; AVX2-NEXT: addl $64, %r12d
+; AVX2-NEXT: testq %r10, %r10
+; AVX2-NEXT: cmovnel %ecx, %r12d
+; AVX2-NEXT: subl $-128, %r12d
+; AVX2-NEXT: movq %r14, %rcx
+; AVX2-NEXT: orq %rbx, %rcx
+; AVX2-NEXT: cmovnel %r13d, %r12d
+; AVX2-NEXT: addl $256, %r12d # imm = 0x100
+; AVX2-NEXT: movq %r8, %rcx
+; AVX2-NEXT: orq %r15, %rcx
+; AVX2-NEXT: orq %rsi, %rdx
+; AVX2-NEXT: orq %rcx, %rdx
+; AVX2-NEXT: cmovnel %eax, %r12d
+; AVX2-NEXT: movq %rbp, %r14
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: lzcntq %rbp, %rcx
+; AVX2-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq %r9, %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %rbp, %rbp
+; AVX2-NEXT: cmovnel %ecx, %eax
+; AVX2-NEXT: movq 32(%rdi), %r13
+; AVX2-NEXT: xorl %ebp, %ebp
+; AVX2-NEXT: lzcntq %r13, %rbp
+; AVX2-NEXT: addl $64, %ebp
+; AVX2-NEXT: movq 40(%rdi), %r8
+; AVX2-NEXT: xorl %edx, %edx
+; AVX2-NEXT: lzcntq %r8, %rdx
+; AVX2-NEXT: testq %r8, %r8
+; AVX2-NEXT: cmovnel %edx, %ebp
+; AVX2-NEXT: subl $-128, %ebp
+; AVX2-NEXT: movq %r9, %rdx
+; AVX2-NEXT: orq %r14, %rdx
+; AVX2-NEXT: cmovnel %eax, %ebp
+; AVX2-NEXT: movq 16(%rdi), %r9
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: lzcntq %r9, %rcx
+; AVX2-NEXT: addl $64, %ecx
+; AVX2-NEXT: movq 24(%rdi), %rdx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq %rdx, %rax
+; AVX2-NEXT: testq %rdx, %rdx
+; AVX2-NEXT: cmovnel %eax, %ecx
+; AVX2-NEXT: movq 8(%rdi), %rsi
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: lzcntq (%rdi), %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: lzcntq %rsi, %rdi
+; AVX2-NEXT: testq %rsi, %rsi
+; AVX2-NEXT: cmovnel %edi, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %rdx, %r9
+; AVX2-NEXT: cmovnel %ecx, %eax
+; AVX2-NEXT: orq %r14, %r8
+; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
+; AVX2-NEXT: addl $256, %eax # imm = 0x100
+; AVX2-NEXT: orq %r8, %r13
+; AVX2-NEXT: cmovnel %ebp, %eax
+; AVX2-NEXT: orq %r15, %rbx
+; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
+; AVX2-NEXT: orq %rbx, %r10
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX2-NEXT: orq %rcx, %r11
+; AVX2-NEXT: addl $512, %eax # imm = 0x200
+; AVX2-NEXT: orq %r10, %r11
+; AVX2-NEXT: cmovnel %r12d, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_ctlz_i1024:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: pushq %r13
+; AVX512-NEXT: pushq %r12
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: movq 32(%rdi), %r14
+; AVX512-NEXT: movq 48(%rdi), %rbp
+; AVX512-NEXT: movq 64(%rdi), %r11
+; AVX512-NEXT: movq 72(%rdi), %r10
+; AVX512-NEXT: movq 80(%rdi), %rdx
+; AVX512-NEXT: movq 88(%rdi), %rbx
+; AVX512-NEXT: movq 96(%rdi), %rsi
+; AVX512-NEXT: movq 104(%rdi), %r9
+; AVX512-NEXT: movq 112(%rdi), %r8
+; AVX512-NEXT: movq 120(%rdi), %r15
+; AVX512-NEXT: lzcntq %r15, %rax
+; AVX512-NEXT: lzcntq %r8, %rcx
+; AVX512-NEXT: addl $64, %ecx
+; AVX512-NEXT: testq %r15, %r15
+; AVX512-NEXT: cmovnel %eax, %ecx
+; AVX512-NEXT: lzcntq %r9, %r12
+; AVX512-NEXT: lzcntq %rsi, %rax
+; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %r9, %r9
+; AVX512-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: cmovnel %r12d, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: movq %r8, %r12
+; AVX512-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: orq %r15, %r12
+; AVX512-NEXT: cmovnel %ecx, %eax
+; AVX512-NEXT: lzcntq %rbx, %rcx
+; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: lzcntq %rdx, %r13
+; AVX512-NEXT: addl $64, %r13d
+; AVX512-NEXT: testq %rbx, %rbx
+; AVX512-NEXT: cmovnel %ecx, %r13d
+; AVX512-NEXT: lzcntq %r10, %rcx
+; AVX512-NEXT: lzcntq %r11, %r12
+; AVX512-NEXT: addl $64, %r12d
+; AVX512-NEXT: testq %r10, %r10
+; AVX512-NEXT: cmovnel %ecx, %r12d
+; AVX512-NEXT: subl $-128, %r12d
+; AVX512-NEXT: movq %rdx, %rcx
+; AVX512-NEXT: orq %rbx, %rcx
+; AVX512-NEXT: cmovnel %r13d, %r12d
+; AVX512-NEXT: addl $256, %r12d # imm = 0x100
+; AVX512-NEXT: movq %r9, %rcx
+; AVX512-NEXT: orq %r15, %rcx
+; AVX512-NEXT: orq %r8, %rsi
+; AVX512-NEXT: orq %rcx, %rsi
+; AVX512-NEXT: movq 56(%rdi), %r13
+; AVX512-NEXT: cmovnel %eax, %r12d
+; AVX512-NEXT: lzcntq %r13, %rcx
+; AVX512-NEXT: movq %rbp, %rsi
+; AVX512-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: lzcntq %rbp, %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %r13, %r13
+; AVX512-NEXT: cmovnel %ecx, %eax
+; AVX512-NEXT: lzcntq %r14, %rbp
+; AVX512-NEXT: addl $64, %ebp
+; AVX512-NEXT: movq 40(%rdi), %r8
+; AVX512-NEXT: lzcntq %r8, %rdx
+; AVX512-NEXT: testq %r8, %r8
+; AVX512-NEXT: cmovnel %edx, %ebp
+; AVX512-NEXT: subl $-128, %ebp
+; AVX512-NEXT: movq %rsi, %rdx
+; AVX512-NEXT: orq %r13, %rdx
+; AVX512-NEXT: cmovnel %eax, %ebp
+; AVX512-NEXT: movq 16(%rdi), %r9
+; AVX512-NEXT: lzcntq %r9, %rcx
+; AVX512-NEXT: addl $64, %ecx
+; AVX512-NEXT: movq 24(%rdi), %rdx
+; AVX512-NEXT: lzcntq %rdx, %rax
+; AVX512-NEXT: testq %rdx, %rdx
+; AVX512-NEXT: cmovnel %eax, %ecx
+; AVX512-NEXT: movq 8(%rdi), %rsi
+; AVX512-NEXT: lzcntq (%rdi), %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: lzcntq %rsi, %rdi
+; AVX512-NEXT: testq %rsi, %rsi
+; AVX512-NEXT: cmovnel %edi, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %rdx, %r9
+; AVX512-NEXT: cmovnel %ecx, %eax
+; AVX512-NEXT: orq %r13, %r8
+; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
+; AVX512-NEXT: addl $256, %eax # imm = 0x100
+; AVX512-NEXT: orq %r8, %r14
+; AVX512-NEXT: cmovnel %ebp, %eax
+; AVX512-NEXT: orq %r15, %rbx
+; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
+; AVX512-NEXT: orq %rbx, %r10
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX512-NEXT: orq %rcx, %r11
+; AVX512-NEXT: addl $512, %eax # imm = 0x200
+; AVX512-NEXT: orq %r10, %r11
+; AVX512-NEXT: cmovnel %r12d, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: retq
+ %a0 = load i1024, ptr %p0
+ %cnt = call i1024 @llvm.ctlz.i1024(i1024 %a0, i1 0)
+ %res = trunc i1024 %cnt to i32
+ ret i32 %res
+}
+
+;
+; CTTZ
+;
+
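+; The cttz tests mirror the ctlz ones but scan from the least significant
+; word upward: take the low word's trailing-zero count if that word is
+; non-zero, otherwise 64 plus the next word's count, and so on. Illustrative
+; IR for one step (hypothetical words %lo/%hi):
+;
+;   %c.lo  = call i64 @llvm.cttz.i64(i64 %lo, i1 0)
+;   %c.hi  = call i64 @llvm.cttz.i64(i64 %hi, i1 0)
+;   %c.hi2 = add i64 %c.hi, 64
+;   %lo.nz = icmp ne i64 %lo, 0
+;   %cnt   = select i1 %lo.nz, i64 %c.lo, i64 %c.hi2
+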
+define i32 @test_cttz_i128(i128 %a0) nounwind {
+; SSE-LABEL: test_cttz_i128:
+; SSE: # %bb.0:
+; SSE-NEXT: rep bsfq %rdi, %rcx
+; SSE-NEXT: movl $64, %eax
+; SSE-NEXT: rep bsfq %rsi, %rax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %rdi, %rdi
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: test_cttz_i128:
+; AVX2: # %bb.0:
+; AVX2-NEXT: tzcntq %rdi, %rcx
+; AVX2-NEXT: tzcntq %rsi, %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %rdi, %rdi
+; AVX2-NEXT: cmovnel %ecx, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_cttz_i128:
+; AVX512: # %bb.0:
+; AVX512-NEXT: tzcntq %rdi, %rcx
+; AVX512-NEXT: tzcntq %rsi, %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rdi, %rdi
+; AVX512-NEXT: cmovnel %ecx, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: retq
+ %cnt = call i128 @llvm.cttz.i128(i128 %a0, i1 0)
+ %res = trunc i128 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @load_cttz_i128(ptr %p0) nounwind {
+; SSE-LABEL: load_cttz_i128:
+; SSE: # %bb.0:
+; SSE-NEXT: movq (%rdi), %rcx
+; SSE-NEXT: rep bsfq %rcx, %rdx
+; SSE-NEXT: movl $64, %eax
+; SSE-NEXT: rep bsfq 8(%rdi), %rax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %rcx, %rcx
+; SSE-NEXT: cmovnel %edx, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: load_cttz_i128:
+; AVX2: # %bb.0:
+; AVX2-NEXT: movq (%rdi), %rcx
+; AVX2-NEXT: tzcntq %rcx, %rdx
+; AVX2-NEXT: tzcntq 8(%rdi), %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %rcx, %rcx
+; AVX2-NEXT: cmovnel %edx, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_cttz_i128:
+; AVX512: # %bb.0:
+; AVX512-NEXT: movq (%rdi), %rcx
+; AVX512-NEXT: tzcntq %rcx, %rdx
+; AVX512-NEXT: tzcntq 8(%rdi), %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rcx, %rcx
+; AVX512-NEXT: cmovnel %edx, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: retq
+ %a0 = load i128, ptr %p0
+ %cnt = call i128 @llvm.cttz.i128(i128 %a0, i1 0)
+ %res = trunc i128 %cnt to i32
+ ret i32 %res
+}
+
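+; In the SSE output above, "rep bsfq" is bsf with a redundant rep prefix:
+; CPUs with BMI decode that byte sequence as tzcnt, while older CPUs ignore
+; the prefix and execute plain bsf, so one encoding serves both. The
+; "movl $64, %eax" preload supplies the zero-input result, relying on bsf
+; leaving its destination unchanged for a zero source on the CPUs this
+; pattern targets (tzcnt itself already returns 64 there).
+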
+define i32 @test_cttz_i256(i256 %a0) nounwind {
+; SSE-LABEL: test_cttz_i256:
+; SSE: # %bb.0:
+; SSE-NEXT: rep bsfq %rdi, %rax
+; SSE-NEXT: rep bsfq %rsi, %r8
+; SSE-NEXT: addl $64, %r8d
+; SSE-NEXT: testq %rdi, %rdi
+; SSE-NEXT: cmovnel %eax, %r8d
+; SSE-NEXT: rep bsfq %rdx, %r9
+; SSE-NEXT: movl $64, %eax
+; SSE-NEXT: rep bsfq %rcx, %rax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %rdx, %rdx
+; SSE-NEXT: cmovnel %r9d, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %rsi, %rdi
+; SSE-NEXT: cmovnel %r8d, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: test_cttz_i256:
+; AVX2: # %bb.0:
+; AVX2-NEXT: tzcntq %rdi, %rax
+; AVX2-NEXT: tzcntq %rsi, %r8
+; AVX2-NEXT: addl $64, %r8d
+; AVX2-NEXT: testq %rdi, %rdi
+; AVX2-NEXT: cmovnel %eax, %r8d
+; AVX2-NEXT: tzcntq %rdx, %r9
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq %rcx, %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %rdx, %rdx
+; AVX2-NEXT: cmovnel %r9d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %rsi, %rdi
+; AVX2-NEXT: cmovnel %r8d, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_cttz_i256:
+; AVX512: # %bb.0:
+; AVX512-NEXT: tzcntq %rdi, %rax
+; AVX512-NEXT: tzcntq %rsi, %r8
+; AVX512-NEXT: addl $64, %r8d
+; AVX512-NEXT: testq %rdi, %rdi
+; AVX512-NEXT: cmovnel %eax, %r8d
+; AVX512-NEXT: tzcntq %rdx, %r9
+; AVX512-NEXT: tzcntq %rcx, %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rdx, %rdx
+; AVX512-NEXT: cmovnel %r9d, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %rsi, %rdi
+; AVX512-NEXT: cmovnel %r8d, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: retq
+ %cnt = call i256 @llvm.cttz.i256(i256 %a0, i1 0)
+ %res = trunc i256 %cnt to i32
+ ret i32 %res
+}
+
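+; The "xorl %reg, %reg" ahead of several AVX2 tzcnt/lzcnt instructions above
+; is not needed for correctness: it breaks the false output dependency that
+; tzcnt/lzcnt carry on their destination register on some Intel
+; microarchitectures, keeping the per-word counts independent out of order.
+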
+define i32 @load_cttz_i256(ptr %p0) nounwind {
+; SSE-LABEL: load_cttz_i256:
+; SSE: # %bb.0:
+; SSE-NEXT: movq 16(%rdi), %rcx
+; SSE-NEXT: movq (%rdi), %rdx
+; SSE-NEXT: movq 8(%rdi), %rsi
+; SSE-NEXT: rep bsfq %rdx, %rax
+; SSE-NEXT: rep bsfq %rsi, %r8
+; SSE-NEXT: addl $64, %r8d
+; SSE-NEXT: testq %rdx, %rdx
+; SSE-NEXT: cmovnel %eax, %r8d
+; SSE-NEXT: rep bsfq %rcx, %r9
+; SSE-NEXT: movl $64, %eax
+; SSE-NEXT: rep bsfq 24(%rdi), %rax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %rcx, %rcx
+; SSE-NEXT: cmovnel %r9d, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %rsi, %rdx
+; SSE-NEXT: cmovnel %r8d, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: load_cttz_i256:
+; AVX2: # %bb.0:
+; AVX2-NEXT: movq (%rdi), %rcx
+; AVX2-NEXT: movq 8(%rdi), %rdx
+; AVX2-NEXT: tzcntq %rcx, %rax
+; AVX2-NEXT: tzcntq %rdx, %rsi
+; AVX2-NEXT: addl $64, %esi
+; AVX2-NEXT: testq %rcx, %rcx
+; AVX2-NEXT: cmovnel %eax, %esi
+; AVX2-NEXT: movq 16(%rdi), %r8
+; AVX2-NEXT: tzcntq %r8, %r9
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq 24(%rdi), %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %r8, %r8
+; AVX2-NEXT: cmovnel %r9d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %rdx, %rcx
+; AVX2-NEXT: cmovnel %esi, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_cttz_i256:
+; AVX512: # %bb.0:
+; AVX512-NEXT: movq 16(%rdi), %rcx
+; AVX512-NEXT: movq (%rdi), %rdx
+; AVX512-NEXT: movq 8(%rdi), %rsi
+; AVX512-NEXT: tzcntq %rdx, %rax
+; AVX512-NEXT: tzcntq %rsi, %r8
+; AVX512-NEXT: addl $64, %r8d
+; AVX512-NEXT: testq %rdx, %rdx
+; AVX512-NEXT: cmovnel %eax, %r8d
+; AVX512-NEXT: tzcntq %rcx, %r9
+; AVX512-NEXT: tzcntq 24(%rdi), %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rcx, %rcx
+; AVX512-NEXT: cmovnel %r9d, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %rsi, %rdx
+; AVX512-NEXT: cmovnel %r8d, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: retq
+ %a0 = load i256, ptr %p0
+ %cnt = call i256 @llvm.cttz.i256(i256 %a0, i1 0)
+ %res = trunc i256 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @test_cttz_i512(i512 %a0) nounwind {
+; SSE-LABEL: test_cttz_i512:
+; SSE: # %bb.0:
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: rep bsfq %rdi, %rax
+; SSE-NEXT: rep bsfq %rsi, %r11
+; SSE-NEXT: addl $64, %r11d
+; SSE-NEXT: testq %rdi, %rdi
+; SSE-NEXT: cmovnel %eax, %r11d
+; SSE-NEXT: rep bsfq %rdx, %rax
+; SSE-NEXT: rep bsfq %rcx, %r10
+; SSE-NEXT: addl $64, %r10d
+; SSE-NEXT: testq %rdx, %rdx
+; SSE-NEXT: cmovnel %eax, %r10d
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rbx
+; SSE-NEXT: subl $-128, %r10d
+; SSE-NEXT: movq %rdi, %rax
+; SSE-NEXT: orq %rsi, %rax
+; SSE-NEXT: cmovnel %r11d, %r10d
+; SSE-NEXT: rep bsfq %r8, %rax
+; SSE-NEXT: rep bsfq %r9, %r11
+; SSE-NEXT: addl $64, %r11d
+; SSE-NEXT: testq %r8, %r8
+; SSE-NEXT: cmovnel %eax, %r11d
+; SSE-NEXT: rep bsfq %rbx, %r14
+; SSE-NEXT: movl $64, %eax
+; SSE-NEXT: rep bsfq {{[0-9]+}}(%rsp), %rax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %rbx, %rbx
+; SSE-NEXT: cmovnel %r14d, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %r9, %r8
+; SSE-NEXT: cmovnel %r11d, %eax
+; SSE-NEXT: addl $256, %eax # imm = 0x100
+; SSE-NEXT: orq %rcx, %rsi
+; SSE-NEXT: orq %rdx, %rdi
+; SSE-NEXT: orq %rsi, %rdi
+; SSE-NEXT: cmovnel %r10d, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r14
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: test_cttz_i512:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: tzcntq %rdi, %rax
+; AVX2-NEXT: tzcntq %rsi, %r11
+; AVX2-NEXT: addl $64, %r11d
+; AVX2-NEXT: testq %rdi, %rdi
+; AVX2-NEXT: cmovnel %eax, %r11d
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq %rdx, %rax
+; AVX2-NEXT: tzcntq %rcx, %r10
+; AVX2-NEXT: addl $64, %r10d
+; AVX2-NEXT: testq %rdx, %rdx
+; AVX2-NEXT: cmovnel %eax, %r10d
+; AVX2-NEXT: subl $-128, %r10d
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: orq %rsi, %rax
+; AVX2-NEXT: cmovnel %r11d, %r10d
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r11
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq %r8, %rax
+; AVX2-NEXT: xorl %ebx, %ebx
+; AVX2-NEXT: tzcntq %r9, %rbx
+; AVX2-NEXT: addl $64, %ebx
+; AVX2-NEXT: testq %r8, %r8
+; AVX2-NEXT: cmovnel %eax, %ebx
+; AVX2-NEXT: xorl %r14d, %r14d
+; AVX2-NEXT: tzcntq %r11, %r14
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %r11, %r11
+; AVX2-NEXT: cmovnel %r14d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %r9, %r8
+; AVX2-NEXT: cmovnel %ebx, %eax
+; AVX2-NEXT: addl $256, %eax # imm = 0x100
+; AVX2-NEXT: orq %rcx, %rsi
+; AVX2-NEXT: orq %rdx, %rdi
+; AVX2-NEXT: orq %rsi, %rdi
+; AVX2-NEXT: cmovnel %r10d, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_cttz_i512:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r11
+; AVX512-NEXT: tzcntq %rdi, %rax
+; AVX512-NEXT: tzcntq %rsi, %rbx
+; AVX512-NEXT: addl $64, %ebx
+; AVX512-NEXT: testq %rdi, %rdi
+; AVX512-NEXT: cmovnel %eax, %ebx
+; AVX512-NEXT: tzcntq %rdx, %rax
+; AVX512-NEXT: tzcntq %rcx, %r10
+; AVX512-NEXT: addl $64, %r10d
+; AVX512-NEXT: testq %rdx, %rdx
+; AVX512-NEXT: cmovnel %eax, %r10d
+; AVX512-NEXT: subl $-128, %r10d
+; AVX512-NEXT: movq %rdi, %rax
+; AVX512-NEXT: orq %rsi, %rax
+; AVX512-NEXT: cmovnel %ebx, %r10d
+; AVX512-NEXT: tzcntq %r8, %rax
+; AVX512-NEXT: tzcntq %r9, %rbx
+; AVX512-NEXT: addl $64, %ebx
+; AVX512-NEXT: testq %r8, %r8
+; AVX512-NEXT: cmovnel %eax, %ebx
+; AVX512-NEXT: tzcntq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: tzcntq %r11, %r14
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %r11, %r11
+; AVX512-NEXT: cmovnel %r14d, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %r9, %r8
+; AVX512-NEXT: cmovnel %ebx, %eax
+; AVX512-NEXT: addl $256, %eax # imm = 0x100
+; AVX512-NEXT: orq %rcx, %rsi
+; AVX512-NEXT: orq %rdx, %rdi
+; AVX512-NEXT: orq %rsi, %rdi
+; AVX512-NEXT: cmovnel %r10d, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: retq
+ %cnt = call i512 @llvm.cttz.i512(i512 %a0, i1 0)
+ %res = trunc i512 %cnt to i32
+ ret i32 %res
+}
+
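+; The "# kill: def $eax killed $eax killed $rax" lines above are comments
+; the assembly printer emits for register-liveness markers (here, the trunc
+; of the 64-bit count in %rax to the returned i32 in %eax); they are not
+; instructions and assemble to nothing.
+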
+define i32 @load_cttz_i512(ptr %p0) nounwind {
+; SSE-LABEL: load_cttz_i512:
+; SSE: # %bb.0:
+; SSE-NEXT: pushq %r15
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: movq 48(%rdi), %r10
+; SSE-NEXT: movq 40(%rdi), %r9
+; SSE-NEXT: movq 24(%rdi), %r8
+; SSE-NEXT: movq 16(%rdi), %rdx
+; SSE-NEXT: movq (%rdi), %rcx
+; SSE-NEXT: movq 8(%rdi), %rsi
+; SSE-NEXT: rep bsfq %rcx, %rax
+; SSE-NEXT: rep bsfq %rsi, %rbx
+; SSE-NEXT: addl $64, %ebx
+; SSE-NEXT: testq %rcx, %rcx
+; SSE-NEXT: cmovnel %eax, %ebx
+; SSE-NEXT: rep bsfq %rdx, %rax
+; SSE-NEXT: rep bsfq %r8, %r11
+; SSE-NEXT: addl $64, %r11d
+; SSE-NEXT: testq %rdx, %rdx
+; SSE-NEXT: cmovnel %eax, %r11d
+; SSE-NEXT: movq 32(%rdi), %r14
+; SSE-NEXT: subl $-128, %r11d
+; SSE-NEXT: movq %rcx, %rax
+; SSE-NEXT: orq %rsi, %rax
+; SSE-NEXT: cmovnel %ebx, %r11d
+; SSE-NEXT: rep bsfq %r14, %rax
+; SSE-NEXT: rep bsfq %r9, %rbx
+; SSE-NEXT: addl $64, %ebx
+; SSE-NEXT: testq %r14, %r14
+; SSE-NEXT: cmovnel %eax, %ebx
+; SSE-NEXT: rep bsfq %r10, %r15
+; SSE-NEXT: movl $64, %eax
+; SSE-NEXT: rep bsfq 56(%rdi), %rax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %r10, %r10
+; SSE-NEXT: cmovnel %r15d, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %r9, %r14
+; SSE-NEXT: cmovnel %ebx, %eax
+; SSE-NEXT: addl $256, %eax # imm = 0x100
+; SSE-NEXT: orq %r8, %rsi
+; SSE-NEXT: orq %rdx, %rcx
+; SSE-NEXT: orq %rsi, %rcx
+; SSE-NEXT: cmovnel %r11d, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r14
+; SSE-NEXT: popq %r15
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: load_cttz_i512:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: movq 48(%rdi), %r10
+; AVX2-NEXT: movq 40(%rdi), %r9
+; AVX2-NEXT: movq 24(%rdi), %r8
+; AVX2-NEXT: movq 16(%rdi), %rdx
+; AVX2-NEXT: movq (%rdi), %rcx
+; AVX2-NEXT: movq 8(%rdi), %rsi
+; AVX2-NEXT: tzcntq %rcx, %rax
+; AVX2-NEXT: xorl %ebx, %ebx
+; AVX2-NEXT: tzcntq %rsi, %rbx
+; AVX2-NEXT: addl $64, %ebx
+; AVX2-NEXT: testq %rcx, %rcx
+; AVX2-NEXT: cmovnel %eax, %ebx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq %rdx, %rax
+; AVX2-NEXT: tzcntq %r8, %r11
+; AVX2-NEXT: addl $64, %r11d
+; AVX2-NEXT: testq %rdx, %rdx
+; AVX2-NEXT: cmovnel %eax, %r11d
+; AVX2-NEXT: subl $-128, %r11d
+; AVX2-NEXT: movq %rcx, %rax
+; AVX2-NEXT: orq %rsi, %rax
+; AVX2-NEXT: cmovnel %ebx, %r11d
+; AVX2-NEXT: movq 32(%rdi), %rbx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq %rbx, %rax
+; AVX2-NEXT: xorl %r14d, %r14d
+; AVX2-NEXT: tzcntq %r9, %r14
+; AVX2-NEXT: addl $64, %r14d
+; AVX2-NEXT: testq %rbx, %rbx
+; AVX2-NEXT: cmovnel %eax, %r14d
+; AVX2-NEXT: xorl %r15d, %r15d
+; AVX2-NEXT: tzcntq %r10, %r15
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq 56(%rdi), %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %r10, %r10
+; AVX2-NEXT: cmovnel %r15d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %r9, %rbx
+; AVX2-NEXT: cmovnel %r14d, %eax
+; AVX2-NEXT: addl $256, %eax # imm = 0x100
+; AVX2-NEXT: orq %r8, %rsi
+; AVX2-NEXT: orq %rdx, %rcx
+; AVX2-NEXT: orq %rsi, %rcx
+; AVX2-NEXT: cmovnel %r11d, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_cttz_i512:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: movq 48(%rdi), %r11
+; AVX512-NEXT: movq 40(%rdi), %r9
+; AVX512-NEXT: movq 32(%rdi), %r10
+; AVX512-NEXT: movq 24(%rdi), %r8
+; AVX512-NEXT: movq 16(%rdi), %rdx
+; AVX512-NEXT: movq (%rdi), %rcx
+; AVX512-NEXT: movq 8(%rdi), %rsi
+; AVX512-NEXT: tzcntq %rcx, %rax
+; AVX512-NEXT: tzcntq %rsi, %r14
+; AVX512-NEXT: addl $64, %r14d
+; AVX512-NEXT: testq %rcx, %rcx
+; AVX512-NEXT: cmovnel %eax, %r14d
+; AVX512-NEXT: tzcntq %rdx, %rax
+; AVX512-NEXT: tzcntq %r8, %rbx
+; AVX512-NEXT: addl $64, %ebx
+; AVX512-NEXT: testq %rdx, %rdx
+; AVX512-NEXT: cmovnel %eax, %ebx
+; AVX512-NEXT: subl $-128, %ebx
+; AVX512-NEXT: movq %rcx, %rax
+; AVX512-NEXT: orq %rsi, %rax
+; AVX512-NEXT: cmovnel %r14d, %ebx
+; AVX512-NEXT: tzcntq %r10, %rax
+; AVX512-NEXT: tzcntq %r9, %r14
+; AVX512-NEXT: addl $64, %r14d
+; AVX512-NEXT: testq %r10, %r10
+; AVX512-NEXT: cmovnel %eax, %r14d
+; AVX512-NEXT: tzcntq 56(%rdi), %rax
+; AVX512-NEXT: tzcntq %r11, %rdi
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %r11, %r11
+; AVX512-NEXT: cmovnel %edi, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %r9, %r10
+; AVX512-NEXT: cmovnel %r14d, %eax
+; AVX512-NEXT: addl $256, %eax # imm = 0x100
+; AVX512-NEXT: orq %r8, %rsi
+; AVX512-NEXT: orq %rdx, %rcx
+; AVX512-NEXT: orq %rsi, %rcx
+; AVX512-NEXT: cmovnel %ebx, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: retq
+ %a0 = load i512, ptr %p0
+ %cnt = call i512 @llvm.cttz.i512(i512 %a0, i1 0)
+ %res = trunc i512 %cnt to i32
+ ret i32 %res
+}
+
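+; The recurring "subl $-128" is how the +128 bias gets folded: -128 fits in
+; a sign-extended 8-bit immediate while +128 does not, so subtracting -128
+; encodes three bytes shorter than "addl $128" (the +256/+512 biases have no
+; such short form, hence the plain addl with a 32-bit immediate).
+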
+define i32 @test_cttz_i1024(i1024 %a0) nounwind {
+; SSE-LABEL: test_cttz_i1024:
+; SSE: # %bb.0:
+; SSE-NEXT: pushq %rbp
+; SSE-NEXT: pushq %r15
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %r13
+; SSE-NEXT: pushq %r12
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: movq %r9, %r13
+; SSE-NEXT: movq %r8, %r14
+; SSE-NEXT: movq %rcx, %rbx
+; SSE-NEXT: movq %rdx, %r10
+; SSE-NEXT: movq %rsi, %r9
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rsi
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r11
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; SSE-NEXT: rep bsfq %rdi, %rax
+; SSE-NEXT: rep bsfq %r9, %r15
+; SSE-NEXT: addl $64, %r15d
+; SSE-NEXT: testq %rdi, %rdi
+; SSE-NEXT: cmovnel %eax, %r15d
+; SSE-NEXT: rep bsfq %r10, %r12
+; SSE-NEXT: rep bsfq %rcx, %rax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %r10, %r10
+; SSE-NEXT: cmovnel %r12d, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: movq %rdi, %r12
+; SSE-NEXT: orq %r9, %r12
+; SSE-NEXT: cmovnel %r15d, %eax
+; SSE-NEXT: rep bsfq %r8, %r15
+; SSE-NEXT: movq %r13, %rcx
+; SSE-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: rep bsfq %r13, %r13
+; SSE-NEXT: addl $64, %r13d
+; SSE-NEXT: testq %r8, %r8
+; SSE-NEXT: cmovnel %r15d, %r13d
+; SSE-NEXT: rep bsfq %rdx, %r12
+; SSE-NEXT: rep bsfq {{[0-9]+}}(%rsp), %r15
+; SSE-NEXT: addl $64, %r15d
+; SSE-NEXT: testq %rdx, %rdx
+; SSE-NEXT: cmovnel %r12d, %r15d
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r12
+; SSE-NEXT: subl $-128, %r15d
+; SSE-NEXT: movq %r8, %rbp
+; SSE-NEXT: orq %rcx, %rbp
+; SSE-NEXT: cmovnel %r13d, %r15d
+; SSE-NEXT: addl $256, %r15d # imm = 0x100
+; SSE-NEXT: movq %r9, %r13
+; SSE-NEXT: orq %rbx, %r13
+; SSE-NEXT: movq %rdi, %rbp
+; SSE-NEXT: orq %r10, %rbp
+; SSE-NEXT: orq %r13, %rbp
+; SSE-NEXT: cmovnel %eax, %r15d
+; SSE-NEXT: rep bsfq %r11, %r13
+; SSE-NEXT: rep bsfq %r12, %rax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %r11, %r11
+; SSE-NEXT: cmovnel %r13d, %eax
+; SSE-NEXT: rep bsfq {{[0-9]+}}(%rsp), %r13
+; SSE-NEXT: addl $64, %r13d
+; SSE-NEXT: rep bsfq %rsi, %rcx
+; SSE-NEXT: testq %rsi, %rsi
+; SSE-NEXT: cmovnel %ecx, %r13d
+; SSE-NEXT: subl $-128, %r13d
+; SSE-NEXT: movq %r11, %rcx
+; SSE-NEXT: orq %r12, %rcx
+; SSE-NEXT: cmovnel %eax, %r13d
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rbp
+; SSE-NEXT: rep bsfq %rbp, %rcx
+; SSE-NEXT: addl $64, %ecx
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; SSE-NEXT: rep bsfq %rdx, %rax
+; SSE-NEXT: testq %rdx, %rdx
+; SSE-NEXT: cmovnel %eax, %ecx
+; SSE-NEXT: movl $64, %eax
+; SSE-NEXT: rep bsfq {{[0-9]+}}(%rsp), %rax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; SSE-NEXT: rep bsfq %r8, %rsi
+; SSE-NEXT: testq %r8, %r8
+; SSE-NEXT: cmovnel %esi, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %rbp, %rdx
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r12
+; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r11
+; SSE-NEXT: addl $256, %eax # imm = 0x100
+; SSE-NEXT: orq %r12, %r11
+; SSE-NEXT: cmovnel %r13d, %eax
+; SSE-NEXT: orq {{[0-9]+}}(%rsp), %rbx
+; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
+; SSE-NEXT: orq %rbx, %r9
+; SSE-NEXT: orq {{[0-9]+}}(%rsp), %r10
+; SSE-NEXT: orq %r14, %rdi
+; SSE-NEXT: orq %r10, %rdi
+; SSE-NEXT: addl $512, %eax # imm = 0x200
+; SSE-NEXT: orq %r9, %rdi
+; SSE-NEXT: cmovnel %r15d, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r12
+; SSE-NEXT: popq %r13
+; SSE-NEXT: popq %r14
+; SSE-NEXT: popq %r15
+; SSE-NEXT: popq %rbp
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: test_cttz_i1024:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: movq %r9, %rbx
+; AVX2-NEXT: movq %r8, %r14
+; AVX2-NEXT: movq %rcx, %r11
+; AVX2-NEXT: movq %rdx, %r10
+; AVX2-NEXT: movq %rsi, %r9
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rsi
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; AVX2-NEXT: tzcntq %rdi, %rax
+; AVX2-NEXT: xorl %r15d, %r15d
+; AVX2-NEXT: tzcntq %r9, %r15
+; AVX2-NEXT: addl $64, %r15d
+; AVX2-NEXT: testq %rdi, %rdi
+; AVX2-NEXT: cmovnel %eax, %r15d
+; AVX2-NEXT: xorl %r12d, %r12d
+; AVX2-NEXT: tzcntq %r10, %r12
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq %r11, %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %r10, %r10
+; AVX2-NEXT: cmovnel %r12d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: movq %rdi, %r12
+; AVX2-NEXT: orq %r9, %r12
+; AVX2-NEXT: cmovnel %r15d, %eax
+; AVX2-NEXT: xorl %r15d, %r15d
+; AVX2-NEXT: tzcntq %r14, %r15
+; AVX2-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: xorl %r12d, %r12d
+; AVX2-NEXT: tzcntq %rbx, %r12
+; AVX2-NEXT: addl $64, %r12d
+; AVX2-NEXT: testq %r14, %r14
+; AVX2-NEXT: cmovnel %r15d, %r12d
+; AVX2-NEXT: xorl %r13d, %r13d
+; AVX2-NEXT: tzcntq %rcx, %r13
+; AVX2-NEXT: xorl %r15d, %r15d
+; AVX2-NEXT: tzcntq %rdx, %r15
+; AVX2-NEXT: addl $64, %r15d
+; AVX2-NEXT: testq %rcx, %rcx
+; AVX2-NEXT: cmovnel %r13d, %r15d
+; AVX2-NEXT: subl $-128, %r15d
+; AVX2-NEXT: movq %r14, %r13
+; AVX2-NEXT: orq %rbx, %r13
+; AVX2-NEXT: cmovnel %r12d, %r15d
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r12
+; AVX2-NEXT: addl $256, %r15d # imm = 0x100
+; AVX2-NEXT: movq %r9, %r13
+; AVX2-NEXT: orq %r11, %r13
+; AVX2-NEXT: movq %rdi, %rbp
+; AVX2-NEXT: orq %r10, %rbp
+; AVX2-NEXT: orq %r13, %rbp
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r13
+; AVX2-NEXT: cmovnel %eax, %r15d
+; AVX2-NEXT: xorl %ebp, %ebp
+; AVX2-NEXT: tzcntq %r12, %rbp
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq %r13, %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %r12, %r12
+; AVX2-NEXT: cmovnel %ebp, %eax
+; AVX2-NEXT: xorl %ebp, %ebp
+; AVX2-NEXT: tzcntq %r8, %rbp
+; AVX2-NEXT: addl $64, %ebp
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: tzcntq %rsi, %rcx
+; AVX2-NEXT: testq %rsi, %rsi
+; AVX2-NEXT: cmovnel %ecx, %ebp
+; AVX2-NEXT: subl $-128, %ebp
+; AVX2-NEXT: movq %r12, %rcx
+; AVX2-NEXT: orq %r13, %rcx
+; AVX2-NEXT: cmovnel %eax, %ebp
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rbx
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: tzcntq %rbx, %rcx
+; AVX2-NEXT: addl $64, %ecx
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq %rdx, %rax
+; AVX2-NEXT: testq %rdx, %rdx
+; AVX2-NEXT: cmovnel %eax, %ecx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq {{[0-9]+}}(%rsp), %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; AVX2-NEXT: tzcntq %r8, %rsi
+; AVX2-NEXT: testq %r8, %r8
+; AVX2-NEXT: cmovnel %esi, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %rbx, %rdx
+; AVX2-NEXT: cmovnel %ecx, %eax
+; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r13
+; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r12
+; AVX2-NEXT: addl $256, %eax # imm = 0x100
+; AVX2-NEXT: orq %r13, %r12
+; AVX2-NEXT: cmovnel %ebp, %eax
+; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r11
+; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
+; AVX2-NEXT: orq %r11, %r9
+; AVX2-NEXT: orq {{[0-9]+}}(%rsp), %r10
+; AVX2-NEXT: orq %r14, %rdi
+; AVX2-NEXT: orq %r10, %rdi
+; AVX2-NEXT: addl $512, %eax # imm = 0x200
+; AVX2-NEXT: orq %r9, %rdi
+; AVX2-NEXT: cmovnel %r15d, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_cttz_i1024:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: pushq %r13
+; AVX512-NEXT: pushq %r12
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: movq %r9, %r14
+; AVX512-NEXT: movq %r8, %r15
+; AVX512-NEXT: movq %rcx, %r11
+; AVX512-NEXT: movq %rdx, %r10
+; AVX512-NEXT: movq %rsi, %r9
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rsi
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rbx
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx
+; AVX512-NEXT: tzcntq %rdi, %rax
+; AVX512-NEXT: tzcntq %r9, %r12
+; AVX512-NEXT: addl $64, %r12d
+; AVX512-NEXT: testq %rdi, %rdi
+; AVX512-NEXT: cmovnel %eax, %r12d
+; AVX512-NEXT: tzcntq %rdx, %r13
+; AVX512-NEXT: tzcntq %r11, %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rdx, %rdx
+; AVX512-NEXT: cmovnel %r13d, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: movq %rdi, %r13
+; AVX512-NEXT: orq %r9, %r13
+; AVX512-NEXT: cmovnel %r12d, %eax
+; AVX512-NEXT: tzcntq %r8, %r12
+; AVX512-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: tzcntq %r14, %r13
+; AVX512-NEXT: addl $64, %r13d
+; AVX512-NEXT: testq %r8, %r8
+; AVX512-NEXT: cmovnel %r12d, %r13d
+; AVX512-NEXT: tzcntq %rcx, %rbp
+; AVX512-NEXT: tzcntq {{[0-9]+}}(%rsp), %r12
+; AVX512-NEXT: addl $64, %r12d
+; AVX512-NEXT: testq %rcx, %rcx
+; AVX512-NEXT: cmovnel %ebp, %r12d
+; AVX512-NEXT: subl $-128, %r12d
+; AVX512-NEXT: movq %r8, %rbp
+; AVX512-NEXT: orq %r14, %rbp
+; AVX512-NEXT: cmovnel %r13d, %r12d
+; AVX512-NEXT: addl $256, %r12d # imm = 0x100
+; AVX512-NEXT: movq %r9, %r13
+; AVX512-NEXT: orq %r11, %r13
+; AVX512-NEXT: movq %rdi, %rbp
+; AVX512-NEXT: orq %rdx, %rbp
+; AVX512-NEXT: orq %r13, %rbp
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r13
+; AVX512-NEXT: cmovnel %eax, %r12d
+; AVX512-NEXT: tzcntq %rbx, %rbp
+; AVX512-NEXT: tzcntq %r13, %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rbx, %rbx
+; AVX512-NEXT: cmovnel %ebp, %eax
+; AVX512-NEXT: tzcntq {{[0-9]+}}(%rsp), %rbp
+; AVX512-NEXT: addl $64, %ebp
+; AVX512-NEXT: tzcntq %rsi, %rcx
+; AVX512-NEXT: testq %rsi, %rsi
+; AVX512-NEXT: cmovnel %ecx, %ebp
+; AVX512-NEXT: subl $-128, %ebp
+; AVX512-NEXT: movq %rbx, %rcx
+; AVX512-NEXT: orq %r13, %rcx
+; AVX512-NEXT: cmovnel %eax, %ebp
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r14
+; AVX512-NEXT: tzcntq %r14, %rcx
+; AVX512-NEXT: addl $64, %ecx
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx
+; AVX512-NEXT: tzcntq %rdx, %rax
+; AVX512-NEXT: testq %rdx, %rdx
+; AVX512-NEXT: cmovnel %eax, %ecx
+; AVX512-NEXT: tzcntq {{[0-9]+}}(%rsp), %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r8
+; AVX512-NEXT: tzcntq %r8, %rsi
+; AVX512-NEXT: testq %r8, %r8
+; AVX512-NEXT: cmovnel %esi, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %r14, %rdx
+; AVX512-NEXT: cmovnel %ecx, %eax
+; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %r13
+; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %rbx
+; AVX512-NEXT: addl $256, %eax # imm = 0x100
+; AVX512-NEXT: orq %r13, %rbx
+; AVX512-NEXT: cmovnel %ebp, %eax
+; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %r11
+; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
+; AVX512-NEXT: orq %r11, %r9
+; AVX512-NEXT: orq {{[0-9]+}}(%rsp), %r10
+; AVX512-NEXT: orq %r15, %rdi
+; AVX512-NEXT: orq %r10, %rdi
+; AVX512-NEXT: addl $512, %eax # imm = 0x200
+; AVX512-NEXT: orq %r9, %rdi
+; AVX512-NEXT: cmovnel %r12d, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: retq
+ %cnt = call i1024 @llvm.cttz.i1024(i1024 %a0, i1 0)
+ %res = trunc i1024 %cnt to i32
+ ret i32 %res
+}
+
+define i32 @load_cttz_i1024(ptr %p0) nounwind {
+; SSE-LABEL: load_cttz_i1024:
+; SSE: # %bb.0:
+; SSE-NEXT: pushq %rbp
+; SSE-NEXT: pushq %r15
+; SSE-NEXT: pushq %r14
+; SSE-NEXT: pushq %r13
+; SSE-NEXT: pushq %r12
+; SSE-NEXT: pushq %rbx
+; SSE-NEXT: movq 88(%rdi), %r10
+; SSE-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: movq 56(%rdi), %rcx
+; SSE-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: movq 40(%rdi), %rsi
+; SSE-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: movq 24(%rdi), %r9
+; SSE-NEXT: movq 16(%rdi), %r15
+; SSE-NEXT: movq (%rdi), %r8
+; SSE-NEXT: movq 8(%rdi), %r11
+; SSE-NEXT: rep bsfq %r8, %rax
+; SSE-NEXT: rep bsfq %r11, %rdx
+; SSE-NEXT: addl $64, %edx
+; SSE-NEXT: testq %r8, %r8
+; SSE-NEXT: cmovnel %eax, %edx
+; SSE-NEXT: rep bsfq %r15, %rbx
+; SSE-NEXT: rep bsfq %r9, %rax
+; SSE-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %r15, %r15
+; SSE-NEXT: cmovnel %ebx, %eax
+; SSE-NEXT: movq 32(%rdi), %rbx
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: movq %r8, %r14
+; SSE-NEXT: orq %r11, %r14
+; SSE-NEXT: cmovnel %edx, %eax
+; SSE-NEXT: rep bsfq %rbx, %rdx
+; SSE-NEXT: rep bsfq %rsi, %r12
+; SSE-NEXT: addl $64, %r12d
+; SSE-NEXT: testq %rbx, %rbx
+; SSE-NEXT: cmovnel %edx, %r12d
+; SSE-NEXT: movq 48(%rdi), %r13
+; SSE-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: rep bsfq %r13, %rdx
+; SSE-NEXT: rep bsfq %rcx, %r14
+; SSE-NEXT: addl $64, %r14d
+; SSE-NEXT: testq %r13, %r13
+; SSE-NEXT: cmovnel %edx, %r14d
+; SSE-NEXT: subl $-128, %r14d
+; SSE-NEXT: movq %rbx, %rdx
+; SSE-NEXT: orq %rsi, %rdx
+; SSE-NEXT: cmovnel %r12d, %r14d
+; SSE-NEXT: movq 72(%rdi), %r12
+; SSE-NEXT: addl $256, %r14d # imm = 0x100
+; SSE-NEXT: movq %r11, %rdx
+; SSE-NEXT: orq %r9, %rdx
+; SSE-NEXT: movq %r8, %r13
+; SSE-NEXT: orq %r15, %r13
+; SSE-NEXT: orq %rdx, %r13
+; SSE-NEXT: movq 64(%rdi), %r13
+; SSE-NEXT: cmovnel %eax, %r14d
+; SSE-NEXT: rep bsfq %r13, %rdx
+; SSE-NEXT: rep bsfq %r12, %rax
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: testq %r13, %r13
+; SSE-NEXT: cmovnel %edx, %eax
+; SSE-NEXT: rep bsfq %r10, %rbp
+; SSE-NEXT: addl $64, %ebp
+; SSE-NEXT: movq 80(%rdi), %r10
+; SSE-NEXT: rep bsfq %r10, %rcx
+; SSE-NEXT: testq %r10, %r10
+; SSE-NEXT: cmovnel %ecx, %ebp
+; SSE-NEXT: subl $-128, %ebp
+; SSE-NEXT: movq %r13, %rcx
+; SSE-NEXT: orq %r12, %rcx
+; SSE-NEXT: cmovnel %eax, %ebp
+; SSE-NEXT: movq 104(%rdi), %r9
+; SSE-NEXT: rep bsfq %r9, %rcx
+; SSE-NEXT: addl $64, %ecx
+; SSE-NEXT: movq 96(%rdi), %rdx
+; SSE-NEXT: rep bsfq %rdx, %rax
+; SSE-NEXT: testq %rdx, %rdx
+; SSE-NEXT: cmovnel %eax, %ecx
+; SSE-NEXT: movl $64, %eax
+; SSE-NEXT: rep bsfq 120(%rdi), %rax
+; SSE-NEXT: movq 112(%rdi), %rdi
+; SSE-NEXT: addl $64, %eax
+; SSE-NEXT: rep bsfq %rdi, %rsi
+; SSE-NEXT: testq %rdi, %rdi
+; SSE-NEXT: cmovnel %esi, %eax
+; SSE-NEXT: subl $-128, %eax
+; SSE-NEXT: orq %r9, %rdx
+; SSE-NEXT: cmovnel %ecx, %eax
+; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
+; SSE-NEXT: orq %r10, %r13
+; SSE-NEXT: addl $256, %eax # imm = 0x100
+; SSE-NEXT: orq %r12, %r13
+; SSE-NEXT: cmovnel %ebp, %eax
+; SSE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; SSE-NEXT: orq %rcx, %r11
+; SSE-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
+; SSE-NEXT: orq %rbx, %r8
+; SSE-NEXT: orq %r15, %r8
+; SSE-NEXT: addl $512, %eax # imm = 0x200
+; SSE-NEXT: orq %r11, %r8
+; SSE-NEXT: cmovnel %r14d, %eax
+; SSE-NEXT: # kill: def $eax killed $eax killed $rax
+; SSE-NEXT: popq %rbx
+; SSE-NEXT: popq %r12
+; SSE-NEXT: popq %r13
+; SSE-NEXT: popq %r14
+; SSE-NEXT: popq %r15
+; SSE-NEXT: popq %rbp
+; SSE-NEXT: retq
+;
+; AVX2-LABEL: load_cttz_i1024:
+; AVX2: # %bb.0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: pushq %r15
+; AVX2-NEXT: pushq %r14
+; AVX2-NEXT: pushq %r13
+; AVX2-NEXT: pushq %r12
+; AVX2-NEXT: pushq %rbx
+; AVX2-NEXT: movq 72(%rdi), %r14
+; AVX2-NEXT: movq 64(%rdi), %r15
+; AVX2-NEXT: movq 56(%rdi), %r9
+; AVX2-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq 48(%rdi), %rcx
+; AVX2-NEXT: movq 40(%rdi), %r10
+; AVX2-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: movq 32(%rdi), %rsi
+; AVX2-NEXT: movq 24(%rdi), %rbp
+; AVX2-NEXT: movq 16(%rdi), %rbx
+; AVX2-NEXT: movq (%rdi), %r8
+; AVX2-NEXT: movq 8(%rdi), %r11
+; AVX2-NEXT: tzcntq %r8, %rax
+; AVX2-NEXT: tzcntq %r11, %rdx
+; AVX2-NEXT: addl $64, %edx
+; AVX2-NEXT: testq %r8, %r8
+; AVX2-NEXT: cmovnel %eax, %edx
+; AVX2-NEXT: xorl %r12d, %r12d
+; AVX2-NEXT: tzcntq %rbx, %r12
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq %rbp, %rax
+; AVX2-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %rbx, %rbx
+; AVX2-NEXT: cmovnel %r12d, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: movq %r8, %r12
+; AVX2-NEXT: orq %r11, %r12
+; AVX2-NEXT: cmovnel %edx, %eax
+; AVX2-NEXT: xorl %edx, %edx
+; AVX2-NEXT: tzcntq %rsi, %rdx
+; AVX2-NEXT: xorl %r13d, %r13d
+; AVX2-NEXT: tzcntq %r10, %r13
+; AVX2-NEXT: addl $64, %r13d
+; AVX2-NEXT: testq %rsi, %rsi
+; AVX2-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: cmovnel %edx, %r13d
+; AVX2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX2-NEXT: xorl %edx, %edx
+; AVX2-NEXT: tzcntq %rcx, %rdx
+; AVX2-NEXT: xorl %r12d, %r12d
+; AVX2-NEXT: tzcntq %r9, %r12
+; AVX2-NEXT: addl $64, %r12d
+; AVX2-NEXT: testq %rcx, %rcx
+; AVX2-NEXT: cmovnel %edx, %r12d
+; AVX2-NEXT: subl $-128, %r12d
+; AVX2-NEXT: movq %rsi, %rdx
+; AVX2-NEXT: orq %r10, %rdx
+; AVX2-NEXT: cmovnel %r13d, %r12d
+; AVX2-NEXT: addl $256, %r12d # imm = 0x100
+; AVX2-NEXT: movq %r11, %rdx
+; AVX2-NEXT: orq %rbp, %rdx
+; AVX2-NEXT: movq %r8, %r13
+; AVX2-NEXT: orq %rbx, %r13
+; AVX2-NEXT: orq %rdx, %r13
+; AVX2-NEXT: cmovnel %eax, %r12d
+; AVX2-NEXT: xorl %edx, %edx
+; AVX2-NEXT: tzcntq %r15, %rdx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq %r14, %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: testq %r15, %r15
+; AVX2-NEXT: cmovnel %edx, %eax
+; AVX2-NEXT: movq 88(%rdi), %rbp
+; AVX2-NEXT: xorl %r13d, %r13d
+; AVX2-NEXT: tzcntq %rbp, %r13
+; AVX2-NEXT: addl $64, %r13d
+; AVX2-NEXT: movq 80(%rdi), %r10
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: tzcntq %r10, %rcx
+; AVX2-NEXT: testq %r10, %r10
+; AVX2-NEXT: cmovnel %ecx, %r13d
+; AVX2-NEXT: subl $-128, %r13d
+; AVX2-NEXT: movq %r15, %rcx
+; AVX2-NEXT: orq %r14, %rcx
+; AVX2-NEXT: cmovnel %eax, %r13d
+; AVX2-NEXT: movq 104(%rdi), %r9
+; AVX2-NEXT: xorl %ecx, %ecx
+; AVX2-NEXT: tzcntq %r9, %rcx
+; AVX2-NEXT: addl $64, %ecx
+; AVX2-NEXT: movq 96(%rdi), %rdx
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq %rdx, %rax
+; AVX2-NEXT: testq %rdx, %rdx
+; AVX2-NEXT: cmovnel %eax, %ecx
+; AVX2-NEXT: movq 112(%rdi), %rsi
+; AVX2-NEXT: xorl %eax, %eax
+; AVX2-NEXT: tzcntq 120(%rdi), %rax
+; AVX2-NEXT: addl $64, %eax
+; AVX2-NEXT: tzcntq %rsi, %rdi
+; AVX2-NEXT: testq %rsi, %rsi
+; AVX2-NEXT: cmovnel %edi, %eax
+; AVX2-NEXT: subl $-128, %eax
+; AVX2-NEXT: orq %r9, %rdx
+; AVX2-NEXT: cmovnel %ecx, %eax
+; AVX2-NEXT: orq %rbp, %r14
+; AVX2-NEXT: orq %r10, %r15
+; AVX2-NEXT: addl $256, %eax # imm = 0x100
+; AVX2-NEXT: orq %r14, %r15
+; AVX2-NEXT: cmovnel %r13d, %eax
+; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX2-NEXT: orq %rcx, %r11
+; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; AVX2-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
+; AVX2-NEXT: orq %rbx, %r8
+; AVX2-NEXT: addl $512, %eax # imm = 0x200
+; AVX2-NEXT: orq %r11, %r8
+; AVX2-NEXT: cmovnel %r12d, %eax
+; AVX2-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX2-NEXT: popq %rbx
+; AVX2-NEXT: popq %r12
+; AVX2-NEXT: popq %r13
+; AVX2-NEXT: popq %r14
+; AVX2-NEXT: popq %r15
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: load_cttz_i1024:
+; AVX512: # %bb.0:
+; AVX512-NEXT: pushq %rbp
+; AVX512-NEXT: pushq %r15
+; AVX512-NEXT: pushq %r14
+; AVX512-NEXT: pushq %r13
+; AVX512-NEXT: pushq %r12
+; AVX512-NEXT: pushq %rbx
+; AVX512-NEXT: movq 88(%rdi), %rbp
+; AVX512-NEXT: movq 72(%rdi), %r15
+; AVX512-NEXT: movq 56(%rdi), %r9
+; AVX512-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq 48(%rdi), %rcx
+; AVX512-NEXT: movq 40(%rdi), %r10
+; AVX512-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: movq 32(%rdi), %rsi
+; AVX512-NEXT: movq 24(%rdi), %r14
+; AVX512-NEXT: movq 16(%rdi), %rbx
+; AVX512-NEXT: movq (%rdi), %r8
+; AVX512-NEXT: movq 8(%rdi), %r11
+; AVX512-NEXT: tzcntq %r8, %rax
+; AVX512-NEXT: tzcntq %r11, %rdx
+; AVX512-NEXT: addl $64, %edx
+; AVX512-NEXT: testq %r8, %r8
+; AVX512-NEXT: cmovnel %eax, %edx
+; AVX512-NEXT: tzcntq %rbx, %r12
+; AVX512-NEXT: tzcntq %r14, %rax
+; AVX512-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %rbx, %rbx
+; AVX512-NEXT: cmovnel %r12d, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: movq %r8, %r12
+; AVX512-NEXT: orq %r11, %r12
+; AVX512-NEXT: cmovnel %edx, %eax
+; AVX512-NEXT: tzcntq %rsi, %rdx
+; AVX512-NEXT: tzcntq %r10, %r13
+; AVX512-NEXT: addl $64, %r13d
+; AVX512-NEXT: testq %rsi, %rsi
+; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: cmovnel %edx, %r13d
+; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX512-NEXT: tzcntq %rcx, %rdx
+; AVX512-NEXT: tzcntq %r9, %r12
+; AVX512-NEXT: addl $64, %r12d
+; AVX512-NEXT: testq %rcx, %rcx
+; AVX512-NEXT: cmovnel %edx, %r12d
+; AVX512-NEXT: subl $-128, %r12d
+; AVX512-NEXT: movq %rsi, %rdx
+; AVX512-NEXT: orq %r10, %rdx
+; AVX512-NEXT: cmovnel %r13d, %r12d
+; AVX512-NEXT: addl $256, %r12d # imm = 0x100
+; AVX512-NEXT: movq %r11, %rdx
+; AVX512-NEXT: orq %r14, %rdx
+; AVX512-NEXT: movq %r8, %r13
+; AVX512-NEXT: orq %rbx, %r13
+; AVX512-NEXT: orq %rdx, %r13
+; AVX512-NEXT: movq 64(%rdi), %r13
+; AVX512-NEXT: cmovnel %eax, %r12d
+; AVX512-NEXT: tzcntq %r13, %rdx
+; AVX512-NEXT: tzcntq %r15, %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: testq %r13, %r13
+; AVX512-NEXT: cmovnel %edx, %eax
+; AVX512-NEXT: movq %rbp, %r14
+; AVX512-NEXT: tzcntq %rbp, %rbp
+; AVX512-NEXT: addl $64, %ebp
+; AVX512-NEXT: movq 80(%rdi), %r10
+; AVX512-NEXT: tzcntq %r10, %rcx
+; AVX512-NEXT: testq %r10, %r10
+; AVX512-NEXT: cmovnel %ecx, %ebp
+; AVX512-NEXT: subl $-128, %ebp
+; AVX512-NEXT: movq %r13, %rcx
+; AVX512-NEXT: orq %r15, %rcx
+; AVX512-NEXT: cmovnel %eax, %ebp
+; AVX512-NEXT: movq 104(%rdi), %r9
+; AVX512-NEXT: tzcntq %r9, %rcx
+; AVX512-NEXT: addl $64, %ecx
+; AVX512-NEXT: movq 96(%rdi), %rdx
+; AVX512-NEXT: tzcntq %rdx, %rax
+; AVX512-NEXT: testq %rdx, %rdx
+; AVX512-NEXT: cmovnel %eax, %ecx
+; AVX512-NEXT: movq 112(%rdi), %rsi
+; AVX512-NEXT: tzcntq 120(%rdi), %rax
+; AVX512-NEXT: addl $64, %eax
+; AVX512-NEXT: tzcntq %rsi, %rdi
+; AVX512-NEXT: testq %rsi, %rsi
+; AVX512-NEXT: cmovnel %edi, %eax
+; AVX512-NEXT: subl $-128, %eax
+; AVX512-NEXT: orq %r9, %rdx
+; AVX512-NEXT: cmovnel %ecx, %eax
+; AVX512-NEXT: orq %r14, %r15
+; AVX512-NEXT: orq %r10, %r13
+; AVX512-NEXT: addl $256, %eax # imm = 0x100
+; AVX512-NEXT: orq %r15, %r13
+; AVX512-NEXT: cmovnel %ebp, %eax
+; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
+; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
+; AVX512-NEXT: orq %rcx, %r11
+; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
+; AVX512-NEXT: orq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
+; AVX512-NEXT: orq %rbx, %r8
+; AVX512-NEXT: addl $512, %eax # imm = 0x200
+; AVX512-NEXT: orq %r11, %r8
+; AVX512-NEXT: cmovnel %r12d, %eax
+; AVX512-NEXT: # kill: def $eax killed $eax killed $rax
+; AVX512-NEXT: popq %rbx
+; AVX512-NEXT: popq %r12
+; AVX512-NEXT: popq %r13
+; AVX512-NEXT: popq %r14
+; AVX512-NEXT: popq %r15
+; AVX512-NEXT: popq %rbp
+; AVX512-NEXT: retq
+ %a0 = load i1024, ptr %p0
+ %cnt = call i1024 @llvm.cttz.i1024(i1024 %a0, i1 0)
+ %res = trunc i1024 %cnt to i32
+ ret i32 %res
+}
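
For readers tracing the CHECK lines above: the generated code computes cttz on i1024 as a branchless cascade. It takes the tzcnt of each 64-bit word (with a fallback of 64 for zero words, via tzcntq or the movl $64 + rep bsfq idiom in the SSE output), then merges pairs of results with cmovne keyed on whether the lower half is all zero, at widths 128, 256, 512 and finally 1024. Below is a minimal C sketch of that cascade, assuming a little-endian word array; the helper names and the recursive shape are illustrative only, not LLVM's implementation.

    #include <stdint.h>
    #include <stdio.h>

    /* tzcnt of one 64-bit word; returns 64 for zero, matching tzcntq
       (and the movl $64 + rep bsfq fallback in the SSE output). */
    static unsigned tz64(uint64_t w) {
        if (w == 0) return 64;
        unsigned n = 0;
        while (!(w & 1)) { w >>= 1; n++; }
        return n;
    }

    /* cttz of nwords 64-bit words (nwords a power of two), shaped like
       the generated code: compute both halves, then select on whether
       the low half is all zero -- the testq/orq + cmovnel pattern. */
    static unsigned tz_words(const uint64_t *w, unsigned nwords) {
        if (nwords == 1) return tz64(w[0]);
        unsigned half = nwords / 2;
        unsigned lo = tz_words(w, half);
        unsigned hi = tz_words(w + half, half) + half * 64;
        uint64_t lo_or = 0;
        for (unsigned i = 0; i < half; i++) lo_or |= w[i];
        return lo_or ? lo : hi; /* the cmovne in the asm */
    }

    int main(void) {
        uint64_t v[16] = {0};            /* i1024 as 16 x i64 */
        v[3] = 1ull << 5;                /* first set bit at 3*64 + 5 */
        printf("%u\n", tz_words(v, 16)); /* prints 197 */
        return 0;
    }

The cmov-based selects keep the whole computation branch-free, which is why the asm above or-reduces the low words before each wider merge instead of branching on them.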
diff --git a/llvm/test/CodeGen/X86/dag-fmf-cse.ll b/llvm/test/CodeGen/X86/dag-fmf-cse.ll
index 609ccdc..cdcc082 100644
--- a/llvm/test/CodeGen/X86/dag-fmf-cse.ll
+++ b/llvm/test/CodeGen/X86/dag-fmf-cse.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=fma -enable-unsafe-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=fma | FileCheck %s
; If fast-math-flags are propagated correctly, the mul1 expression
; should be recognized as a factor in the last fsub, so we should
diff --git a/llvm/test/CodeGen/X86/fabs.ll b/llvm/test/CodeGen/X86/fabs.ll
index 82c82ac..4e6da83 100644
--- a/llvm/test/CodeGen/X86/fabs.ll
+++ b/llvm/test/CodeGen/X86/fabs.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-sse,-sse2,-sse3 | FileCheck %s --check-prefix=X87
-; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-sse,-sse2,-sse3 -enable-unsafe-fp-math -enable-no-nans-fp-math | FileCheck %s --check-prefix=X87UNSAFE
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-sse,-sse2,-sse3 -enable-no-nans-fp-math | FileCheck %s --check-prefix=X87UNSAFE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
declare float @fabsf(float)
diff --git a/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll b/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll
index 0fe107c..aae6cda 100644
--- a/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll
+++ b/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll
@@ -22,25 +22,24 @@ declare <4 x bfloat> @llvm.maximumnum.v4bf16(<4 x bfloat>, <4 x bfloat>)
define float @test_fmaximumnum(float %x, float %y) nounwind {
; SSE2-LABEL: test_fmaximumnum:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: testl %eax, %eax
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: js .LBB0_2
-; SSE2-NEXT: # %bb.1:
+; SSE2-NEXT: js .LBB0_1
+; SSE2-NEXT: # %bb.2:
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: jmp .LBB0_3
+; SSE2-NEXT: .LBB0_1:
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: .LBB0_3:
; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: .LBB0_2:
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: cmpordss %xmm3, %xmm0
-; SSE2-NEXT: movaps %xmm0, %xmm4
-; SSE2-NEXT: andps %xmm3, %xmm4
-; SSE2-NEXT: js .LBB0_4
-; SSE2-NEXT: # %bb.3:
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: .LBB0_4:
-; SSE2-NEXT: maxss %xmm1, %xmm3
-; SSE2-NEXT: andnps %xmm3, %xmm0
-; SSE2-NEXT: orps %xmm4, %xmm0
+; SSE2-NEXT: maxss %xmm2, %xmm3
+; SSE2-NEXT: movaps %xmm3, %xmm0
+; SSE2-NEXT: cmpunordss %xmm3, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm2
+; SSE2-NEXT: andnps %xmm3, %xmm2
+; SSE2-NEXT: andps %xmm1, %xmm0
+; SSE2-NEXT: orps %xmm2, %xmm0
; SSE2-NEXT: retq
;
; AVX1-LABEL: test_fmaximumnum:
@@ -56,7 +55,7 @@ define float @test_fmaximumnum(float %x, float %y) nounwind {
; AVX1-NEXT: vmovdqa %xmm0, %xmm1
; AVX1-NEXT: .LBB0_3:
; AVX1-NEXT: vmaxss %xmm2, %xmm1, %xmm0
-; AVX1-NEXT: vcmpordss %xmm1, %xmm1, %xmm2
+; AVX1-NEXT: vcmpunordss %xmm0, %xmm0, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
@@ -70,7 +69,7 @@ define float @test_fmaximumnum(float %x, float %y) nounwind {
; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vmaxss %xmm2, %xmm1, %xmm0
-; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1
+; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: retq
;
@@ -95,7 +94,7 @@ define float @test_fmaximumnum(float %x, float %y) nounwind {
; X86-NEXT: vmovdqa %xmm2, %xmm0
; X86-NEXT: .LBB0_3:
; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovss %xmm0, (%esp)
; X86-NEXT: flds (%esp)
@@ -371,26 +370,25 @@ define float @test_fmaximumnum_nsz(float %x, float %y) "no-signed-zeros-fp-math"
; SSE2-LABEL: test_fmaximumnum_nsz:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm0, %xmm2
-; SSE2-NEXT: cmpordss %xmm0, %xmm2
-; SSE2-NEXT: movaps %xmm2, %xmm3
-; SSE2-NEXT: andps %xmm0, %xmm3
-; SSE2-NEXT: maxss %xmm1, %xmm0
-; SSE2-NEXT: andnps %xmm0, %xmm2
-; SSE2-NEXT: orps %xmm3, %xmm2
-; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: maxss %xmm1, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm1
+; SSE2-NEXT: cmpunordss %xmm2, %xmm1
+; SSE2-NEXT: andps %xmm1, %xmm0
+; SSE2-NEXT: andnps %xmm2, %xmm1
+; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX1-LABEL: test_fmaximumnum_nsz:
; AVX1: # %bb.0:
; AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX512-LABEL: test_fmaximumnum_nsz:
; AVX512: # %bb.0:
; AVX512-NEXT: vmaxss %xmm1, %xmm0, %xmm1
-; AVX512-NEXT: vcmpordss %xmm0, %xmm0, %k1
+; AVX512-NEXT: vcmpunordss %xmm1, %xmm1, %k1
; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vmovaps %xmm1, %xmm0
; AVX512-NEXT: retq
@@ -404,9 +402,9 @@ define float @test_fmaximumnum_nsz(float %x, float %y) "no-signed-zeros-fp-math"
; X86: # %bb.0:
; X86-NEXT: pushl %eax
; X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm1
-; X86-NEXT: vmaxss {{[0-9]+}}(%esp), %xmm0, %xmm2
-; X86-NEXT: vblendvps %xmm1, %xmm0, %xmm2, %xmm0
+; X86-NEXT: vmaxss {{[0-9]+}}(%esp), %xmm0, %xmm1
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
+; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovss %xmm0, (%esp)
; X86-NEXT: flds (%esp)
; X86-NEXT: popl %eax
@@ -421,23 +419,22 @@ define float @test_fmaximumnum_combine_cmps(float %x, float %y) nounwind {
; SSE2-NEXT: divss %xmm0, %xmm1
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: testl %eax, %eax
-; SSE2-NEXT: movaps %xmm0, %xmm3
-; SSE2-NEXT: js .LBB9_2
-; SSE2-NEXT: # %bb.1:
-; SSE2-NEXT: movaps %xmm1, %xmm3
-; SSE2-NEXT: .LBB9_2:
-; SSE2-NEXT: movaps %xmm3, %xmm2
-; SSE2-NEXT: cmpordss %xmm3, %xmm2
-; SSE2-NEXT: movaps %xmm2, %xmm4
-; SSE2-NEXT: andps %xmm3, %xmm4
-; SSE2-NEXT: js .LBB9_4
-; SSE2-NEXT: # %bb.3:
+; SSE2-NEXT: js .LBB9_1
+; SSE2-NEXT: # %bb.2:
+; SSE2-NEXT: movaps %xmm0, %xmm2
+; SSE2-NEXT: jmp .LBB9_3
+; SSE2-NEXT: .LBB9_1:
+; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: movaps %xmm0, %xmm1
-; SSE2-NEXT: .LBB9_4:
-; SSE2-NEXT: maxss %xmm1, %xmm3
+; SSE2-NEXT: .LBB9_3:
+; SSE2-NEXT: movaps %xmm1, %xmm3
+; SSE2-NEXT: maxss %xmm2, %xmm3
+; SSE2-NEXT: movaps %xmm3, %xmm0
+; SSE2-NEXT: cmpunordss %xmm3, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: andnps %xmm3, %xmm2
-; SSE2-NEXT: orps %xmm4, %xmm2
-; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: andps %xmm1, %xmm0
+; SSE2-NEXT: orps %xmm2, %xmm0
; SSE2-NEXT: retq
;
; AVX1-LABEL: test_fmaximumnum_combine_cmps:
@@ -454,7 +451,7 @@ define float @test_fmaximumnum_combine_cmps(float %x, float %y) nounwind {
; AVX1-NEXT: vmovaps %xmm0, %xmm1
; AVX1-NEXT: .LBB9_3:
; AVX1-NEXT: vmaxss %xmm2, %xmm1, %xmm0
-; AVX1-NEXT: vcmpordss %xmm1, %xmm1, %xmm2
+; AVX1-NEXT: vcmpunordss %xmm0, %xmm0, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
; AVX1-NEXT: retq
;
@@ -469,7 +466,7 @@ define float @test_fmaximumnum_combine_cmps(float %x, float %y) nounwind {
; AVX512F-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
; AVX512F-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
; AVX512F-NEXT: vmaxss %xmm2, %xmm1, %xmm0
-; AVX512F-NEXT: vcmpordss %xmm1, %xmm1, %k1
+; AVX512F-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512F-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; AVX512F-NEXT: retq
;
@@ -507,7 +504,7 @@ define float @test_fmaximumnum_combine_cmps(float %x, float %y) nounwind {
; X86-NEXT: vmovaps %xmm1, %xmm0
; X86-NEXT: .LBB9_3:
; X86-NEXT: vmaxss %xmm2, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovss %xmm0, (%esp)
; X86-NEXT: flds (%esp)
@@ -527,23 +524,23 @@ define float @test_fminimumnum(float %x, float %y) nounwind {
; SSE2: # %bb.0:
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: testl %eax, %eax
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: js .LBB10_2
-; SSE2-NEXT: # %bb.1:
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: .LBB10_2:
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: cmpordss %xmm3, %xmm2
-; SSE2-NEXT: movaps %xmm2, %xmm4
-; SSE2-NEXT: andps %xmm3, %xmm4
-; SSE2-NEXT: js .LBB10_4
-; SSE2-NEXT: # %bb.3:
+; SSE2-NEXT: js .LBB10_1
+; SSE2-NEXT: # %bb.2:
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: jmp .LBB10_3
+; SSE2-NEXT: .LBB10_1:
+; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: .LBB10_4:
-; SSE2-NEXT: minss %xmm0, %xmm3
+; SSE2-NEXT: .LBB10_3:
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: minss %xmm2, %xmm3
+; SSE2-NEXT: movaps %xmm3, %xmm1
+; SSE2-NEXT: cmpunordss %xmm3, %xmm1
+; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: andnps %xmm3, %xmm2
-; SSE2-NEXT: orps %xmm4, %xmm2
-; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: andps %xmm0, %xmm1
+; SSE2-NEXT: orps %xmm2, %xmm1
+; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX1-LABEL: test_fminimumnum:
@@ -559,7 +556,7 @@ define float @test_fminimumnum(float %x, float %y) nounwind {
; AVX1-NEXT: vmovdqa %xmm1, %xmm0
; AVX1-NEXT: .LBB10_3:
; AVX1-NEXT: vminss %xmm2, %xmm0, %xmm1
-; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
@@ -573,7 +570,7 @@ define float @test_fminimumnum(float %x, float %y) nounwind {
; AVX512-NEXT: vmovss %xmm0, %xmm2, %xmm2 {%k1}
; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: vminss %xmm2, %xmm0, %xmm1
-; AVX512-NEXT: vcmpordss %xmm0, %xmm0, %k1
+; AVX512-NEXT: vcmpunordss %xmm1, %xmm1, %k1
; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vmovaps %xmm1, %xmm0
; AVX512-NEXT: retq
@@ -599,7 +596,7 @@ define float @test_fminimumnum(float %x, float %y) nounwind {
; X86-NEXT: vmovdqa %xmm1, %xmm0
; X86-NEXT: .LBB10_3:
; X86-NEXT: vminss %xmm2, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovss %xmm0, (%esp)
; X86-NEXT: flds (%esp)
@@ -857,26 +854,25 @@ define float @test_fminimumnum_nsz(float %x, float %y) nounwind {
; SSE2-LABEL: test_fminimumnum_nsz:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm0, %xmm2
-; SSE2-NEXT: cmpordss %xmm0, %xmm2
-; SSE2-NEXT: movaps %xmm2, %xmm3
-; SSE2-NEXT: andps %xmm0, %xmm3
-; SSE2-NEXT: minss %xmm1, %xmm0
-; SSE2-NEXT: andnps %xmm0, %xmm2
-; SSE2-NEXT: orps %xmm3, %xmm2
-; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: minss %xmm1, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm1
+; SSE2-NEXT: cmpunordss %xmm2, %xmm1
+; SSE2-NEXT: andps %xmm1, %xmm0
+; SSE2-NEXT: andnps %xmm2, %xmm1
+; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX1-LABEL: test_fminimumnum_nsz:
; AVX1: # %bb.0:
; AVX1-NEXT: vminss %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX512-LABEL: test_fminimumnum_nsz:
; AVX512: # %bb.0:
; AVX512-NEXT: vminss %xmm1, %xmm0, %xmm1
-; AVX512-NEXT: vcmpordss %xmm0, %xmm0, %k1
+; AVX512-NEXT: vcmpunordss %xmm1, %xmm1, %k1
; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vmovaps %xmm1, %xmm0
; AVX512-NEXT: retq
@@ -890,9 +886,9 @@ define float @test_fminimumnum_nsz(float %x, float %y) nounwind {
; X86: # %bb.0:
; X86-NEXT: pushl %eax
; X86-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm1
-; X86-NEXT: vminss {{[0-9]+}}(%esp), %xmm0, %xmm2
-; X86-NEXT: vblendvps %xmm1, %xmm0, %xmm2, %xmm0
+; X86-NEXT: vminss {{[0-9]+}}(%esp), %xmm0, %xmm1
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
+; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovss %xmm0, (%esp)
; X86-NEXT: flds (%esp)
; X86-NEXT: popl %eax
@@ -907,23 +903,23 @@ define float @test_fminimumnum_combine_cmps(float %x, float %y) nounwind {
; SSE2-NEXT: divss %xmm0, %xmm1
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: testl %eax, %eax
-; SSE2-NEXT: movaps %xmm1, %xmm3
-; SSE2-NEXT: js .LBB19_2
-; SSE2-NEXT: # %bb.1:
-; SSE2-NEXT: movaps %xmm0, %xmm3
-; SSE2-NEXT: .LBB19_2:
-; SSE2-NEXT: movaps %xmm3, %xmm2
-; SSE2-NEXT: cmpordss %xmm3, %xmm2
-; SSE2-NEXT: movaps %xmm2, %xmm4
-; SSE2-NEXT: andps %xmm3, %xmm4
-; SSE2-NEXT: js .LBB19_4
-; SSE2-NEXT: # %bb.3:
+; SSE2-NEXT: js .LBB19_1
+; SSE2-NEXT: # %bb.2:
+; SSE2-NEXT: movaps %xmm1, %xmm2
+; SSE2-NEXT: jmp .LBB19_3
+; SSE2-NEXT: .LBB19_1:
+; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: movaps %xmm1, %xmm0
-; SSE2-NEXT: .LBB19_4:
-; SSE2-NEXT: minss %xmm0, %xmm3
+; SSE2-NEXT: .LBB19_3:
+; SSE2-NEXT: movaps %xmm0, %xmm3
+; SSE2-NEXT: minss %xmm2, %xmm3
+; SSE2-NEXT: movaps %xmm3, %xmm1
+; SSE2-NEXT: cmpunordss %xmm3, %xmm1
+; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: andnps %xmm3, %xmm2
-; SSE2-NEXT: orps %xmm4, %xmm2
-; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: andps %xmm0, %xmm1
+; SSE2-NEXT: orps %xmm2, %xmm1
+; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX1-LABEL: test_fminimumnum_combine_cmps:
@@ -940,7 +936,7 @@ define float @test_fminimumnum_combine_cmps(float %x, float %y) nounwind {
; AVX1-NEXT: vmovaps %xmm2, %xmm0
; AVX1-NEXT: .LBB19_3:
; AVX1-NEXT: vminss %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
@@ -955,7 +951,7 @@ define float @test_fminimumnum_combine_cmps(float %x, float %y) nounwind {
; AVX512F-NEXT: vmovss %xmm0, %xmm2, %xmm2 {%k1}
; AVX512F-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; AVX512F-NEXT: vminss %xmm2, %xmm0, %xmm1
-; AVX512F-NEXT: vcmpordss %xmm0, %xmm0, %k1
+; AVX512F-NEXT: vcmpunordss %xmm1, %xmm1, %k1
; AVX512F-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
; AVX512F-NEXT: vmovaps %xmm1, %xmm0
; AVX512F-NEXT: retq
@@ -994,7 +990,7 @@ define float @test_fminimumnum_combine_cmps(float %x, float %y) nounwind {
; X86-NEXT: vmovaps %xmm2, %xmm0
; X86-NEXT: .LBB19_3:
; X86-NEXT: vminss %xmm1, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovss %xmm0, (%esp)
; X86-NEXT: flds (%esp)
@@ -1022,9 +1018,9 @@ define <2 x double> @test_fminimumnum_vector(<2 x double> %x, <2 x double> %y) {
; SSE2-NEXT: por %xmm4, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: minpd %xmm0, %xmm1
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: cmpordpd %xmm3, %xmm0
-; SSE2-NEXT: andpd %xmm0, %xmm3
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: cmpunordpd %xmm1, %xmm0
+; SSE2-NEXT: pand %xmm0, %xmm3
; SSE2-NEXT: andnpd %xmm1, %xmm0
; SSE2-NEXT: orpd %xmm3, %xmm0
; SSE2-NEXT: retq
@@ -1034,7 +1030,7 @@ define <2 x double> @test_fminimumnum_vector(<2 x double> %x, <2 x double> %y) {
; AVX-NEXT: vblendvpd %xmm0, %xmm0, %xmm1, %xmm2
; AVX-NEXT: vblendvpd %xmm0, %xmm1, %xmm0, %xmm0
; AVX-NEXT: vminpd %xmm2, %xmm0, %xmm1
-; AVX-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
+; AVX-NEXT: vcmpunordpd %xmm1, %xmm1, %xmm2
; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
@@ -1048,7 +1044,7 @@ define <2 x double> @test_fminimumnum_vector(<2 x double> %x, <2 x double> %y) {
; X86-NEXT: vblendvpd %xmm0, %xmm0, %xmm1, %xmm2
; X86-NEXT: vblendvpd %xmm0, %xmm1, %xmm0, %xmm0
; X86-NEXT: vminpd %xmm2, %xmm0, %xmm1
-; X86-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordpd %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: retl
%r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %x, <2 x double> %y)
@@ -1084,19 +1080,17 @@ define <2 x double> @test_fminimumnum_vector_zero(<2 x double> %x) {
; SSE2: # %bb.0:
; SSE2-NEXT: xorpd %xmm1, %xmm1
; SSE2-NEXT: minpd %xmm0, %xmm1
-; SSE2-NEXT: movapd %xmm0, %xmm2
-; SSE2-NEXT: cmpordpd %xmm0, %xmm2
-; SSE2-NEXT: andpd %xmm2, %xmm0
-; SSE2-NEXT: andnpd %xmm1, %xmm2
-; SSE2-NEXT: orpd %xmm2, %xmm0
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: cmpunordpd %xmm1, %xmm0
+; SSE2-NEXT: andnpd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test_fminimumnum_vector_zero:
; AVX: # %bb.0:
; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm1
-; AVX-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
-; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX10_2-LABEL: test_fminimumnum_vector_zero:
@@ -1108,9 +1102,9 @@ define <2 x double> @test_fminimumnum_vector_zero(<2 x double> %x) {
; X86-LABEL: test_fminimumnum_vector_zero:
; X86: # %bb.0:
; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
-; X86-NEXT: vminpd %xmm0, %xmm1, %xmm1
-; X86-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
-; X86-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vminpd %xmm0, %xmm1, %xmm0
+; X86-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm1
+; X86-NEXT: vandnpd %xmm0, %xmm1, %xmm0
; X86-NEXT: retl
%r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %x, <2 x double> <double 0., double 0.>)
ret <2 x double> %r
@@ -1120,20 +1114,21 @@ define <4 x float> @test_fmaximumnum_vector_signed_zero(<4 x float> %x) {
; SSE2-LABEL: test_fmaximumnum_vector_signed_zero:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; SSE2-NEXT: maxps %xmm0, %xmm1
-; SSE2-NEXT: movaps %xmm0, %xmm2
-; SSE2-NEXT: cmpordps %xmm0, %xmm2
-; SSE2-NEXT: andps %xmm2, %xmm0
-; SSE2-NEXT: andnps %xmm1, %xmm2
-; SSE2-NEXT: orps %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm1, %xmm2
+; SSE2-NEXT: maxps %xmm0, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: cmpunordps %xmm2, %xmm0
+; SSE2-NEXT: andps %xmm0, %xmm1
+; SSE2-NEXT: andnps %xmm2, %xmm0
+; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test_fmaximumnum_vector_signed_zero:
; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; AVX-NEXT: vmaxps %xmm0, %xmm1, %xmm1
-; AVX-NEXT: vcmpordps %xmm0, %xmm0, %xmm2
-; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmaxps %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vcmpunordps %xmm0, %xmm0, %xmm2
+; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX10_2-LABEL: test_fmaximumnum_vector_signed_zero:
@@ -1144,9 +1139,9 @@ define <4 x float> @test_fmaximumnum_vector_signed_zero(<4 x float> %x) {
; X86-LABEL: test_fmaximumnum_vector_signed_zero:
; X86: # %bb.0:
; X86-NEXT: vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; X86-NEXT: vmaxps %xmm0, %xmm1, %xmm1
-; X86-NEXT: vcmpordps %xmm0, %xmm0, %xmm2
-; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmaxps %xmm0, %xmm1, %xmm0
+; X86-NEXT: vcmpunordps %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
; X86-NEXT: retl
%r = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> %x, <4 x float> <float -0., float -0., float -0., float -0.>)
ret <4 x float> %r
@@ -1155,13 +1150,14 @@ define <4 x float> @test_fmaximumnum_vector_signed_zero(<4 x float> %x) {
define <2 x double> @test_fminimumnum_vector_partially_zero(<2 x double> %x) {
; SSE2-LABEL: test_fminimumnum_vector_partially_zero:
; SSE2: # %bb.0:
-; SSE2-NEXT: movapd %xmm0, %xmm1
-; SSE2-NEXT: cmpordpd %xmm0, %xmm1
-; SSE2-NEXT: xorpd %xmm2, %xmm2
-; SSE2-NEXT: movhpd {{.*#+}} xmm2 = xmm2[0],mem[0]
+; SSE2-NEXT: xorpd %xmm1, %xmm1
+; SSE2-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; SSE2-NEXT: movapd %xmm1, %xmm2
; SSE2-NEXT: minpd %xmm0, %xmm2
-; SSE2-NEXT: andpd %xmm1, %xmm0
-; SSE2-NEXT: andnpd %xmm2, %xmm1
+; SSE2-NEXT: movapd %xmm2, %xmm0
+; SSE2-NEXT: cmpunordpd %xmm2, %xmm0
+; SSE2-NEXT: andpd %xmm0, %xmm1
+; SSE2-NEXT: andnpd %xmm2, %xmm0
; SSE2-NEXT: orpd %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -1169,9 +1165,9 @@ define <2 x double> @test_fminimumnum_vector_partially_zero(<2 x double> %x) {
; AVX: # %bb.0:
; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
-; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm1
-; AVX-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
-; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm2
+; AVX-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX10_2-LABEL: test_fminimumnum_vector_partially_zero:
@@ -1185,9 +1181,9 @@ define <2 x double> @test_fminimumnum_vector_partially_zero(<2 x double> %x) {
; X86: # %bb.0:
; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X86-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
-; X86-NEXT: vminpd %xmm0, %xmm1, %xmm1
-; X86-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
-; X86-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vminpd %xmm0, %xmm1, %xmm0
+; X86-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; X86-NEXT: retl
%r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %x, <2 x double> <double 0., double 5.>)
ret <2 x double> %r
@@ -1212,9 +1208,9 @@ define <2 x double> @test_fminimumnum_vector_different_zeros(<2 x double> %x) {
; SSE2-NEXT: por %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm1
; SSE2-NEXT: minpd %xmm4, %xmm1
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: cmpordpd %xmm3, %xmm0
-; SSE2-NEXT: andpd %xmm0, %xmm3
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: cmpunordpd %xmm1, %xmm0
+; SSE2-NEXT: pand %xmm0, %xmm3
; SSE2-NEXT: andnpd %xmm1, %xmm0
; SSE2-NEXT: orpd %xmm3, %xmm0
; SSE2-NEXT: retq
@@ -1226,7 +1222,7 @@ define <2 x double> @test_fminimumnum_vector_different_zeros(<2 x double> %x) {
; AVX-NEXT: vblendvpd %xmm0, %xmm0, %xmm1, %xmm2
; AVX-NEXT: vblendvpd %xmm0, %xmm1, %xmm0, %xmm0
; AVX-NEXT: vminpd %xmm2, %xmm0, %xmm1
-; AVX-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
+; AVX-NEXT: vcmpunordpd %xmm1, %xmm1, %xmm2
; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
@@ -1244,7 +1240,7 @@ define <2 x double> @test_fminimumnum_vector_different_zeros(<2 x double> %x) {
; X86-NEXT: vblendvpd %xmm0, %xmm0, %xmm1, %xmm2
; X86-NEXT: vblendvpd %xmm0, %xmm1, %xmm0, %xmm0
; X86-NEXT: vminpd %xmm2, %xmm0, %xmm1
-; X86-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordpd %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: retl
%r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %x, <2 x double> <double 0., double -0.>)
@@ -1278,20 +1274,24 @@ define <4 x float> @test_fmaximumnum_vector_non_zero(<4 x float> %x) {
define <2 x double> @test_fminimumnum_vector_nan(<2 x double> %x) {
; SSE2-LABEL: test_fminimumnum_vector_nan:
; SSE2: # %bb.0:
-; SSE2-NEXT: xorpd %xmm2, %xmm2
; SSE2-NEXT: xorpd %xmm1, %xmm1
; SSE2-NEXT: movhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
-; SSE2-NEXT: minpd %xmm0, %xmm1
-; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
-; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: movapd %xmm1, %xmm2
+; SSE2-NEXT: minpd %xmm0, %xmm2
+; SSE2-NEXT: movapd %xmm2, %xmm0
+; SSE2-NEXT: cmpunordpd %xmm2, %xmm0
+; SSE2-NEXT: andpd %xmm0, %xmm1
+; SSE2-NEXT: andnpd %xmm2, %xmm0
+; SSE2-NEXT: orpd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test_fminimumnum_vector_nan:
; AVX: # %bb.0:
; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vmovhpd {{.*#+}} xmm2 = xmm1[0],mem[0]
-; AVX-NEXT: vminpd %xmm0, %xmm2, %xmm0
-; AVX-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; AVX-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
+; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm2
+; AVX-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX10_2-LABEL: test_fminimumnum_vector_nan:
@@ -1306,7 +1306,7 @@ define <2 x double> @test_fminimumnum_vector_nan(<2 x double> %x) {
; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X86-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
; X86-NEXT: vminpd %xmm0, %xmm1, %xmm0
-; X86-NEXT: vcmpordpd %xmm1, %xmm1, %xmm2
+; X86-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm2
; X86-NEXT: vblendvpd %xmm2, %xmm1, %xmm0, %xmm0
; X86-NEXT: retl
%r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> %x, <2 x double> <double 0., double 0x7fff000000000000>)
@@ -1318,19 +1318,17 @@ define <2 x double> @test_fminimumnum_vector_zero_first(<2 x double> %x) {
; SSE2: # %bb.0:
; SSE2-NEXT: xorpd %xmm1, %xmm1
; SSE2-NEXT: minpd %xmm0, %xmm1
-; SSE2-NEXT: movapd %xmm0, %xmm2
-; SSE2-NEXT: cmpordpd %xmm0, %xmm2
-; SSE2-NEXT: andpd %xmm2, %xmm0
-; SSE2-NEXT: andnpd %xmm1, %xmm2
-; SSE2-NEXT: orpd %xmm2, %xmm0
+; SSE2-NEXT: movapd %xmm1, %xmm0
+; SSE2-NEXT: cmpunordpd %xmm1, %xmm0
+; SSE2-NEXT: andnpd %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test_fminimumnum_vector_zero_first:
; AVX: # %bb.0:
; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm1
-; AVX-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
-; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vminpd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm1
+; AVX-NEXT: vandnpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX10_2-LABEL: test_fminimumnum_vector_zero_first:
@@ -1342,9 +1340,9 @@ define <2 x double> @test_fminimumnum_vector_zero_first(<2 x double> %x) {
; X86-LABEL: test_fminimumnum_vector_zero_first:
; X86: # %bb.0:
; X86-NEXT: vxorpd %xmm1, %xmm1, %xmm1
-; X86-NEXT: vminpd %xmm0, %xmm1, %xmm1
-; X86-NEXT: vcmpordpd %xmm0, %xmm0, %xmm2
-; X86-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vminpd %xmm0, %xmm1, %xmm0
+; X86-NEXT: vcmpunordpd %xmm0, %xmm0, %xmm1
+; X86-NEXT: vandnpd %xmm0, %xmm1, %xmm0
; X86-NEXT: retl
%r = call <2 x double> @llvm.minimumnum.v2f64(<2 x double> <double 0., double 0.>, <2 x double> %x)
ret <2 x double> %r
@@ -1378,20 +1376,21 @@ define <4 x float> @test_fmaximumnum_vector_signed_zero_first(<4 x float> %x) {
; SSE2-LABEL: test_fmaximumnum_vector_signed_zero_first:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; SSE2-NEXT: maxps %xmm0, %xmm1
-; SSE2-NEXT: movaps %xmm0, %xmm2
-; SSE2-NEXT: cmpordps %xmm0, %xmm2
-; SSE2-NEXT: andps %xmm2, %xmm0
-; SSE2-NEXT: andnps %xmm1, %xmm2
-; SSE2-NEXT: orps %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm1, %xmm2
+; SSE2-NEXT: maxps %xmm0, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: cmpunordps %xmm2, %xmm0
+; SSE2-NEXT: andps %xmm0, %xmm1
+; SSE2-NEXT: andnps %xmm2, %xmm0
+; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: test_fmaximumnum_vector_signed_zero_first:
; AVX: # %bb.0:
; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; AVX-NEXT: vmaxps %xmm0, %xmm1, %xmm1
-; AVX-NEXT: vcmpordps %xmm0, %xmm0, %xmm2
-; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmaxps %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vcmpunordps %xmm0, %xmm0, %xmm2
+; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX10_2-LABEL: test_fmaximumnum_vector_signed_zero_first:
@@ -1402,9 +1401,9 @@ define <4 x float> @test_fmaximumnum_vector_signed_zero_first(<4 x float> %x) {
; X86-LABEL: test_fmaximumnum_vector_signed_zero_first:
; X86: # %bb.0:
; X86-NEXT: vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; X86-NEXT: vmaxps %xmm0, %xmm1, %xmm1
-; X86-NEXT: vcmpordps %xmm0, %xmm0, %xmm2
-; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; X86-NEXT: vmaxps %xmm0, %xmm1, %xmm0
+; X86-NEXT: vcmpunordps %xmm0, %xmm0, %xmm2
+; X86-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
; X86-NEXT: retl
%r = call <4 x float> @llvm.maximumnum.v4f32(<4 x float> <float -0., float -0., float -0., float -0.>, <4 x float> %x)
ret <4 x float> %r
@@ -1455,11 +1454,11 @@ define <4 x float> @test_fmaximumnum_v4f32_splat(<4 x float> %x, float %y) {
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: maxps %xmm4, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: cmpordps %xmm0, %xmm2
-; SSE2-NEXT: andps %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm1, %xmm2
+; SSE2-NEXT: cmpunordps %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: andnps %xmm1, %xmm2
-; SSE2-NEXT: orps %xmm2, %xmm0
+; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: retq
;
; AVX1-LABEL: test_fmaximumnum_v4f32_splat:
@@ -1468,7 +1467,7 @@ define <4 x float> @test_fmaximumnum_v4f32_splat(<4 x float> %x, float %y) {
; AVX1-NEXT: vblendvps %xmm0, %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vblendvps %xmm0, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vmaxps %xmm2, %xmm0, %xmm1
-; AVX1-NEXT: vcmpordps %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vcmpunordps %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
@@ -1478,7 +1477,7 @@ define <4 x float> @test_fmaximumnum_v4f32_splat(<4 x float> %x, float %y) {
; AVX512-NEXT: vblendvps %xmm0, %xmm1, %xmm0, %xmm2
; AVX512-NEXT: vblendvps %xmm0, %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmaxps %xmm2, %xmm0, %xmm1
-; AVX512-NEXT: vcmpordps %xmm0, %xmm0, %xmm2
+; AVX512-NEXT: vcmpunordps %xmm1, %xmm1, %xmm2
; AVX512-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
;
@@ -1494,7 +1493,7 @@ define <4 x float> @test_fmaximumnum_v4f32_splat(<4 x float> %x, float %y) {
; X86-NEXT: vblendvps %xmm0, %xmm1, %xmm0, %xmm2
; X86-NEXT: vblendvps %xmm0, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmaxps %xmm2, %xmm0, %xmm1
-; X86-NEXT: vcmpordps %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordps %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: retl
%splatinsert = insertelement <4 x float> poison, float %y, i64 0
@@ -1506,134 +1505,130 @@ define <4 x float> @test_fmaximumnum_v4f32_splat(<4 x float> %x, float %y) {
define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind {
; SSE2-LABEL: test_fmaximumnum_v4f16:
; SSE2: # %bb.0:
-; SSE2-NEXT: subq $104, %rsp
-; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: subq $136, %rsp
+; SSE2-NEXT: movaps %xmm0, %xmm2
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[1,1]
+; SSE2-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movaps %xmm1, %xmm2
+; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1]
+; SSE2-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; SSE2-NEXT: movss %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: psrld $16, %xmm0
; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
+; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: psrld $16, %xmm0
; SSE2-NEXT: callq __extendhfsf2@PLT
-; SSE2-NEXT: movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
+; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: callq __extendhfsf2@PLT
-; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 4-byte Folded Reload
-; SSE2-NEXT: # xmm4 = mem[0],zero,zero,zero
-; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: testl %eax, %eax
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: js .LBB33_2
-; SSE2-NEXT: # %bb.1:
-; SSE2-NEXT: movdqa %xmm4, %xmm2
-; SSE2-NEXT: .LBB33_2:
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: cmpordss %xmm2, %xmm0
-; SSE2-NEXT: movaps %xmm0, %xmm3
-; SSE2-NEXT: andps %xmm2, %xmm3
-; SSE2-NEXT: js .LBB33_4
-; SSE2-NEXT: # %bb.3:
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: .LBB33_4:
-; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
-; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE2-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
-; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE2-NEXT: maxss %xmm4, %xmm2
-; SSE2-NEXT: andnps %xmm2, %xmm0
-; SSE2-NEXT: orps %xmm3, %xmm0
+; SSE2-NEXT: js .LBB33_1
+; SSE2-NEXT: # %bb.2:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE2-NEXT: jmp .LBB33_3
+; SSE2-NEXT: .LBB33_1:
+; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: .LBB33_3:
+; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE2-NEXT: psrlq $48, %xmm0
+; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
+; SSE2-NEXT: psrlq $48, %xmm0
+; SSE2-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: maxss %xmm1, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: cmpunordss %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm1
+; SSE2-NEXT: andnps %xmm2, %xmm1
+; SSE2-NEXT: andps %xmm3, %xmm0
+; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: callq __truncsfhf2@PLT
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; SSE2-NEXT: movss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Reload
+; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
-; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
+; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: callq __extendhfsf2@PLT
-; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 4-byte Folded Reload
-; SSE2-NEXT: # xmm4 = mem[0],zero,zero,zero
-; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: testl %eax, %eax
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: js .LBB33_6
+; SSE2-NEXT: js .LBB33_4
; SSE2-NEXT: # %bb.5:
-; SSE2-NEXT: movdqa %xmm4, %xmm2
-; SSE2-NEXT: .LBB33_6:
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: cmpordss %xmm2, %xmm0
-; SSE2-NEXT: movaps %xmm0, %xmm3
-; SSE2-NEXT: andps %xmm2, %xmm3
-; SSE2-NEXT: js .LBB33_8
-; SSE2-NEXT: # %bb.7:
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: .LBB33_8:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE2-NEXT: jmp .LBB33_6
+; SSE2-NEXT: .LBB33_4:
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE2-NEXT: psrlq $48, %xmm1
-; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE2-NEXT: movdqa (%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT: psrlq $48, %xmm1
-; SSE2-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill
-; SSE2-NEXT: maxss %xmm4, %xmm2
-; SSE2-NEXT: andnps %xmm2, %xmm0
-; SSE2-NEXT: orps %xmm3, %xmm0
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: .LBB33_6:
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: maxss %xmm1, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: cmpunordss %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm1
+; SSE2-NEXT: andnps %xmm2, %xmm1
+; SSE2-NEXT: andps %xmm3, %xmm0
+; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: callq __truncsfhf2@PLT
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: callq __extendhfsf2@PLT
-; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: callq __extendhfsf2@PLT
-; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 4-byte Folded Reload
-; SSE2-NEXT: # xmm4 = mem[0],zero,zero,zero
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: testl %eax, %eax
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: js .LBB33_10
-; SSE2-NEXT: # %bb.9:
-; SSE2-NEXT: movdqa %xmm4, %xmm2
-; SSE2-NEXT: .LBB33_10:
-; SSE2-NEXT: movdqa %xmm2, %xmm1
-; SSE2-NEXT: cmpordss %xmm2, %xmm1
-; SSE2-NEXT: movaps %xmm1, %xmm3
-; SSE2-NEXT: andps %xmm2, %xmm3
-; SSE2-NEXT: js .LBB33_12
-; SSE2-NEXT: # %bb.11:
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: .LBB33_12:
-; SSE2-NEXT: maxss %xmm4, %xmm2
+; SSE2-NEXT: js .LBB33_7
+; SSE2-NEXT: # %bb.8:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE2-NEXT: jmp .LBB33_9
+; SSE2-NEXT: .LBB33_7:
+; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: .LBB33_9:
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: maxss %xmm1, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: cmpunordss %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: andnps %xmm2, %xmm1
-; SSE2-NEXT: orps %xmm3, %xmm1
-; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: andps %xmm3, %xmm0
+; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: callq __truncsfhf2@PLT
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE2-NEXT: callq __extendhfsf2@PLT
-; SSE2-NEXT: movss %xmm0, (%rsp) # 4-byte Spill
+; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE2-NEXT: callq __extendhfsf2@PLT
-; SSE2-NEXT: movd (%rsp), %xmm4 # 4-byte Folded Reload
-; SSE2-NEXT: # xmm4 = mem[0],zero,zero,zero
-; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: testl %eax, %eax
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: js .LBB33_14
-; SSE2-NEXT: # %bb.13:
-; SSE2-NEXT: movdqa %xmm4, %xmm2
-; SSE2-NEXT: .LBB33_14:
-; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: cmpordss %xmm2, %xmm0
-; SSE2-NEXT: movaps %xmm0, %xmm3
-; SSE2-NEXT: andps %xmm2, %xmm3
-; SSE2-NEXT: js .LBB33_16
-; SSE2-NEXT: # %bb.15:
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: .LBB33_16:
-; SSE2-NEXT: maxss %xmm4, %xmm2
-; SSE2-NEXT: andnps %xmm2, %xmm0
-; SSE2-NEXT: orps %xmm3, %xmm0
+; SSE2-NEXT: js .LBB33_10
+; SSE2-NEXT: # %bb.11:
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: movdqa (%rsp), %xmm3 # 16-byte Reload
+; SSE2-NEXT: jmp .LBB33_12
+; SSE2-NEXT: .LBB33_10:
+; SSE2-NEXT: movdqa (%rsp), %xmm1 # 16-byte Reload
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: .LBB33_12:
+; SSE2-NEXT: movdqa %xmm3, %xmm2
+; SSE2-NEXT: maxss %xmm1, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: cmpunordss %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm1
+; SSE2-NEXT: andnps %xmm2, %xmm1
+; SSE2-NEXT: andps %xmm3, %xmm0
+; SSE2-NEXT: orps %xmm1, %xmm0
; SSE2-NEXT: callq __truncsfhf2@PLT
; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
@@ -1641,7 +1636,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; SSE2-NEXT: punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; SSE2-NEXT: addq $104, %rsp
+; SSE2-NEXT: addq $136, %rsp
; SSE2-NEXT: retq
;
; AVX1-LABEL: test_fmaximumnum_v4f16:
@@ -1679,7 +1674,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX1-NEXT: vpsrlq $48, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmaxss %xmm1, %xmm2, %xmm0
-; AVX1-NEXT: vcmpordss %xmm2, %xmm2, %xmm1
+; AVX1-NEXT: vcmpunordss %xmm0, %xmm0, %xmm1
; AVX1-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -1700,7 +1695,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX1-NEXT: vmovdqa %xmm0, %xmm2
; AVX1-NEXT: .LBB33_6:
; AVX1-NEXT: vmaxss %xmm1, %xmm2, %xmm0
-; AVX1-NEXT: vcmpordss %xmm2, %xmm2, %xmm1
+; AVX1-NEXT: vcmpunordss %xmm0, %xmm0, %xmm1
; AVX1-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -1721,7 +1716,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX1-NEXT: vmovdqa %xmm0, %xmm2
; AVX1-NEXT: .LBB33_9:
; AVX1-NEXT: vmaxss %xmm1, %xmm2, %xmm0
-; AVX1-NEXT: vcmpordss %xmm2, %xmm2, %xmm1
+; AVX1-NEXT: vcmpunordss %xmm0, %xmm0, %xmm1
; AVX1-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
@@ -1742,7 +1737,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX1-NEXT: vmovdqa %xmm0, %xmm2
; AVX1-NEXT: .LBB33_12:
; AVX1-NEXT: vmaxss %xmm1, %xmm2, %xmm0
-; AVX1-NEXT: vcmpordss %xmm2, %xmm2, %xmm1
+; AVX1-NEXT: vcmpunordss %xmm0, %xmm0, %xmm1
; AVX1-NEXT: vblendvps %xmm1, %xmm2, %xmm0, %xmm0
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
@@ -1768,7 +1763,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1}
; AVX512-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
; AVX512-NEXT: vmaxss %xmm4, %xmm3, %xmm2
-; AVX512-NEXT: vcmpordss %xmm3, %xmm3, %k1
+; AVX512-NEXT: vcmpunordss %xmm2, %xmm2, %k1
; AVX512-NEXT: vmovss %xmm3, %xmm2, %xmm2 {%k1}
; AVX512-NEXT: vcvtps2ph $4, %xmm2, %xmm2
; AVX512-NEXT: vshufps {{.*#+}} xmm3 = xmm0[3,3,3,3]
@@ -1783,7 +1778,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1}
; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1}
; AVX512-NEXT: vmaxss %xmm5, %xmm4, %xmm3
-; AVX512-NEXT: vcmpordss %xmm4, %xmm4, %k1
+; AVX512-NEXT: vcmpunordss %xmm3, %xmm3, %k1
; AVX512-NEXT: vmovss %xmm4, %xmm3, %xmm3 {%k1}
; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
@@ -1799,7 +1794,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1}
; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1}
; AVX512-NEXT: vmaxss %xmm5, %xmm4, %xmm3
-; AVX512-NEXT: vcmpordss %xmm4, %xmm4, %k1
+; AVX512-NEXT: vcmpunordss %xmm3, %xmm3, %k1
; AVX512-NEXT: vmovss %xmm4, %xmm3, %xmm3 {%k1}
; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3
; AVX512-NEXT: vshufpd {{.*#+}} xmm4 = xmm0[1,0]
@@ -1814,7 +1809,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX512-NEXT: vmovss %xmm5, %xmm6, %xmm6 {%k1}
; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1}
; AVX512-NEXT: vmaxss %xmm6, %xmm5, %xmm4
-; AVX512-NEXT: vcmpordss %xmm5, %xmm5, %k1
+; AVX512-NEXT: vcmpunordss %xmm4, %xmm4, %k1
; AVX512-NEXT: vmovss %xmm5, %xmm4, %xmm4 {%k1}
; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
@@ -1831,7 +1826,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1}
; AVX512-NEXT: vmovss %xmm3, %xmm4, %xmm4 {%k1}
; AVX512-NEXT: vmaxss %xmm5, %xmm4, %xmm3
-; AVX512-NEXT: vcmpordss %xmm4, %xmm4, %k1
+; AVX512-NEXT: vcmpunordss %xmm3, %xmm3, %k1
; AVX512-NEXT: vmovss %xmm4, %xmm3, %xmm3 {%k1}
; AVX512-NEXT: vcvtps2ph $4, %xmm3, %xmm3
; AVX512-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
@@ -1846,7 +1841,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX512-NEXT: vmovss %xmm5, %xmm6, %xmm6 {%k1}
; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1}
; AVX512-NEXT: vmaxss %xmm6, %xmm5, %xmm4
-; AVX512-NEXT: vcmpordss %xmm5, %xmm5, %k1
+; AVX512-NEXT: vcmpunordss %xmm4, %xmm4, %k1
; AVX512-NEXT: vmovss %xmm5, %xmm4, %xmm4 {%k1}
; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
@@ -1860,7 +1855,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX512-NEXT: vmovss %xmm5, %xmm6, %xmm6 {%k1}
; AVX512-NEXT: vmovss %xmm4, %xmm5, %xmm5 {%k1}
; AVX512-NEXT: vmaxss %xmm6, %xmm5, %xmm4
-; AVX512-NEXT: vcmpordss %xmm5, %xmm5, %k1
+; AVX512-NEXT: vcmpunordss %xmm4, %xmm4, %k1
; AVX512-NEXT: vmovss %xmm5, %xmm4, %xmm4 {%k1}
; AVX512-NEXT: vcvtps2ph $4, %xmm4, %xmm4
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm0
@@ -1875,7 +1870,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; AVX512-NEXT: vmovss %xmm1, %xmm5, %xmm5 {%k1}
; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vmaxss %xmm5, %xmm1, %xmm0
-; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1
+; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
@@ -1933,7 +1928,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; X86-NEXT: vmovdqa %xmm1, %xmm0
; X86-NEXT: .LBB33_3:
; X86-NEXT: vmaxss %xmm2, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: calll __extendhfsf2
@@ -1955,7 +1950,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; X86-NEXT: vmovdqa %xmm1, %xmm0
; X86-NEXT: .LBB33_6:
; X86-NEXT: vmaxss %xmm2, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: calll __truncsfhf2
@@ -1993,7 +1988,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; X86-NEXT: vmovdqa %xmm1, %xmm0
; X86-NEXT: .LBB33_9:
; X86-NEXT: vmaxss %xmm2, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: calll __extendhfsf2
@@ -2015,7 +2010,7 @@ define <4 x half> @test_fmaximumnum_v4f16(<4 x half> %x, <4 x half> %y) nounwind
; X86-NEXT: vmovdqa %xmm1, %xmm0
; X86-NEXT: .LBB33_12:
; X86-NEXT: vmaxss %xmm2, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: calll __truncsfhf2
@@ -2041,120 +2036,114 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; SSE2-NEXT: pushq %rbp
; SSE2-NEXT: pushq %r15
; SSE2-NEXT: pushq %r14
+; SSE2-NEXT: pushq %r13
+; SSE2-NEXT: pushq %r12
; SSE2-NEXT: pushq %rbx
; SSE2-NEXT: subq $56, %rsp
-; SSE2-NEXT: pextrw $0, %xmm1, %r14d
-; SSE2-NEXT: pextrw $0, %xmm0, %r15d
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psrld $16, %xmm2
-; SSE2-NEXT: pextrw $0, %xmm2, %eax
; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: psrld $16, %xmm2
-; SSE2-NEXT: pextrw $0, %xmm2, %ecx
+; SSE2-NEXT: psrlq $48, %xmm2
+; SSE2-NEXT: movdqa %xmm1, %xmm3
+; SSE2-NEXT: psrlq $48, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm0[1,1]
+; SSE2-NEXT: pextrw $0, %xmm4, %ebp
+; SSE2-NEXT: movdqa %xmm1, %xmm4
+; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm1[1,1]
+; SSE2-NEXT: pextrw $0, %xmm4, %r15d
+; SSE2-NEXT: pextrw $0, %xmm0, %r12d
+; SSE2-NEXT: pextrw $0, %xmm1, %r13d
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: pextrw $0, %xmm0, %eax
+; SSE2-NEXT: psrld $16, %xmm1
+; SSE2-NEXT: pextrw $0, %xmm1, %ecx
; SSE2-NEXT: shll $16, %ecx
-; SSE2-NEXT: movd %ecx, %xmm3
+; SSE2-NEXT: movd %ecx, %xmm1
; SSE2-NEXT: shll $16, %eax
-; SSE2-NEXT: movd %eax, %xmm2
-; SSE2-NEXT: testl %ecx, %ecx
-; SSE2-NEXT: movdqa %xmm3, %xmm7
-; SSE2-NEXT: js .LBB34_2
-; SSE2-NEXT: # %bb.1:
-; SSE2-NEXT: movdqa %xmm2, %xmm7
-; SSE2-NEXT: .LBB34_2:
-; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm1[1,1]
-; SSE2-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
-; SSE2-NEXT: movdqa %xmm0, %xmm6
-; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,1],xmm0[1,1]
-; SSE2-NEXT: movdqa %xmm7, %xmm0
-; SSE2-NEXT: cmpordss %xmm7, %xmm0
-; SSE2-NEXT: movaps %xmm0, %xmm4
-; SSE2-NEXT: andps %xmm7, %xmm4
-; SSE2-NEXT: js .LBB34_4
-; SSE2-NEXT: # %bb.3:
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: .LBB34_4:
-; SSE2-NEXT: pextrw $0, %xmm5, %ebp
-; SSE2-NEXT: pextrw $0, %xmm6, %ebx
-; SSE2-NEXT: maxss %xmm2, %xmm7
-; SSE2-NEXT: andnps %xmm7, %xmm0
-; SSE2-NEXT: orps %xmm4, %xmm0
+; SSE2-NEXT: movd %eax, %xmm4
+; SSE2-NEXT: js .LBB34_1
+; SSE2-NEXT: # %bb.2:
+; SSE2-NEXT: movdqa %xmm4, %xmm0
+; SSE2-NEXT: jmp .LBB34_3
+; SSE2-NEXT: .LBB34_1:
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm4, %xmm1
+; SSE2-NEXT: .LBB34_3:
+; SSE2-NEXT: pextrw $0, %xmm2, %ebx
+; SSE2-NEXT: pextrw $0, %xmm3, %r14d
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: maxss %xmm0, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: cmpunordss %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm3
+; SSE2-NEXT: andnps %xmm2, %xmm3
+; SSE2-NEXT: andps %xmm1, %xmm0
+; SSE2-NEXT: orps %xmm3, %xmm0
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE2-NEXT: shll $16, %r15d
-; SSE2-NEXT: movd %r15d, %xmm3
-; SSE2-NEXT: shll $16, %r14d
-; SSE2-NEXT: movd %r14d, %xmm2
-; SSE2-NEXT: testl %r15d, %r15d
-; SSE2-NEXT: movdqa %xmm3, %xmm1
-; SSE2-NEXT: js .LBB34_6
+; SSE2-NEXT: shll $16, %r13d
+; SSE2-NEXT: movd %r13d, %xmm1
+; SSE2-NEXT: shll $16, %r12d
+; SSE2-NEXT: movd %r12d, %xmm2
+; SSE2-NEXT: js .LBB34_4
; SSE2-NEXT: # %bb.5:
+; SSE2-NEXT: movdqa %xmm2, %xmm0
+; SSE2-NEXT: jmp .LBB34_6
+; SSE2-NEXT: .LBB34_4:
+; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: .LBB34_6:
-; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE2-NEXT: psrlq $48, %xmm5
-; SSE2-NEXT: movdqa (%rsp), %xmm6 # 16-byte Reload
-; SSE2-NEXT: psrlq $48, %xmm6
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: cmpordss %xmm1, %xmm0
-; SSE2-NEXT: movaps %xmm0, %xmm4
-; SSE2-NEXT: andps %xmm1, %xmm4
-; SSE2-NEXT: js .LBB34_8
-; SSE2-NEXT: # %bb.7:
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: .LBB34_8:
-; SSE2-NEXT: pextrw $0, %xmm5, %r15d
-; SSE2-NEXT: pextrw $0, %xmm6, %r14d
-; SSE2-NEXT: maxss %xmm2, %xmm1
-; SSE2-NEXT: andnps %xmm1, %xmm0
-; SSE2-NEXT: orps %xmm4, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: maxss %xmm0, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: cmpunordss %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm3
+; SSE2-NEXT: andnps %xmm2, %xmm3
+; SSE2-NEXT: andps %xmm1, %xmm0
+; SSE2-NEXT: orps %xmm3, %xmm0
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE2-NEXT: shll $16, %ebx
-; SSE2-NEXT: movd %ebx, %xmm1
+; SSE2-NEXT: shll $16, %r15d
+; SSE2-NEXT: movd %r15d, %xmm1
; SSE2-NEXT: shll $16, %ebp
-; SSE2-NEXT: movd %ebp, %xmm3
-; SSE2-NEXT: testl %ebx, %ebx
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: js .LBB34_10
-; SSE2-NEXT: # %bb.9:
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: .LBB34_10:
+; SSE2-NEXT: movd %ebp, %xmm2
+; SSE2-NEXT: js .LBB34_7
+; SSE2-NEXT: # %bb.8:
; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: cmpordss %xmm2, %xmm0
-; SSE2-NEXT: movaps %xmm0, %xmm4
-; SSE2-NEXT: andps %xmm2, %xmm4
-; SSE2-NEXT: js .LBB34_12
-; SSE2-NEXT: # %bb.11:
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: .LBB34_12:
-; SSE2-NEXT: maxss %xmm3, %xmm2
-; SSE2-NEXT: andnps %xmm2, %xmm0
-; SSE2-NEXT: orps %xmm4, %xmm0
+; SSE2-NEXT: jmp .LBB34_9
+; SSE2-NEXT: .LBB34_7:
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: .LBB34_9:
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: maxss %xmm0, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: cmpunordss %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm3
+; SSE2-NEXT: andnps %xmm2, %xmm3
+; SSE2-NEXT: andps %xmm1, %xmm0
+; SSE2-NEXT: orps %xmm3, %xmm0
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE2-NEXT: shll $16, %r14d
; SSE2-NEXT: movd %r14d, %xmm1
-; SSE2-NEXT: shll $16, %r15d
-; SSE2-NEXT: movd %r15d, %xmm3
-; SSE2-NEXT: testl %r14d, %r14d
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: js .LBB34_14
-; SSE2-NEXT: # %bb.13:
-; SSE2-NEXT: movdqa %xmm3, %xmm2
-; SSE2-NEXT: .LBB34_14:
+; SSE2-NEXT: shll $16, %ebx
+; SSE2-NEXT: movd %ebx, %xmm2
+; SSE2-NEXT: js .LBB34_10
+; SSE2-NEXT: # %bb.11:
; SSE2-NEXT: movdqa %xmm2, %xmm0
-; SSE2-NEXT: cmpordss %xmm2, %xmm0
-; SSE2-NEXT: movaps %xmm0, %xmm4
-; SSE2-NEXT: andps %xmm2, %xmm4
-; SSE2-NEXT: js .LBB34_16
-; SSE2-NEXT: # %bb.15:
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: .LBB34_16:
-; SSE2-NEXT: maxss %xmm3, %xmm2
-; SSE2-NEXT: andnps %xmm2, %xmm0
-; SSE2-NEXT: orps %xmm4, %xmm0
+; SSE2-NEXT: jmp .LBB34_12
+; SSE2-NEXT: .LBB34_10:
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: .LBB34_12:
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: maxss %xmm0, %xmm2
+; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: cmpunordss %xmm2, %xmm0
+; SSE2-NEXT: movaps %xmm0, %xmm3
+; SSE2-NEXT: andnps %xmm2, %xmm3
+; SSE2-NEXT: andps %xmm1, %xmm0
+; SSE2-NEXT: orps %xmm3, %xmm0
; SSE2-NEXT: callq __truncsfbf2@PLT
; SSE2-NEXT: movdqa (%rsp), %xmm1 # 16-byte Reload
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
@@ -2164,6 +2153,8 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: addq $56, %rsp
; SSE2-NEXT: popq %rbx
+; SSE2-NEXT: popq %r12
+; SSE2-NEXT: popq %r13
; SSE2-NEXT: popq %r14
; SSE2-NEXT: popq %r15
; SSE2-NEXT: popq %rbp
@@ -2205,7 +2196,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; AVX1-NEXT: vpextrw $0, %xmm2, %ebp
; AVX1-NEXT: vpextrw $0, %xmm3, %r15d
; AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: callq __truncsfbf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -2222,7 +2213,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; AVX1-NEXT: vmovdqa %xmm2, %xmm0
; AVX1-NEXT: .LBB34_6:
; AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: callq __truncsfbf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -2239,7 +2230,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; AVX1-NEXT: vmovdqa %xmm2, %xmm0
; AVX1-NEXT: .LBB34_9:
; AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: callq __truncsfbf2@PLT
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
@@ -2256,7 +2247,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; AVX1-NEXT: vmovdqa %xmm2, %xmm0
; AVX1-NEXT: .LBB34_12:
; AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm1
-; AVX1-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; AVX1-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX1-NEXT: callq __truncsfbf2@PLT
; AVX1-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
@@ -2305,7 +2296,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vmaxss %xmm2, %xmm1, %xmm0
-; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1
+; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: callq __truncsfbf2@PLT
; AVX512-NEXT: vpextrw $0, %xmm0, {{[0-9]+}}(%rsp)
@@ -2319,7 +2310,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vmaxss %xmm2, %xmm1, %xmm0
-; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1
+; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: callq __truncsfbf2@PLT
; AVX512-NEXT: vpextrw $0, %xmm0, (%rsp)
@@ -2333,7 +2324,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vmaxss %xmm2, %xmm1, %xmm0
-; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1
+; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: callq __truncsfbf2@PLT
; AVX512-NEXT: vpextrw $0, %xmm0, {{[0-9]+}}(%rsp)
@@ -2347,7 +2338,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
; AVX512-NEXT: vmaxss %xmm2, %xmm1, %xmm0
-; AVX512-NEXT: vcmpordss %xmm1, %xmm1, %k1
+; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1
; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
; AVX512-NEXT: callq __truncsfbf2@PLT
; AVX512-NEXT: vpextrw $0, %xmm0, {{[0-9]+}}(%rsp)
@@ -2400,7 +2391,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; X86-NEXT: vpextrw $0, %xmm2, %edi
; X86-NEXT: vpextrw $0, %xmm3, %ebp
; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovss %xmm0, (%esp)
; X86-NEXT: shll $16, %ecx
@@ -2416,7 +2407,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; X86-NEXT: vmovdqa %xmm2, %xmm0
; X86-NEXT: .LBB34_6:
; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: calll __truncsfbf2
@@ -2436,7 +2427,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; X86-NEXT: vmovdqa %xmm2, %xmm0
; X86-NEXT: .LBB34_9:
; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: calll __truncsfbf2
@@ -2456,7 +2447,7 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
; X86-NEXT: vmovdqa %xmm2, %xmm0
; X86-NEXT: .LBB34_12:
; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm1
-; X86-NEXT: vcmpordss %xmm0, %xmm0, %xmm2
+; X86-NEXT: vcmpunordss %xmm1, %xmm1, %xmm2
; X86-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; X86-NEXT: vmovups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT: calll __truncsfbf2
diff --git a/llvm/test/CodeGen/X86/fp-undef.ll b/llvm/test/CodeGen/X86/fp-undef.ll
index 227f007..c358085 100644
--- a/llvm/test/CodeGen/X86/fp-undef.ll
+++ b/llvm/test/CodeGen/X86/fp-undef.ll
@@ -1,6 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=ANY
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -enable-unsafe-fp-math | FileCheck %s --check-prefix=ANY
; This is duplicated from tests for InstSimplify. If you're
; adding something here, you should probably add it there too.
diff --git a/llvm/test/CodeGen/X86/fp128-select.ll b/llvm/test/CodeGen/X86/fp128-select.ll
index 659e4dd..27a651e 100644
--- a/llvm/test/CodeGen/X86/fp128-select.ll
+++ b/llvm/test/CodeGen/X86/fp128-select.ll
@@ -13,8 +13,8 @@ define void @test_select(ptr %p, ptr %q, i1 zeroext %c) nounwind {
; SSE: # %bb.0:
; SSE-NEXT: testl %edx, %edx
; SSE-NEXT: jne .LBB0_1
-; SSE-NEXT: # %bb.3:
-; SSE-NEXT: movaps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: # %bb.2:
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [NaN]
; SSE-NEXT: movaps %xmm0, (%rsi)
; SSE-NEXT: retq
; SSE-NEXT: .LBB0_1:
@@ -58,7 +58,7 @@ define fp128 @test_select_cc(fp128, fp128) nounwind {
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: jmp .LBB1_3
; SSE-NEXT: .LBB1_1:
-; SSE-NEXT: movaps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE-NEXT: movaps {{.*#+}} xmm1 = [1.0E+0]
; SSE-NEXT: .LBB1_3: # %BB0
; SSE-NEXT: testl %ebx, %ebx
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
diff --git a/llvm/test/CodeGen/X86/fsxor-alignment.ll b/llvm/test/CodeGen/X86/fsxor-alignment.ll
index 6fa4a31..32af5b9 100644
--- a/llvm/test/CodeGen/X86/fsxor-alignment.ll
+++ b/llvm/test/CodeGen/X86/fsxor-alignment.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 -enable-unsafe-fp-math | FileCheck %s
+; RUN: llc < %s -mtriple=i686-- -mattr=+sse2 | FileCheck %s
; Don't fold the incoming stack arguments into the xorps instructions used
; to do floating-point negations, because the arguments aren't vectors
diff --git a/llvm/test/CodeGen/X86/machine-trace-metrics-crash.ll b/llvm/test/CodeGen/X86/machine-trace-metrics-crash.ll
index f710a30..bd997d1 100644
--- a/llvm/test/CodeGen/X86/machine-trace-metrics-crash.ll
+++ b/llvm/test/CodeGen/X86/machine-trace-metrics-crash.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=sse -enable-unsafe-fp-math < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=sse < %s | FileCheck %s
; The debug info in this test case was causing a crash because machine trace metrics
; did not correctly ignore debug instructions. The check lines ensure that the
diff --git a/llvm/test/CodeGen/X86/neg_fp.ll b/llvm/test/CodeGen/X86/neg_fp.ll
index 8020982..18ded50 100644
--- a/llvm/test/CodeGen/X86/neg_fp.ll
+++ b/llvm/test/CodeGen/X86/neg_fp.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-- -mattr=+sse4.1 | FileCheck %s
-; Test that when we don't -enable-unsafe-fp-math, we don't do the optimization
+; Test that we don't perform the optimization
; -0 - (A - B) to (B - A) because A==B, -0 != 0
define float @negfp(float %a, float %b) nounwind {
diff --git a/llvm/test/CodeGen/X86/negate-add-zero.ll b/llvm/test/CodeGen/X86/negate-add-zero.ll
index eb4e2d3..4884832 100644
--- a/llvm/test/CodeGen/X86/negate-add-zero.ll
+++ b/llvm/test/CodeGen/X86/negate-add-zero.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -enable-unsafe-fp-math | FileCheck %s
+; RUN: llc < %s | FileCheck %s
; PR3374
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
diff --git a/llvm/test/CodeGen/X86/recip-pic.ll b/llvm/test/CodeGen/X86/recip-pic.ll
index d01ecc1..d2620e7 100644
--- a/llvm/test/CodeGen/X86/recip-pic.ll
+++ b/llvm/test/CodeGen/X86/recip-pic.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -enable-unsafe-fp-math -mcpu=slm -relocation-model=pic | FileCheck %s --check-prefix=CHECK
+; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mcpu=slm -relocation-model=pic | FileCheck %s --check-prefix=CHECK
define fastcc float @foo(float %x) unnamed_addr #0 {
; CHECK-LABEL: foo:
diff --git a/llvm/test/CodeGen/X86/sincos-opt.ll b/llvm/test/CodeGen/X86/sincos-opt.ll
index 6885456..51f3e52 100644
--- a/llvm/test/CodeGen/X86/sincos-opt.ll
+++ b/llvm/test/CodeGen/X86/sincos-opt.ll
@@ -1,10 +1,10 @@
; RUN: llc < %s -mtriple=x86_64-apple-macosx10.9.0 -mcpu=core2 | FileCheck %s --check-prefix=OSX_SINCOS
; RUN: llc < %s -mtriple=x86_64-apple-macosx10.8.0 -mcpu=core2 | FileCheck %s --check-prefix=OSX_NOOPT
; RUN: llc < %s -mtriple=x86_64-pc-linux-gnu -mcpu=core2 | FileCheck %s --check-prefix=GNU_SINCOS
-; RUN: llc < %s -mtriple=x86_64-pc-linux-gnu -mcpu=core2 -enable-unsafe-fp-math | FileCheck %s --check-prefix=GNU_SINCOS_FASTMATH
-; RUN: llc < %s -mtriple=x86_64-pc-linux-gnux32 -mcpu=core2 -enable-unsafe-fp-math | FileCheck %s --check-prefix=GNU_SINCOS_FASTMATH
+; RUN: llc < %s -mtriple=x86_64-pc-linux-gnu -mcpu=core2 | FileCheck %s --check-prefix=GNU_SINCOS_FASTMATH
+; RUN: llc < %s -mtriple=x86_64-pc-linux-gnux32 -mcpu=core2 | FileCheck %s --check-prefix=GNU_SINCOS_FASTMATH
; RUN: llc < %s -mtriple=x86_64-fuchsia -mcpu=core2 | FileCheck %s --check-prefix=GNU_SINCOS
-; RUN: llc < %s -mtriple=x86_64-fuchsia -mcpu=core2 -enable-unsafe-fp-math | FileCheck %s --check-prefix=GNU_SINCOS_FASTMATH
+; RUN: llc < %s -mtriple=x86_64-fuchsia -mcpu=core2 | FileCheck %s --check-prefix=GNU_SINCOS_FASTMATH
; RUN: llc < %s -mtriple=x86_64-scei-ps4 -mcpu=btver2 | FileCheck %s --check-prefix=PS4_SINCOS
; RUN: llc < %s -mtriple=x86_64-sie-ps5 -mcpu=znver2 | FileCheck %s --check-prefix=PS4_SINCOS
diff --git a/llvm/test/CodeGen/X86/sincos.ll b/llvm/test/CodeGen/X86/sincos.ll
index 7903407..9206c25 100644
--- a/llvm/test/CodeGen/X86/sincos.ll
+++ b/llvm/test/CodeGen/X86/sincos.ll
@@ -1,6 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; Make sure this testcase codegens to the sin and cos instructions, not calls
-; RUN: llc < %s -mtriple=i686-apple-macosx -mattr=-sse,-sse2,-sse3 -enable-unsafe-fp-math | FileCheck %s
; RUN: llc < %s -mtriple=i686-apple-macosx -mattr=-sse,-sse2,-sse3 | FileCheck %s
declare float @sinf(float) readonly
diff --git a/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll b/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
index c0beb6f..2822d40 100644
--- a/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
+++ b/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
@@ -1,9 +1,9 @@
-; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math | FileCheck %s --check-prefix=CST --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+sse4.1 | FileCheck %s --check-prefix=CST --check-prefix=SSE41
-; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+avx | FileCheck %s --check-prefix=CST --check-prefix=AVX
-; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+avx512f | FileCheck %s --check-prefix=AVX512F
-; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+avx512vl | FileCheck %s --check-prefix=AVX512VL
+; RUN: llc < %s -mtriple=x86_64 | FileCheck %s --check-prefix=CST --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64 -mattr=+sse4.1 | FileCheck %s --check-prefix=CST --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64 -mattr=+avx | FileCheck %s --check-prefix=CST --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64 -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64 -mattr=+avx512f | FileCheck %s --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64 -mattr=+avx512vl | FileCheck %s --check-prefix=AVX512VL
; Check that the constants used in the vectors are the right ones.
; SSE2: [[MASKCSTADDR:.LCPI[0-9_]+]]: