Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/Analysis/ScalarEvolution/trip-count-minmax.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll | 49
-rw-r--r--  llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll | 98
-rw-r--r--  llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll | 24
-rw-r--r--  llvm/test/CodeGen/AMDGPU/addsub64_carry.ll | 192
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll | 452
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll | 790
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll | 57
-rw-r--r--  llvm/test/CodeGen/AMDGPU/carryout-selection.ll | 322
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sdiv64.ll | 470
-rw-r--r--  llvm/test/CodeGen/AMDGPU/srem64.ll | 488
-rw-r--r--  llvm/test/CodeGen/AMDGPU/uaddo.ll | 201
-rw-r--r--  llvm/test/CodeGen/AMDGPU/uaddsat.ll | 47
-rw-r--r--  llvm/test/CodeGen/AMDGPU/udiv64.ll | 472
-rw-r--r--  llvm/test/CodeGen/AMDGPU/urem64.ll | 379
-rw-r--r--  llvm/test/CodeGen/AMDGPU/usubo.ll | 201
-rw-r--r--  llvm/test/CodeGen/AMDGPU/usubsat.ll | 54
-rw-r--r--  llvm/test/CodeGen/Hexagon/fmul-v67.ll | 8
-rw-r--r--  llvm/test/CodeGen/MIR2Vec/vocab-error-handling.ll | 16
-rw-r--r--  llvm/test/CodeGen/NVPTX/i32x2-instructions.ll | 1625
-rw-r--r--  llvm/test/CodeGen/NVPTX/tcgen05-alloc.ll | 149
-rw-r--r--  llvm/test/CodeGen/NVPTX/tcgen05-commit.ll | 152
-rw-r--r--  llvm/test/CodeGen/NVPTX/tcgen05-cp.ll | 450
-rw-r--r--  llvm/test/CodeGen/NVPTX/tcgen05-shift.ll | 21
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll | 4
-rw-r--r--  llvm/test/CodeGen/RISCV/double-arith.ll | 3
-rw-r--r--  llvm/test/CodeGen/RISCV/rv64zbkb.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/fmaxnum.ll | 46
-rw-r--r--  llvm/test/CodeGen/X86/fminimum-fmaximum.ll | 99
-rw-r--r--  llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll | 99
-rw-r--r--  llvm/test/CodeGen/X86/fminnum.ll | 46
-rw-r--r--  llvm/test/CodeGen/X86/pgo-profile-o0.ll | 49
-rw-r--r--  llvm/test/Instrumentation/AddressSanitizer/alloca-offset-lifetime.ll | 27
-rw-r--r--  llvm/test/Instrumentation/AddressSanitizer/calls-only-smallfn.ll | 18
-rw-r--r--  llvm/test/Instrumentation/AddressSanitizer/calls-only.ll | 42
-rw-r--r--  llvm/test/Instrumentation/SanitizerCoverage/missing_dbg.ll | 92
-rw-r--r--  llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-basics.ll | 9
-rw-r--r--  llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-dereferencing-pointer.ll | 9
-rw-r--r--  llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-non-argument-value.ll | 9
-rw-r--r--  llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-zero-element.ll | 9
-rw-r--r--  llvm/test/Transforms/AggressiveInstCombine/trunc_select.ll | 12
-rw-r--r--  llvm/test/Transforms/AggressiveInstCombine/trunc_select_cmp.ll | 11
-rw-r--r--  llvm/test/Transforms/Coroutines/coro-elide-safe.ll (renamed from llvm/test/Transforms/Coroutines/coro-transform-must-elide.ll) | 28
-rw-r--r--  llvm/test/Transforms/DFAJumpThreading/dfa-jump-threading-analysis.ll | 32
-rw-r--r--  llvm/test/Transforms/DFAJumpThreading/max-path-length.ll | 6
-rw-r--r--  llvm/test/Transforms/GVN/assume-equal.ll | 49
-rw-r--r--  llvm/test/Transforms/InstCombine/ptrtoaddr.ll | 77
-rw-r--r--  llvm/test/Transforms/InstSimplify/ptr_diff.ll | 59
-rw-r--r--  llvm/test/Transforms/LICM/vector-intrinsics.ll | 176
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/veclib-function-calls.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll | 244
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/x86-predication.ll | 9
-rw-r--r--  llvm/test/Transforms/LoopVectorize/single_early_exit.ll | 47
-rw-r--r--  llvm/test/Transforms/PreISelIntrinsicLowering/AArch64/expand-exp.ll | 1
-rw-r--r--  llvm/test/tools/llvm-exegesis/AArch64/no-aliasing-ld-str.s | 4
55 files changed, 6188 insertions, 1857 deletions
diff --git a/llvm/test/Analysis/ScalarEvolution/trip-count-minmax.ll b/llvm/test/Analysis/ScalarEvolution/trip-count-minmax.ll
index 8d091a0..d380104 100644
--- a/llvm/test/Analysis/ScalarEvolution/trip-count-minmax.ll
+++ b/llvm/test/Analysis/ScalarEvolution/trip-count-minmax.ll
@@ -61,7 +61,7 @@ define void @umin(i32 noundef %a, i32 noundef %b) {
; CHECK-NEXT: Loop %for.body: backedge-taken count is (-1 + ((2 * %a) umin (4 * %b)))
; CHECK-NEXT: Loop %for.body: constant max backedge-taken count is i32 2147483646
; CHECK-NEXT: Loop %for.body: symbolic max backedge-taken count is (-1 + ((2 * %a) umin (4 * %b)))
-; CHECK-NEXT: Loop %for.body: Trip multiple is 1
+; CHECK-NEXT: Loop %for.body: Trip multiple is 2
;
; void umin(unsigned a, unsigned b) {
; a *= 2;
@@ -157,7 +157,7 @@ define void @smin(i32 noundef %a, i32 noundef %b) {
; CHECK-NEXT: Loop %for.body: backedge-taken count is (-1 + ((2 * %a)<nsw> smin (4 * %b)<nsw>))
; CHECK-NEXT: Loop %for.body: constant max backedge-taken count is i32 2147483646
; CHECK-NEXT: Loop %for.body: symbolic max backedge-taken count is (-1 + ((2 * %a)<nsw> smin (4 * %b)<nsw>))
-; CHECK-NEXT: Loop %for.body: Trip multiple is 1
+; CHECK-NEXT: Loop %for.body: Trip multiple is 2
;
; void smin(signed a, signed b) {
; a *= 2;
diff --git a/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll b/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll
index 4287507..dfff35d 100644
--- a/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll
+++ b/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll
@@ -1451,3 +1451,52 @@ define <4 x i32> @partial_reduce_shl_zext_non_const_rhs(<16 x i8> %l, <4 x i32>
%red = tail call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> %part, <16 x i32> %shift)
ret <4 x i32> %red
}
+
+define <2 x i32> @udot_v16i8tov2i32(<2 x i32> %acc, <16 x i8> %input) {
+; CHECK-NODOT-LABEL: udot_v16i8tov2i32:
+; CHECK-NODOT: // %bb.0: // %entry
+; CHECK-NODOT-NEXT: ushll v2.8h, v1.8b, #0
+; CHECK-NODOT-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NODOT-NEXT: ushll2 v1.8h, v1.16b, #0
+; CHECK-NODOT-NEXT: ushll v3.4s, v2.4h, #0
+; CHECK-NODOT-NEXT: uaddw v0.4s, v0.4s, v2.4h
+; CHECK-NODOT-NEXT: ushll2 v4.4s, v2.8h, #0
+; CHECK-NODOT-NEXT: ext v2.16b, v2.16b, v2.16b, #8
+; CHECK-NODOT-NEXT: ext v3.16b, v3.16b, v3.16b, #8
+; CHECK-NODOT-NEXT: add v0.2s, v3.2s, v0.2s
+; CHECK-NODOT-NEXT: ext v3.16b, v4.16b, v4.16b, #8
+; CHECK-NODOT-NEXT: uaddw v0.4s, v0.4s, v2.4h
+; CHECK-NODOT-NEXT: ushll v2.4s, v1.4h, #0
+; CHECK-NODOT-NEXT: add v0.2s, v3.2s, v0.2s
+; CHECK-NODOT-NEXT: ext v2.16b, v2.16b, v2.16b, #8
+; CHECK-NODOT-NEXT: ushll2 v3.4s, v1.8h, #0
+; CHECK-NODOT-NEXT: uaddw v0.4s, v0.4s, v1.4h
+; CHECK-NODOT-NEXT: ext v1.16b, v1.16b, v1.16b, #8
+; CHECK-NODOT-NEXT: add v0.2s, v2.2s, v0.2s
+; CHECK-NODOT-NEXT: ext v2.16b, v3.16b, v3.16b, #8
+; CHECK-NODOT-NEXT: uaddw v0.4s, v0.4s, v1.4h
+; CHECK-NODOT-NEXT: add v0.2s, v2.2s, v0.2s
+; CHECK-NODOT-NEXT: ret
+;
+; CHECK-DOT-LABEL: udot_v16i8tov2i32:
+; CHECK-DOT: // %bb.0: // %entry
+; CHECK-DOT-NEXT: movi v2.16b, #1
+; CHECK-DOT-NEXT: fmov d0, d0
+; CHECK-DOT-NEXT: udot v0.4s, v1.16b, v2.16b
+; CHECK-DOT-NEXT: addp v0.4s, v0.4s, v0.4s
+; CHECK-DOT-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-DOT-NEXT: ret
+;
+; CHECK-DOT-I8MM-LABEL: udot_v16i8tov2i32:
+; CHECK-DOT-I8MM: // %bb.0: // %entry
+; CHECK-DOT-I8MM-NEXT: movi v2.16b, #1
+; CHECK-DOT-I8MM-NEXT: fmov d0, d0
+; CHECK-DOT-I8MM-NEXT: udot v0.4s, v1.16b, v2.16b
+; CHECK-DOT-I8MM-NEXT: addp v0.4s, v0.4s, v0.4s
+; CHECK-DOT-I8MM-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-DOT-I8MM-NEXT: ret
+entry:
+ %input.wide = zext <16 x i8> %input to <16 x i32>
+ %partial.reduce = tail call <2 x i32> @llvm.vector.partial.reduce.add(<2 x i32> %acc, <16 x i32> %input.wide)
+ ret <2 x i32> %partial.reduce
+}
diff --git a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
index 7cc5051..003aa04 100644
--- a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
+++ b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
@@ -8759,9 +8759,8 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_sub_co_u32_e32 v0, vcc, v2, v6
; GFX90A-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v7, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
-; GFX90A-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX90A-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX90A-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX90A-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -8780,20 +8779,19 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
; GFX90A-NEXT: s_cbranch_execz .LBB113_6
; GFX90A-NEXT: ; %bb.5: ; %atomicrmw.private
; GFX90A-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
-; GFX90A-NEXT: v_cndmask_b32_e32 v4, -1, v4, vcc
-; GFX90A-NEXT: buffer_load_dword v0, v4, s[0:3], 0 offen
-; GFX90A-NEXT: buffer_load_dword v1, v4, s[0:3], 0 offen offset:4
+; GFX90A-NEXT: v_cndmask_b32_e32 v0, -1, v4, vcc
+; GFX90A-NEXT: buffer_load_dword v1, v0, s[0:3], 0 offen
+; GFX90A-NEXT: buffer_load_dword v2, v0, s[0:3], 0 offen offset:4
; GFX90A-NEXT: s_waitcnt vmcnt(1)
-; GFX90A-NEXT: v_sub_co_u32_e32 v2, vcc, v0, v6
+; GFX90A-NEXT: v_sub_co_u32_e32 v3, vcc, v1, v6
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_subb_co_u32_e32 v3, vcc, v1, v7, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0
-; GFX90A-NEXT: v_cndmask_b32_e64 v0, v3, 0, vcc
-; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1
-; GFX90A-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
-; GFX90A-NEXT: buffer_store_dword v0, v4, s[0:3], 0 offen offset:4
-; GFX90A-NEXT: buffer_store_dword v2, v4, s[0:3], 0 offen
+; GFX90A-NEXT: v_subb_co_u32_e32 v4, vcc, v2, v7, vcc
+; GFX90A-NEXT: v_accvgpr_write_b32 a0, v1
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
+; GFX90A-NEXT: v_accvgpr_write_b32 a1, v2
+; GFX90A-NEXT: v_cndmask_b32_e64 v1, v4, 0, vcc
+; GFX90A-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen
+; GFX90A-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:4
; GFX90A-NEXT: .LBB113_6: ; %atomicrmw.phi
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: ;;#ASMSTART
@@ -8827,10 +8825,9 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
; GFX950-NEXT: v_sub_co_u32_e32 v0, vcc, v2, v6
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v7, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
; GFX950-NEXT: s_nop 1
-; GFX950-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX950-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX950-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] sc0
; GFX950-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -8856,11 +8853,11 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
; GFX950-NEXT: v_sub_co_u32_e32 v2, vcc, v0, v6
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v3, vcc, v1, v7, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
; GFX950-NEXT: v_accvgpr_write_b32 a0, v0
-; GFX950-NEXT: v_accvgpr_write_b32 a1, v1
+; GFX950-NEXT: s_nop 0
; GFX950-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX950-NEXT: v_accvgpr_write_b32 a1, v1
; GFX950-NEXT: scratch_store_dwordx2 v4, v[2:3], off
; GFX950-NEXT: .LBB113_6: ; %atomicrmw.phi
; GFX950-NEXT: s_or_b64 exec, exec, s[0:1]
@@ -8900,9 +8897,8 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1]
; GFX90A-NEXT: v_sub_co_u32_e32 v4, vcc, v6, v2
; GFX90A-NEXT: v_subb_co_u32_e32 v5, vcc, v7, v3, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[4:5], v[6:7]
-; GFX90A-NEXT: v_cndmask_b32_e64 v5, v5, 0, vcc
; GFX90A-NEXT: v_cndmask_b32_e64 v4, v4, 0, vcc
+; GFX90A-NEXT: v_cndmask_b32_e64 v5, v5, 0, vcc
; GFX90A-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
@@ -8918,18 +8914,17 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
; GFX90A-NEXT: s_cbranch_execz .LBB114_6
; GFX90A-NEXT: ; %bb.5: ; %atomicrmw.private
; GFX90A-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; GFX90A-NEXT: v_cndmask_b32_e32 v6, -1, v0, vcc
-; GFX90A-NEXT: buffer_load_dword v4, v6, s[0:3], 0 offen
-; GFX90A-NEXT: buffer_load_dword v5, v6, s[0:3], 0 offen offset:4
+; GFX90A-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
+; GFX90A-NEXT: buffer_load_dword v4, v0, s[0:3], 0 offen
+; GFX90A-NEXT: buffer_load_dword v5, v0, s[0:3], 0 offen offset:4
; GFX90A-NEXT: s_waitcnt vmcnt(1)
-; GFX90A-NEXT: v_sub_co_u32_e32 v0, vcc, v4, v2
+; GFX90A-NEXT: v_sub_co_u32_e32 v1, vcc, v4, v2
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_subb_co_u32_e32 v1, vcc, v5, v3, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[4:5]
-; GFX90A-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX90A-NEXT: v_subb_co_u32_e32 v2, vcc, v5, v3, vcc
; GFX90A-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
-; GFX90A-NEXT: buffer_store_dword v0, v6, s[0:3], 0 offen
-; GFX90A-NEXT: buffer_store_dword v1, v6, s[0:3], 0 offen offset:4
+; GFX90A-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX90A-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
+; GFX90A-NEXT: buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
; GFX90A-NEXT: .LBB114_6: ; %atomicrmw.phi
; GFX90A-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX90A-NEXT: ;;#ASMSTART
@@ -8962,10 +8957,9 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
; GFX950-NEXT: v_sub_co_u32_e32 v2, vcc, v8, v0
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
; GFX950-NEXT: s_nop 1
-; GFX950-NEXT: v_cndmask_b32_e64 v7, v3, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX950-NEXT: v_cndmask_b32_e64 v7, v3, 0, vcc
; GFX950-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[4:5], v[6:9] sc0
; GFX950-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -8988,7 +8982,6 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
; GFX950-NEXT: v_sub_co_u32_e32 v0, vcc, v2, v0
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v1, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
@@ -17064,9 +17057,8 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_sub_co_u32_e32 v0, vcc, v2, v4
; GFX90A-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
-; GFX90A-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX90A-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX90A-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX90A-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[6:7], v[0:3] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -17085,20 +17077,19 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
; GFX90A-NEXT: ; %bb.5: ; %atomicrmw.private
; GFX90A-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX90A-NEXT: s_cselect_b32 s4, s4, -1
-; GFX90A-NEXT: v_mov_b32_e32 v6, s4
-; GFX90A-NEXT: buffer_load_dword v0, v6, s[0:3], 0 offen
-; GFX90A-NEXT: buffer_load_dword v1, v6, s[0:3], 0 offen offset:4
+; GFX90A-NEXT: v_mov_b32_e32 v0, s4
+; GFX90A-NEXT: buffer_load_dword v1, v0, s[0:3], 0 offen
+; GFX90A-NEXT: buffer_load_dword v2, v0, s[0:3], 0 offen offset:4
; GFX90A-NEXT: s_waitcnt vmcnt(1)
-; GFX90A-NEXT: v_sub_co_u32_e32 v2, vcc, v0, v4
+; GFX90A-NEXT: v_sub_co_u32_e32 v3, vcc, v1, v4
; GFX90A-NEXT: s_waitcnt vmcnt(0)
-; GFX90A-NEXT: v_subb_co_u32_e32 v3, vcc, v1, v5, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX90A-NEXT: v_accvgpr_write_b32 a0, v0
-; GFX90A-NEXT: v_cndmask_b32_e64 v0, v3, 0, vcc
-; GFX90A-NEXT: v_accvgpr_write_b32 a1, v1
-; GFX90A-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
-; GFX90A-NEXT: buffer_store_dword v0, v6, s[0:3], 0 offen offset:4
-; GFX90A-NEXT: buffer_store_dword v2, v6, s[0:3], 0 offen
+; GFX90A-NEXT: v_subb_co_u32_e32 v4, vcc, v2, v5, vcc
+; GFX90A-NEXT: v_accvgpr_write_b32 a0, v1
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
+; GFX90A-NEXT: v_accvgpr_write_b32 a1, v2
+; GFX90A-NEXT: v_cndmask_b32_e64 v1, v4, 0, vcc
+; GFX90A-NEXT: buffer_store_dword v3, v0, s[0:3], 0 offen
+; GFX90A-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:4
; GFX90A-NEXT: .LBB221_6: ; %atomicrmw.phi
; GFX90A-NEXT: ;;#ASMSTART
; GFX90A-NEXT: ; use a[0:1]
@@ -17131,10 +17122,9 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
; GFX950-NEXT: v_sub_co_u32_e32 v0, vcc, v2, v4
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
; GFX950-NEXT: s_nop 1
-; GFX950-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX950-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX950-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[6:7], v[0:3] sc0
; GFX950-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -17158,11 +17148,11 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
; GFX950-NEXT: v_sub_co_u32_e32 v2, vcc, v0, v4
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v3, vcc, v1, v5, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
; GFX950-NEXT: v_accvgpr_write_b32 a0, v0
-; GFX950-NEXT: v_accvgpr_write_b32 a1, v1
+; GFX950-NEXT: s_nop 0
; GFX950-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX950-NEXT: v_accvgpr_write_b32 a1, v1
; GFX950-NEXT: scratch_store_dwordx2 off, v[2:3], s0
; GFX950-NEXT: .LBB221_6: ; %atomicrmw.phi
; GFX950-NEXT: ;;#ASMSTART
@@ -17201,9 +17191,8 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
; GFX90A-NEXT: v_pk_mov_b32 v[8:9], v[2:3], v[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_sub_co_u32_e32 v2, vcc, v8, v0
; GFX90A-NEXT: v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
-; GFX90A-NEXT: v_cndmask_b32_e64 v7, v3, 0, vcc
; GFX90A-NEXT: v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX90A-NEXT: v_cndmask_b32_e64 v7, v3, 0, vcc
; GFX90A-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[4:5], v[6:9] glc
; GFX90A-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -17226,7 +17215,6 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
; GFX90A-NEXT: v_sub_co_u32_e32 v0, vcc, v2, v0
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v1, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
; GFX90A-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
; GFX90A-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX90A-NEXT: buffer_store_dword v0, v4, s[0:3], 0 offen
@@ -17262,10 +17250,9 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
; GFX950-NEXT: v_sub_co_u32_e32 v2, vcc, v8, v0
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
; GFX950-NEXT: s_nop 1
-; GFX950-NEXT: v_cndmask_b32_e64 v7, v3, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX950-NEXT: v_cndmask_b32_e64 v7, v3, 0, vcc
; GFX950-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[4:5], v[6:9] sc0
; GFX950-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -17286,7 +17273,6 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
; GFX950-NEXT: v_sub_co_u32_e32 v0, vcc, v2, v0
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v1, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
diff --git a/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll b/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll
index c98fff9..34a4899 100644
--- a/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll
+++ b/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll
@@ -5804,9 +5804,8 @@ define void @global_atomic_usub_sat_i64_ret_a_a(ptr addrspace(1) %ptr) #0 {
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_sub_co_u32_e32 v2, vcc, v4, v6
; GFX90A-NEXT: v_subb_co_u32_e32 v3, vcc, v5, v7, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5]
-; GFX90A-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
; GFX90A-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX90A-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
@@ -5839,10 +5838,9 @@ define void @global_atomic_usub_sat_i64_ret_a_a(ptr addrspace(1) %ptr) #0 {
; GFX950-NEXT: v_sub_co_u32_e32 v2, vcc, v4, v6
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v3, vcc, v5, v7, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5]
; GFX950-NEXT: s_nop 1
-; GFX950-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX950-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
; GFX950-NEXT: global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 sc0
; GFX950-NEXT: s_waitcnt vmcnt(0)
; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
@@ -5880,9 +5878,8 @@ define void @global_atomic_usub_sat_i64_ret_av_av(ptr addrspace(1) %ptr) #0 {
; GFX90A-NEXT: v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1]
; GFX90A-NEXT: v_sub_co_u32_e32 v4, vcc, v6, v2
; GFX90A-NEXT: v_subb_co_u32_e32 v5, vcc, v7, v3, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[4:5], v[6:7]
-; GFX90A-NEXT: v_cndmask_b32_e64 v5, v5, 0, vcc
; GFX90A-NEXT: v_cndmask_b32_e64 v4, v4, 0, vcc
+; GFX90A-NEXT: v_cndmask_b32_e64 v5, v5, 0, vcc
; GFX90A-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:80 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
@@ -5911,10 +5908,9 @@ define void @global_atomic_usub_sat_i64_ret_av_av(ptr addrspace(1) %ptr) #0 {
; GFX950-NEXT: v_sub_co_u32_e32 v4, vcc, v6, v2
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v5, vcc, v7, v3, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[4:5], v[6:7]
; GFX950-NEXT: s_nop 1
-; GFX950-NEXT: v_cndmask_b32_e64 v5, v5, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v4, v4, 0, vcc
+; GFX950-NEXT: v_cndmask_b32_e64 v5, v5, 0, vcc
; GFX950-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:80 sc0
; GFX950-NEXT: s_waitcnt vmcnt(0)
; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
@@ -11573,9 +11569,8 @@ define void @global_atomic_usub_sat_i64_saddr_ret_a_a(ptr addrspace(1) inreg %pt
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_sub_co_u32_e32 v0, vcc, v2, v4
; GFX90A-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
-; GFX90A-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX90A-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX90A-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX90A-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[16:17] offset:80 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -11609,10 +11604,9 @@ define void @global_atomic_usub_sat_i64_saddr_ret_a_a(ptr addrspace(1) inreg %pt
; GFX950-NEXT: v_sub_co_u32_e32 v0, vcc, v2, v4
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
; GFX950-NEXT: s_nop 1
-; GFX950-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX950-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX950-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] offset:80 sc0
; GFX950-NEXT: s_waitcnt vmcnt(0)
; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -11651,9 +11645,8 @@ define void @global_atomic_usub_sat_i64_saddr_ret_av_av(ptr addrspace(1) inreg %
; GFX90A-NEXT: v_pk_mov_b32 v[8:9], v[2:3], v[2:3] op_sel:[0,1]
; GFX90A-NEXT: v_sub_co_u32_e32 v2, vcc, v8, v0
; GFX90A-NEXT: v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX90A-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
-; GFX90A-NEXT: v_cndmask_b32_e64 v7, v3, 0, vcc
; GFX90A-NEXT: v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX90A-NEXT: v_cndmask_b32_e64 v7, v3, 0, vcc
; GFX90A-NEXT: global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[16:17] offset:80 glc
; GFX90A-NEXT: s_waitcnt vmcnt(0)
; GFX90A-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -11683,10 +11676,9 @@ define void @global_atomic_usub_sat_i64_saddr_ret_av_av(ptr addrspace(1) inreg %
; GFX950-NEXT: v_sub_co_u32_e32 v2, vcc, v8, v0
; GFX950-NEXT: s_nop 1
; GFX950-NEXT: v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX950-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
; GFX950-NEXT: s_nop 1
-; GFX950-NEXT: v_cndmask_b32_e64 v7, v3, 0, vcc
; GFX950-NEXT: v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX950-NEXT: v_cndmask_b32_e64 v7, v3, 0, vcc
; GFX950-NEXT: global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[0:1] offset:80 sc0
; GFX950-NEXT: s_waitcnt vmcnt(0)
; GFX950-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
diff --git a/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll b/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
index d326966..b72eba8 100644
--- a/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
+++ b/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
@@ -17,12 +17,9 @@ define %struct.uint96 @v_add64_32(i64 %val64A, i64 %val64B, i32 %val32) {
; CHECK-LABEL: v_add64_32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_add_co_u32_e32 v5, vcc, v0, v2
-; CHECK-NEXT: v_addc_co_u32_e32 v6, vcc, v1, v3, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, v[5:6], v[0:1]
-; CHECK-NEXT: v_mov_b32_e32 v0, v5
+; CHECK-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; CHECK-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
; CHECK-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v4, vcc
-; CHECK-NEXT: v_mov_b32_e32 v1, v6
; CHECK-NEXT: s_setpc_b64 s[30:31]
%sum64 = add i64 %val64A, %val64B
%obit = icmp ult i64 %sum64, %val64A
@@ -38,16 +35,14 @@ define <2 x i64> @v_uadd_v2i64(<2 x i64> %val0, <2 x i64> %val1, ptr %ptrval) {
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_add_co_u32_e32 v6, vcc, v2, v6
+; CHECK-NEXT: v_add_co_u32_e64 v4, s[4:5], v0, v4
; CHECK-NEXT: v_addc_co_u32_e32 v7, vcc, v3, v7, vcc
-; CHECK-NEXT: v_add_co_u32_e32 v4, vcc, v0, v4
-; CHECK-NEXT: v_addc_co_u32_e32 v5, vcc, v1, v5, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[0:1]
-; CHECK-NEXT: flat_store_dwordx4 v[8:9], v[4:7]
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[2:3]
-; CHECK-NEXT: v_mov_b32_e32 v1, v0
+; CHECK-NEXT: v_addc_co_u32_e64 v5, s[4:5], v1, v5, s[4:5]
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; CHECK-NEXT: v_mov_b32_e32 v1, v0
; CHECK-NEXT: v_mov_b32_e32 v3, v2
+; CHECK-NEXT: flat_store_dwordx4 v[8:9], v[4:7]
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
%pair = call {<2 x i64>, <2 x i1>} @llvm.uadd.with.overflow.v2i64(<2 x i64> %val0, <2 x i64> %val1)
@@ -63,16 +58,14 @@ define <2 x i64> @v_usub_v2i64(<2 x i64> %val0, <2 x i64> %val1, ptr %ptrval) {
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_sub_co_u32_e32 v6, vcc, v2, v6
+; CHECK-NEXT: v_sub_co_u32_e64 v4, s[4:5], v0, v4
; CHECK-NEXT: v_subb_co_u32_e32 v7, vcc, v3, v7, vcc
-; CHECK-NEXT: v_sub_co_u32_e32 v4, vcc, v0, v4
-; CHECK-NEXT: v_subb_co_u32_e32 v5, vcc, v1, v5, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, v[4:5], v[0:1]
-; CHECK-NEXT: flat_store_dwordx4 v[8:9], v[4:7]
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
-; CHECK-NEXT: v_mov_b32_e32 v1, v0
+; CHECK-NEXT: v_subb_co_u32_e64 v5, s[4:5], v1, v5, s[4:5]
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[4:5]
; CHECK-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; CHECK-NEXT: v_mov_b32_e32 v1, v0
; CHECK-NEXT: v_mov_b32_e32 v3, v2
+; CHECK-NEXT: flat_store_dwordx4 v[8:9], v[4:7]
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
%pair = call {<2 x i64>, <2 x i1>} @llvm.usub.with.overflow.v2i64(<2 x i64> %val0, <2 x i64> %val1)
@@ -87,10 +80,9 @@ define i64 @v_uadd_i64(i64 %val0, i64 %val1, ptr %ptrval) {
; CHECK-LABEL: v_uadd_i64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_add_co_u32_e32 v2, vcc, v0, v2
-; CHECK-NEXT: v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[2:3]
+; CHECK-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; CHECK-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; CHECK-NEXT: v_mov_b32_e32 v1, v0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -109,7 +101,6 @@ define i64 @v_uadd_p1(i64 %val0, i64 %val1, ptr %ptrval) {
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
; CHECK-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; CHECK-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; CHECK-NEXT: v_mov_b32_e32 v1, v0
@@ -147,10 +138,9 @@ define i64 @v_usub_p1(i64 %val0, i64 %val1, ptr %ptrval) {
; CHECK-LABEL: v_usub_p1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_add_co_u32_e32 v2, vcc, -1, v0
-; CHECK-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v1, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[2:3]
+; CHECK-NEXT: v_subrev_co_u32_e32 v0, vcc, 1, v0
+; CHECK-NEXT: v_subbrev_co_u32_e32 v1, vcc, 0, v1, vcc
+; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; CHECK-NEXT: v_mov_b32_e32 v1, v0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -167,10 +157,9 @@ define i64 @v_usub_n1(i64 %val0, i64 %val1, ptr %ptrval) {
; CHECK-LABEL: v_usub_n1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT: v_add_co_u32_e32 v2, vcc, 1, v0
-; CHECK-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[2:3]
+; CHECK-NEXT: v_subrev_co_u32_e32 v0, vcc, -1, v0
+; CHECK-NEXT: v_subbrev_co_u32_e32 v1, vcc, -1, v1, vcc
+; CHECK-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
; CHECK-NEXT: v_mov_b32_e32 v1, v0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -190,15 +179,13 @@ define i64 @v_usub_n1(i64 %val0, i64 %val1, ptr %ptrval) {
define amdgpu_ps %struct.uint96 @s_add64_32(i64 inreg %val64A, i64 inreg %val64B, i32 inreg %val32) {
; CHECK-LABEL: s_add64_32:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_add_u32 s6, s0, s2
-; CHECK-NEXT: v_mov_b32_e32 v0, s0
-; CHECK-NEXT: s_addc_u32 s7, s1, s3
-; CHECK-NEXT: v_mov_b32_e32 v1, s1
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; CHECK-NEXT: s_mov_b32 s0, s6
-; CHECK-NEXT: s_cmp_lg_u64 vcc, 0
+; CHECK-NEXT: s_add_u32 s0, s0, s2
+; CHECK-NEXT: s_cselect_b64 s[6:7], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[6:7], 0
+; CHECK-NEXT: s_addc_u32 s1, s1, s3
+; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[2:3], 0
; CHECK-NEXT: s_addc_u32 s2, s4, 0
-; CHECK-NEXT: s_mov_b32 s1, s7
; CHECK-NEXT: ; return to shader part epilog
%sum64 = add i64 %val64A, %val64B
%obit = icmp ult i64 %sum64, %val64A
@@ -212,24 +199,24 @@ define amdgpu_ps %struct.uint96 @s_add64_32(i64 inreg %val64A, i64 inreg %val64B
define amdgpu_ps <2 x i64> @s_uadd_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_uadd_v2i64:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_add_u32 s6, s2, s6
-; CHECK-NEXT: v_mov_b32_e32 v9, s3
-; CHECK-NEXT: s_addc_u32 s7, s3, s7
-; CHECK-NEXT: v_mov_b32_e32 v8, s2
-; CHECK-NEXT: s_add_u32 s4, s0, s4
-; CHECK-NEXT: v_mov_b32_e32 v7, s1
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[8:9]
-; CHECK-NEXT: s_addc_u32 s5, s1, s5
-; CHECK-NEXT: v_mov_b32_e32 v6, s0
-; CHECK-NEXT: v_cndmask_b32_e64 v8, 0, -1, vcc
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[6:7]
-; CHECK-NEXT: v_readfirstlane_b32 s2, v8
-; CHECK-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc
-; CHECK-NEXT: v_readfirstlane_b32 s0, v6
-; CHECK-NEXT: v_mov_b32_e32 v2, s4
-; CHECK-NEXT: v_mov_b32_e32 v3, s5
-; CHECK-NEXT: v_mov_b32_e32 v4, s6
-; CHECK-NEXT: v_mov_b32_e32 v5, s7
+; CHECK-NEXT: s_add_u32 s10, s2, s6
+; CHECK-NEXT: s_cselect_b64 s[8:9], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[8:9], 0
+; CHECK-NEXT: s_addc_u32 s8, s3, s7
+; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT: s_add_u32 s0, s0, s4
+; CHECK-NEXT: s_cselect_b64 s[6:7], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[6:7], 0
+; CHECK-NEXT: s_addc_u32 s1, s1, s5
+; CHECK-NEXT: v_mov_b32_e32 v2, s0
+; CHECK-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[2:3]
+; CHECK-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1]
+; CHECK-NEXT: v_readfirstlane_b32 s0, v7
+; CHECK-NEXT: v_readfirstlane_b32 s2, v6
+; CHECK-NEXT: v_mov_b32_e32 v4, s10
+; CHECK-NEXT: v_mov_b32_e32 v5, s8
; CHECK-NEXT: s_mov_b32 s1, s0
; CHECK-NEXT: s_mov_b32 s3, s2
; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
@@ -246,24 +233,24 @@ define amdgpu_ps <2 x i64> @s_uadd_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg
define amdgpu_ps <2 x i64> @s_usub_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_usub_v2i64:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_sub_u32 s6, s2, s6
-; CHECK-NEXT: v_mov_b32_e32 v9, s3
-; CHECK-NEXT: s_subb_u32 s7, s3, s7
-; CHECK-NEXT: v_mov_b32_e32 v8, s2
-; CHECK-NEXT: s_sub_u32 s4, s0, s4
-; CHECK-NEXT: v_mov_b32_e32 v7, s1
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, s[6:7], v[8:9]
-; CHECK-NEXT: s_subb_u32 s5, s1, s5
-; CHECK-NEXT: v_mov_b32_e32 v6, s0
-; CHECK-NEXT: v_cndmask_b32_e64 v8, 0, -1, vcc
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, s[4:5], v[6:7]
-; CHECK-NEXT: v_readfirstlane_b32 s2, v8
-; CHECK-NEXT: v_cndmask_b32_e64 v6, 0, -1, vcc
-; CHECK-NEXT: v_readfirstlane_b32 s0, v6
-; CHECK-NEXT: v_mov_b32_e32 v2, s4
-; CHECK-NEXT: v_mov_b32_e32 v3, s5
-; CHECK-NEXT: v_mov_b32_e32 v4, s6
-; CHECK-NEXT: v_mov_b32_e32 v5, s7
+; CHECK-NEXT: s_sub_u32 s10, s2, s6
+; CHECK-NEXT: s_cselect_b64 s[8:9], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[8:9], 0
+; CHECK-NEXT: s_subb_u32 s8, s3, s7
+; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT: s_sub_u32 s0, s0, s4
+; CHECK-NEXT: s_cselect_b64 s[6:7], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[6:7], 0
+; CHECK-NEXT: s_subb_u32 s1, s1, s5
+; CHECK-NEXT: v_mov_b32_e32 v2, s0
+; CHECK-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT: v_cndmask_b32_e64 v6, 0, -1, s[2:3]
+; CHECK-NEXT: v_cndmask_b32_e64 v7, 0, -1, s[0:1]
+; CHECK-NEXT: v_readfirstlane_b32 s0, v7
+; CHECK-NEXT: v_readfirstlane_b32 s2, v6
+; CHECK-NEXT: v_mov_b32_e32 v4, s10
+; CHECK-NEXT: v_mov_b32_e32 v5, s8
; CHECK-NEXT: s_mov_b32 s1, s0
; CHECK-NEXT: s_mov_b32 s3, s2
; CHECK-NEXT: flat_store_dwordx4 v[0:1], v[2:5]
@@ -280,15 +267,15 @@ define amdgpu_ps <2 x i64> @s_usub_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg
define amdgpu_ps i64 @s_uadd_i64(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_uadd_i64:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_add_u32 s2, s0, s2
-; CHECK-NEXT: v_mov_b32_e32 v3, s1
-; CHECK-NEXT: s_addc_u32 s3, s1, s3
+; CHECK-NEXT: s_add_u32 s0, s0, s2
+; CHECK-NEXT: s_cselect_b64 s[4:5], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[4:5], 0
+; CHECK-NEXT: s_addc_u32 s1, s1, s3
; CHECK-NEXT: v_mov_b32_e32 v2, s0
-; CHECK-NEXT: v_mov_b32_e32 v5, s3
-; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
-; CHECK-NEXT: v_mov_b32_e32 v4, s2
-; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: s_mov_b32 s1, s0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -305,10 +292,11 @@ define amdgpu_ps i64 @s_uadd_p1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_uadd_p1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_add_u32 s0, s0, 1
+; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[2:3], 0
; CHECK-NEXT: s_addc_u32 s1, s1, 0
-; CHECK-NEXT: s_cmp_eq_u64 s[0:1], 0
-; CHECK-NEXT: v_mov_b32_e32 v3, s1
; CHECK-NEXT: v_mov_b32_e32 v2, s0
+; CHECK-NEXT: v_mov_b32_e32 v3, s1
; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
@@ -350,15 +338,15 @@ define amdgpu_ps i64 @s_uadd_n1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
define amdgpu_ps i64 @s_usub_p1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_usub_p1:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_add_u32 s2, s0, -1
-; CHECK-NEXT: v_mov_b32_e32 v3, s1
-; CHECK-NEXT: s_addc_u32 s3, s1, -1
+; CHECK-NEXT: s_sub_u32 s0, s0, 1
+; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[2:3], 0
+; CHECK-NEXT: s_subb_u32 s1, s1, 0
; CHECK-NEXT: v_mov_b32_e32 v2, s0
-; CHECK-NEXT: v_mov_b32_e32 v5, s3
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
-; CHECK-NEXT: v_mov_b32_e32 v4, s2
-; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: s_mov_b32 s1, s0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -374,15 +362,15 @@ define amdgpu_ps i64 @s_usub_p1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
define amdgpu_ps i64 @s_usub_n1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
; CHECK-LABEL: s_usub_n1:
; CHECK: ; %bb.0:
-; CHECK-NEXT: s_add_u32 s2, s0, 1
-; CHECK-NEXT: v_mov_b32_e32 v3, s1
-; CHECK-NEXT: s_addc_u32 s3, s1, 0
+; CHECK-NEXT: s_sub_u32 s0, s0, -1
+; CHECK-NEXT: s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT: s_cmp_lg_u64 s[2:3], 0
+; CHECK-NEXT: s_subb_u32 s1, s1, -1
; CHECK-NEXT: v_mov_b32_e32 v2, s0
-; CHECK-NEXT: v_mov_b32_e32 v5, s3
-; CHECK-NEXT: v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
-; CHECK-NEXT: v_mov_b32_e32 v4, s2
-; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
-; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT: v_mov_b32_e32 v3, s1
+; CHECK-NEXT: s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
+; CHECK-NEXT: v_cndmask_b32_e64 v0, 0, -1, s[0:1]
; CHECK-NEXT: v_readfirstlane_b32 s0, v0
; CHECK-NEXT: s_mov_b32 s1, s0
; CHECK-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
new file mode 100644
index 0000000..6c4f504
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-uniform-waterfall.ll
@@ -0,0 +1,452 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -amdgpu-enable-uniform-intrinsic-combine=0 -O3 -S < %s | FileCheck %s -check-prefix=CURRENT-CHECK
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -O3 -S < %s | FileCheck %s -check-prefix=O3-CHECK
+
+define protected amdgpu_kernel void @trivial_waterfall_eq_zero(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CURRENT-CHECK-NEXT: [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT: [[IS_DONE_PEEL:%.*]] = icmp eq i32 [[TMP0]], 0
+; CURRENT-CHECK-NEXT: br i1 [[IS_DONE_PEEL]], label %[[EXIT:.*]], label %[[IF_PEEL:.*]]
+; CURRENT-CHECK: [[IF_PEEL]]:
+; CURRENT-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: br label %[[EXIT]]
+; CURRENT-CHECK: [[EXIT]]:
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
+; PASS-CHECK-NEXT: [[ENTRY:.*]]:
+; PASS-CHECK-NEXT: br label %[[WHILE:.*]]
+; PASS-CHECK: [[WHILE]]:
+; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT: [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; PASS-CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
+; PASS-CHECK-NEXT: br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF]]
+; PASS-CHECK: [[IF]]:
+; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: br label %[[WHILE]]
+; PASS-CHECK: [[EXIT]]:
+; PASS-CHECK-NEXT: ret void
+;
+; O3-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero(
+; O3-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; O3-CHECK-NEXT: [[ENTRY:.*:]]
+; O3-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; O3-CHECK-NEXT: ret void
+;
+entry:
+ br label %while
+
+while:
+ %done = phi i1 [ 0, %entry ], [ 1, %if ]
+ %not_done = xor i1 %done, true
+ %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %not_done)
+ %is_done = icmp eq i64 %ballot, 0 ; in this case is_done = !not_done
+ br i1 %is_done, label %exit, label %if
+
+if:
+ store i32 5, ptr addrspace(1) %out
+ br label %while
+
+exit:
+ ret void
+}
+
+define protected amdgpu_kernel void @trivial_waterfall_eq_zero_swap_op(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero_swap_op(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT: [[IS_DONE_PEEL:%.*]] = icmp eq i32 [[TMP0]], 0
+; CURRENT-CHECK-NEXT: br i1 [[IS_DONE_PEEL]], label %[[EXIT:.*]], label %[[IF_PEEL:.*]]
+; CURRENT-CHECK: [[IF_PEEL]]:
+; CURRENT-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: br label %[[EXIT]]
+; CURRENT-CHECK: [[EXIT]]:
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero_swap_op(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[ENTRY:.*]]:
+; PASS-CHECK-NEXT: br label %[[WHILE:.*]]
+; PASS-CHECK: [[WHILE]]:
+; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT: [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; PASS-CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
+; PASS-CHECK-NEXT: br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF]]
+; PASS-CHECK: [[IF]]:
+; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: br label %[[WHILE]]
+; PASS-CHECK: [[EXIT]]:
+; PASS-CHECK-NEXT: ret void
+;
+; O3-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero_swap_op(
+; O3-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; O3-CHECK-NEXT: [[ENTRY:.*:]]
+; O3-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; O3-CHECK-NEXT: ret void
+;
+entry:
+ br label %while
+
+while:
+ %done = phi i1 [ 0, %entry ], [ 1, %if ]
+ %not_done = xor i1 %done, true
+ %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %not_done)
+ %is_done = icmp eq i64 0, %ballot ; in this case is_done = !not_done
+ br i1 %is_done, label %exit, label %if
+
+if:
+ store i32 5, ptr addrspace(1) %out
+ br label %while
+
+exit:
+ ret void
+}
+
+define protected amdgpu_kernel void @trivial_waterfall_ne_zero(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1:[0-9]+]] {
+; CURRENT-CHECK-NEXT: [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: br label %[[WHILE:.*]]
+; CURRENT-CHECK: [[WHILE]]:
+; CURRENT-CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT: [[IS_DONE_NOT:%.*]] = icmp eq i32 [[TMP0]], 0
+; CURRENT-CHECK-NEXT: br i1 [[IS_DONE_NOT]], label %[[WHILE]], label %[[EXIT:.*]], !llvm.loop [[LOOP0:![0-9]+]]
+; CURRENT-CHECK: [[EXIT]]:
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[ENTRY:.*]]:
+; PASS-CHECK-NEXT: br label %[[WHILE:.*]]
+; PASS-CHECK: [[WHILE]]:
+; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[IF]]
+; PASS-CHECK: [[IF]]:
+; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: br label %[[WHILE]]
+; PASS-CHECK: [[EXIT]]:
+; PASS-CHECK-NEXT: ret void
+;
+; O3-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero(
+; O3-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; O3-CHECK-NEXT: [[ENTRY:.*:]]
+; O3-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; O3-CHECK-NEXT: ret void
+;
+entry:
+ br label %while
+
+while:
+ %done = phi i1 [ 0, %entry ], [ 1, %if ]
+ %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %done)
+ %is_done = icmp ne i64 0, %ballot ; in this case is_done = done
+ br i1 %is_done, label %exit, label %if
+
+if:
+ store i32 5, ptr addrspace(1) %out
+ br label %while
+
+exit:
+ ret void
+}
+
+define protected amdgpu_kernel void @trivial_waterfall_ne_zero_swap(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero_swap(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT: [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: br label %[[WHILE:.*]]
+; CURRENT-CHECK: [[WHILE]]:
+; CURRENT-CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT: [[IS_DONE_NOT:%.*]] = icmp eq i32 [[TMP0]], 0
+; CURRENT-CHECK-NEXT: br i1 [[IS_DONE_NOT]], label %[[WHILE]], label %[[EXIT:.*]], !llvm.loop [[LOOP2:![0-9]+]]
+; CURRENT-CHECK: [[EXIT]]:
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero_swap(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[ENTRY:.*]]:
+; PASS-CHECK-NEXT: br label %[[WHILE:.*]]
+; PASS-CHECK: [[WHILE]]:
+; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[IF]]
+; PASS-CHECK: [[IF]]:
+; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: br label %[[WHILE]]
+; PASS-CHECK: [[EXIT]]:
+; PASS-CHECK-NEXT: ret void
+;
+; O3-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero_swap(
+; O3-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; O3-CHECK-NEXT: [[ENTRY:.*:]]
+; O3-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; O3-CHECK-NEXT: ret void
+;
+entry:
+ br label %while
+
+while:
+ %done = phi i1 [ 0, %entry ], [ 1, %if ]
+ %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %done)
+ %is_done = icmp ne i64 %ballot, 0 ; in this case is_done = done
+ br i1 %is_done, label %exit, label %if
+
+if:
+ store i32 5, ptr addrspace(1) %out
+ br label %while
+
+exit:
+ ret void
+}
+
+define protected amdgpu_kernel void @trivial_uniform_waterfall(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_uniform_waterfall(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT: [[IS_DONE_PEEL:%.*]] = icmp eq i32 [[TMP0]], 0
+; CURRENT-CHECK-NEXT: br i1 [[IS_DONE_PEEL]], label %[[EXIT:.*]], label %[[WORK_PEEL:.*]]
+; CURRENT-CHECK: [[WORK_PEEL]]:
+; CURRENT-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: br label %[[EXIT]]
+; CURRENT-CHECK: [[EXIT]]:
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_uniform_waterfall(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[ENTRY:.*]]:
+; PASS-CHECK-NEXT: br label %[[WHILE:.*]]
+; PASS-CHECK: [[WHILE]]:
+; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[NEW_DONE:%.*]], %[[TAIL:.*]] ]
+; PASS-CHECK-NEXT: [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; PASS-CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
+; PASS-CHECK-NEXT: br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF:.*]]
+; PASS-CHECK: [[IF]]:
+; PASS-CHECK-NEXT: [[IS_FIRST_ACTIVE_ID:%.*]] = icmp eq i32 0, 0
+; PASS-CHECK-NEXT: br i1 [[IS_FIRST_ACTIVE_ID]], label %[[WORK:.*]], label %[[TAIL]]
+; PASS-CHECK: [[WORK]]:
+; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: br label %[[TAIL]]
+; PASS-CHECK: [[TAIL]]:
+; PASS-CHECK-NEXT: [[NEW_DONE]] = phi i1 [ true, %[[WORK]] ], [ false, %[[IF]] ]
+; PASS-CHECK-NEXT: br label %[[WHILE]]
+; PASS-CHECK: [[EXIT]]:
+; PASS-CHECK-NEXT: ret void
+;
+; O3-CHECK-LABEL: define protected amdgpu_kernel void @trivial_uniform_waterfall(
+; O3-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; O3-CHECK-NEXT: [[ENTRY:.*:]]
+; O3-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; O3-CHECK-NEXT: ret void
+;
+entry:
+ br label %while
+
+while:
+ %done = phi i1 [ false, %entry ], [ %new_done, %tail ]
+ %not_done = xor i1 %done, true
+ %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %not_done)
+ %is_done = icmp eq i64 %ballot, 0
+ br i1 %is_done, label %exit, label %if
+
+if:
+ %first_active_id = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 0)
+ %is_first_active_id = icmp eq i32 0, %first_active_id
+ br i1 %is_first_active_id, label %work, label %tail
+
+work:
+ store i32 5, ptr addrspace(1) %out
+ br label %tail
+
+tail:
+ %new_done = phi i1 [ true, %work ], [ false, %if ]
+ br label %while
+
+exit:
+ ret void
+}
+
+define protected amdgpu_kernel void @uniform_waterfall(ptr addrspace(1) %out, i32 %mymask) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @uniform_waterfall(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]], i32 [[MYMASK:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT: [[IS_DONE_PEEL:%.*]] = icmp eq i32 [[TMP0]], 0
+; CURRENT-CHECK-NEXT: br i1 [[IS_DONE_PEEL]], label %[[EXIT:.*]], label %[[WORK_PEEL:.*]]
+; CURRENT-CHECK: [[WORK_PEEL]]:
+; CURRENT-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: br label %[[EXIT]]
+; CURRENT-CHECK: [[EXIT]]:
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @uniform_waterfall(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[MYMASK:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[ENTRY:.*]]:
+; PASS-CHECK-NEXT: br label %[[WHILE:.*]]
+; PASS-CHECK: [[WHILE]]:
+; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ [[NEW_DONE:%.*]], %[[TAIL:.*]] ]
+; PASS-CHECK-NEXT: [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; PASS-CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
+; PASS-CHECK-NEXT: br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF:.*]]
+; PASS-CHECK: [[IF]]:
+; PASS-CHECK-NEXT: [[IS_FIRST_ACTIVE_ID:%.*]] = icmp eq i32 [[MYMASK]], [[MYMASK]]
+; PASS-CHECK-NEXT: br i1 [[IS_FIRST_ACTIVE_ID]], label %[[WORK:.*]], label %[[TAIL]]
+; PASS-CHECK: [[WORK]]:
+; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: br label %[[TAIL]]
+; PASS-CHECK: [[TAIL]]:
+; PASS-CHECK-NEXT: [[NEW_DONE]] = phi i1 [ true, %[[WORK]] ], [ false, %[[IF]] ]
+; PASS-CHECK-NEXT: br label %[[WHILE]]
+; PASS-CHECK: [[EXIT]]:
+; PASS-CHECK-NEXT: ret void
+;
+; O3-CHECK-LABEL: define protected amdgpu_kernel void @uniform_waterfall(
+; O3-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]], i32 [[MYMASK:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; O3-CHECK-NEXT: [[ENTRY:.*:]]
+; O3-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; O3-CHECK-NEXT: ret void
+;
+entry:
+ br label %while
+
+while:
+ %done = phi i1 [ false, %entry ], [ %new_done, %tail ]
+ %not_done = xor i1 %done, true
+ %ballot = tail call i64 @llvm.amdgcn.ballot.i64(i1 %not_done)
+ %is_done = icmp eq i64 %ballot, 0
+ br i1 %is_done, label %exit, label %if
+
+if:
+ %first_active_id = tail call noundef i32 @llvm.amdgcn.readfirstlane.i32(i32 %mymask)
+ %is_first_active_id = icmp eq i32 %mymask, %first_active_id
+ br i1 %is_first_active_id, label %work, label %tail
+
+work:
+ store i32 5, ptr addrspace(1) %out
+ br label %tail
+
+tail:
+ %new_done = phi i1 [ true, %work ], [ false, %if ]
+ br label %while
+
+exit:
+ ret void
+}
+
+define protected amdgpu_kernel void @trivial_waterfall_eq_zero_i32(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero_i32(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT: [[BALLOT_PEEL:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT: [[IS_DONE_PEEL:%.*]] = icmp eq i32 [[BALLOT_PEEL]], 0
+; CURRENT-CHECK-NEXT: br i1 [[IS_DONE_PEEL]], label %[[EXIT:.*]], label %[[IF_PEEL:.*]]
+; CURRENT-CHECK: [[IF_PEEL]]:
+; CURRENT-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: br label %[[EXIT]]
+; CURRENT-CHECK: [[EXIT]]:
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero_i32(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[ENTRY:.*]]:
+; PASS-CHECK-NEXT: br label %[[WHILE:.*]]
+; PASS-CHECK: [[WHILE]]:
+; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT: [[NOT_DONE:%.*]] = xor i1 [[DONE]], true
+; PASS-CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[NOT_DONE]], true
+; PASS-CHECK-NEXT: br i1 [[TMP0]], label %[[EXIT:.*]], label %[[IF]]
+; PASS-CHECK: [[IF]]:
+; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: br label %[[WHILE]]
+; PASS-CHECK: [[EXIT]]:
+; PASS-CHECK-NEXT: ret void
+;
+; O3-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_eq_zero_i32(
+; O3-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; O3-CHECK-NEXT: [[ENTRY:.*:]]
+; O3-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; O3-CHECK-NEXT: ret void
+;
+entry:
+ br label %while
+
+while:
+ %done = phi i1 [ 0, %entry ], [ 1, %if ]
+ %not_done = xor i1 %done, true
+ %ballot = tail call i32 @llvm.amdgcn.ballot.i32(i1 %not_done)
+ %is_done = icmp eq i32 %ballot, 0 ; in this case is_done = !not_done
+ br i1 %is_done, label %exit, label %if
+
+if:
+ store i32 5, ptr addrspace(1) %out
+ br label %while
+
+exit:
+ ret void
+}
+
+define protected amdgpu_kernel void @trivial_waterfall_ne_zero_i32(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero_i32(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT: [[ENTRY:.*:]]
+; CURRENT-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: br label %[[WHILE:.*]]
+; CURRENT-CHECK: [[WHILE]]:
+; CURRENT-CHECK-NEXT: [[BALLOT:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 true)
+; CURRENT-CHECK-NEXT: [[IS_DONE_NOT:%.*]] = icmp eq i32 [[BALLOT]], 0
+; CURRENT-CHECK-NEXT: br i1 [[IS_DONE_NOT]], label %[[WHILE]], label %[[EXIT:.*]], !llvm.loop [[LOOP3:![0-9]+]]
+; CURRENT-CHECK: [[EXIT]]:
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero_i32(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[ENTRY:.*]]:
+; PASS-CHECK-NEXT: br label %[[WHILE:.*]]
+; PASS-CHECK: [[WHILE]]:
+; PASS-CHECK-NEXT: [[DONE:%.*]] = phi i1 [ false, %[[ENTRY]] ], [ true, %[[IF:.*]] ]
+; PASS-CHECK-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[IF]]
+; PASS-CHECK: [[IF]]:
+; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: br label %[[WHILE]]
+; PASS-CHECK: [[EXIT]]:
+; PASS-CHECK-NEXT: ret void
+;
+; O3-CHECK-LABEL: define protected amdgpu_kernel void @trivial_waterfall_ne_zero_i32(
+; O3-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; O3-CHECK-NEXT: [[ENTRY:.*:]]
+; O3-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; O3-CHECK-NEXT: ret void
+;
+entry:
+ br label %while
+
+while:
+ %done = phi i1 [ 0, %entry ], [ 1, %if ]
+ %ballot = tail call i32 @llvm.amdgcn.ballot.i32(i1 %done)
+ %is_done = icmp ne i32 0, %ballot ; in this case is_done = done
+ br i1 %is_done, label %exit, label %if
+
+if:
+ store i32 5, ptr addrspace(1) %out
+ br label %while
+
+exit:
+ ret void
+}
+
+declare i64 @llvm.amdgcn.ballot.i64(i1) #1
+!6 = !{i64 690}
+!7 = distinct !{!7, !8}
+!8 = !{!"llvm.loop.mustprogress"}
+;.
+; CURRENT-CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]]}
+; CURRENT-CHECK: [[META1]] = !{!"llvm.loop.peeled.count", i32 1}
+; CURRENT-CHECK: [[LOOP2]] = distinct !{[[LOOP2]], [[META1]]}
+; CURRENT-CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
+;.
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
new file mode 100644
index 0000000..aa11574
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-intrinsic-combine.ll
@@ -0,0 +1,790 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -amdgpu-enable-uniform-intrinsic-combine=0 -O3 -S < %s | FileCheck %s -check-prefix=CURRENT-CHECK
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine,dce -S < %s | FileCheck %s -check-prefix=DCE-CHECK
+
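+; CURRENT-CHECK runs the full -O3 pipeline with the combine disabled,
+; PASS-CHECK runs only amdgpu-uniform-intrinsic-combine, and DCE-CHECK
+; additionally runs dce to clean up the calls the combine leaves dead.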
+define amdgpu_kernel void @permlane64_constant(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @permlane64_constant(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CURRENT-CHECK-NEXT: store i32 77, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_constant(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
+; PASS-CHECK-NEXT: store i32 77, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @permlane64_constant(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0:[0-9]+]] {
+; DCE-CHECK-NEXT: store i32 77, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %v = call i32 @llvm.amdgcn.permlane64(i32 77)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @permlane64_uniform(ptr addrspace(1) %out, i32 %src) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @permlane64_uniform(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]], i32 [[SRC:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: store i32 [[SRC]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_uniform(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: store i32 [[SRC]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @permlane64_uniform(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: store i32 [[SRC]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %v = call i32 @llvm.amdgcn.permlane64(i32 %src)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
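+; workitem.id.x is divergent, so this permlane64 must be kept; only the
+; uniform operands above allow the call to be replaced by its source value.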
+define amdgpu_kernel void @permlane64_nonuniform(i32 addrspace(1)* %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @permlane64_nonuniform(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1:[0-9]+]] {
+; CURRENT-CHECK-NEXT: [[TID:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT: [[V:%.*]] = tail call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID]])
+; CURRENT-CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[TID]] to i64
+; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_nonuniform(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID]])
+; PASS-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID]]
+; PASS-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @permlane64_nonuniform(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID]])
+; DCE-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID]]
+; DCE-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %v = call i32 @llvm.amdgcn.permlane64(i32 %tid)
+ %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+ store i32 %v, i32 addrspace(1)* %out_ptr
+ ret void
+}
+
+define amdgpu_kernel void @permlane64_nonuniform_expression(i32 addrspace(1)* %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @permlane64_nonuniform_expression(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT: [[TID:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT: [[TID2:%.*]] = add nuw nsw i32 [[TID]], 1
+; CURRENT-CHECK-NEXT: [[V:%.*]] = tail call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID2]])
+; CURRENT-CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[TID]] to i64
+; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_nonuniform_expression(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: [[TID2:%.*]] = add i32 [[TID]], 1
+; PASS-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID2]])
+; PASS-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID]]
+; PASS-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @permlane64_nonuniform_expression(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT: [[TID2:%.*]] = add i32 [[TID]], 1
+; DCE-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.permlane64.i32(i32 [[TID2]])
+; DCE-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID]]
+; DCE-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid2 = add i32 %tid, 1
+ %v = call i32 @llvm.amdgcn.permlane64(i32 %tid2)
+ %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+ store i32 %v, i32 addrspace(1)* %out_ptr
+ ret void
+}
+
+define amdgpu_kernel void @readlane_constant(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_constant(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: store i32 7, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_constant(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: store i32 7, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_constant(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: store i32 7, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %v = call i32 @llvm.amdgcn.readlane(i32 7, i32 5)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readlane_nonuniform_indices(ptr addrspace(1) %out, i32 %src0, i32 %src1) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_indices(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]], i32 [[SRC0:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_indices(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_indices(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %v = call i32 @llvm.amdgcn.readlane(i32 %src0, i32 %src1)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readlane_nonuniform_workitem(i32 addrspace(1)* %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_workitem(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR2:[0-9]+]] {
+; CURRENT-CHECK-NEXT: [[TIDX:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT: [[TIDY:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.y()
+; CURRENT-CHECK-NEXT: [[V:%.*]] = tail call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; CURRENT-CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[TIDX]] to i64
+; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_workitem(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; PASS-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; PASS-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TIDX]]
+; PASS-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_workitem(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT: [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; DCE-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; DCE-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TIDX]]
+; DCE-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+ %tidy = call i32 @llvm.amdgcn.workitem.id.y()
+ %v = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy)
+ %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tidx
+ store i32 %v, i32 addrspace(1)* %out_ptr
+ ret void
+}
+
+define amdgpu_kernel void @readlane_nonuniform_expression(i32 addrspace(1)* %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_expression(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
+; CURRENT-CHECK-NEXT: [[TIDX:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT: [[TIDY:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.y()
+; CURRENT-CHECK-NEXT: [[TIDX2:%.*]] = add nuw nsw i32 [[TIDX]], 1
+; CURRENT-CHECK-NEXT: [[TIDY2:%.*]] = add nuw nsw i32 [[TIDY]], 2
+; CURRENT-CHECK-NEXT: [[V:%.*]] = tail call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX2]], i32 [[TIDY2]])
+; CURRENT-CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[TIDX]] to i64
+; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_expression(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; PASS-CHECK-NEXT: [[TIDX2:%.*]] = add i32 [[TIDX]], 1
+; PASS-CHECK-NEXT: [[TIDY2:%.*]] = add i32 [[TIDY]], 2
+; PASS-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX2]], i32 [[TIDY2]])
+; PASS-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TIDX]]
+; PASS-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_nonuniform_expression(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT: [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; DCE-CHECK-NEXT: [[TIDX2:%.*]] = add i32 [[TIDX]], 1
+; DCE-CHECK-NEXT: [[TIDY2:%.*]] = add i32 [[TIDY]], 2
+; DCE-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX2]], i32 [[TIDY2]])
+; DCE-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TIDX]]
+; DCE-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+ %tidy = call i32 @llvm.amdgcn.workitem.id.y()
+ %tidx2 = add i32 %tidx, 1
+ %tidy2 = add i32 %tidy, 2
+ %v = call i32 @llvm.amdgcn.readlane(i32 %tidx2, i32 %tidy2)
+ %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tidx
+ store i32 %v, i32 addrspace(1)* %out_ptr
+ ret void
+}
+
+define amdgpu_kernel void @readfirstlane_constant(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_constant(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: store i32 7, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_constant(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: store i32 7, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_constant(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: store i32 7, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %v = call i32 @llvm.amdgcn.readfirstlane(i32 7)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readfirstlane_with_argument(ptr addrspace(1) %out, i32 %src0) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_argument(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]], i32 [[SRC0:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_argument(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_argument(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[SRC0:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: store i32 [[SRC0]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %v = call i32 @llvm.amdgcn.readfirstlane(i32 %src0)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readfirstlane_with_workitem_id(i32 addrspace(1)* %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_workitem_id(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT: [[TID:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT: [[V:%.*]] = tail call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID]])
+; CURRENT-CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[TID]] to i64
+; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_workitem_id(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID]])
+; PASS-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID]]
+; PASS-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_workitem_id(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID]])
+; DCE-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID]]
+; DCE-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %v = call i32 @llvm.amdgcn.readfirstlane(i32 %tid)
+ %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+ store i32 %v, i32 addrspace(1)* %out_ptr
+ ret void
+}
+
+define amdgpu_kernel void @readfirstlane_expression(i32 addrspace(1)* %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_expression(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT: [[TID:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT: [[TID2:%.*]] = add nuw nsw i32 [[TID]], 1
+; CURRENT-CHECK-NEXT: [[V:%.*]] = tail call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID2]])
+; CURRENT-CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[TID2]] to i64
+; CURRENT-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i64 [[TMP1]]
+; CURRENT-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_expression(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: [[TID2:%.*]] = add i32 [[TID]], 1
+; PASS-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID2]])
+; PASS-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID2]]
+; PASS-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_expression(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT: [[TID2:%.*]] = add i32 [[TID]], 1
+; DCE-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TID2]])
+; DCE-CHECK-NEXT: [[OUT_PTR:%.*]] = getelementptr i32, ptr addrspace(1) [[OUT]], i32 [[TID2]]
+; DCE-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT_PTR]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %tid2 = add i32 %tid, 1
+ %v = call i32 @llvm.amdgcn.readfirstlane(i32 %tid2)
+ %out_ptr = getelementptr i32, i32 addrspace(1)* %out, i32 %tid2
+ store i32 %v, i32 addrspace(1)* %out_ptr
+ ret void
+}
+
+define amdgpu_kernel void @readfirstlane_with_readfirstlane(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_readfirstlane(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_readfirstlane(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_readfirstlane(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: store i32 5, ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %v1 = call i32 @llvm.amdgcn.readfirstlane(i32 5)
+ %v2 = call i32 @llvm.amdgcn.readfirstlane(i32 %v1)
+ store i32 %v2, ptr addrspace(1) %out
+ ret void
+}
+
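+; readlane already produces a wave-uniform value, so a readfirstlane (or a
+; further readlane) of its result is redundant and is folded away in the
+; next few tests.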
+define amdgpu_kernel void @readfirstlane_with_readlane(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_readlane(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
+; CURRENT-CHECK-NEXT: [[TIDX:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT: [[TIDY:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.y()
+; CURRENT-CHECK-NEXT: [[V1:%.*]] = tail call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; CURRENT-CHECK-NEXT: store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_readlane(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; PASS-CHECK-NEXT: [[V1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; PASS-CHECK-NEXT: store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_with_readlane(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT: [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; DCE-CHECK-NEXT: [[V1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; DCE-CHECK-NEXT: store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+ %tidy = call i32 @llvm.amdgcn.workitem.id.y()
+ %v1 = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy)
+ %v2 = call i32 @llvm.amdgcn.readfirstlane(i32 %v1)
+ store i32 %v2, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readlane_with_firstlane(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_with_firstlane(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT: [[TIDX:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT: [[V1:%.*]] = tail call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TIDX]])
+; CURRENT-CHECK-NEXT: store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_with_firstlane(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: [[V1:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TIDX]])
+; PASS-CHECK-NEXT: store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_with_firstlane(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT: [[V1:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TIDX]])
+; DCE-CHECK-NEXT: store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+ %v1 = call i32 @llvm.amdgcn.readfirstlane(i32 %tidx)
+ %v2 = call i32 @llvm.amdgcn.readlane(i32 %v1, i32 3)
+ store i32 %v2, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readlane_readlane(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_readlane(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR2]] {
+; CURRENT-CHECK-NEXT: [[TIDX:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT: [[TIDY:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.y()
+; CURRENT-CHECK-NEXT: [[V1:%.*]] = tail call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; CURRENT-CHECK-NEXT: store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_readlane(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; PASS-CHECK-NEXT: [[V1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; PASS-CHECK-NEXT: store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_readlane(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT: [[TIDY:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
+; DCE-CHECK-NEXT: [[V1:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; DCE-CHECK-NEXT: store i32 [[V1]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+ %tidy = call i32 @llvm.amdgcn.workitem.id.y()
+ %v1 = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy)
+ %v2 = call i32 @llvm.amdgcn.readlane(i32 %v1, i32 2)
+ store i32 %v2, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @permlane64_boundary(ptr addrspace(1) %out_min, ptr addrspace(1) %out_max) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @permlane64_boundary(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT_MIN:%.*]], ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT_MAX:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: store i32 -2147483648, ptr addrspace(1) [[OUT_MIN]], align 4
+; CURRENT-CHECK-NEXT: store i32 2147483647, ptr addrspace(1) [[OUT_MAX]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @permlane64_boundary(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT_MIN:%.*]], ptr addrspace(1) [[OUT_MAX:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: store i32 -2147483648, ptr addrspace(1) [[OUT_MIN]], align 4
+; PASS-CHECK-NEXT: store i32 2147483647, ptr addrspace(1) [[OUT_MAX]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @permlane64_boundary(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT_MIN:%.*]], ptr addrspace(1) [[OUT_MAX:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: store i32 -2147483648, ptr addrspace(1) [[OUT_MIN]], align 4
+; DCE-CHECK-NEXT: store i32 2147483647, ptr addrspace(1) [[OUT_MAX]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %min_v = call i32 @llvm.amdgcn.permlane64(i32 -2147483648)
+ store i32 %min_v, ptr addrspace(1) %out_min
+ %max_v = call i32 @llvm.amdgcn.permlane64(i32 2147483647)
+ store i32 %max_v, ptr addrspace(1) %out_max
+ ret void
+}
+
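+; The value operand %tidx is divergent (and so is the lane index derived
+; from it), so this readlane is not folded.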
+define amdgpu_kernel void @readlane_cross_lane(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_cross_lane(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT: [[TIDX:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT: [[TIDY:%.*]] = add nuw nsw i32 [[TIDX]], 5
+; CURRENT-CHECK-NEXT: [[V:%.*]] = tail call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; CURRENT-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_cross_lane(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: [[TIDY:%.*]] = add i32 [[TIDX]], 5
+; PASS-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; PASS-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_cross_lane(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[TIDX:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT: [[TIDY:%.*]] = add i32 [[TIDX]], 5
+; DCE-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[TIDX]], i32 [[TIDY]])
+; DCE-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %tidx = call i32 @llvm.amdgcn.workitem.id.x()
+ %tidy = add i32 %tidx, 5
+ %v = call i32 @llvm.amdgcn.readlane(i32 %tidx, i32 %tidy)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readfirstlane_random(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_random(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CURRENT-CHECK-NEXT: store i32 435, ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_random(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[RANDOM:%.*]] = xor i32 123, 456
+; PASS-CHECK-NEXT: store i32 [[RANDOM]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readfirstlane_random(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[RANDOM:%.*]] = xor i32 123, 456
+; DCE-CHECK-NEXT: store i32 [[RANDOM]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %random = xor i32 123, 456
+ %v = call i32 @llvm.amdgcn.readfirstlane(i32 %random)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @readlane_expression(ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @readlane_expression(
+; CURRENT-CHECK-SAME: ptr addrspace(1) writeonly captures(none) initializes((0, 4)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT: [[IDX1:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
+; CURRENT-CHECK-NEXT: [[IDX2:%.*]] = shl nuw nsw i32 [[IDX1]], 1
+; CURRENT-CHECK-NEXT: [[V:%.*]] = tail call i32 @llvm.amdgcn.readlane.i32(i32 [[IDX1]], i32 [[IDX2]])
+; CURRENT-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @readlane_expression(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[IDX1:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: [[IDX2:%.*]] = mul i32 [[IDX1]], 2
+; PASS-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[IDX1]], i32 [[IDX2]])
+; PASS-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @readlane_expression(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[IDX1:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; DCE-CHECK-NEXT: [[IDX2:%.*]] = mul i32 [[IDX1]], 2
+; DCE-CHECK-NEXT: [[V:%.*]] = call i32 @llvm.amdgcn.readlane.i32(i32 [[IDX1]], i32 [[IDX2]])
+; DCE-CHECK-NEXT: store i32 [[V]], ptr addrspace(1) [[OUT]], align 4
+; DCE-CHECK-NEXT: ret void
+;
+ %idx1 = call i32 @llvm.amdgcn.workitem.id.x()
+ %idx2 = mul i32 %idx1, 2
+ %v = call i32 @llvm.amdgcn.readlane(i32 %idx1, i32 %idx2)
+ store i32 %v, ptr addrspace(1) %out
+ ret void
+}
+
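+; For a uniform condition every active lane contributes the same ballot bit,
+; so ballot(%c) != 0 reduces to %c itself for both the i32 and i64 overloads.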
+define amdgpu_kernel void @ballot_i32(i32 %v, ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @ballot_i32(
+; CURRENT-CHECK-SAME: i32 [[V:%.*]], ptr addrspace(1) writeonly captures(none) initializes((0, 1)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT: [[C:%.*]] = trunc i32 [[V]] to i1
+; CURRENT-CHECK-NEXT: [[BALLOT:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 [[C]])
+; CURRENT-CHECK-NEXT: [[BALLOT_NE_ZERO:%.*]] = icmp ne i32 [[BALLOT]], 0
+; CURRENT-CHECK-NEXT: store i1 [[BALLOT_NE_ZERO]], ptr addrspace(1) [[OUT]], align 1
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @ballot_i32(
+; PASS-CHECK-SAME: i32 [[V:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[C:%.*]] = trunc i32 [[V]] to i1
+; PASS-CHECK-NEXT: store i1 [[C]], ptr addrspace(1) [[OUT]], align 1
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @ballot_i32(
+; DCE-CHECK-SAME: i32 [[V:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[C:%.*]] = trunc i32 [[V]] to i1
+; DCE-CHECK-NEXT: store i1 [[C]], ptr addrspace(1) [[OUT]], align 1
+; DCE-CHECK-NEXT: ret void
+;
+ %c = trunc i32 %v to i1
+ %ballot = call i32 @llvm.amdgcn.ballot.i32(i1 %c)
+ %ballot_ne_zero = icmp ne i32 %ballot, 0
+ store i1 %ballot_ne_zero, ptr addrspace(1) %out
+ ret void
+}
+
+define amdgpu_kernel void @ballot_i64(i32 %v, ptr addrspace(1) %out) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @ballot_i64(
+; CURRENT-CHECK-SAME: i32 [[V:%.*]], ptr addrspace(1) writeonly captures(none) initializes((0, 1)) [[OUT:%.*]]) local_unnamed_addr #[[ATTR1]] {
+; CURRENT-CHECK-NEXT: [[C:%.*]] = trunc i32 [[V]] to i1
+; CURRENT-CHECK-NEXT: [[TMP1:%.*]] = tail call i32 @llvm.amdgcn.ballot.i32(i1 [[C]])
+; CURRENT-CHECK-NEXT: [[BALLOT_NE_ZERO:%.*]] = icmp ne i32 [[TMP1]], 0
+; CURRENT-CHECK-NEXT: store i1 [[BALLOT_NE_ZERO]], ptr addrspace(1) [[OUT]], align 1
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @ballot_i64(
+; PASS-CHECK-SAME: i32 [[V:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[C:%.*]] = trunc i32 [[V]] to i1
+; PASS-CHECK-NEXT: store i1 [[C]], ptr addrspace(1) [[OUT]], align 1
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @ballot_i64(
+; DCE-CHECK-SAME: i32 [[V:%.*]], ptr addrspace(1) [[OUT:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[C:%.*]] = trunc i32 [[V]] to i1
+; DCE-CHECK-NEXT: store i1 [[C]], ptr addrspace(1) [[OUT]], align 1
+; DCE-CHECK-NEXT: ret void
+;
+ %c = trunc i32 %v to i1
+ %ballot = call i64 @llvm.amdgcn.ballot.i64(i1 %c)
+ %ballot_ne_zero = icmp ne i64 %ballot, 0
+ store i1 %ballot_ne_zero, ptr addrspace(1) %out
+ ret void
+}
+
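+; The scalar overloads below all read a uniform kernel argument, so each
+; readlane folds to its source operand and only the inline-asm use remains.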
+define amdgpu_kernel void @test_readlane_i16(i16 %src0, i32 %src1) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @test_readlane_i16(
+; CURRENT-CHECK-SAME: i16 [[SRC0:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR3:[0-9]+]] {
+; CURRENT-CHECK-NEXT: tail call void asm sideeffect "
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @test_readlane_i16(
+; PASS-CHECK-SAME: i16 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: call void asm sideeffect "
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @test_readlane_i16(
+; DCE-CHECK-SAME: i16 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: call void asm sideeffect "
+; DCE-CHECK-NEXT: ret void
+;
+ %readlane = call i16 @llvm.amdgcn.readlane.i16(i16 %src0, i32 %src1)
+ call void asm sideeffect "; use $0", "s"(i16 %readlane)
+ ret void
+}
+
+define amdgpu_kernel void @test_readlane_i64(i64 %src0, i32 %src1) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @test_readlane_i64(
+; CURRENT-CHECK-SAME: i64 [[SRC0:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CURRENT-CHECK-NEXT: tail call void asm sideeffect "
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @test_readlane_i64(
+; PASS-CHECK-SAME: i64 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: call void asm sideeffect "
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @test_readlane_i64(
+; DCE-CHECK-SAME: i64 [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: call void asm sideeffect "
+; DCE-CHECK-NEXT: ret void
+;
+ %readlane = call i64 @llvm.amdgcn.readlane.i64(i64 %src0, i32 %src1)
+ call void asm sideeffect "; use $0", "s"(i64 %readlane)
+ ret void
+}
+
+define amdgpu_kernel void @test_readlane_bf16(bfloat %src0, i32 %src1) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @test_readlane_bf16(
+; CURRENT-CHECK-SAME: bfloat [[SRC0:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CURRENT-CHECK-NEXT: tail call void asm sideeffect "
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @test_readlane_bf16(
+; PASS-CHECK-SAME: bfloat [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: call void asm sideeffect "
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @test_readlane_bf16(
+; DCE-CHECK-SAME: bfloat [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: call void asm sideeffect "
+; DCE-CHECK-NEXT: ret void
+;
+ %readlane = call bfloat @llvm.amdgcn.readlane.bf16(bfloat %src0, i32 %src1)
+ call void asm sideeffect "; use $0", "s"(bfloat %readlane)
+ ret void
+}
+
+define amdgpu_kernel void @test_readlane_f16(half %src0, i32 %src1) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f16(
+; CURRENT-CHECK-SAME: half [[SRC0:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CURRENT-CHECK-NEXT: tail call void asm sideeffect "
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f16(
+; PASS-CHECK-SAME: half [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: call void asm sideeffect "
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f16(
+; DCE-CHECK-SAME: half [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: call void asm sideeffect "
+; DCE-CHECK-NEXT: ret void
+;
+ %readlane = call half @llvm.amdgcn.readlane.f16(half %src0, i32 %src1)
+ call void asm sideeffect "; use $0", "s"(half %readlane)
+ ret void
+}
+
+define amdgpu_kernel void @test_readlane_f32(float %src0, i32 %src1) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f32(
+; CURRENT-CHECK-SAME: float [[SRC0:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CURRENT-CHECK-NEXT: tail call void asm sideeffect "
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f32(
+; PASS-CHECK-SAME: float [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: call void asm sideeffect "
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f32(
+; DCE-CHECK-SAME: float [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: call void asm sideeffect "
+; DCE-CHECK-NEXT: ret void
+;
+ %readlane = call float @llvm.amdgcn.readlane.f32(float %src0, i32 %src1)
+ call void asm sideeffect "; use $0", "s"(float %readlane)
+ ret void
+}
+
+define amdgpu_kernel void @test_readlane_f64(double %src0, i32 %src1) {
+; CURRENT-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f64(
+; CURRENT-CHECK-SAME: double [[SRC0:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CURRENT-CHECK-NEXT: tail call void asm sideeffect "
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f64(
+; PASS-CHECK-SAME: double [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: call void asm sideeffect "
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define amdgpu_kernel void @test_readlane_f64(
+; DCE-CHECK-SAME: double [[SRC0:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: call void asm sideeffect "
+; DCE-CHECK-NEXT: ret void
+;
+ %readlane = call double @llvm.amdgcn.readlane.f64(double %src0, i32 %src1)
+ call void asm sideeffect "; use $0", "s"(double %readlane)
+ ret void
+}
+; All such cases can be optimized, given a generic way to query getDeclarationIfExists().
+define void @test_readlane_v8i16(ptr addrspace(1) %out, <8 x i16> %src, i32 %src1) {
+; CURRENT-CHECK-LABEL: define void @test_readlane_v8i16(
+; CURRENT-CHECK-SAME: ptr addrspace(1) readnone captures(none) [[OUT:%.*]], <8 x i16> [[SRC:%.*]], i32 [[SRC1:%.*]]) local_unnamed_addr #[[ATTR3]] {
+; CURRENT-CHECK-NEXT: [[X:%.*]] = tail call <8 x i16> @llvm.amdgcn.readlane.v8i16(<8 x i16> [[SRC]], i32 [[SRC1]])
+; CURRENT-CHECK-NEXT: tail call void asm sideeffect "
+; CURRENT-CHECK-NEXT: ret void
+;
+; PASS-CHECK-LABEL: define void @test_readlane_v8i16(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], <8 x i16> [[SRC:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; PASS-CHECK-NEXT: [[X:%.*]] = call <8 x i16> @llvm.amdgcn.readlane.v8i16(<8 x i16> [[SRC]], i32 [[SRC1]])
+; PASS-CHECK-NEXT: call void asm sideeffect "
+; PASS-CHECK-NEXT: ret void
+;
+; DCE-CHECK-LABEL: define void @test_readlane_v8i16(
+; DCE-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], <8 x i16> [[SRC:%.*]], i32 [[SRC1:%.*]]) #[[ATTR0]] {
+; DCE-CHECK-NEXT: [[X:%.*]] = call <8 x i16> @llvm.amdgcn.readlane.v8i16(<8 x i16> [[SRC]], i32 [[SRC1]])
+; DCE-CHECK-NEXT: call void asm sideeffect "
+; DCE-CHECK-NEXT: ret void
+;
+ %x = call <8 x i16> @llvm.amdgcn.readlane.v8i16(<8 x i16> %src, i32 %src1)
+ call void asm sideeffect "; use $0", "s"(<8 x i16> %x)
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll
new file mode 100644
index 0000000..2fde3e3
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-uniform-temporal-divergence.ll
@@ -0,0 +1,57 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine -S < %s | FileCheck %s -check-prefix=PASS-CHECK
+; RUN: opt -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -passes=amdgpu-uniform-intrinsic-combine,instcombine,early-cse,simplifycfg -S < %s | FileCheck %s -check-prefix=COMB-CHECK
+
+; This should not be optimized: the loop exit is divergent, so lanes reach %X
+; on different iterations, and the value of %uni.inc observed there is
+; temporally divergent across the wave; the readfirstlane must be kept.
+define amdgpu_cs void @temporal_divergence(ptr addrspace(1) %out, i32 %n) {
+; PASS-CHECK-LABEL: define amdgpu_cs void @temporal_divergence(
+; PASS-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; PASS-CHECK-NEXT: [[ENTRY:.*]]:
+; PASS-CHECK-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; PASS-CHECK-NEXT: br label %[[H:.*]]
+; PASS-CHECK: [[H]]:
+; PASS-CHECK-NEXT: [[UNI_MERGE_H:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[UNI_INC:%.*]], %[[H]] ]
+; PASS-CHECK-NEXT: [[UNI_INC]] = add i32 [[UNI_MERGE_H]], 1
+; PASS-CHECK-NEXT: [[DIV_EXITX:%.*]] = icmp eq i32 [[TID]], 0
+; PASS-CHECK-NEXT: br i1 [[DIV_EXITX]], label %[[X:.*]], label %[[H]]
+; PASS-CHECK: [[X]]:
+; PASS-CHECK-NEXT: [[UNI_JOIN:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[UNI_INC]])
+; PASS-CHECK-NEXT: [[JOIN_USER:%.*]] = add i32 [[UNI_JOIN]], 5
+; PASS-CHECK-NEXT: store i32 [[JOIN_USER]], ptr addrspace(1) [[OUT]], align 4
+; PASS-CHECK-NEXT: ret void
+;
+; COMB-CHECK-LABEL: define amdgpu_cs void @temporal_divergence(
+; COMB-CHECK-SAME: ptr addrspace(1) [[OUT:%.*]], i32 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; COMB-CHECK-NEXT: [[ENTRY:.*]]:
+; COMB-CHECK-NEXT: [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
+; COMB-CHECK-NEXT: br label %[[H:.*]]
+; COMB-CHECK: [[H]]:
+; COMB-CHECK-NEXT: [[UNI_MERGE_H:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[UNI_INC:%.*]], %[[H]] ]
+; COMB-CHECK-NEXT: [[UNI_INC]] = add i32 [[UNI_MERGE_H]], 1
+; COMB-CHECK-NEXT: [[DIV_EXITX:%.*]] = icmp eq i32 [[TID]], 0
+; COMB-CHECK-NEXT: br i1 [[DIV_EXITX]], label %[[X:.*]], label %[[H]]
+; COMB-CHECK: [[X]]:
+; COMB-CHECK-NEXT: [[UNI_JOIN:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[UNI_INC]])
+; COMB-CHECK-NEXT: [[JOIN_USER:%.*]] = add i32 [[UNI_JOIN]], 5
+; COMB-CHECK-NEXT: store i32 [[JOIN_USER]], ptr addrspace(1) [[OUT]], align 4
+; COMB-CHECK-NEXT: ret void
+;
+entry:
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ br label %H
+
+H:
+ %uni.merge.h = phi i32 [ 0, %entry ], [ %uni.inc, %H ]
+ %uni.inc = add i32 %uni.merge.h, 1
+ %div.exitx = icmp eq i32 %tid, 0
+ br i1 %div.exitx, label %X, label %H ; divergent branch
+
+X:
+ %uni.join = call i32 @llvm.amdgcn.readfirstlane.i32(i32 %uni.inc)
+ %join.user = add i32 %uni.join, 5
+ store i32 %join.user, ptr addrspace(1) %out
+ ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x()
+declare i32 @llvm.amdgcn.readfirstlane.i32(i32)
diff --git a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
index 2ae6fc2..4a6fa4f 100644
--- a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
+++ b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
@@ -691,7 +691,8 @@ define amdgpu_kernel void @uaddo32_vcc_user(ptr addrspace(1) %out, ptr addrspace
; GCN-ISEL-LABEL: name: suaddo64
; GCN-ISEL-LABEL: body:
; GCN-ISEL-LABEL: bb.0
-; GCN-ISEL: S_ADD_U64_PSEUDO
+; GCN-ISEL: S_UADDO_PSEUDO
+; GCN-ISEL: S_ADD_CO_PSEUDO
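+; The carry is now produced by the S_UADDO_PSEUDO / S_ADD_CO_PSEUDO pair
+; (via SCC) instead of being recomputed with a 64-bit compare of the sum.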
define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %carryout, i64 %a, i64 %b) #0 {
; CISI-LABEL: suaddo64:
@@ -700,21 +701,23 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; CISI-NEXT: s_mov_b32 s11, 0xf000
; CISI-NEXT: s_mov_b32 s10, -1
; CISI-NEXT: s_waitcnt lgkmcnt(0)
-; CISI-NEXT: s_add_u32 s6, s4, s6
-; CISI-NEXT: v_mov_b32_e32 v0, s4
-; CISI-NEXT: s_addc_u32 s7, s5, s7
-; CISI-NEXT: v_mov_b32_e32 v1, s5
-; CISI-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; CISI-NEXT: v_mov_b32_e32 v2, s6
+; CISI-NEXT: s_add_u32 s4, s4, s6
+; CISI-NEXT: s_cselect_b64 s[12:13], -1, 0
+; CISI-NEXT: s_or_b32 s6, s12, s13
+; CISI-NEXT: s_cmp_lg_u32 s6, 0
+; CISI-NEXT: s_addc_u32 s5, s5, s7
; CISI-NEXT: s_mov_b32 s8, s0
; CISI-NEXT: s_mov_b32 s9, s1
+; CISI-NEXT: v_mov_b32_e32 v0, s4
+; CISI-NEXT: v_mov_b32_e32 v1, s5
+; CISI-NEXT: s_cselect_b64 s[4:5], -1, 0
; CISI-NEXT: s_mov_b32 s0, s2
; CISI-NEXT: s_mov_b32 s1, s3
; CISI-NEXT: s_mov_b32 s2, s10
; CISI-NEXT: s_mov_b32 s3, s11
-; CISI-NEXT: v_mov_b32_e32 v3, s7
-; CISI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; CISI-NEXT: buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; CISI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; CISI-NEXT: s_waitcnt expcnt(0)
+; CISI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; CISI-NEXT: buffer_store_byte v0, off, s[0:3], 0
; CISI-NEXT: s_endpgm
;
@@ -722,37 +725,37 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: s_add_u32 s2, s4, s6
; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: s_add_u32 s0, s4, s6
-; VI-NEXT: v_mov_b32_e32 v4, s4
; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: s_addc_u32 s1, s5, s7
-; VI-NEXT: v_mov_b32_e32 v5, s5
-; VI-NEXT: v_mov_b32_e32 v7, s1
-; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[4:5]
-; VI-NEXT: v_mov_b32_e32 v6, s0
-; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT: s_cmp_lg_u64 s[0:1], 0
+; VI-NEXT: s_addc_u32 s0, s5, s7
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_mov_b32_e32 v5, s0
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
; VI-NEXT: v_mov_b32_e32 v3, s3
-; VI-NEXT: flat_store_dwordx2 v[0:1], v[6:7]
-; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; VI-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
+; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; VI-NEXT: flat_store_byte v[2:3], v0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: suaddo64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_add_u32 s0, s12, s14
-; GFX9-NEXT: v_mov_b32_e32 v0, s12
-; GFX9-NEXT: v_mov_b32_e32 v1, s13
-; GFX9-NEXT: s_addc_u32 s1, s13, s15
-; GFX9-NEXT: v_mov_b32_e32 v3, s1
-; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v2, s0
-; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT: global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX9-NEXT: global_store_byte v4, v0, s[10:11]
+; GFX9-NEXT: s_add_u32 s2, s12, s14
+; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX9-NEXT: s_addc_u32 s0, s13, s15
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[0:1]
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
+; GFX9-NEXT: global_store_byte v2, v3, s[10:11]
; GFX9-NEXT: s_endpgm
;
; GFX1010-LABEL: suaddo64:
@@ -761,10 +764,12 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1010-NEXT: v_mov_b32_e32 v2, 0
; GFX1010-NEXT: s_waitcnt lgkmcnt(0)
; GFX1010-NEXT: s_add_u32 s0, s12, s14
-; GFX1010-NEXT: s_addc_u32 s1, s13, s15
+; GFX1010-NEXT: s_cselect_b32 s1, -1, 0
; GFX1010-NEXT: v_mov_b32_e32 v0, s0
+; GFX1010-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1010-NEXT: s_addc_u32 s1, s13, s15
+; GFX1010-NEXT: s_cselect_b32 s0, -1, 0
; GFX1010-NEXT: v_mov_b32_e32 v1, s1
-; GFX1010-NEXT: v_cmp_lt_u64_e64 s0, s[0:1], s[12:13]
; GFX1010-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0
; GFX1010-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
; GFX1010-NEXT: global_store_byte v2, v3, s[10:11]
@@ -775,11 +780,13 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1030W32-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX1030W32-NEXT: v_mov_b32_e32 v2, 0
; GFX1030W32-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1030W32-NEXT: s_add_u32 s6, s4, s6
-; GFX1030W32-NEXT: s_addc_u32 s7, s5, s7
-; GFX1030W32-NEXT: v_mov_b32_e32 v0, s6
-; GFX1030W32-NEXT: v_cmp_lt_u64_e64 s4, s[6:7], s[4:5]
-; GFX1030W32-NEXT: v_mov_b32_e32 v1, s7
+; GFX1030W32-NEXT: s_add_u32 s4, s4, s6
+; GFX1030W32-NEXT: s_cselect_b32 s6, -1, 0
+; GFX1030W32-NEXT: v_mov_b32_e32 v0, s4
+; GFX1030W32-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1030W32-NEXT: s_addc_u32 s5, s5, s7
+; GFX1030W32-NEXT: s_cselect_b32 s4, -1, 0
+; GFX1030W32-NEXT: v_mov_b32_e32 v1, s5
; GFX1030W32-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX1030W32-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX1030W32-NEXT: global_store_byte v2, v3, s[2:3]
@@ -790,11 +797,13 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1030W64-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX1030W64-NEXT: v_mov_b32_e32 v2, 0
; GFX1030W64-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1030W64-NEXT: s_add_u32 s6, s4, s6
-; GFX1030W64-NEXT: s_addc_u32 s7, s5, s7
-; GFX1030W64-NEXT: v_mov_b32_e32 v0, s6
-; GFX1030W64-NEXT: v_cmp_lt_u64_e64 s[4:5], s[6:7], s[4:5]
-; GFX1030W64-NEXT: v_mov_b32_e32 v1, s7
+; GFX1030W64-NEXT: s_add_u32 s4, s4, s6
+; GFX1030W64-NEXT: s_cselect_b64 s[8:9], -1, 0
+; GFX1030W64-NEXT: v_mov_b32_e32 v0, s4
+; GFX1030W64-NEXT: s_cmp_lg_u64 s[8:9], 0
+; GFX1030W64-NEXT: s_addc_u32 s5, s5, s7
+; GFX1030W64-NEXT: v_mov_b32_e32 v1, s5
+; GFX1030W64-NEXT: s_cselect_b64 s[4:5], -1, 0
; GFX1030W64-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[4:5]
; GFX1030W64-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX1030W64-NEXT: global_store_byte v2, v3, s[2:3]
@@ -804,12 +813,13 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_add_u32 s6, s4, s6
-; GFX11-NEXT: s_addc_u32 s7, s5, s7
-; GFX11-NEXT: v_mov_b32_e32 v0, s6
-; GFX11-NEXT: v_cmp_lt_u64_e64 s4, s[6:7], s[4:5]
-; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: s_add_u32 s4, s4, s6
+; GFX11-NEXT: s_cselect_b32 s6, -1, 0
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
+; GFX11-NEXT: s_cmp_lg_u32 s6, 0
+; GFX11-NEXT: s_addc_u32 s5, s5, s7
+; GFX11-NEXT: s_cselect_b32 s4, -1, 0
+; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s5
; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -819,12 +829,14 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1250-LABEL: suaddo64:
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
-; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_add_nc_u64 s[0:1], s[12:13], s[14:15]
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
-; GFX1250-NEXT: v_cmp_lt_u64_e64 s0, s[0:1], s[12:13]
+; GFX1250-NEXT: s_add_co_u32 s0, s12, s14
+; GFX1250-NEXT: s_cselect_b32 s1, -1, 0
+; GFX1250-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v0, s0
+; GFX1250-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1250-NEXT: s_add_co_ci_u32 s1, s13, s15
+; GFX1250-NEXT: s_cselect_b32 s0, -1, 0
+; GFX1250-NEXT: v_mov_b32_e32 v1, s1
; GFX1250-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0
; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[8:9]
@@ -841,7 +853,8 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GCN-ISEL-LABEL: name: vuaddo64
; GCN-ISEL-LABEL: body:
; GCN-ISEL-LABEL: bb.0
-; GCN-ISEL: V_ADD_U64_PSEUDO
+; GCN-ISEL: V_ADD_CO_U32_e64
+; GCN-ISEL: V_ADDC_U32_e64
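+; Likewise for the VALU path: the carry comes straight from the
+; V_ADD_CO_U32_e64 / V_ADDC_U32_e64 pair instead of a trailing v_cmp_gt_u64.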
define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %carryout, i64 %a) #0 {
; CISI-LABEL: vuaddo64:
@@ -854,9 +867,8 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; CISI-NEXT: s_mov_b32 s4, s0
; CISI-NEXT: v_mov_b32_e32 v1, s9
; CISI-NEXT: v_add_i32_e32 v0, vcc, s8, v0
-; CISI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; CISI-NEXT: v_cmp_gt_u64_e32 vcc, s[8:9], v[0:1]
; CISI-NEXT: s_mov_b32 s5, s1
+; CISI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; CISI-NEXT: s_mov_b32 s0, s2
; CISI-NEXT: s_mov_b32 s1, s3
; CISI-NEXT: s_mov_b32 s2, s6
@@ -876,7 +888,6 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; VI-NEXT: v_mov_b32_e32 v6, s5
; VI-NEXT: v_add_u32_e32 v5, vcc, s4, v0
; VI-NEXT: v_addc_u32_e32 v6, vcc, 0, v6, vcc
-; VI-NEXT: v_cmp_gt_u64_e32 vcc, s[4:5], v[5:6]
; VI-NEXT: v_mov_b32_e32 v2, s1
; VI-NEXT: v_mov_b32_e32 v3, s2
; VI-NEXT: v_mov_b32_e32 v4, s3
@@ -894,7 +905,6 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s6, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, s[6:7], v[0:1]
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; GFX9-NEXT: global_store_byte v2, v0, s[2:3]
@@ -909,8 +919,7 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1010-NEXT: s_waitcnt lgkmcnt(0)
; GFX1010-NEXT: v_add_co_u32 v0, s4, s6, v0
; GFX1010-NEXT: v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
-; GFX1010-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX1010-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX1010-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX1010-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX1010-NEXT: global_store_byte v2, v3, s[2:3]
; GFX1010-NEXT: s_endpgm
@@ -923,9 +932,8 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1030W32-NEXT: v_mov_b32_e32 v2, 0
; GFX1030W32-NEXT: s_waitcnt lgkmcnt(0)
; GFX1030W32-NEXT: v_add_co_u32 v0, s4, s6, v0
-; GFX1030W32-NEXT: v_add_co_ci_u32_e64 v1, null, s7, 0, s4
-; GFX1030W32-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX1030W32-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX1030W32-NEXT: v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
+; GFX1030W32-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX1030W32-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX1030W32-NEXT: global_store_byte v2, v3, s[2:3]
; GFX1030W32-NEXT: s_endpgm
@@ -938,9 +946,8 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1030W64-NEXT: v_mov_b32_e32 v2, 0
; GFX1030W64-NEXT: s_waitcnt lgkmcnt(0)
; GFX1030W64-NEXT: v_add_co_u32 v0, s[4:5], s6, v0
-; GFX1030W64-NEXT: v_add_co_ci_u32_e64 v1, null, s7, 0, s[4:5]
-; GFX1030W64-NEXT: v_cmp_gt_u64_e32 vcc, s[6:7], v[0:1]
-; GFX1030W64-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX1030W64-NEXT: v_add_co_ci_u32_e64 v1, s[4:5], s7, 0, s[4:5]
+; GFX1030W64-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[4:5]
; GFX1030W64-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX1030W64-NEXT: global_store_byte v2, v3, s[2:3]
; GFX1030W64-NEXT: s_endpgm
@@ -955,10 +962,9 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_u32 v0, s4, s6, v0
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s7, 0, s4
+; GFX11-NEXT: v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: global_store_b8 v2, v3, s[2:3]
@@ -969,16 +975,17 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_add_nc_u64_e32 v[2:3], s[6:7], v[0:1]
-; GFX1250-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[2:3]
-; GFX1250-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_add_co_u32 v0, s4, s6, v0
+; GFX1250-NEXT: v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX1250-NEXT: s_clause 0x1
-; GFX1250-NEXT: global_store_b64 v1, v[2:3], s[0:1]
-; GFX1250-NEXT: global_store_b8 v1, v0, s[2:3]
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: global_store_b8 v2, v3, s[2:3]
; GFX1250-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
@@ -1671,7 +1678,8 @@ define amdgpu_kernel void @usubo32_vcc_user(ptr addrspace(1) %out, ptr addrspace
; GCN-ISEL-LABEL: name: susubo64
; GCN-ISEL-LABEL: body:
; GCN-ISEL-LABEL: bb.0
-; GCN-ISEL: S_SUB_U64_PSEUDO
+; GCN-ISEL: S_USUBO_PSEUDO
+; GCN-ISEL: S_SUB_CO_PSEUDO
define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %carryout, i64 %a, i64 %b) #0 {
; CISI-LABEL: susubo64:
@@ -1680,21 +1688,23 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; CISI-NEXT: s_mov_b32 s11, 0xf000
; CISI-NEXT: s_mov_b32 s10, -1
; CISI-NEXT: s_waitcnt lgkmcnt(0)
-; CISI-NEXT: s_sub_u32 s6, s4, s6
-; CISI-NEXT: v_mov_b32_e32 v0, s4
-; CISI-NEXT: s_subb_u32 s7, s5, s7
-; CISI-NEXT: v_mov_b32_e32 v1, s5
-; CISI-NEXT: v_cmp_gt_u64_e32 vcc, s[6:7], v[0:1]
-; CISI-NEXT: v_mov_b32_e32 v2, s6
+; CISI-NEXT: s_sub_u32 s4, s4, s6
+; CISI-NEXT: s_cselect_b64 s[12:13], -1, 0
+; CISI-NEXT: s_or_b32 s6, s12, s13
+; CISI-NEXT: s_cmp_lg_u32 s6, 0
+; CISI-NEXT: s_subb_u32 s5, s5, s7
; CISI-NEXT: s_mov_b32 s8, s0
; CISI-NEXT: s_mov_b32 s9, s1
+; CISI-NEXT: v_mov_b32_e32 v0, s4
+; CISI-NEXT: v_mov_b32_e32 v1, s5
+; CISI-NEXT: s_cselect_b64 s[4:5], -1, 0
; CISI-NEXT: s_mov_b32 s0, s2
; CISI-NEXT: s_mov_b32 s1, s3
; CISI-NEXT: s_mov_b32 s2, s10
; CISI-NEXT: s_mov_b32 s3, s11
-; CISI-NEXT: v_mov_b32_e32 v3, s7
-; CISI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; CISI-NEXT: buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; CISI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; CISI-NEXT: s_waitcnt expcnt(0)
+; CISI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; CISI-NEXT: buffer_store_byte v0, off, s[0:3], 0
; CISI-NEXT: s_endpgm
;
@@ -1702,37 +1712,37 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: s_sub_u32 s2, s4, s6
; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: s_sub_u32 s0, s4, s6
-; VI-NEXT: v_mov_b32_e32 v4, s4
; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: s_subb_u32 s1, s5, s7
-; VI-NEXT: v_mov_b32_e32 v5, s5
-; VI-NEXT: v_mov_b32_e32 v7, s1
-; VI-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[4:5]
-; VI-NEXT: v_mov_b32_e32 v6, s0
-; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT: s_cmp_lg_u64 s[0:1], 0
+; VI-NEXT: s_subb_u32 s0, s5, s7
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_mov_b32_e32 v5, s0
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
; VI-NEXT: v_mov_b32_e32 v3, s3
-; VI-NEXT: flat_store_dwordx2 v[0:1], v[6:7]
-; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; VI-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
+; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; VI-NEXT: flat_store_byte v[2:3], v0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: susubo64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_sub_u32 s0, s12, s14
-; GFX9-NEXT: v_mov_b32_e32 v0, s12
-; GFX9-NEXT: v_mov_b32_e32 v1, s13
-; GFX9-NEXT: s_subb_u32 s1, s13, s15
-; GFX9-NEXT: v_mov_b32_e32 v3, s1
-; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v2, s0
-; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT: global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX9-NEXT: global_store_byte v4, v0, s[10:11]
+; GFX9-NEXT: s_sub_u32 s2, s12, s14
+; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX9-NEXT: s_subb_u32 s0, s13, s15
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[0:1]
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
+; GFX9-NEXT: global_store_byte v2, v3, s[10:11]
; GFX9-NEXT: s_endpgm
;
; GFX1010-LABEL: susubo64:
@@ -1741,10 +1751,12 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1010-NEXT: v_mov_b32_e32 v2, 0
; GFX1010-NEXT: s_waitcnt lgkmcnt(0)
; GFX1010-NEXT: s_sub_u32 s0, s12, s14
-; GFX1010-NEXT: s_subb_u32 s1, s13, s15
+; GFX1010-NEXT: s_cselect_b32 s1, -1, 0
; GFX1010-NEXT: v_mov_b32_e32 v0, s0
+; GFX1010-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1010-NEXT: s_subb_u32 s1, s13, s15
+; GFX1010-NEXT: s_cselect_b32 s0, -1, 0
; GFX1010-NEXT: v_mov_b32_e32 v1, s1
-; GFX1010-NEXT: v_cmp_gt_u64_e64 s0, s[0:1], s[12:13]
; GFX1010-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0
; GFX1010-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
; GFX1010-NEXT: global_store_byte v2, v3, s[10:11]
@@ -1755,11 +1767,13 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1030W32-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX1030W32-NEXT: v_mov_b32_e32 v2, 0
; GFX1030W32-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1030W32-NEXT: s_sub_u32 s6, s4, s6
-; GFX1030W32-NEXT: s_subb_u32 s7, s5, s7
-; GFX1030W32-NEXT: v_mov_b32_e32 v0, s6
-; GFX1030W32-NEXT: v_cmp_gt_u64_e64 s4, s[6:7], s[4:5]
-; GFX1030W32-NEXT: v_mov_b32_e32 v1, s7
+; GFX1030W32-NEXT: s_sub_u32 s4, s4, s6
+; GFX1030W32-NEXT: s_cselect_b32 s6, -1, 0
+; GFX1030W32-NEXT: v_mov_b32_e32 v0, s4
+; GFX1030W32-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1030W32-NEXT: s_subb_u32 s5, s5, s7
+; GFX1030W32-NEXT: s_cselect_b32 s4, -1, 0
+; GFX1030W32-NEXT: v_mov_b32_e32 v1, s5
; GFX1030W32-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX1030W32-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX1030W32-NEXT: global_store_byte v2, v3, s[2:3]
@@ -1770,11 +1784,13 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1030W64-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; GFX1030W64-NEXT: v_mov_b32_e32 v2, 0
; GFX1030W64-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1030W64-NEXT: s_sub_u32 s6, s4, s6
-; GFX1030W64-NEXT: s_subb_u32 s7, s5, s7
-; GFX1030W64-NEXT: v_mov_b32_e32 v0, s6
-; GFX1030W64-NEXT: v_cmp_gt_u64_e64 s[4:5], s[6:7], s[4:5]
-; GFX1030W64-NEXT: v_mov_b32_e32 v1, s7
+; GFX1030W64-NEXT: s_sub_u32 s4, s4, s6
+; GFX1030W64-NEXT: s_cselect_b64 s[8:9], -1, 0
+; GFX1030W64-NEXT: v_mov_b32_e32 v0, s4
+; GFX1030W64-NEXT: s_cmp_lg_u64 s[8:9], 0
+; GFX1030W64-NEXT: s_subb_u32 s5, s5, s7
+; GFX1030W64-NEXT: v_mov_b32_e32 v1, s5
+; GFX1030W64-NEXT: s_cselect_b64 s[4:5], -1, 0
; GFX1030W64-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[4:5]
; GFX1030W64-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX1030W64-NEXT: global_store_byte v2, v3, s[2:3]
@@ -1784,12 +1800,13 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_sub_u32 s6, s4, s6
-; GFX11-NEXT: s_subb_u32 s7, s5, s7
-; GFX11-NEXT: v_mov_b32_e32 v0, s6
-; GFX11-NEXT: v_cmp_gt_u64_e64 s4, s[6:7], s[4:5]
-; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: s_sub_u32 s4, s4, s6
+; GFX11-NEXT: s_cselect_b32 s6, -1, 0
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
+; GFX11-NEXT: s_cmp_lg_u32 s6, 0
+; GFX11-NEXT: s_subb_u32 s5, s5, s7
+; GFX11-NEXT: s_cselect_b32 s4, -1, 0
+; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s5
; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -1799,12 +1816,14 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1250-LABEL: susubo64:
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_load_b256 s[8:15], s[4:5], 0x24
-; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_sub_nc_u64 s[0:1], s[12:13], s[14:15]
-; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[0:1]
-; GFX1250-NEXT: v_cmp_gt_u64_e64 s0, s[0:1], s[12:13]
+; GFX1250-NEXT: s_sub_co_u32 s0, s12, s14
+; GFX1250-NEXT: s_cselect_b32 s1, -1, 0
+; GFX1250-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v0, s0
+; GFX1250-NEXT: s_cmp_lg_u32 s1, 0
+; GFX1250-NEXT: s_sub_co_ci_u32 s1, s13, s15
+; GFX1250-NEXT: s_cselect_b32 s0, -1, 0
+; GFX1250-NEXT: v_mov_b32_e32 v1, s1
; GFX1250-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0
; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[8:9]
@@ -1821,7 +1840,8 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GCN-ISEL-LABEL: name: vusubo64
; GCN-ISEL-LABEL: body:
; GCN-ISEL-LABEL: bb.0
-; GCN-ISEL: V_SUB_U64_PSEUDO
+; GCN-ISEL: V_SUB_CO_U32_e64
+; GCN-ISEL: V_SUBB_U32_e64
define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %carryout, i64 %a) #0 {
; CISI-LABEL: vusubo64:
@@ -1834,9 +1854,8 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; CISI-NEXT: s_mov_b32 s4, s0
; CISI-NEXT: v_mov_b32_e32 v1, s9
; CISI-NEXT: v_sub_i32_e32 v0, vcc, s8, v0
-; CISI-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
-; CISI-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[0:1]
; CISI-NEXT: s_mov_b32 s5, s1
+; CISI-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
; CISI-NEXT: s_mov_b32 s0, s2
; CISI-NEXT: s_mov_b32 s1, s3
; CISI-NEXT: s_mov_b32 s2, s6
@@ -1856,7 +1875,6 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; VI-NEXT: v_mov_b32_e32 v6, s5
; VI-NEXT: v_sub_u32_e32 v5, vcc, s4, v0
; VI-NEXT: v_subbrev_u32_e32 v6, vcc, 0, v6, vcc
-; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[5:6]
; VI-NEXT: v_mov_b32_e32 v2, s1
; VI-NEXT: v_mov_b32_e32 v3, s2
; VI-NEXT: v_mov_b32_e32 v4, s3
@@ -1874,7 +1892,6 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, s6, v0
; GFX9-NEXT: v_subbrev_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; GFX9-NEXT: global_store_byte v2, v0, s[2:3]
@@ -1889,8 +1906,7 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1010-NEXT: s_waitcnt lgkmcnt(0)
; GFX1010-NEXT: v_sub_co_u32 v0, s4, s6, v0
; GFX1010-NEXT: v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
-; GFX1010-NEXT: v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX1010-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX1010-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX1010-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX1010-NEXT: global_store_byte v2, v3, s[2:3]
; GFX1010-NEXT: s_endpgm
@@ -1903,9 +1919,8 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1030W32-NEXT: v_mov_b32_e32 v2, 0
; GFX1030W32-NEXT: s_waitcnt lgkmcnt(0)
; GFX1030W32-NEXT: v_sub_co_u32 v0, s4, s6, v0
-; GFX1030W32-NEXT: v_sub_co_ci_u32_e64 v1, null, s7, 0, s4
-; GFX1030W32-NEXT: v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX1030W32-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX1030W32-NEXT: v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
+; GFX1030W32-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX1030W32-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX1030W32-NEXT: global_store_byte v2, v3, s[2:3]
; GFX1030W32-NEXT: s_endpgm
@@ -1918,9 +1933,8 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1030W64-NEXT: v_mov_b32_e32 v2, 0
; GFX1030W64-NEXT: s_waitcnt lgkmcnt(0)
; GFX1030W64-NEXT: v_sub_co_u32 v0, s[4:5], s6, v0
-; GFX1030W64-NEXT: v_sub_co_ci_u32_e64 v1, null, s7, 0, s[4:5]
-; GFX1030W64-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; GFX1030W64-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX1030W64-NEXT: v_sub_co_ci_u32_e64 v1, s[4:5], s7, 0, s[4:5]
+; GFX1030W64-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[4:5]
; GFX1030W64-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX1030W64-NEXT: global_store_byte v2, v3, s[2:3]
; GFX1030W64-NEXT: s_endpgm
@@ -1935,10 +1949,9 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_sub_co_u32 v0, s4, s6, v0
-; GFX11-NEXT: v_sub_co_ci_u32_e64 v1, null, s7, 0, s4
+; GFX11-NEXT: v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: global_store_b8 v2, v3, s[2:3]
@@ -1949,16 +1962,17 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
; GFX1250-NEXT: s_clause 0x1
; GFX1250-NEXT: s_load_b64 s[6:7], s[4:5], 0x34
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: v_mov_b32_e32 v1, 0
; GFX1250-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT: v_mov_b32_e32 v2, 0
; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT: v_sub_nc_u64_e32 v[2:3], s[6:7], v[0:1]
-; GFX1250-NEXT: v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[2:3]
-; GFX1250-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT: v_sub_co_u32 v0, s4, s6, v0
+; GFX1250-NEXT: v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX1250-NEXT: s_clause 0x1
-; GFX1250-NEXT: global_store_b64 v1, v[2:3], s[0:1]
-; GFX1250-NEXT: global_store_b8 v1, v0, s[2:3]
+; GFX1250-NEXT: global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT: global_store_b8 v2, v3, s[2:3]
; GFX1250-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
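Net effect of the carryout-selection changes above: the i64 uaddo/usubo kernels no longer detect overflow with an unsigned compare of the result against an input operand (v_cmp_gt_u64 / v_cmp_lt_u64); they read the carry produced by the add/sub chain itself, via s_cselect_b64 and s_addc_u32 on the scalar path and v_add_co_u32 / v_add_co_ci_u32 on the vector path. As a minimal IR sketch of the pattern these kernels lower (the function name is hypothetical; the intrinsic is the standard overflow intrinsic):

declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64)

define amdgpu_kernel void @uaddo64_sketch(ptr addrspace(1) %out, ptr addrspace(1) %carryout, i64 %a, i64 %b) {
  ; The overflow bit is the carry-out of the 64-bit add itself; no compare needed.
  %pair  = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
  %sum   = extractvalue { i64, i1 } %pair, 0
  %carry = extractvalue { i64, i1 } %pair, 1
  store i64 %sum, ptr addrspace(1) %out
  store i1 %carry, ptr addrspace(1) %carryout
  ret void
}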
diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index 697bcc3..5f6d622 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -206,8 +206,11 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GCN-IR-NEXT: s_cbranch_vccz .LBB0_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: s_add_u32 s18, s16, 1
-; GCN-IR-NEXT: s_addc_u32 s19, s17, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[18:19], 0
+; GCN-IR-NEXT: s_cselect_b64 s[10:11], -1, 0
+; GCN-IR-NEXT: s_or_b32 s10, s10, s11
+; GCN-IR-NEXT: s_cmp_lg_u32 s10, 0
+; GCN-IR-NEXT: s_addc_u32 s10, s17, 0
+; GCN-IR-NEXT: s_cselect_b64 s[10:11], -1, 0
; GCN-IR-NEXT: s_sub_i32 s16, 63, s16
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11]
; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[12:13], s16
@@ -217,9 +220,9 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GCN-IR-NEXT: s_add_u32 s18, s2, -1
; GCN-IR-NEXT: s_addc_u32 s19, s3, -1
; GCN-IR-NEXT: s_not_b64 s[8:9], s[14:15]
-; GCN-IR-NEXT: s_add_u32 s12, s8, s20
-; GCN-IR-NEXT: s_addc_u32 s13, s9, 0
-; GCN-IR-NEXT: s_mov_b64 s[14:15], 0
+; GCN-IR-NEXT: s_add_u32 s14, s8, s20
+; GCN-IR-NEXT: s_addc_u32 s15, s9, 0
+; GCN-IR-NEXT: s_mov_b64 s[12:13], 0
; GCN-IR-NEXT: s_mov_b32 s9, 0
; GCN-IR-NEXT: .LBB0_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -227,19 +230,22 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GCN-IR-NEXT: s_lshr_b32 s8, s11, 31
; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[10:11], 1
; GCN-IR-NEXT: s_or_b64 s[16:17], s[16:17], s[8:9]
-; GCN-IR-NEXT: s_or_b64 s[10:11], s[14:15], s[10:11]
+; GCN-IR-NEXT: s_or_b64 s[10:11], s[12:13], s[10:11]
; GCN-IR-NEXT: s_sub_u32 s8, s18, s16
; GCN-IR-NEXT: s_subb_u32 s8, s19, s17
-; GCN-IR-NEXT: s_ashr_i32 s14, s8, 31
-; GCN-IR-NEXT: s_mov_b32 s15, s14
-; GCN-IR-NEXT: s_and_b32 s8, s14, 1
-; GCN-IR-NEXT: s_and_b64 s[14:15], s[14:15], s[2:3]
-; GCN-IR-NEXT: s_sub_u32 s16, s16, s14
-; GCN-IR-NEXT: s_subb_u32 s17, s17, s15
-; GCN-IR-NEXT: s_add_u32 s12, s12, 1
-; GCN-IR-NEXT: s_addc_u32 s13, s13, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[20:21], s[12:13], 0
-; GCN-IR-NEXT: s_mov_b64 s[14:15], s[8:9]
+; GCN-IR-NEXT: s_ashr_i32 s12, s8, 31
+; GCN-IR-NEXT: s_mov_b32 s13, s12
+; GCN-IR-NEXT: s_and_b32 s8, s12, 1
+; GCN-IR-NEXT: s_and_b64 s[20:21], s[12:13], s[2:3]
+; GCN-IR-NEXT: s_sub_u32 s16, s16, s20
+; GCN-IR-NEXT: s_subb_u32 s17, s17, s21
+; GCN-IR-NEXT: s_add_u32 s14, s14, 1
+; GCN-IR-NEXT: s_cselect_b64 s[20:21], -1, 0
+; GCN-IR-NEXT: s_or_b32 s20, s20, s21
+; GCN-IR-NEXT: s_cmp_lg_u32 s20, 0
+; GCN-IR-NEXT: s_addc_u32 s15, s15, 0
+; GCN-IR-NEXT: s_cselect_b64 s[20:21], -1, 0
+; GCN-IR-NEXT: s_mov_b64 s[12:13], s[8:9]
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[20:21]
; GCN-IR-NEXT: s_cbranch_vccz .LBB0_3
; GCN-IR-NEXT: .LBB0_4: ; %Flow7
@@ -389,25 +395,25 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
; GCN-IR-LABEL: v_test_sdiv:
; GCN-IR: ; %bb.0: ; %_udiv-special-cases
; GCN-IR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v13, 31, v3
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v12
-; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v0, v12
-; GCN-IR-NEXT: v_subb_u32_e32 v7, vcc, v1, v12, vcc
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v2, v13
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v3, v13
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v13
-; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v1
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v11, 31, v3
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v10
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v0, v10
+; GCN-IR-NEXT: v_subb_u32_e32 v7, vcc, v1, v10, vcc
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v2, v11
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v3, v11
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v11
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v11, vcc
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
; GCN-IR-NEXT: v_add_i32_e64 v2, s[6:7], 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT: v_min_u32_e32 v8, v2, v3
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v6
; GCN-IR-NEXT: v_add_i32_e64 v2, s[6:7], 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v7
-; GCN-IR-NEXT: v_min_u32_e32 v11, v2, v3
-; GCN-IR-NEXT: v_sub_i32_e64 v2, s[6:7], v10, v11
+; GCN-IR-NEXT: v_min_u32_e32 v9, v2, v3
+; GCN-IR-NEXT: v_sub_i32_e64 v2, s[6:7], v8, v9
; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[6:7]
; GCN-IR-NEXT: v_subb_u32_e64 v3, s[6:7], 0, 0, s[6:7]
@@ -416,70 +422,69 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[2:3]
; GCN-IR-NEXT: s_xor_b64 s[6:7], s[4:5], -1
-; GCN-IR-NEXT: v_mov_b32_e32 v14, v12
-; GCN-IR-NEXT: v_mov_b32_e32 v15, v13
+; GCN-IR-NEXT: v_mov_b32_e32 v12, v10
+; GCN-IR-NEXT: v_mov_b32_e32 v13, v11
; GCN-IR-NEXT: v_cndmask_b32_e64 v5, v7, 0, s[4:5]
; GCN-IR-NEXT: v_cndmask_b32_e64 v4, v6, 0, s[4:5]
; GCN-IR-NEXT: s_and_b64 s[4:5], s[6:7], vcc
; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; GCN-IR-NEXT: s_cbranch_execz .LBB1_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v3, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, 1, v2
+; GCN-IR-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[6:7], v2
+; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execz .LBB1_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_add_i32_e32 v16, vcc, -1, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v17, vcc, -1, v1, vcc
-; GCN-IR-NEXT: v_not_b32_e32 v4, v10
-; GCN-IR-NEXT: v_lshr_b64 v[8:9], v[6:7], v8
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, v4, v11
-; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT: v_addc_u32_e64 v7, s[4:5], -1, 0, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], v[6:7], v14
+; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, -1, v0
+; GCN-IR-NEXT: v_addc_u32_e32 v15, vcc, -1, v1, vcc
+; GCN-IR-NEXT: v_not_b32_e32 v4, v8
+; GCN-IR-NEXT: v_add_i32_e32 v16, vcc, v4, v9
+; GCN-IR-NEXT: v_addc_u32_e64 v17, s[8:9], -1, 0, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
; GCN-IR-NEXT: .LBB1_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v16, v8
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v17, v9, vcc
-; GCN-IR-NEXT: v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT: v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT: v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT: v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT: v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v14, v6
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v15, v7, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT: v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT: v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v16, vcc, 1, v16
+; GCN-IR-NEXT: v_addc_u32_e32 v17, vcc, 0, v17, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB1_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB1_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB1_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[2:3], 1
; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v1
; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v0
; GCN-IR-NEXT: .LBB1_6: ; %Flow5
; GCN-IR-NEXT: s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v13, v12
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v15, v14
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v11, v10
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v13, v12
; GCN-IR-NEXT: v_xor_b32_e32 v3, v4, v0
; GCN-IR-NEXT: v_xor_b32_e32 v2, v5, v1
; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v3, v0
@@ -1293,34 +1298,37 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_xor_b64 s[2:3], s[2:3], s[4:5]
; GCN-IR-NEXT: s_sub_u32 s2, s2, s4
; GCN-IR-NEXT: s_subb_u32 s3, s3, s4
-; GCN-IR-NEXT: s_flbit_i32_b64 s14, s[2:3]
-; GCN-IR-NEXT: s_add_u32 s10, s14, 0xffffffc5
+; GCN-IR-NEXT: s_flbit_i32_b64 s16, s[2:3]
+; GCN-IR-NEXT: s_add_u32 s10, s16, 0xffffffc5
; GCN-IR-NEXT: s_addc_u32 s11, 0, -1
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[2:3], 0
; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[12:13], s[10:11], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[16:17], s[10:11], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[14:15], s[10:11], 63
; GCN-IR-NEXT: s_or_b64 s[12:13], s[8:9], s[12:13]
; GCN-IR-NEXT: s_and_b64 s[8:9], s[12:13], exec
; GCN-IR-NEXT: s_cselect_b32 s8, 0, 24
-; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[16:17]
+; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[14:15]
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[12:13]
; GCN-IR-NEXT: s_mov_b32 s9, 0
; GCN-IR-NEXT: s_cbranch_vccz .LBB10_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: s_add_u32 s12, s10, 1
-; GCN-IR-NEXT: s_addc_u32 s13, s11, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[12:13], 0
+; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
+; GCN-IR-NEXT: s_or_b32 s8, s8, s9
+; GCN-IR-NEXT: s_cmp_lg_u32 s8, 0
+; GCN-IR-NEXT: s_addc_u32 s8, s11, 0
+; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-IR-NEXT: s_sub_i32 s10, 63, s10
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[8:9]
; GCN-IR-NEXT: s_lshl_b64 s[8:9], 24, s10
; GCN-IR-NEXT: s_cbranch_vccz .LBB10_4
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
; GCN-IR-NEXT: s_lshr_b64 s[12:13], 24, s12
-; GCN-IR-NEXT: s_add_u32 s16, s2, -1
-; GCN-IR-NEXT: s_addc_u32 s17, s3, -1
-; GCN-IR-NEXT: s_sub_u32 s10, 58, s14
-; GCN-IR-NEXT: s_subb_u32 s11, 0, 0
-; GCN-IR-NEXT: s_mov_b64 s[14:15], 0
+; GCN-IR-NEXT: s_add_u32 s14, s2, -1
+; GCN-IR-NEXT: s_addc_u32 s15, s3, -1
+; GCN-IR-NEXT: s_sub_u32 s16, 58, s16
+; GCN-IR-NEXT: s_subb_u32 s17, 0, 0
+; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
; GCN-IR-NEXT: s_mov_b32 s7, 0
; GCN-IR-NEXT: .LBB10_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -1328,19 +1336,22 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_lshr_b32 s6, s9, 31
; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[8:9], 1
; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[6:7]
-; GCN-IR-NEXT: s_or_b64 s[8:9], s[14:15], s[8:9]
-; GCN-IR-NEXT: s_sub_u32 s6, s16, s12
-; GCN-IR-NEXT: s_subb_u32 s6, s17, s13
-; GCN-IR-NEXT: s_ashr_i32 s14, s6, 31
-; GCN-IR-NEXT: s_mov_b32 s15, s14
-; GCN-IR-NEXT: s_and_b32 s6, s14, 1
-; GCN-IR-NEXT: s_and_b64 s[14:15], s[14:15], s[2:3]
-; GCN-IR-NEXT: s_sub_u32 s12, s12, s14
-; GCN-IR-NEXT: s_subb_u32 s13, s13, s15
-; GCN-IR-NEXT: s_add_u32 s10, s10, 1
-; GCN-IR-NEXT: s_addc_u32 s11, s11, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[10:11], 0
-; GCN-IR-NEXT: s_mov_b64 s[14:15], s[6:7]
+; GCN-IR-NEXT: s_or_b64 s[8:9], s[10:11], s[8:9]
+; GCN-IR-NEXT: s_sub_u32 s6, s14, s12
+; GCN-IR-NEXT: s_subb_u32 s6, s15, s13
+; GCN-IR-NEXT: s_ashr_i32 s10, s6, 31
+; GCN-IR-NEXT: s_mov_b32 s11, s10
+; GCN-IR-NEXT: s_and_b32 s6, s10, 1
+; GCN-IR-NEXT: s_and_b64 s[18:19], s[10:11], s[2:3]
+; GCN-IR-NEXT: s_sub_u32 s12, s12, s18
+; GCN-IR-NEXT: s_subb_u32 s13, s13, s19
+; GCN-IR-NEXT: s_add_u32 s16, s16, 1
+; GCN-IR-NEXT: s_cselect_b64 s[18:19], -1, 0
+; GCN-IR-NEXT: s_or_b32 s18, s18, s19
+; GCN-IR-NEXT: s_cmp_lg_u32 s18, 0
+; GCN-IR-NEXT: s_addc_u32 s17, s17, 0
+; GCN-IR-NEXT: s_cselect_b64 s[18:19], -1, 0
+; GCN-IR-NEXT: s_mov_b64 s[10:11], s[6:7]
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[18:19]
; GCN-IR-NEXT: s_cbranch_vccz .LBB10_3
; GCN-IR-NEXT: .LBB10_4: ; %Flow6
@@ -1472,17 +1483,17 @@ define i64 @v_test_sdiv_k_num_i64(i64 %x) {
; GCN-IR-LABEL: v_test_sdiv_k_num_i64:
; GCN-IR: ; %bb.0: ; %_udiv-special-cases
; GCN-IR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v12
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v12, vcc
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v1
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v10
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v10, vcc
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT: v_min_u32_e32 v8, v2, v3
; GCN-IR-NEXT: s_movk_i32 s6, 0xffc5
-; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, s6, v8
; GCN-IR-NEXT: v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1490,69 +1501,68 @@ define i64 @v_test_sdiv_k_num_i64(i64 %x) {
; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], vcc
; GCN-IR-NEXT: v_cndmask_b32_e64 v4, 24, 0, s[4:5]
; GCN-IR-NEXT: s_xor_b64 s[4:5], s[4:5], -1
-; GCN-IR-NEXT: v_mov_b32_e32 v13, v12
+; GCN-IR-NEXT: v_mov_b32_e32 v11, v10
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
; GCN-IR-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; GCN-IR-NEXT: s_cbranch_execz .LBB11_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
; GCN-IR-NEXT: v_lshl_b64 v[2:3], 24, v2
+; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execz .LBB11_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, -1, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v15, vcc, -1, v1, vcc
-; GCN-IR-NEXT: v_lshr_b64 v[8:9], 24, v6
-; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, 58, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT: v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, -1, v0
+; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, -1, v1, vcc
+; GCN-IR-NEXT: v_sub_i32_e32 v14, vcc, 58, v8
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], 24, v6
+; GCN-IR-NEXT: v_subb_u32_e64 v15, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
; GCN-IR-NEXT: .LBB11_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v14, v8
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v15, v9, vcc
-; GCN-IR-NEXT: v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT: v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT: v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT: v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT: v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v12, v6
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v13, v7, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT: v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT: v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, 1, v14
+; GCN-IR-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB11_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB11_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB11_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[2:3], 1
; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v1
; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v0
; GCN-IR-NEXT: .LBB11_6: ; %Flow5
; GCN-IR-NEXT: s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v4, v12
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v5, v13
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v4, v10
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v5, v11
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v11, vcc
; GCN-IR-NEXT: s_setpc_b64 s[30:31]
%result = sdiv i64 24, %x
ret i64 %result
@@ -1665,17 +1675,17 @@ define i64 @v_test_sdiv_pow2_k_num_i64(i64 %x) {
; GCN-IR-LABEL: v_test_sdiv_pow2_k_num_i64:
; GCN-IR: ; %bb.0: ; %_udiv-special-cases
; GCN-IR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v12
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v12, vcc
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v1
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v10
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v10, vcc
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT: v_min_u32_e32 v8, v2, v3
; GCN-IR-NEXT: s_movk_i32 s6, 0xffd0
-; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, s6, v8
; GCN-IR-NEXT: v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1684,70 +1694,69 @@ define i64 @v_test_sdiv_pow2_k_num_i64(i64 %x) {
; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], vcc
; GCN-IR-NEXT: v_cndmask_b32_e64 v4, v4, 0, s[4:5]
; GCN-IR-NEXT: s_xor_b64 s[4:5], s[4:5], -1
-; GCN-IR-NEXT: v_mov_b32_e32 v13, v12
+; GCN-IR-NEXT: v_mov_b32_e32 v11, v10
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
; GCN-IR-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; GCN-IR-NEXT: s_cbranch_execz .LBB12_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; GCN-IR-NEXT: s_mov_b64 s[4:5], 0x8000
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0x8000
+; GCN-IR-NEXT: v_lshl_b64 v[2:3], s[8:9], v2
; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_lshl_b64 v[2:3], s[4:5], v2
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[10:11]
; GCN-IR-NEXT: s_cbranch_execz .LBB12_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, -1, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v15, vcc, -1, v1, vcc
-; GCN-IR-NEXT: v_lshr_b64 v[8:9], s[4:5], v6
-; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, 47, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT: v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, -1, v0
+; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, -1, v1, vcc
+; GCN-IR-NEXT: v_sub_i32_e32 v14, vcc, 47, v8
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[8:9], v6
+; GCN-IR-NEXT: v_subb_u32_e64 v15, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
; GCN-IR-NEXT: .LBB12_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v14, v8
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v15, v9, vcc
-; GCN-IR-NEXT: v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT: v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT: v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT: v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT: v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v12, v6
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v13, v7, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT: v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT: v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, 1, v14
+; GCN-IR-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB12_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB12_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB12_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[2:3], 1
; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v1
; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v0
; GCN-IR-NEXT: .LBB12_6: ; %Flow5
; GCN-IR-NEXT: s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v4, v12
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v5, v13
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v4, v10
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v5, v11
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v11, vcc
; GCN-IR-NEXT: s_setpc_b64 s[30:31]
%result = sdiv i64 32768, %x
ret i64 %result
@@ -1767,20 +1776,20 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) {
; GCN-IR-LABEL: v_test_sdiv_pow2_k_den_i64:
; GCN-IR: ; %bb.0: ; %_udiv-special-cases
; GCN-IR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v1
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v10
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v10
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v0, v10
-; GCN-IR-NEXT: v_subb_u32_e32 v5, vcc, v1, v10, vcc
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v1
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v8
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v8
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v0, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v5, vcc, v1, v8, vcc
; GCN-IR-NEXT: v_ffbh_u32_e32 v0, v4
; GCN-IR-NEXT: v_add_i32_e64 v0, s[4:5], 32, v0
; GCN-IR-NEXT: v_ffbh_u32_e32 v1, v5
-; GCN-IR-NEXT: v_min_u32_e32 v8, v0, v1
-; GCN-IR-NEXT: v_sub_i32_e64 v0, s[4:5], 48, v8
+; GCN-IR-NEXT: v_min_u32_e32 v6, v0, v1
+; GCN-IR-NEXT: v_sub_i32_e64 v0, s[4:5], 48, v6
; GCN-IR-NEXT: v_subb_u32_e64 v1, s[4:5], 0, 0, s[4:5]
; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5]
; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[4:5], 63, v[0:1]
-; GCN-IR-NEXT: v_mov_b32_e32 v11, v10
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v8
; GCN-IR-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[0:1]
; GCN-IR-NEXT: s_xor_b64 s[6:7], s[4:5], -1
@@ -1790,61 +1799,60 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) {
; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; GCN-IR-NEXT: s_cbranch_execz .LBB13_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v1, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v7, vcc, 1, v0
+; GCN-IR-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GCN-IR-NEXT: v_sub_i32_e64 v0, s[4:5], 63, v0
-; GCN-IR-NEXT: v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[4:5], v0
+; GCN-IR-NEXT: v_mov_b32_e32 v2, 0
; GCN-IR-NEXT: v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execz .LBB13_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_lshr_b64 v[6:7], v[4:5], v6
-; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 0xffffffcf, v8
-; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT: v_addc_u32_e64 v5, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, 0xffffffcf, v6
+; GCN-IR-NEXT: v_lshr_b64 v[4:5], v[4:5], v7
+; GCN-IR-NEXT: v_addc_u32_e64 v11, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v7, 0
; GCN-IR-NEXT: v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT: s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT: s_movk_i32 s10, 0x7fff
; GCN-IR-NEXT: .LBB13_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[4:5], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v2, 31, v1
-; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v2
-; GCN-IR-NEXT: v_sub_i32_e32 v2, vcc, s12, v6
+; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v2
; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 1, v4
-; GCN-IR-NEXT: v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; GCN-IR-NEXT: v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT: v_and_b32_e32 v8, 0x8000, v8
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[4:5]
-; GCN-IR-NEXT: v_or_b32_e32 v1, v9, v1
-; GCN-IR-NEXT: v_sub_i32_e64 v6, s[4:5], v6, v8
-; GCN-IR-NEXT: v_mov_b32_e32 v9, v3
-; GCN-IR-NEXT: v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v2, vcc, s10, v4
+; GCN-IR-NEXT: v_subb_u32_e32 v2, vcc, 0, v5, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v0, v6, v0
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v6, 31, v2
+; GCN-IR-NEXT: v_and_b32_e32 v2, 1, v6
+; GCN-IR-NEXT: v_and_b32_e32 v6, 0x8000, v6
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v4, v6
+; GCN-IR-NEXT: v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, 1, v10
+; GCN-IR-NEXT: v_or_b32_e32 v1, v7, v1
+; GCN-IR-NEXT: v_addc_u32_e32 v11, vcc, 0, v11, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v7, v3
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v6, v2
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB13_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB13_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB13_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
; GCN-IR-NEXT: v_or_b32_e32 v3, v3, v1
; GCN-IR-NEXT: v_or_b32_e32 v2, v2, v0
; GCN-IR-NEXT: .LBB13_6: ; %Flow5
; GCN-IR-NEXT: s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v2, v10
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v3, v11
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v10
-; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v11, vcc
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v2, v8
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v3, v9
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v9, vcc
; GCN-IR-NEXT: s_setpc_b64 s[30:31]
%result = sdiv i64 %x, 32768
ret i64 %result
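The sdiv64 changes apply the same carry-materialization scheme inside the GCN-IR shift-subtract loops: the 64-bit v_cmp_eq_u64 carry/loop-exit compares are gone, replaced by a round trip through SCC. Schematically, with the registers from the udiv-bb1 hunk above (comments added for illustration; semantics per the GCN scalar ALU):

s_add_u32     s18, s16, 1      ; low-half add; SCC holds the carry-out
s_cselect_b64 s[10:11], -1, 0  ; materialize the carry as an all-ones mask
s_or_b32      s10, s10, s11    ; fold the wave64 mask into one register
s_cmp_lg_u32  s10, 0           ; re-establish SCC from the mask
s_addc_u32    s10, s17, 0      ; high-half add consumes SCC as carry-in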
diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index 465024a..33b0a5d 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -170,35 +170,38 @@ define amdgpu_kernel void @s_test_srem(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[2:3], 0
; GCN-IR-NEXT: s_flbit_i32_b64 s10, s[6:7]
-; GCN-IR-NEXT: s_flbit_i32_b64 s18, s[2:3]
+; GCN-IR-NEXT: s_flbit_i32_b64 s16, s[2:3]
; GCN-IR-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
-; GCN-IR-NEXT: s_sub_u32 s12, s10, s18
+; GCN-IR-NEXT: s_sub_u32 s12, s10, s16
; GCN-IR-NEXT: s_subb_u32 s13, 0, 0
; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[14:15], s[12:13], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[16:17], s[12:13], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[12:13], 63
; GCN-IR-NEXT: s_or_b64 s[14:15], s[8:9], s[14:15]
; GCN-IR-NEXT: s_and_b64 s[8:9], s[14:15], exec
; GCN-IR-NEXT: s_cselect_b32 s9, 0, s3
; GCN-IR-NEXT: s_cselect_b32 s8, 0, s2
-; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[16:17]
+; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[18:19]
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[14:15]
; GCN-IR-NEXT: s_cbranch_vccz .LBB0_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: s_add_u32 s14, s12, 1
-; GCN-IR-NEXT: s_addc_u32 s15, s13, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[14:15], 0
+; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
+; GCN-IR-NEXT: s_or_b32 s8, s8, s9
+; GCN-IR-NEXT: s_cmp_lg_u32 s8, 0
+; GCN-IR-NEXT: s_addc_u32 s8, s13, 0
+; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-IR-NEXT: s_sub_i32 s12, 63, s12
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[8:9]
; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[2:3], s12
; GCN-IR-NEXT: s_cbranch_vccz .LBB0_4
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
; GCN-IR-NEXT: s_lshr_b64 s[12:13], s[2:3], s14
-; GCN-IR-NEXT: s_add_u32 s16, s6, -1
-; GCN-IR-NEXT: s_addc_u32 s17, s7, -1
+; GCN-IR-NEXT: s_add_u32 s14, s6, -1
+; GCN-IR-NEXT: s_addc_u32 s15, s7, -1
; GCN-IR-NEXT: s_not_b64 s[4:5], s[10:11]
-; GCN-IR-NEXT: s_add_u32 s10, s4, s18
-; GCN-IR-NEXT: s_addc_u32 s11, s5, 0
-; GCN-IR-NEXT: s_mov_b64 s[14:15], 0
+; GCN-IR-NEXT: s_add_u32 s16, s4, s16
+; GCN-IR-NEXT: s_addc_u32 s17, s5, 0
+; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
; GCN-IR-NEXT: s_mov_b32 s5, 0
; GCN-IR-NEXT: .LBB0_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -206,19 +209,22 @@ define amdgpu_kernel void @s_test_srem(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GCN-IR-NEXT: s_lshr_b32 s4, s9, 31
; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[8:9], 1
; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[8:9], s[14:15], s[8:9]
-; GCN-IR-NEXT: s_sub_u32 s4, s16, s12
-; GCN-IR-NEXT: s_subb_u32 s4, s17, s13
-; GCN-IR-NEXT: s_ashr_i32 s14, s4, 31
-; GCN-IR-NEXT: s_mov_b32 s15, s14
-; GCN-IR-NEXT: s_and_b32 s4, s14, 1
-; GCN-IR-NEXT: s_and_b64 s[14:15], s[14:15], s[6:7]
-; GCN-IR-NEXT: s_sub_u32 s12, s12, s14
-; GCN-IR-NEXT: s_subb_u32 s13, s13, s15
-; GCN-IR-NEXT: s_add_u32 s10, s10, 1
-; GCN-IR-NEXT: s_addc_u32 s11, s11, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[10:11], 0
-; GCN-IR-NEXT: s_mov_b64 s[14:15], s[4:5]
+; GCN-IR-NEXT: s_or_b64 s[8:9], s[10:11], s[8:9]
+; GCN-IR-NEXT: s_sub_u32 s4, s14, s12
+; GCN-IR-NEXT: s_subb_u32 s4, s15, s13
+; GCN-IR-NEXT: s_ashr_i32 s10, s4, 31
+; GCN-IR-NEXT: s_mov_b32 s11, s10
+; GCN-IR-NEXT: s_and_b32 s4, s10, 1
+; GCN-IR-NEXT: s_and_b64 s[18:19], s[10:11], s[6:7]
+; GCN-IR-NEXT: s_sub_u32 s12, s12, s18
+; GCN-IR-NEXT: s_subb_u32 s13, s13, s19
+; GCN-IR-NEXT: s_add_u32 s16, s16, 1
+; GCN-IR-NEXT: s_cselect_b64 s[18:19], -1, 0
+; GCN-IR-NEXT: s_or_b32 s18, s18, s19
+; GCN-IR-NEXT: s_cmp_lg_u32 s18, 0
+; GCN-IR-NEXT: s_addc_u32 s17, s17, 0
+; GCN-IR-NEXT: s_cselect_b64 s[18:19], -1, 0
+; GCN-IR-NEXT: s_mov_b64 s[10:11], s[4:5]
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[18:19]
; GCN-IR-NEXT: s_cbranch_vccz .LBB0_3
; GCN-IR-NEXT: .LBB0_4: ; %Flow7
@@ -373,12 +379,12 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
; GCN-IR-LABEL: v_test_srem:
; GCN-IR: ; %bb.0: ; %_udiv-special-cases
; GCN-IR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v14, 31, v1
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v14
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v14
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v14
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v12, 31, v1
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v12
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v12
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v12
; GCN-IR-NEXT: v_ashrrev_i32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v14, vcc
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v12, vcc
; GCN-IR-NEXT: v_xor_b32_e32 v2, v2, v4
; GCN-IR-NEXT: v_xor_b32_e32 v3, v3, v4
; GCN-IR-NEXT: v_sub_i32_e32 v2, vcc, v2, v4
@@ -386,12 +392,12 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v2
; GCN-IR-NEXT: v_add_i32_e64 v4, s[6:7], 32, v4
; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT: v_min_u32_e32 v12, v4, v5
+; GCN-IR-NEXT: v_min_u32_e32 v10, v4, v5
; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v0
; GCN-IR-NEXT: v_add_i32_e64 v4, s[6:7], 32, v4
; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT: v_min_u32_e32 v13, v4, v5
-; GCN-IR-NEXT: v_sub_i32_e64 v4, s[6:7], v12, v13
+; GCN-IR-NEXT: v_min_u32_e32 v11, v4, v5
+; GCN-IR-NEXT: v_sub_i32_e64 v4, s[6:7], v10, v11
; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
; GCN-IR-NEXT: v_subb_u32_e64 v5, s[6:7], 0, 0, s[6:7]
@@ -400,7 +406,7 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[4:5]
; GCN-IR-NEXT: s_xor_b64 s[6:7], s[4:5], -1
-; GCN-IR-NEXT: v_mov_b32_e32 v15, v14
+; GCN-IR-NEXT: v_mov_b32_e32 v13, v12
; GCN-IR-NEXT: v_cndmask_b32_e64 v7, v1, 0, s[4:5]
; GCN-IR-NEXT: v_cndmask_b32_e64 v6, v0, 0, s[4:5]
; GCN-IR-NEXT: s_and_b64 s[4:5], s[6:7], vcc
@@ -408,54 +414,53 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
; GCN-IR-NEXT: s_cbranch_execz .LBB1_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v5, vcc
+; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 63, v4
-; GCN-IR-NEXT: v_mov_b32_e32 v6, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[0:1], v4
+; GCN-IR-NEXT: v_mov_b32_e32 v6, 0
; GCN-IR-NEXT: v_mov_b32_e32 v7, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execz .LBB1_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_add_i32_e32 v16, vcc, -1, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v17, vcc, -1, v3, vcc
-; GCN-IR-NEXT: v_not_b32_e32 v6, v12
-; GCN-IR-NEXT: v_lshr_b64 v[10:11], v[0:1], v8
-; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, v6, v13
-; GCN-IR-NEXT: v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT: v_addc_u32_e64 v9, s[4:5], -1, 0, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v13, 0
+; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, -1, v2
+; GCN-IR-NEXT: v_addc_u32_e32 v15, vcc, -1, v3, vcc
+; GCN-IR-NEXT: v_not_b32_e32 v6, v10
+; GCN-IR-NEXT: v_add_i32_e32 v16, vcc, v6, v11
+; GCN-IR-NEXT: v_lshr_b64 v[8:9], v[0:1], v8
+; GCN-IR-NEXT: v_addc_u32_e64 v17, s[8:9], -1, 0, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
; GCN-IR-NEXT: v_mov_b32_e32 v7, 0
; GCN-IR-NEXT: .LBB1_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[10:11], v[10:11], 1
+; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT: v_or_b32_e32 v10, v10, v6
+; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v6
; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v16, v10
-; GCN-IR-NEXT: v_subb_u32_e32 v6, vcc, v17, v11, vcc
-; GCN-IR-NEXT: v_or_b32_e32 v4, v12, v4
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v12, 31, v6
-; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v8
-; GCN-IR-NEXT: v_or_b32_e32 v5, v13, v5
-; GCN-IR-NEXT: v_and_b32_e32 v6, 1, v12
-; GCN-IR-NEXT: v_and_b32_e32 v13, v12, v3
-; GCN-IR-NEXT: v_and_b32_e32 v12, v12, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; GCN-IR-NEXT: v_sub_i32_e64 v10, s[4:5], v10, v12
-; GCN-IR-NEXT: v_subb_u32_e64 v11, s[4:5], v11, v13, s[4:5]
-; GCN-IR-NEXT: v_mov_b32_e32 v13, v7
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v12, v6
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v14, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v6, vcc, v15, v9, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v4, v10, v4
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v6
+; GCN-IR-NEXT: v_or_b32_e32 v5, v11, v5
+; GCN-IR-NEXT: v_and_b32_e32 v6, 1, v10
+; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v3
+; GCN-IR-NEXT: v_and_b32_e32 v10, v10, v2
+; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, v8, v10
+; GCN-IR-NEXT: v_subb_u32_e32 v9, vcc, v9, v11, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v16, vcc, 1, v16
+; GCN-IR-NEXT: v_addc_u32_e32 v17, vcc, 0, v17, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v11, v7
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v10, v6
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB1_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB1_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB1_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[4:5], 1
; GCN-IR-NEXT: v_or_b32_e32 v7, v7, v5
; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
@@ -469,10 +474,10 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
; GCN-IR-NEXT: v_add_i32_e32 v3, vcc, v4, v3
; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v14
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v15
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v14
-; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v15, vcc
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v12
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v13
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v12
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v13, vcc
; GCN-IR-NEXT: s_setpc_b64 s[30:31]
%result = srem i64 %x, %y
ret i64 %result
@@ -1148,35 +1153,38 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[2:3], s[8:9], 0
; GCN-IR-NEXT: s_flbit_i32_b64 s12, s[8:9]
; GCN-IR-NEXT: s_or_b64 s[10:11], s[2:3], s[10:11]
-; GCN-IR-NEXT: s_flbit_i32_b64 s20, s[6:7]
-; GCN-IR-NEXT: s_sub_u32 s14, s12, s20
+; GCN-IR-NEXT: s_flbit_i32_b64 s18, s[6:7]
+; GCN-IR-NEXT: s_sub_u32 s14, s12, s18
; GCN-IR-NEXT: s_subb_u32 s15, 0, 0
; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[16:17], s[14:15], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[14:15], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[20:21], s[14:15], 63
; GCN-IR-NEXT: s_or_b64 s[16:17], s[10:11], s[16:17]
; GCN-IR-NEXT: s_and_b64 s[10:11], s[16:17], exec
; GCN-IR-NEXT: s_cselect_b32 s11, 0, s7
; GCN-IR-NEXT: s_cselect_b32 s10, 0, s6
-; GCN-IR-NEXT: s_or_b64 s[16:17], s[16:17], s[18:19]
+; GCN-IR-NEXT: s_or_b64 s[16:17], s[16:17], s[20:21]
; GCN-IR-NEXT: s_mov_b64 s[2:3], 0
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[16:17]
; GCN-IR-NEXT: s_cbranch_vccz .LBB8_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: s_add_u32 s16, s14, 1
-; GCN-IR-NEXT: s_addc_u32 s17, s15, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[16:17], 0
+; GCN-IR-NEXT: s_cselect_b64 s[10:11], -1, 0
+; GCN-IR-NEXT: s_or_b32 s10, s10, s11
+; GCN-IR-NEXT: s_cmp_lg_u32 s10, 0
+; GCN-IR-NEXT: s_addc_u32 s10, s15, 0
+; GCN-IR-NEXT: s_cselect_b64 s[10:11], -1, 0
; GCN-IR-NEXT: s_sub_i32 s14, 63, s14
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11]
; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[6:7], s14
; GCN-IR-NEXT: s_cbranch_vccz .LBB8_4
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
; GCN-IR-NEXT: s_lshr_b64 s[14:15], s[6:7], s16
-; GCN-IR-NEXT: s_add_u32 s18, s8, -1
-; GCN-IR-NEXT: s_addc_u32 s19, s9, -1
+; GCN-IR-NEXT: s_add_u32 s16, s8, -1
+; GCN-IR-NEXT: s_addc_u32 s17, s9, -1
; GCN-IR-NEXT: s_not_b64 s[2:3], s[12:13]
-; GCN-IR-NEXT: s_add_u32 s12, s2, s20
-; GCN-IR-NEXT: s_addc_u32 s13, s3, 0
-; GCN-IR-NEXT: s_mov_b64 s[16:17], 0
+; GCN-IR-NEXT: s_add_u32 s18, s2, s18
+; GCN-IR-NEXT: s_addc_u32 s19, s3, 0
+; GCN-IR-NEXT: s_mov_b64 s[12:13], 0
; GCN-IR-NEXT: s_mov_b32 s3, 0
; GCN-IR-NEXT: .LBB8_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -1184,19 +1192,22 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
; GCN-IR-NEXT: s_lshr_b32 s2, s11, 31
; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[10:11], 1
; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[2:3]
-; GCN-IR-NEXT: s_or_b64 s[10:11], s[16:17], s[10:11]
-; GCN-IR-NEXT: s_sub_u32 s2, s18, s14
-; GCN-IR-NEXT: s_subb_u32 s2, s19, s15
-; GCN-IR-NEXT: s_ashr_i32 s16, s2, 31
-; GCN-IR-NEXT: s_mov_b32 s17, s16
-; GCN-IR-NEXT: s_and_b32 s2, s16, 1
-; GCN-IR-NEXT: s_and_b64 s[16:17], s[16:17], s[8:9]
-; GCN-IR-NEXT: s_sub_u32 s14, s14, s16
-; GCN-IR-NEXT: s_subb_u32 s15, s15, s17
-; GCN-IR-NEXT: s_add_u32 s12, s12, 1
-; GCN-IR-NEXT: s_addc_u32 s13, s13, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[20:21], s[12:13], 0
-; GCN-IR-NEXT: s_mov_b64 s[16:17], s[2:3]
+; GCN-IR-NEXT: s_or_b64 s[10:11], s[12:13], s[10:11]
+; GCN-IR-NEXT: s_sub_u32 s2, s16, s14
+; GCN-IR-NEXT: s_subb_u32 s2, s17, s15
+; GCN-IR-NEXT: s_ashr_i32 s12, s2, 31
+; GCN-IR-NEXT: s_mov_b32 s13, s12
+; GCN-IR-NEXT: s_and_b32 s2, s12, 1
+; GCN-IR-NEXT: s_and_b64 s[20:21], s[12:13], s[8:9]
+; GCN-IR-NEXT: s_sub_u32 s14, s14, s20
+; GCN-IR-NEXT: s_subb_u32 s15, s15, s21
+; GCN-IR-NEXT: s_add_u32 s18, s18, 1
+; GCN-IR-NEXT: s_cselect_b64 s[20:21], -1, 0
+; GCN-IR-NEXT: s_or_b32 s20, s20, s21
+; GCN-IR-NEXT: s_cmp_lg_u32 s20, 0
+; GCN-IR-NEXT: s_addc_u32 s19, s19, 0
+; GCN-IR-NEXT: s_cselect_b64 s[20:21], -1, 0
+; GCN-IR-NEXT: s_mov_b64 s[12:13], s[2:3]
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[20:21]
; GCN-IR-NEXT: s_cbranch_vccz .LBB8_3
; GCN-IR-NEXT: .LBB8_4: ; %Flow7
@@ -1461,34 +1472,37 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_xor_b64 s[2:3], s[2:3], s[8:9]
; GCN-IR-NEXT: s_sub_u32 s4, s2, s8
; GCN-IR-NEXT: s_subb_u32 s5, s3, s8
-; GCN-IR-NEXT: s_flbit_i32_b64 s12, s[4:5]
-; GCN-IR-NEXT: s_add_u32 s2, s12, 0xffffffc5
+; GCN-IR-NEXT: s_flbit_i32_b64 s14, s[4:5]
+; GCN-IR-NEXT: s_add_u32 s2, s14, 0xffffffc5
; GCN-IR-NEXT: s_addc_u32 s3, 0, -1
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[4:5], 0
; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[10:11], s[2:3], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[14:15], s[2:3], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[2:3], 63
; GCN-IR-NEXT: s_or_b64 s[10:11], s[8:9], s[10:11]
; GCN-IR-NEXT: s_and_b64 s[8:9], s[10:11], exec
; GCN-IR-NEXT: s_cselect_b32 s8, 0, 24
-; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[14:15]
+; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[12:13]
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11]
; GCN-IR-NEXT: s_mov_b32 s9, 0
; GCN-IR-NEXT: s_cbranch_vccz .LBB10_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: s_add_u32 s8, s2, 1
-; GCN-IR-NEXT: s_addc_u32 s9, s3, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[8:9], 0
+; GCN-IR-NEXT: s_cselect_b64 s[10:11], -1, 0
+; GCN-IR-NEXT: s_or_b32 s9, s10, s11
+; GCN-IR-NEXT: s_cmp_lg_u32 s9, 0
+; GCN-IR-NEXT: s_addc_u32 s3, s3, 0
+; GCN-IR-NEXT: s_cselect_b64 s[10:11], -1, 0
; GCN-IR-NEXT: s_sub_i32 s2, 63, s2
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11]
; GCN-IR-NEXT: s_lshl_b64 s[2:3], 24, s2
; GCN-IR-NEXT: s_cbranch_vccz .LBB10_4
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
; GCN-IR-NEXT: s_lshr_b64 s[10:11], 24, s8
-; GCN-IR-NEXT: s_add_u32 s14, s4, -1
-; GCN-IR-NEXT: s_addc_u32 s15, s5, -1
-; GCN-IR-NEXT: s_sub_u32 s8, 58, s12
-; GCN-IR-NEXT: s_subb_u32 s9, 0, 0
-; GCN-IR-NEXT: s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT: s_add_u32 s12, s4, -1
+; GCN-IR-NEXT: s_addc_u32 s13, s5, -1
+; GCN-IR-NEXT: s_sub_u32 s14, 58, s14
+; GCN-IR-NEXT: s_subb_u32 s15, 0, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
; GCN-IR-NEXT: s_mov_b32 s7, 0
; GCN-IR-NEXT: .LBB10_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -1496,19 +1510,22 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_lshr_b32 s6, s3, 31
; GCN-IR-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[6:7]
-; GCN-IR-NEXT: s_or_b64 s[2:3], s[12:13], s[2:3]
-; GCN-IR-NEXT: s_sub_u32 s6, s14, s10
-; GCN-IR-NEXT: s_subb_u32 s6, s15, s11
-; GCN-IR-NEXT: s_ashr_i32 s12, s6, 31
-; GCN-IR-NEXT: s_mov_b32 s13, s12
-; GCN-IR-NEXT: s_and_b32 s6, s12, 1
-; GCN-IR-NEXT: s_and_b64 s[12:13], s[12:13], s[4:5]
-; GCN-IR-NEXT: s_sub_u32 s10, s10, s12
-; GCN-IR-NEXT: s_subb_u32 s11, s11, s13
-; GCN-IR-NEXT: s_add_u32 s8, s8, 1
-; GCN-IR-NEXT: s_addc_u32 s9, s9, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[16:17], s[8:9], 0
-; GCN-IR-NEXT: s_mov_b64 s[12:13], s[6:7]
+; GCN-IR-NEXT: s_or_b64 s[2:3], s[8:9], s[2:3]
+; GCN-IR-NEXT: s_sub_u32 s6, s12, s10
+; GCN-IR-NEXT: s_subb_u32 s6, s13, s11
+; GCN-IR-NEXT: s_ashr_i32 s8, s6, 31
+; GCN-IR-NEXT: s_mov_b32 s9, s8
+; GCN-IR-NEXT: s_and_b32 s6, s8, 1
+; GCN-IR-NEXT: s_and_b64 s[16:17], s[8:9], s[4:5]
+; GCN-IR-NEXT: s_sub_u32 s10, s10, s16
+; GCN-IR-NEXT: s_subb_u32 s11, s11, s17
+; GCN-IR-NEXT: s_add_u32 s14, s14, 1
+; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT: s_or_b32 s16, s16, s17
+; GCN-IR-NEXT: s_cmp_lg_u32 s16, 0
+; GCN-IR-NEXT: s_addc_u32 s15, s15, 0
+; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], s[6:7]
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[16:17]
; GCN-IR-NEXT: s_cbranch_vccz .LBB10_3
; GCN-IR-NEXT: .LBB10_4: ; %Flow6
@@ -1647,9 +1664,9 @@ define i64 @v_test_srem_k_num_i64(i64 %x) {
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT: v_min_u32_e32 v8, v2, v3
; GCN-IR-NEXT: s_movk_i32 s6, 0xffc5
-; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, s6, v8
; GCN-IR-NEXT: v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1663,53 +1680,52 @@ define i64 @v_test_srem_k_num_i64(i64 %x) {
; GCN-IR-NEXT: s_cbranch_execz .LBB11_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
; GCN-IR-NEXT: v_lshl_b64 v[2:3], 24, v2
+; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execz .LBB11_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, -1, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT: v_lshr_b64 v[8:9], 24, v6
-; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, 58, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT: v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, -1, v0
+; GCN-IR-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc
+; GCN-IR-NEXT: v_sub_i32_e32 v12, vcc, 58, v8
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], 24, v6
+; GCN-IR-NEXT: v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
; GCN-IR-NEXT: .LBB11_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v12, v8
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v13, v9, vcc
-; GCN-IR-NEXT: v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT: v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT: v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT: v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT: v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v10, v6
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v11, v7, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT: v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT: v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB11_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB11_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB11_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v3
; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v2
@@ -1838,9 +1854,9 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) {
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT: v_min_u32_e32 v8, v2, v3
; GCN-IR-NEXT: s_movk_i32 s6, 0xffd0
-; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, s6, v8
; GCN-IR-NEXT: v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1855,54 +1871,53 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) {
; GCN-IR-NEXT: s_cbranch_execz .LBB12_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; GCN-IR-NEXT: s_mov_b64 s[4:5], 0x8000
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0x8000
+; GCN-IR-NEXT: v_lshl_b64 v[2:3], s[8:9], v2
; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_lshl_b64 v[2:3], s[4:5], v2
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[10:11]
; GCN-IR-NEXT: s_cbranch_execz .LBB12_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, -1, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT: v_lshr_b64 v[8:9], s[4:5], v6
-; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, 47, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT: v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, -1, v0
+; GCN-IR-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc
+; GCN-IR-NEXT: v_sub_i32_e32 v12, vcc, 47, v8
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[8:9], v6
+; GCN-IR-NEXT: v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
; GCN-IR-NEXT: .LBB12_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v12, v8
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v13, v9, vcc
-; GCN-IR-NEXT: v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT: v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT: v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT: v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT: v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v10, v6
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v11, v7, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT: v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT: v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB12_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB12_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB12_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v3
; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v2
@@ -1937,20 +1952,20 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
; GCN-IR-LABEL: v_test_srem_pow2_k_den_i64:
; GCN-IR: ; %bb.0: ; %_udiv-special-cases
; GCN-IR-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v12
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v12, vcc
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v1
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v10
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v10, vcc
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
; GCN-IR-NEXT: v_add_i32_e64 v2, s[4:5], 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 48, v10
+; GCN-IR-NEXT: v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 48, v8
; GCN-IR-NEXT: v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5]
; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[4:5], 63, v[2:3]
-; GCN-IR-NEXT: v_mov_b32_e32 v13, v12
+; GCN-IR-NEXT: v_mov_b32_e32 v11, v10
; GCN-IR-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[2:3]
; GCN-IR-NEXT: s_xor_b64 s[6:7], s[4:5], -1
@@ -1961,51 +1976,50 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
; GCN-IR-NEXT: s_cbranch_execz .LBB13_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[0:1], v2
+; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execz .LBB13_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_lshr_b64 v[8:9], v[0:1], v6
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 0xffffffcf, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT: v_addc_u32_e64 v7, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, 0xffffffcf, v8
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], v[0:1], v6
+; GCN-IR-NEXT: v_addc_u32_e64 v13, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT: s_movk_i32 s10, 0x7fff
; GCN-IR-NEXT: .LBB13_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v4
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, s12, v8
+; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, 0, v9, vcc
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT: v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT: v_and_b32_e32 v10, 0x8000, v10
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT: v_subbrev_u32_e64 v9, s[4:5], 0, v9, s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, s10, v6
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, 0, v7, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT: v_and_b32_e32 v8, 0x8000, v8
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT: v_subbrev_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB13_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB13_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB13_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v3
; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v2
@@ -2014,10 +2028,10 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[4:5], 15
; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v13
-; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT: v_xor_b32_e32 v1, v1, v11
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v11, vcc
; GCN-IR-NEXT: s_setpc_b64 s[30:31]
%result = srem i64 %x, 32768
ret i64 %result
diff --git a/llvm/test/CodeGen/AMDGPU/uaddo.ll b/llvm/test/CodeGen/AMDGPU/uaddo.ll
index e1574dc..bb5918b2 100644
--- a/llvm/test/CodeGen/AMDGPU/uaddo.ll
+++ b/llvm/test/CodeGen/AMDGPU/uaddo.ll
@@ -14,15 +14,16 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_add_u32 s0, s2, s8
-; SI-NEXT: v_mov_b32_e32 v0, s2
+; SI-NEXT: s_add_u32 s2, s2, s8
; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_addc_u32 s1, s3, s9
+; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; SI-NEXT: s_or_b32 s0, s0, s1
+; SI-NEXT: s_cmp_lg_u32 s0, 0
+; SI-NEXT: s_addc_u32 s3, s3, s9
+; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; SI-NEXT: v_mov_b32_e32 v1, s3
-; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
-; SI-NEXT: v_mov_b32_e32 v1, s1
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_endpgm
@@ -33,15 +34,15 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: s_add_u32 s0, s2, s4
-; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: s_add_u32 s2, s2, s4
; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT: s_cmp_lg_u64 s[0:1], 0
+; VI-NEXT: s_addc_u32 s3, s3, s5
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
; VI-NEXT: v_mov_b32_e32 v3, s3
-; VI-NEXT: s_addc_u32 s1, s3, s5
-; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
-; VI-NEXT: v_mov_b32_e32 v3, s1
-; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT: v_add_u32_e32 v2, vcc, s2, v2
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
@@ -52,14 +53,14 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v0, s2
-; GFX9-NEXT: s_add_u32 s4, s2, s6
-; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: s_addc_u32 s5, s3, s7
-; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v1, s5
-; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s4, v0
+; GFX9-NEXT: s_add_u32 s6, s2, s6
+; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0
+; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0
+; GFX9-NEXT: s_addc_u32 s4, s3, s7
+; GFX9-NEXT: s_cselect_b64 s[2:3], -1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s6, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX9-NEXT: s_endpgm
@@ -71,12 +72,14 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; GFX10-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: s_add_u32 s4, s2, s6
-; GFX10-NEXT: s_addc_u32 s5, s3, s7
-; GFX10-NEXT: v_cmp_lt_u64_e64 s2, s[4:5], s[2:3]
-; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX10-NEXT: v_add_co_u32 v0, s2, s4, v0
-; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s2, s5, 0, s2
+; GFX10-NEXT: s_add_u32 s2, s2, s6
+; GFX10-NEXT: s_cselect_b32 s4, -1, 0
+; GFX10-NEXT: s_cmp_lg_u32 s4, 0
+; GFX10-NEXT: s_addc_u32 s3, s3, s7
+; GFX10-NEXT: s_cselect_b32 s4, -1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s4
+; GFX10-NEXT: v_add_co_u32 v0, s2, s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s2, s3, 0, s2
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX10-NEXT: s_endpgm
;
@@ -87,14 +90,16 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; GFX11-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-NEXT: v_mov_b32_e32 v2, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_add_u32 s4, s2, s4
-; GFX11-NEXT: s_addc_u32 s5, s3, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cmp_lt_u64_e64 s2, s[4:5], s[2:3]
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
+; GFX11-NEXT: s_add_u32 s2, s2, s4
+; GFX11-NEXT: s_cselect_b32 s4, -1, 0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: s_cmp_lg_u32 s4, 0
+; GFX11-NEXT: s_addc_u32 s3, s3, s5
+; GFX11-NEXT: s_cselect_b32 s4, -1, 0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_u32 v0, s2, s4, v0
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s5, 0, s2
+; GFX11-NEXT: v_add_co_u32 v0, s2, s2, v0
+; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s3, 0, s2
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_endpgm
%uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
@@ -436,21 +441,23 @@ define amdgpu_kernel void @s_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_add_u32 s6, s4, s6
-; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: s_addc_u32 s7, s5, s7
-; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; SI-NEXT: v_mov_b32_e32 v2, s6
+; SI-NEXT: s_add_u32 s4, s4, s6
+; SI-NEXT: s_cselect_b64 s[12:13], -1, 0
+; SI-NEXT: s_or_b32 s6, s12, s13
+; SI-NEXT: s_cmp_lg_u32 s6, 0
+; SI-NEXT: s_addc_u32 s5, s5, s7
; SI-NEXT: s_mov_b32 s8, s0
; SI-NEXT: s_mov_b32 s9, s1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: v_mov_b32_e32 v1, s5
+; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
; SI-NEXT: s_mov_b32 s0, s2
; SI-NEXT: s_mov_b32 s1, s3
; SI-NEXT: s_mov_b32 s2, s10
; SI-NEXT: s_mov_b32 s3, s11
-; SI-NEXT: v_mov_b32_e32 v3, s7
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-NEXT: buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; SI-NEXT: buffer_store_byte v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
@@ -458,37 +465,37 @@ define amdgpu_kernel void @s_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: s_add_u32 s2, s4, s6
; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: s_add_u32 s0, s4, s6
-; VI-NEXT: v_mov_b32_e32 v4, s4
; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: s_addc_u32 s1, s5, s7
-; VI-NEXT: v_mov_b32_e32 v5, s5
-; VI-NEXT: v_mov_b32_e32 v7, s1
-; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[4:5]
-; VI-NEXT: v_mov_b32_e32 v6, s0
-; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT: s_cmp_lg_u64 s[0:1], 0
+; VI-NEXT: s_addc_u32 s0, s5, s7
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_mov_b32_e32 v5, s0
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
; VI-NEXT: v_mov_b32_e32 v3, s3
-; VI-NEXT: flat_store_dwordx2 v[0:1], v[6:7]
-; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; VI-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
+; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; VI-NEXT: flat_store_byte v[2:3], v0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: s_uaddo_i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_add_u32 s0, s12, s14
-; GFX9-NEXT: v_mov_b32_e32 v0, s12
-; GFX9-NEXT: v_mov_b32_e32 v1, s13
-; GFX9-NEXT: s_addc_u32 s1, s13, s15
-; GFX9-NEXT: v_mov_b32_e32 v3, s1
-; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v2, s0
-; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT: global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX9-NEXT: global_store_byte v4, v0, s[10:11]
+; GFX9-NEXT: s_add_u32 s2, s12, s14
+; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX9-NEXT: s_addc_u32 s0, s13, s15
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[0:1]
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
+; GFX9-NEXT: global_store_byte v2, v3, s[10:11]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: s_uaddo_i64:
@@ -497,10 +504,12 @@ define amdgpu_kernel void @s_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_add_u32 s0, s12, s14
-; GFX10-NEXT: s_addc_u32 s1, s13, s15
+; GFX10-NEXT: s_cselect_b32 s1, -1, 0
; GFX10-NEXT: v_mov_b32_e32 v0, s0
+; GFX10-NEXT: s_cmp_lg_u32 s1, 0
+; GFX10-NEXT: s_addc_u32 s1, s13, s15
+; GFX10-NEXT: s_cselect_b32 s0, -1, 0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
-; GFX10-NEXT: v_cmp_lt_u64_e64 s0, s[0:1], s[12:13]
; GFX10-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
; GFX10-NEXT: global_store_byte v2, v3, s[10:11]
@@ -510,12 +519,13 @@ define amdgpu_kernel void @s_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_add_u32 s6, s4, s6
-; GFX11-NEXT: s_addc_u32 s7, s5, s7
-; GFX11-NEXT: v_mov_b32_e32 v0, s6
-; GFX11-NEXT: v_cmp_lt_u64_e64 s4, s[6:7], s[4:5]
-; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: s_add_u32 s4, s4, s6
+; GFX11-NEXT: s_cselect_b32 s6, -1, 0
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
+; GFX11-NEXT: s_cmp_lg_u32 s6, 0
+; GFX11-NEXT: s_addc_u32 s5, s5, s7
+; GFX11-NEXT: s_cselect_b32 s4, -1, 0
+; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s5
; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -551,10 +561,10 @@ define amdgpu_kernel void @v_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v2, vcc, v0, v2
-; SI-NEXT: v_addc_u32_e32 v3, vcc, v1, v3, vcc
-; SI-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; SI-NEXT: buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; SI-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
@@ -574,10 +584,9 @@ define amdgpu_kernel void @v_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; VI-NEXT: v_mov_b32_e32 v6, s2
; VI-NEXT: v_mov_b32_e32 v7, s3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_add_u32_e32 v2, vcc, v0, v2
-; VI-NEXT: v_addc_u32_e32 v3, vcc, v1, v3, vcc
-; VI-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; VI-NEXT: flat_store_dwordx2 v[4:5], v[2:3]
+; VI-NEXT: v_add_u32_e32 v0, vcc, v0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; VI-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; VI-NEXT: flat_store_byte v[6:7], v0
; VI-NEXT: s_endpgm
@@ -590,10 +599,9 @@ define amdgpu_kernel void @v_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX9-NEXT: global_load_dwordx2 v[0:1], v4, s[12:13]
; GFX9-NEXT: global_load_dwordx2 v[2:3], v4, s[14:15]
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v0, v2
-; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX9-NEXT: global_store_dwordx2 v4, v[2:3], s[8:9]
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: global_store_dwordx2 v4, v[0:1], s[8:9]
; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; GFX9-NEXT: global_store_byte v4, v0, s[10:11]
; GFX9-NEXT: s_endpgm
@@ -607,12 +615,11 @@ define amdgpu_kernel void @v_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX10-NEXT: global_load_dwordx2 v[0:1], v4, s[12:13]
; GFX10-NEXT: global_load_dwordx2 v[2:3], v4, s[14:15]
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, v0, v2
-; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo
-; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX10-NEXT: global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX10-NEXT: global_store_byte v4, v0, s[10:11]
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX10-NEXT: global_store_dwordx2 v4, v[0:1], s[8:9]
+; GFX10-NEXT: global_store_byte v4, v2, s[10:11]
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: v_uaddo_i64:
@@ -624,14 +631,12 @@ define amdgpu_kernel void @v_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX11-NEXT: global_load_b64 v[0:1], v4, s[4:5]
; GFX11-NEXT: global_load_b64 v[2:3], v4, s[6:7]
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v0, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
-; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
; GFX11-NEXT: s_clause 0x1
-; GFX11-NEXT: global_store_b64 v4, v[2:3], s[0:1]
-; GFX11-NEXT: global_store_b8 v4, v0, s[2:3]
+; GFX11-NEXT: global_store_b64 v4, v[0:1], s[0:1]
+; GFX11-NEXT: global_store_b8 v4, v2, s[2:3]
; GFX11-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
diff --git a/llvm/test/CodeGen/AMDGPU/uaddsat.ll b/llvm/test/CodeGen/AMDGPU/uaddsat.ll
index 9230174..7f89581 100644
--- a/llvm/test/CodeGen/AMDGPU/uaddsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/uaddsat.ll
@@ -693,52 +693,47 @@ define i64 @v_uaddsat_i64(i64 %lhs, i64 %rhs) {
; GFX6-LABEL: v_uaddsat_i64:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX6-NEXT: v_add_i32_e32 v2, vcc, v0, v2
-; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v1, v3, vcc
-; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX6-NEXT: v_cndmask_b32_e64 v0, v2, -1, vcc
-; GFX6-NEXT: v_cndmask_b32_e64 v1, v3, -1, vcc
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_uaddsat_i64:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v2, vcc, v0, v2
-; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v1, v3, vcc
-; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX8-NEXT: v_cndmask_b32_e64 v0, v2, -1, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v1, v3, -1, vcc
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_uaddsat_i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v0, v2
-; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, -1, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v1, v3, -1, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_uaddsat_i64:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, v0, v2
-; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo
-; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, -1, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v1, v3, -1, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc_lo
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_uaddsat_i64:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v0, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
-; GFX11-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, -1, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v1, v3, -1, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc_lo
; GFX11-NEXT: s_setpc_b64 s[30:31]
%result = call i64 @llvm.uadd.sat.i64(i64 %lhs, i64 %rhs)
ret i64 %result
diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index 1ed04f8..41199b0 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -146,8 +146,11 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y
; GCN-IR-NEXT: s_cbranch_vccz .LBB0_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: s_add_u32 s14, s12, 1
-; GCN-IR-NEXT: s_addc_u32 s15, s13, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[14:15], 0
+; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
+; GCN-IR-NEXT: s_or_b32 s8, s8, s9
+; GCN-IR-NEXT: s_cmp_lg_u32 s8, 0
+; GCN-IR-NEXT: s_addc_u32 s8, s13, 0
+; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-IR-NEXT: s_sub_i32 s12, 63, s12
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[8:9]
; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[2:3], s12
@@ -157,9 +160,9 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y
; GCN-IR-NEXT: s_add_u32 s14, s6, -1
; GCN-IR-NEXT: s_addc_u32 s15, s7, -1
; GCN-IR-NEXT: s_not_b64 s[2:3], s[10:11]
-; GCN-IR-NEXT: s_add_u32 s2, s2, s16
-; GCN-IR-NEXT: s_addc_u32 s3, s3, 0
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT: s_add_u32 s10, s2, s16
+; GCN-IR-NEXT: s_addc_u32 s11, s3, 0
+; GCN-IR-NEXT: s_mov_b64 s[2:3], 0
; GCN-IR-NEXT: s_mov_b32 s5, 0
; GCN-IR-NEXT: .LBB0_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -167,19 +170,22 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y
; GCN-IR-NEXT: s_lshr_b32 s4, s9, 31
; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[8:9], 1
; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[8:9], s[10:11], s[8:9]
-; GCN-IR-NEXT: s_sub_u32 s4, s14, s12
-; GCN-IR-NEXT: s_subb_u32 s4, s15, s13
-; GCN-IR-NEXT: s_ashr_i32 s10, s4, 31
-; GCN-IR-NEXT: s_mov_b32 s11, s10
-; GCN-IR-NEXT: s_and_b32 s4, s10, 1
-; GCN-IR-NEXT: s_and_b64 s[10:11], s[10:11], s[6:7]
-; GCN-IR-NEXT: s_sub_u32 s12, s12, s10
-; GCN-IR-NEXT: s_subb_u32 s13, s13, s11
-; GCN-IR-NEXT: s_add_u32 s2, s2, 1
-; GCN-IR-NEXT: s_addc_u32 s3, s3, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[16:17], s[2:3], 0
-; GCN-IR-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT: s_or_b64 s[8:9], s[2:3], s[8:9]
+; GCN-IR-NEXT: s_sub_u32 s2, s14, s12
+; GCN-IR-NEXT: s_subb_u32 s2, s15, s13
+; GCN-IR-NEXT: s_ashr_i32 s2, s2, 31
+; GCN-IR-NEXT: s_mov_b32 s3, s2
+; GCN-IR-NEXT: s_and_b32 s4, s2, 1
+; GCN-IR-NEXT: s_and_b64 s[16:17], s[2:3], s[6:7]
+; GCN-IR-NEXT: s_sub_u32 s12, s12, s16
+; GCN-IR-NEXT: s_subb_u32 s13, s13, s17
+; GCN-IR-NEXT: s_add_u32 s10, s10, 1
+; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT: s_or_b32 s16, s16, s17
+; GCN-IR-NEXT: s_cmp_lg_u32 s16, 0
+; GCN-IR-NEXT: s_addc_u32 s11, s11, 0
+; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT: s_mov_b64 s[2:3], s[4:5]
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[16:17]
; GCN-IR-NEXT: s_cbranch_vccz .LBB0_3
; GCN-IR-NEXT: .LBB0_4: ; %Flow7
@@ -313,19 +319,19 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v2
; GCN-IR-NEXT: v_add_i32_e64 v4, s[6:7], 32, v4
; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT: v_min_u32_e32 v14, v4, v5
+; GCN-IR-NEXT: v_min_u32_e32 v8, v4, v5
; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v0
; GCN-IR-NEXT: v_add_i32_e64 v4, s[6:7], 32, v4
; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT: v_min_u32_e32 v15, v4, v5
-; GCN-IR-NEXT: v_sub_i32_e64 v8, s[6:7], v14, v15
+; GCN-IR-NEXT: v_min_u32_e32 v9, v4, v5
+; GCN-IR-NEXT: v_sub_i32_e64 v6, s[6:7], v8, v9
; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT: v_subb_u32_e64 v9, s[6:7], 0, 0, s[6:7]
-; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[6:7], 63, v[8:9]
+; GCN-IR-NEXT: v_subb_u32_e64 v7, s[6:7], 0, 0, s[6:7]
+; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[6:7], 63, v[6:7]
; GCN-IR-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[8:9]
+; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[6:7]
; GCN-IR-NEXT: s_xor_b64 s[6:7], s[4:5], -1
; GCN-IR-NEXT: v_cndmask_b32_e64 v4, v1, 0, s[4:5]
; GCN-IR-NEXT: v_cndmask_b32_e64 v5, v0, 0, s[4:5]
@@ -333,55 +339,54 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; GCN-IR-NEXT: s_cbranch_execz .LBB1_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, 1, v8
-; GCN-IR-NEXT: v_addc_u32_e32 v11, vcc, 0, v9, vcc
-; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 63, v8
-; GCN-IR-NEXT: v_mov_b32_e32 v6, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[10:11]
+; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, 1, v6
+; GCN-IR-NEXT: v_addc_u32_e32 v4, vcc, 0, v7, vcc
+; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 63, v6
; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[0:1], v4
+; GCN-IR-NEXT: v_mov_b32_e32 v6, 0
; GCN-IR-NEXT: v_mov_b32_e32 v7, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execz .LBB1_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, -1, v2
-; GCN-IR-NEXT: v_lshr_b64 v[8:9], v[0:1], v10
-; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, -1, v3, vcc
-; GCN-IR-NEXT: v_not_b32_e32 v0, v14
-; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, v0, v15
-; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT: v_addc_u32_e64 v1, s[4:5], -1, 0, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT: v_lshr_b64 v[0:1], v[0:1], v10
+; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, -1, v2
+; GCN-IR-NEXT: v_addc_u32_e32 v11, vcc, -1, v3, vcc
+; GCN-IR-NEXT: v_not_b32_e32 v6, v8
+; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, v6, v9
+; GCN-IR-NEXT: v_addc_u32_e64 v13, s[8:9], -1, 0, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
; GCN-IR-NEXT: v_mov_b32_e32 v7, 0
; GCN-IR-NEXT: .LBB1_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v6
+; GCN-IR-NEXT: v_or_b32_e32 v0, v0, v6
; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v12, v8
-; GCN-IR-NEXT: v_subb_u32_e32 v6, vcc, v13, v9, vcc
-; GCN-IR-NEXT: v_or_b32_e32 v4, v10, v4
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v6
-; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 1, v0
-; GCN-IR-NEXT: v_or_b32_e32 v5, v11, v5
-; GCN-IR-NEXT: v_and_b32_e32 v6, 1, v10
-; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v3
-; GCN-IR-NEXT: v_and_b32_e32 v10, v10, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT: v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT: v_mov_b32_e32 v11, v7
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v10, v6
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v10, v0
+; GCN-IR-NEXT: v_subb_u32_e32 v6, vcc, v11, v1, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v4, v8, v4
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v6
+; GCN-IR-NEXT: v_or_b32_e32 v5, v9, v5
+; GCN-IR-NEXT: v_and_b32_e32 v6, 1, v8
+; GCN-IR-NEXT: v_and_b32_e32 v9, v8, v3
+; GCN-IR-NEXT: v_and_b32_e32 v8, v8, v2
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v1, vcc, v1, v9, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v7
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v8, v6
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB1_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB1_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB1_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[4:5], 1
; GCN-IR-NEXT: v_or_b32_e32 v4, v7, v1
; GCN-IR-NEXT: v_or_b32_e32 v5, v6, v0
@@ -923,34 +928,37 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-IR-NEXT: s_mov_b64 s[4:5], 0
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT: s_flbit_i32_b64 s12, s[2:3]
-; GCN-IR-NEXT: s_add_u32 s8, s12, 0xffffffc5
+; GCN-IR-NEXT: s_flbit_i32_b64 s14, s[2:3]
+; GCN-IR-NEXT: s_add_u32 s8, s14, 0xffffffc5
; GCN-IR-NEXT: s_addc_u32 s9, 0, -1
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[2:3], 0
; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[10:11], s[8:9], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[14:15], s[8:9], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[8:9], 63
; GCN-IR-NEXT: s_or_b64 s[10:11], s[6:7], s[10:11]
; GCN-IR-NEXT: s_and_b64 s[6:7], s[10:11], exec
; GCN-IR-NEXT: s_cselect_b32 s6, 0, 24
-; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[14:15]
+; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[12:13]
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11]
; GCN-IR-NEXT: s_mov_b32 s7, 0
; GCN-IR-NEXT: s_cbranch_vccz .LBB8_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: s_add_u32 s10, s8, 1
-; GCN-IR-NEXT: s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
+; GCN-IR-NEXT: s_or_b32 s6, s6, s7
+; GCN-IR-NEXT: s_cmp_lg_u32 s6, 0
+; GCN-IR-NEXT: s_addc_u32 s6, s9, 0
+; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN-IR-NEXT: s_sub_i32 s8, 63, s8
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[6:7]
; GCN-IR-NEXT: s_lshl_b64 s[6:7], 24, s8
; GCN-IR-NEXT: s_cbranch_vccz .LBB8_4
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
; GCN-IR-NEXT: s_lshr_b64 s[10:11], 24, s10
-; GCN-IR-NEXT: s_add_u32 s14, s2, -1
-; GCN-IR-NEXT: s_addc_u32 s15, s3, -1
-; GCN-IR-NEXT: s_sub_u32 s8, 58, s12
-; GCN-IR-NEXT: s_subb_u32 s9, 0, 0
-; GCN-IR-NEXT: s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT: s_add_u32 s12, s2, -1
+; GCN-IR-NEXT: s_addc_u32 s13, s3, -1
+; GCN-IR-NEXT: s_sub_u32 s14, 58, s14
+; GCN-IR-NEXT: s_subb_u32 s15, 0, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
; GCN-IR-NEXT: s_mov_b32 s5, 0
; GCN-IR-NEXT: .LBB8_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -958,19 +966,22 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_lshr_b32 s4, s7, 31
; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[6:7], s[12:13], s[6:7]
-; GCN-IR-NEXT: s_sub_u32 s4, s14, s10
-; GCN-IR-NEXT: s_subb_u32 s4, s15, s11
-; GCN-IR-NEXT: s_ashr_i32 s12, s4, 31
-; GCN-IR-NEXT: s_mov_b32 s13, s12
-; GCN-IR-NEXT: s_and_b32 s4, s12, 1
-; GCN-IR-NEXT: s_and_b64 s[12:13], s[12:13], s[2:3]
-; GCN-IR-NEXT: s_sub_u32 s10, s10, s12
-; GCN-IR-NEXT: s_subb_u32 s11, s11, s13
-; GCN-IR-NEXT: s_add_u32 s8, s8, 1
-; GCN-IR-NEXT: s_addc_u32 s9, s9, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[16:17], s[8:9], 0
-; GCN-IR-NEXT: s_mov_b64 s[12:13], s[4:5]
+; GCN-IR-NEXT: s_or_b64 s[6:7], s[8:9], s[6:7]
+; GCN-IR-NEXT: s_sub_u32 s4, s12, s10
+; GCN-IR-NEXT: s_subb_u32 s4, s13, s11
+; GCN-IR-NEXT: s_ashr_i32 s8, s4, 31
+; GCN-IR-NEXT: s_mov_b32 s9, s8
+; GCN-IR-NEXT: s_and_b32 s4, s8, 1
+; GCN-IR-NEXT: s_and_b64 s[16:17], s[8:9], s[2:3]
+; GCN-IR-NEXT: s_sub_u32 s10, s10, s16
+; GCN-IR-NEXT: s_subb_u32 s11, s11, s17
+; GCN-IR-NEXT: s_add_u32 s14, s14, 1
+; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT: s_or_b32 s16, s16, s17
+; GCN-IR-NEXT: s_cmp_lg_u32 s16, 0
+; GCN-IR-NEXT: s_addc_u32 s15, s15, 0
+; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], s[4:5]
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[16:17]
; GCN-IR-NEXT: s_cbranch_vccz .LBB8_3
; GCN-IR-NEXT: .LBB8_4: ; %Flow6
@@ -1094,12 +1105,12 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) {
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 0xffffffd0, v10
-; GCN-IR-NEXT: v_addc_u32_e64 v7, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT: v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT: v_add_i32_e32 v4, vcc, 0xffffffd0, v8
+; GCN-IR-NEXT: v_addc_u32_e64 v5, s[6:7], 0, -1, vcc
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[6:7]
-; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[6:7], 63, v[6:7]
+; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[4:5]
+; GCN-IR-NEXT: v_cmp_ne_u64_e64 s[6:7], 63, v[4:5]
; GCN-IR-NEXT: v_mov_b32_e32 v3, 0x8000
; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], vcc
; GCN-IR-NEXT: v_cndmask_b32_e64 v3, v3, 0, s[4:5]
@@ -1109,55 +1120,54 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) {
; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; GCN-IR-NEXT: s_cbranch_execz .LBB9_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v6
-; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v6
-; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v7, vcc
-; GCN-IR-NEXT: s_mov_b64 s[4:5], 0x8000
+; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v4
+; GCN-IR-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc
+; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v4
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0x8000
+; GCN-IR-NEXT: v_lshl_b64 v[2:3], s[8:9], v2
; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
-; GCN-IR-NEXT: v_lshl_b64 v[2:3], s[4:5], v2
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[10:11]
; GCN-IR-NEXT: s_cbranch_execz .LBB9_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, -1, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT: v_lshr_b64 v[8:9], s[4:5], v8
-; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, 47, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT: v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, -1, v0
+; GCN-IR-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc
+; GCN-IR-NEXT: v_sub_i32_e32 v12, vcc, 47, v8
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[8:9], v6
+; GCN-IR-NEXT: v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
; GCN-IR-NEXT: .LBB9_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v12, v8
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v13, v9, vcc
-; GCN-IR-NEXT: v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT: v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT: v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT: v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT: v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v10, v6
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v11, v7, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT: v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT: v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB9_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB9_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB9_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[2:3], 1
; GCN-IR-NEXT: v_or_b32_e32 v2, v5, v1
; GCN-IR-NEXT: v_or_b32_e32 v3, v4, v0
@@ -1184,13 +1194,13 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
; GCN-IR-NEXT: v_add_i32_e64 v2, s[4:5], 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT: v_sub_i32_e64 v6, s[4:5], 48, v10
-; GCN-IR-NEXT: v_subb_u32_e64 v7, s[4:5], 0, 0, s[4:5]
+; GCN-IR-NEXT: v_min_u32_e32 v6, v2, v3
+; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 48, v6
+; GCN-IR-NEXT: v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[4:5], 63, v[6:7]
+; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
; GCN-IR-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[6:7]
+; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[4:5]
; GCN-IR-NEXT: s_xor_b64 s[6:7], s[4:5], -1
; GCN-IR-NEXT: v_cndmask_b32_e64 v2, v1, 0, s[4:5]
; GCN-IR-NEXT: v_cndmask_b32_e64 v3, v0, 0, s[4:5]
@@ -1198,52 +1208,51 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; GCN-IR-NEXT: s_cbranch_execz .LBB10_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v6
-; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v6
-; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT: v_add_i32_e32 v7, vcc, 1, v4
+; GCN-IR-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc
+; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[0:1], v2
+; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execz .LBB10_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_lshr_b64 v[6:7], v[0:1], v8
-; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 0xffffffcf, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT: v_addc_u32_e64 v1, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 0xffffffcf, v6
+; GCN-IR-NEXT: v_lshr_b64 v[0:1], v[0:1], v7
+; GCN-IR-NEXT: v_addc_u32_e64 v9, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v7, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT: s_movk_i32 s10, 0x7fff
; GCN-IR-NEXT: .LBB10_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, s12, v6
+; GCN-IR-NEXT: v_or_b32_e32 v0, v0, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 1, v0
-; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
-; GCN-IR-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
-; GCN-IR-NEXT: v_and_b32_e32 v8, 0x8000, v8
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
-; GCN-IR-NEXT: v_sub_i32_e64 v6, s[4:5], v6, v8
-; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
-; GCN-IR-NEXT: v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, s10, v0
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, 0, v1, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v2, v6, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v6, 31, v4
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v6
+; GCN-IR-NEXT: v_and_b32_e32 v6, 0x8000, v6
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v6
+; GCN-IR-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v8
+; GCN-IR-NEXT: v_or_b32_e32 v3, v7, v3
+; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v7, v5
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v6, v4
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB10_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB10_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB10_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[2:3], 1
; GCN-IR-NEXT: v_or_b32_e32 v2, v5, v1
; GCN-IR-NEXT: v_or_b32_e32 v3, v4, v0
@@ -1290,52 +1299,58 @@ define amdgpu_kernel void @s_test_udiv_k_den_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR: ; %bb.0: ; %_udiv-special-cases
; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT: s_flbit_i32_b64 s12, s[2:3]
-; GCN-IR-NEXT: s_sub_u32 s8, 59, s12
+; GCN-IR-NEXT: s_flbit_i32_b64 s10, s[2:3]
+; GCN-IR-NEXT: s_sub_u32 s8, 59, s10
; GCN-IR-NEXT: s_subb_u32 s9, 0, 0
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], s[2:3], 0
; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[6:7], s[8:9], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[8:9], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[8:9], 63
; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
; GCN-IR-NEXT: s_and_b64 s[6:7], s[4:5], exec
; GCN-IR-NEXT: s_cselect_b32 s7, 0, s3
; GCN-IR-NEXT: s_cselect_b32 s6, 0, s2
-; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[10:11]
+; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[12:13]
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[4:5]
; GCN-IR-NEXT: s_mov_b64 s[4:5], 0
; GCN-IR-NEXT: s_cbranch_vccz .LBB11_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: s_add_u32 s10, s8, 1
-; GCN-IR-NEXT: s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT: s_add_u32 s11, s8, 1
+; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
+; GCN-IR-NEXT: s_or_b32 s6, s6, s7
+; GCN-IR-NEXT: s_cmp_lg_u32 s6, 0
+; GCN-IR-NEXT: s_addc_u32 s6, s9, 0
+; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN-IR-NEXT: s_sub_i32 s8, 63, s8
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[6:7]
; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[2:3], s8
; GCN-IR-NEXT: s_cbranch_vccz .LBB11_4
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: s_lshr_b64 s[8:9], s[2:3], s10
-; GCN-IR-NEXT: s_add_u32 s2, s12, 0xffffffc4
-; GCN-IR-NEXT: s_addc_u32 s3, 0, -1
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT: s_lshr_b64 s[2:3], s[2:3], s11
+; GCN-IR-NEXT: s_add_u32 s10, s10, 0xffffffc4
+; GCN-IR-NEXT: s_addc_u32 s11, 0, -1
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
; GCN-IR-NEXT: s_mov_b32 s5, 0
; GCN-IR-NEXT: .LBB11_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[8:9], 1
+; GCN-IR-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GCN-IR-NEXT: s_lshr_b32 s4, s7, 31
; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
-; GCN-IR-NEXT: s_or_b64 s[8:9], s[8:9], s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[6:7], s[10:11], s[6:7]
-; GCN-IR-NEXT: s_sub_u32 s4, 23, s8
-; GCN-IR-NEXT: s_subb_u32 s4, 0, s9
-; GCN-IR-NEXT: s_ashr_i32 s10, s4, 31
-; GCN-IR-NEXT: s_and_b32 s4, s10, 1
-; GCN-IR-NEXT: s_and_b32 s10, s10, 24
-; GCN-IR-NEXT: s_sub_u32 s8, s8, s10
-; GCN-IR-NEXT: s_subb_u32 s9, s9, 0
-; GCN-IR-NEXT: s_add_u32 s2, s2, 1
-; GCN-IR-NEXT: s_addc_u32 s3, s3, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[2:3], 0
-; GCN-IR-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
+; GCN-IR-NEXT: s_or_b64 s[6:7], s[8:9], s[6:7]
+; GCN-IR-NEXT: s_sub_u32 s4, 23, s2
+; GCN-IR-NEXT: s_subb_u32 s4, 0, s3
+; GCN-IR-NEXT: s_ashr_i32 s8, s4, 31
+; GCN-IR-NEXT: s_and_b32 s4, s8, 1
+; GCN-IR-NEXT: s_and_b32 s8, s8, 24
+; GCN-IR-NEXT: s_sub_u32 s2, s2, s8
+; GCN-IR-NEXT: s_subb_u32 s3, s3, 0
+; GCN-IR-NEXT: s_add_u32 s10, s10, 1
+; GCN-IR-NEXT: s_cselect_b64 s[12:13], -1, 0
+; GCN-IR-NEXT: s_or_b32 s12, s12, s13
+; GCN-IR-NEXT: s_cmp_lg_u32 s12, 0
+; GCN-IR-NEXT: s_addc_u32 s11, s11, 0
+; GCN-IR-NEXT: s_cselect_b64 s[12:13], -1, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], s[4:5]
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[12:13]
; GCN-IR-NEXT: s_cbranch_vccz .LBB11_3
; GCN-IR-NEXT: .LBB11_4: ; %Flow6
@@ -1384,13 +1399,13 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
; GCN-IR-NEXT: v_add_i32_e64 v2, s[4:5], 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT: v_sub_i32_e64 v6, s[4:5], 59, v10
-; GCN-IR-NEXT: v_subb_u32_e64 v7, s[4:5], 0, 0, s[4:5]
+; GCN-IR-NEXT: v_min_u32_e32 v6, v2, v3
+; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 59, v6
+; GCN-IR-NEXT: v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[4:5], 63, v[6:7]
+; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
; GCN-IR-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[6:7]
+; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 63, v[4:5]
; GCN-IR-NEXT: s_xor_b64 s[6:7], s[4:5], -1
; GCN-IR-NEXT: v_cndmask_b32_e64 v2, v1, 0, s[4:5]
; GCN-IR-NEXT: v_cndmask_b32_e64 v3, v0, 0, s[4:5]
@@ -1398,51 +1413,50 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
; GCN-IR-NEXT: s_and_saveexec_b64 s[6:7], s[4:5]
; GCN-IR-NEXT: s_cbranch_execz .LBB12_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v6
-; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v6
-; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT: v_add_i32_e32 v7, vcc, 1, v4
+; GCN-IR-NEXT: v_addc_u32_e32 v2, vcc, 0, v5, vcc
+; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[0:1], v2
+; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execz .LBB12_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_lshr_b64 v[6:7], v[0:1], v8
-; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 0xffffffc4, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT: v_addc_u32_e64 v1, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 0xffffffc4, v6
+; GCN-IR-NEXT: v_lshr_b64 v[0:1], v[0:1], v7
+; GCN-IR-NEXT: v_addc_u32_e64 v9, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v7, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
; GCN-IR-NEXT: .LBB12_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, 23, v6
+; GCN-IR-NEXT: v_or_b32_e32 v0, v0, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, 1, v0
-; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
-; GCN-IR-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
-; GCN-IR-NEXT: v_and_b32_e32 v8, 24, v8
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
-; GCN-IR-NEXT: v_sub_i32_e64 v6, s[4:5], v6, v8
-; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
-; GCN-IR-NEXT: v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, 23, v0
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, 0, v1, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v2, v6, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v6, 31, v4
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v6
+; GCN-IR-NEXT: v_and_b32_e32 v6, 24, v6
+; GCN-IR-NEXT: v_sub_i32_e32 v0, vcc, v0, v6
+; GCN-IR-NEXT: v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v8
+; GCN-IR-NEXT: v_or_b32_e32 v3, v7, v3
+; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v7, v5
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v6, v4
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB12_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB12_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB12_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[0:1], v[2:3], 1
; GCN-IR-NEXT: v_or_b32_e32 v2, v5, v1
; GCN-IR-NEXT: v_or_b32_e32 v3, v4, v0
diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index b846ce7..cdcc914 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -170,35 +170,38 @@ define amdgpu_kernel void @s_test_urem_i64(ptr addrspace(1) %out, i64 %x, i64 %y
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[2:3], 0
; GCN-IR-NEXT: s_flbit_i32_b64 s10, s[6:7]
-; GCN-IR-NEXT: s_flbit_i32_b64 s18, s[2:3]
+; GCN-IR-NEXT: s_flbit_i32_b64 s16, s[2:3]
; GCN-IR-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
-; GCN-IR-NEXT: s_sub_u32 s12, s10, s18
+; GCN-IR-NEXT: s_sub_u32 s12, s10, s16
; GCN-IR-NEXT: s_subb_u32 s13, 0, 0
; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[14:15], s[12:13], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[16:17], s[12:13], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[12:13], 63
; GCN-IR-NEXT: s_or_b64 s[14:15], s[8:9], s[14:15]
; GCN-IR-NEXT: s_and_b64 s[8:9], s[14:15], exec
; GCN-IR-NEXT: s_cselect_b32 s9, 0, s3
; GCN-IR-NEXT: s_cselect_b32 s8, 0, s2
-; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[16:17]
+; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[18:19]
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[14:15]
; GCN-IR-NEXT: s_cbranch_vccz .LBB0_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: s_add_u32 s14, s12, 1
-; GCN-IR-NEXT: s_addc_u32 s15, s13, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[14:15], 0
+; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
+; GCN-IR-NEXT: s_or_b32 s8, s8, s9
+; GCN-IR-NEXT: s_cmp_lg_u32 s8, 0
+; GCN-IR-NEXT: s_addc_u32 s8, s13, 0
+; GCN-IR-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-IR-NEXT: s_sub_i32 s12, 63, s12
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[8:9]
; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[2:3], s12
; GCN-IR-NEXT: s_cbranch_vccz .LBB0_4
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
; GCN-IR-NEXT: s_lshr_b64 s[12:13], s[2:3], s14
-; GCN-IR-NEXT: s_add_u32 s16, s6, -1
-; GCN-IR-NEXT: s_addc_u32 s17, s7, -1
+; GCN-IR-NEXT: s_add_u32 s14, s6, -1
+; GCN-IR-NEXT: s_addc_u32 s15, s7, -1
; GCN-IR-NEXT: s_not_b64 s[4:5], s[10:11]
-; GCN-IR-NEXT: s_add_u32 s10, s4, s18
-; GCN-IR-NEXT: s_addc_u32 s11, s5, 0
-; GCN-IR-NEXT: s_mov_b64 s[14:15], 0
+; GCN-IR-NEXT: s_add_u32 s16, s4, s16
+; GCN-IR-NEXT: s_addc_u32 s17, s5, 0
+; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
; GCN-IR-NEXT: s_mov_b32 s5, 0
; GCN-IR-NEXT: .LBB0_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -206,19 +209,22 @@ define amdgpu_kernel void @s_test_urem_i64(ptr addrspace(1) %out, i64 %x, i64 %y
; GCN-IR-NEXT: s_lshr_b32 s4, s9, 31
; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[8:9], 1
; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[8:9], s[14:15], s[8:9]
-; GCN-IR-NEXT: s_sub_u32 s4, s16, s12
-; GCN-IR-NEXT: s_subb_u32 s4, s17, s13
-; GCN-IR-NEXT: s_ashr_i32 s14, s4, 31
-; GCN-IR-NEXT: s_mov_b32 s15, s14
-; GCN-IR-NEXT: s_and_b32 s4, s14, 1
-; GCN-IR-NEXT: s_and_b64 s[14:15], s[14:15], s[6:7]
-; GCN-IR-NEXT: s_sub_u32 s12, s12, s14
-; GCN-IR-NEXT: s_subb_u32 s13, s13, s15
-; GCN-IR-NEXT: s_add_u32 s10, s10, 1
-; GCN-IR-NEXT: s_addc_u32 s11, s11, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[10:11], 0
-; GCN-IR-NEXT: s_mov_b64 s[14:15], s[4:5]
+; GCN-IR-NEXT: s_or_b64 s[8:9], s[10:11], s[8:9]
+; GCN-IR-NEXT: s_sub_u32 s4, s14, s12
+; GCN-IR-NEXT: s_subb_u32 s4, s15, s13
+; GCN-IR-NEXT: s_ashr_i32 s10, s4, 31
+; GCN-IR-NEXT: s_mov_b32 s11, s10
+; GCN-IR-NEXT: s_and_b32 s4, s10, 1
+; GCN-IR-NEXT: s_and_b64 s[18:19], s[10:11], s[6:7]
+; GCN-IR-NEXT: s_sub_u32 s12, s12, s18
+; GCN-IR-NEXT: s_subb_u32 s13, s13, s19
+; GCN-IR-NEXT: s_add_u32 s16, s16, 1
+; GCN-IR-NEXT: s_cselect_b64 s[18:19], -1, 0
+; GCN-IR-NEXT: s_or_b32 s18, s18, s19
+; GCN-IR-NEXT: s_cmp_lg_u32 s18, 0
+; GCN-IR-NEXT: s_addc_u32 s17, s17, 0
+; GCN-IR-NEXT: s_cselect_b64 s[18:19], -1, 0
+; GCN-IR-NEXT: s_mov_b64 s[10:11], s[4:5]
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[18:19]
; GCN-IR-NEXT: s_cbranch_vccz .LBB0_3
; GCN-IR-NEXT: .LBB0_4: ; %Flow7
@@ -362,12 +368,12 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v2
; GCN-IR-NEXT: v_add_i32_e64 v4, s[6:7], 32, v4
; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT: v_min_u32_e32 v12, v4, v5
+; GCN-IR-NEXT: v_min_u32_e32 v10, v4, v5
; GCN-IR-NEXT: v_ffbh_u32_e32 v4, v0
; GCN-IR-NEXT: v_add_i32_e64 v4, s[6:7], 32, v4
; GCN-IR-NEXT: v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT: v_min_u32_e32 v13, v4, v5
-; GCN-IR-NEXT: v_sub_i32_e64 v4, s[6:7], v12, v13
+; GCN-IR-NEXT: v_min_u32_e32 v11, v4, v5
+; GCN-IR-NEXT: v_sub_i32_e64 v4, s[6:7], v10, v11
; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
; GCN-IR-NEXT: v_subb_u32_e64 v5, s[6:7], 0, 0, s[6:7]
@@ -383,54 +389,53 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
; GCN-IR-NEXT: s_cbranch_execz .LBB1_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v5, vcc
+; GCN-IR-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
; GCN-IR-NEXT: v_sub_i32_e64 v4, s[4:5], 63, v4
-; GCN-IR-NEXT: v_mov_b32_e32 v6, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[0:1], v4
+; GCN-IR-NEXT: v_mov_b32_e32 v6, 0
; GCN-IR-NEXT: v_mov_b32_e32 v7, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execz .LBB1_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, -1, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v15, vcc, -1, v3, vcc
-; GCN-IR-NEXT: v_not_b32_e32 v6, v12
-; GCN-IR-NEXT: v_lshr_b64 v[10:11], v[0:1], v8
-; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, v6, v13
-; GCN-IR-NEXT: v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT: v_addc_u32_e64 v9, s[4:5], -1, 0, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v13, 0
+; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, -1, v2
+; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, -1, v3, vcc
+; GCN-IR-NEXT: v_not_b32_e32 v6, v10
+; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, v6, v11
+; GCN-IR-NEXT: v_lshr_b64 v[8:9], v[0:1], v8
+; GCN-IR-NEXT: v_addc_u32_e64 v15, s[8:9], -1, 0, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
; GCN-IR-NEXT: v_mov_b32_e32 v7, 0
; GCN-IR-NEXT: .LBB1_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[10:11], v[10:11], 1
+; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT: v_or_b32_e32 v10, v10, v6
+; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v6
; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v14, v10
-; GCN-IR-NEXT: v_subb_u32_e32 v6, vcc, v15, v11, vcc
-; GCN-IR-NEXT: v_or_b32_e32 v4, v12, v4
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v12, 31, v6
-; GCN-IR-NEXT: v_add_i32_e32 v8, vcc, 1, v8
-; GCN-IR-NEXT: v_or_b32_e32 v5, v13, v5
-; GCN-IR-NEXT: v_and_b32_e32 v6, 1, v12
-; GCN-IR-NEXT: v_and_b32_e32 v13, v12, v3
-; GCN-IR-NEXT: v_and_b32_e32 v12, v12, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v9, vcc, 0, v9, vcc
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; GCN-IR-NEXT: v_sub_i32_e64 v10, s[4:5], v10, v12
-; GCN-IR-NEXT: v_subb_u32_e64 v11, s[4:5], v11, v13, s[4:5]
-; GCN-IR-NEXT: v_mov_b32_e32 v13, v7
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v12, v6
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v12, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v6, vcc, v13, v9, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v4, v10, v4
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v6
+; GCN-IR-NEXT: v_or_b32_e32 v5, v11, v5
+; GCN-IR-NEXT: v_and_b32_e32 v6, 1, v10
+; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v3
+; GCN-IR-NEXT: v_and_b32_e32 v10, v10, v2
+; GCN-IR-NEXT: v_sub_i32_e32 v8, vcc, v8, v10
+; GCN-IR-NEXT: v_subb_u32_e32 v9, vcc, v9, v11, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v14, vcc, 1, v14
+; GCN-IR-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v11, v7
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v10, v6
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB1_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB1_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB1_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[4:5], v[4:5], 1
; GCN-IR-NEXT: v_or_b32_e32 v7, v7, v5
; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
@@ -948,34 +953,37 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-IR-NEXT: s_mov_b64 s[4:5], 0
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT: s_flbit_i32_b64 s12, s[2:3]
-; GCN-IR-NEXT: s_add_u32 s8, s12, 0xffffffc5
+; GCN-IR-NEXT: s_flbit_i32_b64 s14, s[2:3]
+; GCN-IR-NEXT: s_add_u32 s8, s14, 0xffffffc5
; GCN-IR-NEXT: s_addc_u32 s9, 0, -1
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[2:3], 0
; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[10:11], s[8:9], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[14:15], s[8:9], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[8:9], 63
; GCN-IR-NEXT: s_or_b64 s[10:11], s[6:7], s[10:11]
; GCN-IR-NEXT: s_and_b64 s[6:7], s[10:11], exec
; GCN-IR-NEXT: s_cselect_b32 s6, 0, 24
-; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[14:15]
+; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[12:13]
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11]
; GCN-IR-NEXT: s_mov_b32 s7, 0
; GCN-IR-NEXT: s_cbranch_vccz .LBB6_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: s_add_u32 s10, s8, 1
-; GCN-IR-NEXT: s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
+; GCN-IR-NEXT: s_or_b32 s6, s6, s7
+; GCN-IR-NEXT: s_cmp_lg_u32 s6, 0
+; GCN-IR-NEXT: s_addc_u32 s6, s9, 0
+; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN-IR-NEXT: s_sub_i32 s8, 63, s8
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[6:7]
; GCN-IR-NEXT: s_lshl_b64 s[6:7], 24, s8
; GCN-IR-NEXT: s_cbranch_vccz .LBB6_4
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
; GCN-IR-NEXT: s_lshr_b64 s[10:11], 24, s10
-; GCN-IR-NEXT: s_add_u32 s14, s2, -1
-; GCN-IR-NEXT: s_addc_u32 s15, s3, -1
-; GCN-IR-NEXT: s_sub_u32 s8, 58, s12
-; GCN-IR-NEXT: s_subb_u32 s9, 0, 0
-; GCN-IR-NEXT: s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT: s_add_u32 s12, s2, -1
+; GCN-IR-NEXT: s_addc_u32 s13, s3, -1
+; GCN-IR-NEXT: s_sub_u32 s14, 58, s14
+; GCN-IR-NEXT: s_subb_u32 s15, 0, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
; GCN-IR-NEXT: s_mov_b32 s5, 0
; GCN-IR-NEXT: .LBB6_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -983,19 +991,22 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_lshr_b32 s4, s7, 31
; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[6:7], s[12:13], s[6:7]
-; GCN-IR-NEXT: s_sub_u32 s4, s14, s10
-; GCN-IR-NEXT: s_subb_u32 s4, s15, s11
-; GCN-IR-NEXT: s_ashr_i32 s12, s4, 31
-; GCN-IR-NEXT: s_mov_b32 s13, s12
-; GCN-IR-NEXT: s_and_b32 s4, s12, 1
-; GCN-IR-NEXT: s_and_b64 s[12:13], s[12:13], s[2:3]
-; GCN-IR-NEXT: s_sub_u32 s10, s10, s12
-; GCN-IR-NEXT: s_subb_u32 s11, s11, s13
-; GCN-IR-NEXT: s_add_u32 s8, s8, 1
-; GCN-IR-NEXT: s_addc_u32 s9, s9, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[16:17], s[8:9], 0
-; GCN-IR-NEXT: s_mov_b64 s[12:13], s[4:5]
+; GCN-IR-NEXT: s_or_b64 s[6:7], s[8:9], s[6:7]
+; GCN-IR-NEXT: s_sub_u32 s4, s12, s10
+; GCN-IR-NEXT: s_subb_u32 s4, s13, s11
+; GCN-IR-NEXT: s_ashr_i32 s8, s4, 31
+; GCN-IR-NEXT: s_mov_b32 s9, s8
+; GCN-IR-NEXT: s_and_b32 s4, s8, 1
+; GCN-IR-NEXT: s_and_b64 s[16:17], s[8:9], s[2:3]
+; GCN-IR-NEXT: s_sub_u32 s10, s10, s16
+; GCN-IR-NEXT: s_subb_u32 s11, s11, s17
+; GCN-IR-NEXT: s_add_u32 s14, s14, 1
+; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT: s_or_b32 s16, s16, s17
+; GCN-IR-NEXT: s_cmp_lg_u32 s16, 0
+; GCN-IR-NEXT: s_addc_u32 s15, s15, 0
+; GCN-IR-NEXT: s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], s[4:5]
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[16:17]
; GCN-IR-NEXT: s_cbranch_vccz .LBB6_3
; GCN-IR-NEXT: .LBB6_4: ; %Flow6
@@ -1064,52 +1075,58 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR: ; %bb.0: ; %_udiv-special-cases
; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT: s_flbit_i32_b64 s12, s[2:3]
-; GCN-IR-NEXT: s_sub_u32 s8, 59, s12
+; GCN-IR-NEXT: s_flbit_i32_b64 s10, s[2:3]
+; GCN-IR-NEXT: s_sub_u32 s8, 59, s10
; GCN-IR-NEXT: s_subb_u32 s9, 0, 0
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], s[2:3], 0
; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[6:7], s[8:9], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[8:9], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[8:9], 63
; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
; GCN-IR-NEXT: s_and_b64 s[6:7], s[4:5], exec
; GCN-IR-NEXT: s_cselect_b32 s7, 0, s3
; GCN-IR-NEXT: s_cselect_b32 s6, 0, s2
-; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[10:11]
+; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[12:13]
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[4:5]
; GCN-IR-NEXT: s_mov_b64 s[4:5], 0
; GCN-IR-NEXT: s_cbranch_vccz .LBB7_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: s_add_u32 s10, s8, 1
-; GCN-IR-NEXT: s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT: s_add_u32 s11, s8, 1
+; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
+; GCN-IR-NEXT: s_or_b32 s6, s6, s7
+; GCN-IR-NEXT: s_cmp_lg_u32 s6, 0
+; GCN-IR-NEXT: s_addc_u32 s6, s9, 0
+; GCN-IR-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN-IR-NEXT: s_sub_i32 s8, 63, s8
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[6:7]
; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[2:3], s8
; GCN-IR-NEXT: s_cbranch_vccz .LBB7_4
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: s_lshr_b64 s[10:11], s[2:3], s10
-; GCN-IR-NEXT: s_add_u32 s8, s12, 0xffffffc4
-; GCN-IR-NEXT: s_addc_u32 s9, 0, -1
-; GCN-IR-NEXT: s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT: s_lshr_b64 s[8:9], s[2:3], s11
+; GCN-IR-NEXT: s_add_u32 s12, s10, 0xffffffc4
+; GCN-IR-NEXT: s_addc_u32 s13, 0, -1
+; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
; GCN-IR-NEXT: s_mov_b32 s5, 0
; GCN-IR-NEXT: .LBB7_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[10:11], 1
+; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[8:9], 1
; GCN-IR-NEXT: s_lshr_b32 s4, s7, 31
; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
-; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[6:7], s[12:13], s[6:7]
-; GCN-IR-NEXT: s_sub_u32 s4, 23, s10
-; GCN-IR-NEXT: s_subb_u32 s4, 0, s11
-; GCN-IR-NEXT: s_ashr_i32 s12, s4, 31
-; GCN-IR-NEXT: s_and_b32 s4, s12, 1
-; GCN-IR-NEXT: s_and_b32 s12, s12, 24
-; GCN-IR-NEXT: s_sub_u32 s10, s10, s12
-; GCN-IR-NEXT: s_subb_u32 s11, s11, 0
-; GCN-IR-NEXT: s_add_u32 s8, s8, 1
-; GCN-IR-NEXT: s_addc_u32 s9, s9, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[14:15], s[8:9], 0
-; GCN-IR-NEXT: s_mov_b64 s[12:13], s[4:5]
+; GCN-IR-NEXT: s_or_b64 s[8:9], s[8:9], s[4:5]
+; GCN-IR-NEXT: s_or_b64 s[6:7], s[10:11], s[6:7]
+; GCN-IR-NEXT: s_sub_u32 s4, 23, s8
+; GCN-IR-NEXT: s_subb_u32 s4, 0, s9
+; GCN-IR-NEXT: s_ashr_i32 s10, s4, 31
+; GCN-IR-NEXT: s_and_b32 s4, s10, 1
+; GCN-IR-NEXT: s_and_b32 s10, s10, 24
+; GCN-IR-NEXT: s_sub_u32 s8, s8, s10
+; GCN-IR-NEXT: s_subb_u32 s9, s9, 0
+; GCN-IR-NEXT: s_add_u32 s12, s12, 1
+; GCN-IR-NEXT: s_cselect_b64 s[14:15], -1, 0
+; GCN-IR-NEXT: s_or_b32 s14, s14, s15
+; GCN-IR-NEXT: s_cmp_lg_u32 s14, 0
+; GCN-IR-NEXT: s_addc_u32 s13, s13, 0
+; GCN-IR-NEXT: s_cselect_b64 s[14:15], -1, 0
+; GCN-IR-NEXT: s_mov_b64 s[10:11], s[4:5]
; GCN-IR-NEXT: s_and_b64 vcc, exec, s[14:15]
; GCN-IR-NEXT: s_cbranch_vccz .LBB7_3
; GCN-IR-NEXT: .LBB7_4: ; %Flow6
@@ -1241,8 +1258,8 @@ define i64 @v_test_urem_pow2_k_num_i64(i64 %x) {
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 0xffffffd0, v10
+; GCN-IR-NEXT: v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT: v_add_i32_e32 v2, vcc, 0xffffffd0, v8
; GCN-IR-NEXT: v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
; GCN-IR-NEXT: v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1257,54 +1274,53 @@ define i64 @v_test_urem_pow2_k_num_i64(i64 %x) {
; GCN-IR-NEXT: s_cbranch_execz .LBB8_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; GCN-IR-NEXT: s_mov_b64 s[4:5], 0x8000
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0x8000
+; GCN-IR-NEXT: v_lshl_b64 v[2:3], s[8:9], v2
; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_lshl_b64 v[2:3], s[4:5], v2
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[8:9]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[10:11]
; GCN-IR-NEXT: s_cbranch_execz .LBB8_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, -1, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT: v_lshr_b64 v[8:9], s[4:5], v6
-; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, 47, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT: v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, -1, v0
+; GCN-IR-NEXT: v_addc_u32_e32 v11, vcc, -1, v1, vcc
+; GCN-IR-NEXT: v_sub_i32_e32 v12, vcc, 47, v8
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], s[8:9], v6
+; GCN-IR-NEXT: v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
; GCN-IR-NEXT: .LBB8_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v12, v8
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v13, v9, vcc
-; GCN-IR-NEXT: v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT: v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT: v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT: v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT: v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT: v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, v10, v6
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, v11, v7, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT: v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT: v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT: v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB8_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB8_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB8_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v3
; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v2
@@ -1337,8 +1353,8 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
; GCN-IR-NEXT: v_ffbh_u32_e32 v2, v0
; GCN-IR-NEXT: v_add_i32_e64 v2, s[4:5], 32, v2
; GCN-IR-NEXT: v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT: v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 48, v10
+; GCN-IR-NEXT: v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 48, v8
; GCN-IR-NEXT: v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5]
; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[0:1]
; GCN-IR-NEXT: v_cmp_lt_u64_e64 s[4:5], 63, v[2:3]
@@ -1352,51 +1368,50 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
; GCN-IR-NEXT: s_cbranch_execz .LBB9_6
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GCN-IR-NEXT: v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[6:7]
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[0:1], v2
+; GCN-IR-NEXT: v_mov_b32_e32 v4, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT: s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT: s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[4:5], exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execz .LBB9_5
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: v_lshr_b64 v[8:9], v[0:1], v6
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 0xffffffcf, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT: v_addc_u32_e64 v7, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT: v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, 0xffffffcf, v8
+; GCN-IR-NEXT: v_lshr_b64 v[6:7], v[0:1], v6
+; GCN-IR-NEXT: v_addc_u32_e64 v11, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT: v_mov_b32_e32 v9, 0
; GCN-IR-NEXT: v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT: s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT: s_movk_i32 s10, 0x7fff
; GCN-IR-NEXT: .LBB9_3: ; %udiv-do-while
; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
; GCN-IR-NEXT: v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT: v_or_b32_e32 v8, v8, v4
-; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, s12, v8
+; GCN-IR-NEXT: v_or_b32_e32 v6, v6, v4
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, 0, v9, vcc
-; GCN-IR-NEXT: v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT: v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT: v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT: v_and_b32_e32 v10, 0x8000, v10
-; GCN-IR-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT: v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT: v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT: v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT: v_subbrev_u32_e64 v9, s[4:5], 0, v9, s[4:5]
-; GCN-IR-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT: v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT: v_sub_i32_e32 v4, vcc, s10, v6
+; GCN-IR-NEXT: v_subb_u32_e32 v4, vcc, 0, v7, vcc
+; GCN-IR-NEXT: v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT: v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT: v_and_b32_e32 v8, 0x8000, v8
+; GCN-IR-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT: v_subbrev_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v10, vcc, 1, v10
+; GCN-IR-NEXT: v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT: v_addc_u32_e32 v11, vcc, 0, v11, vcc
+; GCN-IR-NEXT: v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT: v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT: s_andn2_b64 exec, exec, s[8:9]
; GCN-IR-NEXT: s_cbranch_execnz .LBB9_3
; GCN-IR-NEXT: ; %bb.4: ; %Flow
-; GCN-IR-NEXT: s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT: .LBB9_5: ; %Flow4
; GCN-IR-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT: .LBB9_5: ; %Flow4
+; GCN-IR-NEXT: s_or_b64 exec, exec, s[4:5]
; GCN-IR-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
; GCN-IR-NEXT: v_or_b32_e32 v5, v5, v3
; GCN-IR-NEXT: v_or_b32_e32 v4, v4, v2
diff --git a/llvm/test/CodeGen/AMDGPU/usubo.ll b/llvm/test/CodeGen/AMDGPU/usubo.ll
index 0289dab..d67a7b1 100644
--- a/llvm/test/CodeGen/AMDGPU/usubo.ll
+++ b/llvm/test/CodeGen/AMDGPU/usubo.ll
@@ -14,15 +14,16 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s0
-; SI-NEXT: s_sub_u32 s0, s2, s8
-; SI-NEXT: v_mov_b32_e32 v0, s2
+; SI-NEXT: s_sub_u32 s2, s2, s8
; SI-NEXT: s_mov_b32 s5, s1
-; SI-NEXT: s_subb_u32 s1, s3, s9
+; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; SI-NEXT: s_or_b32 s0, s0, s1
+; SI-NEXT: s_cmp_lg_u32 s0, 0
+; SI-NEXT: s_subb_u32 s3, s3, s9
+; SI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; SI-NEXT: v_mov_b32_e32 v1, s3
-; SI-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
-; SI-NEXT: v_mov_b32_e32 v1, s1
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_endpgm
@@ -33,15 +34,15 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: s_sub_u32 s0, s2, s4
-; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: s_sub_u32 s2, s2, s4
; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT: s_cmp_lg_u64 s[0:1], 0
+; VI-NEXT: s_subb_u32 s3, s3, s5
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
; VI-NEXT: v_mov_b32_e32 v3, s3
-; VI-NEXT: s_subb_u32 s1, s3, s5
-; VI-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[2:3]
-; VI-NEXT: v_mov_b32_e32 v3, s1
-; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
-; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT: v_add_u32_e32 v2, vcc, s2, v2
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
@@ -52,14 +53,14 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v0, s2
-; GFX9-NEXT: s_sub_u32 s4, s2, s6
-; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: s_subb_u32 s5, s3, s7
-; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, s[4:5], v[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v1, s5
-; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s4, v0
+; GFX9-NEXT: s_sub_u32 s6, s2, s6
+; GFX9-NEXT: s_cselect_b64 s[4:5], -1, 0
+; GFX9-NEXT: s_cmp_lg_u64 s[4:5], 0
+; GFX9-NEXT: s_subb_u32 s4, s3, s7
+; GFX9-NEXT: s_cselect_b64 s[2:3], -1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s6, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX9-NEXT: s_endpgm
@@ -71,12 +72,14 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; GFX10-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: s_sub_u32 s4, s2, s6
-; GFX10-NEXT: s_subb_u32 s5, s3, s7
-; GFX10-NEXT: v_cmp_gt_u64_e64 s2, s[4:5], s[2:3]
-; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX10-NEXT: v_add_co_u32 v0, s2, s4, v0
-; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s2, s5, 0, s2
+; GFX10-NEXT: s_sub_u32 s2, s2, s6
+; GFX10-NEXT: s_cselect_b32 s4, -1, 0
+; GFX10-NEXT: s_cmp_lg_u32 s4, 0
+; GFX10-NEXT: s_subb_u32 s3, s3, s7
+; GFX10-NEXT: s_cselect_b32 s4, -1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s4
+; GFX10-NEXT: v_add_co_u32 v0, s2, s2, v0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s2, s3, 0, s2
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX10-NEXT: s_endpgm
;
@@ -87,14 +90,16 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
; GFX11-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
; GFX11-NEXT: v_mov_b32_e32 v2, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_sub_u32 s4, s2, s4
-; GFX11-NEXT: s_subb_u32 s5, s3, s5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_cmp_gt_u64_e64 s2, s[4:5], s[2:3]
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
+; GFX11-NEXT: s_sub_u32 s2, s2, s4
+; GFX11-NEXT: s_cselect_b32 s4, -1, 0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: s_cmp_lg_u32 s4, 0
+; GFX11-NEXT: s_subb_u32 s3, s3, s5
+; GFX11-NEXT: s_cselect_b32 s4, -1, 0
+; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_u32 v0, s2, s4, v0
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s5, 0, s2
+; GFX11-NEXT: v_add_co_u32 v0, s2, s2, v0
+; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, s3, 0, s2
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_endpgm
%usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) #0
@@ -435,21 +440,23 @@ define amdgpu_kernel void @s_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_sub_u32 s6, s4, s6
-; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: s_subb_u32 s7, s5, s7
-; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: v_cmp_gt_u64_e32 vcc, s[6:7], v[0:1]
-; SI-NEXT: v_mov_b32_e32 v2, s6
+; SI-NEXT: s_sub_u32 s4, s4, s6
+; SI-NEXT: s_cselect_b64 s[12:13], -1, 0
+; SI-NEXT: s_or_b32 s6, s12, s13
+; SI-NEXT: s_cmp_lg_u32 s6, 0
+; SI-NEXT: s_subb_u32 s5, s5, s7
; SI-NEXT: s_mov_b32 s8, s0
; SI-NEXT: s_mov_b32 s9, s1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: v_mov_b32_e32 v1, s5
+; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
; SI-NEXT: s_mov_b32 s0, s2
; SI-NEXT: s_mov_b32 s1, s3
; SI-NEXT: s_mov_b32 s2, s10
; SI-NEXT: s_mov_b32 s3, s11
-; SI-NEXT: v_mov_b32_e32 v3, s7
-; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-NEXT: buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; SI-NEXT: buffer_store_byte v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
@@ -457,37 +464,37 @@ define amdgpu_kernel void @s_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: s_sub_u32 s2, s4, s6
; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: s_sub_u32 s0, s4, s6
-; VI-NEXT: v_mov_b32_e32 v4, s4
; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: s_subb_u32 s1, s5, s7
-; VI-NEXT: v_mov_b32_e32 v5, s5
-; VI-NEXT: v_mov_b32_e32 v7, s1
-; VI-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[4:5]
-; VI-NEXT: v_mov_b32_e32 v6, s0
-; VI-NEXT: v_mov_b32_e32 v2, s2
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT: s_cmp_lg_u64 s[0:1], 0
+; VI-NEXT: s_subb_u32 s0, s5, s7
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_mov_b32_e32 v5, s0
+; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
; VI-NEXT: v_mov_b32_e32 v3, s3
-; VI-NEXT: flat_store_dwordx2 v[0:1], v[6:7]
-; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; VI-NEXT: flat_store_dwordx2 v[0:1], v[4:5]
+; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; VI-NEXT: flat_store_byte v[2:3], v0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: s_usubo_i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_sub_u32 s0, s12, s14
-; GFX9-NEXT: v_mov_b32_e32 v0, s12
-; GFX9-NEXT: v_mov_b32_e32 v1, s13
-; GFX9-NEXT: s_subb_u32 s1, s13, s15
-; GFX9-NEXT: v_mov_b32_e32 v3, s1
-; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
-; GFX9-NEXT: v_mov_b32_e32 v2, s0
-; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT: global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX9-NEXT: global_store_byte v4, v0, s[10:11]
+; GFX9-NEXT: s_sub_u32 s2, s12, s14
+; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX9-NEXT: s_subb_u32 s0, s13, s15
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[0:1]
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
+; GFX9-NEXT: global_store_byte v2, v3, s[10:11]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: s_usubo_i64:
@@ -496,10 +503,12 @@ define amdgpu_kernel void @s_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_sub_u32 s0, s12, s14
-; GFX10-NEXT: s_subb_u32 s1, s13, s15
+; GFX10-NEXT: s_cselect_b32 s1, -1, 0
; GFX10-NEXT: v_mov_b32_e32 v0, s0
+; GFX10-NEXT: s_cmp_lg_u32 s1, 0
+; GFX10-NEXT: s_subb_u32 s1, s13, s15
+; GFX10-NEXT: s_cselect_b32 s0, -1, 0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
-; GFX10-NEXT: v_cmp_gt_u64_e64 s0, s[0:1], s[12:13]
; GFX10-NEXT: v_cndmask_b32_e64 v3, 0, 1, s0
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[8:9]
; GFX10-NEXT: global_store_byte v2, v3, s[10:11]
@@ -509,12 +518,13 @@ define amdgpu_kernel void @s_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_sub_u32 s6, s4, s6
-; GFX11-NEXT: s_subb_u32 s7, s5, s7
-; GFX11-NEXT: v_mov_b32_e32 v0, s6
-; GFX11-NEXT: v_cmp_gt_u64_e64 s4, s[6:7], s[4:5]
-; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: s_sub_u32 s4, s4, s6
+; GFX11-NEXT: s_cselect_b32 s6, -1, 0
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
+; GFX11-NEXT: s_cmp_lg_u32 s6, 0
+; GFX11-NEXT: s_subb_u32 s5, s5, s7
+; GFX11-NEXT: s_cselect_b32 s4, -1, 0
+; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s5
; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, 1, s4
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
@@ -550,10 +560,10 @@ define amdgpu_kernel void @v_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_sub_i32_e32 v2, vcc, v0, v2
-; SI-NEXT: v_subb_u32_e32 v3, vcc, v1, v3, vcc
-; SI-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; SI-NEXT: buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; SI-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
+; SI-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
@@ -573,10 +583,9 @@ define amdgpu_kernel void @v_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; VI-NEXT: v_mov_b32_e32 v6, s2
; VI-NEXT: v_mov_b32_e32 v7, s3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_sub_u32_e32 v2, vcc, v0, v2
-; VI-NEXT: v_subb_u32_e32 v3, vcc, v1, v3, vcc
-; VI-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; VI-NEXT: flat_store_dwordx2 v[4:5], v[2:3]
+; VI-NEXT: v_sub_u32_e32 v0, vcc, v0, v2
+; VI-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; VI-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; VI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; VI-NEXT: flat_store_byte v[6:7], v0
; VI-NEXT: s_endpgm
@@ -589,10 +598,9 @@ define amdgpu_kernel void @v_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX9-NEXT: global_load_dwordx2 v[0:1], v4, s[12:13]
; GFX9-NEXT: global_load_dwordx2 v[2:3], v4, s[14:15]
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, v0, v2
-; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX9-NEXT: global_store_dwordx2 v4, v[2:3], s[8:9]
+; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: global_store_dwordx2 v4, v[0:1], s[8:9]
; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; GFX9-NEXT: global_store_byte v4, v0, s[10:11]
; GFX9-NEXT: s_endpgm
@@ -606,12 +614,11 @@ define amdgpu_kernel void @v_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX10-NEXT: global_load_dwordx2 v[0:1], v4, s[12:13]
; GFX10-NEXT: global_load_dwordx2 v[2:3], v4, s[14:15]
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_sub_co_u32 v2, vcc_lo, v0, v2
-; GFX10-NEXT: v_sub_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo
-; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX10-NEXT: global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX10-NEXT: global_store_byte v4, v0, s[10:11]
+; GFX10-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX10-NEXT: global_store_dwordx2 v4, v[0:1], s[8:9]
+; GFX10-NEXT: global_store_byte v4, v2, s[10:11]
; GFX10-NEXT: s_endpgm
;
; GFX11-LABEL: v_usubo_i64:
@@ -623,14 +630,12 @@ define amdgpu_kernel void @v_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
; GFX11-NEXT: global_load_b64 v[0:1], v4, s[4:5]
; GFX11-NEXT: global_load_b64 v[2:3], v4, s[6:7]
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_sub_co_u32 v2, vcc_lo, v0, v2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_sub_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
-; GFX11-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc_lo
; GFX11-NEXT: s_clause 0x1
-; GFX11-NEXT: global_store_b64 v4, v[2:3], s[0:1]
-; GFX11-NEXT: global_store_b8 v4, v0, s[2:3]
+; GFX11-NEXT: global_store_b64 v4, v[0:1], s[0:1]
+; GFX11-NEXT: global_store_b8 v4, v2, s[2:3]
; GFX11-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
diff --git a/llvm/test/CodeGen/AMDGPU/usubsat.ll b/llvm/test/CodeGen/AMDGPU/usubsat.ll
index 90491a0..3ddb2f0 100644
--- a/llvm/test/CodeGen/AMDGPU/usubsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/usubsat.ll
@@ -730,52 +730,38 @@ define i64 @v_usubsat_i64(i64 %lhs, i64 %rhs) {
; GFX6-LABEL: v_usubsat_i64:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX6-NEXT: v_sub_i32_e32 v2, vcc, v0, v2
-; GFX6-NEXT: v_subb_u32_e32 v3, vcc, v1, v3, vcc
-; GFX6-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX6-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
-; GFX6-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_usubsat_i64:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT: v_sub_u32_e32 v2, vcc, v0, v2
-; GFX8-NEXT: v_subb_u32_e32 v3, vcc, v1, v3, vcc
-; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX8-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
-; GFX8-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc
+; GFX8-NEXT: v_sub_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_usubsat_i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, v0, v2
-; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
-; GFX9-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc
+; GFX9-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
-; GFX10-LABEL: v_usubsat_i64:
-; GFX10: ; %bb.0:
-; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: v_sub_co_u32 v2, vcc_lo, v0, v2
-; GFX10-NEXT: v_sub_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo
-; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc_lo
-; GFX10-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-LABEL: v_usubsat_i64:
-; GFX11: ; %bb.0:
-; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_sub_co_u32 v2, vcc_lo, v0, v2
-; GFX11-NEXT: v_sub_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
-; GFX11-NEXT: v_cmp_gt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo
-; GFX11-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc_lo
-; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX10PLUS-LABEL: v_usubsat_i64:
+; GFX10PLUS: ; %bb.0:
+; GFX10PLUS-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT: v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX10PLUS-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10PLUS-NEXT: v_cndmask_b32_e64 v0, v0, 0, vcc_lo
+; GFX10PLUS-NEXT: v_cndmask_b32_e64 v1, v1, 0, vcc_lo
+; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
%result = call i64 @llvm.usub.sat.i64(i64 %lhs, i64 %rhs)
ret i64 %result
}
diff --git a/llvm/test/CodeGen/Hexagon/fmul-v67.ll b/llvm/test/CodeGen/Hexagon/fmul-v67.ll
index 49098cd..fc0b7f7 100644
--- a/llvm/test/CodeGen/Hexagon/fmul-v67.ll
+++ b/llvm/test/CodeGen/Hexagon/fmul-v67.ll
@@ -29,7 +29,7 @@ b2:
; CHECK: [[R22]] += dfmpylh([[R20]],[[R21]])
; CHECK: [[R22]] += dfmpylh([[R21]],[[R20]])
; CHECK: [[R22]] += dfmpyhh([[R20]],[[R21]])
-define double @test_02(double %a0, double %a1) #2 {
+define double @test_02(double %a0, double %a1) #1 {
b2:
%v3 = fmul double %a0, %a1
ret double %v3
@@ -40,13 +40,11 @@ b2:
; CHECK: [[R30]] += dfmpylh(r1:0,r3:2)
; CHECK: [[R30]] += dfmpylh(r3:2,r1:0)
; CHECK: [[R30]] += dfmpyhh(r1:0,r3:2)
-define double @test_03(double %a0, double %a1) #3 {
+define double @test_03(double %a0, double %a1) #1 {
b2:
- %v3 = fmul double %a0, %a1
+ %v3 = fmul afn double %a0, %a1
ret double %v3
}
attributes #0 = { nounwind }
attributes #1 = { nounwind "target-cpu"="hexagonv67" }
-attributes #2 = { nounwind "target-cpu"="hexagonv67" "unsafe-fp-math"="false" }
-attributes #3 = { nounwind "target-cpu"="hexagonv67" "unsafe-fp-math"="true" }
diff --git a/llvm/test/CodeGen/MIR2Vec/vocab-error-handling.ll b/llvm/test/CodeGen/MIR2Vec/vocab-error-handling.ll
index 1da516a..80b4048 100644
--- a/llvm/test/CodeGen/MIR2Vec/vocab-error-handling.ll
+++ b/llvm/test/CodeGen/MIR2Vec/vocab-error-handling.ll
@@ -1,15 +1,15 @@
; REQUIRES: x86_64-linux
-; RUN: not llc -o /dev/null -print-mir2vec-vocab %s 2>&1 | FileCheck %s --check-prefix=CHECK-INVALID
-; RUN: not llc -o /dev/null -print-mir2vec-vocab -mir2vec-vocab-path=%S/Inputs/mir2vec_zero_vocab.json %s 2>&1 | FileCheck %s --check-prefix=CHECK-ZERO-DIM
-; RUN: not llc -o /dev/null -print-mir2vec-vocab -mir2vec-vocab-path=%S/Inputs/mir2vec_invalid_vocab.json %s 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ENTITIES
-; RUN: not llc -o /dev/null -print-mir2vec-vocab -mir2vec-vocab-path=%S/Inputs/mir2vec_inconsistent_dims.json %s 2>&1 | FileCheck %s --check-prefix=CHECK-INCONSISTENT-DIMS
+; RUN: llc -o /dev/null -print-mir2vec-vocab %s 2>&1 | FileCheck %s --check-prefix=CHECK-INVALID
+; RUN: llc -o /dev/null -print-mir2vec-vocab -mir2vec-vocab-path=%S/Inputs/mir2vec_zero_vocab.json %s 2>&1 | FileCheck %s --check-prefix=CHECK-ZERO-DIM
+; RUN: llc -o /dev/null -print-mir2vec-vocab -mir2vec-vocab-path=%S/Inputs/mir2vec_invalid_vocab.json %s 2>&1 | FileCheck %s --check-prefix=CHECK-NO-ENTITIES
+; RUN: llc -o /dev/null -print-mir2vec-vocab -mir2vec-vocab-path=%S/Inputs/mir2vec_inconsistent_dims.json %s 2>&1 | FileCheck %s --check-prefix=CHECK-INCONSISTENT-DIMS
define dso_local void @test() {
entry:
ret void
}
-; CHECK-INVALID: error: MIR2Vec vocabulary file path not specified; set it using --mir2vec-vocab-path
-; CHECK-ZERO-DIM: error: Dimension of 'entities' section of the vocabulary is zero
-; CHECK-NO-ENTITIES: error: Missing 'entities' section in vocabulary file
-; CHECK-INCONSISTENT-DIMS: error: All vectors in the 'entities' section of the vocabulary are not of the same dimension
+; CHECK-INVALID: MIR2Vec Vocabulary Printer: Failed to get vocabulary - MIR2Vec vocabulary file path not specified; set it using --mir2vec-vocab-path
+; CHECK-ZERO-DIM: MIR2Vec Vocabulary Printer: Failed to get vocabulary - Dimension of 'entities' section of the vocabulary is zero
+; CHECK-NO-ENTITIES: MIR2Vec Vocabulary Printer: Failed to get vocabulary - Missing 'entities' section in vocabulary file
+; CHECK-INCONSISTENT-DIMS: MIR2Vec Vocabulary Printer: Failed to get vocabulary - All vectors in the 'entities' section of the vocabulary are not of the same dimension
diff --git a/llvm/test/CodeGen/NVPTX/i32x2-instructions.ll b/llvm/test/CodeGen/NVPTX/i32x2-instructions.ll
new file mode 100644
index 0000000..153ca10
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/i32x2-instructions.ll
@@ -0,0 +1,1625 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc < %s -mcpu=sm_80 -O0 -disable-post-ra -frame-pointer=all \
+; RUN: -verify-machineinstrs | FileCheck --check-prefixes=CHECK,CHECK-NOI32X2 %s
+; RUN: %if ptxas-sm_80 %{ \
+; RUN: llc < %s -mcpu=sm_80 -O0 -disable-post-ra -frame-pointer=all \
+; RUN: -verify-machineinstrs | %ptxas-verify -arch=sm_80 \
+; RUN: %}
+; RUN: llc < %s -mcpu=sm_100 -O0 -disable-post-ra -frame-pointer=all \
+; RUN: -verify-machineinstrs | FileCheck --check-prefixes=CHECK,CHECK-I32X2 %s
+; RUN: %if ptxas-sm_100 %{ \
+; RUN: llc < %s -mcpu=sm_100 -O0 -disable-post-ra -frame-pointer=all \
+; RUN: -verify-machineinstrs | %ptxas-verify -arch=sm_100 \
+; RUN: %}
+
+target triple = "nvptx64-nvidia-cuda"
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+define <2 x i32> @test_ret_const() #0 {
+; CHECK-LABEL: test_ret_const(
+; CHECK: {
+; CHECK-EMPTY:
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {-1, 2};
+; CHECK-NEXT: ret;
+ ret <2 x i32> <i32 -1, i32 2>
+}
+
+define i32 @test_extract_0(<2 x i32> %a) #0 {
+; CHECK-NOI32X2-LABEL: test_extract_0(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<3>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_extract_0_param_0];
+; CHECK-NOI32X2-NEXT: st.param.b32 [func_retval0], %r1;
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_extract_0(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<2>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<2>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_extract_0_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, _}, %rd1;
+; CHECK-I32X2-NEXT: st.param.b32 [func_retval0], %r1;
+; CHECK-I32X2-NEXT: ret;
+ %e = extractelement <2 x i32> %a, i32 0
+ ret i32 %e
+}
+
+define i32 @test_extract_1(<2 x i32> %a) #0 {
+; CHECK-NOI32X2-LABEL: test_extract_1(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<3>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_extract_1_param_0];
+; CHECK-NOI32X2-NEXT: st.param.b32 [func_retval0], %r2;
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_extract_1(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<2>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<2>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_extract_1_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {_, %r1}, %rd1;
+; CHECK-I32X2-NEXT: st.param.b32 [func_retval0], %r1;
+; CHECK-I32X2-NEXT: ret;
+ %e = extractelement <2 x i32> %a, i32 1
+ ret i32 %e
+}
+
+define i32 @test_extract_i(<2 x i32> %a, i64 %idx) #0 {
+; CHECK-NOI32X2-LABEL: test_extract_i(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .local .align 8 .b8 __local_depot3[8];
+; CHECK-NOI32X2-NEXT: .reg .b64 %SP;
+; CHECK-NOI32X2-NEXT: .reg .b64 %SPL;
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<4>;
+; CHECK-NOI32X2-NEXT: .reg .b64 %rd<6>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: mov.b64 %SPL, __local_depot3;
+; CHECK-NOI32X2-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_extract_i_param_0];
+; CHECK-NOI32X2-NEXT: ld.param.b64 %rd1, [test_extract_i_param_1];
+; CHECK-NOI32X2-NEXT: st.v2.b32 [%SP], {%r1, %r2};
+; CHECK-NOI32X2-NEXT: and.b64 %rd2, %rd1, 1;
+; CHECK-NOI32X2-NEXT: shl.b64 %rd3, %rd2, 2;
+; CHECK-NOI32X2-NEXT: add.u64 %rd4, %SP, 0;
+; CHECK-NOI32X2-NEXT: or.b64 %rd5, %rd4, %rd3;
+; CHECK-NOI32X2-NEXT: ld.b32 %r3, [%rd5];
+; CHECK-NOI32X2-NEXT: st.param.b32 [func_retval0], %r3;
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_extract_i(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .pred %p<2>;
+; CHECK-I32X2-NEXT: .reg .b32 %r<4>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<3>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_extract_i_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_extract_i_param_0];
+; CHECK-I32X2-NEXT: setp.eq.b64 %p1, %rd2, 0;
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-I32X2-NEXT: selp.b32 %r3, %r1, %r2, %p1;
+; CHECK-I32X2-NEXT: st.param.b32 [func_retval0], %r3;
+; CHECK-I32X2-NEXT: ret;
+ %e = extractelement <2 x i32> %a, i64 %idx
+ ret i32 %e
+}
+
+define <2 x i32> @test_add(<2 x i32> %a, <2 x i32> %b) #0 {
+; CHECK-NOI32X2-LABEL: test_add(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_add_param_1];
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_add_param_0];
+; CHECK-NOI32X2-NEXT: add.s32 %r5, %r2, %r4;
+; CHECK-NOI32X2-NEXT: add.s32 %r6, %r1, %r3;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_add(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<3>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_add_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_add_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-I32X2-NEXT: add.s32 %r5, %r4, %r2;
+; CHECK-I32X2-NEXT: add.s32 %r6, %r3, %r1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-I32X2-NEXT: ret;
+ %r = add <2 x i32> %a, %b
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_add_imm_0(<2 x i32> %a) #0 {
+; CHECK-NOI32X2-LABEL: test_add_imm_0(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<5>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_add_imm_0_param_0];
+; CHECK-NOI32X2-NEXT: add.s32 %r3, %r2, 2;
+; CHECK-NOI32X2-NEXT: add.s32 %r4, %r1, 1;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_add_imm_0(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<5>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<2>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_add_imm_0_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-I32X2-NEXT: add.s32 %r3, %r2, 2;
+; CHECK-I32X2-NEXT: add.s32 %r4, %r1, 1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-I32X2-NEXT: ret;
+ %r = add <2 x i32> <i32 1, i32 2>, %a
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_add_imm_1(<2 x i32> %a) #0 {
+; CHECK-NOI32X2-LABEL: test_add_imm_1(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<5>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_add_imm_1_param_0];
+; CHECK-NOI32X2-NEXT: add.s32 %r3, %r2, 2;
+; CHECK-NOI32X2-NEXT: add.s32 %r4, %r1, 1;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_add_imm_1(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<5>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<2>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_add_imm_1_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-I32X2-NEXT: add.s32 %r3, %r2, 2;
+; CHECK-I32X2-NEXT: add.s32 %r4, %r1, 1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-I32X2-NEXT: ret;
+ %r = add <2 x i32> %a, <i32 1, i32 2>
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_sub(<2 x i32> %a, <2 x i32> %b) #0 {
+; CHECK-NOI32X2-LABEL: test_sub(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_sub_param_1];
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_sub_param_0];
+; CHECK-NOI32X2-NEXT: sub.s32 %r5, %r2, %r4;
+; CHECK-NOI32X2-NEXT: sub.s32 %r6, %r1, %r3;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_sub(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<3>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_sub_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_sub_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-I32X2-NEXT: sub.s32 %r5, %r4, %r2;
+; CHECK-I32X2-NEXT: sub.s32 %r6, %r3, %r1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-I32X2-NEXT: ret;
+ %r = sub <2 x i32> %a, %b
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_smax(<2 x i32> %a, <2 x i32> %b) #0 {
+; CHECK-NOI32X2-LABEL: test_smax(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_smax_param_1];
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_smax_param_0];
+; CHECK-NOI32X2-NEXT: max.s32 %r5, %r2, %r4;
+; CHECK-NOI32X2-NEXT: max.s32 %r6, %r1, %r3;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_smax(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<3>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_smax_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_smax_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-I32X2-NEXT: max.s32 %r5, %r4, %r2;
+; CHECK-I32X2-NEXT: max.s32 %r6, %r3, %r1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-I32X2-NEXT: ret;
+ %cmp = icmp sgt <2 x i32> %a, %b
+ %r = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> %b
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_umax(<2 x i32> %a, <2 x i32> %b) #0 {
+; CHECK-NOI32X2-LABEL: test_umax(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_umax_param_1];
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_umax_param_0];
+; CHECK-NOI32X2-NEXT: max.u32 %r5, %r2, %r4;
+; CHECK-NOI32X2-NEXT: max.u32 %r6, %r1, %r3;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_umax(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<3>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_umax_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_umax_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-I32X2-NEXT: max.u32 %r5, %r4, %r2;
+; CHECK-I32X2-NEXT: max.u32 %r6, %r3, %r1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-I32X2-NEXT: ret;
+ %cmp = icmp ugt <2 x i32> %a, %b
+ %r = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> %b
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_smin(<2 x i32> %a, <2 x i32> %b) #0 {
+; CHECK-NOI32X2-LABEL: test_smin(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_smin_param_1];
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_smin_param_0];
+; CHECK-NOI32X2-NEXT: min.s32 %r5, %r2, %r4;
+; CHECK-NOI32X2-NEXT: min.s32 %r6, %r1, %r3;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_smin(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<3>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_smin_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_smin_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-I32X2-NEXT: min.s32 %r5, %r4, %r2;
+; CHECK-I32X2-NEXT: min.s32 %r6, %r3, %r1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-I32X2-NEXT: ret;
+ %cmp = icmp sle <2 x i32> %a, %b
+ %r = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> %b
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_umin(<2 x i32> %a, <2 x i32> %b) #0 {
+; CHECK-NOI32X2-LABEL: test_umin(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_umin_param_1];
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_umin_param_0];
+; CHECK-NOI32X2-NEXT: min.u32 %r5, %r2, %r4;
+; CHECK-NOI32X2-NEXT: min.u32 %r6, %r1, %r3;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_umin(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<3>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_umin_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_umin_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-I32X2-NEXT: min.u32 %r5, %r4, %r2;
+; CHECK-I32X2-NEXT: min.u32 %r6, %r3, %r1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-I32X2-NEXT: ret;
+ %cmp = icmp ule <2 x i32> %a, %b
+ %r = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> %b
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_eq(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c) #0 {
+; CHECK-NOI32X2-LABEL: test_eq(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .pred %p<3>;
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<9>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r5, %r6}, [test_eq_param_2];
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_eq_param_1];
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_eq_param_0];
+; CHECK-NOI32X2-NEXT: setp.eq.b32 %p1, %r1, %r3;
+; CHECK-NOI32X2-NEXT: setp.eq.b32 %p2, %r2, %r4;
+; CHECK-NOI32X2-NEXT: selp.b32 %r7, %r2, %r6, %p2;
+; CHECK-NOI32X2-NEXT: selp.b32 %r8, %r1, %r5, %p1;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r8, %r7};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_eq(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .pred %p<3>;
+; CHECK-I32X2-NEXT: .reg .b32 %r<9>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<4>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd3, [test_eq_param_2];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_eq_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_eq_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-I32X2-NEXT: setp.eq.b32 %p1, %r3, %r1;
+; CHECK-I32X2-NEXT: setp.eq.b32 %p2, %r4, %r2;
+; CHECK-I32X2-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-I32X2-NEXT: selp.b32 %r7, %r4, %r6, %p2;
+; CHECK-I32X2-NEXT: selp.b32 %r8, %r3, %r5, %p1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r8, %r7};
+; CHECK-I32X2-NEXT: ret;
+ %cmp = icmp eq <2 x i32> %a, %b
+ %r = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> %c
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_ne(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c) #0 {
+; CHECK-NOI32X2-LABEL: test_ne(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .pred %p<3>;
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<9>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r5, %r6}, [test_ne_param_2];
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_ne_param_1];
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_ne_param_0];
+; CHECK-NOI32X2-NEXT: setp.ne.b32 %p1, %r1, %r3;
+; CHECK-NOI32X2-NEXT: setp.ne.b32 %p2, %r2, %r4;
+; CHECK-NOI32X2-NEXT: selp.b32 %r7, %r2, %r6, %p2;
+; CHECK-NOI32X2-NEXT: selp.b32 %r8, %r1, %r5, %p1;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r8, %r7};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_ne(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .pred %p<3>;
+; CHECK-I32X2-NEXT: .reg .b32 %r<9>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<4>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd3, [test_ne_param_2];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_ne_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_ne_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-I32X2-NEXT: setp.ne.b32 %p1, %r3, %r1;
+; CHECK-I32X2-NEXT: setp.ne.b32 %p2, %r4, %r2;
+; CHECK-I32X2-NEXT: mov.b64 {%r5, %r6}, %rd3;
+; CHECK-I32X2-NEXT: selp.b32 %r7, %r4, %r6, %p2;
+; CHECK-I32X2-NEXT: selp.b32 %r8, %r3, %r5, %p1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r8, %r7};
+; CHECK-I32X2-NEXT: ret;
+ %cmp = icmp ne <2 x i32> %a, %b
+ %r = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> %c
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_mul(<2 x i32> %a, <2 x i32> %b) #0 {
+; CHECK-NOI32X2-LABEL: test_mul(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_mul_param_1];
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_mul_param_0];
+; CHECK-NOI32X2-NEXT: mul.lo.s32 %r5, %r2, %r4;
+; CHECK-NOI32X2-NEXT: mul.lo.s32 %r6, %r1, %r3;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_mul(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<3>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_mul_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_mul_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-I32X2-NEXT: mul.lo.s32 %r5, %r4, %r2;
+; CHECK-I32X2-NEXT: mul.lo.s32 %r6, %r3, %r1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-I32X2-NEXT: ret;
+ %r = mul <2 x i32> %a, %b
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_or(<2 x i32> %a, <2 x i32> %b) #0 {
+; CHECK-NOI32X2-LABEL: test_or(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_or_param_1];
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_or_param_0];
+; CHECK-NOI32X2-NEXT: or.b32 %r5, %r2, %r4;
+; CHECK-NOI32X2-NEXT: or.b32 %r6, %r1, %r3;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_or(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<3>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_or_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_or_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-I32X2-NEXT: or.b32 %r5, %r4, %r2;
+; CHECK-I32X2-NEXT: or.b32 %r6, %r3, %r1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-I32X2-NEXT: ret;
+ %r = or <2 x i32> %a, %b
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_or_computed(i32 %a) {
+; CHECK-LABEL: test_or_computed(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_or_computed_param_0];
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r1, 5};
+; CHECK-NEXT: ret;
+ %ins.0 = insertelement <2 x i32> zeroinitializer, i32 %a, i32 0
+ %ins.1 = insertelement <2 x i32> %ins.0, i32 5, i32 1
+ %r = or <2 x i32> %ins.1, %ins.0
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_or_imm_0(<2 x i32> %a) #0 {
+; CHECK-NOI32X2-LABEL: test_or_imm_0(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<5>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_or_imm_0_param_0];
+; CHECK-NOI32X2-NEXT: or.b32 %r3, %r2, 2;
+; CHECK-NOI32X2-NEXT: or.b32 %r4, %r1, 1;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_or_imm_0(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<5>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<2>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_or_imm_0_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-I32X2-NEXT: or.b32 %r3, %r2, 2;
+; CHECK-I32X2-NEXT: or.b32 %r4, %r1, 1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-I32X2-NEXT: ret;
+ %r = or <2 x i32> <i32 1, i32 2>, %a
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_or_imm_1(<2 x i32> %a) #0 {
+; CHECK-NOI32X2-LABEL: test_or_imm_1(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<5>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_or_imm_1_param_0];
+; CHECK-NOI32X2-NEXT: or.b32 %r3, %r2, 2;
+; CHECK-NOI32X2-NEXT: or.b32 %r4, %r1, 1;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_or_imm_1(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<5>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<2>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_or_imm_1_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-I32X2-NEXT: or.b32 %r3, %r2, 2;
+; CHECK-I32X2-NEXT: or.b32 %r4, %r1, 1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-I32X2-NEXT: ret;
+ %r = or <2 x i32> %a, <i32 1, i32 2>
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_xor(<2 x i32> %a, <2 x i32> %b) #0 {
+; CHECK-NOI32X2-LABEL: test_xor(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_xor_param_1];
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_xor_param_0];
+; CHECK-NOI32X2-NEXT: xor.b32 %r5, %r2, %r4;
+; CHECK-NOI32X2-NEXT: xor.b32 %r6, %r1, %r3;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_xor(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<3>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_xor_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_xor_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-I32X2-NEXT: xor.b32 %r5, %r4, %r2;
+; CHECK-I32X2-NEXT: xor.b32 %r6, %r3, %r1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-I32X2-NEXT: ret;
+ %r = xor <2 x i32> %a, %b
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_xor_computed(i32 %a) {
+; CHECK-LABEL: test_xor_computed(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_xor_computed_param_0];
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {0, 5};
+; CHECK-NEXT: ret;
+ %ins.0 = insertelement <2 x i32> zeroinitializer, i32 %a, i32 0
+ %ins.1 = insertelement <2 x i32> %ins.0, i32 5, i32 1
+ %r = xor <2 x i32> %ins.1, %ins.0
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_xor_imm_0(<2 x i32> %a) #0 {
+; CHECK-NOI32X2-LABEL: test_xor_imm_0(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<5>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_xor_imm_0_param_0];
+; CHECK-NOI32X2-NEXT: xor.b32 %r3, %r2, 2;
+; CHECK-NOI32X2-NEXT: xor.b32 %r4, %r1, 1;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_xor_imm_0(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<5>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<2>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_xor_imm_0_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-I32X2-NEXT: xor.b32 %r3, %r2, 2;
+; CHECK-I32X2-NEXT: xor.b32 %r4, %r1, 1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-I32X2-NEXT: ret;
+ %r = xor <2 x i32> <i32 1, i32 2>, %a
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_xor_imm_1(<2 x i32> %a) #0 {
+; CHECK-NOI32X2-LABEL: test_xor_imm_1(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<5>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_xor_imm_1_param_0];
+; CHECK-NOI32X2-NEXT: xor.b32 %r3, %r2, 2;
+; CHECK-NOI32X2-NEXT: xor.b32 %r4, %r1, 1;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_xor_imm_1(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<5>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<2>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_xor_imm_1_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-I32X2-NEXT: xor.b32 %r3, %r2, 2;
+; CHECK-I32X2-NEXT: xor.b32 %r4, %r1, 1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-I32X2-NEXT: ret;
+ %r = xor <2 x i32> %a, <i32 1, i32 2>
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_and(<2 x i32> %a, <2 x i32> %b) #0 {
+; CHECK-NOI32X2-LABEL: test_and(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_and_param_1];
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_and_param_0];
+; CHECK-NOI32X2-NEXT: and.b32 %r5, %r2, %r4;
+; CHECK-NOI32X2-NEXT: and.b32 %r6, %r1, %r3;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_and(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<3>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_and_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_and_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2;
+; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1;
+; CHECK-I32X2-NEXT: and.b32 %r5, %r4, %r2;
+; CHECK-I32X2-NEXT: and.b32 %r6, %r3, %r1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-I32X2-NEXT: ret;
+ %r = and <2 x i32> %a, %b
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_and_computed(i32 %a) {
+; CHECK-LABEL: test_and_computed(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_and_computed_param_0];
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r1, 0};
+; CHECK-NEXT: ret;
+ %ins.0 = insertelement <2 x i32> zeroinitializer, i32 %a, i32 0
+ %ins.1 = insertelement <2 x i32> %ins.0, i32 5, i32 1
+ %r = and <2 x i32> %ins.1, %ins.0
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_and_imm_0(<2 x i32> %a) #0 {
+; CHECK-NOI32X2-LABEL: test_and_imm_0(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<5>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_and_imm_0_param_0];
+; CHECK-NOI32X2-NEXT: and.b32 %r3, %r2, 2;
+; CHECK-NOI32X2-NEXT: and.b32 %r4, %r1, 1;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_and_imm_0(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<5>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<2>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_and_imm_0_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-I32X2-NEXT: and.b32 %r3, %r2, 2;
+; CHECK-I32X2-NEXT: and.b32 %r4, %r1, 1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-I32X2-NEXT: ret;
+ %r = and <2 x i32> <i32 1, i32 2>, %a
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_and_imm_1(<2 x i32> %a) #0 {
+; CHECK-NOI32X2-LABEL: test_and_imm_1(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<5>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_and_imm_1_param_0];
+; CHECK-NOI32X2-NEXT: and.b32 %r3, %r2, 2;
+; CHECK-NOI32X2-NEXT: and.b32 %r4, %r1, 1;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_and_imm_1(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<5>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<2>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_and_imm_1_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-I32X2-NEXT: and.b32 %r3, %r2, 2;
+; CHECK-I32X2-NEXT: and.b32 %r4, %r1, 1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-I32X2-NEXT: ret;
+ %r = and <2 x i32> %a, <i32 1, i32 2>
+ ret <2 x i32> %r
+}
+
+define void @test_ldst_v2i32(ptr %a, ptr %b) {
+; CHECK-NOI32X2-LABEL: test_ldst_v2i32(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<3>;
+; CHECK-NOI32X2-NEXT: .reg .b64 %rd<3>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.b64 %rd2, [test_ldst_v2i32_param_1];
+; CHECK-NOI32X2-NEXT: ld.param.b64 %rd1, [test_ldst_v2i32_param_0];
+; CHECK-NOI32X2-NEXT: ld.v2.b32 {%r1, %r2}, [%rd1];
+; CHECK-NOI32X2-NEXT: st.v2.b32 [%rd2], {%r1, %r2};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_ldst_v2i32(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b64 %rd<4>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_ldst_v2i32_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_ldst_v2i32_param_0];
+; CHECK-I32X2-NEXT: ld.b64 %rd3, [%rd1];
+; CHECK-I32X2-NEXT: st.b64 [%rd2], %rd3;
+; CHECK-I32X2-NEXT: ret;
+ %t1 = load <2 x i32>, ptr %a
+ store <2 x i32> %t1, ptr %b, align 16
+ ret void
+}
+
+define void @test_ldst_v3i32(ptr %a, ptr %b) {
+; CHECK-LABEL: test_ldst_v3i32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd2, [test_ldst_v3i32_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_ldst_v3i32_param_0];
+; CHECK-NEXT: ld.b64 %rd3, [%rd1];
+; CHECK-NEXT: ld.b32 %r1, [%rd1+8];
+; CHECK-NEXT: st.b32 [%rd2+8], %r1;
+; CHECK-NEXT: st.b64 [%rd2], %rd3;
+; CHECK-NEXT: ret;
+ %t1 = load <3 x i32>, ptr %a
+ store <3 x i32> %t1, ptr %b, align 16
+ ret void
+}
+
+define void @test_ldst_v4i32(ptr %a, ptr %b) {
+; CHECK-NOI32X2-LABEL: test_ldst_v4i32(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<5>;
+; CHECK-NOI32X2-NEXT: .reg .b64 %rd<3>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.b64 %rd2, [test_ldst_v4i32_param_1];
+; CHECK-NOI32X2-NEXT: ld.param.b64 %rd1, [test_ldst_v4i32_param_0];
+; CHECK-NOI32X2-NEXT: ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
+; CHECK-NOI32X2-NEXT: st.v4.b32 [%rd2], {%r1, %r2, %r3, %r4};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_ldst_v4i32(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b64 %rd<5>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_ldst_v4i32_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_ldst_v4i32_param_0];
+; CHECK-I32X2-NEXT: ld.v2.b64 {%rd3, %rd4}, [%rd1];
+; CHECK-I32X2-NEXT: st.v2.b64 [%rd2], {%rd3, %rd4};
+; CHECK-I32X2-NEXT: ret;
+ %t1 = load <4 x i32>, ptr %a
+ store <4 x i32> %t1, ptr %b, align 16
+ ret void
+}
+
+define void @test_ldst_v2i32_unaligned(ptr %a, ptr %b) {
+; CHECK-NOI32X2-LABEL: test_ldst_v2i32_unaligned(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<13>;
+; CHECK-NOI32X2-NEXT: .reg .b64 %rd<3>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.b64 %rd2, [test_ldst_v2i32_unaligned_param_1];
+; CHECK-NOI32X2-NEXT: ld.param.b64 %rd1, [test_ldst_v2i32_unaligned_param_0];
+; CHECK-NOI32X2-NEXT: ld.b8 %r1, [%rd1+2];
+; CHECK-NOI32X2-NEXT: shl.b32 %r2, %r1, 16;
+; CHECK-NOI32X2-NEXT: ld.b8 %r3, [%rd1+3];
+; CHECK-NOI32X2-NEXT: shl.b32 %r4, %r3, 24;
+; CHECK-NOI32X2-NEXT: or.b32 %r5, %r4, %r2;
+; CHECK-NOI32X2-NEXT: ld.b8 %r6, [%rd1];
+; CHECK-NOI32X2-NEXT: ld.b8 %r7, [%rd1+1];
+; CHECK-NOI32X2-NEXT: ld.b8 %r8, [%rd1+4];
+; CHECK-NOI32X2-NEXT: ld.b8 %r9, [%rd1+5];
+; CHECK-NOI32X2-NEXT: ld.b8 %r10, [%rd1+6];
+; CHECK-NOI32X2-NEXT: ld.b8 %r11, [%rd1+7];
+; CHECK-NOI32X2-NEXT: st.b8 [%rd2+7], %r11;
+; CHECK-NOI32X2-NEXT: st.b8 [%rd2+6], %r10;
+; CHECK-NOI32X2-NEXT: st.b8 [%rd2+5], %r9;
+; CHECK-NOI32X2-NEXT: st.b8 [%rd2+4], %r8;
+; CHECK-NOI32X2-NEXT: st.b8 [%rd2+1], %r7;
+; CHECK-NOI32X2-NEXT: st.b8 [%rd2], %r6;
+; CHECK-NOI32X2-NEXT: st.b8 [%rd2+3], %r3;
+; CHECK-NOI32X2-NEXT: shr.u32 %r12, %r5, 16;
+; CHECK-NOI32X2-NEXT: st.b8 [%rd2+2], %r12;
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_ldst_v2i32_unaligned(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b64 %rd<28>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_ldst_v2i32_unaligned_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_ldst_v2i32_unaligned_param_0];
+; CHECK-I32X2-NEXT: ld.b8 %rd3, [%rd1];
+; CHECK-I32X2-NEXT: ld.b8 %rd4, [%rd1+1];
+; CHECK-I32X2-NEXT: shl.b64 %rd5, %rd4, 8;
+; CHECK-I32X2-NEXT: or.b64 %rd6, %rd5, %rd3;
+; CHECK-I32X2-NEXT: ld.b8 %rd7, [%rd1+2];
+; CHECK-I32X2-NEXT: shl.b64 %rd8, %rd7, 16;
+; CHECK-I32X2-NEXT: ld.b8 %rd9, [%rd1+3];
+; CHECK-I32X2-NEXT: shl.b64 %rd10, %rd9, 24;
+; CHECK-I32X2-NEXT: or.b64 %rd11, %rd10, %rd8;
+; CHECK-I32X2-NEXT: or.b64 %rd12, %rd11, %rd6;
+; CHECK-I32X2-NEXT: ld.b8 %rd13, [%rd1+4];
+; CHECK-I32X2-NEXT: ld.b8 %rd14, [%rd1+5];
+; CHECK-I32X2-NEXT: shl.b64 %rd15, %rd14, 8;
+; CHECK-I32X2-NEXT: or.b64 %rd16, %rd15, %rd13;
+; CHECK-I32X2-NEXT: ld.b8 %rd17, [%rd1+6];
+; CHECK-I32X2-NEXT: shl.b64 %rd18, %rd17, 16;
+; CHECK-I32X2-NEXT: ld.b8 %rd19, [%rd1+7];
+; CHECK-I32X2-NEXT: shl.b64 %rd20, %rd19, 24;
+; CHECK-I32X2-NEXT: or.b64 %rd21, %rd20, %rd18;
+; CHECK-I32X2-NEXT: or.b64 %rd22, %rd21, %rd16;
+; CHECK-I32X2-NEXT: shl.b64 %rd23, %rd22, 32;
+; CHECK-I32X2-NEXT: or.b64 %rd24, %rd23, %rd12;
+; CHECK-I32X2-NEXT: st.b8 [%rd2+6], %rd17;
+; CHECK-I32X2-NEXT: shr.u64 %rd25, %rd24, 56;
+; CHECK-I32X2-NEXT: st.b8 [%rd2+7], %rd25;
+; CHECK-I32X2-NEXT: st.b8 [%rd2+4], %rd13;
+; CHECK-I32X2-NEXT: shr.u64 %rd26, %rd24, 40;
+; CHECK-I32X2-NEXT: st.b8 [%rd2+5], %rd26;
+; CHECK-I32X2-NEXT: st.b8 [%rd2+1], %rd4;
+; CHECK-I32X2-NEXT: st.b8 [%rd2], %rd3;
+; CHECK-I32X2-NEXT: st.b8 [%rd2+3], %rd9;
+; CHECK-I32X2-NEXT: shr.u64 %rd27, %rd24, 16;
+; CHECK-I32X2-NEXT: st.b8 [%rd2+2], %rd27;
+; CHECK-I32X2-NEXT: ret;
+ %t1 = load <2 x i32>, ptr %a, align 1
+ store <2 x i32> %t1, ptr %b, align 1
+ ret void
+}
+
+declare <2 x i32> @test_callee(<2 x i32> %a, <2 x i32> %b) #0
+
+define <2 x i32> @test_call(<2 x i32> %a, <2 x i32> %b) #0 {
+; CHECK-NOI32X2-LABEL: test_call(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_call_param_1];
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_call_param_0];
+; CHECK-NOI32X2-NEXT: { // callseq 0, 0
+; CHECK-NOI32X2-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NOI32X2-NEXT: .param .align 8 .b8 param1[8];
+; CHECK-NOI32X2-NEXT: .param .align 8 .b8 retval0[8];
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [param1], {%r3, %r4};
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [param0], {%r1, %r2};
+; CHECK-NOI32X2-NEXT: call.uni (retval0), test_callee, (param0, param1);
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r5, %r6}, [retval0];
+; CHECK-NOI32X2-NEXT: } // callseq 0
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r5, %r6};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_call(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b64 %rd<4>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_call_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_call_param_0];
+; CHECK-I32X2-NEXT: { // callseq 0, 0
+; CHECK-I32X2-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-I32X2-NEXT: .param .align 8 .b8 param1[8];
+; CHECK-I32X2-NEXT: .param .align 8 .b8 retval0[8];
+; CHECK-I32X2-NEXT: st.param.b64 [param1], %rd2;
+; CHECK-I32X2-NEXT: st.param.b64 [param0], %rd1;
+; CHECK-I32X2-NEXT: call.uni (retval0), test_callee, (param0, param1);
+; CHECK-I32X2-NEXT: ld.param.b64 %rd3, [retval0];
+; CHECK-I32X2-NEXT: } // callseq 0
+; CHECK-I32X2-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-I32X2-NEXT: ret;
+ %r = call <2 x i32> @test_callee(<2 x i32> %a, <2 x i32> %b)
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_call_flipped(<2 x i32> %a, <2 x i32> %b) #0 {
+; CHECK-NOI32X2-LABEL: test_call_flipped(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_call_flipped_param_1];
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_call_flipped_param_0];
+; CHECK-NOI32X2-NEXT: { // callseq 1, 0
+; CHECK-NOI32X2-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NOI32X2-NEXT: .param .align 8 .b8 param1[8];
+; CHECK-NOI32X2-NEXT: .param .align 8 .b8 retval0[8];
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [param1], {%r1, %r2};
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [param0], {%r3, %r4};
+; CHECK-NOI32X2-NEXT: call.uni (retval0), test_callee, (param0, param1);
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r5, %r6}, [retval0];
+; CHECK-NOI32X2-NEXT: } // callseq 1
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r5, %r6};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_call_flipped(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b64 %rd<4>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_call_flipped_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_call_flipped_param_0];
+; CHECK-I32X2-NEXT: { // callseq 1, 0
+; CHECK-I32X2-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-I32X2-NEXT: .param .align 8 .b8 param1[8];
+; CHECK-I32X2-NEXT: .param .align 8 .b8 retval0[8];
+; CHECK-I32X2-NEXT: st.param.b64 [param1], %rd1;
+; CHECK-I32X2-NEXT: st.param.b64 [param0], %rd2;
+; CHECK-I32X2-NEXT: call.uni (retval0), test_callee, (param0, param1);
+; CHECK-I32X2-NEXT: ld.param.b64 %rd3, [retval0];
+; CHECK-I32X2-NEXT: } // callseq 1
+; CHECK-I32X2-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-I32X2-NEXT: ret;
+ %r = call <2 x i32> @test_callee(<2 x i32> %b, <2 x i32> %a)
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_tailcall_flipped(<2 x i32> %a, <2 x i32> %b) #0 {
+; CHECK-NOI32X2-LABEL: test_tailcall_flipped(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_tailcall_flipped_param_1];
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_tailcall_flipped_param_0];
+; CHECK-NOI32X2-NEXT: { // callseq 2, 0
+; CHECK-NOI32X2-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-NOI32X2-NEXT: .param .align 8 .b8 param1[8];
+; CHECK-NOI32X2-NEXT: .param .align 8 .b8 retval0[8];
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [param1], {%r1, %r2};
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [param0], {%r3, %r4};
+; CHECK-NOI32X2-NEXT: call.uni (retval0), test_callee, (param0, param1);
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r5, %r6}, [retval0];
+; CHECK-NOI32X2-NEXT: } // callseq 2
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r5, %r6};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_tailcall_flipped(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b64 %rd<4>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_tailcall_flipped_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_tailcall_flipped_param_0];
+; CHECK-I32X2-NEXT: { // callseq 2, 0
+; CHECK-I32X2-NEXT: .param .align 8 .b8 param0[8];
+; CHECK-I32X2-NEXT: .param .align 8 .b8 param1[8];
+; CHECK-I32X2-NEXT: .param .align 8 .b8 retval0[8];
+; CHECK-I32X2-NEXT: st.param.b64 [param1], %rd1;
+; CHECK-I32X2-NEXT: st.param.b64 [param0], %rd2;
+; CHECK-I32X2-NEXT: call.uni (retval0), test_callee, (param0, param1);
+; CHECK-I32X2-NEXT: ld.param.b64 %rd3, [retval0];
+; CHECK-I32X2-NEXT: } // callseq 2
+; CHECK-I32X2-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-I32X2-NEXT: ret;
+ %r = tail call <2 x i32> @test_callee(<2 x i32> %b, <2 x i32> %a)
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_select(<2 x i32> %a, <2 x i32> %b, i1 zeroext %c) #0 {
+; CHECK-NOI32X2-LABEL: test_select(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .pred %p<2>;
+; CHECK-NOI32X2-NEXT: .reg .b16 %rs<3>;
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.b8 %rs1, [test_select_param_2];
+; CHECK-NOI32X2-NEXT: and.b16 %rs2, %rs1, 1;
+; CHECK-NOI32X2-NEXT: setp.ne.b16 %p1, %rs2, 0;
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_select_param_1];
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_select_param_0];
+; CHECK-NOI32X2-NEXT: selp.b32 %r5, %r2, %r4, %p1;
+; CHECK-NOI32X2-NEXT: selp.b32 %r6, %r1, %r3, %p1;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_select(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .pred %p<2>;
+; CHECK-I32X2-NEXT: .reg .b16 %rs<3>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<4>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b8 %rs1, [test_select_param_2];
+; CHECK-I32X2-NEXT: and.b16 %rs2, %rs1, 1;
+; CHECK-I32X2-NEXT: setp.ne.b16 %p1, %rs2, 0;
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_select_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_select_param_0];
+; CHECK-I32X2-NEXT: selp.b64 %rd3, %rd1, %rd2, %p1;
+; CHECK-I32X2-NEXT: st.param.b64 [func_retval0], %rd3;
+; CHECK-I32X2-NEXT: ret;
+ %r = select i1 %c, <2 x i32> %a, <2 x i32> %b
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_select_cc(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) #0 {
+; CHECK-NOI32X2-LABEL: test_select_cc(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .pred %p<3>;
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<11>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r7, %r8}, [test_select_cc_param_3];
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r5, %r6}, [test_select_cc_param_2];
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_select_cc_param_1];
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_select_cc_param_0];
+; CHECK-NOI32X2-NEXT: setp.ne.b32 %p1, %r5, %r7;
+; CHECK-NOI32X2-NEXT: setp.ne.b32 %p2, %r6, %r8;
+; CHECK-NOI32X2-NEXT: selp.b32 %r9, %r2, %r4, %p2;
+; CHECK-NOI32X2-NEXT: selp.b32 %r10, %r1, %r3, %p1;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r10, %r9};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_select_cc(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .pred %p<3>;
+; CHECK-I32X2-NEXT: .reg .b32 %r<11>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<5>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd4, [test_select_cc_param_3];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd3, [test_select_cc_param_2];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_select_cc_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_select_cc_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd4;
+; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd3;
+; CHECK-I32X2-NEXT: setp.ne.b32 %p1, %r3, %r1;
+; CHECK-I32X2-NEXT: setp.ne.b32 %p2, %r4, %r2;
+; CHECK-I32X2-NEXT: mov.b64 {%r5, %r6}, %rd2;
+; CHECK-I32X2-NEXT: mov.b64 {%r7, %r8}, %rd1;
+; CHECK-I32X2-NEXT: selp.b32 %r9, %r8, %r6, %p2;
+; CHECK-I32X2-NEXT: selp.b32 %r10, %r7, %r5, %p1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r10, %r9};
+; CHECK-I32X2-NEXT: ret;
+ %cc = icmp ne <2 x i32> %c, %d
+ %r = select <2 x i1> %cc, <2 x i32> %a, <2 x i32> %b
+ ret <2 x i32> %r
+}
+
+define <2 x i16> @test_trunc_2xi32(<2 x i32> %a) #0 {
+; CHECK-NOI32X2-LABEL: test_trunc_2xi32(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<4>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_trunc_2xi32_param_0];
+; CHECK-NOI32X2-NEXT: prmt.b32 %r3, %r1, %r2, 0x5410U;
+; CHECK-NOI32X2-NEXT: st.param.b32 [func_retval0], %r3;
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_trunc_2xi32(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b64 %rd<2>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_trunc_2xi32_param_0];
+; CHECK-I32X2-NEXT: st.param.b32 [func_retval0], %rd1;
+; CHECK-I32X2-NEXT: ret;
+ %r = trunc <2 x i32> %a to <2 x i16>
+ ret <2 x i16> %r
+}
+
+define <2 x i32> @test_trunc_2xi64(<2 x i64> %a) #0 {
+; CHECK-LABEL: test_trunc_2xi64(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [test_trunc_2xi64_param_0];
+; CHECK-NEXT: cvt.u32.u64 %r1, %rd2;
+; CHECK-NEXT: cvt.u32.u64 %r2, %rd1;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-NEXT: ret;
+ %r = trunc <2 x i64> %a to <2 x i32>
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_zext_2xi32(<2 x i16> %a) #0 {
+; CHECK-LABEL: test_zext_2xi32(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .b32 %r<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_zext_2xi32_param_0];
+; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r1;
+; CHECK-NEXT: cvt.u32.u16 %r2, %rs2;
+; CHECK-NEXT: cvt.u32.u16 %r3, %rs1;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r3, %r2};
+; CHECK-NEXT: ret;
+ %r = zext <2 x i16> %a to <2 x i32>
+ ret <2 x i32> %r
+}
+
+define <2 x i64> @test_zext_2xi64(<2 x i32> %a) #0 {
+; CHECK-NOI32X2-LABEL: test_zext_2xi64(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<3>;
+; CHECK-NOI32X2-NEXT: .reg .b64 %rd<3>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_zext_2xi64_param_0];
+; CHECK-NOI32X2-NEXT: cvt.u64.u32 %rd1, %r2;
+; CHECK-NOI32X2-NEXT: cvt.u64.u32 %rd2, %r1;
+; CHECK-NOI32X2-NEXT: st.param.v2.b64 [func_retval0], {%rd2, %rd1};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_zext_2xi64(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<3>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<4>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_zext_2xi64_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-I32X2-NEXT: cvt.u64.u32 %rd2, %r2;
+; CHECK-I32X2-NEXT: cvt.u64.u32 %rd3, %r1;
+; CHECK-I32X2-NEXT: st.param.v2.b64 [func_retval0], {%rd3, %rd2};
+; CHECK-I32X2-NEXT: ret;
+ %r = zext <2 x i32> %a to <2 x i64>
+ ret <2 x i64> %r
+}
+
+define <2 x i32> @test_bitcast_i64_to_2xi32(i64 %a) #0 {
+; CHECK-LABEL: test_bitcast_i64_to_2xi32(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [test_bitcast_i64_to_2xi32_param_0];
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-NEXT: ret;
+ %r = bitcast i64 %a to <2 x i32>
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_bitcast_double_to_2xi32(double %a) #0 {
+; CHECK-LABEL: test_bitcast_double_to_2xi32(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [test_bitcast_double_to_2xi32_param_0];
+; CHECK-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-NEXT: ret;
+ %r = bitcast double %a to <2 x i32>
+ ret <2 x i32> %r
+}
+
+define i64 @test_bitcast_2xi32_to_i64(<2 x i32> %a) #0 {
+; CHECK-NOI32X2-LABEL: test_bitcast_2xi32_to_i64(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<3>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_bitcast_2xi32_to_i64_param_0];
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r1, %r2};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_bitcast_2xi32_to_i64(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b64 %rd<2>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_bitcast_2xi32_to_i64_param_0];
+; CHECK-I32X2-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-I32X2-NEXT: ret;
+ %r = bitcast <2 x i32> %a to i64
+ ret i64 %r
+}
+
+define double @test_bitcast_2xi32_to_double(<2 x i32> %a) #0 {
+; CHECK-NOI32X2-LABEL: test_bitcast_2xi32_to_double(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<3>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_bitcast_2xi32_to_double_param_0];
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r1, %r2};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_bitcast_2xi32_to_double(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b64 %rd<2>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_bitcast_2xi32_to_double_param_0];
+; CHECK-I32X2-NEXT: st.param.b64 [func_retval0], %rd1;
+; CHECK-I32X2-NEXT: ret;
+ %r = bitcast <2 x i32> %a to double
+ ret double %r
+}
+
+
+define <4 x half> @test_bitcast_2xi32_to_4xhalf(i32 %a) #0 {
+; CHECK-LABEL: test_bitcast_2xi32_to_4xhalf(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_bitcast_2xi32_to_4xhalf_param_0];
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r1, 5};
+; CHECK-NEXT: ret;
+ %ins.0 = insertelement <2 x i32> poison, i32 %a, i32 0
+ %ins.1 = insertelement <2 x i32> %ins.0, i32 5, i32 1
+ %r = bitcast <2 x i32> %ins.1 to <4 x half>
+ ret <4 x half> %r
+}
+
+
+define <2 x i32> @test_shufflevector(<2 x i32> %a) #0 {
+; CHECK-NOI32X2-LABEL: test_shufflevector(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<3>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_shufflevector_param_0];
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_shufflevector(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<3>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<2>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_shufflevector_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-I32X2-NEXT: ret;
+ %s = shufflevector <2 x i32> %a, <2 x i32> poison, <2 x i32> <i32 1, i32 0>
+ ret <2 x i32> %s
+}
+
+define <2 x i32> @test_shufflevector_2(<2 x i32> %a, <2 x i32> %b) #0 {
+; CHECK-NOI32X2-LABEL: test_shufflevector_2(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<5>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_shufflevector_2_param_1];
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_shufflevector_2_param_0];
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r4};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_shufflevector_2(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<3>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<3>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_shufflevector_2_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_shufflevector_2_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {_, %r1}, %rd2;
+; CHECK-I32X2-NEXT: mov.b64 {_, %r2}, %rd1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-I32X2-NEXT: ret;
+ %s = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3>
+ ret <2 x i32> %s
+}
+
+
+define <2 x i32> @test_insertelement(<2 x i32> %a, i32 %x) #0 {
+; CHECK-NOI32X2-LABEL: test_insertelement(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<4>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_insertelement_param_0];
+; CHECK-NOI32X2-NEXT: ld.param.b32 %r3, [test_insertelement_param_1];
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r1, %r3};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_insertelement(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<3>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<2>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b32 %r1, [test_insertelement_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_insertelement_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r2, _}, %rd1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-I32X2-NEXT: ret;
+ %i = insertelement <2 x i32> %a, i32 %x, i64 1
+ ret <2 x i32> %i
+}
+
+define <2 x i32> @test_fptosi_2xhalf_to_2xi32(<2 x half> %a) #0 {
+; CHECK-LABEL: test_fptosi_2xhalf_to_2xi32(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .b32 %r<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_fptosi_2xhalf_to_2xi32_param_0];
+; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r1;
+; CHECK-NEXT: cvt.rzi.s32.f16 %r2, %rs2;
+; CHECK-NEXT: cvt.rzi.s32.f16 %r3, %rs1;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r3, %r2};
+; CHECK-NEXT: ret;
+ %r = fptosi <2 x half> %a to <2 x i32>
+ ret <2 x i32> %r
+}
+
+define <2 x i32> @test_fptoui_2xhalf_to_2xi32(<2 x half> %a) #0 {
+; CHECK-LABEL: test_fptoui_2xhalf_to_2xi32(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .b32 %r<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_fptoui_2xhalf_to_2xi32_param_0];
+; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r1;
+; CHECK-NEXT: cvt.rzi.u32.f16 %r2, %rs2;
+; CHECK-NEXT: cvt.rzi.u32.f16 %r3, %rs1;
+; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r3, %r2};
+; CHECK-NEXT: ret;
+ %r = fptoui <2 x half> %a to <2 x i32>
+ ret <2 x i32> %r
+}
+
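+; There is no packed i32 remainder, so srem is scalarized into per-lane rem.s32 ops.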
+define void @test_srem_v2i32(ptr %a, ptr %b, ptr %c) {
+; CHECK-LABEL: test_srem_v2i32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<7>;
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: ld.param.b64 %rd3, [test_srem_v2i32_param_2];
+; CHECK-NEXT: ld.param.b64 %rd2, [test_srem_v2i32_param_1];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_srem_v2i32_param_0];
+; CHECK-NEXT: ld.v2.b32 {%r1, %r2}, [%rd1];
+; CHECK-NEXT: ld.v2.b32 {%r3, %r4}, [%rd2];
+; CHECK-NEXT: rem.s32 %r5, %r2, %r4;
+; CHECK-NEXT: rem.s32 %r6, %r1, %r3;
+; CHECK-NEXT: st.v2.b32 [%rd3], {%r6, %r5};
+; CHECK-NEXT: ret;
+entry:
+ %t57 = load <2 x i32>, ptr %a, align 8
+ %t59 = load <2 x i32>, ptr %b, align 8
+ %x = srem <2 x i32> %t57, %t59
+ store <2 x i32> %x, ptr %c, align 8
+ ret void
+}
+
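+; v3i32 legalizes as a two-element part plus a scalar tail; with +i32x2 the pair
+; stays a single v2 load/store instead of b64 shift/or reassembly.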
+define void @test_srem_v3i32(ptr %a, ptr %b, ptr %c) {
+; CHECK-NOI32X2-LABEL: test_srem_v3i32(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<10>;
+; CHECK-NOI32X2-NEXT: .reg .b64 %rd<10>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0: // %entry
+; CHECK-NOI32X2-NEXT: ld.param.b64 %rd3, [test_srem_v3i32_param_2];
+; CHECK-NOI32X2-NEXT: ld.param.b64 %rd2, [test_srem_v3i32_param_1];
+; CHECK-NOI32X2-NEXT: ld.param.b64 %rd1, [test_srem_v3i32_param_0];
+; CHECK-NOI32X2-NEXT: ld.b32 %r1, [%rd1+8];
+; CHECK-NOI32X2-NEXT: ld.b64 %rd4, [%rd1];
+; CHECK-NOI32X2-NEXT: { .reg .b32 tmp; mov.b64 {tmp, %r2}, %rd4; }
+; CHECK-NOI32X2-NEXT: cvt.u32.u64 %r3, %rd4;
+; CHECK-NOI32X2-NEXT: ld.b32 %r4, [%rd2+8];
+; CHECK-NOI32X2-NEXT: ld.b64 %rd5, [%rd2];
+; CHECK-NOI32X2-NEXT: { .reg .b32 tmp; mov.b64 {tmp, %r5}, %rd5; }
+; CHECK-NOI32X2-NEXT: cvt.u32.u64 %r6, %rd5;
+; CHECK-NOI32X2-NEXT: rem.s32 %r7, %r3, %r6;
+; CHECK-NOI32X2-NEXT: cvt.u64.u32 %rd6, %r7;
+; CHECK-NOI32X2-NEXT: rem.s32 %r8, %r2, %r5;
+; CHECK-NOI32X2-NEXT: cvt.u64.u32 %rd7, %r8;
+; CHECK-NOI32X2-NEXT: shl.b64 %rd8, %rd7, 32;
+; CHECK-NOI32X2-NEXT: or.b64 %rd9, %rd6, %rd8;
+; CHECK-NOI32X2-NEXT: rem.s32 %r9, %r1, %r4;
+; CHECK-NOI32X2-NEXT: st.b32 [%rd3+8], %r9;
+; CHECK-NOI32X2-NEXT: st.b64 [%rd3], %rd9;
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_srem_v3i32(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<10>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<4>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0: // %entry
+; CHECK-I32X2-NEXT: ld.param.b64 %rd3, [test_srem_v3i32_param_2];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_srem_v3i32_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_srem_v3i32_param_0];
+; CHECK-I32X2-NEXT: ld.v2.b32 {%r1, %r2}, [%rd1];
+; CHECK-I32X2-NEXT: ld.b32 %r3, [%rd1+8];
+; CHECK-I32X2-NEXT: ld.v2.b32 {%r4, %r5}, [%rd2];
+; CHECK-I32X2-NEXT: ld.b32 %r6, [%rd2+8];
+; CHECK-I32X2-NEXT: rem.s32 %r7, %r3, %r6;
+; CHECK-I32X2-NEXT: rem.s32 %r8, %r2, %r5;
+; CHECK-I32X2-NEXT: rem.s32 %r9, %r1, %r4;
+; CHECK-I32X2-NEXT: st.v2.b32 [%rd3], {%r9, %r8};
+; CHECK-I32X2-NEXT: st.b32 [%rd3+8], %r7;
+; CHECK-I32X2-NEXT: ret;
+entry:
+ %t57 = load <3 x i32>, ptr %a, align 8
+ %t59 = load <3 x i32>, ptr %b, align 8
+ %x = srem <3 x i32> %t57, %t59
+ store <3 x i32> %x, ptr %c, align 8
+ ret void
+}
+
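+; The i1 compare results are widened with selp to 0/-1 in each lane.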
+define void @test_sext_v2i1_to_v2i32(ptr %a, ptr %b, ptr %c) {
+; CHECK-NOI32X2-LABEL: test_sext_v2i1_to_v2i32(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .pred %p<3>;
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-NOI32X2-NEXT: .reg .b64 %rd<4>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0: // %entry
+; CHECK-NOI32X2-NEXT: ld.param.b64 %rd3, [test_sext_v2i1_to_v2i32_param_2];
+; CHECK-NOI32X2-NEXT: ld.param.b64 %rd2, [test_sext_v2i1_to_v2i32_param_1];
+; CHECK-NOI32X2-NEXT: ld.param.b64 %rd1, [test_sext_v2i1_to_v2i32_param_0];
+; CHECK-NOI32X2-NEXT: ld.b32 %r1, [%rd1];
+; CHECK-NOI32X2-NEXT: ld.b32 %r2, [%rd1+4];
+; CHECK-NOI32X2-NEXT: ld.b32 %r3, [%rd2];
+; CHECK-NOI32X2-NEXT: ld.b32 %r4, [%rd2+4];
+; CHECK-NOI32X2-NEXT: setp.gt.u32 %p1, %r2, %r4;
+; CHECK-NOI32X2-NEXT: setp.gt.u32 %p2, %r1, %r3;
+; CHECK-NOI32X2-NEXT: selp.b32 %r5, -1, 0, %p2;
+; CHECK-NOI32X2-NEXT: selp.b32 %r6, -1, 0, %p1;
+; CHECK-NOI32X2-NEXT: st.b32 [%rd3+4], %r6;
+; CHECK-NOI32X2-NEXT: st.b32 [%rd3], %r5;
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_sext_v2i1_to_v2i32(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .pred %p<3>;
+; CHECK-I32X2-NEXT: .reg .b32 %r<7>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<14>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0: // %entry
+; CHECK-I32X2-NEXT: ld.param.b64 %rd3, [test_sext_v2i1_to_v2i32_param_2];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_sext_v2i1_to_v2i32_param_1];
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_sext_v2i1_to_v2i32_param_0];
+; CHECK-I32X2-NEXT: ld.b32 %rd4, [%rd1];
+; CHECK-I32X2-NEXT: ld.b32 %rd5, [%rd1+4];
+; CHECK-I32X2-NEXT: shl.b64 %rd6, %rd5, 32;
+; CHECK-I32X2-NEXT: or.b64 %rd7, %rd6, %rd4;
+; CHECK-I32X2-NEXT: ld.b32 %rd8, [%rd2];
+; CHECK-I32X2-NEXT: ld.b32 %rd9, [%rd2+4];
+; CHECK-I32X2-NEXT: shl.b64 %rd10, %rd9, 32;
+; CHECK-I32X2-NEXT: or.b64 %rd11, %rd10, %rd8;
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd11;
+; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd7;
+; CHECK-I32X2-NEXT: setp.gt.u32 %p1, %r3, %r1;
+; CHECK-I32X2-NEXT: setp.gt.u32 %p2, %r4, %r2;
+; CHECK-I32X2-NEXT: selp.b32 %r5, -1, 0, %p2;
+; CHECK-I32X2-NEXT: selp.b32 %r6, -1, 0, %p1;
+; CHECK-I32X2-NEXT: mov.b64 %rd12, {%r6, %r5};
+; CHECK-I32X2-NEXT: st.b32 [%rd3], %rd12;
+; CHECK-I32X2-NEXT: shr.u64 %rd13, %rd12, 32;
+; CHECK-I32X2-NEXT: st.b32 [%rd3+4], %rd13;
+; CHECK-I32X2-NEXT: ret;
+entry:
+ %t1 = load <2 x i32>, ptr %a, align 4
+ %t2 = load <2 x i32>, ptr %b, align 4
+ %t5 = icmp ugt <2 x i32> %t1, %t2
+ %t6 = sext <2 x i1> %t5 to <2 x i32>
+ store <2 x i32> %t6, ptr %c, align 4
+ ret void
+}
+
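+; uitofp is scalarized into per-lane cvt.rn.f32.u32.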
+define <2 x float> @test_uitofp_v2i32(<2 x i32> %a) {
+; CHECK-NOI32X2-LABEL: test_uitofp_v2i32(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<5>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_uitofp_v2i32_param_0];
+; CHECK-NOI32X2-NEXT: cvt.rn.f32.u32 %r3, %r2;
+; CHECK-NOI32X2-NEXT: cvt.rn.f32.u32 %r4, %r1;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_uitofp_v2i32(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<5>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<2>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_uitofp_v2i32_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-I32X2-NEXT: cvt.rn.f32.u32 %r3, %r2;
+; CHECK-I32X2-NEXT: cvt.rn.f32.u32 %r4, %r1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-I32X2-NEXT: ret;
+ %r = uitofp <2 x i32> %a to <2 x float>
+ ret <2 x float> %r
+}
+
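+; Signed counterpart of the test above, using cvt.rn.f32.s32.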
+define <2 x float> @test_sitofp_v2i32(<2 x i32> %a) {
+; CHECK-NOI32X2-LABEL: test_sitofp_v2i32(
+; CHECK-NOI32X2: {
+; CHECK-NOI32X2-NEXT: .reg .b32 %r<5>;
+; CHECK-NOI32X2-EMPTY:
+; CHECK-NOI32X2-NEXT: // %bb.0:
+; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_sitofp_v2i32_param_0];
+; CHECK-NOI32X2-NEXT: cvt.rn.f32.s32 %r3, %r2;
+; CHECK-NOI32X2-NEXT: cvt.rn.f32.s32 %r4, %r1;
+; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NOI32X2-NEXT: ret;
+;
+; CHECK-I32X2-LABEL: test_sitofp_v2i32(
+; CHECK-I32X2: {
+; CHECK-I32X2-NEXT: .reg .b32 %r<5>;
+; CHECK-I32X2-NEXT: .reg .b64 %rd<2>;
+; CHECK-I32X2-EMPTY:
+; CHECK-I32X2-NEXT: // %bb.0:
+; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_sitofp_v2i32_param_0];
+; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1;
+; CHECK-I32X2-NEXT: cvt.rn.f32.s32 %r3, %r2;
+; CHECK-I32X2-NEXT: cvt.rn.f32.s32 %r4, %r1;
+; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-I32X2-NEXT: ret;
+ %r = sitofp <2 x i32> %a to <2 x float>
+ ret <2 x float> %r
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-alloc.ll b/llvm/test/CodeGen/NVPTX/tcgen05-alloc.ll
index 41a0e81..1edb387 100644
--- a/llvm/test/CodeGen/NVPTX/tcgen05-alloc.ll
+++ b/llvm/test/CodeGen/NVPTX/tcgen05-alloc.ll
@@ -12,63 +12,104 @@ declare void @llvm.nvvm.tcgen05.alloc.cg2(ptr %addr, i32 %ncols)
declare void @llvm.nvvm.tcgen05.alloc.shared.cg1(ptr addrspace(3) %addr, i32 %ncols)
declare void @llvm.nvvm.tcgen05.alloc.shared.cg2(ptr addrspace(3) %addr, i32 %ncols)
-; CHECK-LABEL: test_tcgen05_alloc
-define void @test_tcgen05_alloc(ptr %addr, i32 %ncols) {
-; CHECK_PTX64-LABEL: test_tcgen05_alloc(
+define void @test_tcgen05_alloc_cg1(ptr %addr, i32 %ncols) {
+; CHECK_PTX64-LABEL: test_tcgen05_alloc_cg1(
; CHECK_PTX64: {
; CHECK_PTX64-NEXT: .reg .b32 %r<2>;
; CHECK_PTX64-NEXT: .reg .b64 %rd<2>;
; CHECK_PTX64-EMPTY:
; CHECK_PTX64-NEXT: // %bb.0:
-; CHECK_PTX64-NEXT: ld.param.b64 %rd1, [test_tcgen05_alloc_param_0];
-; CHECK_PTX64-NEXT: ld.param.b32 %r1, [test_tcgen05_alloc_param_1];
+; CHECK_PTX64-NEXT: ld.param.b64 %rd1, [test_tcgen05_alloc_cg1_param_0];
+; CHECK_PTX64-NEXT: ld.param.b32 %r1, [test_tcgen05_alloc_cg1_param_1];
; CHECK_PTX64-NEXT: tcgen05.alloc.cta_group::1.sync.aligned.b32 [%rd1], %r1;
-; CHECK_PTX64-NEXT: tcgen05.alloc.cta_group::2.sync.aligned.b32 [%rd1], %r1;
; CHECK_PTX64-NEXT: ret;
;
-; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_alloc(
+; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_alloc_cg1(
; CHECK_PTX64_SHARED32: {
; CHECK_PTX64_SHARED32-NEXT: .reg .b32 %r<2>;
; CHECK_PTX64_SHARED32-NEXT: .reg .b64 %rd<2>;
; CHECK_PTX64_SHARED32-EMPTY:
; CHECK_PTX64_SHARED32-NEXT: // %bb.0:
-; CHECK_PTX64_SHARED32-NEXT: ld.param.b64 %rd1, [test_tcgen05_alloc_param_0];
-; CHECK_PTX64_SHARED32-NEXT: ld.param.b32 %r1, [test_tcgen05_alloc_param_1];
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b64 %rd1, [test_tcgen05_alloc_cg1_param_0];
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b32 %r1, [test_tcgen05_alloc_cg1_param_1];
; CHECK_PTX64_SHARED32-NEXT: tcgen05.alloc.cta_group::1.sync.aligned.b32 [%rd1], %r1;
-; CHECK_PTX64_SHARED32-NEXT: tcgen05.alloc.cta_group::2.sync.aligned.b32 [%rd1], %r1;
; CHECK_PTX64_SHARED32-NEXT: ret;
call void @llvm.nvvm.tcgen05.alloc.cg1(ptr %addr, i32 %ncols)
- call void @llvm.nvvm.tcgen05.alloc.cg2(ptr %addr, i32 %ncols)
+ ret void
+}
+define void @test_tcgen05_alloc_cg2(ptr %addr, i32 %ncols) {
+; CHECK_PTX64-LABEL: test_tcgen05_alloc_cg2(
+; CHECK_PTX64: {
+; CHECK_PTX64-NEXT: .reg .b32 %r<2>;
+; CHECK_PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK_PTX64-EMPTY:
+; CHECK_PTX64-NEXT: // %bb.0:
+; CHECK_PTX64-NEXT: ld.param.b64 %rd1, [test_tcgen05_alloc_cg2_param_0];
+; CHECK_PTX64-NEXT: ld.param.b32 %r1, [test_tcgen05_alloc_cg2_param_1];
+; CHECK_PTX64-NEXT: tcgen05.alloc.cta_group::2.sync.aligned.b32 [%rd1], %r1;
+; CHECK_PTX64-NEXT: ret;
+;
+; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_alloc_cg2(
+; CHECK_PTX64_SHARED32: {
+; CHECK_PTX64_SHARED32-NEXT: .reg .b32 %r<2>;
+; CHECK_PTX64_SHARED32-NEXT: .reg .b64 %rd<2>;
+; CHECK_PTX64_SHARED32-EMPTY:
+; CHECK_PTX64_SHARED32-NEXT: // %bb.0:
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b64 %rd1, [test_tcgen05_alloc_cg2_param_0];
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b32 %r1, [test_tcgen05_alloc_cg2_param_1];
+; CHECK_PTX64_SHARED32-NEXT: tcgen05.alloc.cta_group::2.sync.aligned.b32 [%rd1], %r1;
+; CHECK_PTX64_SHARED32-NEXT: ret;
+ call void @llvm.nvvm.tcgen05.alloc.cg2(ptr %addr, i32 %ncols)
ret void
}
-; CHECK-LABEL: test_tcgen05_alloc_shared
-define void @test_tcgen05_alloc_shared(ptr addrspace(3) %addr, i32 %ncols) {
-; CHECK_PTX64-LABEL: test_tcgen05_alloc_shared(
+define void @test_tcgen05_alloc_shared_cg1(ptr addrspace(3) %addr, i32 %ncols) {
+; CHECK_PTX64-LABEL: test_tcgen05_alloc_shared_cg1(
; CHECK_PTX64: {
; CHECK_PTX64-NEXT: .reg .b32 %r<2>;
; CHECK_PTX64-NEXT: .reg .b64 %rd<2>;
; CHECK_PTX64-EMPTY:
; CHECK_PTX64-NEXT: // %bb.0:
-; CHECK_PTX64-NEXT: ld.param.b64 %rd1, [test_tcgen05_alloc_shared_param_0];
-; CHECK_PTX64-NEXT: ld.param.b32 %r1, [test_tcgen05_alloc_shared_param_1];
+; CHECK_PTX64-NEXT: ld.param.b64 %rd1, [test_tcgen05_alloc_shared_cg1_param_0];
+; CHECK_PTX64-NEXT: ld.param.b32 %r1, [test_tcgen05_alloc_shared_cg1_param_1];
; CHECK_PTX64-NEXT: tcgen05.alloc.cta_group::1.sync.aligned.shared::cta.b32 [%rd1], %r1;
-; CHECK_PTX64-NEXT: tcgen05.alloc.cta_group::2.sync.aligned.shared::cta.b32 [%rd1], %r1;
; CHECK_PTX64-NEXT: ret;
;
-; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_alloc_shared(
+; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_alloc_shared_cg1(
; CHECK_PTX64_SHARED32: {
; CHECK_PTX64_SHARED32-NEXT: .reg .b32 %r<3>;
; CHECK_PTX64_SHARED32-EMPTY:
; CHECK_PTX64_SHARED32-NEXT: // %bb.0:
-; CHECK_PTX64_SHARED32-NEXT: ld.param.b32 %r1, [test_tcgen05_alloc_shared_param_0];
-; CHECK_PTX64_SHARED32-NEXT: ld.param.b32 %r2, [test_tcgen05_alloc_shared_param_1];
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b32 %r1, [test_tcgen05_alloc_shared_cg1_param_0];
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b32 %r2, [test_tcgen05_alloc_shared_cg1_param_1];
; CHECK_PTX64_SHARED32-NEXT: tcgen05.alloc.cta_group::1.sync.aligned.shared::cta.b32 [%r1], %r2;
-; CHECK_PTX64_SHARED32-NEXT: tcgen05.alloc.cta_group::2.sync.aligned.shared::cta.b32 [%r1], %r2;
; CHECK_PTX64_SHARED32-NEXT: ret;
call void @llvm.nvvm.tcgen05.alloc.shared.cg1(ptr addrspace(3) %addr, i32 %ncols)
+ ret void
+}
+define void @test_tcgen05_alloc_shared_cg2(ptr addrspace(3) %addr, i32 %ncols) {
+; CHECK_PTX64-LABEL: test_tcgen05_alloc_shared_cg2(
+; CHECK_PTX64: {
+; CHECK_PTX64-NEXT: .reg .b32 %r<2>;
+; CHECK_PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK_PTX64-EMPTY:
+; CHECK_PTX64-NEXT: // %bb.0:
+; CHECK_PTX64-NEXT: ld.param.b64 %rd1, [test_tcgen05_alloc_shared_cg2_param_0];
+; CHECK_PTX64-NEXT: ld.param.b32 %r1, [test_tcgen05_alloc_shared_cg2_param_1];
+; CHECK_PTX64-NEXT: tcgen05.alloc.cta_group::2.sync.aligned.shared::cta.b32 [%rd1], %r1;
+; CHECK_PTX64-NEXT: ret;
+;
+; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_alloc_shared_cg2(
+; CHECK_PTX64_SHARED32: {
+; CHECK_PTX64_SHARED32-NEXT: .reg .b32 %r<3>;
+; CHECK_PTX64_SHARED32-EMPTY:
+; CHECK_PTX64_SHARED32-NEXT: // %bb.0:
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b32 %r1, [test_tcgen05_alloc_shared_cg2_param_0];
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b32 %r2, [test_tcgen05_alloc_shared_cg2_param_1];
+; CHECK_PTX64_SHARED32-NEXT: tcgen05.alloc.cta_group::2.sync.aligned.shared::cta.b32 [%r1], %r2;
+; CHECK_PTX64_SHARED32-NEXT: ret;
call void @llvm.nvvm.tcgen05.alloc.shared.cg2(ptr addrspace(3) %addr, i32 %ncols)
ret void
}
@@ -76,31 +117,50 @@ define void @test_tcgen05_alloc_shared(ptr addrspace(3) %addr, i32 %ncols) {
declare void @llvm.nvvm.tcgen05.dealloc.cg1(ptr addrspace(6) %tmem_addr, i32 %ncols)
declare void @llvm.nvvm.tcgen05.dealloc.cg2(ptr addrspace(6) %tmem_addr, i32 %ncols)
-; CHECK-LABEL: test_tcgen05_dealloc
-define void @test_tcgen05_dealloc(ptr addrspace(6) %tmem_addr, i32 %ncols) {
-; CHECK_PTX64-LABEL: test_tcgen05_dealloc(
+define void @test_tcgen05_dealloc_cg1(ptr addrspace(6) %tmem_addr, i32 %ncols) {
+; CHECK_PTX64-LABEL: test_tcgen05_dealloc_cg1(
; CHECK_PTX64: {
; CHECK_PTX64-NEXT: .reg .b32 %r<3>;
; CHECK_PTX64-EMPTY:
; CHECK_PTX64-NEXT: // %bb.0:
-; CHECK_PTX64-NEXT: ld.param.b32 %r1, [test_tcgen05_dealloc_param_0];
-; CHECK_PTX64-NEXT: ld.param.b32 %r2, [test_tcgen05_dealloc_param_1];
+; CHECK_PTX64-NEXT: ld.param.b32 %r1, [test_tcgen05_dealloc_cg1_param_0];
+; CHECK_PTX64-NEXT: ld.param.b32 %r2, [test_tcgen05_dealloc_cg1_param_1];
; CHECK_PTX64-NEXT: tcgen05.dealloc.cta_group::1.sync.aligned.b32 %r1, %r2;
-; CHECK_PTX64-NEXT: tcgen05.dealloc.cta_group::2.sync.aligned.b32 %r1, %r2;
; CHECK_PTX64-NEXT: ret;
;
-; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_dealloc(
+; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_dealloc_cg1(
; CHECK_PTX64_SHARED32: {
; CHECK_PTX64_SHARED32-NEXT: .reg .b32 %r<3>;
; CHECK_PTX64_SHARED32-EMPTY:
; CHECK_PTX64_SHARED32-NEXT: // %bb.0:
-; CHECK_PTX64_SHARED32-NEXT: ld.param.b32 %r1, [test_tcgen05_dealloc_param_0];
-; CHECK_PTX64_SHARED32-NEXT: ld.param.b32 %r2, [test_tcgen05_dealloc_param_1];
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b32 %r1, [test_tcgen05_dealloc_cg1_param_0];
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b32 %r2, [test_tcgen05_dealloc_cg1_param_1];
; CHECK_PTX64_SHARED32-NEXT: tcgen05.dealloc.cta_group::1.sync.aligned.b32 %r1, %r2;
-; CHECK_PTX64_SHARED32-NEXT: tcgen05.dealloc.cta_group::2.sync.aligned.b32 %r1, %r2;
; CHECK_PTX64_SHARED32-NEXT: ret;
call void @llvm.nvvm.tcgen05.dealloc.cg1(ptr addrspace(6) %tmem_addr, i32 %ncols)
+ ret void
+}
+define void @test_tcgen05_dealloc_cg2(ptr addrspace(6) %tmem_addr, i32 %ncols) {
+; CHECK_PTX64-LABEL: test_tcgen05_dealloc_cg2(
+; CHECK_PTX64: {
+; CHECK_PTX64-NEXT: .reg .b32 %r<3>;
+; CHECK_PTX64-EMPTY:
+; CHECK_PTX64-NEXT: // %bb.0:
+; CHECK_PTX64-NEXT: ld.param.b32 %r1, [test_tcgen05_dealloc_cg2_param_0];
+; CHECK_PTX64-NEXT: ld.param.b32 %r2, [test_tcgen05_dealloc_cg2_param_1];
+; CHECK_PTX64-NEXT: tcgen05.dealloc.cta_group::2.sync.aligned.b32 %r1, %r2;
+; CHECK_PTX64-NEXT: ret;
+;
+; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_dealloc_cg2(
+; CHECK_PTX64_SHARED32: {
+; CHECK_PTX64_SHARED32-NEXT: .reg .b32 %r<3>;
+; CHECK_PTX64_SHARED32-EMPTY:
+; CHECK_PTX64_SHARED32-NEXT: // %bb.0:
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b32 %r1, [test_tcgen05_dealloc_cg2_param_0];
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b32 %r2, [test_tcgen05_dealloc_cg2_param_1];
+; CHECK_PTX64_SHARED32-NEXT: tcgen05.dealloc.cta_group::2.sync.aligned.b32 %r1, %r2;
+; CHECK_PTX64_SHARED32-NEXT: ret;
call void @llvm.nvvm.tcgen05.dealloc.cg2(ptr addrspace(6) %tmem_addr, i32 %ncols)
ret void
}
@@ -108,27 +168,42 @@ define void @test_tcgen05_dealloc(ptr addrspace(6) %tmem_addr, i32 %ncols) {
declare void @llvm.nvvm.tcgen05.relinq.alloc.permit.cg1()
declare void @llvm.nvvm.tcgen05.relinq.alloc.permit.cg2()
-; CHECK-LABEL: test_tcgen05_relinquish_alloc_permit
-define void @test_tcgen05_relinquish_alloc_permit() {
-; CHECK_PTX64-LABEL: test_tcgen05_relinquish_alloc_permit(
+define void @test_tcgen05_relinquish_alloc_permit_cg1() {
+; CHECK_PTX64-LABEL: test_tcgen05_relinquish_alloc_permit_cg1(
; CHECK_PTX64: {
; CHECK_PTX64-EMPTY:
; CHECK_PTX64-EMPTY:
; CHECK_PTX64-NEXT: // %bb.0:
; CHECK_PTX64-NEXT: tcgen05.relinquish_alloc_permit.cta_group::1.sync.aligned;
-; CHECK_PTX64-NEXT: tcgen05.relinquish_alloc_permit.cta_group::2.sync.aligned;
; CHECK_PTX64-NEXT: ret;
;
-; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_relinquish_alloc_permit(
+; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_relinquish_alloc_permit_cg1(
; CHECK_PTX64_SHARED32: {
; CHECK_PTX64_SHARED32-EMPTY:
; CHECK_PTX64_SHARED32-EMPTY:
; CHECK_PTX64_SHARED32-NEXT: // %bb.0:
; CHECK_PTX64_SHARED32-NEXT: tcgen05.relinquish_alloc_permit.cta_group::1.sync.aligned;
-; CHECK_PTX64_SHARED32-NEXT: tcgen05.relinquish_alloc_permit.cta_group::2.sync.aligned;
; CHECK_PTX64_SHARED32-NEXT: ret;
call void @llvm.nvvm.tcgen05.relinq.alloc.permit.cg1()
+ ret void
+}
+define void @test_tcgen05_relinquish_alloc_permit_cg2() {
+; CHECK_PTX64-LABEL: test_tcgen05_relinquish_alloc_permit_cg2(
+; CHECK_PTX64: {
+; CHECK_PTX64-EMPTY:
+; CHECK_PTX64-EMPTY:
+; CHECK_PTX64-NEXT: // %bb.0:
+; CHECK_PTX64-NEXT: tcgen05.relinquish_alloc_permit.cta_group::2.sync.aligned;
+; CHECK_PTX64-NEXT: ret;
+;
+; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_relinquish_alloc_permit_cg2(
+; CHECK_PTX64_SHARED32: {
+; CHECK_PTX64_SHARED32-EMPTY:
+; CHECK_PTX64_SHARED32-EMPTY:
+; CHECK_PTX64_SHARED32-NEXT: // %bb.0:
+; CHECK_PTX64_SHARED32-NEXT: tcgen05.relinquish_alloc_permit.cta_group::2.sync.aligned;
+; CHECK_PTX64_SHARED32-NEXT: ret;
call void @llvm.nvvm.tcgen05.relinq.alloc.permit.cg2()
ret void
}
diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-commit.ll b/llvm/test/CodeGen/NVPTX/tcgen05-commit.ll
index 7981feb..2e80c4c 100644
--- a/llvm/test/CodeGen/NVPTX/tcgen05-commit.ll
+++ b/llvm/test/CodeGen/NVPTX/tcgen05-commit.ll
@@ -11,57 +11,93 @@ declare void @llvm.nvvm.tcgen05.commit.cg2(ptr %bar_addr)
declare void @llvm.nvvm.tcgen05.commit.shared.cg1(ptr addrspace(3) %bar_addr)
declare void @llvm.nvvm.tcgen05.commit.shared.cg2(ptr addrspace(3) %bar_addr)
-; CHECK-LABEL: test_tcgen05_commit
-define void @test_tcgen05_commit(ptr %bar_addr) {
-; CHECK_PTX64-LABEL: test_tcgen05_commit(
+define void @test_tcgen05_commit_cg1(ptr %bar_addr) {
+; CHECK_PTX64-LABEL: test_tcgen05_commit_cg1(
; CHECK_PTX64: {
; CHECK_PTX64-NEXT: .reg .b64 %rd<2>;
; CHECK_PTX64-EMPTY:
; CHECK_PTX64-NEXT: // %bb.0:
-; CHECK_PTX64-NEXT: ld.param.b64 %rd1, [test_tcgen05_commit_param_0];
+; CHECK_PTX64-NEXT: ld.param.b64 %rd1, [test_tcgen05_commit_cg1_param_0];
; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.b64 [%rd1];
-; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.b64 [%rd1];
; CHECK_PTX64-NEXT: ret;
;
-; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_commit(
+; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_commit_cg1(
; CHECK_PTX64_SHARED32: {
; CHECK_PTX64_SHARED32-NEXT: .reg .b64 %rd<2>;
; CHECK_PTX64_SHARED32-EMPTY:
; CHECK_PTX64_SHARED32-NEXT: // %bb.0:
-; CHECK_PTX64_SHARED32-NEXT: ld.param.b64 %rd1, [test_tcgen05_commit_param_0];
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b64 %rd1, [test_tcgen05_commit_cg1_param_0];
; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.b64 [%rd1];
-; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.b64 [%rd1];
; CHECK_PTX64_SHARED32-NEXT: ret;
call void @llvm.nvvm.tcgen05.commit.cg1(ptr %bar_addr)
+ ret void
+}
+
+define void @test_tcgen05_commit_cg2(ptr %bar_addr) {
+; CHECK_PTX64-LABEL: test_tcgen05_commit_cg2(
+; CHECK_PTX64: {
+; CHECK_PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK_PTX64-EMPTY:
+; CHECK_PTX64-NEXT: // %bb.0:
+; CHECK_PTX64-NEXT: ld.param.b64 %rd1, [test_tcgen05_commit_cg2_param_0];
+; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.b64 [%rd1];
+; CHECK_PTX64-NEXT: ret;
+;
+; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_commit_cg2(
+; CHECK_PTX64_SHARED32: {
+; CHECK_PTX64_SHARED32-NEXT: .reg .b64 %rd<2>;
+; CHECK_PTX64_SHARED32-EMPTY:
+; CHECK_PTX64_SHARED32-NEXT: // %bb.0:
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b64 %rd1, [test_tcgen05_commit_cg2_param_0];
+; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.b64 [%rd1];
+; CHECK_PTX64_SHARED32-NEXT: ret;
call void @llvm.nvvm.tcgen05.commit.cg2(ptr %bar_addr)
ret void
}
-; CHECK-LABEL: test_tcgen05_commit_shared
-define void @test_tcgen05_commit_shared(ptr addrspace(3) %bar_addr) {
-; CHECK_PTX64-LABEL: test_tcgen05_commit_shared(
+define void @test_tcgen05_commit_shared_cg1(ptr addrspace(3) %bar_addr) {
+; CHECK_PTX64-LABEL: test_tcgen05_commit_shared_cg1(
; CHECK_PTX64: {
; CHECK_PTX64-NEXT: .reg .b64 %rd<2>;
; CHECK_PTX64-EMPTY:
; CHECK_PTX64-NEXT: // %bb.0:
-; CHECK_PTX64-NEXT: ld.param.b64 %rd1, [test_tcgen05_commit_shared_param_0];
+; CHECK_PTX64-NEXT: ld.param.b64 %rd1, [test_tcgen05_commit_shared_cg1_param_0];
; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.b64 [%rd1];
-; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.b64 [%rd1];
; CHECK_PTX64-NEXT: ret;
;
-; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_commit_shared(
+; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_commit_shared_cg1(
; CHECK_PTX64_SHARED32: {
; CHECK_PTX64_SHARED32-NEXT: .reg .b32 %r<2>;
; CHECK_PTX64_SHARED32-EMPTY:
; CHECK_PTX64_SHARED32-NEXT: // %bb.0:
-; CHECK_PTX64_SHARED32-NEXT: ld.param.b32 %r1, [test_tcgen05_commit_shared_param_0];
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b32 %r1, [test_tcgen05_commit_shared_cg1_param_0];
; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.b64 [%r1];
-; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.b64 [%r1];
; CHECK_PTX64_SHARED32-NEXT: ret;
call void @llvm.nvvm.tcgen05.commit.shared.cg1(ptr addrspace(3) %bar_addr)
+ ret void
+}
+
+define void @test_tcgen05_commit_shared_cg2(ptr addrspace(3) %bar_addr) {
+; CHECK_PTX64-LABEL: test_tcgen05_commit_shared_cg2(
+; CHECK_PTX64: {
+; CHECK_PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK_PTX64-EMPTY:
+; CHECK_PTX64-NEXT: // %bb.0:
+; CHECK_PTX64-NEXT: ld.param.b64 %rd1, [test_tcgen05_commit_shared_cg2_param_0];
+; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.b64 [%rd1];
+; CHECK_PTX64-NEXT: ret;
+;
+; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_commit_shared_cg2(
+; CHECK_PTX64_SHARED32: {
+; CHECK_PTX64_SHARED32-NEXT: .reg .b32 %r<2>;
+; CHECK_PTX64_SHARED32-EMPTY:
+; CHECK_PTX64_SHARED32-NEXT: // %bb.0:
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b32 %r1, [test_tcgen05_commit_shared_cg2_param_0];
+; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.b64 [%r1];
+; CHECK_PTX64_SHARED32-NEXT: ret;
call void @llvm.nvvm.tcgen05.commit.shared.cg2(ptr addrspace(3) %bar_addr)
ret void
@@ -72,66 +108,106 @@ declare void @llvm.nvvm.tcgen05.commit.mc.cg2(ptr %bar_addr, i16 %cta_mask)
declare void @llvm.nvvm.tcgen05.commit.mc.shared.cg1(ptr addrspace(3) %bar_addr, i16 %cta_mask)
declare void @llvm.nvvm.tcgen05.commit.mc.shared.cg2(ptr addrspace(3) %bar_addr, i16 %cta_mask)
-; CHECK-LABEL: test_tcgen05_commit_mc
-define void @test_tcgen05_commit_mc(ptr %bar_addr, i16 %cta_mask) {
-; CHECK_PTX64-LABEL: test_tcgen05_commit_mc(
+define void @test_tcgen05_commit_mc_cg1(ptr %bar_addr, i16 %cta_mask) {
+; CHECK_PTX64-LABEL: test_tcgen05_commit_mc_cg1(
; CHECK_PTX64: {
; CHECK_PTX64-NEXT: .reg .b16 %rs<2>;
; CHECK_PTX64-NEXT: .reg .b64 %rd<2>;
; CHECK_PTX64-EMPTY:
; CHECK_PTX64-NEXT: // %bb.0:
-; CHECK_PTX64-NEXT: ld.param.b64 %rd1, [test_tcgen05_commit_mc_param_0];
-; CHECK_PTX64-NEXT: ld.param.b16 %rs1, [test_tcgen05_commit_mc_param_1];
+; CHECK_PTX64-NEXT: ld.param.b64 %rd1, [test_tcgen05_commit_mc_cg1_param_0];
+; CHECK_PTX64-NEXT: ld.param.b16 %rs1, [test_tcgen05_commit_mc_cg1_param_1];
; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%rd1], %rs1;
-; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%rd1], %rs1;
; CHECK_PTX64-NEXT: ret;
;
-; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_commit_mc(
+; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_commit_mc_cg1(
; CHECK_PTX64_SHARED32: {
; CHECK_PTX64_SHARED32-NEXT: .reg .b16 %rs<2>;
; CHECK_PTX64_SHARED32-NEXT: .reg .b64 %rd<2>;
; CHECK_PTX64_SHARED32-EMPTY:
; CHECK_PTX64_SHARED32-NEXT: // %bb.0:
-; CHECK_PTX64_SHARED32-NEXT: ld.param.b64 %rd1, [test_tcgen05_commit_mc_param_0];
-; CHECK_PTX64_SHARED32-NEXT: ld.param.b16 %rs1, [test_tcgen05_commit_mc_param_1];
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b64 %rd1, [test_tcgen05_commit_mc_cg1_param_0];
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b16 %rs1, [test_tcgen05_commit_mc_cg1_param_1];
; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%rd1], %rs1;
-; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%rd1], %rs1;
; CHECK_PTX64_SHARED32-NEXT: ret;
call void @llvm.nvvm.tcgen05.commit.mc.cg1(ptr %bar_addr, i16 %cta_mask)
+ ret void
+}
+
+define void @test_tcgen05_commit_mc_cg2(ptr %bar_addr, i16 %cta_mask) {
+; CHECK_PTX64-LABEL: test_tcgen05_commit_mc_cg2(
+; CHECK_PTX64: {
+; CHECK_PTX64-NEXT: .reg .b16 %rs<2>;
+; CHECK_PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK_PTX64-EMPTY:
+; CHECK_PTX64-NEXT: // %bb.0:
+; CHECK_PTX64-NEXT: ld.param.b64 %rd1, [test_tcgen05_commit_mc_cg2_param_0];
+; CHECK_PTX64-NEXT: ld.param.b16 %rs1, [test_tcgen05_commit_mc_cg2_param_1];
+; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%rd1], %rs1;
+; CHECK_PTX64-NEXT: ret;
+;
+; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_commit_mc_cg2(
+; CHECK_PTX64_SHARED32: {
+; CHECK_PTX64_SHARED32-NEXT: .reg .b16 %rs<2>;
+; CHECK_PTX64_SHARED32-NEXT: .reg .b64 %rd<2>;
+; CHECK_PTX64_SHARED32-EMPTY:
+; CHECK_PTX64_SHARED32-NEXT: // %bb.0:
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b64 %rd1, [test_tcgen05_commit_mc_cg2_param_0];
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b16 %rs1, [test_tcgen05_commit_mc_cg2_param_1];
+; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%rd1], %rs1;
+; CHECK_PTX64_SHARED32-NEXT: ret;
call void @llvm.nvvm.tcgen05.commit.mc.cg2(ptr %bar_addr, i16 %cta_mask)
-
ret void
}
-; CHECK-LABEL: test_tcgen05_commit_mc_shared
-define void @test_tcgen05_commit_mc_shared(ptr addrspace(3) %bar_addr, i16 %cta_mask) {
-; CHECK_PTX64-LABEL: test_tcgen05_commit_mc_shared(
+define void @test_tcgen05_commit_mc_shared_cg1(ptr addrspace(3) %bar_addr, i16 %cta_mask) {
+; CHECK_PTX64-LABEL: test_tcgen05_commit_mc_shared_cg1(
; CHECK_PTX64: {
; CHECK_PTX64-NEXT: .reg .b16 %rs<2>;
; CHECK_PTX64-NEXT: .reg .b64 %rd<2>;
; CHECK_PTX64-EMPTY:
; CHECK_PTX64-NEXT: // %bb.0:
-; CHECK_PTX64-NEXT: ld.param.b64 %rd1, [test_tcgen05_commit_mc_shared_param_0];
-; CHECK_PTX64-NEXT: ld.param.b16 %rs1, [test_tcgen05_commit_mc_shared_param_1];
+; CHECK_PTX64-NEXT: ld.param.b64 %rd1, [test_tcgen05_commit_mc_shared_cg1_param_0];
+; CHECK_PTX64-NEXT: ld.param.b16 %rs1, [test_tcgen05_commit_mc_shared_cg1_param_1];
; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%rd1], %rs1;
-; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%rd1], %rs1;
; CHECK_PTX64-NEXT: ret;
;
-; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_commit_mc_shared(
+; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_commit_mc_shared_cg1(
; CHECK_PTX64_SHARED32: {
; CHECK_PTX64_SHARED32-NEXT: .reg .b16 %rs<2>;
; CHECK_PTX64_SHARED32-NEXT: .reg .b32 %r<2>;
; CHECK_PTX64_SHARED32-EMPTY:
; CHECK_PTX64_SHARED32-NEXT: // %bb.0:
-; CHECK_PTX64_SHARED32-NEXT: ld.param.b32 %r1, [test_tcgen05_commit_mc_shared_param_0];
-; CHECK_PTX64_SHARED32-NEXT: ld.param.b16 %rs1, [test_tcgen05_commit_mc_shared_param_1];
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b32 %r1, [test_tcgen05_commit_mc_shared_cg1_param_0];
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b16 %rs1, [test_tcgen05_commit_mc_shared_cg1_param_1];
; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::1.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%r1], %rs1;
-; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%r1], %rs1;
; CHECK_PTX64_SHARED32-NEXT: ret;
call void @llvm.nvvm.tcgen05.commit.mc.shared.cg1(ptr addrspace(3) %bar_addr, i16 %cta_mask)
+ ret void
+}
+
+define void @test_tcgen05_commit_mc_shared_cg2(ptr addrspace(3) %bar_addr, i16 %cta_mask) {
+; CHECK_PTX64-LABEL: test_tcgen05_commit_mc_shared_cg2(
+; CHECK_PTX64: {
+; CHECK_PTX64-NEXT: .reg .b16 %rs<2>;
+; CHECK_PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK_PTX64-EMPTY:
+; CHECK_PTX64-NEXT: // %bb.0:
+; CHECK_PTX64-NEXT: ld.param.b64 %rd1, [test_tcgen05_commit_mc_shared_cg2_param_0];
+; CHECK_PTX64-NEXT: ld.param.b16 %rs1, [test_tcgen05_commit_mc_shared_cg2_param_1];
+; CHECK_PTX64-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%rd1], %rs1;
+; CHECK_PTX64-NEXT: ret;
+;
+; CHECK_PTX64_SHARED32-LABEL: test_tcgen05_commit_mc_shared_cg2(
+; CHECK_PTX64_SHARED32: {
+; CHECK_PTX64_SHARED32-NEXT: .reg .b16 %rs<2>;
+; CHECK_PTX64_SHARED32-NEXT: .reg .b32 %r<2>;
+; CHECK_PTX64_SHARED32-EMPTY:
+; CHECK_PTX64_SHARED32-NEXT: // %bb.0:
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b32 %r1, [test_tcgen05_commit_mc_shared_cg2_param_0];
+; CHECK_PTX64_SHARED32-NEXT: ld.param.b16 %rs1, [test_tcgen05_commit_mc_shared_cg2_param_1];
+; CHECK_PTX64_SHARED32-NEXT: tcgen05.commit.cta_group::2.mbarrier::arrive::one.shared::cluster.multicast::cluster.b64 [%r1], %rs1;
+; CHECK_PTX64_SHARED32-NEXT: ret;
call void @llvm.nvvm.tcgen05.commit.mc.shared.cg2(ptr addrspace(3) %bar_addr, i16 %cta_mask)
-
ret void
}
diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-cp.ll b/llvm/test/CodeGen/NVPTX/tcgen05-cp.ll
index c540f78..817b1d5 100644
--- a/llvm/test/CodeGen/NVPTX/tcgen05-cp.ll
+++ b/llvm/test/CodeGen/NVPTX/tcgen05-cp.ll
@@ -4,346 +4,580 @@
; RUN: %if ptxas-sm_100a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mcpu=sm_100a -mattr=+ptx86 | %ptxas-verify -arch=sm_100a %}
; RUN: %if ptxas-sm_103a && ptxas-isa-8.8 %{ llc < %s -march=nvptx64 -mcpu=sm_103a -mattr=+ptx88 | %ptxas-verify -arch=sm_103a %}
-; CHECK-LABEL: test_tcgen05_cp_64x128_v1
-define void @test_tcgen05_cp_64x128_v1(ptr addrspace(6) %addr, i64 %sdesc) {
-; CHECK-LABEL: test_tcgen05_cp_64x128_v1(
+define void @test_tcgen05_cp_64x128_v1_cg1(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_64x128_v1_cg1(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_64x128_v1_param_0];
-; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_64x128_v1_param_1];
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_64x128_v1_cg1_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_64x128_v1_cg1_param_1];
; CHECK-NEXT: tcgen05.cp.cta_group::1.64x128b.warpx2::02_13 [%r1], %rd1;
-; CHECK-NEXT: tcgen05.cp.cta_group::2.64x128b.warpx2::02_13 [%r1], %rd1;
; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.64x128b_warpx2_02_13.cg1(ptr addrspace(6) %addr, i64 %sdesc)
+
+ ret void
+}
+
+define void @test_tcgen05_cp_64x128_v1_cg2(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_64x128_v1_cg2(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_64x128_v1_cg2_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_64x128_v1_cg2_param_1];
+; CHECK-NEXT: tcgen05.cp.cta_group::2.64x128b.warpx2::02_13 [%r1], %rd1;
+; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.64x128b_warpx2_02_13.cg2(ptr addrspace(6) %addr, i64 %sdesc)
ret void
}
-; CHECK-LABEL: test_tcgen05_cp_64x128_v2
-define void @test_tcgen05_cp_64x128_v2(ptr addrspace(6) %addr, i64 %sdesc) {
-; CHECK-LABEL: test_tcgen05_cp_64x128_v2(
+define void @test_tcgen05_cp_64x128_v2_cg1(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_64x128_v2_cg1(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_64x128_v2_param_0];
-; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_64x128_v2_param_1];
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_64x128_v2_cg1_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_64x128_v2_cg1_param_1];
; CHECK-NEXT: tcgen05.cp.cta_group::1.64x128b.warpx2::01_23 [%r1], %rd1;
-; CHECK-NEXT: tcgen05.cp.cta_group::2.64x128b.warpx2::01_23 [%r1], %rd1;
; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.64x128b_warpx2_01_23.cg1(ptr addrspace(6) %addr, i64 %sdesc)
+
+ ret void
+}
+
+define void @test_tcgen05_cp_64x128_v2_cg2(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_64x128_v2_cg2(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_64x128_v2_cg2_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_64x128_v2_cg2_param_1];
+; CHECK-NEXT: tcgen05.cp.cta_group::2.64x128b.warpx2::01_23 [%r1], %rd1;
+; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.64x128b_warpx2_01_23.cg2(ptr addrspace(6) %addr, i64 %sdesc)
ret void
}
-; CHECK-LABEL: test_tcgen05_cp_32x128
-define void @test_tcgen05_cp_32x128(ptr addrspace(6) %addr, i64 %sdesc) {
-; CHECK-LABEL: test_tcgen05_cp_32x128(
+define void @test_tcgen05_cp_32x128_cg1(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_32x128_cg1(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_32x128_param_0];
-; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_32x128_param_1];
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_32x128_cg1_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_32x128_cg1_param_1];
; CHECK-NEXT: tcgen05.cp.cta_group::1.32x128b.warpx4 [%r1], %rd1;
-; CHECK-NEXT: tcgen05.cp.cta_group::2.32x128b.warpx4 [%r1], %rd1;
; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.32x128b_warpx4.cg1(ptr addrspace(6) %addr, i64 %sdesc)
+
+ ret void
+}
+
+define void @test_tcgen05_cp_32x128_cg2(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_32x128_cg2(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_32x128_cg2_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_32x128_cg2_param_1];
+; CHECK-NEXT: tcgen05.cp.cta_group::2.32x128b.warpx4 [%r1], %rd1;
+; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.32x128b_warpx4.cg2(ptr addrspace(6) %addr, i64 %sdesc)
ret void
}
-; CHECK-LABEL: test_tcgen05_cp_128x128b
-define void @test_tcgen05_cp_128x128b(ptr addrspace(6) %addr, i64 %sdesc) {
-; CHECK-LABEL: test_tcgen05_cp_128x128b(
+define void @test_tcgen05_cp_128x128b_cg1(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_128x128b_cg1(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_128x128b_param_0];
-; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_128x128b_param_1];
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_128x128b_cg1_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_128x128b_cg1_param_1];
; CHECK-NEXT: tcgen05.cp.cta_group::1.128x128b [%r1], %rd1;
-; CHECK-NEXT: tcgen05.cp.cta_group::2.128x128b [%r1], %rd1;
; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.128x128b.cg1(ptr addrspace(6) %addr, i64 %sdesc)
+
+ ret void
+}
+
+define void @test_tcgen05_cp_128x128b_cg2(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_128x128b_cg2(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_128x128b_cg2_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_128x128b_cg2_param_1];
+; CHECK-NEXT: tcgen05.cp.cta_group::2.128x128b [%r1], %rd1;
+; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.128x128b.cg2(ptr addrspace(6) %addr, i64 %sdesc)
ret void
}
-; CHECK-LABEL: test_tcgen05_cp_128x256b
-define void @test_tcgen05_cp_128x256b(ptr addrspace(6) %addr, i64 %sdesc) {
-; CHECK-LABEL: test_tcgen05_cp_128x256b(
+define void @test_tcgen05_cp_128x256b_cg1(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_128x256b_cg1(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_128x256b_param_0];
-; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_128x256b_param_1];
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_128x256b_cg1_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_128x256b_cg1_param_1];
; CHECK-NEXT: tcgen05.cp.cta_group::1.128x256b [%r1], %rd1;
-; CHECK-NEXT: tcgen05.cp.cta_group::2.128x256b [%r1], %rd1;
; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.128x256b.cg1(ptr addrspace(6) %addr, i64 %sdesc)
+
+ ret void
+}
+
+define void @test_tcgen05_cp_128x256b_cg2(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_128x256b_cg2(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_128x256b_cg2_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_128x256b_cg2_param_1];
+; CHECK-NEXT: tcgen05.cp.cta_group::2.128x256b [%r1], %rd1;
+; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.128x256b.cg2(ptr addrspace(6) %addr, i64 %sdesc)
ret void
}
-; CHECK-LABEL: test_tcgen05_cp_4x256b
-define void @test_tcgen05_cp_4x256b(ptr addrspace(6) %addr, i64 %sdesc) {
-; CHECK-LABEL: test_tcgen05_cp_4x256b(
+define void @test_tcgen05_cp_4x256b_cg1(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_4x256b_cg1(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_4x256b_param_0];
-; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_4x256b_param_1];
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_4x256b_cg1_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_4x256b_cg1_param_1];
; CHECK-NEXT: tcgen05.cp.cta_group::1.4x256b [%r1], %rd1;
-; CHECK-NEXT: tcgen05.cp.cta_group::2.4x256b [%r1], %rd1;
; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.4x256b.cg1(ptr addrspace(6) %addr, i64 %sdesc)
+
+ ret void
+}
+
+define void @test_tcgen05_cp_4x256b_cg2(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_4x256b_cg2(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_4x256b_cg2_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_4x256b_cg2_param_1];
+; CHECK-NEXT: tcgen05.cp.cta_group::2.4x256b [%r1], %rd1;
+; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.4x256b.cg2(ptr addrspace(6) %addr, i64 %sdesc)
ret void
}
; With src_fmt as b6x16_p32
-; CHECK-LABEL: test_tcgen05_cp_128x256b_b6x16_p32
-define void @test_tcgen05_cp_128x256b_b6x16_p32(ptr addrspace(6) %addr, i64 %sdesc) {
-; CHECK-LABEL: test_tcgen05_cp_128x256b_b6x16_p32(
+define void @test_tcgen05_cp_128x256b_b6x16_p32_cg1(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_128x256b_b6x16_p32_cg1(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_128x256b_b6x16_p32_param_0];
-; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_128x256b_b6x16_p32_param_1];
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_128x256b_b6x16_p32_cg1_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_128x256b_b6x16_p32_cg1_param_1];
; CHECK-NEXT: tcgen05.cp.cta_group::1.128x256b.b8x16.b6x16_p32 [%r1], %rd1;
-; CHECK-NEXT: tcgen05.cp.cta_group::2.128x256b.b8x16.b6x16_p32 [%r1], %rd1;
; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.128x256b.b6x16_p32.cg1(ptr addrspace(6) %addr, i64 %sdesc)
+
+ ret void
+}
+
+define void @test_tcgen05_cp_128x256b_b6x16_p32_cg2(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_128x256b_b6x16_p32_cg2(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_128x256b_b6x16_p32_cg2_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_128x256b_b6x16_p32_cg2_param_1];
+; CHECK-NEXT: tcgen05.cp.cta_group::2.128x256b.b8x16.b6x16_p32 [%r1], %rd1;
+; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.128x256b.b6x16_p32.cg2(ptr addrspace(6) %addr, i64 %sdesc)
ret void
}
-; CHECK-LABEL: test_tcgen05_cp_4x256b_b6x16_p32
-define void @test_tcgen05_cp_4x256b_b6x16_p32(ptr addrspace(6) %addr, i64 %sdesc) {
-; CHECK-LABEL: test_tcgen05_cp_4x256b_b6x16_p32(
+define void @test_tcgen05_cp_4x256b_b6x16_p32_cg1(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_4x256b_b6x16_p32_cg1(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_4x256b_b6x16_p32_param_0];
-; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_4x256b_b6x16_p32_param_1];
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_4x256b_b6x16_p32_cg1_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_4x256b_b6x16_p32_cg1_param_1];
; CHECK-NEXT: tcgen05.cp.cta_group::1.4x256b.b8x16.b6x16_p32 [%r1], %rd1;
-; CHECK-NEXT: tcgen05.cp.cta_group::2.4x256b.b8x16.b6x16_p32 [%r1], %rd1;
; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.4x256b.b6x16_p32.cg1(ptr addrspace(6) %addr, i64 %sdesc)
+
+ ret void
+}
+
+define void @test_tcgen05_cp_4x256b_b6x16_p32_cg2(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_4x256b_b6x16_p32_cg2(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_4x256b_b6x16_p32_cg2_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_4x256b_b6x16_p32_cg2_param_1];
+; CHECK-NEXT: tcgen05.cp.cta_group::2.4x256b.b8x16.b6x16_p32 [%r1], %rd1;
+; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.4x256b.b6x16_p32.cg2(ptr addrspace(6) %addr, i64 %sdesc)
ret void
}
-; CHECK-LABEL: test_tcgen05_cp_128x128b_b6x16_p32
-define void @test_tcgen05_cp_128x128b_b6x16_p32(ptr addrspace(6) %addr, i64 %sdesc) {
-; CHECK-LABEL: test_tcgen05_cp_128x128b_b6x16_p32(
+define void @test_tcgen05_cp_128x128b_b6x16_p32_cg1(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_128x128b_b6x16_p32_cg1(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_128x128b_b6x16_p32_param_0];
-; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_128x128b_b6x16_p32_param_1];
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_128x128b_b6x16_p32_cg1_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_128x128b_b6x16_p32_cg1_param_1];
; CHECK-NEXT: tcgen05.cp.cta_group::1.128x128b.b8x16.b6x16_p32 [%r1], %rd1;
-; CHECK-NEXT: tcgen05.cp.cta_group::2.128x128b.b8x16.b6x16_p32 [%r1], %rd1;
; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.128x128b.b6x16_p32.cg1(ptr addrspace(6) %addr, i64 %sdesc)
+
+ ret void
+}
+
+define void @test_tcgen05_cp_128x128b_b6x16_p32_cg2(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_128x128b_b6x16_p32_cg2(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_128x128b_b6x16_p32_cg2_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_128x128b_b6x16_p32_cg2_param_1];
+; CHECK-NEXT: tcgen05.cp.cta_group::2.128x128b.b8x16.b6x16_p32 [%r1], %rd1;
+; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.128x128b.b6x16_p32.cg2(ptr addrspace(6) %addr, i64 %sdesc)
ret void
}
-; CHECK-LABEL: test_tcgen05_cp_64x128_v1_b6x16_p32
-define void @test_tcgen05_cp_64x128_v1_b6x16_p32(ptr addrspace(6) %addr, i64 %sdesc) {
-; CHECK-LABEL: test_tcgen05_cp_64x128_v1_b6x16_p32(
+define void @test_tcgen05_cp_64x128_v1_b6x16_p32_cg1(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_64x128_v1_b6x16_p32_cg1(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_64x128_v1_b6x16_p32_param_0];
-; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_64x128_v1_b6x16_p32_param_1];
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_64x128_v1_b6x16_p32_cg1_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_64x128_v1_b6x16_p32_cg1_param_1];
; CHECK-NEXT: tcgen05.cp.cta_group::1.64x128b.warpx2::02_13.b8x16.b6x16_p32 [%r1], %rd1;
-; CHECK-NEXT: tcgen05.cp.cta_group::2.64x128b.warpx2::02_13.b8x16.b6x16_p32 [%r1], %rd1;
; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.64x128b_warpx2_02_13.b6x16_p32.cg1(ptr addrspace(6) %addr, i64 %sdesc)
+
+ ret void
+}
+
+define void @test_tcgen05_cp_64x128_v1_b6x16_p32_cg2(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_64x128_v1_b6x16_p32_cg2(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_64x128_v1_b6x16_p32_cg2_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_64x128_v1_b6x16_p32_cg2_param_1];
+; CHECK-NEXT: tcgen05.cp.cta_group::2.64x128b.warpx2::02_13.b8x16.b6x16_p32 [%r1], %rd1;
+; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.64x128b_warpx2_02_13.b6x16_p32.cg2(ptr addrspace(6) %addr, i64 %sdesc)
ret void
}
-; CHECK-LABEL: test_tcgen05_cp_64x128_v2_b6x16_p32
-define void @test_tcgen05_cp_64x128_v2_b6x16_p32(ptr addrspace(6) %addr, i64 %sdesc) {
-; CHECK-LABEL: test_tcgen05_cp_64x128_v2_b6x16_p32(
+define void @test_tcgen05_cp_64x128_v2_b6x16_p32_cg1(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_64x128_v2_b6x16_p32_cg1(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_64x128_v2_b6x16_p32_param_0];
-; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_64x128_v2_b6x16_p32_param_1];
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_64x128_v2_b6x16_p32_cg1_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_64x128_v2_b6x16_p32_cg1_param_1];
; CHECK-NEXT: tcgen05.cp.cta_group::1.64x128b.warpx2::01_23.b8x16.b6x16_p32 [%r1], %rd1;
-; CHECK-NEXT: tcgen05.cp.cta_group::2.64x128b.warpx2::01_23.b8x16.b6x16_p32 [%r1], %rd1;
; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.64x128b_warpx2_01_23.b6x16_p32.cg1(ptr addrspace(6) %addr, i64 %sdesc)
+
+ ret void
+}
+
+define void @test_tcgen05_cp_64x128_v2_b6x16_p32_cg2(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_64x128_v2_b6x16_p32_cg2(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_64x128_v2_b6x16_p32_cg2_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_64x128_v2_b6x16_p32_cg2_param_1];
+; CHECK-NEXT: tcgen05.cp.cta_group::2.64x128b.warpx2::01_23.b8x16.b6x16_p32 [%r1], %rd1;
+; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.64x128b_warpx2_01_23.b6x16_p32.cg2(ptr addrspace(6) %addr, i64 %sdesc)
ret void
}
-; CHECK-LABEL: test_tcgen05_cp_32x128_b6x16_p32
-define void @test_tcgen05_cp_32x128_b6x16_p32(ptr addrspace(6) %addr, i64 %sdesc) {
-; CHECK-LABEL: test_tcgen05_cp_32x128_b6x16_p32(
+define void @test_tcgen05_cp_32x128_b6x16_p32_cg1(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_32x128_b6x16_p32_cg1(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_32x128_b6x16_p32_param_0];
-; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_32x128_b6x16_p32_param_1];
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_32x128_b6x16_p32_cg1_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_32x128_b6x16_p32_cg1_param_1];
; CHECK-NEXT: tcgen05.cp.cta_group::1.32x128b.warpx4.b8x16.b6x16_p32 [%r1], %rd1;
-; CHECK-NEXT: tcgen05.cp.cta_group::2.32x128b.warpx4.b8x16.b6x16_p32 [%r1], %rd1;
; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.32x128b_warpx4.b6x16_p32.cg1(ptr addrspace(6) %addr, i64 %sdesc)
+
+ ret void
+}
+
+define void @test_tcgen05_cp_32x128_b6x16_p32_cg2(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_32x128_b6x16_p32_cg2(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_32x128_b6x16_p32_cg2_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_32x128_b6x16_p32_cg2_param_1];
+; CHECK-NEXT: tcgen05.cp.cta_group::2.32x128b.warpx4.b8x16.b6x16_p32 [%r1], %rd1;
+; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.32x128b_warpx4.b6x16_p32.cg2(ptr addrspace(6) %addr, i64 %sdesc)
ret void
}
; With src_fmt as b4x16_p64
-; CHECK-LABEL: test_tcgen05_cp_128x256b_b4x16_p64
-define void @test_tcgen05_cp_128x256b_b4x16_p64(ptr addrspace(6) %addr, i64 %sdesc) {
-; CHECK-LABEL: test_tcgen05_cp_128x256b_b4x16_p64(
+define void @test_tcgen05_cp_128x256b_b4x16_p64_cg1(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_128x256b_b4x16_p64_cg1(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_128x256b_b4x16_p64_param_0];
-; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_128x256b_b4x16_p64_param_1];
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_128x256b_b4x16_p64_cg1_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_128x256b_b4x16_p64_cg1_param_1];
; CHECK-NEXT: tcgen05.cp.cta_group::1.128x256b.b8x16.b4x16_p64 [%r1], %rd1;
-; CHECK-NEXT: tcgen05.cp.cta_group::2.128x256b.b8x16.b4x16_p64 [%r1], %rd1;
; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.128x256b.b4x16_p64.cg1(ptr addrspace(6) %addr, i64 %sdesc)
+
+ ret void
+}
+
+define void @test_tcgen05_cp_128x256b_b4x16_p64_cg2(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_128x256b_b4x16_p64_cg2(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_128x256b_b4x16_p64_cg2_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_128x256b_b4x16_p64_cg2_param_1];
+; CHECK-NEXT: tcgen05.cp.cta_group::2.128x256b.b8x16.b4x16_p64 [%r1], %rd1;
+; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.128x256b.b4x16_p64.cg2(ptr addrspace(6) %addr, i64 %sdesc)
ret void
}
-; CHECK-LABEL: test_tcgen05_cp_4x256b_b4x16_p64
-define void @test_tcgen05_cp_4x256b_b4x16_p64(ptr addrspace(6) %addr, i64 %sdesc) {
-; CHECK-LABEL: test_tcgen05_cp_4x256b_b4x16_p64(
+define void @test_tcgen05_cp_4x256b_b4x16_p64_cg1(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_4x256b_b4x16_p64_cg1(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_4x256b_b4x16_p64_param_0];
-; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_4x256b_b4x16_p64_param_1];
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_4x256b_b4x16_p64_cg1_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_4x256b_b4x16_p64_cg1_param_1];
; CHECK-NEXT: tcgen05.cp.cta_group::1.4x256b.b8x16.b4x16_p64 [%r1], %rd1;
-; CHECK-NEXT: tcgen05.cp.cta_group::2.4x256b.b8x16.b4x16_p64 [%r1], %rd1;
; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.4x256b.b4x16_p64.cg1(ptr addrspace(6) %addr, i64 %sdesc)
+
+ ret void
+}
+
+define void @test_tcgen05_cp_4x256b_b4x16_p64_cg2(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_4x256b_b4x16_p64_cg2(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_4x256b_b4x16_p64_cg2_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_4x256b_b4x16_p64_cg2_param_1];
+; CHECK-NEXT: tcgen05.cp.cta_group::2.4x256b.b8x16.b4x16_p64 [%r1], %rd1;
+; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.4x256b.b4x16_p64.cg2(ptr addrspace(6) %addr, i64 %sdesc)
ret void
}
-; CHECK-LABEL: test_tcgen05_cp_128x128b_b4x16_p64
-define void @test_tcgen05_cp_128x128b_b4x16_p64(ptr addrspace(6) %addr, i64 %sdesc) {
-; CHECK-LABEL: test_tcgen05_cp_128x128b_b4x16_p64(
+define void @test_tcgen05_cp_128x128b_b4x16_p64_cg1(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_128x128b_b4x16_p64_cg1(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_128x128b_b4x16_p64_param_0];
-; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_128x128b_b4x16_p64_param_1];
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_128x128b_b4x16_p64_cg1_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_128x128b_b4x16_p64_cg1_param_1];
; CHECK-NEXT: tcgen05.cp.cta_group::1.128x128b.b8x16.b4x16_p64 [%r1], %rd1;
-; CHECK-NEXT: tcgen05.cp.cta_group::2.128x128b.b8x16.b4x16_p64 [%r1], %rd1;
; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.128x128b.b4x16_p64.cg1(ptr addrspace(6) %addr, i64 %sdesc)
+
+ ret void
+}
+
+define void @test_tcgen05_cp_128x128b_b4x16_p64_cg2(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_128x128b_b4x16_p64_cg2(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_128x128b_b4x16_p64_cg2_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_128x128b_b4x16_p64_cg2_param_1];
+; CHECK-NEXT: tcgen05.cp.cta_group::2.128x128b.b8x16.b4x16_p64 [%r1], %rd1;
+; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.128x128b.b4x16_p64.cg2(ptr addrspace(6) %addr, i64 %sdesc)
ret void
}
-; CHECK-LABEL: test_tcgen05_cp_64x128_v1_b4x16_p64
-define void @test_tcgen05_cp_64x128_v1_b4x16_p64(ptr addrspace(6) %addr, i64 %sdesc) {
-; CHECK-LABEL: test_tcgen05_cp_64x128_v1_b4x16_p64(
+define void @test_tcgen05_cp_64x128_v1_b4x16_p64_cg1(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_64x128_v1_b4x16_p64_cg1(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_64x128_v1_b4x16_p64_param_0];
-; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_64x128_v1_b4x16_p64_param_1];
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_64x128_v1_b4x16_p64_cg1_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_64x128_v1_b4x16_p64_cg1_param_1];
; CHECK-NEXT: tcgen05.cp.cta_group::1.64x128b.warpx2::02_13.b8x16.b4x16_p64 [%r1], %rd1;
-; CHECK-NEXT: tcgen05.cp.cta_group::2.64x128b.warpx2::02_13.b8x16.b4x16_p64 [%r1], %rd1;
; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.64x128b_warpx2_02_13.b4x16_p64.cg1(ptr addrspace(6) %addr, i64 %sdesc)
+
+ ret void
+}
+
+define void @test_tcgen05_cp_64x128_v1_b4x16_p64_cg2(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_64x128_v1_b4x16_p64_cg2(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_64x128_v1_b4x16_p64_cg2_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_64x128_v1_b4x16_p64_cg2_param_1];
+; CHECK-NEXT: tcgen05.cp.cta_group::2.64x128b.warpx2::02_13.b8x16.b4x16_p64 [%r1], %rd1;
+; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.64x128b_warpx2_02_13.b4x16_p64.cg2(ptr addrspace(6) %addr, i64 %sdesc)
ret void
}
-; CHECK-LABEL: test_tcgen05_cp_64x128_v2_b4x16_p64
-define void @test_tcgen05_cp_64x128_v2_b4x16_p64(ptr addrspace(6) %addr, i64 %sdesc) {
-; CHECK-LABEL: test_tcgen05_cp_64x128_v2_b4x16_p64(
+define void @test_tcgen05_cp_64x128_v2_b4x16_p64_cg1(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_64x128_v2_b4x16_p64_cg1(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_64x128_v2_b4x16_p64_param_0];
-; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_64x128_v2_b4x16_p64_param_1];
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_64x128_v2_b4x16_p64_cg1_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_64x128_v2_b4x16_p64_cg1_param_1];
; CHECK-NEXT: tcgen05.cp.cta_group::1.64x128b.warpx2::01_23.b8x16.b4x16_p64 [%r1], %rd1;
-; CHECK-NEXT: tcgen05.cp.cta_group::2.64x128b.warpx2::01_23.b8x16.b4x16_p64 [%r1], %rd1;
; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.64x128b_warpx2_01_23.b4x16_p64.cg1(ptr addrspace(6) %addr, i64 %sdesc)
+
+ ret void
+}
+
+define void @test_tcgen05_cp_64x128_v2_b4x16_p64_cg2(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_64x128_v2_b4x16_p64_cg2(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_64x128_v2_b4x16_p64_cg2_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_64x128_v2_b4x16_p64_cg2_param_1];
+; CHECK-NEXT: tcgen05.cp.cta_group::2.64x128b.warpx2::01_23.b8x16.b4x16_p64 [%r1], %rd1;
+; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.64x128b_warpx2_01_23.b4x16_p64.cg2(ptr addrspace(6) %addr, i64 %sdesc)
ret void
}
-; CHECK-LABEL: test_tcgen05_cp_32x128_b4x16_p64
-define void @test_tcgen05_cp_32x128_b4x16_p64(ptr addrspace(6) %addr, i64 %sdesc) {
-; CHECK-LABEL: test_tcgen05_cp_32x128_b4x16_p64(
+define void @test_tcgen05_cp_32x128_b4x16_p64_cg1(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_32x128_b4x16_p64_cg1(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-NEXT: .reg .b64 %rd<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_32x128_b4x16_p64_param_0];
-; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_32x128_b4x16_p64_param_1];
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_32x128_b4x16_p64_cg1_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_32x128_b4x16_p64_cg1_param_1];
; CHECK-NEXT: tcgen05.cp.cta_group::1.32x128b.warpx4.b8x16.b4x16_p64 [%r1], %rd1;
-; CHECK-NEXT: tcgen05.cp.cta_group::2.32x128b.warpx4.b8x16.b4x16_p64 [%r1], %rd1;
; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.32x128b_warpx4.b4x16_p64.cg1(ptr addrspace(6) %addr, i64 %sdesc)
+
+ ret void
+}
+
+define void @test_tcgen05_cp_32x128_b4x16_p64_cg2(ptr addrspace(6) %addr, i64 %sdesc) {
+; CHECK-LABEL: test_tcgen05_cp_32x128_b4x16_p64_cg2(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_cp_32x128_b4x16_p64_cg2_param_0];
+; CHECK-NEXT: ld.param.b64 %rd1, [test_tcgen05_cp_32x128_b4x16_p64_cg2_param_1];
+; CHECK-NEXT: tcgen05.cp.cta_group::2.32x128b.warpx4.b8x16.b4x16_p64 [%r1], %rd1;
+; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.cp.32x128b_warpx4.b4x16_p64.cg2(ptr addrspace(6) %addr, i64 %sdesc)
ret void
diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-shift.ll b/llvm/test/CodeGen/NVPTX/tcgen05-shift.ll
index 8ca6a2a0..bf2adac 100644
--- a/llvm/test/CodeGen/NVPTX/tcgen05-shift.ll
+++ b/llvm/test/CodeGen/NVPTX/tcgen05-shift.ll
@@ -7,18 +7,29 @@
declare void @llvm.nvvm.tcgen05.shift.down.cg1(ptr addrspace(6) %tmem_addr)
declare void @llvm.nvvm.tcgen05.shift.down.cg2(ptr addrspace(6) %tmem_addr)
-; CHECK-LABEL: test_tcgen05_shift
-define void @test_tcgen05_shift(ptr addrspace(6) %tmem_addr) {
-; CHECK-LABEL: test_tcgen05_shift(
+define void @test_tcgen05_shift_cg1(ptr addrspace(6) %tmem_addr) {
+; CHECK-LABEL: test_tcgen05_shift_cg1(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_shift_param_0];
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_shift_cg1_param_0];
; CHECK-NEXT: tcgen05.shift.cta_group::1.down [%r1];
-; CHECK-NEXT: tcgen05.shift.cta_group::2.down [%r1];
; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.shift.down.cg1(ptr addrspace(6) %tmem_addr)
+
+ ret void
+}
+
+define void @test_tcgen05_shift_cg2(ptr addrspace(6) %tmem_addr) {
+; CHECK-LABEL: test_tcgen05_shift_cg2(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b32 %r1, [test_tcgen05_shift_cg2_param_0];
+; CHECK-NEXT: tcgen05.shift.cta_group::2.down [%r1];
+; CHECK-NEXT: ret;
call void @llvm.nvvm.tcgen05.shift.down.cg2(ptr addrspace(6) %tmem_addr)
ret void
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll
index 4b1359e..73b0d3a 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/vec-ret.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+zvfbfmin,+zvfh -global-isel -stop-after=irtranslator \
+; RUN: llc -mtriple=riscv32 -mattr=+v,+zvfbfmin,+zvfhmin -global-isel -stop-after=irtranslator \
; RUN: -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32 %s
-; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfbfmin,+zvfh -global-isel -stop-after=irtranslator \
+; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfbfmin,+zvfhmin -global-isel -stop-after=irtranslator \
; RUN: -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64 %s
; ==========================================================================
diff --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll
index 911692e..f960bc1 100644
--- a/llvm/test/CodeGen/RISCV/double-arith.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith.ll
@@ -305,9 +305,6 @@ define i32 @fneg_d(double %a, double %b) nounwind {
}
define double @fsgnjn_d(double %a, double %b) nounwind {
-; TODO: fsgnjn.s isn't selected on RV64 because DAGCombiner::visitBITCAST will
-; convert (bitconvert (fneg x)) to a xor.
-;
; CHECKIFD-LABEL: fsgnjn_d:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fsgnjn.d fa0, fa0, fa1
diff --git a/llvm/test/CodeGen/RISCV/rv64zbkb.ll b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
index 4537d18..b2ad8d7 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbkb.ll
@@ -441,7 +441,7 @@ define void @pack_lo_packh_hi_packh_2(i8 zeroext %0, i8 zeroext %1, i8 zeroext %
; RV64ZBKB-LABEL: pack_lo_packh_hi_packh_2:
; RV64ZBKB: # %bb.0:
; RV64ZBKB-NEXT: packh a0, a0, a1
-; RV64ZBKB-NEXT: packh a1, a3, a2
+; RV64ZBKB-NEXT: packh a1, a2, a3
; RV64ZBKB-NEXT: packw a0, a0, a1
; RV64ZBKB-NEXT: sw a0, 0(a4)
; RV64ZBKB-NEXT: ret
@@ -477,7 +477,7 @@ define void @pack_lo_packh_hi_packh_3(i8 %0, i8 %1, i8 %2, i8 %3, ptr %p) nounwi
; RV64ZBKB-LABEL: pack_lo_packh_hi_packh_3:
; RV64ZBKB: # %bb.0:
; RV64ZBKB-NEXT: packh a0, a0, a1
-; RV64ZBKB-NEXT: packh a1, a3, a2
+; RV64ZBKB-NEXT: packh a1, a2, a3
; RV64ZBKB-NEXT: packw a0, a0, a1
; RV64ZBKB-NEXT: sw a0, 0(a4)
; RV64ZBKB-NEXT: ret
@@ -509,7 +509,7 @@ define i32 @pack_lo_packh_hi_packh_4(i8 zeroext %0, i8 zeroext %1, i8 zeroext %2
; RV64ZBKB-LABEL: pack_lo_packh_hi_packh_4:
; RV64ZBKB: # %bb.0:
; RV64ZBKB-NEXT: packh a0, a0, a1
-; RV64ZBKB-NEXT: packh a1, a3, a2
+; RV64ZBKB-NEXT: packh a1, a2, a3
; RV64ZBKB-NEXT: packw a0, a0, a1
; RV64ZBKB-NEXT: ret
%a = zext i8 %0 to i32
diff --git a/llvm/test/CodeGen/X86/fmaxnum.ll b/llvm/test/CodeGen/X86/fmaxnum.ll
index d6252cc..150bef0 100644
--- a/llvm/test/CodeGen/X86/fmaxnum.ll
+++ b/llvm/test/CodeGen/X86/fmaxnum.ll
@@ -645,11 +645,47 @@ define float @test_maxnum_const_op2(float %x) {
ret float %r
}
-define float @test_maxnum_const_nan(float %x) {
-; CHECK-LABEL: test_maxnum_const_nan:
-; CHECK: # %bb.0:
-; CHECK-NEXT: retq
- %r = call float @llvm.maxnum.f32(float %x, float 0x7fff000000000000)
+define float @test_maxnum_const_nan(float %x, float %y) {
+; SSE-LABEL: test_maxnum_const_nan:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_maxnum_const_nan:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps %xmm1, %xmm0
+; AVX-NEXT: retq
+ %r = call float @llvm.maxnum.f32(float %y, float 0x7fff000000000000)
+ ret float %r
+}
+
+; nnan maxnum(Y, -inf) -> Y
+define float @test_maxnum_neg_inf_nnan(float %x, float %y) nounwind {
+; SSE-LABEL: test_maxnum_neg_inf_nnan:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_maxnum_neg_inf_nnan:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps %xmm1, %xmm0
+; AVX-NEXT: retq
+ %r = call nnan float @llvm.maxnum.f32(float %y, float 0xfff0000000000000)
+ ret float %r
+}
+
+; Test SNaN quieting
+define float @test_maxnum_snan(float %x) {
+; SSE-LABEL: test_maxnum_snan:
+; SSE: # %bb.0:
+; SSE-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_maxnum_snan:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: retq
+ %r = call float @llvm.maxnum.f32(float 0x7ff4000000000000, float %x)
ret float %r
}
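
The nnan folds above work because -inf is the identity element of maxnum
(symmetrically, +inf is the identity of minnum, exercised in fminnum.ll
further down). A quick C analogue, assuming libm's fmax/fmin share
llvm.maxnum/llvm.minnum semantics for non-NaN inputs:

#include <math.h>
#include <stdio.h>

int main(void) {
  /* For any non-NaN y, fmax(y, -inf) == y and fmin(y, +inf) == y, which
     is why the nnan calls in these tests fold to their %y operand. */
  double y = 1.5;
  printf("%g %g\n", fmax(y, -INFINITY), fmin(y, INFINITY));  /* 1.5 1.5 */
  return 0;
}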
diff --git a/llvm/test/CodeGen/X86/fminimum-fmaximum.ll b/llvm/test/CodeGen/X86/fminimum-fmaximum.ll
index 864c233..06515e4 100644
--- a/llvm/test/CodeGen/X86/fminimum-fmaximum.ll
+++ b/llvm/test/CodeGen/X86/fminimum-fmaximum.ll
@@ -2649,3 +2649,102 @@ define <4 x bfloat> @test_fmaximum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) {
%r = call <4 x bfloat> @llvm.maximum.v4bf16(<4 x bfloat> %x, <4 x bfloat> %y)
ret <4 x bfloat> %r
}
+
+; nnan minimum(Y, +inf) -> Y
+define float @test_fminimum_inf_nnan(float %x, float %y) nounwind {
+; SSE2-LABEL: test_fminimum_inf_nnan:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fminimum_inf_nnan:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps %xmm1, %xmm0
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fminimum_inf_nnan:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vmovaps %xmm1, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fminimum_inf_nnan:
+; X86: # %bb.0:
+; X86-NEXT: flds {{[0-9]+}}(%esp)
+; X86-NEXT: retl
+ %1 = call nnan float @llvm.minimum.f32(float %y, float 0x7ff0000000000000)
+ ret float %1
+}
+
+; nnan maximum(Y, -inf) -> Y
+define float @test_fmaximum_neg_inf_nnan(float %x, float %y) nounwind {
+; SSE2-LABEL: test_fmaximum_neg_inf_nnan:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fmaximum_neg_inf_nnan:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps %xmm1, %xmm0
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fmaximum_neg_inf_nnan:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vmovaps %xmm1, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fmaximum_neg_inf_nnan:
+; X86: # %bb.0:
+; X86-NEXT: flds {{[0-9]+}}(%esp)
+; X86-NEXT: retl
+ %1 = call nnan float @llvm.maximum.f32(float %y, float 0xfff0000000000000)
+ ret float %1
+}
+
+; Test SNaN quieting
+define float @test_fmaximum_snan(float %x) {
+; SSE2-LABEL: test_fmaximum_snan:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fmaximum_snan:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fmaximum_snan:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vmovss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fmaximum_snan:
+; X86: # %bb.0:
+; X86-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; X86-NEXT: retl
+ %1 = tail call float @llvm.maximum.f32(float 0x7ff4000000000000, float %x)
+ ret float %1
+}
+
+define float @test_fminimum_snan(float %x) {
+; SSE2-LABEL: test_fminimum_snan:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fminimum_snan:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fminimum_snan:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vmovss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fminimum_snan:
+; X86: # %bb.0:
+; X86-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; X86-NEXT: retl
+ %1 = tail call float @llvm.minimum.f32(float 0x7ff4000000000000, float %x)
+ ret float %1
+}
diff --git a/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll b/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll
index c66473e..0fe107c 100644
--- a/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll
+++ b/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll
@@ -2479,3 +2479,102 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n
%r = call <4 x bfloat> @llvm.maximumnum.v4bf16(<4 x bfloat> %x, <4 x bfloat> %y)
ret <4 x bfloat> %r
}
+
+; nnan minimumnum(Y, +inf) -> Y
+define float @test_fminimumnum_inf_nnan(float %x, float %y) nounwind {
+; SSE2-LABEL: test_fminimumnum_inf_nnan:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fminimumnum_inf_nnan:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps %xmm1, %xmm0
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fminimumnum_inf_nnan:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vmovaps %xmm1, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fminimumnum_inf_nnan:
+; X86: # %bb.0:
+; X86-NEXT: flds {{[0-9]+}}(%esp)
+; X86-NEXT: retl
+ %1 = call nnan float @llvm.minimumnum.f32(float %y, float 0x7ff0000000000000)
+ ret float %1
+}
+
+; nnan maximumnum(Y, -inf) -> Y
+define float @test_fmaximumnum_neg_inf_nnan(float %x, float %y) nounwind {
+; SSE2-LABEL: test_fmaximumnum_neg_inf_nnan:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fmaximumnum_neg_inf_nnan:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps %xmm1, %xmm0
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fmaximumnum_neg_inf_nnan:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vmovaps %xmm1, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fmaximumnum_neg_inf_nnan:
+; X86: # %bb.0:
+; X86-NEXT: flds {{[0-9]+}}(%esp)
+; X86-NEXT: retl
+ %1 = call nnan float @llvm.maximumnum.f32(float %y, float 0xfff0000000000000)
+ ret float %1
+}
+
+; Test that we propagate the non-NaN arg, even if one arg is an SNaN
+define float @test_fmaximumnum_snan(float %x, float %y) {
+; SSE2-LABEL: test_fmaximumnum_snan:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fmaximumnum_snan:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps %xmm1, %xmm0
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fmaximumnum_snan:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vmovaps %xmm1, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fmaximumnum_snan:
+; X86: # %bb.0:
+; X86-NEXT: flds {{[0-9]+}}(%esp)
+; X86-NEXT: retl
+ %1 = tail call float @llvm.maximumnum.f32(float 0x7ff4000000000000, float %y)
+ ret float %1
+}
+
+define float @test_fminimumnum_snan(float %x, float %y) {
+; SSE2-LABEL: test_fminimumnum_snan:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movaps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; AVX-LABEL: test_fminimumnum_snan:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps %xmm1, %xmm0
+; AVX-NEXT: retq
+;
+; AVX10_2-LABEL: test_fminimumnum_snan:
+; AVX10_2: # %bb.0:
+; AVX10_2-NEXT: vmovaps %xmm1, %xmm0
+; AVX10_2-NEXT: retq
+;
+; X86-LABEL: test_fminimumnum_snan:
+; X86: # %bb.0:
+; X86-NEXT: flds {{[0-9]+}}(%esp)
+; X86-NEXT: retl
+ %1 = tail call float @llvm.minimumnum.f32(float 0x7ff4000000000000, float %y)
+ ret float %1
+}
diff --git a/llvm/test/CodeGen/X86/fminnum.ll b/llvm/test/CodeGen/X86/fminnum.ll
index 0ef8fde..4aa1a61 100644
--- a/llvm/test/CodeGen/X86/fminnum.ll
+++ b/llvm/test/CodeGen/X86/fminnum.ll
@@ -645,11 +645,47 @@ define float @test_minnum_const_op2(float %x) {
ret float %r
}
-define float @test_minnum_const_nan(float %x) {
-; CHECK-LABEL: test_minnum_const_nan:
-; CHECK: # %bb.0:
-; CHECK-NEXT: retq
- %r = call float @llvm.minnum.f32(float %x, float 0x7fff000000000000)
+define float @test_minnum_const_nan(float %x, float %y) {
+; SSE-LABEL: test_minnum_const_nan:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_minnum_const_nan:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps %xmm1, %xmm0
+; AVX-NEXT: retq
+ %r = call float @llvm.minnum.f32(float %y, float 0x7fff000000000000)
+ ret float %r
+}
+
+; nnan minnum(Y, +inf) -> Y
+define float @test_minnum_inf_nnan(float %x, float %y) nounwind {
+; SSE-LABEL: test_minnum_inf_nnan:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_minnum_inf_nnan:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps %xmm1, %xmm0
+; AVX-NEXT: retq
+ %r = call nnan float @llvm.minnum.f32(float %y, float 0x7ff0000000000000)
+ ret float %r
+}
+
+; Test SNaN quieting
+define float @test_minnum_snan(float %x) {
+; SSE-LABEL: test_minnum_snan:
+; SSE: # %bb.0:
+; SSE-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_minnum_snan:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: retq
+ %r = call float @llvm.minnum.f32(float 0x7ff4000000000000, float %x)
ret float %r
}
diff --git a/llvm/test/CodeGen/X86/pgo-profile-o0.ll b/llvm/test/CodeGen/X86/pgo-profile-o0.ll
new file mode 100644
index 0000000..f9704fc
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pgo-profile-o0.ll
@@ -0,0 +1,49 @@
+; RUN: llc -mtriple=x86_64-- -O0 -pgo-kind=pgo-sample-use-pipeline -debug-pass=Structure %s -o /dev/null 2>&1 | FileCheck %s --check-prefix=PASSES
+; RUN: llc -mtriple=x86_64-- -O0 -pgo-kind=pgo-sample-use-pipeline -debug-only=branch-prob %s -o /dev/null 2>&1 | FileCheck %s --check-prefix=BRANCH_PROB
+; RUN: llc -mtriple=x86_64-- -O0 -pgo-kind=pgo-sample-use-pipeline -stop-after=finalize-isel %s -o - | FileCheck %s --check-prefix=MIR
+
+; REQUIRES: asserts
+
+; This test verifies that PGO profile information (branch weights) is preserved
+; during instruction selection at -O0.
+
+; Test function with explicit branch weights from PGO.
+define i32 @test_pgo_preservation(i32 %x) !prof !15 {
+entry:
+ %cmp = icmp sgt i32 %x, 10
+ ; This branch has bias: 97 taken vs 3 not taken
+ br i1 %cmp, label %if.then, label %if.else, !prof !16
+
+if.then:
+ ; Hot path - should have high frequency
+ %add = add nsw i32 %x, 100
+ br label %if.end
+
+if.else:
+ ; Cold path - should have low frequency
+ %sub = sub nsw i32 %x, 50
+ br label %if.end
+
+if.end:
+ %result = phi i32 [ %add, %if.then ], [ %sub, %if.else ]
+ ret i32 %result
+}
+
+; Profile metadata with branch weights 97:3.
+!15 = !{!"function_entry_count", i64 100}
+!16 = !{!"branch_weights", i32 97, i32 3}
+
+; Verify that Branch Probability Analysis runs at O0.
+; PASSES: Branch Probability Analysis
+
+; Verify that the branch probabilities reflect the exact profile data.
+; BRANCH_PROB: ---- Branch Probability Info : test_pgo_preservation ----
+; BRANCH_PROB: set edge entry -> 0 successor probability to {{.*}} = 97.00%
+; BRANCH_PROB: set edge entry -> 1 successor probability to {{.*}} = 3.00%
+
+; Verify that machine IR preserves the branch probabilities from profile data
+; MIR: bb.0.entry:
+; MIR-NEXT: successors: %bb.{{[0-9]+}}({{0x03d70a3d|0x7c28f5c3}}), %bb.{{[0-9]+}}({{0x7c28f5c3|0x03d70a3d}})
+; The two successor probability values should be:
+; - 0x7c28f5c3: approximately 97% (high probability successor)
+; - 0x03d70a3d: approximately 3% (low probability successor)
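
For reference, the two hex values follow from scaling the 97:3 weights into
a 31-bit fixed-point probability; a small C cross-check (the 1 << 31
denominator and round-to-nearest rounding are assumptions about how
BranchProbability encodes these):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  const uint64_t D = 1ull << 31;  /* assumed fixed-point denominator */
  uint32_t taken     = (uint32_t)((97 * D + 50) / 100);  /* ~97% */
  uint32_t not_taken = (uint32_t)((3 * D + 50) / 100);   /* ~3% */
  printf("0x%08x 0x%08x\n", taken, not_taken);  /* 0x7c28f5c3 0x03d70a3d */
  return 0;
}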
diff --git a/llvm/test/Instrumentation/AddressSanitizer/alloca-offset-lifetime.ll b/llvm/test/Instrumentation/AddressSanitizer/alloca-offset-lifetime.ll
deleted file mode 100644
index a4846176..0000000
--- a/llvm/test/Instrumentation/AddressSanitizer/alloca-offset-lifetime.ll
+++ /dev/null
@@ -1,27 +0,0 @@
-; Test that ASAN will not instrument lifetime markers on alloca offsets.
-;
-; RUN: opt < %s -passes=asan --asan-use-after-scope -S | FileCheck %s
-
-target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-apple-macosx10.15.0"
-
-%t = type { ptr, ptr, %sub, i64 }
-%sub = type { i32 }
-
-define void @foo() sanitize_address {
-entry:
- %0 = alloca %t, align 8
- %x = getelementptr inbounds %t, ptr %0, i64 0, i32 2
- call void @llvm.lifetime.start.p0(i64 4, ptr nonnull %x)
- call void @bar(ptr nonnull %x)
- call void @llvm.lifetime.end.p0(i64 4, ptr nonnull %x) #3
- ret void
-}
-
-declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
-declare void @bar(ptr)
-declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
-
-; CHECK: store i64 %[[STACK_BASE:.+]], ptr %asan_local_stack_base, align 8
-; CHECK-NOT: store i8 0
-; CHECK: call void @bar(ptr nonnull %x)
diff --git a/llvm/test/Instrumentation/AddressSanitizer/calls-only-smallfn.ll b/llvm/test/Instrumentation/AddressSanitizer/calls-only-smallfn.ll
index 0859a7e..d7204e6 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/calls-only-smallfn.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/calls-only-smallfn.ll
@@ -9,15 +9,15 @@ define void @foo() #0 {
entry:
%array01 = alloca [1 x i8], align 1
%array02 = alloca [2 x i8], align 1
-; OUTLINE: call void @__asan_set_shadow_f1(i64 %23, i64 4)
-; OUTLINE: call void @__asan_set_shadow_01(i64 %24, i64 1)
-; OUTLINE: call void @__asan_set_shadow_f2(i64 %25, i64 1)
-; OUTLINE: call void @__asan_set_shadow_02(i64 %26, i64 1)
-; OUTLINE: call void @__asan_set_shadow_f3(i64 %27, i64 1)
-; OUTLINE: call void @__asan_stack_free_0(i64 %7, i64 64)
-; OUTLINE: call void @__asan_set_shadow_00(i64 %55, i64 8)
-; INLINE: store i64 -935919682371587599, ptr %24, align 1
-; INLINE: store i64 -723401728380766731, ptr %52, align 1
+; OUTLINE: call void @__asan_set_shadow_f1(i64 %{{.+}}, i64 4)
+; OUTLINE: call void @__asan_set_shadow_01(i64 %{{.+}}, i64 1)
+; OUTLINE: call void @__asan_set_shadow_f2(i64 %{{.+}}, i64 1)
+; OUTLINE: call void @__asan_set_shadow_02(i64 %{{.+}}, i64 1)
+; OUTLINE: call void @__asan_set_shadow_f3(i64 %{{.+}}, i64 1)
+; OUTLINE: call void @__asan_stack_free_0(i64 %{{.+}}, i64 64)
+; OUTLINE: call void @__asan_set_shadow_00(i64 %{{.+}}, i64 8)
+; INLINE: store i64 -935919682371587599, ptr %{{.+}}, align 1
+; INLINE: store i64 -723401728380766731, ptr %{{.+}}, align 1
%arrayidx = getelementptr inbounds [1 x i8], ptr %array01, i64 0, i64 1
store i8 1, ptr %arrayidx, align 1
%arrayidx1 = getelementptr inbounds [2 x i8], ptr %array02, i64 0, i64 2
diff --git a/llvm/test/Instrumentation/AddressSanitizer/calls-only.ll b/llvm/test/Instrumentation/AddressSanitizer/calls-only.ll
index 5f122ad..6f52289 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/calls-only.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/calls-only.ll
@@ -14,26 +14,26 @@ entry:
%array05 = alloca [5 x i8], align 1
%array06 = alloca [6 x i8], align 1
%array07 = alloca [7 x i8], align 1
-; OUTLINE: call void @__asan_set_shadow_f1(i64 %33, i64 4)
-; OUTLINE: call void @__asan_set_shadow_01(i64 %34, i64 1)
-; OUTLINE: call void @__asan_set_shadow_f2(i64 %35, i64 1)
-; OUTLINE: call void @__asan_set_shadow_02(i64 %36, i64 1)
-; OUTLINE: call void @__asan_set_shadow_f2(i64 %37, i64 1)
-; OUTLINE: call void @__asan_set_shadow_03(i64 %38, i64 1)
-; OUTLINE: call void @__asan_set_shadow_f2(i64 %39, i64 1)
-; OUTLINE: call void @__asan_set_shadow_04(i64 %40, i64 1)
-; OUTLINE: call void @__asan_set_shadow_f2(i64 %41, i64 1)
-; OUTLINE: call void @__asan_set_shadow_05(i64 %42, i64 1)
-; OUTLINE: call void @__asan_set_shadow_f2(i64 %43, i64 3)
-; OUTLINE: call void @__asan_set_shadow_06(i64 %44, i64 1)
-; OUTLINE: call void @__asan_set_shadow_f2(i64 %45, i64 3)
-; OUTLINE: call void @__asan_set_shadow_07(i64 %46, i64 1)
-; OUTLINE: call void @__asan_set_shadow_f3(i64 %47, i64 3)
-; OUTLINE: call void @__asan_stack_free_2(i64 %7, i64 192)
-; OUTLINE: call void @__asan_set_shadow_00(i64 %135, i64 24)
-; INLINE: store i64 -1007977276409515535, ptr %34, align 1
-; INLINE: store i64 -940423264817843709, ptr %36, align 1
-; INLINE: store i64 -868083087686045178, ptr %38, align 1
+; OUTLINE: call void @__asan_set_shadow_f1(i64 %{{.+}}, i64 4)
+; OUTLINE: call void @__asan_set_shadow_01(i64 %{{.+}}, i64 1)
+; OUTLINE: call void @__asan_set_shadow_f2(i64 %{{.+}}, i64 1)
+; OUTLINE: call void @__asan_set_shadow_02(i64 %{{.+}}, i64 1)
+; OUTLINE: call void @__asan_set_shadow_f2(i64 %{{.+}}, i64 1)
+; OUTLINE: call void @__asan_set_shadow_03(i64 %{{.+}}, i64 1)
+; OUTLINE: call void @__asan_set_shadow_f2(i64 %{{.+}}, i64 1)
+; OUTLINE: call void @__asan_set_shadow_04(i64 %{{.+}}, i64 1)
+; OUTLINE: call void @__asan_set_shadow_f2(i64 %{{.+}}, i64 1)
+; OUTLINE: call void @__asan_set_shadow_05(i64 %{{.+}}, i64 1)
+; OUTLINE: call void @__asan_set_shadow_f2(i64 %{{.+}}, i64 3)
+; OUTLINE: call void @__asan_set_shadow_06(i64 %{{.+}}, i64 1)
+; OUTLINE: call void @__asan_set_shadow_f2(i64 %{{.+}}, i64 3)
+; OUTLINE: call void @__asan_set_shadow_07(i64 %{{.+}}, i64 1)
+; OUTLINE: call void @__asan_set_shadow_f3(i64 %{{.+}}, i64 3)
+; OUTLINE: call void @__asan_stack_free_2(i64 %{{.+}}, i64 192)
+; OUTLINE: call void @__asan_set_shadow_00(i64 %{{.+}}, i64 24)
+; INLINE: store i64 -1007977276409515535, ptr %{{.+}}, align 1
+; INLINE: store i64 -940423264817843709, ptr %{{.+}}, align 1
+; INLINE: store i64 -868083087686045178, ptr %{{.+}}, align 1
%arrayidx = getelementptr inbounds [1 x i8], ptr %array01, i64 0, i64 1
store i8 1, ptr %arrayidx, align 1
%arrayidx1 = getelementptr inbounds [2 x i8], ptr %array02, i64 0, i64 2
@@ -48,7 +48,7 @@ entry:
store i8 6, ptr %arrayidx5, align 1
%arrayidx6 = getelementptr inbounds [7 x i8], ptr %array07, i64 0, i64 7
store i8 7, ptr %arrayidx6, align 1
-; CHECK-NOT: store i64 -723401728380766731, ptr %126, align 1
+; CHECK-NOT: store i64 -723401728380766731, ptr %{{.+}}, align 1
ret void
}
attributes #0 = { noinline nounwind optnone sanitize_address ssp uwtable(sync) "frame-pointer"="non-leaf" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="apple-m1" "target-features"="+aes,+crc,+crypto,+dotprod,+fp-armv8,+fp16fml,+fullfp16,+lse,+neon,+ras,+rcpc,+rdm,+sha2,+sha3,+sm4,+v8.1a,+v8.2a,+v8.3a,+v8.4a,+v8a" }
diff --git a/llvm/test/Instrumentation/SanitizerCoverage/missing_dbg.ll b/llvm/test/Instrumentation/SanitizerCoverage/missing_dbg.ll
index 3568434..07b9a1c 100644
--- a/llvm/test/Instrumentation/SanitizerCoverage/missing_dbg.ll
+++ b/llvm/test/Instrumentation/SanitizerCoverage/missing_dbg.ll
@@ -1,5 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt < %s -passes='module(sancov-module)' -sanitizer-coverage-level=2 -S | FileCheck %s
+; RUN: opt < %s -passes='module(sancov-module)' -sanitizer-coverage-level=1 -sanitizer-coverage-stack-depth -sanitizer-coverage-stack-depth-callback-min=1 -S | FileCheck %s --check-prefix=CHECK-STACK-CALLBACK
+; RUN: opt < %s -passes='module(sancov-module)' -sanitizer-coverage-level=1 -sanitizer-coverage-stack-depth -S | FileCheck %s --check-prefix=CHECK-STACK-DEPTH
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@@ -55,6 +57,86 @@ entry:
ret i32 %t
}
+define i32 @with_dbg_stack_callback(ptr %a) !dbg !8 {
+; CHECK-STACK-CALLBACK-LABEL: define i32 @with_dbg_stack_callback(
+; CHECK-STACK-CALLBACK-SAME: ptr [[A:%.*]]) !dbg [[DBG8:![0-9]+]] {
+; CHECK-STACK-CALLBACK-NEXT: entry:
+; CHECK-STACK-CALLBACK-NEXT: [[BUF:%.*]] = alloca [64 x i8], align 1
+; CHECK-STACK-CALLBACK-NEXT: call void @__sanitizer_cov_stack_depth() #[[ATTR1:[0-9]+]], !dbg [[DBG9:![0-9]+]]
+; CHECK-STACK-CALLBACK-NEXT: %t = load i32, ptr [[A]], align 4
+; CHECK-STACK-CALLBACK-NEXT: call void @external_func()
+; CHECK-STACK-CALLBACK-NEXT: ret i32 %t
+;
+entry:
+ %buf = alloca [64 x i8], align 1
+ %t = load i32, ptr %a, align 4
+ call void @external_func()
+ ret i32 %t
+}
+
+define i32 @with_dbg_stack_depth(ptr %a) !dbg !10 {
+; CHECK-STACK-DEPTH-LABEL: define i32 @with_dbg_stack_depth(
+; CHECK-STACK-DEPTH-SAME: ptr [[A:%.*]]) !dbg [[DBG10:![0-9]+]] {
+; CHECK-STACK-DEPTH-NEXT: entry:
+; CHECK-STACK-DEPTH-NEXT: [[BUF:%.*]] = alloca [64 x i8], align 1
+; CHECK-STACK-DEPTH-NEXT: [[TMP1:%.*]] = call ptr @llvm.frameaddress.p0(i32 0)
+; CHECK-STACK-DEPTH-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[TMP1]] to i64
+; CHECK-STACK-DEPTH-NEXT: [[TMP3:%.*]] = load i64, ptr @__sancov_lowest_stack, align 8
+; CHECK-STACK-DEPTH-NEXT: [[TMP4:%.*]] = icmp ult i64 [[TMP2]], [[TMP3]]
+; CHECK-STACK-DEPTH-NEXT: br i1 [[TMP4]], label {{%.*}}, label {{%.*}}
+; CHECK-STACK-DEPTH: store i64 [[TMP2]], ptr @__sancov_lowest_stack, align 8, !dbg [[DBG11:![0-9]+]], {{.*}}!nosanitize
+; CHECK-STACK-DEPTH: %t = load i32, ptr [[A]], align 4
+; CHECK-STACK-DEPTH-NEXT: call void @external_func()
+; CHECK-STACK-DEPTH-NEXT: ret i32 %t
+;
+entry:
+ %buf = alloca [64 x i8], align 1
+ %t = load i32, ptr %a, align 4
+ call void @external_func()
+ ret i32 %t
+}
+
+define i32 @without_dbg_stack_callback(ptr %a) {
+; CHECK-STACK-CALLBACK-LABEL: define i32 @without_dbg_stack_callback(
+; CHECK-STACK-CALLBACK-SAME: ptr [[A:%.*]]) {
+; CHECK-STACK-CALLBACK-NEXT: entry:
+; CHECK-STACK-CALLBACK-NEXT: [[BUF:%.*]] = alloca [64 x i8], align 1
+; CHECK-STACK-CALLBACK-NEXT: call void @__sanitizer_cov_stack_depth() #[[ATTR1]]
+; CHECK-STACK-CALLBACK-NEXT: %t = load i32, ptr [[A]], align 4
+; CHECK-STACK-CALLBACK-NEXT: call void @external_func()
+; CHECK-STACK-CALLBACK-NEXT: ret i32 %t
+;
+entry:
+ %buf = alloca [64 x i8], align 1
+ %t = load i32, ptr %a, align 4
+ call void @external_func()
+ ret i32 %t
+}
+
+define i32 @without_dbg_stack_depth(ptr %a) {
+; CHECK-STACK-DEPTH-LABEL: define i32 @without_dbg_stack_depth(
+; CHECK-STACK-DEPTH-SAME: ptr [[A:%.*]]) {
+; CHECK-STACK-DEPTH-NEXT: entry:
+; CHECK-STACK-DEPTH-NEXT: [[BUF:%.*]] = alloca [64 x i8], align 1
+; CHECK-STACK-DEPTH-NEXT: [[TMP1:%.*]] = call ptr @llvm.frameaddress.p0(i32 0)
+; CHECK-STACK-DEPTH-NEXT: [[TMP2:%.*]] = ptrtoint ptr [[TMP1]] to i64
+; CHECK-STACK-DEPTH-NEXT: [[TMP3:%.*]] = load i64, ptr @__sancov_lowest_stack, align 8
+; CHECK-STACK-DEPTH-NEXT: [[TMP4:%.*]] = icmp ult i64 [[TMP2]], [[TMP3]]
+; CHECK-STACK-DEPTH-NEXT: br i1 [[TMP4]], label {{%.*}}, label {{%.*}}
+; CHECK-STACK-DEPTH: store i64 [[TMP2]], ptr @__sancov_lowest_stack, align 8, {{.*}}!nosanitize
+; CHECK-STACK-DEPTH: %t = load i32, ptr [[A]], align 4
+; CHECK-STACK-DEPTH-NEXT: call void @external_func()
+; CHECK-STACK-DEPTH-NEXT: ret i32 %t
+;
+entry:
+ %buf = alloca [64 x i8], align 1
+ %t = load i32, ptr %a, align 4
+ call void @external_func()
+ ret i32 %t
+}
+
+declare void @external_func()
+
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!2}
@@ -66,6 +148,10 @@ entry:
!5 = !{}
!6 = !DILocation(line: 192, scope: !3)
!7 = !DILocation(line: 0, scope: !3)
+!8 = distinct !DISubprogram(name: "with_dbg_stack_callback", scope: !1, file: !1, line: 200, type: !4, scopeLine: 200, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagLocalToUnit | DISPFlagDefinition | DISPFlagOptimized, unit: !0)
+!9 = !DILocation(line: 200, scope: !8)
+!10 = distinct !DISubprogram(name: "with_dbg_stack_depth", scope: !1, file: !1, line: 210, type: !4, scopeLine: 210, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagLocalToUnit | DISPFlagDefinition | DISPFlagOptimized, unit: !0)
+!11 = !DILocation(line: 210, scope: !10)
;.
; CHECK: [[META0:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C89, file: [[META1:![0-9]+]], isOptimized: true, runtimeVersion: 0, emissionKind: LineTablesOnly, splitDebugInlining: false, nameTableKind: None)
@@ -76,3 +162,9 @@ entry:
; CHECK: [[DBG6]] = !DILocation(line: 192, scope: [[DBG3]])
; CHECK: [[DBG7]] = !DILocation(line: 0, scope: [[DBG3]])
;.
+; CHECK-STACK-CALLBACK: [[DBG8]] = distinct !DISubprogram(name: "with_dbg_stack_callback", scope: {{.*}}, file: {{.*}}, line: 200
+; CHECK-STACK-CALLBACK: [[DBG9]] = !DILocation(line: 200, scope: [[DBG8]])
+;.
+; CHECK-STACK-DEPTH: [[DBG10]] = distinct !DISubprogram(name: "with_dbg_stack_depth", scope: {{.*}}, file: {{.*}}, line: 210
+; CHECK-STACK-DEPTH: [[DBG11]] = !DILocation(line: 210, scope: [[DBG10]])
+;.
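
The CHECK-STACK-DEPTH lines encode the inline form of stack-depth tracking:
read the frame address, compare it against a global low-water mark, and
store only when a new minimum is seen. A minimal C sketch of that shape
(the helper name is a hypothetical illustration; __sancov_lowest_stack and
the compare/store structure come from the CHECK lines):

#include <stdint.h>

extern uintptr_t __sancov_lowest_stack;

static inline void sancov_track_stack_depth(void) {
  uintptr_t frame = (uintptr_t)__builtin_frame_address(0);
  if (frame < __sancov_lowest_stack)  /* the icmp ult in the emitted IR */
    __sancov_lowest_stack = frame;    /* the guarded store */
}

The CHECK-STACK-CALLBACK variant instead expects a single call to
__sanitizer_cov_stack_depth(), selected in the RUN line via
-sanitizer-coverage-stack-depth-callback-min=1.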
diff --git a/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-basics.ll b/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-basics.ll
index bb3001e..a7d3446 100644
--- a/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-basics.ll
+++ b/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-basics.ll
@@ -91,12 +91,13 @@
@ctz7.table = internal unnamed_addr constant [32 x i8] c"\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09", align 1
-define i32 @ctz1(i32 %x) {
+define i32 @ctz1(i32 %x) !prof !0 {
; CHECK-LABEL: @ctz1(
+; CHECK: !prof [[PROF_0:![0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 true)
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0
-; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]], !prof [[PROF_1:![0-9]+]]
; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP3]] to i32
; CHECK-NEXT: ret i32 [[CONV]]
@@ -498,3 +499,7 @@ entry:
%conv = zext i8 %0 to i32
ret i32 %conv
}
+
+!0 = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF_0]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF_1]] = !{!"branch_weights", i32 1, i32 1048575}
diff --git a/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-dereferencing-pointer.ll b/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-dereferencing-pointer.ll
index d2ecb57..0e5c4f0 100644
--- a/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-dereferencing-pointer.ll
+++ b/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-dereferencing-pointer.ll
@@ -20,13 +20,14 @@
@table = internal unnamed_addr constant [64 x i32] [i32 0, i32 1, i32 12, i32 2, i32 13, i32 22, i32 17, i32 3, i32 14, i32 33, i32 23, i32 36, i32 18, i32 58, i32 28, i32 4, i32 62, i32 15, i32 34, i32 26, i32 24, i32 48, i32 50, i32 37, i32 19, i32 55, i32 59, i32 52, i32 29, i32 44, i32 39, i32 5, i32 63, i32 11, i32 21, i32 16, i32 32, i32 35, i32 57, i32 27, i32 61, i32 25, i32 47, i32 49, i32 54, i32 51, i32 43, i32 38, i32 10, i32 20, i32 31, i32 56, i32 60, i32 46, i32 53, i32 42, i32 9, i32 30, i32 45, i32 41, i32 8, i32 40, i32 7, i32 6], align 4
-define i32 @ctz6(ptr nocapture readonly %b) {
+define i32 @ctz6(ptr nocapture readonly %b) !prof !0 {
; CHECK-LABEL: @ctz6(
+; CHECK: !prof [[PROF_0:![0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[B:%.*]], align 8
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.cttz.i64(i64 [[TMP0]], i1 true)
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i64 [[TMP0]], 0
-; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i64 0, i64 [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i64 0, i64 [[TMP1]], !prof [[PROF_1:![0-9]+]]
; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; CHECK-NEXT: ret i32 [[TMP4]]
;
@@ -40,3 +41,7 @@ entry:
%1 = load i32, ptr %arrayidx, align 4
ret i32 %1
}
+
+!0 = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF_0]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF_1]] = !{!"branch_weights", i32 1, i32 1048575}
diff --git a/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-non-argument-value.ll b/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-non-argument-value.ll
index f63badb..a7732f0 100644
--- a/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-non-argument-value.ll
+++ b/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-non-argument-value.ll
@@ -20,13 +20,14 @@
@.str = private constant [3 x i8] c"%u\00", align 1
@test.table = internal constant [32 x i8] c"\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09", align 1
-define i32 @test() {
+define i32 @test() !prof !0 {
; CHECK-LABEL: @test(
+; CHECK: !prof [[PROF_0:![0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @x, align 4
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.cttz.i32(i32 [[TMP0]], i1 true)
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP0]], 0
-; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 0, i32 [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 0, i32 [[TMP1]], !prof [[PROF_1:![0-9]+]]
; CHECK-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i8
; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP4]] to i32
; CHECK-NEXT: ret i32 [[CONV]]
@@ -43,3 +44,7 @@ entry:
%conv = zext i8 %1 to i32
ret i32 %conv
}
+
+!0 = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF_0]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF_1]] = !{!"branch_weights", i32 1, i32 1048575}
diff --git a/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-zero-element.ll b/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-zero-element.ll
index bbdd9b7c..5f9b4ce 100644
--- a/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-zero-element.ll
+++ b/llvm/test/Transforms/AggressiveInstCombine/lower-table-based-cttz-zero-element.ll
@@ -3,12 +3,13 @@
@ctz1.table = internal constant [32 x i8] c"\00\01\1C\02\1D\0E\18\03\1E\16\14\0F\19\11\04\08\1F\1B\0D\17\15\13\10\07\1A\0C\12\06\0B\05\0A\09", align 1
-define i32 @ctz1(i32 %x) {
+define i32 @ctz1(i32 %x) !prof !0 {
; CHECK-LABEL: @ctz1(
+; CHECK: !prof [[PROF_0:![0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 true)
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0
-; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 0, i32 [[TMP0]], !prof [[PROF_1:![0-9]+]]
; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP3]] to i32
; CHECK-NEXT: ret i32 [[CONV]]
@@ -24,3 +25,7 @@ entry:
%conv = zext i8 %0 to i32
ret i32 %conv
}
+
+!0 = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF_0]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF_1]] = !{!"branch_weights", i32 1, i32 1048575}
diff --git a/llvm/test/Transforms/AggressiveInstCombine/trunc_select.ll b/llvm/test/Transforms/AggressiveInstCombine/trunc_select.ll
index fb14782..9352211 100644
--- a/llvm/test/Transforms/AggressiveInstCombine/trunc_select.ll
+++ b/llvm/test/Transforms/AggressiveInstCombine/trunc_select.ll
@@ -3,16 +3,17 @@
target datalayout = "e-m:m-p1:64:64:64-p:32:32:32-n8:16:32"
-define dso_local i16 @select_i16(i16 %a, i16 %b, i1 %cond) {
+define dso_local i16 @select_i16(i16 %a, i16 %b, i1 %cond) !prof !0 {
; CHECK-LABEL: @select_i16(
+; CHECK: !prof [[PROF_0:![0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[SEL:%.*]] = select i1 [[COND:%.*]], i16 [[A:%.*]], i16 [[B:%.*]]
+; CHECK-NEXT: [[SEL:%.*]] = select i1 [[COND:%.*]], i16 [[A:%.*]], i16 [[B:%.*]], !prof [[PROF_1:![0-9]+]]
; CHECK-NEXT: ret i16 [[SEL]]
;
entry:
%conv0 = sext i16 %a to i32
%conv1 = sext i16 %b to i32
- %sel = select i1 %cond, i32 %conv0, i32 %conv1
+ %sel = select i1 %cond, i32 %conv0, i32 %conv1, !prof !1
%conv4 = trunc i32 %sel to i16
ret i16 %conv4
}
@@ -134,3 +135,8 @@ entry:
ret i16 %conv4
}
+!0 = !{!"function_entry_count", i64 1000}
+!1 = !{!"branch_weights", i32 2, i32 3}
+; CHECK: [[PROF_0]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF_1]] = !{!"branch_weights", i32 2, i32 3}
+
diff --git a/llvm/test/Transforms/AggressiveInstCombine/trunc_select_cmp.ll b/llvm/test/Transforms/AggressiveInstCombine/trunc_select_cmp.ll
index ac9cf2d..69ad625 100644
--- a/llvm/test/Transforms/AggressiveInstCombine/trunc_select_cmp.ll
+++ b/llvm/test/Transforms/AggressiveInstCombine/trunc_select_cmp.ll
@@ -1,19 +1,20 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=aggressive-instcombine -S | FileCheck %s
-define dso_local i16 @cmp_select_sext_const(i8 %a) {
+define dso_local i16 @cmp_select_sext_const(i8 %a) !prof !0 {
; CHECK-LABEL: @cmp_select_sext_const(
+; CHECK: !prof [[PROF_0:![0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CONV:%.*]] = sext i8 [[A:%.*]] to i32
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV]], 109
-; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], i32 109, i32 [[CONV]]
+; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], i32 109, i32 [[CONV]], !prof [[PROF_1:![0-9]+]]
; CHECK-NEXT: [[CONV4:%.*]] = trunc i32 [[COND]] to i16
; CHECK-NEXT: ret i16 [[CONV4]]
;
entry:
%conv = sext i8 %a to i32
%cmp = icmp slt i32 %conv, 109
- %cond = select i1 %cmp, i32 109, i32 %conv
+ %cond = select i1 %cmp, i32 109, i32 %conv, !prof !1
%conv4 = trunc i32 %cond to i16
ret i16 %conv4
}
@@ -209,3 +210,7 @@ define i16 @cmp_select_unsigned_const_i16Const_noTransformation(i8 %a) {
ret i16 %conv4
}
+!0 = !{!"function_entry_count", i64 1000}
+!1 = !{!"branch_weights", i32 2, i32 3}
+; CHECK: [[PROF_0]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF_1]] = !{!"branch_weights", i32 2, i32 3}
diff --git a/llvm/test/Transforms/Coroutines/coro-transform-must-elide.ll b/llvm/test/Transforms/Coroutines/coro-elide-safe.ll
index 4eec7ed..722693d 100644
--- a/llvm/test/Transforms/Coroutines/coro-transform-must-elide.ll
+++ b/llvm/test/Transforms/Coroutines/coro-elide-safe.ll
@@ -1,4 +1,8 @@
-; Testing elide performed its job for calls to coroutines marked safe.
+; Coroutine calls marked with `coro_elide_safe` should be elided.
+; Inside `caller`, we expect the `callee` coroutine to be elided.
+; Inside `caller_conditional`, `callee` is only called on an unlikely
+; path, hence we expect the `callee` coroutine NOT to be elided.
+;
; RUN: opt < %s -S -passes='cgscc(coro-annotation-elide)' | FileCheck %s
%struct.Task = type { ptr }
@@ -57,7 +61,7 @@ define ptr @callee.noalloc(i8 %arg, ptr dereferenceable(32) align(8) %frame) {
; Function Attrs: presplitcoroutine
define ptr @caller() #0 {
entry:
- %task = call ptr @callee(i8 0) #1
+ %task = call ptr @callee(i8 0) coro_elide_safe
ret ptr %task
; CHECK: %[[TASK:.+]] = alloca %struct.Task, align 8
; CHECK-NEXT: %[[FRAME:.+]] = alloca [32 x i8], align 8
@@ -69,6 +73,25 @@ entry:
; CHECK-NEXT: ret ptr %[[TASK]]
}
+; CHECK-LABEL: define ptr @caller_conditional(i1 %cond)
+; Function Attrs: presplitcoroutine
+define ptr @caller_conditional(i1 %cond) #0 {
+entry:
+ br i1 %cond, label %call, label %ret
+
+call:
+ ; CHECK-NOT: alloca
+ ; CHECK-NOT: @llvm.coro.id({{.*}}, ptr @callee, {{.*}})
+ ; CHECK: %task = call ptr @callee(i8 0)
+ ; CHECK-NEXT: br label %ret
+ %task = call ptr @callee(i8 0) coro_elide_safe
+ br label %ret
+
+ret:
+ %retval = phi ptr [ %task, %call ], [ null, %entry ]
+ ret ptr %retval
+}
+
declare token @llvm.coro.id(i32, ptr, ptr, ptr)
declare ptr @llvm.coro.begin(token, ptr)
declare ptr @llvm.coro.frame()
@@ -76,4 +99,3 @@ declare ptr @llvm.coro.subfn.addr(ptr, i8)
declare i1 @llvm.coro.alloc(token)
attributes #0 = { presplitcoroutine }
-attributes #1 = { coro_elide_safe }
diff --git a/llvm/test/Transforms/DFAJumpThreading/dfa-jump-threading-analysis.ll b/llvm/test/Transforms/DFAJumpThreading/dfa-jump-threading-analysis.ll
index 4173c32..f45798b 100644
--- a/llvm/test/Transforms/DFAJumpThreading/dfa-jump-threading-analysis.ll
+++ b/llvm/test/Transforms/DFAJumpThreading/dfa-jump-threading-analysis.ll
@@ -7,10 +7,10 @@
; state, and the block that determines the next state.
; < path of BBs that form a cycle > [ state, determinator ]
define i32 @test1(i32 %num) !prof !0{
-; CHECK: < case2 for.inc for.body > [ 1, for.inc ]
-; CHECK-NEXT: < for.inc for.body > [ 1, for.inc ]
-; CHECK-NEXT: < case1 for.inc for.body > [ 2, for.inc ]
-; CHECK-NEXT: < case2 sel.si.unfold.false for.inc for.body > [ 2, sel.si.unfold.false ]
+; CHECK: < case2, for.inc, for.body > [ 1, for.inc ]
+; CHECK-NEXT: < for.inc, for.body > [ 1, for.inc ]
+; CHECK-NEXT: < case1, for.inc, for.body > [ 2, for.inc ]
+; CHECK-NEXT: < case2, sel.si.unfold.false, for.inc, for.body > [ 2, sel.si.unfold.false ]
entry:
br label %for.body
@@ -47,12 +47,12 @@ for.end:
; complicated CFG. Here the FSM is represented as a nested loop, with
; fallthrough cases.
define i32 @test2(i32 %init) {
-; CHECK: < loop.1.backedge loop.1 loop.2 loop.3 > [ 1, loop.1 ]
-; CHECK-NEXT: < case4 loop.1.backedge state.1.be2.si.unfold.false loop.1 loop.2 loop.3 > [ 2, loop.1.backedge ]
-; CHECK-NEXT: < case2 loop.1.backedge state.1.be2.si.unfold.false loop.1 loop.2 loop.3 > [ 4, loop.1.backedge ]
-; CHECK-NEXT: < case4 loop.2.backedge loop.2 loop.3 > [ 3, loop.2.backedge ]
-; CHECK-NEXT: < case3 loop.2.backedge loop.2 loop.3 > [ 0, loop.2.backedge ]
-; CHECK-NEXT: < case2 loop.3 > [ 3, loop.3 ]
+; CHECK: < loop.1.backedge, loop.1, loop.2, loop.3 > [ 1, loop.1 ]
+; CHECK-NEXT: < case4, loop.1.backedge, state.1.be2.si.unfold.false, loop.1, loop.2, loop.3 > [ 2, loop.1.backedge ]
+; CHECK-NEXT: < case2, loop.1.backedge, state.1.be2.si.unfold.false, loop.1, loop.2, loop.3 > [ 4, loop.1.backedge ]
+; CHECK-NEXT: < case4, loop.2.backedge, loop.2, loop.3 > [ 3, loop.2.backedge ]
+; CHECK-NEXT: < case3, loop.2.backedge, loop.2, loop.3 > [ 0, loop.2.backedge ]
+; CHECK-NEXT: < case2, loop.3 > [ 3, loop.3 ]
entry:
%cmp = icmp eq i32 %init, 0
%sel = select i1 %cmp, i32 0, i32 2
@@ -187,12 +187,12 @@ bb66: ; preds = %bb59
; Value %init is not predictable, but that's okay since it is the initial value of the switch.
define i32 @initial.value.positive1(i32 %init) !prof !0 {
-; CHECK: < loop.1.backedge loop.1 loop.2 loop.3 > [ 1, loop.1 ]
-; CHECK-NEXT: < case4 loop.1.backedge state.1.be2.si.unfold.false loop.1 loop.2 loop.3 > [ 2, loop.1.backedge ]
-; CHECK-NEXT: < case2 loop.1.backedge state.1.be2.si.unfold.false loop.1 loop.2 loop.3 > [ 4, loop.1.backedge ]
-; CHECK-NEXT: < case4 loop.2.backedge loop.2 loop.3 > [ 3, loop.2.backedge ]
-; CHECK-NEXT: < case3 loop.2.backedge loop.2 loop.3 > [ 0, loop.2.backedge ]
-; CHECK-NEXT: < case2 loop.3 > [ 3, loop.3 ]
+; CHECK: < loop.1.backedge, loop.1, loop.2, loop.3 > [ 1, loop.1 ]
+; CHECK-NEXT: < case4, loop.1.backedge, state.1.be2.si.unfold.false, loop.1, loop.2, loop.3 > [ 2, loop.1.backedge ]
+; CHECK-NEXT: < case2, loop.1.backedge, state.1.be2.si.unfold.false, loop.1, loop.2, loop.3 > [ 4, loop.1.backedge ]
+; CHECK-NEXT: < case4, loop.2.backedge, loop.2, loop.3 > [ 3, loop.2.backedge ]
+; CHECK-NEXT: < case3, loop.2.backedge, loop.2, loop.3 > [ 0, loop.2.backedge ]
+; CHECK-NEXT: < case2, loop.3 > [ 3, loop.3 ]
entry:
%cmp = icmp eq i32 %init, 0
br label %loop.1
diff --git a/llvm/test/Transforms/DFAJumpThreading/max-path-length.ll b/llvm/test/Transforms/DFAJumpThreading/max-path-length.ll
index 92747629..cb7c46e 100644
--- a/llvm/test/Transforms/DFAJumpThreading/max-path-length.ll
+++ b/llvm/test/Transforms/DFAJumpThreading/max-path-length.ll
@@ -9,9 +9,9 @@
; too long so that it is not jump-threaded.
define i32 @max_path_length(i32 %num) {
; CHECK-NOT: 3, case1
-; CHECK: < case2 for.inc for.body > [ 1, for.inc ]
-; CHECK-NEXT: < for.inc for.body > [ 1, for.inc ]
-; CHECK-NEXT: < case2 sel.si.unfold.false for.inc for.body > [ 2, sel.si.unfold.false ]
+; CHECK: < case2, for.inc, for.body > [ 1, for.inc ]
+; CHECK-NEXT: < for.inc, for.body > [ 1, for.inc ]
+; CHECK-NEXT: < case2, sel.si.unfold.false, for.inc, for.body > [ 2, sel.si.unfold.false ]
; CHECK-NEXT: DFA-JT: Renaming non-local uses of:
entry:
br label %for.body
diff --git a/llvm/test/Transforms/GVN/assume-equal.ll b/llvm/test/Transforms/GVN/assume-equal.ll
index 0c922da..bbbc5c5 100644
--- a/llvm/test/Transforms/GVN/assume-equal.ll
+++ b/llvm/test/Transforms/GVN/assume-equal.ll
@@ -221,21 +221,22 @@ define i32 @_Z1ii(i32 %p) {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[P]], 42
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
-; CHECK-NEXT: br i1 true, label %[[BB2:.*]], label %[[BB2]]
-; CHECK: [[BB2]]:
-; CHECK-NEXT: br i1 true, label %[[BB2]], label %[[BB2]]
-; CHECK: [[BB0:.*:]]
+; CHECK-NEXT: br i1 true, label %[[COMMON:.*]], label %[[COMMON]]
+; CHECK: [[COMMON]]:
+; CHECK-NEXT: br i1 true, label %[[COMMON]], label %[[COMMON]]
+; CHECK: [[EXIT:.*:]]
; CHECK-NEXT: ret i32 42
;
entry:
%cmp = icmp eq i32 %p, 42
call void @llvm.assume(i1 %cmp)
- br i1 %cmp, label %bb2, label %bb2
-bb2:
+ br i1 %cmp, label %common, label %common
+common:
call void @llvm.assume(i1 true)
- br i1 %cmp, label %bb2, label %bb2
+ br i1 %cmp, label %common, label %common
+exit:
ret i32 %p
}
@@ -357,8 +358,8 @@ define i8 @assume_ptr_eq_different_prov_matters(ptr %p, ptr %p2) {
ret i8 %v
}
-define i1 @assume_ptr_eq_different_prov_does_not_matter(ptr %p, ptr %p2) {
-; CHECK-LABEL: define i1 @assume_ptr_eq_different_prov_does_not_matter(
+define i1 @assume_ptr_eq_different_prov_does_not_matter_icmp(ptr %p, ptr %p2) {
+; CHECK-LABEL: define i1 @assume_ptr_eq_different_prov_does_not_matter_icmp(
; CHECK-SAME: ptr [[P:%.*]], ptr [[P2:%.*]]) {
; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[P]], [[P2]]
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
@@ -371,6 +372,36 @@ define i1 @assume_ptr_eq_different_prov_does_not_matter(ptr %p, ptr %p2) {
ret i1 %c
}
+; This is not correct, as it may change the provenance exposed by ptrtoint.
+; We still allow it for now.
+define i64 @assume_ptr_eq_different_prov_does_not_matter_ptrtoint(ptr %p, ptr %p2) {
+; CHECK-LABEL: define i64 @assume_ptr_eq_different_prov_does_not_matter_ptrtoint(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[P2:%.*]]) {
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[P]], [[P2]]
+; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: [[INT:%.*]] = ptrtoint ptr [[P]] to i64
+; CHECK-NEXT: ret i64 [[INT]]
+;
+ %cmp = icmp eq ptr %p, %p2
+ call void @llvm.assume(i1 %cmp)
+ %int = ptrtoint ptr %p2 to i64
+ ret i64 %int
+}
+
+define i64 @assume_ptr_eq_different_prov_does_not_matter_ptrtoaddr(ptr %p, ptr %p2) {
+; CHECK-LABEL: define i64 @assume_ptr_eq_different_prov_does_not_matter_ptrtoaddr(
+; CHECK-SAME: ptr [[P:%.*]], ptr [[P2:%.*]]) {
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[P]], [[P2]]
+; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT: [[INT:%.*]] = ptrtoaddr ptr [[P]] to i64
+; CHECK-NEXT: ret i64 [[INT]]
+;
+ %cmp = icmp eq ptr %p, %p2
+ call void @llvm.assume(i1 %cmp)
+ %int = ptrtoaddr ptr %p2 to i64
+ ret i64 %int
+}
+
define i8 @assume_ptr_eq_same_prov(ptr %p, i64 %x) {
; CHECK-LABEL: define i8 @assume_ptr_eq_same_prov(
; CHECK-SAME: ptr [[P:%.*]], i64 [[X:%.*]]) {
diff --git a/llvm/test/Transforms/InstCombine/ptrtoaddr.ll b/llvm/test/Transforms/InstCombine/ptrtoaddr.ll
index 61b1331..5211fbd 100644
--- a/llvm/test/Transforms/InstCombine/ptrtoaddr.ll
+++ b/llvm/test/Transforms/InstCombine/ptrtoaddr.ll
@@ -1,6 +1,14 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
-target datalayout = "p1:64:64:64:32"
+
+; The ptrtoaddr folds are also valid for pointers that have external state.
+target datalayout = "pe1:64:64:64:32"
+
+@g = external global i8
+@g2 = external global i8
+
+@g.as1 = external addrspace(1) global i8
+@g2.as1 = external addrspace(1) global i8
define i32 @ptrtoaddr_inttoptr_arg(i32 %a) {
; CHECK-LABEL: define i32 @ptrtoaddr_inttoptr_arg(
@@ -24,14 +32,14 @@ define i32 @ptrtoaddr_inttoptr() {
define i32 @ptrtoaddr_inttoptr_diff_size1() {
; CHECK-LABEL: define i32 @ptrtoaddr_inttoptr_diff_size1() {
-; CHECK-NEXT: ret i32 ptrtoaddr (ptr addrspace(1) inttoptr (i64 -1 to ptr addrspace(1)) to i32)
+; CHECK-NEXT: ret i32 -1
;
ret i32 ptrtoaddr (ptr addrspace(1) inttoptr (i64 -1 to ptr addrspace(1)) to i32)
}
define i32 @ptrtoaddr_inttoptr_diff_size2() {
; CHECK-LABEL: define i32 @ptrtoaddr_inttoptr_diff_size2() {
-; CHECK-NEXT: ret i32 ptrtoaddr (ptr addrspace(1) inttoptr (i16 -1 to ptr addrspace(1)) to i32)
+; CHECK-NEXT: ret i32 65535
;
ret i32 ptrtoaddr (ptr addrspace(1) inttoptr (i16 -1 to ptr addrspace(1)) to i32)
}
@@ -52,14 +60,73 @@ define i64 @ptr2addr2_inttoptr_noas2() {
define i64 @ptrtoaddr_inttoptr_noas_diff_size1() {
; CHECK-LABEL: define i64 @ptrtoaddr_inttoptr_noas_diff_size1() {
-; CHECK-NEXT: ret i64 ptrtoaddr (ptr inttoptr (i32 -1 to ptr) to i64)
+; CHECK-NEXT: ret i64 4294967295
;
ret i64 ptrtoaddr (ptr inttoptr (i32 -1 to ptr) to i64)
}
define i64 @ptrtoaddr_inttoptr_noas_diff_size2() {
; CHECK-LABEL: define i64 @ptrtoaddr_inttoptr_noas_diff_size2() {
-; CHECK-NEXT: ret i64 ptrtoaddr (ptr inttoptr (i128 -1 to ptr) to i64)
+; CHECK-NEXT: ret i64 -1
;
ret i64 ptrtoaddr (ptr inttoptr (i128 -1 to ptr) to i64)
}
+
+define i64 @ptrtoaddr_gep_null() {
+; CHECK-LABEL: define i64 @ptrtoaddr_gep_null() {
+; CHECK-NEXT: ret i64 42
+;
+ ret i64 ptrtoaddr (ptr getelementptr (i8, ptr null, i64 42) to i64)
+}
+
+define i32 @ptrtoaddr_gep_null_addrsize() {
+; CHECK-LABEL: define i32 @ptrtoaddr_gep_null_addrsize() {
+; CHECK-NEXT: ret i32 42
+;
+ ret i32 ptrtoaddr (ptr addrspace(1) getelementptr (i8, ptr addrspace(1) null, i32 42) to i32)
+}
+
+define i64 @ptrtoaddr_gep_sub() {
+; CHECK-LABEL: define i64 @ptrtoaddr_gep_sub() {
+; CHECK-NEXT: ret i64 sub (i64 ptrtoaddr (ptr @g to i64), i64 ptrtoaddr (ptr @g2 to i64))
+;
+ ret i64 ptrtoaddr (ptr getelementptr (i8, ptr @g, i64 sub (i64 0, i64 ptrtoaddr (ptr @g2 to i64))) to i64)
+}
+
+define i32 @ptrtoaddr_gep_sub_addrsize() {
+; CHECK-LABEL: define i32 @ptrtoaddr_gep_sub_addrsize() {
+; CHECK-NEXT: ret i32 sub (i32 ptrtoaddr (ptr addrspace(1) @g.as1 to i32), i32 ptrtoaddr (ptr addrspace(1) @g2.as1 to i32))
+;
+ ret i32 ptrtoaddr (ptr addrspace(1) getelementptr (i8, ptr addrspace(1) @g.as1, i32 sub (i32 0, i32 ptrtoaddr (ptr addrspace(1) @g2.as1 to i32))) to i32)
+}
+
+; Don't fold inttoptr of ptrtoaddr away. inttoptr will pick a previously
+; exposed provenance, which is not necessarily that of @g (especially as
+; ptrtoaddr does not expose the provenance).
+define ptr @inttoptr_of_ptrtoaddr() {
+; CHECK-LABEL: define ptr @inttoptr_of_ptrtoaddr() {
+; CHECK-NEXT: ret ptr inttoptr (i64 ptrtoaddr (ptr @g to i64) to ptr)
+;
+ ret ptr inttoptr (i64 ptrtoaddr (ptr @g to i64) to ptr)
+}
+
+define i64 @ptrtoaddr_sub_consts_unrelated() {
+; CHECK-LABEL: define i64 @ptrtoaddr_sub_consts_unrelated() {
+; CHECK-NEXT: ret i64 sub (i64 ptrtoaddr (ptr @g to i64), i64 ptrtoaddr (ptr @g2 to i64))
+;
+ ret i64 sub (i64 ptrtoaddr (ptr @g to i64), i64 ptrtoaddr (ptr @g2 to i64))
+}
+
+define i64 @ptrtoaddr_sub_consts_offset() {
+; CHECK-LABEL: define i64 @ptrtoaddr_sub_consts_offset() {
+; CHECK-NEXT: ret i64 42
+;
+ ret i64 sub (i64 ptrtoaddr (ptr getelementptr (i8, ptr @g, i64 42) to i64), i64 ptrtoaddr (ptr @g to i64))
+}
+
+define i32 @ptrtoaddr_sub_consts_offset_addrsize() {
+; CHECK-LABEL: define i32 @ptrtoaddr_sub_consts_offset_addrsize() {
+; CHECK-NEXT: ret i32 42
+;
+ ret i32 sub (i32 ptrtoaddr (ptr addrspace(1) getelementptr (i8, ptr addrspace(1) @g.as1, i32 42) to i32), i32 ptrtoaddr (ptr addrspace(1) @g.as1 to i32))
+}
diff --git a/llvm/test/Transforms/InstSimplify/ptr_diff.ll b/llvm/test/Transforms/InstSimplify/ptr_diff.ll
index d18b462..fdd9e8e 100644
--- a/llvm/test/Transforms/InstSimplify/ptr_diff.ll
+++ b/llvm/test/Transforms/InstSimplify/ptr_diff.ll
@@ -1,11 +1,9 @@
-; NOTE: Assertions have been autogenerated by update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instsimplify -S | FileCheck %s
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-define i64 @ptrdiff1(ptr %ptr) {
-; CHECK-LABEL: @ptrdiff1(
-; CHECK: ret i64 42
+define i64 @ptrdiff(ptr %ptr) {
+; CHECK-LABEL: @ptrdiff(
+; CHECK-NEXT: ret i64 42
;
%last = getelementptr inbounds i8, ptr %ptr, i32 42
%first.int = ptrtoint ptr %ptr to i64
@@ -14,9 +12,24 @@ define i64 @ptrdiff1(ptr %ptr) {
ret i64 %diff
}
-define i64 @ptrdiff2(ptr %ptr) {
-; CHECK-LABEL: @ptrdiff2(
-; CHECK: ret i64 42
+define i64 @ptrdiff_no_inbounds(ptr %ptr) {
+; CHECK-LABEL: @ptrdiff_no_inbounds(
+; CHECK-NEXT: [[LAST:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i32 42
+; CHECK-NEXT: [[FIRST_INT:%.*]] = ptrtoint ptr [[PTR]] to i64
+; CHECK-NEXT: [[LAST_INT:%.*]] = ptrtoint ptr [[LAST]] to i64
+; CHECK-NEXT: [[DIFF:%.*]] = sub i64 [[LAST_INT]], [[FIRST_INT]]
+; CHECK-NEXT: ret i64 [[DIFF]]
+;
+ %last = getelementptr i8, ptr %ptr, i32 42
+ %first.int = ptrtoint ptr %ptr to i64
+ %last.int = ptrtoint ptr %last to i64
+ %diff = sub i64 %last.int, %first.int
+ ret i64 %diff
+}
+
+define i64 @ptrdiff_chain(ptr %ptr) {
+; CHECK-LABEL: @ptrdiff_chain(
+; CHECK-NEXT: ret i64 42
;
%first2 = getelementptr inbounds i8, ptr %ptr, i32 1
%first3 = getelementptr inbounds i8, ptr %first2, i32 2
@@ -31,26 +44,10 @@ define i64 @ptrdiff2(ptr %ptr) {
ret i64 %diff
}
-define i64 @ptrdiff3(ptr %ptr) {
-; Don't bother with non-inbounds GEPs.
-; CHECK-LABEL: @ptrdiff3(
-; CHECK: [[LAST:%.*]] = getelementptr i8, ptr %ptr, i32 42
-; CHECK-NEXT: [[FIRST_INT:%.*]] = ptrtoint ptr %ptr to i64
-; CHECK-NEXT: [[LAST_INT:%.*]] = ptrtoint ptr [[LAST]] to i64
-; CHECK-NEXT: [[DIFF:%.*]] = sub i64 [[LAST_INT]], [[FIRST_INT]]
-; CHECK-NEXT: ret i64 [[DIFF]]
-;
- %last = getelementptr i8, ptr %ptr, i32 42
- %first.int = ptrtoint ptr %ptr to i64
- %last.int = ptrtoint ptr %last to i64
- %diff = sub i64 %last.int, %first.int
- ret i64 %diff
-}
-
-define <4 x i32> @ptrdiff4(<4 x ptr> %arg) nounwind {
; Handle simple cases of vectors of pointers.
-; CHECK-LABEL: @ptrdiff4(
-; CHECK: ret <4 x i32> zeroinitializer
+define <4 x i32> @ptrdiff_vectors(<4 x ptr> %arg) nounwind {
+; CHECK-LABEL: @ptrdiff_vectors(
+; CHECK-NEXT: ret <4 x i32> zeroinitializer
;
%p1 = ptrtoint <4 x ptr> %arg to <4 x i32>
%bc = bitcast <4 x ptr> %arg to <4 x ptr>
@@ -63,9 +60,9 @@ define <4 x i32> @ptrdiff4(<4 x ptr> %arg) nounwind {
@global = internal global %struct.ham zeroinitializer, align 4
-define i32 @ptrdiff5() nounwind {
-; CHECK-LABEL: @ptrdiff5(
-; CHECK: bb:
+define i32 @ptrdiff_global() nounwind {
+; CHECK-LABEL: @ptrdiff_global(
+; CHECK-NEXT: bb:
; CHECK-NEXT: ret i32 0
;
bb:
diff --git a/llvm/test/Transforms/LICM/vector-intrinsics.ll b/llvm/test/Transforms/LICM/vector-intrinsics.ll
new file mode 100644
index 0000000..351773e
--- /dev/null
+++ b/llvm/test/Transforms/LICM/vector-intrinsics.ll
@@ -0,0 +1,176 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -passes='loop-mssa(licm)' -verify-memoryssa %s | FileCheck %s
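+
+; These tests check which loop-invariant vector intrinsics LICM hoists out of
+; a conditionally executed block: speculatable calls without memory effects
+; (vector.reduce.umax, vp.umax) are hoisted into the preheader, while
+; vp.udiv (the divisor may be zero, so it cannot be speculated), vp.load,
+; and vp.store (memory accesses) stay behind the branch.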
+
+define i32 @reduce_umax(<2 x i32> %inv, i1 %c) {
+; CHECK-LABEL: define i32 @reduce_umax(
+; CHECK-SAME: <2 x i32> [[INV:%.*]], i1 [[C:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[REDUCE_UMAX:%.*]] = call i32 @llvm.vector.reduce.umax.v2i32(<2 x i32> [[INV]])
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
+; CHECK-NEXT: [[BACKEDGE_COND:%.*]] = icmp ult i32 [[IV]], [[REDUCE_UMAX]]
+; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[C]], i1 [[BACKEDGE_COND]], i1 false
+; CHECK-NEXT: br i1 [[OR_COND]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[IV_LCSSA:%.*]] = phi i32 [ [[IV]], %[[LOOP]] ]
+; CHECK-NEXT: ret i32 [[IV_LCSSA]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %cond.true ]
+ %iv.next = add i32 %iv, 1
+ br i1 %c, label %cond.true, label %exit
+
+cond.true:
+ %reduce.umax = call i32 @llvm.vector.reduce.umax.v2i32(<2 x i32> %inv)
+ %backedge.cond = icmp ult i32 %iv, %reduce.umax
+ br i1 %backedge.cond, label %loop, label %exit
+
+exit:
+ ret i32 %iv
+}
+
+define i32 @vp_umax(<2 x i32> %inv.l, <2 x i32> %inv.r, i1 %c) {
+; CHECK-LABEL: define i32 @vp_umax(
+; CHECK-SAME: <2 x i32> [[INV_L:%.*]], <2 x i32> [[INV_R:%.*]], i1 [[C:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[VP_UMAX:%.*]] = call <2 x i32> @llvm.vp.umax.v2i32(<2 x i32> [[INV_L]], <2 x i32> [[INV_R]], <2 x i1> splat (i1 true), i32 2)
+; CHECK-NEXT: [[EXTRACT:%.*]] = extractelement <2 x i32> [[VP_UMAX]], i32 0
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
+; CHECK-NEXT: [[BACKEDGE_COND:%.*]] = icmp ult i32 [[IV]], [[EXTRACT]]
+; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[C]], i1 [[BACKEDGE_COND]], i1 false
+; CHECK-NEXT: br i1 [[OR_COND]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[IV_LCSSA:%.*]] = phi i32 [ [[IV]], %[[LOOP]] ]
+; CHECK-NEXT: ret i32 [[IV_LCSSA]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %cond.true ]
+ %iv.next = add i32 %iv, 1
+ br i1 %c, label %cond.true, label %exit
+
+cond.true:
+ %vp.umax = call <2 x i32> @llvm.vp.umax.v2i32(<2 x i32> %inv.l, <2 x i32> %inv.r, <2 x i1> splat (i1 1), i32 2)
+ %extract = extractelement <2 x i32> %vp.umax, i32 0
+ %backedge.cond = icmp ult i32 %iv, %extract
+ br i1 %backedge.cond, label %loop, label %exit
+
+exit:
+ ret i32 %iv
+}
+
+define i32 @vp_udiv(<2 x i32> %inv.q, <2 x i32> %inv.d, i1 %c) {
+; CHECK-LABEL: define i32 @vp_udiv(
+; CHECK-SAME: <2 x i32> [[INV_Q:%.*]], <2 x i32> [[INV_D:%.*]], i1 [[C:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[COND_TRUE:.*]] ]
+; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
+; CHECK-NEXT: br i1 [[C]], label %[[COND_TRUE]], label %[[EXIT:.*]]
+; CHECK: [[COND_TRUE]]:
+; CHECK-NEXT: [[VP_UDIV:%.*]] = call <2 x i32> @llvm.vp.udiv.v2i32(<2 x i32> [[INV_Q]], <2 x i32> [[INV_D]], <2 x i1> splat (i1 true), i32 2)
+; CHECK-NEXT: [[EXTRACT:%.*]] = extractelement <2 x i32> [[VP_UDIV]], i32 0
+; CHECK-NEXT: [[LOOP_COND:%.*]] = icmp ult i32 [[IV]], [[EXTRACT]]
+; CHECK-NEXT: br i1 [[LOOP_COND]], label %[[LOOP]], label %[[EXIT]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[IV_LCSSA:%.*]] = phi i32 [ [[IV]], %[[COND_TRUE]] ], [ [[IV]], %[[LOOP]] ]
+; CHECK-NEXT: ret i32 [[IV_LCSSA]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %cond.true ]
+ %iv.next = add i32 %iv, 1
+ br i1 %c, label %cond.true, label %exit
+
+cond.true:
+ %vp.udiv = call <2 x i32> @llvm.vp.udiv.v2i32(<2 x i32> %inv.q, <2 x i32> %inv.d, <2 x i1> splat (i1 1), i32 2)
+ %extract = extractelement <2 x i32> %vp.udiv, i32 0
+ %backedge.cond = icmp ult i32 %iv, %extract
+ br i1 %backedge.cond, label %loop, label %exit
+
+exit:
+ ret i32 %iv
+}
+
+define i32 @vp_load(ptr %inv, i1 %c) {
+; CHECK-LABEL: define i32 @vp_load(
+; CHECK-SAME: ptr [[INV:%.*]], i1 [[C:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[COND_TRUE:.*]] ]
+; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
+; CHECK-NEXT: br i1 [[C]], label %[[COND_TRUE]], label %[[EXIT:.*]]
+; CHECK: [[COND_TRUE]]:
+; CHECK-NEXT: [[VP_LOAD:%.*]] = call <2 x i32> @llvm.vp.load.v2i32.p0(ptr [[INV]], <2 x i1> splat (i1 true), i32 2)
+; CHECK-NEXT: [[EXTRACT:%.*]] = extractelement <2 x i32> [[VP_LOAD]], i32 0
+; CHECK-NEXT: [[LOOP_COND:%.*]] = icmp ult i32 [[IV]], [[EXTRACT]]
+; CHECK-NEXT: br i1 [[LOOP_COND]], label %[[LOOP]], label %[[EXIT]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[IV_LCSSA:%.*]] = phi i32 [ [[IV]], %[[COND_TRUE]] ], [ [[IV]], %[[LOOP]] ]
+; CHECK-NEXT: ret i32 [[IV_LCSSA]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %cond.true ]
+ %iv.next = add i32 %iv, 1
+ br i1 %c, label %cond.true, label %exit
+
+cond.true:
+ %vp.load = call <2 x i32> @llvm.vp.load.v2i32(ptr %inv, <2 x i1> splat (i1 1), i32 2)
+ %extract = extractelement <2 x i32> %vp.load, i32 0
+ %backedge.cond = icmp ult i32 %iv, %extract
+ br i1 %backedge.cond, label %loop, label %exit
+
+exit:
+ ret i32 %iv
+}
+
+define i32 @vp_store(<2 x i32> %inv.v, ptr %inv.p, i1 %c) {
+; CHECK-LABEL: define i32 @vp_store(
+; CHECK-SAME: <2 x i32> [[INV_V:%.*]], ptr [[INV_P:%.*]], i1 [[C:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[COND_TRUE:.*]] ]
+; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
+; CHECK-NEXT: br i1 [[C]], label %[[COND_TRUE]], label %[[EXIT:.*]]
+; CHECK: [[COND_TRUE]]:
+; CHECK-NEXT: call void @llvm.vp.store.v2i32.p0(<2 x i32> [[INV_V]], ptr [[INV_P]], <2 x i1> splat (i1 true), i32 2)
+; CHECK-NEXT: [[BACKEDGE_COND:%.*]] = icmp ult i32 [[IV]], 10
+; CHECK-NEXT: br i1 [[BACKEDGE_COND]], label %[[LOOP]], label %[[EXIT]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[IV_LCSSA:%.*]] = phi i32 [ [[IV]], %[[COND_TRUE]] ], [ [[IV]], %[[LOOP]] ]
+; CHECK-NEXT: ret i32 [[IV_LCSSA]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %cond.true ]
+ %iv.next = add i32 %iv, 1
+ br i1 %c, label %cond.true, label %exit
+
+cond.true:
+ call void @llvm.vp.store.v2i32(<2 x i32> %inv.v, ptr %inv.p, <2 x i1> splat (i1 1), i32 2)
+ %backedge.cond = icmp ult i32 %iv, 10
+ br i1 %backedge.cond, label %loop, label %exit
+
+exit:
+ ret i32 %iv
+}
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/veclib-function-calls.ll b/llvm/test/Transforms/LoopVectorize/RISCV/veclib-function-calls.ll
index d73900d..83b494a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/veclib-function-calls.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/veclib-function-calls.ll
@@ -2288,7 +2288,7 @@ define void @tgamma_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
}
;.
; CHECK: attributes #[[ATTR0]] = { "target-features"="+v" }
-; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
+; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
; CHECK: attributes #[[ATTR2]] = { "vector-function-abi-variant"="_ZGVrNxv_acos(Sleef_acosdx_u10rvvm2)" }
; CHECK: attributes #[[ATTR3]] = { "vector-function-abi-variant"="_ZGVrNxv_acosf(Sleef_acosfx_u10rvvm2)" }
; CHECK: attributes #[[ATTR4]] = { "vector-function-abi-variant"="_ZGVrNxv_acosh(Sleef_acoshdx_u10rvvm2)" }
diff --git a/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll b/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll
index c225ede5..65058bd 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/replicating-load-store-costs.ll
@@ -621,8 +621,6 @@ define void @loaded_address_used_by_load_through_blend(i64 %start, ptr noalias %
; I32-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], 8
; I32-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]]
; I32-NEXT: [[TMP2:%.*]] = sub i64 [[START]], [[N_VEC]]
-; I32-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[START]], i64 0
-; I32-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
; I32-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <8 x ptr> poison, ptr [[SRC_2]], i64 0
; I32-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <8 x ptr> [[BROADCAST_SPLATINSERT1]], <8 x ptr> poison, <8 x i32> zeroinitializer
; I32-NEXT: br label %[[VECTOR_BODY:.*]]
@@ -644,14 +642,6 @@ define void @loaded_address_used_by_load_through_blend(i64 %start, ptr noalias %
; I32-NEXT: [[TMP16:%.*]] = add i64 [[TMP8]], 1
; I32-NEXT: [[TMP17:%.*]] = add i64 [[TMP9]], 1
; I32-NEXT: [[TMP18:%.*]] = add i64 [[TMP10]], 1
-; I32-NEXT: [[TMP19:%.*]] = insertelement <8 x i64> poison, i64 [[TMP11]], i32 0
-; I32-NEXT: [[TMP20:%.*]] = insertelement <8 x i64> [[TMP19]], i64 [[TMP12]], i32 1
-; I32-NEXT: [[TMP21:%.*]] = insertelement <8 x i64> [[TMP20]], i64 [[TMP13]], i32 2
-; I32-NEXT: [[TMP22:%.*]] = insertelement <8 x i64> [[TMP21]], i64 [[TMP14]], i32 3
-; I32-NEXT: [[TMP23:%.*]] = insertelement <8 x i64> [[TMP22]], i64 [[TMP15]], i32 4
-; I32-NEXT: [[TMP24:%.*]] = insertelement <8 x i64> [[TMP23]], i64 [[TMP16]], i32 5
-; I32-NEXT: [[TMP25:%.*]] = insertelement <8 x i64> [[TMP24]], i64 [[TMP17]], i32 6
-; I32-NEXT: [[TMP26:%.*]] = insertelement <8 x i64> [[TMP25]], i64 [[TMP18]], i32 7
; I32-NEXT: [[TMP27:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP11]]
; I32-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP12]]
; I32-NEXT: [[TMP29:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP13]]
@@ -677,22 +667,21 @@ define void @loaded_address_used_by_load_through_blend(i64 %start, ptr noalias %
; I32-NEXT: [[TMP49:%.*]] = insertelement <8 x float> [[TMP48]], float [[TMP41]], i32 6
; I32-NEXT: [[TMP50:%.*]] = insertelement <8 x float> [[TMP49]], float [[TMP42]], i32 7
; I32-NEXT: [[TMP51:%.*]] = fcmp oeq <8 x float> [[TMP50]], zeroinitializer
-; I32-NEXT: [[TMP52:%.*]] = mul <8 x i64> [[TMP26]], [[BROADCAST_SPLAT]]
-; I32-NEXT: [[TMP53:%.*]] = extractelement <8 x i64> [[TMP52]], i32 0
+; I32-NEXT: [[TMP53:%.*]] = mul i64 [[TMP11]], [[START]]
+; I32-NEXT: [[TMP55:%.*]] = mul i64 [[TMP12]], [[START]]
+; I32-NEXT: [[TMP57:%.*]] = mul i64 [[TMP13]], [[START]]
+; I32-NEXT: [[TMP59:%.*]] = mul i64 [[TMP14]], [[START]]
+; I32-NEXT: [[TMP61:%.*]] = mul i64 [[TMP15]], [[START]]
+; I32-NEXT: [[TMP63:%.*]] = mul i64 [[TMP16]], [[START]]
+; I32-NEXT: [[TMP65:%.*]] = mul i64 [[TMP17]], [[START]]
+; I32-NEXT: [[TMP67:%.*]] = mul i64 [[TMP18]], [[START]]
; I32-NEXT: [[TMP54:%.*]] = getelementptr i8, ptr [[SRC_2]], i64 [[TMP53]]
-; I32-NEXT: [[TMP55:%.*]] = extractelement <8 x i64> [[TMP52]], i32 1
; I32-NEXT: [[TMP56:%.*]] = getelementptr i8, ptr [[SRC_2]], i64 [[TMP55]]
-; I32-NEXT: [[TMP57:%.*]] = extractelement <8 x i64> [[TMP52]], i32 2
; I32-NEXT: [[TMP58:%.*]] = getelementptr i8, ptr [[SRC_2]], i64 [[TMP57]]
-; I32-NEXT: [[TMP59:%.*]] = extractelement <8 x i64> [[TMP52]], i32 3
; I32-NEXT: [[TMP60:%.*]] = getelementptr i8, ptr [[SRC_2]], i64 [[TMP59]]
-; I32-NEXT: [[TMP61:%.*]] = extractelement <8 x i64> [[TMP52]], i32 4
; I32-NEXT: [[TMP62:%.*]] = getelementptr i8, ptr [[SRC_2]], i64 [[TMP61]]
-; I32-NEXT: [[TMP63:%.*]] = extractelement <8 x i64> [[TMP52]], i32 5
; I32-NEXT: [[TMP64:%.*]] = getelementptr i8, ptr [[SRC_2]], i64 [[TMP63]]
-; I32-NEXT: [[TMP65:%.*]] = extractelement <8 x i64> [[TMP52]], i32 6
; I32-NEXT: [[TMP66:%.*]] = getelementptr i8, ptr [[SRC_2]], i64 [[TMP65]]
-; I32-NEXT: [[TMP67:%.*]] = extractelement <8 x i64> [[TMP52]], i32 7
; I32-NEXT: [[TMP68:%.*]] = getelementptr i8, ptr [[SRC_2]], i64 [[TMP67]]
; I32-NEXT: [[TMP69:%.*]] = insertelement <8 x ptr> poison, ptr [[TMP54]], i32 0
; I32-NEXT: [[TMP70:%.*]] = insertelement <8 x ptr> [[TMP69]], ptr [[TMP56]], i32 1
@@ -774,7 +763,222 @@ exit:
ret void
}
-attributes #0 = { "target-cpu"="znver3" }
+define void @address_use_in_different_block(ptr noalias %dst, ptr %src.0, ptr %src.1, i32 %x) #0 {
+; I64-LABEL: define void @address_use_in_different_block(
+; I64-SAME: ptr noalias [[DST:%.*]], ptr [[SRC_0:%.*]], ptr [[SRC_1:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
+; I64-NEXT: [[ENTRY:.*:]]
+; I64-NEXT: [[X_POS:%.*]] = call i32 @llvm.smax.i32(i32 [[X]], i32 0)
+; I64-NEXT: [[OFFSET:%.*]] = zext i32 [[X_POS]] to i64
+; I64-NEXT: br label %[[VECTOR_PH:.*]]
+; I64: [[VECTOR_PH]]:
+; I64-NEXT: br label %[[VECTOR_BODY:.*]]
+; I64: [[VECTOR_BODY]]:
+; I64-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; I64-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; I64-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
+; I64-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2
+; I64-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3
+; I64-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 4
+; I64-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 5
+; I64-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 6
+; I64-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 7
+; I64-NEXT: [[TMP8:%.*]] = mul i64 [[TMP0]], [[OFFSET]]
+; I64-NEXT: [[TMP9:%.*]] = mul i64 [[TMP1]], [[OFFSET]]
+; I64-NEXT: [[TMP10:%.*]] = mul i64 [[TMP2]], [[OFFSET]]
+; I64-NEXT: [[TMP11:%.*]] = mul i64 [[TMP3]], [[OFFSET]]
+; I64-NEXT: [[TMP12:%.*]] = mul i64 [[TMP4]], [[OFFSET]]
+; I64-NEXT: [[TMP13:%.*]] = mul i64 [[TMP5]], [[OFFSET]]
+; I64-NEXT: [[TMP14:%.*]] = mul i64 [[TMP6]], [[OFFSET]]
+; I64-NEXT: [[TMP15:%.*]] = mul i64 [[TMP7]], [[OFFSET]]
+; I64-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[SRC_0]], i64 [[TMP8]]
+; I64-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[SRC_0]], i64 [[TMP9]]
+; I64-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[SRC_0]], i64 [[TMP10]]
+; I64-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[SRC_0]], i64 [[TMP11]]
+; I64-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[SRC_0]], i64 [[TMP12]]
+; I64-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[SRC_0]], i64 [[TMP13]]
+; I64-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[SRC_0]], i64 [[TMP14]]
+; I64-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr [[SRC_0]], i64 [[TMP15]]
+; I64-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP16]], align 4
+; I64-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP17]], align 4
+; I64-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP18]], align 4
+; I64-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP19]], align 4
+; I64-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP20]], align 4
+; I64-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP21]], align 4
+; I64-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP22]], align 4
+; I64-NEXT: [[TMP31:%.*]] = load i32, ptr [[TMP23]], align 4
+; I64-NEXT: [[TMP32:%.*]] = sext i32 [[TMP24]] to i64
+; I64-NEXT: [[TMP33:%.*]] = sext i32 [[TMP25]] to i64
+; I64-NEXT: [[TMP34:%.*]] = sext i32 [[TMP26]] to i64
+; I64-NEXT: [[TMP35:%.*]] = sext i32 [[TMP27]] to i64
+; I64-NEXT: [[TMP36:%.*]] = sext i32 [[TMP28]] to i64
+; I64-NEXT: [[TMP37:%.*]] = sext i32 [[TMP29]] to i64
+; I64-NEXT: [[TMP38:%.*]] = sext i32 [[TMP30]] to i64
+; I64-NEXT: [[TMP39:%.*]] = sext i32 [[TMP31]] to i64
+; I64-NEXT: [[TMP40:%.*]] = getelementptr double, ptr [[SRC_1]], i64 [[TMP32]]
+; I64-NEXT: [[TMP41:%.*]] = getelementptr double, ptr [[SRC_1]], i64 [[TMP33]]
+; I64-NEXT: [[TMP42:%.*]] = getelementptr double, ptr [[SRC_1]], i64 [[TMP34]]
+; I64-NEXT: [[TMP43:%.*]] = getelementptr double, ptr [[SRC_1]], i64 [[TMP35]]
+; I64-NEXT: [[TMP44:%.*]] = getelementptr double, ptr [[SRC_1]], i64 [[TMP36]]
+; I64-NEXT: [[TMP45:%.*]] = getelementptr double, ptr [[SRC_1]], i64 [[TMP37]]
+; I64-NEXT: [[TMP46:%.*]] = getelementptr double, ptr [[SRC_1]], i64 [[TMP38]]
+; I64-NEXT: [[TMP47:%.*]] = getelementptr double, ptr [[SRC_1]], i64 [[TMP39]]
+; I64-NEXT: [[TMP48:%.*]] = getelementptr i8, ptr [[TMP40]], i64 -8
+; I64-NEXT: [[TMP49:%.*]] = getelementptr i8, ptr [[TMP41]], i64 -8
+; I64-NEXT: [[TMP50:%.*]] = getelementptr i8, ptr [[TMP42]], i64 -8
+; I64-NEXT: [[TMP51:%.*]] = getelementptr i8, ptr [[TMP43]], i64 -8
+; I64-NEXT: [[TMP52:%.*]] = getelementptr i8, ptr [[TMP44]], i64 -8
+; I64-NEXT: [[TMP53:%.*]] = getelementptr i8, ptr [[TMP45]], i64 -8
+; I64-NEXT: [[TMP54:%.*]] = getelementptr i8, ptr [[TMP46]], i64 -8
+; I64-NEXT: [[TMP55:%.*]] = getelementptr i8, ptr [[TMP47]], i64 -8
+; I64-NEXT: [[TMP56:%.*]] = load double, ptr [[TMP48]], align 8
+; I64-NEXT: [[TMP57:%.*]] = load double, ptr [[TMP49]], align 8
+; I64-NEXT: [[TMP58:%.*]] = insertelement <2 x double> poison, double [[TMP56]], i32 0
+; I64-NEXT: [[TMP59:%.*]] = insertelement <2 x double> [[TMP58]], double [[TMP57]], i32 1
+; I64-NEXT: [[TMP60:%.*]] = load double, ptr [[TMP50]], align 8
+; I64-NEXT: [[TMP61:%.*]] = load double, ptr [[TMP51]], align 8
+; I64-NEXT: [[TMP62:%.*]] = insertelement <2 x double> poison, double [[TMP60]], i32 0
+; I64-NEXT: [[TMP63:%.*]] = insertelement <2 x double> [[TMP62]], double [[TMP61]], i32 1
+; I64-NEXT: [[TMP64:%.*]] = load double, ptr [[TMP52]], align 8
+; I64-NEXT: [[TMP65:%.*]] = load double, ptr [[TMP53]], align 8
+; I64-NEXT: [[TMP66:%.*]] = insertelement <2 x double> poison, double [[TMP64]], i32 0
+; I64-NEXT: [[TMP67:%.*]] = insertelement <2 x double> [[TMP66]], double [[TMP65]], i32 1
+; I64-NEXT: [[TMP68:%.*]] = load double, ptr [[TMP54]], align 8
+; I64-NEXT: [[TMP69:%.*]] = load double, ptr [[TMP55]], align 8
+; I64-NEXT: [[TMP70:%.*]] = insertelement <2 x double> poison, double [[TMP68]], i32 0
+; I64-NEXT: [[TMP71:%.*]] = insertelement <2 x double> [[TMP70]], double [[TMP69]], i32 1
+; I64-NEXT: [[TMP72:%.*]] = fsub <2 x double> zeroinitializer, [[TMP59]]
+; I64-NEXT: [[TMP73:%.*]] = fsub <2 x double> zeroinitializer, [[TMP63]]
+; I64-NEXT: [[TMP74:%.*]] = fsub <2 x double> zeroinitializer, [[TMP67]]
+; I64-NEXT: [[TMP75:%.*]] = fsub <2 x double> zeroinitializer, [[TMP71]]
+; I64-NEXT: [[TMP76:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP8]]
+; I64-NEXT: [[TMP77:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP9]]
+; I64-NEXT: [[TMP78:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP10]]
+; I64-NEXT: [[TMP79:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP11]]
+; I64-NEXT: [[TMP80:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP12]]
+; I64-NEXT: [[TMP81:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP13]]
+; I64-NEXT: [[TMP82:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP14]]
+; I64-NEXT: [[TMP83:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP15]]
+; I64-NEXT: [[TMP84:%.*]] = extractelement <2 x double> [[TMP72]], i32 0
+; I64-NEXT: store double [[TMP84]], ptr [[TMP76]], align 8
+; I64-NEXT: [[TMP85:%.*]] = extractelement <2 x double> [[TMP72]], i32 1
+; I64-NEXT: store double [[TMP85]], ptr [[TMP77]], align 8
+; I64-NEXT: [[TMP86:%.*]] = extractelement <2 x double> [[TMP73]], i32 0
+; I64-NEXT: store double [[TMP86]], ptr [[TMP78]], align 8
+; I64-NEXT: [[TMP87:%.*]] = extractelement <2 x double> [[TMP73]], i32 1
+; I64-NEXT: store double [[TMP87]], ptr [[TMP79]], align 8
+; I64-NEXT: [[TMP88:%.*]] = extractelement <2 x double> [[TMP74]], i32 0
+; I64-NEXT: store double [[TMP88]], ptr [[TMP80]], align 8
+; I64-NEXT: [[TMP89:%.*]] = extractelement <2 x double> [[TMP74]], i32 1
+; I64-NEXT: store double [[TMP89]], ptr [[TMP81]], align 8
+; I64-NEXT: [[TMP90:%.*]] = extractelement <2 x double> [[TMP75]], i32 0
+; I64-NEXT: store double [[TMP90]], ptr [[TMP82]], align 8
+; I64-NEXT: [[TMP91:%.*]] = extractelement <2 x double> [[TMP75]], i32 1
+; I64-NEXT: store double [[TMP91]], ptr [[TMP83]], align 8
+; I64-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; I64-NEXT: [[TMP92:%.*]] = icmp eq i64 [[INDEX_NEXT]], 96
+; I64-NEXT: br i1 [[TMP92]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; I64: [[MIDDLE_BLOCK]]:
+; I64-NEXT: br label %[[SCALAR_PH:.*]]
+; I64: [[SCALAR_PH]]:
+;
+; I32-LABEL: define void @address_use_in_different_block(
+; I32-SAME: ptr noalias [[DST:%.*]], ptr [[SRC_0:%.*]], ptr [[SRC_1:%.*]], i32 [[X:%.*]]) #[[ATTR0]] {
+; I32-NEXT: [[ENTRY:.*:]]
+; I32-NEXT: [[X_POS:%.*]] = call i32 @llvm.smax.i32(i32 [[X]], i32 0)
+; I32-NEXT: [[OFFSET:%.*]] = zext i32 [[X_POS]] to i64
+; I32-NEXT: br label %[[VECTOR_PH:.*]]
+; I32: [[VECTOR_PH]]:
+; I32-NEXT: br label %[[VECTOR_BODY:.*]]
+; I32: [[VECTOR_BODY]]:
+; I32-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; I32-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; I32-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
+; I32-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2
+; I32-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3
+; I32-NEXT: [[TMP4:%.*]] = mul i64 [[TMP0]], [[OFFSET]]
+; I32-NEXT: [[TMP5:%.*]] = mul i64 [[TMP1]], [[OFFSET]]
+; I32-NEXT: [[TMP6:%.*]] = mul i64 [[TMP2]], [[OFFSET]]
+; I32-NEXT: [[TMP7:%.*]] = mul i64 [[TMP3]], [[OFFSET]]
+; I32-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[SRC_0]], i64 [[TMP4]]
+; I32-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[SRC_0]], i64 [[TMP5]]
+; I32-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[SRC_0]], i64 [[TMP6]]
+; I32-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[SRC_0]], i64 [[TMP7]]
+; I32-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP8]], align 4
+; I32-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP9]], align 4
+; I32-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP10]], align 4
+; I32-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP11]], align 4
+; I32-NEXT: [[TMP16:%.*]] = sext i32 [[TMP12]] to i64
+; I32-NEXT: [[TMP17:%.*]] = sext i32 [[TMP13]] to i64
+; I32-NEXT: [[TMP18:%.*]] = sext i32 [[TMP14]] to i64
+; I32-NEXT: [[TMP19:%.*]] = sext i32 [[TMP15]] to i64
+; I32-NEXT: [[TMP20:%.*]] = getelementptr double, ptr [[SRC_1]], i64 [[TMP16]]
+; I32-NEXT: [[TMP21:%.*]] = getelementptr double, ptr [[SRC_1]], i64 [[TMP17]]
+; I32-NEXT: [[TMP22:%.*]] = getelementptr double, ptr [[SRC_1]], i64 [[TMP18]]
+; I32-NEXT: [[TMP23:%.*]] = getelementptr double, ptr [[SRC_1]], i64 [[TMP19]]
+; I32-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[TMP20]], i64 -8
+; I32-NEXT: [[TMP25:%.*]] = getelementptr i8, ptr [[TMP21]], i64 -8
+; I32-NEXT: [[TMP26:%.*]] = getelementptr i8, ptr [[TMP22]], i64 -8
+; I32-NEXT: [[TMP27:%.*]] = getelementptr i8, ptr [[TMP23]], i64 -8
+; I32-NEXT: [[TMP28:%.*]] = load double, ptr [[TMP24]], align 8
+; I32-NEXT: [[TMP29:%.*]] = load double, ptr [[TMP25]], align 8
+; I32-NEXT: [[TMP30:%.*]] = load double, ptr [[TMP26]], align 8
+; I32-NEXT: [[TMP31:%.*]] = load double, ptr [[TMP27]], align 8
+; I32-NEXT: [[TMP32:%.*]] = insertelement <4 x double> poison, double [[TMP28]], i32 0
+; I32-NEXT: [[TMP33:%.*]] = insertelement <4 x double> [[TMP32]], double [[TMP29]], i32 1
+; I32-NEXT: [[TMP34:%.*]] = insertelement <4 x double> [[TMP33]], double [[TMP30]], i32 2
+; I32-NEXT: [[TMP35:%.*]] = insertelement <4 x double> [[TMP34]], double [[TMP31]], i32 3
+; I32-NEXT: [[TMP36:%.*]] = fsub <4 x double> zeroinitializer, [[TMP35]]
+; I32-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP4]]
+; I32-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP5]]
+; I32-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP6]]
+; I32-NEXT: [[TMP40:%.*]] = getelementptr double, ptr [[DST]], i64 [[TMP7]]
+; I32-NEXT: [[TMP41:%.*]] = extractelement <4 x double> [[TMP36]], i32 0
+; I32-NEXT: store double [[TMP41]], ptr [[TMP37]], align 8
+; I32-NEXT: [[TMP42:%.*]] = extractelement <4 x double> [[TMP36]], i32 1
+; I32-NEXT: store double [[TMP42]], ptr [[TMP38]], align 8
+; I32-NEXT: [[TMP43:%.*]] = extractelement <4 x double> [[TMP36]], i32 2
+; I32-NEXT: store double [[TMP43]], ptr [[TMP39]], align 8
+; I32-NEXT: [[TMP44:%.*]] = extractelement <4 x double> [[TMP36]], i32 3
+; I32-NEXT: store double [[TMP44]], ptr [[TMP40]], align 8
+; I32-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; I32-NEXT: [[TMP45:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
+; I32-NEXT: br i1 [[TMP45]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; I32: [[MIDDLE_BLOCK]]:
+; I32-NEXT: br label %[[SCALAR_PH:.*]]
+; I32: [[SCALAR_PH]]:
+;
+entry:
+ %x.pos = call i32 @llvm.smax.i32(i32 %x, i32 0)
+ %offset = zext i32 %x.pos to i64
+ br label %loop.header
+
+loop.header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
+ %7 = mul i64 %iv, %offset
+ %gep.src.0 = getelementptr i32, ptr %src.0, i64 %7
+ %l8 = load i32, ptr %gep.src.0, align 4
+ %c = icmp sgt i32 %x, 0
+ br i1 %c, label %loop.latch, label %then
+
+then:
+ br label %loop.latch
+
+loop.latch:
+ %l.ext = sext i32 %l8 to i64
+ %gep.src.1 = getelementptr double, ptr %src.1, i64 %l.ext
+ %13 = getelementptr i8, ptr %gep.src.1, i64 -8
+ %l.2 = load double, ptr %13, align 8
+ %sub = fsub double 0.000000e+00, %l.2
+ %gep.dst = getelementptr double, ptr %dst, i64 %7
+ store double %sub, ptr %gep.dst, align 8
+ %iv.next = add i64 %iv, 1
+ %ec = icmp eq i64 %iv, 100
+ br i1 %ec, label %exit, label %loop.header
+
+exit:
+ ret void
+}
+
attributes #0 = { "target-cpu"="znver2" }
!0 = distinct !{!0, !1}
diff --git a/llvm/test/Transforms/LoopVectorize/X86/x86-predication.ll b/llvm/test/Transforms/LoopVectorize/X86/x86-predication.ll
index 774f0db..f293ed1 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/x86-predication.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/x86-predication.ll
@@ -186,12 +186,11 @@ define i32 @scalarize_and_sink_gather(ptr %a, i1 %c, i32 %x, i64 %n) {
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_UDIV_CONTINUE4:%.*]] ]
-; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[PRED_UDIV_CONTINUE4]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[PRED_UDIV_CONTINUE4]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = mul <2 x i64> [[VEC_IND]], splat (i64 777)
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_UDIV_IF:%.*]], label [[PRED_UDIV_CONTINUE:%.*]]
; CHECK: pred.udiv.if:
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i64> [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP0]], 777
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
; CHECK-NEXT: [[TMP5:%.*]] = udiv i32 [[TMP4]], [[X]]
@@ -201,7 +200,8 @@ define i32 @scalarize_and_sink_gather(ptr %a, i1 %c, i32 %x, i64 %n) {
; CHECK-NEXT: [[TMP8:%.*]] = phi <2 x i32> [ poison, [[VECTOR_BODY]] ], [ [[TMP6]], [[PRED_UDIV_IF]] ]
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_UDIV_IF3:%.*]], label [[PRED_UDIV_CONTINUE4]]
; CHECK: pred.udiv.if3:
-; CHECK-NEXT: [[TMP10:%.*]] = extractelement <2 x i64> [[TMP0]], i32 1
+; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[TMP7]], 777
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP10]]
; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP11]], align 4
; CHECK-NEXT: [[TMP13:%.*]] = udiv i32 [[TMP12]], [[X]]
@@ -212,7 +212,6 @@ define i32 @scalarize_and_sink_gather(ptr %a, i1 %c, i32 %x, i64 %n) {
; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[BROADCAST_SPLAT]], <2 x i32> [[TMP16]], <2 x i32> [[BROADCAST_SPLAT4]]
; CHECK-NEXT: [[TMP18]] = add <2 x i32> [[VEC_PHI]], [[PREDPHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
-; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
diff --git a/llvm/test/Transforms/LoopVectorize/single_early_exit.ll b/llvm/test/Transforms/LoopVectorize/single_early_exit.ll
index 3500c5c..4fd8d17 100644
--- a/llvm/test/Transforms/LoopVectorize/single_early_exit.ll
+++ b/llvm/test/Transforms/LoopVectorize/single_early_exit.ll
@@ -546,19 +546,50 @@ define i64 @loop_guards_needed_to_prove_deref_multiple(i32 %x, i1 %c, ptr derefe
; CHECK-NEXT: call void @llvm.assume(i1 [[PRE_2]])
; CHECK-NEXT: [[N:%.*]] = add i32 [[SEL]], -1
; CHECK-NEXT: [[N_EXT:%.*]] = zext i32 [[N]] to i64
+; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[SEL]], -2
+; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 2
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 4
+; CHECK-NEXT: [[IV_NEXT:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
; CHECK-NEXT: br label [[LOOP_HEADER:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[LOOP_HEADER]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP3]], align 1
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <4 x i8> [[WIDE_LOAD]], zeroinitializer
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP5:%.*]] = freeze <4 x i1> [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP5]])
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[IV_NEXT]]
+; CHECK-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_SPLIT:%.*]], label [[LOOP_HEADER]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK: middle.split:
+; CHECK-NEXT: br i1 [[TMP6]], label [[VECTOR_EARLY_EXIT:%.*]], label [[LOOP_LATCH:%.*]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[IV_NEXT]]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT_LOOPEXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: vector.early.exit:
+; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.experimental.cttz.elts.i64.v4i1(<4 x i1> [[TMP4]], i1 true)
+; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], [[TMP9]]
+; CHECK-NEXT: br label [[EXIT_LOOPEXIT]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT]], [[LOOP_LATCH]] ], [ 0, [[PH]] ]
+; CHECK-NEXT: br label [[LOOP_HEADER1:%.*]]
; CHECK: loop.header:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ], [ 0, [[PH]] ]
-; CHECK-NEXT: [[GEP_SRC_I:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV]]
+; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[IV_NEXT1:%.*]], [[LOOP_LATCH1:%.*]] ], [ [[IV]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[GEP_SRC_I:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV1]]
; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[GEP_SRC_I]], align 1
; CHECK-NEXT: [[C_1:%.*]] = icmp eq i8 [[L]], 0
-; CHECK-NEXT: br i1 [[C_1]], label [[EXIT_LOOPEXIT:%.*]], label [[LOOP_LATCH]]
+; CHECK-NEXT: br i1 [[C_1]], label [[EXIT_LOOPEXIT]], label [[LOOP_LATCH1]]
; CHECK: loop.latch:
-; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], [[N_EXT]]
-; CHECK-NEXT: br i1 [[EC]], label [[EXIT_LOOPEXIT]], label [[LOOP_HEADER]]
+; CHECK-NEXT: [[IV_NEXT1]] = add i64 [[IV1]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV1]], [[N_EXT]]
+; CHECK-NEXT: br i1 [[EC]], label [[EXIT_LOOPEXIT]], label [[LOOP_HEADER1]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: exit.loopexit:
-; CHECK-NEXT: [[RES_PH:%.*]] = phi i64 [ [[IV]], [[LOOP_HEADER]] ], [ 0, [[LOOP_LATCH]] ]
+; CHECK-NEXT: [[RES_PH:%.*]] = phi i64 [ [[IV1]], [[LOOP_HEADER1]] ], [ 0, [[LOOP_LATCH1]] ], [ 0, [[LOOP_LATCH]] ], [ [[TMP10]], [[VECTOR_EARLY_EXIT]] ]
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: [[RES:%.*]] = phi i64 [ -1, [[ENTRY:%.*]] ], [ -2, [[THEN]] ], [ [[RES_PH]], [[EXIT_LOOPEXIT]] ]
@@ -609,4 +640,6 @@ exit:
; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META2]], [[META1]]}
; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]}
; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META2]], [[META1]]}
+; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]], [[META2]]}
+; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META2]], [[META1]]}
;.
diff --git a/llvm/test/Transforms/PreISelIntrinsicLowering/AArch64/expand-exp.ll b/llvm/test/Transforms/PreISelIntrinsicLowering/AArch64/expand-exp.ll
index 9acc6d6..09f583f 100644
--- a/llvm/test/Transforms/PreISelIntrinsicLowering/AArch64/expand-exp.ll
+++ b/llvm/test/Transforms/PreISelIntrinsicLowering/AArch64/expand-exp.ll
@@ -39,5 +39,4 @@ declare <4 x float> @llvm.exp.v4f32(<4 x float>) #0
declare <vscale x 4 x float> @llvm.exp.nxv4f32(<vscale x 4 x float>) #0
; CHECK: attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
-; CHECK-NEXT: attributes #1 = { nocallback nofree nosync nounwind willreturn memory(none) }
attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
diff --git a/llvm/test/tools/llvm-exegesis/AArch64/no-aliasing-ld-str.s b/llvm/test/tools/llvm-exegesis/AArch64/no-aliasing-ld-str.s
index da83c54..8348c97 100644
--- a/llvm/test/tools/llvm-exegesis/AArch64/no-aliasing-ld-str.s
+++ b/llvm/test/tools/llvm-exegesis/AArch64/no-aliasing-ld-str.s
@@ -1,10 +1,12 @@
REQUIRES: aarch64-registered-target
-// Flakey on SVE buildbots, disabled pending invesgitation.
+// This test will sometimes fail with "Not all operands were initialized by the snippet generator for...".
UNSUPPORTED: target={{.*}}
RUN: llvm-exegesis -mtriple=aarch64 -mcpu=neoverse-v2 -mode=latency --dump-object-to-disk=%t.obj --opcode-name=FMOVWSr --benchmark-phase=assemble-measured-code 2>&1
RUN: llvm-objdump -d %t.obj > %t.s
RUN: FileCheck %s < %t.s
+// Start matching after the printed file path, as that may contain something that looks like a mnemonic.
+CHECK: Disassembly of section .text:
CHECK-NOT: ld{{[1-4]}}
CHECK-NOT: st{{[1-4]}}
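
// Illustrative example of the failure mode the anchor guards against: the
// objdump output starts with the object's path (e.g.
// ".../no-aliasing-ld-str.s.tmp.obj: file format elf64-littleaarch64"), and a
// path containing a substring such as "st1" would otherwise match the
// CHECK-NOT patterns above.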