Diffstat (limited to 'llvm/test/CodeGen/AMDGPU')
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/ashr.ll                                        |  22
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/combine-or-s64-s32.mir                         |  97
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll |  21
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll                                        |  69
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll                                        | 738
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fmaximum.mir                          | 275
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fminimum.mir                          | 275
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-shuffle-vector.mir                    |  48
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll                                        |  24
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/or.ll                                          |  16
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-shuffle.mir              |  10
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/sext_inreg.ll                                  |  56
-rw-r--r--  llvm/test/CodeGen/AMDGPU/GlobalISel/shufflevector-pointer-crash.mir                |   6
-rw-r--r--  llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll                             | 144
-rw-r--r--  llvm/test/CodeGen/AMDGPU/div_i128.ll                                               |  32
-rw-r--r--  llvm/test/CodeGen/AMDGPU/flat-saddr-load.ll                                        |  22
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fmaximum.ll                                               | 921
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fminimum.ll                                               | 921
-rw-r--r--  llvm/test/CodeGen/AMDGPU/global-saddr-load.ll                                      |  44
-rw-r--r--  llvm/test/CodeGen/AMDGPU/itofp.i128.ll                                             |   4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-nontemporal-metadata.ll         | 222
21 files changed, 2828 insertions(+), 1139 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/ashr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/ashr.ll
index 353c09b..ecd7cc2 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/ashr.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/ashr.ll
@@ -1778,7 +1778,7 @@ define i65 @v_ashr_i65_33(i65 %value) {
; GFX6-NEXT: v_ashrrev_i32_e32 v2, 31, v1
; GFX6-NEXT: v_lshl_b64 v[0:1], v[1:2], 31
; GFX6-NEXT: v_lshrrev_b32_e32 v3, 1, v3
-; GFX6-NEXT: v_or_b32_e32 v0, v3, v0
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v3
; GFX6-NEXT: v_ashrrev_i32_e32 v2, 1, v2
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
@@ -1790,7 +1790,7 @@ define i65 @v_ashr_i65_33(i65 %value) {
; GFX8-NEXT: v_ashrrev_i32_e32 v2, 31, v1
; GFX8-NEXT: v_lshlrev_b64 v[0:1], 31, v[1:2]
; GFX8-NEXT: v_lshrrev_b32_e32 v3, 1, v3
-; GFX8-NEXT: v_or_b32_e32 v0, v3, v0
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v3
; GFX8-NEXT: v_ashrrev_i32_e32 v2, 1, v2
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
@@ -1802,7 +1802,7 @@ define i65 @v_ashr_i65_33(i65 %value) {
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v1
; GFX9-NEXT: v_lshlrev_b64 v[0:1], 31, v[1:2]
; GFX9-NEXT: v_lshrrev_b32_e32 v3, 1, v3
-; GFX9-NEXT: v_or_b32_e32 v0, v3, v0
+; GFX9-NEXT: v_or_b32_e32 v0, v0, v3
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 1, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -1815,7 +1815,7 @@ define i65 @v_ashr_i65_33(i65 %value) {
; GFX10PLUS-NEXT: v_ashrrev_i32_e32 v2, 31, v1
; GFX10PLUS-NEXT: v_lshlrev_b64 v[0:1], 31, v[1:2]
; GFX10PLUS-NEXT: v_ashrrev_i32_e32 v2, 1, v2
-; GFX10PLUS-NEXT: v_or_b32_e32 v0, v3, v0
+; GFX10PLUS-NEXT: v_or_b32_e32 v0, v0, v3
; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
%result = ashr i65 %value, 33
ret i65 %result
@@ -1875,21 +1875,19 @@ define amdgpu_ps i65 @s_ashr_i65_33(i65 inreg %value) {
; GCN-LABEL: s_ashr_i65_33:
; GCN: ; %bb.0:
; GCN-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x10000
-; GCN-NEXT: s_lshr_b32 s0, s1, 1
-; GCN-NEXT: s_mov_b32 s1, 0
-; GCN-NEXT: s_lshl_b64 s[4:5], s[2:3], 31
-; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
+; GCN-NEXT: s_lshr_b32 s4, s1, 1
+; GCN-NEXT: s_lshl_b64 s[0:1], s[2:3], 31
+; GCN-NEXT: s_or_b32 s0, s0, s4
; GCN-NEXT: s_ashr_i32 s2, s3, 1
; GCN-NEXT: ; return to shader part epilog
;
; GFX10PLUS-LABEL: s_ashr_i65_33:
; GFX10PLUS: ; %bb.0:
; GFX10PLUS-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x10000
-; GFX10PLUS-NEXT: s_lshr_b32 s0, s1, 1
-; GFX10PLUS-NEXT: s_mov_b32 s1, 0
-; GFX10PLUS-NEXT: s_lshl_b64 s[4:5], s[2:3], 31
+; GFX10PLUS-NEXT: s_lshr_b32 s4, s1, 1
+; GFX10PLUS-NEXT: s_lshl_b64 s[0:1], s[2:3], 31
; GFX10PLUS-NEXT: s_ashr_i32 s2, s3, 1
-; GFX10PLUS-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
+; GFX10PLUS-NEXT: s_or_b32 s0, s0, s4
; GFX10PLUS-NEXT: ; return to shader part epilog
%result = ashr i65 %value, 33
ret i65 %result
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-or-s64-s32.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-or-s64-s32.mir
new file mode 100644
index 0000000..48e9818
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-or-s64-s32.mir
@@ -0,0 +1,97 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn -mcpu=tahiti -run-pass=amdgpu-prelegalizer-combiner %s -o - | FileCheck %s
+
+---
+name: test_combine_or_s64_s32
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-LABEL: name: test_combine_or_s64_s32
+ ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV]], [[COPY1]]
+ ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR]](s32), [[UV1]](s32)
+ ; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[MV]](s64)
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+ %0:_(s64) = COPY $sgpr0_sgpr1
+ %1:_(s32) = COPY $sgpr2
+ %2:_(s64) = G_ZEXT %1(s32)
+ %3:_(s64) = G_OR %0, %2
+ $sgpr0_sgpr1 = COPY %3(s64)
+ SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+...
+---
+name: test_combine_or_s64_s32_rhs
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-LABEL: name: test_combine_or_s64_s32_rhs
+ ; CHECK: liveins: $sgpr0_sgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $sgpr0_sgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr2
+ ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[UV]], [[COPY1]]
+ ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[OR]](s32), [[UV1]](s32)
+ ; CHECK-NEXT: $sgpr0_sgpr1 = COPY [[MV]](s64)
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+ %0:_(s64) = COPY $sgpr0_sgpr1
+ %1:_(s32) = COPY $sgpr2
+ %2:_(s64) = G_ZEXT %1(s32)
+ %3:_(s64) = G_OR %2, %0
+ $sgpr0_sgpr1 = COPY %3(s64)
+ SI_RETURN_TO_EPILOG implicit $sgpr0_sgpr1
+...
+---
+name: test_combine_or_s64_s32_merge_unmerge
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $sgpr0, $sgpr1, $sgpr2
+ ; CHECK-LABEL: name: test_combine_or_s64_s32_merge_unmerge
+ ; CHECK: liveins: $sgpr0, $sgpr1, $sgpr2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr0
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $sgpr2
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY]], [[COPY2]]
+ ; CHECK-NEXT: $sgpr0 = COPY [[OR]](s32)
+ ; CHECK-NEXT: $sgpr1 = COPY [[COPY1]](s32)
+ ; CHECK-NEXT: SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+ %0:_(s32) = COPY $sgpr0
+ %1:_(s32) = COPY $sgpr1
+ %2:_(s32) = COPY $sgpr2
+ %3:_(s64) = G_MERGE_VALUES %0(s32), %1(s32)
+ %4:_(s64) = G_ZEXT %2(s32)
+ %5:_(s64) = G_OR %3, %4
+ %6:_(s32), %7:_(s32) = G_UNMERGE_VALUES %5(s64)
+ $sgpr0 = COPY %6(s32)
+ $sgpr1 = COPY %7(s32)
+ SI_RETURN_TO_EPILOG implicit $sgpr0, implicit $sgpr1
+...
+---
+name: negative_test_incorrect_types
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5
+ ; CHECK-LABEL: name: negative_test_incorrect_types
+ ; CHECK: liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr4_vgpr5
+ ; CHECK-NEXT: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[COPY1]](s64)
+ ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s128) = G_OR [[COPY]], [[ZEXT]]
+ ; CHECK-NEXT: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[OR]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s64) = COPY $vgpr4_vgpr5
+ %2:_(s128) = G_ZEXT %1
+ %3:_(s128) = G_OR %0, %2
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
+
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
index 5dff8c1..667fa98 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergence-divergent-i1-phis-no-lane-mask-merging.ll
@@ -227,39 +227,38 @@ exit:
define amdgpu_cs void @single_lane_execution_attribute(i32 inreg %.userdata0, <3 x i32> inreg %.WorkgroupId, <3 x i32> %.LocalInvocationId) #0 {
; GFX10-LABEL: single_lane_execution_attribute:
; GFX10: ; %bb.0: ; %.entry
-; GFX10-NEXT: s_getpc_b64 s[12:13]
-; GFX10-NEXT: s_mov_b32 s12, 0
+; GFX10-NEXT: s_getpc_b64 s[4:5]
; GFX10-NEXT: s_mov_b32 s2, s0
-; GFX10-NEXT: s_mov_b32 s3, s12
+; GFX10-NEXT: s_mov_b32 s3, s5
; GFX10-NEXT: v_mbcnt_lo_u32_b32 v1, -1, 0
-; GFX10-NEXT: s_or_b64 s[2:3], s[12:13], s[2:3]
; GFX10-NEXT: s_load_dwordx8 s[4:11], s[2:3], 0x0
; GFX10-NEXT: v_mbcnt_hi_u32_b32 v1, -1, v1
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 2, v1
; GFX10-NEXT: v_and_b32_e32 v3, 1, v1
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v3
; GFX10-NEXT: s_xor_b32 s2, vcc_lo, exec_lo
-; GFX10-NEXT: s_and_b32 vcc_lo, s2, exec_lo
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: buffer_load_dword v2, v2, s[4:7], 0 offen
+; GFX10-NEXT: s_and_b32 vcc_lo, exec_lo, s2
+; GFX10-NEXT: s_mov_b32 s2, 0
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 0, v2
; GFX10-NEXT: s_cbranch_vccnz .LBB4_4
; GFX10-NEXT: ; %bb.1: ; %.preheader.preheader
-; GFX10-NEXT: s_mov_b32 s2, 0
+; GFX10-NEXT: s_mov_b32 s3, 0
; GFX10-NEXT: .LBB4_2: ; %.preheader
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT: v_mov_b32_e32 v3, s12
+; GFX10-NEXT: v_mov_b32_e32 v3, s2
; GFX10-NEXT: v_add_nc_u32_e32 v1, -1, v1
-; GFX10-NEXT: s_add_i32 s12, s12, 4
+; GFX10-NEXT: s_add_i32 s2, s2, 4
; GFX10-NEXT: buffer_load_dword v3, v3, s[4:7], 0 offen
; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v1
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_readfirstlane_b32 s3, v3
-; GFX10-NEXT: s_add_i32 s2, s3, s2
+; GFX10-NEXT: v_readfirstlane_b32 s12, v3
+; GFX10-NEXT: s_add_i32 s3, s12, s3
; GFX10-NEXT: s_cbranch_vccnz .LBB4_2
; GFX10-NEXT: ; %bb.3: ; %.preheader._crit_edge
-; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, s2, v2
+; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, s3, v2
; GFX10-NEXT: s_or_b32 s2, s0, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s2
; GFX10-NEXT: s_branch .LBB4_6
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
index bd53032..715a777 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
@@ -4934,17 +4934,15 @@ define amdgpu_ps i64 @s_fshl_i64_5(i64 inreg %lhs, i64 inreg %rhs) {
; GCN: ; %bb.0:
; GCN-NEXT: s_lshl_b64 s[0:1], s[0:1], 5
; GCN-NEXT: s_lshr_b32 s2, s3, 27
-; GCN-NEXT: s_mov_b32 s3, 0
-; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: s_or_b32 s0, s0, s2
; GCN-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: s_fshl_i64_5:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], 5
; GFX11-NEXT: s_lshr_b32 s2, s3, 27
-; GFX11-NEXT: s_mov_b32 s3, 0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX11-NEXT: s_or_b32 s0, s0, s2
; GFX11-NEXT: ; return to shader part epilog
%result = call i64 @llvm.fshl.i64(i64 %lhs, i64 %rhs, i64 5)
ret i64 %result
@@ -4954,20 +4952,13 @@ define amdgpu_ps i64 @s_fshl_i64_32(i64 inreg %lhs, i64 inreg %rhs) {
; GCN-LABEL: s_fshl_i64_32:
; GCN: ; %bb.0:
; GCN-NEXT: s_mov_b32 s1, s0
-; GCN-NEXT: s_mov_b32 s0, 0
-; GCN-NEXT: s_mov_b32 s2, s3
-; GCN-NEXT: s_mov_b32 s3, s0
-; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: s_mov_b32 s0, s3
; GCN-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: s_fshl_i64_32:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_mov_b32 s1, s0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_mov_b32 s2, s3
-; GFX11-NEXT: s_mov_b32 s3, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX11-NEXT: s_mov_b32 s0, s3
; GFX11-NEXT: ; return to shader part epilog
%result = call i64 @llvm.fshl.i64(i64 %lhs, i64 %rhs, i64 32)
ret i64 %result
@@ -6823,56 +6814,50 @@ define amdgpu_ps i128 @s_fshl_i128_65(i128 inreg %lhs, i128 inreg %rhs) {
; GFX6: ; %bb.0:
; GFX6-NEXT: s_lshl_b64 s[2:3], s[0:1], 1
; GFX6-NEXT: s_lshr_b32 s4, s5, 31
-; GFX6-NEXT: s_mov_b32 s5, 0
; GFX6-NEXT: s_lshl_b64 s[0:1], s[6:7], 1
-; GFX6-NEXT: s_or_b64 s[0:1], s[4:5], s[0:1]
+; GFX6-NEXT: s_or_b32 s0, s0, s4
; GFX6-NEXT: s_lshr_b32 s4, s7, 31
-; GFX6-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
+; GFX6-NEXT: s_or_b32 s2, s2, s4
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: s_fshl_i128_65:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_lshl_b64 s[2:3], s[0:1], 1
; GFX8-NEXT: s_lshr_b32 s4, s5, 31
-; GFX8-NEXT: s_mov_b32 s5, 0
; GFX8-NEXT: s_lshl_b64 s[0:1], s[6:7], 1
-; GFX8-NEXT: s_or_b64 s[0:1], s[4:5], s[0:1]
+; GFX8-NEXT: s_or_b32 s0, s0, s4
; GFX8-NEXT: s_lshr_b32 s4, s7, 31
-; GFX8-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
+; GFX8-NEXT: s_or_b32 s2, s2, s4
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_fshl_i128_65:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_lshl_b64 s[2:3], s[0:1], 1
; GFX9-NEXT: s_lshr_b32 s4, s5, 31
-; GFX9-NEXT: s_mov_b32 s5, 0
; GFX9-NEXT: s_lshl_b64 s[0:1], s[6:7], 1
-; GFX9-NEXT: s_or_b64 s[0:1], s[4:5], s[0:1]
+; GFX9-NEXT: s_or_b32 s0, s0, s4
; GFX9-NEXT: s_lshr_b32 s4, s7, 31
-; GFX9-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
+; GFX9-NEXT: s_or_b32 s2, s2, s4
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_fshl_i128_65:
; GFX10: ; %bb.0:
-; GFX10-NEXT: s_lshr_b32 s2, s5, 31
-; GFX10-NEXT: s_mov_b32 s3, 0
-; GFX10-NEXT: s_lshl_b64 s[4:5], s[6:7], 1
-; GFX10-NEXT: s_lshl_b64 s[8:9], s[0:1], 1
-; GFX10-NEXT: s_or_b64 s[0:1], s[2:3], s[4:5]
-; GFX10-NEXT: s_lshr_b32 s2, s7, 31
-; GFX10-NEXT: s_or_b64 s[2:3], s[8:9], s[2:3]
+; GFX10-NEXT: s_lshl_b64 s[2:3], s[0:1], 1
+; GFX10-NEXT: s_lshr_b32 s4, s5, 31
+; GFX10-NEXT: s_lshl_b64 s[0:1], s[6:7], 1
+; GFX10-NEXT: s_lshr_b32 s5, s7, 31
+; GFX10-NEXT: s_or_b32 s0, s0, s4
+; GFX10-NEXT: s_or_b32 s2, s2, s5
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: s_fshl_i128_65:
; GFX11: ; %bb.0:
-; GFX11-NEXT: s_lshr_b32 s2, s5, 31
-; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_lshl_b64 s[4:5], s[6:7], 1
-; GFX11-NEXT: s_lshl_b64 s[8:9], s[0:1], 1
-; GFX11-NEXT: s_or_b64 s[0:1], s[2:3], s[4:5]
-; GFX11-NEXT: s_lshr_b32 s2, s7, 31
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b64 s[2:3], s[8:9], s[2:3]
+; GFX11-NEXT: s_lshl_b64 s[2:3], s[0:1], 1
+; GFX11-NEXT: s_lshr_b32 s4, s5, 31
+; GFX11-NEXT: s_lshl_b64 s[0:1], s[6:7], 1
+; GFX11-NEXT: s_lshr_b32 s5, s7, 31
+; GFX11-NEXT: s_or_b32 s0, s0, s4
+; GFX11-NEXT: s_or_b32 s2, s2, s5
; GFX11-NEXT: ; return to shader part epilog
%result = call i128 @llvm.fshl.i128(i128 %lhs, i128 %rhs, i128 65)
ret i128 %result
@@ -6885,7 +6870,7 @@ define i128 @v_fshl_i128_65(i128 %lhs, i128 %rhs) {
; GFX6-NEXT: v_lshl_b64 v[2:3], v[0:1], 1
; GFX6-NEXT: v_lshl_b64 v[0:1], v[6:7], 1
; GFX6-NEXT: v_lshrrev_b32_e32 v4, 31, v5
-; GFX6-NEXT: v_or_b32_e32 v0, v4, v0
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v4
; GFX6-NEXT: v_lshrrev_b32_e32 v4, 31, v7
; GFX6-NEXT: v_or_b32_e32 v2, v2, v4
; GFX6-NEXT: s_setpc_b64 s[30:31]
@@ -6896,7 +6881,7 @@ define i128 @v_fshl_i128_65(i128 %lhs, i128 %rhs) {
; GFX8-NEXT: v_lshlrev_b64 v[2:3], 1, v[0:1]
; GFX8-NEXT: v_lshlrev_b64 v[0:1], 1, v[6:7]
; GFX8-NEXT: v_lshrrev_b32_e32 v4, 31, v5
-; GFX8-NEXT: v_or_b32_e32 v0, v4, v0
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v4
; GFX8-NEXT: v_lshrrev_b32_e32 v4, 31, v7
; GFX8-NEXT: v_or_b32_e32 v2, v2, v4
; GFX8-NEXT: s_setpc_b64 s[30:31]
@@ -6907,7 +6892,7 @@ define i128 @v_fshl_i128_65(i128 %lhs, i128 %rhs) {
; GFX9-NEXT: v_lshlrev_b64 v[2:3], 1, v[0:1]
; GFX9-NEXT: v_lshlrev_b64 v[0:1], 1, v[6:7]
; GFX9-NEXT: v_lshrrev_b32_e32 v4, 31, v5
-; GFX9-NEXT: v_or_b32_e32 v0, v4, v0
+; GFX9-NEXT: v_or_b32_e32 v0, v0, v4
; GFX9-NEXT: v_lshrrev_b32_e32 v4, 31, v7
; GFX9-NEXT: v_or_b32_e32 v2, v2, v4
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -6919,7 +6904,7 @@ define i128 @v_fshl_i128_65(i128 %lhs, i128 %rhs) {
; GFX10-NEXT: v_lshlrev_b64 v[0:1], 1, v[6:7]
; GFX10-NEXT: v_lshrrev_b32_e32 v4, 31, v5
; GFX10-NEXT: v_lshrrev_b32_e32 v5, 31, v7
-; GFX10-NEXT: v_or_b32_e32 v0, v4, v0
+; GFX10-NEXT: v_or_b32_e32 v0, v0, v4
; GFX10-NEXT: v_or_b32_e32 v2, v2, v5
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
@@ -6931,7 +6916,7 @@ define i128 @v_fshl_i128_65(i128 %lhs, i128 %rhs) {
; GFX11-NEXT: v_lshrrev_b32_e32 v4, 31, v5
; GFX11-NEXT: v_lshrrev_b32_e32 v5, 31, v7
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_or_b32_e32 v0, v4, v0
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v4
; GFX11-NEXT: v_or_b32_e32 v2, v2, v5
; GFX11-NEXT: s_setpc_b64 s[30:31]
%result = call i128 @llvm.fshl.i128(i128 %lhs, i128 %rhs, i128 65)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
index ea6b3a3..5aa5a671 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
@@ -4715,20 +4715,13 @@ define amdgpu_ps i64 @s_fshr_i64_32(i64 inreg %lhs, i64 inreg %rhs) {
; GCN-LABEL: s_fshr_i64_32:
; GCN: ; %bb.0:
; GCN-NEXT: s_mov_b32 s1, s0
-; GCN-NEXT: s_mov_b32 s0, 0
-; GCN-NEXT: s_mov_b32 s2, s3
-; GCN-NEXT: s_mov_b32 s3, s0
-; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: s_mov_b32 s0, s3
; GCN-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: s_fshr_i64_32:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_mov_b32 s1, s0
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_mov_b32 s2, s3
-; GFX11-NEXT: s_mov_b32 s3, s0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX11-NEXT: s_mov_b32 s0, s3
; GFX11-NEXT: ; return to shader part epilog
%result = call i64 @llvm.fshr.i64(i64 %lhs, i64 %rhs, i64 32)
ret i64 %result
@@ -4739,17 +4732,15 @@ define amdgpu_ps i64 @s_fshr_i64_48(i64 inreg %lhs, i64 inreg %rhs) {
; GCN: ; %bb.0:
; GCN-NEXT: s_lshl_b64 s[0:1], s[0:1], 16
; GCN-NEXT: s_lshr_b32 s2, s3, 16
-; GCN-NEXT: s_mov_b32 s3, 0
-; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: s_or_b32 s0, s0, s2
; GCN-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: s_fshr_i64_48:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], 16
; GFX11-NEXT: s_lshr_b32 s2, s3, 16
-; GFX11-NEXT: s_mov_b32 s3, 0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX11-NEXT: s_or_b32 s0, s0, s2
; GFX11-NEXT: ; return to shader part epilog
%result = call i64 @llvm.fshr.i64(i64 %lhs, i64 %rhs, i64 48)
ret i64 %result
@@ -5293,34 +5284,33 @@ define amdgpu_ps i128 @s_fshr_i128(i128 inreg %lhs, i128 inreg %rhs, i128 inreg
; GFX6-NEXT: s_lshl_b64 s[10:11], s[0:1], 1
; GFX6-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX6-NEXT: s_lshr_b32 s0, s1, 31
-; GFX6-NEXT: s_mov_b32 s1, 0
-; GFX6-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
-; GFX6-NEXT: s_andn2_b32 s2, 0x7f, s8
+; GFX6-NEXT: s_or_b32 s2, s2, s0
+; GFX6-NEXT: s_andn2_b32 s0, 0x7f, s8
; GFX6-NEXT: s_not_b32 s9, s8
-; GFX6-NEXT: s_sub_i32 s16, s2, 64
-; GFX6-NEXT: s_sub_i32 s12, 64, s2
-; GFX6-NEXT: s_cmp_lt_u32 s2, 64
+; GFX6-NEXT: s_sub_i32 s16, s0, 64
+; GFX6-NEXT: s_sub_i32 s12, 64, s0
+; GFX6-NEXT: s_cmp_lt_u32 s0, 64
; GFX6-NEXT: s_cselect_b32 s17, 1, 0
-; GFX6-NEXT: s_cmp_eq_u32 s2, 0
+; GFX6-NEXT: s_cmp_eq_u32 s0, 0
; GFX6-NEXT: s_cselect_b32 s18, 1, 0
; GFX6-NEXT: s_lshr_b64 s[12:13], s[10:11], s12
-; GFX6-NEXT: s_lshl_b64 s[14:15], s[0:1], s9
-; GFX6-NEXT: s_lshl_b64 s[2:3], s[10:11], s9
+; GFX6-NEXT: s_lshl_b64 s[14:15], s[2:3], s9
+; GFX6-NEXT: s_lshl_b64 s[0:1], s[10:11], s9
; GFX6-NEXT: s_or_b64 s[12:13], s[12:13], s[14:15]
; GFX6-NEXT: s_lshl_b64 s[10:11], s[10:11], s16
; GFX6-NEXT: s_cmp_lg_u32 s17, 0
-; GFX6-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
+; GFX6-NEXT: s_cselect_b64 s[0:1], s[0:1], 0
; GFX6-NEXT: s_cselect_b64 s[10:11], s[12:13], s[10:11]
; GFX6-NEXT: s_cmp_lg_u32 s18, 0
-; GFX6-NEXT: s_cselect_b64 s[10:11], s[0:1], s[10:11]
-; GFX6-NEXT: s_and_b32 s0, s8, 0x7f
-; GFX6-NEXT: s_sub_i32 s14, s0, 64
-; GFX6-NEXT: s_sub_i32 s12, 64, s0
-; GFX6-NEXT: s_cmp_lt_u32 s0, 64
+; GFX6-NEXT: s_cselect_b64 s[2:3], s[2:3], s[10:11]
+; GFX6-NEXT: s_and_b32 s9, s8, 0x7f
+; GFX6-NEXT: s_sub_i32 s14, s9, 64
+; GFX6-NEXT: s_sub_i32 s12, 64, s9
+; GFX6-NEXT: s_cmp_lt_u32 s9, 64
; GFX6-NEXT: s_cselect_b32 s15, 1, 0
-; GFX6-NEXT: s_cmp_eq_u32 s0, 0
+; GFX6-NEXT: s_cmp_eq_u32 s9, 0
; GFX6-NEXT: s_cselect_b32 s16, 1, 0
-; GFX6-NEXT: s_lshr_b64 s[0:1], s[6:7], s8
+; GFX6-NEXT: s_lshr_b64 s[10:11], s[6:7], s8
; GFX6-NEXT: s_lshr_b64 s[8:9], s[4:5], s8
; GFX6-NEXT: s_lshl_b64 s[12:13], s[6:7], s12
; GFX6-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
@@ -5330,9 +5320,9 @@ define amdgpu_ps i128 @s_fshr_i128(i128 inreg %lhs, i128 inreg %rhs, i128 inreg
; GFX6-NEXT: s_cmp_lg_u32 s16, 0
; GFX6-NEXT: s_cselect_b64 s[4:5], s[4:5], s[6:7]
; GFX6-NEXT: s_cmp_lg_u32 s15, 0
-; GFX6-NEXT: s_cselect_b64 s[6:7], s[0:1], 0
-; GFX6-NEXT: s_or_b64 s[0:1], s[2:3], s[4:5]
-; GFX6-NEXT: s_or_b64 s[2:3], s[10:11], s[6:7]
+; GFX6-NEXT: s_cselect_b64 s[6:7], s[10:11], 0
+; GFX6-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
+; GFX6-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: s_fshr_i128:
@@ -5340,34 +5330,33 @@ define amdgpu_ps i128 @s_fshr_i128(i128 inreg %lhs, i128 inreg %rhs, i128 inreg
; GFX8-NEXT: s_lshl_b64 s[10:11], s[0:1], 1
; GFX8-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX8-NEXT: s_lshr_b32 s0, s1, 31
-; GFX8-NEXT: s_mov_b32 s1, 0
-; GFX8-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
-; GFX8-NEXT: s_andn2_b32 s2, 0x7f, s8
+; GFX8-NEXT: s_or_b32 s2, s2, s0
+; GFX8-NEXT: s_andn2_b32 s0, 0x7f, s8
; GFX8-NEXT: s_not_b32 s9, s8
-; GFX8-NEXT: s_sub_i32 s16, s2, 64
-; GFX8-NEXT: s_sub_i32 s12, 64, s2
-; GFX8-NEXT: s_cmp_lt_u32 s2, 64
+; GFX8-NEXT: s_sub_i32 s16, s0, 64
+; GFX8-NEXT: s_sub_i32 s12, 64, s0
+; GFX8-NEXT: s_cmp_lt_u32 s0, 64
; GFX8-NEXT: s_cselect_b32 s17, 1, 0
-; GFX8-NEXT: s_cmp_eq_u32 s2, 0
+; GFX8-NEXT: s_cmp_eq_u32 s0, 0
; GFX8-NEXT: s_cselect_b32 s18, 1, 0
; GFX8-NEXT: s_lshr_b64 s[12:13], s[10:11], s12
-; GFX8-NEXT: s_lshl_b64 s[14:15], s[0:1], s9
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[10:11], s9
+; GFX8-NEXT: s_lshl_b64 s[14:15], s[2:3], s9
+; GFX8-NEXT: s_lshl_b64 s[0:1], s[10:11], s9
; GFX8-NEXT: s_or_b64 s[12:13], s[12:13], s[14:15]
; GFX8-NEXT: s_lshl_b64 s[10:11], s[10:11], s16
; GFX8-NEXT: s_cmp_lg_u32 s17, 0
-; GFX8-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
+; GFX8-NEXT: s_cselect_b64 s[0:1], s[0:1], 0
; GFX8-NEXT: s_cselect_b64 s[10:11], s[12:13], s[10:11]
; GFX8-NEXT: s_cmp_lg_u32 s18, 0
-; GFX8-NEXT: s_cselect_b64 s[10:11], s[0:1], s[10:11]
-; GFX8-NEXT: s_and_b32 s0, s8, 0x7f
-; GFX8-NEXT: s_sub_i32 s14, s0, 64
-; GFX8-NEXT: s_sub_i32 s12, 64, s0
-; GFX8-NEXT: s_cmp_lt_u32 s0, 64
+; GFX8-NEXT: s_cselect_b64 s[2:3], s[2:3], s[10:11]
+; GFX8-NEXT: s_and_b32 s9, s8, 0x7f
+; GFX8-NEXT: s_sub_i32 s14, s9, 64
+; GFX8-NEXT: s_sub_i32 s12, 64, s9
+; GFX8-NEXT: s_cmp_lt_u32 s9, 64
; GFX8-NEXT: s_cselect_b32 s15, 1, 0
-; GFX8-NEXT: s_cmp_eq_u32 s0, 0
+; GFX8-NEXT: s_cmp_eq_u32 s9, 0
; GFX8-NEXT: s_cselect_b32 s16, 1, 0
-; GFX8-NEXT: s_lshr_b64 s[0:1], s[6:7], s8
+; GFX8-NEXT: s_lshr_b64 s[10:11], s[6:7], s8
; GFX8-NEXT: s_lshr_b64 s[8:9], s[4:5], s8
; GFX8-NEXT: s_lshl_b64 s[12:13], s[6:7], s12
; GFX8-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
@@ -5377,9 +5366,9 @@ define amdgpu_ps i128 @s_fshr_i128(i128 inreg %lhs, i128 inreg %rhs, i128 inreg
; GFX8-NEXT: s_cmp_lg_u32 s16, 0
; GFX8-NEXT: s_cselect_b64 s[4:5], s[4:5], s[6:7]
; GFX8-NEXT: s_cmp_lg_u32 s15, 0
-; GFX8-NEXT: s_cselect_b64 s[6:7], s[0:1], 0
-; GFX8-NEXT: s_or_b64 s[0:1], s[2:3], s[4:5]
-; GFX8-NEXT: s_or_b64 s[2:3], s[10:11], s[6:7]
+; GFX8-NEXT: s_cselect_b64 s[6:7], s[10:11], 0
+; GFX8-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
+; GFX8-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_fshr_i128:
@@ -5387,34 +5376,33 @@ define amdgpu_ps i128 @s_fshr_i128(i128 inreg %lhs, i128 inreg %rhs, i128 inreg
; GFX9-NEXT: s_lshl_b64 s[10:11], s[0:1], 1
; GFX9-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX9-NEXT: s_lshr_b32 s0, s1, 31
-; GFX9-NEXT: s_mov_b32 s1, 0
-; GFX9-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
-; GFX9-NEXT: s_andn2_b32 s2, 0x7f, s8
+; GFX9-NEXT: s_or_b32 s2, s2, s0
+; GFX9-NEXT: s_andn2_b32 s0, 0x7f, s8
; GFX9-NEXT: s_not_b32 s9, s8
-; GFX9-NEXT: s_sub_i32 s16, s2, 64
-; GFX9-NEXT: s_sub_i32 s12, 64, s2
-; GFX9-NEXT: s_cmp_lt_u32 s2, 64
+; GFX9-NEXT: s_sub_i32 s16, s0, 64
+; GFX9-NEXT: s_sub_i32 s12, 64, s0
+; GFX9-NEXT: s_cmp_lt_u32 s0, 64
; GFX9-NEXT: s_cselect_b32 s17, 1, 0
-; GFX9-NEXT: s_cmp_eq_u32 s2, 0
+; GFX9-NEXT: s_cmp_eq_u32 s0, 0
; GFX9-NEXT: s_cselect_b32 s18, 1, 0
; GFX9-NEXT: s_lshr_b64 s[12:13], s[10:11], s12
-; GFX9-NEXT: s_lshl_b64 s[14:15], s[0:1], s9
-; GFX9-NEXT: s_lshl_b64 s[2:3], s[10:11], s9
+; GFX9-NEXT: s_lshl_b64 s[14:15], s[2:3], s9
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[10:11], s9
; GFX9-NEXT: s_or_b64 s[12:13], s[12:13], s[14:15]
; GFX9-NEXT: s_lshl_b64 s[10:11], s[10:11], s16
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
+; GFX9-NEXT: s_cselect_b64 s[0:1], s[0:1], 0
; GFX9-NEXT: s_cselect_b64 s[10:11], s[12:13], s[10:11]
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cselect_b64 s[10:11], s[0:1], s[10:11]
-; GFX9-NEXT: s_and_b32 s0, s8, 0x7f
-; GFX9-NEXT: s_sub_i32 s14, s0, 64
-; GFX9-NEXT: s_sub_i32 s12, 64, s0
-; GFX9-NEXT: s_cmp_lt_u32 s0, 64
+; GFX9-NEXT: s_cselect_b64 s[2:3], s[2:3], s[10:11]
+; GFX9-NEXT: s_and_b32 s9, s8, 0x7f
+; GFX9-NEXT: s_sub_i32 s14, s9, 64
+; GFX9-NEXT: s_sub_i32 s12, 64, s9
+; GFX9-NEXT: s_cmp_lt_u32 s9, 64
; GFX9-NEXT: s_cselect_b32 s15, 1, 0
-; GFX9-NEXT: s_cmp_eq_u32 s0, 0
+; GFX9-NEXT: s_cmp_eq_u32 s9, 0
; GFX9-NEXT: s_cselect_b32 s16, 1, 0
-; GFX9-NEXT: s_lshr_b64 s[0:1], s[6:7], s8
+; GFX9-NEXT: s_lshr_b64 s[10:11], s[6:7], s8
; GFX9-NEXT: s_lshr_b64 s[8:9], s[4:5], s8
; GFX9-NEXT: s_lshl_b64 s[12:13], s[6:7], s12
; GFX9-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
@@ -5424,19 +5412,18 @@ define amdgpu_ps i128 @s_fshr_i128(i128 inreg %lhs, i128 inreg %rhs, i128 inreg
; GFX9-NEXT: s_cmp_lg_u32 s16, 0
; GFX9-NEXT: s_cselect_b64 s[4:5], s[4:5], s[6:7]
; GFX9-NEXT: s_cmp_lg_u32 s15, 0
-; GFX9-NEXT: s_cselect_b64 s[6:7], s[0:1], 0
-; GFX9-NEXT: s_or_b64 s[0:1], s[2:3], s[4:5]
-; GFX9-NEXT: s_or_b64 s[2:3], s[10:11], s[6:7]
+; GFX9-NEXT: s_cselect_b64 s[6:7], s[10:11], 0
+; GFX9-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
+; GFX9-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_fshr_i128:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
-; GFX10-NEXT: s_lshr_b32 s10, s1, 31
-; GFX10-NEXT: s_mov_b32 s11, 0
-; GFX10-NEXT: s_andn2_b32 s9, 0x7f, s8
+; GFX10-NEXT: s_lshr_b32 s9, s1, 31
; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
-; GFX10-NEXT: s_or_b64 s[2:3], s[2:3], s[10:11]
+; GFX10-NEXT: s_or_b32 s2, s2, s9
+; GFX10-NEXT: s_andn2_b32 s9, 0x7f, s8
; GFX10-NEXT: s_not_b32 s14, s8
; GFX10-NEXT: s_sub_i32 s16, s9, 64
; GFX10-NEXT: s_sub_i32 s10, 64, s9
@@ -5479,11 +5466,10 @@ define amdgpu_ps i128 @s_fshr_i128(i128 inreg %lhs, i128 inreg %rhs, i128 inreg
; GFX11-LABEL: s_fshr_i128:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
-; GFX11-NEXT: s_lshr_b32 s10, s1, 31
-; GFX11-NEXT: s_mov_b32 s11, 0
-; GFX11-NEXT: s_and_not1_b32 s9, 0x7f, s8
+; GFX11-NEXT: s_lshr_b32 s9, s1, 31
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
-; GFX11-NEXT: s_or_b64 s[2:3], s[2:3], s[10:11]
+; GFX11-NEXT: s_or_b32 s2, s2, s9
+; GFX11-NEXT: s_and_not1_b32 s9, 0x7f, s8
; GFX11-NEXT: s_not_b32 s14, s8
; GFX11-NEXT: s_sub_i32 s16, s9, 64
; GFX11-NEXT: s_sub_i32 s10, 64, s9
@@ -5786,13 +5772,12 @@ define amdgpu_ps <4 x float> @v_fshr_i128_ssv(i128 inreg %lhs, i128 inreg %rhs,
; GFX6-NEXT: s_lshl_b64 s[8:9], s[0:1], 1
; GFX6-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX6-NEXT: s_lshr_b32 s0, s1, 31
-; GFX6-NEXT: s_mov_b32 s1, 0
; GFX6-NEXT: v_bfi_b32 v7, v0, 0, v1
-; GFX6-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
+; GFX6-NEXT: s_or_b32 s2, s2, s0
; GFX6-NEXT: v_sub_i32_e32 v1, vcc, 64, v7
; GFX6-NEXT: v_not_b32_e32 v8, 63
; GFX6-NEXT: v_lshr_b64 v[1:2], s[8:9], v1
-; GFX6-NEXT: v_lshl_b64 v[3:4], s[0:1], v7
+; GFX6-NEXT: v_lshl_b64 v[3:4], s[2:3], v7
; GFX6-NEXT: v_add_i32_e32 v9, vcc, v7, v8
; GFX6-NEXT: v_lshl_b64 v[5:6], s[8:9], v7
; GFX6-NEXT: v_or_b32_e32 v3, v1, v3
@@ -5803,8 +5788,8 @@ define amdgpu_ps <4 x float> @v_fshr_i128_ssv(i128 inreg %lhs, i128 inreg %rhs,
; GFX6-NEXT: v_cndmask_b32_e32 v6, 0, v6, vcc
; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
; GFX6-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
-; GFX6-NEXT: v_mov_b32_e32 v3, s0
-; GFX6-NEXT: v_mov_b32_e32 v4, s1
+; GFX6-NEXT: v_mov_b32_e32 v3, s2
+; GFX6-NEXT: v_mov_b32_e32 v4, s3
; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX6-NEXT: v_and_b32_e32 v11, 0x7f, v0
; GFX6-NEXT: v_cndmask_b32_e32 v7, v1, v3, vcc
@@ -5839,13 +5824,12 @@ define amdgpu_ps <4 x float> @v_fshr_i128_ssv(i128 inreg %lhs, i128 inreg %rhs,
; GFX8-NEXT: s_lshl_b64 s[8:9], s[0:1], 1
; GFX8-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX8-NEXT: s_lshr_b32 s0, s1, 31
-; GFX8-NEXT: s_mov_b32 s1, 0
; GFX8-NEXT: v_bfi_b32 v7, v0, 0, v1
-; GFX8-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
+; GFX8-NEXT: s_or_b32 s2, s2, s0
; GFX8-NEXT: v_sub_u32_e32 v1, vcc, 64, v7
; GFX8-NEXT: v_not_b32_e32 v8, 63
; GFX8-NEXT: v_lshrrev_b64 v[1:2], v1, s[8:9]
-; GFX8-NEXT: v_lshlrev_b64 v[3:4], v7, s[0:1]
+; GFX8-NEXT: v_lshlrev_b64 v[3:4], v7, s[2:3]
; GFX8-NEXT: v_add_u32_e32 v9, vcc, v7, v8
; GFX8-NEXT: v_lshlrev_b64 v[5:6], v7, s[8:9]
; GFX8-NEXT: v_or_b32_e32 v3, v1, v3
@@ -5856,8 +5840,8 @@ define amdgpu_ps <4 x float> @v_fshr_i128_ssv(i128 inreg %lhs, i128 inreg %rhs,
; GFX8-NEXT: v_cndmask_b32_e32 v6, 0, v6, vcc
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
-; GFX8-NEXT: v_mov_b32_e32 v3, s0
-; GFX8-NEXT: v_mov_b32_e32 v4, s1
+; GFX8-NEXT: v_mov_b32_e32 v3, s2
+; GFX8-NEXT: v_mov_b32_e32 v4, s3
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX8-NEXT: v_and_b32_e32 v11, 0x7f, v0
; GFX8-NEXT: v_cndmask_b32_e32 v7, v1, v3, vcc
@@ -5892,12 +5876,11 @@ define amdgpu_ps <4 x float> @v_fshr_i128_ssv(i128 inreg %lhs, i128 inreg %rhs,
; GFX9-NEXT: s_lshl_b64 s[8:9], s[0:1], 1
; GFX9-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX9-NEXT: s_lshr_b32 s0, s1, 31
-; GFX9-NEXT: s_mov_b32 s1, 0
; GFX9-NEXT: v_bfi_b32 v7, v0, 0, v1
-; GFX9-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
+; GFX9-NEXT: s_or_b32 s2, s2, s0
; GFX9-NEXT: v_sub_u32_e32 v1, 64, v7
; GFX9-NEXT: v_lshrrev_b64 v[1:2], v1, s[8:9]
-; GFX9-NEXT: v_lshlrev_b64 v[3:4], v7, s[0:1]
+; GFX9-NEXT: v_lshlrev_b64 v[3:4], v7, s[2:3]
; GFX9-NEXT: v_add_u32_e32 v8, 0xffffffc0, v7
; GFX9-NEXT: v_lshlrev_b64 v[5:6], v7, s[8:9]
; GFX9-NEXT: v_or_b32_e32 v3, v1, v3
@@ -5908,10 +5891,10 @@ define amdgpu_ps <4 x float> @v_fshr_i128_ssv(i128 inreg %lhs, i128 inreg %rhs,
; GFX9-NEXT: v_cndmask_b32_e32 v6, 0, v6, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
-; GFX9-NEXT: v_mov_b32_e32 v4, s1
+; GFX9-NEXT: v_mov_b32_e32 v4, s3
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
; GFX9-NEXT: v_and_b32_e32 v10, 0x7f, v0
-; GFX9-NEXT: v_mov_b32_e32 v3, s0
+; GFX9-NEXT: v_mov_b32_e32 v3, s2
; GFX9-NEXT: v_cndmask_b32_e32 v9, v2, v4, vcc
; GFX9-NEXT: v_sub_u32_e32 v2, 64, v10
; GFX9-NEXT: v_cndmask_b32_e32 v7, v1, v3, vcc
@@ -5941,34 +5924,33 @@ define amdgpu_ps <4 x float> @v_fshr_i128_ssv(i128 inreg %lhs, i128 inreg %rhs,
; GFX10-LABEL: v_fshr_i128_ssv:
; GFX10: ; %bb.0:
; GFX10-NEXT: v_bfi_b32 v11, v0, 0, 0x7f
-; GFX10-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
-; GFX10-NEXT: s_lshr_b32 s8, s1, 31
-; GFX10-NEXT: s_mov_b32 s9, 0
+; GFX10-NEXT: s_lshl_b64 s[8:9], s[2:3], 1
+; GFX10-NEXT: s_lshr_b32 s2, s1, 31
; GFX10-NEXT: v_and_b32_e32 v12, 0x7f, v0
-; GFX10-NEXT: v_sub_nc_u32_e32 v1, 64, v11
; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
-; GFX10-NEXT: s_or_b64 s[8:9], s[2:3], s[8:9]
+; GFX10-NEXT: v_sub_nc_u32_e32 v1, 64, v11
+; GFX10-NEXT: s_or_b32 s8, s8, s2
; GFX10-NEXT: v_add_nc_u32_e32 v0, 0xffffffc0, v11
; GFX10-NEXT: v_lshlrev_b64 v[3:4], v11, s[8:9]
-; GFX10-NEXT: v_lshrrev_b64 v[1:2], v1, s[0:1]
; GFX10-NEXT: v_sub_nc_u32_e32 v9, 64, v12
+; GFX10-NEXT: v_lshrrev_b64 v[1:2], v1, s[0:1]
; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 64, v11
; GFX10-NEXT: v_add_nc_u32_e32 v13, 0xffffffc0, v12
; GFX10-NEXT: v_lshrrev_b64 v[7:8], v12, s[4:5]
+; GFX10-NEXT: v_lshlrev_b64 v[9:10], v9, s[6:7]
; GFX10-NEXT: v_lshlrev_b64 v[5:6], v11, s[0:1]
; GFX10-NEXT: v_or_b32_e32 v3, v1, v3
; GFX10-NEXT: v_lshlrev_b64 v[0:1], v0, s[0:1]
-; GFX10-NEXT: v_lshlrev_b64 v[9:10], v9, s[6:7]
; GFX10-NEXT: v_or_b32_e32 v4, v2, v4
; GFX10-NEXT: v_cmp_gt_u32_e64 s1, 64, v12
; GFX10-NEXT: v_cmp_eq_u32_e64 s0, 0, v11
; GFX10-NEXT: v_cmp_eq_u32_e64 s2, 0, v12
+; GFX10-NEXT: v_cndmask_b32_e32 v5, 0, v5, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v14, v0, v3, vcc_lo
; GFX10-NEXT: v_lshrrev_b64 v[2:3], v13, s[6:7]
; GFX10-NEXT: v_or_b32_e32 v0, v7, v9
; GFX10-NEXT: v_or_b32_e32 v7, v8, v10
; GFX10-NEXT: v_cndmask_b32_e32 v4, v1, v4, vcc_lo
-; GFX10-NEXT: v_cndmask_b32_e32 v5, 0, v5, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e32 v6, 0, v6, vcc_lo
; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, v0, s1
; GFX10-NEXT: v_lshrrev_b64 v[0:1], v12, s[6:7]
@@ -5988,18 +5970,18 @@ define amdgpu_ps <4 x float> @v_fshr_i128_ssv(i128 inreg %lhs, i128 inreg %rhs,
; GFX11-LABEL: v_fshr_i128_ssv:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_bfi_b32 v11, v0, 0, 0x7f
-; GFX11-NEXT: s_lshr_b32 s8, s1, 31
+; GFX11-NEXT: s_lshl_b64 s[8:9], s[2:3], 1
+; GFX11-NEXT: s_lshr_b32 s2, s1, 31
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
-; GFX11-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
-; GFX11-NEXT: s_mov_b32 s9, 0
+; GFX11-NEXT: s_or_b32 s8, s8, s2
; GFX11-NEXT: v_sub_nc_u32_e32 v1, 64, v11
; GFX11-NEXT: v_lshlrev_b64 v[5:6], v11, s[0:1]
; GFX11-NEXT: v_cmp_gt_u32_e32 vcc_lo, 64, v11
; GFX11-NEXT: v_and_b32_e32 v12, 0x7f, v0
-; GFX11-NEXT: s_or_b64 s[8:9], s[2:3], s[8:9]
-; GFX11-NEXT: v_lshrrev_b64 v[1:2], v1, s[0:1]
; GFX11-NEXT: v_lshlrev_b64 v[3:4], v11, s[8:9]
+; GFX11-NEXT: v_lshrrev_b64 v[1:2], v1, s[0:1]
; GFX11-NEXT: v_dual_cndmask_b32 v5, 0, v5 :: v_dual_add_nc_u32 v0, 0xffffffc0, v11
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
; GFX11-NEXT: v_sub_nc_u32_e32 v9, 64, v12
; GFX11-NEXT: v_lshrrev_b64 v[7:8], v12, s[4:5]
; GFX11-NEXT: v_cmp_eq_u32_e64 s2, 0, v12
@@ -6045,26 +6027,25 @@ define amdgpu_ps <4 x float> @v_fshr_i128_svs(i128 inreg %lhs, i128 %rhs, i128 i
; GFX6-NEXT: s_lshl_b64 s[6:7], s[0:1], 1
; GFX6-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX6-NEXT: s_lshr_b32 s0, s1, 31
-; GFX6-NEXT: s_mov_b32 s1, 0
-; GFX6-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
-; GFX6-NEXT: s_andn2_b32 s2, 0x7f, s4
+; GFX6-NEXT: s_or_b32 s2, s2, s0
+; GFX6-NEXT: s_andn2_b32 s0, 0x7f, s4
; GFX6-NEXT: s_not_b32 s5, s4
-; GFX6-NEXT: s_sub_i32 s12, s2, 64
-; GFX6-NEXT: s_sub_i32 s8, 64, s2
-; GFX6-NEXT: s_cmp_lt_u32 s2, 64
+; GFX6-NEXT: s_sub_i32 s12, s0, 64
+; GFX6-NEXT: s_sub_i32 s8, 64, s0
+; GFX6-NEXT: s_cmp_lt_u32 s0, 64
; GFX6-NEXT: s_cselect_b32 s13, 1, 0
-; GFX6-NEXT: s_cmp_eq_u32 s2, 0
+; GFX6-NEXT: s_cmp_eq_u32 s0, 0
; GFX6-NEXT: s_cselect_b32 s14, 1, 0
; GFX6-NEXT: s_lshr_b64 s[8:9], s[6:7], s8
-; GFX6-NEXT: s_lshl_b64 s[10:11], s[0:1], s5
-; GFX6-NEXT: s_lshl_b64 s[2:3], s[6:7], s5
+; GFX6-NEXT: s_lshl_b64 s[10:11], s[2:3], s5
+; GFX6-NEXT: s_lshl_b64 s[0:1], s[6:7], s5
; GFX6-NEXT: s_or_b64 s[8:9], s[8:9], s[10:11]
; GFX6-NEXT: s_lshl_b64 s[6:7], s[6:7], s12
; GFX6-NEXT: s_cmp_lg_u32 s13, 0
-; GFX6-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
-; GFX6-NEXT: s_cselect_b64 s[6:7], s[8:9], s[6:7]
+; GFX6-NEXT: s_cselect_b64 s[10:11], s[0:1], 0
+; GFX6-NEXT: s_cselect_b64 s[0:1], s[8:9], s[6:7]
; GFX6-NEXT: s_cmp_lg_u32 s14, 0
-; GFX6-NEXT: s_cselect_b64 s[6:7], s[0:1], s[6:7]
+; GFX6-NEXT: s_cselect_b64 s[2:3], s[2:3], s[0:1]
; GFX6-NEXT: s_and_b32 s0, s4, 0x7f
; GFX6-NEXT: s_sub_i32 s1, s0, 64
; GFX6-NEXT: s_sub_i32 s4, 64, s0
@@ -6073,14 +6054,14 @@ define amdgpu_ps <4 x float> @v_fshr_i128_svs(i128 inreg %lhs, i128 %rhs, i128 i
; GFX6-NEXT: s_cmp_eq_u32 s0, 0
; GFX6-NEXT: v_lshr_b64 v[4:5], v[0:1], s0
; GFX6-NEXT: v_lshl_b64 v[6:7], v[2:3], s4
-; GFX6-NEXT: s_cselect_b32 s8, 1, 0
+; GFX6-NEXT: s_cselect_b32 s6, 1, 0
; GFX6-NEXT: v_lshr_b64 v[8:9], v[2:3], s0
; GFX6-NEXT: v_lshr_b64 v[2:3], v[2:3], s1
; GFX6-NEXT: s_and_b32 s0, 1, s5
; GFX6-NEXT: v_or_b32_e32 v4, v4, v6
; GFX6-NEXT: v_or_b32_e32 v5, v5, v7
; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
-; GFX6-NEXT: s_and_b32 s0, 1, s8
+; GFX6-NEXT: s_and_b32 s0, 1, s6
; GFX6-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
; GFX6-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
; GFX6-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, s0
@@ -6088,10 +6069,10 @@ define amdgpu_ps <4 x float> @v_fshr_i128_svs(i128 inreg %lhs, i128 %rhs, i128 i
; GFX6-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[0:1]
; GFX6-NEXT: v_cndmask_b32_e32 v2, 0, v8, vcc
; GFX6-NEXT: v_cndmask_b32_e32 v3, 0, v9, vcc
-; GFX6-NEXT: v_or_b32_e32 v0, s2, v0
-; GFX6-NEXT: v_or_b32_e32 v1, s3, v1
-; GFX6-NEXT: v_or_b32_e32 v2, s6, v2
-; GFX6-NEXT: v_or_b32_e32 v3, s7, v3
+; GFX6-NEXT: v_or_b32_e32 v0, s10, v0
+; GFX6-NEXT: v_or_b32_e32 v1, s11, v1
+; GFX6-NEXT: v_or_b32_e32 v2, s2, v2
+; GFX6-NEXT: v_or_b32_e32 v3, s3, v3
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: v_fshr_i128_svs:
@@ -6099,26 +6080,25 @@ define amdgpu_ps <4 x float> @v_fshr_i128_svs(i128 inreg %lhs, i128 %rhs, i128 i
; GFX8-NEXT: s_lshl_b64 s[6:7], s[0:1], 1
; GFX8-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX8-NEXT: s_lshr_b32 s0, s1, 31
-; GFX8-NEXT: s_mov_b32 s1, 0
-; GFX8-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
-; GFX8-NEXT: s_andn2_b32 s2, 0x7f, s4
+; GFX8-NEXT: s_or_b32 s2, s2, s0
+; GFX8-NEXT: s_andn2_b32 s0, 0x7f, s4
; GFX8-NEXT: s_not_b32 s5, s4
-; GFX8-NEXT: s_sub_i32 s12, s2, 64
-; GFX8-NEXT: s_sub_i32 s8, 64, s2
-; GFX8-NEXT: s_cmp_lt_u32 s2, 64
+; GFX8-NEXT: s_sub_i32 s12, s0, 64
+; GFX8-NEXT: s_sub_i32 s8, 64, s0
+; GFX8-NEXT: s_cmp_lt_u32 s0, 64
; GFX8-NEXT: s_cselect_b32 s13, 1, 0
-; GFX8-NEXT: s_cmp_eq_u32 s2, 0
+; GFX8-NEXT: s_cmp_eq_u32 s0, 0
; GFX8-NEXT: s_cselect_b32 s14, 1, 0
; GFX8-NEXT: s_lshr_b64 s[8:9], s[6:7], s8
-; GFX8-NEXT: s_lshl_b64 s[10:11], s[0:1], s5
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[6:7], s5
+; GFX8-NEXT: s_lshl_b64 s[10:11], s[2:3], s5
+; GFX8-NEXT: s_lshl_b64 s[0:1], s[6:7], s5
; GFX8-NEXT: s_or_b64 s[8:9], s[8:9], s[10:11]
; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], s12
; GFX8-NEXT: s_cmp_lg_u32 s13, 0
-; GFX8-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
-; GFX8-NEXT: s_cselect_b64 s[6:7], s[8:9], s[6:7]
+; GFX8-NEXT: s_cselect_b64 s[10:11], s[0:1], 0
+; GFX8-NEXT: s_cselect_b64 s[0:1], s[8:9], s[6:7]
; GFX8-NEXT: s_cmp_lg_u32 s14, 0
-; GFX8-NEXT: s_cselect_b64 s[6:7], s[0:1], s[6:7]
+; GFX8-NEXT: s_cselect_b64 s[2:3], s[2:3], s[0:1]
; GFX8-NEXT: s_and_b32 s0, s4, 0x7f
; GFX8-NEXT: s_sub_i32 s1, s0, 64
; GFX8-NEXT: s_sub_i32 s4, 64, s0
@@ -6127,14 +6107,14 @@ define amdgpu_ps <4 x float> @v_fshr_i128_svs(i128 inreg %lhs, i128 %rhs, i128 i
; GFX8-NEXT: s_cmp_eq_u32 s0, 0
; GFX8-NEXT: v_lshrrev_b64 v[4:5], s0, v[0:1]
; GFX8-NEXT: v_lshlrev_b64 v[6:7], s4, v[2:3]
-; GFX8-NEXT: s_cselect_b32 s8, 1, 0
+; GFX8-NEXT: s_cselect_b32 s6, 1, 0
; GFX8-NEXT: v_lshrrev_b64 v[8:9], s0, v[2:3]
; GFX8-NEXT: v_lshrrev_b64 v[2:3], s1, v[2:3]
; GFX8-NEXT: s_and_b32 s0, 1, s5
; GFX8-NEXT: v_or_b32_e32 v4, v4, v6
; GFX8-NEXT: v_or_b32_e32 v5, v5, v7
; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
-; GFX8-NEXT: s_and_b32 s0, 1, s8
+; GFX8-NEXT: s_and_b32 s0, 1, s6
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
; GFX8-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, s0
@@ -6142,10 +6122,10 @@ define amdgpu_ps <4 x float> @v_fshr_i128_svs(i128 inreg %lhs, i128 %rhs, i128 i
; GFX8-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[0:1]
; GFX8-NEXT: v_cndmask_b32_e32 v2, 0, v8, vcc
; GFX8-NEXT: v_cndmask_b32_e32 v3, 0, v9, vcc
-; GFX8-NEXT: v_or_b32_e32 v0, s2, v0
-; GFX8-NEXT: v_or_b32_e32 v1, s3, v1
-; GFX8-NEXT: v_or_b32_e32 v2, s6, v2
-; GFX8-NEXT: v_or_b32_e32 v3, s7, v3
+; GFX8-NEXT: v_or_b32_e32 v0, s10, v0
+; GFX8-NEXT: v_or_b32_e32 v1, s11, v1
+; GFX8-NEXT: v_or_b32_e32 v2, s2, v2
+; GFX8-NEXT: v_or_b32_e32 v3, s3, v3
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: v_fshr_i128_svs:
@@ -6153,26 +6133,25 @@ define amdgpu_ps <4 x float> @v_fshr_i128_svs(i128 inreg %lhs, i128 %rhs, i128 i
; GFX9-NEXT: s_lshl_b64 s[6:7], s[0:1], 1
; GFX9-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX9-NEXT: s_lshr_b32 s0, s1, 31
-; GFX9-NEXT: s_mov_b32 s1, 0
-; GFX9-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
-; GFX9-NEXT: s_andn2_b32 s2, 0x7f, s4
+; GFX9-NEXT: s_or_b32 s2, s2, s0
+; GFX9-NEXT: s_andn2_b32 s0, 0x7f, s4
; GFX9-NEXT: s_not_b32 s5, s4
-; GFX9-NEXT: s_sub_i32 s12, s2, 64
-; GFX9-NEXT: s_sub_i32 s8, 64, s2
-; GFX9-NEXT: s_cmp_lt_u32 s2, 64
+; GFX9-NEXT: s_sub_i32 s12, s0, 64
+; GFX9-NEXT: s_sub_i32 s8, 64, s0
+; GFX9-NEXT: s_cmp_lt_u32 s0, 64
; GFX9-NEXT: s_cselect_b32 s13, 1, 0
-; GFX9-NEXT: s_cmp_eq_u32 s2, 0
+; GFX9-NEXT: s_cmp_eq_u32 s0, 0
; GFX9-NEXT: s_cselect_b32 s14, 1, 0
; GFX9-NEXT: s_lshr_b64 s[8:9], s[6:7], s8
-; GFX9-NEXT: s_lshl_b64 s[10:11], s[0:1], s5
-; GFX9-NEXT: s_lshl_b64 s[2:3], s[6:7], s5
+; GFX9-NEXT: s_lshl_b64 s[10:11], s[2:3], s5
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[6:7], s5
; GFX9-NEXT: s_or_b64 s[8:9], s[8:9], s[10:11]
; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], s12
; GFX9-NEXT: s_cmp_lg_u32 s13, 0
-; GFX9-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
-; GFX9-NEXT: s_cselect_b64 s[6:7], s[8:9], s[6:7]
+; GFX9-NEXT: s_cselect_b64 s[10:11], s[0:1], 0
+; GFX9-NEXT: s_cselect_b64 s[0:1], s[8:9], s[6:7]
; GFX9-NEXT: s_cmp_lg_u32 s14, 0
-; GFX9-NEXT: s_cselect_b64 s[6:7], s[0:1], s[6:7]
+; GFX9-NEXT: s_cselect_b64 s[2:3], s[2:3], s[0:1]
; GFX9-NEXT: s_and_b32 s0, s4, 0x7f
; GFX9-NEXT: s_sub_i32 s1, s0, 64
; GFX9-NEXT: s_sub_i32 s4, 64, s0
@@ -6181,14 +6160,14 @@ define amdgpu_ps <4 x float> @v_fshr_i128_svs(i128 inreg %lhs, i128 %rhs, i128 i
; GFX9-NEXT: s_cmp_eq_u32 s0, 0
; GFX9-NEXT: v_lshrrev_b64 v[4:5], s0, v[0:1]
; GFX9-NEXT: v_lshlrev_b64 v[6:7], s4, v[2:3]
-; GFX9-NEXT: s_cselect_b32 s8, 1, 0
+; GFX9-NEXT: s_cselect_b32 s6, 1, 0
; GFX9-NEXT: v_lshrrev_b64 v[8:9], s0, v[2:3]
; GFX9-NEXT: v_lshrrev_b64 v[2:3], s1, v[2:3]
; GFX9-NEXT: s_and_b32 s0, 1, s5
; GFX9-NEXT: v_or_b32_e32 v4, v4, v6
; GFX9-NEXT: v_or_b32_e32 v5, v5, v7
; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
-; GFX9-NEXT: s_and_b32 s0, 1, s8
+; GFX9-NEXT: s_and_b32 s0, 1, s6
; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, s0
@@ -6196,20 +6175,19 @@ define amdgpu_ps <4 x float> @v_fshr_i128_svs(i128 inreg %lhs, i128 %rhs, i128 i
; GFX9-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[0:1]
; GFX9-NEXT: v_cndmask_b32_e32 v2, 0, v8, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v9, vcc
-; GFX9-NEXT: v_or_b32_e32 v0, s2, v0
-; GFX9-NEXT: v_or_b32_e32 v1, s3, v1
-; GFX9-NEXT: v_or_b32_e32 v2, s6, v2
-; GFX9-NEXT: v_or_b32_e32 v3, s7, v3
+; GFX9-NEXT: v_or_b32_e32 v0, s10, v0
+; GFX9-NEXT: v_or_b32_e32 v1, s11, v1
+; GFX9-NEXT: v_or_b32_e32 v2, s2, v2
+; GFX9-NEXT: v_or_b32_e32 v3, s3, v3
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: v_fshr_i128_svs:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
-; GFX10-NEXT: s_lshr_b32 s6, s1, 31
-; GFX10-NEXT: s_mov_b32 s7, 0
-; GFX10-NEXT: s_andn2_b32 s5, 0x7f, s4
+; GFX10-NEXT: s_lshr_b32 s5, s1, 31
; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
-; GFX10-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
+; GFX10-NEXT: s_or_b32 s2, s2, s5
+; GFX10-NEXT: s_andn2_b32 s5, 0x7f, s4
; GFX10-NEXT: s_not_b32 s10, s4
; GFX10-NEXT: s_sub_i32 s12, s5, 64
; GFX10-NEXT: s_sub_i32 s6, 64, s5
@@ -6259,11 +6237,10 @@ define amdgpu_ps <4 x float> @v_fshr_i128_svs(i128 inreg %lhs, i128 %rhs, i128 i
; GFX11-LABEL: v_fshr_i128_svs:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
-; GFX11-NEXT: s_lshr_b32 s6, s1, 31
-; GFX11-NEXT: s_mov_b32 s7, 0
-; GFX11-NEXT: s_and_not1_b32 s5, 0x7f, s4
+; GFX11-NEXT: s_lshr_b32 s5, s1, 31
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
-; GFX11-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
+; GFX11-NEXT: s_or_b32 s2, s2, s5
+; GFX11-NEXT: s_and_not1_b32 s5, 0x7f, s4
; GFX11-NEXT: s_not_b32 s10, s4
; GFX11-NEXT: s_sub_i32 s12, s5, 64
; GFX11-NEXT: s_sub_i32 s6, 64, s5
@@ -6714,81 +6691,80 @@ define i128 @v_fshr_i128_65(i128 %lhs, i128 %rhs) {
define amdgpu_ps <2 x i128> @s_fshr_v2i128(<2 x i128> inreg %lhs, <2 x i128> inreg %rhs, <2 x i128> inreg %amt) {
; GFX6-LABEL: s_fshr_v2i128:
; GFX6: ; %bb.0:
-; GFX6-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
-; GFX6-NEXT: s_lshr_b32 s22, s1, 31
-; GFX6-NEXT: s_mov_b32 s23, 0
; GFX6-NEXT: s_lshl_b64 s[18:19], s[0:1], 1
-; GFX6-NEXT: s_or_b64 s[0:1], s[2:3], s[22:23]
-; GFX6-NEXT: s_andn2_b32 s2, 0x7f, s16
+; GFX6-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
+; GFX6-NEXT: s_lshr_b32 s0, s1, 31
+; GFX6-NEXT: s_or_b32 s2, s2, s0
+; GFX6-NEXT: s_andn2_b32 s0, 0x7f, s16
; GFX6-NEXT: s_not_b32 s17, s16
-; GFX6-NEXT: s_sub_i32 s21, s2, 64
-; GFX6-NEXT: s_sub_i32 s22, 64, s2
-; GFX6-NEXT: s_cmp_lt_u32 s2, 64
-; GFX6-NEXT: s_cselect_b32 s28, 1, 0
-; GFX6-NEXT: s_cmp_eq_u32 s2, 0
-; GFX6-NEXT: s_cselect_b32 s29, 1, 0
-; GFX6-NEXT: s_lshr_b64 s[24:25], s[18:19], s22
-; GFX6-NEXT: s_lshl_b64 s[26:27], s[0:1], s17
-; GFX6-NEXT: s_lshl_b64 s[2:3], s[18:19], s17
-; GFX6-NEXT: s_or_b64 s[24:25], s[24:25], s[26:27]
-; GFX6-NEXT: s_lshl_b64 s[18:19], s[18:19], s21
-; GFX6-NEXT: s_cmp_lg_u32 s28, 0
-; GFX6-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
-; GFX6-NEXT: s_cselect_b64 s[18:19], s[24:25], s[18:19]
-; GFX6-NEXT: s_cmp_lg_u32 s29, 0
-; GFX6-NEXT: s_cselect_b64 s[18:19], s[0:1], s[18:19]
-; GFX6-NEXT: s_and_b32 s0, s16, 0x7f
; GFX6-NEXT: s_sub_i32 s21, s0, 64
; GFX6-NEXT: s_sub_i32 s22, 64, s0
; GFX6-NEXT: s_cmp_lt_u32 s0, 64
; GFX6-NEXT: s_cselect_b32 s26, 1, 0
; GFX6-NEXT: s_cmp_eq_u32 s0, 0
; GFX6-NEXT: s_cselect_b32 s27, 1, 0
-; GFX6-NEXT: s_lshr_b64 s[0:1], s[10:11], s16
+; GFX6-NEXT: s_lshr_b64 s[22:23], s[18:19], s22
+; GFX6-NEXT: s_lshl_b64 s[24:25], s[2:3], s17
+; GFX6-NEXT: s_lshl_b64 s[0:1], s[18:19], s17
+; GFX6-NEXT: s_or_b64 s[22:23], s[22:23], s[24:25]
+; GFX6-NEXT: s_lshl_b64 s[18:19], s[18:19], s21
+; GFX6-NEXT: s_cmp_lg_u32 s26, 0
+; GFX6-NEXT: s_cselect_b64 s[0:1], s[0:1], 0
+; GFX6-NEXT: s_cselect_b64 s[18:19], s[22:23], s[18:19]
+; GFX6-NEXT: s_cmp_lg_u32 s27, 0
+; GFX6-NEXT: s_cselect_b64 s[2:3], s[2:3], s[18:19]
+; GFX6-NEXT: s_and_b32 s17, s16, 0x7f
+; GFX6-NEXT: s_sub_i32 s21, s17, 64
+; GFX6-NEXT: s_sub_i32 s22, 64, s17
+; GFX6-NEXT: s_cmp_lt_u32 s17, 64
+; GFX6-NEXT: s_cselect_b32 s24, 1, 0
+; GFX6-NEXT: s_cmp_eq_u32 s17, 0
+; GFX6-NEXT: s_cselect_b32 s25, 1, 0
+; GFX6-NEXT: s_lshr_b64 s[18:19], s[10:11], s16
; GFX6-NEXT: s_lshr_b64 s[16:17], s[8:9], s16
-; GFX6-NEXT: s_lshl_b64 s[24:25], s[10:11], s22
-; GFX6-NEXT: s_or_b64 s[16:17], s[16:17], s[24:25]
+; GFX6-NEXT: s_lshl_b64 s[22:23], s[10:11], s22
+; GFX6-NEXT: s_or_b64 s[16:17], s[16:17], s[22:23]
; GFX6-NEXT: s_lshr_b64 s[10:11], s[10:11], s21
-; GFX6-NEXT: s_cmp_lg_u32 s26, 0
+; GFX6-NEXT: s_cmp_lg_u32 s24, 0
; GFX6-NEXT: s_cselect_b64 s[10:11], s[16:17], s[10:11]
-; GFX6-NEXT: s_cmp_lg_u32 s27, 0
+; GFX6-NEXT: s_cmp_lg_u32 s25, 0
; GFX6-NEXT: s_cselect_b64 s[8:9], s[8:9], s[10:11]
-; GFX6-NEXT: s_cmp_lg_u32 s26, 0
-; GFX6-NEXT: s_cselect_b64 s[10:11], s[0:1], 0
-; GFX6-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
-; GFX6-NEXT: s_lshr_b32 s22, s5, 31
-; GFX6-NEXT: s_or_b64 s[0:1], s[2:3], s[8:9]
+; GFX6-NEXT: s_cmp_lg_u32 s24, 0
+; GFX6-NEXT: s_cselect_b64 s[10:11], s[18:19], 0
+; GFX6-NEXT: s_or_b64 s[0:1], s[0:1], s[8:9]
; GFX6-NEXT: s_lshl_b64 s[8:9], s[4:5], 1
-; GFX6-NEXT: s_or_b64 s[4:5], s[6:7], s[22:23]
-; GFX6-NEXT: s_andn2_b32 s6, 0x7f, s20
-; GFX6-NEXT: s_or_b64 s[2:3], s[18:19], s[10:11]
+; GFX6-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
+; GFX6-NEXT: s_lshr_b32 s4, s5, 31
+; GFX6-NEXT: s_or_b32 s6, s6, s4
+; GFX6-NEXT: s_andn2_b32 s4, 0x7f, s20
+; GFX6-NEXT: s_or_b64 s[2:3], s[2:3], s[10:11]
; GFX6-NEXT: s_not_b32 s16, s20
-; GFX6-NEXT: s_sub_i32 s18, s6, 64
-; GFX6-NEXT: s_sub_i32 s10, 64, s6
-; GFX6-NEXT: s_cmp_lt_u32 s6, 64
+; GFX6-NEXT: s_sub_i32 s18, s4, 64
+; GFX6-NEXT: s_sub_i32 s10, 64, s4
+; GFX6-NEXT: s_cmp_lt_u32 s4, 64
; GFX6-NEXT: s_cselect_b32 s19, 1, 0
-; GFX6-NEXT: s_cmp_eq_u32 s6, 0
+; GFX6-NEXT: s_cmp_eq_u32 s4, 0
; GFX6-NEXT: s_cselect_b32 s21, 1, 0
-; GFX6-NEXT: s_lshl_b64 s[6:7], s[8:9], s16
+; GFX6-NEXT: s_lshl_b64 s[4:5], s[8:9], s16
; GFX6-NEXT: s_lshr_b64 s[10:11], s[8:9], s10
-; GFX6-NEXT: s_lshl_b64 s[16:17], s[4:5], s16
+; GFX6-NEXT: s_lshl_b64 s[16:17], s[6:7], s16
; GFX6-NEXT: s_or_b64 s[10:11], s[10:11], s[16:17]
; GFX6-NEXT: s_lshl_b64 s[8:9], s[8:9], s18
; GFX6-NEXT: s_cmp_lg_u32 s19, 0
-; GFX6-NEXT: s_cselect_b64 s[6:7], s[6:7], 0
+; GFX6-NEXT: s_cselect_b64 s[4:5], s[4:5], 0
; GFX6-NEXT: s_cselect_b64 s[8:9], s[10:11], s[8:9]
; GFX6-NEXT: s_cmp_lg_u32 s21, 0
-; GFX6-NEXT: s_cselect_b64 s[8:9], s[4:5], s[8:9]
-; GFX6-NEXT: s_and_b32 s4, s20, 0x7f
-; GFX6-NEXT: s_sub_i32 s18, s4, 64
-; GFX6-NEXT: s_sub_i32 s16, 64, s4
-; GFX6-NEXT: s_cmp_lt_u32 s4, 64
+; GFX6-NEXT: s_cselect_b64 s[6:7], s[6:7], s[8:9]
+; GFX6-NEXT: s_and_b32 s8, s20, 0x7f
+; GFX6-NEXT: s_sub_i32 s18, s8, 64
+; GFX6-NEXT: s_sub_i32 s16, 64, s8
+; GFX6-NEXT: s_cmp_lt_u32 s8, 64
; GFX6-NEXT: s_cselect_b32 s19, 1, 0
-; GFX6-NEXT: s_cmp_eq_u32 s4, 0
+; GFX6-NEXT: s_cmp_eq_u32 s8, 0
; GFX6-NEXT: s_cselect_b32 s21, 1, 0
; GFX6-NEXT: s_lshr_b64 s[10:11], s[12:13], s20
; GFX6-NEXT: s_lshl_b64 s[16:17], s[14:15], s16
-; GFX6-NEXT: s_lshr_b64 s[4:5], s[14:15], s20
+; GFX6-NEXT: s_lshr_b64 s[8:9], s[14:15], s20
; GFX6-NEXT: s_or_b64 s[10:11], s[10:11], s[16:17]
; GFX6-NEXT: s_lshr_b64 s[14:15], s[14:15], s18
; GFX6-NEXT: s_cmp_lg_u32 s19, 0
@@ -6796,88 +6772,87 @@ define amdgpu_ps <2 x i128> @s_fshr_v2i128(<2 x i128> inreg %lhs, <2 x i128> inr
; GFX6-NEXT: s_cmp_lg_u32 s21, 0
; GFX6-NEXT: s_cselect_b64 s[10:11], s[12:13], s[10:11]
; GFX6-NEXT: s_cmp_lg_u32 s19, 0
-; GFX6-NEXT: s_cselect_b64 s[12:13], s[4:5], 0
-; GFX6-NEXT: s_or_b64 s[4:5], s[6:7], s[10:11]
-; GFX6-NEXT: s_or_b64 s[6:7], s[8:9], s[12:13]
+; GFX6-NEXT: s_cselect_b64 s[8:9], s[8:9], 0
+; GFX6-NEXT: s_or_b64 s[4:5], s[4:5], s[10:11]
+; GFX6-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: s_fshr_v2i128:
; GFX8: ; %bb.0:
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
-; GFX8-NEXT: s_lshr_b32 s22, s1, 31
-; GFX8-NEXT: s_mov_b32 s23, 0
; GFX8-NEXT: s_lshl_b64 s[18:19], s[0:1], 1
-; GFX8-NEXT: s_or_b64 s[0:1], s[2:3], s[22:23]
-; GFX8-NEXT: s_andn2_b32 s2, 0x7f, s16
+; GFX8-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
+; GFX8-NEXT: s_lshr_b32 s0, s1, 31
+; GFX8-NEXT: s_or_b32 s2, s2, s0
+; GFX8-NEXT: s_andn2_b32 s0, 0x7f, s16
; GFX8-NEXT: s_not_b32 s17, s16
-; GFX8-NEXT: s_sub_i32 s21, s2, 64
-; GFX8-NEXT: s_sub_i32 s22, 64, s2
-; GFX8-NEXT: s_cmp_lt_u32 s2, 64
-; GFX8-NEXT: s_cselect_b32 s28, 1, 0
-; GFX8-NEXT: s_cmp_eq_u32 s2, 0
-; GFX8-NEXT: s_cselect_b32 s29, 1, 0
-; GFX8-NEXT: s_lshr_b64 s[24:25], s[18:19], s22
-; GFX8-NEXT: s_lshl_b64 s[26:27], s[0:1], s17
-; GFX8-NEXT: s_lshl_b64 s[2:3], s[18:19], s17
-; GFX8-NEXT: s_or_b64 s[24:25], s[24:25], s[26:27]
-; GFX8-NEXT: s_lshl_b64 s[18:19], s[18:19], s21
-; GFX8-NEXT: s_cmp_lg_u32 s28, 0
-; GFX8-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
-; GFX8-NEXT: s_cselect_b64 s[18:19], s[24:25], s[18:19]
-; GFX8-NEXT: s_cmp_lg_u32 s29, 0
-; GFX8-NEXT: s_cselect_b64 s[18:19], s[0:1], s[18:19]
-; GFX8-NEXT: s_and_b32 s0, s16, 0x7f
; GFX8-NEXT: s_sub_i32 s21, s0, 64
; GFX8-NEXT: s_sub_i32 s22, 64, s0
; GFX8-NEXT: s_cmp_lt_u32 s0, 64
; GFX8-NEXT: s_cselect_b32 s26, 1, 0
; GFX8-NEXT: s_cmp_eq_u32 s0, 0
; GFX8-NEXT: s_cselect_b32 s27, 1, 0
-; GFX8-NEXT: s_lshr_b64 s[0:1], s[10:11], s16
+; GFX8-NEXT: s_lshr_b64 s[22:23], s[18:19], s22
+; GFX8-NEXT: s_lshl_b64 s[24:25], s[2:3], s17
+; GFX8-NEXT: s_lshl_b64 s[0:1], s[18:19], s17
+; GFX8-NEXT: s_or_b64 s[22:23], s[22:23], s[24:25]
+; GFX8-NEXT: s_lshl_b64 s[18:19], s[18:19], s21
+; GFX8-NEXT: s_cmp_lg_u32 s26, 0
+; GFX8-NEXT: s_cselect_b64 s[0:1], s[0:1], 0
+; GFX8-NEXT: s_cselect_b64 s[18:19], s[22:23], s[18:19]
+; GFX8-NEXT: s_cmp_lg_u32 s27, 0
+; GFX8-NEXT: s_cselect_b64 s[2:3], s[2:3], s[18:19]
+; GFX8-NEXT: s_and_b32 s17, s16, 0x7f
+; GFX8-NEXT: s_sub_i32 s21, s17, 64
+; GFX8-NEXT: s_sub_i32 s22, 64, s17
+; GFX8-NEXT: s_cmp_lt_u32 s17, 64
+; GFX8-NEXT: s_cselect_b32 s24, 1, 0
+; GFX8-NEXT: s_cmp_eq_u32 s17, 0
+; GFX8-NEXT: s_cselect_b32 s25, 1, 0
+; GFX8-NEXT: s_lshr_b64 s[18:19], s[10:11], s16
; GFX8-NEXT: s_lshr_b64 s[16:17], s[8:9], s16
-; GFX8-NEXT: s_lshl_b64 s[24:25], s[10:11], s22
-; GFX8-NEXT: s_or_b64 s[16:17], s[16:17], s[24:25]
+; GFX8-NEXT: s_lshl_b64 s[22:23], s[10:11], s22
+; GFX8-NEXT: s_or_b64 s[16:17], s[16:17], s[22:23]
; GFX8-NEXT: s_lshr_b64 s[10:11], s[10:11], s21
-; GFX8-NEXT: s_cmp_lg_u32 s26, 0
+; GFX8-NEXT: s_cmp_lg_u32 s24, 0
; GFX8-NEXT: s_cselect_b64 s[10:11], s[16:17], s[10:11]
-; GFX8-NEXT: s_cmp_lg_u32 s27, 0
+; GFX8-NEXT: s_cmp_lg_u32 s25, 0
; GFX8-NEXT: s_cselect_b64 s[8:9], s[8:9], s[10:11]
-; GFX8-NEXT: s_cmp_lg_u32 s26, 0
-; GFX8-NEXT: s_cselect_b64 s[10:11], s[0:1], 0
-; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
-; GFX8-NEXT: s_lshr_b32 s22, s5, 31
-; GFX8-NEXT: s_or_b64 s[0:1], s[2:3], s[8:9]
+; GFX8-NEXT: s_cmp_lg_u32 s24, 0
+; GFX8-NEXT: s_cselect_b64 s[10:11], s[18:19], 0
+; GFX8-NEXT: s_or_b64 s[0:1], s[0:1], s[8:9]
; GFX8-NEXT: s_lshl_b64 s[8:9], s[4:5], 1
-; GFX8-NEXT: s_or_b64 s[4:5], s[6:7], s[22:23]
-; GFX8-NEXT: s_andn2_b32 s6, 0x7f, s20
-; GFX8-NEXT: s_or_b64 s[2:3], s[18:19], s[10:11]
+; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
+; GFX8-NEXT: s_lshr_b32 s4, s5, 31
+; GFX8-NEXT: s_or_b32 s6, s6, s4
+; GFX8-NEXT: s_andn2_b32 s4, 0x7f, s20
+; GFX8-NEXT: s_or_b64 s[2:3], s[2:3], s[10:11]
; GFX8-NEXT: s_not_b32 s16, s20
-; GFX8-NEXT: s_sub_i32 s18, s6, 64
-; GFX8-NEXT: s_sub_i32 s10, 64, s6
-; GFX8-NEXT: s_cmp_lt_u32 s6, 64
+; GFX8-NEXT: s_sub_i32 s18, s4, 64
+; GFX8-NEXT: s_sub_i32 s10, 64, s4
+; GFX8-NEXT: s_cmp_lt_u32 s4, 64
; GFX8-NEXT: s_cselect_b32 s19, 1, 0
-; GFX8-NEXT: s_cmp_eq_u32 s6, 0
+; GFX8-NEXT: s_cmp_eq_u32 s4, 0
; GFX8-NEXT: s_cselect_b32 s21, 1, 0
-; GFX8-NEXT: s_lshl_b64 s[6:7], s[8:9], s16
+; GFX8-NEXT: s_lshl_b64 s[4:5], s[8:9], s16
; GFX8-NEXT: s_lshr_b64 s[10:11], s[8:9], s10
-; GFX8-NEXT: s_lshl_b64 s[16:17], s[4:5], s16
+; GFX8-NEXT: s_lshl_b64 s[16:17], s[6:7], s16
; GFX8-NEXT: s_or_b64 s[10:11], s[10:11], s[16:17]
; GFX8-NEXT: s_lshl_b64 s[8:9], s[8:9], s18
; GFX8-NEXT: s_cmp_lg_u32 s19, 0
-; GFX8-NEXT: s_cselect_b64 s[6:7], s[6:7], 0
+; GFX8-NEXT: s_cselect_b64 s[4:5], s[4:5], 0
; GFX8-NEXT: s_cselect_b64 s[8:9], s[10:11], s[8:9]
; GFX8-NEXT: s_cmp_lg_u32 s21, 0
-; GFX8-NEXT: s_cselect_b64 s[8:9], s[4:5], s[8:9]
-; GFX8-NEXT: s_and_b32 s4, s20, 0x7f
-; GFX8-NEXT: s_sub_i32 s18, s4, 64
-; GFX8-NEXT: s_sub_i32 s16, 64, s4
-; GFX8-NEXT: s_cmp_lt_u32 s4, 64
+; GFX8-NEXT: s_cselect_b64 s[6:7], s[6:7], s[8:9]
+; GFX8-NEXT: s_and_b32 s8, s20, 0x7f
+; GFX8-NEXT: s_sub_i32 s18, s8, 64
+; GFX8-NEXT: s_sub_i32 s16, 64, s8
+; GFX8-NEXT: s_cmp_lt_u32 s8, 64
; GFX8-NEXT: s_cselect_b32 s19, 1, 0
-; GFX8-NEXT: s_cmp_eq_u32 s4, 0
+; GFX8-NEXT: s_cmp_eq_u32 s8, 0
; GFX8-NEXT: s_cselect_b32 s21, 1, 0
; GFX8-NEXT: s_lshr_b64 s[10:11], s[12:13], s20
; GFX8-NEXT: s_lshl_b64 s[16:17], s[14:15], s16
-; GFX8-NEXT: s_lshr_b64 s[4:5], s[14:15], s20
+; GFX8-NEXT: s_lshr_b64 s[8:9], s[14:15], s20
; GFX8-NEXT: s_or_b64 s[10:11], s[10:11], s[16:17]
; GFX8-NEXT: s_lshr_b64 s[14:15], s[14:15], s18
; GFX8-NEXT: s_cmp_lg_u32 s19, 0
@@ -6885,88 +6860,87 @@ define amdgpu_ps <2 x i128> @s_fshr_v2i128(<2 x i128> inreg %lhs, <2 x i128> inr
; GFX8-NEXT: s_cmp_lg_u32 s21, 0
; GFX8-NEXT: s_cselect_b64 s[10:11], s[12:13], s[10:11]
; GFX8-NEXT: s_cmp_lg_u32 s19, 0
-; GFX8-NEXT: s_cselect_b64 s[12:13], s[4:5], 0
-; GFX8-NEXT: s_or_b64 s[4:5], s[6:7], s[10:11]
-; GFX8-NEXT: s_or_b64 s[6:7], s[8:9], s[12:13]
+; GFX8-NEXT: s_cselect_b64 s[8:9], s[8:9], 0
+; GFX8-NEXT: s_or_b64 s[4:5], s[4:5], s[10:11]
+; GFX8-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_fshr_v2i128:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
-; GFX9-NEXT: s_lshr_b32 s22, s1, 31
-; GFX9-NEXT: s_mov_b32 s23, 0
; GFX9-NEXT: s_lshl_b64 s[18:19], s[0:1], 1
-; GFX9-NEXT: s_or_b64 s[0:1], s[2:3], s[22:23]
-; GFX9-NEXT: s_andn2_b32 s2, 0x7f, s16
+; GFX9-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
+; GFX9-NEXT: s_lshr_b32 s0, s1, 31
+; GFX9-NEXT: s_or_b32 s2, s2, s0
+; GFX9-NEXT: s_andn2_b32 s0, 0x7f, s16
; GFX9-NEXT: s_not_b32 s17, s16
-; GFX9-NEXT: s_sub_i32 s21, s2, 64
-; GFX9-NEXT: s_sub_i32 s22, 64, s2
-; GFX9-NEXT: s_cmp_lt_u32 s2, 64
-; GFX9-NEXT: s_cselect_b32 s28, 1, 0
-; GFX9-NEXT: s_cmp_eq_u32 s2, 0
-; GFX9-NEXT: s_cselect_b32 s29, 1, 0
-; GFX9-NEXT: s_lshr_b64 s[24:25], s[18:19], s22
-; GFX9-NEXT: s_lshl_b64 s[26:27], s[0:1], s17
-; GFX9-NEXT: s_lshl_b64 s[2:3], s[18:19], s17
-; GFX9-NEXT: s_or_b64 s[24:25], s[24:25], s[26:27]
-; GFX9-NEXT: s_lshl_b64 s[18:19], s[18:19], s21
-; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
-; GFX9-NEXT: s_cselect_b64 s[18:19], s[24:25], s[18:19]
-; GFX9-NEXT: s_cmp_lg_u32 s29, 0
-; GFX9-NEXT: s_cselect_b64 s[18:19], s[0:1], s[18:19]
-; GFX9-NEXT: s_and_b32 s0, s16, 0x7f
; GFX9-NEXT: s_sub_i32 s21, s0, 64
; GFX9-NEXT: s_sub_i32 s22, 64, s0
; GFX9-NEXT: s_cmp_lt_u32 s0, 64
; GFX9-NEXT: s_cselect_b32 s26, 1, 0
; GFX9-NEXT: s_cmp_eq_u32 s0, 0
; GFX9-NEXT: s_cselect_b32 s27, 1, 0
-; GFX9-NEXT: s_lshr_b64 s[0:1], s[10:11], s16
+; GFX9-NEXT: s_lshr_b64 s[22:23], s[18:19], s22
+; GFX9-NEXT: s_lshl_b64 s[24:25], s[2:3], s17
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[18:19], s17
+; GFX9-NEXT: s_or_b64 s[22:23], s[22:23], s[24:25]
+; GFX9-NEXT: s_lshl_b64 s[18:19], s[18:19], s21
+; GFX9-NEXT: s_cmp_lg_u32 s26, 0
+; GFX9-NEXT: s_cselect_b64 s[0:1], s[0:1], 0
+; GFX9-NEXT: s_cselect_b64 s[18:19], s[22:23], s[18:19]
+; GFX9-NEXT: s_cmp_lg_u32 s27, 0
+; GFX9-NEXT: s_cselect_b64 s[2:3], s[2:3], s[18:19]
+; GFX9-NEXT: s_and_b32 s17, s16, 0x7f
+; GFX9-NEXT: s_sub_i32 s21, s17, 64
+; GFX9-NEXT: s_sub_i32 s22, 64, s17
+; GFX9-NEXT: s_cmp_lt_u32 s17, 64
+; GFX9-NEXT: s_cselect_b32 s24, 1, 0
+; GFX9-NEXT: s_cmp_eq_u32 s17, 0
+; GFX9-NEXT: s_cselect_b32 s25, 1, 0
+; GFX9-NEXT: s_lshr_b64 s[18:19], s[10:11], s16
; GFX9-NEXT: s_lshr_b64 s[16:17], s[8:9], s16
-; GFX9-NEXT: s_lshl_b64 s[24:25], s[10:11], s22
-; GFX9-NEXT: s_or_b64 s[16:17], s[16:17], s[24:25]
+; GFX9-NEXT: s_lshl_b64 s[22:23], s[10:11], s22
+; GFX9-NEXT: s_or_b64 s[16:17], s[16:17], s[22:23]
; GFX9-NEXT: s_lshr_b64 s[10:11], s[10:11], s21
-; GFX9-NEXT: s_cmp_lg_u32 s26, 0
+; GFX9-NEXT: s_cmp_lg_u32 s24, 0
; GFX9-NEXT: s_cselect_b64 s[10:11], s[16:17], s[10:11]
-; GFX9-NEXT: s_cmp_lg_u32 s27, 0
+; GFX9-NEXT: s_cmp_lg_u32 s25, 0
; GFX9-NEXT: s_cselect_b64 s[8:9], s[8:9], s[10:11]
-; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cselect_b64 s[10:11], s[0:1], 0
-; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
-; GFX9-NEXT: s_lshr_b32 s22, s5, 31
-; GFX9-NEXT: s_or_b64 s[0:1], s[2:3], s[8:9]
+; GFX9-NEXT: s_cmp_lg_u32 s24, 0
+; GFX9-NEXT: s_cselect_b64 s[10:11], s[18:19], 0
+; GFX9-NEXT: s_or_b64 s[0:1], s[0:1], s[8:9]
; GFX9-NEXT: s_lshl_b64 s[8:9], s[4:5], 1
-; GFX9-NEXT: s_or_b64 s[4:5], s[6:7], s[22:23]
-; GFX9-NEXT: s_andn2_b32 s6, 0x7f, s20
-; GFX9-NEXT: s_or_b64 s[2:3], s[18:19], s[10:11]
+; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
+; GFX9-NEXT: s_lshr_b32 s4, s5, 31
+; GFX9-NEXT: s_or_b32 s6, s6, s4
+; GFX9-NEXT: s_andn2_b32 s4, 0x7f, s20
+; GFX9-NEXT: s_or_b64 s[2:3], s[2:3], s[10:11]
; GFX9-NEXT: s_not_b32 s16, s20
-; GFX9-NEXT: s_sub_i32 s18, s6, 64
-; GFX9-NEXT: s_sub_i32 s10, 64, s6
-; GFX9-NEXT: s_cmp_lt_u32 s6, 64
+; GFX9-NEXT: s_sub_i32 s18, s4, 64
+; GFX9-NEXT: s_sub_i32 s10, 64, s4
+; GFX9-NEXT: s_cmp_lt_u32 s4, 64
; GFX9-NEXT: s_cselect_b32 s19, 1, 0
-; GFX9-NEXT: s_cmp_eq_u32 s6, 0
+; GFX9-NEXT: s_cmp_eq_u32 s4, 0
; GFX9-NEXT: s_cselect_b32 s21, 1, 0
-; GFX9-NEXT: s_lshl_b64 s[6:7], s[8:9], s16
+; GFX9-NEXT: s_lshl_b64 s[4:5], s[8:9], s16
; GFX9-NEXT: s_lshr_b64 s[10:11], s[8:9], s10
-; GFX9-NEXT: s_lshl_b64 s[16:17], s[4:5], s16
+; GFX9-NEXT: s_lshl_b64 s[16:17], s[6:7], s16
; GFX9-NEXT: s_or_b64 s[10:11], s[10:11], s[16:17]
; GFX9-NEXT: s_lshl_b64 s[8:9], s[8:9], s18
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cselect_b64 s[6:7], s[6:7], 0
+; GFX9-NEXT: s_cselect_b64 s[4:5], s[4:5], 0
; GFX9-NEXT: s_cselect_b64 s[8:9], s[10:11], s[8:9]
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cselect_b64 s[8:9], s[4:5], s[8:9]
-; GFX9-NEXT: s_and_b32 s4, s20, 0x7f
-; GFX9-NEXT: s_sub_i32 s18, s4, 64
-; GFX9-NEXT: s_sub_i32 s16, 64, s4
-; GFX9-NEXT: s_cmp_lt_u32 s4, 64
+; GFX9-NEXT: s_cselect_b64 s[6:7], s[6:7], s[8:9]
+; GFX9-NEXT: s_and_b32 s8, s20, 0x7f
+; GFX9-NEXT: s_sub_i32 s18, s8, 64
+; GFX9-NEXT: s_sub_i32 s16, 64, s8
+; GFX9-NEXT: s_cmp_lt_u32 s8, 64
; GFX9-NEXT: s_cselect_b32 s19, 1, 0
-; GFX9-NEXT: s_cmp_eq_u32 s4, 0
+; GFX9-NEXT: s_cmp_eq_u32 s8, 0
; GFX9-NEXT: s_cselect_b32 s21, 1, 0
; GFX9-NEXT: s_lshr_b64 s[10:11], s[12:13], s20
; GFX9-NEXT: s_lshl_b64 s[16:17], s[14:15], s16
-; GFX9-NEXT: s_lshr_b64 s[4:5], s[14:15], s20
+; GFX9-NEXT: s_lshr_b64 s[8:9], s[14:15], s20
; GFX9-NEXT: s_or_b64 s[10:11], s[10:11], s[16:17]
; GFX9-NEXT: s_lshr_b64 s[14:15], s[14:15], s18
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
@@ -6974,61 +6948,60 @@ define amdgpu_ps <2 x i128> @s_fshr_v2i128(<2 x i128> inreg %lhs, <2 x i128> inr
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
; GFX9-NEXT: s_cselect_b64 s[10:11], s[12:13], s[10:11]
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cselect_b64 s[12:13], s[4:5], 0
-; GFX9-NEXT: s_or_b64 s[4:5], s[6:7], s[10:11]
-; GFX9-NEXT: s_or_b64 s[6:7], s[8:9], s[12:13]
+; GFX9-NEXT: s_cselect_b64 s[8:9], s[8:9], 0
+; GFX9-NEXT: s_or_b64 s[4:5], s[4:5], s[10:11]
+; GFX9-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_fshr_v2i128:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
-; GFX10-NEXT: s_lshr_b32 s18, s1, 31
-; GFX10-NEXT: s_mov_b32 s19, 0
-; GFX10-NEXT: s_andn2_b32 s17, 0x7f, s16
+; GFX10-NEXT: s_lshr_b32 s17, s1, 31
; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
-; GFX10-NEXT: s_or_b64 s[2:3], s[2:3], s[18:19]
-; GFX10-NEXT: s_not_b32 s18, s16
-; GFX10-NEXT: s_sub_i32 s21, s17, 64
-; GFX10-NEXT: s_sub_i32 s22, 64, s17
+; GFX10-NEXT: s_or_b32 s2, s2, s17
+; GFX10-NEXT: s_andn2_b32 s17, 0x7f, s16
+; GFX10-NEXT: s_not_b32 s21, s16
+; GFX10-NEXT: s_sub_i32 s26, s17, 64
+; GFX10-NEXT: s_sub_i32 s18, 64, s17
; GFX10-NEXT: s_cmp_lt_u32 s17, 64
-; GFX10-NEXT: s_cselect_b32 s28, 1, 0
+; GFX10-NEXT: s_cselect_b32 s27, 1, 0
; GFX10-NEXT: s_cmp_eq_u32 s17, 0
; GFX10-NEXT: s_cselect_b32 s17, 1, 0
-; GFX10-NEXT: s_lshr_b64 s[22:23], s[0:1], s22
-; GFX10-NEXT: s_lshl_b64 s[24:25], s[2:3], s18
-; GFX10-NEXT: s_lshl_b64 s[26:27], s[0:1], s18
-; GFX10-NEXT: s_or_b64 s[22:23], s[22:23], s[24:25]
-; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], s21
-; GFX10-NEXT: s_cmp_lg_u32 s28, 0
-; GFX10-NEXT: s_cselect_b64 s[24:25], s[26:27], 0
-; GFX10-NEXT: s_cselect_b64 s[0:1], s[22:23], s[0:1]
+; GFX10-NEXT: s_lshr_b64 s[18:19], s[0:1], s18
+; GFX10-NEXT: s_lshl_b64 s[22:23], s[2:3], s21
+; GFX10-NEXT: s_lshl_b64 s[24:25], s[0:1], s21
+; GFX10-NEXT: s_or_b64 s[18:19], s[18:19], s[22:23]
+; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], s26
+; GFX10-NEXT: s_cmp_lg_u32 s27, 0
+; GFX10-NEXT: s_cselect_b64 s[22:23], s[24:25], 0
+; GFX10-NEXT: s_cselect_b64 s[0:1], s[18:19], s[0:1]
; GFX10-NEXT: s_cmp_lg_u32 s17, 0
; GFX10-NEXT: s_cselect_b64 s[2:3], s[2:3], s[0:1]
; GFX10-NEXT: s_and_b32 s0, s16, 0x7f
-; GFX10-NEXT: s_sub_i32 s18, s0, 64
+; GFX10-NEXT: s_sub_i32 s21, s0, 64
; GFX10-NEXT: s_sub_i32 s17, 64, s0
; GFX10-NEXT: s_cmp_lt_u32 s0, 64
-; GFX10-NEXT: s_cselect_b32 s21, 1, 0
+; GFX10-NEXT: s_cselect_b32 s24, 1, 0
; GFX10-NEXT: s_cmp_eq_u32 s0, 0
-; GFX10-NEXT: s_cselect_b32 s26, 1, 0
+; GFX10-NEXT: s_cselect_b32 s25, 1, 0
; GFX10-NEXT: s_lshr_b64 s[0:1], s[8:9], s16
-; GFX10-NEXT: s_lshl_b64 s[22:23], s[10:11], s17
+; GFX10-NEXT: s_lshl_b64 s[18:19], s[10:11], s17
; GFX10-NEXT: s_lshr_b64 s[16:17], s[10:11], s16
-; GFX10-NEXT: s_or_b64 s[0:1], s[0:1], s[22:23]
-; GFX10-NEXT: s_lshr_b64 s[10:11], s[10:11], s18
-; GFX10-NEXT: s_cmp_lg_u32 s21, 0
+; GFX10-NEXT: s_or_b64 s[0:1], s[0:1], s[18:19]
+; GFX10-NEXT: s_lshr_b64 s[10:11], s[10:11], s21
+; GFX10-NEXT: s_cmp_lg_u32 s24, 0
; GFX10-NEXT: s_cselect_b64 s[0:1], s[0:1], s[10:11]
-; GFX10-NEXT: s_cmp_lg_u32 s26, 0
+; GFX10-NEXT: s_cmp_lg_u32 s25, 0
; GFX10-NEXT: s_cselect_b64 s[0:1], s[8:9], s[0:1]
-; GFX10-NEXT: s_cmp_lg_u32 s21, 0
+; GFX10-NEXT: s_cmp_lg_u32 s24, 0
; GFX10-NEXT: s_cselect_b64 s[8:9], s[16:17], 0
; GFX10-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
; GFX10-NEXT: s_or_b64 s[2:3], s[2:3], s[8:9]
-; GFX10-NEXT: s_lshr_b32 s18, s5, 31
+; GFX10-NEXT: s_lshr_b32 s8, s5, 31
+; GFX10-NEXT: s_or_b64 s[0:1], s[22:23], s[0:1]
+; GFX10-NEXT: s_or_b32 s6, s6, s8
; GFX10-NEXT: s_andn2_b32 s8, 0x7f, s20
-; GFX10-NEXT: s_or_b64 s[0:1], s[24:25], s[0:1]
; GFX10-NEXT: s_lshl_b64 s[4:5], s[4:5], 1
-; GFX10-NEXT: s_or_b64 s[6:7], s[6:7], s[18:19]
; GFX10-NEXT: s_not_b32 s16, s20
; GFX10-NEXT: s_sub_i32 s18, s8, 64
; GFX10-NEXT: s_sub_i32 s9, 64, s8
@@ -7071,54 +7044,53 @@ define amdgpu_ps <2 x i128> @s_fshr_v2i128(<2 x i128> inreg %lhs, <2 x i128> inr
; GFX11-LABEL: s_fshr_v2i128:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
-; GFX11-NEXT: s_lshr_b32 s18, s1, 31
-; GFX11-NEXT: s_mov_b32 s19, 0
-; GFX11-NEXT: s_and_not1_b32 s17, 0x7f, s16
+; GFX11-NEXT: s_lshr_b32 s17, s1, 31
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
-; GFX11-NEXT: s_or_b64 s[2:3], s[2:3], s[18:19]
-; GFX11-NEXT: s_not_b32 s18, s16
-; GFX11-NEXT: s_sub_i32 s21, s17, 64
-; GFX11-NEXT: s_sub_i32 s22, 64, s17
+; GFX11-NEXT: s_or_b32 s2, s2, s17
+; GFX11-NEXT: s_and_not1_b32 s17, 0x7f, s16
+; GFX11-NEXT: s_not_b32 s21, s16
+; GFX11-NEXT: s_sub_i32 s26, s17, 64
+; GFX11-NEXT: s_sub_i32 s18, 64, s17
; GFX11-NEXT: s_cmp_lt_u32 s17, 64
-; GFX11-NEXT: s_cselect_b32 s28, 1, 0
+; GFX11-NEXT: s_cselect_b32 s27, 1, 0
; GFX11-NEXT: s_cmp_eq_u32 s17, 0
; GFX11-NEXT: s_cselect_b32 s17, 1, 0
-; GFX11-NEXT: s_lshr_b64 s[22:23], s[0:1], s22
-; GFX11-NEXT: s_lshl_b64 s[24:25], s[2:3], s18
-; GFX11-NEXT: s_lshl_b64 s[26:27], s[0:1], s18
-; GFX11-NEXT: s_or_b64 s[22:23], s[22:23], s[24:25]
-; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], s21
-; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_cselect_b64 s[24:25], s[26:27], 0
-; GFX11-NEXT: s_cselect_b64 s[0:1], s[22:23], s[0:1]
+; GFX11-NEXT: s_lshr_b64 s[18:19], s[0:1], s18
+; GFX11-NEXT: s_lshl_b64 s[22:23], s[2:3], s21
+; GFX11-NEXT: s_lshl_b64 s[24:25], s[0:1], s21
+; GFX11-NEXT: s_or_b64 s[18:19], s[18:19], s[22:23]
+; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], s26
+; GFX11-NEXT: s_cmp_lg_u32 s27, 0
+; GFX11-NEXT: s_cselect_b64 s[22:23], s[24:25], 0
+; GFX11-NEXT: s_cselect_b64 s[0:1], s[18:19], s[0:1]
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
; GFX11-NEXT: s_cselect_b64 s[2:3], s[2:3], s[0:1]
; GFX11-NEXT: s_and_b32 s0, s16, 0x7f
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_sub_i32 s18, s0, 64
+; GFX11-NEXT: s_sub_i32 s21, s0, 64
; GFX11-NEXT: s_sub_i32 s17, 64, s0
; GFX11-NEXT: s_cmp_lt_u32 s0, 64
-; GFX11-NEXT: s_cselect_b32 s21, 1, 0
+; GFX11-NEXT: s_cselect_b32 s24, 1, 0
; GFX11-NEXT: s_cmp_eq_u32 s0, 0
-; GFX11-NEXT: s_cselect_b32 s26, 1, 0
+; GFX11-NEXT: s_cselect_b32 s25, 1, 0
; GFX11-NEXT: s_lshr_b64 s[0:1], s[8:9], s16
-; GFX11-NEXT: s_lshl_b64 s[22:23], s[10:11], s17
+; GFX11-NEXT: s_lshl_b64 s[18:19], s[10:11], s17
; GFX11-NEXT: s_lshr_b64 s[16:17], s[10:11], s16
-; GFX11-NEXT: s_or_b64 s[0:1], s[0:1], s[22:23]
-; GFX11-NEXT: s_lshr_b64 s[10:11], s[10:11], s18
-; GFX11-NEXT: s_cmp_lg_u32 s21, 0
+; GFX11-NEXT: s_or_b64 s[0:1], s[0:1], s[18:19]
+; GFX11-NEXT: s_lshr_b64 s[10:11], s[10:11], s21
+; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_cselect_b64 s[0:1], s[0:1], s[10:11]
-; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_cmp_lg_u32 s25, 0
; GFX11-NEXT: s_cselect_b64 s[0:1], s[8:9], s[0:1]
-; GFX11-NEXT: s_cmp_lg_u32 s21, 0
+; GFX11-NEXT: s_cmp_lg_u32 s24, 0
; GFX11-NEXT: s_cselect_b64 s[8:9], s[16:17], 0
; GFX11-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
; GFX11-NEXT: s_or_b64 s[2:3], s[2:3], s[8:9]
-; GFX11-NEXT: s_lshr_b32 s18, s5, 31
+; GFX11-NEXT: s_lshr_b32 s8, s5, 31
+; GFX11-NEXT: s_or_b64 s[0:1], s[22:23], s[0:1]
+; GFX11-NEXT: s_or_b32 s6, s6, s8
; GFX11-NEXT: s_and_not1_b32 s8, 0x7f, s20
-; GFX11-NEXT: s_or_b64 s[0:1], s[24:25], s[0:1]
; GFX11-NEXT: s_lshl_b64 s[4:5], s[4:5], 1
-; GFX11-NEXT: s_or_b64 s[6:7], s[6:7], s[18:19]
; GFX11-NEXT: s_not_b32 s16, s20
; GFX11-NEXT: s_sub_i32 s18, s8, 64
; GFX11-NEXT: s_sub_i32 s9, 64, s8
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fmaximum.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fmaximum.mir
new file mode 100644
index 0000000..4b214e6
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fmaximum.mir
@@ -0,0 +1,275 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -run-pass=legalizer %s -o - | FileCheck -check-prefix=GFX9 %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -mattr=-real-true16 -run-pass=legalizer %s -o - | FileCheck -check-prefixes=GFX12 %s
+
+---
+name: test_fmaximum_f16
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+
+ ; GFX9-LABEL: name: test_fmaximum_f16
+ ; GFX9: liveins: $vgpr0, $vgpr1
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX9-NEXT: [[FMAXNUM_IEEE:%[0-9]+]]:_(s16) = G_FMAXNUM_IEEE [[TRUNC]], [[TRUNC1]]
+ ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[TRUNC]](s16), [[TRUNC1]]
+ ; GFX9-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH7E00
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[FMAXNUM_IEEE]], [[C]]
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s16) = COPY [[SELECT]](s16)
+ ; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY2]](s16)
+ ; GFX9-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ; GFX9-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX12-LABEL: name: test_fmaximum_f16
+ ; GFX12: liveins: $vgpr0, $vgpr1
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX12-NEXT: [[FMAXIMUM:%[0-9]+]]:_(s16) = G_FMAXIMUM [[TRUNC]], [[TRUNC1]]
+ ; GFX12-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMAXIMUM]](s16)
+ ; GFX12-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ; GFX12-NEXT: SI_RETURN implicit $vgpr0
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s16) = G_TRUNC %0(s32)
+ %2:_(s32) = COPY $vgpr1
+ %3:_(s16) = G_TRUNC %2(s32)
+ %4:_(s16) = G_FMAXIMUM %1, %3
+ %5:_(s32) = G_ANYEXT %4(s16)
+ $vgpr0 = COPY %5(s32)
+ SI_RETURN implicit $vgpr0
+...
+---
+name: test_fmaximum_f32
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+
+ ; GFX9-LABEL: name: test_fmaximum_f32
+ ; GFX9: liveins: $vgpr0, $vgpr1
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9-NEXT: [[FMAXNUM_IEEE:%[0-9]+]]:_(s32) = G_FMAXNUM_IEEE [[COPY]], [[COPY1]]
+ ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[COPY]](s32), [[COPY1]]
+ ; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x7FF8000000000000
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s1), [[FMAXNUM_IEEE]], [[C]]
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SELECT]](s32)
+ ; GFX9-NEXT: $vgpr0 = COPY [[COPY2]](s32)
+ ; GFX9-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX12-LABEL: name: test_fmaximum_f32
+ ; GFX12: liveins: $vgpr0, $vgpr1
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX12-NEXT: [[FMAXIMUM:%[0-9]+]]:_(s32) = G_FMAXIMUM [[COPY]], [[COPY1]]
+ ; GFX12-NEXT: $vgpr0 = COPY [[FMAXIMUM]](s32)
+ ; GFX12-NEXT: SI_RETURN implicit $vgpr0
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32) = G_FMAXIMUM %0, %1
+ $vgpr0 = COPY %2(s32)
+ SI_RETURN implicit $vgpr0
+...
+---
+name: test_fmaximum_f64
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+
+ ; GFX9-LABEL: name: test_fmaximum_f64
+ ; GFX9: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+ ; GFX9-NEXT: [[FMAXNUM_IEEE:%[0-9]+]]:_(s64) = G_FMAXNUM_IEEE [[COPY]], [[COPY1]]
+ ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[COPY]](s64), [[COPY1]]
+ ; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x7FF8000000000000
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[FMAXNUM_IEEE]], [[C]]
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[SELECT]](s64)
+ ; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[COPY2]](s64)
+ ; GFX9-NEXT: SI_RETURN implicit $vgpr0_vgpr1
+ ;
+ ; GFX12-LABEL: name: test_fmaximum_f64
+ ; GFX12: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+ ; GFX12-NEXT: [[FMAXIMUM:%[0-9]+]]:_(s64) = G_FMAXIMUM [[COPY]], [[COPY1]]
+ ; GFX12-NEXT: $vgpr0_vgpr1 = COPY [[FMAXIMUM]](s64)
+ ; GFX12-NEXT: SI_RETURN implicit $vgpr0_vgpr1
+ %0:_(s64) = COPY $vgpr0_vgpr1
+ %1:_(s64) = COPY $vgpr2_vgpr3
+ %2:_(s64) = G_FMAXIMUM %0, %1
+ $vgpr0_vgpr1 = COPY %2(s64)
+ SI_RETURN implicit $vgpr0_vgpr1
+...
+---
+name: test_fmaximum_v2f16
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+
+ ; GFX9-LABEL: name: test_fmaximum_v2f16
+ ; GFX9: liveins: $vgpr0, $vgpr1
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+ ; GFX9-NEXT: [[FMAXNUM_IEEE:%[0-9]+]]:_(<2 x s16>) = G_FMAXNUM_IEEE [[COPY]], [[COPY1]]
+ ; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
+ ; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX9-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX9-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+ ; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX9-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[TRUNC]](s16), [[TRUNC2]]
+ ; GFX9-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[TRUNC1]](s16), [[TRUNC3]]
+ ; GFX9-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH7E00
+ ; GFX9-NEXT: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[FMAXNUM_IEEE]](<2 x s16>)
+ ; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; GFX9-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[TRUNC4]], [[C1]]
+ ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[FCMP1]](s1), [[TRUNC5]], [[C1]]
+ ; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[SELECT]](s16), [[SELECT1]](s16)
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(<2 x s16>) = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ; GFX9-NEXT: $vgpr0 = COPY [[COPY2]](<2 x s16>)
+ ; GFX9-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX12-LABEL: name: test_fmaximum_v2f16
+ ; GFX12: liveins: $vgpr0, $vgpr1
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+ ; GFX12-NEXT: [[FMAXIMUM:%[0-9]+]]:_(<2 x s16>) = G_FMAXIMUM [[COPY]], [[COPY1]]
+ ; GFX12-NEXT: $vgpr0 = COPY [[FMAXIMUM]](<2 x s16>)
+ ; GFX12-NEXT: SI_RETURN implicit $vgpr0
+ %0:_(<2 x s16>) = COPY $vgpr0
+ %1:_(<2 x s16>) = COPY $vgpr1
+ %2:_(<2 x s16>) = G_FMAXIMUM %0, %1
+ $vgpr0 = COPY %2(<2 x s16>)
+ SI_RETURN implicit $vgpr0
+...
+---
+name: test_fmaximum_v2f32
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
+
+ ; GFX9-LABEL: name: test_fmaximum_v2f32
+ ; GFX9: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+ ; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
+ ; GFX9-NEXT: [[FMAXNUM_IEEE:%[0-9]+]]:_(s32) = G_FMAXNUM_IEEE [[COPY]], [[COPY2]]
+ ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[COPY]](s32), [[COPY2]]
+ ; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x7FF8000000000000
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s1), [[FMAXNUM_IEEE]], [[C]]
+ ; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SELECT]](s32)
+ ; GFX9-NEXT: [[FMAXNUM_IEEE1:%[0-9]+]]:_(s32) = G_FMAXNUM_IEEE [[COPY1]], [[COPY3]]
+ ; GFX9-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[COPY1]](s32), [[COPY3]]
+ ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[FCMP1]](s1), [[FMAXNUM_IEEE1]], [[C]]
+ ; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[SELECT1]](s32)
+ ; GFX9-NEXT: $vgpr0 = COPY [[COPY4]](s32)
+ ; GFX9-NEXT: $vgpr1 = COPY [[COPY5]](s32)
+ ; GFX9-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ ;
+ ; GFX12-LABEL: name: test_fmaximum_v2f32
+ ; GFX12: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
+ ; GFX12-NEXT: [[FMAXIMUM:%[0-9]+]]:_(s32) = G_FMAXIMUM [[COPY]], [[COPY2]]
+ ; GFX12-NEXT: [[FMAXIMUM1:%[0-9]+]]:_(s32) = G_FMAXIMUM [[COPY1]], [[COPY3]]
+ ; GFX12-NEXT: $vgpr0 = COPY [[FMAXIMUM]](s32)
+ ; GFX12-NEXT: $vgpr1 = COPY [[FMAXIMUM1]](s32)
+ ; GFX12-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(<2 x s32>) = G_BUILD_VECTOR %0(s32), %1(s32)
+ %3:_(s32) = COPY $vgpr2
+ %4:_(s32) = COPY $vgpr3
+ %5:_(<2 x s32>) = G_BUILD_VECTOR %3(s32), %4(s32)
+ %6:_(<2 x s32>) = G_FMAXIMUM %2, %5
+ %7:_(s32), %8:_(s32) = G_UNMERGE_VALUES %6(<2 x s32>)
+ $vgpr0 = COPY %7(s32)
+ $vgpr1 = COPY %8(s32)
+ SI_RETURN implicit $vgpr0, implicit $vgpr1
+...
+---
+name: test_fmaximum_nsz_f32
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+
+ ; GFX9-LABEL: name: test_fmaximum_nsz_f32
+ ; GFX9: liveins: $vgpr0, $vgpr1
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9-NEXT: [[FMAXNUM_IEEE:%[0-9]+]]:_(s32) = G_FMAXNUM_IEEE [[COPY]], [[COPY1]]
+ ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[COPY]](s32), [[COPY1]]
+ ; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x7FF8000000000000
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s1), [[FMAXNUM_IEEE]], [[C]]
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SELECT]](s32)
+ ; GFX9-NEXT: $vgpr0 = COPY [[COPY2]](s32)
+ ; GFX9-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX12-LABEL: name: test_fmaximum_nsz_f32
+ ; GFX12: liveins: $vgpr0, $vgpr1
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX12-NEXT: [[FMAXIMUM:%[0-9]+]]:_(s32) = nsz G_FMAXIMUM [[COPY]], [[COPY1]]
+ ; GFX12-NEXT: $vgpr0 = COPY [[FMAXIMUM]](s32)
+ ; GFX12-NEXT: SI_RETURN implicit $vgpr0
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32) = nsz G_FMAXIMUM %0, %1
+ $vgpr0 = COPY %2(s32)
+ SI_RETURN implicit $vgpr0
+...
+---
+name: test_fmaximum_nnan_f32
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+
+ ; GFX9-LABEL: name: test_fmaximum_nnan_f32
+ ; GFX9: liveins: $vgpr0, $vgpr1
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9-NEXT: [[FMAXNUM_IEEE:%[0-9]+]]:_(s32) = G_FMAXNUM_IEEE [[COPY]], [[COPY1]]
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[FMAXNUM_IEEE]](s32)
+ ; GFX9-NEXT: $vgpr0 = COPY [[COPY2]](s32)
+ ; GFX9-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX12-LABEL: name: test_fmaximum_nnan_f32
+ ; GFX12: liveins: $vgpr0, $vgpr1
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX12-NEXT: [[FMAXIMUM:%[0-9]+]]:_(s32) = nnan G_FMAXIMUM [[COPY]], [[COPY1]]
+ ; GFX12-NEXT: $vgpr0 = COPY [[FMAXIMUM]](s32)
+ ; GFX12-NEXT: SI_RETURN implicit $vgpr0
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32) = nnan G_FMAXIMUM %0, %1
+ $vgpr0 = COPY %2(s32)
+ SI_RETURN implicit $vgpr0
+...
+
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fminimum.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fminimum.mir
new file mode 100644
index 0000000..8ba0794
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fminimum.mir
@@ -0,0 +1,275 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -run-pass=legalizer %s -o - | FileCheck -check-prefix=GFX9 %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1200 -mattr=-real-true16 -run-pass=legalizer %s -o - | FileCheck -check-prefixes=GFX12 %s
+
+---
+name: test_fminimum_f16
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+
+ ; GFX9-LABEL: name: test_fminimum_f16
+ ; GFX9: liveins: $vgpr0, $vgpr1
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX9-NEXT: [[FMINNUM_IEEE:%[0-9]+]]:_(s16) = G_FMINNUM_IEEE [[TRUNC]], [[TRUNC1]]
+ ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[TRUNC]](s16), [[TRUNC1]]
+ ; GFX9-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH7E00
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[FMINNUM_IEEE]], [[C]]
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s16) = COPY [[SELECT]](s16)
+ ; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY2]](s16)
+ ; GFX9-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ; GFX9-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX12-LABEL: name: test_fminimum_f16
+ ; GFX12: liveins: $vgpr0, $vgpr1
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX12-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX12-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX12-NEXT: [[FMINIMUM:%[0-9]+]]:_(s16) = G_FMINIMUM [[TRUNC]], [[TRUNC1]]
+ ; GFX12-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMINIMUM]](s16)
+ ; GFX12-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
+ ; GFX12-NEXT: SI_RETURN implicit $vgpr0
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s16) = G_TRUNC %0(s32)
+ %2:_(s32) = COPY $vgpr1
+ %3:_(s16) = G_TRUNC %2(s32)
+ %4:_(s16) = G_FMINIMUM %1, %3
+ %5:_(s32) = G_ANYEXT %4(s16)
+ $vgpr0 = COPY %5(s32)
+ SI_RETURN implicit $vgpr0
+...
+---
+name: test_fminimum_f32
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+
+ ; GFX9-LABEL: name: test_fminimum_f32
+ ; GFX9: liveins: $vgpr0, $vgpr1
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9-NEXT: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[COPY]], [[COPY1]]
+ ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[COPY]](s32), [[COPY1]]
+ ; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x7FF8000000000000
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s1), [[FMINNUM_IEEE]], [[C]]
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SELECT]](s32)
+ ; GFX9-NEXT: $vgpr0 = COPY [[COPY2]](s32)
+ ; GFX9-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX12-LABEL: name: test_fminimum_f32
+ ; GFX12: liveins: $vgpr0, $vgpr1
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX12-NEXT: [[FMINIMUM:%[0-9]+]]:_(s32) = G_FMINIMUM [[COPY]], [[COPY1]]
+ ; GFX12-NEXT: $vgpr0 = COPY [[FMINIMUM]](s32)
+ ; GFX12-NEXT: SI_RETURN implicit $vgpr0
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32) = G_FMINIMUM %0, %1
+ $vgpr0 = COPY %2(s32)
+ SI_RETURN implicit $vgpr0
+...
+---
+name: test_fminimum_f64
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+
+ ; GFX9-LABEL: name: test_fminimum_f64
+ ; GFX9: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+ ; GFX9-NEXT: [[FMINNUM_IEEE:%[0-9]+]]:_(s64) = G_FMINNUM_IEEE [[COPY]], [[COPY1]]
+ ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[COPY]](s64), [[COPY1]]
+ ; GFX9-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0x7FF8000000000000
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[FCMP]](s1), [[FMINNUM_IEEE]], [[C]]
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY [[SELECT]](s64)
+ ; GFX9-NEXT: $vgpr0_vgpr1 = COPY [[COPY2]](s64)
+ ; GFX9-NEXT: SI_RETURN implicit $vgpr0_vgpr1
+ ;
+ ; GFX12-LABEL: name: test_fminimum_f64
+ ; GFX12: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
+ ; GFX12-NEXT: [[FMINIMUM:%[0-9]+]]:_(s64) = G_FMINIMUM [[COPY]], [[COPY1]]
+ ; GFX12-NEXT: $vgpr0_vgpr1 = COPY [[FMINIMUM]](s64)
+ ; GFX12-NEXT: SI_RETURN implicit $vgpr0_vgpr1
+ %0:_(s64) = COPY $vgpr0_vgpr1
+ %1:_(s64) = COPY $vgpr2_vgpr3
+ %2:_(s64) = G_FMINIMUM %0, %1
+ $vgpr0_vgpr1 = COPY %2(s64)
+ SI_RETURN implicit $vgpr0_vgpr1
+...
+---
+name: test_fminimum_v2f16
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+
+ ; GFX9-LABEL: name: test_fminimum_v2f16
+ ; GFX9: liveins: $vgpr0, $vgpr1
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+ ; GFX9-NEXT: [[FMINNUM_IEEE:%[0-9]+]]:_(<2 x s16>) = G_FMINNUM_IEEE [[COPY]], [[COPY1]]
+ ; GFX9-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
+ ; GFX9-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX9-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX9-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+ ; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX9-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX9-NEXT: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[TRUNC]](s16), [[TRUNC2]]
+ ; GFX9-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[TRUNC1]](s16), [[TRUNC3]]
+ ; GFX9-NEXT: [[C1:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH7E00
+ ; GFX9-NEXT: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[FMINNUM_IEEE]](<2 x s16>)
+ ; GFX9-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; GFX9-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX9-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s16) = G_SELECT [[FCMP]](s1), [[TRUNC4]], [[C1]]
+ ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s16) = G_SELECT [[FCMP1]](s1), [[TRUNC5]], [[C1]]
+ ; GFX9-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[SELECT]](s16), [[SELECT1]](s16)
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(<2 x s16>) = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ; GFX9-NEXT: $vgpr0 = COPY [[COPY2]](<2 x s16>)
+ ; GFX9-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX12-LABEL: name: test_fminimum_v2f16
+ ; GFX12: liveins: $vgpr0, $vgpr1
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
+ ; GFX12-NEXT: [[FMINIMUM:%[0-9]+]]:_(<2 x s16>) = G_FMINIMUM [[COPY]], [[COPY1]]
+ ; GFX12-NEXT: $vgpr0 = COPY [[FMINIMUM]](<2 x s16>)
+ ; GFX12-NEXT: SI_RETURN implicit $vgpr0
+ %0:_(<2 x s16>) = COPY $vgpr0
+ %1:_(<2 x s16>) = COPY $vgpr1
+ %2:_(<2 x s16>) = G_FMINIMUM %0, %1
+ $vgpr0 = COPY %2(<2 x s16>)
+ SI_RETURN implicit $vgpr0
+...
+---
+name: test_fminimum_v2f32
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
+
+ ; GFX9-LABEL: name: test_fminimum_v2f32
+ ; GFX9: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+ ; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
+ ; GFX9-NEXT: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[COPY]], [[COPY2]]
+ ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[COPY]](s32), [[COPY2]]
+ ; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x7FF8000000000000
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s1), [[FMINNUM_IEEE]], [[C]]
+ ; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SELECT]](s32)
+ ; GFX9-NEXT: [[FMINNUM_IEEE1:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[COPY1]], [[COPY3]]
+ ; GFX9-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[COPY1]](s32), [[COPY3]]
+ ; GFX9-NEXT: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[FCMP1]](s1), [[FMINNUM_IEEE1]], [[C]]
+ ; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[SELECT1]](s32)
+ ; GFX9-NEXT: $vgpr0 = COPY [[COPY4]](s32)
+ ; GFX9-NEXT: $vgpr1 = COPY [[COPY5]](s32)
+ ; GFX9-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ ;
+ ; GFX12-LABEL: name: test_fminimum_v2f32
+ ; GFX12: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX12-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
+ ; GFX12-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
+ ; GFX12-NEXT: [[FMINIMUM:%[0-9]+]]:_(s32) = G_FMINIMUM [[COPY]], [[COPY2]]
+ ; GFX12-NEXT: [[FMINIMUM1:%[0-9]+]]:_(s32) = G_FMINIMUM [[COPY1]], [[COPY3]]
+ ; GFX12-NEXT: $vgpr0 = COPY [[FMINIMUM]](s32)
+ ; GFX12-NEXT: $vgpr1 = COPY [[FMINIMUM1]](s32)
+ ; GFX12-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(<2 x s32>) = G_BUILD_VECTOR %0(s32), %1(s32)
+ %3:_(s32) = COPY $vgpr2
+ %4:_(s32) = COPY $vgpr3
+ %5:_(<2 x s32>) = G_BUILD_VECTOR %3(s32), %4(s32)
+ %6:_(<2 x s32>) = G_FMINIMUM %2, %5
+ %7:_(s32), %8:_(s32) = G_UNMERGE_VALUES %6(<2 x s32>)
+ $vgpr0 = COPY %7(s32)
+ $vgpr1 = COPY %8(s32)
+ SI_RETURN implicit $vgpr0, implicit $vgpr1
+...
+---
+name: test_fminimum_nsz_f32
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+
+ ; GFX9-LABEL: name: test_fminimum_nsz_f32
+ ; GFX9: liveins: $vgpr0, $vgpr1
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9-NEXT: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[COPY]], [[COPY1]]
+ ; GFX9-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ord), [[COPY]](s32), [[COPY1]]
+ ; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0x7FF8000000000000
+ ; GFX9-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[FCMP]](s1), [[FMINNUM_IEEE]], [[C]]
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SELECT]](s32)
+ ; GFX9-NEXT: $vgpr0 = COPY [[COPY2]](s32)
+ ; GFX9-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX12-LABEL: name: test_fminimum_nsz_f32
+ ; GFX12: liveins: $vgpr0, $vgpr1
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX12-NEXT: [[FMINIMUM:%[0-9]+]]:_(s32) = nsz G_FMINIMUM [[COPY]], [[COPY1]]
+ ; GFX12-NEXT: $vgpr0 = COPY [[FMINIMUM]](s32)
+ ; GFX12-NEXT: SI_RETURN implicit $vgpr0
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32) = nsz G_FMINIMUM %0, %1
+ $vgpr0 = COPY %2(s32)
+ SI_RETURN implicit $vgpr0
+...
+---
+name: test_fminimum_nnan_f32
+body: |
+ bb.0:
+ liveins: $vgpr0, $vgpr1
+
+ ; GFX9-LABEL: name: test_fminimum_nnan_f32
+ ; GFX9: liveins: $vgpr0, $vgpr1
+ ; GFX9-NEXT: {{ $}}
+ ; GFX9-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9-NEXT: [[FMINNUM_IEEE:%[0-9]+]]:_(s32) = G_FMINNUM_IEEE [[COPY]], [[COPY1]]
+ ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[FMINNUM_IEEE]](s32)
+ ; GFX9-NEXT: $vgpr0 = COPY [[COPY2]](s32)
+ ; GFX9-NEXT: SI_RETURN implicit $vgpr0
+ ;
+ ; GFX12-LABEL: name: test_fminimum_nnan_f32
+ ; GFX12: liveins: $vgpr0, $vgpr1
+ ; GFX12-NEXT: {{ $}}
+ ; GFX12-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+ ; GFX12-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX12-NEXT: [[FMINIMUM:%[0-9]+]]:_(s32) = nnan G_FMINIMUM [[COPY]], [[COPY1]]
+ ; GFX12-NEXT: $vgpr0 = COPY [[FMINIMUM]](s32)
+ ; GFX12-NEXT: SI_RETURN implicit $vgpr0
+ %0:_(s32) = COPY $vgpr0
+ %1:_(s32) = COPY $vgpr1
+ %2:_(s32) = nnan G_FMINIMUM %0, %1
+ $vgpr0 = COPY %2(s32)
+ SI_RETURN implicit $vgpr0
+...
+
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-shuffle-vector.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-shuffle-vector.mir
index c8bd8ab..423ce82 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-shuffle-vector.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-shuffle-vector.mir
@@ -18,58 +18,12 @@ body: |
; CHECK-NEXT: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
- %2:_(<2 x s32>) = G_SHUFFLE_VECTOR %0, %1, shufflemask(0, 1)
+ %2:_(<2 x s32>) = G_BUILD_VECTOR %0, %1
$vgpr0_vgpr1 = COPY %2
...
---
-name: shufflevector_scalar_src_dst
-tracksRegLiveness: true
-
-body: |
- bb.0:
- liveins: $vgpr0, $vgpr1
-
- ; CHECK-LABEL: name: shufflevector_scalar_src_dst
- ; CHECK: liveins: $vgpr0, $vgpr1
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; CHECK-NEXT: $vgpr0 = COPY [[COPY2]](s32)
- %0:_(s32) = COPY $vgpr0
- %1:_(s32) = COPY $vgpr1
- %2:_(s32) = G_SHUFFLE_VECTOR %0, %1, shufflemask(1)
- $vgpr0 = COPY %2
-
-...
-
----
-name: shufflevector_scalar_dst
-tracksRegLiveness: true
-
-body: |
- bb.0:
- liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
-
- ; CHECK-LABEL: name: shufflevector_scalar_dst
- ; CHECK: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
- ; CHECK-NEXT: {{ $}}
- ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
- ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[UV]](s32)
- ; CHECK-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
- ; CHECK-NEXT: $vgpr0 = COPY [[COPY3]](s32)
- %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
- %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
- %2:_(s32) = G_SHUFFLE_VECTOR %0, %1, shufflemask(2)
- $vgpr0 = COPY %2
-
-...
-
----
name: shufflevector_v2s32_0_1
tracksRegLiveness: true
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll
index 8533e34..518af70 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll
@@ -1750,7 +1750,7 @@ define i65 @v_lshr_i65_33(i65 %value) {
; GFX6-NEXT: v_and_b32_e32 v0, 1, v2
; GFX6-NEXT: v_lshl_b64 v[0:1], v[0:1], 31
; GFX6-NEXT: v_lshrrev_b32_e32 v2, 1, v3
-; GFX6-NEXT: v_or_b32_e32 v0, v2, v0
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v2
; GFX6-NEXT: v_mov_b32_e32 v2, 0
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
@@ -1763,7 +1763,7 @@ define i65 @v_lshr_i65_33(i65 %value) {
; GFX8-NEXT: v_and_b32_e32 v0, 1, v2
; GFX8-NEXT: v_lshlrev_b64 v[0:1], 31, v[0:1]
; GFX8-NEXT: v_lshrrev_b32_e32 v2, 1, v3
-; GFX8-NEXT: v_or_b32_e32 v0, v2, v0
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v2, 0
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
@@ -1776,7 +1776,7 @@ define i65 @v_lshr_i65_33(i65 %value) {
; GFX9-NEXT: v_and_b32_e32 v0, 1, v2
; GFX9-NEXT: v_lshlrev_b64 v[0:1], 31, v[0:1]
; GFX9-NEXT: v_lshrrev_b32_e32 v2, 1, v3
-; GFX9-NEXT: v_or_b32_e32 v0, v2, v0
+; GFX9-NEXT: v_or_b32_e32 v0, v0, v2
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -1789,7 +1789,7 @@ define i65 @v_lshr_i65_33(i65 %value) {
; GFX10-NEXT: v_and_b32_e32 v0, 1, v2
; GFX10-NEXT: v_lshrrev_b32_e32 v2, 1, v3
; GFX10-NEXT: v_lshlrev_b64 v[0:1], 31, v[0:1]
-; GFX10-NEXT: v_or_b32_e32 v0, v2, v0
+; GFX10-NEXT: v_or_b32_e32 v0, v0, v2
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
@@ -1800,7 +1800,7 @@ define i65 @v_lshr_i65_33(i65 %value) {
; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_and_b32 v0, 1, v2
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 1, v3
; GFX11-NEXT: v_lshlrev_b64 v[0:1], 31, v[0:1]
-; GFX11-NEXT: v_or_b32_e32 v0, v2, v0
+; GFX11-NEXT: v_or_b32_e32 v0, v0, v2
; GFX11-NEXT: v_mov_b32_e32 v2, 0
; GFX11-NEXT: s_setpc_b64 s[30:31]
%result = lshr i65 %value, 33
@@ -1859,21 +1859,19 @@ define amdgpu_ps i65 @s_lshr_i65_33(i65 inreg %value) {
; GCN-LABEL: s_lshr_i65_33:
; GCN: ; %bb.0:
; GCN-NEXT: s_and_b64 s[2:3], s[2:3], 1
-; GCN-NEXT: s_lshr_b32 s0, s1, 1
-; GCN-NEXT: s_mov_b32 s1, 0
-; GCN-NEXT: s_lshl_b64 s[2:3], s[2:3], 31
-; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT: s_lshr_b32 s4, s1, 1
+; GCN-NEXT: s_lshl_b64 s[0:1], s[2:3], 31
+; GCN-NEXT: s_or_b32 s0, s0, s4
; GCN-NEXT: s_mov_b32 s2, 0
; GCN-NEXT: ; return to shader part epilog
;
; GFX10PLUS-LABEL: s_lshr_i65_33:
; GFX10PLUS: ; %bb.0:
; GFX10PLUS-NEXT: s_and_b64 s[2:3], s[2:3], 1
-; GFX10PLUS-NEXT: s_lshr_b32 s0, s1, 1
-; GFX10PLUS-NEXT: s_mov_b32 s1, 0
-; GFX10PLUS-NEXT: s_lshl_b64 s[2:3], s[2:3], 31
-; GFX10PLUS-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX10PLUS-NEXT: s_lshr_b32 s4, s1, 1
+; GFX10PLUS-NEXT: s_lshl_b64 s[0:1], s[2:3], 31
; GFX10PLUS-NEXT: s_mov_b32 s2, 0
+; GFX10PLUS-NEXT: s_or_b32 s0, s0, s4
; GFX10PLUS-NEXT: ; return to shader part epilog
%result = lshr i65 %value, 33
ret i65 %result
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/or.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/or.ll
index af377b1..e0581f01 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/or.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/or.ll
@@ -597,13 +597,13 @@ define amdgpu_kernel void @s_or_u64_zext_with_sregs(ptr addrspace(1) %out, ptr a
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GFX7-NEXT: s_mov_b32 s5, 0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: s_load_dword s4, s[2:3], 0x0
+; GFX7-NEXT: s_load_dword s3, s[2:3], 0x0
; GFX7-NEXT: s_mov_b32 s2, -1
-; GFX7-NEXT: s_mov_b32 s3, 0xf000
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7-NEXT: s_or_b64 s[4:5], s[4:5], 0x50
+; GFX7-NEXT: s_or_b32 s4, s3, 0x50
; GFX7-NEXT: v_mov_b32_e32 v0, s4
; GFX7-NEXT: v_mov_b32_e32 v1, s5
+; GFX7-NEXT: s_mov_b32 s3, 0xf000
; GFX7-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GFX7-NEXT: s_endpgm
;
@@ -616,7 +616,7 @@ define amdgpu_kernel void @s_or_u64_zext_with_sregs(ptr addrspace(1) %out, ptr a
; GFX8-NEXT: s_mov_b32 s3, 0
; GFX8-NEXT: v_mov_b32_e32 v2, s0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
-; GFX8-NEXT: s_or_b64 s[2:3], s[2:3], 0x50
+; GFX8-NEXT: s_or_b32 s2, s2, 0x50
; GFX8-NEXT: v_mov_b32_e32 v0, s2
; GFX8-NEXT: v_mov_b32_e32 v1, s3
; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
@@ -630,7 +630,7 @@ define amdgpu_kernel void @s_or_u64_zext_with_sregs(ptr addrspace(1) %out, ptr a
; GFX9-NEXT: s_load_dword s2, s[2:3], 0x0
; GFX9-NEXT: s_mov_b32 s3, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_or_b64 s[2:3], s[2:3], 0x50
+; GFX9-NEXT: s_or_b32 s2, s2, 0x50
; GFX9-NEXT: v_mov_b32_e32 v0, s2
; GFX9-NEXT: v_mov_b32_e32 v1, s3
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -644,7 +644,7 @@ define amdgpu_kernel void @s_or_u64_zext_with_sregs(ptr addrspace(1) %out, ptr a
; GFX10-NEXT: s_load_dword s2, s[2:3], 0x0
; GFX10-NEXT: s_mov_b32 s3, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-NEXT: s_or_b64 s[2:3], s[2:3], 0x50
+; GFX10-NEXT: s_or_b32 s2, s2, 0x50
; GFX10-NEXT: v_mov_b32_e32 v0, s2
; GFX10-NEXT: v_mov_b32_e32 v1, s3
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -658,7 +658,7 @@ define amdgpu_kernel void @s_or_u64_zext_with_sregs(ptr addrspace(1) %out, ptr a
; GFX11-NEXT: s_load_b32 s2, s[2:3], 0x0
; GFX11-NEXT: s_mov_b32 s3, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_or_b64 s[2:3], s[2:3], 0x50
+; GFX11-NEXT: s_or_b32 s2, s2, 0x50
; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX11-NEXT: global_store_b64 v2, v[0:1], s[0:1]
; GFX11-NEXT: s_endpgm
@@ -671,7 +671,7 @@ define amdgpu_kernel void @s_or_u64_zext_with_sregs(ptr addrspace(1) %out, ptr a
; GFX12-NEXT: s_load_b32 s2, s[2:3], 0x0
; GFX12-NEXT: s_mov_b32 s3, 0
; GFX12-NEXT: s_wait_kmcnt 0x0
-; GFX12-NEXT: s_or_b64 s[2:3], s[2:3], 0x50
+; GFX12-NEXT: s_or_b32 s2, s2, 0x50
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, s3
; GFX12-NEXT: global_store_b64 v2, v[0:1], s[0:1]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-shuffle.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-shuffle.mir
index 6e4c6bc..31e3d97 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-shuffle.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/prelegalizer-combiner-shuffle.mir
@@ -147,15 +147,17 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr1
- ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[COPY]](p3) :: (load (<8 x s16>), align 8, addrspace 3)
- ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD]](<8 x s16>)
- ; CHECK-NEXT: G_STORE [[UV4]](s16), [[COPY1]](p3) :: (store (s16), addrspace 3)
+ ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD [[COPY]], [[C]](s32)
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[PTR_ADD]](p3) :: (load (s16), addrspace 3)
+ ; CHECK-NEXT: G_STORE [[LOAD]](s16), [[COPY1]](p3) :: (store (s16), addrspace 3)
; CHECK-NEXT: SI_RETURN
%0:_(p3) = COPY $vgpr0
%1:_(p3) = COPY $vgpr1
%2:_(<8 x s16>) = G_IMPLICIT_DEF
%3:_(<8 x s16>) = G_LOAD %0(p3) :: (load (<8 x s16>), align 8, addrspace 3)
- %4:_(s16) = G_SHUFFLE_VECTOR %3(<8 x s16>), %2, shufflemask(4)
+ %idx:_(s32) = G_CONSTANT i32 4
+ %4:_(s16) = G_EXTRACT_VECTOR_ELT %3(<8 x s16>), %idx
G_STORE %4(s16), %1(p3) :: (store (s16), addrspace 3)
SI_RETURN
...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/sext_inreg.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/sext_inreg.ll
index a9b3deb..cfe655f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/sext_inreg.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/sext_inreg.ll
@@ -1381,7 +1381,7 @@ define i65 @v_sext_inreg_i65_33(i65 %value) {
; GFX6-NEXT: v_ashrrev_i32_e32 v2, 31, v1
; GFX6-NEXT: v_lshl_b64 v[0:1], v[1:2], 31
; GFX6-NEXT: v_lshrrev_b32_e32 v3, 1, v3
-; GFX6-NEXT: v_or_b32_e32 v0, v3, v0
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v3
; GFX6-NEXT: v_ashrrev_i32_e32 v2, 1, v2
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
@@ -1393,7 +1393,7 @@ define i65 @v_sext_inreg_i65_33(i65 %value) {
; GFX8-NEXT: v_ashrrev_i32_e32 v2, 31, v1
; GFX8-NEXT: v_lshlrev_b64 v[0:1], 31, v[1:2]
; GFX8-NEXT: v_lshrrev_b32_e32 v3, 1, v3
-; GFX8-NEXT: v_or_b32_e32 v0, v3, v0
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v3
; GFX8-NEXT: v_ashrrev_i32_e32 v2, 1, v2
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
@@ -1405,7 +1405,7 @@ define i65 @v_sext_inreg_i65_33(i65 %value) {
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v1
; GFX9-NEXT: v_lshlrev_b64 v[0:1], 31, v[1:2]
; GFX9-NEXT: v_lshrrev_b32_e32 v3, 1, v3
-; GFX9-NEXT: v_or_b32_e32 v0, v3, v0
+; GFX9-NEXT: v_or_b32_e32 v0, v0, v3
; GFX9-NEXT: v_ashrrev_i32_e32 v2, 1, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
@@ -1418,7 +1418,7 @@ define i65 @v_sext_inreg_i65_33(i65 %value) {
; GFX10PLUS-NEXT: v_ashrrev_i32_e32 v2, 31, v1
; GFX10PLUS-NEXT: v_lshlrev_b64 v[0:1], 31, v[1:2]
; GFX10PLUS-NEXT: v_ashrrev_i32_e32 v2, 1, v2
-; GFX10PLUS-NEXT: v_or_b32_e32 v0, v3, v0
+; GFX10PLUS-NEXT: v_or_b32_e32 v0, v0, v3
; GFX10PLUS-NEXT: s_setpc_b64 s[30:31]
%shl = shl i65 %value, 33
%ashr = ashr i65 %value, 33
@@ -1429,29 +1429,27 @@ define amdgpu_ps i65 @s_sext_inreg_i65_18(i65 inreg %value) {
; GCN-LABEL: s_sext_inreg_i65_18:
; GCN: ; %bb.0:
; GCN-NEXT: s_lshl_b64 s[2:3], s[2:3], 18
-; GCN-NEXT: s_lshr_b32 s4, s1, 14
-; GCN-NEXT: s_mov_b32 s5, 0
-; GCN-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
+; GCN-NEXT: s_lshr_b32 s3, s1, 14
+; GCN-NEXT: s_or_b32 s2, s2, s3
; GCN-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x10000
; GCN-NEXT: s_bfe_u64 s[0:1], s[0:1], 0x2e0000
-; GCN-NEXT: s_lshl_b32 s7, s2, 14
-; GCN-NEXT: s_mov_b32 s6, s5
-; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[6:7]
+; GCN-NEXT: s_lshl_b32 s5, s2, 14
+; GCN-NEXT: s_mov_b32 s4, 0
+; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
; GCN-NEXT: s_ashr_i64 s[2:3], s[2:3], 18
; GCN-NEXT: ; return to shader part epilog
;
; GFX10PLUS-LABEL: s_sext_inreg_i65_18:
; GFX10PLUS: ; %bb.0:
; GFX10PLUS-NEXT: s_lshl_b64 s[2:3], s[2:3], 18
-; GFX10PLUS-NEXT: s_lshr_b32 s4, s1, 14
-; GFX10PLUS-NEXT: s_mov_b32 s5, 0
+; GFX10PLUS-NEXT: s_lshr_b32 s3, s1, 14
; GFX10PLUS-NEXT: s_bfe_u64 s[0:1], s[0:1], 0x2e0000
-; GFX10PLUS-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
-; GFX10PLUS-NEXT: s_mov_b32 s6, s5
+; GFX10PLUS-NEXT: s_or_b32 s2, s2, s3
+; GFX10PLUS-NEXT: s_mov_b32 s4, 0
; GFX10PLUS-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x10000
-; GFX10PLUS-NEXT: s_lshl_b32 s7, s2, 14
+; GFX10PLUS-NEXT: s_lshl_b32 s5, s2, 14
; GFX10PLUS-NEXT: s_ashr_i64 s[2:3], s[2:3], 18
-; GFX10PLUS-NEXT: s_or_b64 s[0:1], s[0:1], s[6:7]
+; GFX10PLUS-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
; GFX10PLUS-NEXT: ; return to shader part epilog
%shl = shl i65 %value, 18
%ashr = ashr i65 %shl, 18
@@ -1464,13 +1462,12 @@ define amdgpu_ps i65 @s_sext_inreg_i65_33(i65 inreg %value) {
; GCN-NEXT: s_lshl_b32 s3, s2, 1
; GCN-NEXT: s_mov_b32 s2, 0
; GCN-NEXT: s_lshr_b64 s[4:5], s[0:1], 31
-; GCN-NEXT: s_or_b64 s[4:5], s[2:3], s[4:5]
-; GCN-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x10000
-; GCN-NEXT: s_bfe_u32 s0, s0, 0x1f0000
-; GCN-NEXT: s_mov_b32 s1, s2
-; GCN-NEXT: s_lshl_b64 s[2:3], s[4:5], 31
-; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
-; GCN-NEXT: s_ashr_i32 s2, s5, 1
+; GCN-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
+; GCN-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x10000
+; GCN-NEXT: s_bfe_u32 s4, s0, 0x1f0000
+; GCN-NEXT: s_lshl_b64 s[0:1], s[2:3], 31
+; GCN-NEXT: s_or_b32 s0, s0, s4
+; GCN-NEXT: s_ashr_i32 s2, s3, 1
; GCN-NEXT: ; return to shader part epilog
;
; GFX10PLUS-LABEL: s_sext_inreg_i65_33:
@@ -1478,13 +1475,12 @@ define amdgpu_ps i65 @s_sext_inreg_i65_33(i65 inreg %value) {
; GFX10PLUS-NEXT: s_lshl_b32 s3, s2, 1
; GFX10PLUS-NEXT: s_mov_b32 s2, 0
; GFX10PLUS-NEXT: s_lshr_b64 s[4:5], s[0:1], 31
-; GFX10PLUS-NEXT: s_bfe_u32 s0, s0, 0x1f0000
-; GFX10PLUS-NEXT: s_or_b64 s[4:5], s[2:3], s[4:5]
-; GFX10PLUS-NEXT: s_mov_b32 s1, s2
-; GFX10PLUS-NEXT: s_bfe_i64 s[4:5], s[4:5], 0x10000
-; GFX10PLUS-NEXT: s_lshl_b64 s[2:3], s[4:5], 31
-; GFX10PLUS-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
-; GFX10PLUS-NEXT: s_ashr_i32 s2, s5, 1
+; GFX10PLUS-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
+; GFX10PLUS-NEXT: s_bfe_u32 s4, s0, 0x1f0000
+; GFX10PLUS-NEXT: s_bfe_i64 s[2:3], s[2:3], 0x10000
+; GFX10PLUS-NEXT: s_lshl_b64 s[0:1], s[2:3], 31
+; GFX10PLUS-NEXT: s_ashr_i32 s2, s3, 1
+; GFX10PLUS-NEXT: s_or_b32 s0, s0, s4
; GFX10PLUS-NEXT: ; return to shader part epilog
%shl = shl i65 %value, 33
%ashr = ashr i65 %shl, 33
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/shufflevector-pointer-crash.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/shufflevector-pointer-crash.mir
index ac903ad..e7bba9d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/shufflevector-pointer-crash.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/shufflevector-pointer-crash.mir
@@ -19,15 +19,15 @@ body: |
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<2 x p0>) = G_BITCAST [[BUILD_VECTOR]](<4 x s32>)
; CHECK-NEXT: [[UV:%[0-9]+]]:_(p0), [[UV1:%[0-9]+]]:_(p0) = G_UNMERGE_VALUES [[BITCAST]](<2 x p0>)
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY [[UV]](p0)
- ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY [[COPY]](p0)
- ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](p0)
+ ; CHECK-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](p0)
; CHECK-NEXT: $vgpr0 = COPY [[UV2]](s32)
; CHECK-NEXT: $vgpr1 = COPY [[UV3]](s32)
; CHECK-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
%0:_(p0) = G_CONSTANT i64 0
%1:_(<2 x p0>) = G_BUILD_VECTOR %0:_(p0), %0:_(p0)
%2:_(<2 x p0>) = G_LOAD %0:_(p0) :: (load (<2 x p0>))
- %3:_(p0) = G_SHUFFLE_VECTOR %2:_(<2 x p0>), %1:_, shufflemask(0)
+ %idx:_(s32) = G_CONSTANT i32 0
+ %3:_(p0) = G_EXTRACT_VECTOR_ELT %2:_(<2 x p0>), %idx
%4:_(s32), %5:_(s32) = G_UNMERGE_VALUES %3:_(p0)
$vgpr0 = COPY %4:_(s32)
$vgpr1 = COPY %5:_(s32)
diff --git a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll
index afd0f01..6831380 100644
--- a/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll
+++ b/llvm/test/CodeGen/AMDGPU/buffer-fat-pointers-memcpy.ll
@@ -415,28 +415,18 @@ define amdgpu_kernel void @memcpy_known(ptr addrspace(7) %src, ptr addrspace(7)
; GISEL-GFX942-LABEL: memcpy_known:
; GISEL-GFX942: ; %bb.0:
; GISEL-GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GISEL-GFX942-NEXT: s_load_dword s7, s[4:5], 0x54
; GISEL-GFX942-NEXT: s_load_dword s11, s[4:5], 0x34
-; GISEL-GFX942-NEXT: s_mov_b32 s7, 0
; GISEL-GFX942-NEXT: s_load_dwordx4 s[12:15], s[4:5], 0x44
-; GISEL-GFX942-NEXT: s_mov_b32 s8, s7
+; GISEL-GFX942-NEXT: s_mov_b32 s16, 0
+; GISEL-GFX942-NEXT: v_mov_b32_e32 v0, 0x2000
; GISEL-GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GISEL-GFX942-NEXT: s_mov_b32 s6, s1
+; GISEL-GFX942-NEXT: s_mov_b32 s8, s1
; GISEL-GFX942-NEXT: s_mov_b32 s9, s2
-; GISEL-GFX942-NEXT: s_or_b64 s[8:9], s[6:7], s[8:9]
-; GISEL-GFX942-NEXT: s_mov_b32 s6, s3
-; GISEL-GFX942-NEXT: s_load_dword s3, s[4:5], 0x54
-; GISEL-GFX942-NEXT: s_mov_b32 s10, s7
-; GISEL-GFX942-NEXT: s_or_b64 s[10:11], s[6:7], s[10:11]
-; GISEL-GFX942-NEXT: s_mov_b32 s6, s13
-; GISEL-GFX942-NEXT: s_mov_b32 s4, s7
+; GISEL-GFX942-NEXT: s_mov_b32 s10, s3
+; GISEL-GFX942-NEXT: s_mov_b32 s4, s13
; GISEL-GFX942-NEXT: s_mov_b32 s5, s14
-; GISEL-GFX942-NEXT: s_mov_b32 s16, 0
-; GISEL-GFX942-NEXT: s_or_b64 s[4:5], s[6:7], s[4:5]
; GISEL-GFX942-NEXT: s_mov_b32 s6, s15
-; GISEL-GFX942-NEXT: s_mov_b32 s2, s7
-; GISEL-GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GISEL-GFX942-NEXT: s_or_b64 s[6:7], s[6:7], s[2:3]
-; GISEL-GFX942-NEXT: v_mov_b32_e32 v0, 0x2000
; GISEL-GFX942-NEXT: v_mov_b32_e32 v1, s16
; GISEL-GFX942-NEXT: .LBB0_1: ; %load-store-loop
; GISEL-GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -491,25 +481,16 @@ define amdgpu_kernel void @memcpy_known(ptr addrspace(7) %src, ptr addrspace(7)
; GISEL-GFX1100-NEXT: s_load_b128 s[8:11], s[4:5], 0x44
; GISEL-GFX1100-NEXT: s_load_b32 s7, s[4:5], 0x34
; GISEL-GFX1100-NEXT: s_load_b32 s15, s[4:5], 0x54
-; GISEL-GFX1100-NEXT: s_mov_b32 s17, 0
-; GISEL-GFX1100-NEXT: s_mov_b32 s12, 0
-; GISEL-GFX1100-NEXT: s_mov_b32 s4, s17
-; GISEL-GFX1100-NEXT: s_mov_b32 s6, s17
-; GISEL-GFX1100-NEXT: v_mov_b32_e32 v0, s12
-; GISEL-GFX1100-NEXT: s_mov_b32 s14, s17
+; GISEL-GFX1100-NEXT: s_mov_b32 s4, 0
+; GISEL-GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GISEL-GFX1100-NEXT: v_mov_b32_e32 v0, s4
; GISEL-GFX1100-NEXT: s_waitcnt lgkmcnt(0)
-; GISEL-GFX1100-NEXT: s_mov_b32 s16, s1
+; GISEL-GFX1100-NEXT: s_mov_b32 s4, s1
; GISEL-GFX1100-NEXT: s_mov_b32 s5, s2
-; GISEL-GFX1100-NEXT: s_mov_b32 s2, s17
-; GISEL-GFX1100-NEXT: s_or_b64 s[4:5], s[16:17], s[4:5]
-; GISEL-GFX1100-NEXT: s_mov_b32 s16, s3
-; GISEL-GFX1100-NEXT: s_mov_b32 s3, s10
-; GISEL-GFX1100-NEXT: s_or_b64 s[6:7], s[16:17], s[6:7]
-; GISEL-GFX1100-NEXT: s_mov_b32 s16, s9
-; GISEL-GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GISEL-GFX1100-NEXT: s_or_b64 s[12:13], s[16:17], s[2:3]
-; GISEL-GFX1100-NEXT: s_mov_b32 s16, s11
-; GISEL-GFX1100-NEXT: s_or_b64 s[14:15], s[16:17], s[14:15]
+; GISEL-GFX1100-NEXT: s_mov_b32 s6, s3
+; GISEL-GFX1100-NEXT: s_mov_b32 s12, s9
+; GISEL-GFX1100-NEXT: s_mov_b32 s13, s10
+; GISEL-GFX1100-NEXT: s_mov_b32 s14, s11
; GISEL-GFX1100-NEXT: .LBB0_1: ; %load-store-loop
; GISEL-GFX1100-NEXT: ; =>This Inner Loop Header: Depth=1
; GISEL-GFX1100-NEXT: v_add_nc_u32_e32 v61, s0, v0
@@ -960,28 +941,18 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp
; GISEL-GFX942-LABEL: memcpy_known_medium:
; GISEL-GFX942: ; %bb.0:
; GISEL-GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GISEL-GFX942-NEXT: s_load_dword s7, s[4:5], 0x54
; GISEL-GFX942-NEXT: s_load_dword s11, s[4:5], 0x34
-; GISEL-GFX942-NEXT: s_mov_b32 s7, 0
; GISEL-GFX942-NEXT: s_load_dwordx4 s[12:15], s[4:5], 0x44
-; GISEL-GFX942-NEXT: s_mov_b32 s8, s7
+; GISEL-GFX942-NEXT: s_mov_b32 s16, 0
+; GISEL-GFX942-NEXT: v_mov_b32_e32 v0, 0x100
; GISEL-GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GISEL-GFX942-NEXT: s_mov_b32 s6, s1
+; GISEL-GFX942-NEXT: s_mov_b32 s8, s1
; GISEL-GFX942-NEXT: s_mov_b32 s9, s2
-; GISEL-GFX942-NEXT: s_or_b64 s[8:9], s[6:7], s[8:9]
-; GISEL-GFX942-NEXT: s_mov_b32 s6, s3
-; GISEL-GFX942-NEXT: s_load_dword s3, s[4:5], 0x54
-; GISEL-GFX942-NEXT: s_mov_b32 s10, s7
-; GISEL-GFX942-NEXT: s_or_b64 s[10:11], s[6:7], s[10:11]
-; GISEL-GFX942-NEXT: s_mov_b32 s6, s13
-; GISEL-GFX942-NEXT: s_mov_b32 s4, s7
+; GISEL-GFX942-NEXT: s_mov_b32 s10, s3
+; GISEL-GFX942-NEXT: s_mov_b32 s4, s13
; GISEL-GFX942-NEXT: s_mov_b32 s5, s14
-; GISEL-GFX942-NEXT: s_mov_b32 s16, 0
-; GISEL-GFX942-NEXT: s_or_b64 s[4:5], s[6:7], s[4:5]
; GISEL-GFX942-NEXT: s_mov_b32 s6, s15
-; GISEL-GFX942-NEXT: s_mov_b32 s2, s7
-; GISEL-GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GISEL-GFX942-NEXT: s_or_b64 s[6:7], s[6:7], s[2:3]
-; GISEL-GFX942-NEXT: v_mov_b32_e32 v0, 0x100
; GISEL-GFX942-NEXT: v_mov_b32_e32 v1, s16
; GISEL-GFX942-NEXT: .LBB1_1: ; %load-store-loop
; GISEL-GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
@@ -1036,25 +1007,16 @@ define amdgpu_kernel void @memcpy_known_medium(ptr addrspace(7) %src, ptr addrsp
; GISEL-GFX1100-NEXT: s_load_b128 s[8:11], s[4:5], 0x44
; GISEL-GFX1100-NEXT: s_load_b32 s7, s[4:5], 0x34
; GISEL-GFX1100-NEXT: s_load_b32 s15, s[4:5], 0x54
-; GISEL-GFX1100-NEXT: s_mov_b32 s17, 0
-; GISEL-GFX1100-NEXT: s_mov_b32 s12, 0
-; GISEL-GFX1100-NEXT: s_mov_b32 s4, s17
-; GISEL-GFX1100-NEXT: s_mov_b32 s6, s17
-; GISEL-GFX1100-NEXT: v_mov_b32_e32 v0, s12
-; GISEL-GFX1100-NEXT: s_mov_b32 s14, s17
+; GISEL-GFX1100-NEXT: s_mov_b32 s4, 0
+; GISEL-GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GISEL-GFX1100-NEXT: v_mov_b32_e32 v0, s4
; GISEL-GFX1100-NEXT: s_waitcnt lgkmcnt(0)
-; GISEL-GFX1100-NEXT: s_mov_b32 s16, s1
+; GISEL-GFX1100-NEXT: s_mov_b32 s4, s1
; GISEL-GFX1100-NEXT: s_mov_b32 s5, s2
-; GISEL-GFX1100-NEXT: s_mov_b32 s2, s17
-; GISEL-GFX1100-NEXT: s_or_b64 s[4:5], s[16:17], s[4:5]
-; GISEL-GFX1100-NEXT: s_mov_b32 s16, s3
-; GISEL-GFX1100-NEXT: s_mov_b32 s3, s10
-; GISEL-GFX1100-NEXT: s_or_b64 s[6:7], s[16:17], s[6:7]
-; GISEL-GFX1100-NEXT: s_mov_b32 s16, s9
-; GISEL-GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GISEL-GFX1100-NEXT: s_or_b64 s[12:13], s[16:17], s[2:3]
-; GISEL-GFX1100-NEXT: s_mov_b32 s16, s11
-; GISEL-GFX1100-NEXT: s_or_b64 s[14:15], s[16:17], s[14:15]
+; GISEL-GFX1100-NEXT: s_mov_b32 s6, s3
+; GISEL-GFX1100-NEXT: s_mov_b32 s12, s9
+; GISEL-GFX1100-NEXT: s_mov_b32 s13, s10
+; GISEL-GFX1100-NEXT: s_mov_b32 s14, s11
; GISEL-GFX1100-NEXT: .LBB1_1: ; %load-store-loop
; GISEL-GFX1100-NEXT: ; =>This Inner Loop Header: Depth=1
; GISEL-GFX1100-NEXT: v_add_nc_u32_e32 v61, s0, v0
@@ -1228,27 +1190,18 @@ define amdgpu_kernel void @memcpy_known_small(ptr addrspace(7) %src, ptr addrspa
; GISEL-GFX942: ; %bb.0:
; GISEL-GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; GISEL-GFX942-NEXT: s_load_dword s11, s[4:5], 0x34
-; GISEL-GFX942-NEXT: s_mov_b32 s7, 0
-; GISEL-GFX942-NEXT: s_mov_b32 s8, s7
-; GISEL-GFX942-NEXT: s_mov_b32 s10, s7
; GISEL-GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GISEL-GFX942-NEXT: s_mov_b32 s6, s1
+; GISEL-GFX942-NEXT: s_mov_b32 s8, s1
; GISEL-GFX942-NEXT: s_mov_b32 s9, s2
-; GISEL-GFX942-NEXT: s_or_b64 s[8:9], s[6:7], s[8:9]
-; GISEL-GFX942-NEXT: s_mov_b32 s6, s3
-; GISEL-GFX942-NEXT: s_or_b64 s[10:11], s[6:7], s[10:11]
+; GISEL-GFX942-NEXT: s_mov_b32 s10, s3
; GISEL-GFX942-NEXT: v_mov_b32_e32 v4, s0
; GISEL-GFX942-NEXT: buffer_load_dwordx4 v[0:3], v4, s[8:11], 0 offen
; GISEL-GFX942-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x44
-; GISEL-GFX942-NEXT: s_load_dword s13, s[4:5], 0x54
-; GISEL-GFX942-NEXT: s_mov_b32 s4, s7
-; GISEL-GFX942-NEXT: s_mov_b32 s12, s7
+; GISEL-GFX942-NEXT: s_load_dword s7, s[4:5], 0x54
; GISEL-GFX942-NEXT: s_waitcnt lgkmcnt(0)
-; GISEL-GFX942-NEXT: s_mov_b32 s6, s1
+; GISEL-GFX942-NEXT: s_mov_b32 s4, s1
; GISEL-GFX942-NEXT: s_mov_b32 s5, s2
-; GISEL-GFX942-NEXT: s_or_b64 s[4:5], s[6:7], s[4:5]
; GISEL-GFX942-NEXT: s_mov_b32 s6, s3
-; GISEL-GFX942-NEXT: s_or_b64 s[6:7], s[6:7], s[12:13]
; GISEL-GFX942-NEXT: v_mov_b32_e32 v5, s0
; GISEL-GFX942-NEXT: s_waitcnt vmcnt(0)
; GISEL-GFX942-NEXT: buffer_store_dwordx4 v[0:3], v5, s[4:7], 0 offen
@@ -1261,35 +1214,24 @@ define amdgpu_kernel void @memcpy_known_small(ptr addrspace(7) %src, ptr addrspa
; GISEL-GFX1100: ; %bb.0:
; GISEL-GFX1100-NEXT: s_clause 0x1
; GISEL-GFX1100-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GISEL-GFX1100-NEXT: s_load_b32 s7, s[4:5], 0x34
-; GISEL-GFX1100-NEXT: s_mov_b32 s13, 0
-; GISEL-GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GISEL-GFX1100-NEXT: s_mov_b32 s8, s13
-; GISEL-GFX1100-NEXT: s_mov_b32 s6, s13
+; GISEL-GFX1100-NEXT: s_load_b32 s11, s[4:5], 0x34
; GISEL-GFX1100-NEXT: s_waitcnt lgkmcnt(0)
-; GISEL-GFX1100-NEXT: s_mov_b32 s12, s1
-; GISEL-GFX1100-NEXT: s_mov_b32 s9, s2
; GISEL-GFX1100-NEXT: v_mov_b32_e32 v4, s0
-; GISEL-GFX1100-NEXT: s_or_b64 s[0:1], s[12:13], s[8:9]
-; GISEL-GFX1100-NEXT: s_mov_b32 s12, s3
-; GISEL-GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GISEL-GFX1100-NEXT: s_or_b64 s[2:3], s[12:13], s[6:7]
-; GISEL-GFX1100-NEXT: buffer_load_b128 v[0:3], v4, s[0:3], 0 offen
+; GISEL-GFX1100-NEXT: s_mov_b32 s8, s1
+; GISEL-GFX1100-NEXT: s_mov_b32 s9, s2
+; GISEL-GFX1100-NEXT: s_mov_b32 s10, s3
+; GISEL-GFX1100-NEXT: buffer_load_b128 v[0:3], v4, s[8:11], 0 offen
; GISEL-GFX1100-NEXT: s_clause 0x1
-; GISEL-GFX1100-NEXT: s_load_b128 s[8:11], s[4:5], 0x44
+; GISEL-GFX1100-NEXT: s_load_b128 s[0:3], s[4:5], 0x44
; GISEL-GFX1100-NEXT: s_load_b32 s7, s[4:5], 0x54
-; GISEL-GFX1100-NEXT: s_mov_b32 s4, s13
; GISEL-GFX1100-NEXT: s_waitcnt lgkmcnt(0)
-; GISEL-GFX1100-NEXT: s_mov_b32 s12, s9
-; GISEL-GFX1100-NEXT: s_mov_b32 s5, s10
-; GISEL-GFX1100-NEXT: v_mov_b32_e32 v5, s8
-; GISEL-GFX1100-NEXT: s_or_b64 s[4:5], s[12:13], s[4:5]
-; GISEL-GFX1100-NEXT: s_mov_b32 s12, s11
-; GISEL-GFX1100-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GISEL-GFX1100-NEXT: s_or_b64 s[6:7], s[12:13], s[6:7]
+; GISEL-GFX1100-NEXT: v_mov_b32_e32 v5, s0
+; GISEL-GFX1100-NEXT: s_mov_b32 s4, s1
+; GISEL-GFX1100-NEXT: s_mov_b32 s5, s2
+; GISEL-GFX1100-NEXT: s_mov_b32 s6, s3
; GISEL-GFX1100-NEXT: s_waitcnt vmcnt(0)
; GISEL-GFX1100-NEXT: buffer_store_b128 v[0:3], v5, s[4:7], 0 offen
-; GISEL-GFX1100-NEXT: buffer_load_b128 v[0:3], v4, s[0:3], 0 offen offset:16
+; GISEL-GFX1100-NEXT: buffer_load_b128 v[0:3], v4, s[8:11], 0 offen offset:16
; GISEL-GFX1100-NEXT: s_waitcnt vmcnt(0)
; GISEL-GFX1100-NEXT: buffer_store_b128 v[0:3], v5, s[4:7], 0 offen offset:16
; GISEL-GFX1100-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/div_i128.ll b/llvm/test/CodeGen/AMDGPU/div_i128.ll
index 6c8207a..df7f8c6 100644
--- a/llvm/test/CodeGen/AMDGPU/div_i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/div_i128.ll
@@ -4344,7 +4344,7 @@ define i128 @v_sdiv_i128_v_pow2k(i128 %lhs) {
; GFX9-G-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v3, vcc
; GFX9-G-NEXT: v_lshlrev_b64 v[0:1], 31, v[1:2]
; GFX9-G-NEXT: v_lshrrev_b32_e32 v3, 1, v4
-; GFX9-G-NEXT: v_or_b32_e32 v0, v3, v0
+; GFX9-G-NEXT: v_or_b32_e32 v0, v0, v3
; GFX9-G-NEXT: v_ashrrev_i32_e32 v3, 31, v2
; GFX9-G-NEXT: v_ashrrev_i32_e32 v2, 1, v2
; GFX9-G-NEXT: s_setpc_b64 s[30:31]
@@ -4375,14 +4375,12 @@ define i128 @v_sdiv_i128_v_pow2k(i128 %lhs) {
; GFX9-G-O0-NEXT: v_mov_b32_e32 v6, v4
; GFX9-G-O0-NEXT: s_mov_b32 s5, 1
; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s5
-; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v0, v0, v1
-; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
-; GFX9-G-O0-NEXT: v_lshlrev_b64 v[5:6], v2, v[5:6]
-; GFX9-G-O0-NEXT: v_mov_b32_e32 v3, v5
-; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v6
-; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v3
-; GFX9-G-O0-NEXT: v_or_b32_e64 v1, v1, v2
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v2, v0, v1
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[5:6], v0, v[5:6]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v5
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v6
+; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v2
; GFX9-G-O0-NEXT: s_mov_b32 s4, 31
; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
; GFX9-G-O0-NEXT: v_ashrrev_i32_e64 v3, v2, v4
@@ -4437,7 +4435,7 @@ define i128 @v_udiv_i128_v_pow2k(i128 %lhs) {
; GFX9-G-NEXT: v_mov_b32_e32 v4, v1
; GFX9-G-NEXT: v_lshlrev_b64 v[0:1], 31, v[2:3]
; GFX9-G-NEXT: v_lshrrev_b32_e32 v2, 1, v4
-; GFX9-G-NEXT: v_or_b32_e32 v0, v2, v0
+; GFX9-G-NEXT: v_or_b32_e32 v0, v0, v2
; GFX9-G-NEXT: v_lshrrev_b32_e32 v2, 1, v3
; GFX9-G-NEXT: v_mov_b32_e32 v3, 0
; GFX9-G-NEXT: s_setpc_b64 s[30:31]
@@ -4450,15 +4448,13 @@ define i128 @v_udiv_i128_v_pow2k(i128 %lhs) {
; GFX9-G-O0-NEXT: v_mov_b32_e32 v5, v3
; GFX9-G-O0-NEXT: s_mov_b32 s4, 1
; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
-; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v0, v0, v1
-; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v2, v0, v1
; GFX9-G-O0-NEXT: s_mov_b32 s4, 31
-; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
-; GFX9-G-O0-NEXT: v_lshlrev_b64 v[5:6], v2, v[4:5]
-; GFX9-G-O0-NEXT: v_mov_b32_e32 v4, v5
-; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, v6
-; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v4
-; GFX9-G-O0-NEXT: v_or_b32_e64 v1, v1, v2
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-G-O0-NEXT: v_lshlrev_b64 v[4:5], v0, v[4:5]
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v0, v4
+; GFX9-G-O0-NEXT: v_mov_b32_e32 v1, v5
+; GFX9-G-O0-NEXT: v_or_b32_e64 v0, v0, v2
; GFX9-G-O0-NEXT: s_mov_b32 s4, 1
; GFX9-G-O0-NEXT: v_mov_b32_e32 v2, s4
; GFX9-G-O0-NEXT: v_lshrrev_b32_e64 v2, v2, v3
diff --git a/llvm/test/CodeGen/AMDGPU/flat-saddr-load.ll b/llvm/test/CodeGen/AMDGPU/flat-saddr-load.ll
index b5b2655..31344c7 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-saddr-load.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-saddr-load.ll
@@ -2080,21 +2080,13 @@ define amdgpu_ps float @flat_load_saddr_i8_offset_or_i64_imm_offset_16(ptr addrs
}
define amdgpu_ps float @flat_load_saddr_i8_offset_or_i64_imm_offset_4160(ptr addrspace(6) inreg %sbase, i32 %idx) {
-; GFX1250-SDAG-LABEL: flat_load_saddr_i8_offset_or_i64_imm_offset_4160:
-; GFX1250-SDAG: ; %bb.0:
-; GFX1250-SDAG-NEXT: v_or_b32_e32 v0, 0x1040, v0
-; GFX1250-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-SDAG-NEXT: flat_load_u8 v0, v[0:1]
-; GFX1250-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-SDAG-NEXT: ; return to shader part epilog
-;
-; GFX1250-GISEL-LABEL: flat_load_saddr_i8_offset_or_i64_imm_offset_4160:
-; GFX1250-GISEL: ; %bb.0:
-; GFX1250-GISEL-NEXT: v_mov_b32_e32 v1, 0
-; GFX1250-GISEL-NEXT: v_or_b32_e32 v0, 0x1040, v0
-; GFX1250-GISEL-NEXT: flat_load_u8 v0, v[0:1]
-; GFX1250-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-GISEL-NEXT: ; return to shader part epilog
+; GFX1250-LABEL: flat_load_saddr_i8_offset_or_i64_imm_offset_4160:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: v_or_b32_e32 v0, 0x1040, v0
+; GFX1250-NEXT: v_mov_b32_e32 v1, 0
+; GFX1250-NEXT: flat_load_u8 v0, v[0:1]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: ; return to shader part epilog
%zext.idx = zext i32 %idx to i64
%or = or i64 %zext.idx, 4160
%addr = inttoptr i64 %or to ptr
diff --git a/llvm/test/CodeGen/AMDGPU/fmaximum.ll b/llvm/test/CodeGen/AMDGPU/fmaximum.ll
index e59fbad..62ec010 100644
--- a/llvm/test/CodeGen/AMDGPU/fmaximum.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmaximum.ll
@@ -1,117 +1,296 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX12-SDAG,GFX12-SDAG-TRUE16 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX12-SDAG,GFX12-SDAG-FAKE16 %s
-; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX12-GISEL,GFX12-GISEL-TRUE16 %s
-; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX12-GISEL,GFX12-GISEL-FAKE16 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9,GFX9-SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9,GFX9-GISEL %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX12,GFX12-SDAG,GFX12-SDAG-TRUE16 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX12,GFX12-SDAG,GFX12-SDAG-FAKE16 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX12,GFX12-GISEL,GFX12-GISEL-TRUE16 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX12,GFX12-GISEL,GFX12-GISEL-FAKE16 %s
define amdgpu_ps float @test_fmaximum_f32_vv(float %a, float %b) {
-; GCN-LABEL: test_fmaximum_f32_vv:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_maximum_f32 v0, v0, v1
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-LABEL: test_fmaximum_f32_vv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fmaximum_f32_vv:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_maximum_f32 v0, v0, v1
+; GFX12-NEXT: ; return to shader part epilog
%val = call float @llvm.maximum.f32(float %a, float %b)
ret float %val
}
define amdgpu_ps float @test_fmaximum_f32_ss(float inreg %a, float inreg %b) {
-; GCN-LABEL: test_fmaximum_f32_ss:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_maximum_f32 s0, s0, s1
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
-; GCN-NEXT: v_mov_b32_e32 v0, s0
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-LABEL: test_fmaximum_f32_ss:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v0, s1
+; GFX9-NEXT: v_max_f32_e32 v1, s0, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, s0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fmaximum_f32_ss:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_maximum_f32 s0, s0, s1
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
+; GFX12-NEXT: v_mov_b32_e32 v0, s0
+; GFX12-NEXT: ; return to shader part epilog
%val = call float @llvm.maximum.f32(float %a, float %b)
ret float %val
}
define amdgpu_ps float @test_fmaximum_f32_vs(float %a, float inreg %b) {
-; GCN-LABEL: test_fmaximum_f32_vs:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_maximum_f32 v0, v0, s0
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-LABEL: test_fmaximum_f32_vs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_max_f32_e32 v1, s0, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, s0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fmaximum_f32_vs:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_maximum_f32 v0, v0, s0
+; GFX12-NEXT: ; return to shader part epilog
%val = call float @llvm.maximum.f32(float %a, float %b)
ret float %val
}
define amdgpu_ps float @test_fmaximum_nnan_f32(float %a, float %b) {
-; GCN-LABEL: test_fmaximum_nnan_f32:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_maximum_f32 v0, v0, v1
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-LABEL: test_fmaximum_nnan_f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_max_f32_e32 v0, v0, v1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fmaximum_nnan_f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_maximum_f32 v0, v0, v1
+; GFX12-NEXT: ; return to shader part epilog
%val = call nnan float @llvm.maximum.f32(float %a, float %b)
ret float %val
}
+define amdgpu_ps float @test_fmaximum_nsz_f32(float %a, float %b) {
+; GFX9-LABEL: test_fmaximum_nsz_f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fmaximum_nsz_f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_maximum_f32 v0, v0, v1
+; GFX12-NEXT: ; return to shader part epilog
+ %val = call nsz float @llvm.maximum.f32(float %a, float %b)
+ ret float %val
+}
+
+define amdgpu_ps float @test_fmaximum_signed_zero_f32() {
+; GFX9-LABEL: test_fmaximum_signed_zero_f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fmaximum_signed_zero_f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_mov_b32_e32 v0, 0
+; GFX12-NEXT: ; return to shader part epilog
+ %val = call float @llvm.maximum.f32(float -0.0, float 0.0)
+ ret float %val
+}
+
define amdgpu_ps <2 x float> @test_fmaximum_v2f32(<2 x float> %a, <2 x float> %b) {
-; GCN-LABEL: test_fmaximum_v2f32:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_maximum_f32 v0, v0, v2
-; GCN-NEXT: v_maximum_f32 v1, v1, v3
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-LABEL: test_fmaximum_v2f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_max_f32_e32 v4, v0, v2
+; GFX9-NEXT: v_mov_b32_e32 v5, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v4, vcc
+; GFX9-NEXT: v_max_f32_e32 v2, v1, v3
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fmaximum_v2f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_maximum_f32 v0, v0, v2
+; GFX12-NEXT: v_maximum_f32 v1, v1, v3
+; GFX12-NEXT: ; return to shader part epilog
%val = call <2 x float> @llvm.maximum.v2f32(<2 x float> %a, <2 x float> %b)
ret <2 x float> %val
}
define amdgpu_ps <2 x float> @test_fmaximum_v2f32_ss(<2 x float> inreg %a, <2 x float> inreg %b) {
-; GCN-LABEL: test_fmaximum_v2f32_ss:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_maximum_f32 s0, s0, s2
-; GCN-NEXT: s_maximum_f32 s1, s1, s3
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
-; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-LABEL: test_fmaximum_v2f32_ss:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_max_f32_e32 v1, s0, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, s0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: v_max_f32_e32 v3, s1, v1
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, s1, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fmaximum_v2f32_ss:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_maximum_f32 s0, s0, s2
+; GFX12-NEXT: s_maximum_f32 s1, s1, s3
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
+; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX12-NEXT: ; return to shader part epilog
%val = call <2 x float> @llvm.maximum.v2f32(<2 x float> %a, <2 x float> %b)
ret <2 x float> %val
}
define amdgpu_ps <3 x float> @test_fmaximum_v3f32(<3 x float> %a, <3 x float> %b) {
-; GCN-LABEL: test_fmaximum_v3f32:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_maximum_f32 v0, v0, v3
-; GCN-NEXT: v_maximum_f32 v1, v1, v4
-; GCN-NEXT: v_maximum_f32 v2, v2, v5
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-LABEL: test_fmaximum_v3f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_max_f32_e32 v6, v0, v3
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v6, vcc
+; GFX9-NEXT: v_max_f32_e32 v3, v1, v4
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v3, vcc
+; GFX9-NEXT: v_max_f32_e32 v3, v2, v5
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v2, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v3, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fmaximum_v3f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_maximum_f32 v0, v0, v3
+; GFX12-NEXT: v_maximum_f32 v1, v1, v4
+; GFX12-NEXT: v_maximum_f32 v2, v2, v5
+; GFX12-NEXT: ; return to shader part epilog
%val = call <3 x float> @llvm.maximum.v3f32(<3 x float> %a, <3 x float> %b)
ret <3 x float> %val
}
define amdgpu_ps <4 x float> @test_fmaximum_v4f32(<4 x float> %a, <4 x float> %b) {
-; GCN-LABEL: test_fmaximum_v4f32:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_maximum_f32 v0, v0, v4
-; GCN-NEXT: v_maximum_f32 v1, v1, v5
-; GCN-NEXT: v_maximum_f32 v2, v2, v6
-; GCN-NEXT: v_maximum_f32 v3, v3, v7
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-LABEL: test_fmaximum_v4f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_max_f32_e32 v8, v0, v4
+; GFX9-NEXT: v_mov_b32_e32 v9, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v9, v8, vcc
+; GFX9-NEXT: v_max_f32_e32 v4, v1, v5
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v4, vcc
+; GFX9-NEXT: v_max_f32_e32 v4, v2, v6
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v2, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v9, v4, vcc
+; GFX9-NEXT: v_max_f32_e32 v4, v3, v7
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v3, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v9, v4, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fmaximum_v4f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_maximum_f32 v0, v0, v4
+; GFX12-NEXT: v_maximum_f32 v1, v1, v5
+; GFX12-NEXT: v_maximum_f32 v2, v2, v6
+; GFX12-NEXT: v_maximum_f32 v3, v3, v7
+; GFX12-NEXT: ; return to shader part epilog
%val = call <4 x float> @llvm.maximum.v4f32(<4 x float> %a, <4 x float> %b)
ret <4 x float> %val
}
define amdgpu_ps <16 x float> @test_fmaximum_v16f32(<16 x float> %a, <16 x float> %b) {
-; GCN-LABEL: test_fmaximum_v16f32:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_maximum_f32 v0, v0, v16
-; GCN-NEXT: v_maximum_f32 v1, v1, v17
-; GCN-NEXT: v_maximum_f32 v2, v2, v18
-; GCN-NEXT: v_maximum_f32 v3, v3, v19
-; GCN-NEXT: v_maximum_f32 v4, v4, v20
-; GCN-NEXT: v_maximum_f32 v5, v5, v21
-; GCN-NEXT: v_maximum_f32 v6, v6, v22
-; GCN-NEXT: v_maximum_f32 v7, v7, v23
-; GCN-NEXT: v_maximum_f32 v8, v8, v24
-; GCN-NEXT: v_maximum_f32 v9, v9, v25
-; GCN-NEXT: v_maximum_f32 v10, v10, v26
-; GCN-NEXT: v_maximum_f32 v11, v11, v27
-; GCN-NEXT: v_maximum_f32 v12, v12, v28
-; GCN-NEXT: v_maximum_f32 v13, v13, v29
-; GCN-NEXT: v_maximum_f32 v14, v14, v30
-; GCN-NEXT: v_maximum_f32 v15, v15, v31
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-LABEL: test_fmaximum_v16f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_max_f32_e32 v32, v1, v17
+; GFX9-NEXT: v_mov_b32_e32 v33, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v17
+; GFX9-NEXT: v_max_f32_e32 v1, v0, v16
+; GFX9-NEXT: v_cmp_o_f32_e64 s[12:13], v0, v16
+; GFX9-NEXT: v_max_f32_e32 v17, v2, v18
+; GFX9-NEXT: v_cmp_o_f32_e64 s[0:1], v2, v18
+; GFX9-NEXT: v_max_f32_e32 v18, v3, v19
+; GFX9-NEXT: v_cmp_o_f32_e64 s[2:3], v3, v19
+; GFX9-NEXT: v_max_f32_e32 v19, v4, v20
+; GFX9-NEXT: v_cmp_o_f32_e64 s[4:5], v4, v20
+; GFX9-NEXT: v_max_f32_e32 v20, v5, v21
+; GFX9-NEXT: v_cmp_o_f32_e64 s[6:7], v5, v21
+; GFX9-NEXT: v_max_f32_e32 v21, v6, v22
+; GFX9-NEXT: v_cmp_o_f32_e64 s[8:9], v6, v22
+; GFX9-NEXT: v_max_f32_e32 v22, v7, v23
+; GFX9-NEXT: v_cmp_o_f32_e64 s[10:11], v7, v23
+; GFX9-NEXT: v_max_f32_e32 v23, v8, v24
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v33, v1, s[12:13]
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v33, v32, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v8, v24
+; GFX9-NEXT: v_max_f32_e32 v34, v9, v25
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v33, v23, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v9, v25
+; GFX9-NEXT: v_max_f32_e32 v35, v10, v26
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v33, v34, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v10, v26
+; GFX9-NEXT: v_max_f32_e32 v36, v11, v27
+; GFX9-NEXT: v_cndmask_b32_e32 v10, v33, v35, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v11, v27
+; GFX9-NEXT: v_max_f32_e32 v37, v12, v28
+; GFX9-NEXT: v_cndmask_b32_e32 v11, v33, v36, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v12, v28
+; GFX9-NEXT: v_max_f32_e32 v16, v13, v29
+; GFX9-NEXT: v_cndmask_b32_e32 v12, v33, v37, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v13, v29
+; GFX9-NEXT: v_cndmask_b32_e32 v13, v33, v16, vcc
+; GFX9-NEXT: v_max_f32_e32 v16, v14, v30
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v14, v30
+; GFX9-NEXT: v_cndmask_b32_e32 v14, v33, v16, vcc
+; GFX9-NEXT: v_max_f32_e32 v16, v15, v31
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v15, v31
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v33, v17, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v33, v18, s[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v33, v19, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v5, v33, v20, s[6:7]
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v33, v21, s[8:9]
+; GFX9-NEXT: v_cndmask_b32_e64 v7, v33, v22, s[10:11]
+; GFX9-NEXT: v_cndmask_b32_e32 v15, v33, v16, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fmaximum_v16f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_maximum_f32 v0, v0, v16
+; GFX12-NEXT: v_maximum_f32 v1, v1, v17
+; GFX12-NEXT: v_maximum_f32 v2, v2, v18
+; GFX12-NEXT: v_maximum_f32 v3, v3, v19
+; GFX12-NEXT: v_maximum_f32 v4, v4, v20
+; GFX12-NEXT: v_maximum_f32 v5, v5, v21
+; GFX12-NEXT: v_maximum_f32 v6, v6, v22
+; GFX12-NEXT: v_maximum_f32 v7, v7, v23
+; GFX12-NEXT: v_maximum_f32 v8, v8, v24
+; GFX12-NEXT: v_maximum_f32 v9, v9, v25
+; GFX12-NEXT: v_maximum_f32 v10, v10, v26
+; GFX12-NEXT: v_maximum_f32 v11, v11, v27
+; GFX12-NEXT: v_maximum_f32 v12, v12, v28
+; GFX12-NEXT: v_maximum_f32 v13, v13, v29
+; GFX12-NEXT: v_maximum_f32 v14, v14, v30
+; GFX12-NEXT: v_maximum_f32 v15, v15, v31
+; GFX12-NEXT: ; return to shader part epilog
%val = call <16 x float> @llvm.maximum.v16f32(<16 x float> %a, <16 x float> %b)
ret <16 x float> %val
}
define amdgpu_ps half @test_fmaximum_f16_vv(half %a, half %b) {
+; GFX9-LABEL: test_fmaximum_f16_vv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_max_f16_e32 v2, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
; GFX12-SDAG-TRUE16-LABEL: test_fmaximum_f16_vv:
; GFX12-SDAG-TRUE16: ; %bb.0:
; GFX12-SDAG-TRUE16-NEXT: v_maximum_f16 v0.l, v0.l, v1.l
@@ -136,35 +315,131 @@ define amdgpu_ps half @test_fmaximum_f16_vv(half %a, half %b) {
}
define amdgpu_ps half @test_fmaximum_f16_ss(half inreg %a, half inreg %b) {
-; GCN-LABEL: test_fmaximum_f16_ss:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_maximum_f16 s0, s0, s1
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
-; GCN-NEXT: v_mov_b32_e32 v0, s0
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-LABEL: test_fmaximum_f16_ss:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v0, s1
+; GFX9-NEXT: v_max_f16_e32 v1, s0, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, s0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fmaximum_f16_ss:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_maximum_f16 s0, s0, s1
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
+; GFX12-NEXT: v_mov_b32_e32 v0, s0
+; GFX12-NEXT: ; return to shader part epilog
%val = call half @llvm.maximum.f16(half %a, half %b)
ret half %val
}
define amdgpu_ps <2 x half> @test_fmaximum_v2f16_vv(<2 x half> %a, <2 x half> %b) {
-; GCN-LABEL: test_fmaximum_v2f16_vv:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_pk_maximum_f16 v0, v0, v1
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-SDAG-LABEL: test_fmaximum_v2f16_vv:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: v_pk_max_f16 v2, v0, v1
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v4, v3, v2, vcc
+; GFX9-SDAG-NEXT: v_cmp_o_f16_sdwa vcc, v0, v1 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-SDAG-NEXT: v_cndmask_b32_sdwa v0, v3, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-SDAG-NEXT: s_mov_b32 s0, 0x5040100
+; GFX9-SDAG-NEXT: v_perm_b32 v0, v0, v4, s0
+; GFX9-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX9-GISEL-LABEL: test_fmaximum_v2f16_vv:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: v_pk_max_f16 v2, v0, v1
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e64 s[0:1], v0, v1
+; GFX9-GISEL-NEXT: v_cmp_o_f16_sdwa vcc, v0, v1 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v0, v3, v2, s[0:1]
+; GFX9-GISEL-NEXT: v_cndmask_b32_sdwa v1, v3, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX9-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fmaximum_v2f16_vv:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v1
+; GFX12-NEXT: ; return to shader part epilog
%val = call <2 x half> @llvm.maximum.v2f16(<2 x half> %a, <2 x half> %b)
ret <2 x half> %val
}
define amdgpu_ps <2 x half> @test_fmaximum_v2f16_ss(<2 x half> inreg %a, <2 x half> inreg %b) {
-; GCN-LABEL: test_fmaximum_v2f16_ss:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_pk_maximum_f16 v0, s0, s1
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-SDAG-LABEL: test_fmaximum_v2f16_ss:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s1
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s1
+; GFX9-SDAG-NEXT: s_lshr_b32 s1, s1, 16
+; GFX9-SDAG-NEXT: v_pk_max_f16 v1, s0, v1
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v2, 0x7e00
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, s0, v0
+; GFX9-SDAG-NEXT: s_lshr_b32 s0, s0, 16
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v3, s1
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, s0, v3
+; GFX9-SDAG-NEXT: v_cndmask_b32_sdwa v1, v2, v1, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-SDAG-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX9-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX9-GISEL-LABEL: test_fmaximum_v2f16_ss:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s1
+; GFX9-GISEL-NEXT: s_lshr_b32 s1, s1, 16
+; GFX9-GISEL-NEXT: s_lshr_b32 s2, s0, 16
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-GISEL-NEXT: v_pk_max_f16 v1, s0, v0
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e32 vcc, s2, v2
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v2, 0x7e00
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e64 s[0:1], s0, v0
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v0, v2, v1, s[0:1]
+; GFX9-GISEL-NEXT: v_cndmask_b32_sdwa v1, v2, v1, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX9-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fmaximum_v2f16_ss:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_pk_maximum_f16 v0, s0, s1
+; GFX12-NEXT: ; return to shader part epilog
%val = call <2 x half> @llvm.maximum.v2f16(<2 x half> %a, <2 x half> %b)
ret <2 x half> %val
}
define amdgpu_ps <3 x half> @test_fmaximum_v3f16_vv(<3 x half> %a, <3 x half> %b) {
+; GFX9-SDAG-LABEL: test_fmaximum_v3f16_vv:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: v_pk_max_f16 v4, v1, v3
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v5, 0x7e00
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, v1, v3
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v1, v5, v4, vcc
+; GFX9-SDAG-NEXT: v_pk_max_f16 v3, v0, v2
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v4, v5, v3, vcc
+; GFX9-SDAG-NEXT: v_cmp_o_f16_sdwa vcc, v0, v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-SDAG-NEXT: v_cndmask_b32_sdwa v0, v5, v3, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-SDAG-NEXT: s_mov_b32 s0, 0x5040100
+; GFX9-SDAG-NEXT: v_perm_b32 v0, v0, v4, s0
+; GFX9-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX9-GISEL-LABEL: test_fmaximum_v3f16_vv:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: v_pk_max_f16 v4, v0, v2
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v5, 0x7e00
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e64 s[0:1], v0, v2
+; GFX9-GISEL-NEXT: v_cmp_o_f16_sdwa vcc, v0, v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v0, v5, v4, s[0:1]
+; GFX9-GISEL-NEXT: v_cndmask_b32_sdwa v2, v5, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-GISEL-NEXT: v_pk_max_f16 v4, v1, v3
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e32 vcc, v1, v3
+; GFX9-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v1, v5, v4, vcc
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v2, 16, v0
+; GFX9-GISEL-NEXT: ; return to shader part epilog
+;
; GFX12-SDAG-LABEL: test_fmaximum_v3f16_vv:
; GFX12-SDAG: ; %bb.0:
; GFX12-SDAG-NEXT: v_pk_maximum_f16 v0, v0, v2
@@ -187,6 +462,49 @@ define amdgpu_ps <3 x half> @test_fmaximum_v3f16_vv(<3 x half> %a, <3 x half> %b
}
define amdgpu_ps <3 x half> @test_fmaximum_v3f16_ss(<3 x half> inreg %a, <3 x half> inreg %b) {
+; GFX9-SDAG-LABEL: test_fmaximum_v3f16_ss:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-SDAG-NEXT: v_pk_max_f16 v1, s1, v1
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v2, 0x7e00
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, s1, v0
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v3, s2
+; GFX9-SDAG-NEXT: s_lshr_b32 s1, s2, 16
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-SDAG-NEXT: v_pk_max_f16 v3, s0, v3
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, s0, v0
+; GFX9-SDAG-NEXT: s_lshr_b32 s0, s0, 16
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v4, s1
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, s0, v4
+; GFX9-SDAG-NEXT: v_cndmask_b32_sdwa v2, v2, v3, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-SDAG-NEXT: v_lshl_or_b32 v0, v2, 16, v0
+; GFX9-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX9-GISEL-LABEL: test_fmaximum_v3f16_ss:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_lshr_b32 s5, s2, 16
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-GISEL-NEXT: s_lshr_b32 s4, s0, 16
+; GFX9-GISEL-NEXT: v_pk_max_f16 v1, s0, v0
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v2, s5
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e32 vcc, s0, v0
+; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e32 vcc, s4, v2
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v2, v4, v3, vcc
+; GFX9-GISEL-NEXT: v_pk_max_f16 v3, s1, v1
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e32 vcc, s1, v1
+; GFX9-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v2, 16, v0
+; GFX9-GISEL-NEXT: ; return to shader part epilog
+;
; GFX12-SDAG-LABEL: test_fmaximum_v3f16_ss:
; GFX12-SDAG: ; %bb.0:
; GFX12-SDAG-NEXT: v_pk_maximum_f16 v0, s0, s2
@@ -206,97 +524,384 @@ define amdgpu_ps <3 x half> @test_fmaximum_v3f16_ss(<3 x half> inreg %a, <3 x ha
}
define amdgpu_ps <4 x half> @test_fmaximum_v4f16(<4 x half> %a, <4 x half> %b) {
-; GCN-LABEL: test_fmaximum_v4f16:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_pk_maximum_f16 v0, v0, v2
-; GCN-NEXT: v_pk_maximum_f16 v1, v1, v3
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-SDAG-LABEL: test_fmaximum_v4f16:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: v_pk_max_f16 v4, v1, v3
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v5, 0x7e00
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, v1, v3
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v6, v5, v4, vcc
+; GFX9-SDAG-NEXT: v_cmp_o_f16_sdwa vcc, v1, v3 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-SDAG-NEXT: v_cndmask_b32_sdwa v1, v5, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-SDAG-NEXT: v_pk_max_f16 v3, v0, v2
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v4, v5, v3, vcc
+; GFX9-SDAG-NEXT: v_cmp_o_f16_sdwa vcc, v0, v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-SDAG-NEXT: v_cndmask_b32_sdwa v0, v5, v3, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-SDAG-NEXT: s_mov_b32 s0, 0x5040100
+; GFX9-SDAG-NEXT: v_perm_b32 v0, v0, v4, s0
+; GFX9-SDAG-NEXT: v_perm_b32 v1, v1, v6, s0
+; GFX9-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX9-GISEL-LABEL: test_fmaximum_v4f16:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: v_pk_max_f16 v4, v0, v2
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v6, 0x7e00
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v5, 16, v4
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc
+; GFX9-GISEL-NEXT: v_cmp_o_f16_sdwa vcc, v0, v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v0, v6, v5, vcc
+; GFX9-GISEL-NEXT: v_and_b32_e32 v2, 0xffff, v4
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v0, 16, v2
+; GFX9-GISEL-NEXT: v_pk_max_f16 v2, v1, v3
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e64 s[0:1], v1, v3
+; GFX9-GISEL-NEXT: v_cmp_o_f16_sdwa vcc, v1, v3 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v1, v6, v2, s[0:1]
+; GFX9-GISEL-NEXT: v_cndmask_b32_sdwa v2, v6, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; GFX9-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fmaximum_v4f16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_pk_maximum_f16 v0, v0, v2
+; GFX12-NEXT: v_pk_maximum_f16 v1, v1, v3
+; GFX12-NEXT: ; return to shader part epilog
%val = call <4 x half> @llvm.maximum.v4f16(<4 x half> %a, <4 x half> %b)
ret <4 x half> %val
}
define amdgpu_ps <4 x half> @test_fmaximum_v4f16_ss(<4 x half> inreg %a, <4 x half> inreg %b) {
-; GCN-LABEL: test_fmaximum_v4f16_ss:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_pk_maximum_f16 v0, s0, s2
-; GCN-NEXT: v_pk_maximum_f16 v1, s1, s3
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-SDAG-LABEL: test_fmaximum_v4f16_ss:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-SDAG-NEXT: s_lshr_b32 s3, s3, 16
+; GFX9-SDAG-NEXT: v_pk_max_f16 v1, s1, v1
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v2, 0x7e00
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, s1, v0
+; GFX9-SDAG-NEXT: s_lshr_b32 s1, s1, 16
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v3, v2, v1, vcc
+; GFX9-SDAG-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, s1, v0
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v4, s2
+; GFX9-SDAG-NEXT: s_lshr_b32 s1, s2, 16
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-SDAG-NEXT: v_pk_max_f16 v4, s0, v4
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, s0, v0
+; GFX9-SDAG-NEXT: s_lshr_b32 s0, s0, 16
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v5, s1
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, s0, v5
+; GFX9-SDAG-NEXT: v_cndmask_b32_sdwa v2, v2, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-SDAG-NEXT: v_lshl_or_b32 v0, v2, 16, v0
+; GFX9-SDAG-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GFX9-SDAG-NEXT: v_lshl_or_b32 v1, v1, 16, v2
+; GFX9-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX9-GISEL-LABEL: test_fmaximum_v4f16_ss:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-GISEL-NEXT: s_lshr_b32 s2, s2, 16
+; GFX9-GISEL-NEXT: v_pk_max_f16 v1, s0, v0
+; GFX9-GISEL-NEXT: s_lshr_b32 s4, s0, 16
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e32 vcc, s0, v0
+; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e32 vcc, s4, v2
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc
+; GFX9-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-GISEL-NEXT: s_lshr_b32 s2, s3, 16
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-GISEL-NEXT: s_lshr_b32 s0, s1, 16
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v3, s2
+; GFX9-GISEL-NEXT: v_pk_max_f16 v2, s1, v1
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e32 vcc, s0, v3
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e64 s[0:1], s1, v1
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v1, v4, v2, s[0:1]
+; GFX9-GISEL-NEXT: v_cndmask_b32_sdwa v2, v4, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; GFX9-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fmaximum_v4f16_ss:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_pk_maximum_f16 v0, s0, s2
+; GFX12-NEXT: v_pk_maximum_f16 v1, s1, s3
+; GFX12-NEXT: ; return to shader part epilog
%val = call <4 x half> @llvm.maximum.v4f16(<4 x half> %a, <4 x half> %b)
ret <4 x half> %val
}
define amdgpu_ps <2 x float> @test_fmaximum_f64_vv(double %a, double %b) {
-; GCN-LABEL: test_fmaximum_f64_vv:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_maximum_f64 v[0:1], v[0:1], v[2:3]
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-SDAG-LABEL: test_fmaximum_f64_vv:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: v_max_f64 v[4:5], v[0:1], v[2:3]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, 0x7ff80000
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX9-GISEL-LABEL: test_fmaximum_f64_vv:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: v_max_f64 v[4:5], v[0:1], v[2:3]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, 0x7ff80000
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v4, vcc
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX9-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fmaximum_f64_vv:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: ; return to shader part epilog
%val = call double @llvm.maximum.f64(double %a, double %b)
%ret = bitcast double %val to <2 x float>
ret <2 x float> %ret
}
define amdgpu_ps <2 x float> @test_fmaximum_f64_ss(double inreg %a, double inreg %b) {
-; GCN-LABEL: test_fmaximum_f64_ss:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_maximum_f64 v[0:1], s[0:1], s[2:3]
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-SDAG-LABEL: test_fmaximum_f64_ss:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-SDAG-NEXT: v_max_f64 v[2:3], s[0:1], v[0:1]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e32 vcc, s[0:1], v[0:1]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, 0x7ff80000
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX9-GISEL-LABEL: test_fmaximum_f64_ss:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-GISEL-NEXT: v_max_f64 v[2:3], s[0:1], v[0:1]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e32 vcc, s[0:1], v[0:1]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, 0x7ff80000
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v2, vcc
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX9-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fmaximum_f64_ss:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_maximum_f64 v[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: ; return to shader part epilog
%val = call double @llvm.maximum.f64(double %a, double %b)
%ret = bitcast double %val to <2 x float>
ret <2 x float> %ret
}
define amdgpu_ps <4 x float> @test_fmaximum_v2f64_ss(<2 x double> inreg %a, <2 x double> inreg %b) {
-; GCN-LABEL: test_fmaximum_v2f64_ss:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_maximum_f64 v[0:1], s[0:1], s[4:5]
-; GCN-NEXT: v_maximum_f64 v[2:3], s[2:3], s[6:7]
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-SDAG-LABEL: test_fmaximum_v2f64_ss:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-SDAG-NEXT: v_max_f64 v[2:3], s[0:1], v[0:1]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e32 vcc, s[0:1], v[0:1]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s6
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-SDAG-NEXT: v_max_f64 v[4:5], s[2:3], v[0:1]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e64 s[0:1], s[2:3], v[0:1]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v6, 0x7ff80000
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v6, vcc
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v2, v4, 0, s[0:1]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v3, v5, v6, s[0:1]
+; GFX9-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX9-GISEL-LABEL: test_fmaximum_v2f64_ss:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-GISEL-NEXT: v_max_f64 v[2:3], s[0:1], v[0:1]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e32 vcc, s[0:1], v[0:1]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-GISEL-NEXT: v_max_f64 v[4:5], s[2:3], v[0:1]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e64 s[0:1], s[2:3], v[0:1]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v6, 0x7ff80000
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v2, vcc
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v2, 0, v4, s[0:1]
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v3, v6, v5, s[0:1]
+; GFX9-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fmaximum_v2f64_ss:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_maximum_f64 v[0:1], s[0:1], s[4:5]
+; GFX12-NEXT: v_maximum_f64 v[2:3], s[2:3], s[6:7]
+; GFX12-NEXT: ; return to shader part epilog
%val = call <2 x double> @llvm.maximum.v2f64(<2 x double> %a, <2 x double> %b)
%ret = bitcast <2 x double> %val to <4 x float>
ret <4 x float> %ret
}
define amdgpu_ps <8 x float> @test_fmaximum_v4f64(<4 x double> %a, <4 x double> %b) {
-; GCN-LABEL: test_fmaximum_v4f64:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_maximum_f64 v[0:1], v[0:1], v[8:9]
-; GCN-NEXT: v_maximum_f64 v[2:3], v[2:3], v[10:11]
-; GCN-NEXT: v_maximum_f64 v[4:5], v[4:5], v[12:13]
-; GCN-NEXT: v_maximum_f64 v[6:7], v[6:7], v[14:15]
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-SDAG-LABEL: test_fmaximum_v4f64:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: v_max_f64 v[16:17], v[0:1], v[8:9]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[8:9]
+; GFX9-SDAG-NEXT: v_max_f64 v[8:9], v[2:3], v[10:11]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e64 s[0:1], v[2:3], v[10:11]
+; GFX9-SDAG-NEXT: v_max_f64 v[10:11], v[4:5], v[12:13]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e64 s[2:3], v[4:5], v[12:13]
+; GFX9-SDAG-NEXT: v_max_f64 v[12:13], v[6:7], v[14:15]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e64 s[4:5], v[6:7], v[14:15]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v7, 0x7ff80000
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v0, v16, 0, vcc
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v1, v17, v7, vcc
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v2, v8, 0, s[0:1]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v3, v9, v7, s[0:1]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v4, v10, 0, s[2:3]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v5, v11, v7, s[2:3]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v6, v12, 0, s[4:5]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v7, v13, v7, s[4:5]
+; GFX9-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX9-GISEL-LABEL: test_fmaximum_v4f64:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: v_max_f64 v[16:17], v[0:1], v[8:9]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e32 vcc, v[0:1], v[8:9]
+; GFX9-GISEL-NEXT: v_max_f64 v[8:9], v[2:3], v[10:11]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e64 s[0:1], v[2:3], v[10:11]
+; GFX9-GISEL-NEXT: v_max_f64 v[10:11], v[4:5], v[12:13]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e64 s[2:3], v[4:5], v[12:13]
+; GFX9-GISEL-NEXT: v_max_f64 v[12:13], v[6:7], v[14:15]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e64 s[4:5], v[6:7], v[14:15]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v18, 0x7ff80000
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v16, vcc
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v1, v18, v17, vcc
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v2, 0, v8, s[0:1]
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v3, v18, v9, s[0:1]
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v4, 0, v10, s[2:3]
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v5, v18, v11, s[2:3]
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v6, 0, v12, s[4:5]
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v7, v18, v13, s[4:5]
+; GFX9-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fmaximum_v4f64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_maximum_f64 v[0:1], v[0:1], v[8:9]
+; GFX12-NEXT: v_maximum_f64 v[2:3], v[2:3], v[10:11]
+; GFX12-NEXT: v_maximum_f64 v[4:5], v[4:5], v[12:13]
+; GFX12-NEXT: v_maximum_f64 v[6:7], v[6:7], v[14:15]
+; GFX12-NEXT: ; return to shader part epilog
%val = call <4 x double> @llvm.maximum.v4f64(<4 x double> %a, <4 x double> %b)
%ret = bitcast <4 x double> %val to <8 x float>
ret <8 x float> %ret
}
define amdgpu_ps <8 x float> @test_fmaximum_v4f64_ss(<4 x double> inreg %a, <4 x double> inreg %b) {
-; GCN-LABEL: test_fmaximum_v4f64_ss:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_maximum_f64 v[0:1], s[0:1], s[8:9]
-; GCN-NEXT: v_maximum_f64 v[2:3], s[2:3], s[10:11]
-; GCN-NEXT: v_maximum_f64 v[4:5], s[4:5], s[12:13]
-; GCN-NEXT: v_maximum_f64 v[6:7], s[6:7], s[14:15]
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-SDAG-LABEL: test_fmaximum_v4f64_ss:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-SDAG-NEXT: v_max_f64 v[2:3], s[0:1], v[0:1]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e32 vcc, s[0:1], v[0:1]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v10, 0x7ff80000
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s10
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v2, s11
+; GFX9-SDAG-NEXT: v_max_f64 v[4:5], s[2:3], v[1:2]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e64 s[0:1], s[2:3], v[1:2]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s12
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v2, s13
+; GFX9-SDAG-NEXT: v_max_f64 v[6:7], s[4:5], v[1:2]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e64 s[2:3], s[4:5], v[1:2]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s14
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v2, s15
+; GFX9-SDAG-NEXT: v_max_f64 v[8:9], s[6:7], v[1:2]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e64 s[4:5], s[6:7], v[1:2]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v10, vcc
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v2, v4, 0, s[0:1]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v3, v5, v10, s[0:1]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v4, v6, 0, s[2:3]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v5, v7, v10, s[2:3]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v6, v8, 0, s[4:5]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v7, v9, v10, s[4:5]
+; GFX9-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX9-GISEL-LABEL: test_fmaximum_v4f64_ss:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-GISEL-NEXT: v_max_f64 v[2:3], s[0:1], v[0:1]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e32 vcc, s[0:1], v[0:1]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s10
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s11
+; GFX9-GISEL-NEXT: v_max_f64 v[4:5], s[2:3], v[0:1]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e64 s[0:1], s[2:3], v[0:1]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s12
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s13
+; GFX9-GISEL-NEXT: v_max_f64 v[6:7], s[4:5], v[0:1]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e64 s[2:3], s[4:5], v[0:1]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s14
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s15
+; GFX9-GISEL-NEXT: v_max_f64 v[8:9], s[6:7], v[0:1]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e64 s[4:5], s[6:7], v[0:1]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v10, 0x7ff80000
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v2, vcc
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v1, v10, v3, vcc
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v2, 0, v4, s[0:1]
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v3, v10, v5, s[0:1]
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v4, 0, v6, s[2:3]
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v5, v10, v7, s[2:3]
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v6, 0, v8, s[4:5]
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v7, v10, v9, s[4:5]
+; GFX9-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fmaximum_v4f64_ss:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_maximum_f64 v[0:1], s[0:1], s[8:9]
+; GFX12-NEXT: v_maximum_f64 v[2:3], s[2:3], s[10:11]
+; GFX12-NEXT: v_maximum_f64 v[4:5], s[4:5], s[12:13]
+; GFX12-NEXT: v_maximum_f64 v[6:7], s[6:7], s[14:15]
+; GFX12-NEXT: ; return to shader part epilog
%val = call <4 x double> @llvm.maximum.v4f64(<4 x double> %a, <4 x double> %b)
%ret = bitcast <4 x double> %val to <8 x float>
ret <8 x float> %ret
}
define amdgpu_kernel void @fmaximumi_f32_move_to_valu(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
-; GCN-LABEL: fmaximumi_f32_move_to_valu:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_clause 0x1
-; GCN-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GCN-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
-; GCN-NEXT: v_mov_b32_e32 v0, 0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: global_load_b32 v1, v0, s[2:3] scope:SCOPE_SYS
-; GCN-NEXT: s_wait_loadcnt 0x0
-; GCN-NEXT: global_load_b32 v2, v0, s[4:5] scope:SCOPE_SYS
-; GCN-NEXT: s_wait_loadcnt 0x0
-; GCN-NEXT: v_maximum_f32 v1, v1, v2
-; GCN-NEXT: global_store_b32 v0, v1, s[0:1]
-; GCN-NEXT: s_endpgm
+; GFX9-LABEL: fmaximumi_f32_move_to_valu:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: global_load_dword v1, v0, s[2:3] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: global_load_dword v2, v0, s[6:7] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f32_e32 v4, v1, v2
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX9-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX9-NEXT: s_endpgm
+;
+; GFX12-LABEL: fmaximumi_f32_move_to_valu:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX12-NEXT: v_mov_b32_e32 v0, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_b32 v1, v0, s[2:3] scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_load_b32 v2, v0, s[4:5] scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_maximum_f32 v1, v1, v2
+; GFX12-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX12-NEXT: s_endpgm
%a = load volatile float, ptr addrspace(1) %aptr, align 4
%b = load volatile float, ptr addrspace(1) %bptr, align 4
%v = call float @llvm.maximum.f32(float %a, float %b)
@@ -305,6 +910,23 @@ define amdgpu_kernel void @fmaximumi_f32_move_to_valu(ptr addrspace(1) %out, ptr
}
define amdgpu_kernel void @fmaximum_f16_move_to_valu(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
+; GFX9-LABEL: fmaximum_f16_move_to_valu:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: global_load_ushort v1, v0, s[2:3] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: global_load_ushort v2, v0, s[6:7] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v4, v1, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX9-NEXT: global_store_short v0, v1, s[0:1]
+; GFX9-NEXT: s_endpgm
+;
; GFX12-SDAG-TRUE16-LABEL: fmaximum_f16_move_to_valu:
; GFX12-SDAG-TRUE16: ; %bb.0:
; GFX12-SDAG-TRUE16-NEXT: s_clause 0x1
@@ -371,6 +993,40 @@ define amdgpu_kernel void @fmaximum_f16_move_to_valu(ptr addrspace(1) %out, ptr
ret void
}
+define amdgpu_ps float @test_fmaximum_f32_ieee_on(float %a, float %b) #0 {
+; GFX9-LABEL: test_fmaximum_f32_ieee_on:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fmaximum_f32_ieee_on:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_maximum_f32 v0, v0, v1
+; GFX12-NEXT: ; return to shader part epilog
+ %val = call float @llvm.maximum.f32(float %a, float %b)
+ ret float %val
+}
+
+define amdgpu_ps float @test_fmaximum_f32_ieee_off(float %a, float %b) #1 {
+; GFX9-LABEL: test_fmaximum_f32_ieee_off:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_max_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fmaximum_f32_ieee_off:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_maximum_f32 v0, v0, v1
+; GFX12-NEXT: ; return to shader part epilog
+ %val = call float @llvm.maximum.f32(float %a, float %b)
+ ret float %val
+}
+
declare float @llvm.maximum.f32(float, float)
declare <2 x float> @llvm.maximum.v2f32(<2 x float>, <2 x float>)
declare <3 x float> @llvm.maximum.v3f32(<3 x float>, <3 x float>)
@@ -383,3 +1039,6 @@ declare <4 x half> @llvm.maximum.v4f16(<4 x half>, <4 x half>)
declare double @llvm.maximum.f64(double, double)
declare <2 x double> @llvm.maximum.v2f64(<2 x double>, <2 x double>)
declare <4 x double> @llvm.maximum.v4f64(<4 x double>, <4 x double>)
+
+attributes #0 = { nounwind "amdgpu-ieee"="true" }
+attributes #1 = { nounwind "amdgpu-ieee"="false" }
diff --git a/llvm/test/CodeGen/AMDGPU/fminimum.ll b/llvm/test/CodeGen/AMDGPU/fminimum.ll
index b25120f..474ac7c 100644
--- a/llvm/test/CodeGen/AMDGPU/fminimum.ll
+++ b/llvm/test/CodeGen/AMDGPU/fminimum.ll
@@ -1,117 +1,296 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX12-SDAG,GFX12-SDAG-TRUE16 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX12-SDAG,GFX12-SDAG-FAKE16 %s
-; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX12-GISEL,GFX12-GISEL-TRUE16 %s
-; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX12-GISEL,GFX12-GISEL-FAKE16 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9,GFX9-SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9,GFX9-GISEL %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX12,GFX12-SDAG,GFX12-SDAG-TRUE16 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX12,GFX12-SDAG,GFX12-SDAG-FAKE16 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX12,GFX12-GISEL,GFX12-GISEL-TRUE16 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX12,GFX12-GISEL,GFX12-GISEL-FAKE16 %s
define amdgpu_ps float @test_fminimum_f32_vv(float %a, float %b) {
-; GCN-LABEL: test_fminimum_f32_vv:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_minimum_f32 v0, v0, v1
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-LABEL: test_fminimum_f32_vv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fminimum_f32_vv:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_minimum_f32 v0, v0, v1
+; GFX12-NEXT: ; return to shader part epilog
%val = call float @llvm.minimum.f32(float %a, float %b)
ret float %val
}
define amdgpu_ps float @test_fminimum_f32_ss(float inreg %a, float inreg %b) {
-; GCN-LABEL: test_fminimum_f32_ss:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_minimum_f32 s0, s0, s1
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
-; GCN-NEXT: v_mov_b32_e32 v0, s0
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-LABEL: test_fminimum_f32_ss:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v0, s1
+; GFX9-NEXT: v_min_f32_e32 v1, s0, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, s0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fminimum_f32_ss:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_minimum_f32 s0, s0, s1
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
+; GFX12-NEXT: v_mov_b32_e32 v0, s0
+; GFX12-NEXT: ; return to shader part epilog
%val = call float @llvm.minimum.f32(float %a, float %b)
ret float %val
}
define amdgpu_ps float @test_fminimum_f32_vs(float %a, float inreg %b) {
-; GCN-LABEL: test_fminimum_f32_vs:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_minimum_f32 v0, v0, s0
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-LABEL: test_fminimum_f32_vs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_min_f32_e32 v1, s0, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, s0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fminimum_f32_vs:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_minimum_f32 v0, v0, s0
+; GFX12-NEXT: ; return to shader part epilog
%val = call float @llvm.minimum.f32(float %a, float %b)
ret float %val
}
define amdgpu_ps float @test_fminimum_nnan_f32(float %a, float %b) {
-; GCN-LABEL: test_fminimum_nnan_f32:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_minimum_f32 v0, v0, v1
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-LABEL: test_fminimum_nnan_f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_min_f32_e32 v0, v0, v1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fminimum_nnan_f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_minimum_f32 v0, v0, v1
+; GFX12-NEXT: ; return to shader part epilog
%val = call nnan float @llvm.minimum.f32(float %a, float %b)
ret float %val
}
+define amdgpu_ps float @test_fminimum_nsz_f32(float %a, float %b) {
+; GFX9-LABEL: test_fminimum_nsz_f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fminimum_nsz_f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_minimum_f32 v0, v0, v1
+; GFX12-NEXT: ; return to shader part epilog
+ %val = call nsz float @llvm.minimum.f32(float %a, float %b)
+ ret float %val
+}
+
+define amdgpu_ps float @test_fminimum_signed_zero_f32() {
+; GFX9-LABEL: test_fminimum_signed_zero_f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_bfrev_b32_e32 v0, 1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fminimum_signed_zero_f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_bfrev_b32_e32 v0, 1
+; GFX12-NEXT: ; return to shader part epilog
+ %val = call float @llvm.minimum.f32(float -0.0, float 0.0)
+ ret float %val
+}
+
define amdgpu_ps <2 x float> @test_fminimum_v2f32(<2 x float> %a, <2 x float> %b) {
-; GCN-LABEL: test_fminimum_v2f32:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_minimum_f32 v0, v0, v2
-; GCN-NEXT: v_minimum_f32 v1, v1, v3
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-LABEL: test_fminimum_v2f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_min_f32_e32 v4, v0, v2
+; GFX9-NEXT: v_mov_b32_e32 v5, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v4, vcc
+; GFX9-NEXT: v_min_f32_e32 v2, v1, v3
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v2, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fminimum_v2f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_minimum_f32 v0, v0, v2
+; GFX12-NEXT: v_minimum_f32 v1, v1, v3
+; GFX12-NEXT: ; return to shader part epilog
%val = call <2 x float> @llvm.minimum.v2f32(<2 x float> %a, <2 x float> %b)
ret <2 x float> %val
}
define amdgpu_ps <2 x float> @test_fminimum_v2f32_ss(<2 x float> inreg %a, <2 x float> inreg %b) {
-; GCN-LABEL: test_fminimum_v2f32_ss:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_minimum_f32 s0, s0, s2
-; GCN-NEXT: s_minimum_f32 s1, s1, s3
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
-; GCN-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-LABEL: test_fminimum_v2f32_ss:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_min_f32_e32 v1, s0, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, s0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: v_min_f32_e32 v3, s1, v1
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, s1, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v3, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fminimum_v2f32_ss:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_minimum_f32 s0, s0, s2
+; GFX12-NEXT: s_minimum_f32 s1, s1, s3
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
+; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX12-NEXT: ; return to shader part epilog
%val = call <2 x float> @llvm.minimum.v2f32(<2 x float> %a, <2 x float> %b)
ret <2 x float> %val
}
define amdgpu_ps <3 x float> @test_fminimum_v3f32(<3 x float> %a, <3 x float> %b) {
-; GCN-LABEL: test_fminimum_v3f32:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_minimum_f32 v0, v0, v3
-; GCN-NEXT: v_minimum_f32 v1, v1, v4
-; GCN-NEXT: v_minimum_f32 v2, v2, v5
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-LABEL: test_fminimum_v3f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_min_f32_e32 v6, v0, v3
+; GFX9-NEXT: v_mov_b32_e32 v7, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v7, v6, vcc
+; GFX9-NEXT: v_min_f32_e32 v3, v1, v4
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v7, v3, vcc
+; GFX9-NEXT: v_min_f32_e32 v3, v2, v5
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v2, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v7, v3, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fminimum_v3f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_minimum_f32 v0, v0, v3
+; GFX12-NEXT: v_minimum_f32 v1, v1, v4
+; GFX12-NEXT: v_minimum_f32 v2, v2, v5
+; GFX12-NEXT: ; return to shader part epilog
%val = call <3 x float> @llvm.minimum.v3f32(<3 x float> %a, <3 x float> %b)
ret <3 x float> %val
}
define amdgpu_ps <4 x float> @test_fminimum_v4f32(<4 x float> %a, <4 x float> %b) {
-; GCN-LABEL: test_fminimum_v4f32:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_minimum_f32 v0, v0, v4
-; GCN-NEXT: v_minimum_f32 v1, v1, v5
-; GCN-NEXT: v_minimum_f32 v2, v2, v6
-; GCN-NEXT: v_minimum_f32 v3, v3, v7
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-LABEL: test_fminimum_v4f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_min_f32_e32 v8, v0, v4
+; GFX9-NEXT: v_mov_b32_e32 v9, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v9, v8, vcc
+; GFX9-NEXT: v_min_f32_e32 v4, v1, v5
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v4, vcc
+; GFX9-NEXT: v_min_f32_e32 v4, v2, v6
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v2, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v9, v4, vcc
+; GFX9-NEXT: v_min_f32_e32 v4, v3, v7
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v3, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v9, v4, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fminimum_v4f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_minimum_f32 v0, v0, v4
+; GFX12-NEXT: v_minimum_f32 v1, v1, v5
+; GFX12-NEXT: v_minimum_f32 v2, v2, v6
+; GFX12-NEXT: v_minimum_f32 v3, v3, v7
+; GFX12-NEXT: ; return to shader part epilog
%val = call <4 x float> @llvm.minimum.v4f32(<4 x float> %a, <4 x float> %b)
ret <4 x float> %val
}
define amdgpu_ps <16 x float> @test_fminimum_v16f32(<16 x float> %a, <16 x float> %b) {
-; GCN-LABEL: test_fminimum_v16f32:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_minimum_f32 v0, v0, v16
-; GCN-NEXT: v_minimum_f32 v1, v1, v17
-; GCN-NEXT: v_minimum_f32 v2, v2, v18
-; GCN-NEXT: v_minimum_f32 v3, v3, v19
-; GCN-NEXT: v_minimum_f32 v4, v4, v20
-; GCN-NEXT: v_minimum_f32 v5, v5, v21
-; GCN-NEXT: v_minimum_f32 v6, v6, v22
-; GCN-NEXT: v_minimum_f32 v7, v7, v23
-; GCN-NEXT: v_minimum_f32 v8, v8, v24
-; GCN-NEXT: v_minimum_f32 v9, v9, v25
-; GCN-NEXT: v_minimum_f32 v10, v10, v26
-; GCN-NEXT: v_minimum_f32 v11, v11, v27
-; GCN-NEXT: v_minimum_f32 v12, v12, v28
-; GCN-NEXT: v_minimum_f32 v13, v13, v29
-; GCN-NEXT: v_minimum_f32 v14, v14, v30
-; GCN-NEXT: v_minimum_f32 v15, v15, v31
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-LABEL: test_fminimum_v16f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_min_f32_e32 v32, v1, v17
+; GFX9-NEXT: v_mov_b32_e32 v33, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v17
+; GFX9-NEXT: v_min_f32_e32 v1, v0, v16
+; GFX9-NEXT: v_cmp_o_f32_e64 s[12:13], v0, v16
+; GFX9-NEXT: v_min_f32_e32 v17, v2, v18
+; GFX9-NEXT: v_cmp_o_f32_e64 s[0:1], v2, v18
+; GFX9-NEXT: v_min_f32_e32 v18, v3, v19
+; GFX9-NEXT: v_cmp_o_f32_e64 s[2:3], v3, v19
+; GFX9-NEXT: v_min_f32_e32 v19, v4, v20
+; GFX9-NEXT: v_cmp_o_f32_e64 s[4:5], v4, v20
+; GFX9-NEXT: v_min_f32_e32 v20, v5, v21
+; GFX9-NEXT: v_cmp_o_f32_e64 s[6:7], v5, v21
+; GFX9-NEXT: v_min_f32_e32 v21, v6, v22
+; GFX9-NEXT: v_cmp_o_f32_e64 s[8:9], v6, v22
+; GFX9-NEXT: v_min_f32_e32 v22, v7, v23
+; GFX9-NEXT: v_cmp_o_f32_e64 s[10:11], v7, v23
+; GFX9-NEXT: v_min_f32_e32 v23, v8, v24
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v33, v1, s[12:13]
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v33, v32, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v8, v24
+; GFX9-NEXT: v_min_f32_e32 v34, v9, v25
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v33, v23, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v9, v25
+; GFX9-NEXT: v_min_f32_e32 v35, v10, v26
+; GFX9-NEXT: v_cndmask_b32_e32 v9, v33, v34, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v10, v26
+; GFX9-NEXT: v_min_f32_e32 v36, v11, v27
+; GFX9-NEXT: v_cndmask_b32_e32 v10, v33, v35, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v11, v27
+; GFX9-NEXT: v_min_f32_e32 v37, v12, v28
+; GFX9-NEXT: v_cndmask_b32_e32 v11, v33, v36, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v12, v28
+; GFX9-NEXT: v_min_f32_e32 v16, v13, v29
+; GFX9-NEXT: v_cndmask_b32_e32 v12, v33, v37, vcc
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v13, v29
+; GFX9-NEXT: v_cndmask_b32_e32 v13, v33, v16, vcc
+; GFX9-NEXT: v_min_f32_e32 v16, v14, v30
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v14, v30
+; GFX9-NEXT: v_cndmask_b32_e32 v14, v33, v16, vcc
+; GFX9-NEXT: v_min_f32_e32 v16, v15, v31
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v15, v31
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v33, v17, s[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v33, v18, s[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v33, v19, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v5, v33, v20, s[6:7]
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v33, v21, s[8:9]
+; GFX9-NEXT: v_cndmask_b32_e64 v7, v33, v22, s[10:11]
+; GFX9-NEXT: v_cndmask_b32_e32 v15, v33, v16, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fminimum_v16f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_minimum_f32 v0, v0, v16
+; GFX12-NEXT: v_minimum_f32 v1, v1, v17
+; GFX12-NEXT: v_minimum_f32 v2, v2, v18
+; GFX12-NEXT: v_minimum_f32 v3, v3, v19
+; GFX12-NEXT: v_minimum_f32 v4, v4, v20
+; GFX12-NEXT: v_minimum_f32 v5, v5, v21
+; GFX12-NEXT: v_minimum_f32 v6, v6, v22
+; GFX12-NEXT: v_minimum_f32 v7, v7, v23
+; GFX12-NEXT: v_minimum_f32 v8, v8, v24
+; GFX12-NEXT: v_minimum_f32 v9, v9, v25
+; GFX12-NEXT: v_minimum_f32 v10, v10, v26
+; GFX12-NEXT: v_minimum_f32 v11, v11, v27
+; GFX12-NEXT: v_minimum_f32 v12, v12, v28
+; GFX12-NEXT: v_minimum_f32 v13, v13, v29
+; GFX12-NEXT: v_minimum_f32 v14, v14, v30
+; GFX12-NEXT: v_minimum_f32 v15, v15, v31
+; GFX12-NEXT: ; return to shader part epilog
%val = call <16 x float> @llvm.minimum.v16f32(<16 x float> %a, <16 x float> %b)
ret <16 x float> %val
}
define amdgpu_ps half @test_fminimum_f16_vv(half %a, half %b) {
+; GFX9-LABEL: test_fminimum_f16_vv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_min_f16_e32 v2, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
; GFX12-SDAG-TRUE16-LABEL: test_fminimum_f16_vv:
; GFX12-SDAG-TRUE16: ; %bb.0:
; GFX12-SDAG-TRUE16-NEXT: v_minimum_f16 v0.l, v0.l, v1.l
@@ -136,35 +315,131 @@ define amdgpu_ps half @test_fminimum_f16_vv(half %a, half %b) {
}
define amdgpu_ps half @test_fminimum_f16_ss(half inreg %a, half inreg %b) {
-; GCN-LABEL: test_fminimum_f16_ss:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_minimum_f16 s0, s0, s1
-; GCN-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
-; GCN-NEXT: v_mov_b32_e32 v0, s0
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-LABEL: test_fminimum_f16_ss:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v0, s1
+; GFX9-NEXT: v_min_f16_e32 v1, s0, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x7e00
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, s0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fminimum_f16_ss:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_minimum_f16 s0, s0, s1
+; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_3)
+; GFX12-NEXT: v_mov_b32_e32 v0, s0
+; GFX12-NEXT: ; return to shader part epilog
%val = call half @llvm.minimum.f16(half %a, half %b)
ret half %val
}
define amdgpu_ps <2 x half> @test_fminimum_v2f16_vv(<2 x half> %a, <2 x half> %b) {
-; GCN-LABEL: test_fminimum_v2f16_vv:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_pk_minimum_f16 v0, v0, v1
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-SDAG-LABEL: test_fminimum_v2f16_vv:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: v_pk_min_f16 v2, v0, v1
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, v0, v1
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v4, v3, v2, vcc
+; GFX9-SDAG-NEXT: v_cmp_o_f16_sdwa vcc, v0, v1 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-SDAG-NEXT: v_cndmask_b32_sdwa v0, v3, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-SDAG-NEXT: s_mov_b32 s0, 0x5040100
+; GFX9-SDAG-NEXT: v_perm_b32 v0, v0, v4, s0
+; GFX9-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX9-GISEL-LABEL: test_fminimum_v2f16_vv:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: v_pk_min_f16 v2, v0, v1
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e64 s[0:1], v0, v1
+; GFX9-GISEL-NEXT: v_cmp_o_f16_sdwa vcc, v0, v1 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v0, v3, v2, s[0:1]
+; GFX9-GISEL-NEXT: v_cndmask_b32_sdwa v1, v3, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX9-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fminimum_v2f16_vv:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v1
+; GFX12-NEXT: ; return to shader part epilog
%val = call <2 x half> @llvm.minimum.v2f16(<2 x half> %a, <2 x half> %b)
ret <2 x half> %val
}
define amdgpu_ps <2 x half> @test_fminimum_v2f16_ss(<2 x half> inreg %a, <2 x half> inreg %b) {
-; GCN-LABEL: test_fminimum_v2f16_ss:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_pk_minimum_f16 v0, s0, s1
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-SDAG-LABEL: test_fminimum_v2f16_ss:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s1
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s1
+; GFX9-SDAG-NEXT: s_lshr_b32 s1, s1, 16
+; GFX9-SDAG-NEXT: v_pk_min_f16 v1, s0, v1
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v2, 0x7e00
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, s0, v0
+; GFX9-SDAG-NEXT: s_lshr_b32 s0, s0, 16
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v3, s1
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, s0, v3
+; GFX9-SDAG-NEXT: v_cndmask_b32_sdwa v1, v2, v1, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-SDAG-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX9-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX9-GISEL-LABEL: test_fminimum_v2f16_ss:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s1
+; GFX9-GISEL-NEXT: s_lshr_b32 s1, s1, 16
+; GFX9-GISEL-NEXT: s_lshr_b32 s2, s0, 16
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-GISEL-NEXT: v_pk_min_f16 v1, s0, v0
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e32 vcc, s2, v2
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v2, 0x7e00
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e64 s[0:1], s0, v0
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v0, v2, v1, s[0:1]
+; GFX9-GISEL-NEXT: v_cndmask_b32_sdwa v1, v2, v1, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX9-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fminimum_v2f16_ss:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_pk_minimum_f16 v0, s0, s1
+; GFX12-NEXT: ; return to shader part epilog
%val = call <2 x half> @llvm.minimum.v2f16(<2 x half> %a, <2 x half> %b)
ret <2 x half> %val
}
define amdgpu_ps <3 x half> @test_fminimum_v3f16_vv(<3 x half> %a, <3 x half> %b) {
+; GFX9-SDAG-LABEL: test_fminimum_v3f16_vv:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: v_pk_min_f16 v4, v1, v3
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v5, 0x7e00
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, v1, v3
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v1, v5, v4, vcc
+; GFX9-SDAG-NEXT: v_pk_min_f16 v3, v0, v2
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v4, v5, v3, vcc
+; GFX9-SDAG-NEXT: v_cmp_o_f16_sdwa vcc, v0, v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-SDAG-NEXT: v_cndmask_b32_sdwa v0, v5, v3, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-SDAG-NEXT: s_mov_b32 s0, 0x5040100
+; GFX9-SDAG-NEXT: v_perm_b32 v0, v0, v4, s0
+; GFX9-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX9-GISEL-LABEL: test_fminimum_v3f16_vv:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: v_pk_min_f16 v4, v0, v2
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v5, 0x7e00
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e64 s[0:1], v0, v2
+; GFX9-GISEL-NEXT: v_cmp_o_f16_sdwa vcc, v0, v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v0, v5, v4, s[0:1]
+; GFX9-GISEL-NEXT: v_cndmask_b32_sdwa v2, v5, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-GISEL-NEXT: v_pk_min_f16 v4, v1, v3
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e32 vcc, v1, v3
+; GFX9-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v1, v5, v4, vcc
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v2, 16, v0
+; GFX9-GISEL-NEXT: ; return to shader part epilog
+;
; GFX12-SDAG-LABEL: test_fminimum_v3f16_vv:
; GFX12-SDAG: ; %bb.0:
; GFX12-SDAG-NEXT: v_pk_minimum_f16 v0, v0, v2
@@ -187,6 +462,49 @@ define amdgpu_ps <3 x half> @test_fminimum_v3f16_vv(<3 x half> %a, <3 x half> %b
}
define amdgpu_ps <3 x half> @test_fminimum_v3f16_ss(<3 x half> inreg %a, <3 x half> inreg %b) {
+; GFX9-SDAG-LABEL: test_fminimum_v3f16_ss:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-SDAG-NEXT: v_pk_min_f16 v1, s1, v1
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v2, 0x7e00
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, s1, v0
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v3, s2
+; GFX9-SDAG-NEXT: s_lshr_b32 s1, s2, 16
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-SDAG-NEXT: v_pk_min_f16 v3, s0, v3
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, s0, v0
+; GFX9-SDAG-NEXT: s_lshr_b32 s0, s0, 16
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v4, s1
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v3, vcc
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, s0, v4
+; GFX9-SDAG-NEXT: v_cndmask_b32_sdwa v2, v2, v3, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-SDAG-NEXT: v_lshl_or_b32 v0, v2, 16, v0
+; GFX9-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX9-GISEL-LABEL: test_fminimum_v3f16_ss:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: s_lshr_b32 s5, s2, 16
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-GISEL-NEXT: s_lshr_b32 s4, s0, 16
+; GFX9-GISEL-NEXT: v_pk_min_f16 v1, s0, v0
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v2, s5
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e32 vcc, s0, v0
+; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e32 vcc, s4, v2
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v2, v4, v3, vcc
+; GFX9-GISEL-NEXT: v_pk_min_f16 v3, s1, v1
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e32 vcc, s1, v1
+; GFX9-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v2, 16, v0
+; GFX9-GISEL-NEXT: ; return to shader part epilog
+;
; GFX12-SDAG-LABEL: test_fminimum_v3f16_ss:
; GFX12-SDAG: ; %bb.0:
; GFX12-SDAG-NEXT: v_pk_minimum_f16 v0, s0, s2
@@ -206,97 +524,384 @@ define amdgpu_ps <3 x half> @test_fminimum_v3f16_ss(<3 x half> inreg %a, <3 x ha
}
define amdgpu_ps <4 x half> @test_fminimum_v4f16(<4 x half> %a, <4 x half> %b) {
-; GCN-LABEL: test_fminimum_v4f16:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_pk_minimum_f16 v0, v0, v2
-; GCN-NEXT: v_pk_minimum_f16 v1, v1, v3
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-SDAG-LABEL: test_fminimum_v4f16:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: v_pk_min_f16 v4, v1, v3
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v5, 0x7e00
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, v1, v3
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v6, v5, v4, vcc
+; GFX9-SDAG-NEXT: v_cmp_o_f16_sdwa vcc, v1, v3 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-SDAG-NEXT: v_cndmask_b32_sdwa v1, v5, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-SDAG-NEXT: v_pk_min_f16 v3, v0, v2
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v4, v5, v3, vcc
+; GFX9-SDAG-NEXT: v_cmp_o_f16_sdwa vcc, v0, v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-SDAG-NEXT: v_cndmask_b32_sdwa v0, v5, v3, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-SDAG-NEXT: s_mov_b32 s0, 0x5040100
+; GFX9-SDAG-NEXT: v_perm_b32 v0, v0, v4, s0
+; GFX9-SDAG-NEXT: v_perm_b32 v1, v1, v6, s0
+; GFX9-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX9-GISEL-LABEL: test_fminimum_v4f16:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: v_pk_min_f16 v4, v0, v2
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v6, 0x7e00
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e32 vcc, v0, v2
+; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v5, 16, v4
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v4, v6, v4, vcc
+; GFX9-GISEL-NEXT: v_cmp_o_f16_sdwa vcc, v0, v2 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v0, v6, v5, vcc
+; GFX9-GISEL-NEXT: v_and_b32_e32 v2, 0xffff, v4
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v0, 16, v2
+; GFX9-GISEL-NEXT: v_pk_min_f16 v2, v1, v3
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e64 s[0:1], v1, v3
+; GFX9-GISEL-NEXT: v_cmp_o_f16_sdwa vcc, v1, v3 src0_sel:WORD_1 src1_sel:WORD_1
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v1, v6, v2, s[0:1]
+; GFX9-GISEL-NEXT: v_cndmask_b32_sdwa v2, v6, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; GFX9-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fminimum_v4f16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_pk_minimum_f16 v0, v0, v2
+; GFX12-NEXT: v_pk_minimum_f16 v1, v1, v3
+; GFX12-NEXT: ; return to shader part epilog
%val = call <4 x half> @llvm.minimum.v4f16(<4 x half> %a, <4 x half> %b)
ret <4 x half> %val
}
define amdgpu_ps <4 x half> @test_fminimum_v4f16_ss(<4 x half> inreg %a, <4 x half> inreg %b) {
-; GCN-LABEL: test_fminimum_v4f16_ss:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_pk_minimum_f16 v0, s0, s2
-; GCN-NEXT: v_pk_minimum_f16 v1, s1, s3
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-SDAG-LABEL: test_fminimum_v4f16_ss:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-SDAG-NEXT: s_lshr_b32 s3, s3, 16
+; GFX9-SDAG-NEXT: v_pk_min_f16 v1, s1, v1
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v2, 0x7e00
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, s1, v0
+; GFX9-SDAG-NEXT: s_lshr_b32 s1, s1, 16
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v3, v2, v1, vcc
+; GFX9-SDAG-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, s1, v0
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v4, s2
+; GFX9-SDAG-NEXT: s_lshr_b32 s1, s2, 16
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-SDAG-NEXT: v_pk_min_f16 v4, s0, v4
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, s0, v0
+; GFX9-SDAG-NEXT: s_lshr_b32 s0, s0, 16
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v5, s1
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
+; GFX9-SDAG-NEXT: v_cmp_o_f16_e32 vcc, s0, v5
+; GFX9-SDAG-NEXT: v_cndmask_b32_sdwa v2, v2, v4, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-SDAG-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-SDAG-NEXT: v_lshl_or_b32 v0, v2, 16, v0
+; GFX9-SDAG-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GFX9-SDAG-NEXT: v_lshl_or_b32 v1, v1, 16, v2
+; GFX9-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX9-GISEL-LABEL: test_fminimum_v4f16_ss:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-GISEL-NEXT: s_lshr_b32 s2, s2, 16
+; GFX9-GISEL-NEXT: v_pk_min_f16 v1, s0, v0
+; GFX9-GISEL-NEXT: s_lshr_b32 s4, s0, 16
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v2, s2
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v4, 0x7e00
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e32 vcc, s0, v0
+; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v0, v4, v1, vcc
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e32 vcc, s4, v2
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc
+; GFX9-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-GISEL-NEXT: s_lshr_b32 s2, s3, 16
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v1, 16, v0
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-GISEL-NEXT: s_lshr_b32 s0, s1, 16
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v3, s2
+; GFX9-GISEL-NEXT: v_pk_min_f16 v2, s1, v1
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e32 vcc, s0, v3
+; GFX9-GISEL-NEXT: v_cmp_o_f16_e64 s[0:1], s1, v1
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v1, v4, v2, s[0:1]
+; GFX9-GISEL-NEXT: v_cndmask_b32_sdwa v2, v4, v2, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v1, v2, 16, v1
+; GFX9-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fminimum_v4f16_ss:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_pk_minimum_f16 v0, s0, s2
+; GFX12-NEXT: v_pk_minimum_f16 v1, s1, s3
+; GFX12-NEXT: ; return to shader part epilog
%val = call <4 x half> @llvm.minimum.v4f16(<4 x half> %a, <4 x half> %b)
ret <4 x half> %val
}
define amdgpu_ps <2 x float> @test_fminimum_f64_vv(double %a, double %b) {
-; GCN-LABEL: test_fminimum_f64_vv:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_minimum_f64 v[0:1], v[0:1], v[2:3]
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-SDAG-LABEL: test_fminimum_f64_vv:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: v_min_f64 v[4:5], v[0:1], v[2:3]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, 0x7ff80000
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX9-GISEL-LABEL: test_fminimum_f64_vv:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: v_min_f64 v[4:5], v[0:1], v[2:3]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e32 vcc, v[0:1], v[2:3]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, 0x7ff80000
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v4, vcc
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX9-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fminimum_f64_vv:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[2:3]
+; GFX12-NEXT: ; return to shader part epilog
%val = call double @llvm.minimum.f64(double %a, double %b)
%ret = bitcast double %val to <2 x float>
ret <2 x float> %ret
}
define amdgpu_ps <2 x float> @test_fminimum_f64_ss(double inreg %a, double inreg %b) {
-; GCN-LABEL: test_fminimum_f64_ss:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_minimum_f64 v[0:1], s[0:1], s[2:3]
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-SDAG-LABEL: test_fminimum_f64_ss:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-SDAG-NEXT: v_min_f64 v[2:3], s[0:1], v[0:1]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e32 vcc, s[0:1], v[0:1]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, 0x7ff80000
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX9-GISEL-LABEL: test_fminimum_f64_ss:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-GISEL-NEXT: v_min_f64 v[2:3], s[0:1], v[0:1]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e32 vcc, s[0:1], v[0:1]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, 0x7ff80000
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v2, vcc
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX9-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fminimum_f64_ss:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_minimum_f64 v[0:1], s[0:1], s[2:3]
+; GFX12-NEXT: ; return to shader part epilog
%val = call double @llvm.minimum.f64(double %a, double %b)
%ret = bitcast double %val to <2 x float>
ret <2 x float> %ret
}
define amdgpu_ps <4 x float> @test_fminimum_v2f64_ss(<2 x double> inreg %a, <2 x double> inreg %b) {
-; GCN-LABEL: test_fminimum_v2f64_ss:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_minimum_f64 v[0:1], s[0:1], s[4:5]
-; GCN-NEXT: v_minimum_f64 v[2:3], s[2:3], s[6:7]
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-SDAG-LABEL: test_fminimum_v2f64_ss:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-SDAG-NEXT: v_min_f64 v[2:3], s[0:1], v[0:1]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e32 vcc, s[0:1], v[0:1]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s6
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-SDAG-NEXT: v_min_f64 v[4:5], s[2:3], v[0:1]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e64 s[0:1], s[2:3], v[0:1]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v6, 0x7ff80000
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v6, vcc
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v2, v4, 0, s[0:1]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v3, v5, v6, s[0:1]
+; GFX9-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX9-GISEL-LABEL: test_fminimum_v2f64_ss:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-GISEL-NEXT: v_min_f64 v[2:3], s[0:1], v[0:1]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e32 vcc, s[0:1], v[0:1]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s6
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-GISEL-NEXT: v_min_f64 v[4:5], s[2:3], v[0:1]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e64 s[0:1], s[2:3], v[0:1]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v6, 0x7ff80000
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v2, vcc
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v2, 0, v4, s[0:1]
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v3, v6, v5, s[0:1]
+; GFX9-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fminimum_v2f64_ss:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_minimum_f64 v[0:1], s[0:1], s[4:5]
+; GFX12-NEXT: v_minimum_f64 v[2:3], s[2:3], s[6:7]
+; GFX12-NEXT: ; return to shader part epilog
%val = call <2 x double> @llvm.minimum.v2f64(<2 x double> %a, <2 x double> %b)
%ret = bitcast <2 x double> %val to <4 x float>
ret <4 x float> %ret
}
define amdgpu_ps <8 x float> @test_fminimum_v4f64(<4 x double> %a, <4 x double> %b) {
-; GCN-LABEL: test_fminimum_v4f64:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_minimum_f64 v[0:1], v[0:1], v[8:9]
-; GCN-NEXT: v_minimum_f64 v[2:3], v[2:3], v[10:11]
-; GCN-NEXT: v_minimum_f64 v[4:5], v[4:5], v[12:13]
-; GCN-NEXT: v_minimum_f64 v[6:7], v[6:7], v[14:15]
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-SDAG-LABEL: test_fminimum_v4f64:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: v_min_f64 v[16:17], v[0:1], v[8:9]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e32 vcc, v[0:1], v[8:9]
+; GFX9-SDAG-NEXT: v_min_f64 v[8:9], v[2:3], v[10:11]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e64 s[0:1], v[2:3], v[10:11]
+; GFX9-SDAG-NEXT: v_min_f64 v[10:11], v[4:5], v[12:13]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e64 s[2:3], v[4:5], v[12:13]
+; GFX9-SDAG-NEXT: v_min_f64 v[12:13], v[6:7], v[14:15]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e64 s[4:5], v[6:7], v[14:15]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v7, 0x7ff80000
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v0, v16, 0, vcc
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v1, v17, v7, vcc
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v2, v8, 0, s[0:1]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v3, v9, v7, s[0:1]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v4, v10, 0, s[2:3]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v5, v11, v7, s[2:3]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v6, v12, 0, s[4:5]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v7, v13, v7, s[4:5]
+; GFX9-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX9-GISEL-LABEL: test_fminimum_v4f64:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: v_min_f64 v[16:17], v[0:1], v[8:9]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e32 vcc, v[0:1], v[8:9]
+; GFX9-GISEL-NEXT: v_min_f64 v[8:9], v[2:3], v[10:11]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e64 s[0:1], v[2:3], v[10:11]
+; GFX9-GISEL-NEXT: v_min_f64 v[10:11], v[4:5], v[12:13]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e64 s[2:3], v[4:5], v[12:13]
+; GFX9-GISEL-NEXT: v_min_f64 v[12:13], v[6:7], v[14:15]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e64 s[4:5], v[6:7], v[14:15]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v18, 0x7ff80000
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v16, vcc
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v1, v18, v17, vcc
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v2, 0, v8, s[0:1]
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v3, v18, v9, s[0:1]
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v4, 0, v10, s[2:3]
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v5, v18, v11, s[2:3]
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v6, 0, v12, s[4:5]
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v7, v18, v13, s[4:5]
+; GFX9-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fminimum_v4f64:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_minimum_f64 v[0:1], v[0:1], v[8:9]
+; GFX12-NEXT: v_minimum_f64 v[2:3], v[2:3], v[10:11]
+; GFX12-NEXT: v_minimum_f64 v[4:5], v[4:5], v[12:13]
+; GFX12-NEXT: v_minimum_f64 v[6:7], v[6:7], v[14:15]
+; GFX12-NEXT: ; return to shader part epilog
%val = call <4 x double> @llvm.minimum.v4f64(<4 x double> %a, <4 x double> %b)
%ret = bitcast <4 x double> %val to <8 x float>
ret <8 x float> %ret
}
define amdgpu_ps <8 x float> @test_fminimum_v4f64_ss(<4 x double> inreg %a, <4 x double> inreg %b) {
-; GCN-LABEL: test_fminimum_v4f64_ss:
-; GCN: ; %bb.0:
-; GCN-NEXT: v_minimum_f64 v[0:1], s[0:1], s[8:9]
-; GCN-NEXT: v_minimum_f64 v[2:3], s[2:3], s[10:11]
-; GCN-NEXT: v_minimum_f64 v[4:5], s[4:5], s[12:13]
-; GCN-NEXT: v_minimum_f64 v[6:7], s[6:7], s[14:15]
-; GCN-NEXT: ; return to shader part epilog
+; GFX9-SDAG-LABEL: test_fminimum_v4f64_ss:
+; GFX9-SDAG: ; %bb.0:
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-SDAG-NEXT: v_min_f64 v[2:3], s[0:1], v[0:1]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e32 vcc, s[0:1], v[0:1]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v10, 0x7ff80000
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s10
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v2, s11
+; GFX9-SDAG-NEXT: v_min_f64 v[4:5], s[2:3], v[1:2]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e64 s[0:1], s[2:3], v[1:2]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s12
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v2, s13
+; GFX9-SDAG-NEXT: v_min_f64 v[6:7], s[4:5], v[1:2]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e64 s[2:3], s[4:5], v[1:2]
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v1, s14
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v2, s15
+; GFX9-SDAG-NEXT: v_min_f64 v[8:9], s[6:7], v[1:2]
+; GFX9-SDAG-NEXT: v_cmp_u_f64_e64 s[4:5], s[6:7], v[1:2]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v10, vcc
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v2, v4, 0, s[0:1]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v3, v5, v10, s[0:1]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v4, v6, 0, s[2:3]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v5, v7, v10, s[2:3]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v6, v8, 0, s[4:5]
+; GFX9-SDAG-NEXT: v_cndmask_b32_e64 v7, v9, v10, s[4:5]
+; GFX9-SDAG-NEXT: ; return to shader part epilog
+;
+; GFX9-GISEL-LABEL: test_fminimum_v4f64_ss:
+; GFX9-GISEL: ; %bb.0:
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-GISEL-NEXT: v_min_f64 v[2:3], s[0:1], v[0:1]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e32 vcc, s[0:1], v[0:1]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s10
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s11
+; GFX9-GISEL-NEXT: v_min_f64 v[4:5], s[2:3], v[0:1]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e64 s[0:1], s[2:3], v[0:1]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s12
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s13
+; GFX9-GISEL-NEXT: v_min_f64 v[6:7], s[4:5], v[0:1]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e64 s[2:3], s[4:5], v[0:1]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s14
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s15
+; GFX9-GISEL-NEXT: v_min_f64 v[8:9], s[6:7], v[0:1]
+; GFX9-GISEL-NEXT: v_cmp_o_f64_e64 s[4:5], s[6:7], v[0:1]
+; GFX9-GISEL-NEXT: v_mov_b32_e32 v10, 0x7ff80000
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v0, 0, v2, vcc
+; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v1, v10, v3, vcc
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v2, 0, v4, s[0:1]
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v3, v10, v5, s[0:1]
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v4, 0, v6, s[2:3]
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v5, v10, v7, s[2:3]
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v6, 0, v8, s[4:5]
+; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v7, v10, v9, s[4:5]
+; GFX9-GISEL-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fminimum_v4f64_ss:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_minimum_f64 v[0:1], s[0:1], s[8:9]
+; GFX12-NEXT: v_minimum_f64 v[2:3], s[2:3], s[10:11]
+; GFX12-NEXT: v_minimum_f64 v[4:5], s[4:5], s[12:13]
+; GFX12-NEXT: v_minimum_f64 v[6:7], s[6:7], s[14:15]
+; GFX12-NEXT: ; return to shader part epilog
%val = call <4 x double> @llvm.minimum.v4f64(<4 x double> %a, <4 x double> %b)
%ret = bitcast <4 x double> %val to <8 x float>
ret <8 x float> %ret
}
define amdgpu_kernel void @fminimumi_f32_move_to_valu(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
-; GCN-LABEL: fminimumi_f32_move_to_valu:
-; GCN: ; %bb.0:
-; GCN-NEXT: s_clause 0x1
-; GCN-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GCN-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
-; GCN-NEXT: v_mov_b32_e32 v0, 0
-; GCN-NEXT: s_wait_kmcnt 0x0
-; GCN-NEXT: global_load_b32 v1, v0, s[2:3] scope:SCOPE_SYS
-; GCN-NEXT: s_wait_loadcnt 0x0
-; GCN-NEXT: global_load_b32 v2, v0, s[4:5] scope:SCOPE_SYS
-; GCN-NEXT: s_wait_loadcnt 0x0
-; GCN-NEXT: v_minimum_f32 v1, v1, v2
-; GCN-NEXT: global_store_b32 v0, v1, s[0:1]
-; GCN-NEXT: s_endpgm
+; GFX9-LABEL: fminimumi_f32_move_to_valu:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: global_load_dword v1, v0, s[2:3] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: global_load_dword v2, v0, s[6:7] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_min_f32_e32 v4, v1, v2
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v1, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX9-NEXT: global_store_dword v0, v1, s[0:1]
+; GFX9-NEXT: s_endpgm
+;
+; GFX12-LABEL: fminimumi_f32_move_to_valu:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_clause 0x1
+; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX12-NEXT: s_load_b64 s[4:5], s[4:5], 0x34
+; GFX12-NEXT: v_mov_b32_e32 v0, 0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: global_load_b32 v1, v0, s[2:3] scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: global_load_b32 v2, v0, s[4:5] scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: v_minimum_f32 v1, v1, v2
+; GFX12-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX12-NEXT: s_endpgm
%a = load volatile float, ptr addrspace(1) %aptr, align 4
%b = load volatile float, ptr addrspace(1) %bptr, align 4
%v = call float @llvm.minimum.f32(float %a, float %b)
@@ -305,6 +910,23 @@ define amdgpu_kernel void @fminimumi_f32_move_to_valu(ptr addrspace(1) %out, ptr
}
define amdgpu_kernel void @fminimum_f16_move_to_valu(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
+; GFX9-LABEL: fminimum_f16_move_to_valu:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7e00
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: global_load_ushort v1, v0, s[2:3] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: global_load_ushort v2, v0, s[6:7] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_min_f16_e32 v4, v1, v2
+; GFX9-NEXT: v_cmp_o_f16_e32 vcc, v1, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v4, vcc
+; GFX9-NEXT: global_store_short v0, v1, s[0:1]
+; GFX9-NEXT: s_endpgm
+;
; GFX12-SDAG-TRUE16-LABEL: fminimum_f16_move_to_valu:
; GFX12-SDAG-TRUE16: ; %bb.0:
; GFX12-SDAG-TRUE16-NEXT: s_clause 0x1
@@ -371,6 +993,40 @@ define amdgpu_kernel void @fminimum_f16_move_to_valu(ptr addrspace(1) %out, ptr
ret void
}
+define amdgpu_ps float @test_fminimum_f32_ieee_on(float %a, float %b) #0 {
+; GFX9-LABEL: test_fminimum_f32_ieee_on:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fminimum_f32_ieee_on:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_minimum_f32 v0, v0, v1
+; GFX12-NEXT: ; return to shader part epilog
+ %val = call float @llvm.minimum.f32(float %a, float %b)
+ ret float %val
+}
+
+define amdgpu_ps float @test_fminimum_f32_ieee_off(float %a, float %b) #1 {
+; GFX9-LABEL: test_fminimum_f32_ieee_off:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_min_f32_e32 v2, v0, v1
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x7fc00000
+; GFX9-NEXT: v_cmp_o_f32_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_fminimum_f32_ieee_off:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_minimum_f32 v0, v0, v1
+; GFX12-NEXT: ; return to shader part epilog
+ %val = call float @llvm.minimum.f32(float %a, float %b)
+ ret float %val
+}
+
declare float @llvm.minimum.f32(float, float)
declare <2 x float> @llvm.minimum.v2f32(<2 x float>, <2 x float>)
declare <3 x float> @llvm.minimum.v3f32(<3 x float>, <3 x float>)
@@ -383,3 +1039,6 @@ declare <4 x half> @llvm.minimum.v4f16(<4 x half>, <4 x half>)
declare double @llvm.minimum.f64(double, double)
declare <2 x double> @llvm.minimum.v2f64(<2 x double>, <2 x double>)
declare <4 x double> @llvm.minimum.v4f64(<4 x double>, <4 x double>)
+
+attributes #0 = { nounwind "amdgpu-ieee"="true" }
+attributes #1 = { nounwind "amdgpu-ieee"="false" }
diff --git a/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll b/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll
index 94afa88..9ebf6ae 100644
--- a/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll
@@ -4666,21 +4666,13 @@ define amdgpu_ps float @global_load_saddr_i8_offset_or_i64_imm_offset_16(ptr add
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
-; GFX12-SDAG-LABEL: global_load_saddr_i8_offset_or_i64_imm_offset_16:
-; GFX12-SDAG: ; %bb.0:
-; GFX12-SDAG-NEXT: v_or_b32_e32 v0, 16, v0
-; GFX12-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX12-SDAG-NEXT: global_load_u8 v0, v[0:1], off
-; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-NEXT: ; return to shader part epilog
-;
-; GFX12-GISEL-LABEL: global_load_saddr_i8_offset_or_i64_imm_offset_16:
-; GFX12-GISEL: ; %bb.0:
-; GFX12-GISEL-NEXT: v_mov_b32_e32 v1, 0
-; GFX12-GISEL-NEXT: v_or_b32_e32 v0, 16, v0
-; GFX12-GISEL-NEXT: global_load_u8 v0, v[0:1], off
-; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-NEXT: ; return to shader part epilog
+; GFX12-LABEL: global_load_saddr_i8_offset_or_i64_imm_offset_16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_or_b32_e32 v0, 16, v0
+; GFX12-NEXT: v_mov_b32_e32 v1, 0
+; GFX12-NEXT: global_load_u8 v0, v[0:1], off
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: ; return to shader part epilog
%zext.idx = zext i32 %idx to i64
%or = or i64 %zext.idx, 16
%addr = inttoptr i64 %or to ptr addrspace(1)
@@ -4707,21 +4699,13 @@ define amdgpu_ps float @global_load_saddr_i8_offset_or_i64_imm_offset_4160(ptr a
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
-; GFX12-SDAG-LABEL: global_load_saddr_i8_offset_or_i64_imm_offset_4160:
-; GFX12-SDAG: ; %bb.0:
-; GFX12-SDAG-NEXT: v_or_b32_e32 v0, 0x1040, v0
-; GFX12-SDAG-NEXT: v_mov_b32_e32 v1, 0
-; GFX12-SDAG-NEXT: global_load_u8 v0, v[0:1], off
-; GFX12-SDAG-NEXT: s_wait_loadcnt 0x0
-; GFX12-SDAG-NEXT: ; return to shader part epilog
-;
-; GFX12-GISEL-LABEL: global_load_saddr_i8_offset_or_i64_imm_offset_4160:
-; GFX12-GISEL: ; %bb.0:
-; GFX12-GISEL-NEXT: v_mov_b32_e32 v1, 0
-; GFX12-GISEL-NEXT: v_or_b32_e32 v0, 0x1040, v0
-; GFX12-GISEL-NEXT: global_load_u8 v0, v[0:1], off
-; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
-; GFX12-GISEL-NEXT: ; return to shader part epilog
+; GFX12-LABEL: global_load_saddr_i8_offset_or_i64_imm_offset_4160:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_or_b32_e32 v0, 0x1040, v0
+; GFX12-NEXT: v_mov_b32_e32 v1, 0
+; GFX12-NEXT: global_load_u8 v0, v[0:1], off
+; GFX12-NEXT: s_wait_loadcnt 0x0
+; GFX12-NEXT: ; return to shader part epilog
%zext.idx = zext i32 %idx to i64
%or = or i64 %zext.idx, 4160
%addr = inttoptr i64 %or to ptr addrspace(1)
diff --git a/llvm/test/CodeGen/AMDGPU/itofp.i128.ll b/llvm/test/CodeGen/AMDGPU/itofp.i128.ll
index 9684712..2f9182e 100644
--- a/llvm/test/CodeGen/AMDGPU/itofp.i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/itofp.i128.ll
@@ -1066,13 +1066,13 @@ define double @uitofp_i128_to_f64(i128 %x) {
; GISEL-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
; GISEL-NEXT: v_lshlrev_b64 v[8:9], 30, v[2:3]
; GISEL-NEXT: v_lshrrev_b32_e32 v5, 2, v1
-; GISEL-NEXT: v_or_b32_e32 v9, v5, v8
+; GISEL-NEXT: v_or_b32_e32 v9, v8, v5
; GISEL-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GISEL-NEXT: ; %bb.11: ; %itofp-if-then20
; GISEL-NEXT: v_lshlrev_b64 v[2:3], 29, v[2:3]
; GISEL-NEXT: v_lshrrev_b64 v[4:5], 3, v[0:1]
; GISEL-NEXT: v_lshrrev_b32_e32 v0, 3, v1
-; GISEL-NEXT: v_or_b32_e32 v9, v0, v2
+; GISEL-NEXT: v_or_b32_e32 v9, v2, v0
; GISEL-NEXT: v_mov_b32_e32 v7, v6
; GISEL-NEXT: ; %bb.12: ; %Flow
; GISEL-NEXT: s_or_b64 exec, exec, s[4:5]
diff --git a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-nontemporal-metadata.ll b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-nontemporal-metadata.ll
index 1e4b633..fc36ed9 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-nontemporal-metadata.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-buffer-fat-pointers-nontemporal-metadata.ll
@@ -45,27 +45,18 @@ define amdgpu_kernel void @buffer_nontemporal_load_store(ptr addrspace(7) %in, p
; GFX9-GISEL: ; %bb.0: ; %entry
; GFX9-GISEL-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; GFX9-GISEL-NEXT: s_load_dword s7, s[8:9], 0x10
-; GFX9-GISEL-NEXT: s_mov_b32 s11, 0
-; GFX9-GISEL-NEXT: s_mov_b32 s4, s11
-; GFX9-GISEL-NEXT: s_mov_b32 s6, s11
; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-GISEL-NEXT: s_mov_b32 s10, s1
+; GFX9-GISEL-NEXT: s_mov_b32 s4, s1
; GFX9-GISEL-NEXT: s_mov_b32 s5, s2
-; GFX9-GISEL-NEXT: s_or_b64 s[4:5], s[10:11], s[4:5]
-; GFX9-GISEL-NEXT: s_mov_b32 s10, s3
-; GFX9-GISEL-NEXT: s_or_b64 s[6:7], s[10:11], s[6:7]
+; GFX9-GISEL-NEXT: s_mov_b32 s6, s3
; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s0
; GFX9-GISEL-NEXT: buffer_load_dword v0, v0, s[4:7], 0 offen glc slc
; GFX9-GISEL-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x20
; GFX9-GISEL-NEXT: s_load_dword s7, s[8:9], 0x30
-; GFX9-GISEL-NEXT: s_mov_b32 s4, s11
-; GFX9-GISEL-NEXT: s_mov_b32 s6, s11
; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-GISEL-NEXT: s_mov_b32 s10, s1
+; GFX9-GISEL-NEXT: s_mov_b32 s4, s1
; GFX9-GISEL-NEXT: s_mov_b32 s5, s2
-; GFX9-GISEL-NEXT: s_or_b64 s[4:5], s[10:11], s[4:5]
-; GFX9-GISEL-NEXT: s_mov_b32 s10, s3
-; GFX9-GISEL-NEXT: s_or_b64 s[6:7], s[10:11], s[6:7]
+; GFX9-GISEL-NEXT: s_mov_b32 s6, s3
; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s0
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
; GFX9-GISEL-NEXT: buffer_store_dword v0, v1, s[4:7], 0 offen glc slc
@@ -105,27 +96,18 @@ define amdgpu_kernel void @buffer_nontemporal_load_store(ptr addrspace(7) %in, p
; GFX942-GISEL: ; %bb.0: ; %entry
; GFX942-GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX942-GISEL-NEXT: s_load_dword s11, s[4:5], 0x10
-; GFX942-GISEL-NEXT: s_mov_b32 s7, 0
-; GFX942-GISEL-NEXT: s_mov_b32 s8, s7
-; GFX942-GISEL-NEXT: s_mov_b32 s10, s7
; GFX942-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-GISEL-NEXT: s_mov_b32 s6, s1
+; GFX942-GISEL-NEXT: s_mov_b32 s8, s1
; GFX942-GISEL-NEXT: s_mov_b32 s9, s2
-; GFX942-GISEL-NEXT: s_or_b64 s[8:9], s[6:7], s[8:9]
-; GFX942-GISEL-NEXT: s_mov_b32 s6, s3
-; GFX942-GISEL-NEXT: s_or_b64 s[10:11], s[6:7], s[10:11]
+; GFX942-GISEL-NEXT: s_mov_b32 s10, s3
; GFX942-GISEL-NEXT: v_mov_b32_e32 v0, s0
; GFX942-GISEL-NEXT: buffer_load_dword v0, v0, s[8:11], 0 offen nt
; GFX942-GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x20
-; GFX942-GISEL-NEXT: s_load_dword s9, s[4:5], 0x30
-; GFX942-GISEL-NEXT: s_mov_b32 s4, s7
-; GFX942-GISEL-NEXT: s_mov_b32 s8, s7
+; GFX942-GISEL-NEXT: s_load_dword s7, s[4:5], 0x30
; GFX942-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-GISEL-NEXT: s_mov_b32 s6, s1
+; GFX942-GISEL-NEXT: s_mov_b32 s4, s1
; GFX942-GISEL-NEXT: s_mov_b32 s5, s2
-; GFX942-GISEL-NEXT: s_or_b64 s[4:5], s[6:7], s[4:5]
; GFX942-GISEL-NEXT: s_mov_b32 s6, s3
-; GFX942-GISEL-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
; GFX942-GISEL-NEXT: v_mov_b32_e32 v1, s0
; GFX942-GISEL-NEXT: s_waitcnt vmcnt(0)
; GFX942-GISEL-NEXT: buffer_store_dword v0, v1, s[4:7], 0 offen nt
@@ -168,29 +150,22 @@ define amdgpu_kernel void @buffer_nontemporal_load_store(ptr addrspace(7) %in, p
; GFX10-GISEL: ; %bb.0: ; %entry
; GFX10-GISEL-NEXT: s_clause 0x1
; GFX10-GISEL-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; GFX10-GISEL-NEXT: s_load_dword s5, s[8:9], 0x10
-; GFX10-GISEL-NEXT: s_mov_b32 s7, 0
-; GFX10-GISEL-NEXT: s_mov_b32 s10, s7
-; GFX10-GISEL-NEXT: s_mov_b32 s4, s7
+; GFX10-GISEL-NEXT: s_load_dword s7, s[8:9], 0x10
; GFX10-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-GISEL-NEXT: s_mov_b32 s6, s1
-; GFX10-GISEL-NEXT: s_mov_b32 s11, s2
; GFX10-GISEL-NEXT: v_mov_b32_e32 v0, s0
-; GFX10-GISEL-NEXT: s_or_b64 s[0:1], s[6:7], s[10:11]
+; GFX10-GISEL-NEXT: s_mov_b32 s4, s1
+; GFX10-GISEL-NEXT: s_mov_b32 s5, s2
; GFX10-GISEL-NEXT: s_mov_b32 s6, s3
-; GFX10-GISEL-NEXT: s_or_b64 s[2:3], s[6:7], s[4:5]
-; GFX10-GISEL-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen slc
+; GFX10-GISEL-NEXT: buffer_load_dword v0, v0, s[4:7], 0 offen slc
; GFX10-GISEL-NEXT: s_clause 0x1
-; GFX10-GISEL-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-GISEL-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x20
-; GFX10-GISEL-NEXT: s_load_dword s11, s[8:9], 0x30
+; GFX10-GISEL-NEXT: s_waitcnt_depctr 0xffe3
+; GFX10-GISEL-NEXT: s_load_dword s7, s[8:9], 0x30
; GFX10-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-GISEL-NEXT: s_mov_b32 s6, s1
-; GFX10-GISEL-NEXT: s_mov_b32 s5, s2
; GFX10-GISEL-NEXT: v_mov_b32_e32 v1, s0
-; GFX10-GISEL-NEXT: s_or_b64 s[4:5], s[6:7], s[4:5]
+; GFX10-GISEL-NEXT: s_mov_b32 s4, s1
+; GFX10-GISEL-NEXT: s_mov_b32 s5, s2
; GFX10-GISEL-NEXT: s_mov_b32 s6, s3
-; GFX10-GISEL-NEXT: s_or_b64 s[6:7], s[6:7], s[10:11]
; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
; GFX10-GISEL-NEXT: buffer_store_dword v0, v1, s[4:7], 0 offen glc slc
; GFX10-GISEL-NEXT: s_endpgm
@@ -234,32 +209,21 @@ define amdgpu_kernel void @buffer_nontemporal_load_store(ptr addrspace(7) %in, p
; GFX11-GISEL: ; %bb.0: ; %entry
; GFX11-GISEL-NEXT: s_clause 0x1
; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX11-GISEL-NEXT: s_load_b32 s7, s[4:5], 0x10
-; GFX11-GISEL-NEXT: s_mov_b32 s9, 0
-; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-GISEL-NEXT: s_mov_b32 s10, s9
-; GFX11-GISEL-NEXT: s_mov_b32 s6, s9
+; GFX11-GISEL-NEXT: s_load_b32 s11, s[4:5], 0x10
; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-GISEL-NEXT: s_mov_b32 s8, s1
-; GFX11-GISEL-NEXT: s_mov_b32 s11, s2
; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-GISEL-NEXT: s_or_b64 s[0:1], s[8:9], s[10:11]
-; GFX11-GISEL-NEXT: s_mov_b32 s8, s3
-; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-GISEL-NEXT: s_or_b64 s[2:3], s[8:9], s[6:7]
-; GFX11-GISEL-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen slc dlc
+; GFX11-GISEL-NEXT: s_mov_b32 s8, s1
+; GFX11-GISEL-NEXT: s_mov_b32 s9, s2
+; GFX11-GISEL-NEXT: s_mov_b32 s10, s3
+; GFX11-GISEL-NEXT: buffer_load_b32 v0, v0, s[8:11], 0 offen slc dlc
; GFX11-GISEL-NEXT: s_clause 0x1
; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x20
; GFX11-GISEL-NEXT: s_load_b32 s7, s[4:5], 0x30
-; GFX11-GISEL-NEXT: s_mov_b32 s4, s9
; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-GISEL-NEXT: s_mov_b32 s8, s1
-; GFX11-GISEL-NEXT: s_mov_b32 s5, s2
; GFX11-GISEL-NEXT: v_mov_b32_e32 v1, s0
-; GFX11-GISEL-NEXT: s_or_b64 s[4:5], s[8:9], s[4:5]
-; GFX11-GISEL-NEXT: s_mov_b32 s8, s3
-; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-GISEL-NEXT: s_or_b64 s[6:7], s[8:9], s[6:7]
+; GFX11-GISEL-NEXT: s_mov_b32 s4, s1
+; GFX11-GISEL-NEXT: s_mov_b32 s5, s2
+; GFX11-GISEL-NEXT: s_mov_b32 s6, s3
; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
; GFX11-GISEL-NEXT: buffer_store_b32 v0, v1, s[4:7], 0 offen glc slc dlc
; GFX11-GISEL-NEXT: s_endpgm
@@ -303,32 +267,21 @@ define amdgpu_kernel void @buffer_nontemporal_load_store(ptr addrspace(7) %in, p
; GFX12-GISEL: ; %bb.0: ; %entry
; GFX12-GISEL-NEXT: s_clause 0x1
; GFX12-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX12-GISEL-NEXT: s_load_b32 s7, s[4:5], 0x10
-; GFX12-GISEL-NEXT: s_mov_b32 s9, 0
-; GFX12-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX12-GISEL-NEXT: s_mov_b32 s10, s9
-; GFX12-GISEL-NEXT: s_mov_b32 s6, s9
+; GFX12-GISEL-NEXT: s_load_b32 s11, s[4:5], 0x10
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-NEXT: s_mov_b32 s8, s1
-; GFX12-GISEL-NEXT: s_mov_b32 s11, s2
; GFX12-GISEL-NEXT: v_mov_b32_e32 v0, s0
-; GFX12-GISEL-NEXT: s_or_b64 s[0:1], s[8:9], s[10:11]
-; GFX12-GISEL-NEXT: s_mov_b32 s8, s3
-; GFX12-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX12-GISEL-NEXT: s_or_b64 s[2:3], s[8:9], s[6:7]
-; GFX12-GISEL-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen th:TH_LOAD_NT
+; GFX12-GISEL-NEXT: s_mov_b32 s8, s1
+; GFX12-GISEL-NEXT: s_mov_b32 s9, s2
+; GFX12-GISEL-NEXT: s_mov_b32 s10, s3
+; GFX12-GISEL-NEXT: buffer_load_b32 v0, v0, s[8:11], null offen th:TH_LOAD_NT
; GFX12-GISEL-NEXT: s_clause 0x1
; GFX12-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x20
; GFX12-GISEL-NEXT: s_load_b32 s7, s[4:5], 0x30
-; GFX12-GISEL-NEXT: s_mov_b32 s4, s9
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-NEXT: s_mov_b32 s8, s1
-; GFX12-GISEL-NEXT: s_mov_b32 s5, s2
; GFX12-GISEL-NEXT: v_mov_b32_e32 v1, s0
-; GFX12-GISEL-NEXT: s_or_b64 s[4:5], s[8:9], s[4:5]
-; GFX12-GISEL-NEXT: s_mov_b32 s8, s3
-; GFX12-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX12-GISEL-NEXT: s_or_b64 s[6:7], s[8:9], s[6:7]
+; GFX12-GISEL-NEXT: s_mov_b32 s4, s1
+; GFX12-GISEL-NEXT: s_mov_b32 s5, s2
+; GFX12-GISEL-NEXT: s_mov_b32 s6, s3
; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX12-GISEL-NEXT: buffer_store_b32 v0, v1, s[4:7], null offen th:TH_STORE_NT
; GFX12-GISEL-NEXT: s_endpgm
@@ -374,28 +327,19 @@ define amdgpu_kernel void @buffer_nontemporal_and_volatile_load_store(ptr addrsp
; GFX9-GISEL: ; %bb.0: ; %entry
; GFX9-GISEL-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
; GFX9-GISEL-NEXT: s_load_dword s7, s[8:9], 0x10
-; GFX9-GISEL-NEXT: s_mov_b32 s11, 0
-; GFX9-GISEL-NEXT: s_mov_b32 s4, s11
-; GFX9-GISEL-NEXT: s_mov_b32 s6, s11
; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-GISEL-NEXT: s_mov_b32 s10, s1
+; GFX9-GISEL-NEXT: s_mov_b32 s4, s1
; GFX9-GISEL-NEXT: s_mov_b32 s5, s2
-; GFX9-GISEL-NEXT: s_or_b64 s[4:5], s[10:11], s[4:5]
-; GFX9-GISEL-NEXT: s_mov_b32 s10, s3
-; GFX9-GISEL-NEXT: s_or_b64 s[6:7], s[10:11], s[6:7]
+; GFX9-GISEL-NEXT: s_mov_b32 s6, s3
; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s0
; GFX9-GISEL-NEXT: buffer_load_dword v0, v0, s[4:7], 0 offen glc
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
; GFX9-GISEL-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x20
; GFX9-GISEL-NEXT: s_load_dword s7, s[8:9], 0x30
-; GFX9-GISEL-NEXT: s_mov_b32 s4, s11
-; GFX9-GISEL-NEXT: s_mov_b32 s6, s11
; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-GISEL-NEXT: s_mov_b32 s10, s1
+; GFX9-GISEL-NEXT: s_mov_b32 s4, s1
; GFX9-GISEL-NEXT: s_mov_b32 s5, s2
-; GFX9-GISEL-NEXT: s_or_b64 s[4:5], s[10:11], s[4:5]
-; GFX9-GISEL-NEXT: s_mov_b32 s10, s3
-; GFX9-GISEL-NEXT: s_or_b64 s[6:7], s[10:11], s[6:7]
+; GFX9-GISEL-NEXT: s_mov_b32 s6, s3
; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, s0
; GFX9-GISEL-NEXT: buffer_store_dword v0, v1, s[4:7], 0 offen
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
@@ -436,28 +380,19 @@ define amdgpu_kernel void @buffer_nontemporal_and_volatile_load_store(ptr addrsp
; GFX942-GISEL: ; %bb.0: ; %entry
; GFX942-GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GFX942-GISEL-NEXT: s_load_dword s11, s[4:5], 0x10
-; GFX942-GISEL-NEXT: s_mov_b32 s7, 0
-; GFX942-GISEL-NEXT: s_mov_b32 s8, s7
-; GFX942-GISEL-NEXT: s_mov_b32 s10, s7
; GFX942-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-GISEL-NEXT: s_mov_b32 s6, s1
+; GFX942-GISEL-NEXT: s_mov_b32 s8, s1
; GFX942-GISEL-NEXT: s_mov_b32 s9, s2
-; GFX942-GISEL-NEXT: s_or_b64 s[8:9], s[6:7], s[8:9]
-; GFX942-GISEL-NEXT: s_mov_b32 s6, s3
-; GFX942-GISEL-NEXT: s_or_b64 s[10:11], s[6:7], s[10:11]
+; GFX942-GISEL-NEXT: s_mov_b32 s10, s3
; GFX942-GISEL-NEXT: v_mov_b32_e32 v0, s0
; GFX942-GISEL-NEXT: buffer_load_dword v0, v0, s[8:11], 0 offen sc0 sc1
; GFX942-GISEL-NEXT: s_waitcnt vmcnt(0)
; GFX942-GISEL-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x20
-; GFX942-GISEL-NEXT: s_load_dword s9, s[4:5], 0x30
-; GFX942-GISEL-NEXT: s_mov_b32 s4, s7
-; GFX942-GISEL-NEXT: s_mov_b32 s8, s7
+; GFX942-GISEL-NEXT: s_load_dword s7, s[4:5], 0x30
; GFX942-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX942-GISEL-NEXT: s_mov_b32 s6, s1
+; GFX942-GISEL-NEXT: s_mov_b32 s4, s1
; GFX942-GISEL-NEXT: s_mov_b32 s5, s2
-; GFX942-GISEL-NEXT: s_or_b64 s[4:5], s[6:7], s[4:5]
; GFX942-GISEL-NEXT: s_mov_b32 s6, s3
-; GFX942-GISEL-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
; GFX942-GISEL-NEXT: v_mov_b32_e32 v1, s0
; GFX942-GISEL-NEXT: buffer_store_dword v0, v1, s[4:7], 0 offen sc0 sc1
; GFX942-GISEL-NEXT: s_waitcnt vmcnt(0)
@@ -501,30 +436,23 @@ define amdgpu_kernel void @buffer_nontemporal_and_volatile_load_store(ptr addrsp
; GFX10-GISEL: ; %bb.0: ; %entry
; GFX10-GISEL-NEXT: s_clause 0x1
; GFX10-GISEL-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; GFX10-GISEL-NEXT: s_load_dword s5, s[8:9], 0x10
-; GFX10-GISEL-NEXT: s_mov_b32 s7, 0
-; GFX10-GISEL-NEXT: s_mov_b32 s10, s7
-; GFX10-GISEL-NEXT: s_mov_b32 s4, s7
+; GFX10-GISEL-NEXT: s_load_dword s7, s[8:9], 0x10
; GFX10-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-GISEL-NEXT: s_mov_b32 s6, s1
-; GFX10-GISEL-NEXT: s_mov_b32 s11, s2
; GFX10-GISEL-NEXT: v_mov_b32_e32 v0, s0
-; GFX10-GISEL-NEXT: s_or_b64 s[0:1], s[6:7], s[10:11]
+; GFX10-GISEL-NEXT: s_mov_b32 s4, s1
+; GFX10-GISEL-NEXT: s_mov_b32 s5, s2
; GFX10-GISEL-NEXT: s_mov_b32 s6, s3
-; GFX10-GISEL-NEXT: s_or_b64 s[2:3], s[6:7], s[4:5]
-; GFX10-GISEL-NEXT: buffer_load_dword v0, v0, s[0:3], 0 offen glc dlc
+; GFX10-GISEL-NEXT: buffer_load_dword v0, v0, s[4:7], 0 offen glc dlc
; GFX10-GISEL-NEXT: s_waitcnt vmcnt(0)
; GFX10-GISEL-NEXT: s_clause 0x1
-; GFX10-GISEL-NEXT: s_waitcnt_depctr 0xffe3
; GFX10-GISEL-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x20
-; GFX10-GISEL-NEXT: s_load_dword s11, s[8:9], 0x30
+; GFX10-GISEL-NEXT: s_waitcnt_depctr 0xffe3
+; GFX10-GISEL-NEXT: s_load_dword s7, s[8:9], 0x30
; GFX10-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX10-GISEL-NEXT: s_mov_b32 s6, s1
-; GFX10-GISEL-NEXT: s_mov_b32 s5, s2
; GFX10-GISEL-NEXT: v_mov_b32_e32 v1, s0
-; GFX10-GISEL-NEXT: s_or_b64 s[4:5], s[6:7], s[4:5]
+; GFX10-GISEL-NEXT: s_mov_b32 s4, s1
+; GFX10-GISEL-NEXT: s_mov_b32 s5, s2
; GFX10-GISEL-NEXT: s_mov_b32 s6, s3
-; GFX10-GISEL-NEXT: s_or_b64 s[6:7], s[6:7], s[10:11]
; GFX10-GISEL-NEXT: buffer_store_dword v0, v1, s[4:7], 0 offen
; GFX10-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-GISEL-NEXT: s_endpgm
@@ -569,33 +497,22 @@ define amdgpu_kernel void @buffer_nontemporal_and_volatile_load_store(ptr addrsp
; GFX11-GISEL: ; %bb.0: ; %entry
; GFX11-GISEL-NEXT: s_clause 0x1
; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX11-GISEL-NEXT: s_load_b32 s7, s[4:5], 0x10
-; GFX11-GISEL-NEXT: s_mov_b32 s9, 0
-; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-GISEL-NEXT: s_mov_b32 s10, s9
-; GFX11-GISEL-NEXT: s_mov_b32 s6, s9
+; GFX11-GISEL-NEXT: s_load_b32 s11, s[4:5], 0x10
; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-GISEL-NEXT: s_mov_b32 s8, s1
-; GFX11-GISEL-NEXT: s_mov_b32 s11, s2
; GFX11-GISEL-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-GISEL-NEXT: s_or_b64 s[0:1], s[8:9], s[10:11]
-; GFX11-GISEL-NEXT: s_mov_b32 s8, s3
-; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-GISEL-NEXT: s_or_b64 s[2:3], s[8:9], s[6:7]
-; GFX11-GISEL-NEXT: buffer_load_b32 v0, v0, s[0:3], 0 offen glc dlc
+; GFX11-GISEL-NEXT: s_mov_b32 s8, s1
+; GFX11-GISEL-NEXT: s_mov_b32 s9, s2
+; GFX11-GISEL-NEXT: s_mov_b32 s10, s3
+; GFX11-GISEL-NEXT: buffer_load_b32 v0, v0, s[8:11], 0 offen glc dlc
; GFX11-GISEL-NEXT: s_waitcnt vmcnt(0)
; GFX11-GISEL-NEXT: s_clause 0x1
; GFX11-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x20
; GFX11-GISEL-NEXT: s_load_b32 s7, s[4:5], 0x30
-; GFX11-GISEL-NEXT: s_mov_b32 s4, s9
; GFX11-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-GISEL-NEXT: s_mov_b32 s8, s1
-; GFX11-GISEL-NEXT: s_mov_b32 s5, s2
; GFX11-GISEL-NEXT: v_mov_b32_e32 v1, s0
-; GFX11-GISEL-NEXT: s_or_b64 s[4:5], s[8:9], s[4:5]
-; GFX11-GISEL-NEXT: s_mov_b32 s8, s3
-; GFX11-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-GISEL-NEXT: s_or_b64 s[6:7], s[8:9], s[6:7]
+; GFX11-GISEL-NEXT: s_mov_b32 s4, s1
+; GFX11-GISEL-NEXT: s_mov_b32 s5, s2
+; GFX11-GISEL-NEXT: s_mov_b32 s6, s3
; GFX11-GISEL-NEXT: buffer_store_b32 v0, v1, s[4:7], 0 offen dlc
; GFX11-GISEL-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-GISEL-NEXT: s_endpgm
@@ -640,33 +557,22 @@ define amdgpu_kernel void @buffer_nontemporal_and_volatile_load_store(ptr addrsp
; GFX12-GISEL: ; %bb.0: ; %entry
; GFX12-GISEL-NEXT: s_clause 0x1
; GFX12-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x0
-; GFX12-GISEL-NEXT: s_load_b32 s7, s[4:5], 0x10
-; GFX12-GISEL-NEXT: s_mov_b32 s9, 0
-; GFX12-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX12-GISEL-NEXT: s_mov_b32 s10, s9
-; GFX12-GISEL-NEXT: s_mov_b32 s6, s9
+; GFX12-GISEL-NEXT: s_load_b32 s11, s[4:5], 0x10
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-NEXT: s_mov_b32 s8, s1
-; GFX12-GISEL-NEXT: s_mov_b32 s11, s2
; GFX12-GISEL-NEXT: v_mov_b32_e32 v0, s0
-; GFX12-GISEL-NEXT: s_or_b64 s[0:1], s[8:9], s[10:11]
-; GFX12-GISEL-NEXT: s_mov_b32 s8, s3
-; GFX12-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX12-GISEL-NEXT: s_or_b64 s[2:3], s[8:9], s[6:7]
-; GFX12-GISEL-NEXT: buffer_load_b32 v0, v0, s[0:3], null offen th:TH_LOAD_NT scope:SCOPE_SYS
+; GFX12-GISEL-NEXT: s_mov_b32 s8, s1
+; GFX12-GISEL-NEXT: s_mov_b32 s9, s2
+; GFX12-GISEL-NEXT: s_mov_b32 s10, s3
+; GFX12-GISEL-NEXT: buffer_load_b32 v0, v0, s[8:11], null offen th:TH_LOAD_NT scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_loadcnt 0x0
; GFX12-GISEL-NEXT: s_clause 0x1
; GFX12-GISEL-NEXT: s_load_b128 s[0:3], s[4:5], 0x20
; GFX12-GISEL-NEXT: s_load_b32 s7, s[4:5], 0x30
-; GFX12-GISEL-NEXT: s_mov_b32 s4, s9
; GFX12-GISEL-NEXT: s_wait_kmcnt 0x0
-; GFX12-GISEL-NEXT: s_mov_b32 s8, s1
-; GFX12-GISEL-NEXT: s_mov_b32 s5, s2
; GFX12-GISEL-NEXT: v_mov_b32_e32 v1, s0
-; GFX12-GISEL-NEXT: s_or_b64 s[4:5], s[8:9], s[4:5]
-; GFX12-GISEL-NEXT: s_mov_b32 s8, s3
-; GFX12-GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX12-GISEL-NEXT: s_or_b64 s[6:7], s[8:9], s[6:7]
+; GFX12-GISEL-NEXT: s_mov_b32 s4, s1
+; GFX12-GISEL-NEXT: s_mov_b32 s5, s2
+; GFX12-GISEL-NEXT: s_mov_b32 s6, s3
; GFX12-GISEL-NEXT: buffer_store_b32 v0, v1, s[4:7], null offen th:TH_STORE_NT scope:SCOPE_SYS
; GFX12-GISEL-NEXT: s_wait_storecnt 0x0
; GFX12-GISEL-NEXT: s_endpgm