Diffstat (limited to 'llvm/test/CodeGen')
 llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll | 40
 llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll | 16
 llvm/test/CodeGen/AArch64/icmp.ll | 51
 llvm/test/CodeGen/AArch64/sme-za-exceptions.ll | 474
 llvm/test/CodeGen/AArch64/vecreduce-bool.ll | 36
 llvm/test/CodeGen/AMDGPU/bf16.ll | 4706
 llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll | 341
 llvm/test/CodeGen/AMDGPU/v_mac.ll | 9
 llvm/test/CodeGen/AMDGPU/v_mac_f16.ll | 14
 llvm/test/CodeGen/LoongArch/lasx/scalarize-fp.ll | 58
 llvm/test/CodeGen/LoongArch/lsx/scalarize-fp.ll | 58
 llvm/test/CodeGen/LoongArch/merge-offset-option.ll | 24
 llvm/test/CodeGen/PowerPC/scalar_cmp.ll | 226
 llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/builtin_printf.ll | 24
 llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/non-constant-printf.ll | 48
 llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bindless_images/i32-in-physical64.ll | 19
 llvm/test/CodeGen/SPIRV/image_store.ll | 22
 llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll | 28
 llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-comparison.ll | 56
 llvm/test/CodeGen/SPIRV/llvm-intrinsics/debugtrap.ll | 14
 llvm/test/CodeGen/SPIRV/llvm-intrinsics/frexp.ll | 114
 llvm/test/CodeGen/SPIRV/llvm-intrinsics/ignore-llvm-intrinsic.ll | 1
 llvm/test/CodeGen/SPIRV/llvm-intrinsics/memmove.ll | 86
 llvm/test/CodeGen/SPIRV/llvm-intrinsics/signed_arithmetic_overflow.ll | 30
 llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll | 3
 llvm/test/CodeGen/SPIRV/transcoding/OpVariable_Initializer.ll | 11
 llvm/test/CodeGen/SPIRV/transcoding/builtin_pipe.ll | 140
 llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_gep.ll | 16
 llvm/test/CodeGen/SPIRV/transcoding/decoration-forward-decl.ll | 30
 llvm/test/CodeGen/SPIRV/transcoding/float16.ll | 25
 llvm/test/CodeGen/X86/avx512-mask-op.ll | 6
 llvm/test/CodeGen/X86/combine-add.ll | 4
 llvm/test/CodeGen/X86/combine-mul.ll | 2
 llvm/test/CodeGen/X86/combine-sdiv.ll | 30
 llvm/test/CodeGen/X86/dpbusd.ll | 202
 llvm/test/CodeGen/X86/dpbusd_const.ll | 192
 llvm/test/CodeGen/X86/known-signbits-shl.ll | 2
 llvm/test/CodeGen/X86/masked_gather_scatter.ll | 33
 llvm/test/CodeGen/X86/negative-sin.ll | 15
 llvm/test/CodeGen/X86/oddsubvector.ll | 12
 llvm/test/CodeGen/X86/pr62286.ll | 38
 llvm/test/CodeGen/X86/pr74736.ll | 20
 llvm/test/CodeGen/X86/shift-i512.ll | 6
 llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll | 24
 llvm/test/CodeGen/X86/vec_shift6.ll | 10
 llvm/test/CodeGen/X86/vector-gep.ll | 128
 llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll | 10
 llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll | 8
 llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll | 6
 llvm/test/CodeGen/X86/vector-mul.ll | 8
 llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll | 2
 llvm/test/CodeGen/X86/vector-shift-shl-128.ll | 4
 llvm/test/CodeGen/X86/vector-shuffle-combining.ll | 4
 llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll | 64
 54 files changed, 6900 insertions(+), 650 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll b/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll
index 1c216e7..e371748 100644
--- a/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll
+++ b/llvm/test/CodeGen/AArch64/bf16-vector-bitcast.ll
@@ -11,6 +11,16 @@ entry:
ret <4 x i16> %1
}
+define <4 x half> @v4bf16_to_v4f16(float, <4 x bfloat> %a) nounwind {
+; CHECK-LABEL: v4bf16_to_v4f16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ret
+entry:
+ %1 = bitcast <4 x bfloat> %a to <4 x half>
+ ret <4 x half> %1
+}
+
define <2 x i32> @v4bf16_to_v2i32(float, <4 x bfloat> %a) nounwind {
; CHECK-LABEL: v4bf16_to_v2i32:
; CHECK: // %bb.0: // %entry
@@ -82,6 +92,16 @@ entry:
ret <4 x bfloat> %1
}
+define <4 x bfloat> @v4f16_to_v4bf16(float, <4 x half> %a) nounwind {
+; CHECK-LABEL: v4f16_to_v4bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: fmov d0, d1
+; CHECK-NEXT: ret
+entry:
+ %1 = bitcast <4 x half> %a to <4 x bfloat>
+ ret <4 x bfloat> %1
+}
+
define <4 x bfloat> @v2i32_to_v4bf16(float, <2 x i32> %a) nounwind {
; CHECK-LABEL: v2i32_to_v4bf16:
; CHECK: // %bb.0: // %entry
@@ -152,6 +172,16 @@ entry:
ret <8 x i16> %1
}
+define <8 x half> @v8bf16_to_v8f16(float, <8 x bfloat> %a) nounwind {
+; CHECK-LABEL: v8bf16_to_v8f16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov v0.16b, v1.16b
+; CHECK-NEXT: ret
+entry:
+ %1 = bitcast <8 x bfloat> %a to <8 x half>
+ ret <8 x half> %1
+}
+
define <4 x i32> @v8bf16_to_v4i32(float, <8 x bfloat> %a) nounwind {
; CHECK-LABEL: v8bf16_to_v4i32:
; CHECK: // %bb.0: // %entry
@@ -202,6 +232,16 @@ entry:
ret <8 x bfloat> %1
}
+define <8 x bfloat> @v8f16_to_v8bf16(float, <8 x half> %a) nounwind {
+; CHECK-LABEL: v8f16_to_v8bf16:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: mov v0.16b, v1.16b
+; CHECK-NEXT: ret
+entry:
+ %1 = bitcast <8 x half> %a to <8 x bfloat>
+ ret <8 x bfloat> %1
+}
+
define <8 x bfloat> @v4i32_to_v8bf16(float, <4 x i32> %a) nounwind {
; CHECK-LABEL: v4i32_to_v8bf16:
; CHECK: // %bb.0: // %entry
diff --git a/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll b/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll
index 0960c4c..a56d5b1 100644
--- a/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll
+++ b/llvm/test/CodeGen/AArch64/check-sign-bit-before-extension.ll
@@ -78,9 +78,8 @@ B:
define i32 @g_i8_sign_extend_inreg(i8 %in, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: g_i8_sign_extend_inreg:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: sxtb w8, w0
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: csel w8, w1, w2, mi
+; CHECK-NEXT: tst w0, #0x80
+; CHECK-NEXT: csel w8, w1, w2, ne
; CHECK-NEXT: add w0, w8, w0, uxtb
; CHECK-NEXT: ret
entry:
@@ -100,9 +99,8 @@ B:
define i32 @g_i16_sign_extend_inreg(i16 %in, i32 %a, i32 %b) nounwind {
; CHECK-LABEL: g_i16_sign_extend_inreg:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: sxth w8, w0
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: csel w8, w1, w2, mi
+; CHECK-NEXT: tst w0, #0x8000
+; CHECK-NEXT: csel w8, w1, w2, ne
; CHECK-NEXT: add w0, w8, w0, uxth
; CHECK-NEXT: ret
entry:
@@ -167,10 +165,8 @@ B:
define i64 @g_i32_sign_extend_i64(i32 %in, i64 %a, i64 %b) nounwind {
; CHECK-LABEL: g_i32_sign_extend_i64:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
-; CHECK-NEXT: sxtw x8, w0
-; CHECK-NEXT: cmp x8, #0
-; CHECK-NEXT: csel x8, x1, x2, mi
+; CHECK-NEXT: tst w0, #0x80000000
+; CHECK-NEXT: csel x8, x1, x2, ne
; CHECK-NEXT: add x0, x8, w0, uxtw
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/icmp.ll b/llvm/test/CodeGen/AArch64/icmp.ll
index 18665bc..7195e2b 100644
--- a/llvm/test/CodeGen/AArch64/icmp.ll
+++ b/llvm/test/CodeGen/AArch64/icmp.ll
@@ -2093,3 +2093,54 @@ define <2 x i1> @icmp_slt_v2i64_Zero_LHS(<2 x i64> %a) {
%c = icmp slt <2 x i64> <i64 0, i64 0>, %a
ret <2 x i1> %c
}
+
+; Test TST optimization for i8 sign bit testing with cross-type select
+; This tests the pattern: icmp slt i8 %val, 0; select i1 %cmp, i32 %a, i32 %b
+; The optimization should convert sxtb+cmp to tst for sign bit testing.
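+; Illustrative sketch (hypothetical IR, not exercised by the RUN lines): for
+; any i8 %v, `icmp slt i8 %v, 0` tests only the sign bit, so it is equivalent
+; to:
+;   %bit = and i8 %v, -128     ; isolate bit 7 (0x80)
+;   %cmp = icmp ne i8 %bit, 0
+; which is why the backend can lower it to `tst wN, #0x80` + `csel ..., ne`
+; instead of `sxtb` + `cmp` + `csel ..., mi`.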
+
+define i32 @i8_signbit_tst_constants(i8 %x, i8 %y) {
+; CHECK-SD-LABEL: i8_signbit_tst_constants:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add w9, w0, w1
+; CHECK-SD-NEXT: mov w8, #42 // =0x2a
+; CHECK-SD-NEXT: tst w9, #0x80
+; CHECK-SD-NEXT: mov w9, #20894 // =0x519e
+; CHECK-SD-NEXT: csel w0, w9, w8, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: i8_signbit_tst_constants:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, w1
+; CHECK-GI-NEXT: mov w9, #42 // =0x2a
+; CHECK-GI-NEXT: mov w10, #20894 // =0x519e
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: cmp w8, #0
+; CHECK-GI-NEXT: csel w0, w10, w9, mi
+; CHECK-GI-NEXT: ret
+ %add = add i8 %x, %y
+ %cmp = icmp slt i8 %add, 0
+ %sel = select i1 %cmp, i32 20894, i32 42
+ ret i32 %sel
+}
+
+; Test i8 sign bit testing with variable select values (problematic case)
+define i32 @i8_signbit_variables(i8 %x, i8 %y, i32 %a, i32 %b) {
+; CHECK-SD-LABEL: i8_signbit_variables:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: add w8, w0, w1
+; CHECK-SD-NEXT: tst w8, #0x80
+; CHECK-SD-NEXT: csel w0, w2, w3, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: i8_signbit_variables:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: add w8, w0, w1
+; CHECK-GI-NEXT: sxtb w8, w8
+; CHECK-GI-NEXT: cmp w8, #0
+; CHECK-GI-NEXT: csel w0, w2, w3, mi
+; CHECK-GI-NEXT: ret
+ %add = add i8 %x, %y
+ %cmp = icmp slt i8 %add, 0
+ %sel = select i1 %cmp, i32 %a, i32 %b
+ ret i32 %sel
+}
diff --git a/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll b/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll
index fc43c71..b6dee97e 100644
--- a/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll
+++ b/llvm/test/CodeGen/AArch64/sme-za-exceptions.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme -aarch64-new-sme-abi -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -aarch64-new-sme-abi -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -verify-machineinstrs < %s | FileCheck %s --check-prefix=CHECK-SDAG
; A simple EH test case that corresponds to the following C++ source:
;
@@ -87,6 +88,90 @@ define void @za_with_raii(i1 %fail) "aarch64_inout_za" personality ptr @__gxx_pe
; CHECK-NEXT: mov x0, x19
; CHECK-NEXT: msr TPIDR2_EL0, x8
; CHECK-NEXT: bl _Unwind_Resume
+;
+; CHECK-SDAG-LABEL: za_with_raii:
+; CHECK-SDAG: .Lfunc_begin0:
+; CHECK-SDAG-NEXT: .cfi_startproc
+; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0
+; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception0
+; CHECK-SDAG-NEXT: // %bb.0:
+; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-SDAG-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-SDAG-NEXT: mov x29, sp
+; CHECK-SDAG-NEXT: sub sp, sp, #16
+; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32
+; CHECK-SDAG-NEXT: .cfi_offset w19, -8
+; CHECK-SDAG-NEXT: .cfi_offset w20, -16
+; CHECK-SDAG-NEXT: .cfi_offset w30, -24
+; CHECK-SDAG-NEXT: .cfi_offset w29, -32
+; CHECK-SDAG-NEXT: rdsvl x8, #1
+; CHECK-SDAG-NEXT: mov x9, sp
+; CHECK-SDAG-NEXT: msub x9, x8, x8, x9
+; CHECK-SDAG-NEXT: mov sp, x9
+; CHECK-SDAG-NEXT: stp x9, x8, [x29, #-16]
+; CHECK-SDAG-NEXT: tbnz w0, #0, .LBB0_2
+; CHECK-SDAG-NEXT: // %bb.1: // %return_normally
+; CHECK-SDAG-NEXT: mov sp, x29
+; CHECK-SDAG-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; CHECK-SDAG-NEXT: b shared_za_call
+; CHECK-SDAG-NEXT: .LBB0_2: // %throw_exception
+; CHECK-SDAG-NEXT: sub x20, x29, #16
+; CHECK-SDAG-NEXT: mov w0, #8 // =0x8
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x20
+; CHECK-SDAG-NEXT: bl __cxa_allocate_exception
+; CHECK-SDAG-NEXT: mov x8, x0
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x9, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x9, .LBB0_4
+; CHECK-SDAG-NEXT: // %bb.3: // %throw_exception
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB0_4: // %throw_exception
+; CHECK-SDAG-NEXT: adrp x9, .L.str
+; CHECK-SDAG-NEXT: add x9, x9, :lo12:.L.str
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: str x9, [x8]
+; CHECK-SDAG-NEXT: .Ltmp0: // EH_LABEL
+; CHECK-SDAG-NEXT: adrp x1, :got:typeinfo_for_char_const_ptr
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x20
+; CHECK-SDAG-NEXT: mov x0, x8
+; CHECK-SDAG-NEXT: ldr x1, [x1, :got_lo12:typeinfo_for_char_const_ptr]
+; CHECK-SDAG-NEXT: mov x2, xzr
+; CHECK-SDAG-NEXT: bl __cxa_throw
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB0_6
+; CHECK-SDAG-NEXT: // %bb.5: // %throw_exception
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB0_6: // %throw_exception
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: .Ltmp1: // EH_LABEL
+; CHECK-SDAG-NEXT: // %bb.7: // %throw_fail
+; CHECK-SDAG-NEXT: .LBB0_8: // %unwind_dtors
+; CHECK-SDAG-NEXT: .Ltmp2: // EH_LABEL
+; CHECK-SDAG-NEXT: mov x19, x0
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB0_10
+; CHECK-SDAG-NEXT: // %bb.9: // %unwind_dtors
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB0_10: // %unwind_dtors
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: bl shared_za_call
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x20
+; CHECK-SDAG-NEXT: bl _Unwind_Resume
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB0_12
+; CHECK-SDAG-NEXT: // %bb.11: // %unwind_dtors
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB0_12: // %unwind_dtors
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
br i1 %fail, label %throw_exception, label %return_normally
throw_exception:
@@ -124,7 +209,7 @@ throw_fail:
; }
; shared_za_call();
; }
-define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_personality_v0 {
+define void @try_catch() "aarch64_inout_za" personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: try_catch:
; CHECK: .Lfunc_begin1:
; CHECK-NEXT: .cfi_startproc
@@ -142,11 +227,11 @@ define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_per
; CHECK-NEXT: msub x9, x8, x8, x9
; CHECK-NEXT: mov sp, x9
; CHECK-NEXT: stp x9, x8, [x29, #-16]
-; CHECK-NEXT: .Ltmp3:
+; CHECK-NEXT: .Ltmp3: // EH_LABEL
; CHECK-NEXT: sub x8, x29, #16
; CHECK-NEXT: msr TPIDR2_EL0, x8
; CHECK-NEXT: bl may_throw
-; CHECK-NEXT: .Ltmp4:
+; CHECK-NEXT: .Ltmp4: // EH_LABEL
; CHECK-NEXT: .LBB1_1: // %after_catch
; CHECK-NEXT: smstart za
; CHECK-NEXT: mrs x8, TPIDR2_EL0
@@ -160,7 +245,7 @@ define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_per
; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
; CHECK-NEXT: b shared_za_call
; CHECK-NEXT: .LBB1_4: // %catch
-; CHECK-NEXT: .Ltmp5:
+; CHECK-NEXT: .Ltmp5: // EH_LABEL
; CHECK-NEXT: bl __cxa_begin_catch
; CHECK-NEXT: smstart za
; CHECK-NEXT: mrs x8, TPIDR2_EL0
@@ -175,6 +260,78 @@ define dso_local void @try_catch() "aarch64_inout_za" personality ptr @__gxx_per
; CHECK-NEXT: msr TPIDR2_EL0, x8
; CHECK-NEXT: bl __cxa_end_catch
; CHECK-NEXT: b .LBB1_1
+;
+; CHECK-SDAG-LABEL: try_catch:
+; CHECK-SDAG: .Lfunc_begin1:
+; CHECK-SDAG-NEXT: .cfi_startproc
+; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0
+; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception1
+; CHECK-SDAG-NEXT: // %bb.0:
+; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill
+; CHECK-SDAG-NEXT: mov x29, sp
+; CHECK-SDAG-NEXT: sub sp, sp, #16
+; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32
+; CHECK-SDAG-NEXT: .cfi_offset w19, -16
+; CHECK-SDAG-NEXT: .cfi_offset w30, -24
+; CHECK-SDAG-NEXT: .cfi_offset w29, -32
+; CHECK-SDAG-NEXT: rdsvl x8, #1
+; CHECK-SDAG-NEXT: mov x9, sp
+; CHECK-SDAG-NEXT: msub x9, x8, x8, x9
+; CHECK-SDAG-NEXT: mov sp, x9
+; CHECK-SDAG-NEXT: stp x9, x8, [x29, #-16]
+; CHECK-SDAG-NEXT: .Ltmp3: // EH_LABEL
+; CHECK-SDAG-NEXT: sub x19, x29, #16
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19
+; CHECK-SDAG-NEXT: bl may_throw
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB1_2
+; CHECK-SDAG-NEXT: // %bb.1:
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB1_2:
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: .Ltmp4: // EH_LABEL
+; CHECK-SDAG-NEXT: .LBB1_3: // %after_catch
+; CHECK-SDAG-NEXT: mov sp, x29
+; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload
+; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; CHECK-SDAG-NEXT: b shared_za_call
+; CHECK-SDAG-NEXT: .LBB1_4: // %catch
+; CHECK-SDAG-NEXT: .Ltmp5: // EH_LABEL
+; CHECK-SDAG-NEXT: mov x1, x0
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB1_6
+; CHECK-SDAG-NEXT: // %bb.5: // %catch
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB1_6: // %catch
+; CHECK-SDAG-NEXT: mov x0, x1
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19
+; CHECK-SDAG-NEXT: bl __cxa_begin_catch
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB1_8
+; CHECK-SDAG-NEXT: // %bb.7: // %catch
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB1_8: // %catch
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: bl shared_za_call
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19
+; CHECK-SDAG-NEXT: bl __cxa_end_catch
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB1_10
+; CHECK-SDAG-NEXT: // %bb.9: // %catch
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB1_10: // %catch
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: b .LBB1_3
invoke void @may_throw()
to label %after_catch unwind label %catch
@@ -235,16 +392,16 @@ define void @try_catch_shared_za_callee() "aarch64_new_za" personality ptr @__gx
; CHECK-NEXT: zero {za}
; CHECK-NEXT: .LBB2_2:
; CHECK-NEXT: smstart za
-; CHECK-NEXT: .Ltmp6:
+; CHECK-NEXT: .Ltmp6: // EH_LABEL
; CHECK-NEXT: bl shared_za_call
-; CHECK-NEXT: .Ltmp7:
+; CHECK-NEXT: .Ltmp7: // EH_LABEL
; CHECK-NEXT: .LBB2_3: // %exit
; CHECK-NEXT: smstop za
; CHECK-NEXT: mov sp, x29
; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB2_4: // %catch
-; CHECK-NEXT: .Ltmp8:
+; CHECK-NEXT: .Ltmp8: // EH_LABEL
; CHECK-NEXT: bl __cxa_begin_catch
; CHECK-NEXT: smstart za
; CHECK-NEXT: mrs x8, TPIDR2_EL0
@@ -260,6 +417,78 @@ define void @try_catch_shared_za_callee() "aarch64_new_za" personality ptr @__gx
; CHECK-NEXT: bl __cxa_end_catch
; CHECK-NEXT: msr TPIDR2_EL0, xzr
; CHECK-NEXT: b .LBB2_3
+;
+; CHECK-SDAG-LABEL: try_catch_shared_za_callee:
+; CHECK-SDAG: .Lfunc_begin2:
+; CHECK-SDAG-NEXT: .cfi_startproc
+; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0
+; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception2
+; CHECK-SDAG-NEXT: // %bb.0: // %prelude
+; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill
+; CHECK-SDAG-NEXT: mov x29, sp
+; CHECK-SDAG-NEXT: sub sp, sp, #16
+; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32
+; CHECK-SDAG-NEXT: .cfi_offset w19, -16
+; CHECK-SDAG-NEXT: .cfi_offset w30, -24
+; CHECK-SDAG-NEXT: .cfi_offset w29, -32
+; CHECK-SDAG-NEXT: rdsvl x8, #1
+; CHECK-SDAG-NEXT: mov x9, sp
+; CHECK-SDAG-NEXT: msub x9, x8, x8, x9
+; CHECK-SDAG-NEXT: mov sp, x9
+; CHECK-SDAG-NEXT: stp x9, x8, [x29, #-16]
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: cbz x8, .LBB2_2
+; CHECK-SDAG-NEXT: // %bb.1: // %save.za
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_save
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: .LBB2_2:
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: zero {za}
+; CHECK-SDAG-NEXT: .Ltmp6: // EH_LABEL
+; CHECK-SDAG-NEXT: bl shared_za_call
+; CHECK-SDAG-NEXT: .Ltmp7: // EH_LABEL
+; CHECK-SDAG-NEXT: .LBB2_3: // %exit
+; CHECK-SDAG-NEXT: smstop za
+; CHECK-SDAG-NEXT: mov sp, x29
+; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload
+; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; CHECK-SDAG-NEXT: ret
+; CHECK-SDAG-NEXT: .LBB2_4: // %catch
+; CHECK-SDAG-NEXT: .Ltmp8: // EH_LABEL
+; CHECK-SDAG-NEXT: mov x1, x0
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: sub x19, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB2_6
+; CHECK-SDAG-NEXT: // %bb.5: // %catch
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB2_6: // %catch
+; CHECK-SDAG-NEXT: mov x0, x1
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19
+; CHECK-SDAG-NEXT: bl __cxa_begin_catch
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB2_8
+; CHECK-SDAG-NEXT: // %bb.7: // %catch
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB2_8: // %catch
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: bl noexcept_shared_za_call
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, x19
+; CHECK-SDAG-NEXT: bl __cxa_end_catch
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-SDAG-NEXT: sub x0, x29, #16
+; CHECK-SDAG-NEXT: cbnz x8, .LBB2_10
+; CHECK-SDAG-NEXT: // %bb.9: // %catch
+; CHECK-SDAG-NEXT: bl __arm_tpidr2_restore
+; CHECK-SDAG-NEXT: .LBB2_10: // %catch
+; CHECK-SDAG-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-SDAG-NEXT: b .LBB2_3
invoke void @shared_za_call() #4
to label %exit unwind label %catch
catch:
@@ -275,6 +504,234 @@ exit:
ret void
}
+; A simple ZT0 exception example that corresponds to:
+;
+; struct ZT0Resource {
+; ~ZT0Resource() __arm_inout("zt0") {
+; shared_zt0_call(); // simulate cleanup in destructor
+; }
+; };
+;
+; void za_with_raii() __arm_inout("zt0") {
+; ZT0Resource r;
+; may_throw();
+; }
+;
+; This code may require reloading ZT0 in the cleanup for ~ZT0Resource().
+;
+; FIXME: Codegen with `-aarch64-new-sme-abi` is broken with ZT0 (as it is not implemented).
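+;
+; Illustrative shape of the cleanup path (a hypothetical sketch mirroring the
+; CHECK lines below; `<zt0 save slot>` stands in for the stack slot the
+; compiler picks):
+;   smstart za                  ; re-enable ZA so ZT0 is accessible again
+;   ldr zt0, [<zt0 save slot>]  ; reload ZT0 saved before the call to may_throw()
+;   bl shared_zt0_call          ; destructor body runs with live ZT0
+;   str zt0, [<zt0 save slot>]  ; re-save ZT0 before resuming unwinding
+;   smstop za
+;   bl _Unwind_Resume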
+define void @try_catch_shared_zt0_callee() "aarch64_inout_zt0" personality ptr @__gxx_personality_v0 {
+; CHECK-LABEL: try_catch_shared_zt0_callee:
+; CHECK: .Lfunc_begin3:
+; CHECK-NEXT: .cfi_startproc
+; CHECK-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0
+; CHECK-NEXT: .cfi_lsda 28, .Lexception3
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: mov x29, sp
+; CHECK-NEXT: sub sp, sp, #80
+; CHECK-NEXT: .cfi_def_cfa w29, 32
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w30, -24
+; CHECK-NEXT: .cfi_offset w29, -32
+; CHECK-NEXT: rdsvl x8, #1
+; CHECK-NEXT: mov x9, sp
+; CHECK-NEXT: msub x9, x8, x8, x9
+; CHECK-NEXT: mov sp, x9
+; CHECK-NEXT: stp x9, x8, [x29, #-80]
+; CHECK-NEXT: .Ltmp9: // EH_LABEL
+; CHECK-NEXT: sub x19, x29, #64
+; CHECK-NEXT: str zt0, [x19]
+; CHECK-NEXT: smstop za
+; CHECK-NEXT: bl may_throw
+; CHECK-NEXT: smstart za
+; CHECK-NEXT: ldr zt0, [x19]
+; CHECK-NEXT: .Ltmp10: // EH_LABEL
+; CHECK-NEXT: // %bb.1: // %return_normally
+; CHECK-NEXT: mov sp, x29
+; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB3_2: // %unwind_dtors
+; CHECK-NEXT: .Ltmp11: // EH_LABEL
+; CHECK-NEXT: sub x20, x29, #64
+; CHECK-NEXT: mov x19, x0
+; CHECK-NEXT: smstart za
+; CHECK-NEXT: mrs x8, TPIDR2_EL0
+; CHECK-NEXT: sub x0, x29, #80
+; CHECK-NEXT: cbnz x8, .LBB3_4
+; CHECK-NEXT: // %bb.3: // %unwind_dtors
+; CHECK-NEXT: bl __arm_tpidr2_restore
+; CHECK-NEXT: .LBB3_4: // %unwind_dtors
+; CHECK-NEXT: msr TPIDR2_EL0, xzr
+; CHECK-NEXT: bl shared_zt0_call
+; CHECK-NEXT: str zt0, [x20]
+; CHECK-NEXT: smstop za
+; CHECK-NEXT: mov x0, x19
+; CHECK-NEXT: bl _Unwind_Resume
+; CHECK-NEXT: smstart za
+; CHECK-NEXT: ldr zt0, [x20]
+;
+; CHECK-SDAG-LABEL: try_catch_shared_zt0_callee:
+; CHECK-SDAG: .Lfunc_begin3:
+; CHECK-SDAG-NEXT: .cfi_startproc
+; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0
+; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception3
+; CHECK-SDAG-NEXT: // %bb.0:
+; CHECK-SDAG-NEXT: sub sp, sp, #96
+; CHECK-SDAG-NEXT: str x30, [sp, #64] // 8-byte Folded Spill
+; CHECK-SDAG-NEXT: stp x20, x19, [sp, #80] // 16-byte Folded Spill
+; CHECK-SDAG-NEXT: .cfi_def_cfa_offset 96
+; CHECK-SDAG-NEXT: .cfi_offset w19, -8
+; CHECK-SDAG-NEXT: .cfi_offset w20, -16
+; CHECK-SDAG-NEXT: .cfi_offset w30, -32
+; CHECK-SDAG-NEXT: .Ltmp9: // EH_LABEL
+; CHECK-SDAG-NEXT: mov x19, sp
+; CHECK-SDAG-NEXT: str zt0, [x19]
+; CHECK-SDAG-NEXT: smstop za
+; CHECK-SDAG-NEXT: bl may_throw
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: ldr zt0, [x19]
+; CHECK-SDAG-NEXT: .Ltmp10: // EH_LABEL
+; CHECK-SDAG-NEXT: // %bb.1: // %return_normally
+; CHECK-SDAG-NEXT: ldp x20, x19, [sp, #80] // 16-byte Folded Reload
+; CHECK-SDAG-NEXT: ldr x30, [sp, #64] // 8-byte Folded Reload
+; CHECK-SDAG-NEXT: add sp, sp, #96
+; CHECK-SDAG-NEXT: ret
+; CHECK-SDAG-NEXT: .LBB3_2: // %unwind_dtors
+; CHECK-SDAG-NEXT: .Ltmp11: // EH_LABEL
+; CHECK-SDAG-NEXT: mov x20, sp
+; CHECK-SDAG-NEXT: mov x19, x0
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: ldr zt0, [x20]
+; CHECK-SDAG-NEXT: bl shared_zt0_call
+; CHECK-SDAG-NEXT: str zt0, [x20]
+; CHECK-SDAG-NEXT: smstop za
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl _Unwind_Resume
+; CHECK-SDAG-NEXT: smstart za
+; CHECK-SDAG-NEXT: ldr zt0, [x20]
+ invoke void @may_throw()
+ to label %return_normally unwind label %unwind_dtors
+
+unwind_dtors:
+ %5 = landingpad { ptr, i32 }
+ cleanup
+ tail call void @shared_zt0_call()
+ resume { ptr, i32 } %5
+
+return_normally:
+ ret void
+}
+
+; This example corresponds to:
+;
+; __arm_agnostic("sme_za_state") void try_catch_agnostic_za()
+; {
+; try {
+; may_throw();
+; } catch(...) {
+; }
+; }
+;
+; In this example we must execute __arm_sme_restore once we enter the catch block
+; (before executing __arm_sme_save again, which would invalidate the prior save).
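+;
+; Illustrative ordering in the catch block (a hypothetical sketch; `%buf`
+; stands for the buffer sized by __arm_sme_state_size):
+;   call void @__arm_sme_restore(ptr %buf) ; reload state saved before may_throw()
+;   call void @__arm_sme_save(ptr %buf)    ; only now is it safe to save again
+;   call ptr @__cxa_begin_catch(ptr %exn)
+; Calling __arm_sme_save first would invalidate the prior save.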
+define void @try_catch_agnostic_za() "aarch64_za_state_agnostic" personality ptr @__gxx_personality_v0 {
+; CHECK-LABEL: try_catch_agnostic_za:
+; CHECK: .Lfunc_begin4:
+; CHECK-NEXT: .cfi_startproc
+; CHECK-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0
+; CHECK-NEXT: .cfi_lsda 28, .Lexception4
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-NEXT: str x19, [sp, #16] // 8-byte Folded Spill
+; CHECK-NEXT: mov x29, sp
+; CHECK-NEXT: .cfi_def_cfa w29, 32
+; CHECK-NEXT: .cfi_offset w19, -16
+; CHECK-NEXT: .cfi_offset w30, -24
+; CHECK-NEXT: .cfi_offset w29, -32
+; CHECK-NEXT: bl __arm_sme_state_size
+; CHECK-NEXT: sub sp, sp, x0
+; CHECK-NEXT: mov x19, sp
+; CHECK-NEXT: .Ltmp12: // EH_LABEL
+; CHECK-NEXT: mov x0, x19
+; CHECK-NEXT: bl __arm_sme_save
+; CHECK-NEXT: bl may_throw
+; CHECK-NEXT: .Ltmp13: // EH_LABEL
+; CHECK-NEXT: .LBB4_1: // %exit
+; CHECK-NEXT: mov x0, x19
+; CHECK-NEXT: bl __arm_sme_restore
+; CHECK-NEXT: mov sp, x29
+; CHECK-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload
+; CHECK-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB4_2: // %catch
+; CHECK-NEXT: .Ltmp14: // EH_LABEL
+; CHECK-NEXT: bl __cxa_begin_catch
+; CHECK-NEXT: bl __cxa_end_catch
+; CHECK-NEXT: b .LBB4_1
+;
+; CHECK-SDAG-LABEL: try_catch_agnostic_za:
+; CHECK-SDAG: .Lfunc_begin4:
+; CHECK-SDAG-NEXT: .cfi_startproc
+; CHECK-SDAG-NEXT: .cfi_personality 156, DW.ref.__gxx_personality_v0
+; CHECK-SDAG-NEXT: .cfi_lsda 28, .Lexception4
+; CHECK-SDAG-NEXT: // %bb.0:
+; CHECK-SDAG-NEXT: stp x29, x30, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-SDAG-NEXT: str x19, [sp, #16] // 8-byte Folded Spill
+; CHECK-SDAG-NEXT: mov x29, sp
+; CHECK-SDAG-NEXT: .cfi_def_cfa w29, 32
+; CHECK-SDAG-NEXT: .cfi_offset w19, -16
+; CHECK-SDAG-NEXT: .cfi_offset w30, -24
+; CHECK-SDAG-NEXT: .cfi_offset w29, -32
+; CHECK-SDAG-NEXT: bl __arm_sme_state_size
+; CHECK-SDAG-NEXT: sub sp, sp, x0
+; CHECK-SDAG-NEXT: mov x19, sp
+; CHECK-SDAG-NEXT: .Ltmp12: // EH_LABEL
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_save
+; CHECK-SDAG-NEXT: bl may_throw
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_restore
+; CHECK-SDAG-NEXT: .Ltmp13: // EH_LABEL
+; CHECK-SDAG-NEXT: .LBB4_1: // %exit
+; CHECK-SDAG-NEXT: mov sp, x29
+; CHECK-SDAG-NEXT: ldr x19, [sp, #16] // 8-byte Folded Reload
+; CHECK-SDAG-NEXT: ldp x29, x30, [sp], #32 // 16-byte Folded Reload
+; CHECK-SDAG-NEXT: ret
+; CHECK-SDAG-NEXT: .LBB4_2: // %catch
+; CHECK-SDAG-NEXT: .Ltmp14: // EH_LABEL
+; CHECK-SDAG-NEXT: mov x1, x0
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_restore
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_save
+; CHECK-SDAG-NEXT: mov x0, x1
+; CHECK-SDAG-NEXT: bl __cxa_begin_catch
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_restore
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_save
+; CHECK-SDAG-NEXT: bl __cxa_end_catch
+; CHECK-SDAG-NEXT: mov x0, x19
+; CHECK-SDAG-NEXT: bl __arm_sme_restore
+; CHECK-SDAG-NEXT: b .LBB4_1
+ invoke void @may_throw()
+ to label %exit unwind label %catch
+catch:
+ %eh_info = landingpad { ptr, i32 }
+ catch ptr null
+ %exception_ptr = extractvalue { ptr, i32 } %eh_info, 0
+ tail call ptr @__cxa_begin_catch(ptr %exception_ptr)
+ tail call void @__cxa_end_catch()
+ br label %exit
+
+exit:
+ ret void
+}
+
declare ptr @__cxa_allocate_exception(i64)
declare void @__cxa_throw(ptr, ptr, ptr)
declare ptr @__cxa_begin_catch(ptr)
@@ -284,3 +741,4 @@ declare i32 @__gxx_personality_v0(...)
declare void @may_throw()
declare void @shared_za_call() "aarch64_inout_za"
declare void @noexcept_shared_za_call() "aarch64_inout_za"
+declare void @shared_zt0_call() "aarch64_inout_zt0"
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-bool.ll b/llvm/test/CodeGen/AArch64/vecreduce-bool.ll
index 62d41fc..19e1aa5 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-bool.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-bool.ll
@@ -26,9 +26,9 @@ define i32 @reduce_and_v1i8(<1 x i8> %a0, i32 %a1, i32 %a2) nounwind {
; CHECK-LABEL: reduce_and_v1i8:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: smov w8, v0.b[0]
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: csel w0, w0, w1, mi
+; CHECK-NEXT: umov w8, v0.b[0]
+; CHECK-NEXT: tst w8, #0x80
+; CHECK-NEXT: csel w0, w0, w1, ne
; CHECK-NEXT: ret
%x = icmp slt <1 x i8> %a0, zeroinitializer
%y = call i1 @llvm.vector.reduce.and.v1i1(<1 x i1> %x)
@@ -120,9 +120,9 @@ define i32 @reduce_and_v1i16(<1 x i16> %a0, i32 %a1, i32 %a2) nounwind {
; CHECK-LABEL: reduce_and_v1i16:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: smov w8, v0.h[0]
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: csel w0, w0, w1, mi
+; CHECK-NEXT: umov w8, v0.h[0]
+; CHECK-NEXT: tst w8, #0x8000
+; CHECK-NEXT: csel w0, w0, w1, ne
; CHECK-NEXT: ret
%x = icmp slt <1 x i16> %a0, zeroinitializer
%y = call i1 @llvm.vector.reduce.and.v1i1(<1 x i1> %x)
@@ -305,9 +305,9 @@ define i32 @reduce_or_v1i8(<1 x i8> %a0, i32 %a1, i32 %a2) nounwind {
; CHECK-LABEL: reduce_or_v1i8:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: smov w8, v0.b[0]
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: csel w0, w0, w1, mi
+; CHECK-NEXT: umov w8, v0.b[0]
+; CHECK-NEXT: tst w8, #0x80
+; CHECK-NEXT: csel w0, w0, w1, ne
; CHECK-NEXT: ret
%x = icmp slt <1 x i8> %a0, zeroinitializer
%y = call i1 @llvm.vector.reduce.or.v1i1(<1 x i1> %x)
@@ -399,9 +399,9 @@ define i32 @reduce_or_v1i16(<1 x i16> %a0, i32 %a1, i32 %a2) nounwind {
; CHECK-LABEL: reduce_or_v1i16:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: smov w8, v0.h[0]
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: csel w0, w0, w1, mi
+; CHECK-NEXT: umov w8, v0.h[0]
+; CHECK-NEXT: tst w8, #0x8000
+; CHECK-NEXT: csel w0, w0, w1, ne
; CHECK-NEXT: ret
%x = icmp slt <1 x i16> %a0, zeroinitializer
%y = call i1 @llvm.vector.reduce.or.v1i1(<1 x i1> %x)
@@ -584,9 +584,9 @@ define i32 @reduce_xor_v1i8(<1 x i8> %a0, i32 %a1, i32 %a2) nounwind {
; CHECK-LABEL: reduce_xor_v1i8:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: smov w8, v0.b[0]
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: csel w0, w0, w1, mi
+; CHECK-NEXT: umov w8, v0.b[0]
+; CHECK-NEXT: tst w8, #0x80
+; CHECK-NEXT: csel w0, w0, w1, ne
; CHECK-NEXT: ret
%x = icmp slt <1 x i8> %a0, zeroinitializer
%y = call i1 @llvm.vector.reduce.xor.v1i1(<1 x i1> %x)
@@ -679,9 +679,9 @@ define i32 @reduce_xor_v1i16(<1 x i16> %a0, i32 %a1, i32 %a2) nounwind {
; CHECK-LABEL: reduce_xor_v1i16:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
-; CHECK-NEXT: smov w8, v0.h[0]
-; CHECK-NEXT: cmp w8, #0
-; CHECK-NEXT: csel w0, w0, w1, mi
+; CHECK-NEXT: umov w8, v0.h[0]
+; CHECK-NEXT: tst w8, #0x8000
+; CHECK-NEXT: csel w0, w0, w1, ne
; CHECK-NEXT: ret
%x = icmp slt <1 x i16> %a0, zeroinitializer
%y = call i1 @llvm.vector.reduce.xor.v1i1(<1 x i1> %x)
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index 0490e5a..94ba5cd 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -10908,12 +10908,13 @@ define <2 x bfloat> @v_fadd_v2bf16(<2 x bfloat> %a, <2 x bfloat> %b) {
; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11FAKE16-NEXT: v_perm_b32 v0, v0, v1, 0x7060302
; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
; GFX1250-LABEL: v_fadd_v2bf16:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_pk_add_bf16 v0, v0, v1
-; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_pk_add_bf16 v0, v0, v1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%op = fadd <2 x bfloat> %a, %b
ret <2 x bfloat> %op
}
@@ -11446,13 +11447,14 @@ define <4 x bfloat> @v_fadd_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b) {
; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11FAKE16-NEXT: v_perm_b32 v1, v1, v4, 0x7060302
; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
; GFX1250-LABEL: v_fadd_v4bf16:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: v_pk_add_bf16 v0, v0, v2
-; GFX1250-NEXT: v_pk_add_bf16 v1, v1, v3
-; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_pk_add_bf16 v0, v0, v2
+; GFX1250-NEXT: v_pk_add_bf16 v1, v1, v3
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%op = fadd <4 x bfloat> %a, %b
ret <4 x bfloat> %op
}
@@ -49991,6 +49993,622 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat>
ret <4 x bfloat> %op
}
+define <8 x bfloat> @v_fma_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %c) {
+; GCN-LABEL: v_fma_v8bf16:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
+; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
+; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23
+; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
+; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14
+; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22
+; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
+; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
+; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21
+; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
+; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
+; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20
+; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
+; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19
+; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
+; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
+; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18
+; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
+; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
+; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
+; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17
+; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GCN-NEXT: v_fma_f32 v7, v7, v15, v23
+; GCN-NEXT: v_fma_f32 v6, v6, v14, v22
+; GCN-NEXT: v_fma_f32 v5, v5, v13, v21
+; GCN-NEXT: v_fma_f32 v4, v4, v12, v20
+; GCN-NEXT: v_fma_f32 v3, v3, v11, v19
+; GCN-NEXT: v_fma_f32 v2, v2, v10, v18
+; GCN-NEXT: v_fma_f32 v1, v1, v9, v17
+; GCN-NEXT: v_fma_f32 v0, v0, v8, v16
+; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_fma_v8bf16:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
+; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
+; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23
+; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX7-NEXT: v_fma_f32 v7, v7, v15, v23
+; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
+; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
+; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v22
+; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX7-NEXT: v_fma_f32 v6, v6, v14, v15
+; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
+; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
+; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v21
+; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX7-NEXT: v_fma_f32 v5, v5, v13, v14
+; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
+; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12
+; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v20
+; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX7-NEXT: v_fma_f32 v4, v4, v12, v13
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11
+; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v19
+; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX7-NEXT: v_fma_f32 v3, v3, v11, v12
+; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
+; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
+; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v18
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX7-NEXT: v_fma_f32 v2, v2, v10, v11
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
+; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v17
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
+; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v16
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX7-NEXT: v_fma_f32 v1, v1, v9, v11
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v10
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX7-NEXT: v_fma_f32 v0, v0, v8, v9
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_fma_v8bf16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v12, 16, v11
+; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v7
+; GFX8-NEXT: v_lshlrev_b32_e32 v14, 16, v3
+; GFX8-NEXT: v_fma_f32 v12, v14, v13, v12
+; GFX8-NEXT: v_bfe_u32 v13, v12, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v13, vcc, v13, v12
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX8-NEXT: v_add_u32_e32 v13, vcc, s4, v13
+; GFX8-NEXT: v_fma_f32 v3, v3, v7, v11
+; GFX8-NEXT: v_or_b32_e32 v14, 0x400000, v12
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
+; GFX8-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX8-NEXT: v_cndmask_b32_e32 v12, v13, v14, vcc
+; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v3
+; GFX8-NEXT: v_add_u32_e32 v7, vcc, s4, v7
+; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v11, vcc
+; GFX8-NEXT: v_lshlrev_b32_e32 v7, 16, v10
+; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v6
+; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v2
+; GFX8-NEXT: v_fma_f32 v7, v13, v11, v7
+; GFX8-NEXT: v_bfe_u32 v11, v7, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v11, vcc, v11, v7
+; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX8-NEXT: v_add_u32_e32 v11, vcc, s4, v11
+; GFX8-NEXT: v_fma_f32 v2, v2, v6, v10
+; GFX8-NEXT: v_or_b32_e32 v13, 0x400000, v7
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
+; GFX8-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v11, v13, vcc
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v2
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, s4, v6
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v6, v10, vcc
+; GFX8-NEXT: v_lshlrev_b32_e32 v6, 16, v9
+; GFX8-NEXT: v_lshlrev_b32_e32 v10, 16, v5
+; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v1
+; GFX8-NEXT: v_fma_f32 v6, v11, v10, v6
+; GFX8-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v6
+; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX8-NEXT: v_add_u32_e32 v10, vcc, s4, v10
+; GFX8-NEXT: v_fma_f32 v1, v1, v5, v9
+; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v6
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX8-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc
+; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v1
+; GFX8-NEXT: v_add_u32_e32 v5, vcc, s4, v5
+; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc
+; GFX8-NEXT: v_lshlrev_b32_e32 v5, 16, v8
+; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v4
+; GFX8-NEXT: v_lshlrev_b32_e32 v10, 16, v0
+; GFX8-NEXT: v_fma_f32 v5, v10, v9, v5
+; GFX8-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v9, vcc, v9, v5
+; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX8-NEXT: v_add_u32_e32 v9, vcc, s4, v9
+; GFX8-NEXT: v_fma_f32 v0, v0, v4, v8
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
+; GFX8-NEXT: v_bfe_u32 v4, v0, 16, 1
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v0
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0x7fff, v4
+; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v8, vcc
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX8-NEXT: v_alignbit_b32 v0, v0, v5, 16
+; GFX8-NEXT: v_alignbit_b32 v1, v1, v6, 16
+; GFX8-NEXT: v_alignbit_b32 v2, v2, v7, 16
+; GFX8-NEXT: v_alignbit_b32 v3, v3, v12, 16
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX900-LABEL: v_fma_v8bf16:
+; GFX900: ; %bb.0:
+; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v12, 16, v11
+; GFX900-NEXT: v_lshlrev_b32_e32 v13, 16, v7
+; GFX900-NEXT: v_lshlrev_b32_e32 v14, 16, v3
+; GFX900-NEXT: v_fma_f32 v12, v14, v13, v12
+; GFX900-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX900-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX900-NEXT: v_bfe_u32 v13, v12, 16, 1
+; GFX900-NEXT: s_movk_i32 s4, 0x7fff
+; GFX900-NEXT: v_fma_f32 v3, v3, v7, v11
+; GFX900-NEXT: v_add3_u32 v13, v13, v12, s4
+; GFX900-NEXT: v_or_b32_e32 v14, 0x400000, v12
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
+; GFX900-NEXT: v_bfe_u32 v7, v3, 16, 1
+; GFX900-NEXT: v_cndmask_b32_e32 v12, v13, v14, vcc
+; GFX900-NEXT: v_add3_u32 v7, v7, v3, s4
+; GFX900-NEXT: v_or_b32_e32 v11, 0x400000, v3
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX900-NEXT: v_cndmask_b32_e32 v3, v7, v11, vcc
+; GFX900-NEXT: v_lshlrev_b32_e32 v7, 16, v10
+; GFX900-NEXT: v_lshlrev_b32_e32 v11, 16, v6
+; GFX900-NEXT: v_lshlrev_b32_e32 v13, 16, v2
+; GFX900-NEXT: v_fma_f32 v7, v13, v11, v7
+; GFX900-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX900-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX900-NEXT: v_bfe_u32 v11, v7, 16, 1
+; GFX900-NEXT: v_fma_f32 v2, v2, v6, v10
+; GFX900-NEXT: v_add3_u32 v11, v11, v7, s4
+; GFX900-NEXT: v_or_b32_e32 v13, 0x400000, v7
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
+; GFX900-NEXT: v_bfe_u32 v6, v2, 16, 1
+; GFX900-NEXT: v_cndmask_b32_e32 v7, v11, v13, vcc
+; GFX900-NEXT: v_add3_u32 v6, v6, v2, s4
+; GFX900-NEXT: v_or_b32_e32 v10, 0x400000, v2
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
+; GFX900-NEXT: v_cndmask_b32_e32 v2, v6, v10, vcc
+; GFX900-NEXT: v_lshlrev_b32_e32 v6, 16, v9
+; GFX900-NEXT: v_lshlrev_b32_e32 v10, 16, v5
+; GFX900-NEXT: v_lshlrev_b32_e32 v11, 16, v1
+; GFX900-NEXT: v_fma_f32 v6, v11, v10, v6
+; GFX900-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX900-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX900-NEXT: v_bfe_u32 v10, v6, 16, 1
+; GFX900-NEXT: v_fma_f32 v1, v1, v5, v9
+; GFX900-NEXT: v_add3_u32 v10, v10, v6, s4
+; GFX900-NEXT: v_or_b32_e32 v11, 0x400000, v6
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX900-NEXT: v_bfe_u32 v5, v1, 16, 1
+; GFX900-NEXT: v_cndmask_b32_e32 v6, v10, v11, vcc
+; GFX900-NEXT: v_add3_u32 v5, v5, v1, s4
+; GFX900-NEXT: v_or_b32_e32 v9, 0x400000, v1
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX900-NEXT: v_cndmask_b32_e32 v1, v5, v9, vcc
+; GFX900-NEXT: v_lshlrev_b32_e32 v5, 16, v8
+; GFX900-NEXT: v_lshlrev_b32_e32 v9, 16, v4
+; GFX900-NEXT: v_lshlrev_b32_e32 v10, 16, v0
+; GFX900-NEXT: v_fma_f32 v5, v10, v9, v5
+; GFX900-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX900-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX900-NEXT: v_bfe_u32 v9, v5, 16, 1
+; GFX900-NEXT: v_fma_f32 v0, v0, v4, v8
+; GFX900-NEXT: v_add3_u32 v9, v9, v5, s4
+; GFX900-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
+; GFX900-NEXT: v_bfe_u32 v4, v0, 16, 1
+; GFX900-NEXT: v_cndmask_b32_e32 v5, v9, v10, vcc
+; GFX900-NEXT: v_add3_u32 v4, v4, v0, s4
+; GFX900-NEXT: v_or_b32_e32 v8, 0x400000, v0
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; GFX900-NEXT: v_cndmask_b32_e32 v0, v4, v8, vcc
+; GFX900-NEXT: s_mov_b32 s4, 0x7060302
+; GFX900-NEXT: v_perm_b32 v0, v0, v5, s4
+; GFX900-NEXT: v_perm_b32 v1, v1, v6, s4
+; GFX900-NEXT: v_perm_b32 v2, v2, v7, s4
+; GFX900-NEXT: v_perm_b32 v3, v3, v12, s4
+; GFX900-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX950-LABEL: v_fma_v8bf16:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT: v_and_b32_e32 v12, 0xffff0000, v11
+; GFX950-NEXT: v_and_b32_e32 v13, 0xffff0000, v7
+; GFX950-NEXT: v_and_b32_e32 v14, 0xffff0000, v3
+; GFX950-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX950-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX950-NEXT: v_fmac_f32_e32 v12, v14, v13
+; GFX950-NEXT: v_fmac_f32_e32 v11, v3, v7
+; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v10
+; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v6
+; GFX950-NEXT: v_and_b32_e32 v13, 0xffff0000, v2
+; GFX950-NEXT: v_fmac_f32_e32 v3, v13, v7
+; GFX950-NEXT: v_lshlrev_b32_e32 v7, 16, v10
+; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX950-NEXT: v_fmac_f32_e32 v7, v2, v6
+; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v9
+; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v5
+; GFX950-NEXT: v_and_b32_e32 v10, 0xffff0000, v1
+; GFX950-NEXT: v_fmac_f32_e32 v2, v10, v6
+; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v9
+; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX950-NEXT: v_fmac_f32_e32 v6, v1, v5
+; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v8
+; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v4
+; GFX950-NEXT: v_and_b32_e32 v9, 0xffff0000, v0
+; GFX950-NEXT: v_fmac_f32_e32 v1, v9, v5
+; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v8
+; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX950-NEXT: v_fmac_f32_e32 v5, v0, v4
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v5, v1
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v6, v2
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v2, v7, v3
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v11, v12
+; GFX950-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_fma_v8bf16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_lshlrev_b32_e32 v12, 16, v11
+; GFX10-NEXT: v_lshlrev_b32_e32 v13, 16, v7
+; GFX10-NEXT: v_lshlrev_b32_e32 v14, 16, v3
+; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX10-NEXT: v_lshlrev_b32_e32 v18, 16, v0
+; GFX10-NEXT: v_fmac_f32_e32 v12, v14, v13
+; GFX10-NEXT: v_lshlrev_b32_e32 v14, 16, v2
+; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX10-NEXT: v_fmac_f32_e32 v11, v3, v7
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v10
+; GFX10-NEXT: v_bfe_u32 v13, v12, 16, 1
+; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v6
+; GFX10-NEXT: v_or_b32_e32 v15, 0x400000, v12
+; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX10-NEXT: v_add3_u32 v13, v13, v12, 0x7fff
+; GFX10-NEXT: v_fmac_f32_e32 v3, v14, v7
+; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v10
+; GFX10-NEXT: v_bfe_u32 v16, v11, 16, 1
+; GFX10-NEXT: v_lshlrev_b32_e32 v14, 16, v1
+; GFX10-NEXT: v_cndmask_b32_e32 v10, v13, v15, vcc_lo
+; GFX10-NEXT: v_bfe_u32 v13, v3, 16, 1
+; GFX10-NEXT: v_fmac_f32_e32 v7, v2, v6
+; GFX10-NEXT: v_add3_u32 v12, v16, v11, 0x7fff
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v9
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v5
+; GFX10-NEXT: v_add3_u32 v13, v13, v3, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v15, 0x400000, v3
+; GFX10-NEXT: v_bfe_u32 v16, v7, 16, 1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX10-NEXT: v_fmac_f32_e32 v2, v14, v6
+; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX10-NEXT: v_add3_u32 v6, v16, v7, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v13, v15, vcc_lo
+; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v15, 16, v8
+; GFX10-NEXT: v_lshlrev_b32_e32 v16, 16, v4
+; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v7
+; GFX10-NEXT: v_bfe_u32 v14, v2, 16, 1
+; GFX10-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX10-NEXT: v_fmac_f32_e32 v9, v1, v5
+; GFX10-NEXT: v_fmac_f32_e32 v15, v18, v16
+; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v2
+; GFX10-NEXT: v_fmac_f32_e32 v8, v0, v4
+; GFX10-NEXT: v_cndmask_b32_e32 v6, v6, v13, vcc_lo
+; GFX10-NEXT: v_add3_u32 v0, v14, v2, 0x7fff
+; GFX10-NEXT: v_bfe_u32 v4, v9, 16, 1
+; GFX10-NEXT: v_bfe_u32 v5, v15, 16, 1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX10-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v13, 0x400000, v9
+; GFX10-NEXT: v_or_b32_e32 v17, 0x400000, v11
+; GFX10-NEXT: v_add3_u32 v2, v5, v15, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc_lo
+; GFX10-NEXT: v_add3_u32 v0, v4, v9, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v15
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX10-NEXT: v_add3_u32 v5, v7, v8, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v8
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX10-NEXT: v_cndmask_b32_e32 v5, v0, v13, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX10-NEXT: v_perm_b32 v0, v4, v2, 0x7060302
+; GFX10-NEXT: v_perm_b32 v2, v6, v3, 0x7060302
+; GFX10-NEXT: v_perm_b32 v1, v5, v1, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v12, v17, vcc_lo
+; GFX10-NEXT: v_perm_b32 v3, v7, v10, 0x7060302
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11TRUE16-LABEL: v_fma_v8bf16:
+; GFX11TRUE16: ; %bb.0:
+; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11TRUE16-NEXT: v_and_b32_e32 v12, 0xffff0000, v11
+; GFX11TRUE16-NEXT: v_and_b32_e32 v13, 0xffff0000, v7
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX11TRUE16-NEXT: v_and_b32_e32 v15, 0xffff0000, v6
+; GFX11TRUE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v2
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11TRUE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v3
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v11, v3, v7
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v10
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v7, v2, v6
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v12, v14, v13
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11TRUE16-NEXT: v_or_b32_e32 v6, 0x400000, v11
+; GFX11TRUE16-NEXT: v_bfe_u32 v13, v12, 16, 1
+; GFX11TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v12
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11TRUE16-NEXT: v_add3_u32 v13, v13, v12, 0x7fff
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v3, v13, v17, vcc_lo
+; GFX11TRUE16-NEXT: v_and_b32_e32 v17, 0xffff0000, v1
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11TRUE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v10
+; GFX11TRUE16-NEXT: v_bfe_u32 v10, v11, 16, 1
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11TRUE16-NEXT: v_bfe_u32 v13, v7, 16, 1
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v14, v16, v15
+; GFX11TRUE16-NEXT: v_add3_u32 v2, v10, v11, 0x7fff
+; GFX11TRUE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v5
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v7
+; GFX11TRUE16-NEXT: v_bfe_u32 v15, v14, 16, 1
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v6, v2, v6, vcc_lo
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11TRUE16-NEXT: v_add3_u32 v10, v15, v14, 0x7fff
+; GFX11TRUE16-NEXT: v_and_b32_e32 v15, 0xffff0000, v9
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX11TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v14
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v14, 16, v4
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, v6.h
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v9, v1, v5
+; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v4
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v10, v12, vcc_lo
+; GFX11TRUE16-NEXT: v_add3_u32 v10, v13, v7, 0x7fff
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11TRUE16-NEXT: v_bfe_u32 v7, v9, 16, 1
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v13, 16, v8
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v4, v10, v11 :: v_dual_and_b32 v5, 0xffff0000, v8
+; GFX11TRUE16-NEXT: v_add3_u32 v7, v7, v9, 0x7fff
+; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v9
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v4.h
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v7, v7, v10, vcc_lo
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v15, v17, v16
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v16, 16, v0
+; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11TRUE16-NEXT: v_bfe_u32 v12, v15, 16, 1
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v13, v16, v14
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v5, v0, v1
+; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v15
+; GFX11TRUE16-NEXT: v_add3_u32 v8, v12, v15, 0x7fff
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11TRUE16-NEXT: v_bfe_u32 v0, v13, 16, 1
+; GFX11TRUE16-NEXT: v_bfe_u32 v11, v5, 16, 1
+; GFX11TRUE16-NEXT: v_or_b32_e32 v12, 0x400000, v13
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v5
+; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v13, 0x7fff
+; GFX11TRUE16-NEXT: v_add3_u32 v9, v11, v5, 0x7fff
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v11, v0, v12, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v8, v1, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v7.h
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v9, v10, vcc_lo
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v11.h
+; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11FAKE16-LABEL: v_fma_v8bf16:
+; GFX11FAKE16: ; %bb.0:
+; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v12, 16, v11
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v13, 16, v7
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v14, 16, v3
+; GFX11FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v18, 16, v0
+; GFX11FAKE16-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v12, v14, v13 :: v_dual_and_b32 v3, 0xffff0000, v3
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v14, 16, v2
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11FAKE16-NEXT: v_bfe_u32 v13, v12, 16, 1
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v11, v3, v7
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v6
+; GFX11FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v12
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11FAKE16-NEXT: v_add3_u32 v13, v13, v12, 0x7fff
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v3, 16, v10
+; GFX11FAKE16-NEXT: v_bfe_u32 v16, v11, 16, 1
+; GFX11FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11FAKE16-NEXT: v_or_b32_e32 v17, 0x400000, v11
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v3, v14, v7
+; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v10, v13, v15 :: v_dual_and_b32 v7, 0xffff0000, v10
+; GFX11FAKE16-NEXT: v_add3_u32 v12, v16, v11, 0x7fff
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v14, 16, v1
+; GFX11FAKE16-NEXT: v_bfe_u32 v13, v3, 16, 1
+; GFX11FAKE16-NEXT: v_or_b32_e32 v15, 0x400000, v3
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11FAKE16-NEXT: v_add3_u32 v13, v13, v3, 0x7fff
+; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v3, v13, v15 :: v_dual_and_b32 v2, 0xffff0000, v2
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v7, v2, v6 :: v_dual_lshlrev_b32 v6, 16, v5
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v15, 16, v8
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v9
+; GFX11FAKE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX11FAKE16-NEXT: v_bfe_u32 v16, v7, 16, 1
+; GFX11FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v7
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v2, v14, v6 :: v_dual_and_b32 v5, 0xffff0000, v5
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11FAKE16-NEXT: v_add3_u32 v6, v16, v7, 0x7fff
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v9, v1, v5 :: v_dual_and_b32 v8, 0xffff0000, v8
+; GFX11FAKE16-NEXT: v_bfe_u32 v14, v2, 16, 1
+; GFX11FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v2
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v6, v6, v13, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v16, 16, v4
+; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX11FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v9
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v8, v0, v4
+; GFX11FAKE16-NEXT: v_add3_u32 v0, v14, v2, 0x7fff
+; GFX11FAKE16-NEXT: v_bfe_u32 v4, v9, 16, 1
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11FAKE16-NEXT: v_bfe_u32 v7, v8, 16, 1
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v0, v1, vcc_lo
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v15, v18, v16
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_add3_u32 v0, v4, v9, 0x7fff
+; GFX11FAKE16-NEXT: v_bfe_u32 v5, v15, 16, 1
+; GFX11FAKE16-NEXT: v_or_b32_e32 v4, 0x400000, v15
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11FAKE16-NEXT: v_add3_u32 v2, v5, v15, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v5, v7, v8, 0x7fff
+; GFX11FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v8
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v4, v5, v7, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v5, v0, v13, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11FAKE16-NEXT: v_perm_b32 v0, v4, v2, 0x7060302
+; GFX11FAKE16-NEXT: v_perm_b32 v2, v6, v3, 0x7060302
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11FAKE16-NEXT: v_perm_b32 v1, v5, v1, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v7, v12, v17, vcc_lo
+; GFX11FAKE16-NEXT: v_perm_b32 v3, v7, v10, 0x7060302
+; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
; GFX1250-LABEL: v_fma_v8bf16:
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
@@ -50000,11 +50618,1239 @@ define <4 x bfloat> @v_fma_v4bf16(<4 x bfloat> %a, <4 x bfloat> %b, <4 x bfloat>
; GFX1250-NEXT: v_pk_fma_bf16 v2, v2, v6, v10
; GFX1250-NEXT: v_pk_fma_bf16 v3, v3, v7, v11
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
-define <8 x bfloat> @v_fma_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %c) {
%op = call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat> %c)
ret <8 x bfloat> %op
}
+define <16 x bfloat> @v_fma_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b, <16 x bfloat> %c) {
+; GCN-LABEL: v_fma_v16bf16:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64
+; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
+; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v31, 1.0, v31
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GCN-NEXT: v_fma_f32 v15, v15, v31, v32
+; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14
+; GCN-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:60
+; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30
+; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v31, 1.0, v31
+; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GCN-NEXT: v_fma_f32 v14, v14, v30, v31
+; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
+; GCN-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:56
+; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29
+; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30
+; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GCN-NEXT: v_fma_f32 v13, v13, v29, v30
+; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
+; GCN-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:52
+; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28
+; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29
+; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GCN-NEXT: v_fma_f32 v12, v12, v28, v29
+; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
+; GCN-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:48
+; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27
+; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28
+; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GCN-NEXT: v_fma_f32 v11, v11, v27, v28
+; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
+; GCN-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:44
+; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26
+; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27
+; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GCN-NEXT: v_fma_f32 v10, v10, v26, v27
+; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
+; GCN-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:40
+; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25
+; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26
+; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GCN-NEXT: v_fma_f32 v9, v9, v25, v26
+; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
+; GCN-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:36
+; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24
+; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25
+; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GCN-NEXT: v_fma_f32 v8, v8, v24, v25
+; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
+; GCN-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:32
+; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23
+; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24
+; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GCN-NEXT: v_fma_f32 v7, v7, v23, v24
+; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
+; GCN-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:28
+; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22
+; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23
+; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GCN-NEXT: v_fma_f32 v6, v6, v22, v23
+; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
+; GCN-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:24
+; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21
+; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22
+; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GCN-NEXT: v_fma_f32 v5, v5, v21, v22
+; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
+; GCN-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:20
+; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20
+; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21
+; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GCN-NEXT: v_fma_f32 v4, v4, v20, v21
+; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GCN-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:16
+; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19
+; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20
+; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GCN-NEXT: v_fma_f32 v3, v3, v19, v20
+; GCN-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:12
+; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
+; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18
+; GCN-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:4
+; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19
+; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GCN-NEXT: v_fma_f32 v2, v2, v18, v19
+; GCN-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:8
+; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
+; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17
+; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v20
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18
+; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GCN-NEXT: v_fma_f32 v1, v1, v17, v18
+; GCN-NEXT: v_fma_f32 v0, v0, v16, v19
+; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_fma_v16bf16:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64
+; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
+; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
+; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30
+; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
+; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29
+; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12
+; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28
+; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11
+; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27
+; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
+; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26
+; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
+; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
+; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
+; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24
+; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
+; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23
+; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
+; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
+; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
+; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21
+; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
+; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20
+; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19
+; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
+; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18
+; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v17
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v16
+; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX7-NEXT: v_fma_f32 v15, v15, v31, v32
+; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:60
+; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31
+; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX7-NEXT: v_fma_f32 v14, v14, v30, v31
+; GFX7-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:56
+; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30
+; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GFX7-NEXT: v_fma_f32 v13, v13, v29, v30
+; GFX7-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:52
+; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29
+; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GFX7-NEXT: v_fma_f32 v12, v12, v28, v29
+; GFX7-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:48
+; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28
+; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GFX7-NEXT: v_fma_f32 v11, v11, v27, v28
+; GFX7-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:44
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27
+; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX7-NEXT: v_fma_f32 v10, v10, v26, v27
+; GFX7-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:40
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26
+; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GFX7-NEXT: v_fma_f32 v9, v9, v25, v26
+; GFX7-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:36
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
+; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GFX7-NEXT: v_fma_f32 v8, v8, v24, v25
+; GFX7-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:32
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24
+; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GFX7-NEXT: v_fma_f32 v7, v7, v23, v24
+; GFX7-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:28
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23
+; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX7-NEXT: v_fma_f32 v6, v6, v22, v23
+; GFX7-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:24
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
+; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX7-NEXT: v_fma_f32 v5, v5, v21, v22
+; GFX7-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:20
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21
+; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GFX7-NEXT: v_fma_f32 v4, v4, v20, v21
+; GFX7-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:16
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20
+; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GFX7-NEXT: v_fma_f32 v3, v3, v19, v20
+; GFX7-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:12
+; GFX7-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:4
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19
+; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX7-NEXT: v_fma_f32 v2, v2, v18, v19
+; GFX7-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:8
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v20
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18
+; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GFX7-NEXT: v_fma_f32 v1, v1, v17, v18
+; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v19
+; GFX7-NEXT: v_fma_f32 v0, v0, v16, v17
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_fma_v16bf16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v24, 16, v23
+; GFX8-NEXT: v_lshlrev_b32_e32 v25, 16, v15
+; GFX8-NEXT: v_lshlrev_b32_e32 v26, 16, v7
+; GFX8-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX8-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX8-NEXT: v_fma_f32 v24, v26, v25, v24
+; GFX8-NEXT: v_fma_f32 v7, v7, v15, v23
+; GFX8-NEXT: v_lshlrev_b32_e32 v15, 16, v22
+; GFX8-NEXT: v_lshlrev_b32_e32 v23, 16, v14
+; GFX8-NEXT: v_lshlrev_b32_e32 v25, 16, v6
+; GFX8-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX8-NEXT: v_fma_f32 v15, v25, v23, v15
+; GFX8-NEXT: v_fma_f32 v6, v6, v14, v22
+; GFX8-NEXT: v_lshlrev_b32_e32 v14, 16, v21
+; GFX8-NEXT: v_lshlrev_b32_e32 v22, 16, v13
+; GFX8-NEXT: v_lshlrev_b32_e32 v23, 16, v5
+; GFX8-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GFX8-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX8-NEXT: v_fma_f32 v14, v23, v22, v14
+; GFX8-NEXT: v_fma_f32 v5, v5, v13, v21
+; GFX8-NEXT: v_lshlrev_b32_e32 v13, 16, v20
+; GFX8-NEXT: v_lshlrev_b32_e32 v21, 16, v12
+; GFX8-NEXT: v_lshlrev_b32_e32 v22, 16, v4
+; GFX8-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX8-NEXT: v_fma_f32 v13, v22, v21, v13
+; GFX8-NEXT: v_fma_f32 v4, v4, v12, v20
+; GFX8-NEXT: v_lshlrev_b32_e32 v12, 16, v19
+; GFX8-NEXT: v_lshlrev_b32_e32 v20, 16, v11
+; GFX8-NEXT: v_lshlrev_b32_e32 v21, 16, v3
+; GFX8-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX8-NEXT: v_fma_f32 v12, v21, v20, v12
+; GFX8-NEXT: v_fma_f32 v3, v3, v11, v19
+; GFX8-NEXT: v_lshlrev_b32_e32 v11, 16, v18
+; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v10
+; GFX8-NEXT: v_lshlrev_b32_e32 v20, 16, v2
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX8-NEXT: v_fma_f32 v11, v20, v19, v11
+; GFX8-NEXT: v_fma_f32 v2, v2, v10, v18
+; GFX8-NEXT: v_lshlrev_b32_e32 v10, 16, v17
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v9
+; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v1
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX8-NEXT: v_fma_f32 v10, v19, v18, v10
+; GFX8-NEXT: v_fma_f32 v1, v1, v9, v17
+; GFX8-NEXT: v_lshlrev_b32_e32 v9, 16, v16
+; GFX8-NEXT: v_lshlrev_b32_e32 v17, 16, v8
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v0
+; GFX8-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX8-NEXT: v_fma_f32 v0, v0, v8, v16
+; GFX8-NEXT: v_bfe_u32 v8, v24, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v24
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: v_add_u32_e32 v8, vcc, s4, v8
+; GFX8-NEXT: v_or_b32_e32 v16, 0x400000, v24
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v24, v24
+; GFX8-NEXT: v_cndmask_b32_e32 v8, v8, v16, vcc
+; GFX8-NEXT: v_bfe_u32 v16, v7, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v7
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16
+; GFX8-NEXT: v_fma_f32 v9, v18, v17, v9
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v7
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v16, v17, vcc
+; GFX8-NEXT: v_bfe_u32 v16, v15, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v15
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v15
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v15, v15
+; GFX8-NEXT: v_cndmask_b32_e32 v15, v16, v17, vcc
+; GFX8-NEXT: v_bfe_u32 v16, v6, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v6
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v6
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v16, v17, vcc
+; GFX8-NEXT: v_bfe_u32 v16, v14, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v14
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v14
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v14, v14
+; GFX8-NEXT: v_cndmask_b32_e32 v14, v16, v17, vcc
+; GFX8-NEXT: v_bfe_u32 v16, v5, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v5
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v5
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v16, v17, vcc
+; GFX8-NEXT: v_bfe_u32 v16, v13, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v13
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v13
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
+; GFX8-NEXT: v_cndmask_b32_e32 v13, v16, v17, vcc
+; GFX8-NEXT: v_bfe_u32 v16, v4, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v4
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v4
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v16, v17, vcc
+; GFX8-NEXT: v_bfe_u32 v16, v12, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v12
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v12
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
+; GFX8-NEXT: v_cndmask_b32_e32 v12, v16, v17, vcc
+; GFX8-NEXT: v_bfe_u32 v16, v3, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v3
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v3
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v16, v17, vcc
+; GFX8-NEXT: v_bfe_u32 v16, v11, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v11
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v11
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v11, v11
+; GFX8-NEXT: v_cndmask_b32_e32 v11, v16, v17, vcc
+; GFX8-NEXT: v_bfe_u32 v16, v2, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v2
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v2
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v16, v17, vcc
+; GFX8-NEXT: v_bfe_u32 v16, v10, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v10
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v10
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v10, v10
+; GFX8-NEXT: v_cndmask_b32_e32 v10, v16, v17, vcc
+; GFX8-NEXT: v_bfe_u32 v16, v1, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v1
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v1
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v16, v17, vcc
+; GFX8-NEXT: v_bfe_u32 v16, v9, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v9
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v9
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v9, v9
+; GFX8-NEXT: v_cndmask_b32_e32 v9, v16, v17, vcc
+; GFX8-NEXT: v_bfe_u32 v16, v0, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v0
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v0
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v16, v17, vcc
+; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX8-NEXT: v_alignbit_b32 v0, v0, v9, 16
+; GFX8-NEXT: v_alignbit_b32 v1, v1, v10, 16
+; GFX8-NEXT: v_alignbit_b32 v2, v2, v11, 16
+; GFX8-NEXT: v_alignbit_b32 v3, v3, v12, 16
+; GFX8-NEXT: v_alignbit_b32 v4, v4, v13, 16
+; GFX8-NEXT: v_alignbit_b32 v5, v5, v14, 16
+; GFX8-NEXT: v_alignbit_b32 v6, v6, v15, 16
+; GFX8-NEXT: v_alignbit_b32 v7, v7, v8, 16
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX900-LABEL: v_fma_v16bf16:
+; GFX900: ; %bb.0:
+; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v24, 16, v23
+; GFX900-NEXT: v_lshlrev_b32_e32 v25, 16, v15
+; GFX900-NEXT: v_lshlrev_b32_e32 v26, 16, v7
+; GFX900-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX900-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX900-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX900-NEXT: v_fma_f32 v24, v26, v25, v24
+; GFX900-NEXT: v_fma_f32 v7, v7, v15, v23
+; GFX900-NEXT: v_lshlrev_b32_e32 v15, 16, v22
+; GFX900-NEXT: v_lshlrev_b32_e32 v23, 16, v14
+; GFX900-NEXT: v_lshlrev_b32_e32 v25, 16, v6
+; GFX900-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX900-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX900-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX900-NEXT: v_fma_f32 v15, v25, v23, v15
+; GFX900-NEXT: v_fma_f32 v6, v6, v14, v22
+; GFX900-NEXT: v_lshlrev_b32_e32 v14, 16, v21
+; GFX900-NEXT: v_lshlrev_b32_e32 v22, 16, v13
+; GFX900-NEXT: v_lshlrev_b32_e32 v23, 16, v5
+; GFX900-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GFX900-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX900-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX900-NEXT: v_fma_f32 v14, v23, v22, v14
+; GFX900-NEXT: v_fma_f32 v5, v5, v13, v21
+; GFX900-NEXT: v_lshlrev_b32_e32 v13, 16, v20
+; GFX900-NEXT: v_lshlrev_b32_e32 v21, 16, v12
+; GFX900-NEXT: v_lshlrev_b32_e32 v22, 16, v4
+; GFX900-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GFX900-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX900-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX900-NEXT: v_fma_f32 v13, v22, v21, v13
+; GFX900-NEXT: v_fma_f32 v4, v4, v12, v20
+; GFX900-NEXT: v_lshlrev_b32_e32 v12, 16, v19
+; GFX900-NEXT: v_lshlrev_b32_e32 v20, 16, v11
+; GFX900-NEXT: v_lshlrev_b32_e32 v21, 16, v3
+; GFX900-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX900-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX900-NEXT: v_fma_f32 v12, v21, v20, v12
+; GFX900-NEXT: v_fma_f32 v3, v3, v11, v19
+; GFX900-NEXT: v_lshlrev_b32_e32 v11, 16, v18
+; GFX900-NEXT: v_lshlrev_b32_e32 v19, 16, v10
+; GFX900-NEXT: v_lshlrev_b32_e32 v20, 16, v2
+; GFX900-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GFX900-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX900-NEXT: v_fma_f32 v11, v20, v19, v11
+; GFX900-NEXT: v_fma_f32 v2, v2, v10, v18
+; GFX900-NEXT: v_lshlrev_b32_e32 v10, 16, v17
+; GFX900-NEXT: v_lshlrev_b32_e32 v18, 16, v9
+; GFX900-NEXT: v_lshlrev_b32_e32 v19, 16, v1
+; GFX900-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX900-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX900-NEXT: v_fma_f32 v10, v19, v18, v10
+; GFX900-NEXT: v_fma_f32 v1, v1, v9, v17
+; GFX900-NEXT: v_lshlrev_b32_e32 v9, 16, v16
+; GFX900-NEXT: v_lshlrev_b32_e32 v17, 16, v8
+; GFX900-NEXT: v_lshlrev_b32_e32 v18, 16, v0
+; GFX900-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX900-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX900-NEXT: v_fma_f32 v0, v0, v8, v16
+; GFX900-NEXT: s_movk_i32 s4, 0x7fff
+; GFX900-NEXT: v_bfe_u32 v8, v24, 16, 1
+; GFX900-NEXT: v_add3_u32 v8, v8, v24, s4
+; GFX900-NEXT: v_or_b32_e32 v16, 0x400000, v24
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v24, v24
+; GFX900-NEXT: v_cndmask_b32_e32 v8, v8, v16, vcc
+; GFX900-NEXT: v_bfe_u32 v16, v7, 16, 1
+; GFX900-NEXT: v_fma_f32 v9, v18, v17, v9
+; GFX900-NEXT: v_add3_u32 v16, v16, v7, s4
+; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v7
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
+; GFX900-NEXT: v_cndmask_b32_e32 v7, v16, v17, vcc
+; GFX900-NEXT: v_bfe_u32 v16, v15, 16, 1
+; GFX900-NEXT: v_add3_u32 v16, v16, v15, s4
+; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v15
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v15, v15
+; GFX900-NEXT: v_cndmask_b32_e32 v15, v16, v17, vcc
+; GFX900-NEXT: v_bfe_u32 v16, v6, 16, 1
+; GFX900-NEXT: v_add3_u32 v16, v16, v6, s4
+; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v6
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX900-NEXT: v_cndmask_b32_e32 v6, v16, v17, vcc
+; GFX900-NEXT: v_bfe_u32 v16, v14, 16, 1
+; GFX900-NEXT: v_add3_u32 v16, v16, v14, s4
+; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v14
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v14, v14
+; GFX900-NEXT: v_cndmask_b32_e32 v14, v16, v17, vcc
+; GFX900-NEXT: v_bfe_u32 v16, v5, 16, 1
+; GFX900-NEXT: v_add3_u32 v16, v16, v5, s4
+; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v5
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
+; GFX900-NEXT: v_cndmask_b32_e32 v5, v16, v17, vcc
+; GFX900-NEXT: v_bfe_u32 v16, v13, 16, 1
+; GFX900-NEXT: v_add3_u32 v16, v16, v13, s4
+; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v13
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
+; GFX900-NEXT: v_cndmask_b32_e32 v13, v16, v17, vcc
+; GFX900-NEXT: v_bfe_u32 v16, v4, 16, 1
+; GFX900-NEXT: v_add3_u32 v16, v16, v4, s4
+; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX900-NEXT: v_cndmask_b32_e32 v4, v16, v17, vcc
+; GFX900-NEXT: v_bfe_u32 v16, v12, 16, 1
+; GFX900-NEXT: v_add3_u32 v16, v16, v12, s4
+; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v12
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
+; GFX900-NEXT: v_cndmask_b32_e32 v12, v16, v17, vcc
+; GFX900-NEXT: v_bfe_u32 v16, v3, 16, 1
+; GFX900-NEXT: v_add3_u32 v16, v16, v3, s4
+; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v3
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX900-NEXT: v_cndmask_b32_e32 v3, v16, v17, vcc
+; GFX900-NEXT: v_bfe_u32 v16, v11, 16, 1
+; GFX900-NEXT: v_add3_u32 v16, v16, v11, s4
+; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v11
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v11, v11
+; GFX900-NEXT: v_cndmask_b32_e32 v11, v16, v17, vcc
+; GFX900-NEXT: v_bfe_u32 v16, v2, 16, 1
+; GFX900-NEXT: v_add3_u32 v16, v16, v2, s4
+; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v2
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
+; GFX900-NEXT: v_cndmask_b32_e32 v2, v16, v17, vcc
+; GFX900-NEXT: v_bfe_u32 v16, v10, 16, 1
+; GFX900-NEXT: v_add3_u32 v16, v16, v10, s4
+; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v10
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v10, v10
+; GFX900-NEXT: v_cndmask_b32_e32 v10, v16, v17, vcc
+; GFX900-NEXT: v_bfe_u32 v16, v1, 16, 1
+; GFX900-NEXT: v_add3_u32 v16, v16, v1, s4
+; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v1
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX900-NEXT: v_cndmask_b32_e32 v1, v16, v17, vcc
+; GFX900-NEXT: v_bfe_u32 v16, v9, 16, 1
+; GFX900-NEXT: v_add3_u32 v16, v16, v9, s4
+; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v9
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v9, v9
+; GFX900-NEXT: v_cndmask_b32_e32 v9, v16, v17, vcc
+; GFX900-NEXT: v_bfe_u32 v16, v0, 16, 1
+; GFX900-NEXT: v_add3_u32 v16, v16, v0, s4
+; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v0
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; GFX900-NEXT: v_cndmask_b32_e32 v0, v16, v17, vcc
+; GFX900-NEXT: s_mov_b32 s4, 0x7060302
+; GFX900-NEXT: v_perm_b32 v0, v0, v9, s4
+; GFX900-NEXT: v_perm_b32 v1, v1, v10, s4
+; GFX900-NEXT: v_perm_b32 v2, v2, v11, s4
+; GFX900-NEXT: v_perm_b32 v3, v3, v12, s4
+; GFX900-NEXT: v_perm_b32 v4, v4, v13, s4
+; GFX900-NEXT: v_perm_b32 v5, v5, v14, s4
+; GFX900-NEXT: v_perm_b32 v6, v6, v15, s4
+; GFX900-NEXT: v_perm_b32 v7, v7, v8, s4
+; GFX900-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX950-LABEL: v_fma_v16bf16:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT: v_and_b32_e32 v24, 0xffff0000, v23
+; GFX950-NEXT: v_and_b32_e32 v25, 0xffff0000, v15
+; GFX950-NEXT: v_and_b32_e32 v26, 0xffff0000, v7
+; GFX950-NEXT: v_lshlrev_b32_e32 v23, 16, v23
+; GFX950-NEXT: v_lshlrev_b32_e32 v15, 16, v15
+; GFX950-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX950-NEXT: v_fmac_f32_e32 v24, v26, v25
+; GFX950-NEXT: v_fmac_f32_e32 v23, v7, v15
+; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v22
+; GFX950-NEXT: v_and_b32_e32 v15, 0xffff0000, v14
+; GFX950-NEXT: v_and_b32_e32 v25, 0xffff0000, v6
+; GFX950-NEXT: v_fmac_f32_e32 v7, v25, v15
+; GFX950-NEXT: v_lshlrev_b32_e32 v15, 16, v22
+; GFX950-NEXT: v_lshlrev_b32_e32 v14, 16, v14
+; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX950-NEXT: v_fmac_f32_e32 v15, v6, v14
+; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v21
+; GFX950-NEXT: v_and_b32_e32 v14, 0xffff0000, v13
+; GFX950-NEXT: v_and_b32_e32 v22, 0xffff0000, v5
+; GFX950-NEXT: v_fmac_f32_e32 v6, v22, v14
+; GFX950-NEXT: v_lshlrev_b32_e32 v14, 16, v21
+; GFX950-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX950-NEXT: v_fmac_f32_e32 v14, v5, v13
+; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v20
+; GFX950-NEXT: v_and_b32_e32 v13, 0xffff0000, v12
+; GFX950-NEXT: v_and_b32_e32 v21, 0xffff0000, v4
+; GFX950-NEXT: v_fmac_f32_e32 v5, v21, v13
+; GFX950-NEXT: v_lshlrev_b32_e32 v13, 16, v20
+; GFX950-NEXT: v_lshlrev_b32_e32 v12, 16, v12
+; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX950-NEXT: v_fmac_f32_e32 v13, v4, v12
+; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v19
+; GFX950-NEXT: v_and_b32_e32 v12, 0xffff0000, v11
+; GFX950-NEXT: v_and_b32_e32 v20, 0xffff0000, v3
+; GFX950-NEXT: v_fmac_f32_e32 v4, v20, v12
+; GFX950-NEXT: v_lshlrev_b32_e32 v12, 16, v19
+; GFX950-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX950-NEXT: v_fmac_f32_e32 v12, v3, v11
+; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v18
+; GFX950-NEXT: v_and_b32_e32 v11, 0xffff0000, v10
+; GFX950-NEXT: v_and_b32_e32 v19, 0xffff0000, v2
+; GFX950-NEXT: v_fmac_f32_e32 v3, v19, v11
+; GFX950-NEXT: v_lshlrev_b32_e32 v11, 16, v18
+; GFX950-NEXT: v_lshlrev_b32_e32 v10, 16, v10
+; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX950-NEXT: v_fmac_f32_e32 v11, v2, v10
+; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v17
+; GFX950-NEXT: v_and_b32_e32 v10, 0xffff0000, v9
+; GFX950-NEXT: v_and_b32_e32 v18, 0xffff0000, v1
+; GFX950-NEXT: v_fmac_f32_e32 v2, v18, v10
+; GFX950-NEXT: v_lshlrev_b32_e32 v10, 16, v17
+; GFX950-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX950-NEXT: v_fmac_f32_e32 v10, v1, v9
+; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v16
+; GFX950-NEXT: v_and_b32_e32 v9, 0xffff0000, v8
+; GFX950-NEXT: v_and_b32_e32 v17, 0xffff0000, v0
+; GFX950-NEXT: v_fmac_f32_e32 v1, v17, v9
+; GFX950-NEXT: v_lshlrev_b32_e32 v9, 16, v16
+; GFX950-NEXT: v_lshlrev_b32_e32 v8, 16, v8
+; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX950-NEXT: v_fmac_f32_e32 v9, v0, v8
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v9, v1
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v10, v2
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v2, v11, v3
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v12, v4
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v4, v13, v5
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v5, v14, v6
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v6, v15, v7
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v7, v23, v24
+; GFX950-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_fma_v16bf16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v23
+; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v15
+; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v7
+; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX10-NEXT: v_fmac_f32_e32 v24, v26, v25
+; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v6
+; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX10-NEXT: v_fmac_f32_e32 v23, v7, v15
+; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v22
+; GFX10-NEXT: v_bfe_u32 v25, v24, 16, 1
+; GFX10-NEXT: v_lshlrev_b32_e32 v15, 16, v14
+; GFX10-NEXT: v_or_b32_e32 v27, 0x400000, v24
+; GFX10-NEXT: v_bfe_u32 v28, v23, 16, 1
+; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX10-NEXT: v_add3_u32 v25, v25, v24, 0x7fff
+; GFX10-NEXT: v_fmac_f32_e32 v7, v26, v15
+; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v22
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
+; GFX10-NEXT: v_add3_u32 v24, v28, v23, 0x7fff
+; GFX10-NEXT: v_bfe_u32 v26, v7, 16, 1
+; GFX10-NEXT: v_fmac_f32_e32 v15, v6, v14
+; GFX10-NEXT: v_cndmask_b32_e32 v22, v25, v27, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v25, 0x400000, v23
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v21
+; GFX10-NEXT: v_lshlrev_b32_e32 v14, 16, v13
+; GFX10-NEXT: v_lshlrev_b32_e32 v27, 16, v5
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
+; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX10-NEXT: v_fmac_f32_e32 v6, v27, v14
+; GFX10-NEXT: v_cndmask_b32_e32 v23, v24, v25, vcc_lo
+; GFX10-NEXT: v_add3_u32 v24, v26, v7, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v25, 0x400000, v7
+; GFX10-NEXT: v_bfe_u32 v26, v15, 16, 1
+; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v21
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX10-NEXT: v_add3_u32 v21, v26, v15, 0x7fff
+; GFX10-NEXT: v_fmac_f32_e32 v14, v5, v13
+; GFX10-NEXT: v_cndmask_b32_e32 v7, v24, v25, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v24, 0x400000, v15
+; GFX10-NEXT: v_bfe_u32 v25, v6, 16, 1
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v20
+; GFX10-NEXT: v_lshlrev_b32_e32 v13, 16, v12
+; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v4
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX10-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX10-NEXT: v_fmac_f32_e32 v5, v26, v13
+; GFX10-NEXT: v_cndmask_b32_e32 v15, v21, v24, vcc_lo
+; GFX10-NEXT: v_add3_u32 v21, v25, v6, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v24, 0x400000, v6
+; GFX10-NEXT: v_bfe_u32 v25, v14, 16, 1
+; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v20
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v2
+; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX10-NEXT: v_add3_u32 v20, v25, v14, 0x7fff
+; GFX10-NEXT: v_fmac_f32_e32 v13, v4, v12
+; GFX10-NEXT: v_cndmask_b32_e32 v6, v21, v24, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v21, 0x400000, v14
+; GFX10-NEXT: v_bfe_u32 v24, v5, 16, 1
+; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v19
+; GFX10-NEXT: v_lshlrev_b32_e32 v12, 16, v11
+; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v3
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX10-NEXT: v_fmac_f32_e32 v4, v25, v12
+; GFX10-NEXT: v_cndmask_b32_e32 v14, v20, v21, vcc_lo
+; GFX10-NEXT: v_add3_u32 v20, v24, v5, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v21, 0x400000, v5
+; GFX10-NEXT: v_and_b32_e32 v12, 0xffff0000, v19
+; GFX10-NEXT: v_lshlrev_b32_e32 v19, 16, v18
+; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v10
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX10-NEXT: v_bfe_u32 v24, v13, 16, 1
+; GFX10-NEXT: v_fmac_f32_e32 v12, v3, v11
+; GFX10-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GFX10-NEXT: v_fmac_f32_e32 v19, v26, v25
+; GFX10-NEXT: v_cndmask_b32_e32 v5, v20, v21, vcc_lo
+; GFX10-NEXT: v_bfe_u32 v20, v4, 16, 1
+; GFX10-NEXT: v_add3_u32 v21, v24, v13, 0x7fff
+; GFX10-NEXT: v_bfe_u32 v24, v12, 16, 1
+; GFX10-NEXT: v_bfe_u32 v25, v19, 16, 1
+; GFX10-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX10-NEXT: v_add3_u32 v11, v20, v4, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v4
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX10-NEXT: v_or_b32_e32 v26, 0x400000, v19
+; GFX10-NEXT: v_fmac_f32_e32 v18, v2, v10
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 16, v17
+; GFX10-NEXT: v_lshlrev_b32_e32 v10, 16, v9
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v11, v20, vcc_lo
+; GFX10-NEXT: v_add3_u32 v11, v24, v12, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v12
+; GFX10-NEXT: v_add3_u32 v24, v25, v19, 0x7fff
+; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v1
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX10-NEXT: v_fmac_f32_e32 v2, v25, v10
+; GFX10-NEXT: v_cndmask_b32_e32 v11, v11, v20, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v8
+; GFX10-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX10-NEXT: v_bfe_u32 v20, v2, 16, 1
+; GFX10-NEXT: v_fmac_f32_e32 v17, v1, v9
+; GFX10-NEXT: v_cndmask_b32_e32 v10, v24, v26, vcc_lo
+; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v16
+; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v0
+; GFX10-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX10-NEXT: v_add3_u32 v1, v20, v2, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX10-NEXT: v_fmac_f32_e32 v24, v26, v25
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX10-NEXT: v_fmac_f32_e32 v16, v0, v8
+; GFX10-NEXT: v_bfe_u32 v0, v17, 16, 1
+; GFX10-NEXT: v_bfe_u32 v27, v18, 16, 1
+; GFX10-NEXT: v_bfe_u32 v8, v24, 16, 1
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX10-NEXT: v_or_b32_e32 v9, 0x400000, v17
+; GFX10-NEXT: v_add3_u32 v0, v0, v17, 0x7fff
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX10-NEXT: v_bfe_u32 v2, v16, 16, 1
+; GFX10-NEXT: v_add3_u32 v8, v8, v24, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v24
+; GFX10-NEXT: v_or_b32_e32 v25, 0x400000, v16
+; GFX10-NEXT: v_cndmask_b32_e32 v9, v0, v9, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
+; GFX10-NEXT: v_add3_u32 v2, v2, v16, 0x7fff
+; GFX10-NEXT: v_add3_u32 v12, v27, v18, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v19, 0x400000, v18
+; GFX10-NEXT: v_or_b32_e32 v3, 0x400000, v13
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v8, v20, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX10-NEXT: v_perm_b32 v1, v9, v1, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v25, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX10-NEXT: v_perm_b32 v0, v2, v0, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e32 v8, v12, v19, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX10-NEXT: v_perm_b32 v2, v8, v10, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e32 v12, v21, v3, vcc_lo
+; GFX10-NEXT: v_perm_b32 v3, v11, v4, 0x7060302
+; GFX10-NEXT: v_perm_b32 v4, v12, v5, 0x7060302
+; GFX10-NEXT: v_perm_b32 v5, v14, v6, 0x7060302
+; GFX10-NEXT: v_perm_b32 v6, v15, v7, 0x7060302
+; GFX10-NEXT: v_perm_b32 v7, v23, v22, 0x7060302
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11TRUE16-LABEL: v_fma_v16bf16:
+; GFX11TRUE16: ; %bb.0:
+; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11TRUE16-NEXT: v_and_b32_e32 v24, 0xffff0000, v23
+; GFX11TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v7
+; GFX11TRUE16-NEXT: v_and_b32_e32 v27, 0xffff0000, v14
+; GFX11TRUE16-NEXT: v_and_b32_e32 v28, 0xffff0000, v6
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v14, 16, v14
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX11TRUE16-NEXT: v_and_b32_e32 v25, 0xffff0000, v15
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v23, 16, v23
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v24, v26, v25 :: v_dual_lshlrev_b32 v7, 16, v7
+; GFX11TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v22
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v22
+; GFX11TRUE16-NEXT: v_bfe_u32 v25, v24, 16, 1
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v26, v28, v27
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v22, v6, v14
+; GFX11TRUE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v21
+; GFX11TRUE16-NEXT: v_and_b32_e32 v28, 0xffff0000, v13
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v15, 16, v15
+; GFX11TRUE16-NEXT: v_add3_u32 v25, v25, v24, 0x7fff
+; GFX11TRUE16-NEXT: v_or_b32_e32 v29, 0x400000, v24
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v23, v7, v15
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v21
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v7, v25, v29, vcc_lo
+; GFX11TRUE16-NEXT: v_and_b32_e32 v29, 0xffff0000, v5
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11TRUE16-NEXT: v_bfe_u32 v15, v23, 16, 1
+; GFX11TRUE16-NEXT: v_bfe_u32 v24, v26, 16, 1
+; GFX11TRUE16-NEXT: v_or_b32_e32 v25, 0x400000, v23
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
+; GFX11TRUE16-NEXT: v_or_b32_e32 v27, 0x400000, v26
+; GFX11TRUE16-NEXT: v_add3_u32 v15, v15, v23, 0x7fff
+; GFX11TRUE16-NEXT: v_add3_u32 v24, v24, v26, 0x7fff
+; GFX11TRUE16-NEXT: v_bfe_u32 v23, v22, 16, 1
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v14, v29, v28 :: v_dual_cndmask_b32 v15, v15, v25
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v26, v26
+; GFX11TRUE16-NEXT: v_and_b32_e32 v25, 0xffff0000, v12
+; GFX11TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v4
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v12, 16, v12
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v6, v24, v27, vcc_lo
+; GFX11TRUE16-NEXT: v_and_b32_e32 v24, 0xffff0000, v20
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; GFX11TRUE16-NEXT: v_add3_u32 v23, v23, v22, 0x7fff
+; GFX11TRUE16-NEXT: v_or_b32_e32 v27, 0x400000, v22
+; GFX11TRUE16-NEXT: v_bfe_u32 v28, v14, 16, 1
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v20, v4, v12
+; GFX11TRUE16-NEXT: v_and_b32_e32 v12, 0xffff0000, v19
+; GFX11TRUE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v11
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v24, v26, v25
+; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v14
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v7.l, v15.h
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v21, v5, v13
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v13, v23, v27, vcc_lo
+; GFX11TRUE16-NEXT: v_add3_u32 v5, v28, v14, 0x7fff
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11TRUE16-NEXT: v_bfe_u32 v25, v24, 16, 1
+; GFX11TRUE16-NEXT: v_bfe_u32 v23, v21, 16, 1
+; GFX11TRUE16-NEXT: v_bfe_u32 v27, v20, 16, 1
+; GFX11TRUE16-NEXT: v_or_b32_e32 v26, 0x400000, v24
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v5, v5, v22, vcc_lo
+; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v21
+; GFX11TRUE16-NEXT: v_add3_u32 v14, v23, v21, 0x7fff
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
+; GFX11TRUE16-NEXT: v_add3_u32 v23, v25, v24, 0x7fff
+; GFX11TRUE16-NEXT: v_add3_u32 v21, v27, v20, 0x7fff
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v6.l, v13.h
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v14, v14, v22, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
+; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v20
+; GFX11TRUE16-NEXT: v_and_b32_e32 v25, 0xffff0000, v3
+; GFX11TRUE16-NEXT: v_and_b32_e32 v24, 0xffff0000, v18
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v5.l, v14.h
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v12, v25, v4
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v4, v23, v26, vcc_lo
+; GFX11TRUE16-NEXT: v_and_b32_e32 v25, 0xffff0000, v10
+; GFX11TRUE16-NEXT: v_and_b32_e32 v26, 0xffff0000, v2
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v19, 16, v19
+; GFX11TRUE16-NEXT: v_bfe_u32 v23, v12, 16, 1
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v20, v20
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v10, 16, v10
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v24, v26, v25
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v20, v21, v22 :: v_dual_and_b32 v25, 0xffff0000, v1
+; GFX11TRUE16-NEXT: v_add3_u32 v21, v23, v12, 0x7fff
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11TRUE16-NEXT: v_bfe_u32 v23, v24, 16, 1
+; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v12
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v4.l, v20.h
+; GFX11TRUE16-NEXT: v_add3_u32 v12, v23, v24, 0x7fff
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11TRUE16-NEXT: v_and_b32_e32 v23, 0xffff0000, v9
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v19, v3, v11
+; GFX11TRUE16-NEXT: v_dual_cndmask_b32 v3, v21, v22 :: v_dual_and_b32 v22, 0xffff0000, v17
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v11, 16, v18
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v17, 16, v17
+; GFX11TRUE16-NEXT: v_bfe_u32 v18, v19, 16, 1
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v22, v25, v23 :: v_dual_fmac_f32 v11, v2, v10
+; GFX11TRUE16-NEXT: v_or_b32_e32 v10, 0x400000, v19
+; GFX11TRUE16-NEXT: v_add3_u32 v2, v18, v19, 0x7fff
+; GFX11TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v24
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v23, 16, v8
+; GFX11TRUE16-NEXT: v_bfe_u32 v21, v11, 16, 1
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v17, v1, v9 :: v_dual_cndmask_b32 v10, v2, v10
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v24, 16, v0
+; GFX11TRUE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v16
+; GFX11TRUE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v8
+; GFX11TRUE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v12, v18, vcc_lo
+; GFX11TRUE16-NEXT: v_add3_u32 v12, v21, v11, 0x7fff
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v16
+; GFX11TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v11
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
+; GFX11TRUE16-NEXT: v_bfe_u32 v11, v17, 16, 1
+; GFX11TRUE16-NEXT: v_bfe_u32 v19, v22, 16, 1
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v21, v24, v23
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v9, v0, v1
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v8, v12, v18, vcc_lo
+; GFX11TRUE16-NEXT: v_add3_u32 v11, v11, v17, 0x7fff
+; GFX11TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v17
+; GFX11TRUE16-NEXT: v_bfe_u32 v0, v21, 16, 1
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11TRUE16-NEXT: v_add3_u32 v12, v19, v22, 0x7fff
+; GFX11TRUE16-NEXT: v_bfe_u32 v18, v9, 16, 1
+; GFX11TRUE16-NEXT: v_or_b32_e32 v19, 0x400000, v21
+; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v21, 0x7fff
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v11, v11, v16, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v21, v21
+; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v22
+; GFX11TRUE16-NEXT: v_add3_u32 v16, v18, v9, 0x7fff
+; GFX11TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v9
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v8.h
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v18, v0, v19, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v22, v22
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, v10.h
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v12, v1, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v11.h
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v16, v17, vcc_lo
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v18.h
+; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11FAKE16-LABEL: v_fma_v16bf16:
+; GFX11FAKE16: ; %bb.0:
+; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v24, 16, v23
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v25, 16, v15
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v26, 16, v7
+; GFX11FAKE16-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX11FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v24, v26, v25 :: v_dual_and_b32 v23, 0xffff0000, v23
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v23, v7, v15 :: v_dual_lshlrev_b32 v26, 16, v6
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v15, 16, v14
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11FAKE16-NEXT: v_bfe_u32 v25, v24, 16, 1
+; GFX11FAKE16-NEXT: v_or_b32_e32 v27, 0x400000, v24
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
+; GFX11FAKE16-NEXT: v_bfe_u32 v28, v23, 16, 1
+; GFX11FAKE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX11FAKE16-NEXT: v_add3_u32 v25, v25, v24, 0x7fff
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v7, 16, v22
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_add3_u32 v24, v28, v23, 0x7fff
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v7, v26, v15
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v22, v25, v27 :: v_dual_and_b32 v15, 0xffff0000, v22
+; GFX11FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v23
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v23, v23
+; GFX11FAKE16-NEXT: v_bfe_u32 v26, v7, 16, 1
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v27, 16, v5
+; GFX11FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v23, v24, v25, vcc_lo
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_add3_u32 v24, v26, v7, 0x7fff
+; GFX11FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v7
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
+; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v7, v24, v25 :: v_dual_and_b32 v6, 0xffff0000, v6
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v15, v6, v14 :: v_dual_lshlrev_b32 v14, 16, v13
+; GFX11FAKE16-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX11FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v15
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v6, 16, v21
+; GFX11FAKE16-NEXT: v_bfe_u32 v26, v15, 16, 1
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v6, v27, v14
+; GFX11FAKE16-NEXT: v_and_b32_e32 v14, 0xffff0000, v21
+; GFX11FAKE16-NEXT: v_add3_u32 v21, v26, v15, 0x7fff
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v26, 16, v4
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11FAKE16-NEXT: v_bfe_u32 v25, v6, 16, 1
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v14, v5, v13 :: v_dual_lshlrev_b32 v5, 16, v20
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v13, 16, v12
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v15, v21, v24, vcc_lo
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11FAKE16-NEXT: v_add3_u32 v21, v25, v6, 0x7fff
+; GFX11FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v6
+; GFX11FAKE16-NEXT: v_bfe_u32 v25, v14, 16, 1
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v5, v26, v13 :: v_dual_and_b32 v12, 0xffff0000, v12
+; GFX11FAKE16-NEXT: v_and_b32_e32 v13, 0xffff0000, v20
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v26, 16, v2
+; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX11FAKE16-NEXT: v_add3_u32 v20, v25, v14, 0x7fff
+; GFX11FAKE16-NEXT: v_dual_cndmask_b32 v6, v21, v24 :: v_dual_lshlrev_b32 v25, 16, v3
+; GFX11FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v14
+; GFX11FAKE16-NEXT: v_bfe_u32 v24, v5, 16, 1
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v13, v4, v12 :: v_dual_lshlrev_b32 v4, 16, v19
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v12, 16, v11
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
+; GFX11FAKE16-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX11FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v4, v25, v12
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v14, v20, v21, vcc_lo
+; GFX11FAKE16-NEXT: v_add3_u32 v20, v24, v5, 0x7fff
+; GFX11FAKE16-NEXT: v_or_b32_e32 v21, 0x400000, v5
+; GFX11FAKE16-NEXT: v_bfe_u32 v24, v13, 16, 1
+; GFX11FAKE16-NEXT: v_and_b32_e32 v12, 0xffff0000, v19
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v25, 16, v10
+; GFX11FAKE16-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v12, v3, v11 :: v_dual_cndmask_b32 v5, v20, v21
+; GFX11FAKE16-NEXT: v_add3_u32 v21, v24, v13, 0x7fff
+; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v13
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v19, 16, v18
+; GFX11FAKE16-NEXT: v_bfe_u32 v20, v4, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v24, v12, 16, 1
+; GFX11FAKE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v19, v26, v25
+; GFX11FAKE16-NEXT: v_add3_u32 v11, v20, v4, 0x7fff
+; GFX11FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v4
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v18, v2, v10
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v17
+; GFX11FAKE16-NEXT: v_bfe_u32 v25, v19, 16, 1
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v10, 16, v9
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v4, v11, v20, vcc_lo
+; GFX11FAKE16-NEXT: v_add3_u32 v11, v24, v12, 0x7fff
+; GFX11FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v12
+; GFX11FAKE16-NEXT: v_add3_u32 v24, v25, v19, 0x7fff
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v25, 16, v1
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v19
+; GFX11FAKE16-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v2, v25, v10 :: v_dual_and_b32 v9, 0xffff0000, v9
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v11, v11, v20, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v25, 16, v8
+; GFX11FAKE16-NEXT: v_bfe_u32 v20, v2, 16, 1
+; GFX11FAKE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v10, v24, v26, vcc_lo
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v24, 16, v16
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v26, 16, v0
+; GFX11FAKE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v17, v1, v9 :: v_dual_and_b32 v0, 0xffff0000, v0
+; GFX11FAKE16-NEXT: v_add3_u32 v1, v20, v2, 0x7fff
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v24, v26, v25
+; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v2
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v16, v0, v8
+; GFX11FAKE16-NEXT: v_bfe_u32 v0, v17, 16, 1
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
+; GFX11FAKE16-NEXT: v_bfe_u32 v8, v24, 16, 1
+; GFX11FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v24
+; GFX11FAKE16-NEXT: v_bfe_u32 v2, v16, 16, 1
+; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v17, 0x7fff
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
+; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v17
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v17, v17
+; GFX11FAKE16-NEXT: v_add3_u32 v8, v8, v24, 0x7fff
+; GFX11FAKE16-NEXT: v_bfe_u32 v27, v18, 16, 1
+; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v16, 0x7fff
+; GFX11FAKE16-NEXT: v_or_b32_e32 v25, 0x400000, v16
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v9, v0, v9, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v24, v24
+; GFX11FAKE16-NEXT: v_add3_u32 v12, v27, v18, 0x7fff
+; GFX11FAKE16-NEXT: v_or_b32_e32 v19, 0x400000, v18
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_perm_b32 v1, v9, v1, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v8, v20, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v16, v16
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v25, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v18, v18
+; GFX11FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v8, v12, v19, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v13, v13
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_perm_b32 v2, v8, v10, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v12, v21, v3, vcc_lo
+; GFX11FAKE16-NEXT: v_perm_b32 v3, v11, v4, 0x7060302
+; GFX11FAKE16-NEXT: v_perm_b32 v4, v12, v5, 0x7060302
+; GFX11FAKE16-NEXT: v_perm_b32 v5, v14, v6, 0x7060302
+; GFX11FAKE16-NEXT: v_perm_b32 v6, v15, v7, 0x7060302
+; GFX11FAKE16-NEXT: v_perm_b32 v7, v23, v22, 0x7060302
+; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
; GFX1250-LABEL: v_fma_v16bf16:
; GFX1250: ; %bb.0:
; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
@@ -50018,67 +51864,2797 @@ define <8 x bfloat> @v_fma_v8bf16(<8 x bfloat> %a, <8 x bfloat> %b, <8 x bfloat>
; GFX1250-NEXT: v_pk_fma_bf16 v6, v6, v14, v22
; GFX1250-NEXT: v_pk_fma_bf16 v7, v7, v15, v23
; GFX1250-NEXT: s_set_pc_i64 s[30:31]
-define <16 x bfloat> @v_fma_v16bf16(<16 x bfloat> %a, <16 x bfloat> %b, <16 x bfloat> %c) {
%op = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> %a, <16 x bfloat> %b, <16 x bfloat> %c)
ret <16 x bfloat> %op
}
+define <32 x bfloat> @v_fma_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b, <32 x bfloat> %c) {
+; GCN-LABEL: v_fma_v32bf16:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:256
+; GCN-NEXT: s_waitcnt vmcnt(2)
+; GCN-NEXT: v_mul_f32_e32 v31, 1.0, v31
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GCN-NEXT: v_fma_f32 v31, v31, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:252
+; GCN-NEXT: v_mul_f32_e32 v30, 1.0, v30
+; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v30, v30, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:120
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:248
+; GCN-NEXT: v_mul_f32_e32 v29, 1.0, v29
+; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v29, v29, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:116
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:244
+; GCN-NEXT: v_mul_f32_e32 v28, 1.0, v28
+; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v28, v28, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:112
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:240
+; GCN-NEXT: v_mul_f32_e32 v27, 1.0, v27
+; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v27, v27, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:108
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:236
+; GCN-NEXT: v_mul_f32_e32 v26, 1.0, v26
+; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v26, v26, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:104
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:232
+; GCN-NEXT: v_mul_f32_e32 v25, 1.0, v25
+; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v25, v25, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:100
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:228
+; GCN-NEXT: v_mul_f32_e32 v24, 1.0, v24
+; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v24, v24, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:96
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:224
+; GCN-NEXT: v_mul_f32_e32 v23, 1.0, v23
+; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v23, v23, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:92
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:220
+; GCN-NEXT: v_mul_f32_e32 v22, 1.0, v22
+; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v22, v22, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:88
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:216
+; GCN-NEXT: v_mul_f32_e32 v21, 1.0, v21
+; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v21, v21, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:84
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:212
+; GCN-NEXT: v_mul_f32_e32 v20, 1.0, v20
+; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v20, v20, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:80
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:208
+; GCN-NEXT: v_mul_f32_e32 v19, 1.0, v19
+; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v19, v19, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:76
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:204
+; GCN-NEXT: v_mul_f32_e32 v18, 1.0, v18
+; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v18, v18, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:200
+; GCN-NEXT: v_mul_f32_e32 v17, 1.0, v17
+; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v17, v17, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:196
+; GCN-NEXT: v_mul_f32_e32 v16, 1.0, v16
+; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v16, v16, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:192
+; GCN-NEXT: v_mul_f32_e32 v15, 1.0, v15
+; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v15, v15, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:60
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:188
+; GCN-NEXT: v_mul_f32_e32 v14, 1.0, v14
+; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v14, v14, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:56
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:184
+; GCN-NEXT: v_mul_f32_e32 v13, 1.0, v13
+; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v13, v13, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:52
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:180
+; GCN-NEXT: v_mul_f32_e32 v12, 1.0, v12
+; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v12, v12, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:48
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:176
+; GCN-NEXT: v_mul_f32_e32 v11, 1.0, v11
+; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v11, v11, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:172
+; GCN-NEXT: v_mul_f32_e32 v10, 1.0, v10
+; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v10, v10, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:40
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:168
+; GCN-NEXT: v_mul_f32_e32 v9, 1.0, v9
+; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v9, v9, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:36
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:164
+; GCN-NEXT: v_mul_f32_e32 v8, 1.0, v8
+; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v8, v8, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:32
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:160
+; GCN-NEXT: v_mul_f32_e32 v7, 1.0, v7
+; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v7, v7, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:28
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:156
+; GCN-NEXT: v_mul_f32_e32 v6, 1.0, v6
+; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v6, v6, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:24
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:152
+; GCN-NEXT: v_mul_f32_e32 v5, 1.0, v5
+; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v5, v5, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:20
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:148
+; GCN-NEXT: v_mul_f32_e32 v4, 1.0, v4
+; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v4, v4, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:16
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:144
+; GCN-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v3, v3, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:12
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:140
+; GCN-NEXT: v_mul_f32_e32 v2, 1.0, v2
+; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v2, v2, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:8
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:136
+; GCN-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v1, v1, v32, v33
+; GCN-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
+; GCN-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:132
+; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GCN-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GCN-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GCN-NEXT: v_fma_f32 v0, v0, v32, v33
+; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GCN-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GCN-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GCN-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GCN-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GCN-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GCN-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GCN-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GCN-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GCN-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GCN-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GCN-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GCN-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GCN-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GCN-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GCN-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GCN-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GCN-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GCN-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GCN-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GCN-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GCN-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GCN-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GCN-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GCN-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GCN-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GCN-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GCN-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GCN-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GCN-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GCN-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GCN-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX7-LABEL: v_fma_v32bf16:
+; GFX7: ; %bb.0:
+; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX7-NEXT: buffer_load_dword v31, off, s[0:3], s32
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:256
+; GFX7-NEXT: v_mul_f32_e32 v30, 1.0, v30
+; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GFX7-NEXT: v_mul_f32_e32 v29, 1.0, v29
+; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GFX7-NEXT: v_mul_f32_e32 v28, 1.0, v28
+; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GFX7-NEXT: v_mul_f32_e32 v27, 1.0, v27
+; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX7-NEXT: v_mul_f32_e32 v26, 1.0, v26
+; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GFX7-NEXT: v_mul_f32_e32 v25, 1.0, v25
+; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GFX7-NEXT: v_mul_f32_e32 v24, 1.0, v24
+; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GFX7-NEXT: v_mul_f32_e32 v23, 1.0, v23
+; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX7-NEXT: v_mul_f32_e32 v22, 1.0, v22
+; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX7-NEXT: v_mul_f32_e32 v21, 1.0, v21
+; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GFX7-NEXT: v_mul_f32_e32 v20, 1.0, v20
+; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GFX7-NEXT: v_mul_f32_e32 v19, 1.0, v19
+; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX7-NEXT: v_mul_f32_e32 v18, 1.0, v18
+; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GFX7-NEXT: v_mul_f32_e32 v17, 1.0, v17
+; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX7-NEXT: v_mul_f32_e32 v16, 1.0, v16
+; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX7-NEXT: v_mul_f32_e32 v15, 1.0, v15
+; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX7-NEXT: v_mul_f32_e32 v14, 1.0, v14
+; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX7-NEXT: v_mul_f32_e32 v13, 1.0, v13
+; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX7-NEXT: v_mul_f32_e32 v12, 1.0, v12
+; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX7-NEXT: v_mul_f32_e32 v11, 1.0, v11
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX7-NEXT: v_mul_f32_e32 v10, 1.0, v10
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX7-NEXT: v_mul_f32_e32 v9, 1.0, v9
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX7-NEXT: v_mul_f32_e32 v8, 1.0, v8
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX7-NEXT: v_mul_f32_e32 v7, 1.0, v7
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX7-NEXT: v_mul_f32_e32 v6, 1.0, v6
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX7-NEXT: v_mul_f32_e32 v5, 1.0, v5
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX7-NEXT: v_mul_f32_e32 v4, 1.0, v4
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX7-NEXT: v_mul_f32_e32 v3, 1.0, v3
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX7-NEXT: v_mul_f32_e32 v2, 1.0, v2
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX7-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX7-NEXT: s_waitcnt vmcnt(2)
+; GFX7-NEXT: v_mul_f32_e32 v31, 1.0, v31
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX7-NEXT: v_fma_f32 v31, v31, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:124
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:252
+; GFX7-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v30, v30, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:120
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:248
+; GFX7-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v29, v29, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:116
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:244
+; GFX7-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v28, v28, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:112
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:240
+; GFX7-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v27, v27, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:108
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:236
+; GFX7-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v26, v26, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:104
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:232
+; GFX7-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v25, v25, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:100
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:228
+; GFX7-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v24, v24, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:96
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:224
+; GFX7-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v23, v23, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:92
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:220
+; GFX7-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v22, v22, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:88
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:216
+; GFX7-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v21, v21, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:84
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:212
+; GFX7-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v20, v20, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:80
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:208
+; GFX7-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v19, v19, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:76
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:204
+; GFX7-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v18, v18, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:200
+; GFX7-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v17, v17, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:68
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:196
+; GFX7-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v16, v16, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:192
+; GFX7-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v15, v15, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:60
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:188
+; GFX7-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v14, v14, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:56
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:184
+; GFX7-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v13, v13, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:52
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:180
+; GFX7-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v12, v12, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:48
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:176
+; GFX7-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v11, v11, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:172
+; GFX7-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v10, v10, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:40
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:168
+; GFX7-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v9, v9, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:36
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:164
+; GFX7-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v8, v8, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:32
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:160
+; GFX7-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v7, v7, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:28
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:156
+; GFX7-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v6, v6, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:24
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:152
+; GFX7-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v5, v5, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:20
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:148
+; GFX7-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v4, v4, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:16
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:144
+; GFX7-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v3, v3, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:12
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:140
+; GFX7-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v2, v2, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:8
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:136
+; GFX7-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v1, v1, v32, v33
+; GFX7-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:4
+; GFX7-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:132
+; GFX7-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX7-NEXT: s_waitcnt vmcnt(1)
+; GFX7-NEXT: v_mul_f32_e32 v32, 1.0, v32
+; GFX7-NEXT: s_waitcnt vmcnt(0)
+; GFX7-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; GFX7-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX7-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX7-NEXT: v_fma_f32 v0, v0, v32, v33
+; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX7-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_fma_v32bf16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32
+; GFX8-NEXT: v_lshlrev_b32_e32 v31, 16, v15
+; GFX8-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: s_waitcnt vmcnt(1)
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v32
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v15, v15, v33, v32
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:60
+; GFX8-NEXT: v_fma_f32 v31, v31, v35, v34
+; GFX8-NEXT: v_lshlrev_b32_e32 v32, 16, v30
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v14
+; GFX8-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GFX8-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v14, v14, v30, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:56
+; GFX8-NEXT: v_fma_f32 v32, v34, v32, v35
+; GFX8-NEXT: v_lshlrev_b32_e32 v30, 16, v29
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v13
+; GFX8-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GFX8-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v13, v13, v29, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:52
+; GFX8-NEXT: v_fma_f32 v30, v34, v30, v35
+; GFX8-NEXT: v_lshlrev_b32_e32 v29, 16, v28
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v12
+; GFX8-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GFX8-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v12, v12, v28, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:48
+; GFX8-NEXT: v_fma_f32 v29, v34, v29, v35
+; GFX8-NEXT: v_lshlrev_b32_e32 v28, 16, v27
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v11
+; GFX8-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX8-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v11, v11, v27, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:44
+; GFX8-NEXT: v_fma_f32 v28, v34, v28, v35
+; GFX8-NEXT: v_lshlrev_b32_e32 v27, 16, v26
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v10
+; GFX8-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GFX8-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v10, v10, v26, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:40
+; GFX8-NEXT: v_fma_f32 v27, v34, v27, v35
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v25
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v9
+; GFX8-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GFX8-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v26, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v9, v9, v25, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:36
+; GFX8-NEXT: v_fma_f32 v26, v35, v34, v26
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v24
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v8
+; GFX8-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GFX8-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v25, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v8, v8, v24, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:32
+; GFX8-NEXT: v_fma_f32 v25, v35, v34, v25
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v23
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v7
+; GFX8-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX8-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v24, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v7, v7, v23, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:28
+; GFX8-NEXT: v_fma_f32 v24, v35, v34, v24
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v22
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v6
+; GFX8-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX8-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v23, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v6, v6, v22, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:24
+; GFX8-NEXT: v_fma_f32 v23, v35, v34, v23
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v21
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v5
+; GFX8-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GFX8-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v22, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v5, v5, v21, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:20
+; GFX8-NEXT: v_fma_f32 v22, v35, v34, v22
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v20
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v4
+; GFX8-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GFX8-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v21, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v4, v4, v20, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:16
+; GFX8-NEXT: v_fma_f32 v21, v35, v34, v21
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v19
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v3
+; GFX8-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v20, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v3, v3, v19, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12
+; GFX8-NEXT: v_fma_f32 v20, v35, v34, v20
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v18
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v2
+; GFX8-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GFX8-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v19, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v2, v2, v18, v33
+; GFX8-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
+; GFX8-NEXT: v_fma_f32 v19, v35, v34, v19
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v17
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v1
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX8-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v18, 16, v33
+; GFX8-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX8-NEXT: v_fma_f32 v1, v1, v17, v33
+; GFX8-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:4
+; GFX8-NEXT: v_fma_f32 v18, v35, v34, v18
+; GFX8-NEXT: v_lshlrev_b32_e32 v34, 16, v16
+; GFX8-NEXT: v_lshlrev_b32_e32 v35, 16, v0
+; GFX8-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX8-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v33, 16, v17
+; GFX8-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX8-NEXT: v_fma_f32 v0, v0, v16, v17
+; GFX8-NEXT: v_bfe_u32 v16, v31, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, v16, v31
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, s4, v16
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v31, v31
+; GFX8-NEXT: v_or_b32_e32 v17, 0x400000, v31
+; GFX8-NEXT: v_cndmask_b32_e32 v16, v16, v17, vcc
+; GFX8-NEXT: v_bfe_u32 v17, v15, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v17, vcc, v17, v15
+; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v15, v15
+; GFX8-NEXT: v_or_b32_e32 v15, 0x400000, v15
+; GFX8-NEXT: v_cndmask_b32_e32 v15, v17, v15, vcc
+; GFX8-NEXT: v_bfe_u32 v17, v32, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v17, vcc, v17, v32
+; GFX8-NEXT: v_add_u32_e32 v17, vcc, s4, v17
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v32, v32
+; GFX8-NEXT: v_or_b32_e32 v31, 0x400000, v32
+; GFX8-NEXT: v_cndmask_b32_e32 v17, v17, v31, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v14, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v14
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v14, v14
+; GFX8-NEXT: v_or_b32_e32 v14, 0x400000, v14
+; GFX8-NEXT: v_cndmask_b32_e32 v14, v31, v14, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v30, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v30
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v30, v30
+; GFX8-NEXT: v_or_b32_e32 v30, 0x400000, v30
+; GFX8-NEXT: v_cndmask_b32_e32 v30, v31, v30, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v13, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v13
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
+; GFX8-NEXT: v_or_b32_e32 v13, 0x400000, v13
+; GFX8-NEXT: v_cndmask_b32_e32 v13, v31, v13, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v29, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v29
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v29, v29
+; GFX8-NEXT: v_or_b32_e32 v29, 0x400000, v29
+; GFX8-NEXT: v_cndmask_b32_e32 v29, v31, v29, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v12, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v12
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
+; GFX8-NEXT: v_or_b32_e32 v12, 0x400000, v12
+; GFX8-NEXT: v_cndmask_b32_e32 v12, v31, v12, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v28, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v28
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v28, v28
+; GFX8-NEXT: v_or_b32_e32 v28, 0x400000, v28
+; GFX8-NEXT: v_cndmask_b32_e32 v28, v31, v28, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v11, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v11
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v11, v11
+; GFX8-NEXT: v_or_b32_e32 v11, 0x400000, v11
+; GFX8-NEXT: v_cndmask_b32_e32 v11, v31, v11, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v27, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v27
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v27, v27
+; GFX8-NEXT: v_or_b32_e32 v27, 0x400000, v27
+; GFX8-NEXT: v_cndmask_b32_e32 v27, v31, v27, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v10, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v10
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v10, v10
+; GFX8-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX8-NEXT: v_cndmask_b32_e32 v10, v31, v10, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v26, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v26
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v26, v26
+; GFX8-NEXT: v_or_b32_e32 v26, 0x400000, v26
+; GFX8-NEXT: v_cndmask_b32_e32 v26, v31, v26, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v9, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v9
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v9, v9
+; GFX8-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX8-NEXT: v_cndmask_b32_e32 v9, v31, v9, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v25, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v25
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v25, v25
+; GFX8-NEXT: v_or_b32_e32 v25, 0x400000, v25
+; GFX8-NEXT: v_cndmask_b32_e32 v25, v31, v25, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v8, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v8
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
+; GFX8-NEXT: v_or_b32_e32 v8, 0x400000, v8
+; GFX8-NEXT: v_cndmask_b32_e32 v8, v31, v8, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v24, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v24
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v24, v24
+; GFX8-NEXT: v_or_b32_e32 v24, 0x400000, v24
+; GFX8-NEXT: v_cndmask_b32_e32 v24, v31, v24, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v7, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v7
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
+; GFX8-NEXT: v_or_b32_e32 v7, 0x400000, v7
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v31, v7, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v23, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v23
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v23, v23
+; GFX8-NEXT: v_or_b32_e32 v23, 0x400000, v23
+; GFX8-NEXT: v_cndmask_b32_e32 v23, v31, v23, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v6, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v6
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX8-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v31, v6, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v22, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v22
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v22, v22
+; GFX8-NEXT: v_or_b32_e32 v22, 0x400000, v22
+; GFX8-NEXT: v_cndmask_b32_e32 v22, v31, v22, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v5, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v5
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
+; GFX8-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v31, v5, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v21, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v21
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v21, v21
+; GFX8-NEXT: v_or_b32_e32 v21, 0x400000, v21
+; GFX8-NEXT: v_cndmask_b32_e32 v21, v31, v21, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v4, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v4
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX8-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v31, v4, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v20, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v20
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v20, v20
+; GFX8-NEXT: v_or_b32_e32 v20, 0x400000, v20
+; GFX8-NEXT: v_cndmask_b32_e32 v20, v31, v20, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v3, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v3
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX8-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v31, v3, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v19, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v19
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v19, v19
+; GFX8-NEXT: v_or_b32_e32 v19, 0x400000, v19
+; GFX8-NEXT: v_cndmask_b32_e32 v19, v31, v19, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v2, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v2
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
+; GFX8-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v31, v2, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v18, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v18
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v18, v18
+; GFX8-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX8-NEXT: v_cndmask_b32_e32 v18, v31, v18, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v1, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_fma_f32 v33, v35, v34, v33
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX8-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v31, v1, vcc
+; GFX8-NEXT: v_bfe_u32 v31, v33, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, v31, v33
+; GFX8-NEXT: v_add_u32_e32 v31, vcc, s4, v31
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v33, v33
+; GFX8-NEXT: v_or_b32_e32 v32, 0x400000, v33
+; GFX8-NEXT: v_cndmask_b32_e32 v31, v31, v32, vcc
+; GFX8-NEXT: v_bfe_u32 v32, v0, 16, 1
+; GFX8-NEXT: v_add_u32_e32 v32, vcc, v32, v0
+; GFX8-NEXT: v_add_u32_e32 v32, vcc, s4, v32
+; GFX8-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; GFX8-NEXT: v_or_b32_e32 v0, 0x400000, v0
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v32, v0, vcc
+; GFX8-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX8-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v8
+; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v10
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; GFX8-NEXT: v_lshrrev_b32_e32 v15, 16, v15
+; GFX8-NEXT: v_lshrrev_b32_e32 v14, 16, v14
+; GFX8-NEXT: v_lshrrev_b32_e32 v13, 16, v13
+; GFX8-NEXT: v_lshrrev_b32_e32 v12, 16, v12
+; GFX8-NEXT: v_alignbit_b32 v0, v0, v31, 16
+; GFX8-NEXT: v_alignbit_b32 v1, v1, v18, 16
+; GFX8-NEXT: v_alignbit_b32 v2, v2, v19, 16
+; GFX8-NEXT: v_alignbit_b32 v3, v3, v20, 16
+; GFX8-NEXT: v_alignbit_b32 v4, v4, v21, 16
+; GFX8-NEXT: v_alignbit_b32 v5, v5, v22, 16
+; GFX8-NEXT: v_alignbit_b32 v6, v6, v23, 16
+; GFX8-NEXT: v_alignbit_b32 v7, v7, v24, 16
+; GFX8-NEXT: v_alignbit_b32 v8, v8, v25, 16
+; GFX8-NEXT: v_alignbit_b32 v9, v9, v26, 16
+; GFX8-NEXT: v_alignbit_b32 v10, v10, v27, 16
+; GFX8-NEXT: v_alignbit_b32 v11, v11, v28, 16
+; GFX8-NEXT: v_alignbit_b32 v12, v12, v29, 16
+; GFX8-NEXT: v_alignbit_b32 v13, v13, v30, 16
+; GFX8-NEXT: v_alignbit_b32 v14, v14, v17, 16
+; GFX8-NEXT: v_alignbit_b32 v15, v15, v16, 16
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX900-LABEL: v_fma_v32bf16:
+; GFX900: ; %bb.0:
+; GFX900-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX900-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32
+; GFX900-NEXT: v_lshlrev_b32_e32 v31, 16, v15
+; GFX900-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; GFX900-NEXT: s_movk_i32 s4, 0x7fff
+; GFX900-NEXT: s_waitcnt vmcnt(1)
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v32
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v15, v15, v33, v32
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:60
+; GFX900-NEXT: v_fma_f32 v31, v31, v35, v34
+; GFX900-NEXT: v_lshlrev_b32_e32 v32, 16, v30
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v14
+; GFX900-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GFX900-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v31, v31
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v14, v14, v30, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:56
+; GFX900-NEXT: v_fma_f32 v32, v34, v32, v35
+; GFX900-NEXT: v_lshlrev_b32_e32 v30, 16, v29
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v13
+; GFX900-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GFX900-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v13, v13, v29, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:52
+; GFX900-NEXT: v_fma_f32 v30, v34, v30, v35
+; GFX900-NEXT: v_lshlrev_b32_e32 v29, 16, v28
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v12
+; GFX900-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GFX900-NEXT: v_and_b32_e32 v12, 0xffff0000, v12
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v12, v12, v28, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:48
+; GFX900-NEXT: v_fma_f32 v29, v34, v29, v35
+; GFX900-NEXT: v_lshlrev_b32_e32 v28, 16, v27
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v11
+; GFX900-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX900-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v11, v11, v27, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:44
+; GFX900-NEXT: v_fma_f32 v28, v34, v28, v35
+; GFX900-NEXT: v_lshlrev_b32_e32 v27, 16, v26
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v10
+; GFX900-NEXT: v_and_b32_e32 v26, 0xffff0000, v26
+; GFX900-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v10, v10, v26, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:40
+; GFX900-NEXT: v_fma_f32 v27, v34, v27, v35
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v25
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v9
+; GFX900-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GFX900-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v26, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v9, v9, v25, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:36
+; GFX900-NEXT: v_fma_f32 v26, v35, v34, v26
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v24
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v8
+; GFX900-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GFX900-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v25, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v8, v8, v24, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:32
+; GFX900-NEXT: v_fma_f32 v25, v35, v34, v25
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v23
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v7
+; GFX900-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX900-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v24, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v7, v7, v23, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:28
+; GFX900-NEXT: v_fma_f32 v24, v35, v34, v24
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v22
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v6
+; GFX900-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX900-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v23, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v6, v6, v22, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:24
+; GFX900-NEXT: v_fma_f32 v23, v35, v34, v23
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v21
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v5
+; GFX900-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GFX900-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v22, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v5, v5, v21, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:20
+; GFX900-NEXT: v_fma_f32 v22, v35, v34, v22
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v20
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v4
+; GFX900-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GFX900-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v21, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v4, v4, v20, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:16
+; GFX900-NEXT: v_fma_f32 v21, v35, v34, v21
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v19
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v3
+; GFX900-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX900-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v20, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v3, v3, v19, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:12
+; GFX900-NEXT: v_fma_f32 v20, v35, v34, v20
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v18
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v2
+; GFX900-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GFX900-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v19, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v2, v2, v18, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:8
+; GFX900-NEXT: v_fma_f32 v19, v35, v34, v19
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v17
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v1
+; GFX900-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX900-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v18, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v1, v1, v17, v33
+; GFX900-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:4
+; GFX900-NEXT: v_fma_f32 v18, v35, v34, v18
+; GFX900-NEXT: v_lshlrev_b32_e32 v34, 16, v16
+; GFX900-NEXT: v_lshlrev_b32_e32 v35, 16, v0
+; GFX900-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX900-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX900-NEXT: s_waitcnt vmcnt(0)
+; GFX900-NEXT: v_lshlrev_b32_e32 v17, 16, v33
+; GFX900-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX900-NEXT: v_fma_f32 v0, v0, v16, v33
+; GFX900-NEXT: v_bfe_u32 v16, v31, 16, 1
+; GFX900-NEXT: v_add3_u32 v16, v16, v31, s4
+; GFX900-NEXT: v_or_b32_e32 v31, 0x400000, v31
+; GFX900-NEXT: v_cndmask_b32_e32 v16, v16, v31, vcc
+; GFX900-NEXT: v_bfe_u32 v31, v15, 16, 1
+; GFX900-NEXT: v_add3_u32 v31, v31, v15, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v15, v15
+; GFX900-NEXT: v_or_b32_e32 v15, 0x400000, v15
+; GFX900-NEXT: v_cndmask_b32_e32 v15, v31, v15, vcc
+; GFX900-NEXT: v_bfe_u32 v31, v32, 16, 1
+; GFX900-NEXT: v_add3_u32 v31, v31, v32, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v32, v32
+; GFX900-NEXT: v_or_b32_e32 v32, 0x400000, v32
+; GFX900-NEXT: v_cndmask_b32_e32 v31, v31, v32, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v14, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v14, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v14, v14
+; GFX900-NEXT: v_or_b32_e32 v14, 0x400000, v14
+; GFX900-NEXT: v_cndmask_b32_e32 v14, v32, v14, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v30, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v30, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v30, v30
+; GFX900-NEXT: v_or_b32_e32 v30, 0x400000, v30
+; GFX900-NEXT: v_cndmask_b32_e32 v30, v32, v30, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v13, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v13, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v13, v13
+; GFX900-NEXT: v_or_b32_e32 v13, 0x400000, v13
+; GFX900-NEXT: v_cndmask_b32_e32 v13, v32, v13, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v29, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v29, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v29, v29
+; GFX900-NEXT: v_or_b32_e32 v29, 0x400000, v29
+; GFX900-NEXT: v_cndmask_b32_e32 v29, v32, v29, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v12, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v12, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v12, v12
+; GFX900-NEXT: v_or_b32_e32 v12, 0x400000, v12
+; GFX900-NEXT: v_cndmask_b32_e32 v12, v32, v12, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v28, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v28, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v28, v28
+; GFX900-NEXT: v_or_b32_e32 v28, 0x400000, v28
+; GFX900-NEXT: v_cndmask_b32_e32 v28, v32, v28, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v11, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v11, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v11, v11
+; GFX900-NEXT: v_or_b32_e32 v11, 0x400000, v11
+; GFX900-NEXT: v_cndmask_b32_e32 v11, v32, v11, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v27, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v27, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v27, v27
+; GFX900-NEXT: v_or_b32_e32 v27, 0x400000, v27
+; GFX900-NEXT: v_cndmask_b32_e32 v27, v32, v27, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v10, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v10, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v10, v10
+; GFX900-NEXT: v_or_b32_e32 v10, 0x400000, v10
+; GFX900-NEXT: v_cndmask_b32_e32 v10, v32, v10, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v26, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v26, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v26, v26
+; GFX900-NEXT: v_or_b32_e32 v26, 0x400000, v26
+; GFX900-NEXT: v_cndmask_b32_e32 v26, v32, v26, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v9, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v9, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v9, v9
+; GFX900-NEXT: v_or_b32_e32 v9, 0x400000, v9
+; GFX900-NEXT: v_cndmask_b32_e32 v9, v32, v9, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v25, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v25, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v25, v25
+; GFX900-NEXT: v_or_b32_e32 v25, 0x400000, v25
+; GFX900-NEXT: v_cndmask_b32_e32 v25, v32, v25, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v8, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v8, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v8, v8
+; GFX900-NEXT: v_or_b32_e32 v8, 0x400000, v8
+; GFX900-NEXT: v_cndmask_b32_e32 v8, v32, v8, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v24, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v24, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v24, v24
+; GFX900-NEXT: v_or_b32_e32 v24, 0x400000, v24
+; GFX900-NEXT: v_cndmask_b32_e32 v24, v32, v24, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v7, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v7, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v7, v7
+; GFX900-NEXT: v_or_b32_e32 v7, 0x400000, v7
+; GFX900-NEXT: v_cndmask_b32_e32 v7, v32, v7, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v23, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v23, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v23, v23
+; GFX900-NEXT: v_or_b32_e32 v23, 0x400000, v23
+; GFX900-NEXT: v_cndmask_b32_e32 v23, v32, v23, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v6, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v6, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v6, v6
+; GFX900-NEXT: v_or_b32_e32 v6, 0x400000, v6
+; GFX900-NEXT: v_cndmask_b32_e32 v6, v32, v6, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v22, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v22, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v22, v22
+; GFX900-NEXT: v_or_b32_e32 v22, 0x400000, v22
+; GFX900-NEXT: v_cndmask_b32_e32 v22, v32, v22, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v5, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v5, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v5, v5
+; GFX900-NEXT: v_or_b32_e32 v5, 0x400000, v5
+; GFX900-NEXT: v_cndmask_b32_e32 v5, v32, v5, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v21, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v21, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v21, v21
+; GFX900-NEXT: v_or_b32_e32 v21, 0x400000, v21
+; GFX900-NEXT: v_cndmask_b32_e32 v21, v32, v21, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v4, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v4, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v4, v4
+; GFX900-NEXT: v_or_b32_e32 v4, 0x400000, v4
+; GFX900-NEXT: v_cndmask_b32_e32 v4, v32, v4, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v20, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v20, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v20, v20
+; GFX900-NEXT: v_or_b32_e32 v20, 0x400000, v20
+; GFX900-NEXT: v_cndmask_b32_e32 v20, v32, v20, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v3, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v3, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v3, v3
+; GFX900-NEXT: v_or_b32_e32 v3, 0x400000, v3
+; GFX900-NEXT: v_cndmask_b32_e32 v3, v32, v3, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v19, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v19, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v19, v19
+; GFX900-NEXT: v_or_b32_e32 v19, 0x400000, v19
+; GFX900-NEXT: v_cndmask_b32_e32 v19, v32, v19, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v2, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v2, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
+; GFX900-NEXT: v_or_b32_e32 v2, 0x400000, v2
+; GFX900-NEXT: v_cndmask_b32_e32 v2, v32, v2, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v18, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v18, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v18, v18
+; GFX900-NEXT: v_or_b32_e32 v18, 0x400000, v18
+; GFX900-NEXT: v_cndmask_b32_e32 v18, v32, v18, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v1, 16, 1
+; GFX900-NEXT: v_fma_f32 v17, v35, v34, v17
+; GFX900-NEXT: v_add3_u32 v32, v32, v1, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX900-NEXT: v_or_b32_e32 v1, 0x400000, v1
+; GFX900-NEXT: v_cndmask_b32_e32 v1, v32, v1, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v17, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v17, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v17, v17
+; GFX900-NEXT: v_or_b32_e32 v17, 0x400000, v17
+; GFX900-NEXT: v_cndmask_b32_e32 v17, v32, v17, vcc
+; GFX900-NEXT: v_bfe_u32 v32, v0, 16, 1
+; GFX900-NEXT: v_add3_u32 v32, v32, v0, s4
+; GFX900-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; GFX900-NEXT: v_or_b32_e32 v0, 0x400000, v0
+; GFX900-NEXT: v_cndmask_b32_e32 v0, v32, v0, vcc
+; GFX900-NEXT: s_mov_b32 s4, 0x7060302
+; GFX900-NEXT: v_perm_b32 v0, v0, v17, s4
+; GFX900-NEXT: v_perm_b32 v1, v1, v18, s4
+; GFX900-NEXT: v_perm_b32 v2, v2, v19, s4
+; GFX900-NEXT: v_perm_b32 v3, v3, v20, s4
+; GFX900-NEXT: v_perm_b32 v4, v4, v21, s4
+; GFX900-NEXT: v_perm_b32 v5, v5, v22, s4
+; GFX900-NEXT: v_perm_b32 v6, v6, v23, s4
+; GFX900-NEXT: v_perm_b32 v7, v7, v24, s4
+; GFX900-NEXT: v_perm_b32 v8, v8, v25, s4
+; GFX900-NEXT: v_perm_b32 v9, v9, v26, s4
+; GFX900-NEXT: v_perm_b32 v10, v10, v27, s4
+; GFX900-NEXT: v_perm_b32 v11, v11, v28, s4
+; GFX900-NEXT: v_perm_b32 v12, v12, v29, s4
+; GFX900-NEXT: v_perm_b32 v13, v13, v30, s4
+; GFX900-NEXT: v_perm_b32 v14, v14, v31, s4
+; GFX900-NEXT: v_perm_b32 v15, v15, v16, s4
+; GFX900-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX950-LABEL: v_fma_v32bf16:
+; GFX950: ; %bb.0:
+; GFX950-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX950-NEXT: scratch_load_dword v35, off, s32 offset:64
+; GFX950-NEXT: scratch_load_dword v36, off, s32
+; GFX950-NEXT: scratch_load_dword v38, off, s32 offset:60
+; GFX950-NEXT: scratch_load_dword v39, off, s32 offset:56
+; GFX950-NEXT: scratch_load_dword v48, off, s32 offset:52
+; GFX950-NEXT: scratch_load_dword v49, off, s32 offset:48
+; GFX950-NEXT: scratch_load_dword v50, off, s32 offset:44
+; GFX950-NEXT: scratch_load_dword v51, off, s32 offset:40
+; GFX950-NEXT: scratch_load_dword v52, off, s32 offset:36
+; GFX950-NEXT: scratch_load_dword v53, off, s32 offset:32
+; GFX950-NEXT: scratch_load_dword v54, off, s32 offset:28
+; GFX950-NEXT: scratch_load_dword v31, off, s32 offset:4
+; GFX950-NEXT: scratch_load_dword v32, off, s32 offset:8
+; GFX950-NEXT: scratch_load_dword v33, off, s32 offset:12
+; GFX950-NEXT: scratch_load_dword v34, off, s32 offset:16
+; GFX950-NEXT: scratch_load_dword v37, off, s32 offset:20
+; GFX950-NEXT: scratch_load_dword v55, off, s32 offset:24
+; GFX950-NEXT: v_accvgpr_write_b32 a3, v43 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_write_b32 a5, v45 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_write_b32 a6, v46 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_write_b32 a8, v56 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_write_b32 a11, v59 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_write_b32 a13, v61 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_write_b32 a14, v62 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_write_b32 a15, v63 ; Reload Reuse
+; GFX950-NEXT: v_and_b32_e32 v43, 0xffff0000, v14
+; GFX950-NEXT: v_lshlrev_b32_e32 v45, 16, v14
+; GFX950-NEXT: v_and_b32_e32 v46, 0xffff0000, v29
+; GFX950-NEXT: v_lshlrev_b32_e32 v56, 16, v29
+; GFX950-NEXT: v_and_b32_e32 v59, 0xffff0000, v12
+; GFX950-NEXT: v_lshlrev_b32_e32 v61, 16, v12
+; GFX950-NEXT: v_and_b32_e32 v62, 0xffff0000, v27
+; GFX950-NEXT: v_lshlrev_b32_e32 v27, 16, v27
+; GFX950-NEXT: v_accvgpr_write_b32 a2, v42 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_write_b32 a4, v44 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_write_b32 a7, v47 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_write_b32 a9, v57 ; Reload Reuse
+; GFX950-NEXT: v_and_b32_e32 v42, 0xffff0000, v30
+; GFX950-NEXT: v_lshlrev_b32_e32 v44, 16, v30
+; GFX950-NEXT: v_and_b32_e32 v47, 0xffff0000, v13
+; GFX950-NEXT: v_lshlrev_b32_e32 v57, 16, v13
+; GFX950-NEXT: v_accvgpr_write_b32 a0, v40 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_write_b32 a1, v41 ; Reload Reuse
+; GFX950-NEXT: v_and_b32_e32 v40, 0xffff0000, v15
+; GFX950-NEXT: v_lshlrev_b32_e32 v41, 16, v15
+; GFX950-NEXT: v_accvgpr_write_b32 a10, v58 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_write_b32 a12, v60 ; Reload Reuse
+; GFX950-NEXT: v_and_b32_e32 v58, 0xffff0000, v28
+; GFX950-NEXT: v_lshlrev_b32_e32 v60, 16, v28
+; GFX950-NEXT: s_waitcnt vmcnt(16)
+; GFX950-NEXT: v_and_b32_e32 v15, 0xffff0000, v35
+; GFX950-NEXT: s_waitcnt vmcnt(15)
+; GFX950-NEXT: v_and_b32_e32 v12, 0xffff0000, v36
+; GFX950-NEXT: v_lshlrev_b32_e32 v63, 16, v36
+; GFX950-NEXT: s_waitcnt vmcnt(14)
+; GFX950-NEXT: v_and_b32_e32 v14, 0xffff0000, v38
+; GFX950-NEXT: v_lshlrev_b32_e32 v29, 16, v38
+; GFX950-NEXT: s_waitcnt vmcnt(11)
+; GFX950-NEXT: v_and_b32_e32 v36, 0xffff0000, v49
+; GFX950-NEXT: v_and_b32_e32 v38, 0xffff0000, v11
+; GFX950-NEXT: v_fmac_f32_e32 v36, v38, v62
+; GFX950-NEXT: v_lshlrev_b32_e32 v38, 16, v49
+; GFX950-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX950-NEXT: v_and_b32_e32 v13, 0xffff0000, v39
+; GFX950-NEXT: v_lshlrev_b32_e32 v30, 16, v39
+; GFX950-NEXT: v_fmac_f32_e32 v38, v11, v27
+; GFX950-NEXT: s_waitcnt vmcnt(10)
+; GFX950-NEXT: v_and_b32_e32 v11, 0xffff0000, v50
+; GFX950-NEXT: v_and_b32_e32 v27, 0xffff0000, v26
+; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v10
+; GFX950-NEXT: v_fmac_f32_e32 v11, v39, v27
+; GFX950-NEXT: v_lshlrev_b32_e32 v27, 16, v50
+; GFX950-NEXT: v_lshlrev_b32_e32 v26, 16, v26
+; GFX950-NEXT: v_lshlrev_b32_e32 v10, 16, v10
+; GFX950-NEXT: v_fmac_f32_e32 v27, v10, v26
+; GFX950-NEXT: s_waitcnt vmcnt(9)
+; GFX950-NEXT: v_and_b32_e32 v10, 0xffff0000, v51
+; GFX950-NEXT: v_and_b32_e32 v26, 0xffff0000, v25
+; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v9
+; GFX950-NEXT: v_fmac_f32_e32 v10, v39, v26
+; GFX950-NEXT: v_lshlrev_b32_e32 v26, 16, v51
+; GFX950-NEXT: v_lshlrev_b32_e32 v25, 16, v25
+; GFX950-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX950-NEXT: v_fmac_f32_e32 v26, v9, v25
+; GFX950-NEXT: s_waitcnt vmcnt(8)
+; GFX950-NEXT: v_and_b32_e32 v9, 0xffff0000, v52
+; GFX950-NEXT: v_and_b32_e32 v25, 0xffff0000, v24
+; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v8
+; GFX950-NEXT: v_fmac_f32_e32 v9, v39, v25
+; GFX950-NEXT: v_lshlrev_b32_e32 v25, 16, v52
+; GFX950-NEXT: v_lshlrev_b32_e32 v24, 16, v24
+; GFX950-NEXT: v_lshlrev_b32_e32 v8, 16, v8
+; GFX950-NEXT: v_fmac_f32_e32 v25, v8, v24
+; GFX950-NEXT: s_waitcnt vmcnt(7)
+; GFX950-NEXT: v_and_b32_e32 v8, 0xffff0000, v53
+; GFX950-NEXT: v_and_b32_e32 v24, 0xffff0000, v23
+; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v7
+; GFX950-NEXT: v_fmac_f32_e32 v8, v39, v24
+; GFX950-NEXT: v_lshlrev_b32_e32 v24, 16, v53
+; GFX950-NEXT: v_lshlrev_b32_e32 v23, 16, v23
+; GFX950-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX950-NEXT: v_fmac_f32_e32 v24, v7, v23
+; GFX950-NEXT: s_waitcnt vmcnt(6)
+; GFX950-NEXT: v_and_b32_e32 v7, 0xffff0000, v54
+; GFX950-NEXT: v_and_b32_e32 v23, 0xffff0000, v22
+; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v6
+; GFX950-NEXT: v_fmac_f32_e32 v7, v39, v23
+; GFX950-NEXT: v_lshlrev_b32_e32 v23, 16, v54
+; GFX950-NEXT: v_lshlrev_b32_e32 v22, 16, v22
+; GFX950-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX950-NEXT: v_fmac_f32_e32 v23, v6, v22
+; GFX950-NEXT: s_waitcnt vmcnt(0)
+; GFX950-NEXT: v_and_b32_e32 v6, 0xffff0000, v55
+; GFX950-NEXT: v_and_b32_e32 v22, 0xffff0000, v21
+; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v5
+; GFX950-NEXT: v_fmac_f32_e32 v6, v39, v22
+; GFX950-NEXT: v_lshlrev_b32_e32 v22, 16, v55
+; GFX950-NEXT: v_lshlrev_b32_e32 v21, 16, v21
+; GFX950-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX950-NEXT: v_fmac_f32_e32 v22, v5, v21
+; GFX950-NEXT: v_and_b32_e32 v5, 0xffff0000, v37
+; GFX950-NEXT: v_and_b32_e32 v21, 0xffff0000, v20
+; GFX950-NEXT: v_and_b32_e32 v39, 0xffff0000, v4
+; GFX950-NEXT: v_fmac_f32_e32 v5, v39, v21
+; GFX950-NEXT: v_lshlrev_b32_e32 v21, 16, v37
+; GFX950-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; GFX950-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX950-NEXT: v_fmac_f32_e32 v21, v4, v20
+; GFX950-NEXT: v_and_b32_e32 v4, 0xffff0000, v34
+; GFX950-NEXT: v_and_b32_e32 v20, 0xffff0000, v19
+; GFX950-NEXT: v_and_b32_e32 v37, 0xffff0000, v3
+; GFX950-NEXT: v_fmac_f32_e32 v4, v37, v20
+; GFX950-NEXT: v_lshlrev_b32_e32 v20, 16, v34
+; GFX950-NEXT: v_lshlrev_b32_e32 v19, 16, v19
+; GFX950-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX950-NEXT: v_fmac_f32_e32 v20, v3, v19
+; GFX950-NEXT: v_and_b32_e32 v3, 0xffff0000, v33
+; GFX950-NEXT: v_and_b32_e32 v19, 0xffff0000, v18
+; GFX950-NEXT: v_and_b32_e32 v34, 0xffff0000, v2
+; GFX950-NEXT: v_fmac_f32_e32 v3, v34, v19
+; GFX950-NEXT: v_lshlrev_b32_e32 v19, 16, v33
+; GFX950-NEXT: v_lshlrev_b32_e32 v18, 16, v18
+; GFX950-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX950-NEXT: v_fmac_f32_e32 v19, v2, v18
+; GFX950-NEXT: v_and_b32_e32 v2, 0xffff0000, v32
+; GFX950-NEXT: v_and_b32_e32 v18, 0xffff0000, v17
+; GFX950-NEXT: v_and_b32_e32 v33, 0xffff0000, v1
+; GFX950-NEXT: v_fmac_f32_e32 v2, v33, v18
+; GFX950-NEXT: v_lshlrev_b32_e32 v18, 16, v32
+; GFX950-NEXT: v_lshlrev_b32_e32 v17, 16, v17
+; GFX950-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX950-NEXT: v_fmac_f32_e32 v18, v1, v17
+; GFX950-NEXT: v_and_b32_e32 v1, 0xffff0000, v31
+; GFX950-NEXT: v_and_b32_e32 v17, 0xffff0000, v16
+; GFX950-NEXT: v_and_b32_e32 v32, 0xffff0000, v0
+; GFX950-NEXT: v_lshlrev_b32_e32 v28, 16, v35
+; GFX950-NEXT: v_fmac_f32_e32 v15, v40, v12
+; GFX950-NEXT: v_and_b32_e32 v12, 0xffff0000, v48
+; GFX950-NEXT: v_lshlrev_b32_e32 v35, 16, v48
+; GFX950-NEXT: v_fmac_f32_e32 v1, v32, v17
+; GFX950-NEXT: v_lshlrev_b32_e32 v17, 16, v31
+; GFX950-NEXT: v_lshlrev_b32_e32 v16, 16, v16
+; GFX950-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX950-NEXT: v_fmac_f32_e32 v28, v41, v63
+; GFX950-NEXT: v_fmac_f32_e32 v14, v43, v42
+; GFX950-NEXT: v_fmac_f32_e32 v29, v45, v44
+; GFX950-NEXT: v_fmac_f32_e32 v13, v47, v46
+; GFX950-NEXT: v_fmac_f32_e32 v30, v57, v56
+; GFX950-NEXT: v_fmac_f32_e32 v12, v59, v58
+; GFX950-NEXT: v_fmac_f32_e32 v35, v61, v60
+; GFX950-NEXT: v_fmac_f32_e32 v17, v0, v16
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v0, v17, v1
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v1, v18, v2
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v2, v19, v3
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v3, v20, v4
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v4, v21, v5
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v5, v22, v6
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v6, v23, v7
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v7, v24, v8
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v8, v25, v9
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v9, v26, v10
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v10, v27, v11
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v11, v38, v36
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v12, v35, v12
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v13, v30, v13
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v14, v29, v14
+; GFX950-NEXT: v_cvt_pk_bf16_f32 v15, v28, v15
+; GFX950-NEXT: v_accvgpr_read_b32 v63, a15 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_read_b32 v62, a14 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_read_b32 v61, a13 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_read_b32 v60, a12 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_read_b32 v59, a11 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_read_b32 v58, a10 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_read_b32 v57, a9 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_read_b32 v56, a8 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_read_b32 v47, a7 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_read_b32 v46, a6 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_read_b32 v45, a5 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_read_b32 v44, a4 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_read_b32 v43, a3 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_read_b32 v42, a2 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_read_b32 v41, a1 ; Reload Reuse
+; GFX950-NEXT: v_accvgpr_read_b32 v40, a0 ; Reload Reuse
+; GFX950-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_fma_v32bf16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_clause 0x8
+; GFX10-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:64
+; GFX10-NEXT: buffer_load_dword v33, off, s[0:3], s32
+; GFX10-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:60
+; GFX10-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:56
+; GFX10-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:52
+; GFX10-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:48
+; GFX10-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:44
+; GFX10-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:40
+; GFX10-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:36
+; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v15
+; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v15
+; GFX10-NEXT: v_and_b32_e32 v52, 0xffff0000, v10
+; GFX10-NEXT: s_waitcnt vmcnt(8)
+; GFX10-NEXT: v_lshlrev_b32_e32 v31, 16, v32
+; GFX10-NEXT: s_waitcnt vmcnt(7)
+; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v33
+; GFX10-NEXT: v_and_b32_e32 v15, 0xffff0000, v32
+; GFX10-NEXT: v_and_b32_e32 v32, 0xffff0000, v33
+; GFX10-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:32
+; GFX10-NEXT: v_fmac_f32_e32 v31, v49, v50
+; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v30
+; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v14
+; GFX10-NEXT: v_fmac_f32_e32 v15, v51, v32
+; GFX10-NEXT: s_waitcnt vmcnt(7)
+; GFX10-NEXT: v_lshlrev_b32_e32 v32, 16, v34
+; GFX10-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
+; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v14
+; GFX10-NEXT: v_and_b32_e32 v14, 0xffff0000, v34
+; GFX10-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:28
+; GFX10-NEXT: v_fmac_f32_e32 v32, v50, v49
+; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v29
+; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v13
+; GFX10-NEXT: v_fmac_f32_e32 v14, v51, v30
+; GFX10-NEXT: s_waitcnt vmcnt(7)
+; GFX10-NEXT: v_lshlrev_b32_e32 v30, 16, v35
+; GFX10-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v13
+; GFX10-NEXT: v_and_b32_e32 v13, 0xffff0000, v35
+; GFX10-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:24
+; GFX10-NEXT: v_fmac_f32_e32 v30, v50, v49
+; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v28
+; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v12
+; GFX10-NEXT: v_fmac_f32_e32 v13, v51, v29
+; GFX10-NEXT: s_waitcnt vmcnt(7)
+; GFX10-NEXT: v_lshlrev_b32_e32 v29, 16, v36
+; GFX10-NEXT: v_and_b32_e32 v28, 0xffff0000, v28
+; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v12
+; GFX10-NEXT: v_and_b32_e32 v12, 0xffff0000, v36
+; GFX10-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:20
+; GFX10-NEXT: v_fmac_f32_e32 v29, v50, v49
+; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v27
+; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v11
+; GFX10-NEXT: v_fmac_f32_e32 v12, v51, v28
+; GFX10-NEXT: s_waitcnt vmcnt(7)
+; GFX10-NEXT: v_lshlrev_b32_e32 v28, 16, v37
+; GFX10-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v11
+; GFX10-NEXT: v_and_b32_e32 v11, 0xffff0000, v37
+; GFX10-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:16
+; GFX10-NEXT: v_fmac_f32_e32 v28, v50, v49
+; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v26
+; GFX10-NEXT: v_lshlrev_b32_e32 v50, 16, v10
+; GFX10-NEXT: v_fmac_f32_e32 v11, v51, v27
+; GFX10-NEXT: s_waitcnt vmcnt(7)
+; GFX10-NEXT: v_lshlrev_b32_e32 v27, 16, v38
+; GFX10-NEXT: v_and_b32_e32 v51, 0xffff0000, v26
+; GFX10-NEXT: v_and_b32_e32 v10, 0xffff0000, v38
+; GFX10-NEXT: v_lshlrev_b32_e32 v38, 16, v25
+; GFX10-NEXT: s_waitcnt vmcnt(6)
+; GFX10-NEXT: v_lshlrev_b32_e32 v26, 16, v39
+; GFX10-NEXT: v_fmac_f32_e32 v27, v50, v49
+; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v9
+; GFX10-NEXT: v_fmac_f32_e32 v10, v52, v51
+; GFX10-NEXT: s_clause 0x1
+; GFX10-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:12
+; GFX10-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:8
+; GFX10-NEXT: v_and_b32_e32 v25, 0xffff0000, v25
+; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v31
+; GFX10-NEXT: v_fmac_f32_e32 v26, v49, v38
+; GFX10-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:4
+; GFX10-NEXT: v_and_b32_e32 v49, 0xffff0000, v9
+; GFX10-NEXT: v_and_b32_e32 v9, 0xffff0000, v39
+; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v24
+; GFX10-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GFX10-NEXT: v_cmp_u_f32_e64 s14, v31, v31
+; GFX10-NEXT: v_fmac_f32_e32 v9, v49, v25
+; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v8
+; GFX10-NEXT: s_waitcnt vmcnt(8)
+; GFX10-NEXT: v_lshlrev_b32_e32 v25, 16, v48
+; GFX10-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX10-NEXT: v_and_b32_e32 v48, 0xffff0000, v48
+; GFX10-NEXT: v_fmac_f32_e32 v25, v49, v39
+; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v23
+; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v7
+; GFX10-NEXT: v_fmac_f32_e32 v48, v8, v24
+; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX10-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v22
+; GFX10-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
+; GFX10-NEXT: s_waitcnt vmcnt(7)
+; GFX10-NEXT: v_lshlrev_b32_e32 v8, 16, v33
+; GFX10-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX10-NEXT: v_fmac_f32_e32 v8, v49, v39
+; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v6
+; GFX10-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX10-NEXT: v_fmac_f32_e32 v33, v7, v23
+; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v21
+; GFX10-NEXT: s_waitcnt vmcnt(6)
+; GFX10-NEXT: v_lshlrev_b32_e32 v7, 16, v34
+; GFX10-NEXT: v_and_b32_e32 v34, 0xffff0000, v34
+; GFX10-NEXT: v_lshlrev_b32_e32 v23, 16, v5
+; GFX10-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GFX10-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX10-NEXT: v_fmac_f32_e32 v7, v39, v24
+; GFX10-NEXT: v_fmac_f32_e32 v34, v6, v22
+; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v20
+; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v4
+; GFX10-NEXT: s_waitcnt vmcnt(5)
+; GFX10-NEXT: v_lshlrev_b32_e32 v6, 16, v35
+; GFX10-NEXT: v_and_b32_e32 v35, 0xffff0000, v35
+; GFX10-NEXT: v_lshlrev_b32_e32 v22, 16, v19
+; GFX10-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GFX10-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX10-NEXT: v_fmac_f32_e32 v6, v23, v49
+; GFX10-NEXT: v_fmac_f32_e32 v35, v5, v21
+; GFX10-NEXT: v_lshlrev_b32_e32 v23, 16, v3
+; GFX10-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX10-NEXT: s_waitcnt vmcnt(4)
+; GFX10-NEXT: v_lshlrev_b32_e32 v5, 16, v36
+; GFX10-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX10-NEXT: v_and_b32_e32 v36, 0xffff0000, v36
+; GFX10-NEXT: v_lshlrev_b32_e32 v49, 16, v18
+; GFX10-NEXT: v_lshlrev_b32_e32 v21, 16, v2
+; GFX10-NEXT: v_fmac_f32_e32 v5, v39, v24
+; GFX10-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GFX10-NEXT: v_fmac_f32_e32 v36, v4, v20
+; GFX10-NEXT: v_lshlrev_b32_e32 v20, 16, v16
+; GFX10-NEXT: s_waitcnt vmcnt(3)
+; GFX10-NEXT: v_lshlrev_b32_e32 v39, 16, v37
+; GFX10-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX10-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX10-NEXT: v_lshlrev_b32_e32 v24, 16, v17
+; GFX10-NEXT: v_lshlrev_b32_e32 v4, 16, v1
+; GFX10-NEXT: v_fmac_f32_e32 v39, v23, v22
+; GFX10-NEXT: v_and_b32_e32 v23, 0xffff0000, v37
+; GFX10-NEXT: v_lshlrev_b32_e32 v22, 16, v0
+; GFX10-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX10-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX10-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX10-NEXT: v_fmac_f32_e32 v23, v3, v19
+; GFX10-NEXT: s_waitcnt vmcnt(2)
+; GFX10-NEXT: v_lshlrev_b32_e32 v37, 16, v50
+; GFX10-NEXT: s_waitcnt vmcnt(1)
+; GFX10-NEXT: v_lshlrev_b32_e32 v3, 16, v51
+; GFX10-NEXT: v_and_b32_e32 v19, 0xffff0000, v51
+; GFX10-NEXT: v_and_b32_e32 v50, 0xffff0000, v50
+; GFX10-NEXT: v_cmp_u_f32_e64 s5, v33, v33
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_lshlrev_b32_e32 v51, 16, v38
+; GFX10-NEXT: v_and_b32_e32 v38, 0xffff0000, v38
+; GFX10-NEXT: v_fmac_f32_e32 v37, v21, v49
+; GFX10-NEXT: v_fmac_f32_e32 v50, v2, v18
+; GFX10-NEXT: v_fmac_f32_e32 v19, v1, v17
+; GFX10-NEXT: v_or_b32_e32 v1, 0x400000, v48
+; GFX10-NEXT: v_fmac_f32_e32 v38, v0, v16
+; GFX10-NEXT: v_bfe_u32 v0, v48, 16, 1
+; GFX10-NEXT: v_bfe_u32 v16, v33, 16, 1
+; GFX10-NEXT: v_bfe_u32 v2, v8, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v17, 0x400000, v33
+; GFX10-NEXT: v_bfe_u32 v18, v7, 16, 1
+; GFX10-NEXT: v_bfe_u32 v21, v34, 16, 1
+; GFX10-NEXT: v_add3_u32 v0, v0, v48, 0x7fff
+; GFX10-NEXT: v_bfe_u32 v48, v35, 16, 1
+; GFX10-NEXT: v_add3_u32 v16, v16, v33, 0x7fff
+; GFX10-NEXT: v_bfe_u32 v33, v5, 16, 1
+; GFX10-NEXT: v_fmac_f32_e32 v3, v4, v24
+; GFX10-NEXT: v_fmac_f32_e32 v51, v22, v20
+; GFX10-NEXT: v_or_b32_e32 v4, 0x400000, v8
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v7
+; GFX10-NEXT: v_or_b32_e32 v22, 0x400000, v34
+; GFX10-NEXT: v_bfe_u32 v24, v6, 16, 1
+; GFX10-NEXT: v_add3_u32 v2, v2, v8, 0x7fff
+; GFX10-NEXT: v_cmp_u_f32_e64 s4, v8, v8
+; GFX10-NEXT: v_or_b32_e32 v8, 0x400000, v35
+; GFX10-NEXT: v_add3_u32 v18, v18, v7, 0x7fff
+; GFX10-NEXT: v_cmp_u_f32_e64 s6, v7, v7
+; GFX10-NEXT: v_or_b32_e32 v7, 0x400000, v5
+; GFX10-NEXT: v_add3_u32 v21, v21, v34, 0x7fff
+; GFX10-NEXT: v_cmp_u_f32_e64 s7, v34, v34
+; GFX10-NEXT: v_bfe_u32 v34, v39, 16, 1
+; GFX10-NEXT: v_add3_u32 v48, v48, v35, 0x7fff
+; GFX10-NEXT: v_cmp_u_f32_e64 s9, v35, v35
+; GFX10-NEXT: v_bfe_u32 v35, v23, 16, 1
+; GFX10-NEXT: v_add3_u32 v33, v33, v5, 0x7fff
+; GFX10-NEXT: v_cmp_u_f32_e64 s10, v5, v5
+; GFX10-NEXT: v_bfe_u32 v5, v37, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v49, 0x400000, v6
+; GFX10-NEXT: v_add3_u32 v24, v24, v6, 0x7fff
+; GFX10-NEXT: v_cmp_u_f32_e64 s8, v6, v6
+; GFX10-NEXT: v_or_b32_e32 v6, 0x400000, v39
+; GFX10-NEXT: v_add3_u32 v34, v34, v39, 0x7fff
+; GFX10-NEXT: v_cmp_u_f32_e64 s11, v39, v39
+; GFX10-NEXT: v_or_b32_e32 v39, 0x400000, v23
+; GFX10-NEXT: v_add3_u32 v35, v35, v23, 0x7fff
+; GFX10-NEXT: v_cmp_u_f32_e64 s12, v23, v23
+; GFX10-NEXT: v_or_b32_e32 v23, 0x400000, v37
+; GFX10-NEXT: v_add3_u32 v5, v5, v37, 0x7fff
+; GFX10-NEXT: v_cmp_u_f32_e64 s13, v37, v37
+; GFX10-NEXT: v_bfe_u32 v37, v31, 16, 1
+; GFX10-NEXT: v_cndmask_b32_e64 v53, v2, v4, s4
+; GFX10-NEXT: v_bfe_u32 v4, v3, 16, 1
+; GFX10-NEXT: v_cndmask_b32_e64 v16, v16, v17, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v17, v18, v20, s6
+; GFX10-NEXT: v_add3_u32 v37, v37, v31, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e64 v18, v21, v22, s7
+; GFX10-NEXT: v_or_b32_e32 v20, 0x400000, v3
+; GFX10-NEXT: v_bfe_u32 v22, v19, 16, 1
+; GFX10-NEXT: v_add3_u32 v4, v4, v3, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e64 v31, v37, v52, s14
+; GFX10-NEXT: v_bfe_u32 v37, v15, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v15
+; GFX10-NEXT: v_cmp_u_f32_e64 s14, v15, v15
+; GFX10-NEXT: v_cndmask_b32_e64 v21, v24, v49, s8
+; GFX10-NEXT: v_or_b32_e32 v24, 0x400000, v19
+; GFX10-NEXT: v_add3_u32 v37, v37, v15, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e64 v7, v33, v7, s10
+; GFX10-NEXT: v_bfe_u32 v33, v51, 16, 1
+; GFX10-NEXT: v_add3_u32 v22, v22, v19, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e64 v6, v34, v6, s11
+; GFX10-NEXT: v_cndmask_b32_e64 v15, v37, v52, s14
+; GFX10-NEXT: v_bfe_u32 v37, v32, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v32
+; GFX10-NEXT: v_cmp_u_f32_e64 s14, v32, v32
+; GFX10-NEXT: v_or_b32_e32 v34, 0x400000, v51
+; GFX10-NEXT: v_cndmask_b32_e64 v35, v35, v39, s12
+; GFX10-NEXT: v_add3_u32 v37, v37, v32, 0x7fff
+; GFX10-NEXT: v_bfe_u32 v39, v38, 16, 1
+; GFX10-NEXT: v_add3_u32 v33, v33, v51, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v23, s13
+; GFX10-NEXT: v_or_b32_e32 v23, 0x400000, v38
+; GFX10-NEXT: v_cndmask_b32_e64 v32, v37, v52, s14
+; GFX10-NEXT: v_bfe_u32 v37, v14, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v14
+; GFX10-NEXT: v_cmp_u_f32_e64 s14, v14, v14
+; GFX10-NEXT: v_add3_u32 v39, v39, v38, 0x7fff
+; GFX10-NEXT: v_or_b32_e32 v2, 0x400000, v50
+; GFX10-NEXT: v_add3_u32 v37, v37, v14, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e64 v8, v48, v8, s9
+; GFX10-NEXT: v_perm_b32 v15, v15, v31, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e64 v14, v37, v52, s14
+; GFX10-NEXT: v_bfe_u32 v37, v30, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v30
+; GFX10-NEXT: v_cmp_u_f32_e64 s14, v30, v30
+; GFX10-NEXT: v_perm_b32 v14, v14, v32, 0x7060302
+; GFX10-NEXT: v_add3_u32 v37, v37, v30, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e64 v30, v37, v52, s14
+; GFX10-NEXT: v_bfe_u32 v37, v13, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v13
+; GFX10-NEXT: v_cmp_u_f32_e64 s14, v13, v13
+; GFX10-NEXT: v_add3_u32 v37, v37, v13, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e64 v13, v37, v52, s14
+; GFX10-NEXT: v_bfe_u32 v37, v29, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v29
+; GFX10-NEXT: v_cmp_u_f32_e64 s14, v29, v29
+; GFX10-NEXT: v_perm_b32 v13, v13, v30, 0x7060302
+; GFX10-NEXT: v_add3_u32 v37, v37, v29, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e64 v29, v37, v52, s14
+; GFX10-NEXT: v_bfe_u32 v37, v12, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v12
+; GFX10-NEXT: v_cmp_u_f32_e64 s14, v12, v12
+; GFX10-NEXT: v_add3_u32 v37, v37, v12, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e64 v12, v37, v52, s14
+; GFX10-NEXT: v_bfe_u32 v37, v28, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v28
+; GFX10-NEXT: v_cmp_u_f32_e64 s14, v28, v28
+; GFX10-NEXT: v_perm_b32 v12, v12, v29, 0x7060302
+; GFX10-NEXT: v_add3_u32 v37, v37, v28, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e64 v28, v37, v52, s14
+; GFX10-NEXT: v_bfe_u32 v37, v11, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v11
+; GFX10-NEXT: v_cmp_u_f32_e64 s14, v11, v11
+; GFX10-NEXT: v_add3_u32 v37, v37, v11, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e64 v11, v37, v52, s14
+; GFX10-NEXT: v_bfe_u32 v37, v27, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v27
+; GFX10-NEXT: v_cmp_u_f32_e64 s14, v27, v27
+; GFX10-NEXT: v_perm_b32 v11, v11, v28, 0x7060302
+; GFX10-NEXT: v_add3_u32 v37, v37, v27, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e64 v27, v37, v52, s14
+; GFX10-NEXT: v_bfe_u32 v37, v10, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v10
+; GFX10-NEXT: v_cmp_u_f32_e64 s14, v10, v10
+; GFX10-NEXT: v_add3_u32 v37, v37, v10, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e64 v10, v37, v52, s14
+; GFX10-NEXT: v_bfe_u32 v37, v26, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v26
+; GFX10-NEXT: v_cmp_u_f32_e64 s14, v26, v26
+; GFX10-NEXT: v_perm_b32 v10, v10, v27, 0x7060302
+; GFX10-NEXT: v_add3_u32 v37, v37, v26, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e64 v26, v37, v52, s14
+; GFX10-NEXT: v_bfe_u32 v37, v9, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v9
+; GFX10-NEXT: v_cmp_u_f32_e64 s14, v9, v9
+; GFX10-NEXT: v_add3_u32 v37, v37, v9, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e64 v9, v37, v52, s14
+; GFX10-NEXT: v_bfe_u32 v37, v25, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v52, 0x400000, v25
+; GFX10-NEXT: v_cmp_u_f32_e64 s14, v25, v25
+; GFX10-NEXT: v_perm_b32 v9, v9, v26, 0x7060302
+; GFX10-NEXT: v_add3_u32 v37, v37, v25, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e64 v25, v37, v52, s14
+; GFX10-NEXT: v_cndmask_b32_e32 v52, v0, v1, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
+; GFX10-NEXT: v_bfe_u32 v1, v50, 16, 1
+; GFX10-NEXT: v_bfe_u32 v37, v36, 16, 1
+; GFX10-NEXT: v_or_b32_e32 v0, 0x400000, v36
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v4, v20, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v19, v19
+; GFX10-NEXT: v_add3_u32 v1, v1, v50, 0x7fff
+; GFX10-NEXT: v_add3_u32 v37, v37, v36, 0x7fff
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v22, v24, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v51, v51
+; GFX10-NEXT: v_cndmask_b32_e32 v19, v33, v34, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX10-NEXT: v_cndmask_b32_e32 v20, v39, v23, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v1, v2, vcc_lo
+; GFX10-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX10-NEXT: v_perm_b32 v1, v4, v3, 0x7060302
+; GFX10-NEXT: v_perm_b32 v3, v35, v6, 0x7060302
+; GFX10-NEXT: v_perm_b32 v6, v18, v17, 0x7060302
+; GFX10-NEXT: v_perm_b32 v2, v2, v5, 0x7060302
+; GFX10-NEXT: v_cndmask_b32_e32 v22, v37, v0, vcc_lo
+; GFX10-NEXT: v_perm_b32 v0, v20, v19, 0x7060302
+; GFX10-NEXT: v_perm_b32 v5, v8, v21, 0x7060302
+; GFX10-NEXT: v_perm_b32 v8, v52, v25, 0x7060302
+; GFX10-NEXT: v_perm_b32 v4, v22, v7, 0x7060302
+; GFX10-NEXT: v_perm_b32 v7, v16, v53, 0x7060302
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11TRUE16-LABEL: v_fma_v32bf16:
+; GFX11TRUE16: ; %bb.0:
+; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11TRUE16-NEXT: s_clause 0x10
+; GFX11TRUE16-NEXT: scratch_load_b32 v31, off, s32 offset:64
+; GFX11TRUE16-NEXT: scratch_load_b32 v32, off, s32
+; GFX11TRUE16-NEXT: scratch_load_b32 v33, off, s32 offset:60
+; GFX11TRUE16-NEXT: scratch_load_b32 v34, off, s32 offset:56
+; GFX11TRUE16-NEXT: scratch_load_b32 v35, off, s32 offset:52
+; GFX11TRUE16-NEXT: scratch_load_b32 v36, off, s32 offset:48
+; GFX11TRUE16-NEXT: scratch_load_b32 v37, off, s32 offset:44
+; GFX11TRUE16-NEXT: scratch_load_b32 v38, off, s32 offset:40
+; GFX11TRUE16-NEXT: scratch_load_b32 v39, off, s32 offset:36
+; GFX11TRUE16-NEXT: scratch_load_b32 v48, off, s32 offset:32
+; GFX11TRUE16-NEXT: scratch_load_b32 v49, off, s32 offset:28
+; GFX11TRUE16-NEXT: scratch_load_b32 v50, off, s32 offset:24
+; GFX11TRUE16-NEXT: scratch_load_b32 v51, off, s32 offset:20
+; GFX11TRUE16-NEXT: scratch_load_b32 v52, off, s32 offset:16
+; GFX11TRUE16-NEXT: scratch_load_b32 v53, off, s32 offset:12
+; GFX11TRUE16-NEXT: scratch_load_b32 v54, off, s32 offset:8
+; GFX11TRUE16-NEXT: scratch_load_b32 v55, off, s32 offset:4
+; GFX11TRUE16-NEXT: v_and_b32_e32 v99, 0xffff0000, v21
+; GFX11TRUE16-NEXT: v_and_b32_e32 v100, 0xffff0000, v5
+; GFX11TRUE16-NEXT: v_and_b32_e32 v101, 0xffff0000, v20
+; GFX11TRUE16-NEXT: v_and_b32_e32 v102, 0xffff0000, v4
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX11TRUE16-NEXT: v_and_b32_e32 v115, 0xffff0000, v17
+; GFX11TRUE16-NEXT: v_and_b32_e32 v116, 0xffff0000, v1
+; GFX11TRUE16-NEXT: v_and_b32_e32 v97, 0xffff0000, v22
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v22, 16, v22
+; GFX11TRUE16-NEXT: v_and_b32_e32 v117, 0xffff0000, v16
+; GFX11TRUE16-NEXT: v_and_b32_e32 v118, 0xffff0000, v0
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v16, 16, v16
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX11TRUE16-NEXT: v_and_b32_e32 v103, 0xffff0000, v19
+; GFX11TRUE16-NEXT: v_and_b32_e32 v112, 0xffff0000, v3
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v19, 16, v19
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX11TRUE16-NEXT: v_and_b32_e32 v85, 0xffff0000, v24
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v24, 16, v24
+; GFX11TRUE16-NEXT: v_and_b32_e32 v113, 0xffff0000, v18
+; GFX11TRUE16-NEXT: v_and_b32_e32 v114, 0xffff0000, v2
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX11TRUE16-NEXT: s_waitcnt vmcnt(16)
+; GFX11TRUE16-NEXT: v_and_b32_e32 v119, 0xffff0000, v31
+; GFX11TRUE16-NEXT: s_waitcnt vmcnt(15)
+; GFX11TRUE16-NEXT: v_and_b32_e32 v128, 0xffff0000, v32
+; GFX11TRUE16-NEXT: s_waitcnt vmcnt(14)
+; GFX11TRUE16-NEXT: v_and_b32_e32 v129, 0xffff0000, v33
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v33, 16, v33
+; GFX11TRUE16-NEXT: v_and_b32_e32 v68, 0xffff0000, v13
+; GFX11TRUE16-NEXT: s_waitcnt vmcnt(12)
+; GFX11TRUE16-NEXT: v_and_b32_e32 v131, 0xffff0000, v35
+; GFX11TRUE16-NEXT: s_waitcnt vmcnt(10)
+; GFX11TRUE16-NEXT: v_and_b32_e32 v133, 0xffff0000, v37
+; GFX11TRUE16-NEXT: s_waitcnt vmcnt(9)
+; GFX11TRUE16-NEXT: v_and_b32_e32 v134, 0xffff0000, v38
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v37, 16, v37
+; GFX11TRUE16-NEXT: s_waitcnt vmcnt(7)
+; GFX11TRUE16-NEXT: v_and_b32_e32 v144, 0xffff0000, v48
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v48, 16, v48
+; GFX11TRUE16-NEXT: s_waitcnt vmcnt(5)
+; GFX11TRUE16-NEXT: v_and_b32_e32 v146, 0xffff0000, v50
+; GFX11TRUE16-NEXT: v_and_b32_e32 v145, 0xffff0000, v49
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v49, 16, v49
+; GFX11TRUE16-NEXT: s_waitcnt vmcnt(4)
+; GFX11TRUE16-NEXT: v_and_b32_e32 v147, 0xffff0000, v51
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v51, 16, v51
+; GFX11TRUE16-NEXT: v_and_b32_e32 v96, 0xffff0000, v7
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX11TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11TRUE16-NEXT: v_and_b32_e32 v148, 0xffff0000, v55
+; GFX11TRUE16-NEXT: v_and_b32_e32 v87, 0xffff0000, v23
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v23, 16, v23
+; GFX11TRUE16-NEXT: v_and_b32_e32 v83, 0xffff0000, v25
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v146, v100, v99 :: v_dual_lshlrev_b32 v25, 16, v25
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v55, 16, v55
+; GFX11TRUE16-NEXT: v_and_b32_e32 v98, 0xffff0000, v6
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX11TRUE16-NEXT: v_and_b32_e32 v84, 0xffff0000, v9
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v48, v7, v23
+; GFX11TRUE16-NEXT: v_and_b32_e32 v135, 0xffff0000, v39
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v39, 16, v39
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v49, v6, v22
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v134, v84, v83 :: v_dual_lshlrev_b32 v13, 16, v13
+; GFX11TRUE16-NEXT: v_bfe_u32 v83, v146, 16, 1
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v51, v4, v20 :: v_dual_fmac_f32 v148, v118, v117
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v144, v96, v87 :: v_dual_and_b32 v81, 0xffff0000, v26
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v55, v0, v16 :: v_dual_lshlrev_b32 v26, 16, v26
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v145, v98, v97
+; GFX11TRUE16-NEXT: v_or_b32_e32 v84, 0x400000, v146
+; GFX11TRUE16-NEXT: v_add3_u32 v83, v83, v146, 0x7fff
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v18, 16, v18
+; GFX11TRUE16-NEXT: v_and_b32_e32 v86, 0xffff0000, v8
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v8, 16, v8
+; GFX11TRUE16-NEXT: v_and_b32_e32 v82, 0xffff0000, v10
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v147, v102, v101 :: v_dual_lshlrev_b32 v10, 16, v10
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v38, 16, v38
+; GFX11TRUE16-NEXT: v_and_b32_e32 v69, 0xffff0000, v28
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v37, v10, v26 :: v_dual_lshlrev_b32 v28, 16, v28
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v39, v8, v24
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v133, v82, v81 :: v_dual_and_b32 v70, 0xffff0000, v12
+; GFX11TRUE16-NEXT: v_bfe_u32 v97, v51, 16, 1
+; GFX11TRUE16-NEXT: v_bfe_u32 v23, v37, 16, 1
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v135, v86, v85 :: v_dual_lshlrev_b32 v12, 16, v12
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v35, 16, v35
+; GFX11TRUE16-NEXT: v_and_b32_e32 v80, 0xffff0000, v11
+; GFX11TRUE16-NEXT: v_and_b32_e32 v132, 0xffff0000, v36
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v36, 16, v36
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v50, 16, v50
+; GFX11TRUE16-NEXT: v_or_b32_e32 v22, 0x400000, v133
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX11TRUE16-NEXT: v_or_b32_e32 v24, 0x400000, v37
+; GFX11TRUE16-NEXT: v_or_b32_e32 v98, 0x400000, v51
+; GFX11TRUE16-NEXT: v_add3_u32 v23, v23, v37, 0x7fff
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v21, 16, v21
+; GFX11TRUE16-NEXT: v_and_b32_e32 v71, 0xffff0000, v27
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v27, 16, v27
+; GFX11TRUE16-NEXT: v_add3_u32 v97, v97, v51, 0x7fff
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v17, 16, v17
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v31, 16, v31
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v32, 16, v32
+; GFX11TRUE16-NEXT: v_and_b32_e32 v64, 0xffff0000, v15
+; GFX11TRUE16-NEXT: v_and_b32_e32 v130, 0xffff0000, v34
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v35, v12, v28 :: v_dual_lshlrev_b32 v34, 16, v34
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v36, v11, v27
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v50, v5, v21
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v132, v80, v71 :: v_dual_and_b32 v67, 0xffff0000, v29
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v29, 16, v29
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v15, 16, v15
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v130, v68, v67 :: v_dual_and_b32 v65, 0xffff0000, v30
+; GFX11TRUE16-NEXT: v_or_b32_e32 v20, 0x400000, v36
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v34, v13, v29 :: v_dual_fmac_f32 v31, v15, v32
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v119, v64, v128 :: v_dual_and_b32 v66, 0xffff0000, v14
+; GFX11TRUE16-NEXT: v_and_b32_e32 v64, 0xffff0000, v52
+; GFX11TRUE16-NEXT: v_and_b32_e32 v128, 0xffff0000, v53
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v53, 16, v53
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v129, v66, v65 :: v_dual_lshlrev_b32 v30, 16, v30
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v52, 16, v52
+; GFX11TRUE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v54
+; GFX11TRUE16-NEXT: v_lshlrev_b32_e32 v54, 16, v54
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v64, v112, v103
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v38, v9, v25
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v131, v70, v69 :: v_dual_lshlrev_b32 v14, 16, v14
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v53, v2, v18
+; GFX11TRUE16-NEXT: v_bfe_u32 v0, v119, 16, 1
+; GFX11TRUE16-NEXT: v_bfe_u32 v2, v31, 16, 1
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11TRUE16-NEXT: v_dual_fmac_f32 v33, v14, v30 :: v_dual_fmac_f32 v52, v3, v19
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v54, v1, v17
+; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v119
+; GFX11TRUE16-NEXT: v_or_b32_e32 v3, 0x400000, v31
+; GFX11TRUE16-NEXT: v_bfe_u32 v4, v129, 16, 1
+; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v119, 0x7fff
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v119, v119
+; GFX11TRUE16-NEXT: v_add3_u32 v2, v2, v31, 0x7fff
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e64 s0, v31, v31
+; GFX11TRUE16-NEXT: v_or_b32_e32 v5, 0x400000, v129
+; GFX11TRUE16-NEXT: v_bfe_u32 v6, v33, 16, 1
+; GFX11TRUE16-NEXT: v_bfe_u32 v14, v132, 16, 1
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v15, v0, v1, vcc_lo
+; GFX11TRUE16-NEXT: v_cndmask_b32_e64 v149, v2, v3, s0
+; GFX11TRUE16-NEXT: v_add3_u32 v2, v4, v129, 0x7fff
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v129, v129
+; GFX11TRUE16-NEXT: v_or_b32_e32 v7, 0x400000, v33
+; GFX11TRUE16-NEXT: v_bfe_u32 v8, v130, 16, 1
+; GFX11TRUE16-NEXT: v_add3_u32 v3, v6, v33, 0x7fff
+; GFX11TRUE16-NEXT: v_add3_u32 v150, v14, v132, 0x7fff
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v14, v2, v5, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11TRUE16-NEXT: v_or_b32_e32 v9, 0x400000, v130
+; GFX11TRUE16-NEXT: v_bfe_u32 v10, v34, 16, 1
+; GFX11TRUE16-NEXT: v_bfe_u32 v13, v35, 16, 1
+; GFX11TRUE16-NEXT: v_add3_u32 v4, v8, v130, 0x7fff
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v33, v3, v7, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v130, v130
+; GFX11TRUE16-NEXT: v_or_b32_e32 v11, 0x400000, v34
+; GFX11TRUE16-NEXT: v_bfe_u32 v12, v131, 16, 1
+; GFX11TRUE16-NEXT: v_add3_u32 v6, v10, v34, 0x7fff
+; GFX11TRUE16-NEXT: v_add3_u32 v10, v13, v35, 0x7fff
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v13, v4, v9, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX11TRUE16-NEXT: v_or_b32_e32 v16, 0x400000, v131
+; GFX11TRUE16-NEXT: v_add3_u32 v8, v12, v131, 0x7fff
+; GFX11TRUE16-NEXT: v_or_b32_e32 v17, 0x400000, v35
+; GFX11TRUE16-NEXT: v_or_b32_e32 v18, 0x400000, v132
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v34, v6, v11, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v131, v131
+; GFX11TRUE16-NEXT: v_bfe_u32 v19, v36, 16, 1
+; GFX11TRUE16-NEXT: v_bfe_u32 v21, v133, 16, 1
+; GFX11TRUE16-NEXT: v_bfe_u32 v25, v134, 16, 1
+; GFX11TRUE16-NEXT: v_or_b32_e32 v26, 0x400000, v134
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v12, v8, v16, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11TRUE16-NEXT: v_add3_u32 v19, v19, v36, 0x7fff
+; GFX11TRUE16-NEXT: v_add3_u32 v21, v21, v133, 0x7fff
+; GFX11TRUE16-NEXT: v_bfe_u32 v27, v38, 16, 1
+; GFX11TRUE16-NEXT: v_add3_u32 v25, v25, v134, 0x7fff
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v16, v10, v17, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v132, v132
+; GFX11TRUE16-NEXT: v_or_b32_e32 v28, 0x400000, v38
+; GFX11TRUE16-NEXT: v_bfe_u32 v29, v135, 16, 1
+; GFX11TRUE16-NEXT: v_add3_u32 v27, v27, v38, 0x7fff
+; GFX11TRUE16-NEXT: v_or_b32_e32 v30, 0x400000, v135
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v11, v150, v18, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11TRUE16-NEXT: v_bfe_u32 v65, v39, 16, 1
+; GFX11TRUE16-NEXT: v_add3_u32 v29, v29, v135, 0x7fff
+; GFX11TRUE16-NEXT: v_or_b32_e32 v66, 0x400000, v39
+; GFX11TRUE16-NEXT: v_bfe_u32 v67, v144, 16, 1
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v17, v19, v20, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v133, v133
+; GFX11TRUE16-NEXT: v_add3_u32 v65, v65, v39, 0x7fff
+; GFX11TRUE16-NEXT: v_or_b32_e32 v68, 0x400000, v144
+; GFX11TRUE16-NEXT: v_bfe_u32 v69, v48, 16, 1
+; GFX11TRUE16-NEXT: v_add3_u32 v67, v67, v144, 0x7fff
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v10, v21, v22, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11TRUE16-NEXT: v_or_b32_e32 v70, 0x400000, v48
+; GFX11TRUE16-NEXT: v_bfe_u32 v71, v145, 16, 1
+; GFX11TRUE16-NEXT: v_add3_u32 v69, v69, v48, 0x7fff
+; GFX11TRUE16-NEXT: v_or_b32_e32 v80, 0x400000, v145
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v18, v23, v24, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v134, v134
+; GFX11TRUE16-NEXT: v_bfe_u32 v81, v49, 16, 1
+; GFX11TRUE16-NEXT: v_add3_u32 v71, v71, v145, 0x7fff
+; GFX11TRUE16-NEXT: v_or_b32_e32 v82, 0x400000, v49
+; GFX11TRUE16-NEXT: v_bfe_u32 v85, v50, 16, 1
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v9, v25, v26, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11TRUE16-NEXT: v_add3_u32 v81, v81, v49, 0x7fff
+; GFX11TRUE16-NEXT: v_or_b32_e32 v86, 0x400000, v50
+; GFX11TRUE16-NEXT: v_bfe_u32 v87, v147, 16, 1
+; GFX11TRUE16-NEXT: v_add3_u32 v85, v85, v50, 0x7fff
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v19, v27, v28, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v135, v135
+; GFX11TRUE16-NEXT: v_or_b32_e32 v96, 0x400000, v147
+; GFX11TRUE16-NEXT: v_add3_u32 v87, v87, v147, 0x7fff
+; GFX11TRUE16-NEXT: v_bfe_u32 v99, v64, 16, 1
+; GFX11TRUE16-NEXT: v_or_b32_e32 v100, 0x400000, v64
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v8, v29, v30, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39
+; GFX11TRUE16-NEXT: v_bfe_u32 v101, v52, 16, 1
+; GFX11TRUE16-NEXT: v_add3_u32 v99, v99, v64, 0x7fff
+; GFX11TRUE16-NEXT: v_or_b32_e32 v102, 0x400000, v52
+; GFX11TRUE16-NEXT: v_bfe_u32 v117, v54, 16, 1
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v20, v65, v66, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v144, v144
+; GFX11TRUE16-NEXT: v_add3_u32 v101, v101, v52, 0x7fff
+; GFX11TRUE16-NEXT: v_or_b32_e32 v118, 0x400000, v54
+; GFX11TRUE16-NEXT: v_bfe_u32 v0, v55, 16, 1
+; GFX11TRUE16-NEXT: v_add3_u32 v117, v117, v54, 0x7fff
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v7, v67, v68, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
+; GFX11TRUE16-NEXT: v_or_b32_e32 v1, 0x400000, v55
+; GFX11TRUE16-NEXT: v_add3_u32 v0, v0, v55, 0x7fff
+; GFX11TRUE16-NEXT: v_bfe_u32 v119, v148, 16, 1
+; GFX11TRUE16-NEXT: v_or_b32_e32 v31, 0x400000, v148
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v21, v69, v70, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v145, v145
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v8.l, v20.h
+; GFX11TRUE16-NEXT: v_add3_u32 v119, v119, v148, 0x7fff
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v9.l, v19.h
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v7.l, v21.h
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v6, v71, v80, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v49, v49
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v10.l, v18.h
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v11.l, v17.h
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v12.l, v16.h
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v13.l, v34.h
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v22, v81, v82, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v146, v146
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v14.l, v33.h
+; GFX11TRUE16-NEXT: v_mov_b16_e64 v15.l, v149.h
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v6.l, v22.h
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v5, v83, v84, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v23, v85, v86, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v147, v147
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v5.l, v23.h
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v4, v87, v96, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v51, v51
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v128, v114, v113
+; GFX11TRUE16-NEXT: v_bfe_u32 v113, v53, 16, 1
+; GFX11TRUE16-NEXT: v_or_b32_e32 v114, 0x400000, v53
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v24, v97, v98, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v64, v64
+; GFX11TRUE16-NEXT: v_bfe_u32 v103, v128, 16, 1
+; GFX11TRUE16-NEXT: v_or_b32_e32 v112, 0x400000, v128
+; GFX11TRUE16-NEXT: v_add3_u32 v113, v113, v53, 0x7fff
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v4.l, v24.h
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v3, v99, v100, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v52, v52
+; GFX11TRUE16-NEXT: v_add3_u32 v103, v103, v128, 0x7fff
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v25, v101, v102, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v128, v128
+; GFX11TRUE16-NEXT: v_fmac_f32_e32 v32, v116, v115
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v3.l, v25.h
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v2, v103, v112, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v53, v53
+; GFX11TRUE16-NEXT: v_bfe_u32 v115, v32, 16, 1
+; GFX11TRUE16-NEXT: v_or_b32_e32 v116, 0x400000, v32
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v26, v113, v114, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v54, v54
+; GFX11TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11TRUE16-NEXT: v_add3_u32 v115, v115, v32, 0x7fff
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v2.l, v26.h
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v27, v117, v118, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v55, v55
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v28, v0, v1, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v1, v115, v116, vcc_lo
+; GFX11TRUE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v148, v148
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v1.l, v27.h
+; GFX11TRUE16-NEXT: v_cndmask_b32_e32 v0, v119, v31, vcc_lo
+; GFX11TRUE16-NEXT: v_mov_b16_e32 v0.l, v28.h
+; GFX11TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11FAKE16-LABEL: v_fma_v32bf16:
+; GFX11FAKE16: ; %bb.0:
+; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11FAKE16-NEXT: s_clause 0x10
+; GFX11FAKE16-NEXT: scratch_load_b32 v31, off, s32 offset:64
+; GFX11FAKE16-NEXT: scratch_load_b32 v32, off, s32
+; GFX11FAKE16-NEXT: scratch_load_b32 v33, off, s32 offset:60
+; GFX11FAKE16-NEXT: scratch_load_b32 v34, off, s32 offset:56
+; GFX11FAKE16-NEXT: scratch_load_b32 v35, off, s32 offset:52
+; GFX11FAKE16-NEXT: scratch_load_b32 v36, off, s32 offset:48
+; GFX11FAKE16-NEXT: scratch_load_b32 v37, off, s32 offset:44
+; GFX11FAKE16-NEXT: scratch_load_b32 v38, off, s32 offset:40
+; GFX11FAKE16-NEXT: scratch_load_b32 v39, off, s32 offset:36
+; GFX11FAKE16-NEXT: scratch_load_b32 v48, off, s32 offset:32
+; GFX11FAKE16-NEXT: scratch_load_b32 v49, off, s32 offset:28
+; GFX11FAKE16-NEXT: scratch_load_b32 v50, off, s32 offset:24
+; GFX11FAKE16-NEXT: scratch_load_b32 v51, off, s32 offset:20
+; GFX11FAKE16-NEXT: scratch_load_b32 v52, off, s32 offset:16
+; GFX11FAKE16-NEXT: scratch_load_b32 v53, off, s32 offset:12
+; GFX11FAKE16-NEXT: scratch_load_b32 v54, off, s32 offset:8
+; GFX11FAKE16-NEXT: scratch_load_b32 v55, off, s32 offset:4
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v99, 16, v21
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v100, 16, v5
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v97, 16, v22
+; GFX11FAKE16-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v101, 16, v20
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v102, 16, v4
+; GFX11FAKE16-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; GFX11FAKE16-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v117, 16, v16
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v118, 16, v0
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v87, 16, v23
+; GFX11FAKE16-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
+; GFX11FAKE16-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; GFX11FAKE16-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v98, 16, v6
+; GFX11FAKE16-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v103, 16, v19
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v112, 16, v3
+; GFX11FAKE16-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
+; GFX11FAKE16-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v85, 16, v24
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v113, 16, v18
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v114, 16, v2
+; GFX11FAKE16-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v115, 16, v17
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v116, 16, v1
+; GFX11FAKE16-NEXT: v_and_b32_e32 v24, 0xffff0000, v24
+; GFX11FAKE16-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
+; GFX11FAKE16-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; GFX11FAKE16-NEXT: s_waitcnt vmcnt(15)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v128, 16, v32
+; GFX11FAKE16-NEXT: s_waitcnt vmcnt(14)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v129, 16, v33
+; GFX11FAKE16-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v68, 16, v13
+; GFX11FAKE16-NEXT: s_waitcnt vmcnt(12)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v131, 16, v35
+; GFX11FAKE16-NEXT: s_waitcnt vmcnt(10)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v133, 16, v37
+; GFX11FAKE16-NEXT: s_waitcnt vmcnt(9)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v134, 16, v38
+; GFX11FAKE16-NEXT: v_and_b32_e32 v37, 0xffff0000, v37
+; GFX11FAKE16-NEXT: s_waitcnt vmcnt(7)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v144, 16, v48
+; GFX11FAKE16-NEXT: v_and_b32_e32 v48, 0xffff0000, v48
+; GFX11FAKE16-NEXT: s_waitcnt vmcnt(5)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v146, 16, v50
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v145, 16, v49
+; GFX11FAKE16-NEXT: v_and_b32_e32 v49, 0xffff0000, v49
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v84, 16, v9
+; GFX11FAKE16-NEXT: s_waitcnt vmcnt(4)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v147, 16, v51
+; GFX11FAKE16-NEXT: v_and_b32_e32 v51, 0xffff0000, v51
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v96, 16, v7
+; GFX11FAKE16-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v83, 16, v25
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v146, v100, v99 :: v_dual_and_b32 v25, 0xffff0000, v25
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v135, 16, v39
+; GFX11FAKE16-NEXT: v_and_b32_e32 v39, 0xffff0000, v39
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v48, v7, v23 :: v_dual_fmac_f32 v49, v6, v22
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v134, v84, v83 :: v_dual_and_b32 v13, 0xffff0000, v13
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v51, v4, v20
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v144, v96, v87 :: v_dual_lshlrev_b32 v81, 16, v26
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v145, v98, v97 :: v_dual_and_b32 v26, 0xffff0000, v26
+; GFX11FAKE16-NEXT: v_or_b32_e32 v84, 0x400000, v146
+; GFX11FAKE16-NEXT: v_and_b32_e32 v18, 0xffff0000, v18
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v86, 16, v8
+; GFX11FAKE16-NEXT: v_and_b32_e32 v8, 0xffff0000, v8
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v82, 16, v10
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v147, v102, v101 :: v_dual_and_b32 v10, 0xffff0000, v10
+; GFX11FAKE16-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; GFX11FAKE16-NEXT: v_and_b32_e32 v38, 0xffff0000, v38
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v69, 16, v28
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v37, v10, v26 :: v_dual_and_b32 v28, 0xffff0000, v28
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v39, v8, v24
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v133, v82, v81 :: v_dual_lshlrev_b32 v70, 16, v12
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v135, v86, v85 :: v_dual_and_b32 v12, 0xffff0000, v12
+; GFX11FAKE16-NEXT: v_and_b32_e32 v35, 0xffff0000, v35
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v80, 16, v11
+; GFX11FAKE16-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v132, 16, v36
+; GFX11FAKE16-NEXT: v_and_b32_e32 v36, 0xffff0000, v36
+; GFX11FAKE16-NEXT: v_and_b32_e32 v50, 0xffff0000, v50
+; GFX11FAKE16-NEXT: v_or_b32_e32 v22, 0x400000, v133
+; GFX11FAKE16-NEXT: v_and_b32_e32 v21, 0xffff0000, v21
+; GFX11FAKE16-NEXT: v_or_b32_e32 v24, 0x400000, v37
+; GFX11FAKE16-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v71, 16, v27
+; GFX11FAKE16-NEXT: v_and_b32_e32 v27, 0xffff0000, v27
+; GFX11FAKE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v130, 16, v34
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v35, v12, v28 :: v_dual_and_b32 v34, 0xffff0000, v34
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v36, v11, v27
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v50, v5, v21
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v132, v80, v71 :: v_dual_lshlrev_b32 v67, 16, v29
+; GFX11FAKE16-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
+; GFX11FAKE16-NEXT: v_or_b32_e32 v98, 0x400000, v51
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v119, 16, v31
+; GFX11FAKE16-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v64, 16, v15
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v34, v13, v29 :: v_dual_and_b32 v15, 0xffff0000, v15
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v130, v68, v67 :: v_dual_lshlrev_b32 v65, 16, v30
+; GFX11FAKE16-NEXT: v_bfe_u32 v23, v37, 16, 1
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3)
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v31, v15, v32 :: v_dual_lshlrev_b32 v66, 16, v14
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v119, v64, v128
+; GFX11FAKE16-NEXT: s_waitcnt vmcnt(3)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v64, 16, v52
+; GFX11FAKE16-NEXT: s_waitcnt vmcnt(2)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v128, 16, v53
+; GFX11FAKE16-NEXT: v_and_b32_e32 v53, 0xffff0000, v53
+; GFX11FAKE16-NEXT: s_waitcnt vmcnt(1)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v15, 16, v54
+; GFX11FAKE16-NEXT: v_and_b32_e32 v32, 0xffff0000, v54
+; GFX11FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11FAKE16-NEXT: v_lshlrev_b32_e32 v54, 16, v55
+; GFX11FAKE16-NEXT: v_and_b32_e32 v55, 0xffff0000, v55
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v129, v66, v65 :: v_dual_and_b32 v30, 0xffff0000, v30
+; GFX11FAKE16-NEXT: v_and_b32_e32 v52, 0xffff0000, v52
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v64, v112, v103
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v38, v9, v25
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v131, v70, v69 :: v_dual_and_b32 v14, 0xffff0000, v14
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v53, v2, v18
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v55, v0, v16
+; GFX11FAKE16-NEXT: v_bfe_u32 v0, v119, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v2, v31, 16, 1
+; GFX11FAKE16-NEXT: v_dual_fmac_f32 v33, v14, v30 :: v_dual_fmac_f32 v52, v3, v19
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v32, v1, v17
+; GFX11FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v119
+; GFX11FAKE16-NEXT: v_or_b32_e32 v3, 0x400000, v31
+; GFX11FAKE16-NEXT: v_bfe_u32 v4, v129, 16, 1
+; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v119, 0x7fff
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v119, v119
+; GFX11FAKE16-NEXT: v_add3_u32 v2, v2, v31, 0x7fff
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e64 s0, v31, v31
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v128, v114, v113
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v54, v118, v117
+; GFX11FAKE16-NEXT: v_or_b32_e32 v5, 0x400000, v129
+; GFX11FAKE16-NEXT: v_bfe_u32 v6, v33, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v10, v34, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v14, v35, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v19, v36, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v27, v38, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v65, v39, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v69, v48, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v81, v49, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v85, v50, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v97, v51, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v101, v52, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v113, v53, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v117, v32, 16, 1
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v148, v0, v1, vcc_lo
+; GFX11FAKE16-NEXT: v_cndmask_b32_e64 v149, v2, v3, s0
+; GFX11FAKE16-NEXT: v_add3_u32 v2, v4, v129, 0x7fff
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v129, v129
+; GFX11FAKE16-NEXT: v_fmac_f32_e32 v15, v116, v115
+; GFX11FAKE16-NEXT: v_or_b32_e32 v7, 0x400000, v33
+; GFX11FAKE16-NEXT: v_bfe_u32 v8, v130, 16, 1
+; GFX11FAKE16-NEXT: v_add3_u32 v3, v6, v33, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v6, v10, v34, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v10, v14, v35, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v14, v19, v36, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v19, v23, v37, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v23, v27, v38, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v27, v65, v39, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v65, v69, v48, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v69, v81, v49, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v81, v85, v50, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v85, v97, v51, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v97, v101, v52, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v101, v113, v53, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v113, v117, v32, 0x7fff
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v117, v2, v5, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v33, v33
+; GFX11FAKE16-NEXT: v_or_b32_e32 v9, 0x400000, v130
+; GFX11FAKE16-NEXT: v_bfe_u32 v12, v131, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v17, v132, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v21, v133, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v25, v134, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v29, v135, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v67, v144, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v71, v145, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v83, v146, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v87, v147, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v99, v64, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v103, v128, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v115, v15, 16, 1
+; GFX11FAKE16-NEXT: v_bfe_u32 v119, v54, 16, 1
+; GFX11FAKE16-NEXT: v_add3_u32 v4, v8, v130, 0x7fff
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v33, v3, v7, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v130, v130
+; GFX11FAKE16-NEXT: v_or_b32_e32 v11, 0x400000, v34
+; GFX11FAKE16-NEXT: v_add3_u32 v8, v12, v131, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v12, v17, v132, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v17, v21, v133, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v21, v25, v134, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v25, v29, v135, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v29, v67, v144, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v67, v71, v145, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v71, v83, v146, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v83, v87, v147, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v87, v99, v64, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v99, v103, v128, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v103, v115, v15, 0x7fff
+; GFX11FAKE16-NEXT: v_add3_u32 v115, v119, v54, 0x7fff
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v119, v4, v9, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v34, v34
+; GFX11FAKE16-NEXT: v_or_b32_e32 v13, 0x400000, v131
+; GFX11FAKE16-NEXT: v_or_b32_e32 v16, 0x400000, v35
+; GFX11FAKE16-NEXT: v_or_b32_e32 v18, 0x400000, v132
+; GFX11FAKE16-NEXT: v_or_b32_e32 v20, 0x400000, v36
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v34, v6, v11, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v131, v131
+; GFX11FAKE16-NEXT: v_or_b32_e32 v26, 0x400000, v134
+; GFX11FAKE16-NEXT: v_or_b32_e32 v28, 0x400000, v38
+; GFX11FAKE16-NEXT: v_or_b32_e32 v30, 0x400000, v135
+; GFX11FAKE16-NEXT: v_or_b32_e32 v66, 0x400000, v39
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v13, v8, v13, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v35, v35
+; GFX11FAKE16-NEXT: v_or_b32_e32 v68, 0x400000, v144
+; GFX11FAKE16-NEXT: v_or_b32_e32 v70, 0x400000, v48
+; GFX11FAKE16-NEXT: v_or_b32_e32 v80, 0x400000, v145
+; GFX11FAKE16-NEXT: v_or_b32_e32 v82, 0x400000, v49
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v16, v10, v16, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v132, v132
+; GFX11FAKE16-NEXT: v_or_b32_e32 v86, 0x400000, v50
+; GFX11FAKE16-NEXT: v_or_b32_e32 v96, 0x400000, v147
+; GFX11FAKE16-NEXT: v_or_b32_e32 v100, 0x400000, v64
+; GFX11FAKE16-NEXT: v_or_b32_e32 v102, 0x400000, v52
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v18, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v36, v36
+; GFX11FAKE16-NEXT: v_or_b32_e32 v112, 0x400000, v128
+; GFX11FAKE16-NEXT: v_or_b32_e32 v116, 0x400000, v15
+; GFX11FAKE16-NEXT: v_or_b32_e32 v118, 0x400000, v32
+; GFX11FAKE16-NEXT: v_or_b32_e32 v31, 0x400000, v54
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v20, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v133, v133
+; GFX11FAKE16-NEXT: v_bfe_u32 v0, v55, 16, 1
+; GFX11FAKE16-NEXT: v_or_b32_e32 v1, 0x400000, v55
+; GFX11FAKE16-NEXT: v_or_b32_e32 v114, 0x400000, v53
+; GFX11FAKE16-NEXT: v_perm_b32 v11, v12, v11, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v10, v17, v22, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v37, v37
+; GFX11FAKE16-NEXT: v_add3_u32 v0, v0, v55, 0x7fff
+; GFX11FAKE16-NEXT: v_perm_b32 v12, v16, v13, 0x7060302
+; GFX11FAKE16-NEXT: v_perm_b32 v13, v34, v119, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v14, v19, v24, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v134, v134
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_perm_b32 v10, v14, v10, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v9, v21, v26, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v38, v38
+; GFX11FAKE16-NEXT: v_perm_b32 v14, v33, v117, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v17, v23, v28, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v135, v135
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_perm_b32 v9, v17, v9, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v8, v25, v30, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v39, v39
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v18, v27, v66, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v144, v144
+; GFX11FAKE16-NEXT: v_perm_b32 v8, v18, v8, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v7, v29, v68, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v48, v48
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v19, v65, v70, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v145, v145
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_perm_b32 v7, v19, v7, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v6, v67, v80, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v49, v49
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v20, v69, v82, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v146, v146
+; GFX11FAKE16-NEXT: v_perm_b32 v6, v20, v6, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v5, v71, v84, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v50, v50
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v21, v81, v86, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v147, v147
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_perm_b32 v5, v21, v5, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v4, v83, v96, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v64, v64
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v3, v87, v100, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v52, v52
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v22, v97, v102, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v128, v128
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_perm_b32 v3, v22, v3, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v2, v99, v112, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v15, v103, v116, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v32, v32
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v23, v113, v118, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v54, v54
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v24, v115, v31, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v55, v55
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v53, v53
+; GFX11FAKE16-NEXT: v_perm_b32 v1, v23, v15, 0x7060302
+; GFX11FAKE16-NEXT: v_perm_b32 v15, v149, v148, 0x7060302
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11FAKE16-NEXT: v_perm_b32 v0, v0, v24, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v25, v101, v114, vcc_lo
+; GFX11FAKE16-NEXT: v_cmp_u_f32_e32 vcc_lo, v51, v51
+; GFX11FAKE16-NEXT: v_perm_b32 v2, v25, v2, 0x7060302
+; GFX11FAKE16-NEXT: v_cndmask_b32_e32 v26, v85, v98, vcc_lo
+; GFX11FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11FAKE16-NEXT: v_perm_b32 v4, v26, v4, 0x7060302
+; GFX11FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
; GFX1250-LABEL: v_fma_v32bf16:
; GFX1250: ; %bb.0:
-; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
-; GFX1250-NEXT: s_wait_kmcnt 0x0
-; GFX1250-NEXT: s_clause 0x10
-; GFX1250-NEXT: scratch_load_b32 v31, off, s32 offset:64
-; GFX1250-NEXT: scratch_load_b32 v32, off, s32 offset:4
-; GFX1250-NEXT: scratch_load_b32 v33, off, s32 offset:8
-; GFX1250-NEXT: scratch_load_b32 v34, off, s32 offset:12
-; GFX1250-NEXT: scratch_load_b32 v35, off, s32 offset:16
-; GFX1250-NEXT: scratch_load_b32 v36, off, s32 offset:20
-; GFX1250-NEXT: scratch_load_b32 v37, off, s32 offset:24
-; GFX1250-NEXT: scratch_load_b32 v38, off, s32 offset:28
-; GFX1250-NEXT: scratch_load_b32 v39, off, s32 offset:32
-; GFX1250-NEXT: scratch_load_b32 v48, off, s32 offset:36
-; GFX1250-NEXT: scratch_load_b32 v49, off, s32 offset:40
-; GFX1250-NEXT: scratch_load_b32 v50, off, s32 offset:44
-; GFX1250-NEXT: scratch_load_b32 v51, off, s32 offset:48
-; GFX1250-NEXT: scratch_load_b32 v52, off, s32 offset:52
-; GFX1250-NEXT: scratch_load_b32 v53, off, s32 offset:56
-; GFX1250-NEXT: scratch_load_b32 v54, off, s32 offset:60
-; GFX1250-NEXT: scratch_load_b32 v55, off, s32
-; GFX1250-NEXT: s_wait_loadcnt 0xf
-; GFX1250-NEXT: v_pk_fma_bf16 v0, v0, v16, v32
-; GFX1250-NEXT: s_wait_loadcnt 0xe
-; GFX1250-NEXT: v_pk_fma_bf16 v1, v1, v17, v33
-; GFX1250-NEXT: s_wait_loadcnt 0xd
-; GFX1250-NEXT: v_pk_fma_bf16 v2, v2, v18, v34
-; GFX1250-NEXT: s_wait_loadcnt 0xc
-; GFX1250-NEXT: v_pk_fma_bf16 v3, v3, v19, v35
-; GFX1250-NEXT: s_wait_loadcnt 0xb
-; GFX1250-NEXT: v_pk_fma_bf16 v4, v4, v20, v36
-; GFX1250-NEXT: s_wait_loadcnt 0xa
-; GFX1250-NEXT: v_pk_fma_bf16 v5, v5, v21, v37
-; GFX1250-NEXT: s_wait_loadcnt 0x9
-; GFX1250-NEXT: v_pk_fma_bf16 v6, v6, v22, v38
-; GFX1250-NEXT: s_wait_loadcnt 0x8
-; GFX1250-NEXT: v_pk_fma_bf16 v7, v7, v23, v39
-; GFX1250-NEXT: s_wait_loadcnt 0x7
-; GFX1250-NEXT: v_pk_fma_bf16 v8, v8, v24, v48
-; GFX1250-NEXT: s_wait_loadcnt 0x6
-; GFX1250-NEXT: v_pk_fma_bf16 v9, v9, v25, v49
-; GFX1250-NEXT: s_wait_loadcnt 0x5
-; GFX1250-NEXT: v_pk_fma_bf16 v10, v10, v26, v50
-; GFX1250-NEXT: s_wait_loadcnt 0x4
-; GFX1250-NEXT: v_pk_fma_bf16 v11, v11, v27, v51
-; GFX1250-NEXT: s_wait_loadcnt 0x3
-; GFX1250-NEXT: v_pk_fma_bf16 v12, v12, v28, v52
-; GFX1250-NEXT: s_wait_loadcnt 0x2
-; GFX1250-NEXT: v_pk_fma_bf16 v13, v13, v29, v53
-; GFX1250-NEXT: s_wait_loadcnt 0x1
-; GFX1250-NEXT: v_pk_fma_bf16 v14, v14, v30, v54
-; GFX1250-NEXT: s_wait_loadcnt 0x0
-; GFX1250-NEXT: v_pk_fma_bf16 v15, v15, v55, v31
-; GFX1250-NEXT: s_set_pc_i64 s[30:31]
-define <32 x bfloat> @v_fma_v32bf16(<32 x bfloat> %a, <32 x bfloat> %b, <32 x bfloat> %c) {
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: s_clause 0x10
+; GFX1250-NEXT: scratch_load_b32 v31, off, s32 offset:64
+; GFX1250-NEXT: scratch_load_b32 v32, off, s32 offset:4
+; GFX1250-NEXT: scratch_load_b32 v33, off, s32 offset:8
+; GFX1250-NEXT: scratch_load_b32 v34, off, s32 offset:12
+; GFX1250-NEXT: scratch_load_b32 v35, off, s32 offset:16
+; GFX1250-NEXT: scratch_load_b32 v36, off, s32 offset:20
+; GFX1250-NEXT: scratch_load_b32 v37, off, s32 offset:24
+; GFX1250-NEXT: scratch_load_b32 v38, off, s32 offset:28
+; GFX1250-NEXT: scratch_load_b32 v39, off, s32 offset:32
+; GFX1250-NEXT: scratch_load_b32 v48, off, s32 offset:36
+; GFX1250-NEXT: scratch_load_b32 v49, off, s32 offset:40
+; GFX1250-NEXT: scratch_load_b32 v50, off, s32 offset:44
+; GFX1250-NEXT: scratch_load_b32 v51, off, s32 offset:48
+; GFX1250-NEXT: scratch_load_b32 v52, off, s32 offset:52
+; GFX1250-NEXT: scratch_load_b32 v53, off, s32 offset:56
+; GFX1250-NEXT: scratch_load_b32 v54, off, s32 offset:60
+; GFX1250-NEXT: scratch_load_b32 v55, off, s32
+; GFX1250-NEXT: s_wait_loadcnt 0xf
+; GFX1250-NEXT: v_pk_fma_bf16 v0, v0, v16, v32
+; GFX1250-NEXT: s_wait_loadcnt 0xe
+; GFX1250-NEXT: v_pk_fma_bf16 v1, v1, v17, v33
+; GFX1250-NEXT: s_wait_loadcnt 0xd
+; GFX1250-NEXT: v_pk_fma_bf16 v2, v2, v18, v34
+; GFX1250-NEXT: s_wait_loadcnt 0xc
+; GFX1250-NEXT: v_pk_fma_bf16 v3, v3, v19, v35
+; GFX1250-NEXT: s_wait_loadcnt 0xb
+; GFX1250-NEXT: v_pk_fma_bf16 v4, v4, v20, v36
+; GFX1250-NEXT: s_wait_loadcnt 0xa
+; GFX1250-NEXT: v_pk_fma_bf16 v5, v5, v21, v37
+; GFX1250-NEXT: s_wait_loadcnt 0x9
+; GFX1250-NEXT: v_pk_fma_bf16 v6, v6, v22, v38
+; GFX1250-NEXT: s_wait_loadcnt 0x8
+; GFX1250-NEXT: v_pk_fma_bf16 v7, v7, v23, v39
+; GFX1250-NEXT: s_wait_loadcnt 0x7
+; GFX1250-NEXT: v_pk_fma_bf16 v8, v8, v24, v48
+; GFX1250-NEXT: s_wait_loadcnt 0x6
+; GFX1250-NEXT: v_pk_fma_bf16 v9, v9, v25, v49
+; GFX1250-NEXT: s_wait_loadcnt 0x5
+; GFX1250-NEXT: v_pk_fma_bf16 v10, v10, v26, v50
+; GFX1250-NEXT: s_wait_loadcnt 0x4
+; GFX1250-NEXT: v_pk_fma_bf16 v11, v11, v27, v51
+; GFX1250-NEXT: s_wait_loadcnt 0x3
+; GFX1250-NEXT: v_pk_fma_bf16 v12, v12, v28, v52
+; GFX1250-NEXT: s_wait_loadcnt 0x2
+; GFX1250-NEXT: v_pk_fma_bf16 v13, v13, v29, v53
+; GFX1250-NEXT: s_wait_loadcnt 0x1
+; GFX1250-NEXT: v_pk_fma_bf16 v14, v14, v30, v54
+; GFX1250-NEXT: s_wait_loadcnt 0x0
+; GFX1250-NEXT: v_pk_fma_bf16 v15, v15, v55, v31
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%op = call <32 x bfloat> @llvm.fma.v32bf16(<32 x bfloat> %a, <32 x bfloat> %b, <32 x bfloat> %c)
ret <32 x bfloat> %op
}
diff --git a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll
index 92d3277..bb22144 100644
--- a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.v2f16.ll
@@ -4148,28 +4148,28 @@ define <2 x half> @mul_select_negk_negfabs_v2f16(<2 x i32> %c, <2 x half> %x, <2
; --------------------------------------------------------------------------------
define <2 x half> @select_fneg_posk_src_add_v2f16(<2 x i32> %c, <2 x half> %x, <2 x half> %y) {
-; CI-SAFE-LABEL: select_fneg_posk_src_add_v2f16:
-; CI-SAFE: ; %bb.0:
-; CI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3
-; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2
-; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3
-; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2
-; CI-SAFE-NEXT: v_add_f32_e32 v3, 4.0, v3
-; CI-SAFE-NEXT: v_add_f32_e32 v2, 4.0, v2
-; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3
-; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2
-; CI-SAFE-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; CI-SAFE-NEXT: v_or_b32_e32 v2, v2, v3
-; CI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80008000, v2
-; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v2
-; CI-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2
-; CI-SAFE-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc
-; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
-; CI-SAFE-NEXT: v_cndmask_b32_e32 v1, 2.0, v2, vcc
-; CI-SAFE-NEXT: s_setpc_b64 s[30:31]
+; CI-LABEL: select_fneg_posk_src_add_v2f16:
+; CI: ; %bb.0:
+; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; CI-NEXT: v_add_f32_e32 v3, 4.0, v3
+; CI-NEXT: v_add_f32_e32 v2, 4.0, v2
+; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; CI-NEXT: v_or_b32_e32 v2, v2, v3
+; CI-NEXT: v_xor_b32_e32 v2, 0x80008000, v2
+; CI-NEXT: v_cvt_f32_f16_e32 v3, v2
+; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v2, vcc
+; CI-NEXT: s_setpc_b64 s[30:31]
;
; VI-SAFE-LABEL: select_fneg_posk_src_add_v2f16:
; VI-SAFE: ; %bb.0:
@@ -4229,21 +4229,6 @@ define <2 x half> @select_fneg_posk_src_add_v2f16(<2 x i32> %c, <2 x half> %x, <
; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
-; CI-NSZ-LABEL: select_fneg_posk_src_add_v2f16:
-; CI-NSZ: ; %bb.0:
-; CI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v2, v2
-; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v3, v3
-; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v2, v2
-; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v3, v3
-; CI-NSZ-NEXT: v_sub_f32_e32 v2, -4.0, v2
-; CI-NSZ-NEXT: v_sub_f32_e32 v3, -4.0, v3
-; CI-NSZ-NEXT: v_cndmask_b32_e32 v0, 2.0, v2, vcc
-; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
-; CI-NSZ-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc
-; CI-NSZ-NEXT: s_setpc_b64 s[30:31]
-;
; VI-NSZ-LABEL: select_fneg_posk_src_add_v2f16:
; VI-NSZ: ; %bb.0:
; VI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -4302,6 +4287,105 @@ define <2 x half> @select_fneg_posk_src_add_v2f16(<2 x i32> %c, <2 x half> %x, <
ret <2 x half> %select
}
+define <2 x half> @select_fneg_posk_src_add_v2f16_nsz(<2 x i32> %c, <2 x half> %x, <2 x half> %y) {
+; CI-LABEL: select_fneg_posk_src_add_v2f16_nsz:
+; CI: ; %bb.0:
+; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; CI-NEXT: v_sub_f32_e32 v2, -4.0, v2
+; CI-NEXT: v_sub_f32_e32 v3, -4.0, v3
+; CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v2, vcc
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc
+; CI-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: select_fneg_posk_src_add_v2f16_nsz:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; VI-NEXT: v_mov_b32_e32 v1, 0xc400
+; VI-NEXT: v_sub_f16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_sub_f16_e32 v2, -4.0, v2
+; VI-NEXT: v_mov_b32_e32 v3, 0x4000
+; VI-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0
+; VI-NEXT: v_cndmask_b32_e64 v0, v3, v2, s[4:5]
+; VI-NEXT: v_cndmask_b32_sdwa v1, v3, v1, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: select_fneg_posk_src_add_v2f16_nsz:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GFX9-NEXT: v_pk_add_f16 v1, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x4000
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, v1, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_sdwa v1, v2, v1, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v0, v1, v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-SAFE-TRUE16-LABEL: select_fneg_posk_src_add_v2f16_nsz:
+; GFX11-SAFE-TRUE16: ; %bb.0:
+; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-SAFE-TRUE16-NEXT: v_pk_add_f16 v0, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0]
+; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 0, v1
+; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo
+; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0
+; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-SAFE-FAKE16-LABEL: select_fneg_posk_src_add_v2f16_nsz:
+; GFX11-SAFE-FAKE16: ; %bb.0:
+; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SAFE-FAKE16-NEXT: v_pk_add_f16 v2, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0]
+; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo
+; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
+; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo
+; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
+; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-NSZ-TRUE16-LABEL: select_fneg_posk_src_add_v2f16_nsz:
+; GFX11-NSZ-TRUE16: ; %bb.0:
+; GFX11-NSZ-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NSZ-TRUE16-NEXT: v_pk_add_f16 v0, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0]
+; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 0, v1
+; GFX11-NSZ-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo
+; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0
+; GFX11-NSZ-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-NSZ-FAKE16-LABEL: select_fneg_posk_src_add_v2f16_nsz:
+; GFX11-NSZ-FAKE16: ; %bb.0:
+; GFX11-NSZ-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NSZ-FAKE16-NEXT: v_pk_add_f16 v2, v2, -4.0 op_sel_hi:[1,0] neg_lo:[1,0] neg_hi:[1,0]
+; GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NSZ-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-NSZ-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo
+; GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
+; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo
+; GFX11-NSZ-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NSZ-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
+; GFX11-NSZ-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %cmp = icmp eq <2 x i32> %c, zeroinitializer
+ %add = fadd nsz <2 x half> %x, <half 4.0, half 4.0>
+ %fneg = fneg <2 x half> %add
+ %select = select <2 x i1> %cmp, <2 x half> %fneg, <2 x half> <half 2.0, half 2.0>
+ ret <2 x half> %select
+}
+
define <2 x half> @select_fneg_posk_src_sub_v2f16(<2 x i32> %c, <2 x half> %x) {
; CI-SAFE-LABEL: select_fneg_posk_src_sub_v2f16:
; CI-SAFE: ; %bb.0:
@@ -4704,34 +4788,34 @@ define <2 x half> @select_fneg_posk_src_fma_v2f16(<2 x i32> %c, <2 x half> %x, <
}
define <2 x half> @select_fneg_posk_src_fmad_v2f16(<2 x i32> %c, <2 x half> %x, <2 x half> %z) {
-; CI-SAFE-LABEL: select_fneg_posk_src_fmad_v2f16:
-; CI-SAFE: ; %bb.0:
-; CI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3
-; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5
-; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2
-; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4
-; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3
-; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5
-; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2
-; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4
-; CI-SAFE-NEXT: v_mul_f32_e32 v3, 4.0, v3
-; CI-SAFE-NEXT: v_add_f32_e32 v3, v3, v5
-; CI-SAFE-NEXT: v_mul_f32_e32 v2, 4.0, v2
-; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3
-; CI-SAFE-NEXT: v_add_f32_e32 v2, v2, v4
-; CI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2
-; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; CI-SAFE-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; CI-SAFE-NEXT: v_or_b32_e32 v2, v2, v3
-; CI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80008000, v2
-; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v2
-; CI-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; CI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2
-; CI-SAFE-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc
-; CI-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
-; CI-SAFE-NEXT: v_cndmask_b32_e32 v1, 2.0, v2, vcc
-; CI-SAFE-NEXT: s_setpc_b64 s[30:31]
+; CI-LABEL: select_fneg_posk_src_fmad_v2f16:
+; CI: ; %bb.0:
+; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; CI-NEXT: v_mul_f32_e32 v3, 4.0, v3
+; CI-NEXT: v_add_f32_e32 v3, v3, v5
+; CI-NEXT: v_mul_f32_e32 v2, 4.0, v2
+; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; CI-NEXT: v_add_f32_e32 v2, v2, v4
+; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; CI-NEXT: v_or_b32_e32 v2, v2, v3
+; CI-NEXT: v_xor_b32_e32 v2, 0x80008000, v2
+; CI-NEXT: v_cvt_f32_f16_e32 v3, v2
+; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v3, vcc
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v2, vcc
+; CI-NEXT: s_setpc_b64 s[30:31]
;
; VI-SAFE-LABEL: select_fneg_posk_src_fmad_v2f16:
; VI-SAFE: ; %bb.0:
@@ -4793,27 +4877,6 @@ define <2 x half> @select_fneg_posk_src_fmad_v2f16(<2 x i32> %c, <2 x half> %x,
; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
;
-; CI-NSZ-LABEL: select_fneg_posk_src_fmad_v2f16:
-; CI-NSZ: ; %bb.0:
-; CI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v2, v2
-; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v3, v3
-; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v4, v4
-; CI-NSZ-NEXT: v_cvt_f16_f32_e32 v5, v5
-; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v2, v2
-; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v3, v3
-; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v4, v4
-; CI-NSZ-NEXT: v_cvt_f32_f16_e32 v5, v5
-; CI-NSZ-NEXT: v_mul_f32_e32 v2, -4.0, v2
-; CI-NSZ-NEXT: v_mul_f32_e32 v3, -4.0, v3
-; CI-NSZ-NEXT: v_sub_f32_e32 v2, v2, v4
-; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; CI-NSZ-NEXT: v_sub_f32_e32 v3, v3, v5
-; CI-NSZ-NEXT: v_cndmask_b32_e32 v0, 2.0, v2, vcc
-; CI-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
-; CI-NSZ-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc
-; CI-NSZ-NEXT: s_setpc_b64 s[30:31]
-;
; VI-NSZ-LABEL: select_fneg_posk_src_fmad_v2f16:
; VI-NSZ: ; %bb.0:
; VI-NSZ-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -4873,6 +4936,112 @@ define <2 x half> @select_fneg_posk_src_fmad_v2f16(<2 x i32> %c, <2 x half> %x,
ret <2 x half> %select
}
+define <2 x half> @select_fneg_posk_src_fmad_v2f16_nsz(<2 x i32> %c, <2 x half> %x, <2 x half> %z) {
+; CI-LABEL: select_fneg_posk_src_fmad_v2f16_nsz:
+; CI: ; %bb.0:
+; CI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; CI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; CI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; CI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; CI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; CI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; CI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; CI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; CI-NEXT: v_mul_f32_e32 v2, -4.0, v2
+; CI-NEXT: v_mul_f32_e32 v3, -4.0, v3
+; CI-NEXT: v_sub_f32_e32 v2, v2, v4
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; CI-NEXT: v_sub_f32_e32 v3, v3, v5
+; CI-NEXT: v_cndmask_b32_e32 v0, 2.0, v2, vcc
+; CI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; CI-NEXT: v_cndmask_b32_e32 v1, 2.0, v3, vcc
+; CI-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: select_fneg_posk_src_fmad_v2f16_nsz:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; VI-NEXT: v_lshrrev_b32_e32 v1, 16, v3
+; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; VI-NEXT: v_fma_f16 v1, v4, -4.0, -v1
+; VI-NEXT: v_fma_f16 v2, v2, -4.0, -v3
+; VI-NEXT: v_mov_b32_e32 v3, 0x4000
+; VI-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0
+; VI-NEXT: v_cndmask_b32_e64 v0, v3, v2, s[4:5]
+; VI-NEXT: v_cndmask_b32_sdwa v1, v3, v1, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: select_fneg_posk_src_fmad_v2f16_nsz:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GFX9-NEXT: v_pk_fma_f16 v1, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x4000
+; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v0
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, v1, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_sdwa v1, v2, v1, vcc dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v0, v1, v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-SAFE-TRUE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz:
+; GFX11-SAFE-TRUE16: ; %bb.0:
+; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-SAFE-TRUE16-NEXT: v_pk_fma_f16 v0, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1]
+; GFX11-SAFE-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 0, v1
+; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo
+; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0
+; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-SAFE-FAKE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz:
+; GFX11-SAFE-FAKE16: ; %bb.0:
+; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-SAFE-FAKE16-NEXT: v_pk_fma_f16 v2, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1]
+; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo
+; GFX11-SAFE-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
+; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo
+; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
+; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-NSZ-TRUE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz:
+; GFX11-NSZ-TRUE16: ; %bb.0:
+; GFX11-NSZ-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NSZ-TRUE16-NEXT: v_pk_fma_f16 v0, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1]
+; GFX11-NSZ-TRUE16-NEXT: v_cmp_eq_u32_e64 s0, 0, v1
+; GFX11-NSZ-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.l, 0x4000, v0.l, vcc_lo
+; GFX11-NSZ-TRUE16-NEXT: v_cndmask_b16 v0.h, 0x4000, v0.h, s0
+; GFX11-NSZ-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-NSZ-FAKE16-LABEL: select_fneg_posk_src_fmad_v2f16_nsz:
+; GFX11-NSZ-FAKE16: ; %bb.0:
+; GFX11-NSZ-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NSZ-FAKE16-NEXT: v_pk_fma_f16 v2, v2, -4.0, v3 op_sel_hi:[1,0,1] neg_lo:[0,0,1] neg_hi:[0,0,1]
+; GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
+; GFX11-NSZ-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-NSZ-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v2
+; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v0, 0x4000, v2, vcc_lo
+; GFX11-NSZ-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v1
+; GFX11-NSZ-FAKE16-NEXT: v_cndmask_b32_e32 v1, 0x4000, v3, vcc_lo
+; GFX11-NSZ-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NSZ-FAKE16-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
+; GFX11-NSZ-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %cmp = icmp eq <2 x i32> %c, zeroinitializer
+ %fmad = call nsz <2 x half> @llvm.fmuladd.v2f16(<2 x half> %x, <2 x half> <half 4.0, half 4.0>, <2 x half> %z)
+ %fneg = fneg <2 x half> %fmad
+ %select = select <2 x i1> %cmp, <2 x half> %fneg, <2 x half> <half 2.0, half 2.0>
+ ret <2 x half> %select
+}
+
declare <2 x half> @llvm.fabs.v2f16(<2 x half>) #0
declare <2 x half> @llvm.fma.v2f16(<2 x half>, <2 x half>, <2 x half>) #0
declare <2 x half> @llvm.fmuladd.v2f16(<2 x half>, <2 x half>, <2 x half>) #0
diff --git a/llvm/test/CodeGen/AMDGPU/v_mac.ll b/llvm/test/CodeGen/AMDGPU/v_mac.ll
index c128715..f5dc824 100644
--- a/llvm/test/CodeGen/AMDGPU/v_mac.ll
+++ b/llvm/test/CodeGen/AMDGPU/v_mac.ll
@@ -116,7 +116,7 @@ entry:
; GCN-LABEL: {{^}}nsz_mad_sub0_src0:
; GCN-NOT: v_mac_f32
; GCN: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
-define amdgpu_kernel void @nsz_mad_sub0_src0(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
+define amdgpu_kernel void @nsz_mad_sub0_src0(ptr addrspace(1) %out, ptr addrspace(1) %in) {
entry:
%b_ptr = getelementptr float, ptr addrspace(1) %in, i32 1
%c_ptr = getelementptr float, ptr addrspace(1) %in, i32 2
@@ -125,7 +125,7 @@ entry:
%b = load float, ptr addrspace(1) %b_ptr
%c = load float, ptr addrspace(1) %c_ptr
- %neg_a = fsub float 0.0, %a
+ %neg_a = fsub nsz float 0.0, %a
%tmp0 = fmul float %neg_a, %b
%tmp1 = fadd float %tmp0, %c
@@ -176,7 +176,7 @@ entry:
; GCN-LABEL: {{^}}nsz_mad_sub0_src1:
; GCN-NOT: v_mac_f32
; GCN: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
-define amdgpu_kernel void @nsz_mad_sub0_src1(ptr addrspace(1) %out, ptr addrspace(1) %in) #1 {
+define amdgpu_kernel void @nsz_mad_sub0_src1(ptr addrspace(1) %out, ptr addrspace(1) %in) {
entry:
%b_ptr = getelementptr float, ptr addrspace(1) %in, i32 1
%c_ptr = getelementptr float, ptr addrspace(1) %in, i32 2
@@ -185,7 +185,7 @@ entry:
%b = load float, ptr addrspace(1) %b_ptr
%c = load float, ptr addrspace(1) %c_ptr
- %neg_b = fsub float 0.0, %b
+ %neg_b = fsub nsz float 0.0, %b
%tmp0 = fmul float %a, %neg_b
%tmp1 = fadd float %tmp0, %c
@@ -310,6 +310,5 @@ define float @v_mac_f32_dynamic_ftz(float %a, float %b, float %c) "denormal-fp-m
declare i32 @llvm.amdgcn.workitem.id.x() #2
attributes #0 = { nounwind "no-signed-zeros-fp-math"="false" }
-attributes #1 = { nounwind "no-signed-zeros-fp-math"="true" }
attributes #2 = { nounwind readnone }
attributes #3 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll b/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll
index bcc60b0..8da6f23 100644
--- a/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll
@@ -236,7 +236,7 @@ entry:
%b.val = load half, ptr addrspace(1) %b
%c.val = load half, ptr addrspace(1) %c
- %a.neg = fsub half 0.0, %a.val
+ %a.neg = fsub nsz half 0.0, %a.val
%t.val = fmul half %a.neg, %b.val
%r.val = fadd half %t.val, %c.val
@@ -263,7 +263,7 @@ entry:
%b.val = load half, ptr addrspace(1) %b
%c.val = load half, ptr addrspace(1) %c
- %b.neg = fsub half 0.0, %b.val
+ %b.neg = fsub nsz half 0.0, %b.val
%t.val = fmul half %a.val, %b.neg
%r.val = fadd half %t.val, %c.val
@@ -290,7 +290,7 @@ entry:
%b.val = load half, ptr addrspace(1) %b
%c.val = load half, ptr addrspace(1) %c
- %c.neg = fsub half 0.0, %c.val
+ %c.neg = fsub nsz half 0.0, %c.val
%t.val = fmul half %a.val, %b.val
%r.val = fadd half %t.val, %c.neg
@@ -601,7 +601,7 @@ entry:
%b.val = load <2 x half>, ptr addrspace(1) %b
%c.val = load <2 x half>, ptr addrspace(1) %c
- %a.neg = fsub <2 x half> <half 0.0, half 0.0>, %a.val
+ %a.neg = fsub nsz <2 x half> <half 0.0, half 0.0>, %a.val
%t.val = fmul <2 x half> %a.neg, %b.val
%r.val = fadd <2 x half> %t.val, %c.val
@@ -634,7 +634,7 @@ entry:
%b.val = load <2 x half>, ptr addrspace(1) %b
%c.val = load <2 x half>, ptr addrspace(1) %c
- %b.neg = fsub <2 x half> <half 0.0, half 0.0>, %b.val
+ %b.neg = fsub nsz <2 x half> <half 0.0, half 0.0>, %b.val
%t.val = fmul <2 x half> %a.val, %b.neg
%r.val = fadd <2 x half> %t.val, %c.val
@@ -667,7 +667,7 @@ entry:
%b.val = load <2 x half>, ptr addrspace(1) %b
%c.val = load <2 x half>, ptr addrspace(1) %c
- %c.neg = fsub <2 x half> <half 0.0, half 0.0>, %c.val
+ %c.neg = fsub nsz <2 x half> <half 0.0, half 0.0>, %c.val
%t.val = fmul <2 x half> %a.val, %b.val
%r.val = fadd <2 x half> %t.val, %c.neg
@@ -678,5 +678,5 @@ entry:
declare void @llvm.amdgcn.s.barrier() #2
attributes #0 = { nounwind "no-signed-zeros-fp-math"="false" "denormal-fp-math"="preserve-sign,preserve-sign" }
-attributes #1 = { nounwind "no-signed-zeros-fp-math"="true" "denormal-fp-math"="preserve-sign,preserve-sign" }
+attributes #1 = { nounwind "denormal-fp-math"="preserve-sign,preserve-sign" }
attributes #2 = { nounwind convergent }
diff --git a/llvm/test/CodeGen/LoongArch/lasx/scalarize-fp.ll b/llvm/test/CodeGen/LoongArch/lasx/scalarize-fp.ll
new file mode 100644
index 0000000..39ac647
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/scalarize-fp.ll
@@ -0,0 +1,58 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc --mtriple=loongarch32 -mattr=+32s,+lasx < %s | FileCheck %s
+; RUN: llc --mtriple=loongarch64 -mattr=+lasx < %s | FileCheck %s
+
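+; Check that FP ops whose vector operands are built from a single scalar (an
+; element-0 insert or a splat) are scalarized to scalar FP instructions,
+; followed by at most one splat of the result.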
+define <8 x float> @fadd_elt0_v8f32(float %a) nounwind {
+; CHECK-LABEL: fadd_elt0_v8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vldi $vr1, -1168
+; CHECK-NEXT: fadd.s $fa0, $fa0, $fa1
+; CHECK-NEXT: ret
+entry:
+ %b = insertelement <8 x float> poison, float %a, i32 0
+ %c = fadd <8 x float> %b, <float 1.0, float poison, float poison, float poison, float poison, float poison, float poison, float poison>
+ ret <8 x float> %c
+}
+
+define <4 x double> @fadd_elt0_v4f64(double %a) nounwind {
+; CHECK-LABEL: fadd_elt0_v4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vldi $vr1, -912
+; CHECK-NEXT: fadd.d $fa0, $fa0, $fa1
+; CHECK-NEXT: ret
+entry:
+ %b = insertelement <4 x double> poison, double %a, i32 0
+ %c = fadd <4 x double> %b, <double 1.0, double poison, double poison, double poison>
+ ret <4 x double> %c
+}
+
+define <8 x float> @fsub_splat_v8f32(float %a, float %b) nounwind {
+; CHECK-LABEL: fsub_splat_v8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsub.s $fa0, $fa0, $fa1
+; CHECK-NEXT: xvreplve0.w $xr0, $xr0
+; CHECK-NEXT: ret
+entry:
+ %insa = insertelement <8 x float> poison, float %a, i32 0
+ %insb = insertelement <8 x float> poison, float %b, i32 0
+ %va = shufflevector <8 x float> %insa, <8 x float> poison, <8 x i32> zeroinitializer
+ %vb = shufflevector <8 x float> %insb, <8 x float> poison, <8 x i32> zeroinitializer
+ %c = fsub <8 x float> %va, %vb
+ ret <8 x float> %c
+}
+
+define <4 x double> @fsub_splat_v4f64(double %a) nounwind {
+; CHECK-LABEL: fsub_splat_v4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vldi $vr1, -784
+; CHECK-NEXT: fadd.d $fa0, $fa0, $fa1
+; CHECK-NEXT: xvreplve0.d $xr0, $xr0
+; CHECK-NEXT: ret
+entry:
+ %insa = insertelement <4 x double> poison, double %a, i32 0
+ %insb = insertelement <4 x double> poison, double 1.0, i32 0
+ %va = shufflevector <4 x double> %insa, <4 x double> poison, <4 x i32> zeroinitializer
+ %vb = shufflevector <4 x double> %insb, <4 x double> poison, <4 x i32> zeroinitializer
+ %c = fsub <4 x double> %va, %vb
+ ret <4 x double> %c
+}
diff --git a/llvm/test/CodeGen/LoongArch/lsx/scalarize-fp.ll b/llvm/test/CodeGen/LoongArch/lsx/scalarize-fp.ll
new file mode 100644
index 0000000..b651f11
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lsx/scalarize-fp.ll
@@ -0,0 +1,58 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc --mtriple=loongarch32 -mattr=+32s,+lsx < %s | FileCheck %s
+; RUN: llc --mtriple=loongarch64 -mattr=+lsx < %s | FileCheck %s
+
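+; As in the lasx version of this test: FP ops on single-scalar vectors should
+; be scalarized to scalar FP instructions, followed by at most one splat.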
+define <4 x float> @fadd_elt0_v4f32(float %a) nounwind {
+; CHECK-LABEL: fadd_elt0_v4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vldi $vr1, -1168
+; CHECK-NEXT: fadd.s $fa0, $fa0, $fa1
+; CHECK-NEXT: ret
+entry:
+ %b = insertelement <4 x float> poison, float %a, i32 0
+ %c = fadd <4 x float> %b, <float 1.0, float poison, float poison, float poison>
+ ret <4 x float> %c
+}
+
+define <2 x double> @fadd_elt0_v2f64(double %a) nounwind {
+; CHECK-LABEL: fadd_elt0_v2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vldi $vr1, -912
+; CHECK-NEXT: fadd.d $fa0, $fa0, $fa1
+; CHECK-NEXT: ret
+entry:
+ %b = insertelement <2 x double> poison, double %a, i32 0
+ %c = fadd <2 x double> %b, <double 1.0, double poison>
+ ret <2 x double> %c
+}
+
+define <4 x float> @fsub_splat_v4f32(float %b) nounwind {
+; CHECK-LABEL: fsub_splat_v4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vldi $vr1, -1168
+; CHECK-NEXT: fsub.s $fa0, $fa1, $fa0
+; CHECK-NEXT: vreplvei.w $vr0, $vr0, 0
+; CHECK-NEXT: ret
+entry:
+ %insa = insertelement <4 x float> poison, float 1.0, i32 0
+ %insb = insertelement <4 x float> poison, float %b, i32 0
+ %va = shufflevector <4 x float> %insa, <4 x float> poison, <4 x i32> zeroinitializer
+ %vb = shufflevector <4 x float> %insb, <4 x float> poison, <4 x i32> zeroinitializer
+ %c = fsub <4 x float> %va, %vb
+ ret <4 x float> %c
+}
+
+define <2 x double> @fsub_splat_v2f64(double %a, double %b) nounwind {
+; CHECK-LABEL: fsub_splat_v2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsub.d $fa0, $fa0, $fa1
+; CHECK-NEXT: vreplvei.d $vr0, $vr0, 0
+; CHECK-NEXT: ret
+entry:
+ %insa = insertelement <2 x double> poison, double %a, i32 0
+ %insb = insertelement <2 x double> poison, double %b, i32 0
+ %va = shufflevector <2 x double> %insa, <2 x double> poison, <2 x i32> zeroinitializer
+ %vb = shufflevector <2 x double> %insb, <2 x double> poison, <2 x i32> zeroinitializer
+ %c = fsub <2 x double> %va, %vb
+ ret <2 x double> %c
+}
diff --git a/llvm/test/CodeGen/LoongArch/merge-offset-option.ll b/llvm/test/CodeGen/LoongArch/merge-offset-option.ll
new file mode 100644
index 0000000..e5351a6
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/merge-offset-option.ll
@@ -0,0 +1,24 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 -mattr=+d --relocation-model=static -O1 \
+; RUN: < %s | FileCheck %s --check-prefix=MERGE
+; RUN: llc --mtriple=loongarch64 -mattr=+d --relocation-model=static -O1 \
+; RUN: --loongarch-enable-merge-offset=false < %s | FileCheck %s --check-prefix=NO_MERGE
+
+@g = dso_local global i32 zeroinitializer, align 4
+
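+; With offset merging enabled, the %pc_lo12 relocation folds directly into the
+; load; with --loongarch-enable-merge-offset=false, an explicit addi.d computes
+; the address first.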
+define void @foo() nounwind {
+; MERGE-LABEL: foo:
+; MERGE: # %bb.0:
+; MERGE-NEXT: pcalau12i $a0, %pc_hi20(g)
+; MERGE-NEXT: ld.w $zero, $a0, %pc_lo12(g)
+; MERGE-NEXT: ret
+;
+; NO_MERGE-LABEL: foo:
+; NO_MERGE: # %bb.0:
+; NO_MERGE-NEXT: pcalau12i $a0, %pc_hi20(g)
+; NO_MERGE-NEXT: addi.d $a0, $a0, %pc_lo12(g)
+; NO_MERGE-NEXT: ld.w $zero, $a0, 0
+; NO_MERGE-NEXT: ret
+ %v = load volatile i32, ptr @g
+ ret void
+}
diff --git a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
index aaabd76e..fd0b494 100644
--- a/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar_cmp.ll
@@ -20,18 +20,18 @@
define float @select_oeq_float(float %a, float %b, float %c, float %d) {
; FAST-P8-LABEL: select_oeq_float:
; FAST-P8: # %bb.0: # %entry
-; FAST-P8-NEXT: xssubsp f0, f2, f1
-; FAST-P8-NEXT: xssubsp f1, f1, f2
-; FAST-P8-NEXT: fsel f1, f1, f3, f4
-; FAST-P8-NEXT: fsel f1, f0, f1, f4
+; FAST-P8-NEXT: xssubsp f0, f1, f2
+; FAST-P8-NEXT: xsnegdp f1, f0
+; FAST-P8-NEXT: fsel f0, f0, f3, f4
+; FAST-P8-NEXT: fsel f1, f1, f0, f4
; FAST-P8-NEXT: blr
;
; FAST-P9-LABEL: select_oeq_float:
; FAST-P9: # %bb.0: # %entry
-; FAST-P9-NEXT: xssubsp f0, f2, f1
-; FAST-P9-NEXT: xssubsp f1, f1, f2
-; FAST-P9-NEXT: fsel f1, f1, f3, f4
-; FAST-P9-NEXT: fsel f1, f0, f1, f4
+; FAST-P9-NEXT: xssubsp f0, f1, f2
+; FAST-P9-NEXT: xsnegdp f1, f0
+; FAST-P9-NEXT: fsel f0, f0, f3, f4
+; FAST-P9-NEXT: fsel f1, f1, f0, f4
; FAST-P9-NEXT: blr
;
; NO-FAST-P8-LABEL: select_oeq_float:
@@ -59,6 +59,48 @@ entry:
ret float %cond
}
+define float @select_oeq_float_nsz(float %a, float %b, float %c, float %d) {
+; FAST-P8-LABEL: select_oeq_float_nsz:
+; FAST-P8: # %bb.0: # %entry
+; FAST-P8-NEXT: xssubsp f0, f2, f1
+; FAST-P8-NEXT: xssubsp f1, f1, f2
+; FAST-P8-NEXT: fsel f1, f1, f3, f4
+; FAST-P8-NEXT: fsel f1, f0, f1, f4
+; FAST-P8-NEXT: blr
+;
+; FAST-P9-LABEL: select_oeq_float_nsz:
+; FAST-P9: # %bb.0: # %entry
+; FAST-P9-NEXT: xssubsp f0, f2, f1
+; FAST-P9-NEXT: xssubsp f1, f1, f2
+; FAST-P9-NEXT: fsel f1, f1, f3, f4
+; FAST-P9-NEXT: fsel f1, f0, f1, f4
+; FAST-P9-NEXT: blr
+;
+; NO-FAST-P8-LABEL: select_oeq_float_nsz:
+; NO-FAST-P8: # %bb.0: # %entry
+; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
+; NO-FAST-P8-NEXT: beq cr0, .LBB1_2
+; NO-FAST-P8-NEXT: # %bb.1: # %entry
+; NO-FAST-P8-NEXT: fmr f3, f4
+; NO-FAST-P8-NEXT: .LBB1_2: # %entry
+; NO-FAST-P8-NEXT: fmr f1, f3
+; NO-FAST-P8-NEXT: blr
+;
+; NO-FAST-P9-LABEL: select_oeq_float_nsz:
+; NO-FAST-P9: # %bb.0: # %entry
+; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
+; NO-FAST-P9-NEXT: beq cr0, .LBB1_2
+; NO-FAST-P9-NEXT: # %bb.1: # %entry
+; NO-FAST-P9-NEXT: fmr f3, f4
+; NO-FAST-P9-NEXT: .LBB1_2: # %entry
+; NO-FAST-P9-NEXT: fmr f1, f3
+; NO-FAST-P9-NEXT: blr
+entry:
+ %cmp = fcmp nsz oeq float %a, %b
+ %cond = select i1 %cmp, float %c, float %d
+ ret float %cond
+}
+
define double @select_oeq_double(double %a, double %b, double %c, double %d) {
; FAST-P8-LABEL: select_oeq_double:
; FAST-P8: # %bb.0: # %entry
@@ -79,20 +121,20 @@ define double @select_oeq_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P8-LABEL: select_oeq_double:
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT: beq cr0, .LBB1_2
+; NO-FAST-P8-NEXT: beq cr0, .LBB2_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB1_2: # %entry
+; NO-FAST-P8-NEXT: .LBB2_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
; NO-FAST-P9-LABEL: select_oeq_double:
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P9-NEXT: beq cr0, .LBB1_2
+; NO-FAST-P9-NEXT: beq cr0, .LBB2_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB1_2: # %entry
+; NO-FAST-P9-NEXT: .LBB2_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -182,13 +224,57 @@ entry:
define float @select_one_float(float %a, float %b, float %c, float %d) {
; FAST-P8-LABEL: select_one_float:
; FAST-P8: # %bb.0: # %entry
+; FAST-P8-NEXT: xssubsp f0, f1, f2
+; FAST-P8-NEXT: xsnegdp f1, f0
+; FAST-P8-NEXT: fsel f0, f0, f4, f3
+; FAST-P8-NEXT: fsel f1, f1, f0, f3
+; FAST-P8-NEXT: blr
+;
+; FAST-P9-LABEL: select_one_float:
+; FAST-P9: # %bb.0: # %entry
+; FAST-P9-NEXT: xssubsp f0, f1, f2
+; FAST-P9-NEXT: xsnegdp f1, f0
+; FAST-P9-NEXT: fsel f0, f0, f4, f3
+; FAST-P9-NEXT: fsel f1, f1, f0, f3
+; FAST-P9-NEXT: blr
+;
+; NO-FAST-P8-LABEL: select_one_float:
+; NO-FAST-P8: # %bb.0: # %entry
+; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
+; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq
+; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB5_2
+; NO-FAST-P8-NEXT: # %bb.1: # %entry
+; NO-FAST-P8-NEXT: fmr f3, f4
+; NO-FAST-P8-NEXT: .LBB5_2: # %entry
+; NO-FAST-P8-NEXT: fmr f1, f3
+; NO-FAST-P8-NEXT: blr
+;
+; NO-FAST-P9-LABEL: select_one_float:
+; NO-FAST-P9: # %bb.0: # %entry
+; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
+; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq
+; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB5_2
+; NO-FAST-P9-NEXT: # %bb.1: # %entry
+; NO-FAST-P9-NEXT: fmr f3, f4
+; NO-FAST-P9-NEXT: .LBB5_2: # %entry
+; NO-FAST-P9-NEXT: fmr f1, f3
+; NO-FAST-P9-NEXT: blr
+entry:
+ %cmp = fcmp one float %a, %b
+ %cond = select i1 %cmp, float %c, float %d
+ ret float %cond
+}
+
+define float @select_one_float_nsz(float %a, float %b, float %c, float %d) {
+; FAST-P8-LABEL: select_one_float_nsz:
+; FAST-P8: # %bb.0: # %entry
; FAST-P8-NEXT: xssubsp f0, f2, f1
; FAST-P8-NEXT: xssubsp f1, f1, f2
; FAST-P8-NEXT: fsel f1, f1, f4, f3
; FAST-P8-NEXT: fsel f1, f0, f1, f3
; FAST-P8-NEXT: blr
;
-; FAST-P9-LABEL: select_one_float:
+; FAST-P9-LABEL: select_one_float_nsz:
; FAST-P9: # %bb.0: # %entry
; FAST-P9-NEXT: xssubsp f0, f2, f1
; FAST-P9-NEXT: xssubsp f1, f1, f2
@@ -196,29 +282,29 @@ define float @select_one_float(float %a, float %b, float %c, float %d) {
; FAST-P9-NEXT: fsel f1, f0, f1, f3
; FAST-P9-NEXT: blr
;
-; NO-FAST-P8-LABEL: select_one_float:
+; NO-FAST-P8-LABEL: select_one_float_nsz:
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB4_2
+; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB6_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB4_2: # %entry
+; NO-FAST-P8-NEXT: .LBB6_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
-; NO-FAST-P9-LABEL: select_one_float:
+; NO-FAST-P9-LABEL: select_one_float_nsz:
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB4_2
+; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB6_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB4_2: # %entry
+; NO-FAST-P9-NEXT: .LBB6_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
- %cmp = fcmp one float %a, %b
+ %cmp = fcmp nsz one float %a, %b
%cond = select i1 %cmp, float %c, float %d
ret float %cond
}
@@ -244,10 +330,10 @@ define double @select_one_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB5_2
+; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB7_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB5_2: # %entry
+; NO-FAST-P8-NEXT: .LBB7_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
@@ -255,10 +341,10 @@ define double @select_one_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, eq
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB5_2
+; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB7_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB5_2: # %entry
+; NO-FAST-P9-NEXT: .LBB7_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -362,10 +448,10 @@ define float @select_oge_float(float %a, float %b, float %c, float %d) {
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, lt
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB8_2
+; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB10_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB8_2: # %entry
+; NO-FAST-P8-NEXT: .LBB10_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
@@ -373,10 +459,10 @@ define float @select_oge_float(float %a, float %b, float %c, float %d) {
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, lt
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB8_2
+; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB10_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB8_2: # %entry
+; NO-FAST-P9-NEXT: .LBB10_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -402,10 +488,10 @@ define double @select_oge_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, lt
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB9_2
+; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB11_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB9_2: # %entry
+; NO-FAST-P8-NEXT: .LBB11_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
@@ -413,10 +499,10 @@ define double @select_oge_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, lt
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB9_2
+; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB11_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB9_2: # %entry
+; NO-FAST-P9-NEXT: .LBB11_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -503,20 +589,20 @@ define float @select_olt_float(float %a, float %b, float %c, float %d) {
; NO-FAST-P8-LABEL: select_olt_float:
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: blt cr0, .LBB12_2
+; NO-FAST-P8-NEXT: blt cr0, .LBB14_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB12_2: # %entry
+; NO-FAST-P8-NEXT: .LBB14_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
; NO-FAST-P9-LABEL: select_olt_float:
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: blt cr0, .LBB12_2
+; NO-FAST-P9-NEXT: blt cr0, .LBB14_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB12_2: # %entry
+; NO-FAST-P9-NEXT: .LBB14_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -541,20 +627,20 @@ define double @select_olt_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P8-LABEL: select_olt_double:
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT: blt cr0, .LBB13_2
+; NO-FAST-P8-NEXT: blt cr0, .LBB15_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB13_2: # %entry
+; NO-FAST-P8-NEXT: .LBB15_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
; NO-FAST-P9-LABEL: select_olt_double:
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P9-NEXT: blt cr0, .LBB13_2
+; NO-FAST-P9-NEXT: blt cr0, .LBB15_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB13_2: # %entry
+; NO-FAST-P9-NEXT: .LBB15_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -641,20 +727,20 @@ define float @select_ogt_float(float %a, float %b, float %c, float %d) {
; NO-FAST-P8-LABEL: select_ogt_float:
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: bgt cr0, .LBB16_2
+; NO-FAST-P8-NEXT: bgt cr0, .LBB18_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB16_2: # %entry
+; NO-FAST-P8-NEXT: .LBB18_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
; NO-FAST-P9-LABEL: select_ogt_float:
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P9-NEXT: bgt cr0, .LBB16_2
+; NO-FAST-P9-NEXT: bgt cr0, .LBB18_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB16_2: # %entry
+; NO-FAST-P9-NEXT: .LBB18_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -679,20 +765,20 @@ define double @select_ogt_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P8-LABEL: select_ogt_double:
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT: bgt cr0, .LBB17_2
+; NO-FAST-P8-NEXT: bgt cr0, .LBB19_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB17_2: # %entry
+; NO-FAST-P8-NEXT: .LBB19_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
; NO-FAST-P9-LABEL: select_ogt_double:
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P9-NEXT: bgt cr0, .LBB17_2
+; NO-FAST-P9-NEXT: bgt cr0, .LBB19_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB17_2: # %entry
+; NO-FAST-P9-NEXT: .LBB19_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -780,10 +866,10 @@ define float @select_ole_float(float %a, float %b, float %c, float %d) {
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, gt
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB20_2
+; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB22_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB20_2: # %entry
+; NO-FAST-P8-NEXT: .LBB22_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
@@ -791,10 +877,10 @@ define float @select_ole_float(float %a, float %b, float %c, float %d) {
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, gt
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB20_2
+; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB22_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB20_2: # %entry
+; NO-FAST-P9-NEXT: .LBB22_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -820,10 +906,10 @@ define double @select_ole_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P8: # %bb.0: # %entry
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P8-NEXT: crnor 4*cr5+lt, un, gt
-; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB21_2
+; NO-FAST-P8-NEXT: bc 12, 4*cr5+lt, .LBB23_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f3, f4
-; NO-FAST-P8-NEXT: .LBB21_2: # %entry
+; NO-FAST-P8-NEXT: .LBB23_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
@@ -831,10 +917,10 @@ define double @select_ole_double(double %a, double %b, double %c, double %d) {
; NO-FAST-P9: # %bb.0: # %entry
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f2
; NO-FAST-P9-NEXT: crnor 4*cr5+lt, un, gt
-; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB21_2
+; NO-FAST-P9-NEXT: bc 12, 4*cr5+lt, .LBB23_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f3, f4
-; NO-FAST-P9-NEXT: .LBB21_2: # %entry
+; NO-FAST-P9-NEXT: .LBB23_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -926,13 +1012,13 @@ define double @onecmp1(double %a, double %y, double %z) {
; NO-FAST-P8-NEXT: vspltisw v2, 1
; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f0
-; NO-FAST-P8-NEXT: bc 12, lt, .LBB24_3
+; NO-FAST-P8-NEXT: bc 12, lt, .LBB26_3
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fcmpu cr0, f1, f1
-; NO-FAST-P8-NEXT: bc 12, un, .LBB24_3
+; NO-FAST-P8-NEXT: bc 12, un, .LBB26_3
; NO-FAST-P8-NEXT: # %bb.2: # %entry
; NO-FAST-P8-NEXT: fmr f3, f2
-; NO-FAST-P8-NEXT: .LBB24_3: # %entry
+; NO-FAST-P8-NEXT: .LBB26_3: # %entry
; NO-FAST-P8-NEXT: fmr f1, f3
; NO-FAST-P8-NEXT: blr
;
@@ -941,13 +1027,13 @@ define double @onecmp1(double %a, double %y, double %z) {
; NO-FAST-P9-NEXT: vspltisw v2, 1
; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f0
-; NO-FAST-P9-NEXT: bc 12, lt, .LBB24_3
+; NO-FAST-P9-NEXT: bc 12, lt, .LBB26_3
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fcmpu cr0, f1, f1
-; NO-FAST-P9-NEXT: bc 12, un, .LBB24_3
+; NO-FAST-P9-NEXT: bc 12, un, .LBB26_3
; NO-FAST-P9-NEXT: # %bb.2: # %entry
; NO-FAST-P9-NEXT: fmr f3, f2
-; NO-FAST-P9-NEXT: .LBB24_3: # %entry
+; NO-FAST-P9-NEXT: .LBB26_3: # %entry
; NO-FAST-P9-NEXT: fmr f1, f3
; NO-FAST-P9-NEXT: blr
entry:
@@ -978,10 +1064,10 @@ define double @onecmp2(double %a, double %y, double %z) {
; NO-FAST-P8-NEXT: vspltisw v2, 1
; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34
; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f0
-; NO-FAST-P8-NEXT: bgt cr0, .LBB25_2
+; NO-FAST-P8-NEXT: bgt cr0, .LBB27_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f2, f3
-; NO-FAST-P8-NEXT: .LBB25_2: # %entry
+; NO-FAST-P8-NEXT: .LBB27_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f2
; NO-FAST-P8-NEXT: blr
;
@@ -990,10 +1076,10 @@ define double @onecmp2(double %a, double %y, double %z) {
; NO-FAST-P9-NEXT: vspltisw v2, 1
; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34
; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f0
-; NO-FAST-P9-NEXT: bgt cr0, .LBB25_2
+; NO-FAST-P9-NEXT: bgt cr0, .LBB27_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f2, f3
-; NO-FAST-P9-NEXT: .LBB25_2: # %entry
+; NO-FAST-P9-NEXT: .LBB27_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f2
; NO-FAST-P9-NEXT: blr
entry:
@@ -1028,10 +1114,10 @@ define double @onecmp3(double %a, double %y, double %z) {
; NO-FAST-P8-NEXT: vspltisw v2, 1
; NO-FAST-P8-NEXT: xvcvsxwdp vs0, vs34
; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f0
-; NO-FAST-P8-NEXT: beq cr0, .LBB26_2
+; NO-FAST-P8-NEXT: beq cr0, .LBB28_2
; NO-FAST-P8-NEXT: # %bb.1: # %entry
; NO-FAST-P8-NEXT: fmr f2, f3
-; NO-FAST-P8-NEXT: .LBB26_2: # %entry
+; NO-FAST-P8-NEXT: .LBB28_2: # %entry
; NO-FAST-P8-NEXT: fmr f1, f2
; NO-FAST-P8-NEXT: blr
;
@@ -1040,10 +1126,10 @@ define double @onecmp3(double %a, double %y, double %z) {
; NO-FAST-P9-NEXT: vspltisw v2, 1
; NO-FAST-P9-NEXT: xvcvsxwdp vs0, vs34
; NO-FAST-P9-NEXT: xscmpudp cr0, f1, f0
-; NO-FAST-P9-NEXT: beq cr0, .LBB26_2
+; NO-FAST-P9-NEXT: beq cr0, .LBB28_2
; NO-FAST-P9-NEXT: # %bb.1: # %entry
; NO-FAST-P9-NEXT: fmr f2, f3
-; NO-FAST-P9-NEXT: .LBB26_2: # %entry
+; NO-FAST-P9-NEXT: .LBB28_2: # %entry
; NO-FAST-P9-NEXT: fmr f1, f2
; NO-FAST-P9-NEXT: blr
entry:
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/builtin_printf.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/builtin_printf.ll
new file mode 100644
index 0000000..093d172
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/builtin_printf.ll
@@ -0,0 +1,24 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_relaxed_printf_string_address_space %s -o - | FileCheck %s
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+
+; CHECK: OpExtension "SPV_EXT_relaxed_printf_string_address_space"
+; CHECK: %[[#]] = OpExtInst %[[#]] %[[#]] printf
+
+; CHECK-ERROR: LLVM ERROR: SPV_EXT_relaxed_printf_string_address_space is required because printf uses a format string not in constant address space.
+
+@.str = private unnamed_addr addrspace(1) constant [4 x i8] c"%d\0A\00", align 1
+
+declare spir_func i32 @printf(ptr addrspace(4), ...)
+
+define spir_kernel void @test_kernel() {
+entry:
+ ; Format string in addrspace(1) → cast to addrspace(4)
+ %format = addrspacecast ptr addrspace(1) @.str to ptr addrspace(4)
+ %val = alloca i32, align 4
+ store i32 123, ptr %val, align 4
+ %loaded = load i32, ptr %val, align 4
+
+ ; Call printf with a format string that is not in the constant address space
+ %call = call spir_func i32 (ptr addrspace(4), ...) @printf(ptr addrspace(4) %format, i32 %loaded)
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/non-constant-printf.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/non-constant-printf.ll
new file mode 100644
index 0000000..b54d59b
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_EXT_relaxed_printf_string_address_space/non-constant-printf.ll
@@ -0,0 +1,48 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_EXT_relaxed_printf_string_address_space %s -o - | FileCheck %s
+; RUN: not llc -O0 -mtriple=spirv32-unknown-unknown %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+
+; CHECK: OpExtension "SPV_EXT_relaxed_printf_string_address_space"
+; CHECK: %[[#ExtInstSetId:]] = OpExtInstImport "OpenCL.std"
+; CHECK-DAG: %[[#TypeInt32Id:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#TypeInt8Id:]] = OpTypeInt 8 0
+; CHECK-DAG: %[[#TypeInt64Id:]] = OpTypeInt 64 0
+; CHECK-DAG: %[[#TypeArrayId:]] = OpTypeArray %[[#TypeInt8Id]] %[[#]]
+; CHECK-DAG: %[[#ConstantStorClassGlobalPtrTy:]] = OpTypePointer UniformConstant %[[#TypeArrayId]]
+; CHECK-DAG: %[[#WGStorClassGlobalPtrTy:]] = OpTypePointer Workgroup %[[#TypeArrayId]]
+; CHECK-DAG: %[[#CrossWFStorClassGlobalPtrTy:]] = OpTypePointer CrossWorkgroup %[[#TypeArrayId]]
+; CHECK-DAG: %[[#FunctionStorClassPtrTy:]] = OpTypePointer Function %[[#TypeInt8Id]]
+; CHECK-DAG: %[[#WGStorClassPtrTy:]] = OpTypePointer Workgroup %[[#TypeInt8Id]]
+; CHECK-DAG: %[[#CrossWFStorClassPtrTy:]] = OpTypePointer CrossWorkgroup %[[#TypeInt8Id]]
+; CHECK: %[[#ConstantCompositeId:]] = OpConstantComposite %[[#TypeArrayId]] %[[#]] %[[#]] %[[#]] %[[#]] %[[#]] %[[#]]
+; CHECK: %[[#]] = OpVariable %[[#ConstantStorClassGlobalPtrTy]] UniformConstant %[[#ConstantCompositeId]]
+; CHECK: %[[#]] = OpVariable %[[#CrossWFStorClassGlobalPtrTy]] CrossWorkgroup %[[#ConstantCompositeId]]
+; CHECK: %[[#]] = OpVariable %[[#WGStorClassGlobalPtrTy]] Workgroup %[[#ConstantCompositeId]]
+; CHECK: %[[#GEP1:]] = OpInBoundsPtrAccessChain %[[#FunctionStorClassPtrTy]] %[[#]] %[[#]] %[[#]]
+; CHECK: %[[#]] = OpExtInst %[[#TypeInt32Id]] %[[#ExtInstSetId:]] printf %[[#GEP1]]
+; CHECK: %[[#GEP2:]] = OpInBoundsPtrAccessChain %[[#CrossWFStorClassPtrTy]] %[[#]] %[[#]] %[[#]]
+; CHECK: %[[#]] = OpExtInst %[[#TypeInt32Id]] %[[#ExtInstSetId:]] printf %[[#GEP2]]
+; CHECK: %[[#GEP3:]] = OpInBoundsPtrAccessChain %[[#WGStorClassPtrTy]] %[[#]] %[[#]] %[[#]]
+; CHECK: %[[#]] = OpExtInst %[[#TypeInt32Id]] %[[#ExtInstSetId:]] printf %[[#GEP3]]
+
+; CHECK-ERROR: LLVM ERROR: SPV_EXT_relaxed_printf_string_address_space is required because printf uses a format string not in constant address space.
+
+@0 = internal unnamed_addr addrspace(2) constant [6 x i8] c"Test\0A\00", align 1
+@1 = internal unnamed_addr addrspace(1) constant [6 x i8] c"Test\0A\00", align 1
+@2 = internal unnamed_addr addrspace(3) constant [6 x i8] c"Test\0A\00", align 1
+
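+; The same string is placed in the constant (2), global (1), and local (3)
+; address spaces. The first printf consumes a private copy made by memcpy, so
+; all three calls pass a format string outside the constant address space and
+; therefore require the extension.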
+define spir_kernel void @test() {
+ %tmp1 = alloca [6 x i8], align 1
+ call void @llvm.memcpy.p0.p2.i64(ptr align 1 %tmp1, ptr addrspace(2) align 1 @0, i64 6, i1 false)
+ %1 = getelementptr inbounds [6 x i8], ptr %tmp1, i32 0, i32 0
+ %2 = call spir_func i32 @_Z18__spirv_ocl_printfPc(ptr %1)
+ %3 = getelementptr inbounds [6 x i8], ptr addrspace(1) @1, i32 0, i32 0
+ %4 = call spir_func i32 @_Z18__spirv_ocl_printfPU3AS1c(ptr addrspace(1) %3)
+ %5 = getelementptr inbounds [6 x i8], ptr addrspace(3) @2, i32 0, i32 0
+ %6 = call spir_func i32 @_Z18__spirv_ocl_printfPU3AS3c(ptr addrspace(3) %5)
+ ret void
+}
+
+declare spir_func i32 @_Z18__spirv_ocl_printfPc(ptr)
+declare spir_func i32 @_Z18__spirv_ocl_printfPU3AS1c(ptr addrspace(1))
+declare spir_func i32 @_Z18__spirv_ocl_printfPU3AS3c(ptr addrspace(3))
+declare void @llvm.memcpy.p0.p2.i64(ptr captures(none), ptr addrspace(2) captures(none) readonly, i64, i1)
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bindless_images/i32-in-physical64.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bindless_images/i32-in-physical64.ll
new file mode 100644
index 0000000..3624f14
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_bindless_images/i32-in-physical64.ll
@@ -0,0 +1,19 @@
+; RUN: not llc -O0 -mtriple=spirv64-unknown-unknown --spirv-ext=+SPV_INTEL_bindless_images %s -o %t.spvt 2>&1 | FileCheck %s --check-prefix=CHECK-ERROR
+
+; CHECK-ERROR: LLVM ERROR: Parameter value must be a 32-bit scalar in case of Physical32 addressing model or a 64-bit scalar in case of Physical64 addressing model
+
+target datalayout = "e-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-n8:16:32:64"
+target triple = "spir64-unknown-unknown"
+
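+; Under the Physical64 addressing model the handle must be a 64-bit scalar, so
+; passing the 32-bit %in to ConvertHandleToImageINTEL must trigger the error
+; checked above.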
+define spir_func void @foo(i32 %in) {
+ %img = call spir_func target("spirv.Image", i32, 2, 0, 0, 0, 0, 0, 0) @_Z33__spirv_ConvertHandleToImageINTELi(i32 %in)
+ %samp = call spir_func target("spirv.Sampler") @_Z35__spirv_ConvertHandleToSamplerINTELl(i64 42)
+ %sampImage = call spir_func target("spirv.SampledImage", i64, 1, 0, 0, 0, 0, 0, 0) @_Z40__spirv_ConvertHandleToSampledImageINTELl(i64 43)
+ ret void
+}
+
+declare spir_func target("spirv.Image", i32, 2, 0, 0, 0, 0, 0, 0) @_Z33__spirv_ConvertHandleToImageINTELi(i32)
+
+declare spir_func target("spirv.Sampler") @_Z35__spirv_ConvertHandleToSamplerINTELl(i64)
+
+declare spir_func target("spirv.SampledImage", i64, 1, 0, 0, 0, 0, 0, 0) @_Z40__spirv_ConvertHandleToSampledImageINTELl(i64)
diff --git a/llvm/test/CodeGen/SPIRV/image_store.ll b/llvm/test/CodeGen/SPIRV/image_store.ll
new file mode 100644
index 0000000..a70651c
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/image_store.ll
@@ -0,0 +1,22 @@
+; RUN: llc -O0 -verify-machineinstrs -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; Image types may be represented in two ways while translating to SPIR-V:
+; - OpenCL form, for example, '%opencl.image2d_ro_t',
+; - SPIR-V form, for example, '%spirv.Image._void_1_0_0_0_0_0_0',
+; but it is still one type, which should be translated to a single SPIR-V type.
+;
+; The test checks that the code below is successfully translated and only one
+; SPIR-V type for images is generated (no duplicate OpTypeImage instructions).
+
+; CHECK: %[[#]] = OpTypeImage %[[#]] 2D
+; CHECK-NOT: %[[#]] = OpTypeImage %[[#]] 2D
+
+declare spir_func <4 x float> @_Z11read_imagef14ocl_image2d_ro11ocl_samplerDv2_ff(ptr addrspace(1), ptr addrspace(2), <2 x float>, float)
+
+define spir_kernel void @read_image(ptr addrspace(1) %srcimg, ptr addrspace(2) %sampler){
+entry:
+ %spirvimg.addr = alloca target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0), align 8
+ %val = call <4 x float> @_Z11read_imagef14ocl_image2d_ro11ocl_samplerDv2_ff(ptr addrspace(1) %srcimg, ptr addrspace(2) %sampler, <2 x float> zeroinitializer, float 0.0)
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll b/llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll
new file mode 100644
index 0000000..b788f34b
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/instructions/insertvalue-undef-ptr.ll
@@ -0,0 +1,28 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-LABEL: Begin function original_testcase
+define fastcc void @original_testcase() {
+top:
+ ; CHECK: OpCompositeInsert
+ %0 = insertvalue [1 x ptr] zeroinitializer, ptr poison, 0
+ ret void
+}
+
+; CHECK-LABEL: Begin function additional_testcases
+define fastcc void @additional_testcases() {
+top:
+ ; Test with different pointer types
+ ; CHECK: OpCompositeInsert
+ %1 = insertvalue [1 x ptr] zeroinitializer, ptr undef, 0
+ ; CHECK-NEXT: OpCompositeInsert
+ %2 = insertvalue {ptr, i32} zeroinitializer, ptr poison, 0
+ ; CHECK-NEXT: OpCompositeInsert
+ %3 = insertvalue {ptr, ptr} undef, ptr null, 0
+
+ ; Test with undef aggregate
+ ; CHECK-NEXT: OpCompositeInsert
+ %4 = insertvalue [1 x ptr] undef, ptr undef, 0
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-comparison.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-comparison.ll
new file mode 100644
index 0000000..49bb8ea
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/constrained-comparison.ll
@@ -0,0 +1,56 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: OpFOrdEqual
+; CHECK-DAG: OpFOrdGreaterThan
+; CHECK-DAG: OpFOrdGreaterThanEqual
+; CHECK-DAG: OpFOrdLessThan
+; CHECK-DAG: OpFOrdLessThanEqual
+; CHECK-DAG: OpFOrdNotEqual
+; CHECK-DAG: OpOrdered
+; CHECK-DAG: OpFUnordEqual
+; CHECK-DAG: OpFUnordGreaterThan
+; CHECK-DAG: OpFUnordGreaterThanEqual
+; CHECK-DAG: OpFUnordLessThan
+; CHECK-DAG: OpFUnordLessThanEqual
+; CHECK-DAG: OpFUnordNotEqual
+; CHECK-DAG: OpUnordered
+
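+; Each constrained fcmp/fcmps call below should map to one of the opcodes
+; listed above; the or-reduction keeps every comparison result live.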
+define dso_local spir_kernel void @test(float %a){
+entry:
+ %cmp = tail call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %a, metadata !"oeq", metadata !"fpexcept.strict")
+ %cmp1 = tail call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %a, metadata !"ogt", metadata !"fpexcept.strict")
+ %cmp2 = tail call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %a, metadata !"oge", metadata !"fpexcept.strict")
+ %cmp3 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"olt", metadata !"fpexcept.strict")
+ %cmp4 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ole", metadata !"fpexcept.strict")
+ %cmp5 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"one", metadata !"fpexcept.strict")
+ %cmp6 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ord", metadata !"fpexcept.strict")
+ %cmp7 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ueq", metadata !"fpexcept.strict")
+ %cmp8 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ugt", metadata !"fpexcept.strict")
+ %cmp9 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"uge", metadata !"fpexcept.strict")
+ %cmp10 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ult", metadata !"fpexcept.strict")
+ %cmp11 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"ule", metadata !"fpexcept.strict")
+ %cmp12 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"une", metadata !"fpexcept.strict")
+ %cmp13 = tail call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %a, metadata !"uno", metadata !"fpexcept.strict")
+
+ %or1 = or i1 %cmp, %cmp1
+ %or2 = or i1 %or1, %cmp2
+ %or3 = or i1 %or2, %cmp3
+ %or4 = or i1 %or3, %cmp4
+ %or5 = or i1 %or4, %cmp5
+ %or6 = or i1 %or5, %cmp6
+ %or7 = or i1 %or6, %cmp7
+ %or8 = or i1 %or7, %cmp8
+ %or9 = or i1 %or8, %cmp9
+ %or10 = or i1 %or9, %cmp10
+ %or11 = or i1 %or10, %cmp11
+ %or12 = or i1 %or11, %cmp12
+ %or13 = or i1 %or12, %cmp13
+ br i1 %or13, label %true_block, label %false_block
+true_block:
+ ret void
+false_block:
+ ret void
+}
+declare i1 @llvm.experimental.constrained.fcmps.f32(float, float, metadata, metadata)
+declare i1 @llvm.experimental.constrained.fcmp.f32(float, float, metadata, metadata)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/debugtrap.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/debugtrap.ll
new file mode 100644
index 0000000..fd8cb9d
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/debugtrap.ll
@@ -0,0 +1,14 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+
+; CHECK: OpNop
+; CHECK-NEXT: OpReturn
+
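+; llvm.debugtrap must lower to OpNop rather than being dropped.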
+declare void @llvm.debugtrap()
+
+define spir_kernel void @foo(ptr addrspace(1) %a){
+entry:
+ %a.addr = alloca ptr addrspace(1), align 4
+ store ptr addrspace(1) %a, ptr %a.addr, align 4
+ call void @llvm.debugtrap()
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/frexp.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/frexp.ll
new file mode 100644
index 0000000..f6434e9
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/frexp.ll
@@ -0,0 +1,114 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: %[[#extinst_id:]] = OpExtInstImport "OpenCL.std"
+; CHECK-DAG: %[[#float_32_type:]] = OpTypeFloat 32
+; CHECK-DAG: %[[#int_32_type:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#fn_ptr_type_i32:]] = OpTypePointer Function %[[#int_32_type]]
+; CHECK-DAG: %[[#const_negzero:]] = OpConstant %[[#float_32_type]] -0
+; CHECK-DAG: %[[#vec2_float_type:]] = OpTypeVector %[[#float_32_type]] 2
+; CHECK-DAG: %[[#vec2_int_type:]] = OpTypeVector %[[#int_32_type]] 2
+; CHECK-DAG: %[[#fn_ptr_type_vec2_i32:]] = OpTypePointer Function %[[#vec2_int_type]]
+; CHECK-DAG: %[[#vec2_null:]] = OpConstantNull %[[#vec2_float_type]]
+; CHECK-DAG: %[[#scalar_null:]] = OpConstantNull %[[#float_32_type]]
+; CHECK-DAG: %[[#const_composite1:]] = OpConstantComposite %[[#vec2_float_type]] %[[#scalar_null]] %[[#const_negzero]]
+; CHECK-DAG: %[[#vec4_float_type:]] = OpTypeVector %[[#float_32_type]] 4
+; CHECK-DAG: %[[#vec4_int_type:]] = OpTypeVector %[[#int_32_type]] 4
+; CHECK-DAG: %[[#fn_ptr_type_vec4_i32:]] = OpTypePointer Function %[[#vec4_int_type]]
+; CHECK-DAG: %[[#const_composite2:]] = OpConstantComposite %[[#vec4_float_type]] %[[#const_16:]] %[[#const_neg32:]] %[[#const_0:]] %[[#const_9999:]]
+; CHECK-DAG: %[[#float_64_type:]] = OpTypeFloat 64
+; CHECK-DAG: %[[#vec2_double_type:]] = OpTypeVector %[[#float_64_type]] 2
+
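+; frexp lowers to the OpenCL.std frexp extended instruction, which returns the
+; fraction and writes the exponent through a pointer, so each call materializes
+; a Function-storage OpVariable for the exponent out-parameter.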
+; CHECK: %[[#]] = OpFunctionParameter %[[#float_32_type]]
+; CHECK: %[[#var1:]] = OpVariable %[[#fn_ptr_type_i32]] Function
+; CHECK: %[[#extinst1:]] = OpExtInst %[[#float_32_type]] %[[#extinst_id]] frexp %[[#const_negzero]] %[[#var1]]
+; CHECK: %[[#exp_part_var:]] = OpLoad %[[#int_32_type]] %[[#var1]]
+; CHECK: OpReturnValue %[[#exp_part_var]]
+define i32 @frexp_negzero(float %x) {
+ %ret = call { float, i32 } @llvm.frexp.f32.i32(float -0.0)
+ %f_part = extractvalue { float, i32 } %ret, 0
+ %exp_part = extractvalue { float, i32 } %ret, 1
+ ret i32 %exp_part
+}
+
+; CHECK: %[[#x_var4:]] = OpFunctionParameter %[[#float_32_type]]
+; CHECK: %[[#var10:]] = OpVariable %[[#fn_ptr_type_i32]] Function
+; CHECK: %[[#extinst10:]] = OpExtInst %[[#float_32_type]] %[[#extinst_id]] frexp %[[#x_var4]] %[[#var10]]
+; CHECK: %[[#exp_part_var2:]] = OpLoad %[[#int_32_type]] %[[#var10]]
+; CHECK: OpReturnValue %[[#exp_part_var2]]
+define i32 @frexp_frexp_get_int(float %x) {
+ %frexp0 = call { float, i32 } @llvm.frexp.f32.i32(float %x)
+ %f_part = extractvalue { float, i32 } %frexp0, 0
+ %exp_part = extractvalue { float, i32 } %frexp0, 1
+ ret i32 %exp_part
+}
+
+; CHECK: %[[#var3:]] = OpVariable %[[#fn_ptr_type_vec2_i32]] Function
+; CHECK: %[[#extinst3:]] = OpExtInst %[[#vec2_float_type]] %[[#extinst_id]] frexp %[[#vec2_null]] %[[#var3]]
+; CHECK: %[[#f_part_var2:]] = OpLoad %[[#vec2_int_type]] %[[#var3]]
+; CHECK: OpReturnValue %[[#extinst3]]
+define <2 x float> @frexp_zero_vector() {
+ %ret = call { <2 x float>, <2 x i32> } @llvm.frexp.v2f32.v2i32(<2 x float> zeroinitializer)
+ %f_part = extractvalue { <2 x float>, <2 x i32> } %ret, 0
+ %exp_part = extractvalue { <2 x float>, <2 x i32> } %ret, 1
+ ret <2 x float> %f_part
+}
+
+; CHECK: %[[#var4:]] = OpVariable %[[#fn_ptr_type_vec2_i32]] Function
+; CHECK: %[[#extinst4:]] = OpExtInst %[[#vec2_float_type]] %[[#extinst_id]] frexp %[[#const_composite1]] %[[#var4]]
+; CHECK: %[[#f_part_var3:]] = OpLoad %[[#vec2_int_type]] %[[#var4]]
+; CHECK: OpReturnValue %[[#extinst4]]
+define <2 x float> @frexp_zero_negzero_vector() {
+ %ret = call { <2 x float>, <2 x i32> } @llvm.frexp.v2f32.v2i32(<2 x float> <float 0.0, float -0.0>)
+ %f_part = extractvalue { <2 x float>, <2 x i32> } %ret, 0
+ %exp_part = extractvalue { <2 x float>, <2 x i32> } %ret, 1
+ ret <2 x float> %f_part
+}
+
+; CHECK: %[[#var5:]] = OpVariable %[[#fn_ptr_type_vec4_i32]] Function
+; CHECK: %[[#extinst5:]] = OpExtInst %[[#vec4_float_type]] %[[#extinst_id]] frexp %[[#const_composite2]] %[[#var5]]
+; CHECK: %[[#f_part_var4:]] = OpLoad %[[#vec4_int_type]] %[[#var5]]
+; CHECK: OpReturnValue %[[#extinst5]]
+define <4 x float> @frexp_nonsplat_vector() {
+ %ret = call { <4 x float>, <4 x i32> } @llvm.frexp.v4f32.v4i32(<4 x float> <float 16.0, float -32.0, float 0.0, float 9999.0>)
+ %f_part = extractvalue { <4 x float>, <4 x i32> } %ret, 0
+ %exp_part = extractvalue { <4 x float>, <4 x i32> } %ret, 1
+ ret <4 x float> %f_part
+}
+
+; CHECK: %[[#x_var2:]] = OpFunctionParameter %[[#float_32_type]]
+; CHECK: %[[#var6:]] = OpVariable %[[#fn_ptr_type_i32]] Function
+; CHECK: %[[#var7:]] = OpVariable %[[#fn_ptr_type_i32]] Function
+; CHECK: %[[#extinst6:]] = OpExtInst %[[#float_32_type]] %[[#extinst_id]] frexp %[[#x_var2]] %[[#var6]]
+; CHECK: %[[#load1:]] = OpLoad %[[#int_32_type]] %[[#var6]]
+; CHECK: %[[#extinst7:]] = OpExtInst %[[#float_32_type]] %[[#extinst_id]] frexp %[[#extinst6]] %[[#var7]]
+; CHECK: %[[#f_part_var5:]] = OpLoad %[[#int_32_type]] %[[#var7]]
+; CHECK: OpReturnValue %[[#extinst7]]
+define float @frexp_frexp(float %x) {
+ %frexp0 = call { float, i32 } @llvm.frexp.f32.i32(float %x)
+ %frexp0_f_part = extractvalue { float, i32 } %frexp0, 0
+ %frexp0_exp_part = extractvalue { float, i32 } %frexp0, 1
+ %frexp1 = call { float, i32 } @llvm.frexp.f32.i32(float %frexp0_f_part)
+ %frexp1_f_part = extractvalue { float, i32 } %frexp1, 0
+ %frexp1_exp_part = extractvalue { float, i32 } %frexp1, 1
+ ret float %frexp1_f_part
+}
+
+; CHECK: %[[#x_var3:]] = OpFunctionParameter %[[#vec2_double_type]]
+; CHECK: %[[#var9:]] = OpVariable %[[#fn_ptr_type_vec2_i32]] Function
+; CHECK: %[[#extinst9:]] = OpExtInst %[[#vec2_double_type]] %[[#extinst_id]] frexp %[[#x_var3]] %[[#var9]]
+; CHECK: %[[#f_part_var6:]] = OpLoad %[[#vec2_int_type]] %[[#var9]]
+; CHECK: OpReturnValue %[[#extinst9]]
+define <2 x double> @frexp_frexp_vector(<2 x double> %x) {
+ %frexp0 = call { <2 x double>, <2 x i32> } @llvm.frexp.v2f64.v2i32(<2 x double> %x)
+ %f_part = extractvalue { <2 x double>, <2 x i32> } %frexp0, 0
+ %exp_part = extractvalue { <2 x double>, <2 x i32> } %frexp0, 1
+ ret <2 x double> %f_part
+}
+
+declare { float, i32 } @llvm.frexp.f32.i32(float)
+declare { double, i32 } @llvm.frexp.f64.i32(double)
+declare { <2 x float>, <2 x i32> } @llvm.frexp.v2f32.v2i32(<2 x float>)
+declare { <4 x float>, <4 x i32> } @llvm.frexp.v4f32.v4i32(<4 x float>)
+declare { <2 x double>, <2 x i32> } @llvm.frexp.v2f64.v2i32(<2 x double>)
+declare { float, i8 } @llvm.frexp.f32.i8(float)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ignore-llvm-intrinsic.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ignore-llvm-intrinsic.ll
index a15a807..b3ef6d6 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ignore-llvm-intrinsic.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/ignore-llvm-intrinsic.ll
@@ -11,7 +11,6 @@
define spir_kernel void @foo(ptr %p) {
entry:
call void @llvm.trap()
- call void @llvm.debugtrap()
call void @llvm.ubsantrap(i8 100)
%r1 = call ptr @llvm.invariant.start.p0(i64 1024, ptr %p)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memmove.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memmove.ll
new file mode 100644
index 0000000..51b7664
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memmove.ll
@@ -0,0 +1,86 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-NOT: llvm.memmove
+
+; CHECK-DAG: %[[#Int8:]] = OpTypeInt 8 0
+; CHECK-DAG: %[[#Int32:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#Int64:]] = OpTypeInt 64 0
+; CHECK-DAG: %[[#Ptr_CrossWG_8:]] = OpTypePointer CrossWorkgroup %[[#Int8]]
+; CHECK-DAG: %[[#Ptr_Generic_32:]] = OpTypePointer Generic %[[#Int32]]
+; CHECK-DAG: %[[#Const_64:]] = OpConstant %[[#Int32]] 64
+; CHECK-DAG: %[[#Const_36:]] = OpConstant %[[#Int32]] 36
+; CHECK-DAG: %[[#Const_30:]] = OpConstant %[[#Int32]] 30
+; CHECK-DAG: %[[#Const_32_64:]] = OpConstant %[[#Int64]] 32
+
+; CHECK: %[[#Param1:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]]
+; CHECK: %[[#Param2:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]]
+; CHECK: %[[#Size1:]] = OpUConvert %[[#Int64]] %[[#Const_64]]
+; CHECK: OpCopyMemorySized %[[#Param2]] %[[#Param1]] %[[#Size1]] Aligned 64
+
+; CHECK: %[[#Src:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]]
+; CHECK: %[[#CastDst2:]] = OpGenericCastToPtr %[[#Ptr_CrossWG_8]] %[[#GenPtr:]]
+; CHECK: %[[#Size2:]] = OpUConvert %[[#Int64]] %[[#Const_36]]
+; CHECK: OpCopyMemorySized %[[#CastDst2]] %[[#Src]] %[[#Size2]] Aligned 64
+
+; CHECK: %[[#Param1:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]]
+; CHECK: %[[#Param2:]] = OpFunctionParameter %[[#Ptr_CrossWG_8]]
+; CHECK: %[[#Size3:]] = OpUConvert %[[#Int64]] %[[#Const_30]]
+; CHECK: OpCopyMemorySized %[[#Param2]] %[[#Param1]] %[[#Size3]] Aligned 1
+
+; CHECK: %[[#Phi:]] = OpPhi %[[#Ptr_Generic_32]] %[[#Op1:]] %[[#Lbl1:]] %[[#Op2:]] %[[#Lbl2:]]
+; CHECK: %[[#Cast:]] = OpPtrCastToGeneric %[[#]] %[[#]]
+; CHECK: OpCopyMemorySized %[[#Cast]] %[[#Phi]] %[[#Const_32_64]] Aligned 8
+
+%struct.SomeStruct = type { <16 x float>, i32, [60 x i8] }
+%class.kfunc = type <{ i32, i32, i32, [4 x i8] }>
+
+@InvocIndex = external local_unnamed_addr addrspace(1) constant i64, align 8
+@"func_object1" = internal addrspace(3) global %class.kfunc zeroinitializer, align 8
+
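+; llvm.memmove has no direct SPIR-V equivalent; each call below is expected to
+; lower to a single OpCopyMemorySized with the size and alignment checked above.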
+define spir_kernel void @test_full_move(%struct.SomeStruct addrspace(1)* captures(none) readonly %in, %struct.SomeStruct addrspace(1)* captures(none) %out) {
+ %1 = bitcast %struct.SomeStruct addrspace(1)* %in to i8 addrspace(1)*
+ %2 = bitcast %struct.SomeStruct addrspace(1)* %out to i8 addrspace(1)*
+ call void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* align 64 %2, i8 addrspace(1)* align 64 %1, i32 64, i1 false)
+ ret void
+}
+
+define spir_kernel void @test_partial_move(%struct.SomeStruct addrspace(1)* captures(none) readonly %in, %struct.SomeStruct addrspace(4)* captures(none) %out) {
+ %1 = bitcast %struct.SomeStruct addrspace(1)* %in to i8 addrspace(1)*
+ %2 = bitcast %struct.SomeStruct addrspace(4)* %out to i8 addrspace(4)*
+ %3 = addrspacecast i8 addrspace(4)* %2 to i8 addrspace(1)*
+ call void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* align 64 %3, i8 addrspace(1)* align 64 %1, i32 36, i1 false)
+ ret void
+}
+
+define spir_kernel void @test_array(i8 addrspace(1)* %in, i8 addrspace(1)* %out) {
+ call void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* %out, i8 addrspace(1)* %in, i32 30, i1 false)
+ ret void
+}
+
+define weak_odr dso_local spir_kernel void @test_phi() local_unnamed_addr {
+entry:
+ %0 = alloca i32, align 8
+ %1 = addrspacecast i32* %0 to i32 addrspace(4)*
+ %2 = load i64, i64 addrspace(1)* @InvocIndex, align 8
+ %cmp = icmp eq i64 %2, 0
+ br i1 %cmp, label %leader, label %entry.merge_crit_edge
+
+entry.merge_crit_edge: ; preds = %entry
+ %3 = bitcast i32 addrspace(4)* %1 to i8 addrspace(4)*
+ br label %merge
+
+leader: ; preds = %entry
+ %4 = bitcast i32 addrspace(4)* %1 to i8 addrspace(4)*
+ br label %merge
+
+merge: ; preds = %entry.merge_crit_edge, %leader
+ %phi = phi i8 addrspace(4)* [ %3, %entry.merge_crit_edge ], [ %4, %leader ]
+ %5 = addrspacecast i8 addrspace(3)* bitcast (%class.kfunc addrspace(3)* @"func_object1" to i8 addrspace(3)*) to i8 addrspace(4)*
+ call void @llvm.memmove.p4i8.p4i8.i64(i8 addrspace(4)* align 8 dereferenceable(32) %5, i8 addrspace(4)* align 8 dereferenceable(32) %phi, i64 32, i1 false)
+ ret void
+}
+
+declare void @llvm.memmove.p4i8.p4i8.i64(i8 addrspace(4)* captures(none) writeonly, i8 addrspace(4)* captures(none) readonly, i64, i1 immarg)
+
+declare void @llvm.memmove.p1i8.p1i8.i32(i8 addrspace(1)* captures(none), i8 addrspace(1)* captures(none) readonly, i32, i1)
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/signed_arithmetic_overflow.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/signed_arithmetic_overflow.ll
new file mode 100644
index 0000000..52f939f
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/signed_arithmetic_overflow.ll
@@ -0,0 +1,30 @@
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -filetype=obj -o - | spirv-val %}
+; XFAIL: *
+; @llvm.sadd.with.overflow and @llvm.ssub.with.overflow have not been implemented.
+
+define spir_func void @test_sadd_overflow(ptr %out_result, ptr %out_overflow, i32 %a, i32 %b) {
+entry:
+ %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+ %val = extractvalue { i32, i1 } %res, 0
+ %ofl = extractvalue { i32, i1 } %res, 1
+ store i32 %val, ptr %out_result
+ %zext_ofl = zext i1 %ofl to i8
+ store i8 %zext_ofl, ptr %out_overflow
+ ret void
+}
+
+declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
+
+define spir_func void @test_ssub_overflow(ptr %out_result, ptr %out_overflow, i32 %a, i32 %b) {
+entry:
+ %res = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
+ %val = extractvalue { i32, i1 } %res, 0
+ %ofl = extractvalue { i32, i1 } %res, 1
+ store i32 %val, ptr %out_result
+ %zext_ofl = zext i1 %ofl to i8
+ store i8 %zext_ofl, ptr %out_overflow
+ ret void
+}
+
+declare { i32, i1 } @llvm.ssub.with.overflow.i32(i32, i32)
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll b/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll
index e405ef0..5e66b8b6 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/NoSignedUnsignedWrap.ll
@@ -7,10 +7,11 @@
;;
;; Positive tests:
;;
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_no_integer_wrap_decoration %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV,CHECK-SPIRV-NEGATIVE
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_KHR_no_integer_wrap_decoration %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV
;;
;; Negative tests:
;;
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefixes=CHECK-SPIRV-NEGATIVE
;; Check that the backend is able to skip nsw/nuw attributes if the extension is
;; disabled implicitly or explicitly and if the max SPIR-V version is lower than 1.4.
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/OpVariable_Initializer.ll b/llvm/test/CodeGen/SPIRV/transcoding/OpVariable_Initializer.ll
new file mode 100644
index 0000000..c8953c7
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/transcoding/OpVariable_Initializer.ll
@@ -0,0 +1,11 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-SPIRV: %[[#PtrT:]] = OpTypePointer Workgroup %[[#]]
+; CHECK-SPIRV: %[[#]] = OpVariable %[[#PtrT]] Workgroup
+
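+; Checks lowering of a zero-initialized addrspace(3) global to a Workgroup
+; OpVariable.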
+@test_atomic_fn.L = internal addrspace(3) global [64 x i32] zeroinitializer, align 4
+
+define spir_kernel void @test_atomic_fn() {
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/builtin_pipe.ll b/llvm/test/CodeGen/SPIRV/transcoding/builtin_pipe.ll
new file mode 100644
index 0000000..607997d
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/transcoding/builtin_pipe.ll
@@ -0,0 +1,140 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpCapability Kernel
+; CHECK: OpCapability Addresses
+; CHECK: OpCapability Pipes
+; CHECK: OpCapability Int8
+; CHECK: OpCapability GenericPointer
+
+; CHECK-DAG: %[[#PipeWriteTy:]] = OpTypePipe WriteOnly
+; CHECK-DAG: %[[#PipeReadTy:]] = OpTypePipe ReadOnly
+; CHECK-DAG: %[[#ReserveIdTy:]] = OpTypeReserveId
+; CHECK-DAG: %[[#BoolTy:]] = OpTypeBool
+; CHECK-DAG: %[[#Int32Ty:]] = OpTypeInt 32 0
+; CHECK-DAG: %[[#Uint1:]] = OpConstant %[[#Int32Ty]] 1
+; CHECK-DAG: %[[#Uint2:]] = OpConstant %[[#Int32Ty]] 2
+; CHECK-DAG: %[[#Uint3:]] = OpConstant %[[#Int32Ty]] 3
+; CHECK-DAG: %[[#Uint4:]] = OpConstant %[[#Int32Ty]] 4
+; CHECK-DAG: %[[#NullUint:]] = OpConstantNull %[[#Int32Ty]]
+
+; CHECK: OpFunction
+; CHECK: %[[#FuncParam1:]] = OpFunctionParameter %[[#PipeWriteTy]]
+; CHECK: %[[#FuncParam2:]] = OpFunctionParameter %[[#PipeReadTy]]
+
+; CHECK: %[[#BasicWriteReserve:]] = OpReserveWritePipePackets %[[#ReserveIdTy]] %[[#FuncParam1]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: OpWritePipe %[[#Int32Ty]] %[[#FuncParam1]] %[[#]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: OpCommitWritePipe %[[#FuncParam1]] %[[#BasicWriteReserve]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: %[[#BasicReadReserve:]] = OpReserveReadPipePackets %[[#ReserveIdTy]] %[[#FuncParam2]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: OpReadPipe %[[#Int32Ty]] %[[#FuncParam2]] %[[#]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: OpCommitReadPipe %[[#FuncParam2]] %[[#BasicReadReserve]] %[[#Uint4]] %[[#Uint4]]
+
+; --- Reserved pipe operations ---
+; CHECK: %[[#ReservedWriteReserve:]] = OpReserveWritePipePackets %[[#ReserveIdTy]] %[[#FuncParam1]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: %[[#ReservedWrite:]] = OpReservedWritePipe %[[#Int32Ty]] %[[#FuncParam1]] %[[#ReservedWriteReserve]] %[[#NullUint]] %[[#]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: %[[#IsValidWrite:]] = OpIsValidReserveId %[[#BoolTy]] %[[#ReservedWriteReserve]]
+; CHECK: OpCommitWritePipe %[[#FuncParam1]] %[[#ReservedWriteReserve]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: %[[#ReservedReadReserve:]] = OpReserveReadPipePackets %[[#ReserveIdTy]] %[[#FuncParam2]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: %[[#ReservedRead:]] = OpReservedReadPipe %[[#Int32Ty]] %[[#FuncParam2]] %[[#ReservedReadReserve]] %[[#NullUint]] %[[#]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: %[[#IsValidRead:]] = OpIsValidReserveId %[[#BoolTy]] %[[#ReservedReadReserve]]
+; CHECK: OpCommitReadPipe %[[#FuncParam2]] %[[#ReservedReadReserve]] %[[#Uint4]] %[[#Uint4]]
+
+; --- Pipe packet queries ---
+; CHECK: %[[#MaxPacketsWO:]] = OpGetMaxPipePackets %[[#Int32Ty]] %[[#FuncParam1]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: OpStore %[[#]] %[[#MaxPacketsWO]] Aligned 4
+; CHECK: %[[#NumPacketsWO:]] = OpGetNumPipePackets %[[#Int32Ty]] %[[#FuncParam1]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: OpStore %[[#]] %[[#NumPacketsWO]] Aligned 4
+; CHECK: %[[#MaxPacketsRO:]] = OpGetMaxPipePackets %[[#Int32Ty]] %[[#FuncParam2]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: OpStore %[[#]] %[[#MaxPacketsRO]] Aligned 4
+; CHECK: %[[#NumPacketsRO:]] = OpGetNumPipePackets %[[#Int32Ty]] %[[#FuncParam2]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: OpStore %[[#]] %[[#NumPacketsRO]] Aligned 4
+
+; --- Workgroup operations ---
+; CHECK: %[[#WorkgroupWriteReserve:]] = OpGroupReserveWritePipePackets %[[#ReserveIdTy]] %[[#Uint2]] %[[#FuncParam1]] %[[#Uint1]] %[[#Uint1]] %[[#Uint1]]
+; CHECK: OpGroupCommitWritePipe %[[#Uint2]] %[[#FuncParam1]] %[[#WorkgroupWriteReserve]] %[[#Uint1]] %[[#Uint1]]
+; CHECK: %[[#WorkgroupReadReserve:]] = OpGroupReserveReadPipePackets %[[#ReserveIdTy]] %[[#Uint2]] %[[#FuncParam2]] %[[#Uint1]] %[[#Uint1]] %[[#Uint1]]
+; CHECK: OpGroupCommitReadPipe %[[#Uint2]] %[[#FuncParam2]] %[[#WorkgroupReadReserve]] %[[#Uint1]] %[[#Uint1]]
+
+; --- Subgroup operations ---
+; CHECK: %[[#SubgroupWriteReserve:]] = OpGroupReserveWritePipePackets %[[#ReserveIdTy]] %[[#Uint3]] %[[#FuncParam1]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: OpGroupCommitWritePipe %[[#Uint3]] %[[#FuncParam1]] %[[#SubgroupWriteReserve]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: %[[#SubgroupReadReserve:]] = OpGroupReserveReadPipePackets %[[#ReserveIdTy]] %[[#Uint3]] %[[#FuncParam2]] %[[#Uint1]] %[[#Uint4]] %[[#Uint4]]
+; CHECK: OpGroupCommitReadPipe %[[#Uint3]] %[[#FuncParam2]] %[[#SubgroupReadReserve]] %[[#Uint4]] %[[#Uint4]]
+
+define spir_kernel void @test_pipe_builtins(
+ target("spirv.Pipe", 1) %out_pipe,
+ target("spirv.Pipe", 0) %in_pipe,
+ ptr addrspace(4) %src,
+ ptr addrspace(4) %dst,
+ ptr addrspace(1) %max_packets_wo,
+ ptr addrspace(1) %num_packets_wo,
+ ptr addrspace(1) %max_packets_ro,
+ ptr addrspace(1) %num_packets_ro
+) {
+entry:
+ ; Basic pipe operations
+ %0 = call spir_func target("spirv.ReserveId") @__reserve_write_pipe(target("spirv.Pipe", 1) %out_pipe, i32 1, i32 4, i32 4)
+ %1 = call spir_func i32 @__write_pipe_2(target("spirv.Pipe", 1) %out_pipe, ptr addrspace(4) %src, i32 4, i32 4)
+ call spir_func void @__commit_write_pipe(target("spirv.Pipe", 1) %out_pipe, target("spirv.ReserveId") %0, i32 4, i32 4)
+
+ %2 = call spir_func target("spirv.ReserveId") @__reserve_read_pipe(target("spirv.Pipe", 0) %in_pipe, i32 1, i32 4, i32 4)
+ %3 = call spir_func i32 @__read_pipe_2(target("spirv.Pipe", 0) %in_pipe, ptr addrspace(4) %dst, i32 4, i32 4)
+ call spir_func void @__commit_read_pipe(target("spirv.Pipe", 0) %in_pipe, target("spirv.ReserveId") %2, i32 4, i32 4)
+
+ ; Reserved pipe operations
+ %4 = call spir_func target("spirv.ReserveId") @__reserve_write_pipe(target("spirv.Pipe", 1) %out_pipe, i32 1, i32 4, i32 4)
+ %5 = call spir_func i32 @__write_pipe_4(target("spirv.Pipe", 1) %out_pipe, target("spirv.ReserveId") %4, i32 0, ptr addrspace(4) %src, i32 4, i32 4)
+ %6 = call spir_func i1 @_Z19is_valid_reserve_id13ocl_reserveid(target("spirv.ReserveId") %4)
+ call spir_func void @__commit_write_pipe(target("spirv.Pipe", 1) %out_pipe, target("spirv.ReserveId") %4, i32 4, i32 4)
+
+ %7 = call spir_func target("spirv.ReserveId") @__reserve_read_pipe(target("spirv.Pipe", 0) %in_pipe, i32 1, i32 4, i32 4)
+ %8 = call spir_func i32 @__read_pipe_4(target("spirv.Pipe", 0) %in_pipe, target("spirv.ReserveId") %7, i32 0, ptr addrspace(4) %dst, i32 4, i32 4)
+ %9 = call spir_func i1 @_Z19is_valid_reserve_id13ocl_reserveid(target("spirv.ReserveId") %7)
+ call spir_func void @__commit_read_pipe(target("spirv.Pipe", 0) %in_pipe, target("spirv.ReserveId") %7, i32 4, i32 4)
+
+ ; Pipe packet queries
+ %10 = call spir_func i32 @__get_pipe_max_packets_wo(target("spirv.Pipe", 1) %out_pipe, i32 4, i32 4)
+ store i32 %10, ptr addrspace(1) %max_packets_wo, align 4
+ %11 = call spir_func i32 @__get_pipe_num_packets_wo(target("spirv.Pipe", 1) %out_pipe, i32 4, i32 4)
+ store i32 %11, ptr addrspace(1) %num_packets_wo, align 4
+ %12 = call spir_func i32 @__get_pipe_max_packets_ro(target("spirv.Pipe", 0) %in_pipe, i32 4, i32 4)
+ store i32 %12, ptr addrspace(1) %max_packets_ro, align 4
+ %13 = call spir_func i32 @__get_pipe_num_packets_ro(target("spirv.Pipe", 0) %in_pipe, i32 4, i32 4)
+ store i32 %13, ptr addrspace(1) %num_packets_ro, align 4
+
+ ; Workgroup operations
+ %14 = call spir_func target("spirv.ReserveId") @__work_group_reserve_write_pipe(target("spirv.Pipe", 1) %out_pipe, i32 1, i32 1, i32 1)
+ call spir_func void @__work_group_commit_write_pipe(target("spirv.Pipe", 1) %out_pipe, target("spirv.ReserveId") %14, i32 1, i32 1)
+ %15 = call spir_func target("spirv.ReserveId") @__work_group_reserve_read_pipe(target("spirv.Pipe", 0) %in_pipe, i32 1, i32 1, i32 1)
+ call spir_func void @__work_group_commit_read_pipe(target("spirv.Pipe", 0) %in_pipe, target("spirv.ReserveId") %15, i32 1, i32 1)
+
+ ; Subgroup operations
+ %16 = call spir_func target("spirv.ReserveId") @__sub_group_reserve_write_pipe(target("spirv.Pipe", 1) %out_pipe, i32 1, i32 4, i32 4)
+ call spir_func void @__sub_group_commit_write_pipe(target("spirv.Pipe", 1) %out_pipe, target("spirv.ReserveId") %16, i32 4, i32 4)
+ %17 = call spir_func target("spirv.ReserveId") @__sub_group_reserve_read_pipe(target("spirv.Pipe", 0) %in_pipe, i32 1, i32 4, i32 4)
+ call spir_func void @__sub_group_commit_read_pipe(target("spirv.Pipe", 0) %in_pipe, target("spirv.ReserveId") %17, i32 4, i32 4)
+
+ ret void
+}
+
+declare spir_func target("spirv.ReserveId") @__reserve_write_pipe(target("spirv.Pipe", 1), i32, i32, i32)
+declare spir_func target("spirv.ReserveId") @__reserve_read_pipe(target("spirv.Pipe", 0), i32, i32, i32)
+declare spir_func i32 @__write_pipe_2(target("spirv.Pipe", 1), ptr addrspace(4), i32, i32)
+declare spir_func i32 @__read_pipe_2(target("spirv.Pipe", 0), ptr addrspace(4), i32, i32)
+declare spir_func i32 @__write_pipe_4(target("spirv.Pipe", 1), target("spirv.ReserveId"), i32, ptr addrspace(4), i32, i32)
+declare spir_func i32 @__read_pipe_4(target("spirv.Pipe", 0), target("spirv.ReserveId"), i32, ptr addrspace(4), i32, i32)
+declare spir_func void @__commit_write_pipe(target("spirv.Pipe", 1), target("spirv.ReserveId"), i32, i32)
+declare spir_func void @__commit_read_pipe(target("spirv.Pipe", 0), target("spirv.ReserveId"), i32, i32)
+declare spir_func i1 @_Z19is_valid_reserve_id13ocl_reserveid(target("spirv.ReserveId"))
+declare spir_func i32 @__get_pipe_max_packets_wo(target("spirv.Pipe", 1), i32, i32)
+declare spir_func i32 @__get_pipe_num_packets_wo(target("spirv.Pipe", 1), i32, i32)
+declare spir_func i32 @__get_pipe_max_packets_ro(target("spirv.Pipe", 0), i32, i32)
+declare spir_func i32 @__get_pipe_num_packets_ro(target("spirv.Pipe", 0), i32, i32)
+declare spir_func target("spirv.ReserveId") @__work_group_reserve_write_pipe(target("spirv.Pipe", 1), i32, i32, i32)
+declare spir_func void @__work_group_commit_write_pipe(target("spirv.Pipe", 1), target("spirv.ReserveId"), i32, i32)
+declare spir_func target("spirv.ReserveId") @__work_group_reserve_read_pipe(target("spirv.Pipe", 0), i32, i32, i32)
+declare spir_func void @__work_group_commit_read_pipe(target("spirv.Pipe", 0), target("spirv.ReserveId"), i32, i32)
+declare spir_func target("spirv.ReserveId") @__sub_group_reserve_write_pipe(target("spirv.Pipe", 1), i32, i32, i32)
+declare spir_func void @__sub_group_commit_write_pipe(target("spirv.Pipe", 1), target("spirv.ReserveId"), i32, i32)
+declare spir_func target("spirv.ReserveId") @__sub_group_reserve_read_pipe(target("spirv.Pipe", 0), i32, i32, i32)
+declare spir_func void @__sub_group_commit_read_pipe(target("spirv.Pipe", 0), target("spirv.ReserveId"), i32, i32)
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_gep.ll b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_gep.ll
new file mode 100644
index 0000000..4c64a12
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/transcoding/builtin_vars_gep.ll
@@ -0,0 +1,16 @@
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
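+; A scalar i64 load from the <3 x i64> builtin reads element 0 of the vector
+; (an implicit GEP to the base of the variable). The variable must still be
+; emitted with the BuiltIn decoration and CrossWorkgroup storage class below.
+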
+; CHECK: OpDecorate %[[#Id:]] BuiltIn GlobalInvocationId
+; CHECK: %[[#Id]] = OpVariable %[[#]] CrossWorkgroup
+
+@__spirv_BuiltInGlobalInvocationId = external dso_local local_unnamed_addr addrspace(1) constant <3 x i64>, align 32
+
+define spir_kernel void @f() {
+entry:
+ %0 = load i64, ptr addrspace(1) @__spirv_BuiltInGlobalInvocationId, align 32
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/decoration-forward-decl.ll b/llvm/test/CodeGen/SPIRV/transcoding/decoration-forward-decl.ll
new file mode 100644
index 0000000..74ce26b
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/transcoding/decoration-forward-decl.ll
@@ -0,0 +1,30 @@
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; Check that a saturated conversion is translated correctly when there is a
+; forward declaration of the SPIR-V entry.
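+; The phi in %for.cond uses %call1 before it is defined in %for.body, so the
+; conversion's result id is forward-referenced and the SaturatedConversion
+; decoration must be attached to an id that has not been emitted yet.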
+
+; CHECK: OpDecorate %[[#SAT:]] SaturatedConversion
+; CHECK: %[[#SAT]] = OpConvertFToU %[[#]] %[[#]]
+
+declare spir_func zeroext i8 @_Z30__spirv_ConvertFToU_Ruchar_satf(float)
+
+define spir_func void @forward(float %val, i8 %initval, ptr addrspace(1) %dst) {
+entry:
+ br label %for.cond
+
+for.cond: ; preds = %for.body, %entry
+ %new_val.0 = phi i8 [ %initval, %entry ], [ %call1, %for.body ]
+ %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %cmp = icmp ult i32 %i.0, 1
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ %call1 = call spir_func zeroext i8 @_Z30__spirv_ConvertFToU_Ruchar_satf(float noundef %val)
+ %inc = add i32 %i.0, 1
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ store i8 %new_val.0, ptr addrspace(1) %dst, align 1
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/float16.ll b/llvm/test/CodeGen/SPIRV/transcoding/float16.ll
new file mode 100644
index 0000000..0018dba
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/transcoding/float16.ll
@@ -0,0 +1,25 @@
+; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
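+; Scalar and <2 x half> calls to __spirv_ocl_fract must lower to OpExtInst
+; fract. 0xH39C4 is the raw half bit pattern; the constant is printed as the
+; decimal value of that bit pattern, 14788, matching the OpConstant check below.
+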
+; CHECK-SPIRV: %[[#HALF:]] = OpTypeFloat 16
+; CHECK-SPIRV: %[[#HALFPTR:]] = OpTypePointer Function %[[#HALF]]
+; CHECK-SPIRV: %[[#HALFV2:]] = OpTypeVector %[[#HALF]] 2
+; CHECK-SPIRV: %[[#HALFV2PTR:]] = OpTypePointer Function %[[#HALFV2]]
+; CHECK-SPIRV: %[[#CONST:]] = OpConstant %[[#HALF]] 14788
+; CHECK-SPIRV: %[[#ADDR:]] = OpVariable %[[#HALFPTR]] Function
+; CHECK-SPIRV: %[[#ADDR2:]] = OpVariable %[[#HALFV2PTR]] Function
+; CHECK-SPIRV: %[[#]] = OpExtInst %[[#HALF]] %[[#]] fract %[[#CONST]] %[[#ADDR]]
+; CHECK-SPIRV: %[[#]] = OpExtInst %[[#HALFV2]] %[[#]] fract %[[#]] %[[#ADDR2]]
+
+define spir_kernel void @test() {
+entry:
+ %addr = alloca half
+ %addr2 = alloca <2 x half>
+ %res = call spir_func noundef half @_Z17__spirv_ocl_fractDF16_PU3AS0DF16_(half noundef 0xH39C4, ptr noundef %addr)
+ %res2 = call spir_func noundef <2 x half> @_Z17__spirv_ocl_fractDv2_DF16_PU3AS0S_(<2 x half> noundef <half 0xH39C4, half 0xH0000>, ptr noundef %addr2)
+ ret void
+}
+
+declare spir_func noundef half @_Z17__spirv_ocl_fractDF16_PU3AS0DF16_(half noundef, ptr noundef) local_unnamed_addr
+
+declare spir_func noundef <2 x half> @_Z17__spirv_ocl_fractDv2_DF16_PU3AS0S_(<2 x half> noundef, ptr noundef) local_unnamed_addr
diff --git a/llvm/test/CodeGen/X86/avx512-mask-op.ll b/llvm/test/CodeGen/X86/avx512-mask-op.ll
index 8aa898f..da0cef0 100644
--- a/llvm/test/CodeGen/X86/avx512-mask-op.ll
+++ b/llvm/test/CodeGen/X86/avx512-mask-op.ll
@@ -2119,8 +2119,7 @@ define void @ktest_1(<8 x double> %in, ptr %base) {
; KNL-LABEL: ktest_1:
; KNL: ## %bb.0:
; KNL-NEXT: vcmpgtpd (%rdi), %zmm0, %k1
-; KNL-NEXT: vmovupd 8(%rdi), %zmm1 {%k1} {z}
-; KNL-NEXT: vcmpltpd %zmm1, %zmm0, %k0 {%k1}
+; KNL-NEXT: vcmpltpd 8(%rdi), %zmm0, %k0 {%k1}
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: testb %al, %al
; KNL-NEXT: je LBB44_2
@@ -2152,8 +2151,7 @@ define void @ktest_1(<8 x double> %in, ptr %base) {
; AVX512BW-LABEL: ktest_1:
; AVX512BW: ## %bb.0:
; AVX512BW-NEXT: vcmpgtpd (%rdi), %zmm0, %k1
-; AVX512BW-NEXT: vmovupd 8(%rdi), %zmm1 {%k1} {z}
-; AVX512BW-NEXT: vcmpltpd %zmm1, %zmm0, %k0 {%k1}
+; AVX512BW-NEXT: vcmpltpd 8(%rdi), %zmm0, %k0 {%k1}
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: testb %al, %al
; AVX512BW-NEXT: je LBB44_2
diff --git a/llvm/test/CodeGen/X86/combine-add.ll b/llvm/test/CodeGen/X86/combine-add.ll
index ff9f995..51a8bf5 100644
--- a/llvm/test/CodeGen/X86/combine-add.ll
+++ b/llvm/test/CodeGen/X86/combine-add.ll
@@ -235,10 +235,10 @@ define void @PR52039(ptr %pa, ptr %pb) {
; SSE-NEXT: psubd %xmm1, %xmm3
; SSE-NEXT: psubd %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm0
-; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: paddd %xmm0, %xmm0
; SSE-NEXT: paddd %xmm2, %xmm0
; SSE-NEXT: movdqa %xmm3, %xmm1
-; SSE-NEXT: paddd %xmm3, %xmm1
+; SSE-NEXT: paddd %xmm1, %xmm1
; SSE-NEXT: paddd %xmm3, %xmm1
; SSE-NEXT: movdqu %xmm3, 16(%rsi)
; SSE-NEXT: movdqu %xmm2, (%rsi)
diff --git a/llvm/test/CodeGen/X86/combine-mul.ll b/llvm/test/CodeGen/X86/combine-mul.ll
index 8e4a50e..ae4d24f 100644
--- a/llvm/test/CodeGen/X86/combine-mul.ll
+++ b/llvm/test/CodeGen/X86/combine-mul.ll
@@ -81,7 +81,7 @@ define <4 x i64> @combine_vec_mul_pow2c(<4 x i64> %x) {
; SSE-LABEL: combine_vec_mul_pow2c:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: paddq %xmm0, %xmm2
+; SSE-NEXT: paddq %xmm2, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psllq $4, %xmm2
diff --git a/llvm/test/CodeGen/X86/combine-sdiv.ll b/llvm/test/CodeGen/X86/combine-sdiv.ll
index 98187d6..6bcbfe1 100644
--- a/llvm/test/CodeGen/X86/combine-sdiv.ll
+++ b/llvm/test/CodeGen/X86/combine-sdiv.ll
@@ -2187,13 +2187,13 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) {
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pxor %xmm3, %xmm3
; SSE41-NEXT: pcmpgtb %xmm1, %xmm3
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3 # [256,2,2,2,2,128,2,128]
; SSE41-NEXT: psrlw $8, %xmm3
-; SSE41-NEXT: paddw %xmm4, %xmm4
-; SSE41-NEXT: pmovsxbw %xmm1, %xmm2
-; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2],xmm2[3,4,5],xmm4[6],xmm2[7]
+; SSE41-NEXT: pmovsxbw %xmm1, %xmm0
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; SSE41-NEXT: paddw %xmm2, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2],xmm0[3,4,5],xmm2[6],xmm0[7]
; SSE41-NEXT: psrlw $8, %xmm2
; SSE41-NEXT: packuswb %xmm3, %xmm2
; SSE41-NEXT: paddb %xmm1, %xmm2
@@ -2201,15 +2201,14 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) {
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
; SSE41-NEXT: psraw $8, %xmm0
; SSE41-NEXT: movdqa %xmm0, %xmm3
-; SSE41-NEXT: paddw %xmm0, %xmm3
-; SSE41-NEXT: psllw $7, %xmm0
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4],xmm3[5],xmm0[6],xmm3[7]
-; SSE41-NEXT: psrlw $8, %xmm0
+; SSE41-NEXT: psllw $7, %xmm3
+; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1,2,3,4],xmm0[5],xmm3[6],xmm0[7]
+; SSE41-NEXT: psrlw $8, %xmm3
; SSE41-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE41-NEXT: psraw $8, %xmm2
; SSE41-NEXT: psllw $7, %xmm2
; SSE41-NEXT: psrlw $8, %xmm2
-; SSE41-NEXT: packuswb %xmm0, %xmm2
+; SSE41-NEXT: packuswb %xmm3, %xmm2
; SSE41-NEXT: movaps {{.*#+}} xmm0 = [0,0,255,0,0,0,255,0,0,255,255,255,255,255,255,255]
; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [255,255,0,255,255,255,0,255,255,0,0,0,0,255,0,255]
@@ -2225,18 +2224,17 @@ define <16 x i8> @non_splat_minus_one_divisor_1(<16 x i8> %A) {
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 # [256,2,2,2,2,128,2,128]
; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpmovsxbw %xmm0, %xmm3
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2],xmm3[3,4,5],xmm2[6],xmm3[7]
+; AVX1-NEXT: vpmovsxbw %xmm0, %xmm2
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2],xmm2[3,4,5],xmm3[6],xmm2[7]
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpsraw $8, %xmm2, %xmm2
-; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3
-; AVX1-NEXT: vpsllw $7, %xmm2, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4],xmm3[5],xmm2[6],xmm3[7]
+; AVX1-NEXT: vpsllw $7, %xmm2, %xmm3
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3,4],xmm2[5],xmm3[6],xmm2[7]
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpsraw $8, %xmm1, %xmm1
diff --git a/llvm/test/CodeGen/X86/dpbusd.ll b/llvm/test/CodeGen/X86/dpbusd.ll
index 3aa77c3..7bd22d5 100644
--- a/llvm/test/CodeGen/X86/dpbusd.ll
+++ b/llvm/test/CodeGen/X86/dpbusd.ll
@@ -1,40 +1,25 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni | FileCheck %s --check-prefixes=AVXVNNI
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni | FileCheck %s --check-prefixes=AVX512,AVX512VNNI
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512VLVNNI
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni | FileCheck %s --check-prefixes=CHECK,AVXVNNI,AVXVNNI-AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVXVNNI,AVXVNNI-AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512VNNI
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512VLVNNI
define i32 @no_dpbusd(ptr%a, ptr%b, i32 %c, i32 %n) {
-; AVXVNNI-LABEL: no_dpbusd:
-; AVXVNNI: # %bb.0: # %entry
-; AVXVNNI-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVXVNNI-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVXVNNI-NEXT: vpmaddwd %ymm0, %ymm1, %ymm0
-; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vmovd %xmm0, %eax
-; AVXVNNI-NEXT: addl %edx, %eax
-; AVXVNNI-NEXT: vzeroupper
-; AVXVNNI-NEXT: retq
-;
-; AVX512-LABEL: no_dpbusd:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVX512-NEXT: vpmaddwd %ymm0, %ymm1, %ymm0
-; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVX512-NEXT: vmovd %xmm0, %eax
-; AVX512-NEXT: addl %edx, %eax
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; CHECK-LABEL: no_dpbusd:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; CHECK-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; CHECK-NEXT: vpmaddwd %ymm0, %ymm1, %ymm0
+; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1
+; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vmovd %xmm0, %eax
+; CHECK-NEXT: addl %edx, %eax
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
entry:
%0 = load <16 x i8>, ptr %a, align 16
%1 = zext <16 x i8> %0 to <16 x i32>
@@ -99,25 +84,44 @@ entry:
}
define i32 @mul_zext(ptr%a, ptr%b, i32 %c, i32 %n) {
-; AVXVNNI-LABEL: mul_zext:
-; AVXVNNI: # %bb.0: # %entry
-; AVXVNNI-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVXVNNI-NEXT: vpmovsxbw (%rsi), %ymm1
-; AVXVNNI-NEXT: vpmullw %ymm0, %ymm1, %ymm0
-; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVXVNNI-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVXVNNI-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVXVNNI-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vmovd %xmm0, %eax
-; AVXVNNI-NEXT: addl %edx, %eax
-; AVXVNNI-NEXT: vzeroupper
-; AVXVNNI-NEXT: retq
+; AVXVNNI-AVX-LABEL: mul_zext:
+; AVXVNNI-AVX: # %bb.0: # %entry
+; AVXVNNI-AVX-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVXVNNI-AVX-NEXT: vpmovsxbw (%rsi), %ymm1
+; AVXVNNI-AVX-NEXT: vpmullw %ymm0, %ymm1, %ymm0
+; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVXVNNI-AVX-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVXVNNI-AVX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVXVNNI-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX-NEXT: vmovd %xmm0, %eax
+; AVXVNNI-AVX-NEXT: addl %edx, %eax
+; AVXVNNI-AVX-NEXT: vzeroupper
+; AVXVNNI-AVX-NEXT: retq
+;
+; AVXVNNI-AVX512-LABEL: mul_zext:
+; AVXVNNI-AVX512: # %bb.0: # %entry
+; AVXVNNI-AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVXVNNI-AVX512-NEXT: vpmovsxbw (%rsi), %ymm1
+; AVXVNNI-AVX512-NEXT: vpmullw %ymm0, %ymm1, %ymm0
+; AVXVNNI-AVX512-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
+; AVXVNNI-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVXVNNI-AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVXVNNI-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX512-NEXT: vmovd %xmm0, %eax
+; AVXVNNI-AVX512-NEXT: addl %edx, %eax
+; AVXVNNI-AVX512-NEXT: vzeroupper
+; AVXVNNI-AVX512-NEXT: retq
;
; AVX512-LABEL: mul_zext:
; AVX512: # %bb.0: # %entry
@@ -153,25 +157,44 @@ entry:
}
define i32 @mul_sext(ptr%a, ptr%b, i32 %c, i32 %n) {
-; AVXVNNI-LABEL: mul_sext:
-; AVXVNNI: # %bb.0: # %entry
-; AVXVNNI-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
-; AVXVNNI-NEXT: vpmovsxbw (%rsi), %ymm1
-; AVXVNNI-NEXT: vpmullw %ymm0, %ymm1, %ymm0
-; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVXVNNI-NEXT: vpmovsxwd %xmm1, %ymm1
-; AVXVNNI-NEXT: vpmovsxwd %xmm0, %ymm0
-; AVXVNNI-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vmovd %xmm0, %eax
-; AVXVNNI-NEXT: addl %edx, %eax
-; AVXVNNI-NEXT: vzeroupper
-; AVXVNNI-NEXT: retq
+; AVXVNNI-AVX-LABEL: mul_sext:
+; AVXVNNI-AVX: # %bb.0: # %entry
+; AVXVNNI-AVX-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVXVNNI-AVX-NEXT: vpmovsxbw (%rsi), %ymm1
+; AVXVNNI-AVX-NEXT: vpmullw %ymm0, %ymm1, %ymm0
+; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVXVNNI-AVX-NEXT: vpmovsxwd %xmm1, %ymm1
+; AVXVNNI-AVX-NEXT: vpmovsxwd %xmm0, %ymm0
+; AVXVNNI-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX-NEXT: vmovd %xmm0, %eax
+; AVXVNNI-AVX-NEXT: addl %edx, %eax
+; AVXVNNI-AVX-NEXT: vzeroupper
+; AVXVNNI-AVX-NEXT: retq
+;
+; AVXVNNI-AVX512-LABEL: mul_sext:
+; AVXVNNI-AVX512: # %bb.0: # %entry
+; AVXVNNI-AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVXVNNI-AVX512-NEXT: vpmovsxbw (%rsi), %ymm1
+; AVXVNNI-AVX512-NEXT: vpmullw %ymm0, %ymm1, %ymm0
+; AVXVNNI-AVX512-NEXT: vpmovsxwd %ymm0, %zmm0
+; AVXVNNI-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVXVNNI-AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
+; AVXVNNI-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX512-NEXT: vmovd %xmm0, %eax
+; AVXVNNI-AVX512-NEXT: addl %edx, %eax
+; AVXVNNI-AVX512-NEXT: vzeroupper
+; AVXVNNI-AVX512-NEXT: retq
;
; AVX512-LABEL: mul_sext:
; AVX512: # %bb.0: # %entry
@@ -312,17 +335,30 @@ entry:
declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
define i32 @vpdpbusd_128(ptr%a, ptr%b, i32 %c, i32 %n) {
-; AVXVNNI-LABEL: vpdpbusd_128:
-; AVXVNNI: # %bb.0: # %entry
-; AVXVNNI-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVXVNNI-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVXVNNI-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3]
-; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
-; AVXVNNI-NEXT: {vex} vpdpbusd %xmm1, %xmm0, %xmm2
-; AVXVNNI-NEXT: vmovd %xmm2, %eax
-; AVXVNNI-NEXT: addl %edx, %eax
-; AVXVNNI-NEXT: retq
+; AVXVNNI-AVX-LABEL: vpdpbusd_128:
+; AVXVNNI-AVX: # %bb.0: # %entry
+; AVXVNNI-AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVXVNNI-AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVXVNNI-AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3]
+; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
+; AVXVNNI-AVX-NEXT: {vex} vpdpbusd %xmm1, %xmm0, %xmm2
+; AVXVNNI-AVX-NEXT: vmovd %xmm2, %eax
+; AVXVNNI-AVX-NEXT: addl %edx, %eax
+; AVXVNNI-AVX-NEXT: retq
+;
+; AVXVNNI-AVX512-LABEL: vpdpbusd_128:
+; AVXVNNI-AVX512: # %bb.0: # %entry
+; AVXVNNI-AVX512-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; AVXVNNI-AVX512-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; AVXVNNI-AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVXVNNI-AVX512-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3]
+; AVXVNNI-AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
+; AVXVNNI-AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd %xmm1, %xmm0, %xmm2
+; AVXVNNI-AVX512-NEXT: vmovd %xmm2, %eax
+; AVXVNNI-AVX512-NEXT: addl %edx, %eax
+; AVXVNNI-AVX512-NEXT: retq
;
; AVX512VNNI-LABEL: vpdpbusd_128:
; AVX512VNNI: # %bb.0: # %entry
diff --git a/llvm/test/CodeGen/X86/dpbusd_const.ll b/llvm/test/CodeGen/X86/dpbusd_const.ll
index 456e6e8..bb47df5 100644
--- a/llvm/test/CodeGen/X86/dpbusd_const.ll
+++ b/llvm/test/CodeGen/X86/dpbusd_const.ll
@@ -1,20 +1,21 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni | FileCheck %s --check-prefixes=ALL,AVXVNNI
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VNNI
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni -mattr=+avx512vl | FileCheck %s --check-prefixes=ALL,AVX512,AVX512VLVNNI
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni | FileCheck %s --check-prefixes=CHECK,AVXVNNI,AVXVNNI-AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnni,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVXVNNI,AVXVNNI-AVX512
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512VNNI
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vnni,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512,AVX512VLVNNI
define i32 @mul_4xi8_zc_exceed(<4 x i8> %a, i32 %c) {
-; ALL-LABEL: mul_4xi8_zc_exceed:
-; ALL: # %bb.0: # %entry
-; ALL-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; ALL-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,1,0,2,0,128,0]
-; ALL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; ALL-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; ALL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; ALL-NEXT: vpaddd %xmm0, %xmm1, %xmm0
-; ALL-NEXT: vmovd %xmm0, %eax
-; ALL-NEXT: addl %edi, %eax
-; ALL-NEXT: retq
+; CHECK-LABEL: mul_4xi8_zc_exceed:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; CHECK-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,1,0,2,0,128,0]
+; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vmovd %xmm0, %eax
+; CHECK-NEXT: addl %edi, %eax
+; CHECK-NEXT: retq
entry:
%0 = zext <4 x i8> %a to <4 x i32>
%1 = mul nsw <4 x i32> %0, <i32 0, i32 1, i32 2, i32 128>
@@ -24,14 +25,24 @@ entry:
}
define i32 @mul_4xi8_zc(<4 x i8> %a, i32 %c) {
-; AVXVNNI-LABEL: mul_4xi8_zc:
-; AVXVNNI: # %bb.0: # %entry
-; AVXVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
-; AVXVNNI-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
-; AVXVNNI-NEXT: vmovd %xmm1, %eax
-; AVXVNNI-NEXT: addl %edi, %eax
-; AVXVNNI-NEXT: retq
+; AVXVNNI-AVX-LABEL: mul_4xi8_zc:
+; AVXVNNI-AVX: # %bb.0: # %entry
+; AVXVNNI-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVXVNNI-AVX-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVXVNNI-AVX-NEXT: vmovd %xmm1, %eax
+; AVXVNNI-AVX-NEXT: addl %edi, %eax
+; AVXVNNI-AVX-NEXT: retq
+;
+; AVXVNNI-AVX512-LABEL: mul_4xi8_zc:
+; AVXVNNI-AVX512: # %bb.0: # %entry
+; AVXVNNI-AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVXVNNI-AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVXVNNI-AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVXVNNI-AVX512-NEXT: vmovd %xmm1, %eax
+; AVXVNNI-AVX512-NEXT: addl %edi, %eax
+; AVXVNNI-AVX512-NEXT: retq
;
; AVX512VNNI-LABEL: mul_4xi8_zc:
; AVX512VNNI: # %bb.0: # %entry
@@ -62,16 +73,26 @@ entry:
}
define i32 @mul_4xi4_cz(<4 x i4> %a, i32 %c) {
-; AVXVNNI-LABEL: mul_4xi4_cz:
-; AVXVNNI: # %bb.0: # %entry
-; AVXVNNI-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
-; AVXVNNI-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVXVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
-; AVXVNNI-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
-; AVXVNNI-NEXT: vmovd %xmm1, %eax
-; AVXVNNI-NEXT: addl %edi, %eax
-; AVXVNNI-NEXT: retq
+; AVXVNNI-AVX-LABEL: mul_4xi4_cz:
+; AVXVNNI-AVX: # %bb.0: # %entry
+; AVXVNNI-AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
+; AVXVNNI-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVXVNNI-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVXVNNI-AVX-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVXVNNI-AVX-NEXT: vmovd %xmm1, %eax
+; AVXVNNI-AVX-NEXT: addl %edi, %eax
+; AVXVNNI-AVX-NEXT: retq
+;
+; AVXVNNI-AVX512-LABEL: mul_4xi4_cz:
+; AVXVNNI-AVX512: # %bb.0: # %entry
+; AVXVNNI-AVX512-NEXT: vpmovdb %xmm0, %xmm0
+; AVXVNNI-AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
+; AVXVNNI-AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
+; AVXVNNI-AVX512-NEXT: vmovd %xmm1, %eax
+; AVXVNNI-AVX512-NEXT: addl %edi, %eax
+; AVXVNNI-AVX512-NEXT: retq
;
; AVX512VNNI-LABEL: mul_4xi4_cz:
; AVX512VNNI: # %bb.0: # %entry
@@ -104,15 +125,26 @@ entry:
}
define i32 @mul_4xi8_cs(<4 x i8> %a, i32 %c) {
-; AVXVNNI-LABEL: mul_4xi8_cs:
-; AVXVNNI: # %bb.0: # %entry
-; AVXVNNI-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVXVNNI-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
-; AVXVNNI-NEXT: vmovd {{.*#+}} xmm2 = [16,1,2,255,0,0,0,0,0,0,0,0,0,0,0,0]
-; AVXVNNI-NEXT: {vex} vpdpbusd %xmm0, %xmm2, %xmm1
-; AVXVNNI-NEXT: vmovd %xmm1, %eax
-; AVXVNNI-NEXT: addl %edi, %eax
-; AVXVNNI-NEXT: retq
+; AVXVNNI-AVX-LABEL: mul_4xi8_cs:
+; AVXVNNI-AVX: # %bb.0: # %entry
+; AVXVNNI-AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVXVNNI-AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVXVNNI-AVX-NEXT: vmovd {{.*#+}} xmm2 = [16,1,2,255,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVXVNNI-AVX-NEXT: {vex} vpdpbusd %xmm0, %xmm2, %xmm1
+; AVXVNNI-AVX-NEXT: vmovd %xmm1, %eax
+; AVXVNNI-AVX-NEXT: addl %edi, %eax
+; AVXVNNI-AVX-NEXT: retq
+;
+; AVXVNNI-AVX512-LABEL: mul_4xi8_cs:
+; AVXVNNI-AVX512: # %bb.0: # %entry
+; AVXVNNI-AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVXVNNI-AVX512-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVXVNNI-AVX512-NEXT: vmovd {{.*#+}} xmm1 = [16,1,2,255,0,0,0,0,0,0,0,0,0,0,0,0]
+; AVXVNNI-AVX512-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd %xmm0, %xmm1, %xmm2
+; AVXVNNI-AVX512-NEXT: vmovd %xmm2, %eax
+; AVXVNNI-AVX512-NEXT: addl %edi, %eax
+; AVXVNNI-AVX512-NEXT: retq
;
; AVX512VNNI-LABEL: mul_4xi8_cs:
; AVX512VNNI: # %bb.0: # %entry
@@ -145,17 +177,17 @@ entry:
}
define i32 @mul_4xi8_cs_exceed(<4 x i8> %a, i32 %c) {
-; ALL-LABEL: mul_4xi8_cs_exceed:
-; ALL: # %bb.0: # %entry
-; ALL-NEXT: vpmovsxbd %xmm0, %xmm0
-; ALL-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,1,0,2,0,256,0]
-; ALL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; ALL-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; ALL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
-; ALL-NEXT: vpaddd %xmm0, %xmm1, %xmm0
-; ALL-NEXT: vmovd %xmm0, %eax
-; ALL-NEXT: addl %edi, %eax
-; ALL-NEXT: retq
+; CHECK-LABEL: mul_4xi8_cs_exceed:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vpmovsxbd %xmm0, %xmm0
+; CHECK-NEXT: vpmaddwd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [0,0,1,0,2,0,256,0]
+; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; CHECK-NEXT: vmovd %xmm0, %eax
+; CHECK-NEXT: addl %edi, %eax
+; CHECK-NEXT: retq
entry:
%0 = sext <4 x i8> %a to <4 x i32>
%1 = mul nsw <4 x i32> <i32 0, i32 1, i32 2, i32 256>, %0
@@ -265,24 +297,44 @@ entry:
}
define i32 @mul_64xi8_zc(<64 x i8> %a, i32 %c) {
-; AVXVNNI-LABEL: mul_64xi8_zc:
-; AVXVNNI: # %bb.0: # %entry
-; AVXVNNI-NEXT: vpbroadcastd {{.*#+}} ymm2 = [0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64]
-; AVXVNNI-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVXVNNI-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; AVXVNNI-NEXT: {vex} vpdpbusd %ymm2, %ymm1, %ymm4
-; AVXVNNI-NEXT: {vex} vpdpbusd %ymm2, %ymm0, %ymm3
-; AVXVNNI-NEXT: vpaddd %ymm4, %ymm3, %ymm0
-; AVXVNNI-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVXVNNI-NEXT: vpaddd %xmm1, %xmm0, %xmm0
-; AVXVNNI-NEXT: vmovd %xmm0, %eax
-; AVXVNNI-NEXT: addl %edi, %eax
-; AVXVNNI-NEXT: vzeroupper
-; AVXVNNI-NEXT: retq
+; AVXVNNI-AVX-LABEL: mul_64xi8_zc:
+; AVXVNNI-AVX: # %bb.0: # %entry
+; AVXVNNI-AVX-NEXT: vpbroadcastd {{.*#+}} ymm2 = [0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64]
+; AVXVNNI-AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVXVNNI-AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVXVNNI-AVX-NEXT: {vex} vpdpbusd %ymm2, %ymm1, %ymm4
+; AVXVNNI-AVX-NEXT: {vex} vpdpbusd %ymm2, %ymm0, %ymm3
+; AVXVNNI-AVX-NEXT: vpaddd %ymm4, %ymm3, %ymm0
+; AVXVNNI-AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVXVNNI-AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX-NEXT: vmovd %xmm0, %eax
+; AVXVNNI-AVX-NEXT: addl %edi, %eax
+; AVXVNNI-AVX-NEXT: vzeroupper
+; AVXVNNI-AVX-NEXT: retq
+;
+; AVXVNNI-AVX512-LABEL: mul_64xi8_zc:
+; AVXVNNI-AVX512: # %bb.0: # %entry
+; AVXVNNI-AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
+; AVXVNNI-AVX512-NEXT: vpbroadcastd {{.*#+}} ymm2 = [0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64,0,1,2,64]
+; AVXVNNI-AVX512-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVXVNNI-AVX512-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd %ymm2, %ymm1, %ymm4
+; AVXVNNI-AVX512-NEXT: {vex} vpdpbusd %ymm2, %ymm0, %ymm3
+; AVXVNNI-AVX512-NEXT: vpaddd %ymm4, %ymm3, %ymm0
+; AVXVNNI-AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
+; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
+; AVXVNNI-AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVXVNNI-AVX512-NEXT: vmovd %xmm0, %eax
+; AVXVNNI-AVX512-NEXT: addl %edi, %eax
+; AVXVNNI-AVX512-NEXT: vzeroupper
+; AVXVNNI-AVX512-NEXT: retq
;
; AVX512-LABEL: mul_64xi8_zc:
; AVX512: # %bb.0: # %entry
diff --git a/llvm/test/CodeGen/X86/known-signbits-shl.ll b/llvm/test/CodeGen/X86/known-signbits-shl.ll
index 473fecc..57d557d 100644
--- a/llvm/test/CodeGen/X86/known-signbits-shl.ll
+++ b/llvm/test/CodeGen/X86/known-signbits-shl.ll
@@ -137,7 +137,7 @@ define void @computeNumSignBits_shl_zext_vec_3(<2 x i8> %x, ptr %p) nounwind {
; X64-NEXT: pandn {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-NEXT: por %xmm2, %xmm1
; X64-NEXT: movdqa %xmm0, %xmm2
-; X64-NEXT: paddw %xmm0, %xmm2
+; X64-NEXT: paddw %xmm2, %xmm2
; X64-NEXT: movdqa %xmm2, %xmm3
; X64-NEXT: psraw $1, %xmm3
; X64-NEXT: pcmpeqw %xmm0, %xmm3
diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
index 4e6f666..4cde581 100644
--- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll
+++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll
@@ -4806,9 +4806,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16
; X64-KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; X64-KNL-NEXT: vpslld $31, %zmm0, %zmm0
; X64-KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
-; X64-KNL-NEXT: vmovdqu64 (%rsi), %zmm0
+; X64-KNL-NEXT: vpslld $1, (%rsi), %zmm0
; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-KNL-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; X64-KNL-NEXT: vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
; X64-KNL-NEXT: vmovaps %zmm1, %zmm0
; X64-KNL-NEXT: retq
@@ -4830,9 +4829,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16
; X64-SKX-SMALL-NEXT: vpmovsxbd %xmm0, %zmm0
; X64-SKX-SMALL-NEXT: vpslld $31, %zmm0, %zmm0
; X64-SKX-SMALL-NEXT: vpmovd2m %zmm0, %k1
-; X64-SKX-SMALL-NEXT: vmovdqu64 (%rsi), %zmm0
+; X64-SKX-SMALL-NEXT: vpslld $1, (%rsi), %zmm0
; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; X64-SKX-SMALL-NEXT: vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
; X64-SKX-SMALL-NEXT: vmovaps %zmm1, %zmm0
; X64-SKX-SMALL-NEXT: retq
@@ -4842,10 +4840,9 @@ define <16 x float> @test_gather_structpt_16f32_mask_index(ptr %x, ptr %arr, <16
; X64-SKX-LARGE-NEXT: vpmovsxbd %xmm0, %zmm0
; X64-SKX-LARGE-NEXT: vpslld $31, %zmm0, %zmm0
; X64-SKX-LARGE-NEXT: vpmovd2m %zmm0, %k1
-; X64-SKX-LARGE-NEXT: vmovdqu64 (%rsi), %zmm0
+; X64-SKX-LARGE-NEXT: vpslld $1, (%rsi), %zmm0
; X64-SKX-LARGE-NEXT: movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
; X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; X64-SKX-LARGE-NEXT: vgatherdps (%rdi,%zmm0,8), %zmm1 {%k1}
; X64-SKX-LARGE-NEXT: vmovaps %zmm1, %zmm0
; X64-SKX-LARGE-NEXT: retq
@@ -4875,9 +4872,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a
; X64-KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; X64-KNL-NEXT: vpslld $31, %zmm0, %zmm0
; X64-KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
-; X64-KNL-NEXT: vmovdqu64 (%rsi), %zmm0
+; X64-KNL-NEXT: vpslld $1, (%rsi), %zmm0
; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-KNL-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; X64-KNL-NEXT: vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
; X64-KNL-NEXT: vmovaps %zmm1, %zmm0
; X64-KNL-NEXT: retq
@@ -4899,9 +4895,8 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a
; X64-SKX-SMALL-NEXT: vpmovsxbd %xmm0, %zmm0
; X64-SKX-SMALL-NEXT: vpslld $31, %zmm0, %zmm0
; X64-SKX-SMALL-NEXT: vpmovd2m %zmm0, %k1
-; X64-SKX-SMALL-NEXT: vmovdqu64 (%rsi), %zmm0
+; X64-SKX-SMALL-NEXT: vpslld $1, (%rsi), %zmm0
; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; X64-SKX-SMALL-NEXT: vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
; X64-SKX-SMALL-NEXT: vmovaps %zmm1, %zmm0
; X64-SKX-SMALL-NEXT: retq
@@ -4911,10 +4906,9 @@ define <16 x float> @test_gather_structpt_16f32_mask_index_offset(ptr %x, ptr %a
; X64-SKX-LARGE-NEXT: vpmovsxbd %xmm0, %zmm0
; X64-SKX-LARGE-NEXT: vpslld $31, %zmm0, %zmm0
; X64-SKX-LARGE-NEXT: vpmovd2m %zmm0, %k1
-; X64-SKX-LARGE-NEXT: vmovdqu64 (%rsi), %zmm0
+; X64-SKX-LARGE-NEXT: vpslld $1, (%rsi), %zmm0
; X64-SKX-LARGE-NEXT: movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
; X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT: vpaddd %zmm0, %zmm0, %zmm0
; X64-SKX-LARGE-NEXT: vgatherdps 4(%rdi,%zmm0,8), %zmm1 {%k1}
; X64-SKX-LARGE-NEXT: vmovaps %zmm1, %zmm0
; X64-SKX-LARGE-NEXT: retq
@@ -4944,9 +4938,8 @@ define {<16 x float>, <16 x float>} @test_gather_structpt_16f32_mask_index_pair(
; X64-KNL-NEXT: vpmovsxbd %xmm0, %zmm0
; X64-KNL-NEXT: vpslld $31, %zmm0, %zmm0
; X64-KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
-; X64-KNL-NEXT: vmovdqu64 (%rsi), %zmm0
-; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-KNL-NEXT: vpaddd %zmm0, %zmm0, %zmm2
+; X64-KNL-NEXT: vpslld $1, (%rsi), %zmm0
+; X64-KNL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm2
; X64-KNL-NEXT: kmovw %k1, %k2
; X64-KNL-NEXT: vmovaps %zmm1, %zmm0
; X64-KNL-NEXT: vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
@@ -4972,9 +4965,8 @@ define {<16 x float>, <16 x float>} @test_gather_structpt_16f32_mask_index_pair(
; X64-SKX-SMALL-NEXT: vpmovsxbd %xmm0, %zmm0
; X64-SKX-SMALL-NEXT: vpslld $31, %zmm0, %zmm0
; X64-SKX-SMALL-NEXT: vpmovd2m %zmm0, %k1
-; X64-SKX-SMALL-NEXT: vmovdqu64 (%rsi), %zmm0
-; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm0
-; X64-SKX-SMALL-NEXT: vpaddd %zmm0, %zmm0, %zmm2
+; X64-SKX-SMALL-NEXT: vpslld $1, (%rsi), %zmm0
+; X64-SKX-SMALL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm0, %zmm2
; X64-SKX-SMALL-NEXT: kmovw %k1, %k2
; X64-SKX-SMALL-NEXT: vmovaps %zmm1, %zmm0
; X64-SKX-SMALL-NEXT: vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
@@ -4986,10 +4978,9 @@ define {<16 x float>, <16 x float>} @test_gather_structpt_16f32_mask_index_pair(
; X64-SKX-LARGE-NEXT: vpmovsxbd %xmm0, %zmm0
; X64-SKX-LARGE-NEXT: vpslld $31, %zmm0, %zmm0
; X64-SKX-LARGE-NEXT: vpmovd2m %zmm0, %k1
-; X64-SKX-LARGE-NEXT: vmovdqu64 (%rsi), %zmm0
+; X64-SKX-LARGE-NEXT: vpslld $1, (%rsi), %zmm0
; X64-SKX-LARGE-NEXT: movabsq ${{\.?LCPI[0-9]+_[0-9]+}}, %rax
-; X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm0
-; X64-SKX-LARGE-NEXT: vpaddd %zmm0, %zmm0, %zmm2
+; X64-SKX-LARGE-NEXT: vpandd (%rax){1to16}, %zmm0, %zmm2
; X64-SKX-LARGE-NEXT: kmovw %k1, %k2
; X64-SKX-LARGE-NEXT: vmovaps %zmm1, %zmm0
; X64-SKX-LARGE-NEXT: vgatherdps (%rdi,%zmm2,8), %zmm0 {%k2}
diff --git a/llvm/test/CodeGen/X86/negative-sin.ll b/llvm/test/CodeGen/X86/negative-sin.ll
index f24507d..4836da2 100644
--- a/llvm/test/CodeGen/X86/negative-sin.ll
+++ b/llvm/test/CodeGen/X86/negative-sin.ll
@@ -82,18 +82,13 @@ define double @semi_strict2(double %e) nounwind {
ret double %h
}
-; FIXME:
-; Auto-upgrade function attribute to IR-level fast-math-flags.
-
-define double @fn_attr(double %e) nounwind #0 {
-; CHECK-LABEL: fn_attr:
+define double @nsz_flag(double %e) nounwind {
+; CHECK-LABEL: nsz_flag:
; CHECK: # %bb.0:
; CHECK-NEXT: jmp sin@PLT # TAILCALL
- %f = fsub double 0.0, %e
- %g = call double @sin(double %f) readonly
- %h = fsub double 0.0, %g
+ %f = fsub nsz double 0.0, %e
+ %g = call nsz double @sin(double %f) readonly
+ %h = fsub nsz double 0.0, %g
ret double %h
}
-attributes #0 = { "unsafe-fp-math"="true" "no-signed-zeros-fp-math"="true" }
-
diff --git a/llvm/test/CodeGen/X86/oddsubvector.ll b/llvm/test/CodeGen/X86/oddsubvector.ll
index f539830..5df1867 100644
--- a/llvm/test/CodeGen/X86/oddsubvector.ll
+++ b/llvm/test/CodeGen/X86/oddsubvector.ll
@@ -155,10 +155,10 @@ define <16 x i32> @PR42819(ptr %a0) {
define void @PR42833() {
; SSE2-LABEL: PR42833:
; SSE2: # %bb.0:
+; SSE2-NEXT: movl b(%rip), %eax
; SSE2-NEXT: movdqa c+144(%rip), %xmm2
; SSE2-NEXT: movdqa c+128(%rip), %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: addl b(%rip), %eax
+; SSE2-NEXT: addl c+128(%rip), %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: movd %eax, %xmm3
; SSE2-NEXT: paddd %xmm0, %xmm3
@@ -166,7 +166,7 @@ define void @PR42833() {
; SSE2-NEXT: psubd %xmm2, %xmm4
; SSE2-NEXT: paddd %xmm2, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm5
-; SSE2-NEXT: paddd %xmm0, %xmm5
+; SSE2-NEXT: paddd %xmm5, %xmm5
; SSE2-NEXT: movss {{.*#+}} xmm5 = xmm3[0],xmm5[1,2,3]
; SSE2-NEXT: movdqa %xmm2, c+144(%rip)
; SSE2-NEXT: movaps %xmm5, c+128(%rip)
@@ -191,17 +191,17 @@ define void @PR42833() {
;
; SSE42-LABEL: PR42833:
; SSE42: # %bb.0:
+; SSE42-NEXT: movl b(%rip), %eax
; SSE42-NEXT: movdqa c+144(%rip), %xmm1
; SSE42-NEXT: movdqa c+128(%rip), %xmm0
-; SSE42-NEXT: movd %xmm0, %eax
-; SSE42-NEXT: addl b(%rip), %eax
+; SSE42-NEXT: addl c+128(%rip), %eax
; SSE42-NEXT: movd %eax, %xmm2
; SSE42-NEXT: paddd %xmm0, %xmm2
; SSE42-NEXT: movdqa d+144(%rip), %xmm3
; SSE42-NEXT: psubd %xmm1, %xmm3
; SSE42-NEXT: paddd %xmm1, %xmm1
; SSE42-NEXT: movdqa %xmm0, %xmm4
-; SSE42-NEXT: paddd %xmm0, %xmm4
+; SSE42-NEXT: paddd %xmm4, %xmm4
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm2[0,1],xmm4[2,3,4,5,6,7]
; SSE42-NEXT: movdqa %xmm1, c+144(%rip)
; SSE42-NEXT: movdqa %xmm4, c+128(%rip)
diff --git a/llvm/test/CodeGen/X86/pr62286.ll b/llvm/test/CodeGen/X86/pr62286.ll
index ce03f8f..161e965 100644
--- a/llvm/test/CodeGen/X86/pr62286.ll
+++ b/llvm/test/CodeGen/X86/pr62286.ll
@@ -26,27 +26,33 @@ define i64 @PR62286(i32 %a) {
; AVX1-LABEL: PR62286:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %edi, %xmm0
-; AVX1-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
+; AVX1-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2],ymm1[3],ymm2[4,5,6,7]
; AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3,4,5,6,7]
-; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vorps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
-; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddq %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovq %xmm0, %rax
+; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR62286:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %edi, %xmm0
-; AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm1
-; AVX2-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3]
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,0]
+; AVX2-NEXT: vpaddd %ymm0, %ymm0, %ymm1
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3],ymm1[4],ymm0[5,6,7]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
@@ -59,12 +65,12 @@ define i64 @PR62286(i32 %a) {
; AVX512-LABEL: PR62286:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovd %edi, %xmm0
-; AVX512-NEXT: movb $8, %al
+; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,0]
+; AVX512-NEXT: vpaddd %ymm0, %ymm0, %ymm1
+; AVX512-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
+; AVX512-NEXT: movw $4369, %ax # imm = 0x1111
; AVX512-NEXT: kmovd %eax, %k1
-; AVX512-NEXT: vpexpandd %ymm0, %ymm1 {%k1} {z}
-; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; AVX512-NEXT: vpaddd %ymm0, %ymm0, %ymm0
-; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
+; AVX512-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1}
; AVX512-NEXT: vpmovsxdq %ymm0, %zmm0
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0
diff --git a/llvm/test/CodeGen/X86/pr74736.ll b/llvm/test/CodeGen/X86/pr74736.ll
index ceccee0..5895526 100644
--- a/llvm/test/CodeGen/X86/pr74736.ll
+++ b/llvm/test/CodeGen/X86/pr74736.ll
@@ -6,8 +6,8 @@ define void @main(<16 x i32> %0, i32 %1) {
; SSE-LABEL: main:
; SSE: # %bb.0: # %entry
; SSE-NEXT: movd %edi, %xmm4
-; SSE-NEXT: movss {{.*#+}} xmm0 = [1,0,0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm4[1,0]
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [0,1,0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,0]
; SSE-NEXT: paddd %xmm0, %xmm0
; SSE-NEXT: paddd %xmm1, %xmm1
; SSE-NEXT: paddd %xmm3, %xmm3
@@ -32,20 +32,20 @@ define void @main(<16 x i32> %0, i32 %1) {
; AVX-LABEL: main:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpblendd {{.*#+}} xmm2 = xmm2[0],xmm0[1,2,3]
; AVX-NEXT: movl $1, %eax
; AVX-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
; AVX-NEXT: vpinsrd $3, %edi, %xmm2, %xmm2
-; AVX-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX-NEXT: vpaddd %ymm0, %ymm0, %ymm0
-; AVX-NEXT: vpaddd %ymm1, %ymm1, %ymm1
-; AVX-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,1,1,3,3,5,5,7]
-; AVX-NEXT: vpermd %ymm0, %ymm2, %ymm2
+; AVX-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX-NEXT: vpaddd %ymm2, %ymm2, %ymm2
+; AVX-NEXT: vpaddd %ymm1, %ymm1, %ymm3
; AVX-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1]
; AVX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,3,3,3,7,7,7,7]
-; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,1,1,3,4,5,5,7]
+; AVX-NEXT: vpaddd %ymm0, %ymm0, %ymm0
+; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm3[0,1,1,3,4,5,5,7]
; AVX-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7]
-; AVX-NEXT: vpxor %ymm0, %ymm2, %ymm0
+; AVX-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,1,1,3,3,5,5,7]
+; AVX-NEXT: vpermd %ymm2, %ymm1, %ymm1
+; AVX-NEXT: vpxor %ymm0, %ymm1, %ymm0
; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
diff --git a/llvm/test/CodeGen/X86/shift-i512.ll b/llvm/test/CodeGen/X86/shift-i512.ll
index 756019d..03b61d9 100644
--- a/llvm/test/CodeGen/X86/shift-i512.ll
+++ b/llvm/test/CodeGen/X86/shift-i512.ll
@@ -10,7 +10,7 @@ define <8 x i64> @shl_i512_1(<8 x i64> %a) {
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: valignq {{.*#+}} zmm1 = zmm0[3,4,5,6,7,0,1,2]
; AVX512VL-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512VL-NEXT: vpsllq $1, %xmm0, %xmm3
+; AVX512VL-NEXT: vpaddq %xmm0, %xmm0, %xmm3
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
; AVX512VL-NEXT: vpsrlq $63, %xmm4, %xmm4
; AVX512VL-NEXT: vpaddq %xmm2, %xmm2, %xmm2
@@ -34,7 +34,7 @@ define <8 x i64> @shl_i512_1(<8 x i64> %a) {
; AVX512VBMI-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512VBMI-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
; AVX512VBMI-NEXT: vpshldq $1, %xmm3, %xmm2, %xmm3
-; AVX512VBMI-NEXT: vpsllq $1, %xmm0, %xmm4
+; AVX512VBMI-NEXT: vpaddq %xmm0, %xmm0, %xmm4
; AVX512VBMI-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3
; AVX512VBMI-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
; AVX512VBMI-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
@@ -51,7 +51,7 @@ define <8 x i64> @shl_i512_1(<8 x i64> %a) {
; ZNVER4-NEXT: vextracti32x4 $2, %zmm0, %xmm1
; ZNVER4-NEXT: vextracti128 $1, %ymm0, %xmm2
; ZNVER4-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
-; ZNVER4-NEXT: vpsllq $1, %xmm0, %xmm4
+; ZNVER4-NEXT: vpaddq %xmm0, %xmm0, %xmm4
; ZNVER4-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
; ZNVER4-NEXT: vpshldq $1, %xmm3, %xmm2, %xmm3
; ZNVER4-NEXT: vextracti64x4 $1, %zmm0, %ymm2
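Note (annotation, not part of the patch): this file and several below show the same mechanical change, a vector left-shift-by-one rewritten as an add of a register with itself, which computes the same per-lane doubling. A minimal standalone LLVM IR sketch of the equivalence:

    define <2 x i64> @shl_by_one(<2 x i64> %x) {
      %r = shl <2 x i64> %x, <i64 1, i64 1>   ; what vpsllq $1 implements
      ret <2 x i64> %r
    }
    define <2 x i64> @double(<2 x i64> %x) {
      %r = add <2 x i64> %x, %x               ; what vpaddq %x, %x implements
      ret <2 x i64> %r
    }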
diff --git a/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
index 3f48b22..a48be03 100644
--- a/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/sse2-intrinsics-fast-isel.ll
@@ -5791,20 +5791,20 @@ declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) nounwind readnone
define <2 x i64> @test_mm_slli_epi16(<2 x i64> %a0) {
; SSE-LABEL: test_mm_slli_epi16:
; SSE: # %bb.0:
-; SSE-NEXT: psllw $1, %xmm0 # encoding: [0x66,0x0f,0x71,0xf0,0x01]
+; SSE-NEXT: psllw $2, %xmm0 # encoding: [0x66,0x0f,0x71,0xf0,0x02]
; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_slli_epi16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpsllw $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x71,0xf0,0x01]
+; AVX1-NEXT: vpsllw $2, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x71,0xf0,0x02]
; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_slli_epi16:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpsllw $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xf0,0x01]
+; AVX512-NEXT: vpsllw $2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x71,0xf0,0x02]
; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%arg0 = bitcast <2 x i64> %a0 to <8 x i16>
- %res = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %arg0, i32 1)
+ %res = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %arg0, i32 2)
%bc = bitcast <8 x i16> %res to <2 x i64>
ret <2 x i64> %bc
}
@@ -5813,20 +5813,20 @@ declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32) nounwind readnone
define <2 x i64> @test_mm_slli_epi32(<2 x i64> %a0) {
; SSE-LABEL: test_mm_slli_epi32:
; SSE: # %bb.0:
-; SSE-NEXT: pslld $1, %xmm0 # encoding: [0x66,0x0f,0x72,0xf0,0x01]
+; SSE-NEXT: pslld $2, %xmm0 # encoding: [0x66,0x0f,0x72,0xf0,0x02]
; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_slli_epi32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpslld $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x72,0xf0,0x01]
+; AVX1-NEXT: vpslld $2, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x72,0xf0,0x02]
; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_slli_epi32:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpslld $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xf0,0x01]
+; AVX512-NEXT: vpslld $2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x72,0xf0,0x02]
; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
%arg0 = bitcast <2 x i64> %a0 to <4 x i32>
- %res = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %arg0, i32 1)
+ %res = call <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32> %arg0, i32 2)
%bc = bitcast <4 x i32> %res to <2 x i64>
ret <2 x i64> %bc
}
@@ -5835,19 +5835,19 @@ declare <4 x i32> @llvm.x86.sse2.pslli.d(<4 x i32>, i32) nounwind readnone
define <2 x i64> @test_mm_slli_epi64(<2 x i64> %a0) {
; SSE-LABEL: test_mm_slli_epi64:
; SSE: # %bb.0:
-; SSE-NEXT: psllq $1, %xmm0 # encoding: [0x66,0x0f,0x73,0xf0,0x01]
+; SSE-NEXT: psllq $2, %xmm0 # encoding: [0x66,0x0f,0x73,0xf0,0x02]
; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_slli_epi64:
; AVX1: # %bb.0:
-; AVX1-NEXT: vpsllq $1, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xf0,0x01]
+; AVX1-NEXT: vpsllq $2, %xmm0, %xmm0 # encoding: [0xc5,0xf9,0x73,0xf0,0x02]
; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_slli_epi64:
; AVX512: # %bb.0:
-; AVX512-NEXT: vpsllq $1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf0,0x01]
+; AVX512-NEXT: vpsllq $2, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf0,0x02]
; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
- %res = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %a0, i32 1)
+ %res = call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %a0, i32 2)
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64>, i32) nounwind readnone
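Note (annotation, not part of the patch): these encoding tests bumped their shift immediates from 1 to 2, presumably because a shift-by-one can now be printed as an add (as in the files above), which would stop the tests from pinning down the immediate-shift opcode encodings they exist to check. A reduced sketch, using the same intrinsic, that still exercises the psllw encoding:

    declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32)
    define <8 x i16> @keeps_psllw(<8 x i16> %x) {
      %r = call <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16> %x, i32 2) ; emits psllw $2
      ret <8 x i16> %r
    }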
diff --git a/llvm/test/CodeGen/X86/vec_shift6.ll b/llvm/test/CodeGen/X86/vec_shift6.ll
index 71e659c..219e32c 100644
--- a/llvm/test/CodeGen/X86/vec_shift6.ll
+++ b/llvm/test/CodeGen/X86/vec_shift6.ll
@@ -28,14 +28,14 @@ define <8 x i16> @test2(<8 x i16> %a) {
; SSE2-LABEL: test2:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: paddw %xmm0, %xmm1
+; SSE2-NEXT: paddw %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; SSE2-NEXT: retq
;
; SSE41-LABEL: test2:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: paddw %xmm0, %xmm1
+; SSE41-NEXT: paddw %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: retq
;
@@ -56,7 +56,7 @@ define <4 x i32> @test3(<4 x i32> %a) {
; SSE2-LABEL: test3:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm1
; SSE2-NEXT: pslld $2, %xmm0
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
@@ -81,14 +81,14 @@ define <4 x i32> @test4(<4 x i32> %a) {
; SSE2-LABEL: test4:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: paddd %xmm0, %xmm1
+; SSE2-NEXT: paddd %xmm1, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; SSE2-NEXT: retq
;
; SSE41-LABEL: test4:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: paddd %xmm0, %xmm1
+; SSE41-NEXT: paddd %xmm1, %xmm1
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-gep.ll b/llvm/test/CodeGen/X86/vector-gep.ll
index 5c48559..b4cffcd 100644
--- a/llvm/test/CodeGen/X86/vector-gep.ll
+++ b/llvm/test/CodeGen/X86/vector-gep.ll
@@ -122,91 +122,87 @@ define <64 x ptr> @AGEP9(ptr %param, <64 x i32> %off) nounwind {
; CHECK-NEXT: movl %esp, %ebp
; CHECK-NEXT: andl $-32, %esp
; CHECK-NEXT: subl $160, %esp
-; CHECK-NEXT: vmovdqa %ymm2, %ymm5
-; CHECK-NEXT: vmovdqa %ymm1, %ymm3
-; CHECK-NEXT: vmovdqa %ymm0, %ymm1
-; CHECK-NEXT: vmovdqa 72(%ebp), %ymm0
-; CHECK-NEXT: vmovdqa 40(%ebp), %ymm2
-; CHECK-NEXT: vpaddd %xmm2, %xmm2, %xmm4
-; CHECK-NEXT: vbroadcastss 12(%ebp), %xmm7
-; CHECK-NEXT: vpaddd %xmm4, %xmm7, %xmm4
-; CHECK-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm2
-; CHECK-NEXT: vpaddd %xmm2, %xmm2, %xmm2
-; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2
-; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2
-; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2
-; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm3
+; CHECK-NEXT: vbroadcastss 12(%ebp), %xmm5
+; CHECK-NEXT: vpaddd %xmm3, %xmm5, %xmm3
+; CHECK-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
-; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: vmovdqa 104(%ebp), %ymm0
-; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2
-; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2
-; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT: vpaddd %xmm1, %xmm1, %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
+; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm0
; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
-; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: vmovdqa 136(%ebp), %ymm0
-; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2
-; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2
-; CHECK-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT: vpaddd %xmm2, %xmm2, %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
+; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm0
; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
-; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; CHECK-NEXT: vmovdqa 168(%ebp), %ymm0
-; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm2
-; CHECK-NEXT: vpaddd %xmm2, %xmm7, %xmm2
-; CHECK-NEXT: vmovdqa %xmm2, (%esp) # 16-byte Spill
-; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
+; CHECK-NEXT: vmovdqa 40(%ebp), %xmm0
; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
-; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm2
-; CHECK-NEXT: vpaddd %xmm1, %xmm1, %xmm0
-; CHECK-NEXT: vpaddd %xmm0, %xmm7, %xmm0
-; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm1
-; CHECK-NEXT: vpaddd %xmm1, %xmm1, %xmm1
-; CHECK-NEXT: vpaddd %xmm1, %xmm7, %xmm1
-; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm6
-; CHECK-NEXT: vpaddd %xmm6, %xmm7, %xmm6
-; CHECK-NEXT: vextractf128 $1, %ymm3, %xmm3
-; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm3
-; CHECK-NEXT: vpaddd %xmm3, %xmm7, %xmm3
-; CHECK-NEXT: vmovdqa %ymm5, %ymm4
-; CHECK-NEXT: vpaddd %xmm4, %xmm4, %xmm5
-; CHECK-NEXT: vpaddd %xmm5, %xmm7, %xmm5
-; CHECK-NEXT: vextractf128 $1, %ymm4, %xmm4
+; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
+; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovdqa 56(%ebp), %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
+; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: vmovdqa 72(%ebp), %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
+; CHECK-NEXT: vmovdqa %xmm0, (%esp) # 16-byte Spill
+; CHECK-NEXT: vmovdqa 88(%ebp), %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm2
+; CHECK-NEXT: vmovdqa 104(%ebp), %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm1
+; CHECK-NEXT: vmovdqa 120(%ebp), %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
+; CHECK-NEXT: vmovdqa 136(%ebp), %xmm6
+; CHECK-NEXT: vpaddd %xmm6, %xmm6, %xmm6
+; CHECK-NEXT: vpaddd %xmm6, %xmm5, %xmm6
+; CHECK-NEXT: vmovdqa 152(%ebp), %xmm7
+; CHECK-NEXT: vpaddd %xmm7, %xmm7, %xmm7
+; CHECK-NEXT: vpaddd %xmm7, %xmm5, %xmm7
+; CHECK-NEXT: vmovdqa 168(%ebp), %xmm4
; CHECK-NEXT: vpaddd %xmm4, %xmm4, %xmm4
-; CHECK-NEXT: vpaddd %xmm4, %xmm7, %xmm4
+; CHECK-NEXT: vpaddd %xmm4, %xmm5, %xmm4
+; CHECK-NEXT: vmovdqa 184(%ebp), %xmm3
+; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm3
+; CHECK-NEXT: vpaddd %xmm3, %xmm5, %xmm3
; CHECK-NEXT: movl 8(%ebp), %eax
-; CHECK-NEXT: vmovdqa %xmm4, 80(%eax)
-; CHECK-NEXT: vmovdqa %xmm5, 64(%eax)
-; CHECK-NEXT: vmovdqa %xmm3, 48(%eax)
-; CHECK-NEXT: vmovdqa %xmm6, 32(%eax)
-; CHECK-NEXT: vmovdqa %xmm1, 16(%eax)
-; CHECK-NEXT: vmovdqa %xmm0, (%eax)
-; CHECK-NEXT: vmovdqa %xmm2, 240(%eax)
+; CHECK-NEXT: vmovdqa %xmm3, 240(%eax)
+; CHECK-NEXT: vmovdqa %xmm4, 224(%eax)
+; CHECK-NEXT: vmovdqa %xmm7, 208(%eax)
+; CHECK-NEXT: vmovdqa %xmm6, 192(%eax)
+; CHECK-NEXT: vmovdqa %xmm0, 176(%eax)
+; CHECK-NEXT: vmovdqa %xmm1, 160(%eax)
+; CHECK-NEXT: vmovdqa %xmm2, 144(%eax)
; CHECK-NEXT: vmovaps (%esp), %xmm0 # 16-byte Reload
-; CHECK-NEXT: vmovaps %xmm0, 224(%eax)
+; CHECK-NEXT: vmovaps %xmm0, 128(%eax)
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; CHECK-NEXT: vmovaps %xmm0, 208(%eax)
+; CHECK-NEXT: vmovaps %xmm0, 112(%eax)
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; CHECK-NEXT: vmovaps %xmm0, 192(%eax)
+; CHECK-NEXT: vmovaps %xmm0, 96(%eax)
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; CHECK-NEXT: vmovaps %xmm0, 176(%eax)
+; CHECK-NEXT: vmovaps %xmm0, 80(%eax)
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; CHECK-NEXT: vmovaps %xmm0, 160(%eax)
+; CHECK-NEXT: vmovaps %xmm0, 64(%eax)
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; CHECK-NEXT: vmovaps %xmm0, 144(%eax)
+; CHECK-NEXT: vmovaps %xmm0, 48(%eax)
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; CHECK-NEXT: vmovaps %xmm0, 128(%eax)
+; CHECK-NEXT: vmovaps %xmm0, 32(%eax)
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; CHECK-NEXT: vmovaps %xmm0, 112(%eax)
+; CHECK-NEXT: vmovaps %xmm0, 16(%eax)
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
-; CHECK-NEXT: vmovaps %xmm0, 96(%eax)
+; CHECK-NEXT: vmovaps %xmm0, (%eax)
; CHECK-NEXT: movl %ebp, %esp
; CHECK-NEXT: popl %ebp
; CHECK-NEXT: vzeroupper
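Note (annotation, not part of the patch): the AGEP9 rewrite above leaves the address arithmetic itself unchanged; each lane still computes base + 2*offset (the vpaddd doubling followed by the vpaddd against the broadcast base). What changed is that the stack-passed offset vectors are now loaded as 128-bit halves directly instead of being materialized as ymm values and extracted. A reduced sketch of the underlying pattern, assuming i16 elements to match the x2 scaling (the test's real function is the 64-lane AGEP9):

    define <4 x ptr> @agep_sketch(ptr %base, <4 x i32> %off) {
      ; each result lane is base + 2 * off[i]
      %p = getelementptr i16, ptr %base, <4 x i32> %off
      ret <4 x ptr> %p
    }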
diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll
index 13f7d68..33d80f6 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll
@@ -652,7 +652,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: paddb %xmm4, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: psllw $1, %xmm2
+; SSE2-NEXT: paddw %xmm2, %xmm2
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT: psrlw $2, %xmm1
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -678,7 +678,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: paddb %xmm3, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psllw $1, %xmm2
+; SSE41-NEXT: paddw %xmm2, %xmm2
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE41-NEXT: psrlw $2, %xmm1
; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
@@ -701,7 +701,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpsllw $1, %xmm1, %xmm2
+; AVX1-NEXT: vpaddw %xmm1, %xmm1, %xmm2
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm1
; AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
@@ -720,7 +720,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; AVX2NOBW-NEXT: vpsrlw $1, %xmm2, %xmm2
; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX2NOBW-NEXT: vpaddb %xmm1, %xmm2, %xmm1
-; AVX2NOBW-NEXT: vpsllw $1, %xmm1, %xmm2
+; AVX2NOBW-NEXT: vpaddw %xmm1, %xmm1, %xmm2
; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX2NOBW-NEXT: vpsrlw $2, %xmm1, %xmm1
; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
@@ -739,7 +739,7 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
; AVX512BW-NEXT: vpsrlw $1, %xmm2, %xmm2
; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512BW-NEXT: vpaddb %xmm1, %xmm2, %xmm1
-; AVX512BW-NEXT: vpsllw $1, %xmm1, %xmm2
+; AVX512BW-NEXT: vpaddw %xmm1, %xmm1, %xmm2
; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512BW-NEXT: vpsrlw $2, %xmm1, %xmm1
; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
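Note (annotation, not part of the patch): every hunk in this file sits inside the usual unsigned divide-by-7 expansion. Writing q0 for the high-multiply estimate and s = q0 + ((x - q0) >> 1), the quotient is s >> 2, and eight times the quotient is formed as (s << 1) & 0xF8 (the pand against the 248 splat); that one-bit left shift is the vpsllw $1 now emitted as vpaddw. A reduced sketch of the IR that triggers the expansion:

    define <16 x i8> @rem7_sketch(<16 x i8> %x) {
      ; llc expands this into the high-multiply / shift / mask sequence above
      %r = urem <16 x i8> %x, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
      ret <16 x i8> %r
    }

The same sequence, widened to 256-bit and 512-bit lanes, accounts for the identical hunks in the -256 and -512 variants of this test below.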
diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll
index 1a5c373..e43108f 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll
@@ -590,7 +590,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX1-NEXT: vpand %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vpaddb %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vpsllw $1, %xmm3, %xmm5
+; AVX1-NEXT: vpaddw %xmm3, %xmm3, %xmm5
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm7 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248]
; AVX1-NEXT: vpand %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm3
@@ -609,7 +609,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
; AVX1-NEXT: vpaddb %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpsllw $1, %xmm2, %xmm3
+; AVX1-NEXT: vpaddw %xmm2, %xmm2, %xmm3
; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3
; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm2, %xmm8, %xmm2
@@ -633,7 +633,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
; AVX2NOBW-NEXT: vpsrlw $1, %ymm2, %ymm2
; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX2NOBW-NEXT: vpaddb %ymm1, %ymm2, %ymm1
-; AVX2NOBW-NEXT: vpsllw $1, %ymm1, %ymm2
+; AVX2NOBW-NEXT: vpaddw %ymm1, %ymm1, %ymm2
; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX2NOBW-NEXT: vpsrlw $2, %ymm1, %ymm1
; AVX2NOBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
@@ -651,7 +651,7 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
; AVX512BW-NEXT: vpsrlw $1, %ymm2, %ymm2
; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX512BW-NEXT: vpaddb %ymm1, %ymm2, %ymm1
-; AVX512BW-NEXT: vpsllw $1, %ymm1, %ymm2
+; AVX512BW-NEXT: vpaddw %ymm1, %ymm1, %ymm2
; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX512BW-NEXT: vpsrlw $2, %ymm1, %ymm1
; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll
index 9c56894..bf98bcc 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-512.ll
@@ -485,7 +485,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind {
; AVX512F-NEXT: vpbroadcastb {{.*#+}} ymm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512F-NEXT: vpand %ymm6, %ymm5, %ymm5
; AVX512F-NEXT: vpaddb %ymm3, %ymm5, %ymm3
-; AVX512F-NEXT: vpsllw $1, %ymm3, %ymm5
+; AVX512F-NEXT: vpaddw %ymm3, %ymm3, %ymm5
; AVX512F-NEXT: vpbroadcastb {{.*#+}} ymm7 = [248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248,248]
; AVX512F-NEXT: vpand %ymm7, %ymm5, %ymm5
; AVX512F-NEXT: vpsrlw $2, %ymm3, %ymm3
@@ -504,7 +504,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind {
; AVX512F-NEXT: vpsrlw $1, %ymm3, %ymm3
; AVX512F-NEXT: vpand %ymm6, %ymm3, %ymm3
; AVX512F-NEXT: vpaddb %ymm2, %ymm3, %ymm2
-; AVX512F-NEXT: vpsllw $1, %ymm2, %ymm3
+; AVX512F-NEXT: vpaddw %ymm2, %ymm2, %ymm3
; AVX512F-NEXT: vpand %ymm7, %ymm3, %ymm3
; AVX512F-NEXT: vpsrlw $2, %ymm2, %ymm2
; AVX512F-NEXT: vpand %ymm2, %ymm8, %ymm2
@@ -528,7 +528,7 @@ define <64 x i8> @test_rem7_64i8(<64 x i8> %a) nounwind {
; AVX512BW-NEXT: vpsrlw $1, %zmm2, %zmm2
; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm2
; AVX512BW-NEXT: vpaddb %zmm1, %zmm2, %zmm1
-; AVX512BW-NEXT: vpsllw $1, %zmm1, %zmm2
+; AVX512BW-NEXT: vpaddw %zmm1, %zmm1, %zmm2
; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlw $2, %zmm1, %zmm1
; AVX512BW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to16}, %zmm1, %zmm1
diff --git a/llvm/test/CodeGen/X86/vector-mul.ll b/llvm/test/CodeGen/X86/vector-mul.ll
index 13b21a7..6e1bf25 100644
--- a/llvm/test/CodeGen/X86/vector-mul.ll
+++ b/llvm/test/CodeGen/X86/vector-mul.ll
@@ -821,10 +821,10 @@ define <16 x i16> @madd_v16i16_3(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; X86-SSE-NEXT: andl $-16, %esp
; X86-SSE-NEXT: subl $16, %esp
; X86-SSE-NEXT: movdqa %xmm1, %xmm3
-; X86-SSE-NEXT: paddw %xmm1, %xmm3
+; X86-SSE-NEXT: paddw %xmm3, %xmm3
; X86-SSE-NEXT: paddw %xmm3, %xmm1
; X86-SSE-NEXT: movdqa %xmm0, %xmm3
-; X86-SSE-NEXT: paddw %xmm0, %xmm3
+; X86-SSE-NEXT: paddw %xmm3, %xmm3
; X86-SSE-NEXT: paddw %xmm2, %xmm0
; X86-SSE-NEXT: paddw %xmm3, %xmm0
; X86-SSE-NEXT: paddw 8(%ebp), %xmm1
@@ -835,9 +835,9 @@ define <16 x i16> @madd_v16i16_3(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; X64-SSE-LABEL: madd_v16i16_3:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: movdqa %xmm1, %xmm4
-; X64-SSE-NEXT: paddw %xmm1, %xmm4
+; X64-SSE-NEXT: paddw %xmm4, %xmm4
; X64-SSE-NEXT: movdqa %xmm0, %xmm5
-; X64-SSE-NEXT: paddw %xmm0, %xmm5
+; X64-SSE-NEXT: paddw %xmm5, %xmm5
; X64-SSE-NEXT: paddw %xmm2, %xmm0
; X64-SSE-NEXT: paddw %xmm5, %xmm0
; X64-SSE-NEXT: paddw %xmm3, %xmm1
diff --git a/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll b/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll
index 227e000..ab1feba 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-or-cmp.ll
@@ -907,7 +907,7 @@ define i1 @mask_v8i32_2(<8 x i32> %a0) {
; SSE2-LABEL: mask_v8i32_2:
; SSE2: # %bb.0:
; SSE2-NEXT: por %xmm1, %xmm0
-; SSE2-NEXT: pslld $1, %xmm0
+; SSE2-NEXT: paddd %xmm0, %xmm0
; SSE2-NEXT: movmskps %xmm0, %eax
; SSE2-NEXT: testl %eax, %eax
; SSE2-NEXT: sete %al
diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
index 2b1cf5b..99dac74 100644
--- a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
@@ -927,7 +927,7 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
; SSE2-LABEL: constant_shift_v2i64:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: paddq %xmm0, %xmm1
+; SSE2-NEXT: paddq %xmm1, %xmm1
; SSE2-NEXT: psllq $7, %xmm0
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT: retq
@@ -975,7 +975,7 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
; X86-SSE-LABEL: constant_shift_v2i64:
; X86-SSE: # %bb.0:
; X86-SSE-NEXT: movdqa %xmm0, %xmm1
-; X86-SSE-NEXT: paddq %xmm0, %xmm1
+; X86-SSE-NEXT: paddq %xmm1, %xmm1
; X86-SSE-NEXT: psllq $7, %xmm0
; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; X86-SSE-NEXT: retl
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
index 5b61de5..ee9d8a5 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -3550,14 +3550,14 @@ define <8 x i16> @PR141475(i32 %in) {
; SSE-LABEL: PR141475:
; SSE: # %bb.0:
; SSE-NEXT: movd %edi, %xmm0
-; SSE-NEXT: pslld $1, %xmm0
+; SSE-NEXT: paddd %xmm0, %xmm0
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: PR141475:
; AVX: # %bb.0:
; AVX-NEXT: vmovd %edi, %xmm0
-; AVX-NEXT: vpslld $1, %xmm0, %xmm0
+; AVX-NEXT: vpaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
; AVX-NEXT: retq
%mul = shl i32 %in, 1
diff --git a/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll b/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll
index 54dc107..3b93734 100644
--- a/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll
+++ b/llvm/test/CodeGen/X86/vector_splat-const-shift-of-constmasked.ll
@@ -1438,26 +1438,26 @@ define <8 x i16> @test_128_i16_x_8_65024_mask_ashr_10(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_127_mask_shl_1(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: paddw %xmm0, %xmm0
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_127_mask_shl_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_127_mask_shl_1:
; X64-SSE2: # %bb.0:
-; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: paddw %xmm0, %xmm0
+; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_127_mask_shl_1:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0
+; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127, i16 127>
%t1 = shl <8 x i16> %t0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
@@ -1656,26 +1656,26 @@ define <8 x i16> @test_128_i16_x_8_2032_mask_shl_6(<8 x i16> %a0) {
define <8 x i16> @test_128_i16_x_8_65024_mask_shl_1(<8 x i16> %a0) {
; X86-SSE2-LABEL: test_128_i16_x_8_65024_mask_shl_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: paddw %xmm0, %xmm0
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i16_x_8_65024_mask_shl_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i16_x_8_65024_mask_shl_1:
; X64-SSE2: # %bb.0:
-; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: paddw %xmm0, %xmm0
+; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i16_x_8_65024_mask_shl_1:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpaddw %xmm0, %xmm0, %xmm0
+; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <8 x i16> %a0, <i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024, i16 65024>
%t1 = shl <8 x i16> %t0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
@@ -2373,40 +2373,40 @@ define <4 x i32> @test_128_i32_x_4_4294836224_mask_ashr_18(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_32767_mask_shl_1(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: paddd %xmm0, %xmm0
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_1:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_32767_mask_shl_1:
; X86-AVX2: # %bb.0:
-; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [32767,32767,32767,32767]
-; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [65534,65534,65534,65534]
+; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_32767_mask_shl_1:
; X64-SSE2: # %bb.0:
-; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: paddd %xmm0, %xmm0
+; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_32767_mask_shl_1:
; X64-AVX1: # %bb.0:
-; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_32767_mask_shl_1:
; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [32767,32767,32767,32767]
-; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [65534,65534,65534,65534]
+; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: retq
%t0 = and <4 x i32> %a0, <i32 32767, i32 32767, i32 32767, i32 32767>
%t1 = shl <4 x i32> %t0, <i32 1, i32 1, i32 1, i32 1>
@@ -2675,40 +2675,40 @@ define <4 x i32> @test_128_i32_x_4_8388352_mask_shl_10(<4 x i32> %a0) {
define <4 x i32> @test_128_i32_x_4_4294836224_mask_shl_1(<4 x i32> %a0) {
; X86-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: paddd %xmm0, %xmm0
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX1-NEXT: retl
;
; X86-AVX2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
; X86-AVX2: # %bb.0:
-; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294836224,4294836224,4294836224,4294836224]
-; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; X86-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294705152,4294705152,4294705152,4294705152]
+; X86-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X86-AVX2-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
; X64-SSE2: # %bb.0:
-; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: paddd %xmm0, %xmm0
+; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX1-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
; X64-AVX1: # %bb.0:
-; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_128_i32_x_4_4294836224_mask_shl_1:
; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294836224,4294836224,4294836224,4294836224]
-; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpaddd %xmm0, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294705152,4294705152,4294705152,4294705152]
+; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT: retq
%t0 = and <4 x i32> %a0, <i32 4294836224, i32 4294836224, i32 4294836224, i32 4294836224>
%t1 = shl <4 x i32> %t0, <i32 1, i32 1, i32 1, i32 1>
@@ -3325,26 +3325,26 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_34(<2 x i64> %
define <2 x i64> @test_128_i64_x_2_2147483647_mask_shl_1(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: paddq %xmm0, %xmm0
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_2147483647_mask_shl_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_2147483647_mask_shl_1:
; X64-SSE2: # %bb.0:
-; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: paddq %xmm0, %xmm0
+; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_2147483647_mask_shl_1:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0
+; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 2147483647, i64 2147483647>
%t1 = shl <2 x i64> %t0, <i64 1, i64 1>
@@ -3543,26 +3543,26 @@ define <2 x i64> @test_128_i64_x_2_140737488289792_mask_shl_18(<2 x i64> %a0) {
define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_shl_1(<2 x i64> %a0) {
; X86-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1:
; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: paddq %xmm0, %xmm0
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1:
; X86-AVX: # %bb.0:
-; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0
+; X86-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT: retl
;
; X64-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1:
; X64-SSE2: # %bb.0:
-; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: paddq %xmm0, %xmm0
+; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_shl_1:
; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpaddq %xmm0, %xmm0, %xmm0
+; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 18446744065119617024, i64 18446744065119617024>
%t1 = shl <2 x i64> %t0, <i64 1, i64 1>
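Note (annotation, not part of the patch): the constmasked changes are the companion fold; the pand now follows the doubling, with the mask constant shifted along, by the identity (x & m) << 1 == (x << 1) & (m << 1). That is why 32767 becomes 65534 and 4294836224 (0xFFFE0000) becomes 4294705152 (0xFFFC0000, reduced modulo 2^32). A sketch of the folded form the new output corresponds to, for the 32767 case:

    define <4 x i32> @shl_then_mask(<4 x i32> %x) {
      ; equivalent to shl (and %x, 32767), 1 with the mask folded through the shift
      %t0 = shl <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
      %t1 = and <4 x i32> %t0, <i32 65534, i32 65534, i32 65534, i32 65534>
      ret <4 x i32> %t1
    }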