Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/AArch64/isinf.ll | 10
-rw-r--r--  llvm/test/CodeGen/AArch64/masked-integer-compare.ll | 178
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fmax_legacy.f16.ll | 1421
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fmax_legacy.ll | 226
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll | 281
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fmin_legacy.f16.ll | 1421
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fmin_legacy.ll | 302
-rw-r--r--  llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvinsve0.ll | 197
-rw-r--r--  llvm/test/CodeGen/NVPTX/bug22322.ll | 6
-rw-r--r--  llvm/test/CodeGen/PowerPC/scalar-min-max.ll | 232
-rw-r--r--  llvm/test/CodeGen/RISCV/select-bare.ll | 8
-rw-r--r--  llvm/test/CodeGen/RISCV/select-cc.ll | 50
-rw-r--r--  llvm/test/CodeGen/RISCV/select-cond.ll | 288
-rw-r--r--  llvm/test/CodeGen/RISCV/select.ll | 40
-rw-r--r--  llvm/test/CodeGen/RISCV/xqcicm.ll | 501
-rw-r--r--  llvm/test/CodeGen/RISCV/xqcics.ll | 32
-rw-r--r--  llvm/test/CodeGen/VE/Scalar/max.ll | 176
-rw-r--r--  llvm/test/CodeGen/VE/Scalar/min.ll | 178
-rw-r--r--  llvm/test/CodeGen/WebAssembly/partial-reduce-accumulate.ll | 15
-rw-r--r--  llvm/test/CodeGen/X86/avx10.2-intrinsic-upgrade.ll | 99
-rw-r--r--  llvm/test/CodeGen/X86/avx10_2_512ni-intrinsics.ll | 54
-rw-r--r--  llvm/test/CodeGen/X86/avx10_2ni-intrinsics.ll | 72
-rw-r--r--  llvm/test/CodeGen/X86/avxvnniint8-intrinsics-upgrade.ll | 318
-rw-r--r--  llvm/test/CodeGen/X86/avxvnniint8-intrinsics.ll | 120
-rw-r--r--  llvm/test/CodeGen/X86/stack-folding-int-avxvnniint8.ll | 120
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2_512ni-intrinsics.ll | 296
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2ni-intrinsics.ll | 352
-rw-r--r--  llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint8-intrinsics.ll | 656
-rw-r--r--  llvm/test/MC/LoongArch/Macros/macros-la.s | 1
-rw-r--r--  llvm/test/Transforms/AggressiveInstCombine/memchr.ll | 51
-rw-r--r--  llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll | 121
-rw-r--r--  llvm/test/Transforms/VectorCombine/X86/bitop-of-castops.ll | 8
32 files changed, 4834 insertions, 2996 deletions
diff --git a/llvm/test/CodeGen/AArch64/isinf.ll b/llvm/test/CodeGen/AArch64/isinf.ll
index e68539b..e8bbaf9 100644
--- a/llvm/test/CodeGen/AArch64/isinf.ll
+++ b/llvm/test/CodeGen/AArch64/isinf.ll
@@ -27,9 +27,8 @@ define i32 @replace_isinf_call_f32(float %x) {
; CHECK-LABEL: replace_isinf_call_f32:
; CHECK: // %bb.0:
; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: mov w8, #2139095040 // =0x7f800000
-; CHECK-NEXT: and w9, w9, #0x7fffffff
-; CHECK-NEXT: cmp w9, w8
+; CHECK-NEXT: mov w8, #-16777216 // =0xff000000
+; CHECK-NEXT: cmp w8, w9, lsl #1
; CHECK-NEXT: cset w0, eq
; CHECK-NEXT: ret
%abs = tail call float @llvm.fabs.f32(float %x)
@@ -43,9 +42,8 @@ define i32 @replace_isinf_call_f64(double %x) {
; CHECK-LABEL: replace_isinf_call_f64:
; CHECK: // %bb.0:
; CHECK-NEXT: fmov x9, d0
-; CHECK-NEXT: mov x8, #9218868437227405312 // =0x7ff0000000000000
-; CHECK-NEXT: and x9, x9, #0x7fffffffffffffff
-; CHECK-NEXT: cmp x9, x8
+; CHECK-NEXT: mov x8, #-9007199254740992 // =0xffe0000000000000
+; CHECK-NEXT: cmp x8, x9, lsl #1
; CHECK-NEXT: cset w0, eq
; CHECK-NEXT: ret
%abs = tail call double @llvm.fabs.f64(double %x)
diff --git a/llvm/test/CodeGen/AArch64/masked-integer-compare.ll b/llvm/test/CodeGen/AArch64/masked-integer-compare.ll
new file mode 100644
index 0000000..363cd10
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/masked-integer-compare.ll
@@ -0,0 +1,178 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64-none-linux-gnu < %s -o - | FileCheck %s
+
+; Test code generation support for SUBS (shifted register) from masked integer
+; compare sequences. These sequences appear in isinf tests, for example.
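+;
+; A sketch of the fold being tested: with mask 0x7fffffff and compare constant
+; 0x7f800000, the masked compare (x & 0x7fffffff) == 0x7f800000 is equivalent
+; to (x << 1) == 0xff000000, since the shift discards the same top bit that the
+; mask clears and doubles the constant. The AND + CMP pair can therefore
+; collapse into a single SUBS with a shifted-register operand, e.g.
+;   cmp w8, w0, lsl #1
+; where w8 holds 0xff000000 (-16777216).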
+
+define i1 @combine_masked_i32(i32 %x) {
+; CHECK-LABEL: combine_masked_i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #-16777216 // =0xff000000
+; CHECK-NEXT: cmp w8, w0, lsl #1
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+ %and = and i32 %x, u0x7fffffff
+ %sub = sub i32 %and, u0x7f800000
+ %cmp = icmp eq i32 %sub, 0
+ ret i1 %cmp
+}
+
+define i1 @combine_masked_i64(i64 %x) {
+; CHECK-LABEL: combine_masked_i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov x8, #-9007199254740992 // =0xffe0000000000000
+; CHECK-NEXT: cmp x8, x0, lsl #1
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+ %and = and i64 %x, u0x7fffffffffffffff
+ %sub = sub i64 %and, u0x7ff0000000000000
+ %cmp = icmp eq i64 %sub, 0
+ ret i1 %cmp
+}
+
+define i1 @combine_masked_ne(i32 %x) {
+; CHECK-LABEL: combine_masked_ne:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #-16777216 // =0xff000000
+; CHECK-NEXT: cmp w8, w0, lsl #1
+; CHECK-NEXT: cset w0, ne
+; CHECK-NEXT: ret
+ %and = and i32 %x, u0x7fffffff
+ %cmp = icmp ne i32 %and, u0x7f800000
+ ret i1 %cmp
+}
+
+define i1 @combine_masked_lsl4(i32 %x) {
+; CHECK-LABEL: combine_masked_lsl4:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #-134217728 // =0xf8000000
+; CHECK-NEXT: cmp w8, w0, lsl #4
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+ %and = and i32 %x, u0x0fffffff
+ %cmp = icmp eq i32 %and, u0x0f800000
+ ret i1 %cmp
+}
+
+define i1 @dont_combine_not_mask(i32 %x) {
+; CHECK-LABEL: dont_combine_not_mask:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #2139095040 // =0x7f800000
+; CHECK-NEXT: and w9, w0, #0x7ffffffe
+; CHECK-NEXT: cmp w9, w8
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+ %and = and i32 %x, u0x7ffffffe
+ %cmp = icmp eq i32 %and, u0x7f800000
+ ret i1 %cmp
+}
+
+define i1 @dont_combine_cmp_not_masked(i32 %x) {
+; CHECK-LABEL: dont_combine_cmp_not_masked:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #2139095040 // =0x7f800000
+; CHECK-NEXT: and w9, w0, #0x3fffffff
+; CHECK-NEXT: cmp w9, w8
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+ %and = and i32 %x, u0x3fffffff
+ %cmp = icmp eq i32 %and, u0x7f800000
+ ret i1 %cmp
+}
+
+define i1 @dont_combine_not_constant_mask(i32 %x, i32 %m) {
+; CHECK-LABEL: dont_combine_not_constant_mask:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #2139095040 // =0x7f800000
+; CHECK-NEXT: and w9, w0, w1
+; CHECK-NEXT: cmp w9, w8
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+ %and = and i32 %x, %m
+ %cmp = icmp eq i32 %and, u0x7f800000
+ ret i1 %cmp
+}
+
+define i1 @dont_combine_not_constant_cmp(i32 %x, i32 %c) {
+; CHECK-LABEL: dont_combine_not_constant_cmp:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and w8, w0, #0xfffffff
+; CHECK-NEXT: cmp w8, w1
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+ %and = and i32 %x, u0x0fffffff
+ %cmp = icmp eq i32 %and, %c
+ ret i1 %cmp
+}
+
+define i1 @dont_combine_subs_imm(i32 %x) {
+; CHECK-LABEL: dont_combine_subs_imm:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and w8, w0, #0x7fffffff
+; CHECK-NEXT: cmp w8, #291
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+ %and = and i32 %x, u0x7fffffff
+ %cmp = icmp eq i32 %and, u0x123
+ ret i1 %cmp
+}
+
+define i1 @dont_combine_subs_imm_lsl12(i32 %x) {
+; CHECK-LABEL: dont_combine_subs_imm_lsl12:
+; CHECK: // %bb.0:
+; CHECK-NEXT: and w8, w0, #0x7fffffff
+; CHECK-NEXT: cmp w8, #291, lsl #12 // =1191936
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+ %and = and i32 %x, u0x7fffffff
+ %cmp = icmp eq i32 %and, u0x123000
+ ret i1 %cmp
+}
+
+define { i1, i1 } @dont_combine_multi_use_cmp(i32 %x) {
+; CHECK-LABEL: dont_combine_multi_use_cmp:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #2139095040 // =0x7f800000
+; CHECK-NEXT: and w9, w0, #0x7fffffff
+; CHECK-NEXT: cmp w9, w8
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: cset w1, lt
+; CHECK-NEXT: ret
+ %and = and i32 %x, u0x7fffffff
+ %eq = icmp eq i32 %and, u0x7f800000
+ %lt = icmp slt i32 %and, u0x7f800000
+ %r1 = insertvalue { i1, i1 } poison, i1 %eq, 0
+ %r2 = insertvalue { i1, i1 } %r1, i1 %lt, 1
+ ret { i1, i1 } %r2
+}
+
+define { i32, i1 } @dont_combine_multi_use_sub(i32 %x) {
+; CHECK-LABEL: dont_combine_multi_use_sub:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #-2139095040 // =0x80800000
+; CHECK-NEXT: and w9, w0, #0x7fffffff
+; CHECK-NEXT: adds w0, w9, w8
+; CHECK-NEXT: cset w1, eq
+; CHECK-NEXT: ret
+ %and = and i32 %x, u0x7fffffff
+ %sub = sub i32 %and, u0x7f800000
+ %cmp = icmp eq i32 %sub, 0
+ %r1 = insertvalue { i32, i1 } poison, i32 %sub, 0
+ %r2 = insertvalue { i32, i1 } %r1, i1 %cmp, 1
+ ret { i32, i1 } %r2
+}
+
+define { i32, i1 } @dont_combine_multi_use_and(i32 %x) {
+; CHECK-LABEL: dont_combine_multi_use_and:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #2139095040 // =0x7f800000
+; CHECK-NEXT: and w0, w0, #0x7fffffff
+; CHECK-NEXT: cmp w0, w8
+; CHECK-NEXT: cset w1, eq
+; CHECK-NEXT: ret
+ %and = and i32 %x, u0x7fffffff
+ %cmp = icmp eq i32 %and, u0x7f800000
+ %r1 = insertvalue { i32, i1 } poison, i32 %and, 0
+ %r2 = insertvalue { i32, i1 } %r1, i1 %cmp, 1
+ ret { i32, i1 } %r2
+}
diff --git a/llvm/test/CodeGen/AMDGPU/fmax_legacy.f16.ll b/llvm/test/CodeGen/AMDGPU/fmax_legacy.f16.ll
index ed48999..bd28f72 100644
--- a/llvm/test/CodeGen/AMDGPU/fmax_legacy.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmax_legacy.f16.ll
@@ -1,734 +1,759 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9-SAFE %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9-NNAN %s
+; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9 %s
-; RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=VI-SAFE %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-NNAN %s
+; RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=VI %s
-; RUN: llc -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefix=SI-SAFE %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefixes=SI-NNAN %s
+; RUN: llc -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefix=SI %s
-; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SAFE-TRUE16 %s
-; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SAFE-FAKE16 %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-NNAN,GFX11-NNAN-TRUE16 %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-NNAN,GFX11-NNAN-FAKE16 %s
+; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-FAKE16 %s
define half @test_fmax_legacy_ugt_f16(half %a, half %b) #0 {
-; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_f16:
-; GFX9-SAFE: ; %bb.0:
-; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_f16:
-; GFX9-NNAN: ; %bb.0:
-; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NNAN-NEXT: v_max_f16_e32 v0, v0, v1
-; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SAFE-LABEL: test_fmax_legacy_ugt_f16:
-; VI-SAFE: ; %bb.0:
-; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; VI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-NNAN-LABEL: test_fmax_legacy_ugt_f16:
-; VI-NNAN: ; %bb.0:
-; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v1
-; VI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SAFE-LABEL: test_fmax_legacy_ugt_f16:
-; SI-SAFE: ; %bb.0:
-; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v1, v0
-; SI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-NNAN-LABEL: test_fmax_legacy_ugt_f16:
-; SI-NNAN: ; %bb.0:
-; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v1
-; SI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_f16:
-; GFX11-SAFE-TRUE16: ; %bb.0:
-; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.l, v1.l
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, vcc_lo
-; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_f16:
-; GFX11-SAFE-FAKE16: ; %bb.0:
-; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v1
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-NNAN-TRUE16-LABEL: test_fmax_legacy_ugt_f16:
-; GFX11-NNAN-TRUE16: ; %bb.0:
-; GFX11-NNAN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NNAN-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v1.l
-; GFX11-NNAN-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-NNAN-FAKE16-LABEL: test_fmax_legacy_ugt_f16:
-; GFX11-NNAN-FAKE16: ; %bb.0:
-; GFX11-NNAN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NNAN-FAKE16-NEXT: v_max_f16_e32 v0, v0, v1
-; GFX11-NNAN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: test_fmax_legacy_ugt_f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmax_legacy_ugt_f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1
+; VI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmax_legacy_ugt_f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_max_legacy_f32_e32 v0, v1, v0
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.l, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, vcc_lo
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = fcmp ugt half %a, %b
%val = select i1 %cmp, half %a, half %b
ret half %val
}
+define half @test_fmax_legacy_ugt_f16_fast(half %a, half %b) #0 {
+; GFX9-LABEL: test_fmax_legacy_ugt_f16_fast:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmax_legacy_ugt_f16_fast:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_max_f16_e32 v0, v0, v1
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmax_legacy_ugt_f16_fast:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_max_f32_e32 v0, v0, v1
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_f16_fast:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_max_f16_e32 v0.l, v0.l, v1.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_f16_fast:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_max_f16_e32 v0, v0, v1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %cmp = fcmp ugt half %a, %b
+ %val = select nnan nsz i1 %cmp, half %a, half %b
+ ret half %val
+}
+
define <2 x half> @test_fmax_legacy_ugt_v2f16(<2 x half> %a, <2 x half> %b) #0 {
-; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_v2f16:
-; GFX9-SAFE: ; %bb.0:
-; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v1
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v3, v2
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100
-; GFX9-SAFE-NEXT: v_perm_b32 v0, v2, v0, s4
-; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_v2f16:
-; GFX9-NNAN: ; %bb.0:
-; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NNAN-NEXT: v_pk_max_f16 v0, v0, v1
-; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SAFE-LABEL: test_fmax_legacy_ugt_v2f16:
-; VI-SAFE: ; %bb.0:
-; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v1
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v3, v2
-; VI-SAFE-NEXT: v_cndmask_b32_sdwa v2, v2, v3, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-NNAN-LABEL: test_fmax_legacy_ugt_v2f16:
-; VI-NNAN: ; %bb.0:
-; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NNAN-NEXT: v_max_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v1
-; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v2
-; VI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SAFE-LABEL: test_fmax_legacy_ugt_v2f16:
-; SI-SAFE: ; %bb.0:
-; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v2, v0
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v1, v3, v1
-; SI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-NNAN-LABEL: test_fmax_legacy_ugt_v2f16:
-; SI-NNAN: ; %bb.0:
-; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v2
-; SI-NNAN-NEXT: v_max_f32_e32 v1, v1, v3
-; SI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_v2f16:
-; GFX11-SAFE-TRUE16: ; %bb.0:
-; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v1.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.l, v1.l
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v1.h, v0.h, vcc_lo
-; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, s0
-; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_v2f16:
-; GFX11-SAFE-FAKE16: ; %bb.0:
-; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v2
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v1
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-NNAN-LABEL: test_fmax_legacy_ugt_v2f16:
-; GFX11-NNAN: ; %bb.0:
-; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NNAN-NEXT: v_pk_max_f16 v0, v0, v1
-; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: test_fmax_legacy_ugt_v2f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v3, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v0, v2, v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmax_legacy_ugt_v2f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; VI-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v3, v2
+; VI-NEXT: v_cndmask_b32_sdwa v2, v2, v3, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v1
+; VI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; VI-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmax_legacy_ugt_v2f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_max_legacy_f32_e32 v0, v2, v0
+; SI-NEXT: v_max_legacy_f32_e32 v1, v3, v1
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_v2f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v1.h
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.l, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v1.h, v0.h, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_v2f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = fcmp ugt <2 x half> %a, %b
%val = select <2 x i1> %cmp, <2 x half> %a, <2 x half> %b
ret <2 x half> %val
}
+define <2 x half> @test_fmax_legacy_ugt_v2f16_fast(<2 x half> %a, <2 x half> %b) #0 {
+; GFX9-LABEL: test_fmax_legacy_ugt_v2f16_fast:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmax_legacy_ugt_v2f16_fast:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_max_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_max_f16_e32 v0, v0, v1
+; VI-NEXT: v_or_b32_e32 v0, v0, v2
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmax_legacy_ugt_v2f16_fast:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_max_f32_e32 v0, v0, v2
+; SI-NEXT: v_max_f32_e32 v1, v1, v3
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_fmax_legacy_ugt_v2f16_fast:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_max_f16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %cmp = fcmp ugt <2 x half> %a, %b
+ %val = select nnan nsz <2 x i1> %cmp, <2 x half> %a, <2 x half> %b
+ ret <2 x half> %val
+}
+
define <3 x half> @test_fmax_legacy_ugt_v3f16(<3 x half> %a, <3 x half> %b) #0 {
-; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_v3f16:
-; GFX9-SAFE: ; %bb.0:
-; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
-; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100
-; GFX9-SAFE-NEXT: v_perm_b32 v0, v4, v0, s4
-; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_v3f16:
-; GFX9-NNAN: ; %bb.0:
-; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NNAN-NEXT: v_pk_max_f16 v1, v1, v3
-; GFX9-NNAN-NEXT: v_pk_max_f16 v0, v0, v2
-; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SAFE-LABEL: test_fmax_legacy_ugt_v3f16:
-; VI-SAFE: ; %bb.0:
-; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4
-; VI-SAFE-NEXT: v_cndmask_b32_sdwa v4, v4, v5, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
-; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-NNAN-LABEL: test_fmax_legacy_ugt_v3f16:
-; VI-NNAN: ; %bb.0:
-; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NNAN-NEXT: v_max_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v2
-; VI-NNAN-NEXT: v_max_f16_e32 v1, v1, v3
-; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v4
-; VI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SAFE-LABEL: test_fmax_legacy_ugt_v3f16:
-; SI-SAFE: ; %bb.0:
-; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v3, v0
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v1, v4, v1
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v2, v5, v2
-; SI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-NNAN-LABEL: test_fmax_legacy_ugt_v3f16:
-; SI-NNAN: ; %bb.0:
-; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v3
-; SI-NNAN-NEXT: v_max_f32_e32 v1, v1, v4
-; SI-NNAN-NEXT: v_max_f32_e32 v2, v2, v5
-; SI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_v3f16:
-; GFX11-SAFE-TRUE16: ; %bb.0:
-; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v2.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.l, v2.l
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v1.l, v3.l
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, vcc_lo
-; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s0
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s1
-; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_v3f16:
-; GFX11-SAFE-FAKE16: ; %bb.0:
-; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v2
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v5, v4
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v3
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-NNAN-LABEL: test_fmax_legacy_ugt_v3f16:
-; GFX11-NNAN: ; %bb.0:
-; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NNAN-NEXT: v_pk_max_f16 v0, v0, v2
-; GFX11-NNAN-NEXT: v_pk_max_f16 v1, v1, v3
-; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: test_fmax_legacy_ugt_v3f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v0, v4, v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmax_legacy_ugt_v3f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4
+; VI-NEXT: v_cndmask_b32_sdwa v4, v4, v5, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3
+; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2
+; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; VI-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmax_legacy_ugt_v3f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_max_legacy_f32_e32 v0, v3, v0
+; SI-NEXT: v_max_legacy_f32_e32 v1, v4, v1
+; SI-NEXT: v_max_legacy_f32_e32 v2, v5, v2
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_v3f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v2.h
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.l, v2.l
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v1.l, v3.l
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_v3f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v5, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = fcmp ugt <3 x half> %a, %b
%val = select <3 x i1> %cmp, <3 x half> %a, <3 x half> %b
ret <3 x half> %val
}
+define <3 x half> @test_fmax_legacy_ugt_v3f16_fast(<3 x half> %a, <3 x half> %b) #0 {
+; GFX9-LABEL: test_fmax_legacy_ugt_v3f16_fast:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v3, v3, v3
+; GFX9-NEXT: v_pk_max_f16 v1, v1, v1
+; GFX9-NEXT: v_pk_max_f16 v1, v1, v3
+; GFX9-NEXT: v_pk_max_f16 v0, v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmax_legacy_ugt_v3f16_fast:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_max_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_max_f16_e32 v0, v0, v2
+; VI-NEXT: v_max_f16_e32 v1, v1, v3
+; VI-NEXT: v_or_b32_e32 v0, v0, v4
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmax_legacy_ugt_v3f16_fast:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_max_f32_e32 v0, v0, v3
+; SI-NEXT: v_max_f32_e32 v1, v1, v4
+; SI-NEXT: v_max_f32_e32 v2, v2, v5
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_fmax_legacy_ugt_v3f16_fast:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_max_f16 v3, v3, v3
+; GFX11-NEXT: v_pk_max_f16 v1, v1, v1
+; GFX11-NEXT: v_pk_max_f16 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_pk_max_f16 v1, v1, v3
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %cmp = fcmp ugt <3 x half> %a, %b
+ %val = select nnan nsz <3 x i1> %cmp, <3 x half> %a, <3 x half> %b
+ ret <3 x half> %val
+}
+
define <4 x half> @test_fmax_legacy_ugt_v4f16(<4 x half> %a, <4 x half> %b) #0 {
-; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_v4f16:
-; GFX9-SAFE: ; %bb.0:
-; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v6, 16, v3
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v7, 16, v1
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v7, v6
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
-; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100
-; GFX9-SAFE-NEXT: v_perm_b32 v0, v4, v0, s4
-; GFX9-SAFE-NEXT: v_perm_b32 v1, v6, v1, s4
-; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_v4f16:
-; GFX9-NNAN: ; %bb.0:
-; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NNAN-NEXT: v_pk_max_f16 v0, v0, v2
-; GFX9-NNAN-NEXT: v_pk_max_f16 v1, v1, v3
-; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SAFE-LABEL: test_fmax_legacy_ugt_v4f16:
-; VI-SAFE: ; %bb.0:
-; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v6, 16, v3
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v7, 16, v1
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v7, v6
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
-; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 16, v4
-; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 16, v6
-; VI-SAFE-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-NNAN-LABEL: test_fmax_legacy_ugt_v4f16:
-; VI-NNAN: ; %bb.0:
-; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NNAN-NEXT: v_max_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_max_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_max_f16_e32 v1, v1, v3
-; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v2
-; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v5
-; VI-NNAN-NEXT: v_or_b32_e32 v1, v1, v4
-; VI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SAFE-LABEL: test_fmax_legacy_ugt_v4f16:
-; SI-SAFE: ; %bb.0:
-; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v7, v7
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v6, v6
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v7, v7
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v6, v6
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v4, v0
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v1, v5, v1
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v2, v6, v2
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v3, v7, v3
-; SI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-NNAN-LABEL: test_fmax_legacy_ugt_v4f16:
-; SI-NNAN: ; %bb.0:
-; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v7, v7
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v6, v6
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v7, v7
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v6, v6
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v4
-; SI-NNAN-NEXT: v_max_f32_e32 v1, v1, v5
-; SI-NNAN-NEXT: v_max_f32_e32 v2, v2, v6
-; SI-NNAN-NEXT: v_max_f32_e32 v3, v3, v7
-; SI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_v4f16:
-; GFX11-SAFE-TRUE16: ; %bb.0:
-; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1.h, v3.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.h, v2.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v0.l, v2.l
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, v1.l, v3.l
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.h, v3.h, v1.h, vcc_lo
-; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, s0
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s1
-; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s2
-; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_v4f16:
-; GFX11-SAFE-FAKE16: ; %bb.0:
-; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v0
-; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v5, v4
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v7, v6
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v2
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v3
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-NNAN-LABEL: test_fmax_legacy_ugt_v4f16:
-; GFX11-NNAN: ; %bb.0:
-; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NNAN-NEXT: v_pk_max_f16 v0, v0, v2
-; GFX11-NNAN-NEXT: v_pk_max_f16 v1, v1, v3
-; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: test_fmax_legacy_ugt_v4f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v3
+; GFX9-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v7, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v0, v4, v0, s4
+; GFX9-NEXT: v_perm_b32 v1, v6, v1, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmax_legacy_ugt_v4f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_lshrrev_b32_e32 v6, 16, v3
+; VI-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v7, v6
+; VI-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v5, v4
+; VI-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v1, v3
+; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v2
+; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v4
+; VI-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v6
+; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmax_legacy_ugt_v4f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_max_legacy_f32_e32 v0, v4, v0
+; SI-NEXT: v_max_legacy_f32_e32 v1, v5, v1
+; SI-NEXT: v_max_legacy_f32_e32 v2, v6, v2
+; SI-NEXT: v_max_legacy_f32_e32 v3, v7, v3
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_v4f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1.h, v3.h
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v0.h, v2.h
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v0.l, v2.l
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, v1.l, v3.l
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, v3.h, v1.h, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s2
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_v4f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v5, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v7, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = fcmp ugt <4 x half> %a, %b
%val = select <4 x i1> %cmp, <4 x half> %a, <4 x half> %b
ret <4 x half> %val
}
+define <4 x half> @test_fmax_legacy_ugt_v4f16_fast(<4 x half> %a, <4 x half> %b) #0 {
+; GFX9-LABEL: test_fmax_legacy_ugt_v4f16_fast:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v0, v0, v2
+; GFX9-NEXT: v_pk_max_f16 v1, v1, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmax_legacy_ugt_v4f16_fast:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_max_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_max_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_max_f16_e32 v1, v1, v3
+; VI-NEXT: v_max_f16_e32 v0, v0, v2
+; VI-NEXT: v_or_b32_e32 v0, v0, v5
+; VI-NEXT: v_or_b32_e32 v1, v1, v4
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmax_legacy_ugt_v4f16_fast:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_max_f32_e32 v0, v0, v4
+; SI-NEXT: v_max_f32_e32 v1, v1, v5
+; SI-NEXT: v_max_f32_e32 v2, v2, v6
+; SI-NEXT: v_max_f32_e32 v3, v3, v7
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_fmax_legacy_ugt_v4f16_fast:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_max_f16 v0, v0, v2
+; GFX11-NEXT: v_pk_max_f16 v1, v1, v3
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %cmp = fcmp ugt <4 x half> %a, %b
+ %val = select nnan nsz <4 x i1> %cmp, <4 x half> %a, <4 x half> %b
+ ret <4 x half> %val
+}
+
define <8 x half> @test_fmax_legacy_ugt_v8f16(<8 x half> %a, <8 x half> %b) #0 {
-; GFX9-SAFE-LABEL: test_fmax_legacy_ugt_v8f16:
-; GFX9-SAFE: ; %bb.0:
-; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v14, 16, v7
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v15, 16, v3
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v12, 16, v6
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v13, 16, v2
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v15, v14
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v10, 16, v5
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v11, 16, v1
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v13, v12
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v8, 16, v4
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v9, 16, v0
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v11, v10
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v9, v8
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v3, v7
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v2, v6
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v5
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
-; GFX9-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v4
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
-; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100
-; GFX9-SAFE-NEXT: v_perm_b32 v0, v8, v0, s4
-; GFX9-SAFE-NEXT: v_perm_b32 v1, v10, v1, s4
-; GFX9-SAFE-NEXT: v_perm_b32 v2, v12, v2, s4
-; GFX9-SAFE-NEXT: v_perm_b32 v3, v14, v3, s4
-; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-NNAN-LABEL: test_fmax_legacy_ugt_v8f16:
-; GFX9-NNAN: ; %bb.0:
-; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NNAN-NEXT: v_pk_max_f16 v0, v0, v4
-; GFX9-NNAN-NEXT: v_pk_max_f16 v1, v1, v5
-; GFX9-NNAN-NEXT: v_pk_max_f16 v2, v2, v6
-; GFX9-NNAN-NEXT: v_pk_max_f16 v3, v3, v7
-; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SAFE-LABEL: test_fmax_legacy_ugt_v8f16:
-; VI-SAFE: ; %bb.0:
-; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v14, 16, v7
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v15, 16, v3
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v12, 16, v6
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v13, 16, v2
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v15, v14
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v10, 16, v5
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v11, 16, v1
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v13, v12
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v8, 16, v4
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v9, 16, v0
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v11, v10
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v9, v8
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v3, v7
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v2, v6
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v1, v5
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
-; VI-SAFE-NEXT: v_cmp_nle_f16_e32 vcc, v0, v4
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
-; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v8
-; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v10
-; VI-SAFE-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v12
-; VI-SAFE-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v14
-; VI-SAFE-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-NNAN-LABEL: test_fmax_legacy_ugt_v8f16:
-; VI-NNAN: ; %bb.0:
-; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NNAN-NEXT: v_max_f16_sdwa v8, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_max_f16_sdwa v9, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_max_f16_sdwa v10, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_max_f16_sdwa v11, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_max_f16_e32 v3, v3, v7
-; VI-NNAN-NEXT: v_max_f16_e32 v2, v2, v6
-; VI-NNAN-NEXT: v_max_f16_e32 v1, v1, v5
-; VI-NNAN-NEXT: v_max_f16_e32 v0, v0, v4
-; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v11
-; VI-NNAN-NEXT: v_or_b32_e32 v1, v1, v10
-; VI-NNAN-NEXT: v_or_b32_e32 v2, v2, v9
-; VI-NNAN-NEXT: v_or_b32_e32 v3, v3, v8
-; VI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SAFE-LABEL: test_fmax_legacy_ugt_v8f16:
-; SI-SAFE: ; %bb.0:
-; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v7, v7
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v15, v15
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v6, v6
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v14, v14
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v13, v13
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v12, v12
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v11, v11
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v10, v10
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v9, v9
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v8, v8
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v7, v7
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v15, v15
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v6, v6
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v14, v14
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v13, v13
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v12, v12
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v11, v11
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v10, v10
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v9, v9
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v8, v8
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v0, v8, v0
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v1, v9, v1
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v2, v10, v2
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v3, v11, v3
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v4, v12, v4
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v5, v13, v5
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v6, v14, v6
-; SI-SAFE-NEXT: v_max_legacy_f32_e32 v7, v15, v7
-; SI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-NNAN-LABEL: test_fmax_legacy_ugt_v8f16:
-; SI-NNAN: ; %bb.0:
-; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v15, v15
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v7, v7
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v14, v14
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v6, v6
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v13, v13
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v12, v12
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v11, v11
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v10, v10
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v9, v9
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v8, v8
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v15, v15
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v7, v7
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v14, v14
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v6, v6
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v13, v13
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v12, v12
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v11, v11
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v10, v10
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v9, v9
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v8, v8
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NNAN-NEXT: v_max_f32_e32 v0, v0, v8
-; SI-NNAN-NEXT: v_max_f32_e32 v1, v1, v9
-; SI-NNAN-NEXT: v_max_f32_e32 v2, v2, v10
-; SI-NNAN-NEXT: v_max_f32_e32 v3, v3, v11
-; SI-NNAN-NEXT: v_max_f32_e32 v4, v4, v12
-; SI-NNAN-NEXT: v_max_f32_e32 v5, v5, v13
-; SI-NNAN-NEXT: v_max_f32_e32 v6, v6, v14
-; SI-NNAN-NEXT: v_max_f32_e32 v7, v7, v15
-; SI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-TRUE16-LABEL: test_fmax_legacy_ugt_v8f16:
-; GFX11-SAFE-TRUE16: ; %bb.0:
-; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v4.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v1.h, v5.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v2.h, v6.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, v3.h, v7.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s3, v0.l, v4.l
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s4, v1.l, v5.l
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s5, v2.l, v6.l
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_nle_f16_e64 s6, v3.l, v7.l
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v3.h, v7.h, v3.h, s2
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v2.h, v6.h, v2.h, s1
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.h, v5.h, v1.h, s0
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v4.h, v0.h, vcc_lo
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v4.l, v0.l, s3
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s4
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v2.l, v6.l, v2.l, s5
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v3.l, v7.l, v3.l, s6
-; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-FAKE16-LABEL: test_fmax_legacy_ugt_v8f16:
-; GFX11-SAFE-FAKE16: ; %bb.0:
-; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v7
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v3
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v6
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v2
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v5
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v11, v10
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v13, v12
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v13, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v15, v14
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v9, v8
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v2, v6
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v4
-; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v2, v11, v2, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v5
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v7
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v1, v12, v1, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v8, v0, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v3, v10, v3, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-NNAN-LABEL: test_fmax_legacy_ugt_v8f16:
-; GFX11-NNAN: ; %bb.0:
-; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NNAN-NEXT: v_pk_max_f16 v0, v0, v4
-; GFX11-NNAN-NEXT: v_pk_max_f16 v1, v1, v5
-; GFX11-NNAN-NEXT: v_pk_max_f16 v2, v2, v6
-; GFX11-NNAN-NEXT: v_pk_max_f16 v3, v3, v7
-; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: test_fmax_legacy_ugt_v8f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v14, 16, v7
+; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v3
+; GFX9-NEXT: v_lshrrev_b32_e32 v12, 16, v6
+; GFX9-NEXT: v_lshrrev_b32_e32 v13, 16, v2
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v15, v14
+; GFX9-NEXT: v_lshrrev_b32_e32 v10, 16, v5
+; GFX9-NEXT: v_lshrrev_b32_e32 v11, 16, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v13, v12
+; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v4
+; GFX9-NEXT: v_lshrrev_b32_e32 v9, 16, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v11, v10
+; GFX9-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v9, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v3, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v2, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v1, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_nle_f16_e32 vcc, v0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v0, v8, v0, s4
+; GFX9-NEXT: v_perm_b32 v1, v10, v1, s4
+; GFX9-NEXT: v_perm_b32 v2, v12, v2, s4
+; GFX9-NEXT: v_perm_b32 v3, v14, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmax_legacy_ugt_v8f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_lshrrev_b32_e32 v14, 16, v7
+; VI-NEXT: v_lshrrev_b32_e32 v15, 16, v3
+; VI-NEXT: v_lshrrev_b32_e32 v12, 16, v6
+; VI-NEXT: v_lshrrev_b32_e32 v13, 16, v2
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v15, v14
+; VI-NEXT: v_lshrrev_b32_e32 v10, 16, v5
+; VI-NEXT: v_lshrrev_b32_e32 v11, 16, v1
+; VI-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v13, v12
+; VI-NEXT: v_lshrrev_b32_e32 v8, 16, v4
+; VI-NEXT: v_lshrrev_b32_e32 v9, 16, v0
+; VI-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v11, v10
+; VI-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v9, v8
+; VI-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v3, v7
+; VI-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v2, v6
+; VI-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v1, v5
+; VI-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; VI-NEXT: v_cmp_nle_f16_e32 vcc, v0, v4
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v8
+; VI-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v10
+; VI-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v12
+; VI-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v14
+; VI-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmax_legacy_ugt_v8f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
+; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v15, v15
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v14
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v13
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v12
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v11
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v10
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v9
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v8
+; SI-NEXT: v_max_legacy_f32_e32 v0, v8, v0
+; SI-NEXT: v_max_legacy_f32_e32 v1, v9, v1
+; SI-NEXT: v_max_legacy_f32_e32 v2, v10, v2
+; SI-NEXT: v_max_legacy_f32_e32 v3, v11, v3
+; SI-NEXT: v_max_legacy_f32_e32 v4, v12, v4
+; SI-NEXT: v_max_legacy_f32_e32 v5, v13, v5
+; SI-NEXT: v_max_legacy_f32_e32 v6, v14, v6
+; SI-NEXT: v_max_legacy_f32_e32 v7, v15, v7
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: test_fmax_legacy_ugt_v8f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0.h, v4.h
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s0, v1.h, v5.h
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s1, v2.h, v6.h
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s2, v3.h, v7.h
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s3, v0.l, v4.l
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s4, v1.l, v5.l
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s5, v2.l, v6.l
+; GFX11-TRUE16-NEXT: v_cmp_nle_f16_e64 s6, v3.l, v7.l
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v3.h, v7.h, v3.h, s2
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.h, v6.h, v2.h, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, v5.h, v1.h, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v4.h, v0.h, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v4.l, v0.l, s3
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s4
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.l, v6.l, v2.l, s5
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v3.l, v7.l, v3.l, s6
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test_fmax_legacy_ugt_v8f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v11, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v13, v12
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v15, v14
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v9, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v2, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v0, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v11, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v1, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_nle_f16_e32 vcc_lo, v3, v7
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v12, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v8, v0, 0x5040100
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v10, v3, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = fcmp ugt <8 x half> %a, %b
%val = select <8 x i1> %cmp, <8 x half> %a, <8 x half> %b
ret <8 x half> %val
}
+define <8 x half> @test_fmax_legacy_ugt_v8f16_fast(<8 x half> %a, <8 x half> %b) #0 {
+; GFX9-LABEL: test_fmax_legacy_ugt_v8f16_fast:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v0, v0, v4
+; GFX9-NEXT: v_pk_max_f16 v1, v1, v5
+; GFX9-NEXT: v_pk_max_f16 v2, v2, v6
+; GFX9-NEXT: v_pk_max_f16 v3, v3, v7
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmax_legacy_ugt_v8f16_fast:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_max_f16_sdwa v8, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_max_f16_sdwa v9, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_max_f16_sdwa v10, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_max_f16_sdwa v11, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_max_f16_e32 v3, v3, v7
+; VI-NEXT: v_max_f16_e32 v2, v2, v6
+; VI-NEXT: v_max_f16_e32 v1, v1, v5
+; VI-NEXT: v_max_f16_e32 v0, v0, v4
+; VI-NEXT: v_or_b32_e32 v0, v0, v11
+; VI-NEXT: v_or_b32_e32 v1, v1, v10
+; VI-NEXT: v_or_b32_e32 v2, v2, v9
+; VI-NEXT: v_or_b32_e32 v3, v3, v8
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmax_legacy_ugt_v8f16_fast:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
+; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
+; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v15, v15
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v14
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v13
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v12
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v11
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v10
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v9
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v8
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_max_f32_e32 v0, v0, v8
+; SI-NEXT: v_max_f32_e32 v1, v1, v9
+; SI-NEXT: v_max_f32_e32 v2, v2, v10
+; SI-NEXT: v_max_f32_e32 v3, v3, v11
+; SI-NEXT: v_max_f32_e32 v4, v4, v12
+; SI-NEXT: v_max_f32_e32 v5, v5, v13
+; SI-NEXT: v_max_f32_e32 v6, v6, v14
+; SI-NEXT: v_max_f32_e32 v7, v7, v15
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_fmax_legacy_ugt_v8f16_fast:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_max_f16 v0, v0, v4
+; GFX11-NEXT: v_pk_max_f16 v1, v1, v5
+; GFX11-NEXT: v_pk_max_f16 v2, v2, v6
+; GFX11-NEXT: v_pk_max_f16 v3, v3, v7
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %cmp = fcmp ugt <8 x half> %a, %b
+ %val = select nnan nsz <8 x i1> %cmp, <8 x half> %a, <8 x half> %b
+ ret <8 x half> %val
+}
+
attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll b/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll
index eee2bd1..f3a84e6 100644
--- a/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmax_legacy.ll
@@ -1,8 +1,6 @@
-; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI-SAFE,GCN,FUNC %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=GCN-NONAN,GCN,FUNC %s
+; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI,GCN,FUNC %s
-; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-SAFE,GCN,FUNC %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=GCN-NONAN,GCN,FUNC %s
+; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI,GCN,FUNC %s
; RUN: llc -mtriple=r600 -mcpu=redwood < %s | FileCheck -enable-var-scope --check-prefixes=EG,FUNC %s
@@ -12,12 +10,10 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
-; VI-SAFE: v_cmp_nlt_f32_e32 vcc, [[A]], [[B]]
-; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
-
-; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; VI: v_cmp_nlt_f32_e32 vcc, [[A]], [[B]]
+; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
; EG: MAX
define amdgpu_kernel void @test_fmax_legacy_uge_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
@@ -34,18 +30,38 @@ define amdgpu_kernel void @test_fmax_legacy_uge_f32(ptr addrspace(1) %out, ptr a
ret void
}
+; FUNC-LABEL: {{^}}test_fmax_legacy_uge_f32_fast:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+
+; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+
+; EG: MAX
+define amdgpu_kernel void @test_fmax_legacy_uge_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid
+ %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1
+
+ %a = load volatile float, ptr addrspace(1) %gep.0, align 4
+ %b = load volatile float, ptr addrspace(1) %gep.1, align 4
+
+ %cmp = fcmp uge float %a, %b
+ %val = select nnan nsz i1 %cmp, float %a, float %b
+ store float %val, ptr addrspace(1) %out, align 4
+ ret void
+}
+
; FUNC-LABEL: {{^}}test_fmax_legacy_uge_f32_nnan_src:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
; GCN-DAG: v_add_f32_e32 [[ADD_A:v[0-9]+]], 1.0, [[A]]
; GCN-DAG: v_add_f32_e32 [[ADD_B:v[0-9]+]], 2.0, [[B]]
-; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]]
+; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]]
-; VI-SAFE: v_cmp_nlt_f32_e32 vcc, [[ADD_A]], [[ADD_B]]
-; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[ADD_B]], [[ADD_A]]
+; VI: v_cmp_nlt_f32_e32 vcc, [[ADD_A]], [[ADD_B]]
+; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[ADD_B]], [[ADD_A]]
-; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]]
; EG: MAX
define amdgpu_kernel void @test_fmax_legacy_uge_f32_nnan_src(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
@@ -64,16 +80,40 @@ define amdgpu_kernel void @test_fmax_legacy_uge_f32_nnan_src(ptr addrspace(1) %o
ret void
}
+; FUNC-LABEL: {{^}}test_fmax_legacy_uge_f32_nnan_src_fast:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+; GCN-DAG: v_add_f32_e32 [[ADD_A:v[0-9]+]], 1.0, [[A]]
+; GCN-DAG: v_add_f32_e32 [[ADD_B:v[0-9]+]], 2.0, [[B]]
+
+; GCN: v_max_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]]
+
+; EG: MAX
+define amdgpu_kernel void @test_fmax_legacy_uge_f32_nnan_src_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid
+ %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1
+
+ %a = load volatile float, ptr addrspace(1) %gep.0, align 4
+ %b = load volatile float, ptr addrspace(1) %gep.1, align 4
+ %a.nnan = fadd nnan float %a, 1.0
+ %b.nnan = fadd nnan float %b, 2.0
+
+ %cmp = fcmp uge float %a.nnan, %b.nnan
+ %val = select nnan nsz i1 %cmp, float %a.nnan, float %b.nnan
+ store float %val, ptr addrspace(1) %out, align 4
+ ret void
+}
+
; FUNC-LABEL: {{^}}test_fmax_legacy_oge_f32:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
-; VI-SAFE: v_cmp_ge_f32_e32 vcc, [[A]], [[B]]
-; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
+; VI: v_cmp_ge_f32_e32 vcc, [[A]], [[B]]
+; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
-; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; EG: MAX
define amdgpu_kernel void @test_fmax_legacy_oge_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
@@ -89,17 +129,35 @@ define amdgpu_kernel void @test_fmax_legacy_oge_f32(ptr addrspace(1) %out, ptr a
ret void
}
-; FUNC-LABEL: {{^}}test_fmax_legacy_ugt_f32:
+; FUNC-LABEL: {{^}}test_fmax_legacy_oge_f32_fast:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; EG: MAX
+define amdgpu_kernel void @test_fmax_legacy_oge_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid
+ %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1
+
+ %a = load volatile float, ptr addrspace(1) %gep.0, align 4
+ %b = load volatile float, ptr addrspace(1) %gep.1, align 4
-; VI-SAFE: v_cmp_nle_f32_e32 vcc, [[A]], [[B]]
-; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
+ %cmp = fcmp oge float %a, %b
+ %val = select nnan nsz i1 %cmp, float %a, float %b
+ store float %val, ptr addrspace(1) %out, align 4
+ ret void
+}
+; FUNC-LABEL: {{^}}test_fmax_legacy_ugt_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+
+; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+
+; VI: v_cmp_nle_f32_e32 vcc, [[A]], [[B]]
+; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
-; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; EG: MAX
define amdgpu_kernel void @test_fmax_legacy_ugt_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
@@ -115,16 +173,35 @@ define amdgpu_kernel void @test_fmax_legacy_ugt_f32(ptr addrspace(1) %out, ptr a
ret void
}
+; FUNC-LABEL: {{^}}test_fmax_legacy_ugt_f32_fast:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+
+; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; EG: MAX
+define amdgpu_kernel void @test_fmax_legacy_ugt_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid
+ %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1
+
+ %a = load volatile float, ptr addrspace(1) %gep.0, align 4
+ %b = load volatile float, ptr addrspace(1) %gep.1, align 4
+
+ %cmp = fcmp ugt float %a, %b
+ %val = select nnan nsz i1 %cmp, float %a, float %b
+ store float %val, ptr addrspace(1) %out, align 4
+ ret void
+}
+
; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_f32:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
-; VI-SAFE: v_cmp_gt_f32_e32 vcc, [[A]], [[B]]
-; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
+; VI: v_cmp_gt_f32_e32 vcc, [[A]], [[B]]
+; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
-; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; EG: MAX
define amdgpu_kernel void @test_fmax_legacy_ogt_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
@@ -140,17 +217,35 @@ define amdgpu_kernel void @test_fmax_legacy_ogt_f32(ptr addrspace(1) %out, ptr a
ret void
}
-; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v1f32:
+; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_f32_fast:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; SI-SAFE: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; EG: MAX
+define amdgpu_kernel void @test_fmax_legacy_ogt_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid
+ %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1
+
+ %a = load volatile float, ptr addrspace(1) %gep.0, align 4
+ %b = load volatile float, ptr addrspace(1) %gep.1, align 4
-; VI-SAFE: v_cmp_gt_f32_e32 vcc, [[A]], [[B]]
-; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
+ %cmp = fcmp ogt float %a, %b
+ %val = select nnan nsz i1 %cmp, float %a, float %b
+ store float %val, ptr addrspace(1) %out, align 4
+ ret void
+}
+; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v1f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+
+; SI: v_max_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+
+; VI: v_cmp_gt_f32_e32 vcc, [[A]], [[B]]
+; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
-; GCN-NONAN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
; EG: MAX
define amdgpu_kernel void @test_fmax_legacy_ogt_v1f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
@@ -166,23 +261,39 @@ define amdgpu_kernel void @test_fmax_legacy_ogt_v1f32(ptr addrspace(1) %out, ptr
ret void
}
+; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v1f32_fast:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+
+; GCN: v_max_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; EG: MAX
+define amdgpu_kernel void @test_fmax_legacy_ogt_v1f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %gep.0 = getelementptr <1 x float>, ptr addrspace(1) %in, i32 %tid
+ %gep.1 = getelementptr <1 x float>, ptr addrspace(1) %gep.0, i32 1
+
+ %a = load volatile <1 x float>, ptr addrspace(1) %gep.0
+ %b = load volatile <1 x float>, ptr addrspace(1) %gep.1
+
+ %cmp = fcmp ogt <1 x float> %a, %b
+ %val = select nnan nsz <1 x i1> %cmp, <1 x float> %a, <1 x float> %b
+ store <1 x float> %val, ptr addrspace(1) %out
+ ret void
+}
+
; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v3f32:
-; SI-SAFE: v_max_legacy_f32_e32
-; SI-SAFE: v_max_legacy_f32_e32
-; SI-SAFE: v_max_legacy_f32_e32
-
-; VI-SAFE: v_cmp_gt_f32_e32
-; VI-SAFE: v_cndmask_b32_e32
-; VI-SAFE: v_cmp_gt_f32_e32
-; VI-SAFE: v_cndmask_b32_e32
-; VI-SAFE: v_cmp_gt_f32_e32
-; VI-SAFE: v_cndmask_b32_e32
-; VI-SAFE-NOT: v_cmp
-; VI-SAFE-NOT: v_cndmask
-
-; GCN-NONAN: v_max_f32_e32
-; GCN-NONAN: v_max_f32_e32
-; GCN-NONAN: v_max_f32_e32
+; SI: v_max_legacy_f32_e32
+; SI: v_max_legacy_f32_e32
+; SI: v_max_legacy_f32_e32
+
+; VI: v_cmp_gt_f32_e32
+; VI: v_cndmask_b32_e32
+; VI: v_cmp_gt_f32_e32
+; VI: v_cndmask_b32_e32
+; VI: v_cmp_gt_f32_e32
+; VI: v_cndmask_b32_e32
+; VI-NOT: v_cmp
+; VI-NOT: v_cndmask
; GCN-NOT: v_max
define amdgpu_kernel void @test_fmax_legacy_ogt_v3f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
@@ -199,6 +310,27 @@ define amdgpu_kernel void @test_fmax_legacy_ogt_v3f32(ptr addrspace(1) %out, ptr
ret void
}
+; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_v3f32_fast:
+
+; GCN: v_max_f32_e32
+; GCN: v_max_f32_e32
+; GCN: v_max_f32_e32
+
+; GCN-NOT: v_max
+define amdgpu_kernel void @test_fmax_legacy_ogt_v3f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %gep.0 = getelementptr <3 x float>, ptr addrspace(1) %in, i32 %tid
+ %gep.1 = getelementptr <3 x float>, ptr addrspace(1) %gep.0, i32 1
+
+ %a = load <3 x float>, ptr addrspace(1) %gep.0
+ %b = load <3 x float>, ptr addrspace(1) %gep.1
+
+ %cmp = fcmp ogt <3 x float> %a, %b
+ %val = select nnan nsz <3 x i1> %cmp, <3 x float> %a, <3 x float> %b
+ store <3 x float> %val, ptr addrspace(1) %out
+ ret void
+}
+
; FUNC-LABEL: {{^}}test_fmax_legacy_ogt_f32_multi_use:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
diff --git a/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll b/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll
index 2ac5891..37f077d5 100644
--- a/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll
@@ -1,16 +1,12 @@
-; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI-SAFE,GCN %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn < %s | FileCheck -enable-var-scope --check-prefixes=GCN %s
+; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI,GCN %s
-; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-SAFE,GCN %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope --check-prefixes=GCN,VI-NNAN %s
+; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI,GCN %s
; GCN-LABEL: {{^}}min_fneg_select_regression_0:
; GCN-NOT: v_mul
-; SI: v_max_legacy_f32_e64 [[MIN:v[0-9]+]], -1.0, -v0
-
-; VI-SAFE: v_cmp_nle_f32_e32 vcc, 1.0, v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc
+; VI: v_cmp_nle_f32_e32 vcc, 1.0, v0
+; VI-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc
define amdgpu_ps float @min_fneg_select_regression_0(float %a, float %b) #0 {
%fneg.a = fsub float -0.0, %a
%cmp.a = fcmp ult float %a, 1.0
@@ -18,15 +14,23 @@ define amdgpu_ps float @min_fneg_select_regression_0(float %a, float %b) #0 {
ret float %min.a
}
+; GCN-LABEL: {{^}}min_fneg_select_regression_0_fast:
+; GCN-NOT: v_mul
+
+define amdgpu_ps float @min_fneg_select_regression_0_fast(float %a, float %b) #0 {
+ %fneg.a = fsub float -0.0, %a
+ %cmp.a = fcmp ult float %a, 1.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float -1.0
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}min_fneg_select_regression_posk_0:
; GCN-NOT: v_mul
; SI: v_max_legacy_f32_e64 [[MIN:v[0-9]+]], 1.0, -v0
-; VI-SAFE: v_cmp_nle_f32_e32 vcc, -1.0, v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
-
-; VI-NNAN: v_max_f32_e64 v{{[0-9]+}}, -v0, 1.0
+; VI: v_cmp_nle_f32_e32 vcc, -1.0, v0
+; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
define amdgpu_ps float @min_fneg_select_regression_posk_0(float %a, float %b) #0 {
%fneg.a = fsub float -0.0, %a
%cmp.a = fcmp ult float %a, -1.0
@@ -34,15 +38,24 @@ define amdgpu_ps float @min_fneg_select_regression_posk_0(float %a, float %b) #0
ret float %min.a
}
-; GCN-LABEL: {{^}}max_fneg_select_regression_0:
+; GCN-LABEL: {{^}}min_fneg_select_regression_posk_0_fast:
; GCN-NOT: v_mul
-; SI-SAFE: v_min_legacy_f32_e64 [[MIN:v[0-9]+]], -1.0, -v0
+; VI: v_max_f32_e64 v{{[0-9]+}}, -v0, 1.0
+define amdgpu_ps float @min_fneg_select_regression_posk_0_fast(float %a, float %b) #0 {
+ %fneg.a = fsub float -0.0, %a
+ %cmp.a = fcmp ult float %a, -1.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0
+ ret float %min.a
+}
+
+; GCN-LABEL: {{^}}max_fneg_select_regression_0:
+; GCN-NOT: v_mul
-; VI-SAFE: v_cmp_nge_f32_e32 vcc, 1.0, v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc
+; SI: v_min_legacy_f32_e64 [[MIN:v[0-9]+]], -1.0, -v0
-; GCN-NONAN: v_min_f32_e64 [[MIN:v[0-9]+]], -v0, -1.0
+; VI: v_cmp_nge_f32_e32 vcc, 1.0, v0
+; VI-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc
define amdgpu_ps float @max_fneg_select_regression_0(float %a) #0 {
%fneg.a = fsub float -0.0, %a
%cmp.a = fcmp ugt float %a, 1.0
@@ -50,15 +63,24 @@ define amdgpu_ps float @max_fneg_select_regression_0(float %a) #0 {
ret float %min.a
}
-; GCN-LABEL: {{^}}max_fneg_select_regression_posk_0:
+; GCN-LABEL: {{^}}max_fneg_select_regression_0_fast:
; GCN-NOT: v_mul
-; SI-SAFE: v_min_legacy_f32_e64 [[MIN:v[0-9]+]], 1.0, -v0
+; GCN: v_min_f32_e64 [[MIN:v[0-9]+]], -v0, -1.0
+define amdgpu_ps float @max_fneg_select_regression_0_fast(float %a) #0 {
+ %fneg.a = fsub float -0.0, %a
+ %cmp.a = fcmp ugt float %a, 1.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float -1.0
+ ret float %min.a
+}
+
+; GCN-LABEL: {{^}}max_fneg_select_regression_posk_0:
+; GCN-NOT: v_mul
-; VI-SAFE: v_cmp_nge_f32_e32 vcc, -1.0, v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
+; SI: v_min_legacy_f32_e64 [[MIN:v[0-9]+]], 1.0, -v0
-; GCN-NONAN: v_min_f32_e64 [[MIN:v[0-9]+]], -v0, 1.0
+; VI: v_cmp_nge_f32_e32 vcc, -1.0, v0
+; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
define amdgpu_ps float @max_fneg_select_regression_posk_0(float %a) #0 {
%fneg.a = fsub float -0.0, %a
%cmp.a = fcmp ugt float %a, -1.0
@@ -66,13 +88,22 @@ define amdgpu_ps float @max_fneg_select_regression_posk_0(float %a) #0 {
ret float %min.a
}
+; GCN-LABEL: {{^}}max_fneg_select_regression_posk_0_fast:
+; GCN-NOT: v_mul
+
+; GCN: v_min_f32_e64 [[MIN:v[0-9]+]], -v0, 1.0
+define amdgpu_ps float @max_fneg_select_regression_posk_0_fast(float %a) #0 {
+ %fneg.a = fsub float -0.0, %a
+ %cmp.a = fcmp ugt float %a, -1.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ugt_a_neg1:
; SI: v_min_legacy_f32_e64 v0, 1.0, -v0
-; VI-SAFE: v_cmp_nge_f32_e32 vcc, -1.0, v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
-
-; VI-NNAN: v_min_f32_e64 v0, -v0, 1.0
+; VI: v_cmp_nge_f32_e32 vcc, -1.0, v0
+; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg1(float %a, float %b) #0 {
%fneg.a = fneg float %a
%cmp.a = fcmp ugt float %a, -1.0
@@ -80,13 +111,21 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg1(float %a, float %b) #0
ret float %min.a
}
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ugt_a_neg1_fast:
+
+; VI: v_min_f32_e64 v0, -v0, 1.0
+define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg1_fast(float %a, float %b) #0 {
+ %fneg.a = fneg float %a
+ %cmp.a = fcmp ugt float %a, -1.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ult_a_neg1:
; SI: v_max_legacy_f32_e64 v0, 1.0, -v0
-; VI-SAFE: v_cmp_nle_f32_e32 vcc, -1.0, v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
-
-; VI-NNAN: v_max_f32_e64 v0, -v0, 1.0
+; VI: v_cmp_nle_f32_e32 vcc, -1.0, v0
+; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg1(float %a, float %b) #0 {
%fneg.a = fneg float %a
%cmp.a = fcmp ult float %a, -1.0
@@ -94,13 +133,21 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg1(float %a, float %b) #0
ret float %min.a
}
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ult_a_neg1_fast:
+
+; VI: v_max_f32_e64 v0, -v0, 1.0
+define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg1_fast(float %a, float %b) #0 {
+ %fneg.a = fneg float %a
+ %cmp.a = fcmp ult float %a, -1.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ogt_a_neg1:
; SI: v_min_legacy_f32_e64 v0, -v0, 1.0
-; VI-SAFE: v_cmp_lt_f32_e32 vcc, -1.0, v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
-
-; VI-NNAN: v_min_f32_e64 v0, -v0, 1.0
+; VI: v_cmp_lt_f32_e32 vcc, -1.0, v0
+; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg1(float %a, float %b) #0 {
%fneg.a = fneg float %a
%cmp.a = fcmp ogt float %a, -1.0
@@ -108,13 +155,21 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg1(float %a, float %b) #0
ret float %min.a
}
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ogt_a_neg1_fast:
+
+; VI: v_min_f32_e64 v0, -v0, 1.0
+define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg1_fast(float %a, float %b) #0 {
+ %fneg.a = fneg float %a
+ %cmp.a = fcmp ogt float %a, -1.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_olt_a_neg1:
; SI: v_max_legacy_f32_e64 v0, -v0, 1.0
-; VI-SAFE: v_cmp_gt_f32_e32 vcc, -1.0, v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
-
-; VI-NANN: v_max_f32_e64 v0, -v0, 1.0
+; VI: v_cmp_gt_f32_e32 vcc, -1.0, v0
+; VI-NEXT: v_cndmask_b32_e64 v0, 1.0, -v0, vcc
define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg1(float %a, float %b) #0 {
%fneg.a = fneg float %a
%cmp.a = fcmp olt float %a, -1.0
@@ -122,17 +177,24 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg1(float %a, float %b) #0
ret float %min.a
}
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_olt_a_neg1_fast:
+
+; VI: v_max_f32_e64 v0, -v0, 1.0
+define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg1_fast(float %a, float %b) #0 {
+ %fneg.a = fneg float %a
+ %cmp.a = fcmp olt float %a, -1.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 1.0
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ugt_a_neg8:
; SI: s_mov_b32 [[K:s[0-9]+]], 0x41000000
; SI-NEXT: v_min_legacy_f32_e64 v0, [[K]], -v0
-; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000
-; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000
-; VI-SAFE: v_cmp_nge_f32_e32 vcc, [[K0]], v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc
-
-; VI-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000
-; VI-NNAN-NEXT: v_min_f32_e64 v0, -v0, [[K]]
+; VI-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000
+; VI-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000
+; VI: v_cmp_nge_f32_e32 vcc, [[K0]], v0
+; VI-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc
define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg8(float %a, float %b) #0 {
%fneg.a = fneg float %a
%cmp.a = fcmp ugt float %a, -8.0
@@ -140,17 +202,25 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg8(float %a, float %b) #0
ret float %min.a
}
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ugt_a_neg8_fast:
+
+; VI: s_mov_b32 [[K:s[0-9]+]], 0x41000000
+; VI-NEXT: v_min_f32_e64 v0, -v0, [[K]]
+define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg8_fast(float %a, float %b) #0 {
+ %fneg.a = fneg float %a
+ %cmp.a = fcmp ugt float %a, -8.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 8.0
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ult_a_neg8:
; SI: s_mov_b32 [[K:s[0-9]+]], 0x41000000
; SI-NEXT: v_max_legacy_f32_e64 v0, [[K]], -v0
-; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000
-; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000
-; VI-SAFE: v_cmp_nle_f32_e32 vcc, [[K0]], v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc
-
-; VI-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000
-; VI-NNAN-NEXT: v_max_f32_e64 v0, -v0, [[K]]
+; VI-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000
+; VI-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000
+; VI: v_cmp_nle_f32_e32 vcc, [[K0]], v0
+; VI-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc
define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg8(float %a, float %b) #0 {
%fneg.a = fneg float %a
%cmp.a = fcmp ult float %a, -8.0
@@ -158,17 +228,25 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg8(float %a, float %b) #0
ret float %min.a
}
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ult_a_neg8_fast:
+
+; VI: s_mov_b32 [[K:s[0-9]+]], 0x41000000
+; VI-NEXT: v_max_f32_e64 v0, -v0, [[K]]
+define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg8_fast(float %a, float %b) #0 {
+ %fneg.a = fneg float %a
+ %cmp.a = fcmp ult float %a, -8.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 8.0
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ogt_a_neg8:
; SI: s_mov_b32 [[K:s[0-9]+]], 0x41000000
; SI-NEXT: v_min_legacy_f32_e64 v0, -v0, [[K]]
-; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000
-; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000
-; VI-SAFE: v_cmp_lt_f32_e32 vcc, [[K0]], v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc
-
-; VI-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000
-; VI-NNAN-NEXT: v_min_f32_e64 v0, -v0, [[K]]
+; VI-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000
+; VI-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000
+; VI: v_cmp_lt_f32_e32 vcc, [[K0]], v0
+; VI-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc
define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg8(float %a, float %b) #0 {
%fneg.a = fneg float %a
%cmp.a = fcmp ogt float %a, -8.0
@@ -176,18 +254,26 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg8(float %a, float %b) #0
ret float %min.a
}
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ogt_a_neg8_fast:
+
+; VI: s_mov_b32 [[K:s[0-9]+]], 0x41000000
+; VI-NEXT: v_min_f32_e64 v0, -v0, [[K]]
+define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg8_fast(float %a, float %b) #0 {
+ %fneg.a = fneg float %a
+ %cmp.a = fcmp ogt float %a, -8.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 8.0
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_olt_a_neg8:
; SI: s_mov_b32 [[K:s[0-9]+]], 0x41000000
; SI-NEXT: v_max_legacy_f32_e64 v0, -v0, [[K]]
-; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000
-; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000
-; VI-SAFE: v_cmp_gt_f32_e32 vcc, [[K0]], v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc
-
-; VI-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000
-; VI-NNAN-NEXT: v_max_f32_e64 v0, -v0, [[K]]
+; VI-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000
+; VI-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x41000000
+; VI: v_cmp_gt_f32_e32 vcc, [[K0]], v0
+; VI-NEXT: v_cndmask_b32_e64 v0, [[K1]], -v0, vcc
define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg8(float %a, float %b) #0 {
%fneg.a = fneg float %a
%cmp.a = fcmp olt float %a, -8.0
@@ -195,13 +281,22 @@ define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg8(float %a, float %b) #0
ret float %min.a
}
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_olt_a_neg8_fast:
+
+; VI: s_mov_b32 [[K:s[0-9]+]], 0x41000000
+; VI-NEXT: v_max_f32_e64 v0, -v0, [[K]]
+define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg8_fast(float %a, float %b) #0 {
+ %fneg.a = fneg float %a
+ %cmp.a = fcmp olt float %a, -8.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float 8.0
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}select_fneg_a_or_neg1_cmp_olt_a_1:
; SI: v_max_legacy_f32_e64 v0, -v0, -1.0
-; VI-SAFE: v_cmp_gt_f32_e32 vcc, 1.0, v0
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc
-
-; VI-NNAN: v_max_f32_e64 v0, -v0, -1.0
+; VI: v_cmp_gt_f32_e32 vcc, 1.0, v0
+; VI-NEXT: v_cndmask_b32_e64 v0, -1.0, -v0, vcc
define amdgpu_ps float @select_fneg_a_or_neg1_cmp_olt_a_1(float %a, float %b) #0 {
%fneg.a = fneg float %a
%cmp.a = fcmp olt float %a, 1.0
@@ -209,15 +304,22 @@ define amdgpu_ps float @select_fneg_a_or_neg1_cmp_olt_a_1(float %a, float %b) #0
ret float %min.a
}
+; GCN-LABEL: {{^}}select_fneg_a_or_neg1_cmp_olt_a_1_fast:
+
+; VI: v_max_f32_e64 v0, -v0, -1.0
+define amdgpu_ps float @select_fneg_a_or_neg1_cmp_olt_a_1_fast(float %a, float %b) #0 {
+ %fneg.a = fneg float %a
+ %cmp.a = fcmp olt float %a, 1.0
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float -1.0
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}ult_a_select_fneg_a_b:
; SI: v_cmp_nge_f32_e32 vcc, v0, v1
; SI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
-; VI-SAFE: v_cmp_nge_f32_e32 vcc, v0, v1
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
-
-; VI-NNAN: v_cmp_lt_f32_e32 vcc, v0, v1
-; VI-NNAN-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
+; VI: v_cmp_nge_f32_e32 vcc, v0, v1
+; VI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
define amdgpu_ps float @ult_a_select_fneg_a_b(float %a, float %b) #0 {
%fneg.a = fneg float %a
%cmp.a = fcmp ult float %a, %b
@@ -225,15 +327,23 @@ define amdgpu_ps float @ult_a_select_fneg_a_b(float %a, float %b) #0 {
ret float %min.a
}
+; GCN-LABEL: {{^}}ult_a_select_fneg_a_b_fast:
+
+; VI: v_cmp_lt_f32_e32 vcc, v0, v1
+; VI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
+define amdgpu_ps float @ult_a_select_fneg_a_b_fast(float %a, float %b) #0 {
+ %fneg.a = fneg float %a
+ %cmp.a = fcmp nnan nsz ult float %a, %b
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float %b
+ ret float %min.a
+}
+
; GCN-LABEL: {{^}}ugt_a_select_fneg_a_b:
; SI: v_cmp_nle_f32_e32 vcc, v0, v1
; SI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
-; VI-SAFE: v_cmp_nle_f32_e32 vcc, v0, v1
-; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
-
-; VI-NNAN: v_cmp_gt_f32_e32 vcc, v0, v1
-; VI-NNAN-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
+; VI: v_cmp_nle_f32_e32 vcc, v0, v1
+; VI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
define amdgpu_ps float @ugt_a_select_fneg_a_b(float %a, float %b) #0 {
%fneg.a = fneg float %a
%cmp.a = fcmp ugt float %a, %b
@@ -241,5 +351,16 @@ define amdgpu_ps float @ugt_a_select_fneg_a_b(float %a, float %b) #0 {
ret float %min.a
}
+; GCN-LABEL: {{^}}ugt_a_select_fneg_a_b_fast:
+
+; VI: v_cmp_gt_f32_e32 vcc, v0, v1
+; VI-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
+define amdgpu_ps float @ugt_a_select_fneg_a_b_fast(float %a, float %b) #0 {
+ %fneg.a = fneg float %a
+ %cmp.a = fcmp nnan nsz ugt float %a, %b
+ %min.a = select nnan nsz i1 %cmp.a, float %fneg.a, float %b
+ ret float %min.a
+}
+
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
diff --git a/llvm/test/CodeGen/AMDGPU/fmin_legacy.f16.ll b/llvm/test/CodeGen/AMDGPU/fmin_legacy.f16.ll
index 34cb0b1..40c2ec0 100644
--- a/llvm/test/CodeGen/AMDGPU/fmin_legacy.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmin_legacy.f16.ll
@@ -1,735 +1,760 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9-SAFE %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9-NNAN %s
+; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefixes=GFX9 %s
-; RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=VI-SAFE %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-NNAN %s
+; RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=VI %s
-; RUN: llc -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefix=SI-SAFE %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefixes=SI-NNAN %s
+; RUN: llc -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefix=SI %s
-; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SAFE-TRUE16 %s
-; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-SAFE-FAKE16 %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-NNAN,GFX11-NNAN-TRUE16 %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11-NNAN,GFX11-NNAN-FAKE16 %s
+; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=+real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-TRUE16 %s
+; RUN: llc -mtriple=amdgcn-- -mcpu=gfx1100 -mattr=-real-true16 < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-FAKE16 %s
define half @test_fmin_legacy_ule_f16(half %a, half %b) #0 {
-; GFX9-SAFE-LABEL: test_fmin_legacy_ule_f16:
-; GFX9-SAFE: ; %bb.0:
-; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-NNAN-LABEL: test_fmin_legacy_ule_f16:
-; GFX9-NNAN: ; %bb.0:
-; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NNAN-NEXT: v_min_f16_e32 v0, v0, v1
-; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SAFE-LABEL: test_fmin_legacy_ule_f16:
-; VI-SAFE: ; %bb.0:
-; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; VI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-NNAN-LABEL: test_fmin_legacy_ule_f16:
-; VI-NNAN: ; %bb.0:
-; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NNAN-NEXT: v_min_f16_e32 v0, v0, v1
-; VI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SAFE-LABEL: test_fmin_legacy_ule_f16:
-; SI-SAFE: ; %bb.0:
-; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v1, v0
-; SI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-NNAN-LABEL: test_fmin_legacy_ule_f16:
-; SI-NNAN: ; %bb.0:
-; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v1
-; SI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_f16:
-; GFX11-SAFE-TRUE16: ; %bb.0:
-; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.l, v1.l
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, vcc_lo
-; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_f16:
-; GFX11-SAFE-FAKE16: ; %bb.0:
-; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v1
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-NNAN-TRUE16-LABEL: test_fmin_legacy_ule_f16:
-; GFX11-NNAN-TRUE16: ; %bb.0:
-; GFX11-NNAN-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NNAN-TRUE16-NEXT: v_min_f16_e32 v0.l, v0.l, v1.l
-; GFX11-NNAN-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-NNAN-FAKE16-LABEL: test_fmin_legacy_ule_f16:
-; GFX11-NNAN-FAKE16: ; %bb.0:
-; GFX11-NNAN-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NNAN-FAKE16-NEXT: v_min_f16_e32 v0, v0, v1
-; GFX11-NNAN-FAKE16-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: test_fmin_legacy_ule_f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmin_legacy_ule_f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1
+; VI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmin_legacy_ule_f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_min_legacy_f32_e32 v0, v1, v0
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.l, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, vcc_lo
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = fcmp ule half %a, %b
%val = select i1 %cmp, half %a, half %b
ret half %val
}
+define half @test_fmin_legacy_ule_f16_fast(half %a, half %b) #0 {
+; GFX9-LABEL: test_fmin_legacy_ule_f16_fast:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmin_legacy_ule_f16_fast:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_min_f16_e32 v0, v0, v1
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmin_legacy_ule_f16_fast:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_min_f32_e32 v0, v0, v1
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_f16_fast:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_min_f16_e32 v0.l, v0.l, v1.l
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_f16_fast:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_min_f16_e32 v0, v0, v1
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
+ %cmp = fcmp ule half %a, %b
+ %val = select nnan nsz i1 %cmp, half %a, half %b
+ ret half %val
+}
+
define <2 x half> @test_fmin_legacy_ule_v2f16(<2 x half> %a, <2 x half> %b) #0 {
-; GFX9-SAFE-LABEL: test_fmin_legacy_ule_v2f16:
-; GFX9-SAFE: ; %bb.0:
-; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v1
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v2
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100
-; GFX9-SAFE-NEXT: v_perm_b32 v0, v2, v0, s4
-; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-NNAN-LABEL: test_fmin_legacy_ule_v2f16:
-; GFX9-NNAN: ; %bb.0:
-; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NNAN-NEXT: v_pk_min_f16 v0, v0, v1
-; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SAFE-LABEL: test_fmin_legacy_ule_v2f16:
-; VI-SAFE: ; %bb.0:
-; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v2, 16, v1
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v2
-; VI-SAFE-NEXT: v_cndmask_b32_sdwa v2, v2, v3, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
-; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-NNAN-LABEL: test_fmin_legacy_ule_v2f16:
-; VI-NNAN: ; %bb.0:
-; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NNAN-NEXT: v_min_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_min_f16_e32 v0, v0, v1
-; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v2
-; VI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SAFE-LABEL: test_fmin_legacy_ule_v2f16:
-; SI-SAFE: ; %bb.0:
-; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v2, v0
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v1, v3, v1
-; SI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-NNAN-LABEL: test_fmin_legacy_ule_v2f16:
-; SI-NNAN: ; %bb.0:
-; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v2
-; SI-NNAN-NEXT: v_min_f32_e32 v1, v1, v3
-; SI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_v2f16:
-; GFX11-SAFE-TRUE16: ; %bb.0:
-; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v1.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.l, v1.l
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v1.h, v0.h, vcc_lo
-; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, s0
-; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_v2f16:
-; GFX11-SAFE-FAKE16: ; %bb.0:
-; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
-; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v3, v2
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v1
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-NNAN-LABEL: test_fmin_legacy_ule_v2f16:
-; GFX11-NNAN: ; %bb.0:
-; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NNAN-NEXT: v_pk_min_f16 v0, v0, v1
-; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: test_fmin_legacy_ule_v2f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v0, v2, v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmin_legacy_ule_v2f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; VI-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v2
+; VI-NEXT: v_cndmask_b32_sdwa v2, v2, v3, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v1
+; VI-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; VI-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmin_legacy_ule_v2f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_min_legacy_f32_e32 v0, v2, v0
+; SI-NEXT: v_min_legacy_f32_e32 v1, v3, v1
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_v2f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v1.h
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.l, v1.l
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v1.h, v0.h, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v1.l, v0.l, s0
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_v2f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v3, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = fcmp ule <2 x half> %a, %b
%val = select <2 x i1> %cmp, <2 x half> %a, <2 x half> %b
ret <2 x half> %val
}
+define <2 x half> @test_fmin_legacy_ule_v2f16_fast(<2 x half> %a, <2 x half> %b) #0 {
+; GFX9-LABEL: test_fmin_legacy_ule_v2f16_fast:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_f16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmin_legacy_ule_v2f16_fast:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_min_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_min_f16_e32 v0, v0, v1
+; VI-NEXT: v_or_b32_e32 v0, v0, v2
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmin_legacy_ule_v2f16_fast:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_min_f32_e32 v0, v0, v2
+; SI-NEXT: v_min_f32_e32 v1, v1, v3
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_fmin_legacy_ule_v2f16_fast:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_min_f16 v0, v0, v1
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %cmp = fcmp ule <2 x half> %a, %b
+ %val = select nnan nsz <2 x i1> %cmp, <2 x half> %a, <2 x half> %b
+ ret <2 x half> %val
+}
+
define <3 x half> @test_fmin_legacy_ule_v3f16(<3 x half> %a, <3 x half> %b) #0 {
-; GFX9-SAFE-LABEL: test_fmin_legacy_ule_v3f16:
-; GFX9-SAFE: ; %bb.0:
-; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
-; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100
-; GFX9-SAFE-NEXT: v_perm_b32 v0, v4, v0, s4
-; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-NNAN-LABEL: test_fmin_legacy_ule_v3f16:
-; GFX9-NNAN: ; %bb.0:
-; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NNAN-NEXT: v_pk_min_f16 v1, v1, v3
-; GFX9-NNAN-NEXT: v_pk_min_f16 v0, v0, v2
-; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SAFE-LABEL: test_fmin_legacy_ule_v3f16:
-; VI-SAFE: ; %bb.0:
-; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4
-; VI-SAFE-NEXT: v_cndmask_b32_sdwa v4, v4, v5, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
-; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-NNAN-LABEL: test_fmin_legacy_ule_v3f16:
-; VI-NNAN: ; %bb.0:
-; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NNAN-NEXT: v_min_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_min_f16_e32 v0, v0, v2
-; VI-NNAN-NEXT: v_min_f16_e32 v1, v1, v3
-; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v4
-; VI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SAFE-LABEL: test_fmin_legacy_ule_v3f16:
-; SI-SAFE: ; %bb.0:
-; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v3, v0
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v1, v4, v1
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v2, v5, v2
-; SI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-NNAN-LABEL: test_fmin_legacy_ule_v3f16:
-; SI-NNAN: ; %bb.0:
-; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v3
-; SI-NNAN-NEXT: v_min_f32_e32 v1, v1, v4
-; SI-NNAN-NEXT: v_min_f32_e32 v2, v2, v5
-; SI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_v3f16:
-; GFX11-SAFE-TRUE16: ; %bb.0:
-; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v2.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.l, v2.l
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v1.l, v3.l
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, vcc_lo
-; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s0
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s1
-; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_v3f16:
-; GFX11-SAFE-FAKE16: ; %bb.0:
-; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v5, v4
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v3
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-NNAN-LABEL: test_fmin_legacy_ule_v3f16:
-; GFX11-NNAN: ; %bb.0:
-; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NNAN-NEXT: v_pk_min_f16 v0, v0, v2
-; GFX11-NNAN-NEXT: v_pk_min_f16 v1, v1, v3
-; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: test_fmin_legacy_ule_v3f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v0, v4, v0, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmin_legacy_ule_v3f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4
+; VI-NEXT: v_cndmask_b32_sdwa v4, v4, v5, vcc dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3
+; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2
+; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; VI-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmin_legacy_ule_v3f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_min_legacy_f32_e32 v0, v3, v0
+; SI-NEXT: v_min_legacy_f32_e32 v1, v4, v1
+; SI-NEXT: v_min_legacy_f32_e32 v2, v5, v2
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_v3f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v2.h
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.l, v2.l
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v1.l, v3.l
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s1
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_v3f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v5, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v2, v0, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = fcmp ule <3 x half> %a, %b
%val = select <3 x i1> %cmp, <3 x half> %a, <3 x half> %b
ret <3 x half> %val
}
+define <3 x half> @test_fmin_legacy_ule_v3f16_fast(<3 x half> %a, <3 x half> %b) #0 {
+; GFX9-LABEL: test_fmin_legacy_ule_v3f16_fast:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_max_f16 v3, v3, v3
+; GFX9-NEXT: v_pk_max_f16 v1, v1, v1
+; GFX9-NEXT: v_pk_min_f16 v1, v1, v3
+; GFX9-NEXT: v_pk_min_f16 v0, v0, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmin_legacy_ule_v3f16_fast:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_min_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_min_f16_e32 v0, v0, v2
+; VI-NEXT: v_min_f16_e32 v1, v1, v3
+; VI-NEXT: v_or_b32_e32 v0, v0, v4
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmin_legacy_ule_v3f16_fast:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_min_f32_e32 v0, v0, v3
+; SI-NEXT: v_min_f32_e32 v1, v1, v4
+; SI-NEXT: v_min_f32_e32 v2, v2, v5
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_fmin_legacy_ule_v3f16_fast:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_max_f16 v3, v3, v3
+; GFX11-NEXT: v_pk_max_f16 v1, v1, v1
+; GFX11-NEXT: v_pk_min_f16 v0, v0, v2
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_pk_min_f16 v1, v1, v3
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %cmp = fcmp ule <3 x half> %a, %b
+ %val = select nnan nsz <3 x i1> %cmp, <3 x half> %a, <3 x half> %b
+ ret <3 x half> %val
+}
+
define <4 x half> @test_fmin_legacy_ule_v4f16(<4 x half> %a, <4 x half> %b) #0 {
-; GFX9-SAFE-LABEL: test_fmin_legacy_ule_v4f16:
-; GFX9-SAFE: ; %bb.0:
-; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v6, 16, v3
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v7, 16, v1
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v7, v6
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
-; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100
-; GFX9-SAFE-NEXT: v_perm_b32 v0, v4, v0, s4
-; GFX9-SAFE-NEXT: v_perm_b32 v1, v6, v1, s4
-; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-NNAN-LABEL: test_fmin_legacy_ule_v4f16:
-; GFX9-NNAN: ; %bb.0:
-; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NNAN-NEXT: v_pk_min_f16 v0, v0, v2
-; GFX9-NNAN-NEXT: v_pk_min_f16 v1, v1, v3
-; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SAFE-LABEL: test_fmin_legacy_ule_v4f16:
-; VI-SAFE: ; %bb.0:
-; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v6, 16, v3
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v7, 16, v1
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v4, 16, v2
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v5, 16, v0
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v7, v6
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
-; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 16, v4
-; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 16, v6
-; VI-SAFE-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-NNAN-LABEL: test_fmin_legacy_ule_v4f16:
-; VI-NNAN: ; %bb.0:
-; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NNAN-NEXT: v_min_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_min_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_min_f16_e32 v1, v1, v3
-; VI-NNAN-NEXT: v_min_f16_e32 v0, v0, v2
-; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v5
-; VI-NNAN-NEXT: v_or_b32_e32 v1, v1, v4
-; VI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SAFE-LABEL: test_fmin_legacy_ule_v4f16:
-; SI-SAFE: ; %bb.0:
-; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v7, v7
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v6, v6
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v7, v7
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v6, v6
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v4, v0
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v1, v5, v1
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v2, v6, v2
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v3, v7, v3
-; SI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-NNAN-LABEL: test_fmin_legacy_ule_v4f16:
-; SI-NNAN: ; %bb.0:
-; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v7, v7
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v6, v6
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v7, v7
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v6, v6
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v4
-; SI-NNAN-NEXT: v_min_f32_e32 v1, v1, v5
-; SI-NNAN-NEXT: v_min_f32_e32 v2, v2, v6
-; SI-NNAN-NEXT: v_min_f32_e32 v3, v3, v7
-; SI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_v4f16:
-; GFX11-SAFE-TRUE16: ; %bb.0:
-; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1.h, v3.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.h, v2.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v0.l, v2.l
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s2, v1.l, v3.l
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.h, v3.h, v1.h, vcc_lo
-; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, s0
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s1
-; GFX11-SAFE-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s2
-; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_v4f16:
-; GFX11-SAFE-FAKE16: ; %bb.0:
-; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v0
-; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v5, v4
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v7, v6
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v3
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-NNAN-LABEL: test_fmin_legacy_ule_v4f16:
-; GFX11-NNAN: ; %bb.0:
-; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NNAN-NEXT: v_pk_min_f16 v0, v0, v2
-; GFX11-NNAN-NEXT: v_pk_min_f16 v1, v1, v3
-; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: test_fmin_legacy_ule_v4f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v3
+; GFX9-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; GFX9-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v7, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v0, v4, v0, s4
+; GFX9-NEXT: v_perm_b32 v1, v6, v1, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmin_legacy_ule_v4f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_lshrrev_b32_e32 v6, 16, v3
+; VI-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v2
+; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v0
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v7, v6
+; VI-NEXT: v_cndmask_b32_e32 v6, v6, v7, vcc
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v5, v4
+; VI-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v3
+; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v2
+; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v4
+; VI-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v6
+; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmin_legacy_ule_v4f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_min_legacy_f32_e32 v0, v4, v0
+; SI-NEXT: v_min_legacy_f32_e32 v1, v5, v1
+; SI-NEXT: v_min_legacy_f32_e32 v2, v6, v2
+; SI-NEXT: v_min_legacy_f32_e32 v3, v7, v3
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_v4f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1.h, v3.h
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v0.h, v2.h
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v0.l, v2.l
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s2, v1.l, v3.l
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, v3.h, v1.h, vcc_lo
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v2.h, v0.h, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v2.l, v0.l, s1
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4)
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v3.l, v1.l, s2
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_v4f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v4, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v6, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v7, 16, v0
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v5, v4
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v4, v4, v5, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v7, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v5, v6, v7, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v2
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v3
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v5, v0, 0x5040100
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v4, v1, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = fcmp ule <4 x half> %a, %b
%val = select <4 x i1> %cmp, <4 x half> %a, <4 x half> %b
ret <4 x half> %val
}
+define <4 x half> @test_fmin_legacy_ule_v4f16_fast(<4 x half> %a, <4 x half> %b) #0 {
+; GFX9-LABEL: test_fmin_legacy_ule_v4f16_fast:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_f16 v0, v0, v2
+; GFX9-NEXT: v_pk_min_f16 v1, v1, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmin_legacy_ule_v4f16_fast:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_min_f16_sdwa v4, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_min_f16_sdwa v5, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_min_f16_e32 v1, v1, v3
+; VI-NEXT: v_min_f16_e32 v0, v0, v2
+; VI-NEXT: v_or_b32_e32 v0, v0, v5
+; VI-NEXT: v_or_b32_e32 v1, v1, v4
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmin_legacy_ule_v4f16_fast:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_min_f32_e32 v0, v0, v4
+; SI-NEXT: v_min_f32_e32 v1, v1, v5
+; SI-NEXT: v_min_f32_e32 v2, v2, v6
+; SI-NEXT: v_min_f32_e32 v3, v3, v7
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_fmin_legacy_ule_v4f16_fast:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_min_f16 v0, v0, v2
+; GFX11-NEXT: v_pk_min_f16 v1, v1, v3
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %cmp = fcmp ule <4 x half> %a, %b
+ %val = select nnan nsz <4 x i1> %cmp, <4 x half> %a, <4 x half> %b
+ ret <4 x half> %val
+}
+
define <8 x half> @test_fmin_legacy_ule_v8f16(<8 x half> %a, <8 x half> %b) #0 {
-; GFX9-SAFE-LABEL: test_fmin_legacy_ule_v8f16:
-; GFX9-SAFE: ; %bb.0:
-; GFX9-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v14, 16, v7
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v15, 16, v3
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v12, 16, v6
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v13, 16, v2
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v15, v14
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v10, 16, v5
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v11, 16, v1
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v13, v12
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v8, 16, v4
-; GFX9-SAFE-NEXT: v_lshrrev_b32_e32 v9, 16, v0
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v11, v10
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v9, v8
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v7
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v2, v6
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v5
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
-; GFX9-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v4
-; GFX9-SAFE-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
-; GFX9-SAFE-NEXT: s_mov_b32 s4, 0x5040100
-; GFX9-SAFE-NEXT: v_perm_b32 v0, v8, v0, s4
-; GFX9-SAFE-NEXT: v_perm_b32 v1, v10, v1, s4
-; GFX9-SAFE-NEXT: v_perm_b32 v2, v12, v2, s4
-; GFX9-SAFE-NEXT: v_perm_b32 v3, v14, v3, s4
-; GFX9-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX9-NNAN-LABEL: test_fmin_legacy_ule_v8f16:
-; GFX9-NNAN: ; %bb.0:
-; GFX9-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NNAN-NEXT: v_pk_min_f16 v0, v0, v4
-; GFX9-NNAN-NEXT: v_pk_min_f16 v1, v1, v5
-; GFX9-NNAN-NEXT: v_pk_min_f16 v2, v2, v6
-; GFX9-NNAN-NEXT: v_pk_min_f16 v3, v3, v7
-; GFX9-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-SAFE-LABEL: test_fmin_legacy_ule_v8f16:
-; VI-SAFE: ; %bb.0:
-; VI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v14, 16, v7
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v15, 16, v3
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v12, 16, v6
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v13, 16, v2
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v15, v14
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v10, 16, v5
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v11, 16, v1
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v13, v12
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v8, 16, v4
-; VI-SAFE-NEXT: v_lshrrev_b32_e32 v9, 16, v0
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v11, v10
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v9, v8
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v7
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v2, v6
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v5
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
-; VI-SAFE-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v4
-; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
-; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v8
-; VI-SAFE-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v10
-; VI-SAFE-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v12
-; VI-SAFE-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 16, v14
-; VI-SAFE-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; VI-NNAN-LABEL: test_fmin_legacy_ule_v8f16:
-; VI-NNAN: ; %bb.0:
-; VI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NNAN-NEXT: v_min_f16_sdwa v8, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_min_f16_sdwa v9, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_min_f16_sdwa v10, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_min_f16_sdwa v11, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NNAN-NEXT: v_min_f16_e32 v3, v3, v7
-; VI-NNAN-NEXT: v_min_f16_e32 v2, v2, v6
-; VI-NNAN-NEXT: v_min_f16_e32 v1, v1, v5
-; VI-NNAN-NEXT: v_min_f16_e32 v0, v0, v4
-; VI-NNAN-NEXT: v_or_b32_e32 v0, v0, v11
-; VI-NNAN-NEXT: v_or_b32_e32 v1, v1, v10
-; VI-NNAN-NEXT: v_or_b32_e32 v2, v2, v9
-; VI-NNAN-NEXT: v_or_b32_e32 v3, v3, v8
-; VI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-SAFE-LABEL: test_fmin_legacy_ule_v8f16:
-; SI-SAFE: ; %bb.0:
-; SI-SAFE-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v7, v7
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v15, v15
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v6, v6
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v14, v14
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v13, v13
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v12, v12
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v11, v11
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v10, v10
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v9, v9
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f16_f32_e32 v8, v8
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v7, v7
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v15, v15
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v6, v6
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v14, v14
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v13, v13
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v12, v12
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v11, v11
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v10, v10
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v9, v9
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-SAFE-NEXT: v_cvt_f32_f16_e32 v8, v8
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v0, v8, v0
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v1, v9, v1
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v2, v10, v2
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v3, v11, v3
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v4, v12, v4
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v5, v13, v5
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v6, v14, v6
-; SI-SAFE-NEXT: v_min_legacy_f32_e32 v7, v15, v7
-; SI-SAFE-NEXT: s_setpc_b64 s[30:31]
-;
-; SI-NNAN-LABEL: test_fmin_legacy_ule_v8f16:
-; SI-NNAN: ; %bb.0:
-; SI-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v15, v15
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v7, v7
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v14, v14
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v6, v6
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v13, v13
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v12, v12
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v11, v11
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v10, v10
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v9, v9
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v8, v8
-; SI-NNAN-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v15, v15
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v7, v7
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v14, v14
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v6, v6
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v13, v13
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v12, v12
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v11, v11
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v3, v3
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v10, v10
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v9, v9
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v8, v8
-; SI-NNAN-NEXT: v_cvt_f32_f16_e32 v0, v0
-; SI-NNAN-NEXT: v_min_f32_e32 v0, v0, v8
-; SI-NNAN-NEXT: v_min_f32_e32 v1, v1, v9
-; SI-NNAN-NEXT: v_min_f32_e32 v2, v2, v10
-; SI-NNAN-NEXT: v_min_f32_e32 v3, v3, v11
-; SI-NNAN-NEXT: v_min_f32_e32 v4, v4, v12
-; SI-NNAN-NEXT: v_min_f32_e32 v5, v5, v13
-; SI-NNAN-NEXT: v_min_f32_e32 v6, v6, v14
-; SI-NNAN-NEXT: v_min_f32_e32 v7, v7, v15
-; SI-NNAN-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-TRUE16-LABEL: test_fmin_legacy_ule_v8f16:
-; GFX11-SAFE-TRUE16: ; %bb.0:
-; GFX11-SAFE-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v4.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v1.h, v5.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v2.h, v6.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s2, v3.h, v7.h
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s3, v0.l, v4.l
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s4, v1.l, v5.l
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s5, v2.l, v6.l
-; GFX11-SAFE-TRUE16-NEXT: v_cmp_ngt_f16_e64 s6, v3.l, v7.l
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v3.h, v7.h, v3.h, s2
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v2.h, v6.h, v2.h, s1
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.h, v5.h, v1.h, s0
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.h, v4.h, v0.h, vcc_lo
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v0.l, v4.l, v0.l, s3
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s4
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v2.l, v6.l, v2.l, s5
-; GFX11-SAFE-TRUE16-NEXT: v_cndmask_b16 v3.l, v7.l, v3.l, s6
-; GFX11-SAFE-TRUE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-SAFE-FAKE16-LABEL: test_fmin_legacy_ule_v8f16:
-; GFX11-SAFE-FAKE16: ; %bb.0:
-; GFX11-SAFE-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v7
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v3
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v6
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v2
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v5
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v11, v10
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4
-; GFX11-SAFE-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v13, v12
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v13, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v15, v14
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v9, v8
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v2, v6
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v4
-; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v2, v11, v2, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v5
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v3, v7
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v1, v12, v1, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc_lo
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v0, v8, v0, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
-; GFX11-SAFE-FAKE16-NEXT: v_perm_b32 v3, v10, v3, 0x5040100
-; GFX11-SAFE-FAKE16-NEXT: s_setpc_b64 s[30:31]
-;
-; GFX11-NNAN-LABEL: test_fmin_legacy_ule_v8f16:
-; GFX11-NNAN: ; %bb.0:
-; GFX11-NNAN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NNAN-NEXT: v_pk_min_f16 v0, v0, v4
-; GFX11-NNAN-NEXT: v_pk_min_f16 v1, v1, v5
-; GFX11-NNAN-NEXT: v_pk_min_f16 v2, v2, v6
-; GFX11-NNAN-NEXT: v_pk_min_f16 v3, v3, v7
-; GFX11-NNAN-NEXT: s_setpc_b64 s[30:31]
+; GFX9-LABEL: test_fmin_legacy_ule_v8f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshrrev_b32_e32 v14, 16, v7
+; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v3
+; GFX9-NEXT: v_lshrrev_b32_e32 v12, 16, v6
+; GFX9-NEXT: v_lshrrev_b32_e32 v13, 16, v2
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v15, v14
+; GFX9-NEXT: v_lshrrev_b32_e32 v10, 16, v5
+; GFX9-NEXT: v_lshrrev_b32_e32 v11, 16, v1
+; GFX9-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v13, v12
+; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v4
+; GFX9-NEXT: v_lshrrev_b32_e32 v9, 16, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v11, v10
+; GFX9-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v9, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v2, v6
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v5
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: s_mov_b32 s4, 0x5040100
+; GFX9-NEXT: v_perm_b32 v0, v8, v0, s4
+; GFX9-NEXT: v_perm_b32 v1, v10, v1, s4
+; GFX9-NEXT: v_perm_b32 v2, v12, v2, s4
+; GFX9-NEXT: v_perm_b32 v3, v14, v3, s4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmin_legacy_ule_v8f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_lshrrev_b32_e32 v14, 16, v7
+; VI-NEXT: v_lshrrev_b32_e32 v15, 16, v3
+; VI-NEXT: v_lshrrev_b32_e32 v12, 16, v6
+; VI-NEXT: v_lshrrev_b32_e32 v13, 16, v2
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v15, v14
+; VI-NEXT: v_lshrrev_b32_e32 v10, 16, v5
+; VI-NEXT: v_lshrrev_b32_e32 v11, 16, v1
+; VI-NEXT: v_cndmask_b32_e32 v14, v14, v15, vcc
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v13, v12
+; VI-NEXT: v_lshrrev_b32_e32 v8, 16, v4
+; VI-NEXT: v_lshrrev_b32_e32 v9, 16, v0
+; VI-NEXT: v_cndmask_b32_e32 v12, v12, v13, vcc
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v11, v10
+; VI-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v9, v8
+; VI-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v3, v7
+; VI-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v2, v6
+; VI-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v1, v5
+; VI-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; VI-NEXT: v_cmp_ngt_f16_e32 vcc, v0, v4
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v8
+; VI-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v10
+; VI-NEXT: v_or_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v12
+; VI-NEXT: v_or_b32_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v14
+; VI-NEXT: v_or_b32_sdwa v3, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmin_legacy_ule_v8f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
+; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v15, v15
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v14
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v13
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v12
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v11
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v10
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v9
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v8
+; SI-NEXT: v_min_legacy_f32_e32 v0, v8, v0
+; SI-NEXT: v_min_legacy_f32_e32 v1, v9, v1
+; SI-NEXT: v_min_legacy_f32_e32 v2, v10, v2
+; SI-NEXT: v_min_legacy_f32_e32 v3, v11, v3
+; SI-NEXT: v_min_legacy_f32_e32 v4, v12, v4
+; SI-NEXT: v_min_legacy_f32_e32 v5, v13, v5
+; SI-NEXT: v_min_legacy_f32_e32 v6, v14, v6
+; SI-NEXT: v_min_legacy_f32_e32 v7, v15, v7
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-TRUE16-LABEL: test_fmin_legacy_ule_v8f16:
+; GFX11-TRUE16: ; %bb.0:
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0.h, v4.h
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s0, v1.h, v5.h
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s1, v2.h, v6.h
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s2, v3.h, v7.h
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s3, v0.l, v4.l
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s4, v1.l, v5.l
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s5, v2.l, v6.l
+; GFX11-TRUE16-NEXT: v_cmp_ngt_f16_e64 s6, v3.l, v7.l
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v3.h, v7.h, v3.h, s2
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.h, v6.h, v2.h, s1
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.h, v5.h, v1.h, s0
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.h, v4.h, v0.h, vcc_lo
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v0.l, v4.l, v0.l, s3
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v1.l, v5.l, v1.l, s4
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v2.l, v6.l, v2.l, s5
+; GFX11-TRUE16-NEXT: v_cndmask_b16 v3.l, v7.l, v3.l, s6
+; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-FAKE16-LABEL: test_fmin_legacy_ule_v8f16:
+; GFX11-FAKE16: ; %bb.0:
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v10, 16, v7
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v11, 16, v3
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v12, 16, v6
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v13, 16, v2
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v14, 16, v5
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v15, 16, v1
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v11, v10
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v8, 16, v4
+; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v9, 16, v0
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v13, v12
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v11, v12, v13, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v15, v14
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v12, v14, v15, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v9, v8
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v8, v8, v9, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v2, v6
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v0, v4
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_perm_b32 v2, v11, v2, 0x5040100
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v1, v5
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc_lo
+; GFX11-FAKE16-NEXT: v_cmp_ngt_f16_e32 vcc_lo, v3, v7
+; GFX11-FAKE16-NEXT: v_perm_b32 v1, v12, v1, 0x5040100
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc_lo
+; GFX11-FAKE16-NEXT: v_perm_b32 v0, v8, v0, 0x5040100
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-FAKE16-NEXT: v_perm_b32 v3, v10, v3, 0x5040100
+; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
%cmp = fcmp ule <8 x half> %a, %b
%val = select <8 x i1> %cmp, <8 x half> %a, <8 x half> %b
ret <8 x half> %val
}
+define <8 x half> @test_fmin_legacy_ule_v8f16_fast(<8 x half> %a, <8 x half> %b) #0 {
+; GFX9-LABEL: test_fmin_legacy_ule_v8f16_fast:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_f16 v0, v0, v4
+; GFX9-NEXT: v_pk_min_f16 v1, v1, v5
+; GFX9-NEXT: v_pk_min_f16 v2, v2, v6
+; GFX9-NEXT: v_pk_min_f16 v3, v3, v7
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: test_fmin_legacy_ule_v8f16_fast:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_min_f16_sdwa v8, v3, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_min_f16_sdwa v9, v2, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_min_f16_sdwa v10, v1, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_min_f16_sdwa v11, v0, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_min_f16_e32 v3, v3, v7
+; VI-NEXT: v_min_f16_e32 v2, v2, v6
+; VI-NEXT: v_min_f16_e32 v1, v1, v5
+; VI-NEXT: v_min_f16_e32 v0, v0, v4
+; VI-NEXT: v_or_b32_e32 v0, v0, v11
+; VI-NEXT: v_or_b32_e32 v1, v1, v10
+; VI-NEXT: v_or_b32_e32 v2, v2, v9
+; VI-NEXT: v_or_b32_e32 v3, v3, v8
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; SI-LABEL: test_fmin_legacy_ule_v8f16_fast:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
+; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
+; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v15, v15
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v14
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v13
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v12
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v11
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v10
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v9
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v8
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_min_f32_e32 v0, v0, v8
+; SI-NEXT: v_min_f32_e32 v1, v1, v9
+; SI-NEXT: v_min_f32_e32 v2, v2, v10
+; SI-NEXT: v_min_f32_e32 v3, v3, v11
+; SI-NEXT: v_min_f32_e32 v4, v4, v12
+; SI-NEXT: v_min_f32_e32 v5, v5, v13
+; SI-NEXT: v_min_f32_e32 v6, v6, v14
+; SI-NEXT: v_min_f32_e32 v7, v7, v15
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: test_fmin_legacy_ule_v8f16_fast:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: v_pk_min_f16 v0, v0, v4
+; GFX11-NEXT: v_pk_min_f16 v1, v1, v5
+; GFX11-NEXT: v_pk_min_f16 v2, v2, v6
+; GFX11-NEXT: v_pk_min_f16 v3, v3, v7
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %cmp = fcmp ule <8 x half> %a, %b
+ %val = select nnan nsz <8 x i1> %cmp, <8 x half> %a, <8 x half> %b
+ ret <8 x half> %val
+}
+
attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll b/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll
index ec4dd85..defcffa 100644
--- a/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmin_legacy.ll
@@ -1,8 +1,6 @@
-; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI-SAFE,GCN,FUNC %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI-NONAN,GCN-NONAN,GCN,FUNC %s
+; RUN: llc -mtriple=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI,GCN,FUNC %s
-; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-SAFE,GCN,FUNC %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-NONAN,GCN-NONAN,GCN,FUNC %s
+; RUN: llc -mtriple=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI,GCN,FUNC %s
; RUN: llc -mtriple=r600 -mcpu=redwood < %s | FileCheck -enable-var-scope --check-prefixes=EG,FUNC %s
@@ -14,13 +12,9 @@ declare i32 @llvm.amdgcn.workitem.id.x() #1
; FUNC-LABEL: {{^}}s_test_fmin_legacy_subreg_inputs_f32:
; EG: MIN *
-; SI-SAFE: v_min_legacy_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
+; SI: v_min_legacy_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
-; SI-NONAN: v_min_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
-
-; VI-SAFE: v_cmp_nlt_f32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-
-; VI-NONAN: v_min_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_cmp_nlt_f32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @s_test_fmin_legacy_subreg_inputs_f32(ptr addrspace(1) %out, <4 x float> %reg0) #0 {
%r0 = extractelement <4 x float> %reg0, i32 0
%r1 = extractelement <4 x float> %reg0, i32 1
@@ -30,22 +24,32 @@ define amdgpu_kernel void @s_test_fmin_legacy_subreg_inputs_f32(ptr addrspace(1)
ret void
}
-; FUNC-LABEL: {{^}}s_test_fmin_legacy_ule_f32:
-; GCN-DAG: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}}
+; FUNC-LABEL: {{^}}s_test_fmin_legacy_subreg_inputs_f32_fast:
-; SI-SAFE: v_mov_b32_e32 [[VA:v[0-9]+]], s[[#LOAD + 2]]
+; SI: v_min_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
-; GCN-NONAN: v_mov_b32_e32 [[VB:v[0-9]+]], s[[#LOAD + 3]]
+; VI: v_min_f32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
+define amdgpu_kernel void @s_test_fmin_legacy_subreg_inputs_f32_fast(ptr addrspace(1) %out, <4 x float> %reg0) #0 {
+ %r0 = extractelement <4 x float> %reg0, i32 0
+ %r1 = extractelement <4 x float> %reg0, i32 1
+ %r2 = fcmp nnan nsz uge float %r0, %r1
+ %r3 = select nnan nsz i1 %r2, float %r1, float %r0
+ store float %r3, ptr addrspace(1) %out
+ ret void
+}
-; VI-SAFE: v_mov_b32_e32 [[VB:v[0-9]+]], s[[#LOAD + 3]]
+; FUNC-LABEL: {{^}}s_test_fmin_legacy_ule_f32:
+; GCN-DAG: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}}
+
+; SI: v_mov_b32_e32 [[VA:v[0-9]+]], s[[#LOAD + 2]]
-; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, s[[#LOAD + 3]], [[VA]]
+; VI: v_mov_b32_e32 [[VB:v[0-9]+]], s[[#LOAD + 3]]
-; VI-SAFE: v_mov_b32_e32 [[VA:v[0-9]+]], s[[#LOAD + 2]]
-; VI-SAFE: v_cmp_ngt_f32_e32 vcc, s[[#LOAD + 2]], [[VB]]
-; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[VB]], [[VA]]
+; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, s[[#LOAD + 3]], [[VA]]
-; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, s[[#LOAD + 2]], [[VB]]
+; VI: v_mov_b32_e32 [[VA:v[0-9]+]], s[[#LOAD + 2]]
+; VI: v_cmp_ngt_f32_e32 vcc, s[[#LOAD + 2]], [[VB]]
+; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[VB]], [[VA]]
define amdgpu_kernel void @s_test_fmin_legacy_ule_f32(ptr addrspace(1) %out, float %a, float %b) #0 {
%cmp = fcmp ule float %a, %b
%val = select i1 %cmp, float %a, float %b
@@ -53,6 +57,19 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32(ptr addrspace(1) %out, flo
ret void
}
+; FUNC-LABEL: {{^}}s_test_fmin_legacy_ule_f32_fast:
+; GCN-DAG: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}}
+
+; GCN: v_mov_b32_e32 [[VB:v[0-9]+]], s[[#LOAD + 3]]
+
+; GCN: v_min_f32_e32 {{v[0-9]+}}, s[[#LOAD + 2]], [[VB]]
+define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_fast(ptr addrspace(1) %out, float %a, float %b) #0 {
+ %cmp = fcmp ule float %a, %b
+ %val = select nnan nsz i1 %cmp, float %a, float %b
+ store float %val, ptr addrspace(1) %out, align 4
+ ret void
+}
+
; Nsz also needed
; FIXME: Should separate tests
; GCN-LABEL: {{^}}s_test_fmin_legacy_ule_f32_nnan_src:
@@ -61,12 +78,10 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32(ptr addrspace(1) %out, flo
; GCN-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0
; GCN-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0
-; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]]
-
-; VI-SAFE: v_cmp_ngt_f32_e32 vcc, [[ADD_A]], [[ADD_B]]
-; VI-SAFE: v_cndmask_b32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]], vcc
+; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]]
-; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]]
+; VI: v_cmp_ngt_f32_e32 vcc, [[ADD_A]], [[ADD_B]]
+; VI: v_cndmask_b32_e32 {{v[0-9]+}}, [[ADD_B]], [[ADD_A]], vcc
define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_nnan_src(ptr addrspace(1) %out, float %a, float %b) #0 {
%a.nnan = fadd nnan float %a, 1.0
%b.nnan = fadd nnan float %b, 2.0
@@ -76,16 +91,32 @@ define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_nnan_src(ptr addrspace(1)
ret void
}
+; Nsz also needed
+; FIXME: Should separate tests
+; GCN-LABEL: {{^}}s_test_fmin_legacy_ule_f32_nnan_src_fast:
+; GCN: s_load_dwordx4 s[[[#LOAD:]]:{{[0-9]+}}], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}}
+
+; GCN-DAG: v_add_f32_e64 [[ADD_A:v[0-9]+]], s[[#LOAD + 2]], 1.0
+; GCN-DAG: v_add_f32_e64 [[ADD_B:v[0-9]+]], s[[#LOAD + 3]], 2.0
+
+; GCN: v_min_f32_e32 {{v[0-9]+}}, [[ADD_A]], [[ADD_B]]
+define amdgpu_kernel void @s_test_fmin_legacy_ule_f32_nnan_src_fast(ptr addrspace(1) %out, float %a, float %b) #0 {
+ %a.nnan = fadd nnan float %a, 1.0
+ %b.nnan = fadd nnan float %b, 2.0
+ %cmp = fcmp ule float %a.nnan, %b.nnan
+ %val = select nnan nsz i1 %cmp, float %a.nnan, float %b.nnan
+ store float %val, ptr addrspace(1) %out, align 4
+ ret void
+}
+
; FUNC-LABEL: {{^}}test_fmin_legacy_ule_f32:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
-; VI-SAFE: v_cmp_ngt_f32_e32 vcc, [[A]], [[B]]
-; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
-
-; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; VI: v_cmp_ngt_f32_e32 vcc, [[A]], [[B]]
+; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
define amdgpu_kernel void @test_fmin_legacy_ule_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid
@@ -100,16 +131,33 @@ define amdgpu_kernel void @test_fmin_legacy_ule_f32(ptr addrspace(1) %out, ptr a
ret void
}
-; FUNC-LABEL: {{^}}test_fmin_legacy_ole_f32:
+; FUNC-LABEL: {{^}}test_fmin_legacy_ule_f32_fast:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+define amdgpu_kernel void @test_fmin_legacy_ule_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid
+ %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1
+
+ %a = load volatile float, ptr addrspace(1) %gep.0, align 4
+ %b = load volatile float, ptr addrspace(1) %gep.1, align 4
-; VI-SAFE: v_cmp_le_f32_e32 vcc, [[A]], [[B]]
-; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
+ %cmp = fcmp ule float %a, %b
+ %val = select nnan nsz i1 %cmp, float %a, float %b
+ store float %val, ptr addrspace(1) %out, align 4
+ ret void
+}
-; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; FUNC-LABEL: {{^}}test_fmin_legacy_ole_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+
+; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+
+; VI: v_cmp_le_f32_e32 vcc, [[A]], [[B]]
+; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
define amdgpu_kernel void @test_fmin_legacy_ole_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid
@@ -124,16 +172,33 @@ define amdgpu_kernel void @test_fmin_legacy_ole_f32(ptr addrspace(1) %out, ptr a
ret void
}
-; FUNC-LABEL: {{^}}test_fmin_legacy_olt_f32:
+; FUNC-LABEL: {{^}}test_fmin_legacy_ole_f32_fast:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+define amdgpu_kernel void @test_fmin_legacy_ole_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid
+ %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1
-; VI-SAFE: v_cmp_lt_f32_e32 vcc, [[A]], [[B]]
-; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
+ %a = load volatile float, ptr addrspace(1) %gep.0, align 4
+ %b = load volatile float, ptr addrspace(1) %gep.1, align 4
+
+ %cmp = fcmp ole float %a, %b
+ %val = select nnan nsz i1 %cmp, float %a, float %b
+ store float %val, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_fmin_legacy_olt_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+
+; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
-; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+; VI: v_cmp_lt_f32_e32 vcc, [[A]], [[B]]
+; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
define amdgpu_kernel void @test_fmin_legacy_olt_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid
@@ -148,16 +213,33 @@ define amdgpu_kernel void @test_fmin_legacy_olt_f32(ptr addrspace(1) %out, ptr a
ret void
}
-; FUNC-LABEL: {{^}}test_fmin_legacy_ult_f32:
+; FUNC-LABEL: {{^}}test_fmin_legacy_olt_f32_fast:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+define amdgpu_kernel void @test_fmin_legacy_olt_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid
+ %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1
-; VI-SAFE: v_cmp_nge_f32_e32 vcc, [[A]], [[B]]
-; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
+ %a = load volatile float, ptr addrspace(1) %gep.0, align 4
+ %b = load volatile float, ptr addrspace(1) %gep.1, align 4
-; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+ %cmp = fcmp olt float %a, %b
+ %val = select nnan nsz i1 %cmp, float %a, float %b
+ store float %val, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_fmin_legacy_ult_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+
+; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+
+; VI: v_cmp_nge_f32_e32 vcc, [[A]], [[B]]
+; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
define amdgpu_kernel void @test_fmin_legacy_ult_f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid
@@ -172,16 +254,33 @@ define amdgpu_kernel void @test_fmin_legacy_ult_f32(ptr addrspace(1) %out, ptr a
ret void
}
-; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v1f32:
+; FUNC-LABEL: {{^}}test_fmin_legacy_ult_f32_fast:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; SI-SAFE: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+define amdgpu_kernel void @test_fmin_legacy_ult_f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %gep.0 = getelementptr float, ptr addrspace(1) %in, i32 %tid
+ %gep.1 = getelementptr float, ptr addrspace(1) %gep.0, i32 1
-; VI-SAFE: v_cmp_nge_f32_e32 vcc, [[A]], [[B]]
-; VI-SAFE: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
+ %a = load volatile float, ptr addrspace(1) %gep.0, align 4
+ %b = load volatile float, ptr addrspace(1) %gep.1, align 4
-; GCN-NONAN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+ %cmp = fcmp ult float %a, %b
+ %val = select nnan nsz i1 %cmp, float %a, float %b
+ store float %val, ptr addrspace(1) %out, align 4
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v1f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+
+; SI: v_min_legacy_f32_e32 {{v[0-9]+}}, [[B]], [[A]]
+
+; VI: v_cmp_nge_f32_e32 vcc, [[A]], [[B]]
+; VI: v_cndmask_b32_e32 v{{[0-9]+}}, [[B]], [[A]]
define amdgpu_kernel void @test_fmin_legacy_ult_v1f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr <1 x float>, ptr addrspace(1) %in, i32 %tid
@@ -196,19 +295,35 @@ define amdgpu_kernel void @test_fmin_legacy_ult_v1f32(ptr addrspace(1) %out, ptr
ret void
}
+; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v1f32_fast:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
+
+; GCN: v_min_f32_e32 {{v[0-9]+}}, [[A]], [[B]]
+define amdgpu_kernel void @test_fmin_legacy_ult_v1f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %gep.0 = getelementptr <1 x float>, ptr addrspace(1) %in, i32 %tid
+ %gep.1 = getelementptr <1 x float>, ptr addrspace(1) %gep.0, i32 1
+
+ %a = load volatile <1 x float>, ptr addrspace(1) %gep.0
+ %b = load volatile <1 x float>, ptr addrspace(1) %gep.1
+
+ %cmp = fcmp ult <1 x float> %a, %b
+ %val = select nnan nsz <1 x i1> %cmp, <1 x float> %a, <1 x float> %b
+ store <1 x float> %val, ptr addrspace(1) %out
+ ret void
+}
+
; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v2f32:
; GCN: {{buffer|flat}}_load_dwordx2
; GCN: {{buffer|flat}}_load_dwordx2
-; SI-SAFE: v_min_legacy_f32_e32
-; SI-SAFE: v_min_legacy_f32_e32
-
-; VI-SAFE: v_cmp_nge_f32_e32
-; VI-SAFE: v_cndmask_b32_e32
-; VI-SAFE: v_cmp_nge_f32_e32
-; VI-SAFE: v_cndmask_b32_e32
+; SI: v_min_legacy_f32_e32
+; SI: v_min_legacy_f32_e32
-; GCN-NONAN: v_min_f32_e32
-; GCN-NONAN: v_min_f32_e32
+; VI: v_cmp_nge_f32_e32
+; VI: v_cndmask_b32_e32
+; VI: v_cmp_nge_f32_e32
+; VI: v_cndmask_b32_e32
define amdgpu_kernel void @test_fmin_legacy_ult_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr <2 x float>, ptr addrspace(1) %in, i32 %tid
@@ -223,25 +338,40 @@ define amdgpu_kernel void @test_fmin_legacy_ult_v2f32(ptr addrspace(1) %out, ptr
ret void
}
+; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v2f32_fast:
+; GCN: {{buffer|flat}}_load_dwordx2
+; GCN: {{buffer|flat}}_load_dwordx2
+
+; GCN: v_min_f32_e32
+; GCN: v_min_f32_e32
+define amdgpu_kernel void @test_fmin_legacy_ult_v2f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %gep.0 = getelementptr <2 x float>, ptr addrspace(1) %in, i32 %tid
+ %gep.1 = getelementptr <2 x float>, ptr addrspace(1) %gep.0, i32 1
+
+ %a = load volatile <2 x float>, ptr addrspace(1) %gep.0
+ %b = load volatile <2 x float>, ptr addrspace(1) %gep.1
+
+ %cmp = fcmp ult <2 x float> %a, %b
+ %val = select nnan nsz <2 x i1> %cmp, <2 x float> %a, <2 x float> %b
+ store <2 x float> %val, ptr addrspace(1) %out
+ ret void
+}
+
; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v3f32:
-; SI-SAFE: v_min_legacy_f32_e32
-; SI-SAFE: v_min_legacy_f32_e32
-; SI-SAFE: v_min_legacy_f32_e32
-; SI-SAFE-NOT: v_min_
-
-; VI-SAFE: v_cmp_nge_f32_e32
-; VI-SAFE: v_cndmask_b32_e32
-; VI-SAFE: v_cmp_nge_f32_e32
-; VI-SAFE: v_cndmask_b32_e32
-; VI-SAFE: v_cmp_nge_f32_e32
-; VI-SAFE: v_cndmask_b32_e32
+; SI: v_min_legacy_f32_e32
+; SI: v_min_legacy_f32_e32
+; SI: v_min_legacy_f32_e32
+; SI-NOT: v_min_
+
+; VI: v_cmp_nge_f32_e32
+; VI: v_cndmask_b32_e32
+; VI: v_cmp_nge_f32_e32
+; VI: v_cndmask_b32_e32
+; VI: v_cmp_nge_f32_e32
+; VI: v_cndmask_b32_e32
; VI-NOT: v_cmp
; VI-NOT: v_cndmask
-
-; GCN-NONAN: v_min_f32_e32
-; GCN-NONAN: v_min_f32_e32
-; GCN-NONAN: v_min_f32_e32
-; GCN-NONAN-NOT: v_min_
define amdgpu_kernel void @test_fmin_legacy_ult_v3f32(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x() #1
%gep.0 = getelementptr <3 x float>, ptr addrspace(1) %in, i32 %tid
@@ -256,6 +386,28 @@ define amdgpu_kernel void @test_fmin_legacy_ult_v3f32(ptr addrspace(1) %out, ptr
ret void
}
+; FUNC-LABEL: {{^}}test_fmin_legacy_ult_v3f32_fast:
+; VI-NOT: v_cmp
+; VI-NOT: v_cndmask
+
+; GCN: v_min_f32_e32
+; GCN: v_min_f32_e32
+; GCN: v_min_f32_e32
+; GCN-NOT: v_min_
+define amdgpu_kernel void @test_fmin_legacy_ult_v3f32_fast(ptr addrspace(1) %out, ptr addrspace(1) %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
+ %gep.0 = getelementptr <3 x float>, ptr addrspace(1) %in, i32 %tid
+ %gep.1 = getelementptr <3 x float>, ptr addrspace(1) %gep.0, i32 1
+
+ %a = load <3 x float>, ptr addrspace(1) %gep.0
+ %b = load <3 x float>, ptr addrspace(1) %gep.1
+
+ %cmp = fcmp ult <3 x float> %a, %b
+ %val = select nnan nsz <3 x i1> %cmp, <3 x float> %a, <3 x float> %b
+ store <3 x float> %val, ptr addrspace(1) %out
+ ret void
+}
+
; FUNC-LABEL: {{^}}test_fmin_legacy_ole_f32_multi_use:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
diff --git a/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvinsve0.ll b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvinsve0.ll
new file mode 100644
index 0000000..e1784f8
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/lasx/ir-instruction/shuffle-as-xvinsve0.ll
@@ -0,0 +1,197 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc --mtriple=loongarch32 --mattr=+32s,+lasx < %s | FileCheck %s
+; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
+
+;; xvinsve0.w
+define void @xvinsve0_v8i32_l_0(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v8i32_l_0:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 0
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i32>, ptr %a
+ %vb = load <8 x i32>, ptr %b
+ %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> <i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ store <8 x i32> %vc, ptr %d
+ ret void
+}
+
+define void @xvinsve0_v8i32_l_4(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v8i32_l_4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 4
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i32>, ptr %a
+ %vb = load <8 x i32>, ptr %b
+ %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 5, i32 6, i32 7>
+ store <8 x i32> %vc, ptr %d
+ ret void
+}
+
+define void @xvinsve0_v8f32_l(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v8f32_l:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.w $xr0, $xr1, 0
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x float>, ptr %a
+ %vb = load <8 x float>, ptr %b
+ %vc = shufflevector <8 x float> %va, <8 x float> %vb, <8 x i32> <i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ store <8 x float> %vc, ptr %d
+ ret void
+}
+
+define void @xvinsve0_v8i32_h_1(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v8i32_h_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 1
+; CHECK-NEXT: xvst $xr1, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i32>, ptr %a
+ %vb = load <8 x i32>, ptr %b
+ %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> <i32 8, i32 0, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ store <8 x i32> %vc, ptr %d
+ ret void
+}
+
+define void @xvinsve0_v8i32_h_6(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v8i32_h_6:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 6
+; CHECK-NEXT: xvst $xr1, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x i32>, ptr %a
+ %vb = load <8 x i32>, ptr %b
+ %vc = shufflevector <8 x i32> %va, <8 x i32> %vb, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 0, i32 15>
+ store <8 x i32> %vc, ptr %d
+ ret void
+}
+
+define void @xvinsve0_v8f32_h(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v8f32_h:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.w $xr1, $xr0, 0
+; CHECK-NEXT: xvst $xr1, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <8 x float>, ptr %a
+ %vb = load <8 x float>, ptr %b
+ %vc = shufflevector <8 x float> %va, <8 x float> %vb, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ store <8 x float> %vc, ptr %d
+ ret void
+}
+
+;; xvinsve0.d
+define void @xvinsve0_v4i64_l_1(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v4i64_l_1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 1
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i64>, ptr %a
+ %vb = load <4 x i64>, ptr %b
+ %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> <i32 0, i32 4, i32 2, i32 3>
+ store <4 x i64> %vc, ptr %d
+ ret void
+}
+
+define void @xvinsve0_v4i64_l_2(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v4i64_l_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 2
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i64>, ptr %a
+ %vb = load <4 x i64>, ptr %b
+ %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> <i32 0, i32 1, i32 4, i32 3>
+ store <4 x i64> %vc, ptr %d
+ ret void
+}
+
+define void @xvinsve0_v4f64_l(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v4f64_l:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.d $xr0, $xr1, 0
+; CHECK-NEXT: xvst $xr0, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x double>, ptr %a
+ %vb = load <4 x double>, ptr %b
+ %vc = shufflevector <4 x double> %va, <4 x double> %vb, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
+ store <4 x double> %vc, ptr %d
+ ret void
+}
+
+define void @xvinsve0_v4i64_h_0(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v4i64_h_0:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 0
+; CHECK-NEXT: xvst $xr1, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i64>, ptr %a
+ %vb = load <4 x i64>, ptr %b
+ %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+ store <4 x i64> %vc, ptr %d
+ ret void
+}
+
+define void @xvinsve0_v4i64_h_2(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v4i64_h_2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 2
+; CHECK-NEXT: xvst $xr1, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x i64>, ptr %a
+ %vb = load <4 x i64>, ptr %b
+ %vc = shufflevector <4 x i64> %va, <4 x i64> %vb, <4 x i32> <i32 4, i32 5, i32 0, i32 7>
+ store <4 x i64> %vc, ptr %d
+ ret void
+}
+
+define void @xvinsve0_v4f64_h(ptr %d, ptr %a, ptr %b) nounwind {
+; CHECK-LABEL: xvinsve0_v4f64_h:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvld $xr0, $a1, 0
+; CHECK-NEXT: xvld $xr1, $a2, 0
+; CHECK-NEXT: xvinsve0.d $xr1, $xr0, 0
+; CHECK-NEXT: xvst $xr1, $a0, 0
+; CHECK-NEXT: ret
+entry:
+ %va = load <4 x double>, ptr %a
+ %vb = load <4 x double>, ptr %b
+ %vc = shufflevector <4 x double> %va, <4 x double> %vb, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
+ store <4 x double> %vc, ptr %d
+ ret void
+}
diff --git a/llvm/test/CodeGen/NVPTX/bug22322.ll b/llvm/test/CodeGen/NVPTX/bug22322.ll
index 055c512..71e180b 100644
--- a/llvm/test/CodeGen/NVPTX/bug22322.ll
+++ b/llvm/test/CodeGen/NVPTX/bug22322.ll
@@ -20,12 +20,12 @@ _ZL11compute_vecRK6float3jb.exit:
call void @llvm.lifetime.start.p0(i64 4, ptr %ret_vec.sroa.8.i)
%6 = and i32 %4, 15
%7 = icmp eq i32 %6, 0
- %8 = select i1 %7, float 0.000000e+00, float -1.000000e+00
+ %8 = select nnan nsz i1 %7, float 0.000000e+00, float -1.000000e+00
store float %8, ptr %ret_vec.sroa.8.i, align 4
; CHECK: max.f32 %r{{[0-9]+}}, %r{{[0-9]+}}, 0f00000000
%9 = fcmp olt float %8, 0.000000e+00
%ret_vec.sroa.8.i.val = load float, ptr %ret_vec.sroa.8.i, align 4
- %10 = select i1 %9, float 0.000000e+00, float %ret_vec.sroa.8.i.val
+ %10 = select nnan nsz i1 %9, float 0.000000e+00, float %ret_vec.sroa.8.i.val
call void @llvm.lifetime.end.p0(i64 4, ptr %ret_vec.sroa.8.i)
%11 = getelementptr inbounds %class.float3, ptr %dst, i64 %5, i32 0
store float 0.000000e+00, ptr %11, align 4
@@ -51,7 +51,7 @@ declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #2
; Function Attrs: nounwind
declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #2
-attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "no-signed-zeros-fp-math"="true" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/PowerPC/scalar-min-max.ll b/llvm/test/CodeGen/PowerPC/scalar-min-max.ll
index 216d498..5f637e3 100644
--- a/llvm/test/CodeGen/PowerPC/scalar-min-max.ll
+++ b/llvm/test/CodeGen/PowerPC/scalar-min-max.ll
@@ -1,36 +1,23 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names --enable-unsafe-fp-math \
-; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \
-; RUN: --enable-no-nans-fp-math \
-; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s
-; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names --enable-unsafe-fp-math \
-; RUN: -verify-machineinstrs --enable-no-signed-zeros-fp-math \
-; RUN: --enable-no-nans-fp-math \
-; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s
; RUN: llc -mcpu=pwr9 -ppc-asm-full-reg-names -verify-machineinstrs \
; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN: --check-prefix=NO-FAST-P9
+; RUN: --check-prefix=P9
; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names -verify-machineinstrs \
; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s \
-; RUN: --check-prefix=NO-FAST-P8
+; RUN: --check-prefix=P8
define dso_local float @testfmax(float %a, float %b) local_unnamed_addr {
-; CHECK-LABEL: testfmax:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xsmaxdp f1, f1, f2
-; CHECK-NEXT: blr
-;
-; NO-FAST-P9-LABEL: testfmax:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xsmaxcdp f1, f1, f2
-; NO-FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: testfmax:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: bgtlr cr0
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f2
-; NO-FAST-P8-NEXT: blr
+; P9-LABEL: testfmax:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xsmaxcdp f1, f1, f2
+; P9-NEXT: blr
+;
+; P8-LABEL: testfmax:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: bgtlr cr0
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f1, f2
+; P8-NEXT: blr
entry:
%cmp = fcmp ogt float %a, %b
%cond = select i1 %cmp, float %a, float %b
@@ -38,23 +25,18 @@ entry:
}
define dso_local double @testdmax(double %a, double %b) local_unnamed_addr {
-; CHECK-LABEL: testdmax:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xsmaxdp f1, f1, f2
-; CHECK-NEXT: blr
-;
-; NO-FAST-P9-LABEL: testdmax:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xsmaxcdp f1, f1, f2
-; NO-FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: testdmax:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT: bgtlr cr0
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f2
-; NO-FAST-P8-NEXT: blr
+; P9-LABEL: testdmax:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xsmaxcdp f1, f1, f2
+; P9-NEXT: blr
+;
+; P8-LABEL: testdmax:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xscmpudp cr0, f1, f2
+; P8-NEXT: bgtlr cr0
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f1, f2
+; P8-NEXT: blr
entry:
%cmp = fcmp ogt double %a, %b
%cond = select i1 %cmp, double %a, double %b
@@ -62,23 +44,18 @@ entry:
}
define dso_local float @testfmin(float %a, float %b) local_unnamed_addr {
-; CHECK-LABEL: testfmin:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xsmindp f1, f1, f2
-; CHECK-NEXT: blr
-;
-; NO-FAST-P9-LABEL: testfmin:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xsmincdp f1, f1, f2
-; NO-FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: testfmin:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: fcmpu cr0, f1, f2
-; NO-FAST-P8-NEXT: bltlr cr0
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f2
-; NO-FAST-P8-NEXT: blr
+; P9-LABEL: testfmin:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xsmincdp f1, f1, f2
+; P9-NEXT: blr
+;
+; P8-LABEL: testfmin:
+; P8: # %bb.0: # %entry
+; P8-NEXT: fcmpu cr0, f1, f2
+; P8-NEXT: bltlr cr0
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f1, f2
+; P8-NEXT: blr
entry:
%cmp = fcmp olt float %a, %b
%cond = select i1 %cmp, float %a, float %b
@@ -86,23 +63,18 @@ entry:
}
define dso_local double @testdmin(double %a, double %b) local_unnamed_addr {
-; CHECK-LABEL: testdmin:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xsmindp f1, f1, f2
-; CHECK-NEXT: blr
-;
-; NO-FAST-P9-LABEL: testdmin:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xsmincdp f1, f1, f2
-; NO-FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: testdmin:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xscmpudp cr0, f1, f2
-; NO-FAST-P8-NEXT: bltlr cr0
-; NO-FAST-P8-NEXT: # %bb.1: # %entry
-; NO-FAST-P8-NEXT: fmr f1, f2
-; NO-FAST-P8-NEXT: blr
+; P9-LABEL: testdmin:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xsmincdp f1, f1, f2
+; P9-NEXT: blr
+;
+; P8-LABEL: testdmin:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xscmpudp cr0, f1, f2
+; P8-NEXT: bltlr cr0
+; P8-NEXT: # %bb.1: # %entry
+; P8-NEXT: fmr f1, f2
+; P8-NEXT: blr
entry:
%cmp = fcmp olt double %a, %b
%cond = select i1 %cmp, double %a, double %b
@@ -110,86 +82,62 @@ entry:
}
define dso_local float @testfmax_fast(float %a, float %b) local_unnamed_addr {
-; CHECK-LABEL: testfmax_fast:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xsmaxdp f1, f1, f2
-; CHECK-NEXT: blr
-;
-; NO-FAST-P9-LABEL: testfmax_fast:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xsmaxcdp f1, f1, f2
-; NO-FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: testfmax_fast:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubsp f0, f2, f1
-; NO-FAST-P8-NEXT: fsel f1, f0, f2, f1
-; NO-FAST-P8-NEXT: blr
+; P9-LABEL: testfmax_fast:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xsmaxdp f1, f1, f2
+; P9-NEXT: blr
+;
+; P8-LABEL: testfmax_fast:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xsmaxdp f1, f1, f2
+; P8-NEXT: blr
entry:
%cmp = fcmp nnan ninf ogt float %a, %b
- %cond = select i1 %cmp, float %a, float %b
+ %cond = select nnan nsz i1 %cmp, float %a, float %b
ret float %cond
}
define dso_local double @testdmax_fast(double %a, double %b) local_unnamed_addr {
-; CHECK-LABEL: testdmax_fast:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xsmaxdp f1, f1, f2
-; CHECK-NEXT: blr
-;
-; NO-FAST-P9-LABEL: testdmax_fast:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xsmaxcdp f1, f1, f2
-; NO-FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: testdmax_fast:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubdp f0, f2, f1
-; NO-FAST-P8-NEXT: fsel f1, f0, f2, f1
-; NO-FAST-P8-NEXT: blr
+; P9-LABEL: testdmax_fast:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xsmaxdp f1, f1, f2
+; P9-NEXT: blr
+;
+; P8-LABEL: testdmax_fast:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xsmaxdp f1, f1, f2
+; P8-NEXT: blr
entry:
%cmp = fcmp nnan ninf ogt double %a, %b
- %cond = select i1 %cmp, double %a, double %b
+ %cond = select nnan nsz i1 %cmp, double %a, double %b
ret double %cond
}
define dso_local float @testfmin_fast(float %a, float %b) local_unnamed_addr {
-; CHECK-LABEL: testfmin_fast:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xsmindp f1, f1, f2
-; CHECK-NEXT: blr
-;
-; NO-FAST-P9-LABEL: testfmin_fast:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xsmincdp f1, f1, f2
-; NO-FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: testfmin_fast:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubsp f0, f1, f2
-; NO-FAST-P8-NEXT: fsel f1, f0, f2, f1
-; NO-FAST-P8-NEXT: blr
+; P9-LABEL: testfmin_fast:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xsmindp f1, f1, f2
+; P9-NEXT: blr
+;
+; P8-LABEL: testfmin_fast:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xsmindp f1, f1, f2
+; P8-NEXT: blr
entry:
%cmp = fcmp nnan ninf olt float %a, %b
- %cond = select i1 %cmp, float %a, float %b
+ %cond = select nnan nsz i1 %cmp, float %a, float %b
ret float %cond
}
define dso_local double @testdmin_fast(double %a, double %b) local_unnamed_addr {
-; CHECK-LABEL: testdmin_fast:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: xsmindp f1, f1, f2
-; CHECK-NEXT: blr
-;
-; NO-FAST-P9-LABEL: testdmin_fast:
-; NO-FAST-P9: # %bb.0: # %entry
-; NO-FAST-P9-NEXT: xsmincdp f1, f1, f2
-; NO-FAST-P9-NEXT: blr
-;
-; NO-FAST-P8-LABEL: testdmin_fast:
-; NO-FAST-P8: # %bb.0: # %entry
-; NO-FAST-P8-NEXT: xssubdp f0, f1, f2
-; NO-FAST-P8-NEXT: fsel f1, f0, f2, f1
-; NO-FAST-P8-NEXT: blr
+; P9-LABEL: testdmin_fast:
+; P9: # %bb.0: # %entry
+; P9-NEXT: xsmindp f1, f1, f2
+; P9-NEXT: blr
+;
+; P8-LABEL: testdmin_fast:
+; P8: # %bb.0: # %entry
+; P8-NEXT: xsmindp f1, f1, f2
+; P8-NEXT: blr
entry:
%cmp = fcmp nnan ninf olt double %a, %b
- %cond = select i1 %cmp, double %a, double %b
+ %cond = select nnan nsz i1 %cmp, double %a, double %b
ret double %cond
}
diff --git a/llvm/test/CodeGen/RISCV/select-bare.ll b/llvm/test/CodeGen/RISCV/select-bare.ll
index 796121a..44028a7 100644
--- a/llvm/test/CodeGen/RISCV/select-bare.ll
+++ b/llvm/test/CodeGen/RISCV/select-bare.ll
@@ -26,8 +26,8 @@ define i32 @bare_select(i1 %a, i32 %b, i32 %c) nounwind {
; RV32IXQCI-LABEL: bare_select:
; RV32IXQCI: # %bb.0:
; RV32IXQCI-NEXT: andi a0, a0, 1
-; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
%1 = select i1 %a, i32 %b, i32 %c
ret i32 %1
@@ -53,8 +53,8 @@ define float @bare_select_float(i1 %a, float %b, float %c) nounwind {
; RV32IXQCI-LABEL: bare_select_float:
; RV32IXQCI: # %bb.0:
; RV32IXQCI-NEXT: andi a0, a0, 1
-; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
%1 = select i1 %a, float %b, float %c
ret float %1
diff --git a/llvm/test/CodeGen/RISCV/select-cc.ll b/llvm/test/CodeGen/RISCV/select-cc.ll
index 14055df..b57f625 100644
--- a/llvm/test/CodeGen/RISCV/select-cc.ll
+++ b/llvm/test/CodeGen/RISCV/select-cc.ll
@@ -87,40 +87,40 @@ define signext i32 @foo(i32 signext %a, ptr %b) nounwind {
;
; RV32IXQCI-LABEL: foo:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: lw a5, 0(a1)
; RV32IXQCI-NEXT: lw a2, 0(a1)
; RV32IXQCI-NEXT: lw a4, 0(a1)
; RV32IXQCI-NEXT: lw t5, 0(a1)
; RV32IXQCI-NEXT: lw t4, 0(a1)
+; RV32IXQCI-NEXT: lw t3, 0(a1)
; RV32IXQCI-NEXT: lw t2, 0(a1)
-; RV32IXQCI-NEXT: lw t1, 0(a1)
; RV32IXQCI-NEXT: lw t0, 0(a1)
; RV32IXQCI-NEXT: lw a7, 0(a1)
; RV32IXQCI-NEXT: lw a6, 0(a1)
-; RV32IXQCI-NEXT: lw t3, 0(a1)
; RV32IXQCI-NEXT: lw a3, 0(a1)
-; RV32IXQCI-NEXT: bltz t3, .LBB0_2
+; RV32IXQCI-NEXT: lw t1, 0(a1)
+; RV32IXQCI-NEXT: lw a5, 0(a1)
+; RV32IXQCI-NEXT: bltz t1, .LBB0_2
; RV32IXQCI-NEXT: # %bb.1:
-; RV32IXQCI-NEXT: li t6, 0
-; RV32IXQCI-NEXT: qc.mveq a5, a0, a5, a0
-; RV32IXQCI-NEXT: qc.mvne a2, a5, a2, a5
-; RV32IXQCI-NEXT: qc.mvltu a4, a4, a2, a2
-; RV32IXQCI-NEXT: qc.mvgeu t5, a4, t5, a4
-; RV32IXQCI-NEXT: qc.mvltu t4, t5, t4, t5
-; RV32IXQCI-NEXT: qc.mvgeu t2, t2, t4, t4
-; RV32IXQCI-NEXT: qc.mvlt t1, t1, t2, t2
-; RV32IXQCI-NEXT: qc.mvge t0, t1, t0, t1
-; RV32IXQCI-NEXT: qc.mvlt a7, t0, a7, t0
-; RV32IXQCI-NEXT: qc.mvge a6, a6, a7, a7
-; RV32IXQCI-NEXT: mv a3, t3
-; RV32IXQCI-NEXT: qc.mvge a3, t6, t3, a6
+; RV32IXQCI-NEXT: li a5, 0
+; RV32IXQCI-NEXT: qc.mveq a2, a0, a2, a0
+; RV32IXQCI-NEXT: qc.mvne a4, a2, a4, a2
+; RV32IXQCI-NEXT: qc.mvltu t5, t5, a4, a4
+; RV32IXQCI-NEXT: qc.mvgeu t4, t5, t4, t5
+; RV32IXQCI-NEXT: qc.mvltu t3, t4, t3, t4
+; RV32IXQCI-NEXT: qc.mvgeu t2, t2, t3, t3
+; RV32IXQCI-NEXT: qc.mvlt t0, t0, t2, t2
+; RV32IXQCI-NEXT: qc.mvge a7, t0, a7, t0
+; RV32IXQCI-NEXT: qc.mvlt a6, a7, a6, a7
+; RV32IXQCI-NEXT: qc.mvge a3, a3, a6, a6
+; RV32IXQCI-NEXT: qc.mvlt a3, a5, t1, t1
+; RV32IXQCI-NEXT: mv a5, a3
; RV32IXQCI-NEXT: .LBB0_2:
; RV32IXQCI-NEXT: lw a2, 0(a1)
; RV32IXQCI-NEXT: lw a0, 0(a1)
; RV32IXQCI-NEXT: li a1, 1024
-; RV32IXQCI-NEXT: qc.mvlt a2, a1, a2, a3
+; RV32IXQCI-NEXT: qc.mvlt a2, a1, a2, a5
; RV32IXQCI-NEXT: li a1, 2046
-; RV32IXQCI-NEXT: qc.mvltu a0, a1, t3, a2
+; RV32IXQCI-NEXT: qc.mvltu a0, a1, t1, a2
; RV32IXQCI-NEXT: ret
;
; RV64I-LABEL: foo:
@@ -417,8 +417,8 @@ define i32 @select_sge_int16min(i32 signext %x, i32 signext %y, i32 signext %z)
; RV32IXQCI: # %bb.0:
; RV32IXQCI-NEXT: lui a3, 1048560
; RV32IXQCI-NEXT: addi a3, a3, -1
-; RV32IXQCI-NEXT: qc.mvlt a2, a3, a0, a1
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.mvge a1, a3, a0, a2
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64I-LABEL: select_sge_int16min:
@@ -471,10 +471,10 @@ define i64 @select_sge_int32min(i64 %x, i64 %y, i64 %z) {
; RV32IXQCI-NEXT: srli a0, a1, 31
; RV32IXQCI-NEXT: xori a0, a0, 1
; RV32IXQCI-NEXT: qc.mveqi a0, a1, -1, a6
-; RV32IXQCI-NEXT: qc.mvnei a4, a0, 0, a2
-; RV32IXQCI-NEXT: qc.mvnei a5, a0, 0, a3
-; RV32IXQCI-NEXT: mv a0, a4
-; RV32IXQCI-NEXT: mv a1, a5
+; RV32IXQCI-NEXT: qc.mveqi a2, a0, 0, a4
+; RV32IXQCI-NEXT: qc.mveqi a3, a0, 0, a5
+; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: mv a1, a3
; RV32IXQCI-NEXT: ret
;
; RV64I-LABEL: select_sge_int32min:
diff --git a/llvm/test/CodeGen/RISCV/select-cond.ll b/llvm/test/CodeGen/RISCV/select-cond.ll
index b88fe9a..3ca0f46 100644
--- a/llvm/test/CodeGen/RISCV/select-cond.ll
+++ b/llvm/test/CodeGen/RISCV/select-cond.ll
@@ -35,8 +35,8 @@ define signext i32 @select_i32_trunc(i32 signext %cond, i32 signext %x, i32 sign
; RV32-XQCICM-LABEL: select_i32_trunc:
; RV32-XQCICM: # %bb.0:
; RV32-XQCICM-NEXT: andi a0, a0, 1
-; RV32-XQCICM-NEXT: qc.mvnei a2, a0, 0, a1
-; RV32-XQCICM-NEXT: mv a0, a2
+; RV32-XQCICM-NEXT: qc.mveqi a1, a0, 0, a2
+; RV32-XQCICM-NEXT: mv a0, a1
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i32_trunc:
@@ -48,8 +48,8 @@ define signext i32 @select_i32_trunc(i32 signext %cond, i32 signext %x, i32 sign
; RV32IXQCI-LABEL: select_i32_trunc:
; RV32IXQCI: # %bb.0:
; RV32IXQCI-NEXT: andi a0, a0, 1
-; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i32_trunc:
@@ -93,8 +93,8 @@ define signext i32 @select_i32_param(i1 signext %cond, i32 signext %x, i32 signe
; RV32-XQCICM-LABEL: select_i32_param:
; RV32-XQCICM: # %bb.0:
; RV32-XQCICM-NEXT: andi a0, a0, 1
-; RV32-XQCICM-NEXT: qc.mvnei a2, a0, 0, a1
-; RV32-XQCICM-NEXT: mv a0, a2
+; RV32-XQCICM-NEXT: qc.mveqi a1, a0, 0, a2
+; RV32-XQCICM-NEXT: mv a0, a1
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i32_param:
@@ -106,8 +106,8 @@ define signext i32 @select_i32_param(i1 signext %cond, i32 signext %x, i32 signe
; RV32IXQCI-LABEL: select_i32_param:
; RV32IXQCI: # %bb.0:
; RV32IXQCI-NEXT: andi a0, a0, 1
-; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i32_param:
@@ -148,8 +148,8 @@ define signext i32 @select_i32_eq(i32 signext %a, i32 signext %b, i32 signext %x
;
; RV32-XQCICM-LABEL: select_i32_eq:
; RV32-XQCICM: # %bb.0:
-; RV32-XQCICM-NEXT: qc.mveq a3, a0, a1, a2
-; RV32-XQCICM-NEXT: mv a0, a3
+; RV32-XQCICM-NEXT: qc.mvne a2, a0, a1, a3
+; RV32-XQCICM-NEXT: mv a0, a2
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i32_eq:
@@ -163,8 +163,8 @@ define signext i32 @select_i32_eq(i32 signext %a, i32 signext %b, i32 signext %x
;
; RV32IXQCI-LABEL: select_i32_eq:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: qc.mveq a3, a0, a1, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvne a2, a0, a1, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i32_eq:
@@ -205,8 +205,8 @@ define signext i32 @select_i32_ne(i32 signext %a, i32 signext %b, i32 signext %x
;
; RV32-XQCICM-LABEL: select_i32_ne:
; RV32-XQCICM: # %bb.0:
-; RV32-XQCICM-NEXT: qc.mvne a3, a0, a1, a2
-; RV32-XQCICM-NEXT: mv a0, a3
+; RV32-XQCICM-NEXT: qc.mveq a2, a0, a1, a3
+; RV32-XQCICM-NEXT: mv a0, a2
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i32_ne:
@@ -220,8 +220,8 @@ define signext i32 @select_i32_ne(i32 signext %a, i32 signext %b, i32 signext %x
;
; RV32IXQCI-LABEL: select_i32_ne:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: qc.mvne a3, a0, a1, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mveq a2, a0, a1, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i32_ne:
@@ -262,8 +262,8 @@ define signext i32 @select_i32_ugt(i32 signext %a, i32 signext %b, i32 signext %
;
; RV32-XQCICM-LABEL: select_i32_ugt:
; RV32-XQCICM: # %bb.0:
-; RV32-XQCICM-NEXT: qc.mvltu a3, a1, a0, a2
-; RV32-XQCICM-NEXT: mv a0, a3
+; RV32-XQCICM-NEXT: qc.mvgeu a2, a1, a0, a3
+; RV32-XQCICM-NEXT: mv a0, a2
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i32_ugt:
@@ -277,8 +277,8 @@ define signext i32 @select_i32_ugt(i32 signext %a, i32 signext %b, i32 signext %
;
; RV32IXQCI-LABEL: select_i32_ugt:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: qc.mvltu a3, a1, a0, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvgeu a2, a1, a0, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i32_ugt:
@@ -319,8 +319,8 @@ define signext i32 @select_i32_uge(i32 signext %a, i32 signext %b, i32 signext %
;
; RV32-XQCICM-LABEL: select_i32_uge:
; RV32-XQCICM: # %bb.0:
-; RV32-XQCICM-NEXT: qc.mvgeu a3, a0, a1, a2
-; RV32-XQCICM-NEXT: mv a0, a3
+; RV32-XQCICM-NEXT: qc.mvltu a2, a0, a1, a3
+; RV32-XQCICM-NEXT: mv a0, a2
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i32_uge:
@@ -334,8 +334,8 @@ define signext i32 @select_i32_uge(i32 signext %a, i32 signext %b, i32 signext %
;
; RV32IXQCI-LABEL: select_i32_uge:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: qc.mvgeu a3, a0, a1, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvltu a2, a0, a1, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i32_uge:
@@ -376,8 +376,8 @@ define signext i32 @select_i32_ult(i32 signext %a, i32 signext %b, i32 signext %
;
; RV32-XQCICM-LABEL: select_i32_ult:
; RV32-XQCICM: # %bb.0:
-; RV32-XQCICM-NEXT: qc.mvltu a3, a0, a1, a2
-; RV32-XQCICM-NEXT: mv a0, a3
+; RV32-XQCICM-NEXT: qc.mvgeu a2, a0, a1, a3
+; RV32-XQCICM-NEXT: mv a0, a2
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i32_ult:
@@ -391,8 +391,8 @@ define signext i32 @select_i32_ult(i32 signext %a, i32 signext %b, i32 signext %
;
; RV32IXQCI-LABEL: select_i32_ult:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: qc.mvltu a3, a0, a1, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvgeu a2, a0, a1, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i32_ult:
@@ -433,8 +433,8 @@ define signext i32 @select_i32_ule(i32 signext %a, i32 signext %b, i32 signext %
;
; RV32-XQCICM-LABEL: select_i32_ule:
; RV32-XQCICM: # %bb.0:
-; RV32-XQCICM-NEXT: qc.mvgeu a3, a1, a0, a2
-; RV32-XQCICM-NEXT: mv a0, a3
+; RV32-XQCICM-NEXT: qc.mvltu a2, a1, a0, a3
+; RV32-XQCICM-NEXT: mv a0, a2
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i32_ule:
@@ -448,8 +448,8 @@ define signext i32 @select_i32_ule(i32 signext %a, i32 signext %b, i32 signext %
;
; RV32IXQCI-LABEL: select_i32_ule:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: qc.mvgeu a3, a1, a0, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvltu a2, a1, a0, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i32_ule:
@@ -490,8 +490,8 @@ define signext i32 @select_i32_sgt(i32 signext %a, i32 signext %b, i32 signext %
;
; RV32-XQCICM-LABEL: select_i32_sgt:
; RV32-XQCICM: # %bb.0:
-; RV32-XQCICM-NEXT: qc.mvlt a3, a1, a0, a2
-; RV32-XQCICM-NEXT: mv a0, a3
+; RV32-XQCICM-NEXT: qc.mvge a2, a1, a0, a3
+; RV32-XQCICM-NEXT: mv a0, a2
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i32_sgt:
@@ -505,8 +505,8 @@ define signext i32 @select_i32_sgt(i32 signext %a, i32 signext %b, i32 signext %
;
; RV32IXQCI-LABEL: select_i32_sgt:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: qc.mvlt a3, a1, a0, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvge a2, a1, a0, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i32_sgt:
@@ -547,8 +547,8 @@ define signext i32 @select_i32_sge(i32 signext %a, i32 signext %b, i32 signext %
;
; RV32-XQCICM-LABEL: select_i32_sge:
; RV32-XQCICM: # %bb.0:
-; RV32-XQCICM-NEXT: qc.mvge a3, a0, a1, a2
-; RV32-XQCICM-NEXT: mv a0, a3
+; RV32-XQCICM-NEXT: qc.mvlt a2, a0, a1, a3
+; RV32-XQCICM-NEXT: mv a0, a2
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i32_sge:
@@ -562,8 +562,8 @@ define signext i32 @select_i32_sge(i32 signext %a, i32 signext %b, i32 signext %
;
; RV32IXQCI-LABEL: select_i32_sge:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: qc.mvge a3, a0, a1, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvlt a2, a0, a1, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i32_sge:
@@ -604,8 +604,8 @@ define signext i32 @select_i32_slt(i32 signext %a, i32 signext %b, i32 signext %
;
; RV32-XQCICM-LABEL: select_i32_slt:
; RV32-XQCICM: # %bb.0:
-; RV32-XQCICM-NEXT: qc.mvlt a3, a0, a1, a2
-; RV32-XQCICM-NEXT: mv a0, a3
+; RV32-XQCICM-NEXT: qc.mvge a2, a0, a1, a3
+; RV32-XQCICM-NEXT: mv a0, a2
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i32_slt:
@@ -619,8 +619,8 @@ define signext i32 @select_i32_slt(i32 signext %a, i32 signext %b, i32 signext %
;
; RV32IXQCI-LABEL: select_i32_slt:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: qc.mvlt a3, a0, a1, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvge a2, a0, a1, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i32_slt:
@@ -661,8 +661,8 @@ define signext i32 @select_i32_sle(i32 signext %a, i32 signext %b, i32 signext %
;
; RV32-XQCICM-LABEL: select_i32_sle:
; RV32-XQCICM: # %bb.0:
-; RV32-XQCICM-NEXT: qc.mvge a3, a1, a0, a2
-; RV32-XQCICM-NEXT: mv a0, a3
+; RV32-XQCICM-NEXT: qc.mvlt a2, a1, a0, a3
+; RV32-XQCICM-NEXT: mv a0, a2
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i32_sle:
@@ -676,8 +676,8 @@ define signext i32 @select_i32_sle(i32 signext %a, i32 signext %b, i32 signext %
;
; RV32IXQCI-LABEL: select_i32_sle:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: qc.mvge a3, a1, a0, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvlt a2, a1, a0, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i32_sle:
@@ -723,11 +723,11 @@ define i64 @select_i64_trunc(i64 %cond, i64 %x, i64 %y) nounwind {
;
; RV32-XQCICM-LABEL: select_i64_trunc:
; RV32-XQCICM: # %bb.0:
-; RV32-XQCICM-NEXT: mv a1, a5
+; RV32-XQCICM-NEXT: mv a1, a3
; RV32-XQCICM-NEXT: andi a0, a0, 1
-; RV32-XQCICM-NEXT: qc.mvnei a4, a0, 0, a2
-; RV32-XQCICM-NEXT: qc.mvnei a1, a0, 0, a3
-; RV32-XQCICM-NEXT: mv a0, a4
+; RV32-XQCICM-NEXT: qc.mveqi a2, a0, 0, a4
+; RV32-XQCICM-NEXT: qc.mveqi a1, a0, 0, a5
+; RV32-XQCICM-NEXT: mv a0, a2
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i64_trunc:
@@ -740,11 +740,11 @@ define i64 @select_i64_trunc(i64 %cond, i64 %x, i64 %y) nounwind {
;
; RV32IXQCI-LABEL: select_i64_trunc:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: mv a1, a5
+; RV32IXQCI-NEXT: mv a1, a3
; RV32IXQCI-NEXT: andi a0, a0, 1
-; RV32IXQCI-NEXT: qc.mvnei a4, a0, 0, a2
-; RV32IXQCI-NEXT: qc.mvnei a1, a0, 0, a3
-; RV32IXQCI-NEXT: mv a0, a4
+; RV32IXQCI-NEXT: qc.mveqi a2, a0, 0, a4
+; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a5
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i64_trunc:
@@ -792,10 +792,10 @@ define i64 @select_i64_param(i1 %cond, i64 %x, i64 %y) nounwind {
; RV32-XQCICM-LABEL: select_i64_param:
; RV32-XQCICM: # %bb.0:
; RV32-XQCICM-NEXT: andi a0, a0, 1
-; RV32-XQCICM-NEXT: qc.mvnei a3, a0, 0, a1
-; RV32-XQCICM-NEXT: qc.mvnei a4, a0, 0, a2
-; RV32-XQCICM-NEXT: mv a0, a3
-; RV32-XQCICM-NEXT: mv a1, a4
+; RV32-XQCICM-NEXT: qc.mveqi a1, a0, 0, a3
+; RV32-XQCICM-NEXT: qc.mveqi a2, a0, 0, a4
+; RV32-XQCICM-NEXT: mv a0, a1
+; RV32-XQCICM-NEXT: mv a1, a2
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i64_param:
@@ -810,10 +810,10 @@ define i64 @select_i64_param(i1 %cond, i64 %x, i64 %y) nounwind {
; RV32IXQCI-LABEL: select_i64_param:
; RV32IXQCI: # %bb.0:
; RV32IXQCI-NEXT: andi a0, a0, 1
-; RV32IXQCI-NEXT: qc.mvnei a3, a0, 0, a1
-; RV32IXQCI-NEXT: qc.mvnei a4, a0, 0, a2
-; RV32IXQCI-NEXT: mv a0, a3
-; RV32IXQCI-NEXT: mv a1, a4
+; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a3
+; RV32IXQCI-NEXT: qc.mveqi a2, a0, 0, a4
+; RV32IXQCI-NEXT: mv a0, a1
+; RV32IXQCI-NEXT: mv a1, a2
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i64_param:
@@ -866,10 +866,10 @@ define i64 @select_i64_eq(i64 %a, i64 %b, i64 %x, i64 %y) nounwind {
; RV32-XQCICM-NEXT: xor a1, a1, a3
; RV32-XQCICM-NEXT: xor a0, a0, a2
; RV32-XQCICM-NEXT: or a0, a0, a1
-; RV32-XQCICM-NEXT: qc.mveqi a6, a0, 0, a4
-; RV32-XQCICM-NEXT: qc.mveqi a7, a0, 0, a5
-; RV32-XQCICM-NEXT: mv a0, a6
-; RV32-XQCICM-NEXT: mv a1, a7
+; RV32-XQCICM-NEXT: qc.mvnei a4, a0, 0, a6
+; RV32-XQCICM-NEXT: qc.mvnei a5, a0, 0, a7
+; RV32-XQCICM-NEXT: mv a0, a4
+; RV32-XQCICM-NEXT: mv a1, a5
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i64_eq:
@@ -887,10 +887,10 @@ define i64 @select_i64_eq(i64 %a, i64 %b, i64 %x, i64 %y) nounwind {
; RV32IXQCI-NEXT: xor a1, a1, a3
; RV32IXQCI-NEXT: xor a0, a0, a2
; RV32IXQCI-NEXT: or a0, a0, a1
-; RV32IXQCI-NEXT: qc.mveqi a6, a0, 0, a4
-; RV32IXQCI-NEXT: qc.mveqi a7, a0, 0, a5
-; RV32IXQCI-NEXT: mv a0, a6
-; RV32IXQCI-NEXT: mv a1, a7
+; RV32IXQCI-NEXT: qc.mvnei a4, a0, 0, a6
+; RV32IXQCI-NEXT: qc.mvnei a5, a0, 0, a7
+; RV32IXQCI-NEXT: mv a0, a4
+; RV32IXQCI-NEXT: mv a1, a5
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i64_eq:
@@ -943,10 +943,10 @@ define i64 @select_i64_ne(i64 %a, i64 %b, i64 %x, i64 %y) nounwind {
; RV32-XQCICM-NEXT: xor a1, a1, a3
; RV32-XQCICM-NEXT: xor a0, a0, a2
; RV32-XQCICM-NEXT: or a0, a0, a1
-; RV32-XQCICM-NEXT: qc.mvnei a6, a0, 0, a4
-; RV32-XQCICM-NEXT: qc.mvnei a7, a0, 0, a5
-; RV32-XQCICM-NEXT: mv a0, a6
-; RV32-XQCICM-NEXT: mv a1, a7
+; RV32-XQCICM-NEXT: qc.mveqi a4, a0, 0, a6
+; RV32-XQCICM-NEXT: qc.mveqi a5, a0, 0, a7
+; RV32-XQCICM-NEXT: mv a0, a4
+; RV32-XQCICM-NEXT: mv a1, a5
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i64_ne:
@@ -964,10 +964,10 @@ define i64 @select_i64_ne(i64 %a, i64 %b, i64 %x, i64 %y) nounwind {
; RV32IXQCI-NEXT: xor a1, a1, a3
; RV32IXQCI-NEXT: xor a0, a0, a2
; RV32IXQCI-NEXT: or a0, a0, a1
-; RV32IXQCI-NEXT: qc.mvnei a6, a0, 0, a4
-; RV32IXQCI-NEXT: qc.mvnei a7, a0, 0, a5
-; RV32IXQCI-NEXT: mv a0, a6
-; RV32IXQCI-NEXT: mv a1, a7
+; RV32IXQCI-NEXT: qc.mveqi a4, a0, 0, a6
+; RV32IXQCI-NEXT: qc.mveqi a5, a0, 0, a7
+; RV32IXQCI-NEXT: mv a0, a4
+; RV32IXQCI-NEXT: mv a1, a5
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i64_ne:
@@ -1025,10 +1025,10 @@ define i64 @select_i64_ugt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind {
; RV32-XQCICM-NEXT: sltu a0, a2, a0
; RV32-XQCICM-NEXT: sltu a2, a3, a1
; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0
-; RV32-XQCICM-NEXT: qc.mvnei a6, a2, 0, a4
-; RV32-XQCICM-NEXT: qc.mvnei a7, a2, 0, a5
-; RV32-XQCICM-NEXT: mv a0, a6
-; RV32-XQCICM-NEXT: mv a1, a7
+; RV32-XQCICM-NEXT: qc.mveqi a4, a2, 0, a6
+; RV32-XQCICM-NEXT: qc.mveqi a5, a2, 0, a7
+; RV32-XQCICM-NEXT: mv a0, a4
+; RV32-XQCICM-NEXT: mv a1, a5
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i64_ugt:
@@ -1050,10 +1050,10 @@ define i64 @select_i64_ugt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind {
; RV32IXQCI-NEXT: sltu a0, a2, a0
; RV32IXQCI-NEXT: sltu a2, a3, a1
; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0
-; RV32IXQCI-NEXT: qc.mvnei a6, a2, 0, a4
-; RV32IXQCI-NEXT: qc.mvnei a7, a2, 0, a5
-; RV32IXQCI-NEXT: mv a0, a6
-; RV32IXQCI-NEXT: mv a1, a7
+; RV32IXQCI-NEXT: qc.mveqi a4, a2, 0, a6
+; RV32IXQCI-NEXT: qc.mveqi a5, a2, 0, a7
+; RV32IXQCI-NEXT: mv a0, a4
+; RV32IXQCI-NEXT: mv a1, a5
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i64_ugt:
@@ -1111,10 +1111,10 @@ define i64 @select_i64_uge(i64 %a, i64 %b, i64 %x, i64 %y) nounwind {
; RV32-XQCICM-NEXT: sltu a0, a0, a2
; RV32-XQCICM-NEXT: sltu a2, a1, a3
; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0
-; RV32-XQCICM-NEXT: qc.mveqi a6, a2, 0, a4
-; RV32-XQCICM-NEXT: qc.mveqi a7, a2, 0, a5
-; RV32-XQCICM-NEXT: mv a0, a6
-; RV32-XQCICM-NEXT: mv a1, a7
+; RV32-XQCICM-NEXT: qc.mvnei a4, a2, 0, a6
+; RV32-XQCICM-NEXT: qc.mvnei a5, a2, 0, a7
+; RV32-XQCICM-NEXT: mv a0, a4
+; RV32-XQCICM-NEXT: mv a1, a5
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i64_uge:
@@ -1136,10 +1136,10 @@ define i64 @select_i64_uge(i64 %a, i64 %b, i64 %x, i64 %y) nounwind {
; RV32IXQCI-NEXT: sltu a0, a0, a2
; RV32IXQCI-NEXT: sltu a2, a1, a3
; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0
-; RV32IXQCI-NEXT: qc.mveqi a6, a2, 0, a4
-; RV32IXQCI-NEXT: qc.mveqi a7, a2, 0, a5
-; RV32IXQCI-NEXT: mv a0, a6
-; RV32IXQCI-NEXT: mv a1, a7
+; RV32IXQCI-NEXT: qc.mvnei a4, a2, 0, a6
+; RV32IXQCI-NEXT: qc.mvnei a5, a2, 0, a7
+; RV32IXQCI-NEXT: mv a0, a4
+; RV32IXQCI-NEXT: mv a1, a5
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i64_uge:
@@ -1197,10 +1197,10 @@ define i64 @select_i64_ult(i64 %a, i64 %b, i64 %x, i64 %y) nounwind {
; RV32-XQCICM-NEXT: sltu a0, a0, a2
; RV32-XQCICM-NEXT: sltu a2, a1, a3
; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0
-; RV32-XQCICM-NEXT: qc.mvnei a6, a2, 0, a4
-; RV32-XQCICM-NEXT: qc.mvnei a7, a2, 0, a5
-; RV32-XQCICM-NEXT: mv a0, a6
-; RV32-XQCICM-NEXT: mv a1, a7
+; RV32-XQCICM-NEXT: qc.mveqi a4, a2, 0, a6
+; RV32-XQCICM-NEXT: qc.mveqi a5, a2, 0, a7
+; RV32-XQCICM-NEXT: mv a0, a4
+; RV32-XQCICM-NEXT: mv a1, a5
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i64_ult:
@@ -1222,10 +1222,10 @@ define i64 @select_i64_ult(i64 %a, i64 %b, i64 %x, i64 %y) nounwind {
; RV32IXQCI-NEXT: sltu a0, a0, a2
; RV32IXQCI-NEXT: sltu a2, a1, a3
; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0
-; RV32IXQCI-NEXT: qc.mvnei a6, a2, 0, a4
-; RV32IXQCI-NEXT: qc.mvnei a7, a2, 0, a5
-; RV32IXQCI-NEXT: mv a0, a6
-; RV32IXQCI-NEXT: mv a1, a7
+; RV32IXQCI-NEXT: qc.mveqi a4, a2, 0, a6
+; RV32IXQCI-NEXT: qc.mveqi a5, a2, 0, a7
+; RV32IXQCI-NEXT: mv a0, a4
+; RV32IXQCI-NEXT: mv a1, a5
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i64_ult:
@@ -1283,10 +1283,10 @@ define i64 @select_i64_ule(i64 %a, i64 %b, i64 %x, i64 %y) nounwind {
; RV32-XQCICM-NEXT: sltu a0, a2, a0
; RV32-XQCICM-NEXT: sltu a2, a3, a1
; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0
-; RV32-XQCICM-NEXT: qc.mveqi a6, a2, 0, a4
-; RV32-XQCICM-NEXT: qc.mveqi a7, a2, 0, a5
-; RV32-XQCICM-NEXT: mv a0, a6
-; RV32-XQCICM-NEXT: mv a1, a7
+; RV32-XQCICM-NEXT: qc.mvnei a4, a2, 0, a6
+; RV32-XQCICM-NEXT: qc.mvnei a5, a2, 0, a7
+; RV32-XQCICM-NEXT: mv a0, a4
+; RV32-XQCICM-NEXT: mv a1, a5
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i64_ule:
@@ -1308,10 +1308,10 @@ define i64 @select_i64_ule(i64 %a, i64 %b, i64 %x, i64 %y) nounwind {
; RV32IXQCI-NEXT: sltu a0, a2, a0
; RV32IXQCI-NEXT: sltu a2, a3, a1
; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0
-; RV32IXQCI-NEXT: qc.mveqi a6, a2, 0, a4
-; RV32IXQCI-NEXT: qc.mveqi a7, a2, 0, a5
-; RV32IXQCI-NEXT: mv a0, a6
-; RV32IXQCI-NEXT: mv a1, a7
+; RV32IXQCI-NEXT: qc.mvnei a4, a2, 0, a6
+; RV32IXQCI-NEXT: qc.mvnei a5, a2, 0, a7
+; RV32IXQCI-NEXT: mv a0, a4
+; RV32IXQCI-NEXT: mv a1, a5
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i64_ule:
@@ -1369,10 +1369,10 @@ define i64 @select_i64_sgt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind {
; RV32-XQCICM-NEXT: sltu a0, a2, a0
; RV32-XQCICM-NEXT: slt a2, a3, a1
; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0
-; RV32-XQCICM-NEXT: qc.mvnei a6, a2, 0, a4
-; RV32-XQCICM-NEXT: qc.mvnei a7, a2, 0, a5
-; RV32-XQCICM-NEXT: mv a0, a6
-; RV32-XQCICM-NEXT: mv a1, a7
+; RV32-XQCICM-NEXT: qc.mveqi a4, a2, 0, a6
+; RV32-XQCICM-NEXT: qc.mveqi a5, a2, 0, a7
+; RV32-XQCICM-NEXT: mv a0, a4
+; RV32-XQCICM-NEXT: mv a1, a5
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i64_sgt:
@@ -1394,10 +1394,10 @@ define i64 @select_i64_sgt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind {
; RV32IXQCI-NEXT: sltu a0, a2, a0
; RV32IXQCI-NEXT: slt a2, a3, a1
; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0
-; RV32IXQCI-NEXT: qc.mvnei a6, a2, 0, a4
-; RV32IXQCI-NEXT: qc.mvnei a7, a2, 0, a5
-; RV32IXQCI-NEXT: mv a0, a6
-; RV32IXQCI-NEXT: mv a1, a7
+; RV32IXQCI-NEXT: qc.mveqi a4, a2, 0, a6
+; RV32IXQCI-NEXT: qc.mveqi a5, a2, 0, a7
+; RV32IXQCI-NEXT: mv a0, a4
+; RV32IXQCI-NEXT: mv a1, a5
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i64_sgt:
@@ -1455,10 +1455,10 @@ define i64 @select_i64_sge(i64 %a, i64 %b, i64 %x, i64 %y) nounwind {
; RV32-XQCICM-NEXT: sltu a0, a0, a2
; RV32-XQCICM-NEXT: slt a2, a1, a3
; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0
-; RV32-XQCICM-NEXT: qc.mveqi a6, a2, 0, a4
-; RV32-XQCICM-NEXT: qc.mveqi a7, a2, 0, a5
-; RV32-XQCICM-NEXT: mv a0, a6
-; RV32-XQCICM-NEXT: mv a1, a7
+; RV32-XQCICM-NEXT: qc.mvnei a4, a2, 0, a6
+; RV32-XQCICM-NEXT: qc.mvnei a5, a2, 0, a7
+; RV32-XQCICM-NEXT: mv a0, a4
+; RV32-XQCICM-NEXT: mv a1, a5
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i64_sge:
@@ -1480,10 +1480,10 @@ define i64 @select_i64_sge(i64 %a, i64 %b, i64 %x, i64 %y) nounwind {
; RV32IXQCI-NEXT: sltu a0, a0, a2
; RV32IXQCI-NEXT: slt a2, a1, a3
; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0
-; RV32IXQCI-NEXT: qc.mveqi a6, a2, 0, a4
-; RV32IXQCI-NEXT: qc.mveqi a7, a2, 0, a5
-; RV32IXQCI-NEXT: mv a0, a6
-; RV32IXQCI-NEXT: mv a1, a7
+; RV32IXQCI-NEXT: qc.mvnei a4, a2, 0, a6
+; RV32IXQCI-NEXT: qc.mvnei a5, a2, 0, a7
+; RV32IXQCI-NEXT: mv a0, a4
+; RV32IXQCI-NEXT: mv a1, a5
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i64_sge:
@@ -1541,10 +1541,10 @@ define i64 @select_i64_slt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind {
; RV32-XQCICM-NEXT: sltu a0, a0, a2
; RV32-XQCICM-NEXT: slt a2, a1, a3
; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0
-; RV32-XQCICM-NEXT: qc.mvnei a6, a2, 0, a4
-; RV32-XQCICM-NEXT: qc.mvnei a7, a2, 0, a5
-; RV32-XQCICM-NEXT: mv a0, a6
-; RV32-XQCICM-NEXT: mv a1, a7
+; RV32-XQCICM-NEXT: qc.mveqi a4, a2, 0, a6
+; RV32-XQCICM-NEXT: qc.mveqi a5, a2, 0, a7
+; RV32-XQCICM-NEXT: mv a0, a4
+; RV32-XQCICM-NEXT: mv a1, a5
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i64_slt:
@@ -1566,10 +1566,10 @@ define i64 @select_i64_slt(i64 %a, i64 %b, i64 %x, i64 %y) nounwind {
; RV32IXQCI-NEXT: sltu a0, a0, a2
; RV32IXQCI-NEXT: slt a2, a1, a3
; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0
-; RV32IXQCI-NEXT: qc.mvnei a6, a2, 0, a4
-; RV32IXQCI-NEXT: qc.mvnei a7, a2, 0, a5
-; RV32IXQCI-NEXT: mv a0, a6
-; RV32IXQCI-NEXT: mv a1, a7
+; RV32IXQCI-NEXT: qc.mveqi a4, a2, 0, a6
+; RV32IXQCI-NEXT: qc.mveqi a5, a2, 0, a7
+; RV32IXQCI-NEXT: mv a0, a4
+; RV32IXQCI-NEXT: mv a1, a5
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i64_slt:
@@ -1627,10 +1627,10 @@ define i64 @select_i64_sle(i64 %a, i64 %b, i64 %x, i64 %y) nounwind {
; RV32-XQCICM-NEXT: sltu a0, a2, a0
; RV32-XQCICM-NEXT: slt a2, a3, a1
; RV32-XQCICM-NEXT: qc.mveq a2, a1, a3, a0
-; RV32-XQCICM-NEXT: qc.mveqi a6, a2, 0, a4
-; RV32-XQCICM-NEXT: qc.mveqi a7, a2, 0, a5
-; RV32-XQCICM-NEXT: mv a0, a6
-; RV32-XQCICM-NEXT: mv a1, a7
+; RV32-XQCICM-NEXT: qc.mvnei a4, a2, 0, a6
+; RV32-XQCICM-NEXT: qc.mvnei a5, a2, 0, a7
+; RV32-XQCICM-NEXT: mv a0, a4
+; RV32-XQCICM-NEXT: mv a1, a5
; RV32-XQCICM-NEXT: ret
;
; RV32-XQCICS-LABEL: select_i64_sle:
@@ -1652,10 +1652,10 @@ define i64 @select_i64_sle(i64 %a, i64 %b, i64 %x, i64 %y) nounwind {
; RV32IXQCI-NEXT: sltu a0, a2, a0
; RV32IXQCI-NEXT: slt a2, a3, a1
; RV32IXQCI-NEXT: qc.mveq a2, a1, a3, a0
-; RV32IXQCI-NEXT: qc.mveqi a6, a2, 0, a4
-; RV32IXQCI-NEXT: qc.mveqi a7, a2, 0, a5
-; RV32IXQCI-NEXT: mv a0, a6
-; RV32IXQCI-NEXT: mv a1, a7
+; RV32IXQCI-NEXT: qc.mvnei a4, a2, 0, a6
+; RV32IXQCI-NEXT: qc.mvnei a5, a2, 0, a7
+; RV32IXQCI-NEXT: mv a0, a4
+; RV32IXQCI-NEXT: mv a1, a5
; RV32IXQCI-NEXT: ret
;
; RV64-LABEL: select_i64_sle:
diff --git a/llvm/test/CodeGen/RISCV/select.ll b/llvm/test/CodeGen/RISCV/select.ll
index 19fade6..8273c65 100644
--- a/llvm/test/CodeGen/RISCV/select.ll
+++ b/llvm/test/CodeGen/RISCV/select.ll
@@ -1153,8 +1153,8 @@ define i32 @select_sub_1(i1 zeroext %cond, i32 %a, i32 %b) {
; RV32IXQCI-LABEL: select_sub_1:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: sub a1, a1, a2
-; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = sub i32 %a, %b
@@ -1301,9 +1301,9 @@ define i32 @select_sub_4(i1 zeroext %cond, i32 %x) {
;
; RV32IXQCI-LABEL: select_sub_4:
; RV32IXQCI: # %bb.0:
-; RV32IXQCI-NEXT: addi a1, a1, -128
-; RV32IXQCI-NEXT: li a2, 128
-; RV32IXQCI-NEXT: qc.mvnei a1, a0, 0, a2
+; RV32IXQCI-NEXT: addi a2, a1, -128
+; RV32IXQCI-NEXT: li a1, 128
+; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
%add = sub i32 %x, 128
@@ -1348,8 +1348,8 @@ define i32 @select_and_1(i1 zeroext %cond, i32 %a, i32 %b) {
; RV32IXQCI-LABEL: select_and_1:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: and a1, a1, a2
-; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = and i32 %a, %b
@@ -1493,8 +1493,8 @@ define i32 @select_udiv_1(i1 zeroext %cond, i32 %a, i32 %b) {
; RV32IXQCI-LABEL: select_udiv_1:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: divu a1, a1, a2
-; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = udiv i32 %a, %b
@@ -1682,8 +1682,8 @@ define i32 @select_shl_1(i1 zeroext %cond, i32 %a, i32 %b) {
; RV32IXQCI-LABEL: select_shl_1:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: sll a1, a1, a2
-; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = shl i32 %a, %b
@@ -1798,8 +1798,8 @@ define i32 @select_ashr_1(i1 zeroext %cond, i32 %a, i32 %b) {
; RV32IXQCI-LABEL: select_ashr_1:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: sra a1, a1, a2
-; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = ashr i32 %a, %b
@@ -1914,8 +1914,8 @@ define i32 @select_lshr_1(i1 zeroext %cond, i32 %a, i32 %b) {
; RV32IXQCI-LABEL: select_lshr_1:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: srl a1, a1, a2
-; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%c = lshr i32 %a, %b
@@ -2371,9 +2371,9 @@ define i32 @select_cst5(i1 zeroext %cond) {
; RV32IXQCI-LABEL: select_cst5:
; RV32IXQCI: # %bb.0:
; RV32IXQCI-NEXT: lui a1, 1
-; RV32IXQCI-NEXT: addi a1, a1, -2047
-; RV32IXQCI-NEXT: li a2, 2047
-; RV32IXQCI-NEXT: qc.mvnei a1, a0, 0, a2
+; RV32IXQCI-NEXT: addi a2, a1, -2047
+; RV32IXQCI-NEXT: li a1, 2047
+; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
%ret = select i1 %cond, i32 2047, i32 2049
@@ -2870,8 +2870,8 @@ define void @select_redundant_czero_eqz1(ptr %0, ptr %1) {
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: lui a2, %hi(select_redundant_czero_eqz_data)
; RV32IXQCI-NEXT: addi a2, a2, %lo(select_redundant_czero_eqz_data)
-; RV32IXQCI-NEXT: qc.mveqi a0, a0, 0, a2
-; RV32IXQCI-NEXT: sw a0, 0(a1)
+; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a0
+; RV32IXQCI-NEXT: sw a2, 0(a1)
; RV32IXQCI-NEXT: ret
entry:
%3 = icmp eq ptr %0, null
diff --git a/llvm/test/CodeGen/RISCV/xqcicm.ll b/llvm/test/CodeGen/RISCV/xqcicm.ll
index 1741be7..fb48301 100644
--- a/llvm/test/CodeGen/RISCV/xqcicm.ll
+++ b/llvm/test/CodeGen/RISCV/xqcicm.ll
@@ -23,15 +23,15 @@ define i32 @select_example(i32 %cond, i32 %x, i32 %y) {
; RV32IXQCICM-LABEL: select_example:
; RV32IXQCICM: # %bb.0: # %entry
; RV32IXQCICM-NEXT: andi a0, a0, 1
-; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 0, a1
-; RV32IXQCICM-NEXT: mv a0, a2
+; RV32IXQCICM-NEXT: qc.mveqi a1, a0, 0, a2
+; RV32IXQCICM-NEXT: mv a0, a1
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_example:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: andi a0, a0, 1
-; RV32IXQCI-NEXT: qc.mvnei a2, a0, 0, a1
-; RV32IXQCI-NEXT: mv a0, a2
+; RV32IXQCI-NEXT: qc.mveqi a1, a0, 0, a2
+; RV32IXQCI-NEXT: mv a0, a1
; RV32IXQCI-NEXT: ret
entry:
%cond_trunc = trunc i32 %cond to i1
@@ -52,14 +52,14 @@ define i32 @select_cc_example_eq(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_eq:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mveqi a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_eq:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mveqi a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvnei a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp eq i32 %a, 11
@@ -80,14 +80,14 @@ define i32 @select_cc_example_eq1(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_eq1:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mveqi a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_eq1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mveqi a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvnei a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp eq i32 11, %a
@@ -108,14 +108,14 @@ define i32 @select_cc_example_ne(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_ne:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvnei a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mveqi a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_ne:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvnei a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mveqi a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ne i32 %a, 11
@@ -136,14 +136,14 @@ define i32 @select_cc_example_ne1(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_ne1:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvnei a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mveqi a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_ne1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvnei a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mveqi a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ne i32 11, %a
@@ -164,14 +164,14 @@ define i32 @select_cc_example_slt(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_slt:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvlti a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvgei a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_slt:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvlti a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvgei a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp slt i32 %a, 11
@@ -192,14 +192,14 @@ define i32 @select_cc_example_slt1(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_slt1:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvgei a3, a0, 12, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvlti a2, a0, 12, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_slt1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvgei a3, a0, 12, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvlti a2, a0, 12, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp slt i32 11, %a
@@ -220,14 +220,14 @@ define i32 @select_cc_example_sle(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_sle:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvlti a3, a0, 12, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvgei a2, a0, 12, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_sle:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvlti a3, a0, 12, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvgei a2, a0, 12, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp sle i32 %a, 11
@@ -248,14 +248,14 @@ define i32 @select_cc_example_sle1(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_sle1:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvgei a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvlti a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_sle1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvgei a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvlti a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp sle i32 11, %a
@@ -276,14 +276,14 @@ define i32 @select_cc_example_sgt(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_sgt:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvgei a3, a0, 12, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvlti a2, a0, 12, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_sgt:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvgei a3, a0, 12, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvlti a2, a0, 12, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp sgt i32 %a, 11
@@ -304,14 +304,14 @@ define i32 @select_cc_example_sgt1(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_sgt1:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvlti a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvgei a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_sgt1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvlti a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvgei a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp sgt i32 11, %a
@@ -332,14 +332,14 @@ define i32 @select_cc_example_sge(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_sge:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvgei a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvlti a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_sge:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvgei a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvlti a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp sge i32 %a, 11
@@ -360,14 +360,14 @@ define i32 @select_cc_example_sge1(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_sge1:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvlti a3, a0, 12, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvgei a2, a0, 12, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_sge1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvlti a3, a0, 12, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvgei a2, a0, 12, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp sge i32 11, %a
@@ -388,14 +388,14 @@ define i32 @select_cc_example_ule(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_ule:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvltui a3, a0, 12, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvgeui a2, a0, 12, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_ule:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvltui a3, a0, 12, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvgeui a2, a0, 12, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ule i32 %a, 11
@@ -416,14 +416,14 @@ define i32 @select_cc_example_ule1(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_ule1:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvgeui a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvltui a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_ule1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvgeui a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvltui a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ule i32 11, %a
@@ -444,14 +444,14 @@ define i32 @select_cc_example_ugt(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_ugt:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvgeui a3, a0, 12, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvltui a2, a0, 12, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_ugt:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvgeui a3, a0, 12, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvltui a2, a0, 12, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ugt i32 %a, 11
@@ -472,14 +472,14 @@ define i32 @select_cc_example_ugt1(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_ugt1:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvltui a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvgeui a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_ugt1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvltui a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvgeui a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ugt i32 11, %a
@@ -500,14 +500,14 @@ define i32 @select_cc_example_ult(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_ult:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvltui a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvgeui a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_ult:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvltui a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvgeui a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ult i32 %a, 11
@@ -528,14 +528,14 @@ define i32 @select_cc_example_ult1(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_ult1:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvgeui a3, a0, 12, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvltui a2, a0, 12, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_ult1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvgeui a3, a0, 12, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvltui a2, a0, 12, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ult i32 11, %a
@@ -556,14 +556,14 @@ define i32 @select_cc_example_uge(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_uge:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvgeui a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvltui a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_uge:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvgeui a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvltui a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp uge i32 %a, 11
@@ -584,14 +584,14 @@ define i32 @select_cc_example_uge1(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_uge1:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvltui a3, a0, 12, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvgeui a2, a0, 12, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_uge1:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvltui a3, a0, 12, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvgeui a2, a0, 12, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp uge i32 11, %a
@@ -611,14 +611,14 @@ define i32 @select_cc_example_eq_reg(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_eq_reg:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mveq a3, a0, a1, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvne a2, a0, a1, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_eq_reg:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mveq a3, a0, a1, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvne a2, a0, a1, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp eq i32 %a, %b
@@ -638,14 +638,14 @@ define i32 @select_cc_example_ne_reg(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_ne_reg:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvne a3, a0, a1, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mveq a2, a0, a1, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_ne_reg:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvne a3, a0, a1, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mveq a2, a0, a1, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ne i32 %a, %b
@@ -665,14 +665,14 @@ define i32 @select_cc_example_slt_reg(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_slt_reg:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvlt a3, a0, a1, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvge a2, a0, a1, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_slt_reg:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvlt a3, a0, a1, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvge a2, a0, a1, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp slt i32 %a, %b
@@ -692,14 +692,14 @@ define i32 @select_cc_example_sge_reg(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_sge_reg:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvge a3, a0, a1, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvlt a2, a0, a1, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_sge_reg:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvge a3, a0, a1, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvlt a2, a0, a1, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp sge i32 %a, %b
@@ -719,14 +719,14 @@ define i32 @select_cc_example_sgt_reg(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_sgt_reg:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvlt a3, a1, a0, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvge a2, a1, a0, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_sgt_reg:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvlt a3, a1, a0, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvge a2, a1, a0, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp sgt i32 %a, %b
@@ -746,14 +746,14 @@ define i32 @select_cc_example_sle_reg(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_sle_reg:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvge a3, a1, a0, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvlt a2, a1, a0, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_sle_reg:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvge a3, a1, a0, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvlt a2, a1, a0, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp sle i32 %a, %b
@@ -773,14 +773,14 @@ define i32 @select_cc_example_ugt_reg(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_ugt_reg:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvltu a3, a1, a0, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvgeu a2, a1, a0, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_ugt_reg:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvltu a3, a1, a0, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvgeu a2, a1, a0, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ugt i32 %a, %b
@@ -800,14 +800,14 @@ define i32 @select_cc_example_ult_reg(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_ult_reg:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvltu a3, a0, a1, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvgeu a2, a0, a1, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_ult_reg:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvltu a3, a0, a1, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvgeu a2, a0, a1, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ult i32 %a, %b
@@ -827,14 +827,14 @@ define i32 @select_cc_example_uge_reg(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_uge_reg:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvgeu a3, a0, a1, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvltu a2, a0, a1, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_uge_reg:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvgeu a3, a0, a1, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvltu a2, a0, a1, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp uge i32 %a, %b
@@ -854,14 +854,14 @@ define i32 @select_cc_example_ule_reg(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_ule_reg:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvgeu a3, a1, a0, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvltu a2, a1, a0, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_ule_reg:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvgeu a3, a1, a0, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvltu a2, a1, a0, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ule i32 %a, %b
@@ -883,18 +883,263 @@ define i32 @select_cc_example_ule_neg(i32 %a, i32 %b, i32 %x, i32 %y) {
; RV32IXQCICM-LABEL: select_cc_example_ule_neg:
; RV32IXQCICM: # %bb.0: # %entry
; RV32IXQCICM-NEXT: li a1, -10
-; RV32IXQCICM-NEXT: qc.mvltu a3, a0, a1, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvgeu a2, a0, a1, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_ule_neg:
; RV32IXQCI: # %bb.0: # %entry
; RV32IXQCI-NEXT: li a1, -10
-; RV32IXQCI-NEXT: qc.mvltu a3, a0, a1, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvgeu a2, a0, a1, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ule i32 %a, -11
%sel = select i1 %cmp, i32 %x, i32 %y
ret i32 %sel
}
+
+define i32 @select_cc_example_eq_mv(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_eq_mv:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: beq a2, a1, .LBB32_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: .LBB32_2: # %entry
+; RV32I-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_eq_mv:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.mvne a0, a2, a1, a3
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_eq_mv:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.mvne a0, a2, a1, a3
+; RV32IXQCI-NEXT: ret
+entry:
+ %cmp = icmp eq i32 %x, %b
+ %sel = select i1 %cmp, i32 %a, i32 %y
+ ret i32 %sel
+}
+
+define i32 @select_cc_example_lt_mv(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_lt_mv:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: blt a2, a1, .LBB33_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: .LBB33_2: # %entry
+; RV32I-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_lt_mv:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.mvge a0, a2, a1, a3
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_lt_mv:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.mvge a0, a2, a1, a3
+; RV32IXQCI-NEXT: ret
+entry:
+ %cmp = icmp slt i32 %x, %b
+ %sel = select i1 %cmp, i32 %a, i32 %y
+ ret i32 %sel
+}
+
+define i32 @select_cc_example_ge_mv(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_ge_mv:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: bge a2, a1, .LBB34_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: .LBB34_2: # %entry
+; RV32I-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_ge_mv:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.mvlt a0, a2, a1, a3
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_ge_mv:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.mvlt a0, a2, a1, a3
+; RV32IXQCI-NEXT: ret
+entry:
+ %cmp = icmp sge i32 %x, %b
+ %sel = select i1 %cmp, i32 %a, i32 %y
+ ret i32 %sel
+}
+
+define i32 @select_cc_example_ult_mv(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_ult_mv:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: bltu a2, a1, .LBB35_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: .LBB35_2: # %entry
+; RV32I-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_ult_mv:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.mvgeu a0, a2, a1, a3
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_ult_mv:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.mvgeu a0, a2, a1, a3
+; RV32IXQCI-NEXT: ret
+entry:
+ %cmp = icmp ult i32 %x, %b
+ %sel = select i1 %cmp, i32 %a, i32 %y
+ ret i32 %sel
+}
+
+define i32 @select_cc_example_uge_mv(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_uge_mv:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: bgeu a2, a1, .LBB36_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: .LBB36_2: # %entry
+; RV32I-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_uge_mv:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.mvltu a0, a2, a1, a3
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_uge_mv:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.mvltu a0, a2, a1, a3
+; RV32IXQCI-NEXT: ret
+entry:
+ %cmp = icmp uge i32 %x, %b
+ %sel = select i1 %cmp, i32 %a, i32 %y
+ ret i32 %sel
+}
+
+define i32 @select_cc_example_eq_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_eq_imm_mv:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: li a1, 11
+; RV32I-NEXT: beq a2, a1, .LBB37_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: .LBB37_2: # %entry
+; RV32I-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_eq_imm_mv:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.mvnei a0, a2, 11, a3
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_eq_imm_mv:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.mvnei a0, a2, 11, a3
+; RV32IXQCI-NEXT: ret
+entry:
+ %cmp = icmp eq i32 %x, 11
+ %sel = select i1 %cmp, i32 %a, i32 %y
+ ret i32 %sel
+}
+
+define i32 @select_cc_example_lt_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_lt_imm_mv:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: li a1, 11
+; RV32I-NEXT: blt a2, a1, .LBB38_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: .LBB38_2: # %entry
+; RV32I-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_lt_imm_mv:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.mvgei a0, a2, 11, a3
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_lt_imm_mv:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.mvgei a0, a2, 11, a3
+; RV32IXQCI-NEXT: ret
+entry:
+ %cmp = icmp slt i32 %x, 11
+ %sel = select i1 %cmp, i32 %a, i32 %y
+ ret i32 %sel
+}
+
+define i32 @select_cc_example_ge_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_ge_imm_mv:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: li a1, 10
+; RV32I-NEXT: blt a1, a2, .LBB39_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: .LBB39_2: # %entry
+; RV32I-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_ge_imm_mv:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.mvlti a0, a2, 11, a3
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_ge_imm_mv:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.mvlti a0, a2, 11, a3
+; RV32IXQCI-NEXT: ret
+entry:
+ %cmp = icmp sge i32 %x, 11
+ %sel = select i1 %cmp, i32 %a, i32 %y
+ ret i32 %sel
+}
+
+define i32 @select_cc_example_ult_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_ult_imm_mv:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: li a1, 11
+; RV32I-NEXT: bltu a2, a1, .LBB40_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: .LBB40_2: # %entry
+; RV32I-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_ult_imm_mv:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.mvgeui a0, a2, 11, a3
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_ult_imm_mv:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.mvgeui a0, a2, 11, a3
+; RV32IXQCI-NEXT: ret
+entry:
+ %cmp = icmp ult i32 %x, 11
+ %sel = select i1 %cmp, i32 %a, i32 %y
+ ret i32 %sel
+}
+
+define i32 @select_cc_example_uge_imm_mv(i32 %a, i32 %b, i32 %x, i32 %y) {
+; RV32I-LABEL: select_cc_example_uge_imm_mv:
+; RV32I: # %bb.0: # %entry
+; RV32I-NEXT: li a1, 10
+; RV32I-NEXT: bltu a1, a2, .LBB41_2
+; RV32I-NEXT: # %bb.1: # %entry
+; RV32I-NEXT: mv a0, a3
+; RV32I-NEXT: .LBB41_2: # %entry
+; RV32I-NEXT: ret
+;
+; RV32IXQCICM-LABEL: select_cc_example_uge_imm_mv:
+; RV32IXQCICM: # %bb.0: # %entry
+; RV32IXQCICM-NEXT: qc.mvltui a0, a2, 11, a3
+; RV32IXQCICM-NEXT: ret
+;
+; RV32IXQCI-LABEL: select_cc_example_uge_imm_mv:
+; RV32IXQCI: # %bb.0: # %entry
+; RV32IXQCI-NEXT: qc.mvltui a0, a2, 11, a3
+; RV32IXQCI-NEXT: ret
+entry:
+ %cmp = icmp uge i32 %x, 11
+ %sel = select i1 %cmp, i32 %a, i32 %y
+ ret i32 %sel
+}
diff --git a/llvm/test/CodeGen/RISCV/xqcics.ll b/llvm/test/CodeGen/RISCV/xqcics.ll
index 38de8fb..5b7ca9e7 100644
--- a/llvm/test/CodeGen/RISCV/xqcics.ll
+++ b/llvm/test/CodeGen/RISCV/xqcics.ll
@@ -134,14 +134,14 @@ define i32 @select_cc_example_eq(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_eq:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mveqi a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_eq:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mveqi a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvnei a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp eq i32 %a, 11
@@ -167,14 +167,14 @@ define i32 @select_cc_example_eq_c(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_eq_c:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mveqi a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mvnei a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_eq_c:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mveqi a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mvnei a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp eq i32 11, %a
@@ -200,14 +200,14 @@ define i32 @select_cc_example_ne(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_ne:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvnei a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mveqi a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_ne:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvnei a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mveqi a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ne i32 %a, 11
@@ -233,14 +233,14 @@ define i32 @select_cc_example_ne_c(i32 %a, i32 %b, i32 %x, i32 %y) {
;
; RV32IXQCICM-LABEL: select_cc_example_ne_c:
; RV32IXQCICM: # %bb.0: # %entry
-; RV32IXQCICM-NEXT: qc.mvnei a3, a0, 11, a2
-; RV32IXQCICM-NEXT: mv a0, a3
+; RV32IXQCICM-NEXT: qc.mveqi a2, a0, 11, a3
+; RV32IXQCICM-NEXT: mv a0, a2
; RV32IXQCICM-NEXT: ret
;
; RV32IXQCI-LABEL: select_cc_example_ne_c:
; RV32IXQCI: # %bb.0: # %entry
-; RV32IXQCI-NEXT: qc.mvnei a3, a0, 11, a2
-; RV32IXQCI-NEXT: mv a0, a3
+; RV32IXQCI-NEXT: qc.mveqi a2, a0, 11, a3
+; RV32IXQCI-NEXT: mv a0, a2
; RV32IXQCI-NEXT: ret
entry:
%cmp = icmp ne i32 11, %a
diff --git a/llvm/test/CodeGen/VE/Scalar/max.ll b/llvm/test/CodeGen/VE/Scalar/max.ll
index 51da557..7950842 100644
--- a/llvm/test/CodeGen/VE/Scalar/max.ll
+++ b/llvm/test/CodeGen/VE/Scalar/max.ll
@@ -1,7 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
-; RUN: llc < %s -mtriple=ve-unknown-unknown -enable-no-signed-zeros-fp-math \
-; RUN: -enable-no-nans-fp-math | FileCheck %s -check-prefix=OPT
define double @maxf64(double, double) {
; CHECK-LABEL: maxf64:
@@ -10,16 +8,21 @@ define double @maxf64(double, double) {
; CHECK-NEXT: cmov.d.gt %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: maxf64:
-; OPT: # %bb.0:
-; OPT-NEXT: fmax.d %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = fcmp ogt double %0, %1
%4 = select i1 %3, double %0, double %1
ret double %4
}
+define double @maxf64_fast(double, double) {
+; CHECK-LABEL: maxf64_fast:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmax.d %s0, %s0, %s1
+; CHECK-NEXT: b.l.t (, %s10)
+ %3 = fcmp ogt double %0, %1
+ %4 = select nnan nsz i1 %3, double %0, double %1
+ ret double %4
+}
+
define double @max2f64(double, double) {
; CHECK-LABEL: max2f64:
; CHECK: # %bb.0:
@@ -27,16 +30,21 @@ define double @max2f64(double, double) {
; CHECK-NEXT: cmov.d.ge %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: max2f64:
-; OPT: # %bb.0:
-; OPT-NEXT: fmax.d %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = fcmp oge double %0, %1
%4 = select i1 %3, double %0, double %1
ret double %4
}
+define double @max2f64_fast(double, double) {
+; CHECK-LABEL: max2f64_fast:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmax.d %s0, %s0, %s1
+; CHECK-NEXT: b.l.t (, %s10)
+ %3 = fcmp oge double %0, %1
+ %4 = select nnan nsz i1 %3, double %0, double %1
+ ret double %4
+}
+
; VE has no max for unordered comparison
define double @maxuf64(double, double) {
; CHECK-LABEL: maxuf64:
@@ -45,16 +53,21 @@ define double @maxuf64(double, double) {
; CHECK-NEXT: cmov.d.gtnan %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: maxuf64:
-; OPT: # %bb.0:
-; OPT-NEXT: fmax.d %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = fcmp ugt double %0, %1
%4 = select i1 %3, double %0, double %1
ret double %4
}
+define double @maxuf64_fast(double, double) {
+; CHECK-LABEL: maxuf64_fast:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmax.d %s0, %s0, %s1
+; CHECK-NEXT: b.l.t (, %s10)
+ %3 = fcmp ugt double %0, %1
+ %4 = select nnan nsz i1 %3, double %0, double %1
+ ret double %4
+}
+
; VE has no max for unordered comparison
define double @max2uf64(double, double) {
; CHECK-LABEL: max2uf64:
@@ -63,16 +76,21 @@ define double @max2uf64(double, double) {
; CHECK-NEXT: cmov.d.genan %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: max2uf64:
-; OPT: # %bb.0:
-; OPT-NEXT: fmax.d %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = fcmp uge double %0, %1
%4 = select i1 %3, double %0, double %1
ret double %4
}
+define double @max2uf64_fast(double, double) {
+; CHECK-LABEL: max2uf64_fast:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmax.d %s0, %s0, %s1
+; CHECK-NEXT: b.l.t (, %s10)
+ %3 = fcmp uge double %0, %1
+ %4 = select nnan nsz i1 %3, double %0, double %1
+ ret double %4
+}
+
define float @maxf32(float, float) {
; CHECK-LABEL: maxf32:
; CHECK: # %bb.0:
@@ -80,16 +98,21 @@ define float @maxf32(float, float) {
; CHECK-NEXT: cmov.s.gt %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: maxf32:
-; OPT: # %bb.0:
-; OPT-NEXT: fmax.s %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = fcmp ogt float %0, %1
%4 = select i1 %3, float %0, float %1
ret float %4
}
+define float @maxf32_fast(float, float) {
+; CHECK-LABEL: maxf32_fast:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmax.s %s0, %s0, %s1
+; CHECK-NEXT: b.l.t (, %s10)
+ %3 = fcmp ogt float %0, %1
+ %4 = select nnan nsz i1 %3, float %0, float %1
+ ret float %4
+}
+
define float @max2f32(float, float) {
; CHECK-LABEL: max2f32:
; CHECK: # %bb.0:
@@ -97,16 +120,21 @@ define float @max2f32(float, float) {
; CHECK-NEXT: cmov.s.ge %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: max2f32:
-; OPT: # %bb.0:
-; OPT-NEXT: fmax.s %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = fcmp oge float %0, %1
%4 = select i1 %3, float %0, float %1
ret float %4
}
+define float @max2f32_fast(float, float) {
+; CHECK-LABEL: max2f32_fast:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmax.s %s0, %s0, %s1
+; CHECK-NEXT: b.l.t (, %s10)
+ %3 = fcmp oge float %0, %1
+ %4 = select nnan nsz i1 %3, float %0, float %1
+ ret float %4
+}
+
define float @maxuf32(float, float) {
; CHECK-LABEL: maxuf32:
; CHECK: # %bb.0:
@@ -114,16 +142,21 @@ define float @maxuf32(float, float) {
; CHECK-NEXT: cmov.s.gtnan %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: maxuf32:
-; OPT: # %bb.0:
-; OPT-NEXT: fmax.s %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = fcmp ugt float %0, %1
%4 = select i1 %3, float %0, float %1
ret float %4
}
+define float @maxuf32_fast(float, float) {
+; CHECK-LABEL: maxuf32_fast:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmax.s %s0, %s0, %s1
+; CHECK-NEXT: b.l.t (, %s10)
+ %3 = fcmp ugt float %0, %1
+ %4 = select nnan nsz i1 %3, float %0, float %1
+ ret float %4
+}
+
define float @max2uf32(float, float) {
; CHECK-LABEL: max2uf32:
; CHECK: # %bb.0:
@@ -131,26 +164,26 @@ define float @max2uf32(float, float) {
; CHECK-NEXT: cmov.s.genan %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: max2uf32:
-; OPT: # %bb.0:
-; OPT-NEXT: fmax.s %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = fcmp uge float %0, %1
%4 = select i1 %3, float %0, float %1
ret float %4
}
+define float @max2uf32_fast(float, float) {
+; CHECK-LABEL: max2uf32_fast:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmax.s %s0, %s0, %s1
+; CHECK-NEXT: b.l.t (, %s10)
+ %3 = fcmp uge float %0, %1
+ %4 = select nnan nsz i1 %3, float %0, float %1
+ ret float %4
+}
+
define i64 @maxi64(i64, i64) {
; CHECK-LABEL: maxi64:
; CHECK: # %bb.0:
; CHECK-NEXT: maxs.l %s0, %s0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: maxi64:
-; OPT: # %bb.0:
-; OPT-NEXT: maxs.l %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = icmp sgt i64 %0, %1
%4 = select i1 %3, i64 %0, i64 %1
ret i64 %4
@@ -161,11 +194,6 @@ define i64 @max2i64(i64, i64) {
; CHECK: # %bb.0:
; CHECK-NEXT: maxs.l %s0, %s0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: max2i64:
-; OPT: # %bb.0:
-; OPT-NEXT: maxs.l %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = icmp sge i64 %0, %1
%4 = select i1 %3, i64 %0, i64 %1
ret i64 %4
@@ -178,13 +206,6 @@ define i64 @maxu64(i64, i64) {
; CHECK-NEXT: cmov.l.gt %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: maxu64:
-; OPT: # %bb.0:
-; OPT-NEXT: cmpu.l %s2, %s0, %s1
-; OPT-NEXT: cmov.l.gt %s1, %s0, %s2
-; OPT-NEXT: or %s0, 0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = icmp ugt i64 %0, %1
%4 = select i1 %3, i64 %0, i64 %1
ret i64 %4
@@ -197,13 +218,6 @@ define i64 @max2u64(i64, i64) {
; CHECK-NEXT: cmov.l.ge %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: max2u64:
-; OPT: # %bb.0:
-; OPT-NEXT: cmpu.l %s2, %s0, %s1
-; OPT-NEXT: cmov.l.ge %s1, %s0, %s2
-; OPT-NEXT: or %s0, 0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = icmp uge i64 %0, %1
%4 = select i1 %3, i64 %0, i64 %1
ret i64 %4
@@ -214,11 +228,6 @@ define i32 @maxi32(i32, i32) {
; CHECK: # %bb.0:
; CHECK-NEXT: maxs.w.sx %s0, %s0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: maxi32:
-; OPT: # %bb.0:
-; OPT-NEXT: maxs.w.sx %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = icmp sgt i32 %0, %1
%4 = select i1 %3, i32 %0, i32 %1
ret i32 %4
@@ -229,11 +238,6 @@ define i32 @max2i32(i32, i32) {
; CHECK: # %bb.0:
; CHECK-NEXT: maxs.w.sx %s0, %s0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: max2i32:
-; OPT: # %bb.0:
-; OPT-NEXT: maxs.w.sx %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = icmp sge i32 %0, %1
%4 = select i1 %3, i32 %0, i32 %1
ret i32 %4
@@ -246,13 +250,6 @@ define i32 @maxu32(i32, i32) {
; CHECK-NEXT: cmov.w.gt %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: maxu32:
-; OPT: # %bb.0:
-; OPT-NEXT: cmpu.w %s2, %s0, %s1
-; OPT-NEXT: cmov.w.gt %s1, %s0, %s2
-; OPT-NEXT: or %s0, 0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = icmp ugt i32 %0, %1
%4 = select i1 %3, i32 %0, i32 %1
ret i32 %4
@@ -265,13 +262,6 @@ define i32 @max2u32(i32, i32) {
; CHECK-NEXT: cmov.w.ge %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: max2u32:
-; OPT: # %bb.0:
-; OPT-NEXT: cmpu.w %s2, %s0, %s1
-; OPT-NEXT: cmov.w.ge %s1, %s0, %s2
-; OPT-NEXT: or %s0, 0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = icmp uge i32 %0, %1
%4 = select i1 %3, i32 %0, i32 %1
ret i32 %4
@@ -283,12 +273,6 @@ define zeroext i1 @maxi1(i1 zeroext, i1 zeroext) {
; CHECK-NEXT: or %s0, %s0, %s1
; CHECK-NEXT: and %s0, 1, %s0
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: maxi1:
-; OPT: # %bb.0:
-; OPT-NEXT: or %s0, %s0, %s1
-; OPT-NEXT: and %s0, 1, %s0
-; OPT-NEXT: b.l.t (, %s10)
%3 = xor i1 %1, true
%4 = and i1 %3, %0
%5 = select i1 %4, i1 %0, i1 %1
diff --git a/llvm/test/CodeGen/VE/Scalar/min.ll b/llvm/test/CodeGen/VE/Scalar/min.ll
index e8f4939..36a2e06 100644
--- a/llvm/test/CodeGen/VE/Scalar/min.ll
+++ b/llvm/test/CodeGen/VE/Scalar/min.ll
@@ -1,7 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
-; RUN: llc < %s -mtriple=ve-unknown-unknown -enable-no-signed-zeros-fp-math \
-; RUN: -enable-no-nans-fp-math | FileCheck %s -check-prefix=OPT
define double @minf64(double, double) {
; CHECK-LABEL: minf64:
@@ -10,16 +8,21 @@ define double @minf64(double, double) {
; CHECK-NEXT: cmov.d.lt %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: minf64:
-; OPT: # %bb.0:
-; OPT-NEXT: fmin.d %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = fcmp olt double %0, %1
%4 = select i1 %3, double %0, double %1
ret double %4
}
+define double @minf64_fast(double, double) {
+; CHECK-LABEL: minf64_fast:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmin.d %s0, %s0, %s1
+; CHECK-NEXT: b.l.t (, %s10)
+ %3 = fcmp olt double %0, %1
+ %4 = select nnan nsz i1 %3, double %0, double %1
+ ret double %4
+}
+
define double @min2f64(double, double) {
; CHECK-LABEL: min2f64:
; CHECK: # %bb.0:
@@ -27,16 +30,21 @@ define double @min2f64(double, double) {
; CHECK-NEXT: cmov.d.le %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: min2f64:
-; OPT: # %bb.0:
-; OPT-NEXT: fmin.d %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = fcmp ole double %0, %1
%4 = select i1 %3, double %0, double %1
ret double %4
}
+define double @min2f64_fast(double, double) {
+; CHECK-LABEL: min2f64_fast:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmin.d %s0, %s0, %s1
+; CHECK-NEXT: b.l.t (, %s10)
+ %3 = fcmp ole double %0, %1
+ %4 = select nnan nsz i1 %3, double %0, double %1
+ ret double %4
+}
+
define double @minuf64(double, double) {
; CHECK-LABEL: minuf64:
; CHECK: # %bb.0:
@@ -44,16 +52,21 @@ define double @minuf64(double, double) {
; CHECK-NEXT: cmov.d.ltnan %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: minuf64:
-; OPT: # %bb.0:
-; OPT-NEXT: fmin.d %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = fcmp ult double %0, %1
%4 = select i1 %3, double %0, double %1
ret double %4
}
+define double @minuf64_fast(double, double) {
+; CHECK-LABEL: minuf64_fast:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmin.d %s0, %s0, %s1
+; CHECK-NEXT: b.l.t (, %s10)
+ %3 = fcmp ult double %0, %1
+ %4 = select nnan nsz i1 %3, double %0, double %1
+ ret double %4
+}
+
define double @min2uf64(double, double) {
; CHECK-LABEL: min2uf64:
; CHECK: # %bb.0:
@@ -61,16 +74,21 @@ define double @min2uf64(double, double) {
; CHECK-NEXT: cmov.d.lenan %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: min2uf64:
-; OPT: # %bb.0:
-; OPT-NEXT: fmin.d %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = fcmp ule double %0, %1
%4 = select i1 %3, double %0, double %1
ret double %4
}
+define double @min2uf64_fast(double, double) {
+; CHECK-LABEL: min2uf64_fast:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmin.d %s0, %s0, %s1
+; CHECK-NEXT: b.l.t (, %s10)
+ %3 = fcmp ule double %0, %1
+ %4 = select nnan nsz i1 %3, double %0, double %1
+ ret double %4
+}
+
define float @minf32(float, float) {
; CHECK-LABEL: minf32:
; CHECK: # %bb.0:
@@ -78,16 +96,21 @@ define float @minf32(float, float) {
; CHECK-NEXT: cmov.s.lt %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: minf32:
-; OPT: # %bb.0:
-; OPT-NEXT: fmin.s %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = fcmp olt float %0, %1
%4 = select i1 %3, float %0, float %1
ret float %4
}
+define float @minf32_fast(float, float) {
+; CHECK-LABEL: minf32_fast:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmin.s %s0, %s0, %s1
+; CHECK-NEXT: b.l.t (, %s10)
+ %3 = fcmp olt float %0, %1
+ %4 = select nnan nsz i1 %3, float %0, float %1
+ ret float %4
+}
+
define float @min2f32(float, float) {
; CHECK-LABEL: min2f32:
; CHECK: # %bb.0:
@@ -95,16 +118,21 @@ define float @min2f32(float, float) {
; CHECK-NEXT: cmov.s.le %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: min2f32:
-; OPT: # %bb.0:
-; OPT-NEXT: fmin.s %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = fcmp ole float %0, %1
%4 = select i1 %3, float %0, float %1
ret float %4
}
+define float @min2f32_fast(float, float) {
+; CHECK-LABEL: min2f32_fast:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmin.s %s0, %s0, %s1
+; CHECK-NEXT: b.l.t (, %s10)
+ %3 = fcmp ole float %0, %1
+ %4 = select nnan nsz i1 %3, float %0, float %1
+ ret float %4
+}
+
define float @minuf32(float, float) {
; CHECK-LABEL: minuf32:
; CHECK: # %bb.0:
@@ -112,16 +140,21 @@ define float @minuf32(float, float) {
; CHECK-NEXT: cmov.s.ltnan %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: minuf32:
-; OPT: # %bb.0:
-; OPT-NEXT: fmin.s %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = fcmp ult float %0, %1
%4 = select i1 %3, float %0, float %1
ret float %4
}
+define float @minuf32_fast(float, float) {
+; CHECK-LABEL: minuf32_fast:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmin.s %s0, %s0, %s1
+; CHECK-NEXT: b.l.t (, %s10)
+ %3 = fcmp ult float %0, %1
+ %4 = select nnan nsz i1 %3, float %0, float %1
+ ret float %4
+}
+
define float @min2uf32(float, float) {
; CHECK-LABEL: min2uf32:
; CHECK: # %bb.0:
@@ -129,26 +162,26 @@ define float @min2uf32(float, float) {
; CHECK-NEXT: cmov.s.lenan %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: min2uf32:
-; OPT: # %bb.0:
-; OPT-NEXT: fmin.s %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = fcmp ule float %0, %1
%4 = select i1 %3, float %0, float %1
ret float %4
}
+define float @min2uf32_fast(float, float) {
+; CHECK-LABEL: min2uf32_fast:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fmin.s %s0, %s0, %s1
+; CHECK-NEXT: b.l.t (, %s10)
+ %3 = fcmp ule float %0, %1
+ %4 = select nnan nsz i1 %3, float %0, float %1
+ ret float %4
+}
+
define i64 @mini64(i64, i64) {
; CHECK-LABEL: mini64:
; CHECK: # %bb.0:
; CHECK-NEXT: mins.l %s0, %s0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: mini64:
-; OPT: # %bb.0:
-; OPT-NEXT: mins.l %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = icmp slt i64 %0, %1
%4 = select i1 %3, i64 %0, i64 %1
ret i64 %4
@@ -159,11 +192,6 @@ define i64 @min2i64(i64, i64) {
; CHECK: # %bb.0:
; CHECK-NEXT: mins.l %s0, %s0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: min2i64:
-; OPT: # %bb.0:
-; OPT-NEXT: mins.l %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = icmp sle i64 %0, %1
%4 = select i1 %3, i64 %0, i64 %1
ret i64 %4
@@ -176,13 +204,6 @@ define i64 @minu64(i64, i64) {
; CHECK-NEXT: cmov.l.lt %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: minu64:
-; OPT: # %bb.0:
-; OPT-NEXT: cmpu.l %s2, %s0, %s1
-; OPT-NEXT: cmov.l.lt %s1, %s0, %s2
-; OPT-NEXT: or %s0, 0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = icmp ult i64 %0, %1
%4 = select i1 %3, i64 %0, i64 %1
ret i64 %4
@@ -195,13 +216,6 @@ define i64 @min2u64(i64, i64) {
; CHECK-NEXT: cmov.l.le %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: min2u64:
-; OPT: # %bb.0:
-; OPT-NEXT: cmpu.l %s2, %s0, %s1
-; OPT-NEXT: cmov.l.le %s1, %s0, %s2
-; OPT-NEXT: or %s0, 0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = icmp ule i64 %0, %1
%4 = select i1 %3, i64 %0, i64 %1
ret i64 %4
@@ -212,11 +226,6 @@ define i32 @mini32(i32, i32) {
; CHECK: # %bb.0:
; CHECK-NEXT: mins.w.sx %s0, %s0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: mini32:
-; OPT: # %bb.0:
-; OPT-NEXT: mins.w.sx %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = icmp slt i32 %0, %1
%4 = select i1 %3, i32 %0, i32 %1
ret i32 %4
@@ -227,11 +236,6 @@ define i32 @min2i32(i32, i32) {
; CHECK: # %bb.0:
; CHECK-NEXT: mins.w.sx %s0, %s0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: min2i32:
-; OPT: # %bb.0:
-; OPT-NEXT: mins.w.sx %s0, %s0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = icmp sle i32 %0, %1
%4 = select i1 %3, i32 %0, i32 %1
ret i32 %4
@@ -244,13 +248,6 @@ define i32 @minu32(i32, i32) {
; CHECK-NEXT: cmov.w.lt %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: minu32:
-; OPT: # %bb.0:
-; OPT-NEXT: cmpu.w %s2, %s0, %s1
-; OPT-NEXT: cmov.w.lt %s1, %s0, %s2
-; OPT-NEXT: or %s0, 0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = icmp ult i32 %0, %1
%4 = select i1 %3, i32 %0, i32 %1
ret i32 %4
@@ -263,13 +260,6 @@ define i32 @min2u32(i32, i32) {
; CHECK-NEXT: cmov.w.le %s1, %s0, %s2
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: min2u32:
-; OPT: # %bb.0:
-; OPT-NEXT: cmpu.w %s2, %s0, %s1
-; OPT-NEXT: cmov.w.le %s1, %s0, %s2
-; OPT-NEXT: or %s0, 0, %s1
-; OPT-NEXT: b.l.t (, %s10)
%3 = icmp ule i32 %0, %1
%4 = select i1 %3, i32 %0, i32 %1
ret i32 %4
@@ -283,14 +273,6 @@ define zeroext i1 @mini1(i1 zeroext, i1 zeroext) {
; CHECK-NEXT: cmov.w.ne %s0, %s1, %s2
; CHECK-NEXT: adds.w.zx %s0, %s0, (0)1
; CHECK-NEXT: b.l.t (, %s10)
-;
-; OPT-LABEL: mini1:
-; OPT: # %bb.0:
-; OPT-NEXT: and %s2, 1, %s0
-; OPT-NEXT: and %s0, %s1, %s0
-; OPT-NEXT: cmov.w.ne %s0, %s1, %s2
-; OPT-NEXT: adds.w.zx %s0, %s0, (0)1
-; OPT-NEXT: b.l.t (, %s10)
%3 = xor i1 %0, true
%4 = and i1 %3, %1
%5 = select i1 %4, i1 %0, i1 %1
diff --git a/llvm/test/CodeGen/WebAssembly/partial-reduce-accumulate.ll b/llvm/test/CodeGen/WebAssembly/partial-reduce-accumulate.ll
index 47ea762..a599f46 100644
--- a/llvm/test/CodeGen/WebAssembly/partial-reduce-accumulate.ll
+++ b/llvm/test/CodeGen/WebAssembly/partial-reduce-accumulate.ll
@@ -19,11 +19,11 @@ define hidden i32 @accumulate_add_u8_u8(ptr noundef readonly %a, ptr noundef re
; MAX-BANDWIDTH: v128.load
; MAX-BANDWIDTH: i16x8.extadd_pairwise_i8x16_u
; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_u
-; MAX-BANDWIDTH: i32x4.add
; MAX-BANDWIDTH: v128.load
; MAX-BANDWIDTH: i16x8.extadd_pairwise_i8x16_u
; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_u
; MAX-BANDWIDTH: i32x4.add
+; MAX-BANDWIDTH: i32x4.add
entry:
%cmp8.not = icmp eq i32 %N, 0
@@ -65,11 +65,11 @@ define hidden i32 @accumulate_add_s8_s8(ptr noundef readonly %a, ptr noundef re
; MAX-BANDWIDTH: v128.load
; MAX-BANDWIDTH: i16x8.extadd_pairwise_i8x16_s
; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_s
-; MAX-BANDWIDTH: i32x4.add
; MAX-BANDWIDTH: v128.load
; MAX-BANDWIDTH: i16x8.extadd_pairwise_i8x16_s
; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_s
; MAX-BANDWIDTH: i32x4.add
+; MAX-BANDWIDTH: i32x4.add
entry:
%cmp8.not = icmp eq i32 %N, 0
br i1 %cmp8.not, label %for.cond.cleanup, label %for.body
@@ -108,12 +108,11 @@ define hidden i32 @accumulate_add_s8_u8(ptr noundef readonly %a, ptr noundef re
; MAX-BANDWIDTH: loop
; MAX-BANDWIDTH: v128.load
-; MAX-BANDWIDTH: i16x8.extadd_pairwise_i8x16_s
-; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_s
-; MAX-BANDWIDTH: i32x4.add
-; MAX-BANDWIDTH: v128.load
; MAX-BANDWIDTH: i16x8.extadd_pairwise_i8x16_u
; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_u
+; MAX-BANDWIDTH: v128.load
+; MAX-BANDWIDTH: i16x8.extadd_pairwise_i8x16_s
+; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_s
; MAX-BANDWIDTH: i32x4.add
entry:
%cmp8.not = icmp eq i32 %N, 0
@@ -363,10 +362,10 @@ define hidden i32 @accumulate_add_u16_u16(ptr noundef readonly %a, ptr noundef
; MAX-BANDWIDTH: loop
; MAX-BANDWIDTH: v128.load
; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_u
-; MAX-BANDWIDTH: i32x4.add
; MAX-BANDWIDTH: v128.load
; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_u
; MAX-BANDWIDTH: i32x4.add
+; MAX-BANDWIDTH: i32x4.add
entry:
%cmp8.not = icmp eq i32 %N, 0
br i1 %cmp8.not, label %for.cond.cleanup, label %for.body
@@ -402,10 +401,10 @@ define hidden i32 @accumulate_add_s16_s16(ptr noundef readonly %a, ptr noundef
; MAX-BANDWIDTH: loop
; MAX-BANDWIDTH: v128.load
; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_s
-; MAX-BANDWIDTH: i32x4.add
; MAX-BANDWIDTH: v128.load
; MAX-BANDWIDTH: i32x4.extadd_pairwise_i16x8_s
; MAX-BANDWIDTH: i32x4.add
+; MAX-BANDWIDTH: i32x4.add
entry:
%cmp8.not = icmp eq i32 %N, 0
br i1 %cmp8.not, label %for.cond.cleanup, label %for.body
diff --git a/llvm/test/CodeGen/X86/avx10.2-intrinsic-upgrade.ll b/llvm/test/CodeGen/X86/avx10.2-intrinsic-upgrade.ll
new file mode 100644
index 0000000..76d84c1
--- /dev/null
+++ b/llvm/test/CodeGen/X86/avx10.2-intrinsic-upgrade.ll
@@ -0,0 +1,99 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx10.2 --show-mc-encoding | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx10.2 --show-mc-encoding | FileCheck %s --check-prefixes=X64
+
+declare <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32>, <16 x i32>, <16 x i32>)
+
+define <16 x i32>@test_int_x86_avx10_vpdpbssd_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
+; X86-LABEL: test_int_x86_avx10_vpdpbssd_512:
+; X86: # %bb.0:
+; X86-NEXT: vpdpbssd %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x77,0x48,0x50,0xc2]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx10_vpdpbssd_512:
+; X64: # %bb.0:
+; X64-NEXT: vpdpbssd %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x77,0x48,0x50,0xc2]
+; X64-NEXT: retq # encoding: [0xc3]
+ %res = call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2)
+ ret <16 x i32> %res
+}
+
+declare <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32>, <16 x i32>, <16 x i32>)
+
+define <16 x i32>@test_int_x86_avx10_vpdpbssds_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
+; X86-LABEL: test_int_x86_avx10_vpdpbssds_512:
+; X86: # %bb.0:
+; X86-NEXT: vpdpbssds %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x77,0x48,0x51,0xc2]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx10_vpdpbssds_512:
+; X64: # %bb.0:
+; X64-NEXT: vpdpbssds %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x77,0x48,0x51,0xc2]
+; X64-NEXT: retq # encoding: [0xc3]
+ %res = call <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2)
+ ret <16 x i32> %res
+}
+
+declare <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32>, <16 x i32>, <16 x i32>)
+
+define <16 x i32>@test_int_x86_avx10_vpdpbsud_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
+; X86-LABEL: test_int_x86_avx10_vpdpbsud_512:
+; X86: # %bb.0:
+; X86-NEXT: vpdpbsud %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x76,0x48,0x50,0xc2]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx10_vpdpbsud_512:
+; X64: # %bb.0:
+; X64-NEXT: vpdpbsud %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x76,0x48,0x50,0xc2]
+; X64-NEXT: retq # encoding: [0xc3]
+ %res = call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2)
+ ret <16 x i32> %res
+}
+
+declare <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32>, <16 x i32>, <16 x i32>)
+
+define <16 x i32>@test_int_x86_avx10_vpdpbsuds_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
+; X86-LABEL: test_int_x86_avx10_vpdpbsuds_512:
+; X86: # %bb.0:
+; X86-NEXT: vpdpbsuds %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x76,0x48,0x51,0xc2]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx10_vpdpbsuds_512:
+; X64: # %bb.0:
+; X64-NEXT: vpdpbsuds %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x76,0x48,0x51,0xc2]
+; X64-NEXT: retq # encoding: [0xc3]
+ %res = call <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2)
+ ret <16 x i32> %res
+}
+
+declare <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32>, <16 x i32>, <16 x i32>)
+
+define <16 x i32>@test_int_x86_avx10_vpdpbuud_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
+; X86-LABEL: test_int_x86_avx10_vpdpbuud_512:
+; X86: # %bb.0:
+; X86-NEXT: vpdpbuud %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x74,0x48,0x50,0xc2]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx10_vpdpbuud_512:
+; X64: # %bb.0:
+; X64-NEXT: vpdpbuud %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x74,0x48,0x50,0xc2]
+; X64-NEXT: retq # encoding: [0xc3]
+ %res = call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2)
+ ret <16 x i32> %res
+}
+
+declare <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32>, <16 x i32>, <16 x i32>)
+
+define <16 x i32>@test_int_x86_avx10_vpdpbuuds_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2) {
+; X86-LABEL: test_int_x86_avx10_vpdpbuuds_512:
+; X86: # %bb.0:
+; X86-NEXT: vpdpbuuds %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x74,0x48,0x51,0xc2]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx10_vpdpbuuds_512:
+; X64: # %bb.0:
+; X64-NEXT: vpdpbuuds %zmm2, %zmm1, %zmm0 # encoding: [0x62,0xf2,0x74,0x48,0x51,0xc2]
+; X64-NEXT: retq # encoding: [0xc3]
+ %res = call <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2)
+ ret <16 x i32> %res
+}
diff --git a/llvm/test/CodeGen/X86/avx10_2_512ni-intrinsics.ll b/llvm/test/CodeGen/X86/avx10_2_512ni-intrinsics.ll
index 09eb53f..a2aad60 100644
--- a/llvm/test/CodeGen/X86/avx10_2_512ni-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx10_2_512ni-intrinsics.ll
@@ -53,7 +53,7 @@ declare <16 x float> @llvm.x86.avx10.vdpphps.512(<16 x float>, <32 x half>, <32
; VNNI INT8
-define <16 x i32> @test_mm512_dpbssd_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr %pB) {
+define <16 x i32> @test_mm512_dpbssd_epi32(<16 x i32> %__W, <64 x i8> %__A, ptr %pB) {
; X86-LABEL: test_mm512_dpbssd_epi32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -64,12 +64,12 @@ define <16 x i32> @test_mm512_dpbssd_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr
; X64: # %bb.0:
; X64-NEXT: vpdpbssd (%rdi), %zmm1, %zmm0 # encoding: [0x62,0xf2,0x77,0x48,0x50,0x07]
; X64-NEXT: retq # encoding: [0xc3]
- %__B = load <16 x i32>, ptr %pB
- %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B)
+ %__B = load <64 x i8>, ptr %pB
+ %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B)
ret <16 x i32> %res
}
-define <16 x i32> @test_mm512_mask_dpbssds_epi32(<16 x i32> %__W, i16 zeroext %__U, <16 x i32> %__A, <16 x i32> %__B) {
+define <16 x i32> @test_mm512_mask_dpbssds_epi32(<16 x i32> %__W, i16 zeroext %__U, <64 x i8> %__A, <64 x i8> %__B) {
; X86-LABEL: test_mm512_mask_dpbssds_epi32:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
@@ -81,13 +81,13 @@ define <16 x i32> @test_mm512_mask_dpbssds_epi32(<16 x i32> %__W, i16 zeroext %_
; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT: vpdpbssds %zmm2, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf2,0x77,0x49,0x51,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
- %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B)
+ %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B)
%bst = bitcast i16 %__U to <16 x i1>
%res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> %__W
ret <16 x i32> %res
}
-define <16 x i32> @test_mm512_maskz_dpbssd_epi32(i16 zeroext %__U, <16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) {
+define <16 x i32> @test_mm512_maskz_dpbssd_epi32(i16 zeroext %__U, <16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) {
; X86-LABEL: test_mm512_maskz_dpbssd_epi32:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
@@ -99,16 +99,16 @@ define <16 x i32> @test_mm512_maskz_dpbssd_epi32(i16 zeroext %__U, <16 x i32> %_
; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT: vpdpbssd %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x77,0xc9,0x50,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
- %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B)
+ %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B)
%bst = bitcast i16 %__U to <16 x i1>
%res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> zeroinitializer
ret <16 x i32> %res
}
-declare <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32>, <16 x i32>, <16 x i32>)
-declare <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32>, <16 x i32>, <16 x i32>)
+declare <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32>, <64 x i8>, <64 x i8>)
+declare <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32>, <64 x i8>, <64 x i8>)
-define <16 x i32> @test_mm512_dpbsud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr %pB) {
+define <16 x i32> @test_mm512_dpbsud_epi32(<16 x i32> %__W, <64 x i8> %__A, ptr %pB) {
; X86-LABEL: test_mm512_dpbsud_epi32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -119,12 +119,12 @@ define <16 x i32> @test_mm512_dpbsud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr
; X64: # %bb.0:
; X64-NEXT: vpdpbsud (%rdi), %zmm1, %zmm0 # encoding: [0x62,0xf2,0x76,0x48,0x50,0x07]
; X64-NEXT: retq # encoding: [0xc3]
- %__B = load <16 x i32>, ptr %pB
- %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B)
+ %__B = load <64 x i8>, ptr %pB
+ %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B)
ret <16 x i32> %res
}
-define <16 x i32> @test_mm512_mask_dpbsuds_epi32(<16 x i32> %__W, i16 zeroext %__U, <16 x i32> %__A, <16 x i32> %__B) {
+define <16 x i32> @test_mm512_mask_dpbsuds_epi32(<16 x i32> %__W, i16 zeroext %__U, <64 x i8> %__A, <64 x i8> %__B) {
; X86-LABEL: test_mm512_mask_dpbsuds_epi32:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
@@ -136,13 +136,13 @@ define <16 x i32> @test_mm512_mask_dpbsuds_epi32(<16 x i32> %__W, i16 zeroext %_
; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT: vpdpbsuds %zmm2, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf2,0x76,0x49,0x51,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
- %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B)
+ %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B)
%bst = bitcast i16 %__U to <16 x i1>
%res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> %__W
ret <16 x i32> %res
}
-define <16 x i32> @test_mm512_maskz_dpbsud_epi32(i16 zeroext %__U, <16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) {
+define <16 x i32> @test_mm512_maskz_dpbsud_epi32(i16 zeroext %__U, <16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) {
; X86-LABEL: test_mm512_maskz_dpbsud_epi32:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
@@ -154,16 +154,16 @@ define <16 x i32> @test_mm512_maskz_dpbsud_epi32(i16 zeroext %__U, <16 x i32> %_
; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT: vpdpbsud %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x76,0xc9,0x50,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
- %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B)
+ %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B)
%bst = bitcast i16 %__U to <16 x i1>
%res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> zeroinitializer
ret <16 x i32> %res
}
-declare <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32>, <16 x i32>, <16 x i32>)
-declare <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32>, <16 x i32>, <16 x i32>)
+declare <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32>, <64 x i8>, <64 x i8>)
+declare <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32>, <64 x i8>, <64 x i8>)
-define <16 x i32> @test_mm512_dpbuud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr %pB) {
+define <16 x i32> @test_mm512_dpbuud_epi32(<16 x i32> %__W, <64 x i8> %__A, ptr %pB) {
; X86-LABEL: test_mm512_dpbuud_epi32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -174,12 +174,12 @@ define <16 x i32> @test_mm512_dpbuud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr
; X64: # %bb.0:
; X64-NEXT: vpdpbuud (%rdi), %zmm1, %zmm0 # encoding: [0x62,0xf2,0x74,0x48,0x50,0x07]
; X64-NEXT: retq # encoding: [0xc3]
- %__B = load <16 x i32>, ptr %pB
- %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B)
+ %__B = load <64 x i8>, ptr %pB
+ %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B)
ret <16 x i32> %res
}
-define <16 x i32> @test_mm512_mask_dpbuuds_epi32(<16 x i32> %__W, i16 zeroext %__U, <16 x i32> %__A, <16 x i32> %__B) {
+define <16 x i32> @test_mm512_mask_dpbuuds_epi32(<16 x i32> %__W, i16 zeroext %__U, <64 x i8> %__A, <64 x i8> %__B) {
; X86-LABEL: test_mm512_mask_dpbuuds_epi32:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
@@ -191,13 +191,13 @@ define <16 x i32> @test_mm512_mask_dpbuuds_epi32(<16 x i32> %__W, i16 zeroext %_
; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT: vpdpbuuds %zmm2, %zmm1, %zmm0 {%k1} # encoding: [0x62,0xf2,0x74,0x49,0x51,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
- %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B)
+ %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B)
%bst = bitcast i16 %__U to <16 x i1>
%res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> %__W
ret <16 x i32> %res
}
-define <16 x i32> @test_mm512_maskz_dpbuud_epi32(i16 zeroext %__U, <16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) {
+define <16 x i32> @test_mm512_maskz_dpbuud_epi32(i16 zeroext %__U, <16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) {
; X86-LABEL: test_mm512_maskz_dpbuud_epi32:
; X86: # %bb.0:
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
@@ -209,14 +209,14 @@ define <16 x i32> @test_mm512_maskz_dpbuud_epi32(i16 zeroext %__U, <16 x i32> %_
; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT: vpdpbuud %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x74,0xc9,0x50,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
- %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B)
+ %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B)
%bst = bitcast i16 %__U to <16 x i1>
%res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> zeroinitializer
ret <16 x i32> %res
}
-declare <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32>, <16 x i32>, <16 x i32>)
-declare <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32>, <16 x i32>, <16 x i32>)
+declare <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32>, <64 x i8>, <64 x i8>)
+declare <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32>, <64 x i8>, <64 x i8>)
; VNNI INT16
diff --git a/llvm/test/CodeGen/X86/avx10_2ni-intrinsics.ll b/llvm/test/CodeGen/X86/avx10_2ni-intrinsics.ll
index 0c5fd3b..1f270d5 100644
--- a/llvm/test/CodeGen/X86/avx10_2ni-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx10_2ni-intrinsics.ll
@@ -101,7 +101,7 @@ declare <8 x float> @llvm.x86.avx10.vdpphps.256(<8 x float>, <16 x half>, <16 x
; VNNI INT8
-define <4 x i32> @test_mm_mask_dpbssd_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 x i32> %__A, <4 x i32> %__B) {
+define <4 x i32> @test_mm_mask_dpbssd_epi32(<4 x i32> %__W, i4 zeroext %__U, <16 x i8> %__A, <16 x i8> %__B) {
; X86-LABEL: test_mm_mask_dpbssd_epi32:
; X86: # %bb.0:
; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
@@ -113,13 +113,13 @@ define <4 x i32> @test_mm_mask_dpbssd_epi32(<4 x i32> %__W, i4 zeroext %__U, <4
; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT: vpdpbssd %xmm2, %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf2,0x77,0x09,0x50,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
- %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B)
+ %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B)
%bst = bitcast i4 %__U to <4 x i1>
%res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> %__W
ret <4 x i32> %res
}
-define <4 x i32> @test_mm_maskz_dpbssds_epi32(i4 zeroext %__U, <4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) {
+define <4 x i32> @test_mm_maskz_dpbssds_epi32(i4 zeroext %__U, <4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) {
; X86-LABEL: test_mm_maskz_dpbssds_epi32:
; X86: # %bb.0:
; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
@@ -131,13 +131,13 @@ define <4 x i32> @test_mm_maskz_dpbssds_epi32(i4 zeroext %__U, <4 x i32> %__W, <
; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT: vpdpbssds %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x77,0x89,0x51,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
- %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B)
+ %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B)
%bst = bitcast i4 %__U to <4 x i1>
%res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> zeroinitializer
ret <4 x i32> %res
}
-define <8 x i32> @test_mm256_maskz_dpbssds_epi32(<8 x i32> %__W, i8 zeroext %__U, <8 x i32> %__A, <8 x i32> %__B) {
+define <8 x i32> @test_mm256_maskz_dpbssds_epi32(<8 x i32> %__W, i8 zeroext %__U, <32 x i8> %__A, <32 x i8> %__B) {
; X86-LABEL: test_mm256_maskz_dpbssds_epi32:
; X86: # %bb.0:
; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
@@ -149,13 +149,13 @@ define <8 x i32> @test_mm256_maskz_dpbssds_epi32(<8 x i32> %__W, i8 zeroext %__U
; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT: vpdpbssds %ymm2, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf2,0x77,0x29,0x51,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
- %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B)
+ %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B)
%bst = bitcast i8 %__U to <8 x i1>
%res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> %__W
ret <8 x i32> %res
}
-define <8 x i32> @test_mm256_mask_dpbssd_epi32(i8 zeroext %__U, <8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) {
+define <8 x i32> @test_mm256_mask_dpbssd_epi32(i8 zeroext %__U, <8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) {
; X86-LABEL: test_mm256_mask_dpbssd_epi32:
; X86: # %bb.0:
; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
@@ -167,18 +167,18 @@ define <8 x i32> @test_mm256_mask_dpbssd_epi32(i8 zeroext %__U, <8 x i32> %__W,
; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT: vpdpbssd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x77,0xa9,0x50,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
- %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B)
+ %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B)
%bst = bitcast i8 %__U to <8 x i1>
%res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> zeroinitializer
ret <8 x i32> %res
}
-declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <8 x i32>, <8 x i32>)
-declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <16 x i8>, <16 x i8>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <16 x i8>, <16 x i8>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <32 x i8>, <32 x i8>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <32 x i8>, <32 x i8>)
-define <4 x i32> @test_mm_mask_dpbsud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 x i32> %__A, <4 x i32> %__B) {
+define <4 x i32> @test_mm_mask_dpbsud_epi32(<4 x i32> %__W, i4 zeroext %__U, <16 x i8> %__A, <16 x i8> %__B) {
; X86-LABEL: test_mm_mask_dpbsud_epi32:
; X86: # %bb.0:
; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
@@ -190,13 +190,13 @@ define <4 x i32> @test_mm_mask_dpbsud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4
; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT: vpdpbsud %xmm2, %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf2,0x76,0x09,0x50,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
- %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B)
+ %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B)
%bst = bitcast i4 %__U to <4 x i1>
%res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> %__W
ret <4 x i32> %res
}
-define <4 x i32> @test_mm_maskz_dpbsuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) {
+define <4 x i32> @test_mm_maskz_dpbsuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) {
; X86-LABEL: test_mm_maskz_dpbsuds_epi32:
; X86: # %bb.0:
; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
@@ -208,13 +208,13 @@ define <4 x i32> @test_mm_maskz_dpbsuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <
; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT: vpdpbsuds %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x76,0x89,0x51,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
- %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B)
+ %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B)
%bst = bitcast i4 %__U to <4 x i1>
%res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> zeroinitializer
ret <4 x i32> %res
}
-define <8 x i32> @test_mm256_maskz_dpbsuds_epi32(<8 x i32> %__W, i8 zeroext %__U, <8 x i32> %__A, <8 x i32> %__B) {
+define <8 x i32> @test_mm256_maskz_dpbsuds_epi32(<8 x i32> %__W, i8 zeroext %__U, <32 x i8> %__A, <32 x i8> %__B) {
; X86-LABEL: test_mm256_maskz_dpbsuds_epi32:
; X86: # %bb.0:
; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
@@ -226,13 +226,13 @@ define <8 x i32> @test_mm256_maskz_dpbsuds_epi32(<8 x i32> %__W, i8 zeroext %__U
; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT: vpdpbsuds %ymm2, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf2,0x76,0x29,0x51,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
- %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B)
+ %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B)
%bst = bitcast i8 %__U to <8 x i1>
%res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> %__W
ret <8 x i32> %res
}
-define <8 x i32> @test_mm256_mask_dpbsud_epi32(i8 zeroext %__U, <8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) {
+define <8 x i32> @test_mm256_mask_dpbsud_epi32(i8 zeroext %__U, <8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) {
; X86-LABEL: test_mm256_mask_dpbsud_epi32:
; X86: # %bb.0:
; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
@@ -244,18 +244,18 @@ define <8 x i32> @test_mm256_mask_dpbsud_epi32(i8 zeroext %__U, <8 x i32> %__W,
; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT: vpdpbsud %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x76,0xa9,0x50,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
- %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B)
+ %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B)
%bst = bitcast i8 %__U to <8 x i1>
%res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> zeroinitializer
ret <8 x i32> %res
}
-declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <8 x i32>, <8 x i32>)
-declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <16 x i8>, <16 x i8>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <16 x i8>, <16 x i8>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <32 x i8>, <32 x i8>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <32 x i8>, <32 x i8>)
-define <4 x i32> @test_mm_mask_dpbuud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 x i32> %__A, <4 x i32> %__B) {
+define <4 x i32> @test_mm_mask_dpbuud_epi32(<4 x i32> %__W, i4 zeroext %__U, <16 x i8> %__A, <16 x i8> %__B) {
; X86-LABEL: test_mm_mask_dpbuud_epi32:
; X86: # %bb.0:
; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
@@ -267,13 +267,13 @@ define <4 x i32> @test_mm_mask_dpbuud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4
; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT: vpdpbuud %xmm2, %xmm1, %xmm0 {%k1} # encoding: [0x62,0xf2,0x74,0x09,0x50,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
- %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B)
+ %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B)
%bst = bitcast i4 %__U to <4 x i1>
%res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> %__W
ret <4 x i32> %res
}
-define <4 x i32> @test_mm_maskz_dpbuuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) {
+define <4 x i32> @test_mm_maskz_dpbuuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) {
; X86-LABEL: test_mm_maskz_dpbuuds_epi32:
; X86: # %bb.0:
; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
@@ -285,13 +285,13 @@ define <4 x i32> @test_mm_maskz_dpbuuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <
; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT: vpdpbuuds %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x74,0x89,0x51,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
- %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B)
+ %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B)
%bst = bitcast i4 %__U to <4 x i1>
%res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> zeroinitializer
ret <4 x i32> %res
}
-define <8 x i32> @test_mm256_maskz_dpbuuds_epi32(<8 x i32> %__W, i8 zeroext %__U, <8 x i32> %__A, <8 x i32> %__B) {
+define <8 x i32> @test_mm256_maskz_dpbuuds_epi32(<8 x i32> %__W, i8 zeroext %__U, <32 x i8> %__A, <32 x i8> %__B) {
; X86-LABEL: test_mm256_maskz_dpbuuds_epi32:
; X86: # %bb.0:
; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
@@ -303,13 +303,13 @@ define <8 x i32> @test_mm256_maskz_dpbuuds_epi32(<8 x i32> %__W, i8 zeroext %__U
; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT: vpdpbuuds %ymm2, %ymm1, %ymm0 {%k1} # encoding: [0x62,0xf2,0x74,0x29,0x51,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
- %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B)
+ %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B)
%bst = bitcast i8 %__U to <8 x i1>
%res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> %__W
ret <8 x i32> %res
}
-define <8 x i32> @test_mm256_mask_dpbuud_epi32(i8 zeroext %__U, <8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) {
+define <8 x i32> @test_mm256_mask_dpbuud_epi32(i8 zeroext %__U, <8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) {
; X86-LABEL: test_mm256_mask_dpbuud_epi32:
; X86: # %bb.0:
; X86-NEXT: kmovb {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf9,0x90,0x4c,0x24,0x04]
@@ -321,16 +321,16 @@ define <8 x i32> @test_mm256_mask_dpbuud_epi32(i8 zeroext %__U, <8 x i32> %__W,
; X64-NEXT: kmovd %edi, %k1 # encoding: [0xc5,0xfb,0x92,0xcf]
; X64-NEXT: vpdpbuud %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x74,0xa9,0x50,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
- %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B)
+ %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B)
%bst = bitcast i8 %__U to <8 x i1>
%res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> zeroinitializer
ret <8 x i32> %res
}
-declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <8 x i32>, <8 x i32>)
-declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <16 x i8>, <16 x i8>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <16 x i8>, <16 x i8>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <32 x i8>, <32 x i8>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <32 x i8>, <32 x i8>)
; VNNI INT16
diff --git a/llvm/test/CodeGen/X86/avxvnniint8-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avxvnniint8-intrinsics-upgrade.ll
new file mode 100644
index 0000000..ce9a0fb
--- /dev/null
+++ b/llvm/test/CodeGen/X86/avxvnniint8-intrinsics-upgrade.ll
@@ -0,0 +1,318 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avxvnniint8 --show-mc-encoding | FileCheck %s --check-prefixes=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avxvnniint8 --show-mc-encoding | FileCheck %s --check-prefixes=X64
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx10.2 --show-mc-encoding | FileCheck %s --check-prefixes=AVX10-X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx10.2 --show-mc-encoding | FileCheck %s --check-prefixes=AVX10-X64
+
+declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <4 x i32>, <4 x i32>)
+
+define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) {
+; X86-LABEL: test_int_x86_avx2_vpdpbssd_128:
+; X86: # %bb.0:
+; X86-NEXT: vpdpbssd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x73,0x50,0xc2]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx2_vpdpbssd_128:
+; X64: # %bb.0:
+; X64-NEXT: vpdpbssd %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x73,0x50,0xc2]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbssd_128:
+; AVX10-X86: # %bb.0:
+; AVX10-X86-NEXT: vpdpbssd %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x77,0x08,0x50,0xc2]
+; AVX10-X86-NEXT: retl # encoding: [0xc3]
+;
+; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbssd_128:
+; AVX10-X64: # %bb.0:
+; AVX10-X64-NEXT: vpdpbssd %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x77,0x08,0x50,0xc2]
+; AVX10-X64-NEXT: retq # encoding: [0xc3]
+ %res = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
+ ret <4 x i32> %res
+}
+
+declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <4 x i32>, <4 x i32>)
+
+define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) {
+; X86-LABEL: test_int_x86_avx2_vpdpbssds_128:
+; X86: # %bb.0:
+; X86-NEXT: vpdpbssds %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x73,0x51,0xc2]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx2_vpdpbssds_128:
+; X64: # %bb.0:
+; X64-NEXT: vpdpbssds %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x73,0x51,0xc2]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbssds_128:
+; AVX10-X86: # %bb.0:
+; AVX10-X86-NEXT: vpdpbssds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x77,0x08,0x51,0xc2]
+; AVX10-X86-NEXT: retl # encoding: [0xc3]
+;
+; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbssds_128:
+; AVX10-X64: # %bb.0:
+; AVX10-X64-NEXT: vpdpbssds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x77,0x08,0x51,0xc2]
+; AVX10-X64-NEXT: retq # encoding: [0xc3]
+ %res = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
+ ret <4 x i32> %res
+}
+
+declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <8 x i32>, <8 x i32>)
+
+define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) {
+; X86-LABEL: test_int_x86_avx2_vpdpbssd_256:
+; X86: # %bb.0:
+; X86-NEXT: vpdpbssd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x77,0x50,0xc2]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx2_vpdpbssd_256:
+; X64: # %bb.0:
+; X64-NEXT: vpdpbssd %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x77,0x50,0xc2]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbssd_256:
+; AVX10-X86: # %bb.0:
+; AVX10-X86-NEXT: vpdpbssd %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x77,0x28,0x50,0xc2]
+; AVX10-X86-NEXT: retl # encoding: [0xc3]
+;
+; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbssd_256:
+; AVX10-X64: # %bb.0:
+; AVX10-X64-NEXT: vpdpbssd %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x77,0x28,0x50,0xc2]
+; AVX10-X64-NEXT: retq # encoding: [0xc3]
+ %res = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
+ ret <8 x i32> %res
+}
+
+declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <8 x i32>, <8 x i32>)
+
+define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) {
+; X86-LABEL: test_int_x86_avx2_vpdpbssds_256:
+; X86: # %bb.0:
+; X86-NEXT: vpdpbssds %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x77,0x51,0xc2]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx2_vpdpbssds_256:
+; X64: # %bb.0:
+; X64-NEXT: vpdpbssds %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x77,0x51,0xc2]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbssds_256:
+; AVX10-X86: # %bb.0:
+; AVX10-X86-NEXT: vpdpbssds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x77,0x28,0x51,0xc2]
+; AVX10-X86-NEXT: retl # encoding: [0xc3]
+;
+; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbssds_256:
+; AVX10-X64: # %bb.0:
+; AVX10-X64-NEXT: vpdpbssds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x77,0x28,0x51,0xc2]
+; AVX10-X64-NEXT: retq # encoding: [0xc3]
+ %res = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
+ ret <8 x i32> %res
+}
+
+declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <4 x i32>, <4 x i32>)
+
+define <4 x i32>@test_int_x86_avx2_vpdpbsud_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) {
+; X86-LABEL: test_int_x86_avx2_vpdpbsud_128:
+; X86: # %bb.0:
+; X86-NEXT: vpdpbsud %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x72,0x50,0xc2]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx2_vpdpbsud_128:
+; X64: # %bb.0:
+; X64-NEXT: vpdpbsud %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x72,0x50,0xc2]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbsud_128:
+; AVX10-X86: # %bb.0:
+; AVX10-X86-NEXT: vpdpbsud %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x76,0x08,0x50,0xc2]
+; AVX10-X86-NEXT: retl # encoding: [0xc3]
+;
+; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbsud_128:
+; AVX10-X64: # %bb.0:
+; AVX10-X64-NEXT: vpdpbsud %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x76,0x08,0x50,0xc2]
+; AVX10-X64-NEXT: retq # encoding: [0xc3]
+ %res = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
+ ret <4 x i32> %res
+}
+
+declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <4 x i32>, <4 x i32>)
+
+define <4 x i32>@test_int_x86_avx2_vpdpbsuds_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) {
+; X86-LABEL: test_int_x86_avx2_vpdpbsuds_128:
+; X86: # %bb.0:
+; X86-NEXT: vpdpbsuds %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x72,0x51,0xc2]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx2_vpdpbsuds_128:
+; X64: # %bb.0:
+; X64-NEXT: vpdpbsuds %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x72,0x51,0xc2]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbsuds_128:
+; AVX10-X86: # %bb.0:
+; AVX10-X86-NEXT: vpdpbsuds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x76,0x08,0x51,0xc2]
+; AVX10-X86-NEXT: retl # encoding: [0xc3]
+;
+; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbsuds_128:
+; AVX10-X64: # %bb.0:
+; AVX10-X64-NEXT: vpdpbsuds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x76,0x08,0x51,0xc2]
+; AVX10-X64-NEXT: retq # encoding: [0xc3]
+ %res = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
+ ret <4 x i32> %res
+}
+
+declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <8 x i32>, <8 x i32>)
+
+define <8 x i32>@test_int_x86_avx2_vpdpbsud_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) {
+; X86-LABEL: test_int_x86_avx2_vpdpbsud_256:
+; X86: # %bb.0:
+; X86-NEXT: vpdpbsud %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x76,0x50,0xc2]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx2_vpdpbsud_256:
+; X64: # %bb.0:
+; X64-NEXT: vpdpbsud %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x76,0x50,0xc2]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbsud_256:
+; AVX10-X86: # %bb.0:
+; AVX10-X86-NEXT: vpdpbsud %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x76,0x28,0x50,0xc2]
+; AVX10-X86-NEXT: retl # encoding: [0xc3]
+;
+; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbsud_256:
+; AVX10-X64: # %bb.0:
+; AVX10-X64-NEXT: vpdpbsud %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x76,0x28,0x50,0xc2]
+; AVX10-X64-NEXT: retq # encoding: [0xc3]
+ %res = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
+ ret <8 x i32> %res
+}
+
+declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <8 x i32>, <8 x i32>)
+
+define <8 x i32>@test_int_x86_avx2_vpdpbsuds_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) {
+; X86-LABEL: test_int_x86_avx2_vpdpbsuds_256:
+; X86: # %bb.0:
+; X86-NEXT: vpdpbsuds %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x76,0x51,0xc2]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx2_vpdpbsuds_256:
+; X64: # %bb.0:
+; X64-NEXT: vpdpbsuds %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x76,0x51,0xc2]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbsuds_256:
+; AVX10-X86: # %bb.0:
+; AVX10-X86-NEXT: vpdpbsuds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x76,0x28,0x51,0xc2]
+; AVX10-X86-NEXT: retl # encoding: [0xc3]
+;
+; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbsuds_256:
+; AVX10-X64: # %bb.0:
+; AVX10-X64-NEXT: vpdpbsuds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x76,0x28,0x51,0xc2]
+; AVX10-X64-NEXT: retq # encoding: [0xc3]
+ %res = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
+ ret <8 x i32> %res
+}
+
+declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <4 x i32>, <4 x i32>)
+
+define <4 x i32>@test_int_x86_avx2_vpdpbuud(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) {
+; X86-LABEL: test_int_x86_avx2_vpdpbuud:
+; X86: # %bb.0:
+; X86-NEXT: vpdpbuud %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x70,0x50,0xc2]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx2_vpdpbuud:
+; X64: # %bb.0:
+; X64-NEXT: vpdpbuud %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x70,0x50,0xc2]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbuud:
+; AVX10-X86: # %bb.0:
+; AVX10-X86-NEXT: vpdpbuud %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x74,0x08,0x50,0xc2]
+; AVX10-X86-NEXT: retl # encoding: [0xc3]
+;
+; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbuud:
+; AVX10-X64: # %bb.0:
+; AVX10-X64-NEXT: vpdpbuud %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x74,0x08,0x50,0xc2]
+; AVX10-X64-NEXT: retq # encoding: [0xc3]
+ %res = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
+ ret <4 x i32> %res
+}
+
+declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <4 x i32>, <4 x i32>)
+
+define <4 x i32>@test_int_x86_avx2_vpdpbuuds_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2) {
+; X86-LABEL: test_int_x86_avx2_vpdpbuuds_128:
+; X86: # %bb.0:
+; X86-NEXT: vpdpbuuds %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x70,0x51,0xc2]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx2_vpdpbuuds_128:
+; X64: # %bb.0:
+; X64-NEXT: vpdpbuuds %xmm2, %xmm1, %xmm0 # encoding: [0xc4,0xe2,0x70,0x51,0xc2]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbuuds_128:
+; AVX10-X86: # %bb.0:
+; AVX10-X86-NEXT: vpdpbuuds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x74,0x08,0x51,0xc2]
+; AVX10-X86-NEXT: retl # encoding: [0xc3]
+;
+; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbuuds_128:
+; AVX10-X64: # %bb.0:
+; AVX10-X64-NEXT: vpdpbuuds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x74,0x08,0x51,0xc2]
+; AVX10-X64-NEXT: retq # encoding: [0xc3]
+ %res = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
+ ret <4 x i32> %res
+}
+
+declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <8 x i32>, <8 x i32>)
+
+define <8 x i32>@test_int_x86_avx2_vpdpbuud_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) {
+; X86-LABEL: test_int_x86_avx2_vpdpbuud_256:
+; X86: # %bb.0:
+; X86-NEXT: vpdpbuud %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x74,0x50,0xc2]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx2_vpdpbuud_256:
+; X64: # %bb.0:
+; X64-NEXT: vpdpbuud %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x74,0x50,0xc2]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbuud_256:
+; AVX10-X86: # %bb.0:
+; AVX10-X86-NEXT: vpdpbuud %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x74,0x28,0x50,0xc2]
+; AVX10-X86-NEXT: retl # encoding: [0xc3]
+;
+; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbuud_256:
+; AVX10-X64: # %bb.0:
+; AVX10-X64-NEXT: vpdpbuud %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x74,0x28,0x50,0xc2]
+; AVX10-X64-NEXT: retq # encoding: [0xc3]
+ %res = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
+ ret <8 x i32> %res
+}
+
+declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <8 x i32>, <8 x i32>)
+
+define <8 x i32>@test_int_x86_avx2_vpdpbuuds_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2) {
+; X86-LABEL: test_int_x86_avx2_vpdpbuuds_256:
+; X86: # %bb.0:
+; X86-NEXT: vpdpbuuds %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x74,0x51,0xc2]
+; X86-NEXT: retl # encoding: [0xc3]
+;
+; X64-LABEL: test_int_x86_avx2_vpdpbuuds_256:
+; X64: # %bb.0:
+; X64-NEXT: vpdpbuuds %ymm2, %ymm1, %ymm0 # encoding: [0xc4,0xe2,0x74,0x51,0xc2]
+; X64-NEXT: retq # encoding: [0xc3]
+;
+; AVX10-X86-LABEL: test_int_x86_avx2_vpdpbuuds_256:
+; AVX10-X86: # %bb.0:
+; AVX10-X86-NEXT: vpdpbuuds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x74,0x28,0x51,0xc2]
+; AVX10-X86-NEXT: retl # encoding: [0xc3]
+;
+; AVX10-X64-LABEL: test_int_x86_avx2_vpdpbuuds_256:
+; AVX10-X64: # %bb.0:
+; AVX10-X64-NEXT: vpdpbuuds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x74,0x28,0x51,0xc2]
+; AVX10-X64-NEXT: retq # encoding: [0xc3]
+ %res = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
+ ret <8 x i32> %res
+}
+
diff --git a/llvm/test/CodeGen/X86/avxvnniint8-intrinsics.ll b/llvm/test/CodeGen/X86/avxvnniint8-intrinsics.ll
index 0ddd017..6c3d90aa 100644
--- a/llvm/test/CodeGen/X86/avxvnniint8-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avxvnniint8-intrinsics.ll
@@ -5,9 +5,9 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx10.2 --show-mc-encoding | FileCheck %s --check-prefixes=AVX10-X64
-declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <16 x i8>, <16 x i8>)
-define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) {
+define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbssd_128:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -41,16 +41,16 @@ define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <4 x i32> %x1, pt
; AVX10-X64-NEXT: vpdpbssd %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x77,0x08,0x50,0xc2]
; AVX10-X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
; AVX10-X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <4 x i32>, ptr %x2p
- %1 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4)
+ %x2 = load <16 x i8>, ptr %x2p
+ %1 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4)
%res = add <4 x i32> %1, %2
ret <4 x i32> %res
}
-declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <16 x i8>, <16 x i8>)
-define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) {
+define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbssds_128:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -84,16 +84,16 @@ define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <4 x i32> %x1, p
; AVX10-X64-NEXT: vpdpbssds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x77,0x08,0x51,0xc2]
; AVX10-X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
; AVX10-X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <4 x i32>, ptr %x2p
- %1 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4)
+ %x2 = load <16 x i8>, ptr %x2p
+ %1 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4)
%res = add <4 x i32> %1, %2
ret <4 x i32> %res
}
-declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <32 x i8>, <32 x i8>)
-define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) {
+define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbssd_256:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -127,16 +127,16 @@ define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <8 x i32> %x1, pt
; AVX10-X64-NEXT: vpdpbssd %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x77,0x28,0x50,0xc2]
; AVX10-X64-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0]
; AVX10-X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <8 x i32>, ptr %x2p
- %1 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4)
+ %x2 = load <32 x i8>, ptr %x2p
+ %1 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4)
%res = add <8 x i32> %1, %2
ret <8 x i32> %res
}
-declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <32 x i8>, <32 x i8>)
-define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) {
+define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbssds_256:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -170,16 +170,16 @@ define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <8 x i32> %x1, p
; AVX10-X64-NEXT: vpdpbssds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x77,0x28,0x51,0xc2]
; AVX10-X64-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0]
; AVX10-X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <8 x i32>, ptr %x2p
- %1 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4)
+ %x2 = load <32 x i8>, ptr %x2p
+ %1 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4)
%res = add <8 x i32> %1, %2
ret <8 x i32> %res
}
-declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <16 x i8>, <16 x i8>)
-define <4 x i32>@test_int_x86_avx2_vpdpbsud_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) {
+define <4 x i32>@test_int_x86_avx2_vpdpbsud_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbsud_128:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -213,16 +213,16 @@ define <4 x i32>@test_int_x86_avx2_vpdpbsud_128(<4 x i32> %x0, <4 x i32> %x1, pt
; AVX10-X64-NEXT: vpdpbsud %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x76,0x08,0x50,0xc2]
; AVX10-X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
; AVX10-X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <4 x i32>, ptr %x2p
- %1 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4)
+ %x2 = load <16 x i8>, ptr %x2p
+ %1 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4)
%res = add <4 x i32> %1, %2
ret <4 x i32> %res
}
-declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <16 x i8>, <16 x i8>)
-define <4 x i32>@test_int_x86_avx2_vpdpbsuds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) {
+define <4 x i32>@test_int_x86_avx2_vpdpbsuds_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbsuds_128:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -256,16 +256,16 @@ define <4 x i32>@test_int_x86_avx2_vpdpbsuds_128(<4 x i32> %x0, <4 x i32> %x1, p
; AVX10-X64-NEXT: vpdpbsuds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x76,0x08,0x51,0xc2]
; AVX10-X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
; AVX10-X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <4 x i32>, ptr %x2p
- %1 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4)
+ %x2 = load <16 x i8>, ptr %x2p
+ %1 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4)
%res = add <4 x i32> %1, %2
ret <4 x i32> %res
}
-declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <32 x i8>, <32 x i8>)
-define <8 x i32>@test_int_x86_avx2_vpdpbsud_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) {
+define <8 x i32>@test_int_x86_avx2_vpdpbsud_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbsud_256:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -299,16 +299,16 @@ define <8 x i32>@test_int_x86_avx2_vpdpbsud_256(<8 x i32> %x0, <8 x i32> %x1, pt
; AVX10-X64-NEXT: vpdpbsud %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x76,0x28,0x50,0xc2]
; AVX10-X64-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0]
; AVX10-X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <8 x i32>, ptr %x2p
- %1 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4)
+ %x2 = load <32 x i8>, ptr %x2p
+ %1 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4)
%res = add <8 x i32> %1, %2
ret <8 x i32> %res
}
-declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <32 x i8>, <32 x i8>)
-define <8 x i32>@test_int_x86_avx2_vpdpbsuds_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) {
+define <8 x i32>@test_int_x86_avx2_vpdpbsuds_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbsuds_256:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -342,16 +342,16 @@ define <8 x i32>@test_int_x86_avx2_vpdpbsuds_256(<8 x i32> %x0, <8 x i32> %x1, p
; AVX10-X64-NEXT: vpdpbsuds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x76,0x28,0x51,0xc2]
; AVX10-X64-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0]
; AVX10-X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <8 x i32>, ptr %x2p
- %1 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4)
+ %x2 = load <32 x i8>, ptr %x2p
+ %1 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4)
%res = add <8 x i32> %1, %2
ret <8 x i32> %res
}
-declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <16 x i8>, <16 x i8>)
-define <4 x i32>@test_int_x86_avx2_vpdpbuud_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) {
+define <4 x i32>@test_int_x86_avx2_vpdpbuud_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbuud_128:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -385,16 +385,16 @@ define <4 x i32>@test_int_x86_avx2_vpdpbuud_128(<4 x i32> %x0, <4 x i32> %x1, pt
; AVX10-X64-NEXT: vpdpbuud %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x74,0x08,0x50,0xc2]
; AVX10-X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
; AVX10-X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <4 x i32>, ptr %x2p
- %1 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4)
+ %x2 = load <16 x i8>, ptr %x2p
+ %1 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4)
%res = add <4 x i32> %1, %2
ret <4 x i32> %res
}
-declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <16 x i8>, <16 x i8>)
-define <4 x i32>@test_int_x86_avx2_vpdpbuuds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) {
+define <4 x i32>@test_int_x86_avx2_vpdpbuuds_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbuuds_128:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -428,16 +428,16 @@ define <4 x i32>@test_int_x86_avx2_vpdpbuuds_128(<4 x i32> %x0, <4 x i32> %x1, p
; AVX10-X64-NEXT: vpdpbuuds %xmm2, %xmm1, %xmm0 # encoding: [0x62,0xf2,0x74,0x08,0x51,0xc2]
; AVX10-X64-NEXT: vpaddd %xmm0, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0xfe,0xc0]
; AVX10-X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <4 x i32>, ptr %x2p
- %1 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4)
+ %x2 = load <16 x i8>, ptr %x2p
+ %1 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4)
%res = add <4 x i32> %1, %2
ret <4 x i32> %res
}
-declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <32 x i8>, <32 x i8>)
-define <8 x i32>@test_int_x86_avx2_vpdpbuud_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) {
+define <8 x i32>@test_int_x86_avx2_vpdpbuud_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbuud_256:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -471,16 +471,16 @@ define <8 x i32>@test_int_x86_avx2_vpdpbuud_256(<8 x i32> %x0, <8 x i32> %x1, pt
; AVX10-X64-NEXT: vpdpbuud %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x74,0x28,0x50,0xc2]
; AVX10-X64-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0]
; AVX10-X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <8 x i32>, ptr %x2p
- %1 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4)
+ %x2 = load <32 x i8>, ptr %x2p
+ %1 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4)
%res = add <8 x i32> %1, %2
ret <8 x i32> %res
}
-declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <32 x i8>, <32 x i8>)
-define <8 x i32>@test_int_x86_avx2_vpdpbuuds_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) {
+define <8 x i32>@test_int_x86_avx2_vpdpbuuds_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) {
; X86-LABEL: test_int_x86_avx2_vpdpbuuds_256:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
@@ -514,9 +514,9 @@ define <8 x i32>@test_int_x86_avx2_vpdpbuuds_256(<8 x i32> %x0, <8 x i32> %x1, p
; AVX10-X64-NEXT: vpdpbuuds %ymm2, %ymm1, %ymm0 # encoding: [0x62,0xf2,0x74,0x28,0x51,0xc2]
; AVX10-X64-NEXT: vpaddd %ymm0, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0xfe,0xc0]
; AVX10-X64-NEXT: retq # encoding: [0xc3]
- %x2 = load <8 x i32>, ptr %x2p
- %1 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4)
+ %x2 = load <32 x i8>, ptr %x2p
+ %1 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4)
%res = add <8 x i32> %1, %2
ret <8 x i32> %res
}
diff --git a/llvm/test/CodeGen/X86/stack-folding-int-avxvnniint8.ll b/llvm/test/CodeGen/X86/stack-folding-int-avxvnniint8.ll
index fd988f7..a49d3a5 100644
--- a/llvm/test/CodeGen/X86/stack-folding-int-avxvnniint8.ll
+++ b/llvm/test/CodeGen/X86/stack-folding-int-avxvnniint8.ll
@@ -1,20 +1,20 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avxvnniint8 < %s | FileCheck %s
-declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <8 x i32>, <8 x i32>)
-declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <8 x i32>, <8 x i32>)
-declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <8 x i32>, <8 x i32>)
-declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <8 x i32>, <8 x i32>)
-declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <8 x i32>, <8 x i32>)
-declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <16 x i8>, <16 x i8>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <16 x i8>, <16 x i8>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <32 x i8>, <32 x i8>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <32 x i8>, <32 x i8>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <16 x i8>, <16 x i8>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <16 x i8>, <16 x i8>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <32 x i8>, <32 x i8>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <32 x i8>, <32 x i8>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <16 x i8>, <16 x i8>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <16 x i8>, <16 x i8>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <32 x i8>, <32 x i8>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <32 x i8>, <32 x i8>)
-define <4 x i32> @stack_fold_vpdpbssd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
+define <4 x i32> @stack_fold_vpdpbssd(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbssd:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -24,11 +24,11 @@ define <4 x i32> @stack_fold_vpdpbssd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a
; CHECK-NEXT: vpdpbssd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2)
ret <4 x i32> %2
}
-define <4 x i32> @stack_fold_vpdpbssd_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
+define <4 x i32> @stack_fold_vpdpbssd_commuted(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbssd_commuted:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -38,11 +38,11 @@ define <4 x i32> @stack_fold_vpdpbssd_commuted(<4 x i32> %a0, <4 x i32> %a1, <4
; CHECK-NEXT: vpdpbssd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %a0, <4 x i32> %a2, <4 x i32> %a1)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %a0, <16 x i8> %a2, <16 x i8> %a1)
ret <4 x i32> %2
}
-define <8 x i32> @stack_fold_vpdpbssd_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) {
+define <8 x i32> @stack_fold_vpdpbssd_256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbssd_256:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -52,11 +52,11 @@ define <8 x i32> @stack_fold_vpdpbssd_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32
; CHECK-NEXT: vpdpbssd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2)
ret <8 x i32> %2
}
-define <8 x i32> @stack_fold_vpdpbssd_256_commuted(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) {
+define <8 x i32> @stack_fold_vpdpbssd_256_commuted(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbssd_256_commuted:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -66,11 +66,11 @@ define <8 x i32> @stack_fold_vpdpbssd_256_commuted(<8 x i32> %a0, <8 x i32> %a1,
; CHECK-NEXT: vpdpbssd {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %a0, <8 x i32> %a2, <8 x i32> %a1)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %a0, <32 x i8> %a2, <32 x i8> %a1)
ret <8 x i32> %2
}
-define <4 x i32> @stack_fold_vpdpbssds(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
+define <4 x i32> @stack_fold_vpdpbssds(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbssds:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -80,11 +80,11 @@ define <4 x i32> @stack_fold_vpdpbssds(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %
; CHECK-NEXT: vpdpbssds {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2)
ret <4 x i32> %2
}
-define <4 x i32> @stack_fold_vpdpbssds_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
+define <4 x i32> @stack_fold_vpdpbssds_commuted(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbssds_commuted:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -94,11 +94,11 @@ define <4 x i32> @stack_fold_vpdpbssds_commuted(<4 x i32> %a0, <4 x i32> %a1, <4
; CHECK-NEXT: vpdpbssds {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %a0, <4 x i32> %a2, <4 x i32> %a1)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %a0, <16 x i8> %a2, <16 x i8> %a1)
ret <4 x i32> %2
}
-define <8 x i32> @stack_fold_vpdpbssds_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) {
+define <8 x i32> @stack_fold_vpdpbssds_256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbssds_256:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -108,11 +108,11 @@ define <8 x i32> @stack_fold_vpdpbssds_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i3
; CHECK-NEXT: vpdpbssds {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2)
ret <8 x i32> %2
}
-define <8 x i32> @stack_fold_vpdpbssds_256_commuted(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) {
+define <8 x i32> @stack_fold_vpdpbssds_256_commuted(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbssds_256_commuted:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -122,11 +122,11 @@ define <8 x i32> @stack_fold_vpdpbssds_256_commuted(<8 x i32> %a0, <8 x i32> %a1
; CHECK-NEXT: vpdpbssds {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %a0, <8 x i32> %a2, <8 x i32> %a1)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %a0, <32 x i8> %a2, <32 x i8> %a1)
ret <8 x i32> %2
}
-define <4 x i32> @stack_fold_vpdpbsud(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
+define <4 x i32> @stack_fold_vpdpbsud(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbsud:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -136,11 +136,11 @@ define <4 x i32> @stack_fold_vpdpbsud(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a
; CHECK-NEXT: vpdpbsud {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2)
ret <4 x i32> %2
}
-define <4 x i32> @stack_fold_vpdpbsud_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
+define <4 x i32> @stack_fold_vpdpbsud_commuted(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbsud_commuted:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -151,11 +151,11 @@ define <4 x i32> @stack_fold_vpdpbsud_commuted(<4 x i32> %a0, <4 x i32> %a1, <4
; CHECK-NEXT: vpdpbsud %xmm1, %xmm2, %xmm0
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %a0, <4 x i32> %a2, <4 x i32> %a1)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %a0, <16 x i8> %a2, <16 x i8> %a1)
ret <4 x i32> %2
}
-define <8 x i32> @stack_fold_vpdpbsud_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) {
+define <8 x i32> @stack_fold_vpdpbsud_256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbsud_256:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -165,11 +165,11 @@ define <8 x i32> @stack_fold_vpdpbsud_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32
; CHECK-NEXT: vpdpbsud {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2)
ret <8 x i32> %2
}
-define <8 x i32> @stack_fold_vpdpbsud_256_commuted(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) {
+define <8 x i32> @stack_fold_vpdpbsud_256_commuted(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbsud_256_commuted:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -180,11 +180,11 @@ define <8 x i32> @stack_fold_vpdpbsud_256_commuted(<8 x i32> %a0, <8 x i32> %a1,
; CHECK-NEXT: vpdpbsud %ymm1, %ymm2, %ymm0
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %a0, <8 x i32> %a2, <8 x i32> %a1)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %a0, <32 x i8> %a2, <32 x i8> %a1)
ret <8 x i32> %2
}
-define <4 x i32> @stack_fold_vpdpbsuds(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
+define <4 x i32> @stack_fold_vpdpbsuds(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbsuds:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -194,11 +194,11 @@ define <4 x i32> @stack_fold_vpdpbsuds(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %
; CHECK-NEXT: vpdpbsuds {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2)
ret <4 x i32> %2
}
-define <4 x i32> @stack_fold_vpdpbsuds_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
+define <4 x i32> @stack_fold_vpdpbsuds_commuted(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbsuds_commuted:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -209,11 +209,11 @@ define <4 x i32> @stack_fold_vpdpbsuds_commuted(<4 x i32> %a0, <4 x i32> %a1, <4
; CHECK-NEXT: vpdpbsuds %xmm1, %xmm2, %xmm0
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %a0, <4 x i32> %a2, <4 x i32> %a1)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %a0, <16 x i8> %a2, <16 x i8> %a1)
ret <4 x i32> %2
}
-define <8 x i32> @stack_fold_vpdpbsuds_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) {
+define <8 x i32> @stack_fold_vpdpbsuds_256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbsuds_256:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -223,11 +223,11 @@ define <8 x i32> @stack_fold_vpdpbsuds_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i3
; CHECK-NEXT: vpdpbsuds {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2)
ret <8 x i32> %2
}
-define <8 x i32> @stack_fold_vpdpbsuds_256_commuted(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) {
+define <8 x i32> @stack_fold_vpdpbsuds_256_commuted(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbsuds_256_commuted:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -238,11 +238,11 @@ define <8 x i32> @stack_fold_vpdpbsuds_256_commuted(<8 x i32> %a0, <8 x i32> %a1
; CHECK-NEXT: vpdpbsuds %ymm1, %ymm2, %ymm0
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %a0, <8 x i32> %a2, <8 x i32> %a1)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %a0, <32 x i8> %a2, <32 x i8> %a1)
ret <8 x i32> %2
}
-define <4 x i32> @stack_fold_vpdpbuud(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
+define <4 x i32> @stack_fold_vpdpbuud(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbuud:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -252,11 +252,11 @@ define <4 x i32> @stack_fold_vpdpbuud(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a
; CHECK-NEXT: vpdpbuud {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2)
ret <4 x i32> %2
}
-define <4 x i32> @stack_fold_vpdpbuud_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
+define <4 x i32> @stack_fold_vpdpbuud_commuted(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbuud_commuted:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -266,11 +266,11 @@ define <4 x i32> @stack_fold_vpdpbuud_commuted(<4 x i32> %a0, <4 x i32> %a1, <4
; CHECK-NEXT: vpdpbuud {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %a0, <4 x i32> %a2, <4 x i32> %a1)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %a0, <16 x i8> %a2, <16 x i8> %a1)
ret <4 x i32> %2
}
-define <8 x i32> @stack_fold_vpdpbuud_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) {
+define <8 x i32> @stack_fold_vpdpbuud_256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbuud_256:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -280,11 +280,11 @@ define <8 x i32> @stack_fold_vpdpbuud_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32
; CHECK-NEXT: vpdpbuud {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2)
ret <8 x i32> %2
}
-define <8 x i32> @stack_fold_vpdpbuud_256_commuted(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) {
+define <8 x i32> @stack_fold_vpdpbuud_256_commuted(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbuud_256_commuted:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -294,11 +294,11 @@ define <8 x i32> @stack_fold_vpdpbuud_256_commuted(<8 x i32> %a0, <8 x i32> %a1,
; CHECK-NEXT: vpdpbuud {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %a0, <8 x i32> %a2, <8 x i32> %a1)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %a0, <32 x i8> %a2, <32 x i8> %a1)
ret <8 x i32> %2
}
-define <4 x i32> @stack_fold_vpdpbuuds(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
+define <4 x i32> @stack_fold_vpdpbuuds(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbuuds:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -308,11 +308,11 @@ define <4 x i32> @stack_fold_vpdpbuuds(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %
; CHECK-NEXT: vpdpbuuds {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2)
ret <4 x i32> %2
}
-define <4 x i32> @stack_fold_vpdpbuuds_commuted(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
+define <4 x i32> @stack_fold_vpdpbuuds_commuted(<4 x i32> %a0, <16 x i8> %a1, <16 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbuuds_commuted:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -322,11 +322,11 @@ define <4 x i32> @stack_fold_vpdpbuuds_commuted(<4 x i32> %a0, <4 x i32> %a1, <4
; CHECK-NEXT: vpdpbuuds {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %a0, <4 x i32> %a2, <4 x i32> %a1)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %a0, <16 x i8> %a2, <16 x i8> %a1)
ret <4 x i32> %2
}
-define <8 x i32> @stack_fold_vpdpbuuds_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) {
+define <8 x i32> @stack_fold_vpdpbuuds_256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbuuds_256:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -336,11 +336,11 @@ define <8 x i32> @stack_fold_vpdpbuuds_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i3
; CHECK-NEXT: vpdpbuuds {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2)
ret <8 x i32> %2
}
-define <8 x i32> @stack_fold_vpdpbuuds_256_commuted(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2) {
+define <8 x i32> @stack_fold_vpdpbuuds_256_commuted(<8 x i32> %a0, <32 x i8> %a1, <32 x i8> %a2) {
; CHECK-LABEL: stack_fold_vpdpbuuds_256_commuted:
; CHECK: # %bb.0:
; CHECK-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@@ -350,6 +350,6 @@ define <8 x i32> @stack_fold_vpdpbuuds_256_commuted(<8 x i32> %a0, <8 x i32> %a1
; CHECK-NEXT: vpdpbuuds {{[-0-9]+}}(%r{{[sb]}}p), %ymm1, %ymm0 # 32-byte Folded Reload
; CHECK-NEXT: retq
%1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %a0, <8 x i32> %a2, <8 x i32> %a1)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %a0, <32 x i8> %a2, <32 x i8> %a1)
ret <8 x i32> %2
}
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2_512ni-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2_512ni-intrinsics.ll
index 93006ae..991467e 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2_512ni-intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2_512ni-intrinsics.ll
@@ -124,11 +124,11 @@ define <16 x float> @test_mm512_maskz_dpph_ps(i16 zeroext %__U, <16 x float> %__
declare <16 x float> @llvm.x86.avx10.vdpphps.512(<16 x float>, <32 x half>, <32 x half>)
-define <16 x i32> @test_mm512_dpbssd_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr %pB) sanitize_memory {
+define <16 x i32> @test_mm512_dpbssd_epi32(<16 x i32> %__W, <64 x i8> %__A, ptr %pB) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_dpbssd_epi32(
-; CHECK-SAME: <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], ptr [[PB:%.*]]) #[[ATTR0]] {
+; CHECK-SAME: <16 x i32> [[__W:%.*]], <64 x i8> [[__A:%.*]], ptr [[PB:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP12:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
@@ -137,22 +137,18 @@ define <16 x i32> @test_mm512_dpbssd_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
; CHECK: [[BB5]]:
-; CHECK-NEXT: [[__B:%.*]] = load <16 x i32>, ptr [[PB]], align 64
+; CHECK-NEXT: [[TMP10:%.*]] = load <64 x i8>, ptr [[PB]], align 64
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[PB]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-; CHECK-NEXT: [[_MSLD:%.*]] = load <16 x i32>, ptr [[TMP8]], align 64
-; CHECK-NEXT: [[TMP9:%.*]] = bitcast <16 x i32> [[__A]] to <64 x i8>
-; CHECK-NEXT: [[TMP10:%.*]] = bitcast <16 x i32> [[__B]] to <64 x i8>
-; CHECK-NEXT: [[TMP11:%.*]] = bitcast <16 x i32> [[TMP3]] to <64 x i8>
-; CHECK-NEXT: [[TMP12:%.*]] = bitcast <16 x i32> [[_MSLD]] to <64 x i8>
-; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <64 x i8> [[TMP11]], zeroinitializer
+; CHECK-NEXT: [[TMP9:%.*]] = load <64 x i8>, ptr [[TMP8]], align 64
; CHECK-NEXT: [[TMP14:%.*]] = icmp ne <64 x i8> [[TMP12]], zeroinitializer
; CHECK-NEXT: [[TMP15:%.*]] = icmp ne <64 x i8> [[TMP9]], zeroinitializer
+; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <64 x i8> [[__A]], zeroinitializer
; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <64 x i8> [[TMP10]], zeroinitializer
-; CHECK-NEXT: [[TMP17:%.*]] = and <64 x i1> [[TMP13]], [[TMP14]]
-; CHECK-NEXT: [[TMP18:%.*]] = and <64 x i1> [[TMP15]], [[TMP14]]
-; CHECK-NEXT: [[TMP19:%.*]] = and <64 x i1> [[TMP13]], [[TMP16]]
+; CHECK-NEXT: [[TMP17:%.*]] = and <64 x i1> [[TMP14]], [[TMP15]]
+; CHECK-NEXT: [[TMP18:%.*]] = and <64 x i1> [[TMP11]], [[TMP15]]
+; CHECK-NEXT: [[TMP19:%.*]] = and <64 x i1> [[TMP14]], [[TMP16]]
; CHECK-NEXT: [[TMP20:%.*]] = or <64 x i1> [[TMP17]], [[TMP18]]
; CHECK-NEXT: [[TMP21:%.*]] = or <64 x i1> [[TMP20]], [[TMP19]]
; CHECK-NEXT: [[TMP22:%.*]] = sext <64 x i1> [[TMP21]] to <64 x i8>
@@ -160,34 +156,30 @@ define <16 x i32> @test_mm512_dpbssd_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr
; CHECK-NEXT: [[TMP24:%.*]] = icmp ne <16 x i32> [[TMP23]], zeroinitializer
; CHECK-NEXT: [[TMP27:%.*]] = sext <16 x i1> [[TMP24]] to <16 x i32>
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[TMP27]], [[TMP4]]
-; CHECK-NEXT: [[RES:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> [[__W]], <16 x i32> [[__A]], <16 x i32> [[__B]])
+; CHECK-NEXT: [[RES:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> [[__W]], <64 x i8> [[__A]], <64 x i8> [[TMP10]])
; CHECK-NEXT: store <16 x i32> [[_MSPROP1]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <16 x i32> [[RES]]
;
- %__B = load <16 x i32>, ptr %pB
- %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B)
+ %__B = load <64 x i8>, ptr %pB
+ %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B)
ret <16 x i32> %res
}
-define <16 x i32> @test_mm512_mask_dpbssds_epi32(<16 x i32> %__W, i16 zeroext %__U, <16 x i32> %__A, <16 x i32> %__B) sanitize_memory {
+define <16 x i32> @test_mm512_mask_dpbssds_epi32(<16 x i32> %__W, i16 zeroext %__U, <64 x i8> %__A, <64 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_mask_dpbssds_epi32(
-; CHECK-SAME: <16 x i32> [[__W:%.*]], i16 zeroext [[__U:%.*]], <16 x i32> [[__A:%.*]], <16 x i32> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-SAME: <16 x i32> [[__W:%.*]], i16 zeroext [[__U:%.*]], <64 x i8> [[__A:%.*]], <64 x i8> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP24:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP25:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP24:%.*]] = bitcast <16 x i32> [[__A]] to <64 x i8>
-; CHECK-NEXT: [[TMP25:%.*]] = bitcast <16 x i32> [[__B]] to <64 x i8>
-; CHECK-NEXT: [[TMP26:%.*]] = bitcast <16 x i32> [[TMP2]] to <64 x i8>
-; CHECK-NEXT: [[TMP27:%.*]] = bitcast <16 x i32> [[TMP3]] to <64 x i8>
-; CHECK-NEXT: [[TMP28:%.*]] = icmp ne <64 x i8> [[TMP26]], zeroinitializer
-; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <64 x i8> [[TMP27]], zeroinitializer
; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <64 x i8> [[TMP24]], zeroinitializer
; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <64 x i8> [[TMP25]], zeroinitializer
-; CHECK-NEXT: [[TMP13:%.*]] = and <64 x i1> [[TMP28]], [[TMP10]]
-; CHECK-NEXT: [[TMP14:%.*]] = and <64 x i1> [[TMP11]], [[TMP10]]
-; CHECK-NEXT: [[TMP15:%.*]] = and <64 x i1> [[TMP28]], [[TMP12]]
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <64 x i8> [[__A]], zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <64 x i8> [[__B]], zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = and <64 x i1> [[TMP11]], [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = and <64 x i1> [[TMP10]], [[TMP12]]
+; CHECK-NEXT: [[TMP15:%.*]] = and <64 x i1> [[TMP11]], [[TMP21]]
; CHECK-NEXT: [[TMP16:%.*]] = or <64 x i1> [[TMP13]], [[TMP14]]
; CHECK-NEXT: [[TMP17:%.*]] = or <64 x i1> [[TMP16]], [[TMP15]]
; CHECK-NEXT: [[TMP18:%.*]] = sext <64 x i1> [[TMP17]] to <64 x i8>
@@ -195,7 +187,7 @@ define <16 x i32> @test_mm512_mask_dpbssds_epi32(<16 x i32> %__W, i16 zeroext %_
; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <16 x i32> [[TMP19]], zeroinitializer
; CHECK-NEXT: [[TMP23:%.*]] = sext <16 x i1> [[TMP20]] to <16 x i32>
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[TMP23]], [[TMP1]]
-; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32> [[__W]], <16 x i32> [[__A]], <16 x i32> [[__B]])
+; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32> [[__W]], <64 x i8> [[__A]], <64 x i8> [[__B]])
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP4]] to <16 x i1>
; CHECK-NEXT: [[BST:%.*]] = bitcast i16 [[__U]] to <16 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[_MSPROP1]], <16 x i32> [[TMP1]]
@@ -207,31 +199,27 @@ define <16 x i32> @test_mm512_mask_dpbssds_epi32(<16 x i32> %__W, i16 zeroext %_
; CHECK-NEXT: store <16 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <16 x i32> [[RES]]
;
- %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B)
+ %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B)
%bst = bitcast i16 %__U to <16 x i1>
%res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> %__W
ret <16 x i32> %res
}
-define <16 x i32> @test_mm512_maskz_dpbssd_epi32(i16 zeroext %__U, <16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) sanitize_memory {
+define <16 x i32> @test_mm512_maskz_dpbssd_epi32(i16 zeroext %__U, <16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_maskz_dpbssd_epi32(
-; CHECK-SAME: i16 zeroext [[__U:%.*]], <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], <16 x i32> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-SAME: i16 zeroext [[__U:%.*]], <16 x i32> [[__W:%.*]], <64 x i8> [[__A:%.*]], <64 x i8> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP25:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP26:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
; CHECK-NEXT: [[TMP24:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP25:%.*]] = bitcast <16 x i32> [[__A]] to <64 x i8>
-; CHECK-NEXT: [[TMP26:%.*]] = bitcast <16 x i32> [[__B]] to <64 x i8>
-; CHECK-NEXT: [[TMP27:%.*]] = bitcast <16 x i32> [[TMP2]] to <64 x i8>
-; CHECK-NEXT: [[TMP28:%.*]] = bitcast <16 x i32> [[TMP3]] to <64 x i8>
-; CHECK-NEXT: [[TMP29:%.*]] = icmp ne <64 x i8> [[TMP27]], zeroinitializer
-; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <64 x i8> [[TMP28]], zeroinitializer
; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <64 x i8> [[TMP25]], zeroinitializer
; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <64 x i8> [[TMP26]], zeroinitializer
-; CHECK-NEXT: [[TMP13:%.*]] = and <64 x i1> [[TMP29]], [[TMP10]]
-; CHECK-NEXT: [[TMP14:%.*]] = and <64 x i1> [[TMP11]], [[TMP10]]
-; CHECK-NEXT: [[TMP15:%.*]] = and <64 x i1> [[TMP29]], [[TMP12]]
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <64 x i8> [[__A]], zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <64 x i8> [[__B]], zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = and <64 x i1> [[TMP11]], [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = and <64 x i1> [[TMP10]], [[TMP12]]
+; CHECK-NEXT: [[TMP15:%.*]] = and <64 x i1> [[TMP11]], [[TMP21]]
; CHECK-NEXT: [[TMP16:%.*]] = or <64 x i1> [[TMP13]], [[TMP14]]
; CHECK-NEXT: [[TMP17:%.*]] = or <64 x i1> [[TMP16]], [[TMP15]]
; CHECK-NEXT: [[TMP18:%.*]] = sext <64 x i1> [[TMP17]] to <64 x i8>
@@ -239,7 +227,7 @@ define <16 x i32> @test_mm512_maskz_dpbssd_epi32(i16 zeroext %__U, <16 x i32> %_
; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <16 x i32> [[TMP19]], zeroinitializer
; CHECK-NEXT: [[TMP23:%.*]] = sext <16 x i1> [[TMP20]] to <16 x i32>
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[TMP23]], [[TMP24]]
-; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> [[__W]], <16 x i32> [[__A]], <16 x i32> [[__B]])
+; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> [[__W]], <64 x i8> [[__A]], <64 x i8> [[__B]])
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP4]] to <16 x i1>
; CHECK-NEXT: [[BST:%.*]] = bitcast i16 [[__U]] to <16 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[_MSPROP1]], <16 x i32> zeroinitializer
@@ -251,21 +239,21 @@ define <16 x i32> @test_mm512_maskz_dpbssd_epi32(i16 zeroext %__U, <16 x i32> %_
; CHECK-NEXT: store <16 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <16 x i32> [[RES]]
;
- %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B)
+ %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B)
%bst = bitcast i16 %__U to <16 x i1>
%res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> zeroinitializer
ret <16 x i32> %res
}
-declare <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32>, <16 x i32>, <16 x i32>)
-declare <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32>, <16 x i32>, <16 x i32>)
+declare <16 x i32> @llvm.x86.avx10.vpdpbssd.512(<16 x i32>, <64 x i8>, <64 x i8>)
+declare <16 x i32> @llvm.x86.avx10.vpdpbssds.512(<16 x i32>, <64 x i8>, <64 x i8>)
-define <16 x i32> @test_mm512_dpbsud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr %pB) sanitize_memory {
+define <16 x i32> @test_mm512_dpbsud_epi32(<16 x i32> %__W, <64 x i8> %__A, ptr %pB) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_dpbsud_epi32(
-; CHECK-SAME: <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], ptr [[PB:%.*]]) #[[ATTR0]] {
+; CHECK-SAME: <16 x i32> [[__W:%.*]], <64 x i8> [[__A:%.*]], ptr [[PB:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -273,87 +261,123 @@ define <16 x i32> @test_mm512_dpbsud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
; CHECK: [[BB5]]:
-; CHECK-NEXT: [[__B:%.*]] = load <16 x i32>, ptr [[PB]], align 64
+; CHECK-NEXT: [[__B:%.*]] = load <64 x i8>, ptr [[PB]], align 64
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[PB]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-; CHECK-NEXT: [[_MSLD:%.*]] = load <16 x i32>, ptr [[TMP8]], align 64
-; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], [[_MSLD]]
-; CHECK-NEXT: [[RES:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> [[__W]], <16 x i32> [[__A]], <16 x i32> [[__B]])
-; CHECK-NEXT: store <16 x i32> [[_MSPROP1]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[_MSLD:%.*]] = load <64 x i8>, ptr [[TMP8]], align 64
+; CHECK-NEXT: [[TMP9:%.*]] = icmp ne <64 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <64 x i8> [[_MSLD]], zeroinitializer
+; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <64 x i8> [[__A]], zeroinitializer
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <64 x i8> [[__B]], zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = and <64 x i1> [[TMP9]], [[TMP10]]
+; CHECK-NEXT: [[TMP14:%.*]] = and <64 x i1> [[TMP11]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = and <64 x i1> [[TMP9]], [[TMP12]]
+; CHECK-NEXT: [[TMP16:%.*]] = or <64 x i1> [[TMP13]], [[TMP14]]
+; CHECK-NEXT: [[TMP17:%.*]] = or <64 x i1> [[TMP16]], [[TMP15]]
+; CHECK-NEXT: [[TMP18:%.*]] = sext <64 x i1> [[TMP17]] to <64 x i8>
+; CHECK-NEXT: [[TMP19:%.*]] = bitcast <64 x i8> [[TMP18]] to <16 x i32>
+; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <16 x i32> [[TMP19]], zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = sext <16 x i1> [[TMP20]] to <16 x i32>
+; CHECK-NEXT: [[TMP22:%.*]] = or <16 x i32> [[TMP21]], [[TMP4]]
+; CHECK-NEXT: [[RES:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> [[__W]], <64 x i8> [[__A]], <64 x i8> [[__B]])
+; CHECK-NEXT: store <16 x i32> [[TMP22]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <16 x i32> [[RES]]
;
- %__B = load <16 x i32>, ptr %pB
- %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B)
+ %__B = load <64 x i8>, ptr %pB
+ %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B)
ret <16 x i32> %res
}
-define <16 x i32> @test_mm512_mask_dpbsuds_epi32(<16 x i32> %__W, i16 zeroext %__U, <16 x i32> %__A, <16 x i32> %__B) sanitize_memory {
+define <16 x i32> @test_mm512_mask_dpbsuds_epi32(<16 x i32> %__W, i16 zeroext %__U, <64 x i8> %__A, <64 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_mask_dpbsuds_epi32(
-; CHECK-SAME: <16 x i32> [[__W:%.*]], i16 zeroext [[__U:%.*]], <16 x i32> [[__A:%.*]], <16 x i32> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-SAME: <16 x i32> [[__W:%.*]], i16 zeroext [[__U:%.*]], <64 x i8> [[__A:%.*]], <64 x i8> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], [[TMP3]]
-; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32> [[__W]], <16 x i32> [[__A]], <16 x i32> [[__B]])
+; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <64 x i8> [[TMP2]], zeroinitializer
+; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <64 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <64 x i8> [[__A]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = icmp ne <64 x i8> [[__B]], zeroinitializer
+; CHECK-NEXT: [[TMP23:%.*]] = and <64 x i1> [[TMP19]], [[TMP20]]
+; CHECK-NEXT: [[TMP10:%.*]] = and <64 x i1> [[TMP21]], [[TMP20]]
+; CHECK-NEXT: [[TMP11:%.*]] = and <64 x i1> [[TMP19]], [[TMP22]]
+; CHECK-NEXT: [[TMP12:%.*]] = or <64 x i1> [[TMP23]], [[TMP10]]
+; CHECK-NEXT: [[TMP13:%.*]] = or <64 x i1> [[TMP12]], [[TMP11]]
+; CHECK-NEXT: [[TMP14:%.*]] = sext <64 x i1> [[TMP13]] to <64 x i8>
+; CHECK-NEXT: [[TMP15:%.*]] = bitcast <64 x i8> [[TMP14]] to <16 x i32>
+; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <16 x i32> [[TMP15]], zeroinitializer
+; CHECK-NEXT: [[TMP17:%.*]] = sext <16 x i1> [[TMP16]] to <16 x i32>
+; CHECK-NEXT: [[TMP18:%.*]] = or <16 x i32> [[TMP17]], [[TMP1]]
+; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32> [[__W]], <64 x i8> [[__A]], <64 x i8> [[__B]])
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP4]] to <16 x i1>
; CHECK-NEXT: [[BST:%.*]] = bitcast i16 [[__U]] to <16 x i1>
-; CHECK-NEXT: [[TMP6:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[_MSPROP1]], <16 x i32> [[TMP1]]
+; CHECK-NEXT: [[TMP6:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[TMP18]], <16 x i32> [[TMP1]]
; CHECK-NEXT: [[TMP7:%.*]] = xor <16 x i32> [[DPI]], [[__W]]
-; CHECK-NEXT: [[TMP8:%.*]] = or <16 x i32> [[TMP7]], [[_MSPROP1]]
+; CHECK-NEXT: [[TMP8:%.*]] = or <16 x i32> [[TMP7]], [[TMP18]]
; CHECK-NEXT: [[TMP9:%.*]] = or <16 x i32> [[TMP8]], [[TMP1]]
; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <16 x i1> [[TMP5]], <16 x i32> [[TMP9]], <16 x i32> [[TMP6]]
; CHECK-NEXT: [[RES:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[DPI]], <16 x i32> [[__W]]
; CHECK-NEXT: store <16 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <16 x i32> [[RES]]
;
- %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B)
+ %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B)
%bst = bitcast i16 %__U to <16 x i1>
%res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> %__W
ret <16 x i32> %res
}
-define <16 x i32> @test_mm512_maskz_dpbsud_epi32(i16 zeroext %__U, <16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) sanitize_memory {
+define <16 x i32> @test_mm512_maskz_dpbsud_epi32(i16 zeroext %__U, <16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_maskz_dpbsud_epi32(
-; CHECK-SAME: i16 zeroext [[__U:%.*]], <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], <16 x i32> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-SAME: i16 zeroext [[__U:%.*]], <16 x i32> [[__W:%.*]], <64 x i8> [[__A:%.*]], <64 x i8> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP19:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], [[TMP3]]
-; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> [[__W]], <16 x i32> [[__A]], <16 x i32> [[__B]])
+; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <64 x i8> [[TMP2]], zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <64 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = icmp ne <64 x i8> [[__A]], zeroinitializer
+; CHECK-NEXT: [[TMP23:%.*]] = icmp ne <64 x i8> [[__B]], zeroinitializer
+; CHECK-NEXT: [[TMP24:%.*]] = and <64 x i1> [[TMP20]], [[TMP21]]
+; CHECK-NEXT: [[TMP10:%.*]] = and <64 x i1> [[TMP22]], [[TMP21]]
+; CHECK-NEXT: [[TMP11:%.*]] = and <64 x i1> [[TMP20]], [[TMP23]]
+; CHECK-NEXT: [[TMP12:%.*]] = or <64 x i1> [[TMP24]], [[TMP10]]
+; CHECK-NEXT: [[TMP13:%.*]] = or <64 x i1> [[TMP12]], [[TMP11]]
+; CHECK-NEXT: [[TMP14:%.*]] = sext <64 x i1> [[TMP13]] to <64 x i8>
+; CHECK-NEXT: [[TMP15:%.*]] = bitcast <64 x i8> [[TMP14]] to <16 x i32>
+; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <16 x i32> [[TMP15]], zeroinitializer
+; CHECK-NEXT: [[TMP17:%.*]] = sext <16 x i1> [[TMP16]] to <16 x i32>
+; CHECK-NEXT: [[TMP18:%.*]] = or <16 x i32> [[TMP17]], [[TMP19]]
+; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> [[__W]], <64 x i8> [[__A]], <64 x i8> [[__B]])
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP4]] to <16 x i1>
; CHECK-NEXT: [[BST:%.*]] = bitcast i16 [[__U]] to <16 x i1>
-; CHECK-NEXT: [[TMP6:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[_MSPROP1]], <16 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[TMP18]], <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP7:%.*]] = xor <16 x i32> [[DPI]], zeroinitializer
-; CHECK-NEXT: [[TMP8:%.*]] = or <16 x i32> [[TMP7]], [[_MSPROP1]]
+; CHECK-NEXT: [[TMP8:%.*]] = or <16 x i32> [[TMP7]], [[TMP18]]
; CHECK-NEXT: [[TMP9:%.*]] = or <16 x i32> [[TMP8]], zeroinitializer
; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <16 x i1> [[TMP5]], <16 x i32> [[TMP9]], <16 x i32> [[TMP6]]
; CHECK-NEXT: [[RES:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[DPI]], <16 x i32> zeroinitializer
; CHECK-NEXT: store <16 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <16 x i32> [[RES]]
;
- %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B)
+ %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B)
%bst = bitcast i16 %__U to <16 x i1>
%res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> zeroinitializer
ret <16 x i32> %res
}
-declare <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32>, <16 x i32>, <16 x i32>)
-declare <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32>, <16 x i32>, <16 x i32>)
+declare <16 x i32> @llvm.x86.avx10.vpdpbsud.512(<16 x i32>, <64 x i8>, <64 x i8>)
+declare <16 x i32> @llvm.x86.avx10.vpdpbsuds.512(<16 x i32>, <64 x i8>, <64 x i8>)
-define <16 x i32> @test_mm512_dpbuud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr %pB) sanitize_memory {
+define <16 x i32> @test_mm512_dpbuud_epi32(<16 x i32> %__W, <64 x i8> %__A, ptr %pB) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_dpbuud_epi32(
-; CHECK-SAME: <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], ptr [[PB:%.*]]) #[[ATTR0]] {
+; CHECK-SAME: <16 x i32> [[__W:%.*]], <64 x i8> [[__A:%.*]], ptr [[PB:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 128) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB4:.*]], label %[[BB5:.*]], !prof [[PROF1]]
@@ -361,80 +385,116 @@ define <16 x i32> @test_mm512_dpbuud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR5]]
; CHECK-NEXT: unreachable
; CHECK: [[BB5]]:
-; CHECK-NEXT: [[__B:%.*]] = load <16 x i32>, ptr [[PB]], align 64
+; CHECK-NEXT: [[__B:%.*]] = load <64 x i8>, ptr [[PB]], align 64
; CHECK-NEXT: [[TMP6:%.*]] = ptrtoint ptr [[PB]] to i64
; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[TMP6]], 87960930222080
; CHECK-NEXT: [[TMP8:%.*]] = inttoptr i64 [[TMP7]] to ptr
-; CHECK-NEXT: [[_MSLD:%.*]] = load <16 x i32>, ptr [[TMP8]], align 64
-; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], [[_MSLD]]
-; CHECK-NEXT: [[RES:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> [[__W]], <16 x i32> [[__A]], <16 x i32> [[__B]])
-; CHECK-NEXT: store <16 x i32> [[_MSPROP1]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: [[_MSLD:%.*]] = load <64 x i8>, ptr [[TMP8]], align 64
+; CHECK-NEXT: [[TMP9:%.*]] = icmp ne <64 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <64 x i8> [[_MSLD]], zeroinitializer
+; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <64 x i8> [[__A]], zeroinitializer
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <64 x i8> [[__B]], zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = and <64 x i1> [[TMP9]], [[TMP10]]
+; CHECK-NEXT: [[TMP14:%.*]] = and <64 x i1> [[TMP11]], [[TMP10]]
+; CHECK-NEXT: [[TMP15:%.*]] = and <64 x i1> [[TMP9]], [[TMP12]]
+; CHECK-NEXT: [[TMP16:%.*]] = or <64 x i1> [[TMP13]], [[TMP14]]
+; CHECK-NEXT: [[TMP17:%.*]] = or <64 x i1> [[TMP16]], [[TMP15]]
+; CHECK-NEXT: [[TMP18:%.*]] = sext <64 x i1> [[TMP17]] to <64 x i8>
+; CHECK-NEXT: [[TMP19:%.*]] = bitcast <64 x i8> [[TMP18]] to <16 x i32>
+; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <16 x i32> [[TMP19]], zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = sext <16 x i1> [[TMP20]] to <16 x i32>
+; CHECK-NEXT: [[TMP22:%.*]] = or <16 x i32> [[TMP21]], [[TMP4]]
+; CHECK-NEXT: [[RES:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> [[__W]], <64 x i8> [[__A]], <64 x i8> [[__B]])
+; CHECK-NEXT: store <16 x i32> [[TMP22]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <16 x i32> [[RES]]
;
- %__B = load <16 x i32>, ptr %pB
- %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B)
+ %__B = load <64 x i8>, ptr %pB
+ %res = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B)
ret <16 x i32> %res
}
-define <16 x i32> @test_mm512_mask_dpbuuds_epi32(<16 x i32> %__W, i16 zeroext %__U, <16 x i32> %__A, <16 x i32> %__B) sanitize_memory {
+define <16 x i32> @test_mm512_mask_dpbuuds_epi32(<16 x i32> %__W, i16 zeroext %__U, <64 x i8> %__A, <64 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_mask_dpbuuds_epi32(
-; CHECK-SAME: <16 x i32> [[__W:%.*]], i16 zeroext [[__U:%.*]], <16 x i32> [[__A:%.*]], <16 x i32> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-SAME: <16 x i32> [[__W:%.*]], i16 zeroext [[__U:%.*]], <64 x i8> [[__A:%.*]], <64 x i8> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], [[TMP3]]
-; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32> [[__W]], <16 x i32> [[__A]], <16 x i32> [[__B]])
+; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <64 x i8> [[TMP2]], zeroinitializer
+; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <64 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <64 x i8> [[__A]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = icmp ne <64 x i8> [[__B]], zeroinitializer
+; CHECK-NEXT: [[TMP23:%.*]] = and <64 x i1> [[TMP19]], [[TMP20]]
+; CHECK-NEXT: [[TMP10:%.*]] = and <64 x i1> [[TMP21]], [[TMP20]]
+; CHECK-NEXT: [[TMP11:%.*]] = and <64 x i1> [[TMP19]], [[TMP22]]
+; CHECK-NEXT: [[TMP12:%.*]] = or <64 x i1> [[TMP23]], [[TMP10]]
+; CHECK-NEXT: [[TMP13:%.*]] = or <64 x i1> [[TMP12]], [[TMP11]]
+; CHECK-NEXT: [[TMP14:%.*]] = sext <64 x i1> [[TMP13]] to <64 x i8>
+; CHECK-NEXT: [[TMP15:%.*]] = bitcast <64 x i8> [[TMP14]] to <16 x i32>
+; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <16 x i32> [[TMP15]], zeroinitializer
+; CHECK-NEXT: [[TMP17:%.*]] = sext <16 x i1> [[TMP16]] to <16 x i32>
+; CHECK-NEXT: [[TMP18:%.*]] = or <16 x i32> [[TMP17]], [[TMP1]]
+; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32> [[__W]], <64 x i8> [[__A]], <64 x i8> [[__B]])
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP4]] to <16 x i1>
; CHECK-NEXT: [[BST:%.*]] = bitcast i16 [[__U]] to <16 x i1>
-; CHECK-NEXT: [[TMP6:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[_MSPROP1]], <16 x i32> [[TMP1]]
+; CHECK-NEXT: [[TMP6:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[TMP18]], <16 x i32> [[TMP1]]
; CHECK-NEXT: [[TMP7:%.*]] = xor <16 x i32> [[DPI]], [[__W]]
-; CHECK-NEXT: [[TMP8:%.*]] = or <16 x i32> [[TMP7]], [[_MSPROP1]]
+; CHECK-NEXT: [[TMP8:%.*]] = or <16 x i32> [[TMP7]], [[TMP18]]
; CHECK-NEXT: [[TMP9:%.*]] = or <16 x i32> [[TMP8]], [[TMP1]]
; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <16 x i1> [[TMP5]], <16 x i32> [[TMP9]], <16 x i32> [[TMP6]]
; CHECK-NEXT: [[RES:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[DPI]], <16 x i32> [[__W]]
; CHECK-NEXT: store <16 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <16 x i32> [[RES]]
;
- %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B)
+ %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B)
%bst = bitcast i16 %__U to <16 x i1>
%res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> %__W
ret <16 x i32> %res
}
-define <16 x i32> @test_mm512_maskz_dpbuud_epi32(i16 zeroext %__U, <16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B) sanitize_memory {
+define <16 x i32> @test_mm512_maskz_dpbuud_epi32(i16 zeroext %__U, <16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <16 x i32> @test_mm512_maskz_dpbuud_epi32(
-; CHECK-SAME: i16 zeroext [[__U:%.*]], <16 x i32> [[__W:%.*]], <16 x i32> [[__A:%.*]], <16 x i32> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-SAME: i16 zeroext [[__U:%.*]], <16 x i32> [[__W:%.*]], <64 x i8> [[__A:%.*]], <64 x i8> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP2:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <64 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 136) to ptr), align 8
+; CHECK-NEXT: [[TMP19:%.*]] = load <16 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[_MSPROP:%.*]] = or <16 x i32> [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or <16 x i32> [[_MSPROP]], [[TMP3]]
-; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> [[__W]], <16 x i32> [[__A]], <16 x i32> [[__B]])
+; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <64 x i8> [[TMP2]], zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <64 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = icmp ne <64 x i8> [[__A]], zeroinitializer
+; CHECK-NEXT: [[TMP23:%.*]] = icmp ne <64 x i8> [[__B]], zeroinitializer
+; CHECK-NEXT: [[TMP24:%.*]] = and <64 x i1> [[TMP20]], [[TMP21]]
+; CHECK-NEXT: [[TMP10:%.*]] = and <64 x i1> [[TMP22]], [[TMP21]]
+; CHECK-NEXT: [[TMP11:%.*]] = and <64 x i1> [[TMP20]], [[TMP23]]
+; CHECK-NEXT: [[TMP12:%.*]] = or <64 x i1> [[TMP24]], [[TMP10]]
+; CHECK-NEXT: [[TMP13:%.*]] = or <64 x i1> [[TMP12]], [[TMP11]]
+; CHECK-NEXT: [[TMP14:%.*]] = sext <64 x i1> [[TMP13]] to <64 x i8>
+; CHECK-NEXT: [[TMP15:%.*]] = bitcast <64 x i8> [[TMP14]] to <16 x i32>
+; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <16 x i32> [[TMP15]], zeroinitializer
+; CHECK-NEXT: [[TMP17:%.*]] = sext <16 x i1> [[TMP16]] to <16 x i32>
+; CHECK-NEXT: [[TMP18:%.*]] = or <16 x i32> [[TMP17]], [[TMP19]]
+; CHECK-NEXT: [[DPI:%.*]] = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> [[__W]], <64 x i8> [[__A]], <64 x i8> [[__B]])
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16 [[TMP4]] to <16 x i1>
; CHECK-NEXT: [[BST:%.*]] = bitcast i16 [[__U]] to <16 x i1>
-; CHECK-NEXT: [[TMP6:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[_MSPROP1]], <16 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[TMP18]], <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP7:%.*]] = xor <16 x i32> [[DPI]], zeroinitializer
-; CHECK-NEXT: [[TMP8:%.*]] = or <16 x i32> [[TMP7]], [[_MSPROP1]]
+; CHECK-NEXT: [[TMP8:%.*]] = or <16 x i32> [[TMP7]], [[TMP18]]
; CHECK-NEXT: [[TMP9:%.*]] = or <16 x i32> [[TMP8]], zeroinitializer
; CHECK-NEXT: [[_MSPROP_SELECT:%.*]] = select <16 x i1> [[TMP5]], <16 x i32> [[TMP9]], <16 x i32> [[TMP6]]
; CHECK-NEXT: [[RES:%.*]] = select <16 x i1> [[BST]], <16 x i32> [[DPI]], <16 x i32> zeroinitializer
; CHECK-NEXT: store <16 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <16 x i32> [[RES]]
;
- %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> %__W, <16 x i32> %__A, <16 x i32> %__B)
+ %dpi = tail call <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32> %__W, <64 x i8> %__A, <64 x i8> %__B)
%bst = bitcast i16 %__U to <16 x i1>
%res = select <16 x i1> %bst, <16 x i32> %dpi, <16 x i32> zeroinitializer
ret <16 x i32> %res
}
-declare <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32>, <16 x i32>, <16 x i32>)
-declare <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32>, <16 x i32>, <16 x i32>)
+declare <16 x i32> @llvm.x86.avx10.vpdpbuud.512(<16 x i32>, <64 x i8>, <64 x i8>)
+declare <16 x i32> @llvm.x86.avx10.vpdpbuuds.512(<16 x i32>, <64 x i8>, <64 x i8>)
define <16 x i32> @test_mm512_dpwsud_epi32(<16 x i32> %__W, <16 x i32> %__A, ptr %pB) sanitize_memory {
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2ni-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2ni-intrinsics.ll
index e121c3b..373eff6 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2ni-intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avx10_2ni-intrinsics.ll
@@ -243,25 +243,21 @@ declare <4 x float> @llvm.x86.avx10.vdpphps.128(<4 x float>, <8 x half>, <8 x ha
declare <8 x float> @llvm.x86.avx10.vdpphps.256(<8 x float>, <16 x half>, <16 x half>)
-define <4 x i32> @test_mm_mask_dpbssd_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 x i32> %__A, <4 x i32> %__B) sanitize_memory {
+define <4 x i32> @test_mm_mask_dpbssd_epi32(<4 x i32> %__W, i4 zeroext %__U, <16 x i8> %__A, <16 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_mm_mask_dpbssd_epi32(
-; CHECK-SAME: <4 x i32> [[__W:%.*]], i4 zeroext [[__U:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-SAME: <4 x i32> [[__W:%.*]], i4 zeroext [[__U:%.*]], <16 x i8> [[__A:%.*]], <16 x i8> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP24:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP25:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP24:%.*]] = bitcast <4 x i32> [[__A]] to <16 x i8>
-; CHECK-NEXT: [[TMP25:%.*]] = bitcast <4 x i32> [[__B]] to <16 x i8>
-; CHECK-NEXT: [[TMP26:%.*]] = bitcast <4 x i32> [[TMP2]] to <16 x i8>
-; CHECK-NEXT: [[TMP27:%.*]] = bitcast <4 x i32> [[TMP3]] to <16 x i8>
-; CHECK-NEXT: [[TMP28:%.*]] = icmp ne <16 x i8> [[TMP26]], zeroinitializer
-; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <16 x i8> [[TMP27]], zeroinitializer
; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <16 x i8> [[TMP24]], zeroinitializer
; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <16 x i8> [[TMP25]], zeroinitializer
-; CHECK-NEXT: [[TMP13:%.*]] = and <16 x i1> [[TMP28]], [[TMP10]]
-; CHECK-NEXT: [[TMP14:%.*]] = and <16 x i1> [[TMP11]], [[TMP10]]
-; CHECK-NEXT: [[TMP15:%.*]] = and <16 x i1> [[TMP28]], [[TMP12]]
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <16 x i8> [[__A]], zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <16 x i8> [[__B]], zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = and <16 x i1> [[TMP11]], [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = and <16 x i1> [[TMP10]], [[TMP12]]
+; CHECK-NEXT: [[TMP15:%.*]] = and <16 x i1> [[TMP11]], [[TMP21]]
; CHECK-NEXT: [[TMP16:%.*]] = or <16 x i1> [[TMP13]], [[TMP14]]
; CHECK-NEXT: [[TMP17:%.*]] = or <16 x i1> [[TMP16]], [[TMP15]]
; CHECK-NEXT: [[TMP18:%.*]] = sext <16 x i1> [[TMP17]] to <16 x i8>
@@ -269,7 +265,7 @@ define <4 x i32> @test_mm_mask_dpbssd_epi32(<4 x i32> %__W, i4 zeroext %__U, <4
; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <4 x i32> [[TMP19]], zeroinitializer
; CHECK-NEXT: [[TMP23:%.*]] = sext <4 x i1> [[TMP20]] to <4 x i32>
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP23]], [[TMP1]]
-; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> [[__W]], <4 x i32> [[__A]], <4 x i32> [[__B]])
+; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> [[__W]], <16 x i8> [[__A]], <16 x i8> [[__B]])
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1>
; CHECK-NEXT: [[BST:%.*]] = bitcast i4 [[__U]] to <4 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = select <4 x i1> [[BST]], <4 x i32> [[_MSPROP1]], <4 x i32> [[TMP1]]
@@ -281,31 +277,27 @@ define <4 x i32> @test_mm_mask_dpbssd_epi32(<4 x i32> %__W, i4 zeroext %__U, <4
; CHECK-NEXT: store <4 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i32> [[RES]]
;
- %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B)
+ %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B)
%bst = bitcast i4 %__U to <4 x i1>
%res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> %__W
ret <4 x i32> %res
}
-define <4 x i32> @test_mm_maskz_dpbssds_epi32(i4 zeroext %__U, <4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) sanitize_memory {
+define <4 x i32> @test_mm_maskz_dpbssds_epi32(i4 zeroext %__U, <4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_mm_maskz_dpbssds_epi32(
-; CHECK-SAME: i4 zeroext [[__U:%.*]], <4 x i32> [[__W:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-SAME: i4 zeroext [[__U:%.*]], <4 x i32> [[__W:%.*]], <16 x i8> [[__A:%.*]], <16 x i8> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP25:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP26:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; CHECK-NEXT: [[TMP24:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP25:%.*]] = bitcast <4 x i32> [[__A]] to <16 x i8>
-; CHECK-NEXT: [[TMP26:%.*]] = bitcast <4 x i32> [[__B]] to <16 x i8>
-; CHECK-NEXT: [[TMP27:%.*]] = bitcast <4 x i32> [[TMP2]] to <16 x i8>
-; CHECK-NEXT: [[TMP28:%.*]] = bitcast <4 x i32> [[TMP3]] to <16 x i8>
-; CHECK-NEXT: [[TMP29:%.*]] = icmp ne <16 x i8> [[TMP27]], zeroinitializer
-; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <16 x i8> [[TMP28]], zeroinitializer
; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <16 x i8> [[TMP25]], zeroinitializer
; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <16 x i8> [[TMP26]], zeroinitializer
-; CHECK-NEXT: [[TMP13:%.*]] = and <16 x i1> [[TMP29]], [[TMP10]]
-; CHECK-NEXT: [[TMP14:%.*]] = and <16 x i1> [[TMP11]], [[TMP10]]
-; CHECK-NEXT: [[TMP15:%.*]] = and <16 x i1> [[TMP29]], [[TMP12]]
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <16 x i8> [[__A]], zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <16 x i8> [[__B]], zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = and <16 x i1> [[TMP11]], [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = and <16 x i1> [[TMP10]], [[TMP12]]
+; CHECK-NEXT: [[TMP15:%.*]] = and <16 x i1> [[TMP11]], [[TMP21]]
; CHECK-NEXT: [[TMP16:%.*]] = or <16 x i1> [[TMP13]], [[TMP14]]
; CHECK-NEXT: [[TMP17:%.*]] = or <16 x i1> [[TMP16]], [[TMP15]]
; CHECK-NEXT: [[TMP18:%.*]] = sext <16 x i1> [[TMP17]] to <16 x i8>
@@ -313,7 +305,7 @@ define <4 x i32> @test_mm_maskz_dpbssds_epi32(i4 zeroext %__U, <4 x i32> %__W, <
; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <4 x i32> [[TMP19]], zeroinitializer
; CHECK-NEXT: [[TMP23:%.*]] = sext <4 x i1> [[TMP20]] to <4 x i32>
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP23]], [[TMP24]]
-; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> [[__W]], <4 x i32> [[__A]], <4 x i32> [[__B]])
+; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> [[__W]], <16 x i8> [[__A]], <16 x i8> [[__B]])
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1>
; CHECK-NEXT: [[BST:%.*]] = bitcast i4 [[__U]] to <4 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = select <4 x i1> [[BST]], <4 x i32> [[_MSPROP1]], <4 x i32> zeroinitializer
@@ -325,31 +317,27 @@ define <4 x i32> @test_mm_maskz_dpbssds_epi32(i4 zeroext %__U, <4 x i32> %__W, <
; CHECK-NEXT: store <4 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i32> [[RES]]
;
- %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B)
+ %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B)
%bst = bitcast i4 %__U to <4 x i1>
%res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> zeroinitializer
ret <4 x i32> %res
}
-define <8 x i32> @test_mm256_maskz_dpbssds_epi32(<8 x i32> %__W, i8 zeroext %__U, <8 x i32> %__A, <8 x i32> %__B) sanitize_memory {
+define <8 x i32> @test_mm256_maskz_dpbssds_epi32(<8 x i32> %__W, i8 zeroext %__U, <32 x i8> %__A, <32 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_mm256_maskz_dpbssds_epi32(
-; CHECK-SAME: <8 x i32> [[__W:%.*]], i8 zeroext [[__U:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-SAME: <8 x i32> [[__W:%.*]], i8 zeroext [[__U:%.*]], <32 x i8> [[__A:%.*]], <32 x i8> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP24:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP25:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP24:%.*]] = bitcast <8 x i32> [[__A]] to <32 x i8>
-; CHECK-NEXT: [[TMP25:%.*]] = bitcast <8 x i32> [[__B]] to <32 x i8>
-; CHECK-NEXT: [[TMP26:%.*]] = bitcast <8 x i32> [[TMP2]] to <32 x i8>
-; CHECK-NEXT: [[TMP27:%.*]] = bitcast <8 x i32> [[TMP3]] to <32 x i8>
-; CHECK-NEXT: [[TMP28:%.*]] = icmp ne <32 x i8> [[TMP26]], zeroinitializer
-; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <32 x i8> [[TMP27]], zeroinitializer
; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <32 x i8> [[TMP24]], zeroinitializer
; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <32 x i8> [[TMP25]], zeroinitializer
-; CHECK-NEXT: [[TMP13:%.*]] = and <32 x i1> [[TMP28]], [[TMP10]]
-; CHECK-NEXT: [[TMP14:%.*]] = and <32 x i1> [[TMP11]], [[TMP10]]
-; CHECK-NEXT: [[TMP15:%.*]] = and <32 x i1> [[TMP28]], [[TMP12]]
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <32 x i8> [[__A]], zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <32 x i8> [[__B]], zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = and <32 x i1> [[TMP11]], [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = and <32 x i1> [[TMP10]], [[TMP12]]
+; CHECK-NEXT: [[TMP15:%.*]] = and <32 x i1> [[TMP11]], [[TMP21]]
; CHECK-NEXT: [[TMP16:%.*]] = or <32 x i1> [[TMP13]], [[TMP14]]
; CHECK-NEXT: [[TMP17:%.*]] = or <32 x i1> [[TMP16]], [[TMP15]]
; CHECK-NEXT: [[TMP18:%.*]] = sext <32 x i1> [[TMP17]] to <32 x i8>
@@ -357,7 +345,7 @@ define <8 x i32> @test_mm256_maskz_dpbssds_epi32(<8 x i32> %__W, i8 zeroext %__U
; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <8 x i32> [[TMP19]], zeroinitializer
; CHECK-NEXT: [[TMP23:%.*]] = sext <8 x i1> [[TMP20]] to <8 x i32>
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP23]], [[TMP1]]
-; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> [[__W]], <8 x i32> [[__A]], <8 x i32> [[__B]])
+; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> [[__W]], <32 x i8> [[__A]], <32 x i8> [[__B]])
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP4]] to <8 x i1>
; CHECK-NEXT: [[BST:%.*]] = bitcast i8 [[__U]] to <8 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = select <8 x i1> [[BST]], <8 x i32> [[_MSPROP1]], <8 x i32> [[TMP1]]
@@ -369,31 +357,27 @@ define <8 x i32> @test_mm256_maskz_dpbssds_epi32(<8 x i32> %__W, i8 zeroext %__U
; CHECK-NEXT: store <8 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <8 x i32> [[RES]]
;
- %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B)
+ %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B)
%bst = bitcast i8 %__U to <8 x i1>
%res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> %__W
ret <8 x i32> %res
}
-define <8 x i32> @test_mm256_mask_dpbssd_epi32(i8 zeroext %__U, <8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) sanitize_memory {
+define <8 x i32> @test_mm256_mask_dpbssd_epi32(i8 zeroext %__U, <8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_mm256_mask_dpbssd_epi32(
-; CHECK-SAME: i8 zeroext [[__U:%.*]], <8 x i32> [[__W:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-SAME: i8 zeroext [[__U:%.*]], <8 x i32> [[__W:%.*]], <32 x i8> [[__A:%.*]], <32 x i8> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP25:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP26:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
; CHECK-NEXT: [[TMP24:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP25:%.*]] = bitcast <8 x i32> [[__A]] to <32 x i8>
-; CHECK-NEXT: [[TMP26:%.*]] = bitcast <8 x i32> [[__B]] to <32 x i8>
-; CHECK-NEXT: [[TMP27:%.*]] = bitcast <8 x i32> [[TMP2]] to <32 x i8>
-; CHECK-NEXT: [[TMP28:%.*]] = bitcast <8 x i32> [[TMP3]] to <32 x i8>
-; CHECK-NEXT: [[TMP29:%.*]] = icmp ne <32 x i8> [[TMP27]], zeroinitializer
-; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <32 x i8> [[TMP28]], zeroinitializer
; CHECK-NEXT: [[TMP11:%.*]] = icmp ne <32 x i8> [[TMP25]], zeroinitializer
; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <32 x i8> [[TMP26]], zeroinitializer
-; CHECK-NEXT: [[TMP13:%.*]] = and <32 x i1> [[TMP29]], [[TMP10]]
-; CHECK-NEXT: [[TMP14:%.*]] = and <32 x i1> [[TMP11]], [[TMP10]]
-; CHECK-NEXT: [[TMP15:%.*]] = and <32 x i1> [[TMP29]], [[TMP12]]
+; CHECK-NEXT: [[TMP10:%.*]] = icmp ne <32 x i8> [[__A]], zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <32 x i8> [[__B]], zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = and <32 x i1> [[TMP11]], [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = and <32 x i1> [[TMP10]], [[TMP12]]
+; CHECK-NEXT: [[TMP15:%.*]] = and <32 x i1> [[TMP11]], [[TMP21]]
; CHECK-NEXT: [[TMP16:%.*]] = or <32 x i1> [[TMP13]], [[TMP14]]
; CHECK-NEXT: [[TMP17:%.*]] = or <32 x i1> [[TMP16]], [[TMP15]]
; CHECK-NEXT: [[TMP18:%.*]] = sext <32 x i1> [[TMP17]] to <32 x i8>
@@ -401,7 +385,7 @@ define <8 x i32> @test_mm256_mask_dpbssd_epi32(i8 zeroext %__U, <8 x i32> %__W,
; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <8 x i32> [[TMP19]], zeroinitializer
; CHECK-NEXT: [[TMP23:%.*]] = sext <8 x i1> [[TMP20]] to <8 x i32>
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP23]], [[TMP24]]
-; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> [[__W]], <8 x i32> [[__A]], <8 x i32> [[__B]])
+; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> [[__W]], <32 x i8> [[__A]], <32 x i8> [[__B]])
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP4]] to <8 x i1>
; CHECK-NEXT: [[BST:%.*]] = bitcast i8 [[__U]] to <8 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = select <8 x i1> [[BST]], <8 x i32> [[_MSPROP1]], <8 x i32> zeroinitializer
@@ -413,28 +397,40 @@ define <8 x i32> @test_mm256_mask_dpbssd_epi32(i8 zeroext %__U, <8 x i32> %__W,
; CHECK-NEXT: store <8 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <8 x i32> [[RES]]
;
- %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B)
+ %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B)
%bst = bitcast i8 %__U to <8 x i1>
%res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> zeroinitializer
ret <8 x i32> %res
}
-declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <8 x i32>, <8 x i32>)
-declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <16 x i8>, <16 x i8>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <16 x i8>, <16 x i8>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <32 x i8>, <32 x i8>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <32 x i8>, <32 x i8>)
-define <4 x i32> @test_mm_mask_dpbsud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 x i32> %__A, <4 x i32> %__B) sanitize_memory {
+define <4 x i32> @test_mm_mask_dpbsud_epi32(<4 x i32> %__W, i4 zeroext %__U, <16 x i8> %__A, <16 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_mm_mask_dpbsud_epi32(
-; CHECK-SAME: <4 x i32> [[__W:%.*]], i4 zeroext [[__U:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-SAME: <4 x i32> [[__W:%.*]], i4 zeroext [[__U:%.*]], <16 x i8> [[__A:%.*]], <16 x i8> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]]
-; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> [[__W]], <4 x i32> [[__A]], <4 x i32> [[__B]])
+; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <16 x i8> [[TMP2]], zeroinitializer
+; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <16 x i8> [[__A]], zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <16 x i8> [[__B]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = and <16 x i1> [[TMP18]], [[TMP19]]
+; CHECK-NEXT: [[TMP10:%.*]] = and <16 x i1> [[TMP20]], [[TMP19]]
+; CHECK-NEXT: [[TMP11:%.*]] = and <16 x i1> [[TMP18]], [[TMP21]]
+; CHECK-NEXT: [[TMP12:%.*]] = or <16 x i1> [[TMP22]], [[TMP10]]
+; CHECK-NEXT: [[TMP13:%.*]] = or <16 x i1> [[TMP12]], [[TMP11]]
+; CHECK-NEXT: [[TMP14:%.*]] = sext <16 x i1> [[TMP13]] to <16 x i8>
+; CHECK-NEXT: [[TMP15:%.*]] = bitcast <16 x i8> [[TMP14]] to <4 x i32>
+; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <4 x i32> [[TMP15]], zeroinitializer
+; CHECK-NEXT: [[TMP17:%.*]] = sext <4 x i1> [[TMP16]] to <4 x i32>
+; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP17]], [[TMP1]]
+; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> [[__W]], <16 x i8> [[__A]], <16 x i8> [[__B]])
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1>
; CHECK-NEXT: [[BST:%.*]] = bitcast i4 [[__U]] to <4 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = select <4 x i1> [[BST]], <4 x i32> [[_MSPROP1]], <4 x i32> [[TMP1]]
@@ -446,23 +442,35 @@ define <4 x i32> @test_mm_mask_dpbsud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4
; CHECK-NEXT: store <4 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i32> [[RES]]
;
- %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B)
+ %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B)
%bst = bitcast i4 %__U to <4 x i1>
%res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> %__W
ret <4 x i32> %res
}
-define <4 x i32> @test_mm_maskz_dpbsuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) sanitize_memory {
+define <4 x i32> @test_mm_maskz_dpbsuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_mm_maskz_dpbsuds_epi32(
-; CHECK-SAME: i4 zeroext [[__U:%.*]], <4 x i32> [[__W:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-SAME: i4 zeroext [[__U:%.*]], <4 x i32> [[__W:%.*]], <16 x i8> [[__A:%.*]], <16 x i8> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]]
-; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> [[__W]], <4 x i32> [[__A]], <4 x i32> [[__B]])
+; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <16 x i8> [[TMP2]], zeroinitializer
+; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <16 x i8> [[__A]], zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <16 x i8> [[__B]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = and <16 x i1> [[TMP18]], [[TMP19]]
+; CHECK-NEXT: [[TMP10:%.*]] = and <16 x i1> [[TMP20]], [[TMP19]]
+; CHECK-NEXT: [[TMP11:%.*]] = and <16 x i1> [[TMP18]], [[TMP21]]
+; CHECK-NEXT: [[TMP12:%.*]] = or <16 x i1> [[TMP22]], [[TMP10]]
+; CHECK-NEXT: [[TMP13:%.*]] = or <16 x i1> [[TMP12]], [[TMP11]]
+; CHECK-NEXT: [[TMP14:%.*]] = sext <16 x i1> [[TMP13]] to <16 x i8>
+; CHECK-NEXT: [[TMP15:%.*]] = bitcast <16 x i8> [[TMP14]] to <4 x i32>
+; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <4 x i32> [[TMP15]], zeroinitializer
+; CHECK-NEXT: [[TMP17:%.*]] = sext <4 x i1> [[TMP16]] to <4 x i32>
+; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP17]], [[TMP1]]
+; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> [[__W]], <16 x i8> [[__A]], <16 x i8> [[__B]])
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1>
; CHECK-NEXT: [[BST:%.*]] = bitcast i4 [[__U]] to <4 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = select <4 x i1> [[BST]], <4 x i32> [[_MSPROP1]], <4 x i32> zeroinitializer
@@ -474,23 +482,35 @@ define <4 x i32> @test_mm_maskz_dpbsuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <
; CHECK-NEXT: store <4 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i32> [[RES]]
;
- %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B)
+ %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B)
%bst = bitcast i4 %__U to <4 x i1>
%res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> zeroinitializer
ret <4 x i32> %res
}
-define <8 x i32> @test_mm256_maskz_dpbsuds_epi32(<8 x i32> %__W, i8 zeroext %__U, <8 x i32> %__A, <8 x i32> %__B) sanitize_memory {
+define <8 x i32> @test_mm256_maskz_dpbsuds_epi32(<8 x i32> %__W, i8 zeroext %__U, <32 x i8> %__A, <32 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_mm256_maskz_dpbsuds_epi32(
-; CHECK-SAME: <8 x i32> [[__W:%.*]], i8 zeroext [[__U:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-SAME: <8 x i32> [[__W:%.*]], i8 zeroext [[__U:%.*]], <32 x i8> [[__A:%.*]], <32 x i8> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]]
-; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> [[__W]], <8 x i32> [[__A]], <8 x i32> [[__B]])
+; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <32 x i8> [[TMP2]], zeroinitializer
+; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <32 x i8> [[__A]], zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <32 x i8> [[__B]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = and <32 x i1> [[TMP18]], [[TMP19]]
+; CHECK-NEXT: [[TMP10:%.*]] = and <32 x i1> [[TMP20]], [[TMP19]]
+; CHECK-NEXT: [[TMP11:%.*]] = and <32 x i1> [[TMP18]], [[TMP21]]
+; CHECK-NEXT: [[TMP12:%.*]] = or <32 x i1> [[TMP22]], [[TMP10]]
+; CHECK-NEXT: [[TMP13:%.*]] = or <32 x i1> [[TMP12]], [[TMP11]]
+; CHECK-NEXT: [[TMP14:%.*]] = sext <32 x i1> [[TMP13]] to <32 x i8>
+; CHECK-NEXT: [[TMP15:%.*]] = bitcast <32 x i8> [[TMP14]] to <8 x i32>
+; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <8 x i32> [[TMP15]], zeroinitializer
+; CHECK-NEXT: [[TMP17:%.*]] = sext <8 x i1> [[TMP16]] to <8 x i32>
+; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP17]], [[TMP1]]
+; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> [[__W]], <32 x i8> [[__A]], <32 x i8> [[__B]])
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP4]] to <8 x i1>
; CHECK-NEXT: [[BST:%.*]] = bitcast i8 [[__U]] to <8 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = select <8 x i1> [[BST]], <8 x i32> [[_MSPROP1]], <8 x i32> [[TMP1]]
@@ -502,23 +522,35 @@ define <8 x i32> @test_mm256_maskz_dpbsuds_epi32(<8 x i32> %__W, i8 zeroext %__U
; CHECK-NEXT: store <8 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <8 x i32> [[RES]]
;
- %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B)
+ %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B)
%bst = bitcast i8 %__U to <8 x i1>
%res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> %__W
ret <8 x i32> %res
}
-define <8 x i32> @test_mm256_mask_dpbsud_epi32(i8 zeroext %__U, <8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) sanitize_memory {
+define <8 x i32> @test_mm256_mask_dpbsud_epi32(i8 zeroext %__U, <8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_mm256_mask_dpbsud_epi32(
-; CHECK-SAME: i8 zeroext [[__U:%.*]], <8 x i32> [[__W:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-SAME: i8 zeroext [[__U:%.*]], <8 x i32> [[__W:%.*]], <32 x i8> [[__A:%.*]], <32 x i8> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]]
-; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> [[__W]], <8 x i32> [[__A]], <8 x i32> [[__B]])
+; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <32 x i8> [[TMP2]], zeroinitializer
+; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <32 x i8> [[__A]], zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <32 x i8> [[__B]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = and <32 x i1> [[TMP18]], [[TMP19]]
+; CHECK-NEXT: [[TMP10:%.*]] = and <32 x i1> [[TMP20]], [[TMP19]]
+; CHECK-NEXT: [[TMP11:%.*]] = and <32 x i1> [[TMP18]], [[TMP21]]
+; CHECK-NEXT: [[TMP12:%.*]] = or <32 x i1> [[TMP22]], [[TMP10]]
+; CHECK-NEXT: [[TMP13:%.*]] = or <32 x i1> [[TMP12]], [[TMP11]]
+; CHECK-NEXT: [[TMP14:%.*]] = sext <32 x i1> [[TMP13]] to <32 x i8>
+; CHECK-NEXT: [[TMP15:%.*]] = bitcast <32 x i8> [[TMP14]] to <8 x i32>
+; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <8 x i32> [[TMP15]], zeroinitializer
+; CHECK-NEXT: [[TMP17:%.*]] = sext <8 x i1> [[TMP16]] to <8 x i32>
+; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP17]], [[TMP1]]
+; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> [[__W]], <32 x i8> [[__A]], <32 x i8> [[__B]])
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP4]] to <8 x i1>
; CHECK-NEXT: [[BST:%.*]] = bitcast i8 [[__U]] to <8 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = select <8 x i1> [[BST]], <8 x i32> [[_MSPROP1]], <8 x i32> zeroinitializer
@@ -530,28 +562,40 @@ define <8 x i32> @test_mm256_mask_dpbsud_epi32(i8 zeroext %__U, <8 x i32> %__W,
; CHECK-NEXT: store <8 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <8 x i32> [[RES]]
;
- %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B)
+ %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B)
%bst = bitcast i8 %__U to <8 x i1>
%res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> zeroinitializer
ret <8 x i32> %res
}
-declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <8 x i32>, <8 x i32>)
-declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <16 x i8>, <16 x i8>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <16 x i8>, <16 x i8>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <32 x i8>, <32 x i8>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <32 x i8>, <32 x i8>)
-define <4 x i32> @test_mm_mask_dpbuud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 x i32> %__A, <4 x i32> %__B) sanitize_memory {
+define <4 x i32> @test_mm_mask_dpbuud_epi32(<4 x i32> %__W, i4 zeroext %__U, <16 x i8> %__A, <16 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_mm_mask_dpbuud_epi32(
-; CHECK-SAME: <4 x i32> [[__W:%.*]], i4 zeroext [[__U:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-SAME: <4 x i32> [[__W:%.*]], i4 zeroext [[__U:%.*]], <16 x i8> [[__A:%.*]], <16 x i8> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]]
-; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> [[__W]], <4 x i32> [[__A]], <4 x i32> [[__B]])
+; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <16 x i8> [[TMP2]], zeroinitializer
+; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <16 x i8> [[__A]], zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <16 x i8> [[__B]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = and <16 x i1> [[TMP18]], [[TMP19]]
+; CHECK-NEXT: [[TMP10:%.*]] = and <16 x i1> [[TMP20]], [[TMP19]]
+; CHECK-NEXT: [[TMP11:%.*]] = and <16 x i1> [[TMP18]], [[TMP21]]
+; CHECK-NEXT: [[TMP12:%.*]] = or <16 x i1> [[TMP22]], [[TMP10]]
+; CHECK-NEXT: [[TMP13:%.*]] = or <16 x i1> [[TMP12]], [[TMP11]]
+; CHECK-NEXT: [[TMP14:%.*]] = sext <16 x i1> [[TMP13]] to <16 x i8>
+; CHECK-NEXT: [[TMP15:%.*]] = bitcast <16 x i8> [[TMP14]] to <4 x i32>
+; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <4 x i32> [[TMP15]], zeroinitializer
+; CHECK-NEXT: [[TMP17:%.*]] = sext <4 x i1> [[TMP16]] to <4 x i32>
+; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP17]], [[TMP1]]
+; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> [[__W]], <16 x i8> [[__A]], <16 x i8> [[__B]])
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1>
; CHECK-NEXT: [[BST:%.*]] = bitcast i4 [[__U]] to <4 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = select <4 x i1> [[BST]], <4 x i32> [[_MSPROP1]], <4 x i32> [[TMP1]]
@@ -563,23 +607,35 @@ define <4 x i32> @test_mm_mask_dpbuud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4
; CHECK-NEXT: store <4 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i32> [[RES]]
;
- %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B)
+ %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B)
%bst = bitcast i4 %__U to <4 x i1>
%res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> %__W
ret <4 x i32> %res
}
-define <4 x i32> @test_mm_maskz_dpbuuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B) sanitize_memory {
+define <4 x i32> @test_mm_maskz_dpbuuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_mm_maskz_dpbuuds_epi32(
-; CHECK-SAME: i4 zeroext [[__U:%.*]], <4 x i32> [[__W:%.*]], <4 x i32> [[__A:%.*]], <4 x i32> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-SAME: i4 zeroext [[__U:%.*]], <4 x i32> [[__W:%.*]], <16 x i8> [[__A:%.*]], <16 x i8> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 24) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i4, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[TMP3]]
-; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> [[__W]], <4 x i32> [[__A]], <4 x i32> [[__B]])
+; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <16 x i8> [[TMP2]], zeroinitializer
+; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <16 x i8> [[__A]], zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <16 x i8> [[__B]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = and <16 x i1> [[TMP18]], [[TMP19]]
+; CHECK-NEXT: [[TMP10:%.*]] = and <16 x i1> [[TMP20]], [[TMP19]]
+; CHECK-NEXT: [[TMP11:%.*]] = and <16 x i1> [[TMP18]], [[TMP21]]
+; CHECK-NEXT: [[TMP12:%.*]] = or <16 x i1> [[TMP22]], [[TMP10]]
+; CHECK-NEXT: [[TMP13:%.*]] = or <16 x i1> [[TMP12]], [[TMP11]]
+; CHECK-NEXT: [[TMP14:%.*]] = sext <16 x i1> [[TMP13]] to <16 x i8>
+; CHECK-NEXT: [[TMP15:%.*]] = bitcast <16 x i8> [[TMP14]] to <4 x i32>
+; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <4 x i32> [[TMP15]], zeroinitializer
+; CHECK-NEXT: [[TMP17:%.*]] = sext <4 x i1> [[TMP16]] to <4 x i32>
+; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP17]], [[TMP1]]
+; CHECK-NEXT: [[DPI:%.*]] = tail call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> [[__W]], <16 x i8> [[__A]], <16 x i8> [[__B]])
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i4 [[TMP4]] to <4 x i1>
; CHECK-NEXT: [[BST:%.*]] = bitcast i4 [[__U]] to <4 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = select <4 x i1> [[BST]], <4 x i32> [[_MSPROP1]], <4 x i32> zeroinitializer
@@ -591,23 +647,35 @@ define <4 x i32> @test_mm_maskz_dpbuuds_epi32(i4 zeroext %__U, <4 x i32> %__W, <
; CHECK-NEXT: store <4 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i32> [[RES]]
;
- %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %__W, <4 x i32> %__A, <4 x i32> %__B)
+ %dpi = tail call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %__W, <16 x i8> %__A, <16 x i8> %__B)
%bst = bitcast i4 %__U to <4 x i1>
%res = select <4 x i1> %bst, <4 x i32> %dpi, <4 x i32> zeroinitializer
ret <4 x i32> %res
}
-define <8 x i32> @test_mm256_maskz_dpbuuds_epi32(<8 x i32> %__W, i8 zeroext %__U, <8 x i32> %__A, <8 x i32> %__B) sanitize_memory {
+define <8 x i32> @test_mm256_maskz_dpbuuds_epi32(<8 x i32> %__W, i8 zeroext %__U, <32 x i8> %__A, <32 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_mm256_maskz_dpbuuds_epi32(
-; CHECK-SAME: <8 x i32> [[__W:%.*]], i8 zeroext [[__U:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-SAME: <8 x i32> [[__W:%.*]], i8 zeroext [[__U:%.*]], <32 x i8> [[__A:%.*]], <32 x i8> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]]
-; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> [[__W]], <8 x i32> [[__A]], <8 x i32> [[__B]])
+; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <32 x i8> [[TMP2]], zeroinitializer
+; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <32 x i8> [[__A]], zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <32 x i8> [[__B]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = and <32 x i1> [[TMP18]], [[TMP19]]
+; CHECK-NEXT: [[TMP10:%.*]] = and <32 x i1> [[TMP20]], [[TMP19]]
+; CHECK-NEXT: [[TMP11:%.*]] = and <32 x i1> [[TMP18]], [[TMP21]]
+; CHECK-NEXT: [[TMP12:%.*]] = or <32 x i1> [[TMP22]], [[TMP10]]
+; CHECK-NEXT: [[TMP13:%.*]] = or <32 x i1> [[TMP12]], [[TMP11]]
+; CHECK-NEXT: [[TMP14:%.*]] = sext <32 x i1> [[TMP13]] to <32 x i8>
+; CHECK-NEXT: [[TMP15:%.*]] = bitcast <32 x i8> [[TMP14]] to <8 x i32>
+; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <8 x i32> [[TMP15]], zeroinitializer
+; CHECK-NEXT: [[TMP17:%.*]] = sext <8 x i1> [[TMP16]] to <8 x i32>
+; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP17]], [[TMP1]]
+; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> [[__W]], <32 x i8> [[__A]], <32 x i8> [[__B]])
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP4]] to <8 x i1>
; CHECK-NEXT: [[BST:%.*]] = bitcast i8 [[__U]] to <8 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = select <8 x i1> [[BST]], <8 x i32> [[_MSPROP1]], <8 x i32> [[TMP1]]
@@ -619,23 +687,35 @@ define <8 x i32> @test_mm256_maskz_dpbuuds_epi32(<8 x i32> %__W, i8 zeroext %__U
; CHECK-NEXT: store <8 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <8 x i32> [[RES]]
;
- %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B)
+ %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B)
%bst = bitcast i8 %__U to <8 x i1>
%res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> %__W
ret <8 x i32> %res
}
-define <8 x i32> @test_mm256_mask_dpbuud_epi32(i8 zeroext %__U, <8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B) sanitize_memory {
+define <8 x i32> @test_mm256_mask_dpbuud_epi32(i8 zeroext %__U, <8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_mm256_mask_dpbuud_epi32(
-; CHECK-SAME: i8 zeroext [[__U:%.*]], <8 x i32> [[__W:%.*]], <8 x i32> [[__A:%.*]], <8 x i32> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-SAME: i8 zeroext [[__U:%.*]], <8 x i32> [[__W:%.*]], <32 x i8> [[__A:%.*]], <32 x i8> [[__B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 8) to ptr), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr @__msan_param_tls, align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[TMP3]]
-; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> [[__W]], <8 x i32> [[__A]], <8 x i32> [[__B]])
+; CHECK-NEXT: [[TMP18:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP19:%.*]] = icmp ne <32 x i8> [[TMP2]], zeroinitializer
+; CHECK-NEXT: [[TMP20:%.*]] = icmp ne <32 x i8> [[__A]], zeroinitializer
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <32 x i8> [[__B]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = and <32 x i1> [[TMP18]], [[TMP19]]
+; CHECK-NEXT: [[TMP10:%.*]] = and <32 x i1> [[TMP20]], [[TMP19]]
+; CHECK-NEXT: [[TMP11:%.*]] = and <32 x i1> [[TMP18]], [[TMP21]]
+; CHECK-NEXT: [[TMP12:%.*]] = or <32 x i1> [[TMP22]], [[TMP10]]
+; CHECK-NEXT: [[TMP13:%.*]] = or <32 x i1> [[TMP12]], [[TMP11]]
+; CHECK-NEXT: [[TMP14:%.*]] = sext <32 x i1> [[TMP13]] to <32 x i8>
+; CHECK-NEXT: [[TMP15:%.*]] = bitcast <32 x i8> [[TMP14]] to <8 x i32>
+; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <8 x i32> [[TMP15]], zeroinitializer
+; CHECK-NEXT: [[TMP17:%.*]] = sext <8 x i1> [[TMP16]] to <8 x i32>
+; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP17]], [[TMP1]]
+; CHECK-NEXT: [[DPI:%.*]] = tail call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> [[__W]], <32 x i8> [[__A]], <32 x i8> [[__B]])
; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8 [[TMP4]] to <8 x i1>
; CHECK-NEXT: [[BST:%.*]] = bitcast i8 [[__U]] to <8 x i1>
; CHECK-NEXT: [[TMP6:%.*]] = select <8 x i1> [[BST]], <8 x i32> [[_MSPROP1]], <8 x i32> zeroinitializer
@@ -647,16 +727,16 @@ define <8 x i32> @test_mm256_mask_dpbuud_epi32(i8 zeroext %__U, <8 x i32> %__W,
; CHECK-NEXT: store <8 x i32> [[_MSPROP_SELECT]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <8 x i32> [[RES]]
;
- %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %__W, <8 x i32> %__A, <8 x i32> %__B)
+ %dpi = tail call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %__W, <32 x i8> %__A, <32 x i8> %__B)
%bst = bitcast i8 %__U to <8 x i1>
%res = select <8 x i1> %bst, <8 x i32> %dpi, <8 x i32> zeroinitializer
ret <8 x i32> %res
}
-declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <4 x i32>, <4 x i32>)
-declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <8 x i32>, <8 x i32>)
-declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <16 x i8>, <16 x i8>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <16 x i8>, <16 x i8>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <32 x i8>, <32 x i8>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <32 x i8>, <32 x i8>)
define <4 x i32> @test_mm_mask_dpwsud_epi32(<4 x i32> %__W, i4 zeroext %__U, <4 x i32> %__A, <4 x i32> %__B) sanitize_memory {
diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint8-intrinsics.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint8-intrinsics.ll
index 3df0f1d..d91abea 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint8-intrinsics.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/avxvnniint8-intrinsics.ll
@@ -10,15 +10,15 @@
target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32>, <16 x i8>, <16 x i8>)
-define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) sanitize_memory {
+define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpbssd_128(
-; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <4 x i32> [[X4:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i8> [[X4:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP32:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1:![0-9]+]]
@@ -26,22 +26,18 @@ define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <4 x i32> %x1, pt
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]]
; CHECK-NEXT: unreachable
; CHECK: [[BB6]]:
-; CHECK-NEXT: [[X2:%.*]] = load <4 x i32>, ptr [[X2P]], align 16
+; CHECK-NEXT: [[TMP30:%.*]] = load <16 x i8>, ptr [[X2P]], align 16
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: [[_MSLD:%.*]] = load <4 x i32>, ptr [[TMP9]], align 16
-; CHECK-NEXT: [[TMP29:%.*]] = bitcast <4 x i32> [[X1]] to <16 x i8>
-; CHECK-NEXT: [[TMP30:%.*]] = bitcast <4 x i32> [[X2]] to <16 x i8>
-; CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x i32> [[TMP3]] to <16 x i8>
-; CHECK-NEXT: [[TMP13:%.*]] = bitcast <4 x i32> [[_MSLD]] to <16 x i8>
-; CHECK-NEXT: [[TMP14:%.*]] = icmp ne <16 x i8> [[TMP12]], zeroinitializer
+; CHECK-NEXT: [[TMP29:%.*]] = load <16 x i8>, ptr [[TMP9]], align 16
; CHECK-NEXT: [[TMP15:%.*]] = icmp ne <16 x i8> [[TMP13]], zeroinitializer
; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <16 x i8> [[TMP29]], zeroinitializer
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer
; CHECK-NEXT: [[TMP17:%.*]] = icmp ne <16 x i8> [[TMP30]], zeroinitializer
-; CHECK-NEXT: [[TMP18:%.*]] = and <16 x i1> [[TMP14]], [[TMP15]]
-; CHECK-NEXT: [[TMP19:%.*]] = and <16 x i1> [[TMP16]], [[TMP15]]
-; CHECK-NEXT: [[TMP20:%.*]] = and <16 x i1> [[TMP14]], [[TMP17]]
+; CHECK-NEXT: [[TMP18:%.*]] = and <16 x i1> [[TMP15]], [[TMP16]]
+; CHECK-NEXT: [[TMP19:%.*]] = and <16 x i1> [[TMP12]], [[TMP16]]
+; CHECK-NEXT: [[TMP20:%.*]] = and <16 x i1> [[TMP15]], [[TMP17]]
; CHECK-NEXT: [[TMP21:%.*]] = or <16 x i1> [[TMP18]], [[TMP19]]
; CHECK-NEXT: [[TMP22:%.*]] = or <16 x i1> [[TMP21]], [[TMP20]]
; CHECK-NEXT: [[TMP23:%.*]] = sext <16 x i1> [[TMP22]] to <16 x i8>
@@ -49,18 +45,14 @@ define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <4 x i32> %x1, pt
; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <4 x i32> [[TMP24]], zeroinitializer
; CHECK-NEXT: [[TMP28:%.*]] = sext <4 x i1> [[TMP25]] to <4 x i32>
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP28]], [[TMP5]]
-; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X2]])
-; CHECK-NEXT: [[TMP31:%.*]] = bitcast <4 x i32> [[X1]] to <16 x i8>
-; CHECK-NEXT: [[TMP32:%.*]] = bitcast <4 x i32> [[X4]] to <16 x i8>
-; CHECK-NEXT: [[TMP33:%.*]] = bitcast <4 x i32> [[TMP3]] to <16 x i8>
-; CHECK-NEXT: [[TMP34:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
-; CHECK-NEXT: [[TMP35:%.*]] = icmp ne <16 x i8> [[TMP33]], zeroinitializer
-; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <16 x i8> [[TMP34]], zeroinitializer
-; CHECK-NEXT: [[TMP37:%.*]] = icmp ne <16 x i8> [[TMP31]], zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[TMP30]])
+; CHECK-NEXT: [[TMP26:%.*]] = icmp ne <16 x i8> [[TMP13]], zeroinitializer
; CHECK-NEXT: [[TMP38:%.*]] = icmp ne <16 x i8> [[TMP32]], zeroinitializer
-; CHECK-NEXT: [[TMP39:%.*]] = and <16 x i1> [[TMP35]], [[TMP36]]
-; CHECK-NEXT: [[TMP40:%.*]] = and <16 x i1> [[TMP37]], [[TMP36]]
-; CHECK-NEXT: [[TMP41:%.*]] = and <16 x i1> [[TMP35]], [[TMP38]]
+; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer
+; CHECK-NEXT: [[TMP31:%.*]] = icmp ne <16 x i8> [[X4]], zeroinitializer
+; CHECK-NEXT: [[TMP39:%.*]] = and <16 x i1> [[TMP26]], [[TMP38]]
+; CHECK-NEXT: [[TMP40:%.*]] = and <16 x i1> [[TMP27]], [[TMP38]]
+; CHECK-NEXT: [[TMP41:%.*]] = and <16 x i1> [[TMP26]], [[TMP31]]
; CHECK-NEXT: [[TMP42:%.*]] = or <16 x i1> [[TMP39]], [[TMP40]]
; CHECK-NEXT: [[TMP43:%.*]] = or <16 x i1> [[TMP42]], [[TMP41]]
; CHECK-NEXT: [[TMP44:%.*]] = sext <16 x i1> [[TMP43]] to <16 x i8>
@@ -68,28 +60,28 @@ define <4 x i32>@test_int_x86_avx2_vpdpbssd_128(<4 x i32> %x0, <4 x i32> %x1, pt
; CHECK-NEXT: [[TMP46:%.*]] = icmp ne <4 x i32> [[TMP45]], zeroinitializer
; CHECK-NEXT: [[TMP49:%.*]] = sext <4 x i1> [[TMP46]] to <4 x i32>
; CHECK-NEXT: [[_MSPROP3:%.*]] = or <4 x i32> [[TMP49]], [[TMP5]]
-; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X4]])
+; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[X4]])
; CHECK-NEXT: [[_MSPROP4:%.*]] = or <4 x i32> [[_MSPROP1]], [[_MSPROP3]]
; CHECK-NEXT: [[RES:%.*]] = add <4 x i32> [[TMP10]], [[TMP11]]
; CHECK-NEXT: store <4 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i32> [[RES]]
;
- %x2 = load <4 x i32>, ptr %x2p
- %1 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4)
+ %x2 = load <16 x i8>, ptr %x2p
+ %1 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssd.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4)
%res = add <4 x i32> %1, %2
ret <4 x i32> %res
}
-declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32>, <16 x i8>, <16 x i8>)
-define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) sanitize_memory {
+define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpbssds_128(
-; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <4 x i32> [[X4:%.*]]) #[[ATTR1]] {
+; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i8> [[X4:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP32:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -97,22 +89,18 @@ define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <4 x i32> %x1, p
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: [[BB6]]:
-; CHECK-NEXT: [[X2:%.*]] = load <4 x i32>, ptr [[X2P]], align 16
+; CHECK-NEXT: [[TMP30:%.*]] = load <16 x i8>, ptr [[X2P]], align 16
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: [[_MSLD:%.*]] = load <4 x i32>, ptr [[TMP9]], align 16
-; CHECK-NEXT: [[TMP29:%.*]] = bitcast <4 x i32> [[X1]] to <16 x i8>
-; CHECK-NEXT: [[TMP30:%.*]] = bitcast <4 x i32> [[X2]] to <16 x i8>
-; CHECK-NEXT: [[TMP12:%.*]] = bitcast <4 x i32> [[TMP3]] to <16 x i8>
-; CHECK-NEXT: [[TMP13:%.*]] = bitcast <4 x i32> [[_MSLD]] to <16 x i8>
-; CHECK-NEXT: [[TMP14:%.*]] = icmp ne <16 x i8> [[TMP12]], zeroinitializer
+; CHECK-NEXT: [[TMP29:%.*]] = load <16 x i8>, ptr [[TMP9]], align 16
; CHECK-NEXT: [[TMP15:%.*]] = icmp ne <16 x i8> [[TMP13]], zeroinitializer
; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <16 x i8> [[TMP29]], zeroinitializer
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer
; CHECK-NEXT: [[TMP17:%.*]] = icmp ne <16 x i8> [[TMP30]], zeroinitializer
-; CHECK-NEXT: [[TMP18:%.*]] = and <16 x i1> [[TMP14]], [[TMP15]]
-; CHECK-NEXT: [[TMP19:%.*]] = and <16 x i1> [[TMP16]], [[TMP15]]
-; CHECK-NEXT: [[TMP20:%.*]] = and <16 x i1> [[TMP14]], [[TMP17]]
+; CHECK-NEXT: [[TMP18:%.*]] = and <16 x i1> [[TMP15]], [[TMP16]]
+; CHECK-NEXT: [[TMP19:%.*]] = and <16 x i1> [[TMP12]], [[TMP16]]
+; CHECK-NEXT: [[TMP20:%.*]] = and <16 x i1> [[TMP15]], [[TMP17]]
; CHECK-NEXT: [[TMP21:%.*]] = or <16 x i1> [[TMP18]], [[TMP19]]
; CHECK-NEXT: [[TMP22:%.*]] = or <16 x i1> [[TMP21]], [[TMP20]]
; CHECK-NEXT: [[TMP23:%.*]] = sext <16 x i1> [[TMP22]] to <16 x i8>
@@ -120,18 +108,14 @@ define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <4 x i32> %x1, p
; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <4 x i32> [[TMP24]], zeroinitializer
; CHECK-NEXT: [[TMP28:%.*]] = sext <4 x i1> [[TMP25]] to <4 x i32>
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP28]], [[TMP5]]
-; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X2]])
-; CHECK-NEXT: [[TMP31:%.*]] = bitcast <4 x i32> [[X1]] to <16 x i8>
-; CHECK-NEXT: [[TMP32:%.*]] = bitcast <4 x i32> [[X4]] to <16 x i8>
-; CHECK-NEXT: [[TMP33:%.*]] = bitcast <4 x i32> [[TMP3]] to <16 x i8>
-; CHECK-NEXT: [[TMP34:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
-; CHECK-NEXT: [[TMP35:%.*]] = icmp ne <16 x i8> [[TMP33]], zeroinitializer
-; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <16 x i8> [[TMP34]], zeroinitializer
-; CHECK-NEXT: [[TMP37:%.*]] = icmp ne <16 x i8> [[TMP31]], zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[TMP30]])
+; CHECK-NEXT: [[TMP26:%.*]] = icmp ne <16 x i8> [[TMP13]], zeroinitializer
; CHECK-NEXT: [[TMP38:%.*]] = icmp ne <16 x i8> [[TMP32]], zeroinitializer
-; CHECK-NEXT: [[TMP39:%.*]] = and <16 x i1> [[TMP35]], [[TMP36]]
-; CHECK-NEXT: [[TMP40:%.*]] = and <16 x i1> [[TMP37]], [[TMP36]]
-; CHECK-NEXT: [[TMP41:%.*]] = and <16 x i1> [[TMP35]], [[TMP38]]
+; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer
+; CHECK-NEXT: [[TMP31:%.*]] = icmp ne <16 x i8> [[X4]], zeroinitializer
+; CHECK-NEXT: [[TMP39:%.*]] = and <16 x i1> [[TMP26]], [[TMP38]]
+; CHECK-NEXT: [[TMP40:%.*]] = and <16 x i1> [[TMP27]], [[TMP38]]
+; CHECK-NEXT: [[TMP41:%.*]] = and <16 x i1> [[TMP26]], [[TMP31]]
; CHECK-NEXT: [[TMP42:%.*]] = or <16 x i1> [[TMP39]], [[TMP40]]
; CHECK-NEXT: [[TMP43:%.*]] = or <16 x i1> [[TMP42]], [[TMP41]]
; CHECK-NEXT: [[TMP44:%.*]] = sext <16 x i1> [[TMP43]] to <16 x i8>
@@ -139,28 +123,28 @@ define <4 x i32>@test_int_x86_avx2_vpdpbssds_128(<4 x i32> %x0, <4 x i32> %x1, p
; CHECK-NEXT: [[TMP46:%.*]] = icmp ne <4 x i32> [[TMP45]], zeroinitializer
; CHECK-NEXT: [[TMP49:%.*]] = sext <4 x i1> [[TMP46]] to <4 x i32>
; CHECK-NEXT: [[_MSPROP3:%.*]] = or <4 x i32> [[TMP49]], [[TMP5]]
-; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X4]])
+; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[X4]])
; CHECK-NEXT: [[_MSPROP4:%.*]] = or <4 x i32> [[_MSPROP1]], [[_MSPROP3]]
; CHECK-NEXT: [[RES:%.*]] = add <4 x i32> [[TMP10]], [[TMP11]]
; CHECK-NEXT: store <4 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i32> [[RES]]
;
- %x2 = load <4 x i32>, ptr %x2p
- %1 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4)
+ %x2 = load <16 x i8>, ptr %x2p
+ %1 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbssds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4)
%res = add <4 x i32> %1, %2
ret <4 x i32> %res
}
-declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32>, <32 x i8>, <32 x i8>)
-define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) sanitize_memory {
+define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpbssd_256(
-; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]]) #[[ATTR1]] {
+; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <32 x i8> [[X4:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP32:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -168,22 +152,18 @@ define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <8 x i32> %x1, pt
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: [[BB6]]:
-; CHECK-NEXT: [[X2:%.*]] = load <8 x i32>, ptr [[X2P]], align 32
+; CHECK-NEXT: [[TMP30:%.*]] = load <32 x i8>, ptr [[X2P]], align 32
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: [[_MSLD:%.*]] = load <8 x i32>, ptr [[TMP9]], align 32
-; CHECK-NEXT: [[TMP29:%.*]] = bitcast <8 x i32> [[X1]] to <32 x i8>
-; CHECK-NEXT: [[TMP30:%.*]] = bitcast <8 x i32> [[X2]] to <32 x i8>
-; CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x i32> [[TMP3]] to <32 x i8>
-; CHECK-NEXT: [[TMP13:%.*]] = bitcast <8 x i32> [[_MSLD]] to <32 x i8>
-; CHECK-NEXT: [[TMP14:%.*]] = icmp ne <32 x i8> [[TMP12]], zeroinitializer
+; CHECK-NEXT: [[TMP29:%.*]] = load <32 x i8>, ptr [[TMP9]], align 32
; CHECK-NEXT: [[TMP15:%.*]] = icmp ne <32 x i8> [[TMP13]], zeroinitializer
; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <32 x i8> [[TMP29]], zeroinitializer
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <32 x i8> [[X1]], zeroinitializer
; CHECK-NEXT: [[TMP17:%.*]] = icmp ne <32 x i8> [[TMP30]], zeroinitializer
-; CHECK-NEXT: [[TMP18:%.*]] = and <32 x i1> [[TMP14]], [[TMP15]]
-; CHECK-NEXT: [[TMP19:%.*]] = and <32 x i1> [[TMP16]], [[TMP15]]
-; CHECK-NEXT: [[TMP20:%.*]] = and <32 x i1> [[TMP14]], [[TMP17]]
+; CHECK-NEXT: [[TMP18:%.*]] = and <32 x i1> [[TMP15]], [[TMP16]]
+; CHECK-NEXT: [[TMP19:%.*]] = and <32 x i1> [[TMP12]], [[TMP16]]
+; CHECK-NEXT: [[TMP20:%.*]] = and <32 x i1> [[TMP15]], [[TMP17]]
; CHECK-NEXT: [[TMP21:%.*]] = or <32 x i1> [[TMP18]], [[TMP19]]
; CHECK-NEXT: [[TMP22:%.*]] = or <32 x i1> [[TMP21]], [[TMP20]]
; CHECK-NEXT: [[TMP23:%.*]] = sext <32 x i1> [[TMP22]] to <32 x i8>
@@ -191,18 +171,14 @@ define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <8 x i32> %x1, pt
; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <8 x i32> [[TMP24]], zeroinitializer
; CHECK-NEXT: [[TMP28:%.*]] = sext <8 x i1> [[TMP25]] to <8 x i32>
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP28]], [[TMP5]]
-; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X2]])
-; CHECK-NEXT: [[TMP31:%.*]] = bitcast <8 x i32> [[X1]] to <32 x i8>
-; CHECK-NEXT: [[TMP32:%.*]] = bitcast <8 x i32> [[X4]] to <32 x i8>
-; CHECK-NEXT: [[TMP33:%.*]] = bitcast <8 x i32> [[TMP3]] to <32 x i8>
-; CHECK-NEXT: [[TMP34:%.*]] = bitcast <8 x i32> [[TMP4]] to <32 x i8>
-; CHECK-NEXT: [[TMP35:%.*]] = icmp ne <32 x i8> [[TMP33]], zeroinitializer
-; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <32 x i8> [[TMP34]], zeroinitializer
-; CHECK-NEXT: [[TMP37:%.*]] = icmp ne <32 x i8> [[TMP31]], zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[TMP30]])
+; CHECK-NEXT: [[TMP26:%.*]] = icmp ne <32 x i8> [[TMP13]], zeroinitializer
; CHECK-NEXT: [[TMP38:%.*]] = icmp ne <32 x i8> [[TMP32]], zeroinitializer
-; CHECK-NEXT: [[TMP39:%.*]] = and <32 x i1> [[TMP35]], [[TMP36]]
-; CHECK-NEXT: [[TMP40:%.*]] = and <32 x i1> [[TMP37]], [[TMP36]]
-; CHECK-NEXT: [[TMP41:%.*]] = and <32 x i1> [[TMP35]], [[TMP38]]
+; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <32 x i8> [[X1]], zeroinitializer
+; CHECK-NEXT: [[TMP31:%.*]] = icmp ne <32 x i8> [[X4]], zeroinitializer
+; CHECK-NEXT: [[TMP39:%.*]] = and <32 x i1> [[TMP26]], [[TMP38]]
+; CHECK-NEXT: [[TMP40:%.*]] = and <32 x i1> [[TMP27]], [[TMP38]]
+; CHECK-NEXT: [[TMP41:%.*]] = and <32 x i1> [[TMP26]], [[TMP31]]
; CHECK-NEXT: [[TMP42:%.*]] = or <32 x i1> [[TMP39]], [[TMP40]]
; CHECK-NEXT: [[TMP43:%.*]] = or <32 x i1> [[TMP42]], [[TMP41]]
; CHECK-NEXT: [[TMP44:%.*]] = sext <32 x i1> [[TMP43]] to <32 x i8>
@@ -210,28 +186,28 @@ define <8 x i32>@test_int_x86_avx2_vpdpbssd_256(<8 x i32> %x0, <8 x i32> %x1, pt
; CHECK-NEXT: [[TMP46:%.*]] = icmp ne <8 x i32> [[TMP45]], zeroinitializer
; CHECK-NEXT: [[TMP49:%.*]] = sext <8 x i1> [[TMP46]] to <8 x i32>
; CHECK-NEXT: [[_MSPROP3:%.*]] = or <8 x i32> [[TMP49]], [[TMP5]]
-; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X4]])
+; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[X4]])
; CHECK-NEXT: [[_MSPROP4:%.*]] = or <8 x i32> [[_MSPROP1]], [[_MSPROP3]]
; CHECK-NEXT: [[RES:%.*]] = add <8 x i32> [[TMP10]], [[TMP11]]
; CHECK-NEXT: store <8 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <8 x i32> [[RES]]
;
- %x2 = load <8 x i32>, ptr %x2p
- %1 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4)
+ %x2 = load <32 x i8>, ptr %x2p
+ %1 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssd.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4)
%res = add <8 x i32> %1, %2
ret <8 x i32> %res
}
-declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32>, <32 x i8>, <32 x i8>)
-define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) sanitize_memory {
+define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpbssds_256(
-; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]]) #[[ATTR1]] {
+; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <32 x i8> [[X4:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: [[TMP5:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP32:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -239,22 +215,18 @@ define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <8 x i32> %x1, p
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: [[BB6]]:
-; CHECK-NEXT: [[X2:%.*]] = load <8 x i32>, ptr [[X2P]], align 32
+; CHECK-NEXT: [[TMP30:%.*]] = load <32 x i8>, ptr [[X2P]], align 32
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: [[_MSLD:%.*]] = load <8 x i32>, ptr [[TMP9]], align 32
-; CHECK-NEXT: [[TMP29:%.*]] = bitcast <8 x i32> [[X1]] to <32 x i8>
-; CHECK-NEXT: [[TMP30:%.*]] = bitcast <8 x i32> [[X2]] to <32 x i8>
-; CHECK-NEXT: [[TMP12:%.*]] = bitcast <8 x i32> [[TMP3]] to <32 x i8>
-; CHECK-NEXT: [[TMP13:%.*]] = bitcast <8 x i32> [[_MSLD]] to <32 x i8>
-; CHECK-NEXT: [[TMP14:%.*]] = icmp ne <32 x i8> [[TMP12]], zeroinitializer
+; CHECK-NEXT: [[TMP29:%.*]] = load <32 x i8>, ptr [[TMP9]], align 32
; CHECK-NEXT: [[TMP15:%.*]] = icmp ne <32 x i8> [[TMP13]], zeroinitializer
; CHECK-NEXT: [[TMP16:%.*]] = icmp ne <32 x i8> [[TMP29]], zeroinitializer
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <32 x i8> [[X1]], zeroinitializer
; CHECK-NEXT: [[TMP17:%.*]] = icmp ne <32 x i8> [[TMP30]], zeroinitializer
-; CHECK-NEXT: [[TMP18:%.*]] = and <32 x i1> [[TMP14]], [[TMP15]]
-; CHECK-NEXT: [[TMP19:%.*]] = and <32 x i1> [[TMP16]], [[TMP15]]
-; CHECK-NEXT: [[TMP20:%.*]] = and <32 x i1> [[TMP14]], [[TMP17]]
+; CHECK-NEXT: [[TMP18:%.*]] = and <32 x i1> [[TMP15]], [[TMP16]]
+; CHECK-NEXT: [[TMP19:%.*]] = and <32 x i1> [[TMP12]], [[TMP16]]
+; CHECK-NEXT: [[TMP20:%.*]] = and <32 x i1> [[TMP15]], [[TMP17]]
; CHECK-NEXT: [[TMP21:%.*]] = or <32 x i1> [[TMP18]], [[TMP19]]
; CHECK-NEXT: [[TMP22:%.*]] = or <32 x i1> [[TMP21]], [[TMP20]]
; CHECK-NEXT: [[TMP23:%.*]] = sext <32 x i1> [[TMP22]] to <32 x i8>
@@ -262,18 +234,14 @@ define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <8 x i32> %x1, p
; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <8 x i32> [[TMP24]], zeroinitializer
; CHECK-NEXT: [[TMP28:%.*]] = sext <8 x i1> [[TMP25]] to <8 x i32>
; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP28]], [[TMP5]]
-; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X2]])
-; CHECK-NEXT: [[TMP31:%.*]] = bitcast <8 x i32> [[X1]] to <32 x i8>
-; CHECK-NEXT: [[TMP32:%.*]] = bitcast <8 x i32> [[X4]] to <32 x i8>
-; CHECK-NEXT: [[TMP33:%.*]] = bitcast <8 x i32> [[TMP3]] to <32 x i8>
-; CHECK-NEXT: [[TMP34:%.*]] = bitcast <8 x i32> [[TMP4]] to <32 x i8>
-; CHECK-NEXT: [[TMP35:%.*]] = icmp ne <32 x i8> [[TMP33]], zeroinitializer
-; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <32 x i8> [[TMP34]], zeroinitializer
-; CHECK-NEXT: [[TMP37:%.*]] = icmp ne <32 x i8> [[TMP31]], zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[TMP30]])
+; CHECK-NEXT: [[TMP26:%.*]] = icmp ne <32 x i8> [[TMP13]], zeroinitializer
; CHECK-NEXT: [[TMP38:%.*]] = icmp ne <32 x i8> [[TMP32]], zeroinitializer
-; CHECK-NEXT: [[TMP39:%.*]] = and <32 x i1> [[TMP35]], [[TMP36]]
-; CHECK-NEXT: [[TMP40:%.*]] = and <32 x i1> [[TMP37]], [[TMP36]]
-; CHECK-NEXT: [[TMP41:%.*]] = and <32 x i1> [[TMP35]], [[TMP38]]
+; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <32 x i8> [[X1]], zeroinitializer
+; CHECK-NEXT: [[TMP31:%.*]] = icmp ne <32 x i8> [[X4]], zeroinitializer
+; CHECK-NEXT: [[TMP39:%.*]] = and <32 x i1> [[TMP26]], [[TMP38]]
+; CHECK-NEXT: [[TMP40:%.*]] = and <32 x i1> [[TMP27]], [[TMP38]]
+; CHECK-NEXT: [[TMP41:%.*]] = and <32 x i1> [[TMP26]], [[TMP31]]
; CHECK-NEXT: [[TMP42:%.*]] = or <32 x i1> [[TMP39]], [[TMP40]]
; CHECK-NEXT: [[TMP43:%.*]] = or <32 x i1> [[TMP42]], [[TMP41]]
; CHECK-NEXT: [[TMP44:%.*]] = sext <32 x i1> [[TMP43]] to <32 x i8>
@@ -281,28 +249,28 @@ define <8 x i32>@test_int_x86_avx2_vpdpbssds_256(<8 x i32> %x0, <8 x i32> %x1, p
; CHECK-NEXT: [[TMP46:%.*]] = icmp ne <8 x i32> [[TMP45]], zeroinitializer
; CHECK-NEXT: [[TMP49:%.*]] = sext <8 x i1> [[TMP46]] to <8 x i32>
; CHECK-NEXT: [[_MSPROP3:%.*]] = or <8 x i32> [[TMP49]], [[TMP5]]
-; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X4]])
+; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[X4]])
; CHECK-NEXT: [[_MSPROP4:%.*]] = or <8 x i32> [[_MSPROP1]], [[_MSPROP3]]
; CHECK-NEXT: [[RES:%.*]] = add <8 x i32> [[TMP10]], [[TMP11]]
; CHECK-NEXT: store <8 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <8 x i32> [[RES]]
;
- %x2 = load <8 x i32>, ptr %x2p
- %1 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4)
+ %x2 = load <32 x i8>, ptr %x2p
+ %1 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbssds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4)
%res = add <8 x i32> %1, %2
ret <8 x i32> %res
}
-declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32>, <16 x i8>, <16 x i8>)
-define <4 x i32>@test_int_x86_avx2_vpdpbsud_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) sanitize_memory {
+define <4 x i32>@test_int_x86_avx2_vpdpbsud_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpbsud_128(
-; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <4 x i32> [[X4:%.*]]) #[[ATTR1]] {
+; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i8> [[X4:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -310,38 +278,62 @@ define <4 x i32>@test_int_x86_avx2_vpdpbsud_128(<4 x i32> %x0, <4 x i32> %x1, pt
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: [[BB6]]:
-; CHECK-NEXT: [[X2:%.*]] = load <4 x i32>, ptr [[X2P]], align 16
+; CHECK-NEXT: [[X2:%.*]] = load <16 x i8>, ptr [[X2P]], align 16
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: [[_MSLD:%.*]] = load <4 x i32>, ptr [[TMP9]], align 16
-; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[_MSLD]]
-; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X2]])
-; CHECK-NEXT: [[_MSPROP2:%.*]] = or <4 x i32> [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[_MSPROP3:%.*]] = or <4 x i32> [[_MSPROP2]], [[TMP4]]
-; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X4]])
+; CHECK-NEXT: [[_MSLD:%.*]] = load <16 x i8>, ptr [[TMP9]], align 16
+; CHECK-NEXT: [[TMP23:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP24:%.*]] = icmp ne <16 x i8> [[_MSLD]], zeroinitializer
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <16 x i8> [[X2]], zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = and <16 x i1> [[TMP23]], [[TMP24]]
+; CHECK-NEXT: [[TMP15:%.*]] = and <16 x i1> [[TMP12]], [[TMP24]]
+; CHECK-NEXT: [[TMP16:%.*]] = and <16 x i1> [[TMP23]], [[TMP13]]
+; CHECK-NEXT: [[TMP17:%.*]] = or <16 x i1> [[TMP14]], [[TMP15]]
+; CHECK-NEXT: [[TMP18:%.*]] = or <16 x i1> [[TMP17]], [[TMP16]]
+; CHECK-NEXT: [[TMP19:%.*]] = sext <16 x i1> [[TMP18]] to <16 x i8>
+; CHECK-NEXT: [[TMP20:%.*]] = bitcast <16 x i8> [[TMP19]] to <4 x i32>
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <4 x i32> [[TMP20]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = sext <4 x i1> [[TMP21]] to <4 x i32>
+; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP22]], [[TMP2]]
+; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[X2]])
+; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP26:%.*]] = icmp ne <16 x i8> [[TMP4]], zeroinitializer
+; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer
+; CHECK-NEXT: [[TMP28:%.*]] = icmp ne <16 x i8> [[X4]], zeroinitializer
+; CHECK-NEXT: [[TMP29:%.*]] = and <16 x i1> [[TMP25]], [[TMP26]]
+; CHECK-NEXT: [[TMP30:%.*]] = and <16 x i1> [[TMP27]], [[TMP26]]
+; CHECK-NEXT: [[TMP31:%.*]] = and <16 x i1> [[TMP25]], [[TMP28]]
+; CHECK-NEXT: [[TMP32:%.*]] = or <16 x i1> [[TMP29]], [[TMP30]]
+; CHECK-NEXT: [[TMP33:%.*]] = or <16 x i1> [[TMP32]], [[TMP31]]
+; CHECK-NEXT: [[TMP34:%.*]] = sext <16 x i1> [[TMP33]] to <16 x i8>
+; CHECK-NEXT: [[TMP35:%.*]] = bitcast <16 x i8> [[TMP34]] to <4 x i32>
+; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <4 x i32> [[TMP35]], zeroinitializer
+; CHECK-NEXT: [[TMP37:%.*]] = sext <4 x i1> [[TMP36]] to <4 x i32>
+; CHECK-NEXT: [[_MSPROP3:%.*]] = or <4 x i32> [[TMP37]], [[TMP2]]
+; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[X4]])
; CHECK-NEXT: [[_MSPROP4:%.*]] = or <4 x i32> [[_MSPROP1]], [[_MSPROP3]]
; CHECK-NEXT: [[RES:%.*]] = add <4 x i32> [[TMP10]], [[TMP11]]
; CHECK-NEXT: store <4 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i32> [[RES]]
;
- %x2 = load <4 x i32>, ptr %x2p
- %1 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4)
+ %x2 = load <16 x i8>, ptr %x2p
+ %1 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsud.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4)
%res = add <4 x i32> %1, %2
ret <4 x i32> %res
}
-declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32>, <16 x i8>, <16 x i8>)
-define <4 x i32>@test_int_x86_avx2_vpdpbsuds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) sanitize_memory {
+define <4 x i32>@test_int_x86_avx2_vpdpbsuds_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpbsuds_128(
-; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <4 x i32> [[X4:%.*]]) #[[ATTR1]] {
+; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i8> [[X4:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -349,38 +341,62 @@ define <4 x i32>@test_int_x86_avx2_vpdpbsuds_128(<4 x i32> %x0, <4 x i32> %x1, p
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: [[BB6]]:
-; CHECK-NEXT: [[X2:%.*]] = load <4 x i32>, ptr [[X2P]], align 16
+; CHECK-NEXT: [[X2:%.*]] = load <16 x i8>, ptr [[X2P]], align 16
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: [[_MSLD:%.*]] = load <4 x i32>, ptr [[TMP9]], align 16
-; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[_MSLD]]
-; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X2]])
-; CHECK-NEXT: [[_MSPROP2:%.*]] = or <4 x i32> [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[_MSPROP3:%.*]] = or <4 x i32> [[_MSPROP2]], [[TMP4]]
-; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X4]])
+; CHECK-NEXT: [[_MSLD:%.*]] = load <16 x i8>, ptr [[TMP9]], align 16
+; CHECK-NEXT: [[TMP23:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP24:%.*]] = icmp ne <16 x i8> [[_MSLD]], zeroinitializer
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <16 x i8> [[X2]], zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = and <16 x i1> [[TMP23]], [[TMP24]]
+; CHECK-NEXT: [[TMP15:%.*]] = and <16 x i1> [[TMP12]], [[TMP24]]
+; CHECK-NEXT: [[TMP16:%.*]] = and <16 x i1> [[TMP23]], [[TMP13]]
+; CHECK-NEXT: [[TMP17:%.*]] = or <16 x i1> [[TMP14]], [[TMP15]]
+; CHECK-NEXT: [[TMP18:%.*]] = or <16 x i1> [[TMP17]], [[TMP16]]
+; CHECK-NEXT: [[TMP19:%.*]] = sext <16 x i1> [[TMP18]] to <16 x i8>
+; CHECK-NEXT: [[TMP20:%.*]] = bitcast <16 x i8> [[TMP19]] to <4 x i32>
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <4 x i32> [[TMP20]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = sext <4 x i1> [[TMP21]] to <4 x i32>
+; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP22]], [[TMP2]]
+; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[X2]])
+; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP26:%.*]] = icmp ne <16 x i8> [[TMP4]], zeroinitializer
+; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer
+; CHECK-NEXT: [[TMP28:%.*]] = icmp ne <16 x i8> [[X4]], zeroinitializer
+; CHECK-NEXT: [[TMP29:%.*]] = and <16 x i1> [[TMP25]], [[TMP26]]
+; CHECK-NEXT: [[TMP30:%.*]] = and <16 x i1> [[TMP27]], [[TMP26]]
+; CHECK-NEXT: [[TMP31:%.*]] = and <16 x i1> [[TMP25]], [[TMP28]]
+; CHECK-NEXT: [[TMP32:%.*]] = or <16 x i1> [[TMP29]], [[TMP30]]
+; CHECK-NEXT: [[TMP33:%.*]] = or <16 x i1> [[TMP32]], [[TMP31]]
+; CHECK-NEXT: [[TMP34:%.*]] = sext <16 x i1> [[TMP33]] to <16 x i8>
+; CHECK-NEXT: [[TMP35:%.*]] = bitcast <16 x i8> [[TMP34]] to <4 x i32>
+; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <4 x i32> [[TMP35]], zeroinitializer
+; CHECK-NEXT: [[TMP37:%.*]] = sext <4 x i1> [[TMP36]] to <4 x i32>
+; CHECK-NEXT: [[_MSPROP3:%.*]] = or <4 x i32> [[TMP37]], [[TMP2]]
+; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[X4]])
; CHECK-NEXT: [[_MSPROP4:%.*]] = or <4 x i32> [[_MSPROP1]], [[_MSPROP3]]
; CHECK-NEXT: [[RES:%.*]] = add <4 x i32> [[TMP10]], [[TMP11]]
; CHECK-NEXT: store <4 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i32> [[RES]]
;
- %x2 = load <4 x i32>, ptr %x2p
- %1 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4)
+ %x2 = load <16 x i8>, ptr %x2p
+ %1 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbsuds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4)
%res = add <4 x i32> %1, %2
ret <4 x i32> %res
}
-declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32>, <32 x i8>, <32 x i8>)
-define <8 x i32>@test_int_x86_avx2_vpdpbsud_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) sanitize_memory {
+define <8 x i32>@test_int_x86_avx2_vpdpbsud_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpbsud_256(
-; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]]) #[[ATTR1]] {
+; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <32 x i8> [[X4:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -388,38 +404,62 @@ define <8 x i32>@test_int_x86_avx2_vpdpbsud_256(<8 x i32> %x0, <8 x i32> %x1, pt
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: [[BB6]]:
-; CHECK-NEXT: [[X2:%.*]] = load <8 x i32>, ptr [[X2P]], align 32
+; CHECK-NEXT: [[X2:%.*]] = load <32 x i8>, ptr [[X2P]], align 32
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: [[_MSLD:%.*]] = load <8 x i32>, ptr [[TMP9]], align 32
-; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[_MSLD]]
-; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X2]])
-; CHECK-NEXT: [[_MSPROP2:%.*]] = or <8 x i32> [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[_MSPROP3:%.*]] = or <8 x i32> [[_MSPROP2]], [[TMP4]]
-; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X4]])
+; CHECK-NEXT: [[_MSLD:%.*]] = load <32 x i8>, ptr [[TMP9]], align 32
+; CHECK-NEXT: [[TMP23:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP24:%.*]] = icmp ne <32 x i8> [[_MSLD]], zeroinitializer
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <32 x i8> [[X1]], zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <32 x i8> [[X2]], zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = and <32 x i1> [[TMP23]], [[TMP24]]
+; CHECK-NEXT: [[TMP15:%.*]] = and <32 x i1> [[TMP12]], [[TMP24]]
+; CHECK-NEXT: [[TMP16:%.*]] = and <32 x i1> [[TMP23]], [[TMP13]]
+; CHECK-NEXT: [[TMP17:%.*]] = or <32 x i1> [[TMP14]], [[TMP15]]
+; CHECK-NEXT: [[TMP18:%.*]] = or <32 x i1> [[TMP17]], [[TMP16]]
+; CHECK-NEXT: [[TMP19:%.*]] = sext <32 x i1> [[TMP18]] to <32 x i8>
+; CHECK-NEXT: [[TMP20:%.*]] = bitcast <32 x i8> [[TMP19]] to <8 x i32>
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <8 x i32> [[TMP20]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = sext <8 x i1> [[TMP21]] to <8 x i32>
+; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP22]], [[TMP2]]
+; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[X2]])
+; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP26:%.*]] = icmp ne <32 x i8> [[TMP4]], zeroinitializer
+; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <32 x i8> [[X1]], zeroinitializer
+; CHECK-NEXT: [[TMP28:%.*]] = icmp ne <32 x i8> [[X4]], zeroinitializer
+; CHECK-NEXT: [[TMP29:%.*]] = and <32 x i1> [[TMP25]], [[TMP26]]
+; CHECK-NEXT: [[TMP30:%.*]] = and <32 x i1> [[TMP27]], [[TMP26]]
+; CHECK-NEXT: [[TMP31:%.*]] = and <32 x i1> [[TMP25]], [[TMP28]]
+; CHECK-NEXT: [[TMP32:%.*]] = or <32 x i1> [[TMP29]], [[TMP30]]
+; CHECK-NEXT: [[TMP33:%.*]] = or <32 x i1> [[TMP32]], [[TMP31]]
+; CHECK-NEXT: [[TMP34:%.*]] = sext <32 x i1> [[TMP33]] to <32 x i8>
+; CHECK-NEXT: [[TMP35:%.*]] = bitcast <32 x i8> [[TMP34]] to <8 x i32>
+; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <8 x i32> [[TMP35]], zeroinitializer
+; CHECK-NEXT: [[TMP37:%.*]] = sext <8 x i1> [[TMP36]] to <8 x i32>
+; CHECK-NEXT: [[_MSPROP3:%.*]] = or <8 x i32> [[TMP37]], [[TMP2]]
+; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[X4]])
; CHECK-NEXT: [[_MSPROP4:%.*]] = or <8 x i32> [[_MSPROP1]], [[_MSPROP3]]
; CHECK-NEXT: [[RES:%.*]] = add <8 x i32> [[TMP10]], [[TMP11]]
; CHECK-NEXT: store <8 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <8 x i32> [[RES]]
;
- %x2 = load <8 x i32>, ptr %x2p
- %1 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4)
+ %x2 = load <32 x i8>, ptr %x2p
+ %1 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsud.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4)
%res = add <8 x i32> %1, %2
ret <8 x i32> %res
}
-declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32>, <32 x i8>, <32 x i8>)
-define <8 x i32>@test_int_x86_avx2_vpdpbsuds_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) sanitize_memory {
+define <8 x i32>@test_int_x86_avx2_vpdpbsuds_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpbsuds_256(
-; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]]) #[[ATTR1]] {
+; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <32 x i8> [[X4:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -427,38 +467,62 @@ define <8 x i32>@test_int_x86_avx2_vpdpbsuds_256(<8 x i32> %x0, <8 x i32> %x1, p
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: [[BB6]]:
-; CHECK-NEXT: [[X2:%.*]] = load <8 x i32>, ptr [[X2P]], align 32
+; CHECK-NEXT: [[X2:%.*]] = load <32 x i8>, ptr [[X2P]], align 32
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: [[_MSLD:%.*]] = load <8 x i32>, ptr [[TMP9]], align 32
-; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[_MSLD]]
-; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X2]])
-; CHECK-NEXT: [[_MSPROP2:%.*]] = or <8 x i32> [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[_MSPROP3:%.*]] = or <8 x i32> [[_MSPROP2]], [[TMP4]]
-; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X4]])
+; CHECK-NEXT: [[_MSLD:%.*]] = load <32 x i8>, ptr [[TMP9]], align 32
+; CHECK-NEXT: [[TMP23:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP24:%.*]] = icmp ne <32 x i8> [[_MSLD]], zeroinitializer
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <32 x i8> [[X1]], zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <32 x i8> [[X2]], zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = and <32 x i1> [[TMP23]], [[TMP24]]
+; CHECK-NEXT: [[TMP15:%.*]] = and <32 x i1> [[TMP12]], [[TMP24]]
+; CHECK-NEXT: [[TMP16:%.*]] = and <32 x i1> [[TMP23]], [[TMP13]]
+; CHECK-NEXT: [[TMP17:%.*]] = or <32 x i1> [[TMP14]], [[TMP15]]
+; CHECK-NEXT: [[TMP18:%.*]] = or <32 x i1> [[TMP17]], [[TMP16]]
+; CHECK-NEXT: [[TMP19:%.*]] = sext <32 x i1> [[TMP18]] to <32 x i8>
+; CHECK-NEXT: [[TMP20:%.*]] = bitcast <32 x i8> [[TMP19]] to <8 x i32>
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <8 x i32> [[TMP20]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = sext <8 x i1> [[TMP21]] to <8 x i32>
+; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP22]], [[TMP2]]
+; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[X2]])
+; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP26:%.*]] = icmp ne <32 x i8> [[TMP4]], zeroinitializer
+; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <32 x i8> [[X1]], zeroinitializer
+; CHECK-NEXT: [[TMP28:%.*]] = icmp ne <32 x i8> [[X4]], zeroinitializer
+; CHECK-NEXT: [[TMP29:%.*]] = and <32 x i1> [[TMP25]], [[TMP26]]
+; CHECK-NEXT: [[TMP30:%.*]] = and <32 x i1> [[TMP27]], [[TMP26]]
+; CHECK-NEXT: [[TMP31:%.*]] = and <32 x i1> [[TMP25]], [[TMP28]]
+; CHECK-NEXT: [[TMP32:%.*]] = or <32 x i1> [[TMP29]], [[TMP30]]
+; CHECK-NEXT: [[TMP33:%.*]] = or <32 x i1> [[TMP32]], [[TMP31]]
+; CHECK-NEXT: [[TMP34:%.*]] = sext <32 x i1> [[TMP33]] to <32 x i8>
+; CHECK-NEXT: [[TMP35:%.*]] = bitcast <32 x i8> [[TMP34]] to <8 x i32>
+; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <8 x i32> [[TMP35]], zeroinitializer
+; CHECK-NEXT: [[TMP37:%.*]] = sext <8 x i1> [[TMP36]] to <8 x i32>
+; CHECK-NEXT: [[_MSPROP3:%.*]] = or <8 x i32> [[TMP37]], [[TMP2]]
+; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[X4]])
; CHECK-NEXT: [[_MSPROP4:%.*]] = or <8 x i32> [[_MSPROP1]], [[_MSPROP3]]
; CHECK-NEXT: [[RES:%.*]] = add <8 x i32> [[TMP10]], [[TMP11]]
; CHECK-NEXT: store <8 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <8 x i32> [[RES]]
;
- %x2 = load <8 x i32>, ptr %x2p
- %1 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4)
+ %x2 = load <32 x i8>, ptr %x2p
+ %1 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbsuds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4)
%res = add <8 x i32> %1, %2
ret <8 x i32> %res
}
-declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32>, <16 x i8>, <16 x i8>)
-define <4 x i32>@test_int_x86_avx2_vpdpbuud_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) sanitize_memory {
+define <4 x i32>@test_int_x86_avx2_vpdpbuud_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpbuud_128(
-; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <4 x i32> [[X4:%.*]]) #[[ATTR1]] {
+; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i8> [[X4:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -466,38 +530,62 @@ define <4 x i32>@test_int_x86_avx2_vpdpbuud_128(<4 x i32> %x0, <4 x i32> %x1, pt
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: [[BB6]]:
-; CHECK-NEXT: [[X2:%.*]] = load <4 x i32>, ptr [[X2P]], align 16
+; CHECK-NEXT: [[X2:%.*]] = load <16 x i8>, ptr [[X2P]], align 16
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: [[_MSLD:%.*]] = load <4 x i32>, ptr [[TMP9]], align 16
-; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[_MSLD]]
-; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X2]])
-; CHECK-NEXT: [[_MSPROP2:%.*]] = or <4 x i32> [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[_MSPROP3:%.*]] = or <4 x i32> [[_MSPROP2]], [[TMP4]]
-; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X4]])
+; CHECK-NEXT: [[_MSLD:%.*]] = load <16 x i8>, ptr [[TMP9]], align 16
+; CHECK-NEXT: [[TMP23:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP24:%.*]] = icmp ne <16 x i8> [[_MSLD]], zeroinitializer
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <16 x i8> [[X2]], zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = and <16 x i1> [[TMP23]], [[TMP24]]
+; CHECK-NEXT: [[TMP15:%.*]] = and <16 x i1> [[TMP12]], [[TMP24]]
+; CHECK-NEXT: [[TMP16:%.*]] = and <16 x i1> [[TMP23]], [[TMP13]]
+; CHECK-NEXT: [[TMP17:%.*]] = or <16 x i1> [[TMP14]], [[TMP15]]
+; CHECK-NEXT: [[TMP18:%.*]] = or <16 x i1> [[TMP17]], [[TMP16]]
+; CHECK-NEXT: [[TMP19:%.*]] = sext <16 x i1> [[TMP18]] to <16 x i8>
+; CHECK-NEXT: [[TMP20:%.*]] = bitcast <16 x i8> [[TMP19]] to <4 x i32>
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <4 x i32> [[TMP20]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = sext <4 x i1> [[TMP21]] to <4 x i32>
+; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP22]], [[TMP2]]
+; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[X2]])
+; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP26:%.*]] = icmp ne <16 x i8> [[TMP4]], zeroinitializer
+; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer
+; CHECK-NEXT: [[TMP28:%.*]] = icmp ne <16 x i8> [[X4]], zeroinitializer
+; CHECK-NEXT: [[TMP29:%.*]] = and <16 x i1> [[TMP25]], [[TMP26]]
+; CHECK-NEXT: [[TMP30:%.*]] = and <16 x i1> [[TMP27]], [[TMP26]]
+; CHECK-NEXT: [[TMP31:%.*]] = and <16 x i1> [[TMP25]], [[TMP28]]
+; CHECK-NEXT: [[TMP32:%.*]] = or <16 x i1> [[TMP29]], [[TMP30]]
+; CHECK-NEXT: [[TMP33:%.*]] = or <16 x i1> [[TMP32]], [[TMP31]]
+; CHECK-NEXT: [[TMP34:%.*]] = sext <16 x i1> [[TMP33]] to <16 x i8>
+; CHECK-NEXT: [[TMP35:%.*]] = bitcast <16 x i8> [[TMP34]] to <4 x i32>
+; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <4 x i32> [[TMP35]], zeroinitializer
+; CHECK-NEXT: [[TMP37:%.*]] = sext <4 x i1> [[TMP36]] to <4 x i32>
+; CHECK-NEXT: [[_MSPROP3:%.*]] = or <4 x i32> [[TMP37]], [[TMP2]]
+; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[X4]])
; CHECK-NEXT: [[_MSPROP4:%.*]] = or <4 x i32> [[_MSPROP1]], [[_MSPROP3]]
; CHECK-NEXT: [[RES:%.*]] = add <4 x i32> [[TMP10]], [[TMP11]]
; CHECK-NEXT: store <4 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i32> [[RES]]
;
- %x2 = load <4 x i32>, ptr %x2p
- %1 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4)
+ %x2 = load <16 x i8>, ptr %x2p
+ %1 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuud.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4)
%res = add <4 x i32> %1, %2
ret <4 x i32> %res
}
-declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <4 x i32>, <4 x i32>)
+declare <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32>, <16 x i8>, <16 x i8>)
-define <4 x i32>@test_int_x86_avx2_vpdpbuuds_128(<4 x i32> %x0, <4 x i32> %x1, ptr %x2p, <4 x i32> %x4) sanitize_memory {
+define <4 x i32>@test_int_x86_avx2_vpdpbuuds_128(<4 x i32> %x0, <16 x i8> %x1, ptr %x2p, <16 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <4 x i32> @test_int_x86_avx2_vpdpbuuds_128(
-; CHECK-SAME: <4 x i32> [[X0:%.*]], <4 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <4 x i32> [[X4:%.*]]) #[[ATTR1]] {
+; CHECK-SAME: <4 x i32> [[X0:%.*]], <16 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <16 x i8> [[X4:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 16) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <16 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 40) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -505,38 +593,62 @@ define <4 x i32>@test_int_x86_avx2_vpdpbuuds_128(<4 x i32> %x0, <4 x i32> %x1, p
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: [[BB6]]:
-; CHECK-NEXT: [[X2:%.*]] = load <4 x i32>, ptr [[X2P]], align 16
+; CHECK-NEXT: [[X2:%.*]] = load <16 x i8>, ptr [[X2P]], align 16
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: [[_MSLD:%.*]] = load <4 x i32>, ptr [[TMP9]], align 16
-; CHECK-NEXT: [[_MSPROP:%.*]] = or <4 x i32> [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[_MSPROP]], [[_MSLD]]
-; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X2]])
-; CHECK-NEXT: [[_MSPROP2:%.*]] = or <4 x i32> [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[_MSPROP3:%.*]] = or <4 x i32> [[_MSPROP2]], [[TMP4]]
-; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> [[X0]], <4 x i32> [[X1]], <4 x i32> [[X4]])
+; CHECK-NEXT: [[_MSLD:%.*]] = load <16 x i8>, ptr [[TMP9]], align 16
+; CHECK-NEXT: [[TMP23:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP24:%.*]] = icmp ne <16 x i8> [[_MSLD]], zeroinitializer
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <16 x i8> [[X2]], zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = and <16 x i1> [[TMP23]], [[TMP24]]
+; CHECK-NEXT: [[TMP15:%.*]] = and <16 x i1> [[TMP12]], [[TMP24]]
+; CHECK-NEXT: [[TMP16:%.*]] = and <16 x i1> [[TMP23]], [[TMP13]]
+; CHECK-NEXT: [[TMP17:%.*]] = or <16 x i1> [[TMP14]], [[TMP15]]
+; CHECK-NEXT: [[TMP18:%.*]] = or <16 x i1> [[TMP17]], [[TMP16]]
+; CHECK-NEXT: [[TMP19:%.*]] = sext <16 x i1> [[TMP18]] to <16 x i8>
+; CHECK-NEXT: [[TMP20:%.*]] = bitcast <16 x i8> [[TMP19]] to <4 x i32>
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <4 x i32> [[TMP20]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = sext <4 x i1> [[TMP21]] to <4 x i32>
+; CHECK-NEXT: [[_MSPROP1:%.*]] = or <4 x i32> [[TMP22]], [[TMP2]]
+; CHECK-NEXT: [[TMP10:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[X2]])
+; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <16 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP26:%.*]] = icmp ne <16 x i8> [[TMP4]], zeroinitializer
+; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <16 x i8> [[X1]], zeroinitializer
+; CHECK-NEXT: [[TMP28:%.*]] = icmp ne <16 x i8> [[X4]], zeroinitializer
+; CHECK-NEXT: [[TMP29:%.*]] = and <16 x i1> [[TMP25]], [[TMP26]]
+; CHECK-NEXT: [[TMP30:%.*]] = and <16 x i1> [[TMP27]], [[TMP26]]
+; CHECK-NEXT: [[TMP31:%.*]] = and <16 x i1> [[TMP25]], [[TMP28]]
+; CHECK-NEXT: [[TMP32:%.*]] = or <16 x i1> [[TMP29]], [[TMP30]]
+; CHECK-NEXT: [[TMP33:%.*]] = or <16 x i1> [[TMP32]], [[TMP31]]
+; CHECK-NEXT: [[TMP34:%.*]] = sext <16 x i1> [[TMP33]] to <16 x i8>
+; CHECK-NEXT: [[TMP35:%.*]] = bitcast <16 x i8> [[TMP34]] to <4 x i32>
+; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <4 x i32> [[TMP35]], zeroinitializer
+; CHECK-NEXT: [[TMP37:%.*]] = sext <4 x i1> [[TMP36]] to <4 x i32>
+; CHECK-NEXT: [[_MSPROP3:%.*]] = or <4 x i32> [[TMP37]], [[TMP2]]
+; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> [[X0]], <16 x i8> [[X1]], <16 x i8> [[X4]])
; CHECK-NEXT: [[_MSPROP4:%.*]] = or <4 x i32> [[_MSPROP1]], [[_MSPROP3]]
; CHECK-NEXT: [[RES:%.*]] = add <4 x i32> [[TMP10]], [[TMP11]]
; CHECK-NEXT: store <4 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <4 x i32> [[RES]]
;
- %x2 = load <4 x i32>, ptr %x2p
- %1 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2)
- %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4)
+ %x2 = load <16 x i8>, ptr %x2p
+ %1 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x2)
+ %2 = call <4 x i32> @llvm.x86.avx2.vpdpbuuds.128(<4 x i32> %x0, <16 x i8> %x1, <16 x i8> %x4)
%res = add <4 x i32> %1, %2
ret <4 x i32> %res
}
-declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32>, <32 x i8>, <32 x i8>)
-define <8 x i32>@test_int_x86_avx2_vpdpbuud_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) sanitize_memory {
+define <8 x i32>@test_int_x86_avx2_vpdpbuud_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpbuud_256(
-; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]]) #[[ATTR1]] {
+; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <32 x i8> [[X4:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -544,38 +656,62 @@ define <8 x i32>@test_int_x86_avx2_vpdpbuud_256(<8 x i32> %x0, <8 x i32> %x1, pt
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: [[BB6]]:
-; CHECK-NEXT: [[X2:%.*]] = load <8 x i32>, ptr [[X2P]], align 32
+; CHECK-NEXT: [[X2:%.*]] = load <32 x i8>, ptr [[X2P]], align 32
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: [[_MSLD:%.*]] = load <8 x i32>, ptr [[TMP9]], align 32
-; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[_MSLD]]
-; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X2]])
-; CHECK-NEXT: [[_MSPROP2:%.*]] = or <8 x i32> [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[_MSPROP3:%.*]] = or <8 x i32> [[_MSPROP2]], [[TMP4]]
-; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X4]])
+; CHECK-NEXT: [[_MSLD:%.*]] = load <32 x i8>, ptr [[TMP9]], align 32
+; CHECK-NEXT: [[TMP23:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP24:%.*]] = icmp ne <32 x i8> [[_MSLD]], zeroinitializer
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <32 x i8> [[X1]], zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <32 x i8> [[X2]], zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = and <32 x i1> [[TMP23]], [[TMP24]]
+; CHECK-NEXT: [[TMP15:%.*]] = and <32 x i1> [[TMP12]], [[TMP24]]
+; CHECK-NEXT: [[TMP16:%.*]] = and <32 x i1> [[TMP23]], [[TMP13]]
+; CHECK-NEXT: [[TMP17:%.*]] = or <32 x i1> [[TMP14]], [[TMP15]]
+; CHECK-NEXT: [[TMP18:%.*]] = or <32 x i1> [[TMP17]], [[TMP16]]
+; CHECK-NEXT: [[TMP19:%.*]] = sext <32 x i1> [[TMP18]] to <32 x i8>
+; CHECK-NEXT: [[TMP20:%.*]] = bitcast <32 x i8> [[TMP19]] to <8 x i32>
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <8 x i32> [[TMP20]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = sext <8 x i1> [[TMP21]] to <8 x i32>
+; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP22]], [[TMP2]]
+; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[X2]])
+; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP26:%.*]] = icmp ne <32 x i8> [[TMP4]], zeroinitializer
+; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <32 x i8> [[X1]], zeroinitializer
+; CHECK-NEXT: [[TMP28:%.*]] = icmp ne <32 x i8> [[X4]], zeroinitializer
+; CHECK-NEXT: [[TMP29:%.*]] = and <32 x i1> [[TMP25]], [[TMP26]]
+; CHECK-NEXT: [[TMP30:%.*]] = and <32 x i1> [[TMP27]], [[TMP26]]
+; CHECK-NEXT: [[TMP31:%.*]] = and <32 x i1> [[TMP25]], [[TMP28]]
+; CHECK-NEXT: [[TMP32:%.*]] = or <32 x i1> [[TMP29]], [[TMP30]]
+; CHECK-NEXT: [[TMP33:%.*]] = or <32 x i1> [[TMP32]], [[TMP31]]
+; CHECK-NEXT: [[TMP34:%.*]] = sext <32 x i1> [[TMP33]] to <32 x i8>
+; CHECK-NEXT: [[TMP35:%.*]] = bitcast <32 x i8> [[TMP34]] to <8 x i32>
+; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <8 x i32> [[TMP35]], zeroinitializer
+; CHECK-NEXT: [[TMP37:%.*]] = sext <8 x i1> [[TMP36]] to <8 x i32>
+; CHECK-NEXT: [[_MSPROP3:%.*]] = or <8 x i32> [[TMP37]], [[TMP2]]
+; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[X4]])
; CHECK-NEXT: [[_MSPROP4:%.*]] = or <8 x i32> [[_MSPROP1]], [[_MSPROP3]]
; CHECK-NEXT: [[RES:%.*]] = add <8 x i32> [[TMP10]], [[TMP11]]
; CHECK-NEXT: store <8 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <8 x i32> [[RES]]
;
- %x2 = load <8 x i32>, ptr %x2p
- %1 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4)
+ %x2 = load <32 x i8>, ptr %x2p
+ %1 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuud.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4)
%res = add <8 x i32> %1, %2
ret <8 x i32> %res
}
-declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <8 x i32>, <8 x i32>)
+declare <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32>, <32 x i8>, <32 x i8>)
-define <8 x i32>@test_int_x86_avx2_vpdpbuuds_256(<8 x i32> %x0, <8 x i32> %x1, ptr %x2p, <8 x i32> %x4) sanitize_memory {
+define <8 x i32>@test_int_x86_avx2_vpdpbuuds_256(<8 x i32> %x0, <32 x i8> %x1, ptr %x2p, <32 x i8> %x4) sanitize_memory {
; CHECK-LABEL: define <8 x i32> @test_int_x86_avx2_vpdpbuuds_256(
-; CHECK-SAME: <8 x i32> [[X0:%.*]], <8 x i32> [[X1:%.*]], ptr [[X2P:%.*]], <8 x i32> [[X4:%.*]]) #[[ATTR1]] {
+; CHECK-SAME: <8 x i32> [[X0:%.*]], <32 x i8> [[X1:%.*]], ptr [[X2P:%.*]], <32 x i8> [[X4:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 64) to ptr), align 8
+; CHECK-NEXT: [[TMP3:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr @__msan_param_tls, align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 32) to ptr), align 8
-; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i32>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
+; CHECK-NEXT: [[TMP4:%.*]] = load <32 x i8>, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__msan_param_tls to i64), i64 72) to ptr), align 8
; CHECK-NEXT: call void @llvm.donothing()
; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB5:.*]], label %[[BB6:.*]], !prof [[PROF1]]
@@ -583,25 +719,49 @@ define <8 x i32>@test_int_x86_avx2_vpdpbuuds_256(<8 x i32> %x0, <8 x i32> %x1, p
; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
; CHECK-NEXT: unreachable
; CHECK: [[BB6]]:
-; CHECK-NEXT: [[X2:%.*]] = load <8 x i32>, ptr [[X2P]], align 32
+; CHECK-NEXT: [[X2:%.*]] = load <32 x i8>, ptr [[X2P]], align 32
; CHECK-NEXT: [[TMP7:%.*]] = ptrtoint ptr [[X2P]] to i64
; CHECK-NEXT: [[TMP8:%.*]] = xor i64 [[TMP7]], 87960930222080
; CHECK-NEXT: [[TMP9:%.*]] = inttoptr i64 [[TMP8]] to ptr
-; CHECK-NEXT: [[_MSLD:%.*]] = load <8 x i32>, ptr [[TMP9]], align 32
-; CHECK-NEXT: [[_MSPROP:%.*]] = or <8 x i32> [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[_MSPROP]], [[_MSLD]]
-; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X2]])
-; CHECK-NEXT: [[_MSPROP2:%.*]] = or <8 x i32> [[TMP2]], [[TMP3]]
-; CHECK-NEXT: [[_MSPROP3:%.*]] = or <8 x i32> [[_MSPROP2]], [[TMP4]]
-; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> [[X0]], <8 x i32> [[X1]], <8 x i32> [[X4]])
+; CHECK-NEXT: [[_MSLD:%.*]] = load <32 x i8>, ptr [[TMP9]], align 32
+; CHECK-NEXT: [[TMP23:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP24:%.*]] = icmp ne <32 x i8> [[_MSLD]], zeroinitializer
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ne <32 x i8> [[X1]], zeroinitializer
+; CHECK-NEXT: [[TMP13:%.*]] = icmp ne <32 x i8> [[X2]], zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = and <32 x i1> [[TMP23]], [[TMP24]]
+; CHECK-NEXT: [[TMP15:%.*]] = and <32 x i1> [[TMP12]], [[TMP24]]
+; CHECK-NEXT: [[TMP16:%.*]] = and <32 x i1> [[TMP23]], [[TMP13]]
+; CHECK-NEXT: [[TMP17:%.*]] = or <32 x i1> [[TMP14]], [[TMP15]]
+; CHECK-NEXT: [[TMP18:%.*]] = or <32 x i1> [[TMP17]], [[TMP16]]
+; CHECK-NEXT: [[TMP19:%.*]] = sext <32 x i1> [[TMP18]] to <32 x i8>
+; CHECK-NEXT: [[TMP20:%.*]] = bitcast <32 x i8> [[TMP19]] to <8 x i32>
+; CHECK-NEXT: [[TMP21:%.*]] = icmp ne <8 x i32> [[TMP20]], zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = sext <8 x i1> [[TMP21]] to <8 x i32>
+; CHECK-NEXT: [[_MSPROP1:%.*]] = or <8 x i32> [[TMP22]], [[TMP2]]
+; CHECK-NEXT: [[TMP10:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[X2]])
+; CHECK-NEXT: [[TMP25:%.*]] = icmp ne <32 x i8> [[TMP3]], zeroinitializer
+; CHECK-NEXT: [[TMP26:%.*]] = icmp ne <32 x i8> [[TMP4]], zeroinitializer
+; CHECK-NEXT: [[TMP27:%.*]] = icmp ne <32 x i8> [[X1]], zeroinitializer
+; CHECK-NEXT: [[TMP28:%.*]] = icmp ne <32 x i8> [[X4]], zeroinitializer
+; CHECK-NEXT: [[TMP29:%.*]] = and <32 x i1> [[TMP25]], [[TMP26]]
+; CHECK-NEXT: [[TMP30:%.*]] = and <32 x i1> [[TMP27]], [[TMP26]]
+; CHECK-NEXT: [[TMP31:%.*]] = and <32 x i1> [[TMP25]], [[TMP28]]
+; CHECK-NEXT: [[TMP32:%.*]] = or <32 x i1> [[TMP29]], [[TMP30]]
+; CHECK-NEXT: [[TMP33:%.*]] = or <32 x i1> [[TMP32]], [[TMP31]]
+; CHECK-NEXT: [[TMP34:%.*]] = sext <32 x i1> [[TMP33]] to <32 x i8>
+; CHECK-NEXT: [[TMP35:%.*]] = bitcast <32 x i8> [[TMP34]] to <8 x i32>
+; CHECK-NEXT: [[TMP36:%.*]] = icmp ne <8 x i32> [[TMP35]], zeroinitializer
+; CHECK-NEXT: [[TMP37:%.*]] = sext <8 x i1> [[TMP36]] to <8 x i32>
+; CHECK-NEXT: [[_MSPROP3:%.*]] = or <8 x i32> [[TMP37]], [[TMP2]]
+; CHECK-NEXT: [[TMP11:%.*]] = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> [[X0]], <32 x i8> [[X1]], <32 x i8> [[X4]])
; CHECK-NEXT: [[_MSPROP4:%.*]] = or <8 x i32> [[_MSPROP1]], [[_MSPROP3]]
; CHECK-NEXT: [[RES:%.*]] = add <8 x i32> [[TMP10]], [[TMP11]]
; CHECK-NEXT: store <8 x i32> [[_MSPROP4]], ptr @__msan_retval_tls, align 8
; CHECK-NEXT: ret <8 x i32> [[RES]]
;
- %x2 = load <8 x i32>, ptr %x2p
- %1 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2)
- %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4)
+ %x2 = load <32 x i8>, ptr %x2p
+ %1 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x2)
+ %2 = call <8 x i32> @llvm.x86.avx2.vpdpbuuds.256(<8 x i32> %x0, <32 x i8> %x1, <32 x i8> %x4)
%res = add <8 x i32> %1, %2
ret <8 x i32> %res
}
diff --git a/llvm/test/MC/LoongArch/Macros/macros-la.s b/llvm/test/MC/LoongArch/Macros/macros-la.s
index a732988..8022d5b 100644
--- a/llvm/test/MC/LoongArch/Macros/macros-la.s
+++ b/llvm/test/MC/LoongArch/Macros/macros-la.s
@@ -26,6 +26,7 @@ la.abs $a0, sym_abs
# ABS-NEXT: lu32i.d $a0, %abs64_lo20(sym_abs)
# ABS-NEXT: lu52i.d $a0, $a0, %abs64_hi12(sym_abs)
# ABS-EMPTY:
+# RELOC-NEXT: R_LARCH_MARK_LA - 0x0
# RELOC-NEXT: R_LARCH_ABS_HI20 sym_abs 0x0
# RELOC-NEXT: R_LARCH_ABS_LO12 sym_abs 0x0
# RELOC-NEXT: R_LARCH_ABS64_LO20 sym_abs 0x0
diff --git a/llvm/test/Transforms/AggressiveInstCombine/memchr.ll b/llvm/test/Transforms/AggressiveInstCombine/memchr.ll
index b26320b..6fbe960 100644
--- a/llvm/test/Transforms/AggressiveInstCombine/memchr.ll
+++ b/llvm/test/Transforms/AggressiveInstCombine/memchr.ll
@@ -6,9 +6,10 @@
declare ptr @memchr(ptr, i32, i64)
-define i1 @test_memchr_null(i32 %x) {
+define i1 @test_memchr_null(i32 %x) !prof !0 {
; CHECK-LABEL: define i1 @test_memchr_null(
-; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-SAME: i32 [[X:%.*]])
+; CHECK: !prof [[PROF_0:![0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[X]] to i8
; CHECK-NEXT: switch i8 [[TMP0]], label %[[ENTRY_SPLIT:.*]] [
@@ -40,9 +41,10 @@ entry:
ret i1 %isnull
}
-define ptr @test_memchr(i32 %x) {
+define ptr @test_memchr(i32 %x) !prof !0 {
; CHECK-LABEL: define ptr @test_memchr(
-; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-SAME: i32 [[X:%.*]])
+; CHECK: !prof [[PROF_0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[X]] to i8
; CHECK-NEXT: switch i8 [[TMP0]], label %[[ENTRY_SPLIT:.*]] [
@@ -72,16 +74,17 @@ entry:
ret ptr %memchr
}
-define ptr @test_memchr_smaller_n(i32 %x) {
+define ptr @test_memchr_smaller_n(i32 %x) !prof !0 {
; CHECK-LABEL: define ptr @test_memchr_smaller_n(
-; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-SAME: i32 [[X:%.*]])
+; CHECK: !prof [[PROF_0]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = trunc i32 [[X]] to i8
; CHECK-NEXT: switch i8 [[TMP0]], label %[[ENTRY_SPLIT:.*]] [
; CHECK-NEXT: i8 48, label %[[MEMCHR_CASE:.*]]
; CHECK-NEXT: i8 49, label %[[MEMCHR_CASE1:.*]]
; CHECK-NEXT: i8 0, label %[[MEMCHR_CASE2:.*]]
-; CHECK-NEXT: ]
+; CHECK-NEXT: ], !prof [[PROF_1:![0-9]+]]
; CHECK: [[MEMCHR_CASE]]:
; CHECK-NEXT: br label %[[MEMCHR_SUCCESS:.*]]
; CHECK: [[MEMCHR_CASE1]]:
@@ -103,9 +106,10 @@ entry:
; negative tests
-define ptr @test_memchr_larger_n(i32 %x) {
+define ptr @test_memchr_larger_n(i32 %x) !prof !0 {
; CHECK-LABEL: define ptr @test_memchr_larger_n(
-; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-SAME: i32 [[X:%.*]])
+; CHECK: !prof [[PROF_0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str, i32 [[X]], i64 6)
; CHECK-NEXT: ret ptr [[MEMCHR]]
@@ -115,9 +119,10 @@ entry:
ret ptr %memchr
}
-define ptr @test_memchr_non_constant(i32 %x, ptr %str) {
+define ptr @test_memchr_non_constant(i32 %x, ptr %str) !prof !0 {
; CHECK-LABEL: define ptr @test_memchr_non_constant(
-; CHECK-SAME: i32 [[X:%.*]], ptr [[STR:%.*]]) {
+; CHECK-SAME: i32 [[X:%.*]], ptr [[STR:%.*]])
+; CHECK: !prof [[PROF_0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr [[STR]], i32 [[X]], i64 5)
; CHECK-NEXT: ret ptr [[MEMCHR]]
@@ -127,8 +132,9 @@ entry:
ret ptr %memchr
}
-define ptr @test_memchr_constant_ch() {
-; CHECK-LABEL: define ptr @test_memchr_constant_ch() {
+define ptr @test_memchr_constant_ch() !prof !0 {
+; CHECK-LABEL: define ptr @test_memchr_constant_ch()
+; CHECK: !prof [[PROF_0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str, i32 49, i64 5)
; CHECK-NEXT: ret ptr [[MEMCHR]]
@@ -138,9 +144,10 @@ entry:
ret ptr %memchr
}
-define ptr @test_memchr_dynamic_n(i32 %x, i32 %y) {
+define ptr @test_memchr_dynamic_n(i32 %x, i32 %y) !prof !0 {
; CHECK-LABEL: define ptr @test_memchr_dynamic_n(
-; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]])
+; CHECK: !prof [[PROF_0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str, i32 [[X]], i32 [[Y]])
; CHECK-NEXT: ret ptr [[MEMCHR]]
@@ -150,9 +157,10 @@ entry:
ret ptr %memchr
}
-define ptr @test_memchr_long(i32 %x) {
+define ptr @test_memchr_long(i32 %x) !prof !0 {
; CHECK-LABEL: define ptr @test_memchr_long(
-; CHECK-SAME: i32 [[X:%.*]]) {
+; CHECK-SAME: i32 [[X:%.*]])
+; CHECK: !prof [[PROF_0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str_long, i32 [[X]], i64 8)
; CHECK-NEXT: ret ptr [[MEMCHR]]
@@ -163,9 +171,10 @@ entry:
}
; We want to check that the compiler still calls memchr if the length is non-constant:
-define ptr @test_memchr_non_constant_length2(i32 %x, i64 %len) {
+define ptr @test_memchr_non_constant_length2(i32 %x, i64 %len) !prof !0 {
; CHECK-LABEL: define ptr @test_memchr_non_constant_length2(
-; CHECK-SAME: i32 [[X:%.*]], i64 [[LEN:%.*]]) {
+; CHECK-SAME: i32 [[X:%.*]], i64 [[LEN:%.*]])
+; CHECK: !prof [[PROF_0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[MEMCHR:%.*]] = call ptr @memchr(ptr @str, i32 [[X]], i64 [[LEN]])
; CHECK-NEXT: ret ptr [[MEMCHR]]
@@ -174,3 +183,7 @@ entry:
%memchr = call ptr @memchr(ptr @str, i32 %x, i64 %len)
ret ptr %memchr
}
+
+!0 = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF_0]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF_1]] = !{!"unknown", !"aggressive-instcombine"} \ No newline at end of file
diff --git a/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll b/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll
index 4e6ef0d..5a0c69b 100644
--- a/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/vplan-printing-reductions.ll
@@ -580,6 +580,127 @@ exit:
ret i32 %add
}
+define i32 @print_mulacc_negated(ptr %a, ptr %b) {
+; CHECK-LABEL: 'print_mulacc_negated'
+; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' {
+; CHECK-NEXT: Live-in vp<%0> = VF
+; CHECK-NEXT: Live-in vp<%1> = VF * UF
+; CHECK-NEXT: Live-in vp<%2> = vector-trip-count
+; CHECK-NEXT: Live-in ir<1024> = original trip-count
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<entry>:
+; CHECK-NEXT: Successor(s): scalar.ph, vector.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.ph:
+; CHECK-NEXT: EMIT vp<%3> = reduction-start-vector ir<0>, ir<0>, ir<1>
+; CHECK-NEXT: Successor(s): vector loop
+; CHECK-EMPTY:
+; CHECK-NEXT: <x1> vector loop: {
+; CHECK-NEXT: vector.body:
+; CHECK-NEXT: EMIT vp<%4> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
+; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%accum> = phi vp<%3>, vp<%8>
+; CHECK-NEXT: vp<%5> = SCALAR-STEPS vp<%4>, ir<1>, vp<%0>
+; CHECK-NEXT: CLONE ir<%gep.a> = getelementptr ir<%a>, vp<%5>
+; CHECK-NEXT: vp<%6> = vector-pointer ir<%gep.a>
+; CHECK-NEXT: WIDEN ir<%load.a> = load vp<%6>
+; CHECK-NEXT: CLONE ir<%gep.b> = getelementptr ir<%b>, vp<%5>
+; CHECK-NEXT: vp<%7> = vector-pointer ir<%gep.b>
+; CHECK-NEXT: WIDEN ir<%load.b> = load vp<%7>
+; CHECK-NEXT: EXPRESSION vp<%8> = ir<%accum> + reduce.add (sub (0, mul (ir<%load.b> zext to i32), (ir<%load.a> zext to i32)))
+; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%4>, vp<%1>
+; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<%2>
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+; CHECK-NEXT: Successor(s): middle.block
+; CHECK-EMPTY:
+; CHECK-NEXT: middle.block:
+; CHECK-NEXT: EMIT vp<%10> = compute-reduction-result ir<%accum>, vp<%8>
+; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<1024>, vp<%2>
+; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n>
+; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<exit>:
+; CHECK-NEXT: IR %add.lcssa = phi i32 [ %add, %loop ] (extra operand: vp<%10> from middle.block)
+; CHECK-NEXT: No successors
+; CHECK-EMPTY:
+; CHECK-NEXT: scalar.ph:
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<%2>, middle.block ], [ ir<0>, ir-bb<entry> ]
+; CHECK-NEXT: EMIT-SCALAR vp<%bc.merge.rdx> = phi [ vp<%10>, middle.block ], [ ir<0>, ir-bb<entry> ]
+; CHECK-NEXT: Successor(s): ir-bb<loop>
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<loop>:
+; CHECK-NEXT: IR %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] (extra operand: vp<%bc.resume.val> from scalar.ph)
+; CHECK-NEXT: IR %accum = phi i32 [ 0, %entry ], [ %add, %loop ] (extra operand: vp<%bc.merge.rdx> from scalar.ph)
+; CHECK-NEXT: IR %gep.a = getelementptr i8, ptr %a, i64 %iv
+; CHECK-NEXT: IR %load.a = load i8, ptr %gep.a, align 1
+; CHECK-NEXT: IR %ext.a = zext i8 %load.a to i32
+; CHECK-NEXT: IR %gep.b = getelementptr i8, ptr %b, i64 %iv
+; CHECK-NEXT: IR %load.b = load i8, ptr %gep.b, align 1
+; CHECK-NEXT: IR %ext.b = zext i8 %load.b to i32
+; CHECK-NEXT: IR %mul = mul i32 %ext.b, %ext.a
+; CHECK-NEXT: IR %sub = sub i32 0, %mul
+; CHECK-NEXT: IR %add = add i32 %accum, %sub
+; CHECK-NEXT: IR %iv.next = add i64 %iv, 1
+; CHECK-NEXT: IR %exitcond.not = icmp eq i64 %iv.next, 1024
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+; CHECK: VPlan 'Final VPlan for VF={4},UF={1}' {
+; CHECK-NEXT: Live-in ir<1024> = vector-trip-count
+; CHECK-NEXT: Live-in ir<1024> = original trip-count
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<entry>:
+; CHECK-NEXT: Successor(s): vector.ph
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.ph:
+; CHECK-NEXT: Successor(s): vector.body
+; CHECK-EMPTY:
+; CHECK-NEXT: vector.body:
+; CHECK-NEXT: EMIT-SCALAR vp<%index> = phi [ ir<0>, vector.ph ], [ vp<%index.next>, vector.body ]
+; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%accum> = phi ir<0>, ir<%add>
+; CHECK-NEXT: CLONE ir<%gep.a> = getelementptr ir<%a>, vp<%index>
+; CHECK-NEXT: WIDEN ir<%load.a> = load ir<%gep.a>
+; CHECK-NEXT: CLONE ir<%gep.b> = getelementptr ir<%b>, vp<%index>
+; CHECK-NEXT: WIDEN ir<%load.b> = load ir<%gep.b>
+; CHECK-NEXT: WIDEN-CAST ir<%ext.b> = zext ir<%load.b> to i32
+; CHECK-NEXT: WIDEN-CAST ir<%ext.a> = zext ir<%load.a> to i32
+; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%ext.b>, ir<%ext.a>
+; CHECK-NEXT: WIDEN ir<%sub> = sub ir<0>, ir<%mul>
+; CHECK-NEXT: REDUCE ir<%add> = ir<%accum> + reduce.add (ir<%sub>)
+; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<%index>, ir<4>
+; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, ir<1024>
+; CHECK-NEXT: Successor(s): middle.block, vector.body
+; CHECK-EMPTY:
+; CHECK-NEXT: middle.block:
+; CHECK-NEXT: EMIT vp<[[RED_RESULT:%.+]]> = compute-reduction-result ir<%accum>, ir<%add>
+; CHECK-NEXT: Successor(s): ir-bb<exit>
+; CHECK-EMPTY:
+; CHECK-NEXT: ir-bb<exit>:
+; CHECK-NEXT: IR %add.lcssa = phi i32 [ %add, %loop ] (extra operand: vp<[[RED_RESULT]]> from middle.block)
+; CHECK-NEXT: No successors
+; CHECK-NEXT: }
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %accum = phi i32 [ 0, %entry ], [ %add, %loop ]
+ %gep.a = getelementptr i8, ptr %a, i64 %iv
+ %load.a = load i8, ptr %gep.a, align 1
+ %ext.a = zext i8 %load.a to i32
+ %gep.b = getelementptr i8, ptr %b, i64 %iv
+ %load.b = load i8, ptr %gep.b, align 1
+ %ext.b = zext i8 %load.b to i32
+ %mul = mul i32 %ext.b, %ext.a
+ %sub = sub i32 0, %mul
+ %add = add i32 %accum, %sub
+ %iv.next = add i64 %iv, 1
+ %exitcond.not = icmp eq i64 %iv.next, 1024
+ br i1 %exitcond.not, label %exit, label %loop
+
+exit:
+ ret i32 %add
+}
+
define i64 @print_mulacc_sub_extended(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 %n) {
; CHECK-LABEL: 'print_mulacc_sub_extended'
; CHECK: VPlan 'Initial VPlan for VF={4},UF>=1' {
diff --git a/llvm/test/Transforms/VectorCombine/X86/bitop-of-castops.ll b/llvm/test/Transforms/VectorCombine/X86/bitop-of-castops.ll
index 79e72aa..38c624e 100644
--- a/llvm/test/Transforms/VectorCombine/X86/bitop-of-castops.ll
+++ b/llvm/test/Transforms/VectorCombine/X86/bitop-of-castops.ll
@@ -357,7 +357,7 @@ define <4 x i32> @or_sext_v4i8_to_v4i32_constant_with_loss(<4 x i8> %a) {
define <4 x i16> @and_trunc_nuw_nsw_constant(<4 x i32> %a) {
; CHECK-LABEL: @and_trunc_nuw_nsw_constant(
; CHECK-NEXT: [[AND_INNER:%.*]] = and <4 x i32> [[A:%.*]], <i32 1, i32 2, i32 3, i32 4>
-; CHECK-NEXT: [[AND:%.*]] = trunc <4 x i32> [[AND_INNER]] to <4 x i16>
+; CHECK-NEXT: [[AND:%.*]] = trunc nuw nsw <4 x i32> [[AND_INNER]] to <4 x i16>
; CHECK-NEXT: ret <4 x i16> [[AND]]
;
%t1 = trunc nuw nsw <4 x i32> %a to <4 x i16>
@@ -368,7 +368,7 @@ define <4 x i16> @and_trunc_nuw_nsw_constant(<4 x i32> %a) {
define <4 x i8> @and_trunc_nuw_nsw_minus_constant(<4 x i32> %a) {
; CHECK-LABEL: @and_trunc_nuw_nsw_minus_constant(
; CHECK-NEXT: [[AND_INNER:%.*]] = and <4 x i32> [[A:%.*]], <i32 240, i32 241, i32 242, i32 243>
-; CHECK-NEXT: [[AND:%.*]] = trunc <4 x i32> [[AND_INNER]] to <4 x i8>
+; CHECK-NEXT: [[AND:%.*]] = trunc nuw <4 x i32> [[AND_INNER]] to <4 x i8>
; CHECK-NEXT: ret <4 x i8> [[AND]]
;
%t1 = trunc nuw nsw <4 x i32> %a to <4 x i8>
@@ -379,7 +379,7 @@ define <4 x i8> @and_trunc_nuw_nsw_minus_constant(<4 x i32> %a) {
define <4 x i8> @and_trunc_nuw_nsw_multiconstant(<4 x i32> %a) {
; CHECK-LABEL: @and_trunc_nuw_nsw_multiconstant(
; CHECK-NEXT: [[AND_INNER:%.*]] = and <4 x i32> [[A:%.*]], <i32 240, i32 1, i32 242, i32 3>
-; CHECK-NEXT: [[AND:%.*]] = trunc <4 x i32> [[AND_INNER]] to <4 x i8>
+; CHECK-NEXT: [[AND:%.*]] = trunc nuw <4 x i32> [[AND_INNER]] to <4 x i8>
; CHECK-NEXT: ret <4 x i8> [[AND]]
;
%t1 = trunc nuw nsw <4 x i32> %a to <4 x i8>
@@ -391,7 +391,7 @@ define <4 x i8> @and_trunc_nuw_nsw_multiconstant(<4 x i32> %a) {
define <4 x i32> @or_zext_nneg_constant(<4 x i16> %a) {
; CHECK-LABEL: @or_zext_nneg_constant(
; CHECK-NEXT: [[OR_INNER:%.*]] = or <4 x i16> [[A:%.*]], <i16 1, i16 2, i16 3, i16 4>
-; CHECK-NEXT: [[OR:%.*]] = zext <4 x i16> [[OR_INNER]] to <4 x i32>
+; CHECK-NEXT: [[OR:%.*]] = zext nneg <4 x i16> [[OR_INNER]] to <4 x i32>
; CHECK-NEXT: ret <4 x i32> [[OR]]
;
%z1 = zext nneg <4 x i16> %a to <4 x i32>