Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/AMDGPU/bf16.ll | 32
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fabs.bf16.ll | 48
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll | 151
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fneg.bf16.ll | 85
-rw-r--r--  llvm/test/CodeGen/ARM/fp16-promote.ll | 50
-rw-r--r--  llvm/test/CodeGen/Generic/bfloat-op.ll | 104
-rw-r--r--  llvm/test/CodeGen/Generic/bfloat.ll | 75
-rw-r--r--  llvm/test/CodeGen/Generic/half-op.ll | 115
-rw-r--r--  llvm/test/CodeGen/PowerPC/memcmp.ll | 20
-rw-r--r--  llvm/test/CodeGen/PowerPC/ucmp.ll | 74
-rw-r--r--  llvm/test/CodeGen/RISCV/half-arith.ll | 300
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-vabd.ll | 44
-rw-r--r--  llvm/test/CodeGen/X86/global-variable-partition-with-dap.ll | 43
-rw-r--r--  llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll | 196
-rw-r--r--  llvm/test/Transforms/InstCombine/fold-select-fmul-if-zero.ll | 14
-rw-r--r--  llvm/test/Transforms/InstCombine/select-binop-foldable-floating-point.ll | 148
-rw-r--r--  llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll | 2
-rw-r--r--  llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll | 24
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/RISCV/x264-satd-8x4.ll | 526
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/poison-within-divisions.ll | 98
-rw-r--r--  llvm/test/Transforms/SROA/vector-promotion-cannot-tree-structure-merge.ll | 14
21 files changed, 1534 insertions, 629 deletions
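
Most of the CodeGen churn below comes from fneg, fabs and copysign on half and bfloat now being lowered as integer operations on the 16-bit sign bit instead of round-tripping through f32 multiplies. The following sketch (not part of the patch; the helper names are invented for illustration) shows the bit patterns that the new check lines expect, e.g. 0x7fff, 0x8000 and their packed v2bf16 forms:

#include <stdint.h>

/* Sign-bit-only operations on a 16-bit float payload (half or bfloat).
   The masks match the immediates in the new AMDGPU/ARM/RISC-V check lines. */
static inline uint16_t fp16_fabs(uint16_t bits)      { return bits & 0x7fffu; } /* clear bit 15 */
static inline uint16_t fp16_fneg(uint16_t bits)      { return bits ^ 0x8000u; } /* flip bit 15 */
static inline uint16_t fp16_fneg_fabs(uint16_t bits) { return bits | 0x8000u; } /* set bit 15 */

/* Two bfloats packed in one 32-bit register, as in the v2bf16 tests. */
static inline uint32_t v2bf16_fabs(uint32_t bits) { return bits & 0x7fff7fffu; }
static inline uint32_t v2bf16_fneg(uint32_t bits) { return bits ^ 0x80008000u; }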
diff --git a/llvm/test/CodeGen/AMDGPU/bf16.ll b/llvm/test/CodeGen/AMDGPU/bf16.ll
index 4b14dc6..7ee0015f 100644
--- a/llvm/test/CodeGen/AMDGPU/bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/bf16.ll
@@ -21204,18 +21204,14 @@ define bfloat @v_fabs_bf16(bfloat %a) {
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
-; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GCN-NEXT: v_and_b32_e32 v0, 0x7fff0000, v0
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_fabs_bf16:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
-; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
-; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX7-NEXT: v_and_b32_e32 v0, 0x7fff0000, v0
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_fabs_bf16:
@@ -21440,10 +21436,7 @@ define bfloat @v_fneg_fabs_bf16(bfloat %a) {
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
-; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GCN-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GCN-NEXT: v_or_b32_e32 v0, 0x80000000, v0
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX7-LABEL: v_fneg_fabs_bf16:
@@ -21451,10 +21444,7 @@ define bfloat @v_fneg_fabs_bf16(bfloat %a) {
; GFX7-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX7-NEXT: v_mul_f32_e32 v0, 1.0, v0
; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
-; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; GFX7-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GFX7-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
+; GFX7-NEXT: v_or_b32_e32 v0, 0x80000000, v0
; GFX7-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_fneg_fabs_bf16:
@@ -21510,23 +21500,17 @@ define amdgpu_ps i32 @s_fneg_fabs_bf16(bfloat inreg %a) {
; GCN-LABEL: s_fneg_fabs_bf16:
; GCN: ; %bb.0:
; GCN-NEXT: v_mul_f32_e64 v0, 1.0, s0
+; GCN-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GCN-NEXT: v_or_b32_e32 v0, 0x8000, v0
; GCN-NEXT: v_readfirstlane_b32 s0, v0
-; GCN-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GCN-NEXT: s_bitset0_b32 s0, 31
-; GCN-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GCN-NEXT: s_xor_b32 s0, s0, 0x80000000
-; GCN-NEXT: s_lshr_b32 s0, s0, 16
; GCN-NEXT: ; return to shader part epilog
;
; GFX7-LABEL: s_fneg_fabs_bf16:
; GFX7: ; %bb.0:
; GFX7-NEXT: v_mul_f32_e64 v0, 1.0, s0
+; GFX7-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX7-NEXT: v_or_b32_e32 v0, 0x8000, v0
; GFX7-NEXT: v_readfirstlane_b32 s0, v0
-; GFX7-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX7-NEXT: s_bitset0_b32 s0, 31
-; GFX7-NEXT: s_and_b32 s0, s0, 0xffff0000
-; GFX7-NEXT: s_xor_b32 s0, s0, 0x80000000
-; GFX7-NEXT: s_lshr_b32 s0, s0, 16
; GFX7-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: s_fneg_fabs_bf16:
diff --git a/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll b/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll
index 5d184b1..c46fcde 100644
--- a/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fabs.bf16.ll
@@ -218,19 +218,11 @@ define amdgpu_kernel void @s_fabs_v4bf16(ptr addrspace(1) %out, <4 x bfloat> %in
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_and_b32 s4, s3, 0xffff0000
-; CI-NEXT: s_lshl_b32 s3, s3, 16
-; CI-NEXT: s_and_b32 s5, s2, 0xffff0000
-; CI-NEXT: v_mul_f32_e64 v0, 1.0, |s4|
-; CI-NEXT: v_mul_f32_e64 v1, 1.0, |s3|
-; CI-NEXT: v_mul_f32_e64 v2, 1.0, |s5|
-; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; CI-NEXT: s_lshl_b32 s2, s2, 16
-; CI-NEXT: v_alignbit_b32 v1, v0, v1, 16
-; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v2
-; CI-NEXT: v_mul_f32_e64 v2, 1.0, |s2|
-; CI-NEXT: v_alignbit_b32 v0, v0, v2, 16
+; CI-NEXT: s_and_b32 s3, s3, 0x7fff7fff
+; CI-NEXT: s_and_b32 s2, s2, 0x7fff7fff
; CI-NEXT: v_mov_b32_e32 v3, s1
+; CI-NEXT: v_mov_b32_e32 v0, s2
+; CI-NEXT: v_mov_b32_e32 v1, s3
; CI-NEXT: v_mov_b32_e32 v2, s0
; CI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; CI-NEXT: s_endpgm
@@ -537,16 +529,15 @@ define amdgpu_kernel void @v_fabs_fold_self_v2bf16(ptr addrspace(1) %out, ptr ad
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
-; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; CI-NEXT: v_mul_f32_e64 v4, 1.0, |v3|
-; CI-NEXT: v_mul_f32_e64 v5, 1.0, |v2|
-; CI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; CI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; CI-NEXT: v_mul_f32_e32 v3, v4, v3
-; CI-NEXT: v_mul_f32_e32 v2, v5, v2
-; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; CI-NEXT: v_alignbit_b32 v2, v3, v2, 16
+; CI-NEXT: v_and_b32_e32 v3, 0x7fff, v2
+; CI-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; CI-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
+; CI-NEXT: v_and_b32_e32 v2, 0x7fff0000, v2
+; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; CI-NEXT: v_mul_f32_e32 v2, v2, v5
+; CI-NEXT: v_mul_f32_e32 v3, v3, v4
+; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; CI-NEXT: v_alignbit_b32 v2, v2, v3, 16
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -898,16 +889,13 @@ define amdgpu_kernel void @v_extract_fabs_fold_v2bf16(ptr addrspace(1) %in) #0 {
; CI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; CI-NEXT: flat_load_dword v0, v[0:1]
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_lshlrev_b32_e32 v1, 16, v0
-; CI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; CI-NEXT: v_mul_f32_e64 v1, 1.0, |v1|
-; CI-NEXT: v_mul_f32_e64 v0, 1.0, |v0|
-; CI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; CI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; CI-NEXT: v_mul_f32_e32 v1, 4.0, v1
+; CI-NEXT: v_and_b32_e32 v1, 0x7fff, v0
+; CI-NEXT: v_and_b32_e32 v0, 0x7fff0000, v0
+; CI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; CI-NEXT: v_add_f32_e32 v0, 2.0, v0
-; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; CI-NEXT: v_mul_f32_e32 v1, 4.0, v1
; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; CI-NEXT: flat_store_short v[0:1], v1
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: flat_store_short v[0:1], v0
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll b/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
index 64a9727..76da0aa 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-fabs.bf16.ll
@@ -107,12 +107,10 @@ define amdgpu_kernel void @fneg_fabs_fmul_bf16(ptr addrspace(1) %out, bfloat %x,
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_and_b32 s3, s2, 0x7fff
-; CI-NEXT: s_lshl_b32 s3, s3, 16
-; CI-NEXT: v_mul_f32_e64 v0, -1.0, s3
+; CI-NEXT: s_lshl_b32 s3, s2, 16
; CI-NEXT: s_and_b32 s2, s2, 0xffff0000
-; CI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; CI-NEXT: v_mul_f32_e32 v0, s2, v0
+; CI-NEXT: v_mov_b32_e32 v0, s3
+; CI-NEXT: v_mul_f32_e64 v0, s2, -|v0|
; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
@@ -204,12 +202,10 @@ define amdgpu_kernel void @fneg_fabs_free_bf16(ptr addrspace(1) %out, i16 %in) {
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_and_b32 s2, s2, 0x7fff
-; CI-NEXT: s_lshl_b32 s2, s2, 16
-; CI-NEXT: v_mul_f32_e64 v0, -1.0, s2
-; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; CI-NEXT: s_bitset1_b32 s2, 15
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: v_mov_b32_e32 v2, s2
; CI-NEXT: flat_store_short v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -279,12 +275,10 @@ define amdgpu_kernel void @fneg_fabs_bf16(ptr addrspace(1) %out, bfloat %in) {
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_and_b32 s2, s2, 0x7fff
-; CI-NEXT: s_lshl_b32 s2, s2, 16
-; CI-NEXT: v_mul_f32_e64 v0, -1.0, s2
-; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; CI-NEXT: s_bitset1_b32 s2, 15
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: v_mov_b32_e32 v2, s2
; CI-NEXT: flat_store_short v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -345,43 +339,22 @@ define amdgpu_kernel void @fneg_fabs_bf16(ptr addrspace(1) %out, bfloat %in) {
}
define amdgpu_kernel void @v_fneg_fabs_bf16(ptr addrspace(1) %out, ptr addrspace(1) %in) {
-; CI-LABEL: v_fneg_fabs_bf16:
-; CI: ; %bb.0:
-; CI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; CI-NEXT: s_add_i32 s12, s12, s17
-; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
-; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
-; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: v_mov_b32_e32 v0, s2
-; CI-NEXT: v_mov_b32_e32 v1, s3
-; CI-NEXT: flat_load_ushort v2, v[0:1]
-; CI-NEXT: v_mov_b32_e32 v0, s0
-; CI-NEXT: v_mov_b32_e32 v1, s1
-; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; CI-NEXT: v_mul_f32_e64 v2, 1.0, |v2|
-; CI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; CI-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
-; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; CI-NEXT: flat_store_short v[0:1], v2
-; CI-NEXT: s_endpgm
-;
-; VI-LABEL: v_fneg_fabs_bf16:
-; VI: ; %bb.0:
-; VI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
-; VI-NEXT: s_add_i32 s12, s12, s17
-; VI-NEXT: s_mov_b32 flat_scratch_lo, s13
-; VI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
-; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v0, s2
-; VI-NEXT: v_mov_b32_e32 v1, s3
-; VI-NEXT: flat_load_ushort v2, v[0:1]
-; VI-NEXT: v_mov_b32_e32 v0, s0
-; VI-NEXT: v_mov_b32_e32 v1, s1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_e32 v2, 0x8000, v2
-; VI-NEXT: flat_store_short v[0:1], v2
-; VI-NEXT: s_endpgm
+; CIVI-LABEL: v_fneg_fabs_bf16:
+; CIVI: ; %bb.0:
+; CIVI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
+; CIVI-NEXT: s_add_i32 s12, s12, s17
+; CIVI-NEXT: s_mov_b32 flat_scratch_lo, s13
+; CIVI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
+; CIVI-NEXT: s_waitcnt lgkmcnt(0)
+; CIVI-NEXT: v_mov_b32_e32 v0, s2
+; CIVI-NEXT: v_mov_b32_e32 v1, s3
+; CIVI-NEXT: flat_load_ushort v2, v[0:1]
+; CIVI-NEXT: v_mov_b32_e32 v0, s0
+; CIVI-NEXT: v_mov_b32_e32 v1, s1
+; CIVI-NEXT: s_waitcnt vmcnt(0)
+; CIVI-NEXT: v_or_b32_e32 v2, 0x8000, v2
+; CIVI-NEXT: flat_store_short v[0:1], v2
+; CIVI-NEXT: s_endpgm
;
; GFX9-LABEL: v_fneg_fabs_bf16:
; GFX9: ; %bb.0:
@@ -431,21 +404,13 @@ define amdgpu_kernel void @s_fneg_fabs_v2bf16_non_bc_src(ptr addrspace(1) %out,
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_and_b32 s3, s2, 0xffff0000
-; CI-NEXT: s_lshl_b32 s2, s2, 16
-; CI-NEXT: v_add_f32_e64 v0, s3, 2.0
-; CI-NEXT: v_add_f32_e64 v1, s2, 1.0
-; CI-NEXT: v_readfirstlane_b32 s2, v0
+; CI-NEXT: s_lshl_b32 s3, s2, 16
; CI-NEXT: s_and_b32 s2, s2, 0xffff0000
-; CI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; CI-NEXT: s_bitset0_b32 s2, 31
-; CI-NEXT: v_and_b32_e32 v0, 0x7fffffff, v1
-; CI-NEXT: s_and_b32 s2, s2, 0xffff0000
-; CI-NEXT: s_xor_b32 s2, s2, 0x80000000
-; CI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; CI-NEXT: s_lshr_b32 s2, s2, 16
-; CI-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; CI-NEXT: v_alignbit_b32 v2, s2, v0, 16
+; CI-NEXT: v_add_f32_e64 v1, s2, 2.0
+; CI-NEXT: v_add_f32_e64 v0, s3, 1.0
+; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; CI-NEXT: v_alignbit_b32 v0, v1, v0, 16
+; CI-NEXT: v_or_b32_e32 v2, 0x80008000, v0
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: flat_store_dword v[0:1], v2
@@ -566,15 +531,10 @@ define amdgpu_kernel void @s_fneg_fabs_v2bf16_bc_src(ptr addrspace(1) %out, <2 x
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_and_b32 s3, s2, 0x7fff
-; CI-NEXT: s_and_b32 s2, s2, 0x7fff0000
-; CI-NEXT: v_mul_f32_e64 v0, -1.0, s2
-; CI-NEXT: s_lshl_b32 s2, s3, 16
-; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; CI-NEXT: v_mul_f32_e64 v1, -1.0, s2
-; CI-NEXT: v_alignbit_b32 v2, v0, v1, 16
+; CI-NEXT: s_or_b32 s2, s2, 0x80008000
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: v_mov_b32_e32 v2, s2
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -629,27 +589,11 @@ define amdgpu_kernel void @fneg_fabs_v4bf16(ptr addrspace(1) %out, <4 x bfloat>
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_lshl_b32 s4, s2, 16
-; CI-NEXT: s_and_b32 s2, s2, 0xffff0000
-; CI-NEXT: v_mul_f32_e64 v2, 1.0, |s2|
-; CI-NEXT: s_and_b32 s2, s3, 0xffff0000
-; CI-NEXT: s_lshl_b32 s5, s3, 16
-; CI-NEXT: v_mul_f32_e64 v3, 1.0, |s2|
-; CI-NEXT: v_mul_f32_e64 v0, 1.0, |s4|
-; CI-NEXT: v_mul_f32_e64 v1, 1.0, |s5|
-; CI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; CI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; CI-NEXT: v_xor_b32_e32 v3, 0x80000000, v3
-; CI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; CI-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
-; CI-NEXT: v_and_b32_e32 v0, 0xffff0000, v0
-; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; CI-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
-; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; CI-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; CI-NEXT: v_alignbit_b32 v1, v3, v1, 16
-; CI-NEXT: v_alignbit_b32 v0, v2, v0, 16
+; CI-NEXT: s_or_b32 s3, s3, 0x80008000
+; CI-NEXT: s_or_b32 s2, s2, 0x80008000
; CI-NEXT: v_mov_b32_e32 v3, s1
+; CI-NEXT: v_mov_b32_e32 v0, s2
+; CI-NEXT: v_mov_b32_e32 v1, s3
; CI-NEXT: v_mov_b32_e32 v2, s0
; CI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; CI-NEXT: s_endpgm
@@ -860,21 +804,20 @@ define amdgpu_kernel void @s_fneg_multi_use_fabs_v2bf16(ptr addrspace(1) %out0,
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: v_mov_b32_e32 v1, s1
-; CI-NEXT: v_mov_b32_e32 v2, s2
-; CI-NEXT: s_and_b32 s1, s4, 0x7fff
-; CI-NEXT: s_and_b32 s2, s4, 0x7fff0000
-; CI-NEXT: v_mul_f32_e64 v4, -1.0, s2
-; CI-NEXT: s_lshl_b32 s1, s1, 16
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: s_and_b32 s0, s4, 0x7fff7fff
-; CI-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; CI-NEXT: v_mul_f32_e64 v5, -1.0, s1
-; CI-NEXT: v_alignbit_b32 v4, v4, v5, 16
-; CI-NEXT: v_mov_b32_e32 v5, s0
+; CI-NEXT: v_mov_b32_e32 v2, s2
+; CI-NEXT: s_or_b32 s2, s0, 0x8000
+; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: s_and_b32 s1, s4, 0x7fff0000
+; CI-NEXT: s_and_b32 s2, s2, 0xffff
+; CI-NEXT: s_or_b32 s1, s1, s2
+; CI-NEXT: s_bitset1_b32 s1, 31
+; CI-NEXT: v_mov_b32_e32 v4, s0
; CI-NEXT: v_mov_b32_e32 v3, s3
-; CI-NEXT: flat_store_dword v[0:1], v5
-; CI-NEXT: flat_store_dword v[2:3], v4
+; CI-NEXT: flat_store_dword v[0:1], v4
+; CI-NEXT: v_mov_b32_e32 v0, s1
+; CI-NEXT: flat_store_dword v[2:3], v0
; CI-NEXT: s_endpgm
;
; VI-LABEL: s_fneg_multi_use_fabs_v2bf16:
@@ -1086,5 +1029,3 @@ declare <4 x bfloat> @llvm.fabs.v4bf16(<4 x bfloat>) #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CIVI: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/fneg.bf16.ll b/llvm/test/CodeGen/AMDGPU/fneg.bf16.ll
index d232693..98044a7 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg.bf16.ll
@@ -14,11 +14,10 @@ define amdgpu_kernel void @s_fneg_bf16(ptr addrspace(1) %out, bfloat %in) #0 {
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_lshl_b32 s2, s2, 16
-; CI-NEXT: v_mul_f32_e64 v0, -1.0, s2
-; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; CI-NEXT: s_xor_b32 s2, s2, 0x8000
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: v_mov_b32_e32 v2, s2
; CI-NEXT: flat_store_short v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -93,9 +92,7 @@ define amdgpu_kernel void @v_fneg_bf16(ptr addrspace(1) %out, ptr addrspace(1) %
; CI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; CI-NEXT: flat_load_ushort v2, v[0:1]
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; CI-NEXT: v_mul_f32_e32 v2, -1.0, v2
-; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; CI-NEXT: v_xor_b32_e32 v2, 0x8000, v2
; CI-NEXT: flat_store_short v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -170,11 +167,10 @@ define amdgpu_kernel void @s_fneg_free_bf16(ptr addrspace(1) %out, i16 %in) #0 {
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
-; CI-NEXT: s_lshl_b32 s2, s2, 16
-; CI-NEXT: v_mul_f32_e64 v0, -1.0, s2
-; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; CI-NEXT: s_xor_b32 s2, s2, 0x8000
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: v_mov_b32_e32 v2, s2
; CI-NEXT: flat_store_short v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -248,9 +244,9 @@ define amdgpu_kernel void @v_fneg_fold_bf16(ptr addrspace(1) %out, ptr addrspace
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: v_xor_b32_e32 v3, 0x8000, v2
; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; CI-NEXT: v_mul_f32_e32 v3, -1.0, v2
-; CI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; CI-NEXT: v_mul_f32_e32 v2, v3, v2
; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; CI-NEXT: flat_store_short v[0:1], v2
@@ -365,13 +361,13 @@ define amdgpu_kernel void @s_fneg_v2bf16(ptr addrspace(1) %out, <2 x bfloat> %in
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_and_b32 s3, s2, 0xffff0000
-; CI-NEXT: s_lshl_b32 s2, s2, 16
-; CI-NEXT: v_mul_f32_e64 v0, -1.0, s3
-; CI-NEXT: v_mul_f32_e64 v1, -1.0, s2
-; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; CI-NEXT: v_alignbit_b32 v2, v0, v1, 16
+; CI-NEXT: s_xor_b32 s2, s2, 0x8000
+; CI-NEXT: s_and_b32 s2, s2, 0xffff
+; CI-NEXT: s_or_b32 s2, s2, s3
+; CI-NEXT: s_add_i32 s2, s2, 0x80000000
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: v_mov_b32_e32 v2, s2
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -426,16 +422,16 @@ define amdgpu_kernel void @s_fneg_v2bf16_nonload(ptr addrspace(1) %out) #0 {
; CI-NEXT: ; def s2
; CI-NEXT: ;;#ASMEND
; CI-NEXT: s_and_b32 s3, s2, 0xffff0000
-; CI-NEXT: v_mul_f32_e64 v0, -1.0, s3
-; CI-NEXT: s_lshl_b32 s2, s2, 16
-; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; CI-NEXT: v_mul_f32_e64 v1, -1.0, s2
-; CI-NEXT: v_alignbit_b32 v2, v0, v1, 16
+; CI-NEXT: s_xor_b32 s2, s2, 0x8000
+; CI-NEXT: s_and_b32 s2, s2, 0xffff
+; CI-NEXT: s_or_b32 s2, s2, s3
+; CI-NEXT: s_add_i32 s2, s2, 0x80000000
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: s_mov_b32 flat_scratch_lo, s13
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: v_mov_b32_e32 v2, s2
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -501,13 +497,11 @@ define amdgpu_kernel void @v_fneg_v2bf16(ptr addrspace(1) %out, ptr addrspace(1)
; CI-NEXT: v_add_i32_e32 v0, vcc, s0, v0
; CI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; CI-NEXT: flat_load_dword v2, v[0:1]
+; CI-NEXT: s_mov_b32 s0, 0xffff
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
-; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; CI-NEXT: v_mul_f32_e32 v3, -1.0, v3
-; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; CI-NEXT: v_mul_f32_e32 v2, -1.0, v2
-; CI-NEXT: v_alignbit_b32 v2, v3, v2, 16
+; CI-NEXT: v_xor_b32_e32 v3, 0x8000, v2
+; CI-NEXT: v_bfi_b32 v2, s0, v3, v2
+; CI-NEXT: v_add_i32_e32 v2, vcc, 0x80000000, v2
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -570,13 +564,13 @@ define amdgpu_kernel void @fneg_free_v2bf16(ptr addrspace(1) %out, i32 %in) #0 {
; CI-NEXT: s_lshr_b32 flat_scratch_hi, s12, 8
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_and_b32 s3, s2, 0xffff0000
-; CI-NEXT: s_lshl_b32 s2, s2, 16
-; CI-NEXT: v_mul_f32_e64 v0, -1.0, s3
-; CI-NEXT: v_mul_f32_e64 v1, -1.0, s2
-; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; CI-NEXT: v_alignbit_b32 v2, v0, v1, 16
+; CI-NEXT: s_xor_b32 s2, s2, 0x8000
+; CI-NEXT: s_and_b32 s2, s2, 0xffff
+; CI-NEXT: s_or_b32 s2, s2, s3
+; CI-NEXT: s_add_i32 s2, s2, 0x80000000
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
+; CI-NEXT: v_mov_b32_e32 v2, s2
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -637,16 +631,14 @@ define amdgpu_kernel void @v_fneg_fold_v2bf16(ptr addrspace(1) %out, ptr addrspa
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
-; CI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; CI-NEXT: v_mul_f32_e32 v4, -1.0, v3
-; CI-NEXT: v_mul_f32_e32 v5, -1.0, v2
-; CI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
-; CI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
-; CI-NEXT: v_mul_f32_e32 v3, v4, v3
-; CI-NEXT: v_mul_f32_e32 v2, v5, v2
-; CI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; CI-NEXT: v_alignbit_b32 v2, v3, v2, 16
+; CI-NEXT: v_xor_b32_e32 v3, 0x8000, v2
+; CI-NEXT: v_lshlrev_b32_e32 v4, 16, v2
+; CI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
+; CI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; CI-NEXT: v_mul_f32_e64 v2, -v2, v2
+; CI-NEXT: v_mul_f32_e32 v3, v3, v4
+; CI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; CI-NEXT: v_alignbit_b32 v2, v2, v3, 16
; CI-NEXT: flat_store_dword v[0:1], v2
; CI-NEXT: s_endpgm
;
@@ -912,12 +904,9 @@ define amdgpu_kernel void @v_extract_fneg_no_fold_v2bf16(ptr addrspace(1) %in) #
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: flat_load_dword v0, v[0:1]
; CI-NEXT: s_waitcnt vmcnt(0)
-; CI-NEXT: v_and_b32_e32 v1, 0xffff0000, v0
-; CI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; CI-NEXT: v_mul_f32_e32 v1, -1.0, v1
-; CI-NEXT: v_mul_f32_e32 v0, -1.0, v0
-; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; CI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; CI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; CI-NEXT: v_xor_b32_e32 v0, 0x8000, v0
+; CI-NEXT: v_xor_b32_e32 v1, 0x8000, v1
; CI-NEXT: flat_store_short v[0:1], v0
; CI-NEXT: s_waitcnt vmcnt(0)
; CI-NEXT: flat_store_short v[0:1], v1
diff --git a/llvm/test/CodeGen/ARM/fp16-promote.ll b/llvm/test/CodeGen/ARM/fp16-promote.ll
index 800ee87..8230e47 100644
--- a/llvm/test/CodeGen/ARM/fp16-promote.ll
+++ b/llvm/test/CodeGen/ARM/fp16-promote.ll
@@ -1572,26 +1572,11 @@ define void @test_fma(ptr %p, ptr %q, ptr %r) #0 {
}
define void @test_fabs(ptr %p) {
-; CHECK-FP16-LABEL: test_fabs:
-; CHECK-FP16: ldrh r1, [r0]
-; CHECK-FP16-NEXT: vmov s0, r1
-; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-FP16-NEXT: vabs.f32 s0, s0
-; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
-; CHECK-FP16-NEXT: vmov r1, s0
-; CHECK-FP16-NEXT: strh r1, [r0]
-; CHECK-FP16-NEXT: bx lr
-;
-; CHECK-LIBCALL-LABEL: test_fabs:
-; CHECK-LIBCALL: .save {r4, lr}
-; CHECK-LIBCALL-NEXT: push {r4, lr}
-; CHECK-LIBCALL-NEXT: mov r4, r0
-; CHECK-LIBCALL-NEXT: ldrh r0, [r0]
-; CHECK-LIBCALL-NEXT: bl __aeabi_h2f
-; CHECK-LIBCALL-NEXT: bic r0, r0, #-2147483648
-; CHECK-LIBCALL-NEXT: bl __aeabi_f2h
-; CHECK-LIBCALL-NEXT: strh r0, [r4]
-; CHECK-LIBCALL-NEXT: pop {r4, pc}
+; CHECK-ALL-LABEL: test_fabs:
+; CHECK-ALL: ldrh r1, [r0]
+; CHECK-ALL-NEXT: bfc r1, #15, #17
+; CHECK-ALL-NEXT: strh r1, [r0]
+; CHECK-ALL-NEXT: bx lr
%a = load half, ptr %p, align 2
%r = call half @llvm.fabs.f16(half %a)
store half %r, ptr %p
@@ -2454,26 +2439,11 @@ define half @test_sitofp_i32_fadd(i32 %a, half %b) #0 {
}
define void @test_fneg(ptr %p1, ptr %p2) #0 {
-; CHECK-FP16-LABEL: test_fneg:
-; CHECK-FP16: ldrh r0, [r0]
-; CHECK-FP16-NEXT: vmov s0, r0
-; CHECK-FP16-NEXT: vcvtb.f32.f16 s0, s0
-; CHECK-FP16-NEXT: vneg.f32 s0, s0
-; CHECK-FP16-NEXT: vcvtb.f16.f32 s0, s0
-; CHECK-FP16-NEXT: vmov r0, s0
-; CHECK-FP16-NEXT: strh r0, [r1]
-; CHECK-FP16-NEXT: bx lr
-;
-; CHECK-LIBCALL-LABEL: test_fneg:
-; CHECK-LIBCALL: .save {r4, lr}
-; CHECK-LIBCALL-NEXT: push {r4, lr}
-; CHECK-LIBCALL-NEXT: ldrh r0, [r0]
-; CHECK-LIBCALL-NEXT: mov r4, r1
-; CHECK-LIBCALL-NEXT: bl __aeabi_h2f
-; CHECK-LIBCALL-NEXT: eor r0, r0, #-2147483648
-; CHECK-LIBCALL-NEXT: bl __aeabi_f2h
-; CHECK-LIBCALL-NEXT: strh r0, [r4]
-; CHECK-LIBCALL-NEXT: pop {r4, pc}
+; CHECK-ALL-LABEL: test_fneg:
+; CHECK-ALL: ldrh r0, [r0]
+; CHECK-ALL-NEXT: eor r0, r0, #32768
+; CHECK-ALL-NEXT: strh r0, [r1]
+; CHECK-ALL-NEXT: bx lr
%v = load half, ptr %p1, align 2
%res = fneg half %v
store half %res, ptr %p2, align 2
diff --git a/llvm/test/CodeGen/Generic/bfloat-op.ll b/llvm/test/CodeGen/Generic/bfloat-op.ll
new file mode 100644
index 0000000..d593328
--- /dev/null
+++ b/llvm/test/CodeGen/Generic/bfloat-op.ll
@@ -0,0 +1,104 @@
+; Same as `bfloat.ll`, but for `fneg`, `fabs`, `copysign` and `fma`.
+; Can be merged back into `bfloat.ll` once they have the same platform coverage.
+; Once all targets are fixed, the `CHECK-*` prefixes should all be merged into a single `CHECK` prefix and the `BAD-*` prefixes should be removed.
+
+; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-apple-darwin | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,CHECK-FMA %}
+; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,CHECK-FMA %}
+; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,CHECK-FMA %}
+; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=arm64ec-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,CHECK-FMA %}
+; RUN: %if amdgpu-registered-target %{ llc %s -o - -mtriple=amdgcn-amd-amdhsa | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,CHECK-FMA %}
+; RUN: %if arc-registered-target %{ llc %s -o - -mtriple=arc-elf | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if arm-registered-target %{ llc %s -o - -mtriple=arm-unknown-linux-gnueabi | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if arm-registered-target %{ llc %s -o - -mtriple=thumbv7em-none-eabi | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if avr-registered-target %{ llc %s -o - -mtriple=avr-none | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %}
+; FIXME: BPF has a compiler error
+; RUN: %if csky-registered-target %{ llc %s -o - -mtriple=csky-unknown-linux-gnuabiv2 | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %}
+; FIXME: hard float csky crashes
+; FIXME: directx has a compiler error
+; FIXME: hexagon crashes
+; RUN: %if lanai-registered-target %{ llc %s -o - -mtriple=lanai-unknown-unknown | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch32-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch64-unknown-linux-gnu -mattr=+f | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if m68k-registered-target %{ llc %s -o - -mtriple=m68k-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %}
+; FIXME: mips crashes
+; RUN: %if msp430-registered-target %{ llc %s -o - -mtriple=msp430-none-elf | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if nvptx-registered-target %{ llc %s -o - -mtriple=nvptx64-nvidia-cuda | FileCheck %s --check-prefixes=NOCRASH %}
+; FIXME: powerpc crashes
+; RUN: %if riscv-registered-target %{ llc %s -o - -mtriple=riscv32-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if riscv-registered-target %{ llc %s -o - -mtriple=riscv64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %}
+; FIXME: sparc crashes
+; FIXME: spirv crashes
+; FIXME: s390x crashes
+; FIXME: ve crashes
+; FIXME: wasm crashes
+; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=i686-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if xcore-registered-target %{ llc %s -o - -mtriple=xcore-unknown-unknown | FileCheck %s --check-prefixes=ALL,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if xtensa-registered-target %{ llc %s -o - -mtriple=xtensa-none-elf | FileCheck %s --check-prefixes=ALL,BAD-COPYSIGN,CHECK-FMA %}
+
+; Note that arm64ec labels are quoted, hence the `{{"?}}:`.
+
+; Codegen tests don't work the same way for graphics targets. Add a dummy
+; FileCheck directive and just make sure we don't crash.
+; NOCRASH: {{.*}}
+
+; fneg, fabs and copysign must not quieten signalling NaNs, so they should not call any conversion functions which do.
+; These tests won't catch cases where everything is done using native instructions instead of builtins.
+
+define void @test_fneg(ptr %p1, ptr %p2) #0 {
+; ALL-LABEL: test_fneg{{"?}}:
+; ALL-NEG-NOT: __extend
+; ALL-NEG-NOT: __trunc
+; ALL-NEG-NOT: __gnu
+; ALL-NEG-NOT: __aeabi
+ %v = load bfloat, ptr %p1
+ %res = fneg bfloat %v
+ store bfloat %res, ptr %p2
+ ret void
+}
+
+define void @test_fabs(ptr %p1, ptr %p2) {
+; ALL-LABEL: test_fabs{{"?}}:
+; ALL-ABS-NOT: __extend
+; ALL-ABS-NOT: __trunc
+; ALL-ABS-NOT: __gnu
+; ALL-ABS-NOT: __aeabi
+ %a = load bfloat, ptr %p1
+ %r = call bfloat @llvm.fabs.f16(bfloat %a)
+ store bfloat %r, ptr %p2
+ ret void
+}
+
+define void @test_copysign(ptr %p1, ptr %p2, ptr %p3) {
+; ALL-LABEL: test_copysign{{"?}}:
+; CHECK-COPYSIGN-NOT: __extend
+; CHECK-COPYSIGN-NOT: __trunc
+; CHECK-COPYSIGN-NOT: __gnu
+; CHECK-COPYSIGN-NOT: __aeabi
+; BAD-COPYSIGN: __truncsfbf2
+ %a = load bfloat, ptr %p1
+ %b = load bfloat, ptr %p2
+ %r = call bfloat @llvm.copysign.f16(bfloat %a, bfloat %b)
+ store bfloat %r, ptr %p3
+ ret void
+}
+
+; There is no floating-point type LLVM supports that is large enough to promote bfloat FMA to
+; without causing double rounding issues. This checks for libcalls to f32/f64 fma and truncating
+; f32/f64 to bf16. See https://github.com/llvm/llvm-project/issues/131531
+
+define void @test_fma(ptr %p1, ptr %p2, ptr %p3, ptr %p4) {
+; ALL-LABEL: test_fma{{"?}}:
+; CHECK-FMA-NOT: {{\bfmaf?\b}}
+; CHECK-FMA-NOT: __truncsfbf2
+; CHECK-FMA-NOT: __truncdfbf2
+; BAD-FMA: {{__truncsfbf2|\bfmaf?\b}}
+ %a = load bfloat, ptr %p1
+ %b = load bfloat, ptr %p2
+ %c = load bfloat, ptr %p3
+ %r = call bfloat @llvm.fma.f16(bfloat %a, bfloat %b, bfloat %c)
+ store bfloat %r, ptr %p4
+ ret void
+}
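
The *-NOT patterns above encode the requirement stated in the comment at the top of the new file: none of these operations may be lowered through a float conversion, since converting a signalling NaN quiets it. As an illustration only (not the lowering of any particular backend), a copysign that stays at the bit level looks like this:

#include <stdint.h>

/* copysign on raw bfloat bits: magnitude from 'mag', sign from 'sgn'.
   No conversion is involved, so a signalling-NaN payload in 'mag' survives;
   an extend/truncate round-trip through float would quiet it instead. */
static inline uint16_t bf16_copysign(uint16_t mag, uint16_t sgn) {
    return (uint16_t)((mag & 0x7fffu) | (sgn & 0x8000u));
}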
diff --git a/llvm/test/CodeGen/Generic/bfloat.ll b/llvm/test/CodeGen/Generic/bfloat.ll
new file mode 100644
index 0000000..83c6711
--- /dev/null
+++ b/llvm/test/CodeGen/Generic/bfloat.ll
@@ -0,0 +1,75 @@
+; Simple cross-platform smoke checks for basic bf16 operations.
+;
+; There shouldn't be any architectures that crash when trying to use `bfloat`;
+; check that here. Additionally do a small handful of smoke tests that work
+; well cross-platform.
+
+; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-apple-darwin | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; FIXME: arm64ec crashes when passing/returning bfloat
+; RUN: %if amdgpu-registered-target %{ llc %s -o - -mtriple=amdgcn-amd-amdhsa | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if arc-registered-target %{ llc %s -o - -mtriple=arc-elf | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if arm-registered-target %{ llc %s -o - -mtriple=arm-unknown-linux-gnueabi | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if arm-registered-target %{ llc %s -o - -mtriple=thumbv7em-none-eabi | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if avr-registered-target %{ llc %s -o - -mtriple=avr-none | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if bpf-registered-target %{ llc %s -o - -mtriple=bpfel | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if csky-registered-target %{ llc %s -o - -mtriple=csky-unknown-linux-gnuabiv2 | FileCheck %s --check-prefixes=ALL,CHECK %}
+; FIXME: hard float csky crashes
+; RUN: %if directx-registered-target %{ llc %s -o - -mtriple=dxil-pc-shadermodel6.3-library | FileCheck %s --check-prefixes=NOCRASH %}
+; FIXME: hexagon crashes
+; RUN: %if lanai-registered-target %{ llc %s -o - -mtriple=lanai-unknown-unknown | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch32-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch64-unknown-linux-gnu -mattr=+f | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if m68k-registered-target %{ llc %s -o - -mtriple=m68k-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; FIXME: mips crashes
+; RUN: %if msp430-registered-target %{ llc %s -o - -mtriple=msp430-none-elf | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if nvptx-registered-target %{ llc %s -o - -mtriple=nvptx64-nvidia-cuda | FileCheck %s --check-prefixes=NOCRASH %}
+; FIXME: powerpc crashes
+; RUN: %if riscv-registered-target %{ llc %s -o - -mtriple=riscv32-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if riscv-registered-target %{ llc %s -o - -mtriple=riscv64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; FIXME: sparc crashes
+; FIXME: spirv crashes
+; FIXME: s390x crashes
+; FIXME: ve crashes
+; FIXME: wasm crashes
+; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=i686-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD %}
+; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if xcore-registered-target %{ llc %s -o - -mtriple=xcore-unknown-unknown | FileCheck %s --check-prefixes=ALL,CHECK %}
+; RUN: %if xtensa-registered-target %{ llc %s -o - -mtriple=xtensa-none-elf | FileCheck %s --check-prefixes=ALL,CHECK %}
+
+; Note that arm64ec labels are quoted, hence the `{{"?}}:`.
+
+; Codegen tests don't work the same way for graphics targets. Add a dummy
+; FileCheck directive and just make sure we don't crash.
+; NOCRASH: {{.*}}
+
+; All backends need to be able to bitcast without converting to another format,
+; so we assert against libcalls (specifically __truncsfbf2). This won't catch hardware conversions.
+
+define bfloat @from_bits(i16 %bits) nounwind {
+; ALL-LABEL: from_bits{{"?}}:
+; ALL-NOT: __extend
+; ALL-NOT: __trunc
+; ALL-NOT: __gnu
+ %f = bitcast i16 %bits to bfloat
+ ret bfloat %f
+}
+
+define i16 @to_bits(bfloat %f) nounwind {
+; ALL-LABEL: to_bits{{"?}}:
+; CHECK-NOT: __extend
+; CHECK-NOT: __trunc
+; CHECK-NOT: __gnu
+; BAD: __truncsfbf2
+ %bits = bitcast bfloat %f to i16
+ ret i16 %bits
+}
+
+define bfloat @check_freeze(bfloat %f) nounwind {
+; ALL-LABEL: check_freeze{{"?}}:
+ %t0 = freeze bfloat %f
+ ret bfloat %t0
+}
diff --git a/llvm/test/CodeGen/Generic/half-op.ll b/llvm/test/CodeGen/Generic/half-op.ll
new file mode 100644
index 0000000..1037d8e
--- /dev/null
+++ b/llvm/test/CodeGen/Generic/half-op.ll
@@ -0,0 +1,115 @@
+; Same as `half.ll`, but for `fneg`, `fabs`, `copysign` and `fma`.
+; Can be merged back into `half.ll` once BPF doesn't have a compiler error.
+; Once all targets are fixed, the `CHECK-*` prefixes should all be merged into a single `CHECK` prefix and the `BAD-*` prefixes should be removed.
+
+; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-apple-darwin | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,CHECK-FMA %}
+; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,CHECK-FMA %}
+; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=aarch64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,CHECK-FMA %}
+; RUN: %if aarch64-registered-target %{ llc %s -o - -mtriple=arm64ec-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,CHECK-FMA %}
+; RUN: %if amdgpu-registered-target %{ llc %s -o - -mtriple=amdgcn-amd-amdhsa | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,CHECK-FMA %}
+; RUN: %if arc-registered-target %{ llc %s -o - -mtriple=arc-elf | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %}
+; RUN: %if arm-registered-target %{ llc %s -o - -mtriple=arm-unknown-linux-gnueabi | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if arm-registered-target %{ llc %s -o - -mtriple=thumbv7em-none-eabi | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if avr-registered-target %{ llc %s -o - -mtriple=avr-none | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %}
+; FIXME: BPF has a compiler error
+; RUN: %if csky-registered-target %{ llc %s -o - -mtriple=csky-unknown-linux-gnuabiv2 | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %}
+; RUN: %if csky-registered-target %{ llc %s -o - -mtriple=csky-unknown-linux-gnuabiv2 -mcpu=ck860fv -mattr=+hard-float | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %}
+; FIXME: directx has a compiler error
+; RUN: %if hexagon-registered-target %{ llc %s -o - -mtriple=hexagon-unknown-linux-musl | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if lanai-registered-target %{ llc %s -o - -mtriple=lanai-unknown-unknown | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %}
+; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch32-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if loongarch-registered-target %{ llc %s -o - -mtriple=loongarch64-unknown-linux-gnu -mattr=+f | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if m68k-registered-target %{ llc %s -o - -mtriple=m68k-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %}
+; RUN: %if mips-registered-target %{ llc %s -o - -mtriple=mips-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if mips-registered-target %{ llc %s -o - -mtriple=mips64-unknown-linux-gnuabi64 | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if mips-registered-target %{ llc %s -o - -mtriple=mips64el-unknown-linux-gnuabi64 | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if mips-registered-target %{ llc %s -o - -mtriple=mipsel-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if msp430-registered-target %{ llc %s -o - -mtriple=msp430-none-elf | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %}
+; RUN: %if nvptx-registered-target %{ llc %s -o - -mtriple=nvptx64-nvidia-cuda | FileCheck %s --check-prefixes=NOCRASH %}
+; RUN: %if powerpc-registered-target %{ llc %s -o - -mtriple=powerpc-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %}
+; RUN: %if powerpc-registered-target %{ llc %s -o - -mtriple=powerpc64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %}
+; RUN: %if powerpc-registered-target %{ llc %s -o - -mtriple=powerpc64le-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %}
+; RUN: %if riscv-registered-target %{ llc %s -o - -mtriple=riscv32-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if riscv-registered-target %{ llc %s -o - -mtriple=riscv64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if sparc-registered-target %{ llc %s -o - -mtriple=sparc-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if sparc-registered-target %{ llc %s -o - -mtriple=sparc64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if spirv-registered-target %{ llc %s -o - -mtriple=spirv-unknown-unknown | FileCheck %s --check-prefixes=NOCRASH %}
+; RUN: %if systemz-registered-target %{ llc %s -o - -mtriple=s390x-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if ve-registered-target %{ llc %s -o - -mtriple=ve-unknown-unknown | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %}
+; RUN: %if webassembly-registered-target %{ llc %s -o - -mtriple=wasm32-unknown-unknown | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %}
+; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=i686-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=x86_64-pc-windows-msvc | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if x86-registered-target %{ llc %s -o - -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefixes=ALL,CHECK-NEG-ABS,CHECK-COPYSIGN,BAD-FMA %}
+; RUN: %if xcore-registered-target %{ llc %s -o - -mtriple=xcore-unknown-unknown | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,BAD-FMA %}
+; RUN: %if xtensa-registered-target %{ llc %s -o - -mtriple=xtensa-none-elf | FileCheck %s --check-prefixes=ALL,BAD-NEG-ABS,BAD-COPYSIGN,CHECK-FMA %}
+
+; Note that arm64ec labels are quoted, hence the `{{"?}}:`.
+
+; Codegen tests don't work the same way for graphics targets. Add a dummy
+; FileCheck directive and just make sure we don't crash.
+; NOCRASH: {{.*}}
+
+; fneg, fabs and copysign must not quieten signalling NaNs, so they should not call any conversion functions which do.
+; These tests won't catch cases where everything is done using native instructions instead of builtins.
+; See https://github.com/llvm/llvm-project/issues/104915
+
+define void @test_fneg(ptr %p1, ptr %p2) #0 {
+; ALL-LABEL: test_fneg{{"?}}:
+; CHECK-NEG-ABS-NOT: __extend
+; CHECK-NEG-ABS-NOT: __trunc
+; CHECK-NEG-ABS-NOT: __gnu
+; CHECK-NEG-ABS-NOT: __aeabi
+; BAD-NEG-ABS: {{__extendhfsf2|__gnu_h2f_ieee|__aeabi_h2f}}
+ %v = load half, ptr %p1
+ %res = fneg half %v
+ store half %res, ptr %p2
+ ret void
+}
+
+define void @test_fabs(ptr %p1, ptr %p2) {
+; ALL-LABEL: test_fabs{{"?}}:
+; CHECK-NEG-ABS-NOT: __extend
+; CHECK-NEG-ABS-NOT: __trunc
+; CHECK-NEG-ABS-NOT: __gnu
+; CHECK-NEG-ABS-NOT: __aeabi
+; BAD-NEG-ABS: {{__extendhfsf2|__gnu_h2f_ieee|__aeabi_h2f}}
+ %a = load half, ptr %p1
+ %r = call half @llvm.fabs.f16(half %a)
+ store half %r, ptr %p2
+ ret void
+}
+
+define void @test_copysign(ptr %p1, ptr %p2, ptr %p3) {
+; ALL-LABEL: test_copysign{{"?}}:
+; CHECK-COPYSIGN-NOT: __extend
+; CHECK-COPYSIGN-NOT: __trunc
+; CHECK-COPYSIGN-NOT: __gnu
+; CHECK-COPYSIGN-NOT: __aeabi
+; BAD-COPYSIGN: {{__extendhfsf2|__gnu_h2f_ieee}}
+ %a = load half, ptr %p1
+ %b = load half, ptr %p2
+ %r = call half @llvm.copysign.f16(half %a, half %b)
+ store half %r, ptr %p3
+ ret void
+}
+
+; If promoting, fma must promote at least to f64 to avoid double rounding issues.
+; This checks for calls to f32 fmaf and truncating f32 to f16.
+; See https://github.com/llvm/llvm-project/issues/98389
+
+define void @test_fma(ptr %p1, ptr %p2, ptr %p3, ptr %p4) {
+; ALL-LABEL: test_fma{{"?}}:
+; Allow fmaf16
+; CHECK-FMA-NOT: fmaf{{\b}}
+; CHECK-FMA-NOT: __truncsfhf2
+; CHECK-FMA-NOT: __gnu_f2h_ieee
+; CHECK-FMA-NOT: __aeabi_f2h
+; BAD-FMA: {{__truncsfhf2|__gnu_f2h_ieee|__aeabi_f2h|fmaf\b}}
+ %a = load half, ptr %p1
+ %b = load half, ptr %p2
+ %c = load half, ptr %p3
+ %r = call half @llvm.fma.f16(half %a, half %b, half %c)
+ store half %r, ptr %p4
+ ret void
+}
diff --git a/llvm/test/CodeGen/PowerPC/memcmp.ll b/llvm/test/CodeGen/PowerPC/memcmp.ll
index 39f9269..4998d87 100644
--- a/llvm/test/CodeGen/PowerPC/memcmp.ll
+++ b/llvm/test/CodeGen/PowerPC/memcmp.ll
@@ -6,12 +6,10 @@ define signext i32 @memcmp8(ptr nocapture readonly %buffer1, ptr nocapture reado
; CHECK: # %bb.0:
; CHECK-NEXT: ldbrx 3, 0, 3
; CHECK-NEXT: ldbrx 4, 0, 4
-; CHECK-NEXT: cmpld 3, 4
-; CHECK-NEXT: subc 3, 4, 3
-; CHECK-NEXT: subfe 3, 4, 4
-; CHECK-NEXT: li 4, -1
-; CHECK-NEXT: neg 3, 3
-; CHECK-NEXT: isellt 3, 4, 3
+; CHECK-NEXT: subc 6, 4, 3
+; CHECK-NEXT: sub 5, 3, 4
+; CHECK-NEXT: subfe 3, 4, 3
+; CHECK-NEXT: subfe 3, 3, 5
; CHECK-NEXT: extsw 3, 3
; CHECK-NEXT: blr
%call = tail call signext i32 @memcmp(ptr %buffer1, ptr %buffer2, i64 8)
@@ -23,11 +21,11 @@ define signext i32 @memcmp4(ptr nocapture readonly %buffer1, ptr nocapture reado
; CHECK: # %bb.0:
; CHECK-NEXT: lwbrx 3, 0, 3
; CHECK-NEXT: lwbrx 4, 0, 4
-; CHECK-NEXT: cmplw 3, 4
-; CHECK-NEXT: sub 5, 4, 3
-; CHECK-NEXT: li 3, -1
-; CHECK-NEXT: rldicl 5, 5, 1, 63
-; CHECK-NEXT: isellt 3, 3, 5
+; CHECK-NEXT: subc 6, 4, 3
+; CHECK-NEXT: sub 5, 3, 4
+; CHECK-NEXT: subfe 3, 4, 3
+; CHECK-NEXT: subfe 3, 3, 5
+; CHECK-NEXT: extsw 3, 3
; CHECK-NEXT: blr
%call = tail call signext i32 @memcmp(ptr %buffer1, ptr %buffer2, i64 4)
ret i32 %call
diff --git a/llvm/test/CodeGen/PowerPC/ucmp.ll b/llvm/test/CodeGen/PowerPC/ucmp.ll
index d2dff6e..4d393dd 100644
--- a/llvm/test/CodeGen/PowerPC/ucmp.ll
+++ b/llvm/test/CodeGen/PowerPC/ucmp.ll
@@ -4,12 +4,10 @@
define i8 @ucmp_8_8(i8 zeroext %x, i8 zeroext %y) nounwind {
; CHECK-LABEL: ucmp_8_8:
; CHECK: # %bb.0:
-; CHECK-NEXT: cmplw 3, 4
-; CHECK-NEXT: sub 5, 4, 3
-; CHECK-NEXT: li 3, -1
-; CHECK-NEXT: rldicl 5, 5, 1, 63
-; CHECK-NEXT: rldic 3, 3, 0, 32
-; CHECK-NEXT: isellt 3, 3, 5
+; CHECK-NEXT: subc 6, 4, 3
+; CHECK-NEXT: sub 5, 3, 4
+; CHECK-NEXT: subfe 3, 4, 3
+; CHECK-NEXT: subfe 3, 3, 5
; CHECK-NEXT: blr
%1 = call i8 @llvm.ucmp(i8 %x, i8 %y)
ret i8 %1
@@ -18,12 +16,10 @@ define i8 @ucmp_8_8(i8 zeroext %x, i8 zeroext %y) nounwind {
define i8 @ucmp_8_16(i16 zeroext %x, i16 zeroext %y) nounwind {
; CHECK-LABEL: ucmp_8_16:
; CHECK: # %bb.0:
-; CHECK-NEXT: cmplw 3, 4
-; CHECK-NEXT: sub 5, 4, 3
-; CHECK-NEXT: li 3, -1
-; CHECK-NEXT: rldicl 5, 5, 1, 63
-; CHECK-NEXT: rldic 3, 3, 0, 32
-; CHECK-NEXT: isellt 3, 3, 5
+; CHECK-NEXT: subc 6, 4, 3
+; CHECK-NEXT: sub 5, 3, 4
+; CHECK-NEXT: subfe 3, 4, 3
+; CHECK-NEXT: subfe 3, 3, 5
; CHECK-NEXT: blr
%1 = call i8 @llvm.ucmp(i16 %x, i16 %y)
ret i8 %1
@@ -32,14 +28,10 @@ define i8 @ucmp_8_16(i16 zeroext %x, i16 zeroext %y) nounwind {
define i8 @ucmp_8_32(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: ucmp_8_32:
; CHECK: # %bb.0:
-; CHECK-NEXT: clrldi 5, 4, 32
-; CHECK-NEXT: clrldi 6, 3, 32
-; CHECK-NEXT: sub 5, 5, 6
-; CHECK-NEXT: cmplw 3, 4
-; CHECK-NEXT: li 3, -1
-; CHECK-NEXT: rldic 3, 3, 0, 32
-; CHECK-NEXT: rldicl 5, 5, 1, 63
-; CHECK-NEXT: isellt 3, 3, 5
+; CHECK-NEXT: subc 6, 4, 3
+; CHECK-NEXT: sub 5, 3, 4
+; CHECK-NEXT: subfe 3, 4, 3
+; CHECK-NEXT: subfe 3, 3, 5
; CHECK-NEXT: blr
%1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
ret i8 %1
@@ -48,12 +40,10 @@ define i8 @ucmp_8_32(i32 %x, i32 %y) nounwind {
define i8 @ucmp_8_64(i64 %x, i64 %y) nounwind {
; CHECK-LABEL: ucmp_8_64:
; CHECK: # %bb.0:
-; CHECK-NEXT: cmpld 3, 4
-; CHECK-NEXT: subc 3, 4, 3
-; CHECK-NEXT: subfe 3, 4, 4
-; CHECK-NEXT: li 4, -1
-; CHECK-NEXT: neg 3, 3
-; CHECK-NEXT: isellt 3, 4, 3
+; CHECK-NEXT: subc 6, 4, 3
+; CHECK-NEXT: sub 5, 3, 4
+; CHECK-NEXT: subfe 3, 4, 3
+; CHECK-NEXT: subfe 3, 3, 5
; CHECK-NEXT: blr
%1 = call i8 @llvm.ucmp(i64 %x, i64 %y)
ret i8 %1
@@ -82,14 +72,10 @@ define i8 @ucmp_8_128(i128 %x, i128 %y) nounwind {
define i32 @ucmp_32_32(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: ucmp_32_32:
; CHECK: # %bb.0:
-; CHECK-NEXT: clrldi 5, 4, 32
-; CHECK-NEXT: clrldi 6, 3, 32
-; CHECK-NEXT: sub 5, 5, 6
-; CHECK-NEXT: cmplw 3, 4
-; CHECK-NEXT: li 3, -1
-; CHECK-NEXT: rldic 3, 3, 0, 32
-; CHECK-NEXT: rldicl 5, 5, 1, 63
-; CHECK-NEXT: isellt 3, 3, 5
+; CHECK-NEXT: subc 6, 4, 3
+; CHECK-NEXT: sub 5, 3, 4
+; CHECK-NEXT: subfe 3, 4, 3
+; CHECK-NEXT: subfe 3, 3, 5
; CHECK-NEXT: blr
%1 = call i32 @llvm.ucmp(i32 %x, i32 %y)
ret i32 %1
@@ -98,12 +84,10 @@ define i32 @ucmp_32_32(i32 %x, i32 %y) nounwind {
define i32 @ucmp_32_64(i64 %x, i64 %y) nounwind {
; CHECK-LABEL: ucmp_32_64:
; CHECK: # %bb.0:
-; CHECK-NEXT: cmpld 3, 4
-; CHECK-NEXT: subc 3, 4, 3
-; CHECK-NEXT: subfe 3, 4, 4
-; CHECK-NEXT: li 4, -1
-; CHECK-NEXT: neg 3, 3
-; CHECK-NEXT: isellt 3, 4, 3
+; CHECK-NEXT: subc 6, 4, 3
+; CHECK-NEXT: sub 5, 3, 4
+; CHECK-NEXT: subfe 3, 4, 3
+; CHECK-NEXT: subfe 3, 3, 5
; CHECK-NEXT: blr
%1 = call i32 @llvm.ucmp(i64 %x, i64 %y)
ret i32 %1
@@ -112,12 +96,10 @@ define i32 @ucmp_32_64(i64 %x, i64 %y) nounwind {
define i64 @ucmp_64_64(i64 %x, i64 %y) nounwind {
; CHECK-LABEL: ucmp_64_64:
; CHECK: # %bb.0:
-; CHECK-NEXT: subc 5, 4, 3
-; CHECK-NEXT: cmpld 3, 4
-; CHECK-NEXT: li 3, -1
-; CHECK-NEXT: subfe 5, 4, 4
-; CHECK-NEXT: neg 5, 5
-; CHECK-NEXT: isellt 3, 3, 5
+; CHECK-NEXT: subc 6, 4, 3
+; CHECK-NEXT: sub 5, 3, 4
+; CHECK-NEXT: subfe 3, 4, 3
+; CHECK-NEXT: subfe 3, 3, 5
; CHECK-NEXT: blr
%1 = call i64 @llvm.ucmp(i64 %x, i64 %y)
ret i64 %1
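
The memcmp.ll and ucmp.ll changes above replace the compare-plus-isellt sequences with a carry-based computation of the three-way result. The exact subc/subfe bookkeeping is not reproduced here, but the value being produced is the documented llvm.ucmp result, which a C sketch can state directly:

#include <stdint.h>

/* Branchless unsigned three-way compare: -1 if x < y, 0 if equal, +1 if x > y.
   This is the value llvm.ucmp is defined to return (sign-extended to the result
   width), and therefore what the new subc/subfe sequences must compute. */
static inline int64_t ucmp64(uint64_t x, uint64_t y) {
    return (int64_t)(x > y) - (int64_t)(x < y);
}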
diff --git a/llvm/test/CodeGen/RISCV/half-arith.ll b/llvm/test/CodeGen/RISCV/half-arith.ll
index 2ebb6e9..d089e36 100644
--- a/llvm/test/CodeGen/RISCV/half-arith.ll
+++ b/llvm/test/CodeGen/RISCV/half-arith.ll
@@ -514,6 +514,7 @@ define i32 @fneg_h(half %a, half %b) nounwind {
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32I-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: lui a1, 16
; RV32I-NEXT: addi s1, a1, -1
; RV32I-NEXT: and a0, a0, s1
@@ -521,13 +522,12 @@ define i32 @fneg_h(half %a, half %b) nounwind {
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
+; RV32I-NEXT: lui a1, 8
+; RV32I-NEXT: xor s2, a0, a1
; RV32I-NEXT: and a0, a0, s1
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: lui a0, 524288
-; RV32I-NEXT: xor a0, s0, a0
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: and a0, a0, s1
+; RV32I-NEXT: and a0, s2, s1
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s0
@@ -536,6 +536,7 @@ define i32 @fneg_h(half %a, half %b) nounwind {
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32I-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
@@ -545,6 +546,7 @@ define i32 @fneg_h(half %a, half %b) nounwind {
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addi s1, a1, -1
; RV64I-NEXT: and a0, a0, s1
@@ -552,13 +554,12 @@ define i32 @fneg_h(half %a, half %b) nounwind {
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
+; RV64I-NEXT: lui a1, 8
+; RV64I-NEXT: xor s2, a0, a1
; RV64I-NEXT: and a0, a0, s1
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: lui a0, 524288
-; RV64I-NEXT: xor a0, s0, a0
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: and a0, a0, s1
+; RV64I-NEXT: and a0, s2, s1
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s0
@@ -567,6 +568,7 @@ define i32 @fneg_h(half %a, half %b) nounwind {
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
@@ -638,11 +640,7 @@ define half @fsgnjn_h(half %a, half %b) nounwind {
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: lui a1, 524288
-; RV32I-NEXT: xor a0, a0, a1
-; RV32I-NEXT: call __truncsfhf2
+; RV32I-NEXT: not a0, a0
; RV32I-NEXT: lui a1, 1048568
; RV32I-NEXT: slli s1, s1, 17
; RV32I-NEXT: and a0, a0, a1
@@ -677,11 +675,7 @@ define half @fsgnjn_h(half %a, half %b) nounwind {
; RV64I-NEXT: mv a0, s2
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: call __truncsfhf2
+; RV64I-NEXT: not a0, a0
; RV64I-NEXT: lui a1, 1048568
; RV64I-NEXT: slli s1, s1, 49
; RV64I-NEXT: and a0, a0, a1
@@ -804,15 +798,14 @@ define half @fabs_h(half %a, half %b) nounwind {
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
+; RV32I-NEXT: slli s0, a0, 17
+; RV32I-NEXT: srli s0, s0, 17
; RV32I-NEXT: and a0, a0, s2
; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: slli a0, a0, 1
-; RV32I-NEXT: srli a0, a0, 1
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: and a0, a0, s2
+; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: mv a1, s0
+; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -841,15 +834,14 @@ define half @fabs_h(half %a, half %b) nounwind {
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
+; RV64I-NEXT: slli s0, a0, 49
+; RV64I-NEXT: srli s0, s0, 49
; RV64I-NEXT: and a0, a0, s2
; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: slli a0, a0, 33
-; RV64I-NEXT: srli a0, a0, 33
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: and a0, a0, s2
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: mv a1, s0
+; RV64I-NEXT: mv a1, s1
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
@@ -1217,25 +1209,21 @@ define half @fmsub_h(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui a0, 16
-; RV32I-NEXT: addi s3, a0, -1
-; RV32I-NEXT: and a0, a2, s3
+; RV32I-NEXT: addi s2, a0, -1
+; RV32I-NEXT: and a0, a2, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: lui a1, 524288
-; RV32I-NEXT: xor a0, a0, a1
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: and a0, s1, s3
+; RV32I-NEXT: lui a1, 8
+; RV32I-NEXT: xor s3, a0, a1
+; RV32I-NEXT: and a0, s1, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: and a0, s0, s3
+; RV32I-NEXT: and a0, s0, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: and a0, s2, s3
+; RV32I-NEXT: and a0, s3, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: mv a0, s1
@@ -1261,25 +1249,21 @@ define half @fmsub_h(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui a0, 16
-; RV64I-NEXT: addi s3, a0, -1
-; RV64I-NEXT: and a0, a2, s3
+; RV64I-NEXT: addi s2, a0, -1
+; RV64I-NEXT: and a0, a2, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s2, a0
-; RV64I-NEXT: and a0, s1, s3
+; RV64I-NEXT: lui a1, 8
+; RV64I-NEXT: xor s3, a0, a1
+; RV64I-NEXT: and a0, s1, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: and a0, s0, s3
+; RV64I-NEXT: and a0, s0, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: and a0, s2, s3
+; RV64I-NEXT: and a0, s3, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a2, a0
; RV64I-NEXT: mv a0, s1
@@ -1355,43 +1339,34 @@ define half @fnmadd_h(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s1, a2
-; RV32I-NEXT: mv s0, a1
-; RV32I-NEXT: lui s3, 16
-; RV32I-NEXT: addi s3, s3, -1
+; RV32I-NEXT: mv s0, a2
+; RV32I-NEXT: mv s1, a1
+; RV32I-NEXT: lui a1, 16
+; RV32I-NEXT: addi s3, a1, -1
; RV32I-NEXT: and a0, a0, s3
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: and a0, s1, s3
+; RV32I-NEXT: and a0, s0, s3
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: and a0, s2, s3
-; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: lui s4, 524288
-; RV32I-NEXT: xor a0, a0, s4
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s2, a0
+; RV32I-NEXT: lui a1, 8
+; RV32I-NEXT: xor s2, s2, a1
+; RV32I-NEXT: xor s4, a0, a1
; RV32I-NEXT: and a0, s1, s3
; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: xor a0, a0, s4
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: and a0, s2, s3
; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: and a0, s1, s3
+; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: and a0, s4, s3
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a2, a0
-; RV32I-NEXT: mv a0, s2
+; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: call fmaf
; RV32I-NEXT: call __truncsfhf2
@@ -1413,43 +1388,34 @@ define half @fnmadd_h(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s4, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s1, a2
-; RV64I-NEXT: mv s0, a1
-; RV64I-NEXT: lui s3, 16
-; RV64I-NEXT: addi s3, s3, -1
+; RV64I-NEXT: mv s0, a2
+; RV64I-NEXT: mv s1, a1
+; RV64I-NEXT: lui a1, 16
+; RV64I-NEXT: addi s3, a1, -1
; RV64I-NEXT: and a0, a0, s3
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s2, a0
-; RV64I-NEXT: and a0, s1, s3
+; RV64I-NEXT: and a0, s0, s3
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: and a0, s2, s3
-; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: lui s4, 524288
-; RV64I-NEXT: xor a0, a0, s4
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s2, a0
+; RV64I-NEXT: lui a1, 8
+; RV64I-NEXT: xor s2, s2, a1
+; RV64I-NEXT: xor s4, a0, a1
; RV64I-NEXT: and a0, s1, s3
; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: xor a0, a0, s4
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: and a0, s0, s3
-; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: and a0, s2, s3
; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: mv s2, a0
-; RV64I-NEXT: and a0, s1, s3
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: and a0, s4, s3
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a2, a0
-; RV64I-NEXT: mv a0, s2
+; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: mv a1, s0
; RV64I-NEXT: call fmaf
; RV64I-NEXT: call __truncsfhf2
@@ -1535,44 +1501,35 @@ define half @fnmadd_h_2(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
-; RV32I-NEXT: mv s1, a2
-; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: lui s3, 16
-; RV32I-NEXT: addi s3, s3, -1
+; RV32I-NEXT: mv s0, a2
+; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: lui a0, 16
+; RV32I-NEXT: addi s3, a0, -1
; RV32I-NEXT: and a0, a1, s3
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: and a0, s1, s3
+; RV32I-NEXT: and a0, s0, s3
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: and a0, s2, s3
-; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: lui s4, 524288
-; RV32I-NEXT: xor a0, a0, s4
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s2, a0
+; RV32I-NEXT: lui a1, 8
+; RV32I-NEXT: xor s2, s2, a1
+; RV32I-NEXT: xor s4, a0, a1
; RV32I-NEXT: and a0, s1, s3
; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: xor a0, a0, s4
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: and a0, s0, s3
-; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: and a0, s2, s3
; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: and a0, s1, s3
+; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: and a0, s4, s3
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: mv a0, s0
-; RV32I-NEXT: mv a1, s2
+; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: call fmaf
; RV32I-NEXT: call __truncsfhf2
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
@@ -1593,44 +1550,35 @@ define half @fnmadd_h_2(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s4, 0(sp) # 8-byte Folded Spill
-; RV64I-NEXT: mv s1, a2
-; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: lui s3, 16
-; RV64I-NEXT: addi s3, s3, -1
+; RV64I-NEXT: mv s0, a2
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: lui a0, 16
+; RV64I-NEXT: addi s3, a0, -1
; RV64I-NEXT: and a0, a1, s3
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: mv s2, a0
-; RV64I-NEXT: and a0, s1, s3
+; RV64I-NEXT: and a0, s0, s3
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: and a0, s2, s3
-; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: lui s4, 524288
-; RV64I-NEXT: xor a0, a0, s4
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s2, a0
+; RV64I-NEXT: lui a1, 8
+; RV64I-NEXT: xor s2, s2, a1
+; RV64I-NEXT: xor s4, a0, a1
; RV64I-NEXT: and a0, s1, s3
; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: xor a0, a0, s4
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: and a0, s0, s3
-; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: and a0, s2, s3
; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: mv s2, a0
-; RV64I-NEXT: and a0, s1, s3
+; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: and a0, s4, s3
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a2, a0
; RV64I-NEXT: mv a0, s0
-; RV64I-NEXT: mv a1, s2
+; RV64I-NEXT: mv a1, s1
; RV64I-NEXT: call fmaf
; RV64I-NEXT: call __truncsfhf2
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
@@ -1960,25 +1908,21 @@ define half @fnmsub_h(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: mv s0, a2
; RV32I-NEXT: mv s1, a1
; RV32I-NEXT: lui a1, 16
-; RV32I-NEXT: addi s3, a1, -1
-; RV32I-NEXT: and a0, a0, s3
+; RV32I-NEXT: addi s2, a1, -1
+; RV32I-NEXT: and a0, a0, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: lui a1, 524288
-; RV32I-NEXT: xor a0, a0, a1
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: and a0, s1, s3
+; RV32I-NEXT: lui a1, 8
+; RV32I-NEXT: xor s3, a0, a1
+; RV32I-NEXT: and a0, s1, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: and a0, s0, s3
+; RV32I-NEXT: and a0, s0, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: and a0, s2, s3
+; RV32I-NEXT: and a0, s3, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: mv a2, s0
@@ -2003,25 +1947,21 @@ define half @fnmsub_h(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: mv s1, a1
; RV64I-NEXT: lui a1, 16
-; RV64I-NEXT: addi s3, a1, -1
-; RV64I-NEXT: and a0, a0, s3
+; RV64I-NEXT: addi s2, a1, -1
+; RV64I-NEXT: and a0, a0, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s2, a0
-; RV64I-NEXT: and a0, s1, s3
+; RV64I-NEXT: lui a1, 8
+; RV64I-NEXT: xor s3, a0, a1
+; RV64I-NEXT: and a0, s1, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: and a0, s0, s3
+; RV64I-NEXT: and a0, s0, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: and a0, s2, s3
+; RV64I-NEXT: and a0, s3, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, s1
; RV64I-NEXT: mv a2, s0
@@ -2096,25 +2036,21 @@ define half @fnmsub_h_2(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: mv s0, a2
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui a0, 16
-; RV32I-NEXT: addi s3, a0, -1
-; RV32I-NEXT: and a0, a1, s3
+; RV32I-NEXT: addi s2, a0, -1
+; RV32I-NEXT: and a0, a1, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: call __addsf3
; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: lui a1, 524288
-; RV32I-NEXT: xor a0, a0, a1
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s2, a0
-; RV32I-NEXT: and a0, s1, s3
+; RV32I-NEXT: lui a1, 8
+; RV32I-NEXT: xor s3, a0, a1
+; RV32I-NEXT: and a0, s1, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s1, a0
-; RV32I-NEXT: and a0, s0, s3
+; RV32I-NEXT: and a0, s0, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
-; RV32I-NEXT: and a0, s2, s3
+; RV32I-NEXT: and a0, s3, s2
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: mv a0, s1
@@ -2140,25 +2076,21 @@ define half @fnmsub_h_2(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: lui a0, 16
-; RV64I-NEXT: addi s3, a0, -1
-; RV64I-NEXT: and a0, a1, s3
+; RV64I-NEXT: addi s2, a0, -1
+; RV64I-NEXT: and a0, a1, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __addsf3
; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s2, a0
-; RV64I-NEXT: and a0, s1, s3
+; RV64I-NEXT: lui a1, 8
+; RV64I-NEXT: xor s3, a0, a1
+; RV64I-NEXT: and a0, s1, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s1, a0
-; RV64I-NEXT: and a0, s0, s3
+; RV64I-NEXT: and a0, s0, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
-; RV64I-NEXT: and a0, s2, s3
+; RV64I-NEXT: and a0, s3, s2
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: mv a0, s1
@@ -2519,12 +2451,8 @@ define half @fnmadd_h_contract(half %a, half %b, half %c) nounwind {
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: call __mulsf3
; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: and a0, a0, s3
-; RV32I-NEXT: call __extendhfsf2
-; RV32I-NEXT: lui a1, 524288
-; RV32I-NEXT: xor a0, a0, a1
-; RV32I-NEXT: call __truncsfhf2
-; RV32I-NEXT: mv s1, a0
+; RV32I-NEXT: lui a1, 8
+; RV32I-NEXT: xor s1, a0, a1
; RV32I-NEXT: and a0, s0, s3
; RV32I-NEXT: call __extendhfsf2
; RV32I-NEXT: mv s0, a0
@@ -2580,12 +2508,8 @@ define half @fnmadd_h_contract(half %a, half %b, half %c) nounwind {
; RV64I-NEXT: mv a0, s2
; RV64I-NEXT: call __mulsf3
; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: and a0, a0, s3
-; RV64I-NEXT: call __extendhfsf2
-; RV64I-NEXT: lui a1, 524288
-; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: call __truncsfhf2
-; RV64I-NEXT: mv s1, a0
+; RV64I-NEXT: lui a1, 8
+; RV64I-NEXT: xor s1, a0, a1
; RV64I-NEXT: and a0, s0, s3
; RV64I-NEXT: call __extendhfsf2
; RV64I-NEXT: mv s0, a0
diff --git a/llvm/test/CodeGen/Thumb2/mve-vabd.ll b/llvm/test/CodeGen/Thumb2/mve-vabd.ll
index 8d52fe5..3c35a29 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vabd.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-vabd.ll
@@ -63,34 +63,30 @@ define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, ptr %z) {
; CHECK-MVE-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
; CHECK-MVE-NEXT: vpush {d8, d9, d10, d11, d12, d13}
; CHECK-MVE-NEXT: mov r4, r0
-; CHECK-MVE-NEXT: vmov.u16 r0, q1[0]
+; CHECK-MVE-NEXT: vmov.u16 r0, q1[1]
; CHECK-MVE-NEXT: vmov q5, q1
; CHECK-MVE-NEXT: vmov q4, q0
; CHECK-MVE-NEXT: bl __aeabi_h2f
; CHECK-MVE-NEXT: mov r5, r0
-; CHECK-MVE-NEXT: vmov.u16 r0, q4[0]
+; CHECK-MVE-NEXT: vmov.u16 r0, q4[1]
; CHECK-MVE-NEXT: bl __aeabi_h2f
; CHECK-MVE-NEXT: mov r1, r5
; CHECK-MVE-NEXT: bl __aeabi_fsub
; CHECK-MVE-NEXT: bl __aeabi_f2h
-; CHECK-MVE-NEXT: bl __aeabi_h2f
-; CHECK-MVE-NEXT: bic r0, r0, #-2147483648
-; CHECK-MVE-NEXT: bl __aeabi_f2h
; CHECK-MVE-NEXT: mov r5, r0
-; CHECK-MVE-NEXT: vmov.u16 r0, q5[1]
+; CHECK-MVE-NEXT: vmov.u16 r0, q5[0]
; CHECK-MVE-NEXT: bl __aeabi_h2f
; CHECK-MVE-NEXT: mov r6, r0
-; CHECK-MVE-NEXT: vmov.u16 r0, q4[1]
+; CHECK-MVE-NEXT: vmov.u16 r0, q4[0]
; CHECK-MVE-NEXT: bl __aeabi_h2f
; CHECK-MVE-NEXT: mov r1, r6
; CHECK-MVE-NEXT: bl __aeabi_fsub
; CHECK-MVE-NEXT: bl __aeabi_f2h
-; CHECK-MVE-NEXT: vmov.16 q6[0], r5
-; CHECK-MVE-NEXT: bl __aeabi_h2f
-; CHECK-MVE-NEXT: bic r0, r0, #-2147483648
-; CHECK-MVE-NEXT: bl __aeabi_f2h
-; CHECK-MVE-NEXT: vmov.16 q6[1], r0
+; CHECK-MVE-NEXT: bfc r0, #15, #17
+; CHECK-MVE-NEXT: bfc r5, #15, #17
+; CHECK-MVE-NEXT: vmov.16 q6[0], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q5[2]
+; CHECK-MVE-NEXT: vmov.16 q6[1], r5
; CHECK-MVE-NEXT: bl __aeabi_h2f
; CHECK-MVE-NEXT: mov r5, r0
; CHECK-MVE-NEXT: vmov.u16 r0, q4[2]
@@ -98,9 +94,7 @@ define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, ptr %z) {
; CHECK-MVE-NEXT: mov r1, r5
; CHECK-MVE-NEXT: bl __aeabi_fsub
; CHECK-MVE-NEXT: bl __aeabi_f2h
-; CHECK-MVE-NEXT: bl __aeabi_h2f
-; CHECK-MVE-NEXT: bic r0, r0, #-2147483648
-; CHECK-MVE-NEXT: bl __aeabi_f2h
+; CHECK-MVE-NEXT: bfc r0, #15, #17
; CHECK-MVE-NEXT: vmov.16 q6[2], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q5[3]
; CHECK-MVE-NEXT: bl __aeabi_h2f
@@ -110,9 +104,7 @@ define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, ptr %z) {
; CHECK-MVE-NEXT: mov r1, r5
; CHECK-MVE-NEXT: bl __aeabi_fsub
; CHECK-MVE-NEXT: bl __aeabi_f2h
-; CHECK-MVE-NEXT: bl __aeabi_h2f
-; CHECK-MVE-NEXT: bic r0, r0, #-2147483648
-; CHECK-MVE-NEXT: bl __aeabi_f2h
+; CHECK-MVE-NEXT: bfc r0, #15, #17
; CHECK-MVE-NEXT: vmov.16 q6[3], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q5[4]
; CHECK-MVE-NEXT: bl __aeabi_h2f
@@ -122,9 +114,7 @@ define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, ptr %z) {
; CHECK-MVE-NEXT: mov r1, r5
; CHECK-MVE-NEXT: bl __aeabi_fsub
; CHECK-MVE-NEXT: bl __aeabi_f2h
-; CHECK-MVE-NEXT: bl __aeabi_h2f
-; CHECK-MVE-NEXT: bic r0, r0, #-2147483648
-; CHECK-MVE-NEXT: bl __aeabi_f2h
+; CHECK-MVE-NEXT: bfc r0, #15, #17
; CHECK-MVE-NEXT: vmov.16 q6[4], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q5[5]
; CHECK-MVE-NEXT: bl __aeabi_h2f
@@ -134,9 +124,7 @@ define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, ptr %z) {
; CHECK-MVE-NEXT: mov r1, r5
; CHECK-MVE-NEXT: bl __aeabi_fsub
; CHECK-MVE-NEXT: bl __aeabi_f2h
-; CHECK-MVE-NEXT: bl __aeabi_h2f
-; CHECK-MVE-NEXT: bic r0, r0, #-2147483648
-; CHECK-MVE-NEXT: bl __aeabi_f2h
+; CHECK-MVE-NEXT: bfc r0, #15, #17
; CHECK-MVE-NEXT: vmov.16 q6[5], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q5[6]
; CHECK-MVE-NEXT: bl __aeabi_h2f
@@ -146,9 +134,7 @@ define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, ptr %z) {
; CHECK-MVE-NEXT: mov r1, r5
; CHECK-MVE-NEXT: bl __aeabi_fsub
; CHECK-MVE-NEXT: bl __aeabi_f2h
-; CHECK-MVE-NEXT: bl __aeabi_h2f
-; CHECK-MVE-NEXT: bic r0, r0, #-2147483648
-; CHECK-MVE-NEXT: bl __aeabi_f2h
+; CHECK-MVE-NEXT: bfc r0, #15, #17
; CHECK-MVE-NEXT: vmov.16 q6[6], r0
; CHECK-MVE-NEXT: vmov.u16 r0, q5[7]
; CHECK-MVE-NEXT: bl __aeabi_h2f
@@ -158,9 +144,7 @@ define arm_aapcs_vfpcc void @vabd_v8f16(<8 x half> %x, <8 x half> %y, ptr %z) {
; CHECK-MVE-NEXT: mov r1, r5
; CHECK-MVE-NEXT: bl __aeabi_fsub
; CHECK-MVE-NEXT: bl __aeabi_f2h
-; CHECK-MVE-NEXT: bl __aeabi_h2f
-; CHECK-MVE-NEXT: bic r0, r0, #-2147483648
-; CHECK-MVE-NEXT: bl __aeabi_f2h
+; CHECK-MVE-NEXT: bfc r0, #15, #17
; CHECK-MVE-NEXT: vmov.16 q6[7], r0
; CHECK-MVE-NEXT: vstrw.32 q6, [r4]
; CHECK-MVE-NEXT: vpop {d8, d9, d10, d11, d12, d13}
diff --git a/llvm/test/CodeGen/X86/global-variable-partition-with-dap.ll b/llvm/test/CodeGen/X86/global-variable-partition-with-dap.ll
new file mode 100644
index 0000000..a0c243b
--- /dev/null
+++ b/llvm/test/CodeGen/X86/global-variable-partition-with-dap.ll
@@ -0,0 +1,43 @@
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+;; A minimal test case. llc will crash if a global variable already has a
+;; section prefix. Subsequent PRs will expand on this test case to test the
+;; hotness reconciliation implementation.
+
+; RUN: not llc -mtriple=x86_64-unknown-linux-gnu -relocation-model=pic \
+; RUN: -partition-static-data-sections=true \
+; RUN: -data-sections=true -unique-section-names=false \
+; RUN: %s -o - 2>&1 | FileCheck %s --check-prefix=ERR
+
+; ERR: Global variable hot_bss already has a section prefix hot
+
+@hot_bss = internal global i32 0, !section_prefix !17
+
+define void @hot_func() !prof !14 {
+ %9 = load i32, ptr @hot_bss
+ %11 = call i32 (...) @func_taking_arbitrary_param(i32 %9)
+ ret void
+}
+
+declare i32 @func_taking_arbitrary_param(...)
+
+!llvm.module.flags = !{!1}
+
+!1 = !{i32 1, !"ProfileSummary", !2}
+!2 = !{!3, !4, !5, !6, !7, !8, !9, !10}
+!3 = !{!"ProfileFormat", !"InstrProf"}
+!4 = !{!"TotalCount", i64 1460183}
+!5 = !{!"MaxCount", i64 849024}
+!6 = !{!"MaxInternalCount", i64 32769}
+!7 = !{!"MaxFunctionCount", i64 849024}
+!8 = !{!"NumCounts", i64 23627}
+!9 = !{!"NumFunctions", i64 3271}
+!10 = !{!"DetailedSummary", !11}
+!11 = !{!12, !13}
+!12 = !{i32 990000, i64 166, i32 73}
+!13 = !{i32 999999, i64 3, i32 1443}
+!14 = !{!"function_entry_count", i64 100000}
+!15 = !{!"function_entry_count", i64 1}
+!16 = !{!"branch_weights", i32 1, i32 99999}
+!17 = !{!"section_prefix", !"hot"}
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll
index 5929c15..84c7df1 100644
--- a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll
@@ -1,152 +1,190 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; RUN: opt -S %s -passes=atomic-expand -mtriple=x86_64-linux-gnu | FileCheck %s
; This file tests the functions `llvm::convertAtomicLoadToIntegerType` and
-; `llvm::convertAtomicStoreToIntegerType`. If X86 stops using this
+; `llvm::convertAtomicStoreToIntegerType`. If X86 stops using this
; functionality, please move this test to a target which still is.
define float @float_load_expand(ptr %ptr) {
-; CHECK-LABEL: @float_load_expand
-; CHECK: %1 = load atomic i32, ptr %ptr unordered, align 4
-; CHECK: %2 = bitcast i32 %1 to float
-; CHECK: ret float %2
+; CHECK-LABEL: define float @float_load_expand(
+; CHECK-SAME: ptr [[PTR:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = load atomic i32, ptr [[PTR]] unordered, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[TMP1]] to float
+; CHECK-NEXT: ret float [[TMP2]]
+;
%res = load atomic float, ptr %ptr unordered, align 4
ret float %res
}
define float @float_load_expand_seq_cst(ptr %ptr) {
-; CHECK-LABEL: @float_load_expand_seq_cst
-; CHECK: %1 = load atomic i32, ptr %ptr seq_cst, align 4
-; CHECK: %2 = bitcast i32 %1 to float
-; CHECK: ret float %2
+; CHECK-LABEL: define float @float_load_expand_seq_cst(
+; CHECK-SAME: ptr [[PTR:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = load atomic i32, ptr [[PTR]] seq_cst, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[TMP1]] to float
+; CHECK-NEXT: ret float [[TMP2]]
+;
%res = load atomic float, ptr %ptr seq_cst, align 4
ret float %res
}
define float @float_load_expand_vol(ptr %ptr) {
-; CHECK-LABEL: @float_load_expand_vol
-; CHECK: %1 = load atomic volatile i32, ptr %ptr unordered, align 4
-; CHECK: %2 = bitcast i32 %1 to float
-; CHECK: ret float %2
+; CHECK-LABEL: define float @float_load_expand_vol(
+; CHECK-SAME: ptr [[PTR:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = load atomic volatile i32, ptr [[PTR]] unordered, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[TMP1]] to float
+; CHECK-NEXT: ret float [[TMP2]]
+;
%res = load atomic volatile float, ptr %ptr unordered, align 4
ret float %res
}
define float @float_load_expand_addr1(ptr addrspace(1) %ptr) {
-; CHECK-LABEL: @float_load_expand_addr1
-; CHECK: %1 = load atomic i32, ptr addrspace(1) %ptr unordered, align 4
-; CHECK: %2 = bitcast i32 %1 to float
-; CHECK: ret float %2
+; CHECK-LABEL: define float @float_load_expand_addr1(
+; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = load atomic i32, ptr addrspace(1) [[PTR]] unordered, align 4
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32 [[TMP1]] to float
+; CHECK-NEXT: ret float [[TMP2]]
+;
%res = load atomic float, ptr addrspace(1) %ptr unordered, align 4
ret float %res
}
define void @float_store_expand(ptr %ptr, float %v) {
-; CHECK-LABEL: @float_store_expand
-; CHECK: %1 = bitcast float %v to i32
-; CHECK: store atomic i32 %1, ptr %ptr unordered, align 4
+; CHECK-LABEL: define void @float_store_expand(
+; CHECK-SAME: ptr [[PTR:%.*]], float [[V:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast float [[V]] to i32
+; CHECK-NEXT: store atomic i32 [[TMP1]], ptr [[PTR]] unordered, align 4
+; CHECK-NEXT: ret void
+;
store atomic float %v, ptr %ptr unordered, align 4
ret void
}
define void @float_store_expand_seq_cst(ptr %ptr, float %v) {
-; CHECK-LABEL: @float_store_expand_seq_cst
-; CHECK: %1 = bitcast float %v to i32
-; CHECK: store atomic i32 %1, ptr %ptr seq_cst, align 4
+; CHECK-LABEL: define void @float_store_expand_seq_cst(
+; CHECK-SAME: ptr [[PTR:%.*]], float [[V:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast float [[V]] to i32
+; CHECK-NEXT: store atomic i32 [[TMP1]], ptr [[PTR]] seq_cst, align 4
+; CHECK-NEXT: ret void
+;
store atomic float %v, ptr %ptr seq_cst, align 4
ret void
}
define void @float_store_expand_vol(ptr %ptr, float %v) {
-; CHECK-LABEL: @float_store_expand_vol
-; CHECK: %1 = bitcast float %v to i32
-; CHECK: store atomic volatile i32 %1, ptr %ptr unordered, align 4
+; CHECK-LABEL: define void @float_store_expand_vol(
+; CHECK-SAME: ptr [[PTR:%.*]], float [[V:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast float [[V]] to i32
+; CHECK-NEXT: store atomic volatile i32 [[TMP1]], ptr [[PTR]] unordered, align 4
+; CHECK-NEXT: ret void
+;
store atomic volatile float %v, ptr %ptr unordered, align 4
ret void
}
define void @float_store_expand_addr1(ptr addrspace(1) %ptr, float %v) {
-; CHECK-LABEL: @float_store_expand_addr1
-; CHECK: %1 = bitcast float %v to i32
-; CHECK: store atomic i32 %1, ptr addrspace(1) %ptr unordered, align 4
+; CHECK-LABEL: define void @float_store_expand_addr1(
+; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]], float [[V:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = bitcast float [[V]] to i32
+; CHECK-NEXT: store atomic i32 [[TMP1]], ptr addrspace(1) [[PTR]] unordered, align 4
+; CHECK-NEXT: ret void
+;
store atomic float %v, ptr addrspace(1) %ptr unordered, align 4
ret void
}
define void @pointer_cmpxchg_expand(ptr %ptr, ptr %v) {
-; CHECK-LABEL: @pointer_cmpxchg_expand
-; CHECK: %1 = ptrtoint ptr %v to i64
-; CHECK: %2 = cmpxchg ptr %ptr, i64 0, i64 %1 seq_cst monotonic
-; CHECK: %3 = extractvalue { i64, i1 } %2, 0
-; CHECK: %4 = extractvalue { i64, i1 } %2, 1
-; CHECK: %5 = inttoptr i64 %3 to ptr
-; CHECK: %6 = insertvalue { ptr, i1 } poison, ptr %5, 0
-; CHECK: %7 = insertvalue { ptr, i1 } %6, i1 %4, 1
+; CHECK-LABEL: define void @pointer_cmpxchg_expand(
+; CHECK-SAME: ptr [[PTR:%.*]], ptr [[V:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[V]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[PTR]], i64 0, i64 [[TMP1]] seq_cst monotonic, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP2]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP3]] to ptr
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { ptr, i1 } poison, ptr [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { ptr, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: ret void
+;
cmpxchg ptr %ptr, ptr null, ptr %v seq_cst monotonic
ret void
}
define void @pointer_cmpxchg_expand2(ptr %ptr, ptr %v) {
-; CHECK-LABEL: @pointer_cmpxchg_expand2
-; CHECK: %1 = ptrtoint ptr %v to i64
-; CHECK: %2 = cmpxchg ptr %ptr, i64 0, i64 %1 release monotonic
-; CHECK: %3 = extractvalue { i64, i1 } %2, 0
-; CHECK: %4 = extractvalue { i64, i1 } %2, 1
-; CHECK: %5 = inttoptr i64 %3 to ptr
-; CHECK: %6 = insertvalue { ptr, i1 } poison, ptr %5, 0
-; CHECK: %7 = insertvalue { ptr, i1 } %6, i1 %4, 1
+; CHECK-LABEL: define void @pointer_cmpxchg_expand2(
+; CHECK-SAME: ptr [[PTR:%.*]], ptr [[V:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[V]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[PTR]], i64 0, i64 [[TMP1]] release monotonic, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP2]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP3]] to ptr
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { ptr, i1 } poison, ptr [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { ptr, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: ret void
+;
cmpxchg ptr %ptr, ptr null, ptr %v release monotonic
ret void
}
define void @pointer_cmpxchg_expand3(ptr %ptr, ptr %v) {
-; CHECK-LABEL: @pointer_cmpxchg_expand3
-; CHECK: %1 = ptrtoint ptr %v to i64
-; CHECK: %2 = cmpxchg ptr %ptr, i64 0, i64 %1 seq_cst seq_cst
-; CHECK: %3 = extractvalue { i64, i1 } %2, 0
-; CHECK: %4 = extractvalue { i64, i1 } %2, 1
-; CHECK: %5 = inttoptr i64 %3 to ptr
-; CHECK: %6 = insertvalue { ptr, i1 } poison, ptr %5, 0
-; CHECK: %7 = insertvalue { ptr, i1 } %6, i1 %4, 1
+; CHECK-LABEL: define void @pointer_cmpxchg_expand3(
+; CHECK-SAME: ptr [[PTR:%.*]], ptr [[V:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[V]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[PTR]], i64 0, i64 [[TMP1]] seq_cst seq_cst, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP2]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP3]] to ptr
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { ptr, i1 } poison, ptr [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { ptr, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: ret void
+;
cmpxchg ptr %ptr, ptr null, ptr %v seq_cst seq_cst
ret void
}
define void @pointer_cmpxchg_expand4(ptr %ptr, ptr %v) {
-; CHECK-LABEL: @pointer_cmpxchg_expand4
-; CHECK: %1 = ptrtoint ptr %v to i64
-; CHECK: %2 = cmpxchg weak ptr %ptr, i64 0, i64 %1 seq_cst seq_cst
-; CHECK: %3 = extractvalue { i64, i1 } %2, 0
-; CHECK: %4 = extractvalue { i64, i1 } %2, 1
-; CHECK: %5 = inttoptr i64 %3 to ptr
-; CHECK: %6 = insertvalue { ptr, i1 } poison, ptr %5, 0
-; CHECK: %7 = insertvalue { ptr, i1 } %6, i1 %4, 1
+; CHECK-LABEL: define void @pointer_cmpxchg_expand4(
+; CHECK-SAME: ptr [[PTR:%.*]], ptr [[V:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[V]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = cmpxchg weak ptr [[PTR]], i64 0, i64 [[TMP1]] seq_cst seq_cst, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP2]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP3]] to ptr
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { ptr, i1 } poison, ptr [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { ptr, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: ret void
+;
cmpxchg weak ptr %ptr, ptr null, ptr %v seq_cst seq_cst
ret void
}
define void @pointer_cmpxchg_expand5(ptr %ptr, ptr %v) {
-; CHECK-LABEL: @pointer_cmpxchg_expand5
-; CHECK: %1 = ptrtoint ptr %v to i64
-; CHECK: %2 = cmpxchg volatile ptr %ptr, i64 0, i64 %1 seq_cst seq_cst
-; CHECK: %3 = extractvalue { i64, i1 } %2, 0
-; CHECK: %4 = extractvalue { i64, i1 } %2, 1
-; CHECK: %5 = inttoptr i64 %3 to ptr
-; CHECK: %6 = insertvalue { ptr, i1 } poison, ptr %5, 0
-; CHECK: %7 = insertvalue { ptr, i1 } %6, i1 %4, 1
+; CHECK-LABEL: define void @pointer_cmpxchg_expand5(
+; CHECK-SAME: ptr [[PTR:%.*]], ptr [[V:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[V]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = cmpxchg volatile ptr [[PTR]], i64 0, i64 [[TMP1]] seq_cst seq_cst, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP2]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP3]] to ptr
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { ptr, i1 } poison, ptr [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { ptr, i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: ret void
+;
cmpxchg volatile ptr %ptr, ptr null, ptr %v seq_cst seq_cst
ret void
}
-define void @pointer_cmpxchg_expand6(ptr addrspace(1) %ptr,
- ptr addrspace(2) %v) {
-; CHECK-LABEL: @pointer_cmpxchg_expand6
-; CHECK: %1 = ptrtoint ptr addrspace(2) %v to i64
-; CHECK: %2 = cmpxchg ptr addrspace(1) %ptr, i64 0, i64 %1 seq_cst seq_cst
-; CHECK: %3 = extractvalue { i64, i1 } %2, 0
-; CHECK: %4 = extractvalue { i64, i1 } %2, 1
-; CHECK: %5 = inttoptr i64 %3 to ptr addrspace(2)
-; CHECK: %6 = insertvalue { ptr addrspace(2), i1 } poison, ptr addrspace(2) %5, 0
-; CHECK: %7 = insertvalue { ptr addrspace(2), i1 } %6, i1 %4, 1
+define void @pointer_cmpxchg_expand6(ptr addrspace(1) %ptr, ptr addrspace(2) %v) {
+; CHECK-LABEL: define void @pointer_cmpxchg_expand6(
+; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]], ptr addrspace(2) [[V:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(2) [[V]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(1) [[PTR]], i64 0, i64 [[TMP1]] seq_cst seq_cst, align 8
+; CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP2]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+; CHECK-NEXT: [[TMP5:%.*]] = inttoptr i64 [[TMP3]] to ptr addrspace(2)
+; CHECK-NEXT: [[TMP6:%.*]] = insertvalue { ptr addrspace(2), i1 } poison, ptr addrspace(2) [[TMP5]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = insertvalue { ptr addrspace(2), i1 } [[TMP6]], i1 [[TMP4]], 1
+; CHECK-NEXT: ret void
+;
cmpxchg ptr addrspace(1) %ptr, ptr addrspace(2) null, ptr addrspace(2) %v seq_cst seq_cst
ret void
}
diff --git a/llvm/test/Transforms/InstCombine/fold-select-fmul-if-zero.ll b/llvm/test/Transforms/InstCombine/fold-select-fmul-if-zero.ll
index ff6d9aa..1ba7005 100644
--- a/llvm/test/Transforms/InstCombine/fold-select-fmul-if-zero.ll
+++ b/llvm/test/Transforms/InstCombine/fold-select-fmul-if-zero.ll
@@ -481,7 +481,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_nsz_fmul(float %x, float %y) {
define float @fmul_by_var_if_0_oeq_zero_f32_nsz_ninf_fmul(float %x, float %y) {
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_nsz_ninf_fmul(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
-; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
+; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan ninf i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[X]], [[SCALED_X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
@@ -509,7 +509,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_nsz_nnan_fmul(float %x, float %y) {
define float @fmul_by_var_if_0_oeq_zero_f32_nnan_ninf_fmul(float %x, float %y) {
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_nnan_ninf_fmul(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
-; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
+; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan ninf i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan float [[X]], [[SCALED_X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
@@ -558,7 +558,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_nsz_inverted(f
define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz(float %x, float %y) {
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
-; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
+; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan ninf i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan float [[X]], [[SCALED_X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
@@ -571,7 +571,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz(float %x, float %
define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz_commuted(float %x, float %y) {
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz_commuted(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
-; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
+; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan ninf i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan float [[X]], [[SCALED_X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
@@ -585,7 +585,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz_commuted(float %x
define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_known_never_negzero(float %x, float nofpclass(nzero) %y) {
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_known_never_negzero(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
-; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
+; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan ninf i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan float [[X]], [[SCALED_X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
@@ -598,7 +598,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_known_never_ne
define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_known_never_negzero_negsub(float %x, float nofpclass(nzero nsub) %y) {
; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_known_never_negzero_negsub(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
-; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
+; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan ninf i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan float [[X]], [[SCALED_X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
@@ -705,7 +705,7 @@ define float @fmul_by_self_if_0_oeq_zero_f32(float %x) {
define float @fmul_by_self_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz(float %x) {
; CHECK-LABEL: @fmul_by_self_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz(
; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
-; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[X]], float 1.000000e+00
+; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan ninf i1 [[X_IS_ZERO]], float [[X]], float 1.000000e+00
; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan float [[X]], [[SCALED_X]]
; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
;
diff --git a/llvm/test/Transforms/InstCombine/select-binop-foldable-floating-point.ll b/llvm/test/Transforms/InstCombine/select-binop-foldable-floating-point.ll
index 253bc9e7..c14dd46 100644
--- a/llvm/test/Transforms/InstCombine/select-binop-foldable-floating-point.ll
+++ b/llvm/test/Transforms/InstCombine/select-binop-foldable-floating-point.ll
@@ -23,6 +23,50 @@ define float @select_fpclass_fadd(i1 %cond, float nofpclass(nan) %A, float %B) {
ret float %D
}
+define float @select_fpclass_fadd_ninf1(i1 %cond, float nofpclass(nan) %A, float %B) {
+; CHECK-LABEL: @select_fpclass_fadd_ninf1(
+; CHECK-NEXT: [[C:%.*]] = select ninf i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00
+; CHECK-NEXT: [[D:%.*]] = fadd float [[A:%.*]], [[C]]
+; CHECK-NEXT: ret float [[D]]
+;
+ %C = fadd ninf float %A, %B
+ %D = select i1 %cond, float %C, float %A
+ ret float %D
+}
+
+define float @select_fpclass_fadd_ninf2(i1 %cond, float nofpclass(nan) %A, float %B) {
+; CHECK-LABEL: @select_fpclass_fadd_ninf2(
+; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00
+; CHECK-NEXT: [[D:%.*]] = fadd float [[A:%.*]], [[C]]
+; CHECK-NEXT: ret float [[D]]
+;
+ %C = fadd float %A, %B
+ %D = select ninf i1 %cond, float %C, float %A
+ ret float %D
+}
+
+define float @select_fpclass_fadd_ninf3(i1 %cond, float nofpclass(nan) %A, float %B) {
+; CHECK-LABEL: @select_fpclass_fadd_ninf3(
+; CHECK-NEXT: [[C:%.*]] = select ninf i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00
+; CHECK-NEXT: [[D:%.*]] = fadd ninf float [[A:%.*]], [[C]]
+; CHECK-NEXT: ret float [[D]]
+;
+ %C = fadd ninf float %A, %B
+ %D = select ninf i1 %cond, float %C, float %A
+ ret float %D
+}
+
+define float @select_fpclass_fadd_nnan_ninf(i1 %cond, float nofpclass(nan) %A, float %B) {
+; CHECK-LABEL: @select_fpclass_fadd_nnan_ninf(
+; CHECK-NEXT: [[C:%.*]] = select nnan ninf i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00
+; CHECK-NEXT: [[D:%.*]] = fadd float [[A:%.*]], [[C]]
+; CHECK-NEXT: ret float [[D]]
+;
+ %C = fadd float %A, %B
+ %D = select nnan ninf i1 %cond, float %C, float %A
+ ret float %D
+}
+
define float @select_nnan_fadd(i1 %cond, float %A, float %B) {
; CHECK-LABEL: @select_nnan_fadd(
; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00
@@ -47,7 +91,7 @@ define float @select_nnan_fadd_swapped(i1 %cond, float %A, float %B) {
define float @select_nnan_fadd_fast_math(i1 %cond, float %A, float %B) {
; CHECK-LABEL: @select_nnan_fadd_fast_math(
-; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00
+; CHECK-NEXT: [[C:%.*]] = select nnan ninf i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00
; CHECK-NEXT: [[D:%.*]] = fadd reassoc nnan arcp contract afn float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
@@ -58,7 +102,7 @@ define float @select_nnan_fadd_fast_math(i1 %cond, float %A, float %B) {
define float @select_nnan_fadd_swapped_fast_math(i1 %cond, float %A, float %B) {
; CHECK-LABEL: @select_nnan_fadd_swapped_fast_math(
-; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float -0.000000e+00, float [[B:%.*]]
+; CHECK-NEXT: [[C:%.*]] = select nnan ninf i1 [[COND:%.*]], float -0.000000e+00, float [[B:%.*]]
; CHECK-NEXT: [[D:%.*]] = fadd reassoc nnan arcp contract afn float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
@@ -124,7 +168,7 @@ define float @select_nnan_fmul_swapped(i1 %cond, float %A, float %B) {
define float @select_nnan_fmul_fast_math(i1 %cond, float %A, float %B) {
; CHECK-LABEL: @select_nnan_fmul_fast_math(
-; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00
+; CHECK-NEXT: [[C:%.*]] = select nnan ninf i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00
; CHECK-NEXT: [[D:%.*]] = fmul reassoc nnan arcp contract afn float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
@@ -135,7 +179,7 @@ define float @select_nnan_fmul_fast_math(i1 %cond, float %A, float %B) {
define float @select_nnan_fmul_swapped_fast_math(i1 %cond, float %A, float %B) {
; CHECK-LABEL: @select_nnan_fmul_swapped_fast_math(
-; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float 1.000000e+00, float [[B:%.*]]
+; CHECK-NEXT: [[C:%.*]] = select nnan ninf i1 [[COND:%.*]], float 1.000000e+00, float [[B:%.*]]
; CHECK-NEXT: [[D:%.*]] = fmul reassoc nnan arcp contract afn float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
@@ -144,6 +188,50 @@ define float @select_nnan_fmul_swapped_fast_math(i1 %cond, float %A, float %B) {
ret float %D
}
+define float @select_fpclass_fmul_ninf1(i1 %cond, float nofpclass(nan) %A, float %B) {
+; CHECK-LABEL: @select_fpclass_fmul_ninf1(
+; CHECK-NEXT: [[C:%.*]] = select ninf i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00
+; CHECK-NEXT: [[D:%.*]] = fmul float [[A:%.*]], [[C]]
+; CHECK-NEXT: ret float [[D]]
+;
+ %C = fmul ninf float %A, %B
+ %D = select i1 %cond, float %C, float %A
+ ret float %D
+}
+
+define float @select_fpclass_fmul_ninf2(i1 %cond, float nofpclass(nan) %A, float %B) {
+; CHECK-LABEL: @select_fpclass_fmul_ninf2(
+; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00
+; CHECK-NEXT: [[D:%.*]] = fmul float [[A:%.*]], [[C]]
+; CHECK-NEXT: ret float [[D]]
+;
+ %C = fmul float %A, %B
+ %D = select ninf i1 %cond, float %C, float %A
+ ret float %D
+}
+
+define float @select_fpclass_fmul_ninf3(i1 %cond, float nofpclass(nan) %A, float %B) {
+; CHECK-LABEL: @select_fpclass_fmul_ninf3(
+; CHECK-NEXT: [[C:%.*]] = select ninf i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00
+; CHECK-NEXT: [[D:%.*]] = fmul ninf float [[A:%.*]], [[C]]
+; CHECK-NEXT: ret float [[D]]
+;
+ %C = fmul ninf float %A, %B
+ %D = select ninf i1 %cond, float %C, float %A
+ ret float %D
+}
+
+define float @select_fpclass_fmul_nnan_ninf(i1 %cond, float nofpclass(nan) %A, float %B) {
+; CHECK-LABEL: @select_fpclass_fmul_nnan_ninf(
+; CHECK-NEXT: [[C:%.*]] = select nnan ninf i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00
+; CHECK-NEXT: [[D:%.*]] = fmul float [[A:%.*]], [[C]]
+; CHECK-NEXT: ret float [[D]]
+;
+ %C = fmul float %A, %B
+ %D = select nnan ninf i1 %cond, float %C, float %A
+ ret float %D
+}
+
define float @select_nnan_fsub(i1 %cond, float %A, float %B) {
; CHECK-LABEL: @select_nnan_fsub(
; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float 0.000000e+00
@@ -168,7 +256,7 @@ define float @select_nnan_fsub_swapped(i1 %cond, float %A, float %B) {
define float @select_nnan_fsub_fast_math(i1 %cond, float %A, float %B) {
; CHECK-LABEL: @select_nnan_fsub_fast_math(
-; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float 0.000000e+00
+; CHECK-NEXT: [[C:%.*]] = select nnan ninf i1 [[COND:%.*]], float [[B:%.*]], float 0.000000e+00
; CHECK-NEXT: [[D:%.*]] = fsub reassoc nnan arcp contract afn float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
@@ -179,7 +267,7 @@ define float @select_nnan_fsub_fast_math(i1 %cond, float %A, float %B) {
define float @select_nnan_fsub_swapped_fast_math(i1 %cond, float %A, float %B) {
; CHECK-LABEL: @select_nnan_fsub_swapped_fast_math(
-; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float 0.000000e+00, float [[B:%.*]]
+; CHECK-NEXT: [[C:%.*]] = select nnan ninf i1 [[COND:%.*]], float 0.000000e+00, float [[B:%.*]]
; CHECK-NEXT: [[D:%.*]] = fsub reassoc nnan arcp contract afn float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
@@ -188,6 +276,50 @@ define float @select_nnan_fsub_swapped_fast_math(i1 %cond, float %A, float %B) {
ret float %D
}
+define float @select_fpclass_fsub_ninf1(i1 %cond, float nofpclass(nan) %A, float %B) {
+; CHECK-LABEL: @select_fpclass_fsub_ninf1(
+; CHECK-NEXT: [[C:%.*]] = select ninf i1 [[COND:%.*]], float [[B:%.*]], float 0.000000e+00
+; CHECK-NEXT: [[D:%.*]] = fsub float [[A:%.*]], [[C]]
+; CHECK-NEXT: ret float [[D]]
+;
+ %C = fsub ninf float %A, %B
+ %D = select i1 %cond, float %C, float %A
+ ret float %D
+}
+
+define float @select_fpclass_fsub_ninf2(i1 %cond, float nofpclass(nan) %A, float %B) {
+; CHECK-LABEL: @select_fpclass_fsub_ninf2(
+; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float [[B:%.*]], float 0.000000e+00
+; CHECK-NEXT: [[D:%.*]] = fsub float [[A:%.*]], [[C]]
+; CHECK-NEXT: ret float [[D]]
+;
+ %C = fsub float %A, %B
+ %D = select ninf i1 %cond, float %C, float %A
+ ret float %D
+}
+
+define float @select_fpclass_fsub_ninf3(i1 %cond, float nofpclass(nan) %A, float %B) {
+; CHECK-LABEL: @select_fpclass_fsub_ninf3(
+; CHECK-NEXT: [[C:%.*]] = select ninf i1 [[COND:%.*]], float [[B:%.*]], float 0.000000e+00
+; CHECK-NEXT: [[D:%.*]] = fsub ninf float [[A:%.*]], [[C]]
+; CHECK-NEXT: ret float [[D]]
+;
+ %C = fsub ninf float %A, %B
+ %D = select ninf i1 %cond, float %C, float %A
+ ret float %D
+}
+
+define float @select_fpclass_fsub_nnan_ninf(i1 %cond, float nofpclass(nan) %A, float %B) {
+; CHECK-LABEL: @select_fpclass_fsub_nnan_ninf(
+; CHECK-NEXT: [[C:%.*]] = select nnan ninf i1 [[COND:%.*]], float [[B:%.*]], float 0.000000e+00
+; CHECK-NEXT: [[D:%.*]] = fsub float [[A:%.*]], [[C]]
+; CHECK-NEXT: ret float [[D]]
+;
+ %C = fsub float %A, %B
+ %D = select nnan ninf i1 %cond, float %C, float %A
+ ret float %D
+}
+
define <4 x float> @select_nnan_nsz_fsub_v4f32(<4 x i1> %cond, <4 x float> %A, <4 x float> %B) {
; CHECK-LABEL: @select_nnan_nsz_fsub_v4f32(
; CHECK-NEXT: [[C:%.*]] = select nnan nsz <4 x i1> [[COND:%.*]], <4 x float> [[B:%.*]], <4 x float> zeroinitializer
@@ -246,7 +378,7 @@ define float @select_nnan_fdiv_swapped(i1 %cond, float %A, float %B) {
define float @select_nnan_fdiv_fast_math(i1 %cond, float %A, float %B) {
; CHECK-LABEL: @select_nnan_fdiv_fast_math(
-; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00
+; CHECK-NEXT: [[C:%.*]] = select nnan ninf i1 [[COND:%.*]], float [[B:%.*]], float 1.000000e+00
; CHECK-NEXT: [[D:%.*]] = fdiv reassoc nnan arcp contract afn float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
@@ -257,7 +389,7 @@ define float @select_nnan_fdiv_fast_math(i1 %cond, float %A, float %B) {
define float @select_nnan_fdiv_swapped_fast_math(i1 %cond, float %A, float %B) {
; CHECK-LABEL: @select_nnan_fdiv_swapped_fast_math(
-; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float 1.000000e+00, float [[B:%.*]]
+; CHECK-NEXT: [[C:%.*]] = select nnan ninf i1 [[COND:%.*]], float 1.000000e+00, float [[B:%.*]]
; CHECK-NEXT: [[D:%.*]] = fdiv reassoc nnan arcp contract afn float [[A:%.*]], [[C]]
; CHECK-NEXT: ret float [[D]]
;
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll
index e154883c..9dbbf4c 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll
@@ -45,7 +45,7 @@ define float @test(ptr nocapture readonly %pA, ptr nocapture readonly %pB, i32 %
; CHECK-NEXT: [[TMP7:%.*]] = fsub fast <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD7]]
; CHECK-NEXT: [[TMP8:%.*]] = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> [[TMP7]])
; CHECK-NEXT: [[TMP9:%.*]] = fdiv fast <4 x float> [[TMP8]], [[TMP6]]
-; CHECK-NEXT: [[TMP10:%.*]] = select <4 x i1> [[TMP20]], <4 x float> [[TMP9]], <4 x float> splat (float -0.000000e+00)
+; CHECK-NEXT: [[TMP10:%.*]] = select ninf <4 x i1> [[TMP20]], <4 x float> [[TMP9]], <4 x float> splat (float -0.000000e+00)
; CHECK-NEXT: [[PREDPHI]] = fadd reassoc arcp contract afn <4 x float> [[VEC_PHI]], [[TMP10]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll
index e8709a5..55adda7 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/predicated-reduction.ll
@@ -41,12 +41,12 @@ define nofpclass(nan inf) double @monte_simple(i32 noundef %nblocks, i32 noundef
; CHECK-NEXT: [[TMP9:%.*]] = fcmp fast ogt <4 x double> [[TMP7]], zeroinitializer
; CHECK-NEXT: [[TMP10:%.*]] = fmul fast <4 x double> [[TMP6]], [[TMP6]]
; CHECK-NEXT: [[TMP11:%.*]] = fmul fast <4 x double> [[TMP7]], [[TMP7]]
-; CHECK-NEXT: [[TMP12:%.*]] = select <4 x i1> [[TMP8]], <4 x double> [[TMP6]], <4 x double> splat (double -0.000000e+00)
-; CHECK-NEXT: [[TMP13:%.*]] = select <4 x i1> [[TMP9]], <4 x double> [[TMP7]], <4 x double> splat (double -0.000000e+00)
+; CHECK-NEXT: [[TMP12:%.*]] = select ninf <4 x i1> [[TMP8]], <4 x double> [[TMP6]], <4 x double> splat (double -0.000000e+00)
+; CHECK-NEXT: [[TMP13:%.*]] = select ninf <4 x i1> [[TMP9]], <4 x double> [[TMP7]], <4 x double> splat (double -0.000000e+00)
; CHECK-NEXT: [[TMP14]] = fadd reassoc arcp contract afn <4 x double> [[VEC_PHI16]], [[TMP12]]
; CHECK-NEXT: [[TMP15]] = fadd reassoc arcp contract afn <4 x double> [[VEC_PHI17]], [[TMP13]]
-; CHECK-NEXT: [[TMP16:%.*]] = select <4 x i1> [[TMP8]], <4 x double> [[TMP10]], <4 x double> splat (double -0.000000e+00)
-; CHECK-NEXT: [[TMP17:%.*]] = select <4 x i1> [[TMP9]], <4 x double> [[TMP11]], <4 x double> splat (double -0.000000e+00)
+; CHECK-NEXT: [[TMP16:%.*]] = select ninf <4 x i1> [[TMP8]], <4 x double> [[TMP10]], <4 x double> splat (double -0.000000e+00)
+; CHECK-NEXT: [[TMP17:%.*]] = select ninf <4 x i1> [[TMP9]], <4 x double> [[TMP11]], <4 x double> splat (double -0.000000e+00)
; CHECK-NEXT: [[TMP18]] = fadd reassoc arcp contract afn <4 x double> [[VEC_PHI]], [[TMP16]]
; CHECK-NEXT: [[TMP19]] = fadd reassoc arcp contract afn <4 x double> [[VEC_PHI15]], [[TMP17]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDVARS_IV1]], 8
@@ -75,9 +75,9 @@ define nofpclass(nan inf) double @monte_simple(i32 noundef %nblocks, i32 noundef
; CHECK-NEXT: [[SUB:%.*]] = fsub fast double [[MUL]], [[Z]]
; CHECK-NEXT: [[CMP1:%.*]] = fcmp fast ogt double [[SUB]], 0.000000e+00
; CHECK-NEXT: [[MUL3:%.*]] = fmul fast double [[SUB]], [[SUB]]
-; CHECK-NEXT: [[ADD8:%.*]] = select i1 [[CMP1]], double [[SUB]], double -0.000000e+00
+; CHECK-NEXT: [[ADD8:%.*]] = select ninf i1 [[CMP1]], double [[SUB]], double -0.000000e+00
; CHECK-NEXT: [[V0_2]] = fadd reassoc arcp contract afn double [[V0_011]], [[ADD8]]
-; CHECK-NEXT: [[ADD4:%.*]] = select i1 [[CMP1]], double [[MUL3]], double -0.000000e+00
+; CHECK-NEXT: [[ADD4:%.*]] = select ninf i1 [[CMP1]], double [[MUL3]], double -0.000000e+00
; CHECK-NEXT: [[V1_2]] = fadd reassoc arcp contract afn double [[V1_012]], [[ADD4]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
@@ -229,12 +229,12 @@ define nofpclass(nan inf) double @monte_exp(i32 noundef %nblocks, i32 noundef %R
; CHECK-NEXT: [[TMP13:%.*]] = fcmp fast ogt <4 x double> [[TMP11]], zeroinitializer
; CHECK-NEXT: [[TMP14:%.*]] = fmul fast <4 x double> [[TMP10]], [[TMP10]]
; CHECK-NEXT: [[TMP15:%.*]] = fmul fast <4 x double> [[TMP11]], [[TMP11]]
-; CHECK-NEXT: [[TMP16:%.*]] = select <4 x i1> [[TMP12]], <4 x double> [[TMP10]], <4 x double> splat (double -0.000000e+00)
-; CHECK-NEXT: [[TMP17:%.*]] = select <4 x i1> [[TMP13]], <4 x double> [[TMP11]], <4 x double> splat (double -0.000000e+00)
+; CHECK-NEXT: [[TMP16:%.*]] = select ninf <4 x i1> [[TMP12]], <4 x double> [[TMP10]], <4 x double> splat (double -0.000000e+00)
+; CHECK-NEXT: [[TMP17:%.*]] = select ninf <4 x i1> [[TMP13]], <4 x double> [[TMP11]], <4 x double> splat (double -0.000000e+00)
; CHECK-NEXT: [[TMP18]] = fadd reassoc arcp contract afn <4 x double> [[VEC_PHI32]], [[TMP16]]
; CHECK-NEXT: [[TMP19]] = fadd reassoc arcp contract afn <4 x double> [[VEC_PHI33]], [[TMP17]]
-; CHECK-NEXT: [[TMP20:%.*]] = select <4 x i1> [[TMP12]], <4 x double> [[TMP14]], <4 x double> splat (double -0.000000e+00)
-; CHECK-NEXT: [[TMP21:%.*]] = select <4 x i1> [[TMP13]], <4 x double> [[TMP15]], <4 x double> splat (double -0.000000e+00)
+; CHECK-NEXT: [[TMP20:%.*]] = select ninf <4 x i1> [[TMP12]], <4 x double> [[TMP14]], <4 x double> splat (double -0.000000e+00)
+; CHECK-NEXT: [[TMP21:%.*]] = select ninf <4 x i1> [[TMP13]], <4 x double> [[TMP15]], <4 x double> splat (double -0.000000e+00)
; CHECK-NEXT: [[TMP22]] = fadd reassoc arcp contract afn <4 x double> [[VEC_PHI]], [[TMP20]]
; CHECK-NEXT: [[TMP23]] = fadd reassoc arcp contract afn <4 x double> [[VEC_PHI31]], [[TMP21]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDVARS_IV1]], 8
@@ -263,9 +263,9 @@ define nofpclass(nan inf) double @monte_exp(i32 noundef %nblocks, i32 noundef %R
; CHECK-NEXT: [[SUB_US:%.*]] = fsub fast double [[MUL_US]], [[Z]]
; CHECK-NEXT: [[CMP4_US:%.*]] = fcmp fast ogt double [[SUB_US]], 0.000000e+00
; CHECK-NEXT: [[ADD7_US:%.*]] = fmul fast double [[SUB_US]], [[SUB_US]]
-; CHECK-NEXT: [[ADD12_US:%.*]] = select i1 [[CMP4_US]], double [[SUB_US]], double -0.000000e+00
+; CHECK-NEXT: [[ADD12_US:%.*]] = select ninf i1 [[CMP4_US]], double [[SUB_US]], double -0.000000e+00
; CHECK-NEXT: [[V0_2_US]] = fadd reassoc arcp contract afn double [[V0_115_US]], [[ADD12_US]]
-; CHECK-NEXT: [[ADD7_US1:%.*]] = select i1 [[CMP4_US]], double [[ADD7_US]], double -0.000000e+00
+; CHECK-NEXT: [[ADD7_US1:%.*]] = select ninf i1 [[CMP4_US]], double [[ADD7_US]], double -0.000000e+00
; CHECK-NEXT: [[V1_2_US]] = fadd reassoc arcp contract afn double [[V1_116_US]], [[ADD7_US1]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND25_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]]
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/x264-satd-8x4.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/x264-satd-8x4.ll
new file mode 100644
index 0000000..c1042f18
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/x264-satd-8x4.ll
@@ -0,0 +1,526 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -mtriple=riscv64 -mattr=+m,+v,+unaligned-vector-mem \
+; RUN: -passes=slp-vectorizer -S < %s | FileCheck %s
+; Function Attrs: nounwind uwtable vscale_range(8,1024)
+define i32 @x264_pixel_satd_8x4(ptr %pix1, i32 %i_pix1, ptr %pix2, i32 %i_pix2) {
+; CHECK-LABEL: define i32 @x264_pixel_satd_8x4(
+; CHECK-SAME: ptr [[PIX1:%.*]], i32 [[I_PIX1:%.*]], ptr [[PIX2:%.*]], i32 [[I_PIX2:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[IDX_EXT:%.*]] = sext i32 [[I_PIX1]] to i64
+; CHECK-NEXT: [[IDX_EXT63:%.*]] = sext i32 [[I_PIX2]] to i64
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds nuw i8, ptr [[PIX1]], i64 4
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw i8, ptr [[PIX2]], i64 4
+; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, ptr [[PIX1]], i64 [[IDX_EXT]]
+; CHECK-NEXT: [[ADD_PTR64:%.*]] = getelementptr inbounds i8, ptr [[PIX2]], i64 [[IDX_EXT63]]
+; CHECK-NEXT: [[ARRAYIDX3_1:%.*]] = getelementptr inbounds nuw i8, ptr [[ADD_PTR]], i64 4
+; CHECK-NEXT: [[ARRAYIDX5_1:%.*]] = getelementptr inbounds nuw i8, ptr [[ADD_PTR64]], i64 4
+; CHECK-NEXT: [[ADD_PTR_1:%.*]] = getelementptr inbounds i8, ptr [[ADD_PTR]], i64 [[IDX_EXT]]
+; CHECK-NEXT: [[ADD_PTR64_1:%.*]] = getelementptr inbounds i8, ptr [[ADD_PTR64]], i64 [[IDX_EXT63]]
+; CHECK-NEXT: [[ARRAYIDX3_2:%.*]] = getelementptr inbounds nuw i8, ptr [[ADD_PTR_1]], i64 4
+; CHECK-NEXT: [[ARRAYIDX5_2:%.*]] = getelementptr inbounds nuw i8, ptr [[ADD_PTR64_1]], i64 4
+; CHECK-NEXT: [[ADD_PTR_2:%.*]] = getelementptr inbounds i8, ptr [[ADD_PTR_1]], i64 [[IDX_EXT]]
+; CHECK-NEXT: [[ADD_PTR64_2:%.*]] = getelementptr inbounds i8, ptr [[ADD_PTR64_1]], i64 [[IDX_EXT63]]
+; CHECK-NEXT: [[ARRAYIDX3_3:%.*]] = getelementptr inbounds nuw i8, ptr [[ADD_PTR_2]], i64 4
+; CHECK-NEXT: [[ARRAYIDX5_3:%.*]] = getelementptr inbounds nuw i8, ptr [[ADD_PTR64_2]], i64 4
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i8>, ptr [[PIX1]], align 1
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, ptr [[PIX2]], align 1
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i8>, ptr [[ARRAYIDX3]], align 1
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5]], align 1
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i8>, ptr [[ADD_PTR]], align 1
+; CHECK-NEXT: [[TMP5:%.*]] = load <4 x i8>, ptr [[ADD_PTR64]], align 1
+; CHECK-NEXT: [[TMP6:%.*]] = load <4 x i8>, ptr [[ARRAYIDX3_1]], align 1
+; CHECK-NEXT: [[TMP7:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5_1]], align 1
+; CHECK-NEXT: [[TMP8:%.*]] = load <4 x i8>, ptr [[ADD_PTR_1]], align 1
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i8>, ptr [[ADD_PTR64_1]], align 1
+; CHECK-NEXT: [[TMP10:%.*]] = load <4 x i8>, ptr [[ARRAYIDX3_2]], align 1
+; CHECK-NEXT: [[TMP11:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5_2]], align 1
+; CHECK-NEXT: [[TMP12:%.*]] = load <4 x i8>, ptr [[ADD_PTR_2]], align 1
+; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <4 x i8> [[TMP0]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP14:%.*]] = shufflevector <4 x i8> [[TMP4]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP15:%.*]] = shufflevector <4 x i8> [[TMP0]], <4 x i8> [[TMP4]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP16:%.*]] = shufflevector <4 x i8> [[TMP8]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP17:%.*]] = shufflevector <16 x i8> [[TMP15]], <16 x i8> [[TMP16]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP18:%.*]] = shufflevector <4 x i8> [[TMP12]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP19:%.*]] = shufflevector <16 x i8> [[TMP17]], <16 x i8> [[TMP18]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
+; CHECK-NEXT: [[TMP20:%.*]] = zext <16 x i8> [[TMP19]] to <16 x i32>
+; CHECK-NEXT: [[TMP21:%.*]] = load <4 x i8>, ptr [[ADD_PTR64_2]], align 1
+; CHECK-NEXT: [[TMP22:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP23:%.*]] = shufflevector <4 x i8> [[TMP5]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP24:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> [[TMP5]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP25:%.*]] = shufflevector <4 x i8> [[TMP9]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP26:%.*]] = shufflevector <16 x i8> [[TMP24]], <16 x i8> [[TMP25]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP27:%.*]] = shufflevector <4 x i8> [[TMP21]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP28:%.*]] = shufflevector <16 x i8> [[TMP26]], <16 x i8> [[TMP27]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
+; CHECK-NEXT: [[TMP29:%.*]] = zext <16 x i8> [[TMP28]] to <16 x i32>
+; CHECK-NEXT: [[TMP30:%.*]] = sub nsw <16 x i32> [[TMP20]], [[TMP29]]
+; CHECK-NEXT: [[TMP31:%.*]] = load <4 x i8>, ptr [[ARRAYIDX3_3]], align 1
+; CHECK-NEXT: [[TMP32:%.*]] = shufflevector <4 x i8> [[TMP2]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP33:%.*]] = shufflevector <4 x i8> [[TMP6]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP34:%.*]] = shufflevector <4 x i8> [[TMP2]], <4 x i8> [[TMP6]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP35:%.*]] = shufflevector <4 x i8> [[TMP10]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP36:%.*]] = shufflevector <16 x i8> [[TMP34]], <16 x i8> [[TMP35]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP37:%.*]] = shufflevector <4 x i8> [[TMP31]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP38:%.*]] = shufflevector <16 x i8> [[TMP36]], <16 x i8> [[TMP37]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
+; CHECK-NEXT: [[TMP39:%.*]] = zext <16 x i8> [[TMP38]] to <16 x i32>
+; CHECK-NEXT: [[TMP40:%.*]] = load <4 x i8>, ptr [[ARRAYIDX5_3]], align 1
+; CHECK-NEXT: [[TMP41:%.*]] = shufflevector <4 x i8> [[TMP3]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP42:%.*]] = shufflevector <4 x i8> [[TMP7]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP43:%.*]] = shufflevector <4 x i8> [[TMP3]], <4 x i8> [[TMP7]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP44:%.*]] = shufflevector <4 x i8> [[TMP11]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP45:%.*]] = shufflevector <16 x i8> [[TMP43]], <16 x i8> [[TMP44]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP46:%.*]] = shufflevector <4 x i8> [[TMP40]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP47:%.*]] = shufflevector <16 x i8> [[TMP45]], <16 x i8> [[TMP46]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 16, i32 17, i32 18, i32 19>
+; CHECK-NEXT: [[TMP48:%.*]] = zext <16 x i8> [[TMP47]] to <16 x i32>
+; CHECK-NEXT: [[TMP49:%.*]] = sub nsw <16 x i32> [[TMP39]], [[TMP48]]
+; CHECK-NEXT: [[TMP50:%.*]] = shl nsw <16 x i32> [[TMP49]], splat (i32 16)
+; CHECK-NEXT: [[TMP51:%.*]] = add nsw <16 x i32> [[TMP50]], [[TMP30]]
+; CHECK-NEXT: [[TMP52:%.*]] = shufflevector <16 x i32> [[TMP51]], <16 x i32> poison, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
+; CHECK-NEXT: [[TMP53:%.*]] = add nsw <16 x i32> [[TMP52]], [[TMP51]]
+; CHECK-NEXT: [[TMP54:%.*]] = sub nsw <16 x i32> [[TMP52]], [[TMP51]]
+; CHECK-NEXT: [[TMP55:%.*]] = shufflevector <16 x i32> [[TMP53]], <16 x i32> [[TMP54]], <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
+; CHECK-NEXT: [[TMP56:%.*]] = shufflevector <16 x i32> [[TMP55]], <16 x i32> poison, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 6, i32 7, i32 4, i32 5, i32 10, i32 11, i32 8, i32 9, i32 14, i32 15, i32 12, i32 13>
+; CHECK-NEXT: [[TMP57:%.*]] = add nsw <16 x i32> [[TMP55]], [[TMP56]]
+; CHECK-NEXT: [[TMP58:%.*]] = sub nsw <16 x i32> [[TMP55]], [[TMP56]]
+; CHECK-NEXT: [[TMP59:%.*]] = shufflevector <16 x i32> [[TMP57]], <16 x i32> [[TMP58]], <16 x i32> <i32 16, i32 17, i32 2, i32 3, i32 20, i32 21, i32 6, i32 7, i32 24, i32 25, i32 10, i32 11, i32 28, i32 29, i32 14, i32 15>
+; CHECK-NEXT: [[TMP60:%.*]] = shufflevector <16 x i32> [[TMP59]], <16 x i32> poison, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11>
+; CHECK-NEXT: [[TMP61:%.*]] = sub nsw <16 x i32> [[TMP59]], [[TMP60]]
+; CHECK-NEXT: [[TMP62:%.*]] = add nsw <16 x i32> [[TMP59]], [[TMP60]]
+; CHECK-NEXT: [[TMP63:%.*]] = shufflevector <16 x i32> [[TMP61]], <16 x i32> [[TMP62]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 28, i32 29, i32 30, i32 31>
+; CHECK-NEXT: [[TMP64:%.*]] = shufflevector <16 x i32> [[TMP63]], <16 x i32> poison, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[TMP65:%.*]] = add nsw <16 x i32> [[TMP63]], [[TMP64]]
+; CHECK-NEXT: [[TMP66:%.*]] = sub nsw <16 x i32> [[TMP63]], [[TMP64]]
+; CHECK-NEXT: [[TMP67:%.*]] = shufflevector <16 x i32> [[TMP65]], <16 x i32> [[TMP66]], <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: [[TMP68:%.*]] = lshr <16 x i32> [[TMP67]], splat (i32 15)
+; CHECK-NEXT: [[TMP69:%.*]] = and <16 x i32> [[TMP68]], splat (i32 65537)
+; CHECK-NEXT: [[TMP70:%.*]] = mul nuw <16 x i32> [[TMP69]], splat (i32 65535)
+; CHECK-NEXT: [[TMP71:%.*]] = add <16 x i32> [[TMP70]], [[TMP67]]
+; CHECK-NEXT: [[TMP72:%.*]] = xor <16 x i32> [[TMP71]], [[TMP70]]
+; CHECK-NEXT: [[TMP73:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP72]])
+; CHECK-NEXT: [[CONV118:%.*]] = and i32 [[TMP73]], 65535
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[TMP73]], 16
+; CHECK-NEXT: [[ADD119:%.*]] = add nuw nsw i32 [[CONV118]], [[SHR]]
+; CHECK-NEXT: [[SHR120:%.*]] = lshr i32 [[ADD119]], 1
+; CHECK-NEXT: ret i32 [[SHR120]]
+;
+entry:
+ %idx.ext = sext i32 %i_pix1 to i64
+ %idx.ext63 = sext i32 %i_pix2 to i64
+ %0 = load i8, ptr %pix1, align 1
+ %conv = zext i8 %0 to i32
+ %1 = load i8, ptr %pix2, align 1
+ %conv2 = zext i8 %1 to i32
+ %sub = sub nsw i32 %conv, %conv2
+ %arrayidx3 = getelementptr inbounds nuw i8, ptr %pix1, i64 4
+ %2 = load i8, ptr %arrayidx3, align 1
+ %conv4 = zext i8 %2 to i32
+ %arrayidx5 = getelementptr inbounds nuw i8, ptr %pix2, i64 4
+ %3 = load i8, ptr %arrayidx5, align 1
+ %conv6 = zext i8 %3 to i32
+ %sub7 = sub nsw i32 %conv4, %conv6
+ %shl = shl nsw i32 %sub7, 16
+ %add = add nsw i32 %shl, %sub
+ %arrayidx8 = getelementptr inbounds nuw i8, ptr %pix1, i64 1
+ %4 = load i8, ptr %arrayidx8, align 1
+ %conv9 = zext i8 %4 to i32
+ %arrayidx10 = getelementptr inbounds nuw i8, ptr %pix2, i64 1
+ %5 = load i8, ptr %arrayidx10, align 1
+ %conv11 = zext i8 %5 to i32
+ %sub12 = sub nsw i32 %conv9, %conv11
+ %arrayidx13 = getelementptr inbounds nuw i8, ptr %pix1, i64 5
+ %6 = load i8, ptr %arrayidx13, align 1
+ %conv14 = zext i8 %6 to i32
+ %arrayidx15 = getelementptr inbounds nuw i8, ptr %pix2, i64 5
+ %7 = load i8, ptr %arrayidx15, align 1
+ %conv16 = zext i8 %7 to i32
+ %sub17 = sub nsw i32 %conv14, %conv16
+ %shl18 = shl nsw i32 %sub17, 16
+ %add19 = add nsw i32 %shl18, %sub12
+ %arrayidx20 = getelementptr inbounds nuw i8, ptr %pix1, i64 2
+ %8 = load i8, ptr %arrayidx20, align 1
+ %conv21 = zext i8 %8 to i32
+ %arrayidx22 = getelementptr inbounds nuw i8, ptr %pix2, i64 2
+ %9 = load i8, ptr %arrayidx22, align 1
+ %conv23 = zext i8 %9 to i32
+ %sub24 = sub nsw i32 %conv21, %conv23
+ %arrayidx25 = getelementptr inbounds nuw i8, ptr %pix1, i64 6
+ %10 = load i8, ptr %arrayidx25, align 1
+ %conv26 = zext i8 %10 to i32
+ %arrayidx27 = getelementptr inbounds nuw i8, ptr %pix2, i64 6
+ %11 = load i8, ptr %arrayidx27, align 1
+ %conv28 = zext i8 %11 to i32
+ %sub29 = sub nsw i32 %conv26, %conv28
+ %shl30 = shl nsw i32 %sub29, 16
+ %add31 = add nsw i32 %shl30, %sub24
+ %arrayidx32 = getelementptr inbounds nuw i8, ptr %pix1, i64 3
+ %12 = load i8, ptr %arrayidx32, align 1
+ %conv33 = zext i8 %12 to i32
+ %arrayidx34 = getelementptr inbounds nuw i8, ptr %pix2, i64 3
+ %13 = load i8, ptr %arrayidx34, align 1
+ %conv35 = zext i8 %13 to i32
+ %sub36 = sub nsw i32 %conv33, %conv35
+ %arrayidx37 = getelementptr inbounds nuw i8, ptr %pix1, i64 7
+ %14 = load i8, ptr %arrayidx37, align 1
+ %conv38 = zext i8 %14 to i32
+ %arrayidx39 = getelementptr inbounds nuw i8, ptr %pix2, i64 7
+ %15 = load i8, ptr %arrayidx39, align 1
+ %conv40 = zext i8 %15 to i32
+ %sub41 = sub nsw i32 %conv38, %conv40
+ %shl42 = shl nsw i32 %sub41, 16
+ %add43 = add nsw i32 %shl42, %sub36
+ %add44 = add nsw i32 %add19, %add
+ %sub45 = sub nsw i32 %add, %add19
+ %add46 = add nsw i32 %add43, %add31
+ %sub47 = sub nsw i32 %add31, %add43
+ %add48 = add nsw i32 %add46, %add44
+ %sub51 = sub nsw i32 %add44, %add46
+ %add55 = add nsw i32 %sub47, %sub45
+ %sub59 = sub nsw i32 %sub45, %sub47
+ %add.ptr = getelementptr inbounds i8, ptr %pix1, i64 %idx.ext
+ %add.ptr64 = getelementptr inbounds i8, ptr %pix2, i64 %idx.ext63
+ %16 = load i8, ptr %add.ptr, align 1
+ %conv.1 = zext i8 %16 to i32
+ %17 = load i8, ptr %add.ptr64, align 1
+ %conv2.1 = zext i8 %17 to i32
+ %sub.1 = sub nsw i32 %conv.1, %conv2.1
+ %arrayidx3.1 = getelementptr inbounds nuw i8, ptr %add.ptr, i64 4
+ %18 = load i8, ptr %arrayidx3.1, align 1
+ %conv4.1 = zext i8 %18 to i32
+ %arrayidx5.1 = getelementptr inbounds nuw i8, ptr %add.ptr64, i64 4
+ %19 = load i8, ptr %arrayidx5.1, align 1
+ %conv6.1 = zext i8 %19 to i32
+ %sub7.1 = sub nsw i32 %conv4.1, %conv6.1
+ %shl.1 = shl nsw i32 %sub7.1, 16
+ %add.1 = add nsw i32 %shl.1, %sub.1
+ %arrayidx8.1 = getelementptr inbounds nuw i8, ptr %add.ptr, i64 1
+ %20 = load i8, ptr %arrayidx8.1, align 1
+ %conv9.1 = zext i8 %20 to i32
+ %arrayidx10.1 = getelementptr inbounds nuw i8, ptr %add.ptr64, i64 1
+ %21 = load i8, ptr %arrayidx10.1, align 1
+ %conv11.1 = zext i8 %21 to i32
+ %sub12.1 = sub nsw i32 %conv9.1, %conv11.1
+ %arrayidx13.1 = getelementptr inbounds nuw i8, ptr %add.ptr, i64 5
+ %22 = load i8, ptr %arrayidx13.1, align 1
+ %conv14.1 = zext i8 %22 to i32
+ %arrayidx15.1 = getelementptr inbounds nuw i8, ptr %add.ptr64, i64 5
+ %23 = load i8, ptr %arrayidx15.1, align 1
+ %conv16.1 = zext i8 %23 to i32
+ %sub17.1 = sub nsw i32 %conv14.1, %conv16.1
+ %shl18.1 = shl nsw i32 %sub17.1, 16
+ %add19.1 = add nsw i32 %shl18.1, %sub12.1
+ %arrayidx20.1 = getelementptr inbounds nuw i8, ptr %add.ptr, i64 2
+ %24 = load i8, ptr %arrayidx20.1, align 1
+ %conv21.1 = zext i8 %24 to i32
+ %arrayidx22.1 = getelementptr inbounds nuw i8, ptr %add.ptr64, i64 2
+ %25 = load i8, ptr %arrayidx22.1, align 1
+ %conv23.1 = zext i8 %25 to i32
+ %sub24.1 = sub nsw i32 %conv21.1, %conv23.1
+ %arrayidx25.1 = getelementptr inbounds nuw i8, ptr %add.ptr, i64 6
+ %26 = load i8, ptr %arrayidx25.1, align 1
+ %conv26.1 = zext i8 %26 to i32
+ %arrayidx27.1 = getelementptr inbounds nuw i8, ptr %add.ptr64, i64 6
+ %27 = load i8, ptr %arrayidx27.1, align 1
+ %conv28.1 = zext i8 %27 to i32
+ %sub29.1 = sub nsw i32 %conv26.1, %conv28.1
+ %shl30.1 = shl nsw i32 %sub29.1, 16
+ %add31.1 = add nsw i32 %shl30.1, %sub24.1
+ %arrayidx32.1 = getelementptr inbounds nuw i8, ptr %add.ptr, i64 3
+ %28 = load i8, ptr %arrayidx32.1, align 1
+ %conv33.1 = zext i8 %28 to i32
+ %arrayidx34.1 = getelementptr inbounds nuw i8, ptr %add.ptr64, i64 3
+ %29 = load i8, ptr %arrayidx34.1, align 1
+ %conv35.1 = zext i8 %29 to i32
+ %sub36.1 = sub nsw i32 %conv33.1, %conv35.1
+ %arrayidx37.1 = getelementptr inbounds nuw i8, ptr %add.ptr, i64 7
+ %30 = load i8, ptr %arrayidx37.1, align 1
+ %conv38.1 = zext i8 %30 to i32
+ %arrayidx39.1 = getelementptr inbounds nuw i8, ptr %add.ptr64, i64 7
+ %31 = load i8, ptr %arrayidx39.1, align 1
+ %conv40.1 = zext i8 %31 to i32
+ %sub41.1 = sub nsw i32 %conv38.1, %conv40.1
+ %shl42.1 = shl nsw i32 %sub41.1, 16
+ %add43.1 = add nsw i32 %shl42.1, %sub36.1
+ %add44.1 = add nsw i32 %add19.1, %add.1
+ %sub45.1 = sub nsw i32 %add.1, %add19.1
+ %add46.1 = add nsw i32 %add43.1, %add31.1
+ %sub47.1 = sub nsw i32 %add31.1, %add43.1
+ %add48.1 = add nsw i32 %add46.1, %add44.1
+ %sub51.1 = sub nsw i32 %add44.1, %add46.1
+ %add55.1 = add nsw i32 %sub47.1, %sub45.1
+ %sub59.1 = sub nsw i32 %sub45.1, %sub47.1
+ %add.ptr.1 = getelementptr inbounds i8, ptr %add.ptr, i64 %idx.ext
+ %add.ptr64.1 = getelementptr inbounds i8, ptr %add.ptr64, i64 %idx.ext63
+ %32 = load i8, ptr %add.ptr.1, align 1
+ %conv.2 = zext i8 %32 to i32
+ %33 = load i8, ptr %add.ptr64.1, align 1
+ %conv2.2 = zext i8 %33 to i32
+ %sub.2 = sub nsw i32 %conv.2, %conv2.2
+ %arrayidx3.2 = getelementptr inbounds nuw i8, ptr %add.ptr.1, i64 4
+ %34 = load i8, ptr %arrayidx3.2, align 1
+ %conv4.2 = zext i8 %34 to i32
+ %arrayidx5.2 = getelementptr inbounds nuw i8, ptr %add.ptr64.1, i64 4
+ %35 = load i8, ptr %arrayidx5.2, align 1
+ %conv6.2 = zext i8 %35 to i32
+ %sub7.2 = sub nsw i32 %conv4.2, %conv6.2
+ %shl.2 = shl nsw i32 %sub7.2, 16
+ %add.2 = add nsw i32 %shl.2, %sub.2
+ %arrayidx8.2 = getelementptr inbounds nuw i8, ptr %add.ptr.1, i64 1
+ %36 = load i8, ptr %arrayidx8.2, align 1
+ %conv9.2 = zext i8 %36 to i32
+ %arrayidx10.2 = getelementptr inbounds nuw i8, ptr %add.ptr64.1, i64 1
+ %37 = load i8, ptr %arrayidx10.2, align 1
+ %conv11.2 = zext i8 %37 to i32
+ %sub12.2 = sub nsw i32 %conv9.2, %conv11.2
+ %arrayidx13.2 = getelementptr inbounds nuw i8, ptr %add.ptr.1, i64 5
+ %38 = load i8, ptr %arrayidx13.2, align 1
+ %conv14.2 = zext i8 %38 to i32
+ %arrayidx15.2 = getelementptr inbounds nuw i8, ptr %add.ptr64.1, i64 5
+ %39 = load i8, ptr %arrayidx15.2, align 1
+ %conv16.2 = zext i8 %39 to i32
+ %sub17.2 = sub nsw i32 %conv14.2, %conv16.2
+ %shl18.2 = shl nsw i32 %sub17.2, 16
+ %add19.2 = add nsw i32 %shl18.2, %sub12.2
+ %arrayidx20.2 = getelementptr inbounds nuw i8, ptr %add.ptr.1, i64 2
+ %40 = load i8, ptr %arrayidx20.2, align 1
+ %conv21.2 = zext i8 %40 to i32
+ %arrayidx22.2 = getelementptr inbounds nuw i8, ptr %add.ptr64.1, i64 2
+ %41 = load i8, ptr %arrayidx22.2, align 1
+ %conv23.2 = zext i8 %41 to i32
+ %sub24.2 = sub nsw i32 %conv21.2, %conv23.2
+ %arrayidx25.2 = getelementptr inbounds nuw i8, ptr %add.ptr.1, i64 6
+ %42 = load i8, ptr %arrayidx25.2, align 1
+ %conv26.2 = zext i8 %42 to i32
+ %arrayidx27.2 = getelementptr inbounds nuw i8, ptr %add.ptr64.1, i64 6
+ %43 = load i8, ptr %arrayidx27.2, align 1
+ %conv28.2 = zext i8 %43 to i32
+ %sub29.2 = sub nsw i32 %conv26.2, %conv28.2
+ %shl30.2 = shl nsw i32 %sub29.2, 16
+ %add31.2 = add nsw i32 %shl30.2, %sub24.2
+ %arrayidx32.2 = getelementptr inbounds nuw i8, ptr %add.ptr.1, i64 3
+ %44 = load i8, ptr %arrayidx32.2, align 1
+ %conv33.2 = zext i8 %44 to i32
+ %arrayidx34.2 = getelementptr inbounds nuw i8, ptr %add.ptr64.1, i64 3
+ %45 = load i8, ptr %arrayidx34.2, align 1
+ %conv35.2 = zext i8 %45 to i32
+ %sub36.2 = sub nsw i32 %conv33.2, %conv35.2
+ %arrayidx37.2 = getelementptr inbounds nuw i8, ptr %add.ptr.1, i64 7
+ %46 = load i8, ptr %arrayidx37.2, align 1
+ %conv38.2 = zext i8 %46 to i32
+ %arrayidx39.2 = getelementptr inbounds nuw i8, ptr %add.ptr64.1, i64 7
+ %47 = load i8, ptr %arrayidx39.2, align 1
+ %conv40.2 = zext i8 %47 to i32
+ %sub41.2 = sub nsw i32 %conv38.2, %conv40.2
+ %shl42.2 = shl nsw i32 %sub41.2, 16
+ %add43.2 = add nsw i32 %shl42.2, %sub36.2
+ %add44.2 = add nsw i32 %add19.2, %add.2
+ %sub45.2 = sub nsw i32 %add.2, %add19.2
+ %add46.2 = add nsw i32 %add43.2, %add31.2
+ %sub47.2 = sub nsw i32 %add31.2, %add43.2
+ %add48.2 = add nsw i32 %add46.2, %add44.2
+ %sub51.2 = sub nsw i32 %add44.2, %add46.2
+ %add55.2 = add nsw i32 %sub47.2, %sub45.2
+ %sub59.2 = sub nsw i32 %sub45.2, %sub47.2
+ %add.ptr.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 %idx.ext
+ %add.ptr64.2 = getelementptr inbounds i8, ptr %add.ptr64.1, i64 %idx.ext63
+ %48 = load i8, ptr %add.ptr.2, align 1
+ %conv.3 = zext i8 %48 to i32
+ %49 = load i8, ptr %add.ptr64.2, align 1
+ %conv2.3 = zext i8 %49 to i32
+ %sub.3 = sub nsw i32 %conv.3, %conv2.3
+ %arrayidx3.3 = getelementptr inbounds nuw i8, ptr %add.ptr.2, i64 4
+ %50 = load i8, ptr %arrayidx3.3, align 1
+ %conv4.3 = zext i8 %50 to i32
+ %arrayidx5.3 = getelementptr inbounds nuw i8, ptr %add.ptr64.2, i64 4
+ %51 = load i8, ptr %arrayidx5.3, align 1
+ %conv6.3 = zext i8 %51 to i32
+ %sub7.3 = sub nsw i32 %conv4.3, %conv6.3
+ %shl.3 = shl nsw i32 %sub7.3, 16
+ %add.3 = add nsw i32 %shl.3, %sub.3
+ %arrayidx8.3 = getelementptr inbounds nuw i8, ptr %add.ptr.2, i64 1
+ %52 = load i8, ptr %arrayidx8.3, align 1
+ %conv9.3 = zext i8 %52 to i32
+ %arrayidx10.3 = getelementptr inbounds nuw i8, ptr %add.ptr64.2, i64 1
+ %53 = load i8, ptr %arrayidx10.3, align 1
+ %conv11.3 = zext i8 %53 to i32
+ %sub12.3 = sub nsw i32 %conv9.3, %conv11.3
+ %arrayidx13.3 = getelementptr inbounds nuw i8, ptr %add.ptr.2, i64 5
+ %54 = load i8, ptr %arrayidx13.3, align 1
+ %conv14.3 = zext i8 %54 to i32
+ %arrayidx15.3 = getelementptr inbounds nuw i8, ptr %add.ptr64.2, i64 5
+ %55 = load i8, ptr %arrayidx15.3, align 1
+ %conv16.3 = zext i8 %55 to i32
+ %sub17.3 = sub nsw i32 %conv14.3, %conv16.3
+ %shl18.3 = shl nsw i32 %sub17.3, 16
+ %add19.3 = add nsw i32 %shl18.3, %sub12.3
+ %arrayidx20.3 = getelementptr inbounds nuw i8, ptr %add.ptr.2, i64 2
+ %56 = load i8, ptr %arrayidx20.3, align 1
+ %conv21.3 = zext i8 %56 to i32
+ %arrayidx22.3 = getelementptr inbounds nuw i8, ptr %add.ptr64.2, i64 2
+ %57 = load i8, ptr %arrayidx22.3, align 1
+ %conv23.3 = zext i8 %57 to i32
+ %sub24.3 = sub nsw i32 %conv21.3, %conv23.3
+ %arrayidx25.3 = getelementptr inbounds nuw i8, ptr %add.ptr.2, i64 6
+ %58 = load i8, ptr %arrayidx25.3, align 1
+ %conv26.3 = zext i8 %58 to i32
+ %arrayidx27.3 = getelementptr inbounds nuw i8, ptr %add.ptr64.2, i64 6
+ %59 = load i8, ptr %arrayidx27.3, align 1
+ %conv28.3 = zext i8 %59 to i32
+ %sub29.3 = sub nsw i32 %conv26.3, %conv28.3
+ %shl30.3 = shl nsw i32 %sub29.3, 16
+ %add31.3 = add nsw i32 %shl30.3, %sub24.3
+ %arrayidx32.3 = getelementptr inbounds nuw i8, ptr %add.ptr.2, i64 3
+ %60 = load i8, ptr %arrayidx32.3, align 1
+ %conv33.3 = zext i8 %60 to i32
+ %arrayidx34.3 = getelementptr inbounds nuw i8, ptr %add.ptr64.2, i64 3
+ %61 = load i8, ptr %arrayidx34.3, align 1
+ %conv35.3 = zext i8 %61 to i32
+ %sub36.3 = sub nsw i32 %conv33.3, %conv35.3
+ %arrayidx37.3 = getelementptr inbounds nuw i8, ptr %add.ptr.2, i64 7
+ %62 = load i8, ptr %arrayidx37.3, align 1
+ %conv38.3 = zext i8 %62 to i32
+ %arrayidx39.3 = getelementptr inbounds nuw i8, ptr %add.ptr64.2, i64 7
+ %63 = load i8, ptr %arrayidx39.3, align 1
+ %conv40.3 = zext i8 %63 to i32
+ %sub41.3 = sub nsw i32 %conv38.3, %conv40.3
+ %shl42.3 = shl nsw i32 %sub41.3, 16
+ %add43.3 = add nsw i32 %shl42.3, %sub36.3
+ %add44.3 = add nsw i32 %add19.3, %add.3
+ %sub45.3 = sub nsw i32 %add.3, %add19.3
+ %add46.3 = add nsw i32 %add43.3, %add31.3
+ %sub47.3 = sub nsw i32 %add31.3, %add43.3
+ %add48.3 = add nsw i32 %add46.3, %add44.3
+ %sub51.3 = sub nsw i32 %add44.3, %add46.3
+ %add55.3 = add nsw i32 %sub47.3, %sub45.3
+ %sub59.3 = sub nsw i32 %sub45.3, %sub47.3
+ %add78 = add nsw i32 %add48.1, %add48
+ %sub86 = sub nsw i32 %add48, %add48.1
+ %add94 = add nsw i32 %add48.3, %add48.2
+ %sub102 = sub nsw i32 %add48.2, %add48.3
+ %add103 = add nsw i32 %add94, %add78
+ %sub104 = sub nsw i32 %add78, %add94
+ %add105 = add nsw i32 %sub102, %sub86
+ %sub106 = sub nsw i32 %sub86, %sub102
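+ ; The lshr/and/mul/add/xor sequence below is x264's abs2() idiom: it yields the
+ ; absolute values of the two 16-bit differences packed in each 32-bit word.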
+ %shr.i = lshr i32 %add103, 15
+ %and.i = and i32 %shr.i, 65537
+ %mul.i = mul nuw i32 %and.i, 65535
+ %add.i = add i32 %mul.i, %add103
+ %xor.i = xor i32 %add.i, %mul.i
+ %shr.i169 = lshr i32 %add105, 15
+ %and.i170 = and i32 %shr.i169, 65537
+ %mul.i171 = mul nuw i32 %and.i170, 65535
+ %add.i172 = add i32 %mul.i171, %add105
+ %xor.i173 = xor i32 %add.i172, %mul.i171
+ %shr.i174 = lshr i32 %sub104, 15
+ %and.i175 = and i32 %shr.i174, 65537
+ %mul.i176 = mul nuw i32 %and.i175, 65535
+ %add.i177 = add i32 %mul.i176, %sub104
+ %xor.i178 = xor i32 %add.i177, %mul.i176
+ %shr.i179 = lshr i32 %sub106, 15
+ %and.i180 = and i32 %shr.i179, 65537
+ %mul.i181 = mul nuw i32 %and.i180, 65535
+ %add.i182 = add i32 %mul.i181, %sub106
+ %xor.i183 = xor i32 %add.i182, %mul.i181
+ %add110 = add i32 %xor.i173, %xor.i
+ %add112 = add i32 %add110, %xor.i178
+ %add113 = add i32 %add112, %xor.i183
+ %add78.1 = add nsw i32 %add55.1, %add55
+ %sub86.1 = sub nsw i32 %add55, %add55.1
+ %add94.1 = add nsw i32 %add55.3, %add55.2
+ %sub102.1 = sub nsw i32 %add55.2, %add55.3
+ %add103.1 = add nsw i32 %add94.1, %add78.1
+ %sub104.1 = sub nsw i32 %add78.1, %add94.1
+ %add105.1 = add nsw i32 %sub102.1, %sub86.1
+ %sub106.1 = sub nsw i32 %sub86.1, %sub102.1
+ %shr.i.1 = lshr i32 %add103.1, 15
+ %and.i.1 = and i32 %shr.i.1, 65537
+ %mul.i.1 = mul nuw i32 %and.i.1, 65535
+ %add.i.1 = add i32 %mul.i.1, %add103.1
+ %xor.i.1 = xor i32 %add.i.1, %mul.i.1
+ %shr.i169.1 = lshr i32 %add105.1, 15
+ %and.i170.1 = and i32 %shr.i169.1, 65537
+ %mul.i171.1 = mul nuw i32 %and.i170.1, 65535
+ %add.i172.1 = add i32 %mul.i171.1, %add105.1
+ %xor.i173.1 = xor i32 %add.i172.1, %mul.i171.1
+ %shr.i174.1 = lshr i32 %sub104.1, 15
+ %and.i175.1 = and i32 %shr.i174.1, 65537
+ %mul.i176.1 = mul nuw i32 %and.i175.1, 65535
+ %add.i177.1 = add i32 %mul.i176.1, %sub104.1
+ %xor.i178.1 = xor i32 %add.i177.1, %mul.i176.1
+ %shr.i179.1 = lshr i32 %sub106.1, 15
+ %and.i180.1 = and i32 %shr.i179.1, 65537
+ %mul.i181.1 = mul nuw i32 %and.i180.1, 65535
+ %add.i182.1 = add i32 %mul.i181.1, %sub106.1
+ %xor.i183.1 = xor i32 %add.i182.1, %mul.i181.1
+ %add108.1 = add i32 %xor.i173.1, %add113
+ %add110.1 = add i32 %add108.1, %xor.i.1
+ %add112.1 = add i32 %add110.1, %xor.i178.1
+ %add113.1 = add i32 %add112.1, %xor.i183.1
+ %add78.2 = add nsw i32 %sub51.1, %sub51
+ %sub86.2 = sub nsw i32 %sub51, %sub51.1
+ %add94.2 = add nsw i32 %sub51.3, %sub51.2
+ %sub102.2 = sub nsw i32 %sub51.2, %sub51.3
+ %add103.2 = add nsw i32 %add94.2, %add78.2
+ %sub104.2 = sub nsw i32 %add78.2, %add94.2
+ %add105.2 = add nsw i32 %sub102.2, %sub86.2
+ %sub106.2 = sub nsw i32 %sub86.2, %sub102.2
+ %shr.i.2 = lshr i32 %add103.2, 15
+ %and.i.2 = and i32 %shr.i.2, 65537
+ %mul.i.2 = mul nuw i32 %and.i.2, 65535
+ %add.i.2 = add i32 %mul.i.2, %add103.2
+ %xor.i.2 = xor i32 %add.i.2, %mul.i.2
+ %shr.i169.2 = lshr i32 %add105.2, 15
+ %and.i170.2 = and i32 %shr.i169.2, 65537
+ %mul.i171.2 = mul nuw i32 %and.i170.2, 65535
+ %add.i172.2 = add i32 %mul.i171.2, %add105.2
+ %xor.i173.2 = xor i32 %add.i172.2, %mul.i171.2
+ %shr.i174.2 = lshr i32 %sub104.2, 15
+ %and.i175.2 = and i32 %shr.i174.2, 65537
+ %mul.i176.2 = mul nuw i32 %and.i175.2, 65535
+ %add.i177.2 = add i32 %mul.i176.2, %sub104.2
+ %xor.i178.2 = xor i32 %add.i177.2, %mul.i176.2
+ %shr.i179.2 = lshr i32 %sub106.2, 15
+ %and.i180.2 = and i32 %shr.i179.2, 65537
+ %mul.i181.2 = mul nuw i32 %and.i180.2, 65535
+ %add.i182.2 = add i32 %mul.i181.2, %sub106.2
+ %xor.i183.2 = xor i32 %add.i182.2, %mul.i181.2
+ %add108.2 = add i32 %xor.i173.2, %add113.1
+ %add110.2 = add i32 %add108.2, %xor.i.2
+ %add112.2 = add i32 %add110.2, %xor.i178.2
+ %add113.2 = add i32 %add112.2, %xor.i183.2
+ %add78.3 = add nsw i32 %sub59.1, %sub59
+ %sub86.3 = sub nsw i32 %sub59, %sub59.1
+ %add94.3 = add nsw i32 %sub59.3, %sub59.2
+ %sub102.3 = sub nsw i32 %sub59.2, %sub59.3
+ %add103.3 = add nsw i32 %add94.3, %add78.3
+ %sub104.3 = sub nsw i32 %add78.3, %add94.3
+ %add105.3 = add nsw i32 %sub102.3, %sub86.3
+ %sub106.3 = sub nsw i32 %sub86.3, %sub102.3
+ %shr.i.3 = lshr i32 %add103.3, 15
+ %and.i.3 = and i32 %shr.i.3, 65537
+ %mul.i.3 = mul nuw i32 %and.i.3, 65535
+ %add.i.3 = add i32 %mul.i.3, %add103.3
+ %xor.i.3 = xor i32 %add.i.3, %mul.i.3
+ %shr.i169.3 = lshr i32 %add105.3, 15
+ %and.i170.3 = and i32 %shr.i169.3, 65537
+ %mul.i171.3 = mul nuw i32 %and.i170.3, 65535
+ %add.i172.3 = add i32 %mul.i171.3, %add105.3
+ %xor.i173.3 = xor i32 %add.i172.3, %mul.i171.3
+ %shr.i174.3 = lshr i32 %sub104.3, 15
+ %and.i175.3 = and i32 %shr.i174.3, 65537
+ %mul.i176.3 = mul nuw i32 %and.i175.3, 65535
+ %add.i177.3 = add i32 %mul.i176.3, %sub104.3
+ %xor.i178.3 = xor i32 %add.i177.3, %mul.i176.3
+ %shr.i179.3 = lshr i32 %sub106.3, 15
+ %and.i180.3 = and i32 %shr.i179.3, 65537
+ %mul.i181.3 = mul nuw i32 %and.i180.3, 65535
+ %add.i182.3 = add i32 %mul.i181.3, %sub106.3
+ %xor.i183.3 = xor i32 %add.i182.3, %mul.i181.3
+ %add108.3 = add i32 %xor.i173.3, %add113.2
+ %add110.3 = add i32 %add108.3, %xor.i.3
+ %add112.3 = add i32 %add110.3, %xor.i178.3
+ %add113.3 = add i32 %add112.3, %xor.i183.3
+ %conv118 = and i32 %add113.3, 65535
+ %shr = lshr i32 %add113.3, 16
+ %add119 = add nuw nsw i32 %conv118, %shr
+ %shr120 = lshr i32 %add119, 1
+ ret i32 %shr120
+}
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/poison-within-divisions.ll b/llvm/test/Transforms/SLPVectorizer/X86/poison-within-divisions.ll
new file mode 100644
index 0000000..76ef396
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/X86/poison-within-divisions.ll
@@ -0,0 +1,98 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt --passes=slp-vectorizer -S -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
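+; Reduced reproducer with a poison phi operand (%l15.4) next to a udiv; the checks
+; verify that the SLP vectorizer leaves this function completely unchanged.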
+
+define i32 @test(i1 %tobool2.not, i64 %conv21) {
+; CHECK-LABEL: define i32 @test(
+; CHECK-SAME: i1 [[TOBOOL2_NOT:%.*]], i64 [[CONV21:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[WHILE_BODY:.*]]
+; CHECK: [[WHILE_BODY]]:
+; CHECK-NEXT: [[Q24_659:%.*]] = phi i32 [ [[Q24_655:%.*]], %[[IF_END35:.*]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[L15_1:%.*]] = phi i32 [ [[L15_4:%.*]], %[[IF_END35]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br i1 [[TOBOOL2_NOT]], label %[[IF_END4:.*]], label %[[Q:.*]]
+; CHECK: [[IF_END4]]:
+; CHECK-NEXT: [[TMP0:%.*]] = icmp eq i32 [[Q24_659]], 0
+; CHECK-NEXT: br label %[[AB:.*]]
+; CHECK: [[AB]]:
+; CHECK-NEXT: [[Q24_658:%.*]] = phi i32 [ [[Q24_660:%.*]], %[[IF_END35]] ], [ 0, %[[IF_END4]] ]
+; CHECK-NEXT: [[M_1:%.*]] = phi i1 [ false, %[[IF_END35]] ], [ [[TMP0]], %[[IF_END4]] ]
+; CHECK-NEXT: [[O_2:%.*]] = phi i32 [ [[O_7:%.*]], %[[IF_END35]] ], [ 0, %[[IF_END4]] ]
+; CHECK-NEXT: [[Q24_2:%.*]] = phi i32 [ [[Q24_7:%.*]], %[[IF_END35]] ], [ 0, %[[IF_END4]] ]
+; CHECK-NEXT: br i1 [[M_1]], label %[[AE:.*]], label %[[AC:.*]]
+; CHECK: [[Q]]:
+; CHECK-NEXT: [[TOBOOL16_NOT:%.*]] = icmp ne i32 [[L15_1]], 0
+; CHECK-NEXT: [[SPEC_SELECT2:%.*]] = zext i1 [[TOBOOL16_NOT]] to i32
+; CHECK-NEXT: br label %[[AE]]
+; CHECK: [[AE]]:
+; CHECK-NEXT: [[Q24_655]] = phi i32 [ [[Q24_658]], %[[AB]] ], [ 0, %[[Q]] ]
+; CHECK-NEXT: [[M_3:%.*]] = phi i64 [ 0, %[[AB]] ], [ 1, %[[Q]] ]
+; CHECK-NEXT: [[L15_4]] = phi i32 [ poison, %[[AB]] ], [ [[SPEC_SELECT2]], %[[Q]] ]
+; CHECK-NEXT: [[O_4:%.*]] = phi i32 [ [[O_2]], %[[AB]] ], [ 0, %[[Q]] ]
+; CHECK-NEXT: [[Q24_4:%.*]] = phi i32 [ [[Q24_2]], %[[AB]] ], [ 0, %[[Q]] ]
+; CHECK-NEXT: br i1 [[TOBOOL2_NOT]], label %[[IF_END35]], label %[[IF_THEN20:.*]]
+; CHECK: [[IF_THEN20]]:
+; CHECK-NEXT: [[DIV22:%.*]] = udiv i64 [[M_3]], [[CONV21]]
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[DIV22]] to i32
+; CHECK-NEXT: [[CONV23:%.*]] = sub i32 0, [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[M_3]] to i32
+; CHECK-NEXT: [[CONV25:%.*]] = xor i32 [[TMP2]], 1
+; CHECK-NEXT: br label %[[IF_END35]]
+; CHECK: [[AC]]:
+; CHECK-NEXT: [[SPEC_SELECT:%.*]] = select i1 [[TOBOOL2_NOT]], i32 [[Q24_2]], i32 [[O_2]]
+; CHECK-NEXT: ret i32 [[SPEC_SELECT]]
+; CHECK: [[IF_END35]]:
+; CHECK-NEXT: [[Q24_660]] = phi i32 [ 0, %[[AE]] ], [ [[CONV25]], %[[IF_THEN20]] ]
+; CHECK-NEXT: [[O_7]] = phi i32 [ [[O_4]], %[[AE]] ], [ [[CONV23]], %[[IF_THEN20]] ]
+; CHECK-NEXT: [[Q24_7]] = phi i32 [ [[Q24_4]], %[[AE]] ], [ [[CONV25]], %[[IF_THEN20]] ]
+; CHECK-NEXT: br i1 [[TOBOOL2_NOT]], label %[[WHILE_BODY]], label %[[AB]]
+;
+entry:
+ br label %while.body
+
+while.body:
+ %q24.659 = phi i32 [ %q24.655, %if.end35 ], [ 0, %entry ]
+ %l15.1 = phi i32 [ %l15.4, %if.end35 ], [ 0, %entry ]
+ br i1 %tobool2.not, label %if.end4, label %q
+
+if.end4:
+ %0 = icmp eq i32 %q24.659, 0
+ br label %ab
+
+ab:
+ %q24.658 = phi i32 [ %q24.660, %if.end35 ], [ 0, %if.end4 ]
+ %m.1 = phi i1 [ false, %if.end35 ], [ %0, %if.end4 ]
+ %o.2 = phi i32 [ %o.7, %if.end35 ], [ 0, %if.end4 ]
+ %q24.2 = phi i32 [ %q24.7, %if.end35 ], [ 0, %if.end4 ]
+ br i1 %m.1, label %ae, label %ac
+
+q:
+ %tobool16.not = icmp ne i32 %l15.1, 0
+ %spec.select2 = zext i1 %tobool16.not to i32
+ br label %ae
+
+ae:
+ %q24.655 = phi i32 [ %q24.658, %ab ], [ 0, %q ]
+ %m.3 = phi i64 [ 0, %ab ], [ 1, %q ]
+ %l15.4 = phi i32 [ poison, %ab ], [ %spec.select2, %q ]
+ %o.4 = phi i32 [ %o.2, %ab ], [ 0, %q ]
+ %q24.4 = phi i32 [ %q24.2, %ab ], [ 0, %q ]
+ br i1 %tobool2.not, label %if.end35, label %if.then20
+
+if.then20:
+ %div22 = udiv i64 %m.3, %conv21
+ %1 = trunc i64 %div22 to i32
+ %conv23 = sub i32 0, %1
+ %2 = trunc i64 %m.3 to i32
+ %conv25 = xor i32 %2, 1
+ br label %if.end35
+
+ac:
+ %spec.select = select i1 %tobool2.not, i32 %q24.2, i32 %o.2
+ ret i32 %spec.select
+
+if.end35:
+ %q24.660 = phi i32 [ 0, %ae ], [ %conv25, %if.then20 ]
+ %o.7 = phi i32 [ %o.4, %ae ], [ %conv23, %if.then20 ]
+ %q24.7 = phi i32 [ %q24.4, %ae ], [ %conv25, %if.then20 ]
+ br i1 %tobool2.not, label %while.body, label %ab
+}
diff --git a/llvm/test/Transforms/SROA/vector-promotion-cannot-tree-structure-merge.ll b/llvm/test/Transforms/SROA/vector-promotion-cannot-tree-structure-merge.ll
index c858d07..ead6e02 100644
--- a/llvm/test/Transforms/SROA/vector-promotion-cannot-tree-structure-merge.ll
+++ b/llvm/test/Transforms/SROA/vector-promotion-cannot-tree-structure-merge.ll
@@ -219,4 +219,18 @@ entry:
}
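+; Stores two <1 x i16> halves into a [2 x i16] alloca and reloads the whole alloca
+; as a single <1 x i32>; vector promotion cannot tree-structure-merge these accesses.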
+define <1 x i32> @test_store_value_size_not_multiple_of_allocated_element_type_size(<1 x i16> %a, <1 x i16> %b) {
+entry:
+ %alloca = alloca [2 x i16]
+
+ %ptr0 = getelementptr inbounds [2 x i16], ptr %alloca, i32 0, i32 0
+ store <1 x i16> %a, ptr %ptr0
+
+ %ptr1 = getelementptr inbounds [2 x i16], ptr %alloca, i32 0, i32 1
+ store <1 x i16> %b, ptr %ptr1
+
+ %result = load <1 x i32>, ptr %alloca
+ ret <1 x i32> %result
+}
+
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)