Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r-- | llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll | 49
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/coalescer-avoid-coalesce-class-with-no-registers.ll | 27
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/coalescer-avoid-coalesce-class-with-no-registers.mir | 34
-rw-r--r-- | llvm/test/CodeGen/AMDGPU/llvm.amdgcn.smfmac.gfx950.ll | 72
-rw-r--r-- | llvm/test/CodeGen/NVPTX/i32x2-instructions.ll | 1625
-rw-r--r-- | llvm/test/CodeGen/RISCV/features-info.ll | 1
-rw-r--r-- | llvm/test/CodeGen/X86/fmaxnum.ll | 46
-rw-r--r-- | llvm/test/CodeGen/X86/fminimum-fmaximum.ll | 99
-rw-r--r-- | llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll | 99
-rw-r--r-- | llvm/test/CodeGen/X86/fminnum.ll | 46
-rw-r--r-- | llvm/test/CodeGen/X86/pgo-profile-o0.ll | 49
11 files changed, 2101 insertions, 46 deletions
diff --git a/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll b/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll
index 4287507..dfff35d 100644
--- a/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll
+++ b/llvm/test/CodeGen/AArch64/neon-partial-reduce-dot-product.ll
@@ -1451,3 +1451,52 @@ define <4 x i32> @partial_reduce_shl_zext_non_const_rhs(<16 x i8> %l, <4 x i32>
   %red = tail call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> %part, <16 x i32> %shift)
   ret <4 x i32> %red
 }
+
+define <2 x i32> @udot_v16i8tov2i32(<2 x i32> %acc, <16 x i8> %input) {
+; CHECK-NODOT-LABEL: udot_v16i8tov2i32:
+; CHECK-NODOT: // %bb.0: // %entry
+; CHECK-NODOT-NEXT: ushll v2.8h, v1.8b, #0
+; CHECK-NODOT-NEXT: // kill: def $d0 killed $d0 def $q0
+; CHECK-NODOT-NEXT: ushll2 v1.8h, v1.16b, #0
+; CHECK-NODOT-NEXT: ushll v3.4s, v2.4h, #0
+; CHECK-NODOT-NEXT: uaddw v0.4s, v0.4s, v2.4h
+; CHECK-NODOT-NEXT: ushll2 v4.4s, v2.8h, #0
+; CHECK-NODOT-NEXT: ext v2.16b, v2.16b, v2.16b, #8
+; CHECK-NODOT-NEXT: ext v3.16b, v3.16b, v3.16b, #8
+; CHECK-NODOT-NEXT: add v0.2s, v3.2s, v0.2s
+; CHECK-NODOT-NEXT: ext v3.16b, v4.16b, v4.16b, #8
+; CHECK-NODOT-NEXT: uaddw v0.4s, v0.4s, v2.4h
+; CHECK-NODOT-NEXT: ushll v2.4s, v1.4h, #0
+; CHECK-NODOT-NEXT: add v0.2s, v3.2s, v0.2s
+; CHECK-NODOT-NEXT: ext v2.16b, v2.16b, v2.16b, #8
+; CHECK-NODOT-NEXT: ushll2 v3.4s, v1.8h, #0
+; CHECK-NODOT-NEXT: uaddw v0.4s, v0.4s, v1.4h
+; CHECK-NODOT-NEXT: ext v1.16b, v1.16b, v1.16b, #8
+; CHECK-NODOT-NEXT: add v0.2s, v2.2s, v0.2s
+; CHECK-NODOT-NEXT: ext v2.16b, v3.16b, v3.16b, #8
+; CHECK-NODOT-NEXT: uaddw v0.4s, v0.4s, v1.4h
+; CHECK-NODOT-NEXT: add v0.2s, v2.2s, v0.2s
+; CHECK-NODOT-NEXT: ret
+;
+; CHECK-DOT-LABEL: udot_v16i8tov2i32:
+; CHECK-DOT: // %bb.0: // %entry
+; CHECK-DOT-NEXT: movi v2.16b, #1
+; CHECK-DOT-NEXT: fmov d0, d0
+; CHECK-DOT-NEXT: udot v0.4s, v1.16b, v2.16b
+; CHECK-DOT-NEXT: addp v0.4s, v0.4s, v0.4s
+; CHECK-DOT-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-DOT-NEXT: ret
+;
+; CHECK-DOT-I8MM-LABEL: udot_v16i8tov2i32:
+; CHECK-DOT-I8MM: // %bb.0: // %entry
+; CHECK-DOT-I8MM-NEXT: movi v2.16b, #1
+; CHECK-DOT-I8MM-NEXT: fmov d0, d0
+; CHECK-DOT-I8MM-NEXT: udot v0.4s, v1.16b, v2.16b
+; CHECK-DOT-I8MM-NEXT: addp v0.4s, v0.4s, v0.4s
+; CHECK-DOT-I8MM-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-DOT-I8MM-NEXT: ret
+entry:
+  %input.wide = zext <16 x i8> %input to <16 x i32>
+  %partial.reduce = tail call <2 x i32> @llvm.vector.partial.reduce.add(<2 x i32> %acc, <16 x i32> %input.wide)
+  ret <2 x i32> %partial.reduce
+}
diff --git a/llvm/test/CodeGen/AMDGPU/coalescer-avoid-coalesce-class-with-no-registers.ll b/llvm/test/CodeGen/AMDGPU/coalescer-avoid-coalesce-class-with-no-registers.ll
new file mode 100644
index 0000000..f466513
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/coalescer-avoid-coalesce-class-with-no-registers.ll
@@ -0,0 +1,27 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s
+
+; Make sure the coalescer doesn't introduce any uses of
+; vreg_1024. None are available to allocate with the register budget
+; of this function.
+
+define void @no_introduce_vreg_1024() #0 {
+; CHECK-LABEL: no_introduce_vreg_1024:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; def v[0:7]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: v_mov_b32_e32 v9, v0
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: ; use v[0:15]
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+  %tuple = call <8 x i32> asm sideeffect "; def $0","=v"()
+  %sub0 = extractelement <8 x i32> %tuple, i32 0
+  %insert = insertelement <16 x i32> poison, i32 %sub0, i32 9
+  call void asm sideeffect "; use $0","v"(<16 x i32> %insert)
+  ret void
+}
+
+attributes #0 = { nounwind "amdgpu-waves-per-eu"="10,10" }
diff --git a/llvm/test/CodeGen/AMDGPU/coalescer-avoid-coalesce-class-with-no-registers.mir b/llvm/test/CodeGen/AMDGPU/coalescer-avoid-coalesce-class-with-no-registers.mir
new file mode 100644
index 0000000..1f414eb
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/coalescer-avoid-coalesce-class-with-no-registers.mir
@@ -0,0 +1,34 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 6
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -run-pass=register-coalescer -o - %s | FileCheck %s
+
+# The register budget for this function does not permit using 1024-bit
+# registers. The coalescer should not introduce a 1024-bit virtual
+# register which will fail to allocate.
+
+--- |
+  define void @no_introduce_vreg_1024() #0 {
+    ret void
+  }
+
+  attributes #0 = { "amdgpu-waves-per-eu"="10,10" }
+...
+---
+name: no_introduce_vreg_1024
+tracksRegLiveness: true
+machineFunctionInfo:
+  occupancy: 10
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+
+    ; CHECK-LABEL: name: no_introduce_vreg_1024
+    ; CHECK: liveins: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+    ; CHECK-NEXT: {{ $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vreg_256 = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+    ; CHECK-NEXT: undef [[COPY1:%[0-9]+]].sub9:vreg_512 = COPY [[COPY]].sub0
+    ; CHECK-NEXT: SI_RETURN implicit [[COPY1]]
+    %0:vreg_256 = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+    undef %1.sub9:vreg_512 = COPY %0.sub0
+    SI_RETURN implicit %1
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.smfmac.gfx950.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.smfmac.gfx950.ll index ee11b92..0c1448a 100644 --- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.smfmac.gfx950.ll +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.smfmac.gfx950.ll @@ -44,23 +44,23 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x64_f16__vgpr(ptr addrspace(1) % ; GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GISEL-NEXT: v_lshlrev_b32_e32 v0, 4, v0 ; GISEL-NEXT: s_waitcnt lgkmcnt(0) -; GISEL-NEXT: global_load_dwordx4 v[14:17], v0, s[6:7] +; GISEL-NEXT: global_load_dwordx4 v[8:11], v0, s[6:7] ; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x44 ; GISEL-NEXT: s_load_dword s16, s[4:5], 0x64 -; GISEL-NEXT: v_mov_b64_e32 v[10:11], s[2:3] -; GISEL-NEXT: v_mov_b64_e32 v[8:9], s[0:1] +; GISEL-NEXT: v_mov_b64_e32 v[14:15], s[2:3] +; GISEL-NEXT: v_mov_b64_e32 v[12:13], s[0:1] ; GISEL-NEXT: s_waitcnt lgkmcnt(0) ; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[8:9] ; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[10:11] ; GISEL-NEXT: v_mov_b64_e32 v[4:5], s[12:13] ; GISEL-NEXT: v_mov_b64_e32 v[6:7], s[14:15] -; GISEL-NEXT: v_mov_b32_e32 v12, s16 +; GISEL-NEXT: v_mov_b32_e32 v16, s16 ; GISEL-NEXT: s_waitcnt vmcnt(0) ; GISEL-NEXT: s_nop 0 -; GISEL-NEXT: v_smfmac_f32_16x16x64_f16 v[14:17], v[8:11], v[0:7], v12 cbsz:1 abid:2 +; GISEL-NEXT: v_smfmac_f32_16x16x64_f16 v[8:11], v[12:15], v[0:7], v16 cbsz:1 abid:2 ; GISEL-NEXT: v_mov_b32_e32 v0, 0 ; GISEL-NEXT: s_nop 6 -; GISEL-NEXT: global_store_dwordx4 v0, v[14:17], s[6:7] +; GISEL-NEXT: global_store_dwordx4 v0, v[8:11], s[6:7] ; GISEL-NEXT: s_endpgm bb: %id = call i32 @llvm.amdgcn.workitem.id.x() @@ -834,24 +834,24 @@ define amdgpu_kernel void @test_smfmac_i32_16x16x128_i8__vgpr(ptr addrspace(1) % ; GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GISEL-NEXT: v_lshlrev_b32_e32 v0, 4, v0 ; GISEL-NEXT: s_waitcnt lgkmcnt(0) -; GISEL-NEXT: global_load_dwordx4 v[14:17], v0, s[0:1] +; GISEL-NEXT: global_load_dwordx4 v[8:11], v0, s[0:1] ; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34 ; GISEL-NEXT: s_load_dwordx4 s[16:19], s[4:5], 0x54 ; GISEL-NEXT: s_load_dword s2, s[4:5], 0x64 ; GISEL-NEXT: s_waitcnt lgkmcnt(0) -; GISEL-NEXT: v_mov_b64_e32 v[8:9], s[8:9] -; GISEL-NEXT: v_mov_b64_e32 v[10:11], s[10:11] +; GISEL-NEXT: v_mov_b64_e32 v[14:15], s[10:11] +; GISEL-NEXT: v_mov_b64_e32 v[12:13], s[8:9] ; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[12:13] ; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[14:15] ; GISEL-NEXT: v_mov_b64_e32 v[4:5], s[16:17] ; GISEL-NEXT: v_mov_b64_e32 v[6:7], s[18:19] -; GISEL-NEXT: v_mov_b32_e32 v12, s2 +; GISEL-NEXT: v_mov_b32_e32 v16, s2 ; GISEL-NEXT: s_waitcnt vmcnt(0) ; GISEL-NEXT: s_nop 0 -; GISEL-NEXT: v_smfmac_i32_16x16x128_i8 v[14:17], v[8:11], v[0:7], v12 cbsz:1 abid:2 +; GISEL-NEXT: v_smfmac_i32_16x16x128_i8 v[8:11], v[12:15], v[0:7], v16 cbsz:1 abid:2 ; GISEL-NEXT: v_mov_b32_e32 v0, 0 ; GISEL-NEXT: s_nop 6 -; GISEL-NEXT: global_store_dwordx4 v0, v[14:17], s[0:1] +; GISEL-NEXT: global_store_dwordx4 v0, v[8:11], s[0:1] ; GISEL-NEXT: s_endpgm bb: %id = call i32 @llvm.amdgcn.workitem.id.x() @@ -1349,24 +1349,24 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x128_bf8_bf8__vgpr(ptr addrspace ; GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GISEL-NEXT: v_lshlrev_b32_e32 v0, 4, v0 ; GISEL-NEXT: s_waitcnt lgkmcnt(0) -; GISEL-NEXT: global_load_dwordx4 v[14:17], v0, s[0:1] +; GISEL-NEXT: global_load_dwordx4 v[8:11], v0, s[0:1] ; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34 ; GISEL-NEXT: s_load_dwordx4 s[16:19], s[4:5], 0x54 ; GISEL-NEXT: s_load_dword s2, s[4:5], 0x64 ; 
GISEL-NEXT: s_waitcnt lgkmcnt(0) -; GISEL-NEXT: v_mov_b64_e32 v[8:9], s[8:9] -; GISEL-NEXT: v_mov_b64_e32 v[10:11], s[10:11] +; GISEL-NEXT: v_mov_b64_e32 v[14:15], s[10:11] +; GISEL-NEXT: v_mov_b64_e32 v[12:13], s[8:9] ; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[12:13] ; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[14:15] ; GISEL-NEXT: v_mov_b64_e32 v[4:5], s[16:17] ; GISEL-NEXT: v_mov_b64_e32 v[6:7], s[18:19] -; GISEL-NEXT: v_mov_b32_e32 v12, s2 +; GISEL-NEXT: v_mov_b32_e32 v16, s2 ; GISEL-NEXT: s_waitcnt vmcnt(0) ; GISEL-NEXT: s_nop 0 -; GISEL-NEXT: v_smfmac_f32_16x16x128_bf8_bf8 v[14:17], v[8:11], v[0:7], v12 cbsz:1 abid:2 +; GISEL-NEXT: v_smfmac_f32_16x16x128_bf8_bf8 v[8:11], v[12:15], v[0:7], v16 cbsz:1 abid:2 ; GISEL-NEXT: v_mov_b32_e32 v0, 0 ; GISEL-NEXT: s_nop 6 -; GISEL-NEXT: global_store_dwordx4 v0, v[14:17], s[0:1] +; GISEL-NEXT: global_store_dwordx4 v0, v[8:11], s[0:1] ; GISEL-NEXT: s_endpgm bb: %id = call i32 @llvm.amdgcn.workitem.id.x() @@ -1513,24 +1513,24 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x128_bf8_fp8__vgpr(ptr addrspace ; GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GISEL-NEXT: v_lshlrev_b32_e32 v0, 4, v0 ; GISEL-NEXT: s_waitcnt lgkmcnt(0) -; GISEL-NEXT: global_load_dwordx4 v[14:17], v0, s[0:1] +; GISEL-NEXT: global_load_dwordx4 v[8:11], v0, s[0:1] ; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34 ; GISEL-NEXT: s_load_dwordx4 s[16:19], s[4:5], 0x54 ; GISEL-NEXT: s_load_dword s2, s[4:5], 0x64 ; GISEL-NEXT: s_waitcnt lgkmcnt(0) -; GISEL-NEXT: v_mov_b64_e32 v[8:9], s[8:9] -; GISEL-NEXT: v_mov_b64_e32 v[10:11], s[10:11] +; GISEL-NEXT: v_mov_b64_e32 v[14:15], s[10:11] +; GISEL-NEXT: v_mov_b64_e32 v[12:13], s[8:9] ; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[12:13] ; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[14:15] ; GISEL-NEXT: v_mov_b64_e32 v[4:5], s[16:17] ; GISEL-NEXT: v_mov_b64_e32 v[6:7], s[18:19] -; GISEL-NEXT: v_mov_b32_e32 v12, s2 +; GISEL-NEXT: v_mov_b32_e32 v16, s2 ; GISEL-NEXT: s_waitcnt vmcnt(0) ; GISEL-NEXT: s_nop 0 -; GISEL-NEXT: v_smfmac_f32_16x16x128_bf8_fp8 v[14:17], v[8:11], v[0:7], v12 cbsz:1 abid:2 +; GISEL-NEXT: v_smfmac_f32_16x16x128_bf8_fp8 v[8:11], v[12:15], v[0:7], v16 cbsz:1 abid:2 ; GISEL-NEXT: v_mov_b32_e32 v0, 0 ; GISEL-NEXT: s_nop 6 -; GISEL-NEXT: global_store_dwordx4 v0, v[14:17], s[0:1] +; GISEL-NEXT: global_store_dwordx4 v0, v[8:11], s[0:1] ; GISEL-NEXT: s_endpgm bb: %id = call i32 @llvm.amdgcn.workitem.id.x() @@ -1677,24 +1677,24 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x128_fp8_bf8__vgpr(ptr addrspace ; GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GISEL-NEXT: v_lshlrev_b32_e32 v0, 4, v0 ; GISEL-NEXT: s_waitcnt lgkmcnt(0) -; GISEL-NEXT: global_load_dwordx4 v[14:17], v0, s[0:1] +; GISEL-NEXT: global_load_dwordx4 v[8:11], v0, s[0:1] ; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34 ; GISEL-NEXT: s_load_dwordx4 s[16:19], s[4:5], 0x54 ; GISEL-NEXT: s_load_dword s2, s[4:5], 0x64 ; GISEL-NEXT: s_waitcnt lgkmcnt(0) -; GISEL-NEXT: v_mov_b64_e32 v[8:9], s[8:9] -; GISEL-NEXT: v_mov_b64_e32 v[10:11], s[10:11] +; GISEL-NEXT: v_mov_b64_e32 v[14:15], s[10:11] +; GISEL-NEXT: v_mov_b64_e32 v[12:13], s[8:9] ; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[12:13] ; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[14:15] ; GISEL-NEXT: v_mov_b64_e32 v[4:5], s[16:17] ; GISEL-NEXT: v_mov_b64_e32 v[6:7], s[18:19] -; GISEL-NEXT: v_mov_b32_e32 v12, s2 +; GISEL-NEXT: v_mov_b32_e32 v16, s2 ; GISEL-NEXT: s_waitcnt vmcnt(0) ; GISEL-NEXT: s_nop 0 -; GISEL-NEXT: v_smfmac_f32_16x16x128_fp8_bf8 v[14:17], v[8:11], v[0:7], v12 cbsz:1 abid:2 +; GISEL-NEXT: v_smfmac_f32_16x16x128_fp8_bf8 v[8:11], v[12:15], 
v[0:7], v16 cbsz:1 abid:2 ; GISEL-NEXT: v_mov_b32_e32 v0, 0 ; GISEL-NEXT: s_nop 6 -; GISEL-NEXT: global_store_dwordx4 v0, v[14:17], s[0:1] +; GISEL-NEXT: global_store_dwordx4 v0, v[8:11], s[0:1] ; GISEL-NEXT: s_endpgm bb: %id = call i32 @llvm.amdgcn.workitem.id.x() @@ -1841,24 +1841,24 @@ define amdgpu_kernel void @test_smfmac_f32_16x16x128_fp8_fp8__vgpr(ptr addrspace ; GISEL-NEXT: v_and_b32_e32 v0, 0x3ff, v0 ; GISEL-NEXT: v_lshlrev_b32_e32 v0, 4, v0 ; GISEL-NEXT: s_waitcnt lgkmcnt(0) -; GISEL-NEXT: global_load_dwordx4 v[14:17], v0, s[0:1] +; GISEL-NEXT: global_load_dwordx4 v[8:11], v0, s[0:1] ; GISEL-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x34 ; GISEL-NEXT: s_load_dwordx4 s[16:19], s[4:5], 0x54 ; GISEL-NEXT: s_load_dword s2, s[4:5], 0x64 ; GISEL-NEXT: s_waitcnt lgkmcnt(0) -; GISEL-NEXT: v_mov_b64_e32 v[8:9], s[8:9] -; GISEL-NEXT: v_mov_b64_e32 v[10:11], s[10:11] +; GISEL-NEXT: v_mov_b64_e32 v[14:15], s[10:11] +; GISEL-NEXT: v_mov_b64_e32 v[12:13], s[8:9] ; GISEL-NEXT: v_mov_b64_e32 v[0:1], s[12:13] ; GISEL-NEXT: v_mov_b64_e32 v[2:3], s[14:15] ; GISEL-NEXT: v_mov_b64_e32 v[4:5], s[16:17] ; GISEL-NEXT: v_mov_b64_e32 v[6:7], s[18:19] -; GISEL-NEXT: v_mov_b32_e32 v12, s2 +; GISEL-NEXT: v_mov_b32_e32 v16, s2 ; GISEL-NEXT: s_waitcnt vmcnt(0) ; GISEL-NEXT: s_nop 0 -; GISEL-NEXT: v_smfmac_f32_16x16x128_fp8_fp8 v[14:17], v[8:11], v[0:7], v12 cbsz:1 abid:2 +; GISEL-NEXT: v_smfmac_f32_16x16x128_fp8_fp8 v[8:11], v[12:15], v[0:7], v16 cbsz:1 abid:2 ; GISEL-NEXT: v_mov_b32_e32 v0, 0 ; GISEL-NEXT: s_nop 6 -; GISEL-NEXT: global_store_dwordx4 v0, v[14:17], s[0:1] +; GISEL-NEXT: global_store_dwordx4 v0, v[8:11], s[0:1] ; GISEL-NEXT: s_endpgm bb: %id = call i32 @llvm.amdgcn.workitem.id.x() diff --git a/llvm/test/CodeGen/NVPTX/i32x2-instructions.ll b/llvm/test/CodeGen/NVPTX/i32x2-instructions.ll new file mode 100644 index 0000000..153ca10 --- /dev/null +++ b/llvm/test/CodeGen/NVPTX/i32x2-instructions.ll @@ -0,0 +1,1625 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3 +; RUN: llc < %s -mcpu=sm_80 -O0 -disable-post-ra -frame-pointer=all \ +; RUN: -verify-machineinstrs | FileCheck --check-prefixes=CHECK,CHECK-NOI32X2 %s +; RUN: %if ptxas-sm_80 %{ \ +; RUN: llc < %s -mcpu=sm_80 -O0 -disable-post-ra -frame-pointer=all \ +; RUN: -verify-machineinstrs | %ptxas-verify -arch=sm_80 \ +; RUN: %} +; RUN: llc < %s -mcpu=sm_100 -O0 -disable-post-ra -frame-pointer=all \ +; RUN: -verify-machineinstrs | FileCheck --check-prefixes=CHECK,CHECK-I32X2 %s +; RUN: %if ptxas-sm_100 %{ \ +; RUN: llc < %s -mcpu=sm_100 -O0 -disable-post-ra -frame-pointer=all \ +; RUN: -verify-machineinstrs | %ptxas-verify -arch=sm_100 \ +; RUN: %} + +target triple = "nvptx64-nvidia-cuda" +target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" + +define <2 x i32> @test_ret_const() #0 { +; CHECK-LABEL: test_ret_const( +; CHECK: { +; CHECK-EMPTY: +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {-1, 2}; +; CHECK-NEXT: ret; + ret <2 x i32> <i32 -1, i32 2> +} + +define i32 @test_extract_0(<2 x i32> %a) #0 { +; CHECK-NOI32X2-LABEL: test_extract_0( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<3>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_extract_0_param_0]; +; CHECK-NOI32X2-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_extract_0( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<2>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<2>; +; 
CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_extract_0_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, _}, %rd1; +; CHECK-I32X2-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-I32X2-NEXT: ret; + %e = extractelement <2 x i32> %a, i32 0 + ret i32 %e +} + +define i32 @test_extract_1(<2 x i32> %a) #0 { +; CHECK-NOI32X2-LABEL: test_extract_1( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<3>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_extract_1_param_0]; +; CHECK-NOI32X2-NEXT: st.param.b32 [func_retval0], %r2; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_extract_1( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<2>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_extract_1_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {_, %r1}, %rd1; +; CHECK-I32X2-NEXT: st.param.b32 [func_retval0], %r1; +; CHECK-I32X2-NEXT: ret; + %e = extractelement <2 x i32> %a, i32 1 + ret i32 %e +} + +define i32 @test_extract_i(<2 x i32> %a, i64 %idx) #0 { +; CHECK-NOI32X2-LABEL: test_extract_i( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .local .align 8 .b8 __local_depot3[8]; +; CHECK-NOI32X2-NEXT: .reg .b64 %SP; +; CHECK-NOI32X2-NEXT: .reg .b64 %SPL; +; CHECK-NOI32X2-NEXT: .reg .b32 %r<4>; +; CHECK-NOI32X2-NEXT: .reg .b64 %rd<6>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: mov.b64 %SPL, __local_depot3; +; CHECK-NOI32X2-NEXT: cvta.local.u64 %SP, %SPL; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_extract_i_param_0]; +; CHECK-NOI32X2-NEXT: ld.param.b64 %rd1, [test_extract_i_param_1]; +; CHECK-NOI32X2-NEXT: st.v2.b32 [%SP], {%r1, %r2}; +; CHECK-NOI32X2-NEXT: and.b64 %rd2, %rd1, 1; +; CHECK-NOI32X2-NEXT: shl.b64 %rd3, %rd2, 2; +; CHECK-NOI32X2-NEXT: add.u64 %rd4, %SP, 0; +; CHECK-NOI32X2-NEXT: or.b64 %rd5, %rd4, %rd3; +; CHECK-NOI32X2-NEXT: ld.b32 %r3, [%rd5]; +; CHECK-NOI32X2-NEXT: st.param.b32 [func_retval0], %r3; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_extract_i( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .pred %p<2>; +; CHECK-I32X2-NEXT: .reg .b32 %r<4>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<3>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_extract_i_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_extract_i_param_0]; +; CHECK-I32X2-NEXT: setp.eq.b64 %p1, %rd2, 0; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-I32X2-NEXT: selp.b32 %r3, %r1, %r2, %p1; +; CHECK-I32X2-NEXT: st.param.b32 [func_retval0], %r3; +; CHECK-I32X2-NEXT: ret; + %e = extractelement <2 x i32> %a, i64 %idx + ret i32 %e +} + +define <2 x i32> @test_add(<2 x i32> %a, <2 x i32> %b) #0 { +; CHECK-NOI32X2-LABEL: test_add( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_add_param_1]; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_add_param_0]; +; CHECK-NOI32X2-NEXT: add.s32 %r5, %r2, %r4; +; CHECK-NOI32X2-NEXT: add.s32 %r6, %r1, %r3; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_add( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<7>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<3>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_add_param_1]; +; 
CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_add_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2; +; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-I32X2-NEXT: add.s32 %r5, %r4, %r2; +; CHECK-I32X2-NEXT: add.s32 %r6, %r3, %r1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5}; +; CHECK-I32X2-NEXT: ret; + %r = add <2 x i32> %a, %b + ret <2 x i32> %r +} + +define <2 x i32> @test_add_imm_0(<2 x i32> %a) #0 { +; CHECK-NOI32X2-LABEL: test_add_imm_0( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<5>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_add_imm_0_param_0]; +; CHECK-NOI32X2-NEXT: add.s32 %r3, %r2, 2; +; CHECK-NOI32X2-NEXT: add.s32 %r4, %r1, 1; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_add_imm_0( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<5>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_add_imm_0_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-I32X2-NEXT: add.s32 %r3, %r2, 2; +; CHECK-I32X2-NEXT: add.s32 %r4, %r1, 1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-I32X2-NEXT: ret; + %r = add <2 x i32> <i32 1, i32 2>, %a + ret <2 x i32> %r +} + +define <2 x i32> @test_add_imm_1(<2 x i32> %a) #0 { +; CHECK-NOI32X2-LABEL: test_add_imm_1( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<5>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_add_imm_1_param_0]; +; CHECK-NOI32X2-NEXT: add.s32 %r3, %r2, 2; +; CHECK-NOI32X2-NEXT: add.s32 %r4, %r1, 1; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_add_imm_1( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<5>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_add_imm_1_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-I32X2-NEXT: add.s32 %r3, %r2, 2; +; CHECK-I32X2-NEXT: add.s32 %r4, %r1, 1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-I32X2-NEXT: ret; + %r = add <2 x i32> %a, <i32 1, i32 2> + ret <2 x i32> %r +} + +define <2 x i32> @test_sub(<2 x i32> %a, <2 x i32> %b) #0 { +; CHECK-NOI32X2-LABEL: test_sub( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_sub_param_1]; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_sub_param_0]; +; CHECK-NOI32X2-NEXT: sub.s32 %r5, %r2, %r4; +; CHECK-NOI32X2-NEXT: sub.s32 %r6, %r1, %r3; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_sub( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<7>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<3>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_sub_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_sub_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2; +; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-I32X2-NEXT: sub.s32 %r5, %r4, %r2; +; CHECK-I32X2-NEXT: sub.s32 %r6, %r3, %r1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5}; +; CHECK-I32X2-NEXT: ret; + %r = sub <2 x i32> %a, %b + ret 
<2 x i32> %r +} + +define <2 x i32> @test_smax(<2 x i32> %a, <2 x i32> %b) #0 { +; CHECK-NOI32X2-LABEL: test_smax( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_smax_param_1]; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_smax_param_0]; +; CHECK-NOI32X2-NEXT: max.s32 %r5, %r2, %r4; +; CHECK-NOI32X2-NEXT: max.s32 %r6, %r1, %r3; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_smax( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<7>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<3>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_smax_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_smax_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2; +; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-I32X2-NEXT: max.s32 %r5, %r4, %r2; +; CHECK-I32X2-NEXT: max.s32 %r6, %r3, %r1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5}; +; CHECK-I32X2-NEXT: ret; + %cmp = icmp sgt <2 x i32> %a, %b + %r = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> %b + ret <2 x i32> %r +} + +define <2 x i32> @test_umax(<2 x i32> %a, <2 x i32> %b) #0 { +; CHECK-NOI32X2-LABEL: test_umax( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_umax_param_1]; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_umax_param_0]; +; CHECK-NOI32X2-NEXT: max.u32 %r5, %r2, %r4; +; CHECK-NOI32X2-NEXT: max.u32 %r6, %r1, %r3; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_umax( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<7>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<3>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_umax_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_umax_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2; +; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-I32X2-NEXT: max.u32 %r5, %r4, %r2; +; CHECK-I32X2-NEXT: max.u32 %r6, %r3, %r1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5}; +; CHECK-I32X2-NEXT: ret; + %cmp = icmp ugt <2 x i32> %a, %b + %r = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> %b + ret <2 x i32> %r +} + +define <2 x i32> @test_smin(<2 x i32> %a, <2 x i32> %b) #0 { +; CHECK-NOI32X2-LABEL: test_smin( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_smin_param_1]; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_smin_param_0]; +; CHECK-NOI32X2-NEXT: min.s32 %r5, %r2, %r4; +; CHECK-NOI32X2-NEXT: min.s32 %r6, %r1, %r3; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_smin( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<7>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<3>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_smin_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_smin_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2; +; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-I32X2-NEXT: min.s32 %r5, %r4, %r2; +; CHECK-I32X2-NEXT: min.s32 %r6, %r3, %r1; +; CHECK-I32X2-NEXT: 
st.param.v2.b32 [func_retval0], {%r6, %r5}; +; CHECK-I32X2-NEXT: ret; + %cmp = icmp sle <2 x i32> %a, %b + %r = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> %b + ret <2 x i32> %r +} + +define <2 x i32> @test_umin(<2 x i32> %a, <2 x i32> %b) #0 { +; CHECK-NOI32X2-LABEL: test_umin( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_umin_param_1]; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_umin_param_0]; +; CHECK-NOI32X2-NEXT: min.u32 %r5, %r2, %r4; +; CHECK-NOI32X2-NEXT: min.u32 %r6, %r1, %r3; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_umin( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<7>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<3>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_umin_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_umin_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2; +; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-I32X2-NEXT: min.u32 %r5, %r4, %r2; +; CHECK-I32X2-NEXT: min.u32 %r6, %r3, %r1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5}; +; CHECK-I32X2-NEXT: ret; + %cmp = icmp ule <2 x i32> %a, %b + %r = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> %b + ret <2 x i32> %r +} + +define <2 x i32> @test_eq(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c) #0 { +; CHECK-NOI32X2-LABEL: test_eq( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .pred %p<3>; +; CHECK-NOI32X2-NEXT: .reg .b32 %r<9>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r5, %r6}, [test_eq_param_2]; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_eq_param_1]; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_eq_param_0]; +; CHECK-NOI32X2-NEXT: setp.eq.b32 %p1, %r1, %r3; +; CHECK-NOI32X2-NEXT: setp.eq.b32 %p2, %r2, %r4; +; CHECK-NOI32X2-NEXT: selp.b32 %r7, %r2, %r6, %p2; +; CHECK-NOI32X2-NEXT: selp.b32 %r8, %r1, %r5, %p1; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r8, %r7}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_eq( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .pred %p<3>; +; CHECK-I32X2-NEXT: .reg .b32 %r<9>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<4>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd3, [test_eq_param_2]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_eq_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_eq_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2; +; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-I32X2-NEXT: setp.eq.b32 %p1, %r3, %r1; +; CHECK-I32X2-NEXT: setp.eq.b32 %p2, %r4, %r2; +; CHECK-I32X2-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-I32X2-NEXT: selp.b32 %r7, %r4, %r6, %p2; +; CHECK-I32X2-NEXT: selp.b32 %r8, %r3, %r5, %p1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r8, %r7}; +; CHECK-I32X2-NEXT: ret; + %cmp = icmp eq <2 x i32> %a, %b + %r = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> %c + ret <2 x i32> %r +} + +define <2 x i32> @test_ne(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c) #0 { +; CHECK-NOI32X2-LABEL: test_ne( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .pred %p<3>; +; CHECK-NOI32X2-NEXT: .reg .b32 %r<9>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r5, %r6}, [test_ne_param_2]; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_ne_param_1]; +; 
CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_ne_param_0]; +; CHECK-NOI32X2-NEXT: setp.ne.b32 %p1, %r1, %r3; +; CHECK-NOI32X2-NEXT: setp.ne.b32 %p2, %r2, %r4; +; CHECK-NOI32X2-NEXT: selp.b32 %r7, %r2, %r6, %p2; +; CHECK-NOI32X2-NEXT: selp.b32 %r8, %r1, %r5, %p1; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r8, %r7}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_ne( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .pred %p<3>; +; CHECK-I32X2-NEXT: .reg .b32 %r<9>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<4>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd3, [test_ne_param_2]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_ne_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_ne_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2; +; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-I32X2-NEXT: setp.ne.b32 %p1, %r3, %r1; +; CHECK-I32X2-NEXT: setp.ne.b32 %p2, %r4, %r2; +; CHECK-I32X2-NEXT: mov.b64 {%r5, %r6}, %rd3; +; CHECK-I32X2-NEXT: selp.b32 %r7, %r4, %r6, %p2; +; CHECK-I32X2-NEXT: selp.b32 %r8, %r3, %r5, %p1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r8, %r7}; +; CHECK-I32X2-NEXT: ret; + %cmp = icmp ne <2 x i32> %a, %b + %r = select <2 x i1> %cmp, <2 x i32> %a, <2 x i32> %c + ret <2 x i32> %r +} + +define <2 x i32> @test_mul(<2 x i32> %a, <2 x i32> %b) #0 { +; CHECK-NOI32X2-LABEL: test_mul( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_mul_param_1]; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_mul_param_0]; +; CHECK-NOI32X2-NEXT: mul.lo.s32 %r5, %r2, %r4; +; CHECK-NOI32X2-NEXT: mul.lo.s32 %r6, %r1, %r3; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_mul( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<7>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<3>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_mul_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_mul_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2; +; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-I32X2-NEXT: mul.lo.s32 %r5, %r4, %r2; +; CHECK-I32X2-NEXT: mul.lo.s32 %r6, %r3, %r1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5}; +; CHECK-I32X2-NEXT: ret; + %r = mul <2 x i32> %a, %b + ret <2 x i32> %r +} + +define <2 x i32> @test_or(<2 x i32> %a, <2 x i32> %b) #0 { +; CHECK-NOI32X2-LABEL: test_or( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_or_param_1]; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_or_param_0]; +; CHECK-NOI32X2-NEXT: or.b32 %r5, %r2, %r4; +; CHECK-NOI32X2-NEXT: or.b32 %r6, %r1, %r3; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_or( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<7>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<3>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_or_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_or_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2; +; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-I32X2-NEXT: or.b32 %r5, %r4, %r2; +; CHECK-I32X2-NEXT: or.b32 %r6, %r3, %r1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], 
{%r6, %r5}; +; CHECK-I32X2-NEXT: ret; + %r = or <2 x i32> %a, %b + ret <2 x i32> %r +} + +define <2 x i32> @test_or_computed(i32 %a) { +; CHECK-LABEL: test_or_computed( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [test_or_computed_param_0]; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r1, 5}; +; CHECK-NEXT: ret; + %ins.0 = insertelement <2 x i32> zeroinitializer, i32 %a, i32 0 + %ins.1 = insertelement <2 x i32> %ins.0, i32 5, i32 1 + %r = or <2 x i32> %ins.1, %ins.0 + ret <2 x i32> %r +} + +define <2 x i32> @test_or_imm_0(<2 x i32> %a) #0 { +; CHECK-NOI32X2-LABEL: test_or_imm_0( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<5>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_or_imm_0_param_0]; +; CHECK-NOI32X2-NEXT: or.b32 %r3, %r2, 2; +; CHECK-NOI32X2-NEXT: or.b32 %r4, %r1, 1; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_or_imm_0( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<5>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_or_imm_0_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-I32X2-NEXT: or.b32 %r3, %r2, 2; +; CHECK-I32X2-NEXT: or.b32 %r4, %r1, 1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-I32X2-NEXT: ret; + %r = or <2 x i32> <i32 1, i32 2>, %a + ret <2 x i32> %r +} + +define <2 x i32> @test_or_imm_1(<2 x i32> %a) #0 { +; CHECK-NOI32X2-LABEL: test_or_imm_1( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<5>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_or_imm_1_param_0]; +; CHECK-NOI32X2-NEXT: or.b32 %r3, %r2, 2; +; CHECK-NOI32X2-NEXT: or.b32 %r4, %r1, 1; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_or_imm_1( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<5>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_or_imm_1_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-I32X2-NEXT: or.b32 %r3, %r2, 2; +; CHECK-I32X2-NEXT: or.b32 %r4, %r1, 1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-I32X2-NEXT: ret; + %r = or <2 x i32> %a, <i32 1, i32 2> + ret <2 x i32> %r +} + +define <2 x i32> @test_xor(<2 x i32> %a, <2 x i32> %b) #0 { +; CHECK-NOI32X2-LABEL: test_xor( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_xor_param_1]; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_xor_param_0]; +; CHECK-NOI32X2-NEXT: xor.b32 %r5, %r2, %r4; +; CHECK-NOI32X2-NEXT: xor.b32 %r6, %r1, %r3; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_xor( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<7>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<3>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_xor_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_xor_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2; +; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-I32X2-NEXT: xor.b32 %r5, %r4, 
%r2; +; CHECK-I32X2-NEXT: xor.b32 %r6, %r3, %r1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5}; +; CHECK-I32X2-NEXT: ret; + %r = xor <2 x i32> %a, %b + ret <2 x i32> %r +} + +define <2 x i32> @test_xor_computed(i32 %a) { +; CHECK-LABEL: test_xor_computed( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [test_xor_computed_param_0]; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {0, 5}; +; CHECK-NEXT: ret; + %ins.0 = insertelement <2 x i32> zeroinitializer, i32 %a, i32 0 + %ins.1 = insertelement <2 x i32> %ins.0, i32 5, i32 1 + %r = xor <2 x i32> %ins.1, %ins.0 + ret <2 x i32> %r +} + +define <2 x i32> @test_xor_imm_0(<2 x i32> %a) #0 { +; CHECK-NOI32X2-LABEL: test_xor_imm_0( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<5>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_xor_imm_0_param_0]; +; CHECK-NOI32X2-NEXT: xor.b32 %r3, %r2, 2; +; CHECK-NOI32X2-NEXT: xor.b32 %r4, %r1, 1; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_xor_imm_0( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<5>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_xor_imm_0_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-I32X2-NEXT: xor.b32 %r3, %r2, 2; +; CHECK-I32X2-NEXT: xor.b32 %r4, %r1, 1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-I32X2-NEXT: ret; + %r = xor <2 x i32> <i32 1, i32 2>, %a + ret <2 x i32> %r +} + +define <2 x i32> @test_xor_imm_1(<2 x i32> %a) #0 { +; CHECK-NOI32X2-LABEL: test_xor_imm_1( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<5>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_xor_imm_1_param_0]; +; CHECK-NOI32X2-NEXT: xor.b32 %r3, %r2, 2; +; CHECK-NOI32X2-NEXT: xor.b32 %r4, %r1, 1; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_xor_imm_1( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<5>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_xor_imm_1_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-I32X2-NEXT: xor.b32 %r3, %r2, 2; +; CHECK-I32X2-NEXT: xor.b32 %r4, %r1, 1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-I32X2-NEXT: ret; + %r = xor <2 x i32> %a, <i32 1, i32 2> + ret <2 x i32> %r +} + +define <2 x i32> @test_and(<2 x i32> %a, <2 x i32> %b) #0 { +; CHECK-NOI32X2-LABEL: test_and( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_and_param_1]; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_and_param_0]; +; CHECK-NOI32X2-NEXT: and.b32 %r5, %r2, %r4; +; CHECK-NOI32X2-NEXT: and.b32 %r6, %r1, %r3; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_and( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<7>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<3>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_and_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_and_param_0]; +; 
CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd2; +; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd1; +; CHECK-I32X2-NEXT: and.b32 %r5, %r4, %r2; +; CHECK-I32X2-NEXT: and.b32 %r6, %r3, %r1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5}; +; CHECK-I32X2-NEXT: ret; + %r = and <2 x i32> %a, %b + ret <2 x i32> %r +} + +define <2 x i32> @test_and_computed(i32 %a) { +; CHECK-LABEL: test_and_computed( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [test_and_computed_param_0]; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r1, 0}; +; CHECK-NEXT: ret; + %ins.0 = insertelement <2 x i32> zeroinitializer, i32 %a, i32 0 + %ins.1 = insertelement <2 x i32> %ins.0, i32 5, i32 1 + %r = and <2 x i32> %ins.1, %ins.0 + ret <2 x i32> %r +} + +define <2 x i32> @test_and_imm_0(<2 x i32> %a) #0 { +; CHECK-NOI32X2-LABEL: test_and_imm_0( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<5>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_and_imm_0_param_0]; +; CHECK-NOI32X2-NEXT: and.b32 %r3, %r2, 2; +; CHECK-NOI32X2-NEXT: and.b32 %r4, %r1, 1; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_and_imm_0( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<5>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_and_imm_0_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-I32X2-NEXT: and.b32 %r3, %r2, 2; +; CHECK-I32X2-NEXT: and.b32 %r4, %r1, 1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-I32X2-NEXT: ret; + %r = and <2 x i32> <i32 1, i32 2>, %a + ret <2 x i32> %r +} + +define <2 x i32> @test_and_imm_1(<2 x i32> %a) #0 { +; CHECK-NOI32X2-LABEL: test_and_imm_1( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<5>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_and_imm_1_param_0]; +; CHECK-NOI32X2-NEXT: and.b32 %r3, %r2, 2; +; CHECK-NOI32X2-NEXT: and.b32 %r4, %r1, 1; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_and_imm_1( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<5>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_and_imm_1_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-I32X2-NEXT: and.b32 %r3, %r2, 2; +; CHECK-I32X2-NEXT: and.b32 %r4, %r1, 1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-I32X2-NEXT: ret; + %r = and <2 x i32> %a, <i32 1, i32 2> + ret <2 x i32> %r +} + +define void @test_ldst_v2i32(ptr %a, ptr %b) { +; CHECK-NOI32X2-LABEL: test_ldst_v2i32( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<3>; +; CHECK-NOI32X2-NEXT: .reg .b64 %rd<3>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.b64 %rd2, [test_ldst_v2i32_param_1]; +; CHECK-NOI32X2-NEXT: ld.param.b64 %rd1, [test_ldst_v2i32_param_0]; +; CHECK-NOI32X2-NEXT: ld.v2.b32 {%r1, %r2}, [%rd1]; +; CHECK-NOI32X2-NEXT: st.v2.b32 [%rd2], {%r1, %r2}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_ldst_v2i32( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b64 %rd<4>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, 
[test_ldst_v2i32_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_ldst_v2i32_param_0]; +; CHECK-I32X2-NEXT: ld.b64 %rd3, [%rd1]; +; CHECK-I32X2-NEXT: st.b64 [%rd2], %rd3; +; CHECK-I32X2-NEXT: ret; + %t1 = load <2 x i32>, ptr %a + store <2 x i32> %t1, ptr %b, align 16 + ret void +} + +define void @test_ldst_v3i32(ptr %a, ptr %b) { +; CHECK-LABEL: test_ldst_v3i32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd2, [test_ldst_v3i32_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_ldst_v3i32_param_0]; +; CHECK-NEXT: ld.b64 %rd3, [%rd1]; +; CHECK-NEXT: ld.b32 %r1, [%rd1+8]; +; CHECK-NEXT: st.b32 [%rd2+8], %r1; +; CHECK-NEXT: st.b64 [%rd2], %rd3; +; CHECK-NEXT: ret; + %t1 = load <3 x i32>, ptr %a + store <3 x i32> %t1, ptr %b, align 16 + ret void +} + +define void @test_ldst_v4i32(ptr %a, ptr %b) { +; CHECK-NOI32X2-LABEL: test_ldst_v4i32( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<5>; +; CHECK-NOI32X2-NEXT: .reg .b64 %rd<3>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.b64 %rd2, [test_ldst_v4i32_param_1]; +; CHECK-NOI32X2-NEXT: ld.param.b64 %rd1, [test_ldst_v4i32_param_0]; +; CHECK-NOI32X2-NEXT: ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1]; +; CHECK-NOI32X2-NEXT: st.v4.b32 [%rd2], {%r1, %r2, %r3, %r4}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_ldst_v4i32( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b64 %rd<5>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_ldst_v4i32_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_ldst_v4i32_param_0]; +; CHECK-I32X2-NEXT: ld.v2.b64 {%rd3, %rd4}, [%rd1]; +; CHECK-I32X2-NEXT: st.v2.b64 [%rd2], {%rd3, %rd4}; +; CHECK-I32X2-NEXT: ret; + %t1 = load <4 x i32>, ptr %a + store <4 x i32> %t1, ptr %b, align 16 + ret void +} + +define void @test_ldst_v2i32_unaligned(ptr %a, ptr %b) { +; CHECK-NOI32X2-LABEL: test_ldst_v2i32_unaligned( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<13>; +; CHECK-NOI32X2-NEXT: .reg .b64 %rd<3>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.b64 %rd2, [test_ldst_v2i32_unaligned_param_1]; +; CHECK-NOI32X2-NEXT: ld.param.b64 %rd1, [test_ldst_v2i32_unaligned_param_0]; +; CHECK-NOI32X2-NEXT: ld.b8 %r1, [%rd1+2]; +; CHECK-NOI32X2-NEXT: shl.b32 %r2, %r1, 16; +; CHECK-NOI32X2-NEXT: ld.b8 %r3, [%rd1+3]; +; CHECK-NOI32X2-NEXT: shl.b32 %r4, %r3, 24; +; CHECK-NOI32X2-NEXT: or.b32 %r5, %r4, %r2; +; CHECK-NOI32X2-NEXT: ld.b8 %r6, [%rd1]; +; CHECK-NOI32X2-NEXT: ld.b8 %r7, [%rd1+1]; +; CHECK-NOI32X2-NEXT: ld.b8 %r8, [%rd1+4]; +; CHECK-NOI32X2-NEXT: ld.b8 %r9, [%rd1+5]; +; CHECK-NOI32X2-NEXT: ld.b8 %r10, [%rd1+6]; +; CHECK-NOI32X2-NEXT: ld.b8 %r11, [%rd1+7]; +; CHECK-NOI32X2-NEXT: st.b8 [%rd2+7], %r11; +; CHECK-NOI32X2-NEXT: st.b8 [%rd2+6], %r10; +; CHECK-NOI32X2-NEXT: st.b8 [%rd2+5], %r9; +; CHECK-NOI32X2-NEXT: st.b8 [%rd2+4], %r8; +; CHECK-NOI32X2-NEXT: st.b8 [%rd2+1], %r7; +; CHECK-NOI32X2-NEXT: st.b8 [%rd2], %r6; +; CHECK-NOI32X2-NEXT: st.b8 [%rd2+3], %r3; +; CHECK-NOI32X2-NEXT: shr.u32 %r12, %r5, 16; +; CHECK-NOI32X2-NEXT: st.b8 [%rd2+2], %r12; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_ldst_v2i32_unaligned( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b64 %rd<28>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_ldst_v2i32_unaligned_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, 
[test_ldst_v2i32_unaligned_param_0]; +; CHECK-I32X2-NEXT: ld.b8 %rd3, [%rd1]; +; CHECK-I32X2-NEXT: ld.b8 %rd4, [%rd1+1]; +; CHECK-I32X2-NEXT: shl.b64 %rd5, %rd4, 8; +; CHECK-I32X2-NEXT: or.b64 %rd6, %rd5, %rd3; +; CHECK-I32X2-NEXT: ld.b8 %rd7, [%rd1+2]; +; CHECK-I32X2-NEXT: shl.b64 %rd8, %rd7, 16; +; CHECK-I32X2-NEXT: ld.b8 %rd9, [%rd1+3]; +; CHECK-I32X2-NEXT: shl.b64 %rd10, %rd9, 24; +; CHECK-I32X2-NEXT: or.b64 %rd11, %rd10, %rd8; +; CHECK-I32X2-NEXT: or.b64 %rd12, %rd11, %rd6; +; CHECK-I32X2-NEXT: ld.b8 %rd13, [%rd1+4]; +; CHECK-I32X2-NEXT: ld.b8 %rd14, [%rd1+5]; +; CHECK-I32X2-NEXT: shl.b64 %rd15, %rd14, 8; +; CHECK-I32X2-NEXT: or.b64 %rd16, %rd15, %rd13; +; CHECK-I32X2-NEXT: ld.b8 %rd17, [%rd1+6]; +; CHECK-I32X2-NEXT: shl.b64 %rd18, %rd17, 16; +; CHECK-I32X2-NEXT: ld.b8 %rd19, [%rd1+7]; +; CHECK-I32X2-NEXT: shl.b64 %rd20, %rd19, 24; +; CHECK-I32X2-NEXT: or.b64 %rd21, %rd20, %rd18; +; CHECK-I32X2-NEXT: or.b64 %rd22, %rd21, %rd16; +; CHECK-I32X2-NEXT: shl.b64 %rd23, %rd22, 32; +; CHECK-I32X2-NEXT: or.b64 %rd24, %rd23, %rd12; +; CHECK-I32X2-NEXT: st.b8 [%rd2+6], %rd17; +; CHECK-I32X2-NEXT: shr.u64 %rd25, %rd24, 56; +; CHECK-I32X2-NEXT: st.b8 [%rd2+7], %rd25; +; CHECK-I32X2-NEXT: st.b8 [%rd2+4], %rd13; +; CHECK-I32X2-NEXT: shr.u64 %rd26, %rd24, 40; +; CHECK-I32X2-NEXT: st.b8 [%rd2+5], %rd26; +; CHECK-I32X2-NEXT: st.b8 [%rd2+1], %rd4; +; CHECK-I32X2-NEXT: st.b8 [%rd2], %rd3; +; CHECK-I32X2-NEXT: st.b8 [%rd2+3], %rd9; +; CHECK-I32X2-NEXT: shr.u64 %rd27, %rd24, 16; +; CHECK-I32X2-NEXT: st.b8 [%rd2+2], %rd27; +; CHECK-I32X2-NEXT: ret; + %t1 = load <2 x i32>, ptr %a, align 1 + store <2 x i32> %t1, ptr %b, align 1 + ret void +} + +declare <2 x i32> @test_callee(<2 x i32> %a, <2 x i32> %b) #0 + +define <2 x i32> @test_call(<2 x i32> %a, <2 x i32> %b) #0 { +; CHECK-NOI32X2-LABEL: test_call( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_call_param_1]; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_call_param_0]; +; CHECK-NOI32X2-NEXT: { // callseq 0, 0 +; CHECK-NOI32X2-NEXT: .param .align 8 .b8 param0[8]; +; CHECK-NOI32X2-NEXT: .param .align 8 .b8 param1[8]; +; CHECK-NOI32X2-NEXT: .param .align 8 .b8 retval0[8]; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [param1], {%r3, %r4}; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [param0], {%r1, %r2}; +; CHECK-NOI32X2-NEXT: call.uni (retval0), test_callee, (param0, param1); +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r5, %r6}, [retval0]; +; CHECK-NOI32X2-NEXT: } // callseq 0 +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r5, %r6}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_call( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b64 %rd<4>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_call_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_call_param_0]; +; CHECK-I32X2-NEXT: { // callseq 0, 0 +; CHECK-I32X2-NEXT: .param .align 8 .b8 param0[8]; +; CHECK-I32X2-NEXT: .param .align 8 .b8 param1[8]; +; CHECK-I32X2-NEXT: .param .align 8 .b8 retval0[8]; +; CHECK-I32X2-NEXT: st.param.b64 [param1], %rd2; +; CHECK-I32X2-NEXT: st.param.b64 [param0], %rd1; +; CHECK-I32X2-NEXT: call.uni (retval0), test_callee, (param0, param1); +; CHECK-I32X2-NEXT: ld.param.b64 %rd3, [retval0]; +; CHECK-I32X2-NEXT: } // callseq 0 +; CHECK-I32X2-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-I32X2-NEXT: ret; + %r = call <2 x i32> @test_callee(<2 x i32> %a, <2 x i32> %b) + ret <2 x i32> 
%r +} + +define <2 x i32> @test_call_flipped(<2 x i32> %a, <2 x i32> %b) #0 { +; CHECK-NOI32X2-LABEL: test_call_flipped( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_call_flipped_param_1]; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_call_flipped_param_0]; +; CHECK-NOI32X2-NEXT: { // callseq 1, 0 +; CHECK-NOI32X2-NEXT: .param .align 8 .b8 param0[8]; +; CHECK-NOI32X2-NEXT: .param .align 8 .b8 param1[8]; +; CHECK-NOI32X2-NEXT: .param .align 8 .b8 retval0[8]; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [param1], {%r1, %r2}; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [param0], {%r3, %r4}; +; CHECK-NOI32X2-NEXT: call.uni (retval0), test_callee, (param0, param1); +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r5, %r6}, [retval0]; +; CHECK-NOI32X2-NEXT: } // callseq 1 +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r5, %r6}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_call_flipped( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b64 %rd<4>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_call_flipped_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_call_flipped_param_0]; +; CHECK-I32X2-NEXT: { // callseq 1, 0 +; CHECK-I32X2-NEXT: .param .align 8 .b8 param0[8]; +; CHECK-I32X2-NEXT: .param .align 8 .b8 param1[8]; +; CHECK-I32X2-NEXT: .param .align 8 .b8 retval0[8]; +; CHECK-I32X2-NEXT: st.param.b64 [param1], %rd1; +; CHECK-I32X2-NEXT: st.param.b64 [param0], %rd2; +; CHECK-I32X2-NEXT: call.uni (retval0), test_callee, (param0, param1); +; CHECK-I32X2-NEXT: ld.param.b64 %rd3, [retval0]; +; CHECK-I32X2-NEXT: } // callseq 1 +; CHECK-I32X2-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-I32X2-NEXT: ret; + %r = call <2 x i32> @test_callee(<2 x i32> %b, <2 x i32> %a) + ret <2 x i32> %r +} + +define <2 x i32> @test_tailcall_flipped(<2 x i32> %a, <2 x i32> %b) #0 { +; CHECK-NOI32X2-LABEL: test_tailcall_flipped( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_tailcall_flipped_param_1]; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_tailcall_flipped_param_0]; +; CHECK-NOI32X2-NEXT: { // callseq 2, 0 +; CHECK-NOI32X2-NEXT: .param .align 8 .b8 param0[8]; +; CHECK-NOI32X2-NEXT: .param .align 8 .b8 param1[8]; +; CHECK-NOI32X2-NEXT: .param .align 8 .b8 retval0[8]; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [param1], {%r1, %r2}; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [param0], {%r3, %r4}; +; CHECK-NOI32X2-NEXT: call.uni (retval0), test_callee, (param0, param1); +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r5, %r6}, [retval0]; +; CHECK-NOI32X2-NEXT: } // callseq 2 +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r5, %r6}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_tailcall_flipped( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b64 %rd<4>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_tailcall_flipped_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_tailcall_flipped_param_0]; +; CHECK-I32X2-NEXT: { // callseq 2, 0 +; CHECK-I32X2-NEXT: .param .align 8 .b8 param0[8]; +; CHECK-I32X2-NEXT: .param .align 8 .b8 param1[8]; +; CHECK-I32X2-NEXT: .param .align 8 .b8 retval0[8]; +; CHECK-I32X2-NEXT: st.param.b64 [param1], %rd1; +; CHECK-I32X2-NEXT: st.param.b64 [param0], %rd2; +; CHECK-I32X2-NEXT: call.uni (retval0), 
test_callee, (param0, param1); +; CHECK-I32X2-NEXT: ld.param.b64 %rd3, [retval0]; +; CHECK-I32X2-NEXT: } // callseq 2 +; CHECK-I32X2-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-I32X2-NEXT: ret; + %r = tail call <2 x i32> @test_callee(<2 x i32> %b, <2 x i32> %a) + ret <2 x i32> %r +} + +define <2 x i32> @test_select(<2 x i32> %a, <2 x i32> %b, i1 zeroext %c) #0 { +; CHECK-NOI32X2-LABEL: test_select( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .pred %p<2>; +; CHECK-NOI32X2-NEXT: .reg .b16 %rs<3>; +; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.b8 %rs1, [test_select_param_2]; +; CHECK-NOI32X2-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-NOI32X2-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_select_param_1]; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_select_param_0]; +; CHECK-NOI32X2-NEXT: selp.b32 %r5, %r2, %r4, %p1; +; CHECK-NOI32X2-NEXT: selp.b32 %r6, %r1, %r3, %p1; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r6, %r5}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_select( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .pred %p<2>; +; CHECK-I32X2-NEXT: .reg .b16 %rs<3>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<4>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b8 %rs1, [test_select_param_2]; +; CHECK-I32X2-NEXT: and.b16 %rs2, %rs1, 1; +; CHECK-I32X2-NEXT: setp.ne.b16 %p1, %rs2, 0; +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_select_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_select_param_0]; +; CHECK-I32X2-NEXT: selp.b64 %rd3, %rd1, %rd2, %p1; +; CHECK-I32X2-NEXT: st.param.b64 [func_retval0], %rd3; +; CHECK-I32X2-NEXT: ret; + %r = select i1 %c, <2 x i32> %a, <2 x i32> %b + ret <2 x i32> %r +} + +define <2 x i32> @test_select_cc(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x i32> %d) #0 { +; CHECK-NOI32X2-LABEL: test_select_cc( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .pred %p<3>; +; CHECK-NOI32X2-NEXT: .reg .b32 %r<11>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r7, %r8}, [test_select_cc_param_3]; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r5, %r6}, [test_select_cc_param_2]; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_select_cc_param_1]; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_select_cc_param_0]; +; CHECK-NOI32X2-NEXT: setp.ne.b32 %p1, %r5, %r7; +; CHECK-NOI32X2-NEXT: setp.ne.b32 %p2, %r6, %r8; +; CHECK-NOI32X2-NEXT: selp.b32 %r9, %r2, %r4, %p2; +; CHECK-NOI32X2-NEXT: selp.b32 %r10, %r1, %r3, %p1; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r10, %r9}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_select_cc( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .pred %p<3>; +; CHECK-I32X2-NEXT: .reg .b32 %r<11>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<5>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd4, [test_select_cc_param_3]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd3, [test_select_cc_param_2]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_select_cc_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_select_cc_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd4; +; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd3; +; CHECK-I32X2-NEXT: setp.ne.b32 %p1, %r3, %r1; +; CHECK-I32X2-NEXT: setp.ne.b32 %p2, %r4, %r2; +; CHECK-I32X2-NEXT: mov.b64 {%r5, %r6}, %rd2; +; CHECK-I32X2-NEXT: mov.b64 {%r7, %r8}, %rd1; +; CHECK-I32X2-NEXT: selp.b32 %r9, %r8, %r6, %p2; 
+; CHECK-I32X2-NEXT: selp.b32 %r10, %r7, %r5, %p1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r10, %r9}; +; CHECK-I32X2-NEXT: ret; + %cc = icmp ne <2 x i32> %c, %d + %r = select <2 x i1> %cc, <2 x i32> %a, <2 x i32> %b + ret <2 x i32> %r +} + +define <2 x i16> @test_trunc_2xi32(<2 x i32> %a) #0 { +; CHECK-NOI32X2-LABEL: test_trunc_2xi32( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<4>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_trunc_2xi32_param_0]; +; CHECK-NOI32X2-NEXT: prmt.b32 %r3, %r1, %r2, 0x5410U; +; CHECK-NOI32X2-NEXT: st.param.b32 [func_retval0], %r3; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_trunc_2xi32( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_trunc_2xi32_param_0]; +; CHECK-I32X2-NEXT: st.param.b32 [func_retval0], %rd1; +; CHECK-I32X2-NEXT: ret; + %r = trunc <2 x i32> %a to <2 x i16> + ret <2 x i16> %r +} + +define <2 x i32> @test_trunc_2xi64(<2 x i64> %a) #0 { +; CHECK-LABEL: test_trunc_2xi64( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<3>; +; CHECK-NEXT: .reg .b64 %rd<3>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.v2.b64 {%rd1, %rd2}, [test_trunc_2xi64_param_0]; +; CHECK-NEXT: cvt.u32.u64 %r1, %rd2; +; CHECK-NEXT: cvt.u32.u64 %r2, %rd1; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1}; +; CHECK-NEXT: ret; + %r = trunc <2 x i64> %a to <2 x i32> + ret <2 x i32> %r +} + +define <2 x i32> @test_zext_2xi32(<2 x i16> %a) #0 { +; CHECK-LABEL: test_zext_2xi32( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [test_zext_2xi32_param_0]; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r1; +; CHECK-NEXT: cvt.u32.u16 %r2, %rs2; +; CHECK-NEXT: cvt.u32.u16 %r3, %rs1; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r3, %r2}; +; CHECK-NEXT: ret; + %r = zext <2 x i16> %a to <2 x i32> + ret <2 x i32> %r +} + +define <2 x i64> @test_zext_2xi64(<2 x i32> %a) #0 { +; CHECK-NOI32X2-LABEL: test_zext_2xi64( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<3>; +; CHECK-NOI32X2-NEXT: .reg .b64 %rd<3>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_zext_2xi64_param_0]; +; CHECK-NOI32X2-NEXT: cvt.u64.u32 %rd1, %r2; +; CHECK-NOI32X2-NEXT: cvt.u64.u32 %rd2, %r1; +; CHECK-NOI32X2-NEXT: st.param.v2.b64 [func_retval0], {%rd2, %rd1}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_zext_2xi64( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<3>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<4>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_zext_2xi64_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-I32X2-NEXT: cvt.u64.u32 %rd2, %r2; +; CHECK-I32X2-NEXT: cvt.u64.u32 %rd3, %r1; +; CHECK-I32X2-NEXT: st.param.v2.b64 [func_retval0], {%rd3, %rd2}; +; CHECK-I32X2-NEXT: ret; + %r = zext <2 x i32> %a to <2 x i64> + ret <2 x i64> %r +} + +define <2 x i32> @test_bitcast_i64_to_2xi32(i64 %a) #0 { +; CHECK-LABEL: test_bitcast_i64_to_2xi32( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [test_bitcast_i64_to_2xi32_param_0]; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd1; +; CHECK-NEXT: ret; + %r = bitcast i64 %a to <2 x i32> + ret <2 x i32> %r +} + +define <2 x i32> 
@test_bitcast_double_to_2xi32(double %a) #0 { +; CHECK-LABEL: test_bitcast_double_to_2xi32( +; CHECK: { +; CHECK-NEXT: .reg .b64 %rd<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b64 %rd1, [test_bitcast_double_to_2xi32_param_0]; +; CHECK-NEXT: st.param.b64 [func_retval0], %rd1; +; CHECK-NEXT: ret; + %r = bitcast double %a to <2 x i32> + ret <2 x i32> %r +} + +define i64 @test_bitcast_2xi32_to_i64(<2 x i32> %a) #0 { +; CHECK-NOI32X2-LABEL: test_bitcast_2xi32_to_i64( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<3>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_bitcast_2xi32_to_i64_param_0]; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r1, %r2}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_bitcast_2xi32_to_i64( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_bitcast_2xi32_to_i64_param_0]; +; CHECK-I32X2-NEXT: st.param.b64 [func_retval0], %rd1; +; CHECK-I32X2-NEXT: ret; + %r = bitcast <2 x i32> %a to i64 + ret i64 %r +} + +define double @test_bitcast_2xi32_to_double(<2 x i32> %a) #0 { +; CHECK-NOI32X2-LABEL: test_bitcast_2xi32_to_double( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<3>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_bitcast_2xi32_to_double_param_0]; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r1, %r2}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_bitcast_2xi32_to_double( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_bitcast_2xi32_to_double_param_0]; +; CHECK-I32X2-NEXT: st.param.b64 [func_retval0], %rd1; +; CHECK-I32X2-NEXT: ret; + %r = bitcast <2 x i32> %a to double + ret double %r +} + + +define <4 x half> @test_bitcast_2xi32_to_4xhalf(i32 %a) #0 { +; CHECK-LABEL: test_bitcast_2xi32_to_4xhalf( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<2>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [test_bitcast_2xi32_to_4xhalf_param_0]; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r1, 5}; +; CHECK-NEXT: ret; + %ins.0 = insertelement <2 x i32> poison, i32 %a, i32 0 + %ins.1 = insertelement <2 x i32> %ins.0, i32 5, i32 1 + %r = bitcast <2 x i32> %ins.1 to <4 x half> + ret <4 x half> %r +} + + +define <2 x i32> @test_shufflevector(<2 x i32> %a) #0 { +; CHECK-NOI32X2-LABEL: test_shufflevector( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<3>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_shufflevector_param_0]; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_shufflevector( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<3>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_shufflevector_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1}; +; CHECK-I32X2-NEXT: ret; + %s = shufflevector <2 x i32> %a, <2 x i32> poison, <2 x i32> <i32 1, i32 0> + ret <2 x i32> %s +} + +define <2 x i32> @test_shufflevector_2(<2 x i32> %a, <2 x i32> %b) #0 { +; CHECK-NOI32X2-LABEL: test_shufflevector_2( +; CHECK-NOI32X2: { +; 
CHECK-NOI32X2-NEXT: .reg .b32 %r<5>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r3, %r4}, [test_shufflevector_2_param_1]; +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_shufflevector_2_param_0]; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r4}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_shufflevector_2( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<3>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<3>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_shufflevector_2_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_shufflevector_2_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {_, %r1}, %rd2; +; CHECK-I32X2-NEXT: mov.b64 {_, %r2}, %rd1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1}; +; CHECK-I32X2-NEXT: ret; + %s = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> <i32 1, i32 3> + ret <2 x i32> %s +} + + +define <2 x i32> @test_insertelement(<2 x i32> %a, i32 %x) #0 { +; CHECK-NOI32X2-LABEL: test_insertelement( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<4>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_insertelement_param_0]; +; CHECK-NOI32X2-NEXT: ld.param.b32 %r3, [test_insertelement_param_1]; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r1, %r3}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_insertelement( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<3>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b32 %r1, [test_insertelement_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_insertelement_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r2, _}, %rd1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r2, %r1}; +; CHECK-I32X2-NEXT: ret; + %i = insertelement <2 x i32> %a, i32 %x, i64 1 + ret <2 x i32> %i +} + +define <2 x i32> @test_fptosi_2xhalf_to_2xi32(<2 x half> %a) #0 { +; CHECK-LABEL: test_fptosi_2xhalf_to_2xi32( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [test_fptosi_2xhalf_to_2xi32_param_0]; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r1; +; CHECK-NEXT: cvt.rzi.s32.f16 %r2, %rs2; +; CHECK-NEXT: cvt.rzi.s32.f16 %r3, %rs1; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r3, %r2}; +; CHECK-NEXT: ret; + %r = fptosi <2 x half> %a to <2 x i32> + ret <2 x i32> %r +} + +define <2 x i32> @test_fptoui_2xhalf_to_2xi32(<2 x half> %a) #0 { +; CHECK-LABEL: test_fptoui_2xhalf_to_2xi32( +; CHECK: { +; CHECK-NEXT: .reg .b16 %rs<3>; +; CHECK-NEXT: .reg .b32 %r<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: +; CHECK-NEXT: ld.param.b32 %r1, [test_fptoui_2xhalf_to_2xi32_param_0]; +; CHECK-NEXT: mov.b32 {%rs1, %rs2}, %r1; +; CHECK-NEXT: cvt.rzi.u32.f16 %r2, %rs2; +; CHECK-NEXT: cvt.rzi.u32.f16 %r3, %rs1; +; CHECK-NEXT: st.param.v2.b32 [func_retval0], {%r3, %r2}; +; CHECK-NEXT: ret; + %r = fptoui <2 x half> %a to <2 x i32> + ret <2 x i32> %r +} + +define void @test_srem_v2i32(ptr %a, ptr %b, ptr %c) { +; CHECK-LABEL: test_srem_v2i32( +; CHECK: { +; CHECK-NEXT: .reg .b32 %r<7>; +; CHECK-NEXT: .reg .b64 %rd<4>; +; CHECK-EMPTY: +; CHECK-NEXT: // %bb.0: // %entry +; CHECK-NEXT: ld.param.b64 %rd3, [test_srem_v2i32_param_2]; +; CHECK-NEXT: ld.param.b64 %rd2, [test_srem_v2i32_param_1]; +; CHECK-NEXT: ld.param.b64 %rd1, [test_srem_v2i32_param_0]; +; CHECK-NEXT: 
ld.v2.b32 {%r1, %r2}, [%rd1]; +; CHECK-NEXT: ld.v2.b32 {%r3, %r4}, [%rd2]; +; CHECK-NEXT: rem.s32 %r5, %r2, %r4; +; CHECK-NEXT: rem.s32 %r6, %r1, %r3; +; CHECK-NEXT: st.v2.b32 [%rd3], {%r6, %r5}; +; CHECK-NEXT: ret; +entry: + %t57 = load <2 x i32>, ptr %a, align 8 + %t59 = load <2 x i32>, ptr %b, align 8 + %x = srem <2 x i32> %t57, %t59 + store <2 x i32> %x, ptr %c, align 8 + ret void +} + +define void @test_srem_v3i32(ptr %a, ptr %b, ptr %c) { +; CHECK-NOI32X2-LABEL: test_srem_v3i32( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<10>; +; CHECK-NOI32X2-NEXT: .reg .b64 %rd<10>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: // %entry +; CHECK-NOI32X2-NEXT: ld.param.b64 %rd3, [test_srem_v3i32_param_2]; +; CHECK-NOI32X2-NEXT: ld.param.b64 %rd2, [test_srem_v3i32_param_1]; +; CHECK-NOI32X2-NEXT: ld.param.b64 %rd1, [test_srem_v3i32_param_0]; +; CHECK-NOI32X2-NEXT: ld.b32 %r1, [%rd1+8]; +; CHECK-NOI32X2-NEXT: ld.b64 %rd4, [%rd1]; +; CHECK-NOI32X2-NEXT: { .reg .b32 tmp; mov.b64 {tmp, %r2}, %rd4; } +; CHECK-NOI32X2-NEXT: cvt.u32.u64 %r3, %rd4; +; CHECK-NOI32X2-NEXT: ld.b32 %r4, [%rd2+8]; +; CHECK-NOI32X2-NEXT: ld.b64 %rd5, [%rd2]; +; CHECK-NOI32X2-NEXT: { .reg .b32 tmp; mov.b64 {tmp, %r5}, %rd5; } +; CHECK-NOI32X2-NEXT: cvt.u32.u64 %r6, %rd5; +; CHECK-NOI32X2-NEXT: rem.s32 %r7, %r3, %r6; +; CHECK-NOI32X2-NEXT: cvt.u64.u32 %rd6, %r7; +; CHECK-NOI32X2-NEXT: rem.s32 %r8, %r2, %r5; +; CHECK-NOI32X2-NEXT: cvt.u64.u32 %rd7, %r8; +; CHECK-NOI32X2-NEXT: shl.b64 %rd8, %rd7, 32; +; CHECK-NOI32X2-NEXT: or.b64 %rd9, %rd6, %rd8; +; CHECK-NOI32X2-NEXT: rem.s32 %r9, %r1, %r4; +; CHECK-NOI32X2-NEXT: st.b32 [%rd3+8], %r9; +; CHECK-NOI32X2-NEXT: st.b64 [%rd3], %rd9; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_srem_v3i32( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<10>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<4>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: // %entry +; CHECK-I32X2-NEXT: ld.param.b64 %rd3, [test_srem_v3i32_param_2]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_srem_v3i32_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_srem_v3i32_param_0]; +; CHECK-I32X2-NEXT: ld.v2.b32 {%r1, %r2}, [%rd1]; +; CHECK-I32X2-NEXT: ld.b32 %r3, [%rd1+8]; +; CHECK-I32X2-NEXT: ld.v2.b32 {%r4, %r5}, [%rd2]; +; CHECK-I32X2-NEXT: ld.b32 %r6, [%rd2+8]; +; CHECK-I32X2-NEXT: rem.s32 %r7, %r3, %r6; +; CHECK-I32X2-NEXT: rem.s32 %r8, %r2, %r5; +; CHECK-I32X2-NEXT: rem.s32 %r9, %r1, %r4; +; CHECK-I32X2-NEXT: st.v2.b32 [%rd3], {%r9, %r8}; +; CHECK-I32X2-NEXT: st.b32 [%rd3+8], %r7; +; CHECK-I32X2-NEXT: ret; +entry: + %t57 = load <3 x i32>, ptr %a, align 8 + %t59 = load <3 x i32>, ptr %b, align 8 + %x = srem <3 x i32> %t57, %t59 + store <3 x i32> %x, ptr %c, align 8 + ret void +} + +define void @test_sext_v2i1_to_v2i32(ptr %a, ptr %b, ptr %c) { +; CHECK-NOI32X2-LABEL: test_sext_v2i1_to_v2i32( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .pred %p<3>; +; CHECK-NOI32X2-NEXT: .reg .b32 %r<7>; +; CHECK-NOI32X2-NEXT: .reg .b64 %rd<4>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: // %entry +; CHECK-NOI32X2-NEXT: ld.param.b64 %rd3, [test_sext_v2i1_to_v2i32_param_2]; +; CHECK-NOI32X2-NEXT: ld.param.b64 %rd2, [test_sext_v2i1_to_v2i32_param_1]; +; CHECK-NOI32X2-NEXT: ld.param.b64 %rd1, [test_sext_v2i1_to_v2i32_param_0]; +; CHECK-NOI32X2-NEXT: ld.b32 %r1, [%rd1]; +; CHECK-NOI32X2-NEXT: ld.b32 %r2, [%rd1+4]; +; CHECK-NOI32X2-NEXT: ld.b32 %r3, [%rd2]; +; CHECK-NOI32X2-NEXT: ld.b32 %r4, [%rd2+4]; +; CHECK-NOI32X2-NEXT: setp.gt.u32 %p1, %r2, %r4; +; CHECK-NOI32X2-NEXT: setp.gt.u32 %p2, 
%r1, %r3; +; CHECK-NOI32X2-NEXT: selp.b32 %r5, -1, 0, %p2; +; CHECK-NOI32X2-NEXT: selp.b32 %r6, -1, 0, %p1; +; CHECK-NOI32X2-NEXT: st.b32 [%rd3+4], %r6; +; CHECK-NOI32X2-NEXT: st.b32 [%rd3], %r5; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_sext_v2i1_to_v2i32( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .pred %p<3>; +; CHECK-I32X2-NEXT: .reg .b32 %r<7>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<14>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: // %entry +; CHECK-I32X2-NEXT: ld.param.b64 %rd3, [test_sext_v2i1_to_v2i32_param_2]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd2, [test_sext_v2i1_to_v2i32_param_1]; +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_sext_v2i1_to_v2i32_param_0]; +; CHECK-I32X2-NEXT: ld.b32 %rd4, [%rd1]; +; CHECK-I32X2-NEXT: ld.b32 %rd5, [%rd1+4]; +; CHECK-I32X2-NEXT: shl.b64 %rd6, %rd5, 32; +; CHECK-I32X2-NEXT: or.b64 %rd7, %rd6, %rd4; +; CHECK-I32X2-NEXT: ld.b32 %rd8, [%rd2]; +; CHECK-I32X2-NEXT: ld.b32 %rd9, [%rd2+4]; +; CHECK-I32X2-NEXT: shl.b64 %rd10, %rd9, 32; +; CHECK-I32X2-NEXT: or.b64 %rd11, %rd10, %rd8; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd11; +; CHECK-I32X2-NEXT: mov.b64 {%r3, %r4}, %rd7; +; CHECK-I32X2-NEXT: setp.gt.u32 %p1, %r3, %r1; +; CHECK-I32X2-NEXT: setp.gt.u32 %p2, %r4, %r2; +; CHECK-I32X2-NEXT: selp.b32 %r5, -1, 0, %p2; +; CHECK-I32X2-NEXT: selp.b32 %r6, -1, 0, %p1; +; CHECK-I32X2-NEXT: mov.b64 %rd12, {%r6, %r5}; +; CHECK-I32X2-NEXT: st.b32 [%rd3], %rd12; +; CHECK-I32X2-NEXT: shr.u64 %rd13, %rd12, 32; +; CHECK-I32X2-NEXT: st.b32 [%rd3+4], %rd13; +; CHECK-I32X2-NEXT: ret; +entry: + %t1 = load <2 x i32>, ptr %a, align 4 + %t2 = load <2 x i32>, ptr %b, align 4 + %t5 = icmp ugt <2 x i32> %t1, %t2 + %t6 = sext <2 x i1> %t5 to <2 x i32> + store <2 x i32> %t6, ptr %c, align 4 + ret void +} + +define <2 x float> @test_uitofp_v2i32(<2 x i32> %a) { +; CHECK-NOI32X2-LABEL: test_uitofp_v2i32( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<5>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_uitofp_v2i32_param_0]; +; CHECK-NOI32X2-NEXT: cvt.rn.f32.u32 %r3, %r2; +; CHECK-NOI32X2-NEXT: cvt.rn.f32.u32 %r4, %r1; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_uitofp_v2i32( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<5>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, [test_uitofp_v2i32_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-I32X2-NEXT: cvt.rn.f32.u32 %r3, %r2; +; CHECK-I32X2-NEXT: cvt.rn.f32.u32 %r4, %r1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-I32X2-NEXT: ret; + %r = uitofp <2 x i32> %a to <2 x float> + ret <2 x float> %r +} + +define <2 x float> @test_sitofp_v2i32(<2 x i32> %a) { +; CHECK-NOI32X2-LABEL: test_sitofp_v2i32( +; CHECK-NOI32X2: { +; CHECK-NOI32X2-NEXT: .reg .b32 %r<5>; +; CHECK-NOI32X2-EMPTY: +; CHECK-NOI32X2-NEXT: // %bb.0: +; CHECK-NOI32X2-NEXT: ld.param.v2.b32 {%r1, %r2}, [test_sitofp_v2i32_param_0]; +; CHECK-NOI32X2-NEXT: cvt.rn.f32.s32 %r3, %r2; +; CHECK-NOI32X2-NEXT: cvt.rn.f32.s32 %r4, %r1; +; CHECK-NOI32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-NOI32X2-NEXT: ret; +; +; CHECK-I32X2-LABEL: test_sitofp_v2i32( +; CHECK-I32X2: { +; CHECK-I32X2-NEXT: .reg .b32 %r<5>; +; CHECK-I32X2-NEXT: .reg .b64 %rd<2>; +; CHECK-I32X2-EMPTY: +; CHECK-I32X2-NEXT: // %bb.0: +; CHECK-I32X2-NEXT: ld.param.b64 %rd1, 
[test_sitofp_v2i32_param_0]; +; CHECK-I32X2-NEXT: mov.b64 {%r1, %r2}, %rd1; +; CHECK-I32X2-NEXT: cvt.rn.f32.s32 %r3, %r2; +; CHECK-I32X2-NEXT: cvt.rn.f32.s32 %r4, %r1; +; CHECK-I32X2-NEXT: st.param.v2.b32 [func_retval0], {%r4, %r3}; +; CHECK-I32X2-NEXT: ret; + %r = sitofp <2 x i32> %a to <2 x float> + ret <2 x float> %r +} + +attributes #0 = { nounwind } diff --git a/llvm/test/CodeGen/RISCV/features-info.ll b/llvm/test/CodeGen/RISCV/features-info.ll index 1a7a72d..693a40d 100644 --- a/llvm/test/CodeGen/RISCV/features-info.ll +++ b/llvm/test/CodeGen/RISCV/features-info.ll @@ -142,6 +142,7 @@ ; CHECK-NEXT: shvstvecd - 'Shvstvecd' (vstvec supports Direct mode). ; CHECK-NEXT: shxadd-load-fusion - Enable SH(1|2|3)ADD(.UW) + load macrofusion. ; CHECK-NEXT: sifive7 - SiFive 7-Series processors. +; CHECK-NEXT: single-element-vec-fp64 - Certain vector FP64 operations produce a single result element per cycle. ; CHECK-NEXT: smaia - 'Smaia' (Advanced Interrupt Architecture Machine Level). ; CHECK-NEXT: smcdeleg - 'Smcdeleg' (Counter Delegation Machine Level). ; CHECK-NEXT: smcntrpmf - 'Smcntrpmf' (Cycle and Instret Privilege Mode Filtering). diff --git a/llvm/test/CodeGen/X86/fmaxnum.ll b/llvm/test/CodeGen/X86/fmaxnum.ll index d6252cc..150bef0 100644 --- a/llvm/test/CodeGen/X86/fmaxnum.ll +++ b/llvm/test/CodeGen/X86/fmaxnum.ll @@ -645,11 +645,47 @@ define float @test_maxnum_const_op2(float %x) { ret float %r } -define float @test_maxnum_const_nan(float %x) { -; CHECK-LABEL: test_maxnum_const_nan: -; CHECK: # %bb.0: -; CHECK-NEXT: retq - %r = call float @llvm.maxnum.f32(float %x, float 0x7fff000000000000) +define float @test_maxnum_const_nan(float %x, float %y) { +; SSE-LABEL: test_maxnum_const_nan: +; SSE: # %bb.0: +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_maxnum_const_nan: +; AVX: # %bb.0: +; AVX-NEXT: vmovaps %xmm1, %xmm0 +; AVX-NEXT: retq + %r = call float @llvm.maxnum.f32(float %y, float 0x7fff000000000000) + ret float %r +} + +; nnan maxnum(Y, -inf) -> Y +define float @test_maxnum_neg_inf_nnan(float %x, float %y) nounwind { +; SSE-LABEL: test_maxnum_neg_inf_nnan: +; SSE: # %bb.0: +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_maxnum_neg_inf_nnan: +; AVX: # %bb.0: +; AVX-NEXT: vmovaps %xmm1, %xmm0 +; AVX-NEXT: retq + %r = call nnan float @llvm.maxnum.f32(float %y, float 0xfff0000000000000) + ret float %r +} + +; Test SNaN quieting +define float @test_maxnum_snan(float %x) { +; SSE-LABEL: test_maxnum_snan: +; SSE: # %bb.0: +; SSE-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0] +; SSE-NEXT: retq +; +; AVX-LABEL: test_maxnum_snan: +; AVX: # %bb.0: +; AVX-NEXT: vmovss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0] +; AVX-NEXT: retq + %r = call float @llvm.maxnum.f32(float 0x7ff4000000000000, float %x) ret float %r } diff --git a/llvm/test/CodeGen/X86/fminimum-fmaximum.ll b/llvm/test/CodeGen/X86/fminimum-fmaximum.ll index 864c233..06515e4 100644 --- a/llvm/test/CodeGen/X86/fminimum-fmaximum.ll +++ b/llvm/test/CodeGen/X86/fminimum-fmaximum.ll @@ -2649,3 +2649,102 @@ define <4 x bfloat> @test_fmaximum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) { %r = call <4 x bfloat> @llvm.maximum.v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) ret <4 x bfloat> %r } + +; nnan minimum(Y, +inf) -> Y +define float @test_fminimum_inf_nnan(float %x, float %y) nounwind { +; SSE2-LABEL: test_fminimum_inf_nnan: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; AVX-LABEL: test_fminimum_inf_nnan: +; AVX: # %bb.0: +; AVX-NEXT: vmovaps %xmm1, %xmm0 +; 
AVX-NEXT: retq +; +; AVX10_2-LABEL: test_fminimum_inf_nnan: +; AVX10_2: # %bb.0: +; AVX10_2-NEXT: vmovaps %xmm1, %xmm0 +; AVX10_2-NEXT: retq +; +; X86-LABEL: test_fminimum_inf_nnan: +; X86: # %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: retl + %1 = call nnan float @llvm.minimum.f32(float %y, float 0x7ff0000000000000) + ret float %1 +} + +; nnan maximum(Y, -inf) -> Y +define float @test_fmaximum_neg_inf_nnan(float %x, float %y) nounwind { +; SSE2-LABEL: test_fmaximum_neg_inf_nnan: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; AVX-LABEL: test_fmaximum_neg_inf_nnan: +; AVX: # %bb.0: +; AVX-NEXT: vmovaps %xmm1, %xmm0 +; AVX-NEXT: retq +; +; AVX10_2-LABEL: test_fmaximum_neg_inf_nnan: +; AVX10_2: # %bb.0: +; AVX10_2-NEXT: vmovaps %xmm1, %xmm0 +; AVX10_2-NEXT: retq +; +; X86-LABEL: test_fmaximum_neg_inf_nnan: +; X86: # %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: retl + %1 = call nnan float @llvm.maximum.f32(float %y, float 0xfff0000000000000) + ret float %1 +} + +; Test SNaN quieting +define float @test_fmaximum_snan(float %x) { +; SSE2-LABEL: test_fmaximum_snan: +; SSE2: # %bb.0: +; SSE2-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0] +; SSE2-NEXT: retq +; +; AVX-LABEL: test_fmaximum_snan: +; AVX: # %bb.0: +; AVX-NEXT: vmovss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0] +; AVX-NEXT: retq +; +; AVX10_2-LABEL: test_fmaximum_snan: +; AVX10_2: # %bb.0: +; AVX10_2-NEXT: vmovss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0] +; AVX10_2-NEXT: retq +; +; X86-LABEL: test_fmaximum_snan: +; X86: # %bb.0: +; X86-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}} +; X86-NEXT: retl + %1 = tail call float @llvm.maximum.f32(float 0x7ff4000000000000, float %x) + ret float %1 +} + +define float @test_fminimum_snan(float %x) { +; SSE2-LABEL: test_fminimum_snan: +; SSE2: # %bb.0: +; SSE2-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0] +; SSE2-NEXT: retq +; +; AVX-LABEL: test_fminimum_snan: +; AVX: # %bb.0: +; AVX-NEXT: vmovss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0] +; AVX-NEXT: retq +; +; AVX10_2-LABEL: test_fminimum_snan: +; AVX10_2: # %bb.0: +; AVX10_2-NEXT: vmovss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0] +; AVX10_2-NEXT: retq +; +; X86-LABEL: test_fminimum_snan: +; X86: # %bb.0: +; X86-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}} +; X86-NEXT: retl + %1 = tail call float @llvm.minimum.f32(float 0x7ff4000000000000, float %x) + ret float %1 +} diff --git a/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll b/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll index c66473e..0fe107c 100644 --- a/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll +++ b/llvm/test/CodeGen/X86/fminimumnum-fmaximumnum.ll @@ -2479,3 +2479,102 @@ define <4 x bfloat> @test_fmaximumnum_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) n %r = call <4 x bfloat> @llvm.maximumnum.v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) ret <4 x bfloat> %r } + +; nnan minimumnum(Y, +inf) -> Y +define float @test_fminimumnum_inf_nnan(float %x, float %y) nounwind { +; SSE2-LABEL: test_fminimumnum_inf_nnan: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; AVX-LABEL: test_fminimumnum_inf_nnan: +; AVX: # %bb.0: +; AVX-NEXT: vmovaps %xmm1, %xmm0 +; AVX-NEXT: retq +; +; AVX10_2-LABEL: test_fminimumnum_inf_nnan: +; AVX10_2: # %bb.0: +; AVX10_2-NEXT: vmovaps %xmm1, %xmm0 +; AVX10_2-NEXT: retq +; +; X86-LABEL: test_fminimumnum_inf_nnan: +; X86: # %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: retl + %1 = call nnan float @llvm.minimumnum.f32(float %y, float 0x7ff0000000000000) + ret float %1 +} + +; nnan 
maximumnum(Y, -inf) -> Y +define float @test_fmaximumnum_neg_inf_nnan(float %x, float %y) nounwind { +; SSE2-LABEL: test_fmaximumnum_neg_inf_nnan: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; AVX-LABEL: test_fmaximumnum_neg_inf_nnan: +; AVX: # %bb.0: +; AVX-NEXT: vmovaps %xmm1, %xmm0 +; AVX-NEXT: retq +; +; AVX10_2-LABEL: test_fmaximumnum_neg_inf_nnan: +; AVX10_2: # %bb.0: +; AVX10_2-NEXT: vmovaps %xmm1, %xmm0 +; AVX10_2-NEXT: retq +; +; X86-LABEL: test_fmaximumnum_neg_inf_nnan: +; X86: # %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: retl + %1 = call nnan float @llvm.maximumnum.f32(float %y, float 0xfff0000000000000) + ret float %1 +} + +; Test we propagate the non-NaN arg, even if one arg is SNaN +define float @test_fmaximumnum_snan(float %x, float %y) { +; SSE2-LABEL: test_fmaximumnum_snan: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; AVX-LABEL: test_fmaximumnum_snan: +; AVX: # %bb.0: +; AVX-NEXT: vmovaps %xmm1, %xmm0 +; AVX-NEXT: retq +; +; AVX10_2-LABEL: test_fmaximumnum_snan: +; AVX10_2: # %bb.0: +; AVX10_2-NEXT: vmovaps %xmm1, %xmm0 +; AVX10_2-NEXT: retq +; +; X86-LABEL: test_fmaximumnum_snan: +; X86: # %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: retl + %1 = tail call float @llvm.maximumnum.f32(float 0x7ff4000000000000, float %y) + ret float %1 +} + +define float @test_fminimumnum_snan(float %x, float %y) { +; SSE2-LABEL: test_fminimumnum_snan: +; SSE2: # %bb.0: +; SSE2-NEXT: movaps %xmm1, %xmm0 +; SSE2-NEXT: retq +; +; AVX-LABEL: test_fminimumnum_snan: +; AVX: # %bb.0: +; AVX-NEXT: vmovaps %xmm1, %xmm0 +; AVX-NEXT: retq +; +; AVX10_2-LABEL: test_fminimumnum_snan: +; AVX10_2: # %bb.0: +; AVX10_2-NEXT: vmovaps %xmm1, %xmm0 +; AVX10_2-NEXT: retq +; +; X86-LABEL: test_fminimumnum_snan: +; X86: # %bb.0: +; X86-NEXT: flds {{[0-9]+}}(%esp) +; X86-NEXT: retl + %1 = tail call float @llvm.minimumnum.f32(float 0x7ff4000000000000, float %y) + ret float %1 +} diff --git a/llvm/test/CodeGen/X86/fminnum.ll b/llvm/test/CodeGen/X86/fminnum.ll index 0ef8fde..4aa1a61 100644 --- a/llvm/test/CodeGen/X86/fminnum.ll +++ b/llvm/test/CodeGen/X86/fminnum.ll @@ -645,11 +645,47 @@ define float @test_minnum_const_op2(float %x) { ret float %r } -define float @test_minnum_const_nan(float %x) { -; CHECK-LABEL: test_minnum_const_nan: -; CHECK: # %bb.0: -; CHECK-NEXT: retq - %r = call float @llvm.minnum.f32(float %x, float 0x7fff000000000000) +define float @test_minnum_const_nan(float %x, float %y) { +; SSE-LABEL: test_minnum_const_nan: +; SSE: # %bb.0: +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_minnum_const_nan: +; AVX: # %bb.0: +; AVX-NEXT: vmovaps %xmm1, %xmm0 +; AVX-NEXT: retq + %r = call float @llvm.minnum.f32(float %y, float 0x7fff000000000000) + ret float %r +} + +; nnan minnum(Y, +inf) -> Y +define float @test_minnum_inf_nnan(float %x, float %y) nounwind { +; SSE-LABEL: test_minnum_inf_nnan: +; SSE: # %bb.0: +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_minnum_inf_nnan: +; AVX: # %bb.0: +; AVX-NEXT: vmovaps %xmm1, %xmm0 +; AVX-NEXT: retq + %r = call nnan float @llvm.minnum.f32(float %y, float 0x7ff0000000000000) + ret float %r +} + +; Test SNaN quieting +define float @test_minnum_snan(float %x) { +; SSE-LABEL: test_minnum_snan: +; SSE: # %bb.0: +; SSE-NEXT: movss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0] +; SSE-NEXT: retq +; +; AVX-LABEL: test_minnum_snan: +; AVX: # %bb.0: +; AVX-NEXT: vmovss {{.*#+}} xmm0 = [NaN,0.0E+0,0.0E+0,0.0E+0] +; AVX-NEXT: retq + %r = call 
float @llvm.minnum.f32(float 0x7ff4000000000000, float %x) ret float %r } diff --git a/llvm/test/CodeGen/X86/pgo-profile-o0.ll b/llvm/test/CodeGen/X86/pgo-profile-o0.ll new file mode 100644 index 0000000..f9704fc --- /dev/null +++ b/llvm/test/CodeGen/X86/pgo-profile-o0.ll @@ -0,0 +1,49 @@ +; RUN: llc -mtriple=x86_64-- -O0 -pgo-kind=pgo-sample-use-pipeline -debug-pass=Structure %s -o /dev/null 2>&1 | FileCheck %s --check-prefix=PASSES +; RUN: llc -mtriple=x86_64-- -O0 -pgo-kind=pgo-sample-use-pipeline -debug-only=branch-prob %s -o /dev/null 2>&1 | FileCheck %s --check-prefix=BRANCH_PROB +; RUN: llc -mtriple=x86_64-- -O0 -pgo-kind=pgo-sample-use-pipeline -stop-after=finalize-isel %s -o - | FileCheck %s --check-prefix=MIR + +; REQUIRES: asserts + +; This test verifies that PGO profile information (branch weights) is preserved +; during instruction selection at -O0. + +; Test function with explicit branch weights from PGO. +define i32 @test_pgo_preservation(i32 %x) !prof !15 { +entry: + %cmp = icmp sgt i32 %x, 10 + ; This branch has bias: 97 taken vs 3 not taken + br i1 %cmp, label %if.then, label %if.else, !prof !16 + +if.then: + ; Hot path - should have high frequency + %add = add nsw i32 %x, 100 + br label %if.end + +if.else: + ; Cold path - should have low frequency + %sub = sub nsw i32 %x, 50 + br label %if.end + +if.end: + %result = phi i32 [ %add, %if.then ], [ %sub, %if.else ] + ret i32 %result +} + +; Profile metadata with branch weights 97:3. +!15 = !{!"function_entry_count", i64 100} +!16 = !{!"branch_weights", i32 97, i32 3} + +; Verify that Branch Probability Analysis runs at O0. +; PASSES: Branch Probability Analysis + +; Verify that the branch probabilities reflect the exact profile data. +; BRANCH_PROB: ---- Branch Probability Info : test_pgo_preservation ---- +; BRANCH_PROB: set edge entry -> 0 successor probability to {{.*}} = 97.00% +; BRANCH_PROB: set edge entry -> 1 successor probability to {{.*}} = 3.00% + +; Verify that machine IR preserves the branch probabilities from profile data +; MIR: bb.0.entry: +; MIR-NEXT: successors: %bb.{{[0-9]+}}({{0x03d70a3d|0x7c28f5c3}}), %bb.{{[0-9]+}}({{0x7c28f5c3|0x03d70a3d}}) +; The two successor probability values should be: +; - 0x7c28f5c3: approximately 97% (high probability successor) +; - 0x03d70a3d: approximately 3% (low probability successor) |
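
The hex successor probabilities quoted in the pgo-profile-o0.ll comments above can be checked by hand. Below is a minimal sketch of that arithmetic, assuming the MIR printer emits the raw numerator of a fixed-point branch probability with a 2^31 denominator; the SCALE constant and the raw_to_percent helper are illustrative names for this note only, not part of the test or of any LLVM API.

# Sketch, not part of the test: convert the raw MIR successor probabilities
# to percentages, assuming a fixed-point encoding with a 2^31 denominator.
SCALE = 1 << 31  # assumed denominator of the fixed-point probability

def raw_to_percent(raw: int) -> float:
    # Scale the raw numerator into a percentage of the assumed denominator.
    return 100.0 * raw / SCALE

print(raw_to_percent(0x7C28F5C3))  # ~97.0, matching branch weight 97 of 100
print(raw_to_percent(0x03D70A3D))  # ~3.0, matching branch weight 3 of 100

Running the sketch reproduces the approximately 97% and 3% figures that the BRANCH_PROB and MIR check lines expect; the regex alternation in the MIR check line simply allows the two successors to appear in either order.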