; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86-SSE
; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx | FileCheck %s --check-prefix=X86-AVX
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64-SSE
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=X64-AVX

;PR29079

define <4 x float> @mask_ucvt_4i32_4f32(<4 x i32> %a) {
; X86-SSE-LABEL: mask_ucvt_4i32_4f32:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
; X86-SSE-NEXT:    retl
;
; X86-AVX-LABEL: mask_ucvt_4i32_4f32:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X86-AVX-NEXT:    retl
;
; X64-SSE-LABEL: mask_ucvt_4i32_4f32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: mask_ucvt_4i32_4f32:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X64-AVX-NEXT:    retq
  %and = and <4 x i32> %a, <i32 127, i32 255, i32 4095, i32 65535>
  %cvt = uitofp <4 x i32> %and to <4 x float>
  ret <4 x float> %cvt
}

define <4 x double> @mask_ucvt_4i32_4f64(<4 x i32> %a) {
; X86-SSE-LABEL: mask_ucvt_4i32_4f64:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT:    cvtdq2pd %xmm0, %xmm2
; X86-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; X86-SSE-NEXT:    cvtdq2pd %xmm0, %xmm1
; X86-SSE-NEXT:    movaps %xmm2, %xmm0
; X86-SSE-NEXT:    retl
;
; X86-AVX-LABEL: mask_ucvt_4i32_4f64:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT:    vcvtdq2pd %xmm0, %ymm0
; X86-AVX-NEXT:    retl
;
; X64-SSE-LABEL: mask_ucvt_4i32_4f64:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE-NEXT:    cvtdq2pd %xmm0, %xmm2
; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; X64-SSE-NEXT:    cvtdq2pd %xmm0, %xmm1
; X64-SSE-NEXT:    movaps %xmm2, %xmm0
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: mask_ucvt_4i32_4f64:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT:    vcvtdq2pd %xmm0, %ymm0
; X64-AVX-NEXT:    retq
  %and = and <4 x i32> %a, <i32 127, i32 255, i32 4095, i32 65535>
  %cvt = uitofp <4 x i32> %and to <4 x double>
  ret <4 x double> %cvt
}

; Regression noticed in D56387
define <4 x float> @lshr_truncate_mask_ucvt_4i64_4f32(ptr %p0) {
; X86-SSE-LABEL: lshr_truncate_mask_ucvt_4i64_4f32:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT:    movups (%eax), %xmm0
; X86-SSE-NEXT:    movups 16(%eax), %xmm1
; X86-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X86-SSE-NEXT:    psrld $16, %xmm0
; X86-SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
; X86-SSE-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT:    retl
;
; X86-AVX-LABEL: lshr_truncate_mask_ucvt_4i64_4f32:
; X86-AVX:       # %bb.0:
; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT:    vmovups (%eax), %xmm0
; X86-AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],mem[0,2]
; X86-AVX-NEXT:    vpsrld $16, %xmm0, %xmm0
; X86-AVX-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X86-AVX-NEXT:    vmulps {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
; X86-AVX-NEXT:    retl
;
; X64-SSE-LABEL: lshr_truncate_mask_ucvt_4i64_4f32:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movups (%rdi), %xmm0
; X64-SSE-NEXT:    movups 16(%rdi), %xmm1
; X64-SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; X64-SSE-NEXT:    psrld $16, %xmm0
; X64-SSE-NEXT:    cvtdq2ps %xmm0, %xmm0
; X64-SSE-NEXT:    mulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE-NEXT:    retq
;
; X64-AVX-LABEL: lshr_truncate_mask_ucvt_4i64_4f32:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vmovups (%rdi), %xmm0
; X64-AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],mem[0,2]
; X64-AVX-NEXT:    vpsrld $16, %xmm0, %xmm0
; X64-AVX-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X64-AVX-NEXT:    vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT:    retq
  %load = load <4 x i64>, ptr %p0, align 2
  %lshr = lshr <4 x i64> %load, <i64 16, i64 16, i64 16, i64 16>
  %and = and <4 x i64> %lshr, <i64 65535, i64 65535, i64 65535, i64 65535>
  %uitofp = uitofp <4 x i64> %and to <4 x float>
  %fmul = fmul <4 x float> %uitofp, <float 0x3EF0001000000000, float 0x3EF0001000000000, float 0x3EF0001000000000, float 0x3EF0001000000000>
  ret <4 x float> %fmul
}