; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1OR2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1OR2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512VL

define void @widen_fdiv_v2f32_v4f32(ptr %a0, ptr %b0, ptr %c0) {
; SSE-LABEL: widen_fdiv_v2f32_v4f32:
; SSE:       # %bb.0:
; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT:    divps %xmm2, %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT:    divps %xmm2, %xmm1
; SSE-NEXT:    movlps %xmm0, (%rdx)
; SSE-NEXT:    movlps %xmm1, 8(%rdx)
; SSE-NEXT:    retq
;
; AVX-LABEL: widen_fdiv_v2f32_v4f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX-NEXT:    vdivps %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX-NEXT:    vdivps %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT:    vmovups %xmm0, (%rdx)
; AVX-NEXT:    retq
  %a2 = getelementptr inbounds i8, ptr %a0, i64 8
  %b2 = getelementptr inbounds i8, ptr %b0, i64 8
  %c2 = getelementptr inbounds i8, ptr %c0, i64 8
  %va0 = load <2 x float>, ptr %a0, align 4
  %vb0 = load <2 x float>, ptr %b0, align 4
  %va2 = load <2 x float>, ptr %a2, align 4
  %vb2 = load <2 x float>, ptr %b2, align 4
  %vc0 = fdiv <2 x float> %va0, %vb0
  %vc2 = fdiv <2 x float> %va2, %vb2
  store <2 x float> %vc0, ptr %c0, align 4
  store <2 x float> %vc2, ptr %c2, align 4
  ret void
}

define void @widen_fdiv_v2f32_v8f32(ptr %a0, ptr %b0, ptr %c0) {
; SSE-LABEL: widen_fdiv_v2f32_v8f32:
; SSE:       # %bb.0:
; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
; SSE-NEXT:    movsd {{.*#+}} xmm4 = mem[0],zero
; SSE-NEXT:    divps %xmm4, %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm4 = mem[0],zero
; SSE-NEXT:    divps %xmm4, %xmm1
; SSE-NEXT:    movsd {{.*#+}} xmm4 = mem[0],zero
; SSE-NEXT:    divps %xmm4, %xmm2
; SSE-NEXT:    movsd {{.*#+}} xmm4 = mem[0],zero
; SSE-NEXT:    divps %xmm4, %xmm3
; SSE-NEXT:    movlps %xmm0, (%rdx)
; SSE-NEXT:    movlps %xmm1, 8(%rdx)
; SSE-NEXT:    movlps %xmm2, 16(%rdx)
; SSE-NEXT:    movlps %xmm3, 24(%rdx)
; SSE-NEXT:    retq
;
; AVX1OR2-LABEL: widen_fdiv_v2f32_v8f32:
; AVX1OR2:       # %bb.0:
; AVX1OR2-NEXT:    vmovups (%rdi), %ymm0
; AVX1OR2-NEXT:    vdivps (%rsi), %ymm0, %ymm0
; AVX1OR2-NEXT:    vmovups %ymm0, (%rdx)
; AVX1OR2-NEXT:    vzeroupper
; AVX1OR2-NEXT:    retq
;
; AVX512F-LABEL: widen_fdiv_v2f32_v8f32:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vmovups (%rdi), %ymm0
; AVX512F-NEXT:    vdivps (%rsi), %ymm0, %ymm0
; AVX512F-NEXT:    vmovups %ymm0, (%rdx)
; AVX512F-NEXT:    vzeroupper
; AVX512F-NEXT:    retq
;
; AVX512VL-LABEL: widen_fdiv_v2f32_v8f32:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
; AVX512VL-NEXT:    vdivps %xmm5, %xmm1, %xmm1
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
; AVX512VL-NEXT:    vdivps %xmm6, %xmm3, %xmm3
; AVX512VL-NEXT:    vinsertf128 $1, %xmm3, %ymm0, %ymm3
; AVX512VL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX512VL-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX512VL-NEXT:    vinsertf128 $1, %xmm5, %ymm4, %ymm3
; AVX512VL-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX512VL-NEXT:    vdivps %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX512VL-NEXT:    vmovups %ymm0, (%rdx)
; AVX512VL-NEXT:    vzeroupper
; AVX512VL-NEXT:    retq
  %a2 = getelementptr inbounds i8, ptr %a0, i64 8
  %b2 = getelementptr inbounds i8, ptr %b0, i64 8
  %c2 = getelementptr inbounds i8, ptr %c0, i64 8
  %a4 = getelementptr inbounds i8, ptr %a0, i64 16
  %b4 = getelementptr inbounds i8, ptr %b0, i64 16
  %c4 = getelementptr inbounds i8, ptr %c0, i64 16
  %a6 = getelementptr inbounds i8, ptr %a0, i64 24
  %b6 = getelementptr inbounds i8, ptr %b0, i64 24
  %c6 = getelementptr inbounds i8, ptr %c0, i64 24
  %va0 = load <2 x float>, ptr %a0, align 4
  %vb0 = load <2 x float>, ptr %b0, align 4
  %va2 = load <2 x float>, ptr %a2, align 4
  %vb2 = load <2 x float>, ptr %b2, align 4
  %va4 = load <2 x float>, ptr %a4, align 4
  %vb4 = load <2 x float>, ptr %b4, align 4
  %va6 = load <2 x float>, ptr %a6, align 4
  %vb6 = load <2 x float>, ptr %b6, align 4
  %vc0 = fdiv <2 x float> %va0, %vb0
  %vc2 = fdiv <2 x float> %va2, %vb2
  %vc4 = fdiv <2 x float> %va4, %vb4
  %vc6 = fdiv <2 x float> %va6, %vb6
  store <2 x float> %vc0, ptr %c0, align 4
  store <2 x float> %vc2, ptr %c2, align 4
  store <2 x float> %vc4, ptr %c4, align 4
  store <2 x float> %vc6, ptr %c6, align 4
  ret void
}

define void @widen_fdiv_v2f32_v16f32(ptr %a0, ptr %b0, ptr %c0) {
; SSE-LABEL: widen_fdiv_v2f32_v16f32:
; SSE:       # %bb.0:
; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
; SSE-NEXT:    movsd {{.*#+}} xmm4 = mem[0],zero
; SSE-NEXT:    divps %xmm4, %xmm0
; SSE-NEXT:    movsd {{.*#+}} xmm4 = mem[0],zero
; SSE-NEXT:    divps %xmm4, %xmm1
; SSE-NEXT:    movsd {{.*#+}} xmm4 = mem[0],zero
; SSE-NEXT:    divps %xmm4, %xmm2
; SSE-NEXT:    movsd {{.*#+}} xmm4 = mem[0],zero
; SSE-NEXT:    divps %xmm4, %xmm3
; SSE-NEXT:    movsd {{.*#+}} xmm4 = mem[0],zero
; SSE-NEXT:    movsd {{.*#+}} xmm5 = mem[0],zero
; SSE-NEXT:    divps %xmm5, %xmm4
; SSE-NEXT:    movsd {{.*#+}} xmm5 = mem[0],zero
; SSE-NEXT:    movsd {{.*#+}} xmm6 = mem[0],zero
; SSE-NEXT:    divps %xmm6, %xmm5
; SSE-NEXT:    movsd {{.*#+}} xmm6 = mem[0],zero
; SSE-NEXT:    movsd {{.*#+}} xmm7 = mem[0],zero
; SSE-NEXT:    divps %xmm7, %xmm6
; SSE-NEXT:    movsd {{.*#+}} xmm7 = mem[0],zero
; SSE-NEXT:    movsd {{.*#+}} xmm8 = mem[0],zero
; SSE-NEXT:    divps %xmm8, %xmm7
; SSE-NEXT:    movlps %xmm0, (%rdx)
; SSE-NEXT:    movlps %xmm1, 8(%rdx)
; SSE-NEXT:    movlps %xmm2, 16(%rdx)
; SSE-NEXT:    movlps %xmm3, 24(%rdx)
; SSE-NEXT:    movlps %xmm4, 32(%rdx)
; SSE-NEXT:    movlps %xmm5, 40(%rdx)
; SSE-NEXT:    movlps %xmm6, 48(%rdx)
; SSE-NEXT:    movlps %xmm7, 56(%rdx)
; SSE-NEXT:    retq
;
; AVX1OR2-LABEL: widen_fdiv_v2f32_v16f32:
; AVX1OR2:       # %bb.0:
; AVX1OR2-NEXT:    vmovups (%rdi), %ymm0
; AVX1OR2-NEXT:    vmovups 32(%rdi), %ymm1
; AVX1OR2-NEXT:    vdivps (%rsi), %ymm0, %ymm0
; AVX1OR2-NEXT:    vdivps 32(%rsi), %ymm1, %ymm1
; AVX1OR2-NEXT:    vmovups %ymm0, (%rdx)
; AVX1OR2-NEXT:    vmovups %ymm1, 32(%rdx)
; AVX1OR2-NEXT:    vzeroupper
; AVX1OR2-NEXT:    retq
;
; AVX512F-LABEL: widen_fdiv_v2f32_v16f32:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512F-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512F-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX512F-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
; AVX512F-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
; AVX512F-NEXT:    vdivps %xmm4, %xmm0, %xmm0
; AVX512F-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
; AVX512F-NEXT:    vdivps %xmm4, %xmm1, %xmm1
; AVX512F-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
; AVX512F-NEXT:    vdivps %xmm4, %xmm2, %xmm2
; AVX512F-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
; AVX512F-NEXT:    vdivps %xmm4, %xmm3, %xmm3
; AVX512F-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
; AVX512F-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
; AVX512F-NEXT:    vdivps %xmm5, %xmm4, %xmm4
; AVX512F-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
; AVX512F-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
; AVX512F-NEXT:    vdivps %xmm6, %xmm5, %xmm5
; AVX512F-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
; AVX512F-NEXT:    vmovsd {{.*#+}} xmm7 = mem[0],zero
; AVX512F-NEXT:    vdivps %xmm7, %xmm6, %xmm6
; AVX512F-NEXT:    vmovsd {{.*#+}} xmm7 = mem[0],zero
; AVX512F-NEXT:    vmovsd {{.*#+}} xmm8 = mem[0],zero
; AVX512F-NEXT:    vdivps %xmm8, %xmm7, %xmm7
; AVX512F-NEXT:    vinsertf32x4 $1, %xmm7, %zmm6, %zmm6
; AVX512F-NEXT:    vinsertf32x4 $1, %xmm5, %zmm4, %zmm4
; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
; AVX512F-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
; AVX512F-NEXT:    vpermt2pd %zmm6, %zmm5, %zmm4
; AVX512F-NEXT:    vinsertf32x4 $1, %xmm3, %zmm2, %zmm2
; AVX512F-NEXT:    vinsertf32x4 $1, %xmm1, %zmm0, %zmm0
; AVX512F-NEXT:    vpermt2pd %zmm2, %zmm5, %zmm0
; AVX512F-NEXT:    vinsertf64x4 $0, %ymm0, %zmm4, %zmm0
; AVX512F-NEXT:    vmovupd %zmm0, (%rdx)
; AVX512F-NEXT:    vzeroupper
; AVX512F-NEXT:    retq
;
; AVX512VL-LABEL: widen_fdiv_v2f32_v16f32:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm3 = mem[0],zero
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
; AVX512VL-NEXT:    vdivps %xmm4, %xmm0, %xmm0
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
; AVX512VL-NEXT:    vdivps %xmm4, %xmm1, %xmm1
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
; AVX512VL-NEXT:    vdivps %xmm4, %xmm2, %xmm2
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
; AVX512VL-NEXT:    vdivps %xmm4, %xmm3, %xmm3
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm4 = mem[0],zero
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
; AVX512VL-NEXT:    vdivps %xmm5, %xmm4, %xmm4
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm5 = mem[0],zero
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
; AVX512VL-NEXT:    vdivps %xmm6, %xmm5, %xmm5
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm6 = mem[0],zero
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm7 = mem[0],zero
; AVX512VL-NEXT:    vdivps %xmm7, %xmm6, %xmm6
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm7 = mem[0],zero
; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm8 = mem[0],zero
; AVX512VL-NEXT:    vdivps %xmm8, %xmm7, %xmm7
; AVX512VL-NEXT:    vinsertf32x4 $1, %xmm7, %zmm6, %zmm6
; AVX512VL-NEXT:    vinsertf32x4 $1, %xmm5, %zmm4, %zmm4
; AVX512VL-NEXT:    vbroadcasti64x4 {{.*#+}} zmm5 = [0,2,8,10,0,2,8,10]
; AVX512VL-NEXT:    # zmm5 = mem[0,1,2,3,0,1,2,3]
; AVX512VL-NEXT:    vpermi2pd %zmm6, %zmm4, %zmm5
; AVX512VL-NEXT:    vinsertf32x4 $1, %xmm3, %zmm2, %zmm2
; AVX512VL-NEXT:    vinsertf32x4 $1, %xmm1, %zmm0, %zmm0
; AVX512VL-NEXT:    vmovapd {{.*#+}} ymm1 = [0,2,4,6]
; AVX512VL-NEXT:    vpermi2pd %ymm2, %ymm0, %ymm1
; AVX512VL-NEXT:    vinsertf64x4 $0, %ymm1, %zmm5, %zmm0
; AVX512VL-NEXT:    vmovupd %zmm0, (%rdx)
; AVX512VL-NEXT:    vzeroupper
; AVX512VL-NEXT:    retq
  %a2 = getelementptr inbounds i8, ptr %a0, i64 8
  %b2 = getelementptr inbounds i8, ptr %b0, i64 8
  %c2 = getelementptr inbounds i8, ptr %c0, i64 8
  %a4 = getelementptr inbounds i8, ptr %a0, i64 16
  %b4 = getelementptr inbounds i8, ptr %b0, i64 16
  %c4 = getelementptr inbounds i8, ptr %c0, i64 16
  %a6 = getelementptr inbounds i8, ptr %a0, i64 24
  %b6 = getelementptr inbounds i8, ptr %b0, i64 24
  %c6 = getelementptr inbounds i8, ptr %c0, i64 24
  %a8 = getelementptr inbounds i8, ptr %a0, i64 32
  %b8 = getelementptr inbounds i8, ptr %b0, i64 32
  %c8 = getelementptr inbounds i8, ptr %c0, i64 32
  %a10 = getelementptr inbounds i8, ptr %a0, i64 40
  %b10 = getelementptr inbounds i8, ptr %b0, i64 40
  %c10 = getelementptr inbounds i8, ptr %c0, i64 40
  %a12 = getelementptr inbounds i8, ptr %a0, i64 48
  %b12 = getelementptr inbounds i8, ptr %b0, i64 48
  %c12 = getelementptr inbounds i8, ptr %c0, i64 48
  %a14 = getelementptr inbounds i8, ptr %a0, i64 56
  %b14 = getelementptr inbounds i8, ptr %b0, i64 56
  %c14 = getelementptr inbounds i8, ptr %c0, i64 56
  %va0 = load <2 x float>, ptr %a0, align 4
  %vb0 = load <2 x float>, ptr %b0, align 4
  %va2 = load <2 x float>, ptr %a2, align 4
  %vb2 = load <2 x float>, ptr %b2, align 4
  %va4 = load <2 x float>, ptr %a4, align 4
  %vb4 = load <2 x float>, ptr %b4, align 4
  %va6 = load <2 x float>, ptr %a6, align 4
  %vb6 = load <2 x float>, ptr %b6, align 4
  %va8 = load <2 x float>, ptr %a8, align 4
  %vb8 = load <2 x float>, ptr %b8, align 4
  %va10 = load <2 x float>, ptr %a10, align 4
  %vb10 = load <2 x float>, ptr %b10, align 4
  %va12 = load <2 x float>, ptr %a12, align 4
  %vb12 = load <2 x float>, ptr %b12, align 4
  %va14 = load <2 x float>, ptr %a14, align 4
  %vb14 = load <2 x float>, ptr %b14, align 4
  %vc0 = fdiv <2 x float> %va0, %vb0
  %vc2 = fdiv <2 x float> %va2, %vb2
  %vc4 = fdiv <2 x float> %va4, %vb4
  %vc6 = fdiv <2 x float> %va6, %vb6
  %vc8 = fdiv <2 x float> %va8, %vb8
  %vc10 = fdiv <2 x float> %va10, %vb10
  %vc12 = fdiv <2 x float> %va12, %vb12
  %vc14 = fdiv <2 x float> %va14, %vb14
  store <2 x float> %vc0, ptr %c0, align 4
  store <2 x float> %vc2, ptr %c2, align 4
  store <2 x float> %vc4, ptr %c4, align 4
  store <2 x float> %vc6, ptr %c6, align 4
  store <2 x float> %vc8, ptr %c8, align 4
  store <2 x float> %vc10, ptr %c10, align 4
  store <2 x float> %vc12, ptr %c12, align 4
  store <2 x float> %vc14, ptr %c14, align 4
  ret void
}