; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64 | FileCheck %s -check-prefixes=SSE,SSE-MOV,SSE2
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v2 | FileCheck %s -check-prefixes=SSE,SSE4,SSE4-BLEND
; RUN: llc < %s -mtriple=x86_64-- -mcpu=slm | FileCheck %s -check-prefixes=SSE,SSE-MOV,SSE4,SSE4-MOV
; RUN: llc < %s -mtriple=x86_64-- -mcpu=sandybridge | FileCheck %s -check-prefixes=AVX,AVX1,AVX-BLEND,AVX1-BLEND
; RUN: llc < %s -mtriple=x86_64-- -mcpu=btver2 | FileCheck %s -check-prefixes=AVX,AVX1,AVX-MOV,AVX1-MOV
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v3 | FileCheck %s -check-prefixes=AVX,AVX2,AVX-BLEND,AVX2-BLEND
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s -check-prefixes=AVX,AVX2,AVX-BLEND,AVX2-BLEND
; RUN: llc < %s -mtriple=x86_64-- -mcpu=alderlake | FileCheck %s -check-prefixes=AVX,AVX2,AVX-MOV,AVX2-MOV
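;
; These tests cover lowering of a blend of the low element(s) of two
; vectors. Depending on subtarget tuning, the same shuffle may lower to a
; scalar move (movss/movsd) or to an immediate blend (blendps/blendpd and
; friends); under optsize the floating-point tests select the mov forms
; regardless of tuning, presumably because they avoid the blend immediate
; byte and encode shorter. The _load and _load_commute variants check that
; a memory operand folds into either side of the blend.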
;
; v2f64 patterns
;

define <2 x double> @test_v2f64_blend_movsd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
; SSE-MOV-LABEL: test_v2f64_blend_movsd:
; SSE-MOV:       # %bb.0:
; SSE-MOV-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE-MOV-NEXT:    addpd %xmm2, %xmm0
; SSE-MOV-NEXT:    retq
;
; SSE4-BLEND-LABEL: test_v2f64_blend_movsd:
; SSE4-BLEND:       # %bb.0:
; SSE4-BLEND-NEXT:    blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE4-BLEND-NEXT:    addpd %xmm2, %xmm0
; SSE4-BLEND-NEXT:    retq
;
; AVX-BLEND-LABEL: test_v2f64_blend_movsd:
; AVX-BLEND:       # %bb.0:
; AVX-BLEND-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-BLEND-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
; AVX-BLEND-NEXT:    retq
;
; AVX-MOV-LABEL: test_v2f64_blend_movsd:
; AVX-MOV:       # %bb.0:
; AVX-MOV-NEXT:    vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-MOV-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
; AVX-MOV-NEXT:    retq
  %s = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 2, i32 1>
  %r = fadd <2 x double> %s, %a2
  ret <2 x double> %r
}

define <2 x double> @test_v2f64_blend_movsd_optsize(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) optsize {
; SSE-LABEL: test_v2f64_blend_movsd_optsize:
; SSE:       # %bb.0:
; SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE-NEXT:    addpd %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v2f64_blend_movsd_optsize:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
; AVX-NEXT:    retq
  %s = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 2, i32 1>
  %r = fadd <2 x double> %s, %a2
  ret <2 x double> %r
}

define <2 x double> @test_v2f64_blend_movsd_load(ptr %p0, <2 x double> %a1, <2 x double> %a2) {
; SSE2-LABEL: test_v2f64_blend_movsd_load:
; SSE2:       # %bb.0:
; SSE2-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[0],mem[1]
; SSE2-NEXT:    addpd %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: test_v2f64_blend_movsd_load:
; SSE4:       # %bb.0:
; SSE4-NEXT:    blendpd {{.*#+}} xmm0 = xmm0[0],mem[1]
; SSE4-NEXT:    addpd %xmm1, %xmm0
; SSE4-NEXT:    retq
;
; AVX-LABEL: test_v2f64_blend_movsd_load:
; AVX:       # %bb.0:
; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],mem[1]
; AVX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a0 = load <2 x double>, ptr %p0
  %s = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 2, i32 1>
  %r = fadd <2 x double> %s, %a2
  ret <2 x double> %r
}

define <2 x double> @test_v2f64_blend_movsd_load_commute(<2 x double> %a0, ptr %p1, <2 x double> %a2) {
; SSE2-LABEL: test_v2f64_blend_movsd_load_commute:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movlpd {{.*#+}} xmm0 = mem[0],xmm0[1]
; SSE2-NEXT:    addpd %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: test_v2f64_blend_movsd_load_commute:
; SSE4:       # %bb.0:
; SSE4-NEXT:    blendpd {{.*#+}} xmm0 = mem[0],xmm0[1]
; SSE4-NEXT:    addpd %xmm1, %xmm0
; SSE4-NEXT:    retq
;
; AVX-LABEL: test_v2f64_blend_movsd_load_commute:
; AVX:       # %bb.0:
; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = mem[0],xmm0[1]
; AVX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a1 = load <2 x double>, ptr %p1
  %s = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 2, i32 1>
  %r = fadd <2 x double> %s, %a2
  ret <2 x double> %r
}

define <2 x double> @test_v2f64_blend_movsd_zero(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
; SSE-MOV-LABEL: test_v2f64_blend_movsd_zero:
; SSE-MOV:       # %bb.0:
; SSE-MOV-NEXT:    xorpd %xmm1, %xmm1
; SSE-MOV-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE-MOV-NEXT:    addpd %xmm2, %xmm0
; SSE-MOV-NEXT:    retq
;
; SSE4-BLEND-LABEL: test_v2f64_blend_movsd_zero:
; SSE4-BLEND:       # %bb.0:
; SSE4-BLEND-NEXT:    xorpd %xmm1, %xmm1
; SSE4-BLEND-NEXT:    blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE4-BLEND-NEXT:    addpd %xmm2, %xmm0
; SSE4-BLEND-NEXT:    retq
;
; AVX-BLEND-LABEL: test_v2f64_blend_movsd_zero:
; AVX-BLEND:       # %bb.0:
; AVX-BLEND-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; AVX-BLEND-NEXT:    vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-BLEND-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
; AVX-BLEND-NEXT:    retq
;
; AVX-MOV-LABEL: test_v2f64_blend_movsd_zero:
; AVX-MOV:       # %bb.0:
; AVX-MOV-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; AVX-MOV-NEXT:    vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-MOV-NEXT:    vaddpd %xmm2, %xmm0, %xmm0
; AVX-MOV-NEXT:    retq
  %s = shufflevector <2 x double> %a0, <2 x double> zeroinitializer, <2 x i32> <i32 2, i32 1>
  %r = fadd <2 x double> %s, %a2
  ret <2 x double> %r
}
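;
; For the integer cases below the blend appears to stay in the integer
; domain: SSE4.1/AVX1 select (v)pblendw and AVX2 selects vpblendd rather
; than blendpd, presumably to avoid a domain-crossing penalty between the
; blend and the paddq/paddd user.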
;
; v2i64 patterns
;

define <2 x i64> @test_v2i64_blend_movsd(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) {
; SSE2-LABEL: test_v2i64_blend_movsd:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT:    paddq %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: test_v2i64_blend_movsd:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE4-NEXT:    paddq %xmm2, %xmm0
; SSE4-NEXT:    retq
;
; AVX1-LABEL: test_v2i64_blend_movsd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v2i64_blend_movsd:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %s = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 2, i32 1>
  %r = add <2 x i64> %s, %a2
  ret <2 x i64> %r
}

define <2 x i64> @test_v2i64_blend_movsd_optsize(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) optsize {
; SSE2-LABEL: test_v2i64_blend_movsd_optsize:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT:    paddq %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: test_v2i64_blend_movsd_optsize:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE4-NEXT:    paddq %xmm2, %xmm0
; SSE4-NEXT:    retq
;
; AVX1-LABEL: test_v2i64_blend_movsd_optsize:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v2i64_blend_movsd_optsize:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %s = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 2, i32 1>
  %r = add <2 x i64> %s, %a2
  ret <2 x i64> %r
}

define <2 x i64> @test_v2i64_blend_movsd_load(ptr %p0, <2 x i64> %a1, <2 x i64> %a2) {
; SSE2-LABEL: test_v2i64_blend_movsd_load:
; SSE2:       # %bb.0:
; SSE2-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[0],mem[1]
; SSE2-NEXT:    paddq %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: test_v2i64_blend_movsd_load:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],mem[4,5,6,7]
; SSE4-NEXT:    paddq %xmm1, %xmm0
; SSE4-NEXT:    retq
;
; AVX1-LABEL: test_v2i64_blend_movsd_load:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],mem[4,5,6,7]
; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v2i64_blend_movsd_load:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %a0 = load <2 x i64>, ptr %p0
  %s = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 2, i32 1>
  %r = add <2 x i64> %s, %a2
  ret <2 x i64> %r
}

define <2 x i64> @test_v2i64_blend_movsd_load_commute(<2 x i64> %a0, ptr %p1, <2 x i64> %a2) {
; SSE2-LABEL: test_v2i64_blend_movsd_load_commute:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
; SSE2-NEXT:    paddq %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: test_v2i64_blend_movsd_load_commute:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = mem[0,1,2,3],xmm0[4,5,6,7]
; SSE4-NEXT:    paddq %xmm1, %xmm0
; SSE4-NEXT:    retq
;
; AVX1-LABEL: test_v2i64_blend_movsd_load_commute:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = mem[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v2i64_blend_movsd_load_commute:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %a1 = load <2 x i64>, ptr %p1
  %s = shufflevector <2 x i64> %a0, <2 x i64> %a1, <2 x i32> <i32 2, i32 1>
  %r = add <2 x i64> %s, %a2
  ret <2 x i64> %r
}

define <2 x i64> @test_v2i64_blend_movsd_zero(<2 x i64> %a0, <2 x i64> %a1, <2 x i64> %a2) {
; SSE2-LABEL: test_v2i64_blend_movsd_zero:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT:    paddq %xmm2, %xmm1
; SSE2-NEXT:    movdqa %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: test_v2i64_blend_movsd_zero:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pxor %xmm1, %xmm1
; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE4-NEXT:    paddq %xmm2, %xmm0
; SSE4-NEXT:    retq
;
; AVX1-LABEL: test_v2i64_blend_movsd_zero:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v2i64_blend_movsd_zero:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %s = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <2 x i32> <i32 2, i32 1>
  %r = add <2 x i64> %s, %a2
  ret <2 x i64> %r
}
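;
; The v4f32 tests pair each pattern with a movss-style mask (replace lane 0
; only) and a movsd-style mask (replace lanes 0 and 1). For example, the
; mask <i32 4, i32 1, i32 2, i32 3> takes lane 0 from the second shuffle
; operand and lanes 1-3 from the first, matching movss register-register
; semantics.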
;
; v4f32 patterns
;

define <4 x float> @test_v4f32_blend_movss(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
; SSE-MOV-LABEL: test_v4f32_blend_movss:
; SSE-MOV:       # %bb.0:
; SSE-MOV-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE-MOV-NEXT:    addps %xmm2, %xmm0
; SSE-MOV-NEXT:    retq
;
; SSE4-BLEND-LABEL: test_v4f32_blend_movss:
; SSE4-BLEND:       # %bb.0:
; SSE4-BLEND-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE4-BLEND-NEXT:    addps %xmm2, %xmm0
; SSE4-BLEND-NEXT:    retq
;
; AVX-BLEND-LABEL: test_v4f32_blend_movss:
; AVX-BLEND:       # %bb.0:
; AVX-BLEND-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-BLEND-NEXT:    vaddps %xmm2, %xmm0, %xmm0
; AVX-BLEND-NEXT:    retq
;
; AVX-MOV-LABEL: test_v4f32_blend_movss:
; AVX-MOV:       # %bb.0:
; AVX-MOV-NEXT:    vmovss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-MOV-NEXT:    vaddps %xmm2, %xmm0, %xmm0
; AVX-MOV-NEXT:    retq
  %s = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
  %r = fadd <4 x float> %s, %a2
  ret <4 x float> %r
}

define <4 x float> @test_v4f32_blend_movsd(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
; SSE-MOV-LABEL: test_v4f32_blend_movsd:
; SSE-MOV:       # %bb.0:
; SSE-MOV-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE-MOV-NEXT:    addps %xmm2, %xmm0
; SSE-MOV-NEXT:    retq
;
; SSE4-BLEND-LABEL: test_v4f32_blend_movsd:
; SSE4-BLEND:       # %bb.0:
; SSE4-BLEND-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; SSE4-BLEND-NEXT:    addps %xmm2, %xmm0
; SSE4-BLEND-NEXT:    retq
;
; AVX-BLEND-LABEL: test_v4f32_blend_movsd:
; AVX-BLEND:       # %bb.0:
; AVX-BLEND-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX-BLEND-NEXT:    vaddps %xmm2, %xmm0, %xmm0
; AVX-BLEND-NEXT:    retq
;
; AVX-MOV-LABEL: test_v4f32_blend_movsd:
; AVX-MOV:       # %bb.0:
; AVX-MOV-NEXT:    vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-MOV-NEXT:    vaddps %xmm2, %xmm0, %xmm0
; AVX-MOV-NEXT:    retq
  %s = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
  %r = fadd <4 x float> %s, %a2
  ret <4 x float> %r
}

define <4 x float> @test_v4f32_blend_movss_optsize(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) optsize {
; SSE-LABEL: test_v4f32_blend_movss_optsize:
; SSE:       # %bb.0:
; SSE-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE-NEXT:    addps %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v4f32_blend_movss_optsize:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-NEXT:    vaddps %xmm2, %xmm0, %xmm0
; AVX-NEXT:    retq
  %s = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
  %r = fadd <4 x float> %s, %a2
  ret <4 x float> %r
}

define <4 x float> @test_v4f32_blend_movsd_optsize(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) optsize {
; SSE-LABEL: test_v4f32_blend_movsd_optsize:
; SSE:       # %bb.0:
; SSE-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE-NEXT:    addps %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v4f32_blend_movsd_optsize:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT:    vaddps %xmm2, %xmm0, %xmm0
; AVX-NEXT:    retq
  %s = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
  %r = fadd <4 x float> %s, %a2
  ret <4 x float> %r
}
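;
; In the _load variants below, SSE2 has no blendps, so the movss-style
; blend of a loaded vector needs a separate movaps before the movss, while
; the movsd-style blend folds the load straight into shufpd/movlps.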
define <4 x float> @test_v4f32_blend_movss_load(ptr %p0, <4 x float> %a1, <4 x float> %a2) {
; SSE2-LABEL: test_v4f32_blend_movss_load:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movaps (%rdi), %xmm2
; SSE2-NEXT:    movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
; SSE2-NEXT:    addps %xmm1, %xmm2
; SSE2-NEXT:    movaps %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: test_v4f32_blend_movss_load:
; SSE4:       # %bb.0:
; SSE4-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],mem[1,2,3]
; SSE4-NEXT:    addps %xmm1, %xmm0
; SSE4-NEXT:    retq
;
; AVX-LABEL: test_v4f32_blend_movss_load:
; AVX:       # %bb.0:
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],mem[1,2,3]
; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a0 = load <4 x float>, ptr %p0
  %s = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
  %r = fadd <4 x float> %s, %a2
  ret <4 x float> %r
}

define <4 x float> @test_v4f32_blend_movss_load_commute(<4 x float> %a0, ptr %p1, <4 x float> %a2) {
; SSE2-LABEL: test_v4f32_blend_movss_load_commute:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movaps (%rdi), %xmm2
; SSE2-NEXT:    movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE2-NEXT:    addps %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: test_v4f32_blend_movss_load_commute:
; SSE4:       # %bb.0:
; SSE4-NEXT:    blendps {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
; SSE4-NEXT:    addps %xmm1, %xmm0
; SSE4-NEXT:    retq
;
; AVX-LABEL: test_v4f32_blend_movss_load_commute:
; AVX:       # %bb.0:
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a1 = load <4 x float>, ptr %p1
  %s = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
  %r = fadd <4 x float> %s, %a2
  ret <4 x float> %r
}

define <4 x float> @test_v4f32_blend_movsd_load(ptr %p0, <4 x float> %a1, <4 x float> %a2) {
; SSE2-LABEL: test_v4f32_blend_movsd_load:
; SSE2:       # %bb.0:
; SSE2-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[0],mem[1]
; SSE2-NEXT:    addps %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: test_v4f32_blend_movsd_load:
; SSE4:       # %bb.0:
; SSE4-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; SSE4-NEXT:    addps %xmm1, %xmm0
; SSE4-NEXT:    retq
;
; AVX-LABEL: test_v4f32_blend_movsd_load:
; AVX:       # %bb.0:
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a0 = load <4 x float>, ptr %p0
  %s = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
  %r = fadd <4 x float> %s, %a2
  ret <4 x float> %r
}

define <4 x float> @test_v4f32_blend_movsd_load_commute(<4 x float> %a0, ptr %p1, <4 x float> %a2) {
; SSE2-LABEL: test_v4f32_blend_movsd_load_commute:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
; SSE2-NEXT:    addps %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: test_v4f32_blend_movsd_load_commute:
; SSE4:       # %bb.0:
; SSE4-NEXT:    blendps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
; SSE4-NEXT:    addps %xmm1, %xmm0
; SSE4-NEXT:    retq
;
; AVX-LABEL: test_v4f32_blend_movsd_load_commute:
; AVX:       # %bb.0:
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a1 = load <4 x float>, ptr %p1
  %s = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
  %r = fadd <4 x float> %s, %a2
  ret <4 x float> %r
}
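;
; In the _zero variants below, blending against zeroinitializer still
; materializes the zero with a (v)xor/(v)pxor first. SSE2 has no integer
; blend, so for v4i32 it falls back to punpckhqdq in the movsd-style case
; and to a pand with a constant-pool mask in the movss-style case.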
define <4 x float> @test_v4f32_blend_movss_zero(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
; SSE-MOV-LABEL: test_v4f32_blend_movss_zero:
; SSE-MOV:       # %bb.0:
; SSE-MOV-NEXT:    xorps %xmm1, %xmm1
; SSE-MOV-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE-MOV-NEXT:    addps %xmm2, %xmm0
; SSE-MOV-NEXT:    retq
;
; SSE4-BLEND-LABEL: test_v4f32_blend_movss_zero:
; SSE4-BLEND:       # %bb.0:
; SSE4-BLEND-NEXT:    xorps %xmm1, %xmm1
; SSE4-BLEND-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE4-BLEND-NEXT:    addps %xmm2, %xmm0
; SSE4-BLEND-NEXT:    retq
;
; AVX-BLEND-LABEL: test_v4f32_blend_movss_zero:
; AVX-BLEND:       # %bb.0:
; AVX-BLEND-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-BLEND-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-BLEND-NEXT:    vaddps %xmm2, %xmm0, %xmm0
; AVX-BLEND-NEXT:    retq
;
; AVX-MOV-LABEL: test_v4f32_blend_movss_zero:
; AVX-MOV:       # %bb.0:
; AVX-MOV-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-MOV-NEXT:    vmovss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-MOV-NEXT:    vaddps %xmm2, %xmm0, %xmm0
; AVX-MOV-NEXT:    retq
  %s = shufflevector <4 x float> %a0, <4 x float> zeroinitializer, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
  %r = fadd <4 x float> %s, %a2
  ret <4 x float> %r
}

define <4 x float> @test_v4f32_blend_movsd_zero(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
; SSE2-LABEL: test_v4f32_blend_movsd_zero:
; SSE2:       # %bb.0:
; SSE2-NEXT:    xorpd %xmm1, %xmm1
; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT:    addps %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-BLEND-LABEL: test_v4f32_blend_movsd_zero:
; SSE4-BLEND:       # %bb.0:
; SSE4-BLEND-NEXT:    xorps %xmm1, %xmm1
; SSE4-BLEND-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; SSE4-BLEND-NEXT:    addps %xmm2, %xmm0
; SSE4-BLEND-NEXT:    retq
;
; SSE4-MOV-LABEL: test_v4f32_blend_movsd_zero:
; SSE4-MOV:       # %bb.0:
; SSE4-MOV-NEXT:    xorps %xmm1, %xmm1
; SSE4-MOV-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE4-MOV-NEXT:    addps %xmm2, %xmm0
; SSE4-MOV-NEXT:    retq
;
; AVX-BLEND-LABEL: test_v4f32_blend_movsd_zero:
; AVX-BLEND:       # %bb.0:
; AVX-BLEND-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-BLEND-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX-BLEND-NEXT:    vaddps %xmm2, %xmm0, %xmm0
; AVX-BLEND-NEXT:    retq
;
; AVX-MOV-LABEL: test_v4f32_blend_movsd_zero:
; AVX-MOV:       # %bb.0:
; AVX-MOV-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-MOV-NEXT:    vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-MOV-NEXT:    vaddps %xmm2, %xmm0, %xmm0
; AVX-MOV-NEXT:    retq
  %s = shufflevector <4 x float> %a0, <4 x float> zeroinitializer, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
  %r = fadd <4 x float> %s, %a2
  ret <4 x float> %r
}

;
; v4i32 patterns
;

define <4 x i32> @test_v4i32_blend_movss(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
; SSE2-LABEL: test_v4i32_blend_movss:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE2-NEXT:    paddd %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: test_v4i32_blend_movss:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; SSE4-NEXT:    paddd %xmm2, %xmm0
; SSE4-NEXT:    retq
;
; AVX1-LABEL: test_v4i32_blend_movss:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; AVX1-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v4i32_blend_movss:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX2-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %s = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
  %r = add <4 x i32> %s, %a2
  ret <4 x i32> %r
}

define <4 x i32> @test_v4i32_blend_movsd(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
; SSE2-LABEL: test_v4i32_blend_movsd:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT:    paddd %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: test_v4i32_blend_movsd:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE4-NEXT:    paddd %xmm2, %xmm0
; SSE4-NEXT:    retq
;
; AVX1-LABEL: test_v4i32_blend_movsd:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v4i32_blend_movsd:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %s = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
  %r = add <4 x i32> %s, %a2
  ret <4 x i32> %r
}

define <4 x i32> @test_v4i32_blend_movss_optsize(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) optsize {
; SSE2-LABEL: test_v4i32_blend_movss_optsize:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE2-NEXT:    paddd %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: test_v4i32_blend_movss_optsize:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; SSE4-NEXT:    paddd %xmm2, %xmm0
; SSE4-NEXT:    retq
;
; AVX1-LABEL: test_v4i32_blend_movss_optsize:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; AVX1-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v4i32_blend_movss_optsize:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX2-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %s = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
  %r = add <4 x i32> %s, %a2
  ret <4 x i32> %r
}
define <4 x i32> @test_v4i32_blend_movsd_optsize(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) optsize {
; SSE2-LABEL: test_v4i32_blend_movsd_optsize:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE2-NEXT:    paddd %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: test_v4i32_blend_movsd_optsize:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE4-NEXT:    paddd %xmm2, %xmm0
; SSE4-NEXT:    retq
;
; AVX1-LABEL: test_v4i32_blend_movsd_optsize:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v4i32_blend_movsd_optsize:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %s = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
  %r = add <4 x i32> %s, %a2
  ret <4 x i32> %r
}

define <4 x i32> @test_v4i32_blend_movss_load(ptr %p0, <4 x i32> %a1, <4 x i32> %a2) {
; SSE2-LABEL: test_v4i32_blend_movss_load:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movaps (%rdi), %xmm2
; SSE2-NEXT:    movss {{.*#+}} xmm2 = xmm0[0],xmm2[1,2,3]
; SSE2-NEXT:    paddd %xmm1, %xmm2
; SSE2-NEXT:    movdqa %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: test_v4i32_blend_movss_load:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],mem[2,3,4,5,6,7]
; SSE4-NEXT:    paddd %xmm1, %xmm0
; SSE4-NEXT:    retq
;
; AVX1-LABEL: test_v4i32_blend_movss_load:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1],mem[2,3,4,5,6,7]
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v4i32_blend_movss_load:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],mem[1,2,3]
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %a0 = load <4 x i32>, ptr %p0
  %s = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
  %r = add <4 x i32> %s, %a2
  ret <4 x i32> %r
}

define <4 x i32> @test_v4i32_blend_movss_load_commute(<4 x i32> %a0, ptr %p1, <4 x i32> %a2) {
; SSE2-LABEL: test_v4i32_blend_movss_load_commute:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movaps (%rdi), %xmm2
; SSE2-NEXT:    movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
; SSE2-NEXT:    paddd %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: test_v4i32_blend_movss_load_commute:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = mem[0,1],xmm0[2,3,4,5,6,7]
; SSE4-NEXT:    paddd %xmm1, %xmm0
; SSE4-NEXT:    retq
;
; AVX1-LABEL: test_v4i32_blend_movss_load_commute:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = mem[0,1],xmm0[2,3,4,5,6,7]
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v4i32_blend_movss_load_commute:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %a1 = load <4 x i32>, ptr %p1
  %s = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
  %r = add <4 x i32> %s, %a2
  ret <4 x i32> %r
}

define <4 x i32> @test_v4i32_blend_movsd_load(ptr %p0, <4 x i32> %a1, <4 x i32> %a2) {
; SSE2-LABEL: test_v4i32_blend_movsd_load:
; SSE2:       # %bb.0:
; SSE2-NEXT:    shufpd {{.*#+}} xmm0 = xmm0[0],mem[1]
; SSE2-NEXT:    paddd %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: test_v4i32_blend_movsd_load:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],mem[4,5,6,7]
; SSE4-NEXT:    paddd %xmm1, %xmm0
; SSE4-NEXT:    retq
;
; AVX1-LABEL: test_v4i32_blend_movsd_load:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],mem[4,5,6,7]
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v4i32_blend_movsd_load:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %a0 = load <4 x i32>, ptr %p0
  %s = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
  %r = add <4 x i32> %s, %a2
  ret <4 x i32> %r
}
define <4 x i32> @test_v4i32_blend_movsd_load_commute(<4 x i32> %a0, ptr %p1, <4 x i32> %a2) {
; SSE2-LABEL: test_v4i32_blend_movsd_load_commute:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
; SSE2-NEXT:    paddd %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: test_v4i32_blend_movsd_load_commute:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = mem[0,1,2,3],xmm0[4,5,6,7]
; SSE4-NEXT:    paddd %xmm1, %xmm0
; SSE4-NEXT:    retq
;
; AVX1-LABEL: test_v4i32_blend_movsd_load_commute:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = mem[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v4i32_blend_movsd_load_commute:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %a1 = load <4 x i32>, ptr %p1
  %s = shufflevector <4 x i32> %a0, <4 x i32> %a1, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
  %r = add <4 x i32> %s, %a2
  ret <4 x i32> %r
}

define <4 x i32> @test_v4i32_blend_movss_zero(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
; SSE2-LABEL: test_v4i32_blend_movss_zero:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT:    paddd %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: test_v4i32_blend_movss_zero:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pxor %xmm1, %xmm1
; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; SSE4-NEXT:    paddd %xmm2, %xmm0
; SSE4-NEXT:    retq
;
; AVX1-LABEL: test_v4i32_blend_movss_zero:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; AVX1-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v4i32_blend_movss_zero:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX2-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %s = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
  %r = add <4 x i32> %s, %a2
  ret <4 x i32> %r
}

define <4 x i32> @test_v4i32_blend_movsd_zero(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2) {
; SSE2-LABEL: test_v4i32_blend_movsd_zero:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT:    paddd %xmm2, %xmm1
; SSE2-NEXT:    movdqa %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE4-LABEL: test_v4i32_blend_movsd_zero:
; SSE4:       # %bb.0:
; SSE4-NEXT:    pxor %xmm1, %xmm1
; SSE4-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE4-NEXT:    paddd %xmm2, %xmm0
; SSE4-NEXT:    retq
;
; AVX1-LABEL: test_v4i32_blend_movsd_zero:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: test_v4i32_blend_movsd_zero:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %s = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
  %r = add <4 x i32> %s, %a2
  ret <4 x i32> %r
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; AVX1-BLEND: {{.*}}
; AVX1-MOV: {{.*}}
; AVX2-BLEND: {{.*}}
; AVX2-MOV: {{.*}}