; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3 | FileCheck %s --check-prefixes=SSE3,SSE3-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3,fast-hops | FileCheck %s --check-prefixes=SSE3,SSE3-FAST
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST

define <2 x double> @haddpd1(<2 x double> %x, <2 x double> %y) {
; SSE3-LABEL: haddpd1:
; SSE3:       # %bb.0:
; SSE3-NEXT:    haddpd %xmm1, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: haddpd1:
; AVX:       # %bb.0:
; AVX-NEXT:    vhaddpd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 0, i32 2>
  %b = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 1, i32 3>
  %r = fadd <2 x double> %a, %b
  ret <2 x double> %r
}

define <2 x double> @haddpd2(<2 x double> %x, <2 x double> %y) {
; SSE3-LABEL: haddpd2:
; SSE3:       # %bb.0:
; SSE3-NEXT:    haddpd %xmm1, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: haddpd2:
; AVX:       # %bb.0:
; AVX-NEXT:    vhaddpd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 0, i32 2>
  %b = shufflevector <2 x double> %y, <2 x double> %x, <2 x i32> <i32 3, i32 1>
  %r = fadd <2 x double> %a, %b
  ret <2 x double> %r
}

define <2 x double> @haddpd3(<2 x double> %x) {
; SSE3-SLOW-LABEL: haddpd3:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT:    addpd %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: haddpd3:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    haddpd %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: haddpd3:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: haddpd3:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    retq
  %a = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 0, i32 undef>
  %b = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 1, i32 undef>
  %r = fadd <2 x double> %a, %b
  ret <2 x double> %r
}

define <4 x float> @haddps1(<4 x float> %x, <4 x float> %y) {
; SSE3-LABEL: haddps1:
; SSE3:       # %bb.0:
; SSE3-NEXT:    haddps %xmm1, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: haddps1:
; AVX:       # %bb.0:
; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %b = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %r = fadd <4 x float> %a, %b
  ret <4 x float> %r
}

define <4 x float> @haddps2(<4 x float> %x, <4 x float> %y) {
; SSE3-LABEL: haddps2:
; SSE3:       # %bb.0:
; SSE3-NEXT:    haddps %xmm1, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: haddps2:
; AVX:       # %bb.0:
; AVX-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %b = shufflevector <4 x float> %y, <4 x float> %x, <4 x i32> <i32 5, i32 7, i32 1, i32 3>
  %r = fadd <4 x float> %a, %b
  ret <4 x float> %r
}

define <4 x float> @haddps3(<4 x float> %x) {
; SSE3-LABEL: haddps3:
; SSE3:       # %bb.0:
; SSE3-NEXT:    haddps %xmm0, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: haddps3:
; AVX:       # %bb.0:
; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 2, i32 4, i32 6>
  %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 3, i32 5, i32 7>
  %r = fadd <4 x float> %a, %b
  ret <4 x float> %r
}

define <4 x float> @haddps4(<4 x float> %x) {
; SSE3-LABEL: haddps4:
; SSE3:       # %bb.0:
; SSE3-NEXT:    haddps %xmm0, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: haddps4:
; AVX:       # %bb.0:
; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
  %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
  %r = fadd <4 x float> %a, %b
  ret <4 x float> %r
}

define <4 x float> @haddps5(<4 x float> %x) {
; SSE3-LABEL: haddps5:
; SSE3:       # %bb.0:
; SSE3-NEXT:    haddps %xmm0, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: haddps5:
; AVX:       # %bb.0:
; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 3, i32 undef, i32 undef>
  %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 undef, i32 undef>
  %r = fadd <4 x float> %a, %b
  ret <4 x float> %r
}

define <4 x float> @haddps6(<4 x float> %x) {
; SSE3-SLOW-LABEL: haddps6:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT:    addps %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: haddps6:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: haddps6:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: haddps6:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    retq
  %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
  %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  %r = fadd <4 x float> %a, %b
  ret <4 x float> %r
}

define <4 x float> @haddps7(<4 x float> %x) {
; SSE3-LABEL: haddps7:
; SSE3:       # %bb.0:
; SSE3-NEXT:    haddps %xmm0, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: haddps7:
; AVX:       # %bb.0:
; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 3, i32 undef, i32 undef>
  %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 2, i32 undef, i32 undef>
  %r = fadd <4 x float> %a, %b
  ret <4 x float> %r
}

define <2 x double> @hsubpd1(<2 x double> %x, <2 x double> %y) {
; SSE3-LABEL: hsubpd1:
; SSE3:       # %bb.0:
; SSE3-NEXT:    hsubpd %xmm1, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: hsubpd1:
; AVX:       # %bb.0:
; AVX-NEXT:    vhsubpd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 0, i32 2>
  %b = shufflevector <2 x double> %x, <2 x double> %y, <2 x i32> <i32 1, i32 3>
  %r = fsub <2 x double> %a, %b
  ret <2 x double> %r
}

define <2 x double> @hsubpd2(<2 x double> %x) {
; SSE3-SLOW-LABEL: hsubpd2:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT:    subpd %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: hsubpd2:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    hsubpd %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: hsubpd2:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vsubpd %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: hsubpd2:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhsubpd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    retq
  %a = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 0, i32 undef>
  %b = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 1, i32 undef>
  %r = fsub <2 x double> %a, %b
  ret <2 x double> %r
}

define <4 x float> @hsubps1(<4 x float> %x, <4 x float> %y) {
; SSE3-LABEL: hsubps1:
; SSE3:       # %bb.0:
; SSE3-NEXT:    hsubps %xmm1, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: hsubps1:
; AVX:       # %bb.0:
; AVX-NEXT:    vhsubps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %b = shufflevector <4 x float> %x, <4 x float> %y, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %r = fsub <4 x float> %a, %b
  ret <4 x float> %r
}

define <4 x float> @hsubps2(<4 x float> %x) {
; SSE3-LABEL: hsubps2:
; SSE3:       # %bb.0:
; SSE3-NEXT:    hsubps %xmm0, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: hsubps2:
; AVX:       # %bb.0:
; AVX-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 2, i32 4, i32 6>
  %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 3, i32 5, i32 7>
  %r = fsub <4 x float> %a, %b
  ret <4 x float> %r
}

define <4 x float> @hsubps3(<4 x float> %x) {
; SSE3-LABEL: hsubps3:
; SSE3:       # %bb.0:
; SSE3-NEXT:    hsubps %xmm0, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: hsubps3:
; AVX:       # %bb.0:
; AVX-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
  %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
  %r = fsub <4 x float> %a, %b
  ret <4 x float> %r
}

define <4 x float> @hsubps4(<4 x float> %x) {
; SSE3-SLOW-LABEL: hsubps4:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT:    subps %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: hsubps4:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    hsubps %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: hsubps4:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT:    vsubps %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: hsubps4:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    retq
  %a = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 undef, i32 undef, i32 undef>
  %b = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  %r = fsub <4 x float> %a, %b
  ret <4 x float> %r
}

define <8 x float> @vhaddps1(<8 x float> %x, <8 x float> %y) {
; SSE3-LABEL: vhaddps1:
; SSE3:       # %bb.0:
; SSE3-NEXT:    haddps %xmm2, %xmm0
; SSE3-NEXT:    haddps %xmm3, %xmm1
; SSE3-NEXT:    retq
;
; AVX-LABEL: vhaddps1:
; AVX:       # %bb.0:
; AVX-NEXT:    vhaddps %ymm1, %ymm0, %ymm0
; AVX-NEXT:    retq
  %a = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
  %b = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 1, i32 3, i32 9, i32 11, i32 5, i32 7, i32 13, i32 15>
  %r = fadd <8 x float> %a, %b
  ret <8 x float> %r
}

define <8 x float> @vhaddps2(<8 x float> %x, <8 x float> %y) {
; SSE3-LABEL: vhaddps2:
; SSE3:       # %bb.0:
; SSE3-NEXT:    haddps %xmm2, %xmm0
; SSE3-NEXT:    haddps %xmm3, %xmm1
; SSE3-NEXT:    retq
;
; AVX-LABEL: vhaddps2:
; AVX:       # %bb.0:
; AVX-NEXT:    vhaddps %ymm1, %ymm0, %ymm0
; AVX-NEXT:    retq
  %a = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
  %b = shufflevector <8 x float> %y, <8 x float> %x, <8 x i32> <i32 9, i32 11, i32 1, i32 3, i32 13, i32 15, i32 5, i32 7>
  %r = fadd <8 x float> %a, %b
  ret <8 x float> %r
}

define <8 x float> @vhaddps3(<8 x float> %x) {
; SSE3-LABEL: vhaddps3:
; SSE3:       # %bb.0:
; SSE3-NEXT:    haddps %xmm0, %xmm0
; SSE3-NEXT:    haddps %xmm1, %xmm1
; SSE3-NEXT:    retq
;
; AVX-LABEL: vhaddps3:
; AVX:       # %bb.0:
; AVX-NEXT:    vhaddps %ymm0, %ymm0, %ymm0
; AVX-NEXT:    retq
  %a = shufflevector <8 x float> %x, <8 x float> undef, <8 x i32> <i32 0, i32 2, i32 undef, i32 undef, i32 4, i32 6, i32 undef, i32 undef>
  %b = shufflevector <8 x float> %x, <8 x float> undef, <8 x i32> <i32 1, i32 3, i32 undef, i32 undef, i32 5, i32 7, i32 undef, i32 undef>
  %r = fadd <8 x float> %a, %b
  ret <8 x float> %r
}

define <8 x float> @vhsubps1(<8 x float> %x, <8 x float> %y) {
; SSE3-LABEL: vhsubps1:
; SSE3:       # %bb.0:
; SSE3-NEXT:    hsubps %xmm2, %xmm0
; SSE3-NEXT:    hsubps %xmm3, %xmm1
; SSE3-NEXT:    retq
;
; AVX-LABEL: vhsubps1:
; AVX:       # %bb.0:
; AVX-NEXT:    vhsubps %ymm1, %ymm0, %ymm0
; AVX-NEXT:    retq
  %a = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 0, i32 2, i32 8, i32 10, i32 4, i32 6, i32 12, i32 14>
  %b = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 1, i32 3, i32 9, i32 11, i32 5, i32 7, i32 13, i32 15>
  %r = fsub <8 x float> %a, %b
  ret <8 x float> %r
}

define <8 x float> @vhsubps3(<8 x float> %x) {
; SSE3-LABEL: vhsubps3:
; SSE3:       # %bb.0:
; SSE3-NEXT:    hsubps %xmm0, %xmm0
; SSE3-NEXT:    hsubps %xmm1, %xmm1
; SSE3-NEXT:    retq
;
; AVX-LABEL: vhsubps3:
; AVX:       # %bb.0:
; AVX-NEXT:    vhsubps %ymm0, %ymm0, %ymm0
; AVX-NEXT:    retq
  %a = shufflevector <8 x float> %x, <8 x float> undef, <8 x i32> <i32 0, i32 2, i32 undef, i32 undef, i32 4, i32 6, i32 undef, i32 undef>
  %b = shufflevector <8 x float> %x, <8 x float> undef, <8 x i32> <i32 1, i32 3, i32 undef, i32 undef, i32 5, i32 7, i32 undef, i32 undef>
  %r = fsub <8 x float> %a, %b
  ret <8 x float> %r
}

define <4 x double> @vhaddpd1(<4 x double> %x, <4 x double> %y) {
; SSE3-LABEL: vhaddpd1:
; SSE3:       # %bb.0:
; SSE3-NEXT:    haddpd %xmm2, %xmm0
; SSE3-NEXT:    haddpd %xmm3, %xmm1
; SSE3-NEXT:    retq
;
; AVX-LABEL: vhaddpd1:
; AVX:       # %bb.0:
; AVX-NEXT:    vhaddpd %ymm1, %ymm0, %ymm0
; AVX-NEXT:    retq
  %a = shufflevector <4 x double> %x, <4 x double> %y, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
  %b = shufflevector <4 x double> %x, <4 x double> %y, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
  %r = fadd <4 x double> %a, %b
  ret <4 x double> %r
}

define <4 x double> @vhsubpd1(<4 x double> %x, <4 x double> %y) {
; SSE3-LABEL: vhsubpd1:
; SSE3:       # %bb.0:
; SSE3-NEXT:    hsubpd %xmm2, %xmm0
; SSE3-NEXT:    hsubpd %xmm3, %xmm1
; SSE3-NEXT:    retq
;
; AVX-LABEL: vhsubpd1:
; AVX:       # %bb.0:
; AVX-NEXT:    vhsubpd %ymm1, %ymm0, %ymm0
; AVX-NEXT:    retq
  %a = shufflevector <4 x double> %x, <4 x double> %y, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
  %b = shufflevector <4 x double> %x, <4 x double> %y, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
  %r = fsub <4 x double> %a, %b
  ret <4 x double> %r
}

define <2 x float> @haddps_v2f32(<4 x float> %v0) {
; SSE3-LABEL: haddps_v2f32:
; SSE3:       # %bb.0:
; SSE3-NEXT:    haddps %xmm0, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: haddps_v2f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %v0.0 = extractelement <4 x float> %v0, i32 0
  %v0.1 = extractelement <4 x float> %v0, i32 1
  %v0.2 = extractelement <4 x float> %v0, i32 2
  %v0.3 = extractelement <4 x float> %v0, i32 3
  %op0 = fadd float %v0.0, %v0.1
  %op1 = fadd float %v0.2, %v0.3
  %res0 = insertelement <2 x float> undef, float %op0, i32 0
  %res1 = insertelement <2 x float> %res0, float %op1, i32 1
  ret <2 x float> %res1
}

; 128-bit vectors, float/double, fadd/fsub

define float @extract_extract01_v4f32_fadd_f32(<4 x float> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v4f32_fadd_f32:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract01_v4f32_fadd_f32:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract01_v4f32_fadd_f32:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract01_v4f32_fadd_f32:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <4 x float> %x, i32 0
  %x1 = extractelement <4 x float> %x, i32 1
  %x01 = fadd float %x0, %x1
  ret float %x01
}

define float @extract_extract23_v4f32_fadd_f32(<4 x float> %x) {
; SSE3-SLOW-LABEL: extract_extract23_v4f32_fadd_f32:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movaps %xmm0, %xmm1
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract23_v4f32_fadd_f32:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSE3-FAST-NEXT:    movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract23_v4f32_fadd_f32:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX-SLOW-NEXT:    vaddss %xmm0, %xmm1, %xmm0
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract23_v4f32_fadd_f32:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <4 x float> %x, i32 2
  %x1 = extractelement <4 x float> %x, i32 3
  %x01 = fadd float %x0, %x1
  ret float %x01
}

define float @extract_extract01_v4f32_fadd_f32_commute(<4 x float> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v4f32_fadd_f32_commute:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract01_v4f32_fadd_f32_commute:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract01_v4f32_fadd_f32_commute:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT:    vaddss %xmm0, %xmm1, %xmm0
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract01_v4f32_fadd_f32_commute:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <4 x float> %x, i32 0
  %x1 = extractelement <4 x float> %x, i32 1
  %x01 = fadd float %x1, %x0
  ret float %x01
}

define float @extract_extract23_v4f32_fadd_f32_commute(<4 x float> %x) {
; SSE3-SLOW-LABEL: extract_extract23_v4f32_fadd_f32_commute:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movaps %xmm0, %xmm1
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract23_v4f32_fadd_f32_commute:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSE3-FAST-NEXT:    movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract23_v4f32_fadd_f32_commute:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract23_v4f32_fadd_f32_commute:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <4 x float> %x, i32 2
  %x1 = extractelement <4 x float> %x, i32 3
  %x01 = fadd float %x1, %x0
  ret float %x01
}

define double @extract_extract01_v2f64_fadd_f64(<2 x double> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v2f64_fadd_f64:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT:    addsd %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract01_v2f64_fadd_f64:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    haddpd %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract01_v2f64_fadd_f64:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract01_v2f64_fadd_f64:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <2 x double> %x, i32 0
  %x1 = extractelement <2 x double> %x, i32 1
  %x01 = fadd double %x0, %x1
  ret double %x01
}

define double @extract_extract01_v2f64_fadd_f64_commute(<2 x double> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v2f64_fadd_f64_commute:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT:    addsd %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract01_v2f64_fadd_f64_commute:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    haddpd %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract01_v2f64_fadd_f64_commute:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract01_v2f64_fadd_f64_commute:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <2 x double> %x, i32 0
  %x1 = extractelement <2 x double> %x, i32 1
  %x01 = fadd double %x1, %x0
  ret double %x01
}

define float @extract_extract01_v4f32_fsub_f32(<4 x float> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v4f32_fsub_f32:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT:    subss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract01_v4f32_fsub_f32:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    hsubps %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract01_v4f32_fsub_f32:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT:    vsubss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract01_v4f32_fsub_f32:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <4 x float> %x, i32 0
  %x1 = extractelement <4 x float> %x, i32 1
  %x01 = fsub float %x0, %x1
  ret float %x01
}

define float @extract_extract23_v4f32_fsub_f32(<4 x float> %x) {
; SSE3-SLOW-LABEL: extract_extract23_v4f32_fsub_f32:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movaps %xmm0, %xmm1
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE3-SLOW-NEXT:    subss %xmm0, %xmm1
; SSE3-SLOW-NEXT:    movaps %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract23_v4f32_fsub_f32:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    hsubps %xmm0, %xmm0
; SSE3-FAST-NEXT:    movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract23_v4f32_fsub_f32:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX-SLOW-NEXT:    vsubss %xmm0, %xmm1, %xmm0
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract23_v4f32_fsub_f32:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <4 x float> %x, i32 2
  %x1 = extractelement <4 x float> %x, i32 3
  %x01 = fsub float %x0, %x1
  ret float %x01
}

define float @extract_extract01_v4f32_fsub_f32_commute(<4 x float> %x) {
; SSE3-LABEL: extract_extract01_v4f32_fsub_f32_commute:
; SSE3:       # %bb.0:
; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT:    subss %xmm0, %xmm1
; SSE3-NEXT:    movaps %xmm1, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: extract_extract01_v4f32_fsub_f32_commute:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT:    vsubss %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %x0 = extractelement <4 x float> %x, i32 0
  %x1 = extractelement <4 x float> %x, i32 1
  %x01 = fsub float %x1, %x0
  ret float %x01
}

define float @extract_extract23_v4f32_fsub_f32_commute(<4 x float> %x) {
; SSE3-LABEL: extract_extract23_v4f32_fsub_f32_commute:
; SSE3:       # %bb.0:
; SSE3-NEXT:    movaps %xmm0, %xmm1
; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE3-NEXT:    subss %xmm1, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: extract_extract23_v4f32_fsub_f32_commute:
; AVX:       # %bb.0:
; AVX-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %x0 = extractelement <4 x float> %x, i32 2
  %x1 = extractelement <4 x float> %x, i32 3
  %x01 = fsub float %x1, %x0
  ret float %x01
}

define double @extract_extract01_v2f64_fsub_f64(<2 x double> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v2f64_fsub_f64:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT:    subsd %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract01_v2f64_fsub_f64:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    hsubpd %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract01_v2f64_fsub_f64:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract01_v2f64_fsub_f64:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhsubpd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <2 x double> %x, i32 0
  %x1 = extractelement <2 x double> %x, i32 1
  %x01 = fsub double %x0, %x1
  ret double %x01
}

define double @extract_extract01_v2f64_fsub_f64_commute(<2 x double> %x) {
; SSE3-LABEL: extract_extract01_v2f64_fsub_f64_commute:
; SSE3:       # %bb.0:
; SSE3-NEXT:    movapd %xmm0, %xmm1
; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-NEXT:    subsd %xmm0, %xmm1
; SSE3-NEXT:    movapd %xmm1, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: extract_extract01_v2f64_fsub_f64_commute:
; AVX:       # %bb.0:
; AVX-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vsubsd %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %x0 = extractelement <2 x double> %x, i32 0
  %x1 = extractelement <2 x double> %x, i32 1
  %x01 = fsub double %x1, %x0
  ret double %x01
}

; 256-bit vectors, float/double, fadd/fsub

define float @extract_extract01_v8f32_fadd_f32(<8 x float> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v8f32_fadd_f32:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract01_v8f32_fadd_f32:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract01_v8f32_fadd_f32:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract01_v8f32_fadd_f32:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <8 x float> %x, i32 0
  %x1 = extractelement <8 x float> %x, i32 1
  %x01 = fadd float %x0, %x1
  ret float %x01
}

define float @extract_extract23_v8f32_fadd_f32(<8 x float> %x) {
; SSE3-SLOW-LABEL: extract_extract23_v8f32_fadd_f32:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movaps %xmm0, %xmm1
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract23_v8f32_fadd_f32:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSE3-FAST-NEXT:    movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract23_v8f32_fadd_f32:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX-SLOW-NEXT:    vaddss %xmm0, %xmm1, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract23_v8f32_fadd_f32:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <8 x float> %x, i32 2
  %x1 = extractelement <8 x float> %x, i32 3
  %x01 = fadd float %x0, %x1
  ret float %x01
}

define float @extract_extract67_v8f32_fadd_f32(<8 x float> %x) {
; SSE3-SLOW-LABEL: extract_extract67_v8f32_fadd_f32:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movaps %xmm1, %xmm0
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE3-SLOW-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract67_v8f32_fadd_f32:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    haddps %xmm1, %xmm1
; SSE3-FAST-NEXT:    movshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract67_v8f32_fadd_f32:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX-SLOW-NEXT:    vaddss %xmm0, %xmm1, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract67_v8f32_fadd_f32:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <8 x float> %x, i32 6
  %x1 = extractelement <8 x float> %x, i32 7
  %x01 = fadd float %x0, %x1
  ret float %x01
}

define float @extract_extract01_v8f32_fadd_f32_commute(<8 x float> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v8f32_fadd_f32_commute:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract01_v8f32_fadd_f32_commute:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract01_v8f32_fadd_f32_commute:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT:    vaddss %xmm0, %xmm1, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract01_v8f32_fadd_f32_commute:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <8 x float> %x, i32 0
  %x1 = extractelement <8 x float> %x, i32 1
  %x01 = fadd float %x1, %x0
  ret float %x01
}

define float @extract_extract23_v8f32_fadd_f32_commute(<8 x float> %x) {
; SSE3-SLOW-LABEL: extract_extract23_v8f32_fadd_f32_commute:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movaps %xmm0, %xmm1
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract23_v8f32_fadd_f32_commute:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSE3-FAST-NEXT:    movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract23_v8f32_fadd_f32_commute:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract23_v8f32_fadd_f32_commute:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <8 x float> %x, i32 2
  %x1 = extractelement <8 x float> %x, i32 3
  %x01 = fadd float %x1, %x0
  ret float %x01
}

define float @extract_extract67_v8f32_fadd_f32_commute(<8 x float> %x) {
; SSE3-SLOW-LABEL: extract_extract67_v8f32_fadd_f32_commute:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movaps %xmm1, %xmm0
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE3-SLOW-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract67_v8f32_fadd_f32_commute:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    haddps %xmm1, %xmm1
; SSE3-FAST-NEXT:    movshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract67_v8f32_fadd_f32_commute:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract67_v8f32_fadd_f32_commute:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <8 x float> %x, i32 6
  %x1 = extractelement <8 x float> %x, i32 7
  %x01 = fadd float %x1, %x0
  ret float %x01
}

define double @extract_extract01_v4f64_fadd_f64(<4 x double> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v4f64_fadd_f64:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT:    addsd %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract01_v4f64_fadd_f64:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    haddpd %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract01_v4f64_fadd_f64:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract01_v4f64_fadd_f64:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <4 x double> %x, i32 0
  %x1 = extractelement <4 x double> %x, i32 1
  %x01 = fadd double %x0, %x1
  ret double %x01
}

define double @extract_extract23_v4f64_fadd_f64(<4 x double> %x) {
; SSE3-SLOW-LABEL: extract_extract23_v4f64_fadd_f64:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movapd %xmm1, %xmm0
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE3-SLOW-NEXT:    addsd %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract23_v4f64_fadd_f64:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    movapd %xmm1, %xmm0
; SSE3-FAST-NEXT:    haddpd %xmm1, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract23_v4f64_fadd_f64:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract23_v4f64_fadd_f64:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <4 x double> %x, i32 2
  %x1 = extractelement <4 x double> %x, i32 3
  %x01 = fadd double %x0, %x1
  ret double %x01
}

define double @extract_extract01_v4f64_fadd_f64_commute(<4 x double> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v4f64_fadd_f64_commute:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT:    addsd %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract01_v4f64_fadd_f64_commute:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    haddpd %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract01_v4f64_fadd_f64_commute:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract01_v4f64_fadd_f64_commute:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <4 x double> %x, i32 0
  %x1 = extractelement <4 x double> %x, i32 1
  %x01 = fadd double %x1, %x0
  ret double %x01
}

define double @extract_extract23_v4f64_fadd_f64_commute(<4 x double> %x) {
; SSE3-SLOW-LABEL: extract_extract23_v4f64_fadd_f64_commute:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movapd %xmm1, %xmm0
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE3-SLOW-NEXT:    addsd %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract23_v4f64_fadd_f64_commute:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    movapd %xmm1, %xmm0
; SSE3-FAST-NEXT:    haddpd %xmm1, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract23_v4f64_fadd_f64_commute:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract23_v4f64_fadd_f64_commute:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <4 x double> %x, i32 2
  %x1 = extractelement <4 x double> %x, i32 3
  %x01 = fadd double %x1, %x0
  ret double %x01
}

define float @extract_extract01_v8f32_fsub_f32(<8 x float> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v8f32_fsub_f32:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT:    subss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract01_v8f32_fsub_f32:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    hsubps %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract01_v8f32_fsub_f32:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT:    vsubss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract01_v8f32_fsub_f32:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <8 x float> %x, i32 0
  %x1 = extractelement <8 x float> %x, i32 1
  %x01 = fsub float %x0, %x1
  ret float %x01
}

define float @extract_extract23_v8f32_fsub_f32(<8 x float> %x) {
; SSE3-SLOW-LABEL: extract_extract23_v8f32_fsub_f32:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movaps %xmm0, %xmm1
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE3-SLOW-NEXT:    subss %xmm0, %xmm1
; SSE3-SLOW-NEXT:    movaps %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract23_v8f32_fsub_f32:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    hsubps %xmm0, %xmm0
; SSE3-FAST-NEXT:    movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract23_v8f32_fsub_f32:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX-SLOW-NEXT:    vsubss %xmm0, %xmm1, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract23_v8f32_fsub_f32:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <8 x float> %x, i32 2
  %x1 = extractelement <8 x float> %x, i32 3
  %x01 = fsub float %x0, %x1
  ret float %x01
}

define float @extract_extract45_v8f32_fsub_f32(<8 x float> %x) {
; SSE3-SLOW-LABEL: extract_extract45_v8f32_fsub_f32:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movaps %xmm1, %xmm0
; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE3-SLOW-NEXT:    subss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract45_v8f32_fsub_f32:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    movaps %xmm1, %xmm0
; SSE3-FAST-NEXT:    hsubps %xmm1, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract45_v8f32_fsub_f32:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT:    vsubss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract45_v8f32_fsub_f32:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX-FAST-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <8 x float> %x, i32 4
  %x1 = extractelement <8 x float> %x, i32 5
  %x01 = fsub float %x0, %x1
  ret float %x01
}

; Negative test...or get hoppy and negate?

define float @extract_extract01_v8f32_fsub_f32_commute(<8 x float> %x) {
; SSE3-LABEL: extract_extract01_v8f32_fsub_f32_commute:
; SSE3:       # %bb.0:
; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT:    subss %xmm0, %xmm1
; SSE3-NEXT:    movaps %xmm1, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: extract_extract01_v8f32_fsub_f32_commute:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT:    vsubss %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %x0 = extractelement <8 x float> %x, i32 0
  %x1 = extractelement <8 x float> %x, i32 1
  %x01 = fsub float %x1, %x0
  ret float %x01
}

define double @extract_extract01_v4f64_fsub_f64(<4 x double> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v4f64_fsub_f64:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT:    subsd %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract01_v4f64_fsub_f64:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    hsubpd %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract01_v4f64_fsub_f64:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract01_v4f64_fsub_f64:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhsubpd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <4 x double> %x, i32 0
  %x1 = extractelement <4 x double> %x, i32 1
  %x01 = fsub double %x0, %x1
  ret double %x01
}

; Negative test...or get hoppy and negate?

define double @extract_extract01_v4f64_fsub_f64_commute(<4 x double> %x) {
; SSE3-LABEL: extract_extract01_v4f64_fsub_f64_commute:
; SSE3:       # %bb.0:
; SSE3-NEXT:    movapd %xmm0, %xmm1
; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-NEXT:    subsd %xmm0, %xmm1
; SSE3-NEXT:    movapd %xmm1, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: extract_extract01_v4f64_fsub_f64_commute:
; AVX:       # %bb.0:
; AVX-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vsubsd %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %x0 = extractelement <4 x double> %x, i32 0
  %x1 = extractelement <4 x double> %x, i32 1
  %x01 = fsub double %x1, %x0
  ret double %x01
}

; 512-bit vectors, float/double, fadd/fsub

define float @extract_extract01_v16f32_fadd_f32(<16 x float> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v16f32_fadd_f32:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract01_v16f32_fadd_f32:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract01_v16f32_fadd_f32:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract01_v16f32_fadd_f32:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <16 x float> %x, i32 0
  %x1 = extractelement <16 x float> %x, i32 1
  %x01 = fadd float %x0, %x1
  ret float %x01
}

define float @extract_extract01_v16f32_fadd_f32_commute(<16 x float> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v16f32_fadd_f32_commute:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract01_v16f32_fadd_f32_commute:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract01_v16f32_fadd_f32_commute:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT:    vaddss %xmm0, %xmm1, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract01_v16f32_fadd_f32_commute:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <16 x float> %x, i32 0
  %x1 = extractelement <16 x float> %x, i32 1
  %x01 = fadd float %x1, %x0
  ret float %x01
}

define double @extract_extract01_v8f64_fadd_f64(<8 x double> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v8f64_fadd_f64:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT:    addsd %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract01_v8f64_fadd_f64:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    haddpd %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract01_v8f64_fadd_f64:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract01_v8f64_fadd_f64:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <8 x double> %x, i32 0
  %x1 = extractelement <8 x double> %x, i32 1
  %x01 = fadd double %x0, %x1
  ret double %x01
}

define double @extract_extract01_v8f64_fadd_f64_commute(<8 x double> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v8f64_fadd_f64_commute:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT:    addsd %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract01_v8f64_fadd_f64_commute:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    haddpd %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract01_v8f64_fadd_f64_commute:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vaddsd %xmm0, %xmm1, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract01_v8f64_fadd_f64_commute:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <8 x double> %x, i32 0
  %x1 = extractelement <8 x double> %x, i32 1
  %x01 = fadd double %x1, %x0
  ret double %x01
}

define float @extract_extract01_v16f32_fsub_f32(<16 x float> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v16f32_fsub_f32:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT:    subss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract01_v16f32_fsub_f32:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    hsubps %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract01_v16f32_fsub_f32:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT:    vsubss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract01_v16f32_fsub_f32:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhsubps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <16 x float> %x, i32 0
  %x1 = extractelement <16 x float> %x, i32 1
  %x01 = fsub float %x0, %x1
  ret float %x01
}

define float @extract_extract01_v16f32_fsub_f32_commute(<16 x float> %x) {
; SSE3-LABEL: extract_extract01_v16f32_fsub_f32_commute:
; SSE3:       # %bb.0:
; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT:    subss %xmm0, %xmm1
; SSE3-NEXT:    movaps %xmm1, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: extract_extract01_v16f32_fsub_f32_commute:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT:    vsubss %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %x0 = extractelement <16 x float> %x, i32 0
  %x1 = extractelement <16 x float> %x, i32 1
  %x01 = fsub float %x1, %x0
  ret float %x01
}

define double @extract_extract01_v8f64_fsub_f64(<8 x double> %x) {
; SSE3-SLOW-LABEL: extract_extract01_v8f64_fsub_f64:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movapd %xmm0, %xmm1
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT:    subsd %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract01_v8f64_fsub_f64:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    hsubpd %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract01_v8f64_fsub_f64:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vsubsd %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract01_v8f64_fsub_f64:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhsubpd %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <8 x double> %x, i32 0
  %x1 = extractelement <8 x double> %x, i32 1
  %x01 = fsub double %x0, %x1
  ret double %x01
}

define double @extract_extract01_v8f64_fsub_f64_commute(<8 x double> %x) {
; SSE3-LABEL: extract_extract01_v8f64_fsub_f64_commute:
; SSE3:       # %bb.0:
; SSE3-NEXT:    movapd %xmm0, %xmm1
; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-NEXT:    subsd %xmm0, %xmm1
; SSE3-NEXT:    movapd %xmm1, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: extract_extract01_v8f64_fsub_f64_commute:
; AVX:       # %bb.0:
; AVX-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vsubsd %xmm0, %xmm1, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %x0 = extractelement <8 x double> %x, i32 0
  %x1 = extractelement <8 x double> %x, i32 1
  %x01 = fsub double %x1, %x0
  ret double %x01
}

; Check output when 1 or both extracts have extra uses.

define float @extract_extract01_v4f32_fadd_f32_uses1(<4 x float> %x, ptr %p) {
; SSE3-SLOW-LABEL: extract_extract01_v4f32_fadd_f32_uses1:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movss %xmm0, (%rdi)
; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract01_v4f32_fadd_f32_uses1:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    movss %xmm0, (%rdi)
; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract01_v4f32_fadd_f32_uses1:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vmovss %xmm0, (%rdi)
; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract01_v4f32_fadd_f32_uses1:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vmovss %xmm0, (%rdi)
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <4 x float> %x, i32 0
  store float %x0, ptr %p
  %x1 = extractelement <4 x float> %x, i32 1
  %x01 = fadd float %x0, %x1
  ret float %x01
}

define float @extract_extract01_v4f32_fadd_f32_uses2(<4 x float> %x, ptr %p) {
; SSE3-SLOW-LABEL: extract_extract01_v4f32_fadd_f32_uses2:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT:    movss %xmm1, (%rdi)
; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: extract_extract01_v4f32_fadd_f32_uses2:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-FAST-NEXT:    movss %xmm1, (%rdi)
; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: extract_extract01_v4f32_fadd_f32_uses2:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT:    vmovss %xmm1, (%rdi)
; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: extract_extract01_v4f32_fadd_f32_uses2:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vextractps $1, %xmm0, (%rdi)
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    retq
  %x0 = extractelement <4 x float> %x, i32 0
  %x1 = extractelement <4 x float> %x, i32 1
  store float %x1, ptr %p
  %x01 = fadd float %x0, %x1
  ret float %x01
}

define float @extract_extract01_v4f32_fadd_f32_uses3(<4 x float> %x, ptr %p1, ptr %p2) {
; SSE3-LABEL: extract_extract01_v4f32_fadd_f32_uses3:
; SSE3:       # %bb.0:
; SSE3-NEXT:    movss %xmm0, (%rdi)
; SSE3-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-NEXT:    movss %xmm1, (%rsi)
; SSE3-NEXT:    addss %xmm1, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: extract_extract01_v4f32_fadd_f32_uses3:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovss %xmm0, (%rdi)
; AVX-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT:    vmovss %xmm1, (%rsi)
; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %x0 = extractelement <4 x float> %x, i32 0
  store float %x0, ptr %p1
  %x1 = extractelement <4 x float> %x, i32 1
  store float %x1, ptr %p2
  %x01 = fadd float %x0, %x1
  ret float %x01
}

; Repeat tests from general reductions to verify output for hoppy targets:
; PR38971: https://bugs.llvm.org/show_bug.cgi?id=38971

declare float @llvm.vector.reduce.fadd.f32.v8f32(float, <8 x float>)
declare double @llvm.vector.reduce.fadd.f64.v4f64(double, <4 x double>)

define float @fadd_reduce_v8f32(float %a0, <8 x float> %a1) {
; SSE3-SLOW-LABEL: fadd_reduce_v8f32:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    addps %xmm2, %xmm1
; SSE3-SLOW-NEXT:    movaps %xmm1, %xmm2
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE3-SLOW-NEXT:    addps %xmm1, %xmm2
; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE3-SLOW-NEXT:    addss %xmm2, %xmm1
; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: fadd_reduce_v8f32:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    haddps %xmm1, %xmm2
; SSE3-FAST-NEXT:    haddps %xmm2, %xmm2
; SSE3-FAST-NEXT:    haddps %xmm2, %xmm2
; SSE3-FAST-NEXT:    addss %xmm2, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: fadd_reduce_v8f32:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX-SLOW-NEXT:    vaddps %xmm2, %xmm1, %xmm1
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-SLOW-NEXT:    vaddps %xmm2, %xmm1, %xmm1
; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-SLOW-NEXT:    vaddss %xmm2, %xmm1, %xmm1
; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: fadd_reduce_v8f32:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX-FAST-NEXT:    vhaddps %xmm1, %xmm2, %xmm1
; AVX-FAST-NEXT:    vhaddps %xmm1, %xmm1, %xmm1
; AVX-FAST-NEXT:    vhaddps %xmm1, %xmm1, %xmm1
; AVX-FAST-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %r = call fast float @llvm.vector.reduce.fadd.f32.v8f32(float %a0, <8 x float> %a1)
  ret float %r
}

define double @fadd_reduce_v4f64(double %a0, <4 x double> %a1) {
; SSE3-SLOW-LABEL: fadd_reduce_v4f64:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    addpd %xmm2, %xmm1
; SSE3-SLOW-NEXT:    movapd %xmm1, %xmm2
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE3-SLOW-NEXT:    addsd %xmm1, %xmm2
; SSE3-SLOW-NEXT:    addsd %xmm2, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: fadd_reduce_v4f64:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    haddpd %xmm1, %xmm2
; SSE3-FAST-NEXT:    haddpd %xmm2, %xmm2
; SSE3-FAST-NEXT:    addsd %xmm2, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: fadd_reduce_v4f64:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX-SLOW-NEXT:    vaddpd %xmm2, %xmm1, %xmm1
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-SLOW-NEXT:    vaddsd %xmm2, %xmm1, %xmm1
; AVX-SLOW-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: fadd_reduce_v4f64:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX-FAST-NEXT:    vhaddpd %xmm1, %xmm2, %xmm1
; AVX-FAST-NEXT:    vhaddpd %xmm1, %xmm1, %xmm1
; AVX-FAST-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %r = call fast double @llvm.vector.reduce.fadd.f64.v4f64(double %a0, <4 x double> %a1)
  ret double %r
}

define float @PR39936_v8f32(<8 x float>) {
; SSSE3-SLOW-LABEL: PR39936_v8f32:
; SSSE3-SLOW:       # %bb.0:
; SSSE3-SLOW-NEXT:    haddps %xmm1, %xmm0
; SSSE3-SLOW-NEXT:    movaps %xmm0, %xmm1
; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[2,3]
; SSSE3-SLOW-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,3,2,3]
; SSSE3-SLOW-NEXT:    addps %xmm1, %xmm0
; SSSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSSE3-SLOW-NEXT:    addss %xmm1, %xmm0
; SSSE3-SLOW-NEXT:    retq
;
; SSSE3-FAST-LABEL: PR39936_v8f32:
; SSSE3-FAST:       # %bb.0:
; SSSE3-FAST-NEXT:    haddps %xmm1, %xmm0
; SSSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSSE3-FAST-NEXT:    retq
;
; SSE3-SLOW-LABEL: PR39936_v8f32:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    haddps %xmm1, %xmm0
; SSE3-SLOW-NEXT:    haddps %xmm0, %xmm0
; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: PR39936_v8f32:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    haddps %xmm1, %xmm0
; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: PR39936_v8f32:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX-SLOW-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: PR39936_v8f32:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX-FAST-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %2 = shufflevector <8 x float> %0, <8 x float> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 undef, i32 undef, i32 undef, i32 undef>
  %3 = shufflevector <8 x float> %0, <8 x float> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
  %4 = fadd <8 x float> %2, %3
  %5 = shufflevector <8 x float> %4, <8 x float> undef, <8 x i32> <i32 0, i32 2, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %6 = shufflevector <8 x float> %4, <8 x float> undef, <8 x i32> <i32 1, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %7 = fadd <8 x float> %5, %6
  %8 = shufflevector <8 x float> %7, <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %9 = fadd <8 x float> %7, %8
  %10 = extractelement <8 x float> %9, i32 0
  ret float %10
}

define float @hadd32_4(<4 x float> %x225) {
; SSE3-SLOW-LABEL: hadd32_4:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movaps %xmm0, %xmm1
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT:    addps %xmm1, %xmm0
; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: hadd32_4:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    movaps %xmm0, %xmm1
; SSE3-FAST-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-FAST-NEXT:    addps %xmm1, %xmm0
; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: hadd32_4:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: hadd32_4:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-FAST-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    retq
  %x226 = shufflevector <4 x float> %x225, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  %x227 = fadd <4 x float> %x225, %x226
  %x228 = shufflevector <4 x float> %x227, <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  %x229 = fadd <4 x float> %x227, %x228
  %x230 = extractelement <4 x float> %x229, i32 0
  ret float %x230
}

define float @hadd32_8(<8 x float> %x225) {
; SSE3-SLOW-LABEL: hadd32_8:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movaps %xmm0, %xmm1
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT:    addps %xmm1, %xmm0
; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: hadd32_8:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    movaps %xmm0, %xmm1
; SSE3-FAST-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-FAST-NEXT:    addps %xmm1, %xmm0
; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: hadd32_8:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: hadd32_8:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]

define float @hadd32_16(<16 x float> %x225) {
; SSE3-SLOW-LABEL: hadd32_16:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movaps %xmm0, %xmm1
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT:    addps %xmm1, %xmm0
; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: hadd32_16:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    movaps %xmm0, %xmm1
; SSE3-FAST-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-FAST-NEXT:    addps %xmm1, %xmm0
; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: hadd32_16:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: hadd32_16:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-FAST-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x226 = shufflevector <16 x float> %x225, <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %x227 = fadd <16 x float> %x225, %x226
  %x228 = shufflevector <16 x float> %x227, <16 x float> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %x229 = fadd <16 x float> %x227, %x228
  %x230 = extractelement <16 x float> %x229, i32 0
  ret float %x230
}

define float @hadd32_4_optsize(<4 x float> %x225) optsize {
; SSE3-LABEL: hadd32_4_optsize:
; SSE3:       # %bb.0:
; SSE3-NEXT:    movaps %xmm0, %xmm1
; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-NEXT:    addps %xmm1, %xmm0
; SSE3-NEXT:    haddps %xmm0, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: hadd32_4_optsize:
; AVX:       # %bb.0:
; AVX-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %x226 = shufflevector <4 x float> %x225, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  %x227 = fadd <4 x float> %x225, %x226
  %x228 = shufflevector <4 x float> %x227, <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  %x229 = fadd <4 x float> %x227, %x228
  %x230 = extractelement <4 x float> %x229, i32 0
  ret float %x230
}

define float @hadd32_8_optsize(<8 x float> %x225) optsize {
; SSE3-LABEL: hadd32_8_optsize:
; SSE3:       # %bb.0:
; SSE3-NEXT:    movaps %xmm0, %xmm1
; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-NEXT:    addps %xmm1, %xmm0
; SSE3-NEXT:    haddps %xmm0, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: hadd32_8_optsize:
; AVX:       # %bb.0:
; AVX-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %x226 = shufflevector <8 x float> %x225, <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %x227 = fadd <8 x float> %x225, %x226
  %x228 = shufflevector <8 x float> %x227, <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %x229 = fadd <8 x float> %x227, %x228
  %x230 = extractelement <8 x float> %x229, i32 0
  ret float %x230
}

define float @hadd32_16_optsize(<16 x float> %x225) optsize {
; SSE3-LABEL: hadd32_16_optsize:
; SSE3:       # %bb.0:
; SSE3-NEXT:    movaps %xmm0, %xmm1
; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-NEXT:    addps %xmm1, %xmm0
; SSE3-NEXT:    haddps %xmm0, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: hadd32_16_optsize:
; AVX:       # %bb.0:
; AVX-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %x226 = shufflevector <16 x float> %x225, <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %x227 = fadd <16 x float> %x225, %x226
  %x228 = shufflevector <16 x float> %x227, <16 x float> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %x229 = fadd <16 x float> %x227, %x228
  %x230 = extractelement <16 x float> %x229, i32 0
  ret float %x230
}

define float @hadd32_4_pgso(<4 x float> %x225) !prof !14 {
; SSE3-LABEL: hadd32_4_pgso:
; SSE3:       # %bb.0:
; SSE3-NEXT:    movaps %xmm0, %xmm1
; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-NEXT:    addps %xmm1, %xmm0
; SSE3-NEXT:    haddps %xmm0, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: hadd32_4_pgso:
; AVX:       # %bb.0:
; AVX-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %x226 = shufflevector <4 x float> %x225, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  %x227 = fadd <4 x float> %x225, %x226
  %x228 = shufflevector <4 x float> %x227, <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  %x229 = fadd <4 x float> %x227, %x228
  %x230 = extractelement <4 x float> %x229, i32 0
  ret float %x230
}

define float @hadd32_8_pgso(<8 x float> %x225) !prof !14 {
; SSE3-LABEL: hadd32_8_pgso:
; SSE3:       # %bb.0:
; SSE3-NEXT:    movaps %xmm0, %xmm1
; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-NEXT:    addps %xmm1, %xmm0
; SSE3-NEXT:    haddps %xmm0, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: hadd32_8_pgso:
; AVX:       # %bb.0:
; AVX-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %x226 = shufflevector <8 x float> %x225, <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %x227 = fadd <8 x float> %x225, %x226
  %x228 = shufflevector <8 x float> %x227, <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %x229 = fadd <8 x float> %x227, %x228
  %x230 = extractelement <8 x float> %x229, i32 0
  ret float %x230
}

define float @hadd32_16_pgso(<16 x float> %x225) !prof !14 {
; SSE3-LABEL: hadd32_16_pgso:
; SSE3:       # %bb.0:
; SSE3-NEXT:    movaps %xmm0, %xmm1
; SSE3-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-NEXT:    addps %xmm1, %xmm0
; SSE3-NEXT:    haddps %xmm0, %xmm0
; SSE3-NEXT:    retq
;
; AVX-LABEL: hadd32_16_pgso:
; AVX:       # %bb.0:
; AVX-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %x226 = shufflevector <16 x float> %x225, <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %x227 = fadd <16 x float> %x225, %x226
  %x228 = shufflevector <16 x float> %x227, <16 x float> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %x229 = fadd <16 x float> %x227, %x228
  %x230 = extractelement <16 x float> %x229, i32 0
  ret float %x230
}
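
; The optsize and pgso variants above always use haddps/vhaddps for the final
; step of the reduction, with or without fast-hops, presumably because the
; horizontal op replaces a movshdup+addss pair with a single instruction. The
; pgso functions rely on the !prof metadata at the bottom of this file (entry
; count 0 measured against the ProfileSummary) to be treated as cold and
; therefore size-optimized.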

define float @partial_reduction_fadd_v8f32(<8 x float> %x) {
; SSE3-SLOW-LABEL: partial_reduction_fadd_v8f32:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movaps %xmm0, %xmm1
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT:    addps %xmm1, %xmm0
; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: partial_reduction_fadd_v8f32:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    movaps %xmm0, %xmm1
; SSE3-FAST-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-FAST-NEXT:    addps %xmm1, %xmm0
; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: partial_reduction_fadd_v8f32:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: partial_reduction_fadd_v8f32:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x23 = shufflevector <8 x float> %x, <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %x0213 = fadd <8 x float> %x, %x23
  %x13 = shufflevector <8 x float> %x0213, <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %x0123 = fadd nsz reassoc <8 x float> %x0213, %x13
  %r = extractelement <8 x float> %x0123, i32 0
  ret float %r
}

; Negative test - only the flags on the final math op in the
; sequence determine whether we can transform to horizontal ops.

define float @partial_reduction_fadd_v8f32_wrong_flags(<8 x float> %x) {
; SSE3-SLOW-LABEL: partial_reduction_fadd_v8f32_wrong_flags:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movaps %xmm0, %xmm1
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT:    addps %xmm1, %xmm0
; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: partial_reduction_fadd_v8f32_wrong_flags:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    movaps %xmm0, %xmm1
; SSE3-FAST-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-FAST-NEXT:    addps %xmm1, %xmm0
; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: partial_reduction_fadd_v8f32_wrong_flags:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: partial_reduction_fadd_v8f32_wrong_flags:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-FAST-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x23 = shufflevector <8 x float> %x, <8 x float> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %x0213 = fadd fast <8 x float> %x, %x23
  %x13 = shufflevector <8 x float> %x0213, <8 x float> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %x0123 = fadd ninf nnan <8 x float> %x0213, %x13
  %r = extractelement <8 x float> %x0123, i32 0
  ret float %r
}
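
; In the wrong_flags test a final vhaddps still appears on AVX-FAST: a single
; horizontal add of adjacent lanes is an exact rewrite of the last
; shuffle+fadd, so it needs no reassociation. What the missing reassoc/nsz
; flags appear to block is folding the whole chain into back-to-back
; horizontal ops, as happens in partial_reduction_fadd_v8f32 above.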

define float @partial_reduction_fadd_v16f32(<16 x float> %x) {
; SSE3-SLOW-LABEL: partial_reduction_fadd_v16f32:
; SSE3-SLOW:       # %bb.0:
; SSE3-SLOW-NEXT:    movaps %xmm0, %xmm1
; SSE3-SLOW-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-SLOW-NEXT:    addps %xmm1, %xmm0
; SSE3-SLOW-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE3-SLOW-NEXT:    addss %xmm1, %xmm0
; SSE3-SLOW-NEXT:    retq
;
; SSE3-FAST-LABEL: partial_reduction_fadd_v16f32:
; SSE3-FAST:       # %bb.0:
; SSE3-FAST-NEXT:    movaps %xmm0, %xmm1
; SSE3-FAST-NEXT:    unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE3-FAST-NEXT:    addps %xmm1, %xmm0
; SSE3-FAST-NEXT:    haddps %xmm0, %xmm0
; SSE3-FAST-NEXT:    retq
;
; AVX-SLOW-LABEL: partial_reduction_fadd_v16f32:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vshufpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-SLOW-NEXT:    vaddps %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-SLOW-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: partial_reduction_fadd_v16f32:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %x23 = shufflevector <16 x float> %x, <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %x0213 = fadd <16 x float> %x, %x23
  %x13 = shufflevector <16 x float> %x0213, <16 x float> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %x0123 = fadd reassoc nsz <16 x float> %x0213, %x13
  %r = extractelement <16 x float> %x0123, i32 0
  ret float %r
}

!llvm.module.flags = !{!0}
!0 = !{i32 1, !"ProfileSummary", !1}
!1 = !{!2, !3, !4, !5, !6, !7, !8, !9}
!2 = !{!"ProfileFormat", !"InstrProf"}
!3 = !{!"TotalCount", i64 10000}
!4 = !{!"MaxCount", i64 10}
!5 = !{!"MaxInternalCount", i64 1}
!6 = !{!"MaxFunctionCount", i64 1000}
!7 = !{!"NumCounts", i64 3}
!8 = !{!"NumFunctions", i64 3}
!9 = !{!"DetailedSummary", !10}
!10 = !{!11, !12, !13}
!11 = !{i32 10000, i64 100, i32 1}
!12 = !{i32 999000, i64 100, i32 1}
!13 = !{i32 999999, i64 1, i32 2}
!14 = !{!"function_entry_count", i64 0}
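
; The ProfileSummary module metadata above exists so that !14's
; function_entry_count of 0 marks the hadd32_*_pgso functions as cold;
; profile-guided size optimization treats cold functions like optsize ones,
; which appears to be why the _pgso and _optsize checks match exactly.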