; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64 | FileCheck %s --check-prefixes=SSE2
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=SSE42
; RUN: llc < %s -mtriple=x86_64-- -mcpu=btver2 | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-- -mcpu=knl | FileCheck %s --check-prefixes=AVX,AVX512F
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512VL

define <8 x i16> @umin_v8i16_as_umin_v8i8(<8 x i16> %a0, <8 x i16> %a1) nounwind {
; SSE2-LABEL: umin_v8i16_as_umin_v8i8:
; SSE2:       # %bb.0:
; SSE2-NEXT:    psrlw $8, %xmm0
; SSE2-NEXT:    psrlw $11, %xmm1
; SSE2-NEXT:    pminsw %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE42-LABEL: umin_v8i16_as_umin_v8i8:
; SSE42:       # %bb.0:
; SSE42-NEXT:    psrlw $8, %xmm0
; SSE42-NEXT:    psrlw $11, %xmm1
; SSE42-NEXT:    pminuw %xmm1, %xmm0
; SSE42-NEXT:    retq
;
; AVX-LABEL: umin_v8i16_as_umin_v8i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrlw $8, %xmm0, %xmm0
; AVX-NEXT:    vpsrlw $11, %xmm1, %xmm1
; AVX-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %x0 = lshr <8 x i16> %a0, splat (i16 8)
  %x1 = lshr <8 x i16> %a1, splat (i16 11)
  %r = call <8 x i16> @llvm.umin.v8i16(<8 x i16> %x0, <8 x i16> %x1)
  ret <8 x i16> %r
}

define <4 x i32> @umin_v4i32_as_umin_v4i8(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; SSE2-LABEL: umin_v4i32_as_umin_v4i8:
; SSE2:       # %bb.0:
; SSE2-NEXT:    psrld $30, %xmm0
; SSE2-NEXT:    psrld $29, %xmm1
; SSE2-NEXT:    pminub %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE42-LABEL: umin_v4i32_as_umin_v4i8:
; SSE42:       # %bb.0:
; SSE42-NEXT:    psrld $30, %xmm0
; SSE42-NEXT:    psrld $29, %xmm1
; SSE42-NEXT:    pminud %xmm1, %xmm0
; SSE42-NEXT:    retq
;
; AVX-LABEL: umin_v4i32_as_umin_v4i8:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrld $30, %xmm0, %xmm0
; AVX-NEXT:    vpsrld $29, %xmm1, %xmm1
; AVX-NEXT:    vpminud %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %x0 = lshr <4 x i32> %a0, splat (i32 30)
  %x1 = lshr <4 x i32> %a1, splat (i32 29)
  %r = call <4 x i32> @llvm.umin.v4i32(<4 x i32> %x0, <4 x i32> %x1)
  ret <4 x i32> %r
}

define <4 x i32> @umin_v4i32_as_umin_v4i16(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; SSE2-LABEL: umin_v4i32_as_umin_v4i16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    psrld $16, %xmm0
; SSE2-NEXT:    psrld $16, %xmm1
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    psubusw %xmm1, %xmm2
; SSE2-NEXT:    psubw %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE42-LABEL: umin_v4i32_as_umin_v4i16:
; SSE42:       # %bb.0:
; SSE42-NEXT:    psrld $16, %xmm0
; SSE42-NEXT:    psrld $16, %xmm1
; SSE42-NEXT:    pminud %xmm1, %xmm0
; SSE42-NEXT:    retq
;
; AVX-LABEL: umin_v4i32_as_umin_v4i16:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrld $16, %xmm0, %xmm0
; AVX-NEXT:    vpsrld $16, %xmm1, %xmm1
; AVX-NEXT:    vpminud %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %x0 = lshr <4 x i32> %a0, splat (i32 16)
  %x1 = lshr <4 x i32> %a1, splat (i32 16)
  %r = call <4 x i32> @llvm.umin.v4i32(<4 x i32> %x0, <4 x i32> %x1)
  ret <4 x i32> %r
}
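
; The shifts leave at most 15 and 1 significant bits per i64 lane, so a
; word-width min is sufficient; SSE2 has no pminuw, but the signed pminsw is
; safe here because both operands are known non-negative as i16.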
define <2 x i64> @umin_v2i64_as_umin_v2i16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE2-LABEL: umin_v2i64_as_umin_v2i16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    psrlq $49, %xmm0
; SSE2-NEXT:    psrlq $63, %xmm1
; SSE2-NEXT:    pminsw %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE42-LABEL: umin_v2i64_as_umin_v2i16:
; SSE42:       # %bb.0:
; SSE42-NEXT:    psrlq $49, %xmm0
; SSE42-NEXT:    psrlq $63, %xmm1
; SSE42-NEXT:    pminuw %xmm1, %xmm0
; SSE42-NEXT:    retq
;
; AVX1-LABEL: umin_v2i64_as_umin_v2i16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsrlq $49, %xmm0, %xmm0
; AVX1-NEXT:    vpsrlq $63, %xmm1, %xmm1
; AVX1-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: umin_v2i64_as_umin_v2i16:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsrlq $49, %xmm0, %xmm0
; AVX2-NEXT:    vpsrlq $63, %xmm1, %xmm1
; AVX2-NEXT:    vpminuw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: umin_v2i64_as_umin_v2i16:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vpsrlq $49, %xmm0, %xmm0
; AVX512F-NEXT:    vpsrlq $63, %xmm1, %xmm1
; AVX512F-NEXT:    vpminuq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT:    retq
;
; AVX512VL-LABEL: umin_v2i64_as_umin_v2i16:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vpsrlq $49, %xmm0, %xmm0
; AVX512VL-NEXT:    vpsrlq $63, %xmm1, %xmm1
; AVX512VL-NEXT:    vpminuq %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT:    retq
  %x0 = lshr <2 x i64> %a0, splat (i64 49)
  %x1 = lshr <2 x i64> %a1, splat (i64 63)
  %r = call <2 x i64> @llvm.umin.v2i64(<2 x i64> %x0, <2 x i64> %x1)
  ret <2 x i64> %r
}

define <2 x i64> @umin_v2i64_as_umin_v2i32(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; SSE2-LABEL: umin_v2i64_as_umin_v2i32:
; SSE2:       # %bb.0:
; SSE2-NEXT:    psrlq $33, %xmm0
; SSE2-NEXT:    psrlq $43, %xmm1
; SSE2-NEXT:    movdqa %xmm1, %xmm2
; SSE2-NEXT:    pcmpgtd %xmm0, %xmm2
; SSE2-NEXT:    pand %xmm2, %xmm0
; SSE2-NEXT:    pandn %xmm1, %xmm2
; SSE2-NEXT:    por %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSE42-LABEL: umin_v2i64_as_umin_v2i32:
; SSE42:       # %bb.0:
; SSE42-NEXT:    psrlq $33, %xmm0
; SSE42-NEXT:    psrlq $43, %xmm1
; SSE42-NEXT:    pminud %xmm1, %xmm0
; SSE42-NEXT:    retq
;
; AVX1-LABEL: umin_v2i64_as_umin_v2i32:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpsrlq $33, %xmm0, %xmm0
; AVX1-NEXT:    vpsrlq $43, %xmm1, %xmm1
; AVX1-NEXT:    vpminud %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: umin_v2i64_as_umin_v2i32:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsrlq $33, %xmm0, %xmm0
; AVX2-NEXT:    vpsrlq $43, %xmm1, %xmm1
; AVX2-NEXT:    vpminud %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: umin_v2i64_as_umin_v2i32:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vpsrlq $33, %xmm0, %xmm0
; AVX512F-NEXT:    vpsrlq $43, %xmm1, %xmm1
; AVX512F-NEXT:    vpminuq %zmm1, %zmm0, %zmm0
; AVX512F-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT:    retq
;
; AVX512VL-LABEL: umin_v2i64_as_umin_v2i32:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vpsrlq $33, %xmm0, %xmm0
; AVX512VL-NEXT:    vpsrlq $43, %xmm1, %xmm1
; AVX512VL-NEXT:    vpminuq %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT:    retq
  %x0 = lshr <2 x i64> %a0, splat (i64 33)
  %x1 = lshr <2 x i64> %a1, splat (i64 43)
  %r = call <2 x i64> @llvm.umin.v2i64(<2 x i64> %x0, <2 x i64> %x1)
  ret <2 x i64> %r
}
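
; The same trick with a constant operand: 65530 and the shifted values all fit
; in 16 bits, so pminuw works per i64 lane; AVX1 lacks 256-bit integer min and
; splits the operation across the two 128-bit halves.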
define <4 x i64> @umin_v4i64_as_umin_v4i16(<4 x i64> %a0) nounwind {
; SSE2-LABEL: umin_v4i64_as_umin_v4i16:
; SSE2:       # %bb.0:
; SSE2-NEXT:    psrlq $48, %xmm1
; SSE2-NEXT:    psrlq $48, %xmm0
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [65530,65530]
; SSE2-NEXT:    movdqa %xmm0, %xmm3
; SSE2-NEXT:    psubusw %xmm2, %xmm3
; SSE2-NEXT:    psubw %xmm3, %xmm0
; SSE2-NEXT:    movdqa %xmm1, %xmm3
; SSE2-NEXT:    psubusw %xmm2, %xmm3
; SSE2-NEXT:    psubw %xmm3, %xmm1
; SSE2-NEXT:    retq
;
; SSE42-LABEL: umin_v4i64_as_umin_v4i16:
; SSE42:       # %bb.0:
; SSE42-NEXT:    psrlq $48, %xmm1
; SSE42-NEXT:    psrlq $48, %xmm0
; SSE42-NEXT:    movdqa {{.*#+}} xmm2 = [65530,65530]
; SSE42-NEXT:    pminuw %xmm2, %xmm0
; SSE42-NEXT:    pminuw %xmm2, %xmm1
; SSE42-NEXT:    retq
;
; AVX1-LABEL: umin_v4i64_as_umin_v4i16:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovdqa {{.*#+}} xmm2 = [65530,65530]
; AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm1
; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT:    vpsrlq $48, %xmm0, %xmm0
; AVX1-NEXT:    vpminuw %xmm2, %xmm0, %xmm0
; AVX1-NEXT:    vpminuw %xmm2, %xmm1, %xmm1
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: umin_v4i64_as_umin_v4i16:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsrlq $48, %ymm0, %ymm0
; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [65530,65530,65530,65530]
; AVX2-NEXT:    vpminuw %ymm1, %ymm0, %ymm0
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: umin_v4i64_as_umin_v4i16:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vpsrlq $48, %ymm0, %ymm0
; AVX512F-NEXT:    vpminuq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %zmm0, %zmm0
; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512F-NEXT:    retq
;
; AVX512VL-LABEL: umin_v4i64_as_umin_v4i16:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vpsrlq $48, %ymm0, %ymm0
; AVX512VL-NEXT:    vpminuq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %ymm0, %ymm0
; AVX512VL-NEXT:    retq
  %x0 = lshr <4 x i64> %a0, splat (i64 48)
  %r = call <4 x i64> @llvm.umin.v4i64(<4 x i64> %x0, <4 x i64> splat (i64 65530))
  ret <4 x i64> %r
}