; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s -check-prefixes=AVX512VL
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+avx512vbmi2 | FileCheck %s -check-prefixes=AVX512VBMI
; RUN: llc < %s -mtriple=x86_64-- -mcpu=znver4 | FileCheck %s -check-prefixes=ZNVER4

; i512 shifts hidden inside 512-bit vectors.
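;
; A constant 1-bit shift of an i512 held in a ZMM register decomposes into a
; per-lane funnel shift between each i64 lane and its neighbour; for shl:
;   r[i] = (a[i] << 1) | (a[i-1] >> 63)   (with a[-1] = 0)
; One lane of that recurrence written with the generic funnel-shift intrinsic
; (a sketch for illustration only, %hi/%lo are placeholder lane values; this
; is not part of the checked output):
;   %lane = call i64 @llvm.fshl.i64(i64 %hi, i64 %lo, i64 1)
; On AVX512VBMI2 targets this pattern lowers to a single vpshldq per lane.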

define <8 x i64> @shl_i512_1(<8 x i64> %a) {
; AVX512VL-LABEL: shl_i512_1:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    valignq {{.*#+}} zmm1 = zmm0[3,4,5,6,7,0,1,2]
; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm2
; AVX512VL-NEXT:    vpsllq $1, %xmm0, %xmm3
; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
; AVX512VL-NEXT:    vpsrlq $63, %xmm4, %xmm4
; AVX512VL-NEXT:    vpaddq %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT:    vpor %xmm4, %xmm2, %xmm2
; AVX512VL-NEXT:    vinserti128 $1, %xmm2, %ymm3, %ymm2
; AVX512VL-NEXT:    vextracti64x4 $1, %zmm0, %ymm3
; AVX512VL-NEXT:    vpaddq %ymm3, %ymm3, %ymm3
; AVX512VL-NEXT:    vpsrlq $63, %ymm1, %ymm1
; AVX512VL-NEXT:    vpor %ymm1, %ymm3, %ymm1
; AVX512VL-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm1
; AVX512VL-NEXT:    vpsrlq $63, %zmm0, %zmm2
; AVX512VL-NEXT:    vpshufd {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
; AVX512VL-NEXT:    vpaddq %zmm0, %zmm0, %zmm0
; AVX512VL-NEXT:    vporq %zmm2, %zmm0, %zmm0
; AVX512VL-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm1[0],zmm0[0],zmm1[2],zmm0[2],zmm1[4],zmm0[4],zmm1[6],zmm0[6]
; AVX512VL-NEXT:    retq
;
; AVX512VBMI-LABEL: shl_i512_1:
; AVX512VBMI:       # %bb.0:
; AVX512VBMI-NEXT:    vextracti32x4 $2, %zmm0, %xmm1
; AVX512VBMI-NEXT:    vextracti128 $1, %ymm0, %xmm2
; AVX512VBMI-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
; AVX512VBMI-NEXT:    vpshldq $1, %xmm3, %xmm2, %xmm3
; AVX512VBMI-NEXT:    vpsllq $1, %xmm0, %xmm4
; AVX512VBMI-NEXT:    vinserti128 $1, %xmm3, %ymm4, %ymm3
; AVX512VBMI-NEXT:    vinserti128 $1, %xmm1, %ymm2, %ymm1
; AVX512VBMI-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
; AVX512VBMI-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
; AVX512VBMI-NEXT:    vpshldq $1, %ymm1, %ymm2, %ymm1
; AVX512VBMI-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
; AVX512VBMI-NEXT:    vpshufd {{.*#+}} zmm2 = zmm0[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
; AVX512VBMI-NEXT:    vpshldq $1, %zmm0, %zmm2, %zmm0
; AVX512VBMI-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm1[0],zmm0[0],zmm1[2],zmm0[2],zmm1[4],zmm0[4],zmm1[6],zmm0[6]
; AVX512VBMI-NEXT:    retq
;
; ZNVER4-LABEL: shl_i512_1:
; ZNVER4:       # %bb.0:
; ZNVER4-NEXT:    vextracti32x4 $2, %zmm0, %xmm1
; ZNVER4-NEXT:    vextracti128 $1, %ymm0, %xmm2
; ZNVER4-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
; ZNVER4-NEXT:    vpsllq $1, %xmm0, %xmm4
; ZNVER4-NEXT:    vinserti128 $1, %xmm1, %ymm2, %ymm1
; ZNVER4-NEXT:    vpshldq $1, %xmm3, %xmm2, %xmm3
; ZNVER4-NEXT:    vextracti64x4 $1, %zmm0, %ymm2
; ZNVER4-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[2,3,2,3,6,7,6,7]
; ZNVER4-NEXT:    vpshldq $1, %ymm1, %ymm2, %ymm1
; ZNVER4-NEXT:    vinserti128 $1, %xmm3, %ymm4, %ymm3
; ZNVER4-NEXT:    vinserti64x4 $1, %ymm1, %zmm3, %zmm1
; ZNVER4-NEXT:    vpshufd {{.*#+}} zmm3 = zmm0[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
; ZNVER4-NEXT:    vpshldq $1, %zmm0, %zmm3, %zmm0
; ZNVER4-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm1[0],zmm0[0],zmm1[2],zmm0[2],zmm1[4],zmm0[4],zmm1[6],zmm0[6]
; ZNVER4-NEXT:    retq
  %d = bitcast <8 x i64> %a to i512
  %s = shl i512 %d, 1
  %r = bitcast i512 %s to <8 x i64>
  ret <8 x i64> %r
}

define <8 x i64> @lshr_i512_1(<8 x i64> %a) {
; AVX512VL-LABEL: lshr_i512_1:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT:    vextracti32x4 $2, %zmm0, %xmm2
; AVX512VL-NEXT:    vextracti32x4 $3, %zmm0, %xmm3
; AVX512VL-NEXT:    vpsllq $63, %xmm3, %xmm4
; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
; AVX512VL-NEXT:    vpsrlq $1, %xmm5, %xmm5
; AVX512VL-NEXT:    vpor %xmm5, %xmm4, %xmm4
; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
; AVX512VL-NEXT:    vpsrlq $1, %xmm3, %xmm3
; AVX512VL-NEXT:    vinserti128 $1, %xmm3, %ymm4, %ymm3
; AVX512VL-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512VL-NEXT:    vpsllq $63, %ymm1, %ymm1
; AVX512VL-NEXT:    vpshufd {{.*#+}} ymm2 = ymm0[2,3,2,3,6,7,6,7]
; AVX512VL-NEXT:    vpsrlq $1, %ymm2, %ymm2
; AVX512VL-NEXT:    vpor %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
; AVX512VL-NEXT:    vpsrlq $1, %zmm0, %zmm2
; AVX512VL-NEXT:    vpshufd {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
; AVX512VL-NEXT:    vpsllq $63, %zmm0, %zmm0
; AVX512VL-NEXT:    vporq %zmm2, %zmm0, %zmm0
; AVX512VL-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; AVX512VL-NEXT:    retq
;
; AVX512VBMI-LABEL: lshr_i512_1:
; AVX512VBMI:       # %bb.0:
; AVX512VBMI-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512VBMI-NEXT:    vextracti32x4 $3, %zmm0, %xmm2
; AVX512VBMI-NEXT:    vextracti32x4 $2, %zmm0, %xmm3
; AVX512VBMI-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[2,3,2,3]
; AVX512VBMI-NEXT:    vpshldq $63, %xmm4, %xmm2, %xmm4
; AVX512VBMI-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
; AVX512VBMI-NEXT:    vpsrlq $1, %xmm2, %xmm2
; AVX512VBMI-NEXT:    vinserti128 $1, %xmm2, %ymm4, %ymm2
; AVX512VBMI-NEXT:    vinserti128 $1, %xmm3, %ymm1, %ymm1
; AVX512VBMI-NEXT:    vpshufd {{.*#+}} ymm3 = ymm0[2,3,2,3,6,7,6,7]
; AVX512VBMI-NEXT:    vpshldq $63, %ymm3, %ymm1, %ymm1
; AVX512VBMI-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; AVX512VBMI-NEXT:    vpshufd {{.*#+}} zmm2 = zmm0[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
; AVX512VBMI-NEXT:    vpshldq $63, %zmm0, %zmm2, %zmm0
; AVX512VBMI-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; AVX512VBMI-NEXT:    retq
;
; ZNVER4-LABEL: lshr_i512_1:
; ZNVER4:       # %bb.0:
; ZNVER4-NEXT:    vextracti32x4 $2, %zmm0, %xmm3
; ZNVER4-NEXT:    vextracti128 $1, %ymm0, %xmm1
; ZNVER4-NEXT:    vextracti32x4 $3, %zmm0, %xmm2
; ZNVER4-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[2,3,2,3]
; ZNVER4-NEXT:    vinserti128 $1, %xmm3, %ymm1, %ymm1
; ZNVER4-NEXT:    vpshufd {{.*#+}} ymm3 = ymm0[2,3,2,3,6,7,6,7]
; ZNVER4-NEXT:    vpshldq $63, %xmm4, %xmm2, %xmm4
; ZNVER4-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
; ZNVER4-NEXT:    vpshldq $63, %ymm3, %ymm1, %ymm1
; ZNVER4-NEXT:    vpshufd {{.*#+}} zmm3 = zmm0[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
; ZNVER4-NEXT:    vpsrlq $1, %xmm2, %xmm2
; ZNVER4-NEXT:    vpshldq $63, %zmm0, %zmm3, %zmm0
; ZNVER4-NEXT:    vinserti128 $1, %xmm2, %ymm4, %ymm2
; ZNVER4-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; ZNVER4-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; ZNVER4-NEXT:    retq
  %d = bitcast <8 x i64> %a to i512
  %s = lshr i512 %d, 1
  %r = bitcast i512 %s to <8 x i64>
  ret <8 x i64> %r
}
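
; ashr lowers like lshr except in the topmost i64 lane, where the shifted-in
; bit must replicate the sign bit, so that lane uses an arithmetic shift
; (vpsraq) instead of a logical one (vpsrlq).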
define <8 x i64> @ashr_i512_1(<8 x i64> %a) {
; AVX512VL-LABEL: ashr_i512_1:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512VL-NEXT:    vextracti32x4 $2, %zmm0, %xmm2
; AVX512VL-NEXT:    vextracti32x4 $3, %zmm0, %xmm3
; AVX512VL-NEXT:    vpsllq $63, %xmm3, %xmm4
; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
; AVX512VL-NEXT:    vpsrlq $1, %xmm5, %xmm5
; AVX512VL-NEXT:    vpor %xmm5, %xmm4, %xmm4
; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
; AVX512VL-NEXT:    vpsraq $1, %xmm3, %xmm3
; AVX512VL-NEXT:    vinserti128 $1, %xmm3, %ymm4, %ymm3
; AVX512VL-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512VL-NEXT:    vpsllq $63, %ymm1, %ymm1
; AVX512VL-NEXT:    vpshufd {{.*#+}} ymm2 = ymm0[2,3,2,3,6,7,6,7]
; AVX512VL-NEXT:    vpsrlq $1, %ymm2, %ymm2
; AVX512VL-NEXT:    vpor %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT:    vinserti64x4 $1, %ymm3, %zmm1, %zmm1
; AVX512VL-NEXT:    vpsrlq $1, %zmm0, %zmm2
; AVX512VL-NEXT:    vpshufd {{.*#+}} zmm0 = zmm0[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
; AVX512VL-NEXT:    vpsllq $63, %zmm0, %zmm0
; AVX512VL-NEXT:    vporq %zmm2, %zmm0, %zmm0
; AVX512VL-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; AVX512VL-NEXT:    retq
;
; AVX512VBMI-LABEL: ashr_i512_1:
; AVX512VBMI:       # %bb.0:
; AVX512VBMI-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX512VBMI-NEXT:    vextracti32x4 $3, %zmm0, %xmm2
; AVX512VBMI-NEXT:    vextracti32x4 $2, %zmm0, %xmm3
; AVX512VBMI-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[2,3,2,3]
; AVX512VBMI-NEXT:    vpshldq $63, %xmm4, %xmm2, %xmm4
; AVX512VBMI-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
; AVX512VBMI-NEXT:    vpsraq $1, %xmm2, %xmm2
; AVX512VBMI-NEXT:    vinserti128 $1, %xmm2, %ymm4, %ymm2
; AVX512VBMI-NEXT:    vinserti128 $1, %xmm3, %ymm1, %ymm1
; AVX512VBMI-NEXT:    vpshufd {{.*#+}} ymm3 = ymm0[2,3,2,3,6,7,6,7]
; AVX512VBMI-NEXT:    vpshldq $63, %ymm3, %ymm1, %ymm1
; AVX512VBMI-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; AVX512VBMI-NEXT:    vpshufd {{.*#+}} zmm2 = zmm0[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
; AVX512VBMI-NEXT:    vpshldq $63, %zmm0, %zmm2, %zmm0
; AVX512VBMI-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; AVX512VBMI-NEXT:    retq
;
; ZNVER4-LABEL: ashr_i512_1:
; ZNVER4:       # %bb.0:
; ZNVER4-NEXT:    vextracti32x4 $2, %zmm0, %xmm3
; ZNVER4-NEXT:    vextracti128 $1, %ymm0, %xmm1
; ZNVER4-NEXT:    vextracti32x4 $3, %zmm0, %xmm2
; ZNVER4-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[2,3,2,3]
; ZNVER4-NEXT:    vinserti128 $1, %xmm3, %ymm1, %ymm1
; ZNVER4-NEXT:    vpshufd {{.*#+}} ymm3 = ymm0[2,3,2,3,6,7,6,7]
; ZNVER4-NEXT:    vpshldq $63, %xmm4, %xmm2, %xmm4
; ZNVER4-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
; ZNVER4-NEXT:    vpshldq $63, %ymm3, %ymm1, %ymm1
; ZNVER4-NEXT:    vpshufd {{.*#+}} zmm3 = zmm0[2,3,2,3,6,7,6,7,10,11,10,11,14,15,14,15]
; ZNVER4-NEXT:    vpsraq $1, %xmm2, %xmm2
; ZNVER4-NEXT:    vpshldq $63, %zmm0, %zmm3, %zmm0
; ZNVER4-NEXT:    vinserti128 $1, %xmm2, %ymm4, %ymm2
; ZNVER4-NEXT:    vinserti64x4 $1, %ymm2, %zmm1, %zmm1
; ZNVER4-NEXT:    vpunpcklqdq {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; ZNVER4-NEXT:    retq
  %d = bitcast <8 x i64> %a to i512
  %s = ashr i512 %d, 1
  %r = bitcast i512 %s to <8 x i64>
  ret <8 x i64> %r
}