;; GCC machine description for SSE instructions
;; Copyright (C) 2005-2022 Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3.  If not see
;; <http://www.gnu.org/licenses/>.

;; Unspec codes for SSE-family operations that have no direct RTL
;; equivalent.  Grouped by the ISA extension that introduced them.
(define_c_enum "unspec" [
  ;; SSE
  UNSPEC_MOVNT

  ;; SSE2
  UNSPEC_MOVDI_TO_SSE

  ;; SSE3
  UNSPEC_LDDQU

  ;; SSSE3
  UNSPEC_PSIGN
  UNSPEC_PALIGNR

  ;; For SSE4A support
  UNSPEC_EXTRQI
  UNSPEC_EXTRQ
  UNSPEC_INSERTQI
  UNSPEC_INSERTQ

  ;; For SSE4.1 support
  UNSPEC_INSERTPS
  UNSPEC_DP
  UNSPEC_MOVNTDQA
  UNSPEC_MPSADBW
  UNSPEC_PHMINPOSUW
  UNSPEC_PTEST

  ;; For SSE4.2 support
  UNSPEC_PCMPESTR
  UNSPEC_PCMPISTR

  ;; For FMA4 support
  UNSPEC_FMADDSUB
  UNSPEC_XOP_UNSIGNED_CMP
  UNSPEC_XOP_TRUEFALSE
  UNSPEC_FRCZ

  ;; For AES support
  UNSPEC_AESENC
  UNSPEC_AESENCLAST
  UNSPEC_AESDEC
  UNSPEC_AESDECLAST
  UNSPEC_AESIMC
  UNSPEC_AESKEYGENASSIST

  ;; For PCLMUL support
  UNSPEC_PCLMUL

  ;; For AVX support
  UNSPEC_VPERMIL
  UNSPEC_VPERMIL2
  UNSPEC_VPERMIL2F128
  UNSPEC_CAST
  UNSPEC_VTESTP
  UNSPEC_VCVTPH2PS
  UNSPEC_VCVTPS2PH

  ;; For AVX2 support
  UNSPEC_VPERMVAR
  UNSPEC_VPERMTI
  UNSPEC_GATHER
  UNSPEC_VSIBADDR

  ;; For AVX512F support
  UNSPEC_VPERMT2
  UNSPEC_UNSIGNED_FIX_NOTRUNC
  UNSPEC_UNSIGNED_PCMP
  UNSPEC_TESTM
  UNSPEC_TESTNM
  UNSPEC_SCATTER
  UNSPEC_RCP14
  UNSPEC_RSQRT14
  UNSPEC_FIXUPIMM
  UNSPEC_VTERNLOG
  UNSPEC_GETEXP
  UNSPEC_GETMANT
  UNSPEC_ALIGN
  UNSPEC_CONFLICT
  UNSPEC_COMPRESS
  UNSPEC_COMPRESS_STORE
  UNSPEC_EXPAND

  ;; Mask operations
  UNSPEC_MASKOP
  UNSPEC_KORTEST
  UNSPEC_KTEST

  ;; Mask load
  UNSPEC_MASKLOAD

  ;; For embed. rounding feature
  UNSPEC_EMBEDDED_ROUNDING

  ;; For AVX512PF support
  UNSPEC_GATHER_PREFETCH
  UNSPEC_SCATTER_PREFETCH

  ;; For AVX512ER support
  UNSPEC_EXP2
  UNSPEC_RCP28
  UNSPEC_RSQRT28

  ;; For SHA support
  UNSPEC_SHA1MSG1
  UNSPEC_SHA1MSG2
  UNSPEC_SHA1NEXTE
  UNSPEC_SHA1RNDS4
  UNSPEC_SHA256MSG1
  UNSPEC_SHA256MSG2
  UNSPEC_SHA256RNDS2

  ;; For AVX512BW support
  UNSPEC_DBPSADBW
  UNSPEC_PMADDUBSW512
  UNSPEC_PMADDWD512
  UNSPEC_PSHUFHW
  UNSPEC_PSHUFLW
  UNSPEC_CVTINT2MASK

  ;; For AVX512DQ support
  UNSPEC_REDUCE
  UNSPEC_FPCLASS
  UNSPEC_RANGE

  ;; For AVX512IFMA support
  UNSPEC_VPMADD52LUQ
  UNSPEC_VPMADD52HUQ

  ;; For AVX512VBMI support
  UNSPEC_VPMULTISHIFT

  ;; For AVX5124FMAPS/AVX5124VNNIW support
  UNSPEC_VP4FMADD
  UNSPEC_VP4FNMADD
  UNSPEC_VP4DPWSSD
  UNSPEC_VP4DPWSSDS

  ;; For GFNI support
  UNSPEC_GF2P8AFFINEINV
  UNSPEC_GF2P8AFFINE
  UNSPEC_GF2P8MUL

  ;; For AVX512VBMI2 support
  UNSPEC_VPSHLD
  UNSPEC_VPSHRD
  UNSPEC_VPSHRDV
  UNSPEC_VPSHLDV

  ;; For AVX512VNNI support
  UNSPEC_VPMADDUBSWACCD
  UNSPEC_VPMADDUBSWACCSSD
  UNSPEC_VPMADDWDACCD
  UNSPEC_VPMADDWDACCSSD

  ;; For VAES support
  UNSPEC_VAESDEC
  UNSPEC_VAESDECLAST
  UNSPEC_VAESENC
  UNSPEC_VAESENCLAST

  ;; For VPCLMULQDQ support
  UNSPEC_VPCLMULQDQ

  ;; For AVX512BITALG support
  UNSPEC_VPSHUFBIT

  ;; For VP2INTERSECT support
  UNSPEC_VP2INTERSECT

  ;; For AVX512BF16 support
  UNSPEC_VCVTNE2PS2BF16
  UNSPEC_VCVTNEPS2BF16
  UNSPEC_VDPBF16PS

  ;; For AVX512FP16 support
  UNSPEC_COMPLEX_FMA
  UNSPEC_COMPLEX_FMA_PAIR
  UNSPEC_COMPLEX_FCMA
  UNSPEC_COMPLEX_FCMA_PAIR
  UNSPEC_COMPLEX_FMUL
  UNSPEC_COMPLEX_FCMUL
  UNSPEC_COMPLEX_MASK
])

;; Volatile unspecs: operations with side effects that must not be
;; reordered, duplicated or deleted by the optimizers.
(define_c_enum "unspecv" [
  UNSPECV_LDMXCSR
  UNSPECV_STMXCSR
  UNSPECV_CLFLUSH
  UNSPECV_MONITOR
  UNSPECV_MWAIT
  UNSPECV_VZEROALL

  ;; For KEYLOCKER
  UNSPECV_LOADIWKEY
  UNSPECV_AESDEC128KLU8
  UNSPECV_AESENC128KLU8
  UNSPECV_AESDEC256KLU8
  UNSPECV_AESENC256KLU8
  UNSPECV_AESDECWIDE128KLU8
  UNSPECV_AESENCWIDE128KLU8
  UNSPECV_AESDECWIDE256KLU8
  UNSPECV_AESENCWIDE256KLU8
  UNSPECV_ENCODEKEY128U32
  UNSPECV_ENCODEKEY256U32
])

;; All vector modes including V?TImode, used in move patterns.
(define_mode_iterator VMOVE
  [(V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI
   (V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI
   (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
   (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI
   (V4TI "TARGET_AVX512F") (V2TI "TARGET_AVX") V1TI
   (V32HF "TARGET_AVX512F") (V16HF "TARGET_AVX") V8HF
   (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
   (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") V2DF])

;; All AVX-512{F,VL} vector modes without HF.  Supposed TARGET_AVX512F baseline.
(define_mode_iterator V48_AVX512VL
  [V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
   V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")
   V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
   V8DF (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])

(define_mode_iterator V48_256_512_AVX512VL
  [V16SI (V8SI "TARGET_AVX512VL")
   V8DI (V4DI "TARGET_AVX512VL")
   V16SF (V8SF "TARGET_AVX512VL")
   V8DF (V4DF "TARGET_AVX512VL")])

;; All AVX-512{F,VL} vector modes.  Supposed TARGET_AVX512F baseline.
(define_mode_iterator V48H_AVX512VL
  [V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
   V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")
   (V32HF "TARGET_AVX512FP16")
   (V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
   (V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
   V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
   V8DF (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])

;; 1,2 byte AVX-512{BW,VL} vector modes.  Supposed TARGET_AVX512BW baseline.
(define_mode_iterator VI12_AVX512VL
  [V64QI (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")
   V32HI (V16HI "TARGET_AVX512VL") (V8HI "TARGET_AVX512VL")])

(define_mode_iterator VI12HF_AVX512VL
  [V64QI (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")
   V32HI (V16HI "TARGET_AVX512VL") (V8HI "TARGET_AVX512VL")
   V32HF (V16HF "TARGET_AVX512VL") (V8HF "TARGET_AVX512VL")])

;; Same iterator, but without supposed TARGET_AVX512BW
(define_mode_iterator VI12_AVX512VLBW
  [(V64QI "TARGET_AVX512BW") (V16QI "TARGET_AVX512VL")
   (V32QI "TARGET_AVX512VL && TARGET_AVX512BW") (V32HI "TARGET_AVX512BW")
   (V16HI "TARGET_AVX512VL") (V8HI "TARGET_AVX512VL")])

(define_mode_iterator VI1_AVX512VL
  [V64QI (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")])

;; All vector modes
(define_mode_iterator V
  [(V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI
   (V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI
   (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
   (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI
   (V32HF "TARGET_AVX512F") (V16HF "TARGET_AVX") V8HF
   (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
   (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])

;; All 128bit vector modes
(define_mode_iterator V_128
  [V16QI V8HI V4SI V2DI V4SF (V2DF "TARGET_SSE2")])

;; All 256bit vector modes
(define_mode_iterator V_256
  [V32QI V16HI V8SI V4DI V8SF V4DF])

;; All 256bit vector modes including HF vector mode
(define_mode_iterator V_256H
  [V32QI V16HI V8SI V4DI V8SF V4DF V16HF])

;; All 128bit and 256bit vector modes
(define_mode_iterator V_128_256
  [V32QI V16QI V16HI V8HI V8SI V4SI V4DI V2DI
   V16HF V8HF V8SF V4SF V4DF V2DF])

;; All 512bit vector modes
(define_mode_iterator V_512
  [V64QI V32HI V16SI V8DI V16SF V8DF])

;; All 256bit and 512bit vector modes
(define_mode_iterator V_256_512
  [V32QI V16HI V16HF V8SI V4DI V8SF V4DF
   (V64QI "TARGET_AVX512F") (V32HI "TARGET_AVX512F") (V32HF "TARGET_AVX512F")
   (V16SI "TARGET_AVX512F") (V8DI "TARGET_AVX512F") (V16SF "TARGET_AVX512F")
   (V8DF "TARGET_AVX512F")])

;; All vector float modes
(define_mode_iterator VF
  [(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
   (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])

(define_mode_iterator VFH
  [(V32HF "TARGET_AVX512FP16")
   (V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
   (V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
   (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
   (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])

;; 128-, 256- and 512-bit float vector modes for bitwise operations
(define_mode_iterator VFB
  [(V32HF "TARGET_AVX512FP16") (V16HF "TARGET_AVX512FP16")
   (V8HF "TARGET_AVX512FP16")
   (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
   (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])

;; 128- and 256-bit float vector modes
(define_mode_iterator VF_128_256
  [(V8SF "TARGET_AVX") V4SF
   (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])

;; 128- and 256-bit float vector modes for bitwise operations
(define_mode_iterator VFB_128_256
  [(V16HF "TARGET_AVX512FP16") (V8HF "TARGET_AVX512FP16")
   (V8SF "TARGET_AVX") V4SF
   (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])

;; All SFmode vector float modes
(define_mode_iterator VF1
  [(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF])

(define_mode_iterator VF1_AVX2
  [(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX2") V4SF])

;; 128- and 256-bit SF vector modes
(define_mode_iterator VF1_128_256
  [(V8SF "TARGET_AVX") V4SF])

(define_mode_iterator VF1_128_256VL
  [V8SF (V4SF "TARGET_AVX512VL")])

;; All DFmode vector float modes
(define_mode_iterator VF2
  [(V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") V2DF])

;; All DFmode & HFmode vector float modes
(define_mode_iterator VF2H
  [(V32HF "TARGET_AVX512FP16")
   (V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
   (V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
   (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") V2DF])

;; 128- and 256-bit DF vector modes
(define_mode_iterator VF2_128_256
  [(V4DF "TARGET_AVX") V2DF])

(define_mode_iterator VF2_512_256
  [(V8DF "TARGET_AVX512F") V4DF])

(define_mode_iterator VF2_512_256VL
  [V8DF (V4DF "TARGET_AVX512VL")])

;; All 128bit vector SF/DF modes
(define_mode_iterator VF_128
  [V4SF (V2DF "TARGET_SSE2")])

;; All 128bit vector HF/SF/DF modes
(define_mode_iterator VFH_128
  [(V8HF "TARGET_AVX512FP16")
   V4SF (V2DF "TARGET_SSE2")])

;; All 256bit vector float modes
(define_mode_iterator VF_256
  [V8SF V4DF])

;; All 512bit vector float modes
(define_mode_iterator VF_512
  [V16SF V8DF])

;; All 512bit vector float modes for bitwise operations
(define_mode_iterator VFB_512
  [(V32HF "TARGET_AVX512FP16") V16SF V8DF])

(define_mode_iterator VI48_AVX512VL
  [V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
   V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])

(define_mode_iterator VI1248_AVX512VLBW
  [(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX512VL && TARGET_AVX512BW")
   (V16QI "TARGET_AVX512VL && TARGET_AVX512BW")
   (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX512VL && TARGET_AVX512BW")
   (V8HI "TARGET_AVX512VL && TARGET_AVX512BW")
   V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
   V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])

(define_mode_iterator VF_AVX512VL
  [V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
   V8DF (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])

;; AVX512ER SF plus 128- and 256-bit SF vector modes
(define_mode_iterator VF1_AVX512ER_128_256
  [(V16SF "TARGET_AVX512ER") (V8SF "TARGET_AVX") V4SF])

(define_mode_iterator VFH_AVX512VL
  [(V32HF "TARGET_AVX512FP16")
   (V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
   (V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
   V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
   V8DF (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])

(define_mode_iterator VF2_AVX512VL
  [V8DF (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])

(define_mode_iterator VF1_AVX512VL
  [V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")])

(define_mode_iterator VF_AVX512FP16
  [V32HF V16HF V8HF])

(define_mode_iterator VF_AVX512FP16VL
  [V32HF (V16HF "TARGET_AVX512VL") (V8HF "TARGET_AVX512VL")])

;; All vector integer modes
(define_mode_iterator VI
  [(V16SI "TARGET_AVX512F") (V8DI "TARGET_AVX512F")
   (V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX") V16QI
   (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX") V8HI
   (V8SI "TARGET_AVX") V4SI
   (V4DI "TARGET_AVX") V2DI])

;; All vector integer and HF modes
(define_mode_iterator VIHF
  [(V16SI "TARGET_AVX512F") (V8DI "TARGET_AVX512F")
   (V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX") V16QI
   (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX") V8HI
   (V8SI "TARGET_AVX") V4SI
   (V4DI "TARGET_AVX") V2DI
   (V32HF "TARGET_AVX512BW") (V16HF "TARGET_AVX") V8HF])

(define_mode_iterator VI_AVX2
  [(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX2") V16QI
   (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX2") V8HI
   (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX2") V4SI
   (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX2") V2DI])

;; All QImode vector integer modes
(define_mode_iterator VI1
  [(V32QI "TARGET_AVX") V16QI])

(define_mode_iterator V_AVX
  [V16QI V8HI V4SI V2DI V4SF V2DF
   (V32QI "TARGET_AVX") (V16HI "TARGET_AVX")
   (V8SI "TARGET_AVX") (V4DI "TARGET_AVX")
   (V8SF "TARGET_AVX") (V4DF "TARGET_AVX")])

(define_mode_iterator VI48_AVX
  [V4SI V2DI (V8SI "TARGET_AVX") (V4DI "TARGET_AVX")])

;; All DImode vector integer modes
(define_mode_iterator VI8
  [(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI])

(define_mode_iterator VI8_FVL
  [(V8DI "TARGET_AVX512F") V4DI (V2DI "TARGET_AVX512VL")])

(define_mode_iterator VI8_AVX512VL
  [V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])

(define_mode_iterator VI8_256_512
  [V8DI (V4DI "TARGET_AVX512VL")])

(define_mode_iterator VI1_AVX2
  [(V32QI "TARGET_AVX2") V16QI])

(define_mode_iterator VI1_AVX512
  [(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX2") V16QI])

(define_mode_iterator VI1_AVX512F
  [(V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI])

(define_mode_iterator VI1_AVX512VNNI
  [(V64QI "TARGET_AVX512VNNI") (V32QI "TARGET_AVX2") V16QI])

(define_mode_iterator VI12_256_512_AVX512VL
  [V64QI (V32QI "TARGET_AVX512VL")
   V32HI (V16HI "TARGET_AVX512VL")])

(define_mode_iterator VI2_AVX2
  [(V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX2") V8HI])

(define_mode_iterator VI2_AVX512F
  [(V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX2") V8HI])

(define_mode_iterator VI2_AVX512VNNIBW
  [(V32HI "TARGET_AVX512BW || TARGET_AVX512VNNI")
   (V16HI "TARGET_AVX2") V8HI])

(define_mode_iterator VI4_AVX
  [(V8SI "TARGET_AVX") V4SI])

(define_mode_iterator VI4_AVX2
  [(V8SI "TARGET_AVX2") V4SI])

(define_mode_iterator VI4_AVX512F
  [(V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX2") V4SI])

(define_mode_iterator VI4_AVX512VL
  [V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")])

(define_mode_iterator VI48_AVX512F_AVX512VL
  [V4SI V8SI (V16SI "TARGET_AVX512F")
   (V2DI "TARGET_AVX512VL") (V4DI "TARGET_AVX512VL") (V8DI "TARGET_AVX512F")])

(define_mode_iterator VI2_AVX512VL
  [(V8HI "TARGET_AVX512VL") (V16HI "TARGET_AVX512VL") V32HI])

(define_mode_iterator VI2H_AVX512VL
  [(V8HI "TARGET_AVX512VL") (V16HI "TARGET_AVX512VL") V32HI
   (V8SI "TARGET_AVX512VL") V16SI
   V8DI])

(define_mode_iterator VI1_AVX512VL_F
  [V32QI (V16QI "TARGET_AVX512VL") (V64QI "TARGET_AVX512F")])

(define_mode_iterator VI8_AVX2_AVX512BW
  [(V8DI "TARGET_AVX512BW") (V4DI "TARGET_AVX2") V2DI])

(define_mode_iterator VI8_AVX2
  [(V4DI "TARGET_AVX2") V2DI])

(define_mode_iterator VI8_AVX2_AVX512F
  [(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX2") V2DI])

(define_mode_iterator VI8_AVX_AVX512F
  [(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX")])

(define_mode_iterator VI4_128_8_256
  [V4SI V4DI])

;; All V8D* modes
(define_mode_iterator V8FI [V8DF V8DI])

;; All V16S* modes
(define_mode_iterator V16FI [V16SF V16SI])

;; ??? We should probably use TImode instead.
(define_mode_iterator VIMAX_AVX2_AVX512BW
  [(V4TI "TARGET_AVX512BW") (V2TI "TARGET_AVX2") V1TI])

;; Suppose TARGET_AVX512BW as baseline
(define_mode_iterator VIMAX_AVX512VL
  [V4TI (V2TI "TARGET_AVX512VL") (V1TI "TARGET_AVX512VL")])

(define_mode_iterator VIMAX_AVX2
  [(V2TI "TARGET_AVX2") V1TI])

;; ??? This should probably be dropped in favor of VIMAX_AVX2_AVX512BW.
(define_mode_iterator SSESCALARMODE
  [(V4TI "TARGET_AVX512BW") (V2TI "TARGET_AVX2") TI])

(define_mode_iterator VI12_AVX2
  [(V32QI "TARGET_AVX2") V16QI
   (V16HI "TARGET_AVX2") V8HI])

(define_mode_iterator VI12_AVX2_AVX512BW
  [(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX2") V16QI
   (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX2") V8HI])

(define_mode_iterator VI24_AVX2
  [(V16HI "TARGET_AVX2") V8HI
   (V8SI "TARGET_AVX2") V4SI])

(define_mode_iterator VI124_AVX2_24_AVX512F_1_AVX512BW
  [(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX2") V16QI
   (V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX2") V8HI
   (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX2") V4SI])

(define_mode_iterator VI124_AVX2
  [(V32QI "TARGET_AVX2") V16QI
   (V16HI "TARGET_AVX2") V8HI
   (V8SI "TARGET_AVX2") V4SI])

(define_mode_iterator VI2_AVX2_AVX512BW
  [(V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX2") V8HI])

(define_mode_iterator VI248_AVX512VL
  [V32HI V16SI V8DI
   (V16HI "TARGET_AVX512VL") (V8SI "TARGET_AVX512VL")
   (V4DI "TARGET_AVX512VL") (V8HI "TARGET_AVX512VL")
   (V4SI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])

(define_mode_iterator VI248_AVX512VLBW
  [(V32HI "TARGET_AVX512BW")
   (V16HI "TARGET_AVX512VL && TARGET_AVX512BW")
   (V8HI "TARGET_AVX512VL && TARGET_AVX512BW")
   V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
   V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])

(define_mode_iterator VI48_AVX2
  [(V8SI "TARGET_AVX2") V4SI
   (V4DI "TARGET_AVX2") V2DI])

(define_mode_iterator VI248_AVX2
  [(V16HI "TARGET_AVX2") V8HI
   (V8SI "TARGET_AVX2") V4SI
   (V4DI "TARGET_AVX2") V2DI])

(define_mode_iterator VI248_AVX2_8_AVX512F_24_AVX512BW
  [(V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX2") V8HI
   (V16SI "TARGET_AVX512BW") (V8SI "TARGET_AVX2") V4SI
   (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX2") V2DI])

(define_mode_iterator VI248_AVX512BW
  [(V32HI "TARGET_AVX512BW") V16SI V8DI])

(define_mode_iterator VI248_AVX512BW_AVX512VL
  [(V32HI "TARGET_AVX512BW")
   (V4DI "TARGET_AVX512VL") V16SI V8DI])

;; Suppose TARGET_AVX512VL as baseline
(define_mode_iterator VI248_AVX512BW_1
  [(V16HI "TARGET_AVX512BW") (V8HI "TARGET_AVX512BW")
   V8SI V4SI
   V2DI])

(define_mode_iterator VI248_AVX512BW_2
  [(V16HI "TARGET_AVX512BW") (V8HI "TARGET_AVX512BW")
   V8SI V4SI
   V4DI V2DI])

(define_mode_iterator VI48_AVX512F
  [(V16SI "TARGET_AVX512F") V8SI V4SI
   (V8DI "TARGET_AVX512F") V4DI V2DI])

(define_mode_iterator VI48_AVX_AVX512F
  [(V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
   (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI])

(define_mode_iterator VI12_AVX_AVX512F
  [(V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI
   (V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI])

(define_mode_iterator V48_AVX2
  [V4SF V2DF
   V8SF V4DF
   (V4SI "TARGET_AVX2") (V2DI "TARGET_AVX2")
   (V8SI "TARGET_AVX2") (V4DI "TARGET_AVX2")])

(define_mode_iterator VF4_128_8_256
  [V4DF V4SF])

(define_mode_iterator VI1_AVX512VLBW
  [(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX512VL")
   (V16QI "TARGET_AVX512VL")])

(define_mode_attr avx512
  [(V16QI "avx512vl") (V32QI "avx512vl") (V64QI "avx512bw")
   (V8HI "avx512vl") (V16HI "avx512vl") (V32HI "avx512bw")
   (V4SI "avx512vl") (V8SI "avx512vl") (V16SI "avx512f")
   (V2DI "avx512vl") (V4DI "avx512vl") (V8DI "avx512f")
   (V8HF "avx512fp16") (V16HF "avx512vl") (V32HF "avx512bw")
   (V4SF "avx512vl") (V8SF "avx512vl") (V16SF "avx512f")
   (V2DF "avx512vl") (V4DF "avx512vl") (V8DF "avx512f")])

(define_mode_attr v_Yw
  [(V16QI "Yw") (V32QI "Yw") (V64QI "v")
   (V8HI "Yw") (V16HI "Yw") (V32HI "v")
   (V4SI "v") (V8SI "v") (V16SI "v")
   (V2DI "v") (V4DI "v") (V8DI "v")
   (V4SF "v") (V8SF "v") (V16SF "v")
   (V2DF "v") (V4DF "v") (V8DF "v")
   (TI "Yw") (V1TI "Yw") (V2TI "Yw") (V4TI "v")])

(define_mode_attr sse2_avx_avx512f
  [(V16QI "sse2") (V32QI "avx") (V64QI "avx512f")
   (V8HI "avx512vl") (V16HI "avx512vl") (V32HI "avx512bw")
   (V4SI "sse2") (V8SI "avx") (V16SI "avx512f")
   (V2DI "avx512vl") (V4DI "avx512vl") (V8DI "avx512f")
   (V16SF "avx512f") (V8SF "avx") (V4SF "avx")
   (V8DF "avx512f") (V4DF "avx") (V2DF "avx")])

(define_mode_attr sse2_avx2
  [(V16QI "sse2") (V32QI "avx2") (V64QI "avx512bw")
   (V8HI "sse2") (V16HI "avx2") (V32HI "avx512bw")
   (V4SI "sse2") (V8SI "avx2") (V16SI "avx512f")
   (V2DI "sse2") (V4DI "avx2") (V8DI "avx512f")
   (V1TI "sse2") (V2TI "avx2") (V4TI "avx512bw")])

(define_mode_attr ssse3_avx2
  [(V16QI "ssse3") (V32QI "avx2") (V64QI "avx512bw")
   (V4HI "ssse3") (V8HI "ssse3") (V16HI "avx2") (V32HI "avx512bw")
   (V4SI "ssse3") (V8SI "avx2")
   (V2DI "ssse3") (V4DI "avx2")
   (TI "ssse3") (V2TI "avx2") (V4TI "avx512bw")])

(define_mode_attr sse4_1_avx2
  [(V16QI "sse4_1") (V32QI "avx2") (V64QI "avx512bw")
   (V8HI "sse4_1") (V16HI "avx2") (V32HI "avx512bw")
   (V4SI "sse4_1") (V8SI "avx2") (V16SI "avx512f")
   (V2DI "sse4_1") (V4DI "avx2") (V8DI "avx512dq")])

(define_mode_attr avx_avx2
  [(V4SF "avx") (V2DF "avx")
   (V8SF "avx") (V4DF "avx")
   (V4SI "avx2") (V2DI "avx2")
   (V8SI "avx2") (V4DI "avx2")])

(define_mode_attr vec_avx2
  [(V16QI "vec") (V32QI "avx2")
   (V8HI "vec") (V16HI "avx2")
   (V4SI "vec") (V8SI "avx2")
   (V2DI "vec") (V4DI "avx2")])

(define_mode_attr avx2_avx512
  [(V4SI "avx2") (V8SI "avx2") (V16SI "avx512f")
   (V2DI "avx2") (V4DI "avx2") (V8DI "avx512f")
   (V4SF "avx2") (V8SF "avx2") (V16SF "avx512f")
   (V2DF "avx2") (V4DF "avx2") (V8DF "avx512f")
   (V8HI "avx512vl") (V16HI "avx512vl") (V32HI "avx512bw")])

(define_mode_attr shuffletype
  [(V32HF "f") (V16HF "f") (V8HF "f")
   (V16SF "f") (V16SI "i") (V8DF "f") (V8DI "i")
   (V8SF "f") (V8SI "i") (V4DF "f") (V4DI "i")
   (V4SF "f") (V4SI "i") (V2DF "f") (V2DI "i")
   (V32HI "i") (V16HI "i") (V8HI "i")
   (V64QI "i") (V32QI "i") (V16QI "i")
   (V4TI "i") (V2TI "i") (V1TI "i")])

(define_mode_attr ssequartermode
  [(V16SF "V4SF") (V8DF "V2DF") (V16SI "V4SI") (V8DI "V2DI")])

(define_mode_attr ssequarterinsnmode
  [(V16SF "V4SF") (V8DF "V2DF") (V16SI "TI") (V8DI "TI")])

(define_mode_attr vecmemsuffix
  [(V32HF "{z}") (V16HF "{y}") (V8HF "{x}")
   (V16SF "{z}") (V8SF "{y}") (V4SF "{x}")
   (V8DF "{z}") (V4DF "{y}") (V2DF "{x}")])

(define_mode_attr ssedoublemodelower
  [(V16QI "v16hi") (V32QI "v32hi") (V64QI "v64hi")
   (V8HI "v8si") (V16HI "v16si") (V32HI "v32si")
   (V4SI "v4di") (V8SI "v8di") (V16SI "v16di")])

(define_mode_attr ssedoublemode
  [(V4SF "V8SF") (V8SF "V16SF") (V16SF "V32SF")
   (V2DF "V4DF") (V4DF "V8DF") (V8DF "V16DF")
   (V16QI "V16HI") (V32QI "V32HI") (V64QI "V64HI")
   (V8HI "V8SI") (V16HI "V16SI") (V32HI "V32SI")
   (V4SI "V4DI") (V8SI "V16SI") (V16SI "V32SI")
   (V4DI "V8DI") (V8DI "V16DI")])

(define_mode_attr ssebytemode
  [(V8DI "V64QI") (V4DI "V32QI") (V2DI "V16QI")
   (V16SI "V64QI") (V8SI "V32QI") (V4SI "V16QI")])

(define_mode_attr sseintconvert
  [(V32HI "w") (V16HI "w") (V8HI "w")
   (V16SI "dq") (V8SI "dq") (V4SI "dq")
   (V8DI "qq") (V4DI "qq") (V2DI "qq")])

;; All 128bit vector integer modes
(define_mode_iterator VI_128 [V16QI V8HI V4SI V2DI])

;; All 256bit vector integer modes
(define_mode_iterator VI_256 [V32QI V16HI V8SI V4DI])

;; All 128 and 256bit vector integer modes
(define_mode_iterator VI_128_256 [V16QI V8HI V4SI V2DI V32QI V16HI V8SI V4DI])

;; All 256bit vector integer and HF modes
(define_mode_iterator VIHF_256 [V32QI V16HI V8SI V4DI V16HF])

;; Various 128bit vector integer mode combinations
(define_mode_iterator VI12_128 [V16QI V8HI])
(define_mode_iterator VI14_128 [V16QI V4SI])
(define_mode_iterator VI124_128 [V16QI V8HI V4SI])
(define_mode_iterator VI24_128 [V8HI V4SI])
(define_mode_iterator VI248_128 [V8HI V4SI V2DI])
(define_mode_iterator VI248_256 [V16HI V8SI V4DI])
(define_mode_iterator VI248_512 [V32HI V16SI V8DI])
(define_mode_iterator VI48_128 [V4SI V2DI])
(define_mode_iterator VI148_512 [V64QI V16SI V8DI])
(define_mode_iterator VI148_256 [V32QI V8SI V4DI])
(define_mode_iterator VI148_128 [V16QI V4SI V2DI])

;; Various 256bit and 512 vector integer mode combinations
(define_mode_iterator VI124_256 [V32QI V16HI V8SI])
(define_mode_iterator VI124_256_AVX512F_AVX512BW
  [V32QI V16HI V8SI
   (V64QI "TARGET_AVX512BW")
   (V32HI "TARGET_AVX512BW")
   (V16SI "TARGET_AVX512F")])
(define_mode_iterator VI48_256 [V8SI V4DI])
(define_mode_iterator VI48_512 [V16SI V8DI])
(define_mode_iterator VI4_256_8_512 [V8SI V8DI])
(define_mode_iterator VI_AVX512BW
  [V16SI V8DI (V32HI "TARGET_AVX512BW") (V64QI "TARGET_AVX512BW")])

(define_mode_iterator VIHF_AVX512BW
  [V16SI V8DI (V32HI "TARGET_AVX512BW") (V64QI "TARGET_AVX512BW")
   (V32HF "TARGET_AVX512BW")])

;; Int-float size matches
(define_mode_iterator VI2F_256_512 [V16HI V32HI V16HF V32HF])
(define_mode_iterator VI4F_128 [V4SI V4SF])
(define_mode_iterator VI8F_128 [V2DI V2DF])
(define_mode_iterator VI4F_256 [V8SI V8SF])
(define_mode_iterator VI8F_256 [V4DI V4DF])
(define_mode_iterator VI4F_256_512
  [V8SI V8SF
   (V16SI "TARGET_AVX512F") (V16SF "TARGET_AVX512F")])
(define_mode_iterator VI48F_256_512
  [V8SI V8SF
   (V16SI "TARGET_AVX512F") (V16SF "TARGET_AVX512F")
   (V8DI "TARGET_AVX512F") (V8DF "TARGET_AVX512F")
   (V4DI "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")])
(define_mode_iterator VF48_I1248
  [V16SI V16SF V8DI V8DF V32HI V64QI])
(define_mode_iterator VF48H_AVX512VL
  [V8DF V16SF (V8SF "TARGET_AVX512VL")])

(define_mode_iterator VF48_128
  [V2DF V4SF])

(define_mode_iterator VI48F
  [V16SI V16SF V8DI V8DF
   (V8SI "TARGET_AVX512VL") (V8SF "TARGET_AVX512VL")
   (V4DI "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")
   (V4SI "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
   (V2DI "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])

(define_mode_iterator VI12_VI48F_AVX512VLBW
  [(V16SI "TARGET_AVX512F") (V16SF "TARGET_AVX512F")
   (V8DI "TARGET_AVX512F") (V8DF "TARGET_AVX512F")
   (V8SI "TARGET_AVX512VL") (V8SF "TARGET_AVX512VL")
   (V4DI "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")
   (V4SI "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
   (V2DI "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")
   (V64QI "TARGET_AVX512BW") (V16QI "TARGET_AVX512VL")
   (V32QI "TARGET_AVX512VL && TARGET_AVX512BW")
   (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX512VL")
   (V8HI "TARGET_AVX512VL")])

(define_mode_iterator VI48F_256 [V8SI V8SF V4DI V4DF])

(define_mode_iterator VF_AVX512
  [(V4SF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")
   (V8SF "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")
   V16SF V8DF])

(define_mode_iterator V8_128 [V8HI V8HF])
(define_mode_iterator V16_256 [V16HI V16HF])
(define_mode_iterator V32_512 [V32HI V32HF])

;; Mapping from float mode to required SSE level
(define_mode_attr sse
  [(SF "sse") (DF "sse2") (HF "avx512fp16")
   (V4SF "sse") (V2DF "sse2")
   (V32HF "avx512fp16") (V16HF "avx512fp16") (V8HF "avx512fp16")
   (V16SF "avx512f") (V8SF "avx")
   (V8DF "avx512f") (V4DF "avx")])

(define_mode_attr sse2
  [(V16QI "sse2") (V32QI "avx") (V64QI "avx512f")
   (V2DI "sse2") (V4DI "avx") (V8DI "avx512f")])

(define_mode_attr sse3
  [(V16QI "sse3") (V32QI "avx")])

(define_mode_attr sse4_1
  [(V4SF "sse4_1") (V2DF "sse4_1")
   (V8SF "avx") (V4DF "avx")
   (V8DF "avx512f")
   (V4DI "avx") (V2DI "sse4_1")
   (V8SI "avx") (V4SI "sse4_1")
   (V16QI "sse4_1") (V32QI "avx")
   (V8HI "sse4_1") (V16HI "avx")])

(define_mode_attr avxsizesuffix
  [(V64QI "512") (V32HI "512") (V16SI "512") (V8DI "512")
   (V32QI "256") (V16HI "256") (V8SI "256") (V4DI "256")
   (V16QI "") (V8HI "") (V4SI "") (V2DI "")
   (V32HF "512") (V16SF "512") (V8DF "512")
   (V16HF "256") (V8SF "256") (V4DF "256")
   (V8HF "") (V4SF "") (V2DF "")])

;; SSE instruction mode
(define_mode_attr sseinsnmode
  [(V64QI "XI") (V32HI "XI") (V16SI "XI") (V8DI "XI") (V4TI "XI")
   (V32QI "OI") (V16HI "OI") (V8SI "OI") (V4DI "OI") (V2TI "OI")
   (V16QI "TI") (V8HI "TI") (V4SI "TI") (V2DI "TI") (V1TI "TI")
   (V16SF "V16SF") (V8DF "V8DF")
   (V8SF "V8SF") (V4DF "V4DF")
   (V4SF "V4SF") (V2DF "V2DF")
   (V8HF "TI") (V16HF "OI") (V32HF "XI")
   (TI "TI")])

(define_mode_attr sseintvecinsnmode
  [(V64QI "XI") (V32HI "XI") (V16SI "XI") (V8DI "XI") (V4TI "XI")
   (V32QI "OI") (V16HI "OI") (V8SI "OI") (V4DI "OI") (V2TI "OI")
   (V16QI "TI") (V8HI "TI") (V4SI "TI") (V2DI "TI") (V1TI "TI")
   (V16SF "XI") (V8DF "XI")
   (V8SF "OI") (V4DF "OI")
   (V4SF "TI") (V2DF "TI")
   (TI "TI")])

;; SSE constant -1 constraint
(define_mode_attr sseconstm1
  [(V64QI "BC") (V32HI "BC") (V16SI "BC") (V8DI "BC") (V4TI "BC")
   (V32QI "BC") (V16HI "BC") (V8SI "BC") (V4DI "BC") (V2TI "BC")
   (V16QI "BC") (V8HI "BC") (V4SI "BC") (V2DI "BC") (V1TI "BC")
   (V32HF "BF") (V16SF "BF") (V8DF "BF")
   (V16HF "BF") (V8SF "BF") (V4DF "BF")
   (V8HF "BF") (V4SF "BF") (V2DF "BF")])

;; SSE integer instruction suffix for various modes
(define_mode_attr sseintmodesuffix
  [(V16QI "b") (V8HI "w") (V4SI "d") (V2DI "q")
   (V32QI "b") (V16HI "w") (V8SI "d") (V4DI "q")
   (V64QI "b") (V32HI "w") (V16SI "d") (V8DI "q")
   (V8HF "w") (V16HF "w") (V32HF "w")])

;; Mapping of vector modes to corresponding mask size
(define_mode_attr avx512fmaskmode
  [(V64QI "DI") (V32QI "SI") (V16QI "HI")
   (V32HI "SI") (V16HI "HI") (V8HI "QI") (V4HI "QI")
   (V16SI "HI") (V8SI "QI") (V4SI "QI")
   (V8DI "QI") (V4DI "QI") (V2DI "QI")
   (V32HF "SI") (V16HF "HI") (V8HF "QI")
   (V16SF "HI") (V8SF "QI") (V4SF "QI")
   (V8DF "QI") (V4DF "QI") (V2DF "QI")])

;; Mapping of vector modes to corresponding complex mask size
(define_mode_attr avx512fmaskcmode
  [(V32HF "HI") (V16HF "QI") (V8HF "QI")])

;; Mapping of vector modes to corresponding mask size
(define_mode_attr avx512fmaskmodelower
  [(V64QI "di") (V32QI "si") (V16QI "hi")
   (V32HI "si") (V16HI "hi") (V8HI "qi") (V4HI "qi")
   (V16SI "hi") (V8SI "qi") (V4SI "qi")
   (V8DI "qi") (V4DI "qi") (V2DI "qi")
   (V32HF "si") (V16HF "hi") (V8HF "qi")
   (V16SF "hi") (V8SF "qi") (V4SF "qi")
   (V8DF "qi") (V4DF "qi") (V2DF "qi")])

;; Mapping of vector modes to corresponding mask half size
(define_mode_attr avx512fmaskhalfmode
  [(V64QI "SI") (V32QI "HI") (V16QI "QI")
   (V32HI "HI") (V16HI "QI") (V8HI "QI") (V4HI "QI")
   (V16SI "QI") (V8SI "QI") (V4SI "QI")
   (V8DI "QI") (V4DI "QI") (V2DI "QI")
   (V32HF "HI") (V16HF "QI") (V8HF "QI")
   (V16SF "QI") (V8SF "QI") (V4SF "QI")
   (V8DF "QI") (V4DF "QI") (V2DF "QI")])

;; Mapping of vector float modes to an integer mode of the same size
(define_mode_attr sseintvecmode
  [(V32HF "V32HI") (V16SF "V16SI") (V8DF "V8DI")
   (V16HF "V16HI") (V8SF "V8SI") (V4DF "V4DI")
   (V8HF "V8HI") (V4SF "V4SI") (V2DF "V2DI")
   (V16SI "V16SI") (V8DI "V8DI")
   (V8SI "V8SI") (V4DI "V4DI")
   (V4SI "V4SI") (V2DI "V2DI")
   (V16HI "V16HI") (V8HI "V8HI") (V32HI "V32HI")
   (V64QI "V64QI") (V32QI "V32QI") (V16QI "V16QI")])

(define_mode_attr sseintvecmode2
  [(V8DF "XI") (V4DF "OI") (V2DF "TI")
   (V8SF "OI") (V4SF "TI")
   (V16HF "OI") (V8HF "TI")])

(define_mode_attr sseintvecmodelower
  [(V32HF "v32hi") (V16SF "v16si") (V8DF "v8di")
   (V16HF "v16hi") (V8SF "v8si") (V4DF "v4di")
   (V8HF "v8hi") (V4SF "v4si") (V2DF "v2di")
   (V8SI "v8si") (V4DI "v4di")
   (V4SI "v4si") (V2DI "v2di")
   (V16HI "v16hi") (V8HI "v8hi")
   (V32QI "v32qi") (V16QI "v16qi")])

;; Mapping of vector modes to a vector mode of double size
(define_mode_attr ssedoublevecmode
  [(V64QI "V128QI") (V32HI "V64HI") (V16SI "V32SI") (V8DI "V16DI")
   (V32QI "V64QI") (V16HI "V32HI") (V8SI "V16SI") (V4DI "V8DI")
   (V16QI "V32QI") (V8HI "V16HI") (V4SI "V8SI") (V2DI "V4DI")
   (V16SF "V32SF") (V8DF "V16DF")
   (V8SF "V16SF") (V4DF "V8DF")
   (V4SF "V8SF") (V2DF "V4DF")
   (V32HF "V64HF") (V16HF "V32HF") (V8HF "V16HF")])

;; Mapping of vector modes to a vector mode of half size
;; instead of V1DI/V1DF, DI/DF are used for V2DI/V2DF although they are scalar.
(define_mode_attr ssehalfvecmode
  [(V64QI "V32QI") (V32HI "V16HI") (V16SI "V8SI") (V8DI "V4DI") (V4TI "V2TI")
   (V32QI "V16QI") (V16HI "V8HI") (V8SI "V4SI") (V4DI "V2DI")
   (V16QI "V8QI") (V8HI "V4HI") (V4SI "V2SI") (V2DI "DI")
   (V16SF "V8SF") (V8DF "V4DF")
   (V8SF "V4SF") (V4DF "V2DF")
   (V4SF "V2SF") (V2DF "DF")
   (V32HF "V16HF") (V16HF "V8HF") (V8HF "V4HF")])

(define_mode_attr ssehalfvecmodelower
  [(V64QI "v32qi") (V32HI "v16hi") (V16SI "v8si") (V8DI "v4di") (V4TI "v2ti")
   (V32QI "v16qi") (V16HI "v8hi") (V8SI "v4si") (V4DI "v2di")
   (V16QI "v8qi") (V8HI "v4hi") (V4SI "v2si")
   (V16SF "v8sf") (V8DF "v4df")
   (V8SF "v4sf") (V4DF "v2df")
   (V4SF "v2sf")
   (V32HF "v16hf") (V16HF "v8hf") (V8HF "v4hf")])

;; Mapping of vector modes to vector hf modes of conversion.
(define_mode_attr ssePHmode
  [(V32HI "V32HF") (V16HI "V16HF") (V8HI "V8HF")
   (V16SI "V16HF") (V8SI "V8HF") (V4SI "V8HF")
   (V8DI "V8HF") (V4DI "V8HF") (V2DI "V8HF")
   (V8DF "V8HF") (V16SF "V16HF") (V8SF "V8HF")])

;; Mapping of vector modes to vector hf modes of same element.
(define_mode_attr ssePHmodelower
  [(V32HI "v32hf") (V16HI "v16hf") (V8HI "v8hf")
   (V16SI "v16hf") (V8SI "v8hf") (V4SI "v4hf")
   (V8DI "v8hf") (V4DI "v4hf") (V2DI "v2hf")
   (V8DF "v8hf") (V16SF "v16hf") (V8SF "v8hf")])

;; Mapping of vector modes to packed single mode of the same size
(define_mode_attr ssePSmode
  [(V16SI "V16SF") (V8DF "V16SF")
   (V16SF "V16SF") (V8DI "V16SF")
   (V64QI "V16SF") (V32QI "V8SF") (V16QI "V4SF")
   (V32HI "V16SF") (V16HI "V8SF") (V8HI "V4SF")
   (V8SI "V8SF") (V4SI "V4SF")
   (V4DI "V8SF") (V2DI "V4SF")
   (V4TI "V16SF") (V2TI "V8SF") (V1TI "V4SF")
   (V8SF "V8SF") (V4SF "V4SF")
   (V4DF "V8SF") (V2DF "V4SF")
   (V32HF "V16SF") (V16HF "V8SF") (V8HF "V4SF")])

(define_mode_attr ssePSmode2
  [(V8DI "V8SF") (V4DI "V4SF")])

;; Mapping of vector modes back to the scalar modes
(define_mode_attr ssescalarmode
  [(V64QI "QI") (V32QI "QI") (V16QI "QI")
   (V32HI "HI") (V16HI "HI") (V8HI "HI")
   (V16SI "SI") (V8SI "SI") (V4SI "SI")
   (V8DI "DI") (V4DI "DI") (V2DI "DI")
   (V32HF "HF") (V16HF "HF") (V8HF "HF")
   (V16SF "SF") (V8SF "SF") (V4SF "SF")
   (V8DF "DF") (V4DF "DF") (V2DF "DF")
   (V4TI "TI") (V2TI "TI")])

;; Mapping of vector modes back to the scalar modes
(define_mode_attr ssescalarmodelower
  [(V64QI "qi") (V32QI "qi") (V16QI "qi")
   (V32HI "hi") (V16HI "hi") (V8HI "hi")
   (V16SI "si") (V8SI "si") (V4SI "si")
   (V8DI "di") (V4DI "di") (V2DI "di")
   (V32HF "hf") (V16HF "hf") (V8HF "hf")
   (V16SF "sf") (V8SF "sf") (V4SF "sf")
   (V8DF "df") (V4DF "df") (V2DF "df")
   (V4TI "ti") (V2TI "ti")])

;; Mapping of vector modes to the 128bit modes
(define_mode_attr ssexmmmode
  [(V64QI "V16QI") (V32QI "V16QI") (V16QI "V16QI")
   (V32HI "V8HI") (V16HI "V8HI") (V8HI "V8HI")
   (V16SI "V4SI") (V8SI "V4SI") (V4SI "V4SI")
   (V8DI "V2DI") (V4DI "V2DI") (V2DI "V2DI")
   (V32HF "V8HF") (V16HF "V8HF") (V8HF "V8HF")
   (V16SF "V4SF") (V8SF "V4SF") (V4SF "V4SF")
   (V8DF "V2DF") (V4DF "V2DF") (V2DF "V2DF")])

;; Pointer size override for scalar modes (Intel asm dialect)
(define_mode_attr iptr
  [(V64QI "b") (V32HI "w") (V16SI "k") (V8DI "q")
   (V32QI "b") (V16HI "w") (V8SI "k") (V4DI "q")
   (V16QI "b") (V8HI "w") (V4SI "k") (V2DI "q")
   (V32HF "w") (V16SF "k") (V8DF "q")
   (V16HF "w") (V8SF "k") (V4DF "q")
   (V8HF "w") (V4SF "k") (V2DF "q")
   (HF "w") (SF "k") (DF "q")])

;; Mapping of vector modes to VPTERNLOG suffix
(define_mode_attr ternlogsuffix
  [(V8DI "q") (V4DI "q") (V2DI "q")
   (V8DF "q") (V4DF "q") (V2DF "q")
   (V16SI "d") (V8SI "d") (V4SI "d")
   (V16SF "d") (V8SF "d") (V4SF "d")
   (V32HI "d") (V16HI "d") (V8HI "d")
   (V32HF "d") (V16HF "d") (V8HF "d")
   (V64QI "d") (V32QI "d") (V16QI "d")])

;; Number of scalar elements in each vector type
(define_mode_attr ssescalarnum
  [(V64QI "64") (V16SI "16") (V8DI "8")
   (V32QI "32") (V16HI "16") (V8SI "8") (V4DI "4")
   (V16QI "16") (V8HI "8") (V4SI "4") (V2DI "2")
   (V16SF "16") (V8DF "8")
   (V8SF "8") (V4DF "4")
   (V4SF "4") (V2DF "2")])

;; Mask of scalar elements in each vector type
(define_mode_attr ssescalarnummask
  [(V32QI "31") (V16HI "15") (V8SI "7") (V4DI "3")
   (V16QI "15") (V8HI "7") (V4SI "3") (V2DI "1")
   (V8SF "7") (V4DF "3")
   (V4SF "3") (V2DF "1")])

(define_mode_attr ssescalarsize
  [(V4TI "64") (V2TI "64") (V1TI "64")
   (V8DI "64") (V4DI "64") (V2DI "64")
   (V64QI "8") (V32QI "8") (V16QI "8")
   (V32HI "16") (V16HI "16") (V8HI "16")
   (V16SI "32") (V8SI "32") (V4SI "32")
   (V32HF "16") (V16HF "16") (V8HF "16")
   (V16SF "32") (V8SF "32") (V4SF "32")
   (V8DF "64") (V4DF "64") (V2DF "64")])

;; SSE prefix for integer and HF vector modes
(define_mode_attr sseintprefix
  [(V2DI "p") (V2DF "")
   (V4DI "p") (V4DF "")
   (V8DI "p") (V8DF "")
   (V4SI "p") (V4SF "")
   (V8SI "p") (V8SF "")
   (V16SI "p") (V16SF "")
   (V16QI "p") (V8HI "p") (V8HF "p")
   (V32QI "p") (V16HI "p") (V16HF "p")
   (V64QI "p") (V32HI "p") (V32HF "p")])

;; SSE prefix for integer and HF vector comparison.
(define_mode_attr ssecmpintprefix [(V2DI "p") (V2DF "") (V4DI "p") (V4DF "") (V8DI "p") (V8DF "") (V4SI "p") (V4SF "") (V8SI "p") (V8SF "") (V16SI "p") (V16SF "") (V16QI "p") (V8HI "p") (V8HF "") (V32QI "p") (V16HI "p") (V16HF "") (V64QI "p") (V32HI "p") (V32HF "")]) ;; SSE scalar suffix for vector modes (define_mode_attr ssescalarmodesuffix [(HF "sh") (SF "ss") (DF "sd") (V32HF "sh") (V16SF "ss") (V8DF "sd") (V16HF "sh") (V8SF "ss") (V4DF "sd") (V8HF "sh") (V4SF "ss") (V2DF "sd") (V16SI "d") (V8DI "q") (V8SI "d") (V4DI "q") (V4SI "d") (V2DI "q")]) ;; Pack/unpack vector modes (define_mode_attr sseunpackmode [(V16QI "V8HI") (V8HI "V4SI") (V4SI "V2DI") (V32QI "V16HI") (V16HI "V8SI") (V8SI "V4DI") (V32HI "V16SI") (V64QI "V32HI") (V16SI "V8DI")]) (define_mode_attr ssepackmode [(V8HI "V16QI") (V4SI "V8HI") (V2DI "V4SI") (V16HI "V32QI") (V8SI "V16HI") (V4DI "V8SI") (V32HI "V64QI") (V16SI "V32HI") (V8DI "V16SI")]) ;; Mapping of the max integer size for xop rotate immediate constraint (define_mode_attr sserotatemax [(V16QI "7") (V8HI "15") (V4SI "31") (V2DI "63")]) ;; Mapping of mode to cast intrinsic name (define_mode_attr castmode [(V4SF "ps") (V2DF "pd") (V8SI "si") (V8SF "ps") (V4DF "pd") (V16SI "si") (V16SF "ps") (V8DF "pd")]) ;; i128 for integer vectors and TARGET_AVX2, f128 otherwise. ;; i64x4 or f64x4 for 512bit modes. (define_mode_attr i128 [(V16HF "%~128") (V32HF "i64x4") (V16SF "f64x4") (V8SF "f128") (V8DF "f64x4") (V4DF "f128") (V64QI "i64x4") (V32QI "%~128") (V32HI "i64x4") (V16HI "%~128") (V16SI "i64x4") (V8SI "%~128") (V8DI "i64x4") (V4DI "%~128")]) ;; For 256-bit modes for TARGET_AVX512VL && TARGET_AVX512DQ ;; i32x4, f32x4, i64x2 or f64x2 suffixes. 
(define_mode_attr i128vldq [(V8SF "f32x4") (V4DF "f64x2") (V32QI "i32x4") (V16HI "i32x4") (V8SI "i32x4") (V4DI "i64x2")]) ;; Mix-n-match (define_mode_iterator AVX256MODE2P [V8SI V8SF V4DF]) (define_mode_iterator AVX512MODE2P [V16SI V16SF V8DF]) ;; Mapping for dbpsabbw modes (define_mode_attr dbpsadbwmode [(V32HI "V64QI") (V16HI "V32QI") (V8HI "V16QI")]) ;; Mapping suffixes for broadcast (define_mode_attr bcstscalarsuff [(V64QI "b") (V32QI "b") (V16QI "b") (V32HI "w") (V16HI "w") (V8HI "w") (V16SI "d") (V8SI "d") (V4SI "d") (V8DI "q") (V4DI "q") (V2DI "q") (V32HF "w") (V16HF "w") (V8HF "w") (V16SF "ss") (V8SF "ss") (V4SF "ss") (V8DF "sd") (V4DF "sd") (V2DF "sd")]) ;; Tie mode of assembler operand to mode iterator (define_mode_attr xtg_mode [(V16QI "x") (V8HI "x") (V4SI "x") (V2DI "x") (V8HF "x") (V4SF "x") (V2DF "x") (V32QI "t") (V16HI "t") (V8SI "t") (V4DI "t") (V16HF "t") (V8SF "t") (V4DF "t") (V64QI "g") (V32HI "g") (V16SI "g") (V8DI "g") (V32HF "g") (V16SF "g") (V8DF "g")]) ;; Half mask mode for unpacks (define_mode_attr HALFMASKMODE [(DI "SI") (SI "HI")]) ;; Double mask mode for packs (define_mode_attr DOUBLEMASKMODE [(HI "SI") (SI "DI")]) ;; Include define_subst patterns for instructions with mask (include "subst.md") ;; Patterns whose name begins with "sse{,2,3}_" are invoked by intrinsics. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; Move patterns ;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; All of these patterns are enabled for SSE1 as well as SSE2. ;; This is essential for maintaining stable calling conventions. 
;; Generic vector move: the expander defers to ix86_expand_vector_move,
;; and the insn pattern handles four alternatives -- 0: load of a standard
;; SSE constant (all-zeros etc., emitted via standard_sse_constant_opcode);
;; 1: other constants (gated on TARGET_SSE2 for 16-byte and TARGET_AVX2 for
;; 32-byte vectors by the "enabled" attribute); 2/3: reg<->mem moves via
;; ix86_output_ssemov.  The "mode" attribute cond picks V4SF/TI forms for
;; typeless-store / pxor-zeroing tunings.
;; NOTE(review): the names here should read "mov<mode>" and
;; "mov<mode>_internal" -- the <mode> markers look stripped by extraction;
;; confirm against upstream before use.
(define_expand "mov" [(set (match_operand:VMOVE 0 "nonimmediate_operand") (match_operand:VMOVE 1 "nonimmediate_operand"))] "TARGET_SSE" { ix86_expand_vector_move (mode, operands); DONE; }) (define_insn "mov_internal" [(set (match_operand:VMOVE 0 "nonimmediate_operand" "=v,v ,v ,m") (match_operand:VMOVE 1 "nonimmediate_or_sse_const_operand" " C,,vm,v"))] "TARGET_SSE && (register_operand (operands[0], mode) || register_operand (operands[1], mode)) && ix86_hardreg_mov_ok (operands[0], operands[1])" { switch (get_attr_type (insn)) { case TYPE_SSELOG1: return standard_sse_constant_opcode (insn, operands); case TYPE_SSEMOV: return ix86_output_ssemov (insn, operands); default: gcc_unreachable (); } } [(set_attr "type" "sselog1,sselog1,ssemov,ssemov") (set_attr "prefix" "maybe_vex") (set (attr "mode") (cond [(match_test "TARGET_AVX") (const_string "") (ior (not (match_test "TARGET_SSE2")) (match_test "optimize_function_for_size_p (cfun)")) (const_string "V4SF") (and (match_test "mode == V2DFmode") (match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")) (const_string "V4SF") (and (eq_attr "alternative" "3") (match_test "TARGET_SSE_TYPELESS_STORES")) (const_string "V4SF") (and (eq_attr "alternative" "0") (match_test "TARGET_SSE_LOAD0_BY_PXOR")) (const_string "TI") ] (const_string ""))) (set (attr "enabled") (cond [(and (match_test " == 16") (eq_attr "alternative" "1")) (symbol_ref "TARGET_SSE2") (and (match_test " == 32") (eq_attr "alternative" "1")) (symbol_ref "TARGET_AVX2") ] (symbol_ref "true")))]) ;; If mem_addr points to a memory region with less than whole vector size bytes ;; of accessible memory and k is a mask that would prevent reading the inaccessible ;; bytes from mem_addr, add UNSPEC_MASKLOAD to prevent it to be transformed to vpblendd ;; See pr97642. 
;; AVX-512 masked move family: masked-load expanders wrap a memory source
;; in UNSPEC_MASKLOAD (so masked-off, possibly inaccessible bytes are never
;; read -- the PR97642 fix), their matching insns emit vmova*/vmovu*
;; depending on misaligned_operand, plus scalar mask load/store
;; (avx512f_*load/store_mask), blendm patterns that fall back to masked
;; vmovdq* when operand 1 aliases operand 0, masked stores, and movq128.
;; NOTE(review): pattern names lost their <avx512>/<mode>/<ssemodesuffix>
;; substitutions during extraction; restore against upstream.
;; NOTE(review): in *avx512f_load_mask the template ends
;; "...%N2|%0%{3%}%N2, %1}" -- the Intel-dialect side has "%{3%}" where the
;; AT&T side has "%{%3%}"; the missing '%' before 3 looks like a typo.
;; Cannot be fixed here without touching template text; verify upstream.
(define_expand "_load_mask" [(set (match_operand:V48_AVX512VL 0 "register_operand") (vec_merge:V48_AVX512VL (match_operand:V48_AVX512VL 1 "nonimmediate_operand") (match_operand:V48_AVX512VL 2 "nonimm_or_0_operand") (match_operand: 3 "register_or_constm1_operand")))] "TARGET_AVX512F" { if (CONST_INT_P (operands[3])) { emit_insn (gen_rtx_SET (operands[0], operands[1])); DONE; } else if (MEM_P (operands[1])) operands[1] = gen_rtx_UNSPEC (mode, gen_rtvec(1, operands[1]), UNSPEC_MASKLOAD); }) (define_insn "*_load_mask" [(set (match_operand:V48_AVX512VL 0 "register_operand" "=v") (vec_merge:V48_AVX512VL (unspec:V48_AVX512VL [(match_operand:V48_AVX512VL 1 "memory_operand" "m")] UNSPEC_MASKLOAD) (match_operand:V48_AVX512VL 2 "nonimm_or_0_operand" "0C") (match_operand: 3 "register_operand" "Yk")))] "TARGET_AVX512F" { if (FLOAT_MODE_P (GET_MODE_INNER (mode))) { if (misaligned_operand (operands[1], mode)) return "vmovu\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"; else return "vmova\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"; } else { if (misaligned_operand (operands[1], mode)) return "vmovdqu\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"; else return "vmovdqa\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"; } } [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn_and_split "*_load" [(set (match_operand:V48_AVX512VL 0 "register_operand") (unspec:V48_AVX512VL [(match_operand:V48_AVX512VL 1 "memory_operand")] UNSPEC_MASKLOAD))] "TARGET_AVX512F" "#" "&& 1" [(set (match_dup 0) (match_dup 1))]) (define_expand "_load_mask" [(set (match_operand:VI12_AVX512VL 0 "register_operand") (vec_merge:VI12_AVX512VL (match_operand:VI12_AVX512VL 1 "nonimmediate_operand") (match_operand:VI12_AVX512VL 2 "nonimm_or_0_operand") (match_operand: 3 "register_or_constm1_operand")))] "TARGET_AVX512BW" { if (CONST_INT_P (operands[3])) { emit_insn (gen_rtx_SET (operands[0], operands[1])); DONE; } else if (MEM_P (operands[1])) operands[1] = gen_rtx_UNSPEC (mode, gen_rtvec(1, operands[1]), UNSPEC_MASKLOAD); 
}) (define_insn "*_load_mask" [(set (match_operand:VI12_AVX512VL 0 "register_operand" "=v") (vec_merge:VI12_AVX512VL (unspec:VI12_AVX512VL [(match_operand:VI12_AVX512VL 1 "memory_operand" "m")] UNSPEC_MASKLOAD) (match_operand:VI12_AVX512VL 2 "nonimm_or_0_operand" "0C") (match_operand: 3 "register_operand" "Yk")))] "TARGET_AVX512BW" "vmovdqu\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn_and_split "*_load" [(set (match_operand:VI12_AVX512VL 0 "register_operand" "=v") (unspec:VI12_AVX512VL [(match_operand:VI12_AVX512VL 1 "memory_operand" "m")] UNSPEC_MASKLOAD))] "TARGET_AVX512BW" "#" "&& 1" [(set (match_dup 0) (match_dup 1))]) (define_insn "avx512f_mov_mask" [(set (match_operand:VFH_128 0 "register_operand" "=v") (vec_merge:VFH_128 (vec_merge:VFH_128 (match_operand:VFH_128 2 "register_operand" "v") (match_operand:VFH_128 3 "nonimm_or_0_operand" "0C") (match_operand:QI 4 "register_operand" "Yk")) (match_operand:VFH_128 1 "register_operand" "v") (const_int 1)))] "TARGET_AVX512F" "vmov\t{%2, %1, %0%{%4%}%N3|%0%{%4%}%N3, %1, %2}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "avx512f_load_mask" [(set (match_operand: 0 "register_operand") (vec_merge: (vec_merge: (vec_duplicate: (match_operand:MODEFH 1 "memory_operand")) (match_operand: 2 "nonimm_or_0_operand") (match_operand:QI 3 "register_operand")) (match_dup 4) (const_int 1)))] "TARGET_AVX512F" "operands[4] = CONST0_RTX (mode);") (define_insn "*avx512f_load_mask" [(set (match_operand: 0 "register_operand" "=v") (vec_merge: (vec_merge: (vec_duplicate: (match_operand:MODEFH 1 "memory_operand" "m")) (match_operand: 2 "nonimm_or_0_operand" "0C") (match_operand:QI 3 "register_operand" "Yk")) (match_operand: 4 "const0_operand" "C") (const_int 1)))] "TARGET_AVX512F" "vmov\t{%1, %0%{%3%}%N2|%0%{3%}%N2, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "memory" "load") (set_attr 
"mode" "")]) (define_insn "avx512f_store_mask" [(set (match_operand:MODEFH 0 "memory_operand" "=m") (if_then_else:MODEFH (and:QI (match_operand:QI 2 "register_operand" "Yk") (const_int 1)) (vec_select:MODEFH (match_operand: 1 "register_operand" "v") (parallel [(const_int 0)])) (match_dup 0)))] "TARGET_AVX512F" "vmov\t{%1, %0%{%2%}|%0%{%2%}, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "memory" "store") (set_attr "mode" "")]) (define_insn "_blendm" [(set (match_operand:V48_AVX512VL 0 "register_operand" "=v,v") (vec_merge:V48_AVX512VL (match_operand:V48_AVX512VL 2 "nonimmediate_operand" "vm,vm") (match_operand:V48_AVX512VL 1 "nonimm_or_0_operand" "0C,v") (match_operand: 3 "register_operand" "Yk,Yk")))] "TARGET_AVX512F" { if (REG_P (operands[1]) && REGNO (operands[1]) != REGNO (operands[0])) return "vblendm\t{%2, %1, %0%{%3%}|%0%{%3%}, %1, %2}"; if (FLOAT_MODE_P (GET_MODE_INNER (mode))) { if (misaligned_operand (operands[2], mode)) return "vmovu\t{%2, %0%{%3%}%N1|%0%{%3%}%N1, %2}"; else return "vmova\t{%2, %0%{%3%}%N1|%0%{%3%}%N1, %2}"; } else { if (misaligned_operand (operands[2], mode)) return "vmovdqu\t{%2, %0%{%3%}%N1|%0%{%3%}%N1, %2}"; else return "vmovdqa\t{%2, %0%{%3%}%N1|%0%{%3%}%N1, %2}"; } } [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "_blendm" [(set (match_operand:VI12_AVX512VL 0 "register_operand" "=v,v") (vec_merge:VI12_AVX512VL (match_operand:VI12_AVX512VL 2 "nonimmediate_operand" "vm,vm") (match_operand:VI12_AVX512VL 1 "nonimm_or_0_operand" "0C,v") (match_operand: 3 "register_operand" "Yk,Yk")))] "TARGET_AVX512BW" "@ vmovdqu\t{%2, %0%{%3%}%N1|%0%{%3%}%N1, %2} vpblendm\t{%2, %1, %0%{%3%}|%0%{%3%}, %1, %2}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "_blendm" [(set (match_operand:VF_AVX512FP16 0 "register_operand" "=v,v") (vec_merge:VF_AVX512FP16 (match_operand:VF_AVX512FP16 2 "nonimmediate_operand" "vm,vm") (match_operand:VF_AVX512FP16 
1 "nonimm_or_0_operand" "0C,v") (match_operand: 3 "register_operand" "Yk,Yk")))] "TARGET_AVX512BW" "@ vmovdqu\t{%2, %0%{%3%}%N1|%0%{%3%}%N1, %2} vpblendmw\t{%2, %1, %0%{%3%}|%0%{%3%}, %1, %2}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "_store_mask" [(set (match_operand:V48_AVX512VL 0 "memory_operand" "=m") (vec_merge:V48_AVX512VL (match_operand:V48_AVX512VL 1 "register_operand" "v") (match_dup 0) (match_operand: 2 "register_operand" "Yk")))] "TARGET_AVX512F" { if (FLOAT_MODE_P (GET_MODE_INNER (mode))) { if (misaligned_operand (operands[0], mode)) return "vmovu\t{%1, %0%{%2%}|%0%{%2%}, %1}"; else return "vmova\t{%1, %0%{%2%}|%0%{%2%}, %1}"; } else { if (misaligned_operand (operands[0], mode)) return "vmovdqu\t{%1, %0%{%2%}|%0%{%2%}, %1}"; else return "vmovdqa\t{%1, %0%{%2%}|%0%{%2%}, %1}"; } } [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "memory" "store") (set_attr "mode" "")]) (define_insn "_store_mask" [(set (match_operand:VI12HF_AVX512VL 0 "memory_operand" "=m") (vec_merge:VI12HF_AVX512VL (match_operand:VI12HF_AVX512VL 1 "register_operand" "v") (match_dup 0) (match_operand: 2 "register_operand" "Yk")))] "TARGET_AVX512BW" "vmovdqu\t{%1, %0%{%2%}|%0%{%2%}, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "memory" "store") (set_attr "mode" "")]) (define_expand "sse2_movq128" [(set (match_operand:V2DI 0 "register_operand") (vec_concat:V2DI (vec_select:DI (match_operand:V2DI 1 "nonimmediate_operand") (parallel [(const_int 0)])) (const_int 0)))] "TARGET_SSE2") (define_insn "*sse2_movq128_" [(set (match_operand:VI8F_128 0 "register_operand" "=v") (vec_concat:VI8F_128 (vec_select: (match_operand:VI8F_128 1 "nonimmediate_operand" "vm") (parallel [(const_int 0)])) (match_operand: 2 "const0_operand" "C")))] "TARGET_SSE2" "%vmovq\t{%1, %0|%0, %q1}" [(set_attr "type" "ssemov") (set_attr "prefix" "maybe_vex") (set_attr "mode" "TI")]) ;; Move a DI from a 32-bit register pair (e.g. 
;; movdi_to_sse: on !TARGET_64BIT with inter-unit moves enabled, assembles
;; a DImode value living in an integer register pair into an xmm register
;; after reload -- via pinsrd on SSE4.1, or loadld + punpckldq otherwise;
;; a memory DI source is instead loaded directly with vec_concatv2di.
;; Followed by: splits turning zero-extended scalar loads into
;; vec_merge/vec_concat forms, the movmisalign expander, and peephole2s
;; merging movsd/movhpd pairs into a single movupd for
;; TARGET_SSE_UNALIGNED_{LOAD,STORE}_OPTIMAL, then lddqu and the
;; non-temporal movnti/movnt store patterns.
;; NOTE(review): the leading "%edx:%eax) to an xmm." tokens are the tail of
;; a ';;' comment whose line break was destroyed by extraction -- as-is
;; they are stray tokens; the original had each comment line prefixed ';;'.
%edx:%eax) to an xmm. ;; We'd rather avoid this entirely; if the 32-bit reg pair was loaded ;; from memory, we'd prefer to load the memory directly into the %xmm ;; register. To facilitate this happy circumstance, this pattern won't ;; split until after register allocation. If the 64-bit value didn't ;; come from memory, this is the best we can do. This is much better ;; than storing %edx:%eax into a stack temporary and loading an %xmm ;; from there. (define_insn_and_split "movdi_to_sse" [(set (match_operand:V4SI 0 "register_operand" "=x,x,?x") (unspec:V4SI [(match_operand:DI 1 "nonimmediate_operand" "r,m,r")] UNSPEC_MOVDI_TO_SSE)) (clobber (match_scratch:V4SI 2 "=X,X,&x"))] "!TARGET_64BIT && TARGET_SSE2 && TARGET_INTER_UNIT_MOVES_TO_VEC" "#" "&& reload_completed" [(const_int 0)] { if (register_operand (operands[1], DImode)) { /* The DImode arrived in a pair of integral registers (e.g. %edx:%eax). Assemble the 64-bit DImode value in an xmm register. */ emit_insn (gen_sse2_loadld (operands[0], CONST0_RTX (V4SImode), gen_lowpart (SImode, operands[1]))); if (TARGET_SSE4_1) emit_insn (gen_sse4_1_pinsrd (operands[0], operands[0], gen_highpart (SImode, operands[1]), GEN_INT (2))); else { emit_insn (gen_sse2_loadld (operands[2], CONST0_RTX (V4SImode), gen_highpart (SImode, operands[1]))); emit_insn (gen_vec_interleave_lowv4si (operands[0], operands[0], operands[2])); } } else if (memory_operand (operands[1], DImode)) emit_insn (gen_vec_concatv2di (gen_lowpart (V2DImode, operands[0]), operands[1], const0_rtx)); else gcc_unreachable (); DONE; } [(set_attr "isa" "sse4,*,*")]) (define_split [(set (match_operand:V4SF 0 "register_operand") (match_operand:V4SF 1 "zero_extended_scalar_load_operand"))] "TARGET_SSE && reload_completed" [(set (match_dup 0) (vec_merge:V4SF (vec_duplicate:V4SF (match_dup 1)) (match_dup 2) (const_int 1)))] { operands[1] = gen_lowpart (SFmode, operands[1]); operands[2] = CONST0_RTX (V4SFmode); }) (define_split [(set (match_operand:V2DF 0 
"register_operand") (match_operand:V2DF 1 "zero_extended_scalar_load_operand"))] "TARGET_SSE2 && reload_completed" [(set (match_dup 0) (vec_concat:V2DF (match_dup 1) (match_dup 2)))] { operands[1] = gen_lowpart (DFmode, operands[1]); operands[2] = CONST0_RTX (DFmode); }) (define_expand "movmisalign" [(set (match_operand:VMOVE 0 "nonimmediate_operand") (match_operand:VMOVE 1 "nonimmediate_operand"))] "TARGET_SSE" { ix86_expand_vector_move_misalign (mode, operands); DONE; }) ;; Merge movsd/movhpd to movupd for TARGET_SSE_UNALIGNED_LOAD_OPTIMAL targets. (define_peephole2 [(set (match_operand:V2DF 0 "sse_reg_operand") (vec_concat:V2DF (match_operand:DF 1 "memory_operand") (match_operand:DF 4 "const0_operand"))) (set (match_operand:V2DF 2 "sse_reg_operand") (vec_concat:V2DF (vec_select:DF (match_dup 2) (parallel [(const_int 0)])) (match_operand:DF 3 "memory_operand")))] "TARGET_SSE2 && TARGET_SSE_UNALIGNED_LOAD_OPTIMAL && ix86_operands_ok_for_move_multiple (operands, true, DFmode)" [(set (match_dup 2) (match_dup 5))] "operands[5] = adjust_address (operands[1], V2DFmode, 0);") (define_peephole2 [(set (match_operand:DF 0 "sse_reg_operand") (match_operand:DF 1 "memory_operand")) (set (match_operand:V2DF 2 "sse_reg_operand") (vec_concat:V2DF (match_operand:DF 4 "sse_reg_operand") (match_operand:DF 3 "memory_operand")))] "TARGET_SSE2 && TARGET_SSE_UNALIGNED_LOAD_OPTIMAL && REGNO (operands[4]) == REGNO (operands[2]) && ix86_operands_ok_for_move_multiple (operands, true, DFmode)" [(set (match_dup 2) (match_dup 5))] "operands[5] = adjust_address (operands[1], V2DFmode, 0);") ;; Merge movlpd/movhpd to movupd for TARGET_SSE_UNALIGNED_STORE_OPTIMAL targets. 
(define_peephole2 [(set (match_operand:DF 0 "memory_operand") (vec_select:DF (match_operand:V2DF 1 "sse_reg_operand") (parallel [(const_int 0)]))) (set (match_operand:DF 2 "memory_operand") (vec_select:DF (match_operand:V2DF 3 "sse_reg_operand") (parallel [(const_int 1)])))] "TARGET_SSE2 && TARGET_SSE_UNALIGNED_STORE_OPTIMAL && ix86_operands_ok_for_move_multiple (operands, false, DFmode)" [(set (match_dup 4) (match_dup 1))] "operands[4] = adjust_address (operands[0], V2DFmode, 0);") (define_insn "_lddqu" [(set (match_operand:VI1 0 "register_operand" "=x") (unspec:VI1 [(match_operand:VI1 1 "memory_operand" "m")] UNSPEC_LDDQU))] "TARGET_SSE3" "%vlddqu\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "movu" "1") (set (attr "prefix_data16") (if_then_else (match_test "TARGET_AVX") (const_string "*") (const_string "0"))) (set (attr "prefix_rep") (if_then_else (match_test "TARGET_AVX") (const_string "*") (const_string "1"))) (set_attr "prefix" "maybe_vex") (set_attr "mode" "")]) (define_insn "sse2_movnti" [(set (match_operand:SWI48 0 "memory_operand" "=m") (unspec:SWI48 [(match_operand:SWI48 1 "register_operand" "r")] UNSPEC_MOVNT))] "TARGET_SSE2" "movnti\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix_data16" "0") (set_attr "mode" "")]) (define_insn "_movnt" [(set (match_operand:VF 0 "memory_operand" "=m") (unspec:VF [(match_operand:VF 1 "register_operand" "v")] UNSPEC_MOVNT))] "TARGET_SSE" "%vmovnt\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "maybe_vex") (set_attr "mode" "")]) (define_insn "_movnt" [(set (match_operand:VI8 0 "memory_operand" "=m") (unspec:VI8 [(match_operand:VI8 1 "register_operand" "v")] UNSPEC_MOVNT))] "TARGET_SSE2" "%vmovntdq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set (attr "prefix_data16") (if_then_else (match_test "TARGET_AVX") (const_string "*") (const_string "1"))) (set_attr "prefix" "maybe_vex") (set_attr "mode" "")]) ; Expand patterns for non-temporal stores. 
;; Non-temporal store expander over STORENT_MODE (each mode gated on its
;; ISA), then the AVX-512 mask-register ("k") operation section: iterators
;; selecting which mask widths each ISA supports, kmov, the two-operand
;; logic ops (kand/kior/kxor via any_logic), kandn, kxnor, knot (+ a DImode
;; zero-extend variant), kadd, kshiftl/kshiftr, ktest/kortest setting
;; FLAGS, and the kunpck widening patterns.  The define_splits convert
;; plain GPR-style logic on mask_reg_operands into the UNSPEC_MASKOP
;; parallel forms after reload so they stay in k-registers.
;; NOTE(review): "At the moment, only those ..." below is the continuation
;; of the "; Expand patterns for non-temporal stores." comment -- its
;; line-leading ';' markers were destroyed by extraction, leaving stray
;; tokens; restore the comment prefixes against upstream.
At the moment, only those ; that directly map to insns are defined; it would be possible to ; define patterns for other modes that would expand to several insns. ;; Modes handled by storent patterns. (define_mode_iterator STORENT_MODE [(DI "TARGET_SSE2 && TARGET_64BIT") (SI "TARGET_SSE2") (SF "TARGET_SSE4A") (DF "TARGET_SSE4A") (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") (V2DI "TARGET_SSE2") (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")]) (define_expand "storent" [(set (match_operand:STORENT_MODE 0 "memory_operand") (unspec:STORENT_MODE [(match_operand:STORENT_MODE 1 "register_operand")] UNSPEC_MOVNT))] "TARGET_SSE") ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; Mask operations ;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; All integer modes with AVX512BW/DQ. (define_mode_iterator SWI1248_AVX512BWDQ [(QI "TARGET_AVX512DQ") HI (SI "TARGET_AVX512BW") (DI "TARGET_AVX512BW")]) ;; All integer modes with AVX512BW, where HImode operation ;; can be used instead of QImode. (define_mode_iterator SWI1248_AVX512BW [QI HI (SI "TARGET_AVX512BW") (DI "TARGET_AVX512BW")]) ;; All integer modes with AVX512BW/DQ, even HImode requires DQ. 
(define_mode_iterator SWI1248_AVX512BWDQ2 [(QI "TARGET_AVX512DQ") (HI "TARGET_AVX512DQ") (SI "TARGET_AVX512BW") (DI "TARGET_AVX512BW")]) (define_expand "kmov" [(set (match_operand:SWI1248_AVX512BWDQ 0 "nonimmediate_operand") (match_operand:SWI1248_AVX512BWDQ 1 "nonimmediate_operand"))] "TARGET_AVX512F && !(MEM_P (operands[0]) && MEM_P (operands[1]))") (define_insn "k" [(set (match_operand:SWI1248_AVX512BW 0 "register_operand" "=k") (any_logic:SWI1248_AVX512BW (match_operand:SWI1248_AVX512BW 1 "register_operand" "k") (match_operand:SWI1248_AVX512BW 2 "register_operand" "k"))) (unspec [(const_int 0)] UNSPEC_MASKOP)] "TARGET_AVX512F" { if (get_attr_mode (insn) == MODE_HI) return "kw\t{%2, %1, %0|%0, %1, %2}"; else return "k\t{%2, %1, %0|%0, %1, %2}"; } [(set_attr "type" "msklog") (set_attr "prefix" "vex") (set (attr "mode") (cond [(and (match_test "mode == QImode") (not (match_test "TARGET_AVX512DQ"))) (const_string "HI") ] (const_string "")))]) (define_split [(set (match_operand:SWI1248_AVX512BW 0 "mask_reg_operand") (any_logic:SWI1248_AVX512BW (match_operand:SWI1248_AVX512BW 1 "mask_reg_operand") (match_operand:SWI1248_AVX512BW 2 "mask_reg_operand"))) (clobber (reg:CC FLAGS_REG))] "TARGET_AVX512F && reload_completed" [(parallel [(set (match_dup 0) (any_logic:SWI1248_AVX512BW (match_dup 1) (match_dup 2))) (unspec [(const_int 0)] UNSPEC_MASKOP)])]) (define_insn "kandn" [(set (match_operand:SWI1248_AVX512BW 0 "register_operand" "=k") (and:SWI1248_AVX512BW (not:SWI1248_AVX512BW (match_operand:SWI1248_AVX512BW 1 "register_operand" "k")) (match_operand:SWI1248_AVX512BW 2 "register_operand" "k"))) (unspec [(const_int 0)] UNSPEC_MASKOP)] "TARGET_AVX512F" { if (get_attr_mode (insn) == MODE_HI) return "kandnw\t{%2, %1, %0|%0, %1, %2}"; else return "kandn\t{%2, %1, %0|%0, %1, %2}"; } [(set_attr "type" "msklog") (set_attr "prefix" "vex") (set (attr "mode") (cond [(and (match_test "mode == QImode") (not (match_test "TARGET_AVX512DQ"))) (const_string "HI") ] (const_string "")))]) 
(define_split [(set (match_operand:SWI1248_AVX512BW 0 "mask_reg_operand") (and:SWI1248_AVX512BW (not:SWI1248_AVX512BW (match_operand:SWI1248_AVX512BW 1 "mask_reg_operand")) (match_operand:SWI1248_AVX512BW 2 "mask_reg_operand"))) (clobber (reg:CC FLAGS_REG))] "TARGET_AVX512F && reload_completed" [(parallel [(set (match_dup 0) (and:SWI1248_AVX512BW (not:SWI1248_AVX512BW (match_dup 1)) (match_dup 2))) (unspec [(const_int 0)] UNSPEC_MASKOP)])]) (define_insn "kxnor" [(set (match_operand:SWI1248_AVX512BW 0 "register_operand" "=k") (not:SWI1248_AVX512BW (xor:SWI1248_AVX512BW (match_operand:SWI1248_AVX512BW 1 "register_operand" "k") (match_operand:SWI1248_AVX512BW 2 "register_operand" "k")))) (unspec [(const_int 0)] UNSPEC_MASKOP)] "TARGET_AVX512F" { if (get_attr_mode (insn) == MODE_HI) return "kxnorw\t{%2, %1, %0|%0, %1, %2}"; else return "kxnor\t{%2, %1, %0|%0, %1, %2}"; } [(set_attr "type" "msklog") (set_attr "prefix" "vex") (set (attr "mode") (cond [(and (match_test "mode == QImode") (not (match_test "TARGET_AVX512DQ"))) (const_string "HI") ] (const_string "")))]) (define_insn "knot" [(set (match_operand:SWI1248_AVX512BW 0 "register_operand" "=k") (not:SWI1248_AVX512BW (match_operand:SWI1248_AVX512BW 1 "register_operand" "k"))) (unspec [(const_int 0)] UNSPEC_MASKOP)] "TARGET_AVX512F" { if (get_attr_mode (insn) == MODE_HI) return "knotw\t{%1, %0|%0, %1}"; else return "knot\t{%1, %0|%0, %1}"; } [(set_attr "type" "msklog") (set_attr "prefix" "vex") (set (attr "mode") (cond [(and (match_test "mode == QImode") (not (match_test "TARGET_AVX512DQ"))) (const_string "HI") ] (const_string "")))]) (define_split [(set (match_operand:SWI1248_AVX512BW 0 "mask_reg_operand") (not:SWI1248_AVX512BW (match_operand:SWI1248_AVX512BW 1 "mask_reg_operand")))] "TARGET_AVX512F && reload_completed" [(parallel [(set (match_dup 0) (not:SWI1248_AVX512BW (match_dup 1))) (unspec [(const_int 0)] UNSPEC_MASKOP)])]) (define_insn "*knotsi_1_zext" [(set (match_operand:DI 0 "register_operand" "=k") 
(zero_extend:DI (not:SI (match_operand:SI 1 "register_operand" "k")))) (unspec [(const_int 0)] UNSPEC_MASKOP)] "TARGET_AVX512BW" "knotd\t{%1, %0|%0, %1}"; [(set_attr "type" "msklog") (set_attr "prefix" "vex") (set_attr "mode" "SI")]) (define_split [(set (match_operand:DI 0 "mask_reg_operand") (zero_extend:DI (not:SI (match_operand:SI 1 "mask_reg_operand"))))] "TARGET_AVX512BW && reload_completed" [(parallel [(set (match_dup 0) (zero_extend:DI (not:SI (match_dup 1)))) (unspec [(const_int 0)] UNSPEC_MASKOP)])]) (define_insn "kadd" [(set (match_operand:SWI1248_AVX512BWDQ2 0 "register_operand" "=k") (plus:SWI1248_AVX512BWDQ2 (match_operand:SWI1248_AVX512BWDQ2 1 "register_operand" "k") (match_operand:SWI1248_AVX512BWDQ2 2 "register_operand" "k"))) (unspec [(const_int 0)] UNSPEC_MASKOP)] "TARGET_AVX512F" "kadd\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "msklog") (set_attr "prefix" "vex") (set_attr "mode" "")]) ;; Mask variant shift mnemonics (define_code_attr mshift [(ashift "shiftl") (lshiftrt "shiftr")]) (define_insn "k" [(set (match_operand:SWI1248_AVX512BWDQ 0 "register_operand" "=k") (any_lshift:SWI1248_AVX512BWDQ (match_operand:SWI1248_AVX512BWDQ 1 "register_operand" "k") (match_operand 2 "const_0_to_255_operand" "n"))) (unspec [(const_int 0)] UNSPEC_MASKOP)] "TARGET_AVX512F" "k\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "msklog") (set_attr "prefix" "vex") (set_attr "mode" "")]) (define_split [(set (match_operand:SWI1248_AVX512BW 0 "mask_reg_operand") (any_lshift:SWI1248_AVX512BW (match_operand:SWI1248_AVX512BW 1 "mask_reg_operand") (match_operand 2 "const_int_operand"))) (clobber (reg:CC FLAGS_REG))] "TARGET_AVX512F && reload_completed" [(parallel [(set (match_dup 0) (any_lshift:SWI1248_AVX512BW (match_dup 1) (match_dup 2))) (unspec [(const_int 0)] UNSPEC_MASKOP)])]) (define_insn "ktest" [(set (reg:CC FLAGS_REG) (unspec:CC [(match_operand:SWI1248_AVX512BWDQ2 0 "register_operand" "k") (match_operand:SWI1248_AVX512BWDQ2 1 "register_operand" "k")] UNSPEC_KTEST))] 
"TARGET_AVX512F" "ktest\t{%1, %0|%0, %1}" [(set_attr "mode" "") (set_attr "type" "msklog") (set_attr "prefix" "vex")]) (define_insn "kortest" [(set (reg:CC FLAGS_REG) (unspec:CC [(match_operand:SWI1248_AVX512BWDQ 0 "register_operand" "k") (match_operand:SWI1248_AVX512BWDQ 1 "register_operand" "k")] UNSPEC_KORTEST))] "TARGET_AVX512F" "kortest\t{%1, %0|%0, %1}" [(set_attr "mode" "") (set_attr "type" "msklog") (set_attr "prefix" "vex")]) (define_insn "kunpckhi" [(set (match_operand:HI 0 "register_operand" "=k") (ior:HI (ashift:HI (zero_extend:HI (match_operand:QI 1 "register_operand" "k")) (const_int 8)) (zero_extend:HI (match_operand:QI 2 "register_operand" "k"))))] "TARGET_AVX512F" "kunpckbw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "mode" "HI") (set_attr "type" "msklog") (set_attr "prefix" "vex")]) (define_insn "kunpcksi" [(set (match_operand:SI 0 "register_operand" "=k") (ior:SI (ashift:SI (zero_extend:SI (match_operand:HI 1 "register_operand" "k")) (const_int 16)) (zero_extend:SI (match_operand:HI 2 "register_operand" "k"))))] "TARGET_AVX512BW" "kunpckwd\t{%2, %1, %0|%0, %1, %2}" [(set_attr "mode" "SI")]) (define_insn "kunpckdi" [(set (match_operand:DI 0 "register_operand" "=k") (ior:DI (ashift:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "k")) (const_int 32)) (zero_extend:DI (match_operand:SI 2 "register_operand" "k"))))] "TARGET_AVX512BW" "kunpckdq\t{%2, %1, %0|%0, %1, %2}" [(set_attr "mode" "DI")]) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; Parallel floating point arithmetic ;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define_expand "2" [(set (match_operand:VFB 0 "register_operand") (absneg:VFB (match_operand:VFB 1 "register_operand")))] "TARGET_SSE" "ix86_expand_fp_absneg_operator (, mode, operands); DONE;") (define_insn_and_split "*2" [(set (match_operand:VFB 0 "register_operand" "=x,x,v,v") (absneg:VFB (match_operand:VFB 1 "vector_operand" "0,xBm,v,m"))) (use (match_operand:VFB 2 
"vector_operand" "xBm,0,vm,v"))] "TARGET_SSE" "#" "&& reload_completed" [(set (match_dup 0) (:VFB (match_dup 1) (match_dup 2)))] { if (TARGET_AVX) { if (MEM_P (operands[1])) std::swap (operands[1], operands[2]); } else { if (operands_match_p (operands[0], operands[2])) std::swap (operands[1], operands[2]); } } [(set_attr "isa" "noavx,noavx,avx,avx")]) (define_insn_and_split "*nabs2" [(set (match_operand:VF 0 "register_operand" "=x,x,v,v") (neg:VF (abs:VF (match_operand:VF 1 "vector_operand" "0,xBm,v,m")))) (use (match_operand:VF 2 "vector_operand" "xBm,0,vm,v"))] "TARGET_SSE" "#" "&& reload_completed" [(set (match_dup 0) (ior:VF (match_dup 1) (match_dup 2)))] { if (TARGET_AVX) { if (MEM_P (operands[1])) std::swap (operands[1], operands[2]); } else { if (operands_match_p (operands[0], operands[2])) std::swap (operands[1], operands[2]); } } [(set_attr "isa" "noavx,noavx,avx,avx")]) (define_expand "cond_" [(set (match_operand:VFH 0 "register_operand") (vec_merge:VFH (plusminus:VFH (match_operand:VFH 2 "vector_operand") (match_operand:VFH 3 "vector_operand")) (match_operand:VFH 4 "nonimm_or_0_operand") (match_operand: 1 "register_operand")))] " == 64 || TARGET_AVX512VL" { emit_insn (gen_3_mask (operands[0], operands[2], operands[3], operands[4], operands[1])); DONE; }) (define_expand "3" [(set (match_operand:VFH 0 "register_operand") (plusminus:VFH (match_operand:VFH 1 "") (match_operand:VFH 2 "")))] "TARGET_SSE && && " "ix86_fixup_binary_operands_no_copy (, mode, operands);") (define_insn "*3" [(set (match_operand:VFH 0 "register_operand" "=x,v") (plusminus:VFH (match_operand:VFH 1 "" "0,v") (match_operand:VFH 2 "" "xBm,")))] "TARGET_SSE && ix86_binary_operator_ok (, mode, operands) && && " "@ \t{%2, %0|%0, %2} v\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseadd") (set_attr "prefix" "") (set_attr "mode" "")]) ;; Standard scalar operation patterns which preserve the rest of the ;; vector for combiner. 
(define_insn "*_vm3" [(set (match_operand:VFH_128 0 "register_operand" "=x,v") (vec_merge:VFH_128 (vec_duplicate:VFH_128 (plusminus: (vec_select: (match_operand:VFH_128 1 "register_operand" "0,v") (parallel [(const_int 0)])) (match_operand: 2 "nonimmediate_operand" "xm,vm"))) (match_dup 1) (const_int 1)))] "TARGET_SSE" "@ \t{%2, %0|%0, %2} v\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseadd") (set (attr "prefix") (cond [(eq_attr "alternative" "0") (const_string "orig") (eq_attr "alternative" "1") (if_then_else (match_test "mode == V8HFmode") (const_string "evex") (const_string "vex")) ] (const_string "*"))) (set_attr "mode" "")]) (define_insn "_vm3" [(set (match_operand:VFH_128 0 "register_operand" "=x,v") (vec_merge:VFH_128 (plusminus:VFH_128 (match_operand:VFH_128 1 "register_operand" "0,v") (match_operand:VFH_128 2 "nonimmediate_operand" "xm,")) (match_dup 1) (const_int 1)))] "TARGET_SSE" "@ \t{%2, %0|%0, %2} v\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseadd") (set_attr "prefix" "") (set_attr "mode" "")]) (define_expand "cond_mul" [(set (match_operand:VFH 0 "register_operand") (vec_merge:VFH (mult:VFH (match_operand:VFH 2 "vector_operand") (match_operand:VFH 3 "vector_operand")) (match_operand:VFH 4 "nonimm_or_0_operand") (match_operand: 1 "register_operand")))] " == 64 || TARGET_AVX512VL" { emit_insn (gen_mul3_mask (operands[0], operands[2], operands[3], operands[4], operands[1])); DONE; }) (define_expand "mul3" [(set (match_operand:VFH 0 "register_operand") (mult:VFH (match_operand:VFH 1 "") (match_operand:VFH 2 "")))] "TARGET_SSE && && " "ix86_fixup_binary_operands_no_copy (MULT, mode, operands);") (define_insn "*mul3" [(set (match_operand:VFH 0 "register_operand" "=x,v") (mult:VFH (match_operand:VFH 1 "" "%0,v") (match_operand:VFH 2 "" "xBm,")))] "TARGET_SSE && ix86_binary_operator_ok (MULT, mode, operands) && && " "@ mul\t{%2, %0|%0, %2} vmul\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" 
"noavx,avx") (set_attr "type" "ssemul") (set_attr "prefix" "") (set_attr "btver2_decode" "direct,double") (set_attr "mode" "")]) ;; Standard scalar operation patterns which preserve the rest of the ;; vector for combiner. (define_insn "*_vm3" [(set (match_operand:VFH_128 0 "register_operand" "=x,v") (vec_merge:VFH_128 (vec_duplicate:VFH_128 (multdiv: (vec_select: (match_operand:VFH_128 1 "register_operand" "0,v") (parallel [(const_int 0)])) (match_operand: 2 "nonimmediate_operand" "xm,vm"))) (match_dup 1) (const_int 1)))] "TARGET_SSE" "@ \t{%2, %0|%0, %2} v\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sse") (set (attr "prefix") (cond [(eq_attr "alternative" "0") (const_string "orig") (eq_attr "alternative" "1") (if_then_else (match_test "mode == V8HFmode") (const_string "evex") (const_string "vex")) ] (const_string "*"))) (set_attr "btver2_decode" "direct,double") (set_attr "mode" "")]) (define_insn "_vm3" [(set (match_operand:VFH_128 0 "register_operand" "=x,v") (vec_merge:VFH_128 (multdiv:VFH_128 (match_operand:VFH_128 1 "register_operand" "0,v") (match_operand:VFH_128 2 "nonimmediate_operand" "xm,")) (match_dup 1) (const_int 1)))] "TARGET_SSE" "@ \t{%2, %0|%0, %2} v\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sse") (set_attr "prefix" "") (set_attr "btver2_decode" "direct,double") (set_attr "mode" "")]) (define_expand "div3" [(set (match_operand:VF2 0 "register_operand") (div:VF2 (match_operand:VF2 1 "register_operand") (match_operand:VF2 2 "vector_operand")))] "TARGET_SSE2") (define_expand "div3" [(set (match_operand:VF_AVX512FP16VL 0 "register_operand") (div:VF_AVX512FP16VL (match_operand:VF_AVX512FP16VL 1 "register_operand") (match_operand:VF_AVX512FP16VL 2 "vector_operand")))] "TARGET_AVX512FP16" { /* Transform HF vector div to vector mul/rcp. 
;; NOTE(review): continuation of the div expanders (HF div -> rcp+mul when
;; -mrecip and unsafe-math flags allow; VF1 div -> Newton-Raphson software
;; divide via ix86_emit_swdivsf), the masked conditional-div expander, the
;; div insn, and the reciprocal-approximation patterns: rcpps, [v]rcpss
;; (whole-vector and scalar-unspec forms), and the AVX512FP16
;; vrcpph/vrcpsh variants.  Angle-bracket substitutions were stripped by
;; extraction (empty "" constraint strings, truncated pattern names) —
;; confirm against upstream sse.md.  The first statement here is the tail
;; of the AVX512FP16 div3 expander begun on the previous span.
*/ if (GET_MODE_INNER (mode) == HFmode && TARGET_RECIP_VEC_DIV && optimize_insn_for_speed_p () && flag_finite_math_only && !flag_trapping_math && flag_unsafe_math_optimizations) { rtx op = gen_reg_rtx (mode); operands[2] = force_reg (mode, operands[2]); emit_insn (gen_avx512fp16_rcp2 (op, operands[2])); emit_insn (gen_mul3 (operands[0], operands[1], op)); DONE; } }) (define_expand "div3" [(set (match_operand:VF1 0 "register_operand") (div:VF1 (match_operand:VF1 1 "register_operand") (match_operand:VF1 2 "vector_operand")))] "TARGET_SSE" { if (TARGET_SSE_MATH && TARGET_RECIP_VEC_DIV && !optimize_insn_for_size_p () && flag_finite_math_only && !flag_trapping_math && flag_unsafe_math_optimizations) { ix86_emit_swdivsf (operands[0], operands[1], operands[2], mode); DONE; } }) (define_expand "cond_div" [(set (match_operand:VFH 0 "register_operand") (vec_merge:VFH (div:VFH (match_operand:VFH 2 "register_operand") (match_operand:VFH 3 "vector_operand")) (match_operand:VFH 4 "nonimm_or_0_operand") (match_operand: 1 "register_operand")))] " == 64 || TARGET_AVX512VL" { emit_insn (gen__div3_mask (operands[0], operands[2], operands[3], operands[4], operands[1])); DONE; }) (define_insn "_div3" [(set (match_operand:VFH 0 "register_operand" "=x,v") (div:VFH (match_operand:VFH 1 "register_operand" "0,v") (match_operand:VFH 2 "" "xBm,")))] "TARGET_SSE && && " "@ div\t{%2, %0|%0, %2} vdiv\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "ssediv") (set_attr "prefix" "") (set_attr "mode" "")]) (define_insn "_rcp2" [(set (match_operand:VF1_128_256 0 "register_operand" "=x") (unspec:VF1_128_256 [(match_operand:VF1_128_256 1 "vector_operand" "xBm")] UNSPEC_RCP))] "TARGET_SSE" "%vrcpps\t{%1, %0|%0, %1}" [(set_attr "type" "sse") (set_attr "atom_sse_attr" "rcp") (set_attr "btver2_sse_attr" "rcp") (set_attr "prefix" "maybe_vex") (set_attr "mode" "")]) (define_insn "sse_vmrcpv4sf2" [(set (match_operand:V4SF 0 "register_operand" "=x,x") (vec_merge:V4SF (unspec:V4SF 
[(match_operand:V4SF 1 "nonimmediate_operand" "xm,xm")] UNSPEC_RCP) (match_operand:V4SF 2 "register_operand" "0,x") (const_int 1)))] "TARGET_SSE" "@ rcpss\t{%1, %0|%0, %k1} vrcpss\t{%1, %2, %0|%0, %2, %k1}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sse") (set_attr "atom_sse_attr" "rcp") (set_attr "btver2_sse_attr" "rcp") (set_attr "prefix" "orig,vex") (set_attr "mode" "SF")]) (define_insn "*sse_vmrcpv4sf2" [(set (match_operand:V4SF 0 "register_operand" "=x,x") (vec_merge:V4SF (vec_duplicate:V4SF (unspec:SF [(match_operand:SF 1 "nonimmediate_operand" "xm,xm")] UNSPEC_RCP)) (match_operand:V4SF 2 "register_operand" "0,x") (const_int 1)))] "TARGET_SSE" "@ rcpss\t{%1, %0|%0, %1} vrcpss\t{%1, %2, %0|%0, %2, %1}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sse") (set_attr "atom_sse_attr" "rcp") (set_attr "btver2_sse_attr" "rcp") (set_attr "prefix" "orig,vex") (set_attr "mode" "SF")]) (define_insn "avx512fp16_rcp2" [(set (match_operand:VF_AVX512FP16VL 0 "register_operand" "=v") (unspec:VF_AVX512FP16VL [(match_operand:VF_AVX512FP16VL 1 "nonimmediate_operand" "vm")] UNSPEC_RCP))] "TARGET_AVX512FP16" "vrcpph\t{%1, %0|%0, %1}" [(set_attr "type" "sse") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "avx512fp16_vmrcpv8hf2" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_merge:V8HF (unspec:V8HF [(match_operand:V8HF 1 "nonimmediate_operand" "vm")] UNSPEC_RCP) (match_operand:V8HF 2 "register_operand" "v") (const_int 1)))] "TARGET_AVX512FP16" "vrcpsh\t{%1, %2, %0|%0, %2, %w1}" [(set_attr "type" "sse") (set_attr "prefix" "evex") (set_attr "mode" "HF")]) (define_insn "*avx512fp16_vmrcpv8hf2" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_merge:V8HF (vec_duplicate:V8HF (unspec:HF [(match_operand:HF 1 "nonimmediate_operand" "vm")] UNSPEC_RCP)) (match_operand:V8HF 2 "register_operand" "v") (const_int 1)))] "TARGET_AVX512FP16" "vrcpsh\t{%1, %2, %0|%0, %2, %w1}" [(set_attr "type" "sse") (set_attr "prefix" "evex") (set_attr "mode" "HF")]) 
(define_insn "rcp14" [(set (match_operand:VF_AVX512VL 0 "register_operand" "=v") (unspec:VF_AVX512VL [(match_operand:VF_AVX512VL 1 "nonimmediate_operand" "vm")] UNSPEC_RCP14))] "TARGET_AVX512F" "vrcp14\t{%1, %0|%0, %1}" [(set_attr "type" "sse") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "srcp14" [(set (match_operand:VF_128 0 "register_operand" "=v") (vec_merge:VF_128 (unspec:VF_128 [(match_operand:VF_128 1 "nonimmediate_operand" "vm")] UNSPEC_RCP14) (match_operand:VF_128 2 "register_operand" "v") (const_int 1)))] "TARGET_AVX512F" "vrcp14\t{%1, %2, %0|%0, %2, %1}" [(set_attr "type" "sse") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "srcp14_mask" [(set (match_operand:VF_128 0 "register_operand" "=v") (vec_merge:VF_128 (vec_merge:VF_128 (unspec:VF_128 [(match_operand:VF_128 1 "nonimmediate_operand" "vm")] UNSPEC_RCP14) (match_operand:VF_128 3 "nonimm_or_0_operand" "0C") (match_operand: 4 "register_operand" "Yk")) (match_operand:VF_128 2 "register_operand" "v") (const_int 1)))] "TARGET_AVX512F" "vrcp14\t{%1, %2, %0%{%4%}%N3|%0%{%4%}%N3, %2, %1}" [(set_attr "type" "sse") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "sqrt2" [(set (match_operand:VF2H 0 "register_operand") (sqrt:VF2H (match_operand:VF2H 1 "vector_operand")))] "TARGET_SSE2") (define_expand "sqrt2" [(set (match_operand:VF1 0 "register_operand") (sqrt:VF1 (match_operand:VF1 1 "vector_operand")))] "TARGET_SSE" { if (TARGET_SSE_MATH && TARGET_RECIP_VEC_SQRT && !optimize_insn_for_size_p () && flag_finite_math_only && !flag_trapping_math && flag_unsafe_math_optimizations) { ix86_emit_swsqrtsf (operands[0], operands[1], mode, false); DONE; } }) (define_insn "_sqrt2" [(set (match_operand:VFH 0 "register_operand" "=x,v") (sqrt:VFH (match_operand:VFH 1 "" "xBm,")))] "TARGET_SSE && && " "@ sqrt\t{%1, %0|%0, %1} vsqrt\t{%1, %0|%0, %1}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sse") (set_attr "atom_sse_attr" "sqrt") (set_attr "btver2_sse_attr" "sqrt") 
(set_attr "prefix" "maybe_vex") (set_attr "mode" "")]) (define_insn "_vmsqrt2" [(set (match_operand:VFH_128 0 "register_operand" "=x,v") (vec_merge:VFH_128 (sqrt:VFH_128 (match_operand:VFH_128 1 "nonimmediate_operand" "xm,")) (match_operand:VFH_128 2 "register_operand" "0,v") (const_int 1)))] "TARGET_SSE" "@ sqrt\t{%1, %0|%0, %1} vsqrt\t{%1, %2, %0|%0, %2, %1}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sse") (set_attr "atom_sse_attr" "sqrt") (set_attr "prefix" "") (set_attr "btver2_sse_attr" "sqrt") (set_attr "mode" "")]) (define_insn "*_vmsqrt2" [(set (match_operand:VFH_128 0 "register_operand" "=x,v") (vec_merge:VFH_128 (vec_duplicate:VFH_128 (sqrt: (match_operand: 1 "nonimmediate_operand" "xm,"))) (match_operand:VFH_128 2 "register_operand" "0,v") (const_int 1)))] "TARGET_SSE" "@ sqrt\t{%1, %0|%0, %1} vsqrt\t{%1, %2, %0|%0, %2, %1}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sse") (set_attr "atom_sse_attr" "sqrt") (set_attr "prefix" "") (set_attr "btver2_sse_attr" "sqrt") (set_attr "mode" "")]) (define_expand "rsqrt2" [(set (match_operand:VF1_AVX512ER_128_256 0 "register_operand") (unspec:VF1_AVX512ER_128_256 [(match_operand:VF1_AVX512ER_128_256 1 "vector_operand")] UNSPEC_RSQRT))] "TARGET_SSE && TARGET_SSE_MATH" { ix86_emit_swsqrtsf (operands[0], operands[1], mode, true); DONE; }) (define_expand "rsqrt2" [(set (match_operand:VF_AVX512FP16VL 0 "register_operand") (unspec:VF_AVX512FP16VL [(match_operand:VF_AVX512FP16VL 1 "vector_operand")] UNSPEC_RSQRT))] "TARGET_AVX512FP16") (define_insn "_rsqrt2" [(set (match_operand:VF1_128_256 0 "register_operand" "=x") (unspec:VF1_128_256 [(match_operand:VF1_128_256 1 "vector_operand" "xBm")] UNSPEC_RSQRT))] "TARGET_SSE" "%vrsqrtps\t{%1, %0|%0, %1}" [(set_attr "type" "sse") (set_attr "prefix" "maybe_vex") (set_attr "mode" "")]) (define_insn "_rsqrt2" [(set (match_operand:VF_AVX512FP16VL 0 "register_operand" "=v") (unspec:VF_AVX512FP16VL [(match_operand:VF_AVX512FP16VL 1 "vector_operand" "vBm")] UNSPEC_RSQRT))] 
"TARGET_AVX512FP16" "vrsqrtph\t{%1, %0|%0, %1}" [(set_attr "type" "sse") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "rsqrt14" [(set (match_operand:VF_AVX512VL 0 "register_operand" "=v") (unspec:VF_AVX512VL [(match_operand:VF_AVX512VL 1 "nonimmediate_operand" "vm")] UNSPEC_RSQRT14))] "TARGET_AVX512F" "vrsqrt14\t{%1, %0|%0, %1}" [(set_attr "type" "sse") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "rsqrt14" [(set (match_operand:VF_128 0 "register_operand" "=v") (vec_merge:VF_128 (unspec:VF_128 [(match_operand:VF_128 1 "nonimmediate_operand" "vm")] UNSPEC_RSQRT14) (match_operand:VF_128 2 "register_operand" "v") (const_int 1)))] "TARGET_AVX512F" "vrsqrt14\t{%1, %2, %0|%0, %2, %1}" [(set_attr "type" "sse") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "rsqrt14__mask" [(set (match_operand:VF_128 0 "register_operand" "=v") (vec_merge:VF_128 (vec_merge:VF_128 (unspec:VF_128 [(match_operand:VF_128 1 "nonimmediate_operand" "vm")] UNSPEC_RSQRT14) (match_operand:VF_128 3 "nonimm_or_0_operand" "0C") (match_operand: 4 "register_operand" "Yk")) (match_operand:VF_128 2 "register_operand" "v") (const_int 1)))] "TARGET_AVX512F" "vrsqrt14\t{%1, %2, %0%{%4%}%N3|%0%{%4%}%N3, %2, %1}" [(set_attr "type" "sse") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "sse_vmrsqrtv4sf2" [(set (match_operand:V4SF 0 "register_operand" "=x,x") (vec_merge:V4SF (unspec:V4SF [(match_operand:V4SF 1 "nonimmediate_operand" "xm,xm")] UNSPEC_RSQRT) (match_operand:V4SF 2 "register_operand" "0,x") (const_int 1)))] "TARGET_SSE" "@ rsqrtss\t{%1, %0|%0, %k1} vrsqrtss\t{%1, %2, %0|%0, %2, %k1}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sse") (set_attr "prefix" "orig,vex") (set_attr "mode" "SF")]) (define_insn "*sse_vmrsqrtv4sf2" [(set (match_operand:V4SF 0 "register_operand" "=x,x") (vec_merge:V4SF (vec_duplicate:V4SF (unspec:SF [(match_operand:SF 1 "nonimmediate_operand" "xm,xm")] UNSPEC_RSQRT)) (match_operand:V4SF 2 "register_operand" 
"0,x") (const_int 1)))] "TARGET_SSE" "@ rsqrtss\t{%1, %0|%0, %1} vrsqrtss\t{%1, %2, %0|%0, %2, %1}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sse") (set_attr "prefix" "orig,vex") (set_attr "mode" "SF")]) (define_insn "avx512fp16_vmrsqrtv8hf2" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_merge:V8HF (unspec:V8HF [(match_operand:V8HF 1 "nonimmediate_operand" "vm")] UNSPEC_RSQRT) (match_operand:V8HF 2 "register_operand" "v") (const_int 1)))] "TARGET_AVX512FP16" "vrsqrtsh\t{%1, %2, %0|%0, %2, %w1}" [(set_attr "type" "sse") (set_attr "prefix" "evex") (set_attr "mode" "HF")]) (define_insn "*avx512fp16_vmrsqrtv8hf2" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_merge:V8HF (vec_duplicate:V8HF (unspec:HF [(match_operand:HF 1 "nonimmediate_operand" "vm")] UNSPEC_RSQRT)) (match_operand:V8HF 2 "register_operand" "v") (const_int 1)))] "TARGET_AVX512FP16" "vrsqrtsh\t{%1, %2, %0|%0, %2, %w1}" [(set_attr "type" "sse") (set_attr "prefix" "evex") (set_attr "mode" "HF")]) (define_expand "cond_" [(set (match_operand:VFH 0 "register_operand") (vec_merge:VFH (smaxmin:VFH (match_operand:VFH 2 "vector_operand") (match_operand:VFH 3 "vector_operand")) (match_operand:VFH 4 "nonimm_or_0_operand") (match_operand: 1 "register_operand")))] " == 64 || TARGET_AVX512VL" { emit_insn (gen_3_mask (operands[0], operands[2], operands[3], operands[4], operands[1])); DONE; }) (define_expand "3" [(set (match_operand:VFH 0 "register_operand") (smaxmin:VFH (match_operand:VFH 1 "") (match_operand:VFH 2 "")))] "TARGET_SSE && && " { if (!flag_finite_math_only || flag_signed_zeros) { operands[1] = force_reg (mode, operands[1]); emit_insn (gen_ieee_3 (operands[0], operands[1], operands[2] )); DONE; } else ix86_fixup_binary_operands_no_copy (, mode, operands); }) ;; These versions of the min/max patterns are intentionally ignorant of ;; their behavior wrt -0.0 and NaN (via the commutative operand mark). 
;; NOTE(review): commutative (NaN/-0.0-agnostic) smin/smax insn, the
;; non-commutative IEEE min/max unspec patterns (exact
;; min = op1<op2 ? op1 : op2 semantics), the scalar "_vm" forms that keep
;; the upper vector elements from operand 1, and the SSE3 vec_addsub
;; pattern (vec_merge of MINUS and PLUS selected by the addsub_cst
;; per-mode constant mask: odd lanes add, even lanes subtract).
;; Angle-bracket iterator substitutions were stripped by extraction;
;; verify names/conditions against upstream sse.md.
;; Since both the tree-level MAX_EXPR and the rtl-level SMAX operator ;; are undefined in this condition, we're certain this is correct. (define_insn "*3" [(set (match_operand:VFH 0 "register_operand" "=x,v") (smaxmin:VFH (match_operand:VFH 1 "" "%0,v") (match_operand:VFH 2 "" "xBm,")))] "TARGET_SSE && !(MEM_P (operands[1]) && MEM_P (operands[2])) && && " "@ \t{%2, %0|%0, %2} v\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseadd") (set_attr "btver2_sse_attr" "maxmin") (set_attr "prefix" "") (set_attr "mode" "")]) ;; These versions of the min/max patterns implement exactly the operations ;; min = (op1 < op2 ? op1 : op2) ;; max = (!(op1 < op2) ? op1 : op2) ;; Their operands are not commutative, and thus they may be used in the ;; presence of -0.0 and NaN. (define_insn "ieee_3" [(set (match_operand:VFH 0 "register_operand" "=x,v") (unspec:VFH [(match_operand:VFH 1 "register_operand" "0,v") (match_operand:VFH 2 "" "xBm,")] IEEE_MAXMIN))] "TARGET_SSE && && " "@ \t{%2, %0|%0, %2} v\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseadd") (set_attr "btver2_sse_attr" "maxmin") (set_attr "prefix" "") (set_attr "mode" "")]) ;; Standard scalar operation patterns which preserve the rest of the ;; vector for combiner. 
(define_insn "*ieee_3" [(set (match_operand:VFH_128 0 "register_operand" "=x,v") (vec_merge:VFH_128 (vec_duplicate:VFH_128 (unspec: [(vec_select: (match_operand:VFH_128 1 "register_operand" "0,v") (parallel [(const_int 0)])) (match_operand: 2 "nonimmediate_operand" "xm,vm")] IEEE_MAXMIN)) (match_dup 1) (const_int 1)))] "TARGET_SSE" "@ \t{%2, %0|%0, %2} v\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseadd") (set_attr "btver2_sse_attr" "maxmin") (set (attr "prefix") (cond [(eq_attr "alternative" "0") (const_string "orig") (eq_attr "alternative" "1") (if_then_else (match_test "mode == V8HFmode") (const_string "evex") (const_string "vex")) ] (const_string "*"))) (set_attr "mode" "")]) (define_insn "_vm3" [(set (match_operand:VFH_128 0 "register_operand" "=x,v") (vec_merge:VFH_128 (smaxmin:VFH_128 (match_operand:VFH_128 1 "register_operand" "0,v") (match_operand:VFH_128 2 "nonimmediate_operand" "xm,")) (match_dup 1) (const_int 1)))] "TARGET_SSE" "@ \t{%2, %0|%0, %2} v\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sse") (set_attr "btver2_sse_attr" "maxmin") (set_attr "prefix" "") (set_attr "mode" "")]) (define_mode_attr addsub_cst [(V4DF "5") (V2DF "1") (V4SF "5") (V8SF "85")]) (define_insn "vec_addsub3" [(set (match_operand:VF_128_256 0 "register_operand" "=x,x") (vec_merge:VF_128_256 (minus:VF_128_256 (match_operand:VF_128_256 1 "register_operand" "0,x") (match_operand:VF_128_256 2 "vector_operand" "xBm, xm")) (plus:VF_128_256 (match_dup 1) (match_dup 2)) (const_int )))] "TARGET_SSE3" "@ addsub\t{%2, %0|%0, %2} vaddsub\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseadd") (set (attr "atom_unit") (if_then_else (match_test "mode == V2DFmode") (const_string "complex") (const_string "*"))) (set_attr "prefix" "orig,vex") (set (attr "prefix_rep") (if_then_else (and (match_test "mode == V4SFmode") (eq_attr "alternative" "0")) (const_string "1") (const_string "*"))) (set_attr "mode" "")]) 
;; NOTE(review): combiner splits that canonicalize vec_merge/vec_select
;; forms of addsub back into the single vec_addsub shape — two splits match
;; the MINUS/PLUS pair in either order (the second negates the constant
;; selection mask to compensate for the swap), and two match the
;; vec_concat + addsub_vs_parallel permutation form, rebuilding the lane
;; mask from the parallel's element indices.  Then the AVX 256-bit
;; horizontal add/sub V4DF pattern (vhaddpd/vhsubpd: pairwise within each
;; 128-bit lane of operands 1 and 2) and the start of the SSE3 haddv2df
;; expander/insn.  Angle-bracket substitutions stripped by extraction;
;; verify against upstream sse.md.
(define_split [(set (match_operand:VF_128_256 0 "register_operand") (match_operator:VF_128_256 6 "addsub_vm_operator" [(minus:VF_128_256 (match_operand:VF_128_256 1 "register_operand") (match_operand:VF_128_256 2 "vector_operand")) (plus:VF_128_256 (match_operand:VF_128_256 3 "vector_operand") (match_operand:VF_128_256 4 "vector_operand")) (match_operand 5 "const_int_operand")]))] "TARGET_SSE3 && can_create_pseudo_p () && ((rtx_equal_p (operands[1], operands[3]) && rtx_equal_p (operands[2], operands[4])) || (rtx_equal_p (operands[1], operands[4]) && rtx_equal_p (operands[2], operands[3])))" [(set (match_dup 0) (vec_merge:VF_128_256 (minus:VF_128_256 (match_dup 1) (match_dup 2)) (plus:VF_128_256 (match_dup 1) (match_dup 2)) (match_dup 5)))]) (define_split [(set (match_operand:VF_128_256 0 "register_operand") (match_operator:VF_128_256 6 "addsub_vm_operator" [(plus:VF_128_256 (match_operand:VF_128_256 1 "vector_operand") (match_operand:VF_128_256 2 "vector_operand")) (minus:VF_128_256 (match_operand:VF_128_256 3 "register_operand") (match_operand:VF_128_256 4 "vector_operand")) (match_operand 5 "const_int_operand")]))] "TARGET_SSE3 && can_create_pseudo_p () && ((rtx_equal_p (operands[1], operands[3]) && rtx_equal_p (operands[2], operands[4])) || (rtx_equal_p (operands[1], operands[4]) && rtx_equal_p (operands[2], operands[3])))" [(set (match_dup 0) (vec_merge:VF_128_256 (minus:VF_128_256 (match_dup 3) (match_dup 4)) (plus:VF_128_256 (match_dup 3) (match_dup 4)) (match_dup 5)))] { /* Negate mask bits to compensate for swapped PLUS and MINUS RTXes. 
*/ operands[5] = GEN_INT (~INTVAL (operands[5]) & ((HOST_WIDE_INT_1U << GET_MODE_NUNITS (mode)) - 1)); }) (define_split [(set (match_operand:VF_128_256 0 "register_operand") (match_operator:VF_128_256 7 "addsub_vs_operator" [(vec_concat: (minus:VF_128_256 (match_operand:VF_128_256 1 "register_operand") (match_operand:VF_128_256 2 "vector_operand")) (plus:VF_128_256 (match_operand:VF_128_256 3 "vector_operand") (match_operand:VF_128_256 4 "vector_operand"))) (match_parallel 5 "addsub_vs_parallel" [(match_operand 6 "const_int_operand")])]))] "TARGET_SSE3 && can_create_pseudo_p () && ((rtx_equal_p (operands[1], operands[3]) && rtx_equal_p (operands[2], operands[4])) || (rtx_equal_p (operands[1], operands[4]) && rtx_equal_p (operands[2], operands[3])))" [(set (match_dup 0) (vec_merge:VF_128_256 (minus:VF_128_256 (match_dup 1) (match_dup 2)) (plus:VF_128_256 (match_dup 1) (match_dup 2)) (match_dup 5)))] { int i, nelt = XVECLEN (operands[5], 0); HOST_WIDE_INT ival = 0; for (i = 0; i < nelt; i++) if (INTVAL (XVECEXP (operands[5], 0, i)) < GET_MODE_NUNITS (mode)) ival |= HOST_WIDE_INT_1 << i; operands[5] = GEN_INT (ival); }) (define_split [(set (match_operand:VF_128_256 0 "register_operand") (match_operator:VF_128_256 7 "addsub_vs_operator" [(vec_concat: (plus:VF_128_256 (match_operand:VF_128_256 1 "vector_operand") (match_operand:VF_128_256 2 "vector_operand")) (minus:VF_128_256 (match_operand:VF_128_256 3 "register_operand") (match_operand:VF_128_256 4 "vector_operand"))) (match_parallel 5 "addsub_vs_parallel" [(match_operand 6 "const_int_operand")])]))] "TARGET_SSE3 && can_create_pseudo_p () && ((rtx_equal_p (operands[1], operands[3]) && rtx_equal_p (operands[2], operands[4])) || (rtx_equal_p (operands[1], operands[4]) && rtx_equal_p (operands[2], operands[3])))" [(set (match_dup 0) (vec_merge:VF_128_256 (minus:VF_128_256 (match_dup 3) (match_dup 4)) (plus:VF_128_256 (match_dup 3) (match_dup 4)) (match_dup 5)))] { int i, nelt = XVECLEN (operands[5], 0); HOST_WIDE_INT 
ival = 0; for (i = 0; i < nelt; i++) if (INTVAL (XVECEXP (operands[5], 0, i)) >= GET_MODE_NUNITS (mode)) ival |= HOST_WIDE_INT_1 << i; operands[5] = GEN_INT (ival); }) (define_insn "avx_hv4df3" [(set (match_operand:V4DF 0 "register_operand" "=x") (vec_concat:V4DF (vec_concat:V2DF (plusminus:DF (vec_select:DF (match_operand:V4DF 1 "register_operand" "x") (parallel [(const_int 0)])) (vec_select:DF (match_dup 1) (parallel [(const_int 1)]))) (plusminus:DF (vec_select:DF (match_operand:V4DF 2 "nonimmediate_operand" "xm") (parallel [(const_int 0)])) (vec_select:DF (match_dup 2) (parallel [(const_int 1)])))) (vec_concat:V2DF (plusminus:DF (vec_select:DF (match_dup 1) (parallel [(const_int 2)])) (vec_select:DF (match_dup 1) (parallel [(const_int 3)]))) (plusminus:DF (vec_select:DF (match_dup 2) (parallel [(const_int 2)])) (vec_select:DF (match_dup 2) (parallel [(const_int 3)]))))))] "TARGET_AVX" "vhpd\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sseadd") (set_attr "prefix" "vex") (set_attr "mode" "V4DF")]) (define_expand "sse3_haddv2df3" [(set (match_operand:V2DF 0 "register_operand") (vec_concat:V2DF (plus:DF (vec_select:DF (match_operand:V2DF 1 "register_operand") (parallel [(const_int 0)])) (vec_select:DF (match_dup 1) (parallel [(const_int 1)]))) (plus:DF (vec_select:DF (match_operand:V2DF 2 "vector_operand") (parallel [(const_int 0)])) (vec_select:DF (match_dup 2) (parallel [(const_int 1)])))))] "TARGET_SSE3") (define_insn "*sse3_haddv2df3" [(set (match_operand:V2DF 0 "register_operand" "=x,x") (vec_concat:V2DF (plus:DF (vec_select:DF (match_operand:V2DF 1 "register_operand" "0,x") (parallel [(match_operand:SI 3 "const_0_to_1_operand")])) (vec_select:DF (match_dup 1) (parallel [(match_operand:SI 4 "const_0_to_1_operand")]))) (plus:DF (vec_select:DF (match_operand:V2DF 2 "vector_operand" "xBm,xm") (parallel [(match_operand:SI 5 "const_0_to_1_operand")])) (vec_select:DF (match_dup 2) (parallel [(match_operand:SI 6 "const_0_to_1_operand")])))))] "TARGET_SSE3 && INTVAL 
;; NOTE(review): completion of the *sse3_haddv2df3 condition (selected lane
;; indices must differ within each operand), the hsubpd insn, the "_low"
;; single-result variants used for V2DF reductions when
;; TARGET_V2DF_REDUCTION_PREFER_HADDPD, the AVX V8SF and SSE3 V4SF
;; horizontal add/sub patterns (pairwise within 128-bit lanes), and the
;; reduc_plus_scal expanders: the SSE one reduces via ix86_expand_reduc,
;; the V16QI one uses psrldq + paddb + psadbw-against-zero, and the wide
;; one halves the vector with extract_hi + add and recurses.  Angle-bracket
;; substitutions stripped by extraction; verify against upstream sse.md.
(operands[3]) != INTVAL (operands[4]) && INTVAL (operands[5]) != INTVAL (operands[6])" "@ haddpd\t{%2, %0|%0, %2} vhaddpd\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseadd") (set_attr "prefix" "orig,vex") (set_attr "mode" "V2DF")]) (define_insn "sse3_hsubv2df3" [(set (match_operand:V2DF 0 "register_operand" "=x,x") (vec_concat:V2DF (minus:DF (vec_select:DF (match_operand:V2DF 1 "register_operand" "0,x") (parallel [(const_int 0)])) (vec_select:DF (match_dup 1) (parallel [(const_int 1)]))) (minus:DF (vec_select:DF (match_operand:V2DF 2 "vector_operand" "xBm,xm") (parallel [(const_int 0)])) (vec_select:DF (match_dup 2) (parallel [(const_int 1)])))))] "TARGET_SSE3" "@ hsubpd\t{%2, %0|%0, %2} vhsubpd\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseadd") (set_attr "prefix" "orig,vex") (set_attr "mode" "V2DF")]) (define_insn "*sse3_haddv2df3_low" [(set (match_operand:DF 0 "register_operand" "=x,x") (plus:DF (vec_select:DF (match_operand:V2DF 1 "register_operand" "0,x") (parallel [(match_operand:SI 2 "const_0_to_1_operand")])) (vec_select:DF (match_dup 1) (parallel [(match_operand:SI 3 "const_0_to_1_operand")]))))] "TARGET_SSE3 && TARGET_V2DF_REDUCTION_PREFER_HADDPD && INTVAL (operands[2]) != INTVAL (operands[3])" "@ haddpd\t{%0, %0|%0, %0} vhaddpd\t{%1, %1, %0|%0, %1, %1}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseadd1") (set_attr "prefix" "orig,vex") (set_attr "mode" "V2DF")]) (define_insn "*sse3_hsubv2df3_low" [(set (match_operand:DF 0 "register_operand" "=x,x") (minus:DF (vec_select:DF (match_operand:V2DF 1 "register_operand" "0,x") (parallel [(const_int 0)])) (vec_select:DF (match_dup 1) (parallel [(const_int 1)]))))] "TARGET_SSE3 && TARGET_V2DF_REDUCTION_PREFER_HADDPD" "@ hsubpd\t{%0, %0|%0, %0} vhsubpd\t{%1, %1, %0|%0, %1, %1}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseadd1") (set_attr "prefix" "orig,vex") (set_attr "mode" "V2DF")]) (define_insn "avx_hv8sf3" [(set (match_operand:V8SF 0 
"register_operand" "=x") (vec_concat:V8SF (vec_concat:V4SF (vec_concat:V2SF (plusminus:SF (vec_select:SF (match_operand:V8SF 1 "register_operand" "x") (parallel [(const_int 0)])) (vec_select:SF (match_dup 1) (parallel [(const_int 1)]))) (plusminus:SF (vec_select:SF (match_dup 1) (parallel [(const_int 2)])) (vec_select:SF (match_dup 1) (parallel [(const_int 3)])))) (vec_concat:V2SF (plusminus:SF (vec_select:SF (match_operand:V8SF 2 "nonimmediate_operand" "xm") (parallel [(const_int 0)])) (vec_select:SF (match_dup 2) (parallel [(const_int 1)]))) (plusminus:SF (vec_select:SF (match_dup 2) (parallel [(const_int 2)])) (vec_select:SF (match_dup 2) (parallel [(const_int 3)]))))) (vec_concat:V4SF (vec_concat:V2SF (plusminus:SF (vec_select:SF (match_dup 1) (parallel [(const_int 4)])) (vec_select:SF (match_dup 1) (parallel [(const_int 5)]))) (plusminus:SF (vec_select:SF (match_dup 1) (parallel [(const_int 6)])) (vec_select:SF (match_dup 1) (parallel [(const_int 7)])))) (vec_concat:V2SF (plusminus:SF (vec_select:SF (match_dup 2) (parallel [(const_int 4)])) (vec_select:SF (match_dup 2) (parallel [(const_int 5)]))) (plusminus:SF (vec_select:SF (match_dup 2) (parallel [(const_int 6)])) (vec_select:SF (match_dup 2) (parallel [(const_int 7)])))))))] "TARGET_AVX" "vhps\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sseadd") (set_attr "prefix" "vex") (set_attr "mode" "V8SF")]) (define_insn "sse3_hv4sf3" [(set (match_operand:V4SF 0 "register_operand" "=x,x") (vec_concat:V4SF (vec_concat:V2SF (plusminus:SF (vec_select:SF (match_operand:V4SF 1 "register_operand" "0,x") (parallel [(const_int 0)])) (vec_select:SF (match_dup 1) (parallel [(const_int 1)]))) (plusminus:SF (vec_select:SF (match_dup 1) (parallel [(const_int 2)])) (vec_select:SF (match_dup 1) (parallel [(const_int 3)])))) (vec_concat:V2SF (plusminus:SF (vec_select:SF (match_operand:V4SF 2 "vector_operand" "xBm,xm") (parallel [(const_int 0)])) (vec_select:SF (match_dup 2) (parallel [(const_int 1)]))) (plusminus:SF (vec_select:SF 
(match_dup 2) (parallel [(const_int 2)])) (vec_select:SF (match_dup 2) (parallel [(const_int 3)]))))))] "TARGET_SSE3" "@ hps\t{%2, %0|%0, %2} vhps\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseadd") (set_attr "atom_unit" "complex") (set_attr "prefix" "orig,vex") (set_attr "prefix_rep" "1,*") (set_attr "mode" "V4SF")]) (define_mode_iterator REDUC_SSE_PLUS_MODE [(V2DF "TARGET_SSE") (V4SF "TARGET_SSE") (V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL")]) (define_expand "reduc_plus_scal_" [(plus:REDUC_SSE_PLUS_MODE (match_operand: 0 "register_operand") (match_operand:REDUC_SSE_PLUS_MODE 1 "register_operand"))] "" { rtx tmp = gen_reg_rtx (mode); ix86_expand_reduc (gen_add3, tmp, operands[1]); emit_insn (gen_vec_extract (operands[0], tmp, const0_rtx)); DONE; }) (define_expand "reduc_plus_scal_v16qi" [(plus:V16QI (match_operand:QI 0 "register_operand") (match_operand:V16QI 1 "register_operand"))] "TARGET_SSE2" { rtx tmp = gen_reg_rtx (V1TImode); emit_insn (gen_sse2_lshrv1ti3 (tmp, gen_lowpart (V1TImode, operands[1]), GEN_INT (64))); rtx tmp2 = gen_reg_rtx (V16QImode); emit_insn (gen_addv16qi3 (tmp2, operands[1], gen_lowpart (V16QImode, tmp))); rtx tmp3 = gen_reg_rtx (V16QImode); emit_move_insn (tmp3, CONST0_RTX (V16QImode)); rtx tmp4 = gen_reg_rtx (V2DImode); emit_insn (gen_sse2_psadbw (tmp4, tmp2, tmp3)); tmp4 = gen_lowpart (V16QImode, tmp4); emit_insn (gen_vec_extractv16qiqi (operands[0], tmp4, const0_rtx)); DONE; }) (define_mode_iterator REDUC_PLUS_MODE [(V4DF "TARGET_AVX") (V8SF "TARGET_AVX") (V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL") (V8DF "TARGET_AVX512F") (V16SF "TARGET_AVX512F") (V32HF "TARGET_AVX512FP16 && TARGET_AVX512VL") (V32QI "TARGET_AVX") (V64QI "TARGET_AVX512F")]) (define_expand "reduc_plus_scal_" [(plus:REDUC_PLUS_MODE (match_operand: 0 "register_operand") (match_operand:REDUC_PLUS_MODE 1 "register_operand"))] "" { rtx tmp = gen_reg_rtx (mode); emit_insn (gen_vec_extract_hi_ (tmp, operands[1])); rtx tmp2 = gen_reg_rtx (mode); 
;; NOTE(review): completion of the wide reduc_plus_scal expander (halve via
;; extract_hi, add the halves, recurse on the narrower mode), then the
;; smin/smax and umin/umax reduction expanders for SSE / AVX2 / AVX512
;; mode sets (same halve-and-recurse shape, or ix86_expand_reduc at the
;; 128-bit base case), the special-cased reduc_umin_scal_v8hi (SSE4.1
;; phminposuw-based path via ix86_expand_reduc), and the AVX512DQ/FP16
;; vreduce packed ("reducep") and scalar ("reduces") insns.  Angle-bracket
;; substitutions stripped by extraction; verify against upstream sse.md.
rtx tmp3 = gen_lowpart (mode, operands[1]); emit_insn (gen_add3 (tmp2, tmp, tmp3)); emit_insn (gen_reduc_plus_scal_ (operands[0], tmp2)); DONE; }) ;; Modes handled by reduc_sm{in,ax}* patterns. (define_mode_iterator REDUC_SSE_SMINMAX_MODE [(V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL") (V4SF "TARGET_SSE") (V2DF "TARGET_SSE") (V4SI "TARGET_SSE2") (V8HI "TARGET_SSE2") (V16QI "TARGET_SSE2") (V2DI "TARGET_SSE4_2")]) (define_expand "reduc__scal_" [(smaxmin:REDUC_SSE_SMINMAX_MODE (match_operand: 0 "register_operand") (match_operand:REDUC_SSE_SMINMAX_MODE 1 "register_operand"))] "" { rtx tmp = gen_reg_rtx (mode); ix86_expand_reduc (gen_3, tmp, operands[1]); emit_insn (gen_vec_extract (operands[0], tmp, const0_rtx)); DONE; }) (define_mode_iterator REDUC_SMINMAX_MODE [(V32QI "TARGET_AVX2") (V16HI "TARGET_AVX2") (V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL") (V8SI "TARGET_AVX2") (V4DI "TARGET_AVX2") (V8SF "TARGET_AVX") (V4DF "TARGET_AVX") (V64QI "TARGET_AVX512BW") (V32HF "TARGET_AVX512FP16 && TARGET_AVX512VL") (V32HI "TARGET_AVX512BW") (V16SI "TARGET_AVX512F") (V8DI "TARGET_AVX512F") (V16SF "TARGET_AVX512F") (V8DF "TARGET_AVX512F")]) (define_expand "reduc__scal_" [(smaxmin:REDUC_SMINMAX_MODE (match_operand: 0 "register_operand") (match_operand:REDUC_SMINMAX_MODE 1 "register_operand"))] "" { rtx tmp = gen_reg_rtx (mode); emit_insn (gen_vec_extract_hi_ (tmp, operands[1])); rtx tmp2 = gen_reg_rtx (mode); emit_insn (gen_3 (tmp2, tmp, gen_lowpart (mode, operands[1]))); emit_insn (gen_reduc__scal_ (operands[0], tmp2)); DONE; }) (define_expand "reduc__scal_" [(umaxmin:VI_AVX512BW (match_operand: 0 "register_operand") (match_operand:VI_AVX512BW 1 "register_operand"))] "TARGET_AVX512F" { rtx tmp = gen_reg_rtx (mode); emit_insn (gen_vec_extract_hi_ (tmp, operands[1])); rtx tmp2 = gen_reg_rtx (mode); emit_insn (gen_3 (tmp2, tmp, gen_lowpart (mode, operands[1]))); emit_insn (gen_reduc__scal_ (operands[0], tmp2)); DONE; }) (define_expand "reduc__scal_" [(umaxmin:VI_256 (match_operand: 0 
"register_operand") (match_operand:VI_256 1 "register_operand"))] "TARGET_AVX2" { rtx tmp = gen_reg_rtx (mode); emit_insn (gen_vec_extract_hi_ (tmp, operands[1])); rtx tmp2 = gen_reg_rtx (mode); emit_insn (gen_3 (tmp2, tmp, gen_lowpart (mode, operands[1]))); rtx tmp3 = gen_reg_rtx (mode); ix86_expand_reduc (gen_3, tmp3, tmp2); emit_insn (gen_vec_extract (operands[0], tmp3, const0_rtx)); DONE; }) (define_expand "reduc_umin_scal_v8hi" [(umin:V8HI (match_operand:HI 0 "register_operand") (match_operand:V8HI 1 "register_operand"))] "TARGET_SSE4_1" { rtx tmp = gen_reg_rtx (V8HImode); ix86_expand_reduc (gen_uminv8hi3, tmp, operands[1]); emit_insn (gen_vec_extractv8hihi (operands[0], tmp, const0_rtx)); DONE; }) (define_insn "reducep" [(set (match_operand:VFH_AVX512VL 0 "register_operand" "=v") (unspec:VFH_AVX512VL [(match_operand:VFH_AVX512VL 1 "" "") (match_operand:SI 2 "const_0_to_255_operand")] UNSPEC_REDUCE))] "TARGET_AVX512DQ || (VALID_AVX512FP16_REG_MODE (mode))" "vreduce\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sse") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "reduces" [(set (match_operand:VFH_128 0 "register_operand" "=v") (vec_merge:VFH_128 (unspec:VFH_128 [(match_operand:VFH_128 1 "register_operand" "v") (match_operand:VFH_128 2 "" "") (match_operand:SI 3 "const_0_to_255_operand")] UNSPEC_REDUCE) (match_dup 1) (const_int 1)))] "TARGET_AVX512DQ || (VALID_AVX512FP16_REG_MODE (mode))" "vreduce\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "type" "sse") (set_attr "prefix" "evex") (set_attr "mode" "")]) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; Parallel floating point comparisons ;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define_insn "avx_cmp3" [(set (match_operand:VF_128_256 0 "register_operand" "=x") (unspec:VF_128_256 [(match_operand:VF_128_256 1 "register_operand" "x") (match_operand:VF_128_256 2 "nonimmediate_operand" "xm") (match_operand:SI 3 "const_0_to_31_operand" "n")] 
;; NOTE(review): AVX/AVX512VL floating-point compare patterns (vcmp and the
;; insn_and_split variants that rewrite mask-producing compares), the
;; scalar vmcmp/maskcmp forms, and the cmp_imm_predicate mode attribute.
;; Placeholder substitutions (<mode>, <ssemodesuffix>, <avx512>, ...) look
;; stripped by extraction -- verify every name and asm template against the
;; original sse.md before building; do not hand-edit.
UNSPEC_PCMP))] "TARGET_AVX" "vcmp\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "type" "ssecmp") (set_attr "length_immediate" "1") (set_attr "prefix" "vex") (set_attr "mode" "")]) (define_insn_and_split "*avx_cmp3_1" [(set (match_operand: 0 "register_operand") (vec_merge: (match_operand: 1 "vector_all_ones_operand") (match_operand: 2 "const0_operand") (unspec: [(match_operand:VF_128_256 3 "register_operand") (match_operand:VF_128_256 4 "nonimmediate_operand") (match_operand:SI 5 "const_0_to_31_operand")] UNSPEC_PCMP)))] "TARGET_AVX512VL && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 6) (unspec:VF_128_256 [(match_dup 3) (match_dup 4) (match_dup 5)] UNSPEC_PCMP)) (set (match_dup 0) (match_dup 7))] { operands[6] = gen_reg_rtx (mode); operands[7] = lowpart_subreg (GET_MODE (operands[0]), operands[6], mode); }) (define_insn_and_split "*avx_cmp3_2" [(set (match_operand: 0 "register_operand") (vec_merge: (match_operand: 1 "vector_all_ones_operand") (match_operand: 2 "const0_operand") (not: (unspec: [(match_operand:VF_128_256 3 "register_operand") (match_operand:VF_128_256 4 "nonimmediate_operand") (match_operand:SI 5 "const_0_to_31_operand")] UNSPEC_PCMP))))] "TARGET_AVX512VL && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 6) (unspec:VF_128_256 [(match_dup 3) (match_dup 4) (match_dup 5)] UNSPEC_PCMP)) (set (match_dup 0) (match_dup 7))] { operands[5] = GEN_INT (INTVAL (operands[5]) ^ 4); operands[6] = gen_reg_rtx (mode); operands[7] = lowpart_subreg (GET_MODE (operands[0]), operands[6], mode); }) (define_insn_and_split "*avx_cmp3_3" [(set (match_operand:VF_128_256 0 "register_operand") (vec_merge:VF_128_256 (match_operand:VF_128_256 1 "float_vector_all_ones_operand") (match_operand:VF_128_256 2 "const0_operand") (unspec: [(match_operand:VF_128_256 3 "register_operand") (match_operand:VF_128_256 4 "nonimmediate_operand") (match_operand:SI 5 "const_0_to_31_operand")] UNSPEC_PCMP)))] "TARGET_AVX512VL && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup
0) (unspec:VF_128_256 [(match_dup 3) (match_dup 4) (match_dup 5)] UNSPEC_PCMP))]) (define_insn_and_split "*avx_cmp3_4" [(set (match_operand:VF_128_256 0 "register_operand") (vec_merge:VF_128_256 (match_operand:VF_128_256 1 "float_vector_all_ones_operand") (match_operand:VF_128_256 2 "const0_operand") (not: (unspec: [(match_operand:VF_128_256 3 "register_operand") (match_operand:VF_128_256 4 "nonimmediate_operand") (match_operand:SI 5 "const_0_to_31_operand")] UNSPEC_PCMP))))] "TARGET_AVX512VL && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (unspec:VF_128_256 [(match_dup 3) (match_dup 4) (match_dup 5)] UNSPEC_PCMP))] "operands[5] = GEN_INT (INTVAL (operands[5]) ^ 4);") (define_insn_and_split "*avx_cmp3_lt" [(set (match_operand:VF_128_256 0 "register_operand") (vec_merge:VF_128_256 (match_operand:VF_128_256 1 "vector_operand") (match_operand:VF_128_256 2 "vector_operand") (unspec: [(match_operand: 3 "register_operand") (match_operand: 4 "const0_operand") (match_operand:SI 5 "const_0_to_7_operand")] UNSPEC_PCMP)))] "TARGET_AVX512VL && ix86_pre_reload_split () /* LT or GE 0 */ && ((INTVAL (operands[5]) == 1) || (INTVAL (operands[5]) == 5))" "#" "&& 1" [(set (match_dup 0) (unspec:VF_128_256 [(match_dup 2) (match_dup 1) (lt:VF_128_256 (match_dup 3) (match_dup 4))] UNSPEC_BLENDV))] { if (INTVAL (operands[5]) == 5) std::swap (operands[1], operands[2]); operands[2] = force_reg (mode, operands[2]); }) (define_insn_and_split "*avx_cmp3_ltint" [(set (match_operand:VI48_AVX 0 "register_operand") (vec_merge:VI48_AVX (match_operand:VI48_AVX 1 "vector_operand") (match_operand:VI48_AVX 2 "vector_operand") (unspec: [(match_operand:VI48_AVX 3 "register_operand") (match_operand:VI48_AVX 4 "const0_operand") (match_operand:SI 5 "const_0_to_7_operand")] UNSPEC_PCMP)))] "TARGET_AVX512VL && ix86_pre_reload_split () /* LT or GE 0 */ && ((INTVAL (operands[5]) == 1) || (INTVAL (operands[5]) == 5))" "#" "&& 1" [(set (match_dup 0) (unspec: [(match_dup 2) (match_dup 1) (subreg:
(lt:VI48_AVX (match_dup 3) (match_dup 4)) 0)] UNSPEC_BLENDV))] { if (INTVAL (operands[5]) == 5) std::swap (operands[1], operands[2]); operands[0] = gen_lowpart (mode, operands[0]); operands[1] = gen_lowpart (mode, operands[1]); operands[2] = force_reg (mode, gen_lowpart (mode, operands[2])); }) (define_insn_and_split "*avx_cmp3_ltint_not" [(set (match_operand:VI48_AVX 0 "register_operand") (vec_merge:VI48_AVX (match_operand:VI48_AVX 1 "vector_operand") (match_operand:VI48_AVX 2 "vector_operand") (unspec: [(subreg:VI48_AVX (not: (match_operand: 3 "vector_operand")) 0) (match_operand:VI48_AVX 4 "const0_operand") (match_operand:SI 5 "const_0_to_7_operand")] UNSPEC_PCMP)))] "TARGET_AVX512VL && ix86_pre_reload_split () /* not LT or GE 0 */ && ((INTVAL (operands[5]) == 1) || (INTVAL (operands[5]) == 5))" "#" "&& 1" [(set (match_dup 0) (unspec: [(match_dup 1) (match_dup 2) (subreg: (lt:VI48_AVX (match_dup 3) (match_dup 4)) 0)] UNSPEC_BLENDV))] { if (INTVAL (operands[5]) == 5) std::swap (operands[1], operands[2]); operands[0] = gen_lowpart (mode, operands[0]); operands[1] = force_reg (mode, gen_lowpart (mode, operands[1])); operands[2] = gen_lowpart (mode, operands[2]); operands[3] = lowpart_subreg (mode, operands[3], mode); }) (define_insn "avx_vmcmp3" [(set (match_operand:VF_128 0 "register_operand" "=x") (vec_merge:VF_128 (unspec:VF_128 [(match_operand:VF_128 1 "register_operand" "x") (match_operand:VF_128 2 "nonimmediate_operand" "xm") (match_operand:SI 3 "const_0_to_31_operand" "n")] UNSPEC_PCMP) (match_dup 1) (const_int 1)))] "TARGET_AVX" "vcmp\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "type" "ssecmp") (set_attr "length_immediate" "1") (set_attr "prefix" "vex") (set_attr "mode" "")]) (define_insn "*_maskcmp3_comm" [(set (match_operand:VF_128_256 0 "register_operand" "=x,x") (match_operator:VF_128_256 3 "sse_comparison_operator" [(match_operand:VF_128_256 1 "register_operand" "%0,x") (match_operand:VF_128_256 2 "vector_operand" "xBm,xm")]))] "TARGET_SSE &&
GET_RTX_CLASS (GET_CODE (operands[3])) == RTX_COMM_COMPARE" "@ cmp%D3\t{%2, %0|%0, %2} vcmp%D3\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "ssecmp") (set_attr "length_immediate" "1") (set_attr "prefix" "orig,vex") (set_attr "mode" "")]) (define_insn "_maskcmp3" [(set (match_operand:VF_128_256 0 "register_operand" "=x,x") (match_operator:VF_128_256 3 "sse_comparison_operator" [(match_operand:VF_128_256 1 "register_operand" "0,x") (match_operand:VF_128_256 2 "vector_operand" "xBm,xm")]))] "TARGET_SSE" "@ cmp%D3\t{%2, %0|%0, %2} vcmp%D3\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "ssecmp") (set_attr "length_immediate" "1") (set_attr "prefix" "orig,vex") (set_attr "mode" "")]) (define_insn "_vmmaskcmp3" [(set (match_operand:VF_128 0 "register_operand" "=x,x") (vec_merge:VF_128 (match_operator:VF_128 3 "sse_comparison_operator" [(match_operand:VF_128 1 "register_operand" "0,x") (match_operand:VF_128 2 "nonimmediate_operand" "xm,xm")]) (match_dup 1) (const_int 1)))] "TARGET_SSE" "@ cmp%D3\t{%2, %0|%0, %2} vcmp%D3\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "ssecmp") (set_attr "length_immediate" "1,*") (set_attr "prefix" "orig,vex") (set_attr "mode" "")]) (define_mode_attr cmp_imm_predicate [(V32HF "const_0_to_31_operand") (V16SF "const_0_to_31_operand") (V8DF "const_0_to_31_operand") (V16SI "const_0_to_7_operand") (V8DI "const_0_to_7_operand") (V16HF "const_0_to_31_operand") (V8SF "const_0_to_31_operand") (V4DF "const_0_to_31_operand") (V8SI "const_0_to_7_operand") (V4DI "const_0_to_7_operand") (V8HF "const_0_to_31_operand") (V4SF "const_0_to_31_operand") (V2DF "const_0_to_31_operand") (V4SI "const_0_to_7_operand") (V2DI "const_0_to_7_operand") (V32HI "const_0_to_7_operand") (V64QI "const_0_to_7_operand") (V16HI "const_0_to_7_operand") (V32QI "const_0_to_7_operand") (V8HI "const_0_to_7_operand") (V16QI "const_0_to_7_operand")]) (define_insn "_cmp3" [(set (match_operand: 0
;; NOTE(review): AVX512 mask-register compare insns plus insn_and_split
;; patterns that fold vpcmp* + zero_extend of the resulting mask (vpcmp
;; already zeroes the upper mask bits).  The <avx512>/<mode>/<ssemodesuffix>
;; fragments are missing from the pattern names here, presumably stripped by
;; extraction -- reconcile with upstream sse.md before any change.
"register_operand" "=k") (unspec: [(match_operand:V48H_AVX512VL 1 "register_operand" "v") (match_operand:V48H_AVX512VL 2 "nonimmediate_operand" "") (match_operand:SI 3 "" "n")] UNSPEC_PCMP))] "TARGET_AVX512F && " "vcmp\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "type" "ssecmp") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) ;; Since vpcmpd implicitly clear the upper bits of dest, transform ;; vpcmpd + zero_extend to vpcmpd since the instruction (define_insn_and_split "*_cmp3_zero_extend" [(set (match_operand:SWI248x 0 "register_operand") (zero_extend:SWI248x (unspec: [(match_operand:V48H_AVX512VL 1 "nonimmediate_operand") (match_operand:V48H_AVX512VL 2 "nonimmediate_operand") (match_operand:SI 3 "const_0_to_7_operand" "n")] UNSPEC_PCMP)))] "TARGET_AVX512F && (!VALID_MASK_AVX512BW_MODE (mode) || TARGET_AVX512BW) && ix86_pre_reload_split () && (GET_MODE_NUNITS (mode) < GET_MODE_PRECISION (mode))" "#" "&& 1" [(set (match_dup 0) (unspec: [(match_dup 1) (match_dup 2) (match_dup 3)] UNSPEC_PCMP))] { operands[1] = force_reg (mode, operands[1]); operands[0] = lowpart_subreg (mode, operands[0], mode); } [(set_attr "type" "ssecmp") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn_and_split "*_cmp3_zero_extend_2" [(set (match_operand:SWI248x 0 "register_operand") (zero_extend:SWI248x (unspec: [(match_operand:V48H_AVX512VL 1 "nonimmediate_operand") (match_operand:V48H_AVX512VL 2 "nonimmediate_operand") (match_operand:SI 3 "const_0_to_7_operand")] UNSPEC_PCMP))) (set (match_operand: 4 "register_operand") (unspec: [(match_dup 1) (match_dup 2) (match_dup 3)] UNSPEC_PCMP))] "TARGET_AVX512F && (!VALID_MASK_AVX512BW_MODE (mode) || TARGET_AVX512BW) && (GET_MODE_NUNITS (mode) < GET_MODE_PRECISION (mode)) && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (unspec: [(match_dup 1) (match_dup 2) (match_dup 3)] UNSPEC_PCMP)) (set (match_dup 4) (match_dup 0))] { operands[1] = force_reg (mode,
operands[1]); operands[0] = lowpart_subreg (mode, operands[0], mode); } [(set_attr "type" "ssecmp") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn_and_split "*_cmp3" [(set (match_operand: 0 "register_operand") (not: (unspec: [(match_operand:V48_AVX512VL 1 "register_operand") (match_operand:V48_AVX512VL 2 "nonimmediate_operand") (match_operand:SI 3 "" "n")] UNSPEC_PCMP)))] "TARGET_AVX512F && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (unspec: [(match_dup 1) (match_dup 2) (match_dup 4)] UNSPEC_PCMP))] "operands[4] = GEN_INT (INTVAL (operands[3]) ^ 4);") (define_insn "_cmp3" [(set (match_operand: 0 "register_operand" "=k") (unspec: [(match_operand:VI12_AVX512VL 1 "register_operand" "v") (match_operand:VI12_AVX512VL 2 "nonimmediate_operand" "vm") (match_operand:SI 3 "" "n")] UNSPEC_PCMP))] "TARGET_AVX512BW" "vpcmp\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "type" "ssecmp") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn_and_split "*_cmp3_zero_extend" [(set (match_operand:SWI248x 0 "register_operand") (zero_extend:SWI248x (unspec: [(match_operand:VI12_AVX512VL 1 "nonimmediate_operand") (match_operand:VI12_AVX512VL 2 "nonimmediate_operand") (match_operand:SI 3 "const_0_to_7_operand")] UNSPEC_PCMP)))] "TARGET_AVX512BW && ix86_pre_reload_split () && (GET_MODE_NUNITS (mode) < GET_MODE_PRECISION (mode))" "#" "&& 1" [(set (match_dup 0) (unspec: [(match_dup 1) (match_dup 2) (match_dup 3)] UNSPEC_PCMP))] { operands[1] = force_reg (mode, operands[1]); operands[0] = lowpart_subreg (mode, operands[0], mode); } [(set_attr "type" "ssecmp") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn_and_split "*_cmp3_zero_extend_2" [(set (match_operand:SWI248x 0 "register_operand") (zero_extend:SWI248x (unspec: [(match_operand:VI12_AVX512VL 1 "nonimmediate_operand") (match_operand:VI12_AVX512VL 2 "nonimmediate_operand")
;; NOTE(review): signed/unsigned AVX512 integer compares (vpcmp/vpcmpu for
;; the VI12 and VI48 iterators), the *_eq3_1 vptestnm forms, the PR96906
;; vpsubus*-vs-0 splitter, the scalar vmcmp/comi patterns and the first
;; vec_cmp expanders.  Extraction damage as elsewhere: the "&  == 0" test in
;; the PR96906 condition presumably lost its <pcmp_signed_mask> operand
;; (the int attr is defined just before it) -- restore from upstream.
(match_operand:SI 3 "const_0_to_7_operand")] UNSPEC_PCMP))) (set (match_operand: 4 "register_operand") (unspec: [(match_dup 1) (match_dup 2) (match_dup 3)] UNSPEC_PCMP))] "TARGET_AVX512BW && (GET_MODE_NUNITS (mode) < GET_MODE_PRECISION (mode)) && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (unspec: [(match_dup 1) (match_dup 2) (match_dup 3)] UNSPEC_PCMP)) (set (match_dup 4) (match_dup 0))] { operands[1] = force_reg (mode, operands[1]); operands[0] = lowpart_subreg (mode, operands[0], mode); } [(set_attr "type" "ssecmp") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_int_iterator UNSPEC_PCMP_ITER [UNSPEC_PCMP UNSPEC_UNSIGNED_PCMP]) (define_insn_and_split "*_cmp3" [(set (match_operand: 0 "register_operand") (not: (unspec: [(match_operand:VI12_AVX512VL 1 "register_operand") (match_operand:VI12_AVX512VL 2 "nonimmediate_operand") (match_operand:SI 3 "")] UNSPEC_PCMP_ITER)))] "TARGET_AVX512BW && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (unspec: [(match_dup 1) (match_dup 2) (match_dup 4)] UNSPEC_PCMP_ITER))] "operands[4] = GEN_INT (INTVAL (operands[3]) ^ 4);") (define_insn "*_eq3_1" [(set (match_operand: 0 "register_operand" "=k,k") (unspec: [(match_operand:VI12_AVX512VL 1 "nonimm_or_0_operand" "%v,v") (match_operand:VI12_AVX512VL 2 "nonimm_or_0_operand" "vm,C") (const_int 0)] UNSPEC_PCMP_ITER))] "TARGET_AVX512BW && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "@ vpcmpeq\t{%2, %1, %0|%0, %1, %2} vptestnm\t{%1, %1, %0|%0, %1, %1}" [(set_attr "type" "ssecmp") (set_attr "prefix_extra" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "_ucmp3" [(set (match_operand: 0 "register_operand" "=k") (unspec: [(match_operand:VI12_AVX512VL 1 "register_operand" "v") (match_operand:VI12_AVX512VL 2 "nonimmediate_operand" "vm") (match_operand:SI 3 "const_0_to_7_operand" "n")] UNSPEC_UNSIGNED_PCMP))] "TARGET_AVX512BW" "vpcmpu\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "type" "ssecmp") (set_attr
"length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn_and_split "*_ucmp3_zero_extend" [(set (match_operand:SWI248x 0 "register_operand") (zero_extend:SWI248x (unspec: [(match_operand:VI12_AVX512VL 1 "nonimmediate_operand") (match_operand:VI12_AVX512VL 2 "nonimmediate_operand") (match_operand:SI 3 "const_0_to_7_operand")] UNSPEC_UNSIGNED_PCMP)))] "TARGET_AVX512BW && ix86_pre_reload_split () && (GET_MODE_NUNITS (mode) < GET_MODE_PRECISION (mode))" "#" "&& 1" [(set (match_dup 0) (unspec: [(match_dup 1) (match_dup 2) (match_dup 3)] UNSPEC_UNSIGNED_PCMP))] { operands[1] = force_reg (mode, operands[1]); operands[0] = lowpart_subreg (mode, operands[0], mode); } [(set_attr "type" "ssecmp") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn_and_split "*_ucmp3_zero_extend_2" [(set (match_operand:SWI248x 0 "register_operand") (zero_extend:SWI248x (unspec: [(match_operand:VI12_AVX512VL 1 "nonimmediate_operand") (match_operand:VI12_AVX512VL 2 "nonimmediate_operand") (match_operand:SI 3 "const_0_to_7_operand")] UNSPEC_UNSIGNED_PCMP))) (set (match_operand: 4 "register_operand") (unspec: [(match_dup 1) (match_dup 2) (match_dup 3)] UNSPEC_UNSIGNED_PCMP))] "TARGET_AVX512BW && ix86_pre_reload_split () && (GET_MODE_NUNITS (mode) < GET_MODE_PRECISION (mode)) && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (unspec: [(match_dup 1) (match_dup 2) (match_dup 3)] UNSPEC_UNSIGNED_PCMP)) (set (match_dup 4) (match_dup 0))] { operands[1] = force_reg (mode, operands[1]); operands[0] = lowpart_subreg (mode, operands[0], mode); } [(set_attr "type" "ssecmp") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "*_eq3_1" [(set (match_operand: 0 "register_operand" "=k,k") (unspec: [(match_operand:VI48_AVX512VL 1 "nonimm_or_0_operand" "%v,v") (match_operand:VI48_AVX512VL 2 "nonimm_or_0_operand" "vm,C") (const_int 0)] UNSPEC_PCMP_ITER))] "TARGET_AVX512F && !(MEM_P
(operands[1]) && MEM_P (operands[2]))" "@ vpcmpeq\t{%2, %1, %0|%0, %1, %2} vptestnm\t{%1, %1, %0|%0, %1, %1}" [(set_attr "type" "ssecmp") (set_attr "prefix_extra" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "_ucmp3" [(set (match_operand: 0 "register_operand" "=k") (unspec: [(match_operand:VI48_AVX512VL 1 "register_operand" "v") (match_operand:VI48_AVX512VL 2 "nonimmediate_operand" "vm") (match_operand:SI 3 "const_0_to_7_operand" "n")] UNSPEC_UNSIGNED_PCMP))] "TARGET_AVX512F" "vpcmpu\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "type" "ssecmp") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn_and_split "*_ucmp3_zero_extend" [(set (match_operand:SWI248x 0 "register_operand") (zero_extend:SWI248x (unspec: [(match_operand:VI48_AVX512VL 1 "nonimmediate_operand") (match_operand:VI48_AVX512VL 2 "nonimmediate_operand") (match_operand:SI 3 "const_0_to_7_operand")] UNSPEC_UNSIGNED_PCMP)))] "TARGET_AVX512F && (!VALID_MASK_AVX512BW_MODE (mode) || TARGET_AVX512BW) && ix86_pre_reload_split () && (GET_MODE_NUNITS (mode) < GET_MODE_PRECISION (mode))" "#" "&& 1" [(set (match_dup 0) (unspec: [(match_dup 1) (match_dup 2) (match_dup 3)] UNSPEC_UNSIGNED_PCMP))] { operands[1] = force_reg (mode, operands[1]); operands[0] = lowpart_subreg (mode, operands[0], mode); } [(set_attr "type" "ssecmp") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn_and_split "*_ucmp3_zero_extend_2" [(set (match_operand:SWI248x 0 "register_operand") (zero_extend:SWI248x (unspec: [(match_operand:VI48_AVX512VL 1 "nonimmediate_operand") (match_operand:VI48_AVX512VL 2 "nonimmediate_operand") (match_operand:SI 3 "const_0_to_7_operand")] UNSPEC_UNSIGNED_PCMP))) (set (match_operand: 4 "register_operand") (unspec: [(match_dup 1) (match_dup 2) (match_dup 3)] UNSPEC_UNSIGNED_PCMP))] "TARGET_AVX512F && (!VALID_MASK_AVX512BW_MODE (mode) || TARGET_AVX512BW) && (GET_MODE_NUNITS (mode) < GET_MODE_PRECISION
(mode)) && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (unspec: [(match_dup 1) (match_dup 2) (match_dup 3)] UNSPEC_UNSIGNED_PCMP)) (set (match_dup 4) (match_dup 0))] { operands[1] = force_reg (mode, operands[1]); operands[0] = lowpart_subreg (mode, operands[0], mode); } [(set_attr "type" "ssecmp") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn_and_split "*_ucmp3" [(set (match_operand: 0 "register_operand") (not: (unspec: [(match_operand:VI48_AVX512VL 1 "register_operand") (match_operand:VI48_AVX512VL 2 "nonimmediate_operand") (match_operand:SI 3 "const_0_to_7_operand")] UNSPEC_UNSIGNED_PCMP)))] "TARGET_AVX512F && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (unspec: [(match_dup 1) (match_dup 2) (match_dup 4)] UNSPEC_UNSIGNED_PCMP))] "operands[4] = GEN_INT (INTVAL (operands[3]) ^ 4);") (define_int_attr pcmp_signed_mask [(UNSPEC_PCMP "3") (UNSPEC_UNSIGNED_PCMP "1")]) ;; PR96906 - optimize vpsubusw compared to 0 into vpcmpleuw or vpcmpnltuw. ;; For signed comparison, handle EQ 0: NEQ 4, ;; for unsigned comparison extra handle LE:2, NLE:6, equivalent to EQ and NEQ. (define_insn_and_split "*_ucmp3_1" [(set (match_operand: 0 "register_operand") (unspec: [(us_minus:VI12_AVX512VL (match_operand:VI12_AVX512VL 1 "vector_operand") (match_operand:VI12_AVX512VL 2 "vector_operand")) (match_operand:VI12_AVX512VL 3 "const0_operand") (match_operand:SI 4 "const_0_to_7_operand")] UNSPEC_PCMP_ITER))] "TARGET_AVX512BW && ix86_pre_reload_split () && ix86_binary_operator_ok (US_MINUS, mode, operands) && (INTVAL (operands[4]) & ) == 0" "#" "&& 1" [(const_int 0)] { /* LE: 2, NLT: 5, NLE: 6, LT: 1 */ int cmp_predicate = 2; /* LE */ if (MEM_P (operands[1])) { std::swap (operands[1], operands[2]); cmp_predicate = 5; /* NLT (GE) */ } if ((INTVAL (operands[4]) & 4) != 0) cmp_predicate ^= 4; /* Invert the comparison to NLE (GT) or LT.
*/ emit_insn (gen__ucmp3 (operands[0], operands[1],operands[2], GEN_INT (cmp_predicate))); DONE; }) (define_insn "avx512f_vmcmp3" [(set (match_operand: 0 "register_operand" "=k") (and: (unspec: [(match_operand:VFH_128 1 "register_operand" "v") (match_operand:VFH_128 2 "" "") (match_operand:SI 3 "const_0_to_31_operand" "n")] UNSPEC_PCMP) (const_int 1)))] "TARGET_AVX512F" "vcmp\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "type" "ssecmp") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "avx512f_vmcmp3_mask" [(set (match_operand: 0 "register_operand" "=k") (and: (unspec: [(match_operand:VFH_128 1 "register_operand" "v") (match_operand:VFH_128 2 "" "") (match_operand:SI 3 "const_0_to_31_operand" "n")] UNSPEC_PCMP) (and: (match_operand: 4 "register_operand" "Yk") (const_int 1))))] "TARGET_AVX512F" "vcmp\t{%3, %2, %1, %0%{%4%}|%0%{%4%}, %1, %2, %3}" [(set_attr "type" "ssecmp") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "_comi" [(set (reg:CCFP FLAGS_REG) (compare:CCFP (vec_select:MODEFH (match_operand: 0 "register_operand" "v") (parallel [(const_int 0)])) (vec_select:MODEFH (match_operand: 1 "" "") (parallel [(const_int 0)]))))] "SSE_FLOAT_MODE_P (mode)" "%vcomi\t{%1, %0|%0, %1}" [(set_attr "type" "ssecomi") (set_attr "prefix" "maybe_vex") (set_attr "prefix_rep" "0") (set (attr "prefix_data16") (if_then_else (eq_attr "mode" "DF") (const_string "1") (const_string "0"))) (set_attr "mode" "")]) (define_expand "vec_cmp" [(set (match_operand: 0 "register_operand") (match_operator: 1 "" [(match_operand:V48H_AVX512VL 2 "register_operand") (match_operand:V48H_AVX512VL 3 "nonimmediate_operand")]))] "TARGET_AVX512F" { bool ok = ix86_expand_mask_vec_cmp (operands[0], GET_CODE (operands[1]), operands[2], operands[3]); gcc_assert (ok); DONE; }) (define_expand "vec_cmp" [(set (match_operand: 0 "register_operand") (match_operator: 1 "" [(match_operand:VI12_AVX512VL 2
;; NOTE(review): vec_cmp / vec_cmpu / vcond / vcond_mask expanders that
;; dispatch to ix86_expand_{mask,int,fp}_vec_cmp, ix86_expand_fp_vcond,
;; ix86_expand_int_vcond and ix86_expand_sse_movcc.  The mode suffixes of
;; most expander names were lost in extraction (only the explicitly spelled
;; v2div2di/v8hi variants survived intact) -- restore from upstream sse.md.
"register_operand") (match_operand:VI12_AVX512VL 3 "nonimmediate_operand")]))] "TARGET_AVX512BW" { bool ok = ix86_expand_mask_vec_cmp (operands[0], GET_CODE (operands[1]), operands[2], operands[3]); gcc_assert (ok); DONE; }) (define_expand "vec_cmp" [(set (match_operand: 0 "register_operand") (match_operator: 1 "" [(match_operand:VI_256 2 "register_operand") (match_operand:VI_256 3 "nonimmediate_operand")]))] "TARGET_AVX2" { bool ok = ix86_expand_int_vec_cmp (operands); gcc_assert (ok); DONE; }) (define_expand "vec_cmp" [(set (match_operand: 0 "register_operand") (match_operator: 1 "" [(match_operand:VI124_128 2 "register_operand") (match_operand:VI124_128 3 "vector_operand")]))] "TARGET_SSE2" { bool ok = ix86_expand_int_vec_cmp (operands); gcc_assert (ok); DONE; }) (define_expand "vec_cmpv2div2di" [(set (match_operand:V2DI 0 "register_operand") (match_operator:V2DI 1 "" [(match_operand:V2DI 2 "register_operand") (match_operand:V2DI 3 "vector_operand")]))] "TARGET_SSE4_2" { bool ok = ix86_expand_int_vec_cmp (operands); gcc_assert (ok); DONE; }) (define_expand "vec_cmp" [(set (match_operand: 0 "register_operand") (match_operator: 1 "" [(match_operand:VF_256 2 "register_operand") (match_operand:VF_256 3 "nonimmediate_operand")]))] "TARGET_AVX" { bool ok = ix86_expand_fp_vec_cmp (operands); gcc_assert (ok); DONE; }) (define_expand "vec_cmp" [(set (match_operand: 0 "register_operand") (match_operator: 1 "" [(match_operand:VF_128 2 "register_operand") (match_operand:VF_128 3 "vector_operand")]))] "TARGET_SSE" { bool ok = ix86_expand_fp_vec_cmp (operands); gcc_assert (ok); DONE; }) (define_expand "vec_cmpu" [(set (match_operand: 0 "register_operand") (match_operator: 1 "" [(match_operand:VI48_AVX512VL 2 "register_operand") (match_operand:VI48_AVX512VL 3 "nonimmediate_operand")]))] "TARGET_AVX512F" { bool ok = ix86_expand_mask_vec_cmp (operands[0], GET_CODE (operands[1]), operands[2], operands[3]); gcc_assert (ok); DONE; }) (define_expand "vec_cmpu" [(set (match_operand:
0 "register_operand") (match_operator: 1 "" [(match_operand:VI12_AVX512VL 2 "register_operand") (match_operand:VI12_AVX512VL 3 "nonimmediate_operand")]))] "TARGET_AVX512BW" { bool ok = ix86_expand_mask_vec_cmp (operands[0], GET_CODE (operands[1]), operands[2], operands[3]); gcc_assert (ok); DONE; }) (define_expand "vec_cmpu" [(set (match_operand: 0 "register_operand") (match_operator: 1 "" [(match_operand:VI_256 2 "register_operand") (match_operand:VI_256 3 "nonimmediate_operand")]))] "TARGET_AVX2" { bool ok = ix86_expand_int_vec_cmp (operands); gcc_assert (ok); DONE; }) (define_expand "vec_cmpu" [(set (match_operand: 0 "register_operand") (match_operator: 1 "" [(match_operand:VI124_128 2 "register_operand") (match_operand:VI124_128 3 "vector_operand")]))] "TARGET_SSE2" { bool ok = ix86_expand_int_vec_cmp (operands); gcc_assert (ok); DONE; }) (define_expand "vec_cmpuv2div2di" [(set (match_operand:V2DI 0 "register_operand") (match_operator:V2DI 1 "" [(match_operand:V2DI 2 "register_operand") (match_operand:V2DI 3 "vector_operand")]))] "TARGET_SSE4_2" { bool ok = ix86_expand_int_vec_cmp (operands); gcc_assert (ok); DONE; }) (define_expand "vec_cmpeqv2div2di" [(set (match_operand:V2DI 0 "register_operand") (match_operator:V2DI 1 "" [(match_operand:V2DI 2 "register_operand") (match_operand:V2DI 3 "vector_operand")]))] "TARGET_SSE4_1" { bool ok = ix86_expand_int_vec_cmp (operands); gcc_assert (ok); DONE; }) (define_expand "vcond" [(set (match_operand:V_512 0 "register_operand") (if_then_else:V_512 (match_operator 3 "" [(match_operand:VF_512 4 "nonimmediate_operand") (match_operand:VF_512 5 "nonimmediate_operand")]) (match_operand:V_512 1 "general_operand") (match_operand:V_512 2 "general_operand")))] "TARGET_AVX512F && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))" { bool ok = ix86_expand_fp_vcond (operands); gcc_assert (ok); DONE; }) (define_expand "vcond" [(set (match_operand:V_256 0 "register_operand") (if_then_else:V_256 (match_operator 3 ""
[(match_operand:VF_256 4 "nonimmediate_operand") (match_operand:VF_256 5 "nonimmediate_operand")]) (match_operand:V_256 1 "general_operand") (match_operand:V_256 2 "general_operand")))] "TARGET_AVX && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))" { bool ok = ix86_expand_fp_vcond (operands); gcc_assert (ok); DONE; }) (define_expand "vcond" [(set (match_operand:V_128 0 "register_operand") (if_then_else:V_128 (match_operator 3 "" [(match_operand:VF_128 4 "vector_operand") (match_operand:VF_128 5 "vector_operand")]) (match_operand:V_128 1 "general_operand") (match_operand:V_128 2 "general_operand")))] "TARGET_SSE && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))" { bool ok = ix86_expand_fp_vcond (operands); gcc_assert (ok); DONE; }) (define_expand "vcond" [(set (match_operand:VF_AVX512FP16VL 0 "register_operand") (if_then_else:VF_AVX512FP16VL (match_operator 3 "" [(match_operand:VF_AVX512FP16VL 4 "vector_operand") (match_operand:VF_AVX512FP16VL 5 "vector_operand")]) (match_operand:VF_AVX512FP16VL 1 "general_operand") (match_operand:VF_AVX512FP16VL 2 "general_operand")))] "TARGET_AVX512FP16" { bool ok = ix86_expand_fp_vcond (operands); gcc_assert (ok); DONE; }) (define_expand "vcond" [(set (match_operand:VF_AVX512FP16VL 0 "register_operand") (if_then_else:VF_AVX512FP16VL (match_operator 3 "" [(match_operand: 4 "vector_operand") (match_operand: 5 "vector_operand")]) (match_operand:VF_AVX512FP16VL 1 "general_operand") (match_operand:VF_AVX512FP16VL 2 "general_operand")))] "TARGET_AVX512FP16" { bool ok = ix86_expand_int_vcond (operands); gcc_assert (ok); DONE; }) (define_expand "vcond" [(set (match_operand: 0 "register_operand") (if_then_else: (match_operator 3 "" [(match_operand:VF_AVX512FP16VL 4 "vector_operand") (match_operand:VF_AVX512FP16VL 5 "vector_operand")]) (match_operand: 1 "general_operand") (match_operand: 2 "general_operand")))] "TARGET_AVX512FP16" { bool ok = ix86_expand_fp_vcond (operands); gcc_assert (ok); DONE; }) (define_expand "vcond_mask_"
[(set (match_operand:V48_AVX512VL 0 "register_operand") (vec_merge:V48_AVX512VL (match_operand:V48_AVX512VL 1 "nonimmediate_operand") (match_operand:V48_AVX512VL 2 "nonimm_or_0_operand") (match_operand: 3 "register_operand")))] "TARGET_AVX512F") (define_expand "vcond_mask_" [(set (match_operand:VI12HF_AVX512VL 0 "register_operand") (vec_merge:VI12HF_AVX512VL (match_operand:VI12HF_AVX512VL 1 "nonimmediate_operand") (match_operand:VI12HF_AVX512VL 2 "nonimm_or_0_operand") (match_operand: 3 "register_operand")))] "TARGET_AVX512BW") ;; As vcondv4div4df and vcondv8siv8sf are enabled already with TARGET_AVX, ;; and their condition can be folded late into a constant, we need to ;; support vcond_mask_v4div4di and vcond_mask_v8siv8si for TARGET_AVX. (define_mode_iterator VI_256_AVX2 [(V32QI "TARGET_AVX2") (V16HI "TARGET_AVX2") V8SI V4DI]) (define_expand "vcond_mask_" [(set (match_operand:VI_256_AVX2 0 "register_operand") (vec_merge:VI_256_AVX2 (match_operand:VI_256_AVX2 1 "nonimmediate_operand") (match_operand:VI_256_AVX2 2 "nonimm_or_0_operand") (match_operand: 3 "register_operand")))] "TARGET_AVX" { ix86_expand_sse_movcc (operands[0], operands[3], operands[1], operands[2]); DONE; }) (define_expand "vcond_mask_" [(set (match_operand:VI124_128 0 "register_operand") (vec_merge:VI124_128 (match_operand:VI124_128 1 "vector_operand") (match_operand:VI124_128 2 "nonimm_or_0_operand") (match_operand: 3 "register_operand")))] "TARGET_SSE2" { ix86_expand_sse_movcc (operands[0], operands[3], operands[1], operands[2]); DONE; }) (define_expand "vcond_mask_v2div2di" [(set (match_operand:V2DI 0 "register_operand") (vec_merge:V2DI (match_operand:V2DI 1 "vector_operand") (match_operand:V2DI 2 "nonimm_or_0_operand") (match_operand:V2DI 3 "register_operand")))] "TARGET_SSE4_2" { ix86_expand_sse_movcc (operands[0], operands[3], operands[1], operands[2]); DONE; }) (define_expand "vcond_mask_" [(set (match_operand:VF_256 0 "register_operand") (vec_merge:VF_256 (match_operand:VF_256 1
;; NOTE(review): remaining vcond_mask expanders plus the parallel FP logical
;; patterns (andnot and the any_logic and/ior/xor forms) whose C output
;; blocks pick ps/pd/vpand[dq] spellings by mode and ISA.  The "%s" format
;; fragments and pattern-name pieces ("<logic>", "<ssemodesuffix>", ...)
;; appear stripped from the templates below by extraction -- compare every
;; string against upstream sse.md before relying on this text.
"nonimmediate_operand") (match_operand:VF_256 2 "nonimm_or_0_operand") (match_operand: 3 "register_operand")))] "TARGET_AVX" { ix86_expand_sse_movcc (operands[0], operands[3], operands[1], operands[2]); DONE; }) (define_expand "vcond_mask_" [(set (match_operand:VF_128 0 "register_operand") (vec_merge:VF_128 (match_operand:VF_128 1 "vector_operand") (match_operand:VF_128 2 "nonimm_or_0_operand") (match_operand: 3 "register_operand")))] "TARGET_SSE" { ix86_expand_sse_movcc (operands[0], operands[3], operands[1], operands[2]); DONE; }) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; Parallel floating point logical operations ;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define_insn "_andnot3" [(set (match_operand:VFB_128_256 0 "register_operand" "=x,x,v,v") (and:VFB_128_256 (not:VFB_128_256 (match_operand:VFB_128_256 1 "register_operand" "0,x,v,v")) (match_operand:VFB_128_256 2 "vector_operand" "xBm,xm,vm,vm")))] "TARGET_SSE && " { char buf[128]; const char *ops; const char *suffix; switch (which_alternative) { case 0: ops = "andn%s\t{%%2, %%0|%%0, %%2}"; break; case 1: case 2: case 3: ops = "vandn%s\t{%%2, %%1, %%0|%%0, %%1, %%2}"; break; default: gcc_unreachable (); } switch (get_attr_mode (insn)) { case MODE_V16HF: case MODE_V8HF: case MODE_V8SF: case MODE_V4SF: suffix = "ps"; break; case MODE_OI: case MODE_TI: /* There is no vandnp[sd] in avx512f. Use vpandn[qd]. */ suffix = GET_MODE_INNER (mode) == DFmode ?
"q" : "d"; ops = "vpandn%s\t{%%2, %%1, %%0|%%0, %%1, %%2}"; break; default: suffix = ""; } snprintf (buf, sizeof (buf), ops, suffix); output_asm_insn (buf, operands); return ""; } [(set_attr "isa" "noavx,avx,avx512dq,avx512f") (set_attr "type" "sselog") (set_attr "prefix" "orig,maybe_vex,evex,evex") (set (attr "mode") (cond [(and (match_test "") (and (eq_attr "alternative" "1") (match_test "!TARGET_AVX512DQ"))) (const_string "") (eq_attr "alternative" "3") (const_string "") (match_test "TARGET_AVX") (const_string "") (match_test "optimize_function_for_size_p (cfun)") (const_string "V4SF") (match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL") (const_string "V4SF") ] (const_string "")))]) (define_insn "_andnot3" [(set (match_operand:VFB_512 0 "register_operand" "=v") (and:VFB_512 (not:VFB_512 (match_operand:VFB_512 1 "register_operand" "v")) (match_operand:VFB_512 2 "nonimmediate_operand" "vm")))] "TARGET_AVX512F" { char buf[128]; const char *ops; const char *suffix; suffix = ""; ops = ""; /* Since there are no vandnp[sd] without AVX512DQ nor vandnph, use vp[dq]. */ if (!TARGET_AVX512DQ || mode == V32HFmode) { suffix = GET_MODE_INNER (mode) == DFmode ?
"q" : "d"; ops = "p"; } snprintf (buf, sizeof (buf), "v%sandn%s\t{%%2, %%1, %%0|%%0, %%1, %%2}", ops, suffix); output_asm_insn (buf, operands); return ""; } [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set (attr "mode") (if_then_else (match_test "TARGET_AVX512DQ") (const_string "") (const_string "XI")))]) (define_expand "3" [(set (match_operand:VFB_128_256 0 "register_operand") (any_logic:VFB_128_256 (match_operand:VFB_128_256 1 "vector_operand") (match_operand:VFB_128_256 2 "vector_operand")))] "TARGET_SSE && " "ix86_fixup_binary_operands_no_copy (, mode, operands);") (define_expand "3" [(set (match_operand:VFB_512 0 "register_operand") (any_logic:VFB_512 (match_operand:VFB_512 1 "nonimmediate_operand") (match_operand:VFB_512 2 "nonimmediate_operand")))] "TARGET_AVX512F" "ix86_fixup_binary_operands_no_copy (, mode, operands);") (define_insn "*3" [(set (match_operand:VFB_128_256 0 "register_operand" "=x,x,v,v") (any_logic:VFB_128_256 (match_operand:VFB_128_256 1 "vector_operand" "%0,x,v,v") (match_operand:VFB_128_256 2 "vector_operand" "xBm,xm,vm,vm")))] "TARGET_SSE && && !(MEM_P (operands[1]) && MEM_P (operands[2]))" { char buf[128]; const char *ops; const char *suffix; switch (which_alternative) { case 0: ops = "%s\t{%%2, %%0|%%0, %%2}"; break; case 1: case 2: case 3: ops = "v%s\t{%%2, %%1, %%0|%%0, %%1, %%2}"; break; default: gcc_unreachable (); } switch (get_attr_mode (insn)) { case MODE_V16HF: case MODE_V8HF: case MODE_V8SF: case MODE_V4SF: suffix = "ps"; break; case MODE_OI: case MODE_TI: /* There is no vp[sd] in avx512f. Use vp[qd]. */ suffix = GET_MODE_INNER (mode) == DFmode ?
"q" : "d"; ops = "vp%s\t{%%2, %%1, %%0|%%0, %%1, %%2}"; break; default: suffix = ""; } snprintf (buf, sizeof (buf), ops, suffix); output_asm_insn (buf, operands); return ""; } [(set_attr "isa" "noavx,avx,avx512dq,avx512f") (set_attr "type" "sselog") (set_attr "prefix" "orig,maybe_evex,evex,evex") (set (attr "mode") (cond [(and (match_test "") (and (eq_attr "alternative" "1") (match_test "!TARGET_AVX512DQ"))) (const_string "") (eq_attr "alternative" "3") (const_string "") (match_test "TARGET_AVX") (const_string "") (match_test "optimize_function_for_size_p (cfun)") (const_string "V4SF") (match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL") (const_string "V4SF") ] (const_string "")))]) (define_insn "*3" [(set (match_operand:VFB_512 0 "register_operand" "=v") (any_logic:VFB_512 (match_operand:VFB_512 1 "nonimmediate_operand" "%v") (match_operand:VFB_512 2 "nonimmediate_operand" "vm")))] "TARGET_AVX512F && !(MEM_P (operands[1]) && MEM_P (operands[2]))" { char buf[128]; const char *ops; const char *suffix; suffix = ""; ops = ""; /* Since there are no vp[sd] without AVX512DQ nor vph, use vp[dq]. */ if (!TARGET_AVX512DQ || mode == V32HFmode) { suffix = GET_MODE_INNER (mode) == DFmode ? "q" : "d"; ops = "p"; } snprintf (buf, sizeof (buf), "v%s%s\t{%%2, %%1, %%0|%%0, %%1, %%2}", ops, suffix); output_asm_insn (buf, operands); return ""; } [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set (attr "mode") (if_then_else (match_test "TARGET_AVX512DQ") (const_string "") (const_string "XI")))]) ;; Generic part doesn't support the simplification of logic operation with ;; float_vector_all_ones_operand since it's not valid rtl. Add combine splitter ;; for them, it should be safe since there's no SIMD Floating-Point Exceptions.
(define_insn_and_split "*bit_and_float_vector_all_ones" [(set (match_operand:VFB 0 "nonimmediate_operand") (and:VFB (match_operand:VFB 1 "nonimmediate_operand") (match_operand:VFB 2 "float_vector_all_ones_operand")))] "TARGET_SSE && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (match_dup 1))] "operands[1] = force_reg (mode, operands[1]);") (define_expand "copysign3" [(set (match_dup 4) (and:VFB (not:VFB (match_dup 3)) (match_operand:VFB 1 "vector_operand"))) (set (match_dup 5) (and:VFB (match_dup 3) (match_operand:VFB 2 "vector_operand"))) (set (match_operand:VFB 0 "register_operand") (ior:VFB (match_dup 4) (match_dup 5)))] "TARGET_SSE" { operands[3] = ix86_build_signbit_mask (mode, 1, 0); operands[4] = gen_reg_rtx (mode); operands[5] = gen_reg_rtx (mode); }) (define_expand "xorsign3" [(set (match_dup 4) (and:VFB (match_dup 3) (match_operand:VFB 2 "vector_operand"))) (set (match_operand:VFB 0 "register_operand") (xor:VFB (match_dup 4) (match_operand:VFB 1 "vector_operand")))] "TARGET_SSE" { operands[3] = ix86_build_signbit_mask (mode, 1, 0); operands[4] = gen_reg_rtx (mode); }) (define_expand "signbit2" [(set (match_operand: 0 "register_operand") (lshiftrt: (subreg: (match_operand:VF1_AVX2 1 "register_operand") 0) (match_dup 2)))] "TARGET_SSE2" "operands[2] = GEN_INT (GET_MODE_UNIT_BITSIZE (mode)-1);") ;; Also define scalar versions. These are used for abs, neg, and ;; conditional move. Using subregs into vector modes causes register ;; allocation lossage. These patterns do not allow memory operands ;; because the native instructions read the full 128-bits. (define_insn "*andnot3" [(set (match_operand:MODEF 0 "register_operand" "=x,x,v,v") (and:MODEF (not:MODEF (match_operand:MODEF 1 "register_operand" "0,x,v,v")) (match_operand:MODEF 2 "register_operand" "x,x,v,v")))] "SSE_FLOAT_MODE_P (mode)" { char buf[128]; const char *ops; const char *suffix = (get_attr_mode (insn) == MODE_V4SF) ? 
"ps" : ""; switch (which_alternative) { case 0: ops = "andn%s\t{%%2, %%0|%%0, %%2}"; break; case 1: ops = "vandn%s\t{%%2, %%1, %%0|%%0, %%1, %%2}"; break; case 2: if (TARGET_AVX512DQ) ops = "vandn%s\t{%%2, %%1, %%0|%%0, %%1, %%2}"; else { suffix = mode == DFmode ? "q" : "d"; ops = "vpandn%s\t{%%2, %%1, %%0|%%0, %%1, %%2}"; } break; case 3: if (TARGET_AVX512DQ) ops = "vandn%s\t{%%g2, %%g1, %%g0|%%g0, %%g1, %%g2}"; else { suffix = mode == DFmode ? "q" : "d"; ops = "vpandn%s\t{%%g2, %%g1, %%g0|%%g0, %%g1, %%g2}"; } break; default: gcc_unreachable (); } snprintf (buf, sizeof (buf), ops, suffix); output_asm_insn (buf, operands); return ""; } [(set_attr "isa" "noavx,avx,avx512vl,avx512f") (set_attr "type" "sselog") (set_attr "prefix" "orig,vex,evex,evex") (set (attr "mode") (cond [(eq_attr "alternative" "2") (if_then_else (match_test "TARGET_AVX512DQ") (const_string "") (const_string "TI")) (eq_attr "alternative" "3") (if_then_else (match_test "TARGET_AVX512DQ") (const_string "") (const_string "XI")) (match_test "TARGET_AVX") (const_string "") (match_test "optimize_function_for_size_p (cfun)") (const_string "V4SF") (match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL") (const_string "V4SF") ] (const_string "")))]) (define_insn "*andnottf3" [(set (match_operand:TF 0 "register_operand" "=x,x,v,v") (and:TF (not:TF (match_operand:TF 1 "register_operand" "0,x,v,v")) (match_operand:TF 2 "vector_operand" "xBm,xm,vm,v")))] "TARGET_SSE" { char buf[128]; const char *ops; const char *tmp = (which_alternative >= 2 ? "pandnq" : get_attr_mode (insn) == MODE_V4SF ? 
"andnps" : "pandn"); switch (which_alternative) { case 0: ops = "%s\t{%%2, %%0|%%0, %%2}"; break; case 1: case 2: ops = "v%s\t{%%2, %%1, %%0|%%0, %%1, %%2}"; break; case 3: ops = "v%s\t{%%g2, %%g1, %%g0|%%g0, %%g1, %%g2}"; break; default: gcc_unreachable (); } snprintf (buf, sizeof (buf), ops, tmp); output_asm_insn (buf, operands); return ""; } [(set_attr "isa" "noavx,avx,avx512vl,avx512f") (set_attr "type" "sselog") (set (attr "prefix_data16") (if_then_else (and (eq_attr "alternative" "0") (eq_attr "mode" "TI")) (const_string "1") (const_string "*"))) (set_attr "prefix" "orig,vex,evex,evex") (set (attr "mode") (cond [(eq_attr "alternative" "2") (const_string "TI") (eq_attr "alternative" "3") (const_string "XI") (match_test "TARGET_AVX") (const_string "TI") (ior (not (match_test "TARGET_SSE2")) (match_test "optimize_function_for_size_p (cfun)")) (const_string "V4SF") (match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL") (const_string "V4SF") ] (const_string "TI")))]) (define_insn "3" [(set (match_operand:MODEF 0 "register_operand" "=x,x,v,v") (any_logic:MODEF (match_operand:MODEF 1 "register_operand" "%0,x,v,v") (match_operand:MODEF 2 "register_operand" "x,x,v,v")))] "SSE_FLOAT_MODE_P (mode)" { char buf[128]; const char *ops; const char *suffix = (get_attr_mode (insn) == MODE_V4SF) ? "ps" : ""; switch (which_alternative) { case 0: ops = "%s\t{%%2, %%0|%%0, %%2}"; break; case 2: if (!TARGET_AVX512DQ) { suffix = mode == DFmode ? "q" : "d"; ops = "vp%s\t{%%2, %%1, %%0|%%0, %%1, %%2}"; break; } /* FALLTHRU */ case 1: ops = "v%s\t{%%2, %%1, %%0|%%0, %%1, %%2}"; break; case 3: if (TARGET_AVX512DQ) ops = "v%s\t{%%g2, %%g1, %%g0|%%g0, %%g1, %%g2}"; else { suffix = mode == DFmode ? 
"q" : "d"; ops = "vp%s\t{%%g2, %%g1, %%g0|%%g0, %%g1, %%g2}"; } break; default: gcc_unreachable (); } snprintf (buf, sizeof (buf), ops, suffix); output_asm_insn (buf, operands); return ""; } [(set_attr "isa" "noavx,avx,avx512vl,avx512f") (set_attr "type" "sselog") (set_attr "prefix" "orig,vex,evex,evex") (set (attr "mode") (cond [(eq_attr "alternative" "2") (if_then_else (match_test "TARGET_AVX512DQ") (const_string "") (const_string "TI")) (eq_attr "alternative" "3") (if_then_else (match_test "TARGET_AVX512DQ") (const_string "") (const_string "XI")) (match_test "TARGET_AVX") (const_string "") (match_test "optimize_function_for_size_p (cfun)") (const_string "V4SF") (match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL") (const_string "V4SF") ] (const_string "")))]) (define_expand "tf3" [(set (match_operand:TF 0 "register_operand") (any_logic:TF (match_operand:TF 1 "vector_operand") (match_operand:TF 2 "vector_operand")))] "TARGET_SSE" "ix86_fixup_binary_operands_no_copy (, TFmode, operands);") (define_insn "*tf3" [(set (match_operand:TF 0 "register_operand" "=x,x,v,v") (any_logic:TF (match_operand:TF 1 "vector_operand" "%0,x,v,v") (match_operand:TF 2 "vector_operand" "xBm,xm,vm,v")))] "TARGET_SSE && !(MEM_P (operands[1]) && MEM_P (operands[2]))" { char buf[128]; const char *ops; const char *tmp = (which_alternative >= 2 ? "pq" : get_attr_mode (insn) == MODE_V4SF ? 
"ps" : "p"); switch (which_alternative) { case 0: ops = "%s\t{%%2, %%0|%%0, %%2}"; break; case 1: case 2: ops = "v%s\t{%%2, %%1, %%0|%%0, %%1, %%2}"; break; case 3: ops = "v%s\t{%%g2, %%g1, %%g0|%%g0, %%g1, %%g2}"; break; default: gcc_unreachable (); } snprintf (buf, sizeof (buf), ops, tmp); output_asm_insn (buf, operands); return ""; } [(set_attr "isa" "noavx,avx,avx512vl,avx512f") (set_attr "type" "sselog") (set (attr "prefix_data16") (if_then_else (and (eq_attr "alternative" "0") (eq_attr "mode" "TI")) (const_string "1") (const_string "*"))) (set_attr "prefix" "orig,vex,evex,evex") (set (attr "mode") (cond [(eq_attr "alternative" "2") (const_string "TI") (eq_attr "alternative" "3") (const_string "QI") (match_test "TARGET_AVX") (const_string "TI") (ior (not (match_test "TARGET_SSE2")) (match_test "optimize_function_for_size_p (cfun)")) (const_string "V4SF") (match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL") (const_string "V4SF") ] (const_string "TI")))]) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; FMA floating point multiply/accumulate instructions. These include ;; scalar versions of the instructions as well as vector versions. ;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; The standard names for scalar FMA are only available with SSE math enabled. ;; CPUID bit AVX512F enables evex encoded scalar and 512-bit fma. It doesn't ;; care about FMA bit, so we enable fma for TARGET_AVX512F even when TARGET_FMA ;; and TARGET_FMA4 are both false. ;; TODO: In theory AVX512F does not automatically imply FMA, and without FMA ;; one must force the EVEX encoding of the fma insns. Ideally we'd improve ;; GAS to allow proper prefix selection. However, for the moment all hardware ;; that supports AVX512F also supports FMA so we can ignore this for now. 
(define_mode_iterator FMAMODEM [(SF "TARGET_SSE_MATH && (TARGET_FMA || TARGET_FMA4 || TARGET_AVX512F)") (DF "TARGET_SSE_MATH && (TARGET_FMA || TARGET_FMA4 || TARGET_AVX512F)") (V4SF "TARGET_FMA || TARGET_FMA4 || TARGET_AVX512VL") (V2DF "TARGET_FMA || TARGET_FMA4 || TARGET_AVX512VL") (V8SF "TARGET_FMA || TARGET_FMA4 || TARGET_AVX512VL") (V4DF "TARGET_FMA || TARGET_FMA4 || TARGET_AVX512VL") (V16SF "TARGET_AVX512F") (V8DF "TARGET_AVX512F") (HF "TARGET_AVX512FP16") (V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL") (V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL") (V32HF "TARGET_AVX512FP16")]) (define_expand "fma4" [(set (match_operand:FMAMODEM 0 "register_operand") (fma:FMAMODEM (match_operand:FMAMODEM 1 "nonimmediate_operand") (match_operand:FMAMODEM 2 "nonimmediate_operand") (match_operand:FMAMODEM 3 "nonimmediate_operand")))]) (define_expand "fms4" [(set (match_operand:FMAMODEM 0 "register_operand") (fma:FMAMODEM (match_operand:FMAMODEM 1 "nonimmediate_operand") (match_operand:FMAMODEM 2 "nonimmediate_operand") (neg:FMAMODEM (match_operand:FMAMODEM 3 "nonimmediate_operand"))))]) (define_expand "fnma4" [(set (match_operand:FMAMODEM 0 "register_operand") (fma:FMAMODEM (neg:FMAMODEM (match_operand:FMAMODEM 1 "nonimmediate_operand")) (match_operand:FMAMODEM 2 "nonimmediate_operand") (match_operand:FMAMODEM 3 "nonimmediate_operand")))]) (define_expand "fnms4" [(set (match_operand:FMAMODEM 0 "register_operand") (fma:FMAMODEM (neg:FMAMODEM (match_operand:FMAMODEM 1 "nonimmediate_operand")) (match_operand:FMAMODEM 2 "nonimmediate_operand") (neg:FMAMODEM (match_operand:FMAMODEM 3 "nonimmediate_operand"))))]) ;; The builtins for intrinsics are not constrained by SSE math enabled. 
(define_mode_iterator FMAMODE_AVX512 [(SF "TARGET_FMA || TARGET_FMA4 || TARGET_AVX512F") (DF "TARGET_FMA || TARGET_FMA4 || TARGET_AVX512F") (V4SF "TARGET_FMA || TARGET_FMA4 || TARGET_AVX512VL") (V2DF "TARGET_FMA || TARGET_FMA4 || TARGET_AVX512VL") (V8SF "TARGET_FMA || TARGET_FMA4 || TARGET_AVX512VL") (V4DF "TARGET_FMA || TARGET_FMA4 || TARGET_AVX512VL") (V16SF "TARGET_AVX512F") (V8DF "TARGET_AVX512F")]) (define_mode_iterator FMAMODE [SF DF V4SF V2DF V8SF V4DF]) (define_expand "fma4i_fmadd_" [(set (match_operand:FMAMODE_AVX512 0 "register_operand") (fma:FMAMODE_AVX512 (match_operand:FMAMODE_AVX512 1 "nonimmediate_operand") (match_operand:FMAMODE_AVX512 2 "nonimmediate_operand") (match_operand:FMAMODE_AVX512 3 "nonimmediate_operand")))]) (define_expand "fma4i_fmsub_" [(set (match_operand:FMAMODE_AVX512 0 "register_operand") (fma:FMAMODE_AVX512 (match_operand:FMAMODE_AVX512 1 "nonimmediate_operand") (match_operand:FMAMODE_AVX512 2 "nonimmediate_operand") (neg:FMAMODE_AVX512 (match_operand:FMAMODE_AVX512 3 "nonimmediate_operand"))))]) (define_expand "fma4i_fnmadd_" [(set (match_operand:FMAMODE_AVX512 0 "register_operand") (fma:FMAMODE_AVX512 (neg:FMAMODE_AVX512 (match_operand:FMAMODE_AVX512 1 "nonimmediate_operand")) (match_operand:FMAMODE_AVX512 2 "nonimmediate_operand") (match_operand:FMAMODE_AVX512 3 "nonimmediate_operand")))]) (define_expand "fma4i_fnmsub_" [(set (match_operand:FMAMODE_AVX512 0 "register_operand") (fma:FMAMODE_AVX512 (neg:FMAMODE_AVX512 (match_operand:FMAMODE_AVX512 1 "nonimmediate_operand")) (match_operand:FMAMODE_AVX512 2 "nonimmediate_operand") (neg:FMAMODE_AVX512 (match_operand:FMAMODE_AVX512 3 "nonimmediate_operand"))))]) (define_expand "_fmadd__maskz" [(match_operand:VFH_AVX512VL 0 "register_operand") (match_operand:VFH_AVX512VL 1 "") (match_operand:VFH_AVX512VL 2 "") (match_operand:VFH_AVX512VL 3 "") (match_operand: 4 "register_operand")] "TARGET_AVX512F && " { emit_insn (gen_fma_fmadd__maskz_1 ( operands[0], operands[1], operands[2], 
operands[3], CONST0_RTX (mode), operands[4])); DONE; }) (define_insn "*fma_fmadd_" [(set (match_operand:FMAMODE 0 "register_operand" "=v,v,v,x,x") (fma:FMAMODE (match_operand:FMAMODE 1 "nonimmediate_operand" "%0,0,v,x,x") (match_operand:FMAMODE 2 "nonimmediate_operand" "vm,v,vm,x,m") (match_operand:FMAMODE 3 "nonimmediate_operand" "v,vm,0,xm,x")))] "TARGET_FMA || TARGET_FMA4" "@ vfmadd132\t{%2, %3, %0|%0, %3, %2} vfmadd213\t{%3, %2, %0|%0, %2, %3} vfmadd231\t{%2, %1, %0|%0, %1, %2} vfmadd\t{%3, %2, %1, %0|%0, %1, %2, %3} vfmadd\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "isa" "fma,fma,fma,fma4,fma4") (set_attr "type" "ssemuladd") (set_attr "mode" "")]) ;; Suppose AVX-512F as baseline (define_mode_iterator VFH_SF_AVX512VL [(V32HF "TARGET_AVX512FP16") (V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL") (V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL") (HF "TARGET_AVX512FP16") SF V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL") DF V8DF (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")]) (define_insn "fma_fmadd_" [(set (match_operand:VFH_SF_AVX512VL 0 "register_operand" "=v,v,v") (fma:VFH_SF_AVX512VL (match_operand:VFH_SF_AVX512VL 1 "" "%0,0,v") (match_operand:VFH_SF_AVX512VL 2 "" ",v,") (match_operand:VFH_SF_AVX512VL 3 "" "v,,0")))] "TARGET_AVX512F && && " "@ vfmadd132\t{%2, %3, %0|%0, %3, %2} vfmadd213\t{%3, %2, %0|%0, %2, %3} vfmadd231\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_expand "cond_fma" [(set (match_operand:VFH_AVX512VL 0 "register_operand") (vec_merge:VFH_AVX512VL (fma:VFH_AVX512VL (match_operand:VFH_AVX512VL 2 "vector_operand") (match_operand:VFH_AVX512VL 3 "vector_operand") (match_operand:VFH_AVX512VL 4 "vector_operand")) (match_operand:VFH_AVX512VL 5 "nonimm_or_0_operand") (match_operand: 1 "register_operand")))] "TARGET_AVX512F" { rtx tmp = gen_reg_rtx (mode); emit_insn (gen_fma4 (tmp, operands[2], operands[3], operands[4])); emit_move_insn (operands[0], gen_rtx_VEC_MERGE (mode, tmp, operands[5], 
operands[1])); DONE; }) (define_insn "_fmadd__mask" [(set (match_operand:VFH_AVX512VL 0 "register_operand" "=v,v") (vec_merge:VFH_AVX512VL (fma:VFH_AVX512VL (match_operand:VFH_AVX512VL 1 "register_operand" "0,0") (match_operand:VFH_AVX512VL 2 "" ",v") (match_operand:VFH_AVX512VL 3 "" "v,")) (match_dup 1) (match_operand: 4 "register_operand" "Yk,Yk")))] "TARGET_AVX512F && " "@ vfmadd132\t{%2, %3, %0%{%4%}|%0%{%4%}, %3, %2} vfmadd213\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "_fmadd__mask3" [(set (match_operand:VFH_AVX512VL 0 "register_operand" "=v") (vec_merge:VFH_AVX512VL (fma:VFH_AVX512VL (match_operand:VFH_AVX512VL 1 "" "%v") (match_operand:VFH_AVX512VL 2 "" "") (match_operand:VFH_AVX512VL 3 "register_operand" "0")) (match_dup 3) (match_operand: 4 "register_operand" "Yk")))] "TARGET_AVX512F" "vfmadd231\t{%2, %1, %0%{%4%}|%0%{%4%}, %1, %2}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "*fma_fmsub_" [(set (match_operand:FMAMODE 0 "register_operand" "=v,v,v,x,x") (fma:FMAMODE (match_operand:FMAMODE 1 "nonimmediate_operand" "%0,0,v,x,x") (match_operand:FMAMODE 2 "nonimmediate_operand" "vm,v,vm,x,m") (neg:FMAMODE (match_operand:FMAMODE 3 "nonimmediate_operand" "v,vm,0,xm,x"))))] "TARGET_FMA || TARGET_FMA4" "@ vfmsub132\t{%2, %3, %0|%0, %3, %2} vfmsub213\t{%3, %2, %0|%0, %2, %3} vfmsub231\t{%2, %1, %0|%0, %1, %2} vfmsub\t{%3, %2, %1, %0|%0, %1, %2, %3} vfmsub\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "isa" "fma,fma,fma,fma4,fma4") (set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_expand "_fmsub__maskz" [(match_operand:VFH_AVX512VL 0 "register_operand") (match_operand:VFH_AVX512VL 1 "") (match_operand:VFH_AVX512VL 2 "") (match_operand:VFH_AVX512VL 3 "") (match_operand: 4 "register_operand")] "TARGET_AVX512F && " { emit_insn (gen_fma_fmsub__maskz_1 ( operands[0], operands[1], operands[2], operands[3], CONST0_RTX (mode), operands[4])); DONE; }) (define_insn "fma_fmsub_" 
[(set (match_operand:VFH_SF_AVX512VL 0 "register_operand" "=v,v,v") (fma:VFH_SF_AVX512VL (match_operand:VFH_SF_AVX512VL 1 "" "%0,0,v") (match_operand:VFH_SF_AVX512VL 2 "" ",v,") (neg:VFH_SF_AVX512VL (match_operand:VFH_SF_AVX512VL 3 "" "v,,0"))))] "TARGET_AVX512F && && " "@ vfmsub132\t{%2, %3, %0|%0, %3, %2} vfmsub213\t{%3, %2, %0|%0, %2, %3} vfmsub231\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_expand "cond_fms" [(set (match_operand:VFH_AVX512VL 0 "register_operand") (vec_merge:VFH_AVX512VL (fma:VFH_AVX512VL (match_operand:VFH_AVX512VL 2 "vector_operand") (match_operand:VFH_AVX512VL 3 "vector_operand") (neg:VFH_AVX512VL (match_operand:VFH_AVX512VL 4 "vector_operand"))) (match_operand:VFH_AVX512VL 5 "nonimm_or_0_operand") (match_operand: 1 "register_operand")))] "TARGET_AVX512F" { rtx tmp = gen_reg_rtx (mode); emit_insn (gen_fms4 (tmp, operands[2], operands[3], operands[4])); emit_move_insn (operands[0], gen_rtx_VEC_MERGE (mode, tmp, operands[5], operands[1])); DONE; }) (define_insn "_fmsub__mask" [(set (match_operand:VFH_AVX512VL 0 "register_operand" "=v,v") (vec_merge:VFH_AVX512VL (fma:VFH_AVX512VL (match_operand:VFH_AVX512VL 1 "register_operand" "0,0") (match_operand:VFH_AVX512VL 2 "" ",v") (neg:VFH_AVX512VL (match_operand:VFH_AVX512VL 3 "" "v,"))) (match_dup 1) (match_operand: 4 "register_operand" "Yk,Yk")))] "TARGET_AVX512F" "@ vfmsub132\t{%2, %3, %0%{%4%}|%0%{%4%}, %3, %2} vfmsub213\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "_fmsub__mask3" [(set (match_operand:VFH_AVX512VL 0 "register_operand" "=v") (vec_merge:VFH_AVX512VL (fma:VFH_AVX512VL (match_operand:VFH_AVX512VL 1 "" "%v") (match_operand:VFH_AVX512VL 2 "" "") (neg:VFH_AVX512VL (match_operand:VFH_AVX512VL 3 "register_operand" "0"))) (match_dup 3) (match_operand: 4 "register_operand" "Yk")))] "TARGET_AVX512F && " "vfmsub231\t{%2, %1, %0%{%4%}|%0%{%4%}, %1, %2}" [(set_attr "type" "ssemuladd") (set_attr 
"mode" "")]) (define_insn "*fma_fnmadd_" [(set (match_operand:FMAMODE 0 "register_operand" "=v,v,v,x,x") (fma:FMAMODE (neg:FMAMODE (match_operand:FMAMODE 1 "nonimmediate_operand" "%0,0,v,x,x")) (match_operand:FMAMODE 2 "nonimmediate_operand" "vm,v,vm,x,m") (match_operand:FMAMODE 3 "nonimmediate_operand" "v,vm,0,xm,x")))] "TARGET_FMA || TARGET_FMA4" "@ vfnmadd132\t{%2, %3, %0|%0, %3, %2} vfnmadd213\t{%3, %2, %0|%0, %2, %3} vfnmadd231\t{%2, %1, %0|%0, %1, %2} vfnmadd\t{%3, %2, %1, %0|%0, %1, %2, %3} vfnmadd\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "isa" "fma,fma,fma,fma4,fma4") (set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_expand "_fnmadd__maskz" [(match_operand:VFH_AVX512VL 0 "register_operand") (match_operand:VFH_AVX512VL 1 "") (match_operand:VFH_AVX512VL 2 "") (match_operand:VFH_AVX512VL 3 "") (match_operand: 4 "register_operand")] "TARGET_AVX512F && " { emit_insn (gen_fma_fnmadd__maskz_1 ( operands[0], operands[1], operands[2], operands[3], CONST0_RTX (mode), operands[4])); DONE; }) (define_insn "fma_fnmadd_" [(set (match_operand:VFH_SF_AVX512VL 0 "register_operand" "=v,v,v") (fma:VFH_SF_AVX512VL (neg:VFH_SF_AVX512VL (match_operand:VFH_SF_AVX512VL 1 "" "%0,0,v")) (match_operand:VFH_SF_AVX512VL 2 "" ",v,") (match_operand:VFH_SF_AVX512VL 3 "" "v,,0")))] "TARGET_AVX512F && && " "@ vfnmadd132\t{%2, %3, %0|%0, %3, %2} vfnmadd213\t{%3, %2, %0|%0, %2, %3} vfnmadd231\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_expand "cond_fnma" [(set (match_operand:VFH_AVX512VL 0 "register_operand") (vec_merge:VFH_AVX512VL (fma:VFH_AVX512VL (neg:VFH_AVX512VL (match_operand:VFH_AVX512VL 2 "vector_operand")) (match_operand:VFH_AVX512VL 3 "vector_operand") (match_operand:VFH_AVX512VL 4 "vector_operand")) (match_operand:VFH_AVX512VL 5 "nonimm_or_0_operand") (match_operand: 1 "register_operand")))] "TARGET_AVX512F" { rtx tmp = gen_reg_rtx (mode); emit_insn (gen_fnma4 (tmp, operands[2], operands[3], operands[4])); emit_move_insn 
(operands[0], gen_rtx_VEC_MERGE (mode, tmp, operands[5], operands[1])); DONE; }) (define_insn "_fnmadd__mask" [(set (match_operand:VFH_AVX512VL 0 "register_operand" "=v,v") (vec_merge:VFH_AVX512VL (fma:VFH_AVX512VL (neg:VFH_AVX512VL (match_operand:VFH_AVX512VL 1 "register_operand" "0,0")) (match_operand:VFH_AVX512VL 2 "" ",v") (match_operand:VFH_AVX512VL 3 "" "v,")) (match_dup 1) (match_operand: 4 "register_operand" "Yk,Yk")))] "TARGET_AVX512F && " "@ vfnmadd132\t{%2, %3, %0%{%4%}|%0%{%4%}, %3, %2} vfnmadd213\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "_fnmadd__mask3" [(set (match_operand:VFH_AVX512VL 0 "register_operand" "=v") (vec_merge:VFH_AVX512VL (fma:VFH_AVX512VL (neg:VFH_AVX512VL (match_operand:VFH_AVX512VL 1 "" "%v")) (match_operand:VFH_AVX512VL 2 "" "") (match_operand:VFH_AVX512VL 3 "register_operand" "0")) (match_dup 3) (match_operand: 4 "register_operand" "Yk")))] "TARGET_AVX512F && " "vfnmadd231\t{%2, %1, %0%{%4%}|%0%{%4%}, %1, %2}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "*fma_fnmsub_" [(set (match_operand:FMAMODE 0 "register_operand" "=v,v,v,x,x") (fma:FMAMODE (neg:FMAMODE (match_operand:FMAMODE 1 "nonimmediate_operand" "%0,0,v,x,x")) (match_operand:FMAMODE 2 "nonimmediate_operand" "vm,v,vm,x,m") (neg:FMAMODE (match_operand:FMAMODE 3 "nonimmediate_operand" "v,vm,0,xm,x"))))] "TARGET_FMA || TARGET_FMA4" "@ vfnmsub132\t{%2, %3, %0|%0, %3, %2} vfnmsub213\t{%3, %2, %0|%0, %2, %3} vfnmsub231\t{%2, %1, %0|%0, %1, %2} vfnmsub\t{%3, %2, %1, %0|%0, %1, %2, %3} vfnmsub\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "isa" "fma,fma,fma,fma4,fma4") (set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_expand "_fnmsub__maskz" [(match_operand:VFH_AVX512VL 0 "register_operand") (match_operand:VFH_AVX512VL 1 "") (match_operand:VFH_AVX512VL 2 "") (match_operand:VFH_AVX512VL 3 "") (match_operand: 4 "register_operand")] "TARGET_AVX512F && " { emit_insn (gen_fma_fnmsub__maskz_1 ( 
operands[0], operands[1], operands[2], operands[3], CONST0_RTX (mode), operands[4])); DONE; }) (define_insn "fma_fnmsub_" [(set (match_operand:VFH_SF_AVX512VL 0 "register_operand" "=v,v,v") (fma:VFH_SF_AVX512VL (neg:VFH_SF_AVX512VL (match_operand:VFH_SF_AVX512VL 1 "" "%0,0,v")) (match_operand:VFH_SF_AVX512VL 2 "" ",v,") (neg:VFH_SF_AVX512VL (match_operand:VFH_SF_AVX512VL 3 "" "v,,0"))))] "TARGET_AVX512F && && " "@ vfnmsub132\t{%2, %3, %0|%0, %3, %2} vfnmsub213\t{%3, %2, %0|%0, %2, %3} vfnmsub231\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_expand "cond_fnms" [(set (match_operand:VFH_AVX512VL 0 "register_operand") (vec_merge:VFH_AVX512VL (fma:VFH_AVX512VL (neg:VFH_AVX512VL (match_operand:VFH_AVX512VL 2 "vector_operand")) (match_operand:VFH_AVX512VL 3 "vector_operand") (neg:VFH_AVX512VL (match_operand:VFH_AVX512VL 4 "vector_operand"))) (match_operand:VFH_AVX512VL 5 "nonimm_or_0_operand") (match_operand: 1 "register_operand")))] "TARGET_AVX512F" { rtx tmp = gen_reg_rtx (mode); emit_insn (gen_fnms4 (tmp, operands[2], operands[3], operands[4])); emit_move_insn (operands[0], gen_rtx_VEC_MERGE (mode, tmp, operands[5], operands[1])); DONE; }) (define_insn "_fnmsub__mask" [(set (match_operand:VFH_AVX512VL 0 "register_operand" "=v,v") (vec_merge:VFH_AVX512VL (fma:VFH_AVX512VL (neg:VFH_AVX512VL (match_operand:VFH_AVX512VL 1 "register_operand" "0,0")) (match_operand:VFH_AVX512VL 2 "" ",v") (neg:VFH_AVX512VL (match_operand:VFH_AVX512VL 3 "" "v,"))) (match_dup 1) (match_operand: 4 "register_operand" "Yk,Yk")))] "TARGET_AVX512F && " "@ vfnmsub132\t{%2, %3, %0%{%4%}|%0%{%4%}, %3, %2} vfnmsub213\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "_fnmsub__mask3" [(set (match_operand:VFH_AVX512VL 0 "register_operand" "=v") (vec_merge:VFH_AVX512VL (fma:VFH_AVX512VL (neg:VFH_AVX512VL (match_operand:VFH_AVX512VL 1 "" "%v")) (match_operand:VFH_AVX512VL 2 "" "") (neg:VFH_AVX512VL 
(match_operand:VFH_AVX512VL 3 "register_operand" "0"))) (match_dup 3) (match_operand: 4 "register_operand" "Yk")))] "TARGET_AVX512F" "vfnmsub231\t{%2, %1, %0%{%4%}|%0%{%4%}, %1, %2}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) ;; FMA parallel floating point multiply addsub and subadd operations. ;; It would be possible to represent these without the UNSPEC as ;; ;; (vec_merge ;; (fma op1 op2 op3) ;; (fma op1 op2 (neg op3)) ;; (merge-const)) ;; ;; But this doesn't seem useful in practice. (define_expand "vec_fmaddsub4" [(set (match_operand:VF 0 "register_operand") (unspec:VF [(match_operand:VF 1 "nonimmediate_operand") (match_operand:VF 2 "nonimmediate_operand") (match_operand:VF 3 "nonimmediate_operand")] UNSPEC_FMADDSUB))] "TARGET_FMA || TARGET_FMA4 || ( == 64 || TARGET_AVX512VL)") (define_expand "vec_fmsubadd4" [(set (match_operand:VF 0 "register_operand") (unspec:VF [(match_operand:VF 1 "nonimmediate_operand") (match_operand:VF 2 "nonimmediate_operand") (neg:VF (match_operand:VF 3 "nonimmediate_operand"))] UNSPEC_FMADDSUB))] "TARGET_FMA || TARGET_FMA4 || ( == 64 || TARGET_AVX512VL)") (define_expand "fmaddsub_" [(set (match_operand:VF 0 "register_operand") (unspec:VF [(match_operand:VF 1 "nonimmediate_operand") (match_operand:VF 2 "nonimmediate_operand") (match_operand:VF 3 "nonimmediate_operand")] UNSPEC_FMADDSUB))] "TARGET_FMA || TARGET_FMA4 || TARGET_AVX512F") (define_expand "_fmaddsub__maskz" [(match_operand:VFH_AVX512VL 0 "register_operand") (match_operand:VFH_AVX512VL 1 "") (match_operand:VFH_AVX512VL 2 "") (match_operand:VFH_AVX512VL 3 "") (match_operand: 4 "register_operand")] "TARGET_AVX512F" { emit_insn (gen_fma_fmaddsub__maskz_1 ( operands[0], operands[1], operands[2], operands[3], CONST0_RTX (mode), operands[4])); DONE; }) (define_expand "_fmsubadd__maskz" [(match_operand:VFH_AVX512VL 0 "register_operand") (match_operand:VFH_AVX512VL 1 "") (match_operand:VFH_AVX512VL 2 "") (match_operand:VFH_AVX512VL 3 "") (match_operand: 4 
"register_operand")] "TARGET_AVX512F" { emit_insn (gen_fma_fmsubadd__maskz_1 ( operands[0], operands[1], operands[2], operands[3], CONST0_RTX (mode), operands[4])); DONE; }) (define_insn "*fma_fmaddsub_" [(set (match_operand:VF_128_256 0 "register_operand" "=v,v,v,x,x") (unspec:VF_128_256 [(match_operand:VF_128_256 1 "nonimmediate_operand" "%0,0,v,x,x") (match_operand:VF_128_256 2 "nonimmediate_operand" "vm,v,vm,x,m") (match_operand:VF_128_256 3 "nonimmediate_operand" "v,vm,0,xm,x")] UNSPEC_FMADDSUB))] "TARGET_FMA || TARGET_FMA4" "@ vfmaddsub132\t{%2, %3, %0|%0, %3, %2} vfmaddsub213\t{%3, %2, %0|%0, %2, %3} vfmaddsub231\t{%2, %1, %0|%0, %1, %2} vfmaddsub\t{%3, %2, %1, %0|%0, %1, %2, %3} vfmaddsub\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "isa" "fma,fma,fma,fma4,fma4") (set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "fma_fmaddsub_" [(set (match_operand:VFH_SF_AVX512VL 0 "register_operand" "=v,v,v") (unspec:VFH_SF_AVX512VL [(match_operand:VFH_SF_AVX512VL 1 "" "%0,0,v") (match_operand:VFH_SF_AVX512VL 2 "" ",v,") (match_operand:VFH_SF_AVX512VL 3 "" "v,,0")] UNSPEC_FMADDSUB))] "TARGET_AVX512F && && " "@ vfmaddsub132\t{%2, %3, %0|%0, %3, %2} vfmaddsub213\t{%3, %2, %0|%0, %2, %3} vfmaddsub231\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "_fmaddsub__mask" [(set (match_operand:VFH_AVX512VL 0 "register_operand" "=v,v") (vec_merge:VFH_AVX512VL (unspec:VFH_AVX512VL [(match_operand:VFH_AVX512VL 1 "register_operand" "0,0") (match_operand:VFH_AVX512VL 2 "" ",v") (match_operand:VFH_AVX512VL 3 "" "v,")] UNSPEC_FMADDSUB) (match_dup 1) (match_operand: 4 "register_operand" "Yk,Yk")))] "TARGET_AVX512F" "@ vfmaddsub132\t{%2, %3, %0%{%4%}|%0%{%4%}, %3, %2} vfmaddsub213\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "_fmaddsub__mask3" [(set (match_operand:VFH_AVX512VL 0 "register_operand" "=v") (vec_merge:VFH_AVX512VL (unspec:VFH_AVX512VL 
[(match_operand:VFH_AVX512VL 1 "register_operand" "v") (match_operand:VFH_AVX512VL 2 "" "") (match_operand:VFH_AVX512VL 3 "register_operand" "0")] UNSPEC_FMADDSUB) (match_dup 3) (match_operand: 4 "register_operand" "Yk")))] "TARGET_AVX512F" "vfmaddsub231\t{%2, %1, %0%{%4%}|%0%{%4%}, %1, %2}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "*fma_fmsubadd_" [(set (match_operand:VF_128_256 0 "register_operand" "=v,v,v,x,x") (unspec:VF_128_256 [(match_operand:VF_128_256 1 "nonimmediate_operand" "%0,0,v,x,x") (match_operand:VF_128_256 2 "nonimmediate_operand" "vm,v,vm,x,m") (neg:VF_128_256 (match_operand:VF_128_256 3 "nonimmediate_operand" "v,vm,0,xm,x"))] UNSPEC_FMADDSUB))] "TARGET_FMA || TARGET_FMA4" "@ vfmsubadd132\t{%2, %3, %0|%0, %3, %2} vfmsubadd213\t{%3, %2, %0|%0, %2, %3} vfmsubadd231\t{%2, %1, %0|%0, %1, %2} vfmsubadd\t{%3, %2, %1, %0|%0, %1, %2, %3} vfmsubadd\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "isa" "fma,fma,fma,fma4,fma4") (set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "fma_fmsubadd_" [(set (match_operand:VFH_SF_AVX512VL 0 "register_operand" "=v,v,v") (unspec:VFH_SF_AVX512VL [(match_operand:VFH_SF_AVX512VL 1 "" "%0,0,v") (match_operand:VFH_SF_AVX512VL 2 "" ",v,") (neg:VFH_SF_AVX512VL (match_operand:VFH_SF_AVX512VL 3 "" "v,,0"))] UNSPEC_FMADDSUB))] "TARGET_AVX512F && && " "@ vfmsubadd132\t{%2, %3, %0|%0, %3, %2} vfmsubadd213\t{%3, %2, %0|%0, %2, %3} vfmsubadd231\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "_fmsubadd__mask" [(set (match_operand:VFH_AVX512VL 0 "register_operand" "=v,v") (vec_merge:VFH_AVX512VL (unspec:VFH_AVX512VL [(match_operand:VFH_AVX512VL 1 "register_operand" "0,0") (match_operand:VFH_AVX512VL 2 "" ",v") (neg:VFH_AVX512VL (match_operand:VFH_AVX512VL 3 "" "v,"))] UNSPEC_FMADDSUB) (match_dup 1) (match_operand: 4 "register_operand" "Yk,Yk")))] "TARGET_AVX512F" "@ vfmsubadd132\t{%2, %3, %0%{%4%}|%0%{%4%}, %3, %2} vfmsubadd213\t{%3, %2, 
%0%{%4%}|%0%{%4%}, %2, %3}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "_fmsubadd__mask3" [(set (match_operand:VFH_AVX512VL 0 "register_operand" "=v") (vec_merge:VFH_AVX512VL (unspec:VFH_AVX512VL [(match_operand:VFH_AVX512VL 1 "register_operand" "v") (match_operand:VFH_AVX512VL 2 "" "") (neg:VFH_AVX512VL (match_operand:VFH_AVX512VL 3 "register_operand" "0"))] UNSPEC_FMADDSUB) (match_dup 3) (match_operand: 4 "register_operand" "Yk")))] "TARGET_AVX512F" "vfmsubadd231\t{%2, %1, %0%{%4%}|%0%{%4%}, %1, %2}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) ;; FMA3 floating point scalar intrinsics. These merge result with ;; high-order elements from the destination register. (define_expand "fmai_vmfmadd_" [(set (match_operand:VFH_128 0 "register_operand") (vec_merge:VFH_128 (fma:VFH_128 (match_operand:VFH_128 1 "register_operand") (match_operand:VFH_128 2 "") (match_operand:VFH_128 3 "")) (match_dup 1) (const_int 1)))] "TARGET_FMA") (define_expand "fmai_vmfmsub_" [(set (match_operand:VFH_128 0 "register_operand") (vec_merge:VFH_128 (fma:VFH_128 (match_operand:VFH_128 1 "register_operand") (match_operand:VFH_128 2 "") (neg:VFH_128 (match_operand:VFH_128 3 ""))) (match_dup 1) (const_int 1)))] "TARGET_FMA") (define_expand "fmai_vmfnmadd_" [(set (match_operand:VFH_128 0 "register_operand") (vec_merge:VFH_128 (fma:VFH_128 (neg:VFH_128 (match_operand:VFH_128 2 "")) (match_operand:VFH_128 1 "register_operand") (match_operand:VFH_128 3 "")) (match_dup 1) (const_int 1)))] "TARGET_FMA") (define_expand "fmai_vmfnmsub_" [(set (match_operand:VFH_128 0 "register_operand") (vec_merge:VFH_128 (fma:VFH_128 (neg:VFH_128 (match_operand:VFH_128 2 "")) (match_operand:VFH_128 1 "register_operand") (neg:VFH_128 (match_operand:VFH_128 3 ""))) (match_dup 1) (const_int 1)))] "TARGET_FMA") (define_insn "*fmai_fmadd_" [(set (match_operand:VFH_128 0 "register_operand" "=v,v") (vec_merge:VFH_128 (fma:VFH_128 (match_operand:VFH_128 1 "register_operand" "0,0") 
(match_operand:VFH_128 2 "" ", v") (match_operand:VFH_128 3 "" "v,")) (match_dup 1) (const_int 1)))] "TARGET_FMA || TARGET_AVX512F" "@ vfmadd132\t{%2, %3, %0|%0, %3, %2} vfmadd213\t{%3, %2, %0|%0, %2, %3}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "*fmai_fmsub_" [(set (match_operand:VFH_128 0 "register_operand" "=v,v") (vec_merge:VFH_128 (fma:VFH_128 (match_operand:VFH_128 1 "register_operand" "0,0") (match_operand:VFH_128 2 "" ",v") (neg:VFH_128 (match_operand:VFH_128 3 "" "v,"))) (match_dup 1) (const_int 1)))] "TARGET_FMA || TARGET_AVX512F" "@ vfmsub132\t{%2, %3, %0|%0, %3, %2} vfmsub213\t{%3, %2, %0|%0, %2, %3}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "*fmai_fnmadd_" [(set (match_operand:VFH_128 0 "register_operand" "=v,v") (vec_merge:VFH_128 (fma:VFH_128 (neg:VFH_128 (match_operand:VFH_128 2 "" ",v")) (match_operand:VFH_128 1 "register_operand" "0,0") (match_operand:VFH_128 3 "" "v,")) (match_dup 1) (const_int 1)))] "TARGET_FMA || TARGET_AVX512F" "@ vfnmadd132\t{%2, %3, %0|%0, %3, %2} vfnmadd213\t{%3, %2, %0|%0, %2, %3}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "*fmai_fnmsub_" [(set (match_operand:VFH_128 0 "register_operand" "=v,v") (vec_merge:VFH_128 (fma:VFH_128 (neg:VFH_128 (match_operand:VFH_128 2 "" ",v")) (match_operand:VFH_128 1 "register_operand" "0,0") (neg:VFH_128 (match_operand:VFH_128 3 "" "v,"))) (match_dup 1) (const_int 1)))] "TARGET_FMA || TARGET_AVX512F" "@ vfnmsub132\t{%2, %3, %0|%0, %3, %2} vfnmsub213\t{%3, %2, %0|%0, %2, %3}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "avx512f_vmfmadd__mask" [(set (match_operand:VFH_128 0 "register_operand" "=v,v") (vec_merge:VFH_128 (vec_merge:VFH_128 (fma:VFH_128 (match_operand:VFH_128 1 "register_operand" "0,0") (match_operand:VFH_128 2 "" ",v") (match_operand:VFH_128 3 "" "v,")) (match_dup 1) (match_operand:QI 4 "register_operand" "Yk,Yk")) (match_dup 1) (const_int 1)))] "TARGET_AVX512F" "@ 
vfmadd132\t{%2, %3, %0%{%4%}|%0%{%4%}, %3, %2} vfmadd213\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "avx512f_vmfmadd__mask3" [(set (match_operand:VFH_128 0 "register_operand" "=v") (vec_merge:VFH_128 (vec_merge:VFH_128 (fma:VFH_128 (match_operand:VFH_128 1 "" "%v") (match_operand:VFH_128 2 "" "") (match_operand:VFH_128 3 "register_operand" "0")) (match_dup 3) (match_operand:QI 4 "register_operand" "Yk")) (match_dup 3) (const_int 1)))] "TARGET_AVX512F" "vfmadd231\t{%2, %1, %0%{%4%}|%0%{%4%}, %3, %2}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_expand "avx512f_vmfmadd__maskz" [(match_operand:VFH_128 0 "register_operand") (match_operand:VFH_128 1 "") (match_operand:VFH_128 2 "") (match_operand:VFH_128 3 "") (match_operand:QI 4 "register_operand")] "TARGET_AVX512F" { emit_insn (gen_avx512f_vmfmadd__maskz_1 ( operands[0], operands[1], operands[2], operands[3], CONST0_RTX (mode), operands[4])); DONE; }) (define_insn "avx512f_vmfmadd__maskz_1" [(set (match_operand:VFH_128 0 "register_operand" "=v,v") (vec_merge:VFH_128 (vec_merge:VFH_128 (fma:VFH_128 (match_operand:VFH_128 1 "register_operand" "0,0") (match_operand:VFH_128 2 "" ",v") (match_operand:VFH_128 3 "" "v,")) (match_operand:VFH_128 4 "const0_operand" "C,C") (match_operand:QI 5 "register_operand" "Yk,Yk")) (match_dup 1) (const_int 1)))] "TARGET_AVX512F" "@ vfmadd132\t{%2, %3, %0%{%5%}%{z%}|%0%{%5%}%{z%}, %3, %2} vfmadd213\t{%3, %2, %0%{%5%}%{z%}|%0%{%5%}%{z%}, %2, %3}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "*avx512f_vmfmsub__mask" [(set (match_operand:VFH_128 0 "register_operand" "=v,v") (vec_merge:VFH_128 (vec_merge:VFH_128 (fma:VFH_128 (match_operand:VFH_128 1 "register_operand" "0,0") (match_operand:VFH_128 2 "" ",v") (neg:VFH_128 (match_operand:VFH_128 3 "" "v,"))) (match_dup 1) (match_operand:QI 4 "register_operand" "Yk,Yk")) (match_dup 1) (const_int 1)))] "TARGET_AVX512F" "@ vfmsub132\t{%2, %3, 
%0%{%4%}|%0%{%4%}, %3, %2} vfmsub213\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "avx512f_vmfmsub__mask3" [(set (match_operand:VFH_128 0 "register_operand" "=v") (vec_merge:VFH_128 (vec_merge:VFH_128 (fma:VFH_128 (match_operand:VFH_128 1 "" "%v") (match_operand:VFH_128 2 "" "") (neg:VFH_128 (match_operand:VFH_128 3 "register_operand" "0"))) (match_dup 3) (match_operand:QI 4 "register_operand" "Yk")) (match_dup 3) (const_int 1)))] "TARGET_AVX512F" "vfmsub231\t{%2, %1, %0%{%4%}|%0%{%4%}, %3, %2}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "*avx512f_vmfmsub__maskz_1" [(set (match_operand:VFH_128 0 "register_operand" "=v,v") (vec_merge:VFH_128 (vec_merge:VFH_128 (fma:VFH_128 (match_operand:VFH_128 1 "register_operand" "0,0") (match_operand:VFH_128 2 "" ",v") (neg:VFH_128 (match_operand:VFH_128 3 "" "v,"))) (match_operand:VFH_128 4 "const0_operand" "C,C") (match_operand:QI 5 "register_operand" "Yk,Yk")) (match_dup 1) (const_int 1)))] "TARGET_AVX512F" "@ vfmsub132\t{%2, %3, %0%{%5%}%{z%}|%0%{%5%}%{z%}, %3, %2} vfmsub213\t{%3, %2, %0%{%5%}%{z%}|%0%{%5%}%{z%}, %2, %3}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "avx512f_vmfnmadd__mask" [(set (match_operand:VFH_128 0 "register_operand" "=v,v") (vec_merge:VFH_128 (vec_merge:VFH_128 (fma:VFH_128 (neg:VFH_128 (match_operand:VFH_128 2 "" ",v")) (match_operand:VFH_128 1 "register_operand" "0,0") (match_operand:VFH_128 3 "" "v,")) (match_dup 1) (match_operand:QI 4 "register_operand" "Yk,Yk")) (match_dup 1) (const_int 1)))] "TARGET_AVX512F" "@ vfnmadd132\t{%2, %3, %0%{%4%}|%0%{%4%}, %3, %2} vfnmadd213\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "avx512f_vmfnmadd__mask3" [(set (match_operand:VFH_128 0 "register_operand" "=v") (vec_merge:VFH_128 (vec_merge:VFH_128 (fma:VFH_128 (neg:VFH_128 (match_operand:VFH_128 2 "" "")) (match_operand:VFH_128 1 "" "%v") 
(match_operand:VFH_128 3 "register_operand" "0")) (match_dup 3) (match_operand:QI 4 "register_operand" "Yk")) (match_dup 3) (const_int 1)))] "TARGET_AVX512F" "vfnmadd231\t{%2, %1, %0%{%4%}|%0%{%4%}, %3, %2}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_expand "avx512f_vmfnmadd__maskz" [(match_operand:VFH_128 0 "register_operand") (match_operand:VFH_128 1 "") (match_operand:VFH_128 2 "") (match_operand:VFH_128 3 "") (match_operand:QI 4 "register_operand")] "TARGET_AVX512F" { emit_insn (gen_avx512f_vmfnmadd__maskz_1 ( operands[0], operands[1], operands[2], operands[3], CONST0_RTX (mode), operands[4])); DONE; }) (define_insn "avx512f_vmfnmadd__maskz_1" [(set (match_operand:VFH_128 0 "register_operand" "=v,v") (vec_merge:VFH_128 (vec_merge:VFH_128 (fma:VFH_128 (neg:VFH_128 (match_operand:VFH_128 2 "" ",v")) (match_operand:VFH_128 1 "register_operand" "0,0") (match_operand:VFH_128 3 "" "v,")) (match_operand:VFH_128 4 "const0_operand" "C,C") (match_operand:QI 5 "register_operand" "Yk,Yk")) (match_dup 1) (const_int 1)))] "TARGET_AVX512F" "@ vfnmadd132\t{%2, %3, %0%{%5%}%{z%}|%0%{%5%}%{z%}, %3, %2} vfnmadd213\t{%3, %2, %0%{%5%}%{z%}|%0%{%5%}%{z%}, %2, %3}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "*avx512f_vmfnmsub__mask" [(set (match_operand:VFH_128 0 "register_operand" "=v,v") (vec_merge:VFH_128 (vec_merge:VFH_128 (fma:VFH_128 (neg:VFH_128 (match_operand:VFH_128 2 "" ",v")) (match_operand:VFH_128 1 "register_operand" "0,0") (neg:VFH_128 (match_operand:VFH_128 3 "" "v,"))) (match_dup 1) (match_operand:QI 4 "register_operand" "Yk,Yk")) (match_dup 1) (const_int 1)))] "TARGET_AVX512F" "@ vfnmsub132\t{%2, %3, %0%{%4%}|%0%{%4%}, %3, %2} vfnmsub213\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "*avx512f_vmfnmsub__mask3" [(set (match_operand:VFH_128 0 "register_operand" "=v") (vec_merge:VFH_128 (vec_merge:VFH_128 (fma:VFH_128 (neg:VFH_128 (match_operand:VFH_128 2 "" "")) 
(match_operand:VFH_128 1 "" "%v") (neg:VFH_128 (match_operand:VFH_128 3 "register_operand" "0"))) (match_dup 3) (match_operand:QI 4 "register_operand" "Yk")) (match_dup 3) (const_int 1)))] "TARGET_AVX512F" "vfnmsub231\t{%2, %1, %0%{%4%}|%0%{%4%}, %3, %2}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "*avx512f_vmfnmsub__maskz_1" [(set (match_operand:VFH_128 0 "register_operand" "=v,v") (vec_merge:VFH_128 (vec_merge:VFH_128 (fma:VFH_128 (neg:VFH_128 (match_operand:VFH_128 2 "" ",v")) (match_operand:VFH_128 1 "register_operand" "0,0") (neg:VFH_128 (match_operand:VFH_128 3 "" "v,"))) (match_operand:VFH_128 4 "const0_operand" "C,C") (match_operand:QI 5 "register_operand" "Yk,Yk")) (match_dup 1) (const_int 1)))] "TARGET_AVX512F" "@ vfnmsub132\t{%2, %3, %0%{%5%}%{z%}|%0%{%5%}%{z%}, %3, %2} vfnmsub213\t{%3, %2, %0%{%5%}%{z%}|%0%{%5%}%{z%}, %2, %3}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) ;; FMA4 floating point scalar intrinsics. These write the ;; entire destination register, with the high-order elements zeroed. 
(define_expand "fma4i_vmfmadd_" [(set (match_operand:VF_128 0 "register_operand") (vec_merge:VF_128 (fma:VF_128 (match_operand:VF_128 1 "nonimmediate_operand") (match_operand:VF_128 2 "nonimmediate_operand") (match_operand:VF_128 3 "nonimmediate_operand")) (match_dup 4) (const_int 1)))] "TARGET_FMA4" "operands[4] = CONST0_RTX (mode);") (define_insn "*fma4i_vmfmadd_" [(set (match_operand:VF_128 0 "register_operand" "=x,x") (vec_merge:VF_128 (fma:VF_128 (match_operand:VF_128 1 "nonimmediate_operand" "%x,x") (match_operand:VF_128 2 "nonimmediate_operand" " x,m") (match_operand:VF_128 3 "nonimmediate_operand" "xm,x")) (match_operand:VF_128 4 "const0_operand") (const_int 1)))] "TARGET_FMA4" "vfmadd\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "*fma4i_vmfmsub_" [(set (match_operand:VF_128 0 "register_operand" "=x,x") (vec_merge:VF_128 (fma:VF_128 (match_operand:VF_128 1 "nonimmediate_operand" "%x,x") (match_operand:VF_128 2 "nonimmediate_operand" " x,m") (neg:VF_128 (match_operand:VF_128 3 "nonimmediate_operand" "xm,x"))) (match_operand:VF_128 4 "const0_operand") (const_int 1)))] "TARGET_FMA4" "vfmsub\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "*fma4i_vmfnmadd_" [(set (match_operand:VF_128 0 "register_operand" "=x,x") (vec_merge:VF_128 (fma:VF_128 (neg:VF_128 (match_operand:VF_128 1 "nonimmediate_operand" "%x,x")) (match_operand:VF_128 2 "nonimmediate_operand" " x,m") (match_operand:VF_128 3 "nonimmediate_operand" "xm,x")) (match_operand:VF_128 4 "const0_operand") (const_int 1)))] "TARGET_FMA4" "vfnmadd\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn "*fma4i_vmfnmsub_" [(set (match_operand:VF_128 0 "register_operand" "=x,x") (vec_merge:VF_128 (fma:VF_128 (neg:VF_128 (match_operand:VF_128 1 "nonimmediate_operand" "%x,x")) (match_operand:VF_128 2 "nonimmediate_operand" " x,m") (neg:VF_128 
(match_operand:VF_128 3 "nonimmediate_operand" "xm,x"))) (match_operand:VF_128 4 "const0_operand") (const_int 1)))] "TARGET_FMA4" "vfnmsub\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; Complex type operations ;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define_int_iterator UNSPEC_COMPLEX_F_C_MA [UNSPEC_COMPLEX_FMA UNSPEC_COMPLEX_FCMA]) (define_int_iterator UNSPEC_COMPLEX_F_C_MA_PAIR [UNSPEC_COMPLEX_FMA_PAIR UNSPEC_COMPLEX_FCMA_PAIR]) (define_int_iterator UNSPEC_COMPLEX_F_C_MUL [UNSPEC_COMPLEX_FMUL UNSPEC_COMPLEX_FCMUL]) (define_int_attr complexopname [(UNSPEC_COMPLEX_FMA "fmaddc") (UNSPEC_COMPLEX_FCMA "fcmaddc") (UNSPEC_COMPLEX_FMUL "fmulc") (UNSPEC_COMPLEX_FCMUL "fcmulc")]) (define_int_attr complexpairopname [(UNSPEC_COMPLEX_FMA_PAIR "fmaddc") (UNSPEC_COMPLEX_FCMA_PAIR "fcmaddc")]) (define_int_attr conj_op [(UNSPEC_COMPLEX_FMA "") (UNSPEC_COMPLEX_FCMA "_conj") (UNSPEC_COMPLEX_FMUL "") (UNSPEC_COMPLEX_FCMUL "_conj")]) (define_mode_attr complexmove [(V32HF "avx512f_loadv16sf") (V16HF "avx512vl_loadv8sf") (V8HF "avx512vl_loadv4sf")]) (define_expand "_fmaddc__mask1" [(match_operand:VF_AVX512FP16VL 0 "register_operand") (match_operand:VF_AVX512FP16VL 1 "") (match_operand:VF_AVX512FP16VL 2 "") (match_operand:VF_AVX512FP16VL 3 "") (match_operand: 4 "register_operand")] "TARGET_AVX512FP16 && " { rtx op0, op1; if () emit_insn (gen__fmaddc__mask ( operands[0], operands[1], operands[2], operands[3], operands[4])); else emit_insn (gen__fmaddc__mask (operands[0], operands[1], operands[2], operands[3], operands[4])); op0 = lowpart_subreg (mode, operands[0], mode); op1 = lowpart_subreg (mode, operands[1], mode); emit_insn (gen__mask (op0, op0, op1, operands[4])); DONE; }) (define_expand "_fmaddc__maskz" [(match_operand:VF_AVX512FP16VL 0 "register_operand") (match_operand:VF_AVX512FP16VL 1 "") (match_operand:VF_AVX512FP16VL 2 "") 
(match_operand:VF_AVX512FP16VL 3 "") (match_operand: 4 "register_operand")] "TARGET_AVX512FP16 && " { emit_insn (gen_fma_fmaddc__maskz_1 ( operands[0], operands[1], operands[2], operands[3], CONST0_RTX (mode), operands[4])); DONE; }) (define_expand "_fcmaddc__mask1" [(match_operand:VF_AVX512FP16VL 0 "register_operand") (match_operand:VF_AVX512FP16VL 1 "") (match_operand:VF_AVX512FP16VL 2 "") (match_operand:VF_AVX512FP16VL 3 "") (match_operand: 4 "register_operand")] "TARGET_AVX512FP16 && " { rtx op0, op1; if () emit_insn (gen__fcmaddc__mask ( operands[0], operands[1], operands[2], operands[3], operands[4])); else { emit_insn (gen__fcmaddc__mask (operands[0], operands[1], operands[2], operands[3], operands[4])); } op0 = lowpart_subreg (mode, operands[0], mode); op1 = lowpart_subreg (mode, operands[1], mode); emit_insn (gen__mask (op0, op0, op1, operands[4])); DONE; }) (define_expand "_fcmaddc__maskz" [(match_operand:VF_AVX512FP16VL 0 "register_operand") (match_operand:VF_AVX512FP16VL 1 "") (match_operand:VF_AVX512FP16VL 2 "") (match_operand:VF_AVX512FP16VL 3 "") (match_operand: 4 "register_operand")] "TARGET_AVX512FP16 && " { emit_insn (gen_fma_fcmaddc__maskz_1 ( operands[0], operands[1], operands[2], operands[3], CONST0_RTX (mode), operands[4])); DONE; }) (define_expand "cmla4" [(set (match_operand:VF_AVX512FP16VL 0 "register_operand") (unspec:VF_AVX512FP16VL [(match_operand:VF_AVX512FP16VL 1 "vector_operand") (match_operand:VF_AVX512FP16VL 2 "vector_operand") (match_operand:VF_AVX512FP16VL 3 "vector_operand")] UNSPEC_COMPLEX_F_C_MA))] "TARGET_AVX512FP16") (define_insn "fma__" [(set (match_operand:VF_AVX512FP16VL 0 "register_operand" "=&v") (unspec:VF_AVX512FP16VL [(match_operand:VF_AVX512FP16VL 1 "" "%v") (match_operand:VF_AVX512FP16VL 2 "" "") (match_operand:VF_AVX512FP16VL 3 "" "0")] UNSPEC_COMPLEX_F_C_MA))] "TARGET_AVX512FP16 && && " "v\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_insn_and_split "fma__fadd_fmul" [(set 
(match_operand:VF_AVX512FP16VL 0 "register_operand") (plus:VF_AVX512FP16VL (unspec:VF_AVX512FP16VL [(match_operand:VF_AVX512FP16VL 1 "vector_operand") (match_operand:VF_AVX512FP16VL 2 "vector_operand")] UNSPEC_COMPLEX_FMUL) (match_operand:VF_AVX512FP16VL 3 "vector_operand")))] "TARGET_AVX512FP16 && flag_unsafe_math_optimizations && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (unspec:VF_AVX512FP16VL [(match_dup 1) (match_dup 2) (match_dup 3)] UNSPEC_COMPLEX_FMA))]) (define_insn_and_split "fma__fadd_fcmul" [(set (match_operand:VF_AVX512FP16VL 0 "register_operand") (plus:VF_AVX512FP16VL (unspec:VF_AVX512FP16VL [(match_operand:VF_AVX512FP16VL 1 "vector_operand") (match_operand:VF_AVX512FP16VL 2 "vector_operand")] UNSPEC_COMPLEX_FCMUL) (match_operand:VF_AVX512FP16VL 3 "vector_operand")))] "TARGET_AVX512FP16 && flag_unsafe_math_optimizations && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (unspec:VF_AVX512FP16VL [(match_dup 1) (match_dup 2) (match_dup 3)] UNSPEC_COMPLEX_FCMA))]) (define_insn_and_split "fma___fma_zero" [(set (match_operand:VF_AVX512FP16VL 0 "register_operand") (plus:VF_AVX512FP16VL (unspec:VF_AVX512FP16VL [(match_operand:VF_AVX512FP16VL 1 "vector_operand") (match_operand:VF_AVX512FP16VL 2 "vector_operand") (match_operand:VF_AVX512FP16VL 3 "const0_operand")] UNSPEC_COMPLEX_F_C_MA) (match_operand:VF_AVX512FP16VL 4 "vector_operand")))] "TARGET_AVX512FP16 && flag_unsafe_math_optimizations && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (unspec:VF_AVX512FP16VL [(match_dup 1) (match_dup 2) (match_dup 4)] UNSPEC_COMPLEX_F_C_MA))]) (define_insn "fma___pair" [(set (match_operand:VF1_AVX512VL 0 "register_operand" "=&v") (unspec:VF1_AVX512VL [(match_operand:VF1_AVX512VL 1 "vector_operand" "%v") (match_operand:VF1_AVX512VL 2 "bcst_vector_operand" "vmBr") (match_operand:VF1_AVX512VL 3 "vector_operand" "0")] UNSPEC_COMPLEX_F_C_MA_PAIR))] "TARGET_AVX512FP16" "vph\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "ssemuladd")]) 
(define_insn_and_split "fma__fmaddc_bcst" [(set (match_operand:VF_AVX512FP16VL 0 "register_operand") (unspec:VF_AVX512FP16VL [(match_operand:VF_AVX512FP16VL 1 "vector_operand") (subreg:VF_AVX512FP16VL (match_operand: 2 "bcst_vector_operand") 0) (match_operand:VF_AVX512FP16VL 3 "vector_operand")] UNSPEC_COMPLEX_FMA))] "TARGET_AVX512FP16" "#" "&& 1" [(set (match_dup 0) (unspec: [(match_dup 1) (match_dup 2) (match_dup 3)] UNSPEC_COMPLEX_FMA_PAIR))] { operands[0] = lowpart_subreg (mode, operands[0], mode); operands[1] = lowpart_subreg (mode, operands[1], mode); operands[3] = lowpart_subreg (mode, operands[3], mode); }) (define_insn_and_split "fma__fcmaddc_bcst" [(set (match_operand:VF_AVX512FP16VL 0 "register_operand") (unspec:VF_AVX512FP16VL [(match_operand:VF_AVX512FP16VL 1 "vector_operand") (subreg:VF_AVX512FP16VL (match_operand: 2 "bcst_vector_operand") 0) (match_operand:VF_AVX512FP16VL 3 "vector_operand")] UNSPEC_COMPLEX_FCMA))] "TARGET_AVX512FP16" "#" "&& 1" [(set (match_dup 0) (unspec: [(match_dup 1) (match_dup 2) (match_dup 3)] UNSPEC_COMPLEX_FCMA_PAIR))] { operands[0] = lowpart_subreg (mode, operands[0], mode); operands[1] = lowpart_subreg (mode, operands[1], mode); operands[3] = lowpart_subreg (mode, operands[3], mode); }) (define_insn "___mask" [(set (match_operand:VF_AVX512FP16VL 0 "register_operand" "=&v") (vec_merge:VF_AVX512FP16VL (unspec:VF_AVX512FP16VL [(match_operand:VF_AVX512FP16VL 1 "nonimmediate_operand" "%v") (match_operand:VF_AVX512FP16VL 2 "nonimmediate_operand" "") (match_operand:VF_AVX512FP16VL 3 "register_operand" "0")] UNSPEC_COMPLEX_F_C_MA) (match_dup 1) (unspec: [(match_operand: 4 "register_operand" "Yk")] UNSPEC_COMPLEX_MASK)))] "TARGET_AVX512FP16 && " "v\t{%2, %1, %0%{%4%}|%0%{%4%}, %1, %2}" [(set_attr "type" "ssemuladd") (set_attr "mode" "")]) (define_expand "cmul3" [(set (match_operand:VF_AVX512FP16VL 0 "register_operand") (unspec:VF_AVX512FP16VL [(match_operand:VF_AVX512FP16VL 1 "vector_operand") (match_operand:VF_AVX512FP16VL 2 
"vector_operand")] UNSPEC_COMPLEX_F_C_MUL))] "TARGET_AVX512FP16") (define_insn "__" [(set (match_operand:VF_AVX512FP16VL 0 "register_operand" "=&v") (unspec:VF_AVX512FP16VL [(match_operand:VF_AVX512FP16VL 1 "nonimmediate_operand" "%v") (match_operand:VF_AVX512FP16VL 2 "nonimmediate_operand" "")] UNSPEC_COMPLEX_F_C_MUL))] "TARGET_AVX512FP16 && " { if (TARGET_DEST_FALSE_DEP_FOR_GLC && ) output_asm_insn ("vxorps\t%x0, %x0, %x0", operands); return "v\t{%2, %1, %0|%0, %1, %2}"; } [(set_attr "type" "ssemul") (set_attr "mode" "")]) (define_expand "avx512fp16_fmaddcsh_v8hf_maskz" [(match_operand:V8HF 0 "register_operand") (match_operand:V8HF 1 "") (match_operand:V8HF 2 "") (match_operand:V8HF 3 "") (match_operand:QI 4 "register_operand")] "TARGET_AVX512FP16 && " { emit_insn (gen_avx512fp16_fma_fmaddcsh_v8hf_maskz ( operands[0], operands[1], operands[2], operands[3], CONST0_RTX (V8HFmode), operands[4])); DONE; }) (define_expand "avx512fp16_fmaddcsh_v8hf_mask1" [(match_operand:V8HF 0 "register_operand") (match_operand:V8HF 1 "") (match_operand:V8HF 2 "") (match_operand:V8HF 3 "") (match_operand:QI 4 "register_operand")] "TARGET_AVX512FP16 && " { rtx op0, op1; if () emit_insn (gen_avx512fp16_fmaddcsh_v8hf_mask ( operands[0], operands[1], operands[2], operands[3], operands[4])); else emit_insn (gen_avx512fp16_fmaddcsh_v8hf_mask (operands[0], operands[1], operands[2], operands[3], operands[4])); if (TARGET_AVX512VL) { op0 = lowpart_subreg (V4SFmode, operands[0], V8HFmode); op1 = lowpart_subreg (V4SFmode, operands[1], V8HFmode); emit_insn (gen_avx512vl_loadv4sf_mask (op0, op0, op1, operands[4])); } else { rtx mask, tmp, vec_mask; mask = lowpart_subreg (SImode, operands[4], QImode), tmp = gen_reg_rtx (SImode); emit_insn (gen_ashlsi3 (tmp, mask, GEN_INT (31))); vec_mask = gen_reg_rtx (V4SImode); emit_insn (gen_rtx_SET (vec_mask, CONST0_RTX (V4SImode))); emit_insn (gen_vec_setv4si_0 (vec_mask, vec_mask, tmp)); vec_mask = lowpart_subreg (V4SFmode, vec_mask, V4SImode); op0 = 
lowpart_subreg (V4SFmode, operands[0], V8HFmode); op1 = lowpart_subreg (V4SFmode, operands[1], V8HFmode); emit_insn (gen_sse4_1_blendvps (op0, op1, op0, vec_mask)); } DONE; }) (define_expand "avx512fp16_fcmaddcsh_v8hf_maskz" [(match_operand:V8HF 0 "register_operand") (match_operand:V8HF 1 "") (match_operand:V8HF 2 "") (match_operand:V8HF 3 "") (match_operand:QI 4 "register_operand")] "TARGET_AVX512FP16 && " { emit_insn (gen_avx512fp16_fma_fcmaddcsh_v8hf_maskz ( operands[0], operands[1], operands[2], operands[3], CONST0_RTX (V8HFmode), operands[4])); DONE; }) (define_expand "avx512fp16_fcmaddcsh_v8hf_mask1" [(match_operand:V8HF 0 "register_operand") (match_operand:V8HF 1 "") (match_operand:V8HF 2 "") (match_operand:V8HF 3 "") (match_operand:QI 4 "register_operand")] "TARGET_AVX512FP16 && " { rtx op0, op1; if () emit_insn (gen_avx512fp16_fcmaddcsh_v8hf_mask ( operands[0], operands[1], operands[2], operands[3], operands[4])); else emit_insn (gen_avx512fp16_fcmaddcsh_v8hf_mask (operands[0], operands[1], operands[2], operands[3], operands[4])); if (TARGET_AVX512VL) { op0 = lowpart_subreg (V4SFmode, operands[0], V8HFmode); op1 = lowpart_subreg (V4SFmode, operands[1], V8HFmode); emit_insn (gen_avx512vl_loadv4sf_mask (op0, op0, op1, operands[4])); } else { rtx mask, tmp, vec_mask; mask = lowpart_subreg (SImode, operands[4], QImode), tmp = gen_reg_rtx (SImode); emit_insn (gen_ashlsi3 (tmp, mask, GEN_INT (31))); vec_mask = gen_reg_rtx (V4SImode); emit_insn (gen_rtx_SET (vec_mask, CONST0_RTX (V4SImode))); emit_insn (gen_vec_setv4si_0 (vec_mask, vec_mask, tmp)); vec_mask = lowpart_subreg (V4SFmode, vec_mask, V4SImode); op0 = lowpart_subreg (V4SFmode, operands[0], V8HFmode); op1 = lowpart_subreg (V4SFmode, operands[1], V8HFmode); emit_insn (gen_sse4_1_blendvps (op0, op1, op0, vec_mask)); } DONE; }) (define_expand "avx512fp16_fcmaddcsh_v8hf_mask3" [(match_operand:V8HF 0 "register_operand") (match_operand:V8HF 1 "") (match_operand:V8HF 2 "") (match_operand:V8HF 3 "") 
(match_operand:QI 4 "register_operand")] "TARGET_AVX512FP16 && " { rtx op0, op1; if () emit_insn (gen_avx512fp16_fcmaddcsh_v8hf_mask ( operands[0], operands[1], operands[2], operands[3], operands[4])); else emit_insn (gen_avx512fp16_fcmaddcsh_v8hf_mask (operands[0], operands[1], operands[2], operands[3], operands[4])); op0 = lowpart_subreg (V4SFmode, operands[0], V8HFmode); op1 = lowpart_subreg (V4SFmode, operands[3], V8HFmode); emit_insn (gen_sse_movss (op0, op1, op0)); DONE; }) (define_expand "avx512fp16_fmaddcsh_v8hf_mask3" [(match_operand:V8HF 0 "register_operand") (match_operand:V8HF 1 "") (match_operand:V8HF 2 "") (match_operand:V8HF 3 "") (match_operand:QI 4 "register_operand")] "TARGET_AVX512FP16 && " { rtx op0, op1; if () emit_insn (gen_avx512fp16_fmaddcsh_v8hf_mask ( operands[0], operands[1], operands[2], operands[3], operands[4])); else emit_insn (gen_avx512fp16_fmaddcsh_v8hf_mask (operands[0], operands[1], operands[2], operands[3], operands[4])); op0 = lowpart_subreg (V4SFmode, operands[0], V8HFmode); op1 = lowpart_subreg (V4SFmode, operands[3], V8HFmode); emit_insn (gen_sse_movss (op0, op1, op0)); DONE; }) (define_insn "avx512fp16_fma_sh_v8hf" [(set (match_operand:V8HF 0 "register_operand" "=&v") (vec_merge:V8HF (unspec:V8HF [(match_operand:V8HF 1 "" "v") (match_operand:V8HF 2 "" "") (match_operand:V8HF 3 "" "0")] UNSPEC_COMPLEX_F_C_MA) (match_dup 2) (const_int 3)))] "TARGET_AVX512FP16" "vsh\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "ssemuladd") (set_attr "mode" "V8HF")]) (define_insn "avx512fp16_sh_v8hf_mask" [(set (match_operand:V8HF 0 "register_operand" "=&v") (vec_merge:V8HF (vec_merge:V8HF (unspec:V8HF [(match_operand:V8HF 1 "" "v") (match_operand:V8HF 2 "" "") (match_operand:V8HF 3 "" "0")] UNSPEC_COMPLEX_F_C_MA) (match_dup 1) (unspec:QI [(match_operand:QI 4 "register_operand" "Yk")] UNSPEC_COMPLEX_MASK)) (match_dup 2) (const_int 3)))] "TARGET_AVX512FP16" "vsh\t{%2, %1, %0%{%4%}|%0%{%4%}, %1, %2}" [(set_attr "type" "ssemuladd") (set_attr "mode" 
"V8HF")]) (define_insn "avx512fp16_sh_v8hf" [(set (match_operand:V8HF 0 "register_operand" "=&v") (vec_merge:V8HF (unspec:V8HF [(match_operand:V8HF 1 "nonimmediate_operand" "v") (match_operand:V8HF 2 "" "")] UNSPEC_COMPLEX_F_C_MUL) (match_dup 1) (const_int 3)))] "TARGET_AVX512FP16" { if (TARGET_DEST_FALSE_DEP_FOR_GLC && ) output_asm_insn ("vxorps\t%x0, %x0, %x0", operands); return "vsh\t{%2, %1, %0|%0, %1, %2}"; } [(set_attr "type" "ssemul") (set_attr "mode" "V8HF")]) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; Parallel half-precision floating point conversion operations ;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define_int_iterator UNSPEC_US_FIX_NOTRUNC [UNSPEC_UNSIGNED_FIX_NOTRUNC UNSPEC_FIX_NOTRUNC]) (define_int_attr sseintconvertsignprefix [(UNSPEC_UNSIGNED_FIX_NOTRUNC "u") (UNSPEC_FIX_NOTRUNC "")]) (define_mode_attr qq2phsuff [(V32HI "") (V16HI "") (V8HI "") (V16SI "") (V8SI "{y}") (V4SI "{x}") (V8DI "{z}") (V4DI "{y}") (V2DI "{x}") (V16SF "") (V8SF "{y}") (V4SF "{x}") (V8DF "{z}") (V4DF "{y}") (V2DF "{x}")]) (define_insn "avx512fp16_vcvtph2_" [(set (match_operand:VI248_AVX512VL 0 "register_operand" "=v") (unspec:VI248_AVX512VL [(match_operand: 1 "" "")] UNSPEC_US_FIX_NOTRUNC))] "TARGET_AVX512FP16" "vcvtph2\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "float2" [(set (match_operand: 0 "register_operand") (any_float: (match_operand:VI2H_AVX512VL 1 "nonimmediate_operand")))] "TARGET_AVX512FP16") (define_insn "avx512fp16_vcvt2ph_" [(set (match_operand: 0 "register_operand" "=v") (any_float: (match_operand:VI2H_AVX512VL 1 "" "")))] "TARGET_AVX512FP16" "vcvt2ph\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "floatv4hf2" [(set (match_operand:V4HF 0 "register_operand") (any_float:V4HF (match_operand:VI4_128_8_256 1 "vector_operand")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" { 
operands[0] = lowpart_subreg (V8HFmode, operands[0], V4HFmode); emit_insn (gen_avx512fp16_floatv4hf2 (operands[0], operands[1])); DONE; })

;; NOTE(review): throughout this chunk the angle-bracket iterator/attribute
;; references (e.g. "<mode>", "<castmode>", "<round_constraint>") appear to
;; have been stripped by text extraction, leaving truncated pattern names
;; such as "*avx512fp16_vcvt2ph_", truncated mnemonics such as "vcvt2ph",
;; and empty strings such as "mode" "".  The code is preserved byte-for-byte
;; below; verify every name against upstream gcc/config/i386/sse.md before
;; editing anything here.

;; Expand helper: convert an integer vector (VI4_128_8_256 iterator) to four
;; HF elements and zero-pad them into the low half of a V8HF destination.
(define_expand "avx512fp16_floatv4hf2" [(set (match_operand:V8HF 0 "register_operand") (vec_concat:V8HF (any_float:V4HF (match_operand:VI4_128_8_256 1 "vector_operand")) (match_dup 2)))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "operands[2] = CONST0_RTX (V4HFmode);")

;; Unmasked insn matching the expand above (high V4HF half forced to zero).
(define_insn "*avx512fp16_vcvt2ph_" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_concat:V8HF (any_float:V4HF (match_operand:VI4_128_8_256 1 "vector_operand" "vm")) (match_operand:V4HF 2 "const0_operand" "C")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "vcvt2ph\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

;; Merge-masked variant: converted lanes merged with the low half of
;; operand 2 under mask operand 3 (mask register constraint "Yk").
(define_expand "avx512fp16_vcvt2ph__mask" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_concat:V8HF (vec_merge:V4HF (any_float:V4HF (match_operand:VI4_128_8_256 1 "vector_operand" "vm")) (vec_select:V4HF (match_operand:V8HF 2 "nonimm_or_0_operand" "0C") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)])) (match_operand:QI 3 "register_operand" "Yk")) (match_dup 4)))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "operands[4] = CONST0_RTX (V4HFmode);")

(define_insn "*avx512fp16_vcvt2ph__mask" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_concat:V8HF (vec_merge:V4HF (any_float:V4HF (match_operand:VI4_128_8_256 1 "vector_operand" "vm")) (vec_select:V4HF (match_operand:V8HF 2 "nonimm_or_0_operand" "0C") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)])) (match_operand:QI 3 "register_operand" "Yk")) (match_operand:V4HF 4 "const0_operand" "C")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "vcvt2ph\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

;; Zero-masked (_mask_1) variant: masked-off lanes are cleared, per the
;; {z} modifier in the output template.
(define_insn "*avx512fp16_vcvt2ph__mask_1" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_concat:V8HF (vec_merge:V4HF (any_float:V4HF (match_operand:VI4_128_8_256 1 "vector_operand" "vm")) (match_operand:V4HF 3 "const0_operand" "C") (match_operand:QI 2 "register_operand" "Yk")) (match_operand:V4HF 4 "const0_operand" "C")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "vcvt2ph\t{%1, %0%{%2%}%{z%}|%0%{%2%}%{z%}, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

;; V2DI -> V2HF conversions (vcvtqq2ph{x}): result occupies the low two HF
;; lanes; the remaining six lanes are zeroed via a V6HF zero vec_concat.
(define_expand "floatv2div2hf2" [(set (match_operand:V2HF 0 "register_operand") (any_float:V2HF (match_operand:V2DI 1 "vector_operand")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" { operands[0] = lowpart_subreg (V8HFmode, operands[0], V2HFmode); emit_insn (gen_avx512fp16_floatv2div2hf2 (operands[0], operands[1])); DONE; })

(define_expand "avx512fp16_floatv2div2hf2" [(set (match_operand:V8HF 0 "register_operand") (vec_concat:V8HF (any_float:V2HF (match_operand:V2DI 1 "vector_operand")) (match_dup 2)))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "operands[2] = CONST0_RTX (V6HFmode);")

(define_insn "*avx512fp16_vcvtqq2ph_v2di" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_concat:V8HF (any_float:V2HF (match_operand:V2DI 1 "vector_operand" "vm")) (match_operand:V6HF 2 "const0_operand" "C")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "vcvtqq2ph{x}\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")])

;; Merge-masked vcvtqq2ph.
(define_expand "avx512fp16_vcvtqq2ph_v2di_mask" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_concat:V8HF (vec_merge:V2HF (any_float:V2HF (match_operand:V2DI 1 "vector_operand" "vm")) (vec_select:V2HF (match_operand:V8HF 2 "nonimm_or_0_operand" "0C") (parallel [(const_int 0) (const_int 1)])) (match_operand:QI 3 "register_operand" "Yk")) (match_dup 4)))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "operands[4] = CONST0_RTX (V6HFmode);")

(define_insn "*avx512fp16_vcvtqq2ph_v2di_mask" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_concat:V8HF (vec_merge:V2HF (any_float:V2HF (match_operand:V2DI 1 "vector_operand" "vm")) (vec_select:V2HF (match_operand:V8HF 2 "nonimm_or_0_operand" "0C") (parallel [(const_int 0) (const_int 1)])) (match_operand:QI 3 "register_operand" "Yk")) (match_operand:V6HF 4 "const0_operand" "C")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "vcvtqq2ph{x}\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")])

;; Zero-masked vcvtqq2ph.
(define_insn "*avx512fp16_vcvtqq2ph_v2di_mask_1" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_concat:V8HF (vec_merge:V2HF (any_float:V2HF (match_operand:V2DI 1 "vector_operand" "vm")) (match_operand:V2HF 3 "const0_operand" "C") (match_operand:QI 2 "register_operand" "Yk")) (match_operand:V6HF 4 "const0_operand" "C")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "vcvtqq2ph{x}\t{%1, %0%{%2%}%{z%}|%0%{%2%}%{z%}, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")])

;; Scalar HF -> SI/DI conversion (non-truncating; UNSPEC_US_FIX_NOTRUNC
;; suggests the unsigned form -- confirm against upstream, given the
;; stripped-iterator damage noted above): element 0 of a V8HF register.
(define_insn "avx512fp16_vcvtsh2si" [(set (match_operand:SWI48 0 "register_operand" "=r") (unspec:SWI48 [(vec_select:HF (match_operand:V8HF 1 "register_operand" "v") (parallel [(const_int 0)]))] UNSPEC_US_FIX_NOTRUNC))] "TARGET_AVX512FP16" "vcvtsh2si\t{%1, %0|%0, %1}" [(set_attr "type" "sseicvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

;; Same conversion from a scalar HF operand (register or memory).
(define_insn "avx512fp16_vcvtsh2si_2" [(set (match_operand:SWI48 0 "register_operand" "=r,r") (unspec:SWI48 [(match_operand:HF 1 "nonimmediate_operand" "v,m")] UNSPEC_US_FIX_NOTRUNC))] "TARGET_AVX512FP16" "vcvtsh2si\t{%1, %0|%0, %1}" [(set_attr "type" "sseicvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

;; AT&T-style operand-size suffix for scalar int-convert insns.
(define_mode_attr sseicvtsuffix [(SI "l") (DI "q")])

;; Scalar SI/DI -> HF: converted value replaces element 0 of operand 1.
;; NOTE(review): operand 2's predicate/constraint strings are empty --
;; almost certainly stripped "<round_nimm_scalar_predicate>"-style text.
(define_insn "avx512fp16_vcvtsi2sh" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_merge:V8HF (vec_duplicate:V8HF (any_float:HF (match_operand:SWI48 2 "" ""))) (match_operand:V8HF 1 "register_operand" "v") (const_int 1)))] "TARGET_AVX512FP16" "vcvtsi2sh{}\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sseicvt") (set_attr "prefix" "evex") (set_attr "mode" "HF")])

;; HF vector -> integer vector truncating conversion, VI2H_AVX512VL
;; destination iterator (continues on the next chunk line).
(define_expand "fix_trunc2" [(set (match_operand:VI2H_AVX512VL 0
"register_operand") (any_fix:VI2H_AVX512VL (match_operand: 1 "nonimmediate_operand")))] "TARGET_AVX512FP16")

;; vcvttph2* -- truncating HF -> integer vector conversions.
;; NOTE(review): source-operand mode and several predicate strings below are
;; empty; the original "<ssePHmode>"/"<round_saeonly_*>" style references
;; were stripped by extraction.  Preserved verbatim.
(define_insn "avx512fp16_fix_trunc2" [(set (match_operand:VI2H_AVX512VL 0 "register_operand" "=v") (any_fix:VI2H_AVX512VL (match_operand: 1 "" "")))] "TARGET_AVX512FP16" "vcvttph2\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

;; V4HF source: for register inputs, widen to V8HF and use the vec_select
;; pattern; memory inputs fall through to the _load insn further below.
(define_expand "fix_truncv4hf2" [(set (match_operand:VI4_128_8_256 0 "register_operand") (any_fix:VI4_128_8_256 (match_operand:V4HF 1 "nonimmediate_operand")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" { if (!MEM_P (operands[1])) { operands[1] = lowpart_subreg (V8HFmode, operands[1], V4HFmode); emit_insn (gen_avx512fp16_fix_trunc2 (operands[0], operands[1])); DONE; } })

;; Register form: converts the low four HF lanes of a V8HF register.
(define_insn "avx512fp16_fix_trunc2" [(set (match_operand:VI4_128_8_256 0 "register_operand" "=v") (any_fix:VI4_128_8_256 (vec_select:V4HF (match_operand:V8HF 1 "register_operand" "v") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)]))))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "vcvttph2\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

;; Memory form: loads 64 bits (%q1) directly from memory.
(define_insn "*avx512fp16_fix_trunc2_load" [(set (match_operand:VI4_128_8_256 0 "register_operand" "=v") (any_fix:VI4_128_8_256 (match_operand:V4HF 1 "memory_operand" "m")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "vcvttph2\t{%1, %0|%0, %q1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

;; V2HF -> V2DI truncating conversion (vcvttph2qq), same register/memory
;; split as the V4HF case above.
(define_expand "fix_truncv2hfv2di2" [(set (match_operand:V2DI 0 "register_operand") (any_fix:V2DI (match_operand:V2HF 1 "nonimmediate_operand")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" { if (!MEM_P (operands[1])) { operands[1] = lowpart_subreg (V8HFmode, operands[1], V2HFmode); emit_insn (gen_avx512fp16_fix_truncv2di2 (operands[0], operands[1])); DONE; } })

(define_insn "avx512fp16_fix_truncv2di2" [(set (match_operand:V2DI 0 "register_operand" "=v") (any_fix:V2DI (vec_select:V2HF (match_operand:V8HF 1 "nonimmediate_operand" "v") (parallel [(const_int 0) (const_int 1)]))))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "vcvttph2qq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")])

;; Memory form loads 32 bits (%k1).
(define_insn "*avx512fp16_fix_truncv2di2_load" [(set (match_operand:V2DI 0 "register_operand" "=v") (any_fix:V2DI (match_operand:V2HF 1 "memory_operand" "m")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "vcvttph2qq\t{%1, %0|%0, %k1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")])

;; Scalar truncating HF -> SI/DI (vcvttsh2si / vcvttsh2usi per any_fix).
(define_insn "avx512fp16_fix_trunc2" [(set (match_operand:SWI48 0 "register_operand" "=r") (any_fix:SWI48 (vec_select:HF (match_operand:V8HF 1 "register_operand" "v") (parallel [(const_int 0)]))))] "TARGET_AVX512FP16" "%vcvttsh2si\t{%1, %0|%0, %k1}" [(set_attr "type" "sseicvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

(define_insn "avx512fp16_fix_trunc2_mem" [(set (match_operand:SWI48 0 "register_operand" "=r") (any_fix:SWI48 (match_operand:HF 1 "memory_operand" "vm")))] "TARGET_AVX512FP16" "%vcvttsh2si\t{%1, %0|%0, %1}" [(set_attr "type" "sseicvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

;; Mnemonic suffix for ph<->ps/pd conversions ("x" for SF destinations,
;; empty for DF destinations).
(define_mode_attr ph2pssuffix [(V16SF "x") (V8SF "x") (V4SF "x") (V8DF "") (V4DF "") (V2DF "")])

;; HF -> SF/DF widening conversions (vcvtph2*).
(define_expand "extend2" [(set (match_operand:VF48H_AVX512VL 0 "register_operand") (float_extend:VF48H_AVX512VL (match_operand: 1 "nonimmediate_operand")))] "TARGET_AVX512FP16")

(define_insn "avx512fp16_float_extend_ph2" [(set (match_operand:VF48H_AVX512VL 0 "register_operand" "=v") (float_extend:VF48H_AVX512VL (match_operand: 1 "" "")))] "TARGET_AVX512FP16" "vcvtph2\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

;; V4HF source widening: register inputs go through the V8HF vec_select
;; insn; memory inputs use the _load variant.
(define_expand "extendv4hf2" [(set (match_operand:VF4_128_8_256 0 "register_operand") (float_extend:VF4_128_8_256 (match_operand:V4HF 1 "nonimmediate_operand")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" { if (!MEM_P (operands[1])) { operands[1] = lowpart_subreg (V8HFmode, operands[1], V4HFmode); emit_insn (gen_avx512fp16_float_extend_ph2 (operands[0], operands[1])); DONE; } })

(define_insn "avx512fp16_float_extend_ph2" [(set (match_operand:VF4_128_8_256 0 "register_operand" "=v") (float_extend:VF4_128_8_256 (vec_select:V4HF (match_operand:V8HF 1 "register_operand" "v") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)]))))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "vcvtph2\t{%1, %0|%0, %q1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

(define_insn "*avx512fp16_float_extend_ph2_load" [(set (match_operand:VF4_128_8_256 0 "register_operand" "=v") (float_extend:VF4_128_8_256 (match_operand:V4HF 1 "memory_operand" "m")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "vcvtph2\t{%1, %0|%0, %q1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

;; V2HF -> V2DF widening (vcvtph2pd), same register/memory split.
(define_expand "extendv2hfv2df2" [(set (match_operand:V2DF 0 "register_operand") (float_extend:V2DF (match_operand:V2HF 1 "nonimmediate_operand")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" { if (!MEM_P (operands[1])) { operands[1] = lowpart_subreg (V8HFmode, operands[1], V2HFmode); emit_insn (gen_avx512fp16_float_extend_phv2df2 (operands[0], operands[1])); DONE; } })

(define_insn "avx512fp16_float_extend_phv2df2" [(set (match_operand:V2DF 0 "register_operand" "=v") (float_extend:V2DF (vec_select:V2HF (match_operand:V8HF 1 "register_operand" "v") (parallel [(const_int 0) (const_int 1)]))))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "vcvtph2pd\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")])

(define_insn "*avx512fp16_float_extend_phv2df2_load" [(set (match_operand:V2DF 0 "register_operand" "=v") (float_extend:V2DF (match_operand:V2HF 1 "memory_operand" "m")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "vcvtph2pd\t{%1, %0|%0, %k1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")])

;; SF/DF -> HF narrowing conversions (continues on the next chunk line).
(define_expand "trunc2" [(set (match_operand: 0 "register_operand") (float_truncate:
(match_operand:VF48H_AVX512VL 1 "nonimmediate_operand")))] "TARGET_AVX512FP16")

;; vcvt*2ph -- SF/DF vector -> HF vector narrowing.
;; NOTE(review): as elsewhere in this chunk, the "<...>" iterator text has
;; been stripped from names, mnemonics and some predicate strings; code is
;; preserved verbatim.
(define_insn "avx512fp16_vcvt2ph_" [(set (match_operand: 0 "register_operand" "=v") (float_truncate: (match_operand:VF48H_AVX512VL 1 "" "")))] "TARGET_AVX512FP16" "vcvt2ph\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

;; VF4_128_8_256 -> V4HF: result is zero-padded into a V8HF register,
;; mirroring the int->HF patterns earlier in the file.
(define_expand "truncv4hf2" [(set (match_operand:V4HF 0 "register_operand") (float_truncate:V4HF (match_operand:VF4_128_8_256 1 "vector_operand")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" { operands[0] = lowpart_subreg (V8HFmode, operands[0], V4HFmode); emit_insn (gen_avx512fp16_truncv4hf2 (operands[0], operands[1])); DONE; })

(define_expand "avx512fp16_truncv4hf2" [(set (match_operand:V8HF 0 "register_operand") (vec_concat:V8HF (float_truncate:V4HF (match_operand:VF4_128_8_256 1 "vector_operand")) (match_dup 2)))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "operands[2] = CONST0_RTX (V4HFmode);")

(define_insn "*avx512fp16_vcvt2ph_" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_concat:V8HF (float_truncate:V4HF (match_operand:VF4_128_8_256 1 "vector_operand" "vm")) (match_operand:V4HF 2 "const0_operand" "C")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "vcvt2ph\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

;; Merge-masked narrowing variant (mask operand 3, merge source operand 2).
(define_expand "avx512fp16_vcvt2ph__mask" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_concat:V8HF (vec_merge:V4HF (float_truncate:V4HF (match_operand:VF4_128_8_256 1 "vector_operand" "vm")) (vec_select:V4HF (match_operand:V8HF 2 "nonimm_or_0_operand" "0C") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)])) (match_operand:QI 3 "register_operand" "Yk")) (match_dup 4)))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "operands[4] = CONST0_RTX (V4HFmode);")

(define_insn "*avx512fp16_vcvt2ph__mask" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_concat:V8HF (vec_merge:V4HF (float_truncate:V4HF (match_operand:VF4_128_8_256 1 "vector_operand" "vm")) (vec_select:V4HF (match_operand:V8HF 2 "nonimm_or_0_operand" "0C") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)])) (match_operand:QI 3 "register_operand" "Yk")) (match_operand:V4HF 4 "const0_operand" "C")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "vcvt2ph\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

;; Zero-masked narrowing variant ({z} modifier).
(define_insn "*avx512fp16_vcvt2ph__mask_1" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_concat:V8HF (vec_merge:V4HF (float_truncate:V4HF (match_operand:VF4_128_8_256 1 "vector_operand" "vm")) (match_operand:V4HF 3 "const0_operand" "C") (match_operand:QI 2 "register_operand" "Yk")) (match_operand:V4HF 4 "const0_operand" "C")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "vcvt2ph\t{%1, %0%{%2%}%{z%}|%0%{%2%}%{z%}, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

;; V2DF -> V2HF narrowing (vcvtpd2ph{x}); low two HF lanes, rest zeroed.
(define_expand "truncv2dfv2hf2" [(set (match_operand:V2HF 0 "register_operand") (float_truncate:V2HF (match_operand:V2DF 1 "vector_operand")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" { operands[0] = lowpart_subreg (V8HFmode, operands[0], V2HFmode); emit_insn (gen_avx512fp16_truncv2dfv2hf2 (operands[0], operands[1])); DONE; })

(define_expand "avx512fp16_truncv2dfv2hf2" [(set (match_operand:V8HF 0 "register_operand") (vec_concat:V8HF (float_truncate:V2HF (match_operand:V2DF 1 "vector_operand")) (match_dup 2)))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "operands[2] = CONST0_RTX (V6HFmode);")

(define_insn "*avx512fp16_vcvtpd2ph_v2df" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_concat:V8HF (float_truncate:V2HF (match_operand:V2DF 1 "vector_operand" "vm")) (match_operand:V6HF 2 "const0_operand" "C")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "vcvtpd2ph{x}\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")])

;; Merge-masked vcvtpd2ph.
(define_expand "avx512fp16_vcvtpd2ph_v2df_mask" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_concat:V8HF (vec_merge:V2HF (float_truncate:V2HF (match_operand:V2DF 1 "vector_operand" "vm")) (vec_select:V2HF (match_operand:V8HF 2 "nonimm_or_0_operand" "0C") (parallel [(const_int 0) (const_int 1)])) (match_operand:QI 3 "register_operand" "Yk")) (match_dup 4)))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "operands[4] = CONST0_RTX (V6HFmode);")

(define_insn "*avx512fp16_vcvtpd2ph_v2df_mask" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_concat:V8HF (vec_merge:V2HF (float_truncate:V2HF (match_operand:V2DF 1 "vector_operand" "vm")) (vec_select:V2HF (match_operand:V8HF 2 "nonimm_or_0_operand" "0C") (parallel [(const_int 0) (const_int 1)])) (match_operand:QI 3 "register_operand" "Yk")) (match_operand:V6HF 4 "const0_operand" "C")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "vcvtpd2ph{x}\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")])

;; Zero-masked vcvtpd2ph.
(define_insn "*avx512fp16_vcvtpd2ph_v2df_mask_1" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_concat:V8HF (vec_merge:V2HF (float_truncate:V2HF (match_operand:V2DF 1 "vector_operand" "vm")) (match_operand:V2HF 3 "const0_operand" "C") (match_operand:QI 2 "register_operand" "Yk")) (match_operand:V6HF 4 "const0_operand" "C")))] "TARGET_AVX512FP16 && TARGET_AVX512VL" "vcvtpd2ph{x}\t{%1, %0%{%2%}%{z%}|%0%{%2%}%{z%}, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")])

;; Scalar HF -> SF/DF (vcvtsh2ss/vcvtsh2sd): converted element 0 of operand
;; 1 replaces element 0 of operand 2.
(define_insn "avx512fp16_vcvtsh2" [(set (match_operand:VF48_128 0 "register_operand" "=v") (vec_merge:VF48_128 (vec_duplicate:VF48_128 (float_extend: (vec_select:HF (match_operand:V8HF 1 "register_operand" "v") (parallel [(const_int 0)])))) (match_operand:VF48_128 2 "register_operand" "v") (const_int 1)))] "TARGET_AVX512FP16" "vcvtsh2\t{%1, %2, %0|%0, %2, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")])

(define_insn "avx512fp16_vcvtsh2_mem" [(set (match_operand:VF48_128 0 "register_operand" "=v") (vec_merge:VF48_128 (vec_duplicate:VF48_128 (float_extend: (match_operand:HF 1 "memory_operand" "m"))) (match_operand:VF48_128 2 "register_operand" "v") (const_int 1)))] "TARGET_AVX512FP16" "vcvtsh2\t{%1, %2, %0|%0, %2, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")])

;; Scalar SF/DF -> HF (vcvtss2sh/vcvtsd2sh), register and memory sources.
(define_insn "avx512fp16_vcvt2sh" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_merge:V8HF (vec_duplicate:V8HF (float_truncate:HF (vec_select: (match_operand:VF48_128 1 "register_operand" "v") (parallel [(const_int 0)])))) (match_operand:V8HF 2 "register_operand" "v") (const_int 1)))] "TARGET_AVX512FP16" "vcvt2sh\t{%1, %2, %0|%0, %2, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")])

(define_insn "avx512fp16_vcvt2sh_mem" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_merge:V8HF (vec_duplicate:V8HF (float_truncate:HF (match_operand:MODEF 1 "memory_operand" "m"))) (match_operand:V8HF 2 "register_operand" "v") (const_int 1)))] "TARGET_AVX512FP16" "vcvt2sh\t{%1, %2, %0|%0, %2, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")])

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Parallel single-precision floating point conversion operations
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;; cvtpi2ps: MMX V2SI -> low V2SF of a V4SF.  When the input ends up in an
;; SSE register (TARGET_MMX_WITH_SSE), the splitter rewrites it as a
;; cvtdq2ps plus a shuffle/merge sequence instead of using the MMX insn.
(define_insn_and_split "sse_cvtpi2ps" [(set (match_operand:V4SF 0 "register_operand" "=x,x,Yv") (vec_merge:V4SF (vec_duplicate:V4SF (float:V2SF (match_operand:V2SI 2 "register_mmxmem_operand" "ym,x,Yv"))) (match_operand:V4SF 1 "register_operand" "0,0,Yv") (const_int 3))) (clobber (match_scratch:V4SF 3 "=X,x,Yv"))] "(TARGET_MMX || TARGET_MMX_WITH_SSE) && TARGET_SSE" "@ cvtpi2ps\t{%2, %0|%0, %2} # #" "TARGET_SSE2 && reload_completed && SSE_REG_P (operands[2])" [(const_int 0)] { rtx op2 = lowpart_subreg (V4SImode, operands[2], GET_MODE (operands[2])); /* Generate SSE2 cvtdq2ps. */ emit_insn (gen_floatv4siv4sf2 (operands[3], op2)); /* Merge operands[3] with operands[0].
*/ rtx mask, op1; if (TARGET_AVX) { mask = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, GEN_INT (0), GEN_INT (1), GEN_INT (6), GEN_INT (7))); op1 = gen_rtx_VEC_CONCAT (V8SFmode, operands[3], operands[1]); op2 = gen_rtx_VEC_SELECT (V4SFmode, op1, mask); emit_insn (gen_rtx_SET (operands[0], op2)); } else { /* NB: SSE can only concatenate OP0 and OP3 to OP0. */ mask = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, GEN_INT (2), GEN_INT (3), GEN_INT (4), GEN_INT (5))); op1 = gen_rtx_VEC_CONCAT (V8SFmode, operands[0], operands[3]); op2 = gen_rtx_VEC_SELECT (V4SFmode, op1, mask); emit_insn (gen_rtx_SET (operands[0], op2)); /* Swap bits 0:63 with bits 64:127. */ mask = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, GEN_INT (2), GEN_INT (3), GEN_INT (0), GEN_INT (1))); rtx dest = lowpart_subreg (V4SImode, operands[0], GET_MODE (operands[0])); op1 = gen_rtx_VEC_SELECT (V4SImode, dest, mask); emit_insn (gen_rtx_SET (dest, op1)); } DONE; } [(set_attr "mmx_isa" "native,sse_noavx,avx") (set_attr "type" "ssecvt") (set_attr "mode" "V4SF")])

;; cvtps2pi: round low two SF lanes to V2SI (UNSPEC_FIX_NOTRUNC = current
;; rounding mode).  When the destination is an SSE register, split into a
;; zero-extended V4SF copy followed by sse2_fix_notruncv4sfv4si.
(define_insn_and_split "sse_cvtps2pi" [(set (match_operand:V2SI 0 "register_operand" "=y,Yv") (vec_select:V2SI (unspec:V4SI [(match_operand:V4SF 1 "nonimmediate_operand" "xm,YvBm")] UNSPEC_FIX_NOTRUNC) (parallel [(const_int 0) (const_int 1)])))] "(TARGET_MMX || TARGET_MMX_WITH_SSE) && TARGET_SSE" "@ cvtps2pi\t{%1, %0|%0, %q1} #" "TARGET_SSE2 && reload_completed && SSE_REG_P (operands[0])" [(const_int 0)] { rtx op1 = lowpart_subreg (V2SFmode, operands[1], GET_MODE (operands[1])); rtx tmp = lowpart_subreg (V4SFmode, operands[0], GET_MODE (operands[0])); op1 = gen_rtx_VEC_CONCAT (V4SFmode, op1, CONST0_RTX (V2SFmode)); emit_insn (gen_rtx_SET (tmp, op1)); rtx dest = lowpart_subreg (V4SImode, operands[0], GET_MODE (operands[0])); emit_insn (gen_sse2_fix_notruncv4sfv4si (dest, tmp)); DONE; } [(set_attr "isa" "*,sse2") (set_attr "mmx_isa" "native,*") (set_attr "type" "ssecvt") (set_attr "unit" "mmx,*") (set_attr "mode" "DI")])

;; cvttps2pi: same as above but truncating (plain fix: rather than unspec).
(define_insn_and_split "sse_cvttps2pi" [(set (match_operand:V2SI 0 "register_operand" "=y,Yv") (vec_select:V2SI (fix:V4SI (match_operand:V4SF 1 "nonimmediate_operand" "xm,YvBm")) (parallel [(const_int 0) (const_int 1)])))] "(TARGET_MMX || TARGET_MMX_WITH_SSE) && TARGET_SSE" "@ cvttps2pi\t{%1, %0|%0, %q1} #" "TARGET_SSE2 && reload_completed && SSE_REG_P (operands[0])" [(const_int 0)] { rtx op1 = lowpart_subreg (V2SFmode, operands[1], GET_MODE (operands[1])); rtx tmp = lowpart_subreg (V4SFmode, operands[0], GET_MODE (operands[0])); op1 = gen_rtx_VEC_CONCAT (V4SFmode, op1, CONST0_RTX (V2SFmode)); emit_insn (gen_rtx_SET (tmp, op1)); rtx dest = lowpart_subreg (V4SImode, operands[0], GET_MODE (operands[0])); emit_insn (gen_fix_truncv4sfv4si2 (dest, tmp)); DONE; } [(set_attr "isa" "*,sse2") (set_attr "mmx_isa" "native,*") (set_attr "type" "ssecvt") (set_attr "unit" "mmx,*") (set_attr "prefix_rep" "0") (set_attr "mode" "SF")])

;; cvtsi2ss: scalar int -> element 0 of a V4SF; SSE (two-operand) and AVX
;; (three-operand) alternatives.  NOTE(review): operand 2's predicate and
;; the third alternative's constraint are empty -- stripped "<...>" text.
(define_insn "sse_cvtsi2ss" [(set (match_operand:V4SF 0 "register_operand" "=x,x,v") (vec_merge:V4SF (vec_duplicate:V4SF (float:SF (match_operand:SWI48 2 "" "r,m,"))) (match_operand:V4SF 1 "register_operand" "0,0,v") (const_int 1)))] "TARGET_SSE" "@ cvtsi2ss\t{%2, %0|%0, %2} cvtsi2ss\t{%2, %0|%0, %2} vcvtsi2ss\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "sseicvt") (set_attr "athlon_decode" "vector,double,*") (set_attr "amdfam10_decode" "vector,double,*") (set_attr "bdver1_decode" "double,direct,*") (set_attr "btver2_decode" "double,double,double") (set_attr "znver1_decode" "double,double,double") (set (attr "length_vex") (if_then_else (and (match_test "mode == DImode") (eq_attr "alternative" "2")) (const_string "4") (const_string "*"))) (set (attr "prefix_rex") (if_then_else (and (match_test "mode == DImode") (eq_attr "alternative" "0,1")) (const_string "1") (const_string "*"))) (set_attr "prefix" "orig,orig,maybe_evex") (set_attr "mode" "SF")])

;; cvtss2si: element 0 of a V4SF -> scalar int, current rounding mode.
(define_insn "sse_cvtss2si" [(set (match_operand:SWI48 0 "register_operand" "=r,r") (unspec:SWI48 [(vec_select:SF (match_operand:V4SF 1 "" "v,") (parallel [(const_int 0)]))] UNSPEC_FIX_NOTRUNC))] "TARGET_SSE" "%vcvtss2si\t{%1, %0|%0, %k1}" [(set_attr "type" "sseicvt") (set_attr "athlon_decode" "double,vector") (set_attr "bdver1_decode" "double,double") (set_attr "prefix_rep" "1") (set_attr "prefix" "maybe_vex") (set_attr "mode" "")])

;; Same conversion from a scalar SF operand (register or memory).
(define_insn "sse_cvtss2si_2" [(set (match_operand:SWI48 0 "register_operand" "=r,r") (unspec:SWI48 [(match_operand:SF 1 "nonimmediate_operand" "v,m")] UNSPEC_FIX_NOTRUNC))] "TARGET_SSE" "%vcvtss2si\t{%1, %0|%0, %1}" [(set_attr "type" "sseicvt") (set_attr "athlon_decode" "double,vector") (set_attr "amdfam10_decode" "double,double") (set_attr "bdver1_decode" "double,double") (set_attr "prefix_rep" "1") (set_attr "prefix" "maybe_vex") (set_attr "mode" "")])

;; cvttss2si: truncating form of the above.
(define_insn "sse_cvttss2si" [(set (match_operand:SWI48 0 "register_operand" "=r,r") (fix:SWI48 (vec_select:SF (match_operand:V4SF 1 "" "v,") (parallel [(const_int 0)]))))] "TARGET_SSE" "%vcvttss2si\t{%1, %0|%0, %k1}" [(set_attr "type" "sseicvt") (set_attr "athlon_decode" "double,vector") (set_attr "amdfam10_decode" "double,double") (set_attr "bdver1_decode" "double,double") (set_attr "prefix_rep" "1") (set_attr "prefix" "maybe_vex") (set_attr "mode" "")])

;; vcvtusi2ss/sd: unsigned 32-bit int -> element 0 of a VF_128 vector.
(define_insn "cvtusi232" [(set (match_operand:VF_128 0 "register_operand" "=v") (vec_merge:VF_128 (vec_duplicate:VF_128 (unsigned_float: (match_operand:SI 2 "" ""))) (match_operand:VF_128 1 "register_operand" "v") (const_int 1)))] "TARGET_AVX512F && " "vcvtusi2{l}\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sseicvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

;; 64-bit unsigned variant (requires TARGET_64BIT).
(define_insn "cvtusi264" [(set (match_operand:VF_128 0 "register_operand" "=v") (vec_merge:VF_128 (vec_duplicate:VF_128 (unsigned_float: (match_operand:DI 2 "" ""))) (match_operand:VF_128 1 "register_operand" "v") (const_int 1)))] "TARGET_AVX512F && TARGET_64BIT" "vcvtusi2{q}\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sseicvt") (set_attr "prefix" "evex") (set_attr
"mode" "")])

;; Vector int -> SF conversion (cvtdq2ps).  NOTE(review): the condition
;; string "TARGET_SSE2 && && " contains a doubled "&&" -- the original
;; "<mask_codefor>"/"<mask_mode512bit_condition>"-style references were
;; stripped by extraction.  Preserved verbatim.
(define_insn "float2" [(set (match_operand:VF1 0 "register_operand" "=x,v") (float:VF1 (match_operand: 1 "" "xBm,")))] "TARGET_SSE2 && && " "@ cvtdq2ps\t{%1, %0|%0, %1} vcvtdq2ps\t{%1, %0|%0, %1}" [(set_attr "isa" "noavx,avx") (set_attr "type" "ssecvt") (set_attr "prefix" "maybe_vex") (set_attr "mode" "")])

;; Unsigned vector int -> SF (vcvtudq2ps), AVX512F.
(define_insn "ufloat2" [(set (match_operand:VF1_AVX512VL 0 "register_operand" "=v") (unsigned_float:VF1_AVX512VL (match_operand: 1 "nonimmediate_operand" "")))] "TARGET_AVX512F" "vcvtudq2ps\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

;; Unsigned int vector -> SF vector expander: uses the native AVX512
;; instruction when available, otherwise the manual two-step lowering in
;; ix86_expand_vector_convert_uns_vsivsf.
(define_expand "floatuns2" [(match_operand:VF1 0 "register_operand") (match_operand: 1 "register_operand")] "TARGET_SSE2 && (mode == V4SFmode || TARGET_AVX2)" { if (mode == V16SFmode) emit_insn (gen_ufloatv16siv16sf2 (operands[0], operands[1])); else if (TARGET_AVX512VL) { if (mode == V4SFmode) emit_insn (gen_ufloatv4siv4sf2 (operands[0], operands[1])); else emit_insn (gen_ufloatv8siv8sf2 (operands[0], operands[1])); } else ix86_expand_vector_convert_uns_vsivsf (operands[0], operands[1]); DONE; })

;; For _fix_notrunc insn pattern
(define_mode_attr sf2simodelower [(V16SI "v16sf") (V8SI "v8sf") (V4SI "v4sf")])

;; cvtps2dq: SF -> SI vector, current rounding mode (UNSPEC_FIX_NOTRUNC).
(define_insn "_fix_notrunc" [(set (match_operand:VI4_AVX 0 "register_operand" "=v") (unspec:VI4_AVX [(match_operand: 1 "vector_operand" "vBm")] UNSPEC_FIX_NOTRUNC))] "TARGET_SSE2 && " "%vcvtps2dq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set (attr "prefix_data16") (if_then_else (match_test "TARGET_AVX") (const_string "*") (const_string "1"))) (set_attr "prefix" "maybe_vex") (set_attr "mode" "")])

(define_insn "avx512f_fix_notruncv16sfv16si" [(set (match_operand:V16SI 0 "register_operand" "=v") (unspec:V16SI [(match_operand:V16SF 1 "" "")] UNSPEC_FIX_NOTRUNC))] "TARGET_AVX512F" "vcvtps2dq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "XI")])

;; vcvtps2udq: SF -> unsigned SI vector.
(define_insn "_ufix_notrunc" [(set (match_operand:VI4_AVX512VL 0 "register_operand" "=v") (unspec:VI4_AVX512VL [(match_operand: 1 "nonimmediate_operand" "")] UNSPEC_UNSIGNED_FIX_NOTRUNC))] "TARGET_AVX512F" "vcvtps2udq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

;; vcvtps2qq / vcvtps2uqq: SF -> signed/unsigned DI vectors (AVX512DQ).
(define_insn "avx512dq_cvtps2qq" [(set (match_operand:VI8_256_512 0 "register_operand" "=v") (unspec:VI8_256_512 [(match_operand: 1 "nonimmediate_operand" "")] UNSPEC_FIX_NOTRUNC))] "TARGET_AVX512DQ && " "vcvtps2qq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

;; 128-bit form: converts the low two SF lanes (64-bit memory ref, %q1).
(define_insn "avx512dq_cvtps2qqv2di" [(set (match_operand:V2DI 0 "register_operand" "=v") (unspec:V2DI [(vec_select:V2SF (match_operand:V4SF 1 "nonimmediate_operand" "vm") (parallel [(const_int 0) (const_int 1)]))] UNSPEC_FIX_NOTRUNC))] "TARGET_AVX512DQ && TARGET_AVX512VL" "vcvtps2qq\t{%1, %0|%0, %q1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")])

(define_insn "avx512dq_cvtps2uqq" [(set (match_operand:VI8_256_512 0 "register_operand" "=v") (unspec:VI8_256_512 [(match_operand: 1 "nonimmediate_operand" "")] UNSPEC_UNSIGNED_FIX_NOTRUNC))] "TARGET_AVX512DQ && " "vcvtps2uqq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

(define_insn "avx512dq_cvtps2uqqv2di" [(set (match_operand:V2DI 0 "register_operand" "=v") (unspec:V2DI [(vec_select:V2SF (match_operand:V4SF 1 "nonimmediate_operand" "vm") (parallel [(const_int 0) (const_int 1)]))] UNSPEC_UNSIGNED_FIX_NOTRUNC))] "TARGET_AVX512DQ && TARGET_AVX512VL" "vcvtps2uqq\t{%1, %0|%0, %q1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")])

;; vcvttps2dq family: truncating SF -> SI vector conversions.
(define_insn "fix_truncv16sfv16si2" [(set (match_operand:V16SI 0 "register_operand" "=v") (any_fix:V16SI (match_operand:V16SF 1 "" "")))] "TARGET_AVX512F" "vcvttps2dq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "XI")])

(define_insn "fix_truncv8sfv8si2" [(set (match_operand:V8SI 0 "register_operand" "=v") (fix:V8SI (match_operand:V8SF 1 "nonimmediate_operand" "vm")))] "TARGET_AVX && " "vcvttps2dq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "") (set_attr "mode" "OI")])

;; NOTE(review): "prefix_data16" is set twice below -- once conditionally
;; via (set (attr ...)) and once unconditionally via (set_attr ... "0").
;; Looks redundant or conflicting; compare against upstream sse.md.
(define_insn "fix_truncv4sfv4si2" [(set (match_operand:V4SI 0 "register_operand" "=v") (fix:V4SI (match_operand:V4SF 1 "vector_operand" "vBm")))] "TARGET_SSE2 && " "%vcvttps2dq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set (attr "prefix_rep") (if_then_else (match_test "TARGET_AVX") (const_string "*") (const_string "1"))) (set (attr "prefix_data16") (if_then_else (match_test "TARGET_AVX") (const_string "*") (const_string "0"))) (set_attr "prefix_data16" "0") (set_attr "prefix" "") (set_attr "mode" "TI")])

;; SF -> unsigned int expander: native on AVX512 (V16SF), otherwise lowered
;; via adjust-ufix-to-sfix (convert as signed, then xor-correct).
(define_expand "fixuns_trunc2" [(match_operand: 0 "register_operand") (match_operand:VF1 1 "register_operand")] "TARGET_SSE2" { if (mode == V16SFmode) emit_insn (gen_ufix_truncv16sfv16si2 (operands[0], operands[1])); else { rtx tmp[3]; tmp[0] = ix86_expand_adjust_ufix_to_sfix_si (operands[1], &tmp[2]); tmp[1] = gen_reg_rtx (mode); emit_insn (gen_fix_trunc2 (tmp[1], tmp[0])); emit_insn (gen_xor3 (operands[0], tmp[1], tmp[2])); } DONE; })

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Parallel double-precision floating point conversion operations
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;; cvtpi2pd / cvtdq2pd: V2SI -> V2DF, SSE and MMX-register alternatives.
(define_insn "sse2_cvtpi2pd" [(set (match_operand:V2DF 0 "register_operand" "=v,?!x") (float:V2DF (match_operand:V2SI 1 "nonimmediate_operand" "vBm,yBm")))] "TARGET_SSE2" "@ %vcvtdq2pd\t{%1, %0|%0, %1} cvtpi2pd\t{%1, %0|%0, %1}" [(set_attr "mmx_isa" "*,native") (set_attr "type" "ssecvt") (set_attr "unit" "*,mmx") (set_attr "prefix_data16" "*,1") (set_attr "prefix" "maybe_vex,*") (set_attr "mode" "V2DF")])

(define_expand "floatv2siv2df2" [(set (match_operand:V2DF 0 "register_operand") (float:V2DF (match_operand:V2SI 1 "nonimmediate_operand")))] "TARGET_MMX_WITH_SSE")

;; Unsigned V2SI -> V2DF (vcvtudq2pd, AVX512VL); continues next chunk line.
(define_insn "floatunsv2siv2df2" [(set
(match_operand:V2DF 0 "register_operand" "=v") (unsigned_float:V2DF (match_operand:V2SI 1 "nonimmediate_operand" "vm")))] "TARGET_MMX_WITH_SSE && TARGET_AVX512VL" "vcvtudq2pd\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "V2DF")])

;; cvtpd2pi / cvtpd2dq{x}: V2DF -> V2SI, current rounding mode; the SSE
;; alternative selects the v-prefixed form at output time under TARGET_AVX.
(define_insn "sse2_cvtpd2pi" [(set (match_operand:V2SI 0 "register_operand" "=v,?!y") (unspec:V2SI [(match_operand:V2DF 1 "vector_operand" "vBm,xBm")] UNSPEC_FIX_NOTRUNC))] "TARGET_SSE2" "@ * return TARGET_AVX ? \"vcvtpd2dq{x}\t{%1, %0|%0, %1}\" : \"cvtpd2dq\t{%1, %0|%0, %1}\"; cvtpd2pi\t{%1, %0|%0, %1}" [(set_attr "mmx_isa" "*,native") (set_attr "type" "ssecvt") (set_attr "unit" "*,mmx") (set_attr "amdfam10_decode" "double") (set_attr "athlon_decode" "vector") (set_attr "bdver1_decode" "double") (set_attr "prefix_data16" "*,1") (set_attr "prefix" "maybe_vex,*") (set_attr "mode" "TI")])

;; cvttpd2pi / cvttpd2dq{x}: truncating form of the above.
(define_insn "sse2_cvttpd2pi" [(set (match_operand:V2SI 0 "register_operand" "=v,?!y") (fix:V2SI (match_operand:V2DF 1 "vector_operand" "vBm,xBm")))] "TARGET_SSE2" "@ * return TARGET_AVX ? \"vcvttpd2dq{x}\t{%1, %0|%0, %1}\" : \"cvttpd2dq\t{%1, %0|%0, %1}\"; cvttpd2pi\t{%1, %0|%0, %1}" [(set_attr "mmx_isa" "*,native") (set_attr "type" "ssecvt") (set_attr "unit" "*,mmx") (set_attr "amdfam10_decode" "double") (set_attr "athlon_decode" "vector") (set_attr "bdver1_decode" "double") (set_attr "prefix_data16" "*,1") (set_attr "prefix" "maybe_vex,*") (set_attr "mode" "TI")])

(define_expand "fix_truncv2dfv2si2" [(set (match_operand:V2SI 0 "register_operand") (fix:V2SI (match_operand:V2DF 1 "vector_operand")))] "TARGET_MMX_WITH_SSE")

;; Truncating V2DF -> unsigned V2SI (vcvttpd2udq{x}, AVX512VL).
(define_insn "fixuns_truncv2dfv2si2" [(set (match_operand:V2SI 0 "register_operand" "=v") (unsigned_fix:V2SI (match_operand:V2DF 1 "nonimmediate_operand" "vm")))] "TARGET_MMX_WITH_SSE && TARGET_AVX512VL" "vcvttpd2udq{x}\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")])

;; cvtsi2sd: 32-bit int -> element 0 of a V2DF; SSE and AVX alternatives.
(define_insn "sse2_cvtsi2sd" [(set (match_operand:V2DF 0 "register_operand" "=x,x,v") (vec_merge:V2DF (vec_duplicate:V2DF (float:DF (match_operand:SI 2 "nonimmediate_operand" "r,m,rm"))) (match_operand:V2DF 1 "register_operand" "0,0,v") (const_int 1)))] "TARGET_SSE2" "@ cvtsi2sd{l}\t{%2, %0|%0, %2} cvtsi2sd{l}\t{%2, %0|%0, %2} vcvtsi2sd{l}\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "sseicvt") (set_attr "athlon_decode" "double,direct,*") (set_attr "amdfam10_decode" "vector,double,*") (set_attr "bdver1_decode" "double,direct,*") (set_attr "btver2_decode" "double,double,double") (set_attr "znver1_decode" "double,double,double") (set_attr "prefix" "orig,orig,maybe_evex") (set_attr "mode" "DF")])

;; cvtsi2sdq: 64-bit variant (TARGET_64BIT).  NOTE(review): operand 2's
;; predicate/constraint strings are partially empty -- stripped "<...>" text.
(define_insn "sse2_cvtsi2sdq" [(set (match_operand:V2DF 0 "register_operand" "=x,x,v") (vec_merge:V2DF (vec_duplicate:V2DF (float:DF (match_operand:DI 2 "" "r,m,"))) (match_operand:V2DF 1 "register_operand" "0,0,v") (const_int 1)))] "TARGET_SSE2 && TARGET_64BIT" "@ cvtsi2sd{q}\t{%2, %0|%0, %2} cvtsi2sd{q}\t{%2, %0|%0, %2} vcvtsi2sd{q}\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "sseicvt") (set_attr "athlon_decode" "double,direct,*") (set_attr "amdfam10_decode" "vector,double,*") (set_attr "bdver1_decode" "double,direct,*") (set_attr "length_vex" "*,*,4") (set_attr "prefix_rex" "1,1,*") (set_attr "prefix" "orig,orig,maybe_evex") (set_attr "mode" "DF")])

;; AVX512F scalar SF/DF -> unsigned int, rounding and truncating forms.
(define_insn "avx512f_vcvtss2usi" [(set (match_operand:SWI48 0 "register_operand" "=r") (unspec:SWI48 [(vec_select:SF (match_operand:V4SF 1 "" "") (parallel [(const_int 0)]))] UNSPEC_UNSIGNED_FIX_NOTRUNC))] "TARGET_AVX512F" "vcvtss2usi\t{%1, %0|%0, %k1}" [(set_attr "type" "sseicvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

(define_insn "avx512f_vcvttss2usi" [(set (match_operand:SWI48 0 "register_operand" "=r") (unsigned_fix:SWI48 (vec_select:SF (match_operand:V4SF 1 "" "") (parallel [(const_int 0)]))))] "TARGET_AVX512F" "vcvttss2usi\t{%1, %0|%0, %k1}" [(set_attr "type" "sseicvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

(define_insn "avx512f_vcvtsd2usi" [(set (match_operand:SWI48 0 "register_operand" "=r") (unspec:SWI48 [(vec_select:DF (match_operand:V2DF 1 "" "") (parallel [(const_int 0)]))] UNSPEC_UNSIGNED_FIX_NOTRUNC))] "TARGET_AVX512F" "vcvtsd2usi\t{%1, %0|%0, %q1}" [(set_attr "type" "sseicvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

(define_insn "avx512f_vcvttsd2usi" [(set (match_operand:SWI48 0 "register_operand" "=r") (unsigned_fix:SWI48 (vec_select:DF (match_operand:V2DF 1 "" "") (parallel [(const_int 0)]))))] "TARGET_AVX512F" "vcvttsd2usi\t{%1, %0|%0, %q1}" [(set_attr "type" "sseicvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

;; cvtsd2si: element 0 of a V2DF -> scalar int, current rounding mode.
(define_insn "sse2_cvtsd2si" [(set (match_operand:SWI48 0 "register_operand" "=r,r") (unspec:SWI48 [(vec_select:DF (match_operand:V2DF 1 "" "v,") (parallel [(const_int 0)]))] UNSPEC_FIX_NOTRUNC))] "TARGET_SSE2" "%vcvtsd2si\t{%1, %0|%0, %q1}" [(set_attr "type" "sseicvt") (set_attr "athlon_decode" "double,vector") (set_attr "bdver1_decode" "double,double") (set_attr "btver2_decode" "double,double") (set_attr "prefix_rep" "1") (set_attr "prefix" "maybe_vex") (set_attr "mode" "")])

;; Same conversion from a scalar DF operand (register or memory).
(define_insn "sse2_cvtsd2si_2" [(set (match_operand:SWI48 0 "register_operand" "=r,r") (unspec:SWI48 [(match_operand:DF 1 "nonimmediate_operand" "v,m")] UNSPEC_FIX_NOTRUNC))] "TARGET_SSE2" "%vcvtsd2si\t{%1, %0|%0, %q1}" [(set_attr "type" "sseicvt") (set_attr "athlon_decode" "double,vector") (set_attr "amdfam10_decode" "double,double") (set_attr "bdver1_decode" "double,double") (set_attr "prefix_rep" "1") (set_attr "prefix" "maybe_vex") (set_attr "mode" "")])

;; cvttsd2si: truncating form.
(define_insn "sse2_cvttsd2si" [(set (match_operand:SWI48 0 "register_operand" "=r,r") (fix:SWI48 (vec_select:DF (match_operand:V2DF 1 "" "v,") (parallel [(const_int 0)]))))] "TARGET_SSE2" "%vcvttsd2si\t{%1, %0|%0, %q1}" [(set_attr "type" "sseicvt") (set_attr "athlon_decode" "double,vector") (set_attr "amdfam10_decode" "double,double") (set_attr "bdver1_decode" "double,double") (set_attr "btver2_decode" "double,double") (set_attr "prefix_rep" "1") (set_attr "prefix" "maybe_vex") (set_attr "mode" "")])

;; For float2 insn pattern
(define_mode_attr si2dfmode [(V8DF "V8SI") (V4DF "V4SI")])
(define_mode_attr si2dfmodelower [(V8DF "v8si") (V4DF "v4si")])

;; SI vector -> DF vector widening conversions (vcvtdq2pd / vcvtqq2pd).
(define_insn "float2" [(set (match_operand:VF2_512_256 0 "register_operand" "=v") (float:VF2_512_256 (match_operand: 1 "nonimmediate_operand" "vm")))] "TARGET_AVX && " "vcvtdq2pd\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "maybe_vex") (set_attr "mode" "")])

(define_insn "float2" [(set (match_operand:VF2_AVX512VL 0 "register_operand" "=v") (any_float:VF2_AVX512VL (match_operand: 1 "nonimmediate_operand" "")))] "TARGET_AVX512DQ" "vcvtqq2pd\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")])

;; For float insn patterns
(define_mode_attr qq2pssuff [(V8SF "") (V4SF "{y}")])
(define_mode_attr sselongvecmode [(V8SF "V8DI") (V4SF "V4DI")])
(define_mode_attr sselongvecmodelower [(V8SF "v8di") (V4SF "v4di")])
(define_mode_attr sseintvecmode3
[(V8SF "XI") (V4SF "OI") (V8DF "OI") (V4DF "TI")]) (define_insn "float2" [(set (match_operand:VF1_128_256VL 0 "register_operand" "=v") (any_float:VF1_128_256VL (match_operand: 1 "nonimmediate_operand" "")))] "TARGET_AVX512DQ && " "vcvtqq2ps\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "avx512dq_floatv2div2sf2" [(set (match_operand:V4SF 0 "register_operand" "=v") (vec_concat:V4SF (any_float:V2SF (match_operand:V2DI 1 "nonimmediate_operand" "vm")) (match_dup 2)))] "TARGET_AVX512DQ && TARGET_AVX512VL" "operands[2] = CONST0_RTX (V2SFmode);") (define_insn "*avx512dq_floatv2div2sf2" [(set (match_operand:V4SF 0 "register_operand" "=v") (vec_concat:V4SF (any_float:V2SF (match_operand:V2DI 1 "nonimmediate_operand" "vm")) (match_operand:V2SF 2 "const0_operand" "C")))] "TARGET_AVX512DQ && TARGET_AVX512VL" "vcvtqq2ps{x}\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "V4SF")]) (define_expand "floatv2div2sf2" [(set (match_operand:V2SF 0 "register_operand") (any_float:V2SF (match_operand:V2DI 1 "nonimmediate_operand")))] "TARGET_AVX512DQ && TARGET_AVX512VL" { rtx op0 = gen_reg_rtx (V4SFmode); emit_insn (gen_avx512dq_floatv2div2sf2 (op0, operands[1])); emit_move_insn (operands[0], lowpart_subreg (V2SFmode, op0, V4SFmode)); DONE; }) (define_mode_attr vpckfloat_concat_mode [(V8DI "v16sf") (V4DI "v8sf") (V2DI "v8sf")]) (define_mode_attr vpckfloat_temp_mode [(V8DI "V8SF") (V4DI "V4SF") (V2DI "V4SF")]) (define_mode_attr vpckfloat_op_mode [(V8DI "v8sf") (V4DI "v4sf") (V2DI "v2sf")]) (define_expand "vec_pack_float_" [(match_operand: 0 "register_operand") (any_float: (match_operand:VI8_AVX512VL 1 "register_operand")) (match_operand:VI8_AVX512VL 2 "register_operand")] "TARGET_AVX512DQ" { rtx r1 = gen_reg_rtx (mode); rtx r2 = gen_reg_rtx (mode); rtx (*gen) (rtx, rtx); if (mode == V2DImode) gen = gen_avx512dq_floatv2div2sf2; else gen = gen_float2; emit_insn (gen (r1, operands[1])); 
emit_insn (gen (r2, operands[2])); if (mode == V2DImode) emit_insn (gen_sse_movlhps (operands[0], r1, r2)); else emit_insn (gen_avx_vec_concat (operands[0], r1, r2)); DONE; }) (define_expand "floatv2div2sf2_mask" [(set (match_operand:V4SF 0 "register_operand" "=v") (vec_concat:V4SF (vec_merge:V2SF (any_float:V2SF (match_operand:V2DI 1 "nonimmediate_operand" "vm")) (vec_select:V2SF (match_operand:V4SF 2 "nonimm_or_0_operand" "0C") (parallel [(const_int 0) (const_int 1)])) (match_operand:QI 3 "register_operand" "Yk")) (match_dup 4)))] "TARGET_AVX512DQ && TARGET_AVX512VL" "operands[4] = CONST0_RTX (V2SFmode);") (define_insn "*floatv2div2sf2_mask" [(set (match_operand:V4SF 0 "register_operand" "=v") (vec_concat:V4SF (vec_merge:V2SF (any_float:V2SF (match_operand:V2DI 1 "nonimmediate_operand" "vm")) (vec_select:V2SF (match_operand:V4SF 2 "nonimm_or_0_operand" "0C") (parallel [(const_int 0) (const_int 1)])) (match_operand:QI 3 "register_operand" "Yk")) (match_operand:V2SF 4 "const0_operand" "C")))] "TARGET_AVX512DQ && TARGET_AVX512VL" "vcvtqq2ps{x}\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "V4SF")]) (define_insn "*floatv2div2sf2_mask_1" [(set (match_operand:V4SF 0 "register_operand" "=v") (vec_concat:V4SF (vec_merge:V2SF (any_float:V2SF (match_operand:V2DI 1 "nonimmediate_operand" "vm")) (match_operand:V2SF 3 "const0_operand" "C") (match_operand:QI 2 "register_operand" "Yk")) (match_operand:V2SF 4 "const0_operand" "C")))] "TARGET_AVX512DQ && TARGET_AVX512VL" "vcvtqq2ps{x}\t{%1, %0%{%2%}%{z%}|%0%{%2%}%{z%}, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "V4SF")]) (define_insn "ufloat2" [(set (match_operand:VF2_512_256VL 0 "register_operand" "=v") (unsigned_float:VF2_512_256VL (match_operand: 1 "nonimmediate_operand" "vm")))] "TARGET_AVX512F" "vcvtudq2pd\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "ufloatv2siv2df2" 
[(set (match_operand:V2DF 0 "register_operand" "=v") (unsigned_float:V2DF (vec_select:V2SI (match_operand:V4SI 1 "nonimmediate_operand" "vm") (parallel [(const_int 0) (const_int 1)]))))] "TARGET_AVX512VL" "vcvtudq2pd\t{%1, %0|%0, %q1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "V2DF")]) (define_insn "avx512f_cvtdq2pd512_2" [(set (match_operand:V8DF 0 "register_operand" "=v") (float:V8DF (vec_select:V8SI (match_operand:V16SI 1 "nonimmediate_operand" "vm") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)]))))] "TARGET_AVX512F" "vcvtdq2pd\t{%t1, %0|%0, %t1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "V8DF")]) (define_insn "avx_cvtdq2pd256_2" [(set (match_operand:V4DF 0 "register_operand" "=v") (float:V4DF (vec_select:V4SI (match_operand:V8SI 1 "nonimmediate_operand" "vm") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)]))))] "TARGET_AVX" "vcvtdq2pd\t{%x1, %0|%0, %x1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "maybe_evex") (set_attr "mode" "V4DF")]) (define_insn "sse2_cvtdq2pd" [(set (match_operand:V2DF 0 "register_operand" "=v") (float:V2DF (vec_select:V2SI (match_operand:V4SI 1 "nonimmediate_operand" "vm") (parallel [(const_int 0) (const_int 1)]))))] "TARGET_SSE2 && " "%vcvtdq2pd\t{%1, %0|%0, %q1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "maybe_vex") (set_attr "mode" "V2DF")]) (define_insn "avx512f_cvtpd2dq512" [(set (match_operand:V8SI 0 "register_operand" "=v") (unspec:V8SI [(match_operand:V8DF 1 "" "")] UNSPEC_FIX_NOTRUNC))] "TARGET_AVX512F" "vcvtpd2dq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "OI")]) (define_insn "avx_cvtpd2dq256" [(set (match_operand:V4SI 0 "register_operand" "=v") (unspec:V4SI [(match_operand:V4DF 1 "nonimmediate_operand" "vm")] UNSPEC_FIX_NOTRUNC))] "TARGET_AVX && " "vcvtpd2dq{y}\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") 
(set_attr "prefix" "") (set_attr "mode" "OI")]) (define_expand "avx_cvtpd2dq256_2" [(set (match_operand:V8SI 0 "register_operand") (vec_concat:V8SI (unspec:V4SI [(match_operand:V4DF 1 "nonimmediate_operand")] UNSPEC_FIX_NOTRUNC) (match_dup 2)))] "TARGET_AVX" "operands[2] = CONST0_RTX (V4SImode);") (define_insn "*avx_cvtpd2dq256_2" [(set (match_operand:V8SI 0 "register_operand" "=v") (vec_concat:V8SI (unspec:V4SI [(match_operand:V4DF 1 "nonimmediate_operand" "vm")] UNSPEC_FIX_NOTRUNC) (match_operand:V4SI 2 "const0_operand")))] "TARGET_AVX" "vcvtpd2dq{y}\t{%1, %x0|%x0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "vex") (set_attr "btver2_decode" "vector") (set_attr "mode" "OI")]) (define_insn "sse2_cvtpd2dq" [(set (match_operand:V4SI 0 "register_operand" "=v") (vec_concat:V4SI (unspec:V2SI [(match_operand:V2DF 1 "vector_operand" "vBm")] UNSPEC_FIX_NOTRUNC) (const_vector:V2SI [(const_int 0) (const_int 0)])))] "TARGET_SSE2" { if (TARGET_AVX) return "vcvtpd2dq{x}\t{%1, %0|%0, %1}"; else return "cvtpd2dq\t{%1, %0|%0, %1}"; } [(set_attr "type" "ssecvt") (set_attr "prefix_rep" "1") (set_attr "prefix_data16" "0") (set_attr "prefix" "maybe_vex") (set_attr "mode" "TI") (set_attr "amdfam10_decode" "double") (set_attr "athlon_decode" "vector") (set_attr "bdver1_decode" "double")]) (define_insn "sse2_cvtpd2dq_mask" [(set (match_operand:V4SI 0 "register_operand" "=v") (vec_concat:V4SI (vec_merge:V2SI (unspec:V2SI [(match_operand:V2DF 1 "nonimmediate_operand" "vm")] UNSPEC_FIX_NOTRUNC) (vec_select:V2SI (match_operand:V4SI 2 "nonimm_or_0_operand" "0C") (parallel [(const_int 0) (const_int 1)])) (match_operand:QI 3 "register_operand" "Yk")) (const_vector:V2SI [(const_int 0) (const_int 0)])))] "TARGET_AVX512VL" "vcvtpd2dq{x}\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn "*sse2_cvtpd2dq_mask_1" [(set (match_operand:V4SI 0 "register_operand" "=v") (vec_concat:V4SI (vec_merge:V2SI (unspec:V2SI 
[(match_operand:V2DF 1 "nonimmediate_operand" "vm")] UNSPEC_FIX_NOTRUNC) (const_vector:V2SI [(const_int 0) (const_int 0)]) (match_operand:QI 2 "register_operand" "Yk")) (const_vector:V2SI [(const_int 0) (const_int 0)])))] "TARGET_AVX512VL" "vcvtpd2dq{x}\t{%1, %0%{%2%}%{z%}|%0%{%2%}%{z%}, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) ;; For ufix_notrunc* insn patterns (define_mode_attr pd2udqsuff [(V8DF "") (V4DF "{y}")]) (define_insn "ufix_notrunc2" [(set (match_operand: 0 "register_operand" "=v") (unspec: [(match_operand:VF2_512_256VL 1 "nonimmediate_operand" "")] UNSPEC_UNSIGNED_FIX_NOTRUNC))] "TARGET_AVX512F" "vcvtpd2udq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "ufix_notruncv2dfv2si2" [(set (match_operand:V4SI 0 "register_operand" "=v") (vec_concat:V4SI (unspec:V2SI [(match_operand:V2DF 1 "nonimmediate_operand" "vm")] UNSPEC_UNSIGNED_FIX_NOTRUNC) (const_vector:V2SI [(const_int 0) (const_int 0)])))] "TARGET_AVX512VL" "vcvtpd2udq{x}\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn "ufix_notruncv2dfv2si2_mask" [(set (match_operand:V4SI 0 "register_operand" "=v") (vec_concat:V4SI (vec_merge:V2SI (unspec:V2SI [(match_operand:V2DF 1 "nonimmediate_operand" "vm")] UNSPEC_UNSIGNED_FIX_NOTRUNC) (vec_select:V2SI (match_operand:V4SI 2 "nonimm_or_0_operand" "0C") (parallel [(const_int 0) (const_int 1)])) (match_operand:QI 3 "register_operand" "Yk")) (const_vector:V2SI [(const_int 0) (const_int 0)])))] "TARGET_AVX512VL" "vcvtpd2udq{x}\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn "*ufix_notruncv2dfv2si2_mask_1" [(set (match_operand:V4SI 0 "register_operand" "=v") (vec_concat:V4SI (vec_merge:V2SI (unspec:V2SI [(match_operand:V2DF 1 "nonimmediate_operand" "vm")] UNSPEC_UNSIGNED_FIX_NOTRUNC) (const_vector:V2SI [(const_int 0) 
(const_int 0)]) (match_operand:QI 2 "register_operand" "Yk")) (const_vector:V2SI [(const_int 0) (const_int 0)])))] "TARGET_AVX512VL" "vcvtpd2udq{x}\t{%1, %0%{%2%}%{z%}|%0%{%2%}%{z%}, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn "fix_truncv8dfv8si2" [(set (match_operand:V8SI 0 "register_operand" "=v") (any_fix:V8SI (match_operand:V8DF 1 "" "")))] "TARGET_AVX512F" "vcvttpd2dq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "OI")]) (define_insn "ufix_truncv2dfv2si2" [(set (match_operand:V4SI 0 "register_operand" "=v") (vec_concat:V4SI (unsigned_fix:V2SI (match_operand:V2DF 1 "nonimmediate_operand" "vm")) (const_vector:V2SI [(const_int 0) (const_int 0)])))] "TARGET_AVX512VL" "vcvttpd2udq{x}\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn "ufix_truncv2dfv2si2_mask" [(set (match_operand:V4SI 0 "register_operand" "=v") (vec_concat:V4SI (vec_merge:V2SI (unsigned_fix:V2SI (match_operand:V2DF 1 "nonimmediate_operand" "vm")) (vec_select:V2SI (match_operand:V4SI 2 "nonimm_or_0_operand" "0C") (parallel [(const_int 0) (const_int 1)])) (match_operand:QI 3 "register_operand" "Yk")) (const_vector:V2SI [(const_int 0) (const_int 0)])))] "TARGET_AVX512VL" "vcvttpd2udq{x}\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn "*ufix_truncv2dfv2si2_mask_1" [(set (match_operand:V4SI 0 "register_operand" "=v") (vec_concat:V4SI (vec_merge:V2SI (unsigned_fix:V2SI (match_operand:V2DF 1 "nonimmediate_operand" "vm")) (const_vector:V2SI [(const_int 0) (const_int 0)]) (match_operand:QI 2 "register_operand" "Yk")) (const_vector:V2SI [(const_int 0) (const_int 0)])))] "TARGET_AVX512VL" "vcvttpd2udq{x}\t{%1, %0%{%2%}%{z%}|%0%{%2%}%{z%}, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn "fix_truncv4dfv4si2" [(set 
(match_operand:V4SI 0 "register_operand" "=v") (fix:V4SI (match_operand:V4DF 1 "nonimmediate_operand" "vm")))] "TARGET_AVX || (TARGET_AVX512VL && TARGET_AVX512F)" "vcvttpd2dq{y}\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "maybe_evex") (set_attr "mode" "OI")]) (define_insn "ufix_truncv4dfv4si2" [(set (match_operand:V4SI 0 "register_operand" "=v") (unsigned_fix:V4SI (match_operand:V4DF 1 "nonimmediate_operand" "vm")))] "TARGET_AVX512VL && TARGET_AVX512F" "vcvttpd2udq{y}\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "maybe_evex") (set_attr "mode" "OI")]) (define_insn "fix_trunc2" [(set (match_operand: 0 "register_operand" "=v") (any_fix: (match_operand:VF2_AVX512VL 1 "" "")))] "TARGET_AVX512DQ && " "vcvttpd2qq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "fix_notrunc2" [(set (match_operand: 0 "register_operand" "=v") (unspec: [(match_operand:VF2_AVX512VL 1 "" "")] UNSPEC_FIX_NOTRUNC))] "TARGET_AVX512DQ && " "vcvtpd2qq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "ufix_notrunc2" [(set (match_operand: 0 "register_operand" "=v") (unspec: [(match_operand:VF2_AVX512VL 1 "nonimmediate_operand" "")] UNSPEC_UNSIGNED_FIX_NOTRUNC))] "TARGET_AVX512DQ && " "vcvtpd2uqq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "fix_trunc2" [(set (match_operand: 0 "register_operand" "=v") (any_fix: (match_operand:VF1_128_256VL 1 "" "")))] "TARGET_AVX512DQ && " "vcvttps2qq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "avx512dq_fix_truncv2sfv2di2" [(set (match_operand:V2DI 0 "register_operand" "=v") (any_fix:V2DI (vec_select:V2SF (match_operand:V4SF 1 "nonimmediate_operand" "vm") (parallel [(const_int 0) (const_int 1)]))))] "TARGET_AVX512DQ && TARGET_AVX512VL" "vcvttps2qq\t{%1, %0|%0, %q1}" [(set_attr 
"type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_expand "fix_truncv2sfv2di2" [(set (match_operand:V2DI 0 "register_operand") (any_fix:V2DI (match_operand:V2SF 1 "register_operand")))] "TARGET_AVX512DQ && TARGET_AVX512VL" { rtx op1 = force_reg (V2SFmode, operands[1]); op1 = lowpart_subreg (V4SFmode, op1, V2SFmode); emit_insn (gen_avx512dq_fix_truncv2sfv2di2 (operands[0], op1)); DONE; }) (define_mode_attr vunpckfixt_mode [(V16SF "V8DI") (V8SF "V4DI") (V4SF "V2DI")]) (define_mode_attr vunpckfixt_model [(V16SF "v8di") (V8SF "v4di") (V4SF "v2di")]) (define_mode_attr vunpckfixt_extract_mode [(V16SF "v16sf") (V8SF "v8sf") (V4SF "v8sf")]) (define_expand "vec_unpack_fix_trunc_lo_" [(match_operand: 0 "register_operand") (any_fix: (match_operand:VF1_AVX512VL 1 "register_operand"))] "TARGET_AVX512DQ" { rtx tem = operands[1]; rtx (*gen) (rtx, rtx); if (mode != V4SFmode) { tem = gen_reg_rtx (mode); emit_insn (gen_vec_extract_lo_ (tem, operands[1])); gen = gen_fix_trunc2; } else gen = gen_avx512dq_fix_truncv2sfv2di2; emit_insn (gen (operands[0], tem)); DONE; }) (define_expand "vec_unpack_fix_trunc_hi_" [(match_operand: 0 "register_operand") (any_fix: (match_operand:VF1_AVX512VL 1 "register_operand"))] "TARGET_AVX512DQ" { rtx tem; rtx (*gen) (rtx, rtx); if (mode != V4SFmode) { tem = gen_reg_rtx (mode); emit_insn (gen_vec_extract_hi_ (tem, operands[1])); gen = gen_fix_trunc2; } else { tem = gen_reg_rtx (V4SFmode); emit_insn (gen_avx_vpermilv4sf (tem, operands[1], GEN_INT (0x4e))); gen = gen_avx512dq_fix_truncv2sfv2di2; } emit_insn (gen (operands[0], tem)); DONE; }) (define_insn "ufix_trunc2" [(set (match_operand: 0 "register_operand" "=v") (unsigned_fix: (match_operand:VF1_128_256VL 1 "nonimmediate_operand" "vm")))] "TARGET_AVX512VL" "vcvttps2udq\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "avx_cvttpd2dq256_2" [(set (match_operand:V8SI 0 "register_operand") (vec_concat:V8SI (fix:V4SI 
(match_operand:V4DF 1 "nonimmediate_operand")) (match_dup 2)))] "TARGET_AVX" "operands[2] = CONST0_RTX (V4SImode);") (define_insn "sse2_cvttpd2dq" [(set (match_operand:V4SI 0 "register_operand" "=v") (vec_concat:V4SI (fix:V2SI (match_operand:V2DF 1 "vector_operand" "vBm")) (const_vector:V2SI [(const_int 0) (const_int 0)])))] "TARGET_SSE2" { if (TARGET_AVX) return "vcvttpd2dq{x}\t{%1, %0|%0, %1}"; else return "cvttpd2dq\t{%1, %0|%0, %1}"; } [(set_attr "type" "ssecvt") (set_attr "amdfam10_decode" "double") (set_attr "athlon_decode" "vector") (set_attr "bdver1_decode" "double") (set_attr "prefix" "maybe_vex") (set_attr "mode" "TI")]) (define_insn "sse2_cvttpd2dq_mask" [(set (match_operand:V4SI 0 "register_operand" "=v") (vec_concat:V4SI (vec_merge:V2SI (fix:V2SI (match_operand:V2DF 1 "nonimmediate_operand" "vm")) (vec_select:V2SI (match_operand:V4SI 2 "nonimm_or_0_operand" "0C") (parallel [(const_int 0) (const_int 1)])) (match_operand:QI 3 "register_operand" "Yk")) (const_vector:V2SI [(const_int 0) (const_int 0)])))] "TARGET_AVX512VL" "vcvttpd2dq{x}\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn "*sse2_cvttpd2dq_mask_1" [(set (match_operand:V4SI 0 "register_operand" "=v") (vec_concat:V4SI (vec_merge:V2SI (fix:V2SI (match_operand:V2DF 1 "nonimmediate_operand" "vm")) (const_vector:V2SI [(const_int 0) (const_int 0)]) (match_operand:QI 2 "register_operand" "Yk")) (const_vector:V2SI [(const_int 0) (const_int 0)])))] "TARGET_AVX512VL" "vcvttpd2dq{x}\t{%1, %0%{%2%}%{z%}|%0%{%2%}%{z%}, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn "sse2_cvtsd2ss" [(set (match_operand:V4SF 0 "register_operand" "=x,x,v") (vec_merge:V4SF (vec_duplicate:V4SF (float_truncate:V2SF (match_operand:V2DF 2 "nonimmediate_operand" "x,m,"))) (match_operand:V4SF 1 "register_operand" "0,0,v") (const_int 1)))] "TARGET_SSE2" "@ cvtsd2ss\t{%2, %0|%0, %2} cvtsd2ss\t{%2, %0|%0, 
%q2} vcvtsd2ss\t{%2, %1, %0|%0, %1, %q2}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssecvt") (set_attr "athlon_decode" "vector,double,*") (set_attr "amdfam10_decode" "vector,double,*") (set_attr "bdver1_decode" "direct,direct,*") (set_attr "btver2_decode" "double,double,double") (set_attr "prefix" "orig,orig,") (set_attr "mode" "SF")]) (define_insn "*sse2_vd_cvtsd2ss" [(set (match_operand:V4SF 0 "register_operand" "=x,x,v") (vec_merge:V4SF (vec_duplicate:V4SF (float_truncate:SF (match_operand:DF 2 "nonimmediate_operand" "x,m,vm"))) (match_operand:V4SF 1 "register_operand" "0,0,v") (const_int 1)))] "TARGET_SSE2" "@ cvtsd2ss\t{%2, %0|%0, %2} cvtsd2ss\t{%2, %0|%0, %2} vcvtsd2ss\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssecvt") (set_attr "athlon_decode" "vector,double,*") (set_attr "amdfam10_decode" "vector,double,*") (set_attr "bdver1_decode" "direct,direct,*") (set_attr "btver2_decode" "double,double,double") (set_attr "prefix" "orig,orig,vex") (set_attr "mode" "SF")]) (define_insn "sse2_cvtss2sd" [(set (match_operand:V2DF 0 "register_operand" "=x,x,v") (vec_merge:V2DF (float_extend:V2DF (vec_select:V2SF (match_operand:V4SF 2 "" "x,m,") (parallel [(const_int 0) (const_int 1)]))) (match_operand:V2DF 1 "register_operand" "0,0,v") (const_int 1)))] "TARGET_SSE2" "@ cvtss2sd\t{%2, %0|%0, %2} cvtss2sd\t{%2, %0|%0, %k2} vcvtss2sd\t{%2, %1, %0|%0, %1, %k2}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssecvt") (set_attr "amdfam10_decode" "vector,double,*") (set_attr "athlon_decode" "direct,direct,*") (set_attr "bdver1_decode" "direct,direct,*") (set_attr "btver2_decode" "double,double,double") (set_attr "prefix" "orig,orig,") (set_attr "mode" "DF")]) (define_insn "*sse2_vd_cvtss2sd" [(set (match_operand:V2DF 0 "register_operand" "=x,x,v") (vec_merge:V2DF (vec_duplicate:V2DF (float_extend:DF (match_operand:SF 2 "nonimmediate_operand" "x,m,vm"))) (match_operand:V2DF 1 "register_operand" "0,0,v") (const_int 1)))] 
"TARGET_SSE2" "@ cvtss2sd\t{%2, %0|%0, %2} cvtss2sd\t{%2, %0|%0, %2} vcvtss2sd\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssecvt") (set_attr "amdfam10_decode" "vector,double,*") (set_attr "athlon_decode" "direct,direct,*") (set_attr "bdver1_decode" "direct,direct,*") (set_attr "btver2_decode" "double,double,double") (set_attr "prefix" "orig,orig,vex") (set_attr "mode" "DF")]) (define_insn "avx512f_cvtpd2ps512" [(set (match_operand:V8SF 0 "register_operand" "=v") (float_truncate:V8SF (match_operand:V8DF 1 "" "")))] "TARGET_AVX512F" "vcvtpd2ps\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "V8SF")]) (define_insn "avx_cvtpd2ps256" [(set (match_operand:V4SF 0 "register_operand" "=v") (float_truncate:V4SF (match_operand:V4DF 1 "nonimmediate_operand" "vm")))] "TARGET_AVX && " "vcvtpd2ps{y}\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "maybe_evex") (set_attr "btver2_decode" "vector") (set_attr "mode" "V4SF")]) (define_expand "sse2_cvtpd2ps" [(set (match_operand:V4SF 0 "register_operand") (vec_concat:V4SF (float_truncate:V2SF (match_operand:V2DF 1 "vector_operand")) (match_dup 2)))] "TARGET_SSE2" "operands[2] = CONST0_RTX (V2SFmode);") (define_expand "sse2_cvtpd2ps_mask" [(set (match_operand:V4SF 0 "register_operand") (vec_concat:V4SF (vec_merge:V2SF (float_truncate:V2SF (match_operand:V2DF 1 "vector_operand")) (vec_select:V2SF (match_operand:V4SF 2 "nonimm_or_0_operand") (parallel [(const_int 0) (const_int 1)])) (match_operand:QI 3 "register_operand")) (match_dup 4)))] "TARGET_SSE2" "operands[4] = CONST0_RTX (V2SFmode);") (define_insn "*sse2_cvtpd2ps" [(set (match_operand:V4SF 0 "register_operand" "=v") (vec_concat:V4SF (float_truncate:V2SF (match_operand:V2DF 1 "vector_operand" "vBm")) (match_operand:V2SF 2 "const0_operand" "C")))] "TARGET_SSE2" { if (TARGET_AVX) return "vcvtpd2ps{x}\t{%1, %0|%0, %1}"; else return "cvtpd2ps\t{%1, %0|%0, %1}"; } [(set_attr "type" "ssecvt") 
(set_attr "amdfam10_decode" "double") (set_attr "athlon_decode" "vector") (set_attr "bdver1_decode" "double") (set_attr "prefix_data16" "1") (set_attr "prefix" "maybe_vex") (set_attr "mode" "V4SF")]) (define_insn "truncv2dfv2sf2" [(set (match_operand:V2SF 0 "register_operand" "=v") (float_truncate:V2SF (match_operand:V2DF 1 "vector_operand" "vBm")))] "TARGET_MMX_WITH_SSE" { if (TARGET_AVX) return "vcvtpd2ps{x}\t{%1, %0|%0, %1}"; else return "cvtpd2ps\t{%1, %0|%0, %1}"; } [(set_attr "type" "ssecvt") (set_attr "amdfam10_decode" "double") (set_attr "athlon_decode" "vector") (set_attr "bdver1_decode" "double") (set_attr "prefix_data16" "1") (set_attr "prefix" "maybe_vex") (set_attr "mode" "V4SF")]) (define_insn "*sse2_cvtpd2ps_mask" [(set (match_operand:V4SF 0 "register_operand" "=v") (vec_concat:V4SF (vec_merge:V2SF (float_truncate:V2SF (match_operand:V2DF 1 "nonimmediate_operand" "vm")) (vec_select:V2SF (match_operand:V4SF 2 "nonimm_or_0_operand" "0C") (parallel [(const_int 0) (const_int 1)])) (match_operand:QI 3 "register_operand" "Yk")) (match_operand:V2SF 4 "const0_operand" "C")))] "TARGET_AVX512VL" "vcvtpd2ps{x}\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "V4SF")]) (define_insn "*sse2_cvtpd2ps_mask_1" [(set (match_operand:V4SF 0 "register_operand" "=v") (vec_concat:V4SF (vec_merge:V2SF (float_truncate:V2SF (match_operand:V2DF 1 "nonimmediate_operand" "vm")) (match_operand:V2SF 3 "const0_operand" "C") (match_operand:QI 2 "register_operand" "Yk")) (match_operand:V2SF 4 "const0_operand" "C")))] "TARGET_AVX512VL" "vcvtpd2ps{x}\t{%1, %0%{%2%}%{z%}|%0%{%2%}%{z%}, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "V4SF")]) ;; For _cvtps2pd insn pattern (define_mode_attr sf2dfmode [(V8DF "V8SF") (V4DF "V4SF")]) (define_mode_attr sf2dfmode_lower [(V8DF "v8sf") (V4DF "v4sf")]) (define_expand "trunc2" [(set (match_operand: 0 "register_operand") (float_truncate: 
(match_operand:VF2_512_256 1 "vector_operand")))] "TARGET_AVX") (define_expand "extend2" [(set (match_operand:VF2_512_256 0 "register_operand") (float_extend:VF2_512_256 (match_operand: 1 "vector_operand")))] "TARGET_AVX") (define_insn "_cvtps2pd" [(set (match_operand:VF2_512_256 0 "register_operand" "=v") (float_extend:VF2_512_256 (match_operand: 1 "" "")))] "TARGET_AVX && && " "vcvtps2pd\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "maybe_vex") (set_attr "mode" "")]) (define_insn "*avx_cvtps2pd256_2" [(set (match_operand:V4DF 0 "register_operand" "=v") (float_extend:V4DF (vec_select:V4SF (match_operand:V8SF 1 "nonimmediate_operand" "vm") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)]))))] "TARGET_AVX" "vcvtps2pd\t{%x1, %0|%0, %x1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "vex") (set_attr "mode" "V4DF")]) (define_insn "vec_unpacks_lo_v16sf" [(set (match_operand:V8DF 0 "register_operand" "=v") (float_extend:V8DF (vec_select:V8SF (match_operand:V16SF 1 "nonimmediate_operand" "vm") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)]))))] "TARGET_AVX512F" "vcvtps2pd\t{%t1, %0|%0, %t1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "V8DF")]) (define_insn "_cvt2mask" [(set (match_operand: 0 "register_operand" "=k") (unspec: [(match_operand:VI12_AVX512VL 1 "register_operand" "v")] UNSPEC_CVTINT2MASK))] "TARGET_AVX512BW" "vpmov2m\t{%1, %0|%0, %1}" [(set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "_cvt2mask" [(set (match_operand: 0 "register_operand" "=k") (unspec: [(match_operand:VI48_AVX512VL 1 "register_operand" "v")] UNSPEC_CVTINT2MASK))] "TARGET_AVX512DQ" "vpmov2m\t{%1, %0|%0, %1}" [(set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "_cvtmask2" [(set (match_operand:VI12_AVX512VL 0 "register_operand") (vec_merge:VI12_AVX512VL (match_dup 2) (match_dup 3) (match_operand: 1 "register_operand")))] 
"TARGET_AVX512BW" { operands[2] = CONSTM1_RTX (mode); operands[3] = CONST0_RTX (mode); }) (define_insn "*_cvtmask2" [(set (match_operand:VI12_AVX512VL 0 "register_operand" "=v") (vec_merge:VI12_AVX512VL (match_operand:VI12_AVX512VL 2 "vector_all_ones_operand") (match_operand:VI12_AVX512VL 3 "const0_operand") (match_operand: 1 "register_operand" "k")))] "TARGET_AVX512BW" "vpmovm2\t{%1, %0|%0, %1}" [(set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "_cvtmask2" [(set (match_operand:VI48_AVX512VL 0 "register_operand") (vec_merge:VI48_AVX512VL (match_dup 2) (match_dup 3) (match_operand: 1 "register_operand")))] "TARGET_AVX512F" "{ operands[2] = CONSTM1_RTX (mode); operands[3] = CONST0_RTX (mode); }") (define_insn "*_cvtmask2" [(set (match_operand:VI48_AVX512VL 0 "register_operand" "=v,v") (vec_merge:VI48_AVX512VL (match_operand:VI48_AVX512VL 2 "vector_all_ones_operand") (match_operand:VI48_AVX512VL 3 "const0_operand") (match_operand: 1 "register_operand" "k,Yk")))] "TARGET_AVX512F" "@ vpmovm2\t{%1, %0|%0, %1} vpternlog\t{$0x81, %0, %0, %0%{%1%}%{z%}|%0%{%1%}%{z%}, %0, %0, 0x81}" [(set_attr "isa" "avx512dq,*") (set_attr "length_immediate" "0,1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "sse2_cvtps2pd" [(set (match_operand:V2DF 0 "register_operand" "=v") (float_extend:V2DF (vec_select:V2SF (match_operand:V4SF 1 "vector_operand" "vm") (parallel [(const_int 0) (const_int 1)]))))] "TARGET_SSE2 && " "%vcvtps2pd\t{%1, %0|%0, %q1}" [(set_attr "type" "ssecvt") (set_attr "amdfam10_decode" "direct") (set_attr "athlon_decode" "double") (set_attr "bdver1_decode" "double") (set_attr "prefix_data16" "0") (set_attr "prefix" "maybe_vex") (set_attr "mode" "V2DF")]) (define_insn "extendv2sfv2df2" [(set (match_operand:V2DF 0 "register_operand" "=v") (float_extend:V2DF (match_operand:V2SF 1 "register_operand" "v")))] "TARGET_MMX_WITH_SSE" "%vcvtps2pd\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "amdfam10_decode" "direct") (set_attr 
;; NOTE(review): this region reads as a whitespace-collapsed extraction of
;; GCC's i386 sse.md.  Angle-bracketed iterator substitutions (<mode>,
;; <ssehalfvecmode>, <sseunpackfltmode>, ...) appear to have been stripped,
;; leaving pattern names ending in "_" and empty mode suffixes such as
;; "(match_operand: 0" -- confirm against the pristine sse.md before editing.

;; Tail of an insn's attribute list; its define_insn starts before this chunk.
"athlon_decode" "double") (set_attr "bdver1_decode" "double") (set_attr "prefix_data16" "0") (set_attr "prefix" "maybe_vex") (set_attr "mode" "V2DF")])

;; Float-extend the high two SF elements of operand 1 to V2DF.  The
;; self-referencing vec_concat with scratch operand 2 moves elements 2,3
;; into the low half first.
(define_expand "vec_unpacks_hi_v4sf" [(set (match_dup 2) (vec_select:V4SF (vec_concat:V8SF (match_dup 2) (match_operand:V4SF 1 "vector_operand")) (parallel [(const_int 6) (const_int 7) (const_int 2) (const_int 3)]))) (set (match_operand:V2DF 0 "register_operand") (float_extend:V2DF (vec_select:V2SF (match_dup 2) (parallel [(const_int 0) (const_int 1)]))))] "TARGET_SSE2" "operands[2] = gen_reg_rtx (V4SFmode);")

;; Float-extend the high four SF elements of a V8SF to V4DF (AVX).
(define_expand "vec_unpacks_hi_v8sf" [(set (match_dup 2) (vec_select:V4SF (match_operand:V8SF 1 "register_operand") (parallel [(const_int 4) (const_int 5) (const_int 6) (const_int 7)]))) (set (match_operand:V4DF 0 "register_operand") (float_extend:V4DF (match_dup 2)))] "TARGET_AVX" "operands[2] = gen_reg_rtx (V4SFmode);")

;; Float-extend the high eight SF elements of a V16SF to V8DF (AVX512F).
(define_expand "vec_unpacks_hi_v16sf" [(set (match_dup 2) (vec_select:V8SF (match_operand:V16SF 1 "register_operand") (parallel [(const_int 8) (const_int 9) (const_int 10) (const_int 11) (const_int 12) (const_int 13) (const_int 14) (const_int 15)]))) (set (match_operand:V8DF 0 "register_operand") (float_extend:V8DF (match_dup 2)))] "TARGET_AVX512F" "operands[2] = gen_reg_rtx (V8SFmode);")

;; Low-half counterparts: no shuffle needed, just extend elements 0..n/2-1.
(define_expand "vec_unpacks_lo_v4sf" [(set (match_operand:V2DF 0 "register_operand") (float_extend:V2DF (vec_select:V2SF (match_operand:V4SF 1 "vector_operand") (parallel [(const_int 0) (const_int 1)]))))] "TARGET_SSE2")

(define_expand "vec_unpacks_lo_v8sf" [(set (match_operand:V4DF 0 "register_operand") (float_extend:V4DF (vec_select:V4SF (match_operand:V8SF 1 "nonimmediate_operand") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)]))))] "TARGET_AVX")

;; Maps an integer vector mode to the float vector mode holding the
;; widened (half-width element count) conversion result.
(define_mode_attr sseunpackfltmode [(V8HI "V4SF") (V4SI "V2DF") (V16HI "V8SF") (V8SI "V4DF") (V32HI "V16SF") (V16SI "V8DF")])

;; int->float unpack expanders: widen half the integer elements, then emit
;; a FLOAT conversion of the widened temporary.
;; NOTE(review): the gen_* callee and mode names here lost their iterator
;; suffixes in extraction (e.g. "gen_vec_unpacks_hi_ (tmp, ...)").
(define_expand "vec_unpacks_float_hi_" [(match_operand: 0 "register_operand") (match_operand:VI2_AVX512F 1 "register_operand")] "TARGET_SSE2" { rtx tmp = gen_reg_rtx (mode); emit_insn (gen_vec_unpacks_hi_ (tmp, operands[1])); emit_insn (gen_rtx_SET (operands[0], gen_rtx_FLOAT (mode, tmp))); DONE; })

(define_expand "vec_unpacks_float_lo_" [(match_operand: 0 "register_operand") (match_operand:VI2_AVX512F 1 "register_operand")] "TARGET_SSE2" { rtx tmp = gen_reg_rtx (mode); emit_insn (gen_vec_unpacks_lo_ (tmp, operands[1])); emit_insn (gen_rtx_SET (operands[0], gen_rtx_FLOAT (mode, tmp))); DONE; })

(define_expand "vec_unpacku_float_hi_" [(match_operand: 0 "register_operand") (match_operand:VI2_AVX512F 1 "register_operand")] "TARGET_SSE2" { rtx tmp = gen_reg_rtx (mode); emit_insn (gen_vec_unpacku_hi_ (tmp, operands[1])); emit_insn (gen_rtx_SET (operands[0], gen_rtx_FLOAT (mode, tmp))); DONE; })

(define_expand "vec_unpacku_float_lo_" [(match_operand: 0 "register_operand") (match_operand:VI2_AVX512F 1 "register_operand")] "TARGET_SSE2" { rtx tmp = gen_reg_rtx (mode); emit_insn (gen_vec_unpacku_lo_ (tmp, operands[1])); emit_insn (gen_rtx_SET (operands[0], gen_rtx_FLOAT (mode, tmp))); DONE; })

;; Signed SI->DF conversions of the high/low half of a V4SI.
(define_expand "vec_unpacks_float_hi_v4si" [(set (match_dup 2) (vec_select:V4SI (match_operand:V4SI 1 "vector_operand") (parallel [(const_int 2) (const_int 3) (const_int 2) (const_int 3)]))) (set (match_operand:V2DF 0 "register_operand") (float:V2DF (vec_select:V2SI (match_dup 2) (parallel [(const_int 0) (const_int 1)]))))] "TARGET_SSE2" "operands[2] = gen_reg_rtx (V4SImode);")

(define_expand "vec_unpacks_float_lo_v4si" [(set (match_operand:V2DF 0 "register_operand") (float:V2DF (vec_select:V2SI (match_operand:V4SI 1 "vector_operand") (parallel [(const_int 0) (const_int 1)]))))] "TARGET_SSE2")

(define_expand "vec_unpacks_float_hi_v8si" [(set (match_dup 2) (vec_select:V4SI (match_operand:V8SI 1 "register_operand") (parallel [(const_int 4) (const_int 5) (const_int 6) (const_int 7)]))) (set (match_operand:V4DF 0 "register_operand") (float:V4DF (match_dup 2)))] "TARGET_AVX" "operands[2] = gen_reg_rtx (V4SImode);")

(define_expand "vec_unpacks_float_lo_v8si" [(set (match_operand:V4DF 0 "register_operand") (float:V4DF (vec_select:V4SI (match_operand:V8SI 1 "nonimmediate_operand") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)]))))] "TARGET_AVX")

(define_expand "vec_unpacks_float_hi_v16si" [(set (match_dup 2) (vec_select:V8SI (match_operand:V16SI 1 "nonimmediate_operand") (parallel [(const_int 8) (const_int 9) (const_int 10) (const_int 11) (const_int 12) (const_int 13) (const_int 14) (const_int 15)]))) (set (match_operand:V8DF 0 "register_operand") (float:V8DF (match_dup 2)))] "TARGET_AVX512F" "operands[2] = gen_reg_rtx (V8SImode);")

(define_expand "vec_unpacks_float_lo_v16si" [(set (match_operand:V8DF 0 "register_operand") (float:V8DF (vec_select:V8SI (match_operand:V16SI 1 "nonimmediate_operand") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)]))))] "TARGET_AVX512F")

;; Unsigned SI->DF: convert as signed, then add 2^32 (TWO32r) to every lane
;; whose signed result came out negative (lt/and/plus sequence below).
(define_expand "vec_unpacku_float_hi_v4si" [(set (match_dup 5) (vec_select:V4SI (match_operand:V4SI 1 "vector_operand") (parallel [(const_int 2) (const_int 3) (const_int 2) (const_int 3)]))) (set (match_dup 6) (float:V2DF (vec_select:V2SI (match_dup 5) (parallel [(const_int 0) (const_int 1)])))) (set (match_dup 7) (lt:V2DF (match_dup 6) (match_dup 3))) (set (match_dup 8) (and:V2DF (match_dup 7) (match_dup 4))) (set (match_operand:V2DF 0 "register_operand") (plus:V2DF (match_dup 6) (match_dup 8)))] "TARGET_SSE2" { REAL_VALUE_TYPE TWO32r; rtx x; int i; real_ldexp (&TWO32r, &dconst1, 32); x = const_double_from_real_value (TWO32r, DFmode); operands[3] = force_reg (V2DFmode, CONST0_RTX (V2DFmode)); operands[4] = force_reg (V2DFmode, ix86_build_const_vector (V2DFmode, 1, x)); operands[5] = gen_reg_rtx (V4SImode); for (i = 6; i < 9; i++) operands[i] = gen_reg_rtx (V2DFmode); })

(define_expand "vec_unpacku_float_lo_v4si" [(set (match_dup 5) (float:V2DF (vec_select:V2SI (match_operand:V4SI 1 "vector_operand") (parallel [(const_int 0) (const_int 1)])))) (set (match_dup 6) (lt:V2DF (match_dup 5) (match_dup 3))) (set (match_dup 7) (and:V2DF (match_dup 6) (match_dup 4))) (set (match_operand:V2DF 0 "register_operand") (plus:V2DF (match_dup 5) (match_dup 7)))] "TARGET_SSE2" { REAL_VALUE_TYPE TWO32r; rtx x; int i; real_ldexp (&TWO32r, &dconst1, 32); x = const_double_from_real_value (TWO32r, DFmode); operands[3] = force_reg (V2DFmode, CONST0_RTX (V2DFmode)); operands[4] = force_reg (V2DFmode, ix86_build_const_vector (V2DFmode, 1, x)); for (i = 5; i < 8; i++) operands[i] = gen_reg_rtx (V2DFmode); })

;; AVX variant of the same 2^32-correction trick, built with explicit
;; emit_insn calls instead of a match pattern.
(define_expand "vec_unpacku_float_hi_v8si" [(match_operand:V4DF 0 "register_operand") (match_operand:V8SI 1 "register_operand")] "TARGET_AVX" { REAL_VALUE_TYPE TWO32r; rtx x, tmp[6]; int i; real_ldexp (&TWO32r, &dconst1, 32); x = const_double_from_real_value (TWO32r, DFmode); tmp[0] = force_reg (V4DFmode, CONST0_RTX (V4DFmode)); tmp[1] = force_reg (V4DFmode, ix86_build_const_vector (V4DFmode, 1, x)); tmp[5] = gen_reg_rtx (V4SImode); for (i = 2; i < 5; i++) tmp[i] = gen_reg_rtx (V4DFmode); emit_insn (gen_vec_extract_hi_v8si (tmp[5], operands[1])); emit_insn (gen_floatv4siv4df2 (tmp[2], tmp[5])); emit_insn (gen_rtx_SET (tmp[3], gen_rtx_LT (V4DFmode, tmp[2], tmp[0]))); emit_insn (gen_andv4df3 (tmp[4], tmp[3], tmp[1])); emit_insn (gen_addv4df3 (operands[0], tmp[2], tmp[4])); DONE; })

;; AVX512F variant: the correction uses a k-mask compare plus a masked add
;; instead of the and/plus pair.
(define_expand "vec_unpacku_float_hi_v16si" [(match_operand:V8DF 0 "register_operand") (match_operand:V16SI 1 "register_operand")] "TARGET_AVX512F" { REAL_VALUE_TYPE TWO32r; rtx k, x, tmp[4]; real_ldexp (&TWO32r, &dconst1, 32); x = const_double_from_real_value (TWO32r, DFmode); tmp[0] = force_reg (V8DFmode, CONST0_RTX (V8DFmode)); tmp[1] = force_reg (V8DFmode, ix86_build_const_vector (V8DFmode, 1, x)); tmp[2] = gen_reg_rtx (V8DFmode); tmp[3] = gen_reg_rtx (V8SImode); k = gen_reg_rtx (QImode); emit_insn (gen_vec_extract_hi_v16si (tmp[3], operands[1])); emit_insn (gen_floatv8siv8df2 (tmp[2], tmp[3])); ix86_expand_mask_vec_cmp (k, LT, tmp[2], tmp[0]); emit_insn (gen_addv8df3_mask (tmp[2], tmp[2], tmp[1], tmp[2], k)); emit_move_insn (operands[0], tmp[2]); DONE; })

(define_expand "vec_unpacku_float_lo_v8si" [(match_operand:V4DF 0 "register_operand") (match_operand:V8SI 1 "nonimmediate_operand")] "TARGET_AVX" { REAL_VALUE_TYPE TWO32r; rtx x, tmp[5]; int i; real_ldexp (&TWO32r, &dconst1, 32); x = const_double_from_real_value (TWO32r, DFmode); tmp[0] = force_reg (V4DFmode, CONST0_RTX (V4DFmode)); tmp[1] = force_reg (V4DFmode, ix86_build_const_vector (V4DFmode, 1, x)); for (i = 2; i < 5; i++) tmp[i] = gen_reg_rtx (V4DFmode); emit_insn (gen_avx_cvtdq2pd256_2 (tmp[2], operands[1])); emit_insn (gen_rtx_SET (tmp[3], gen_rtx_LT (V4DFmode, tmp[2], tmp[0]))); emit_insn (gen_andv4df3 (tmp[4], tmp[3], tmp[1])); emit_insn (gen_addv4df3 (operands[0], tmp[2], tmp[4])); DONE; })

(define_expand "vec_unpacku_float_lo_v16si" [(match_operand:V8DF 0 "register_operand") (match_operand:V16SI 1 "nonimmediate_operand")] "TARGET_AVX512F" { REAL_VALUE_TYPE TWO32r; rtx k, x, tmp[3]; real_ldexp (&TWO32r, &dconst1, 32); x = const_double_from_real_value (TWO32r, DFmode); tmp[0] = force_reg (V8DFmode, CONST0_RTX (V8DFmode)); tmp[1] = force_reg (V8DFmode, ix86_build_const_vector (V8DFmode, 1, x)); tmp[2] = gen_reg_rtx (V8DFmode); k = gen_reg_rtx (QImode); emit_insn (gen_avx512f_cvtdq2pd512_2 (tmp[2], operands[1])); ix86_expand_mask_vec_cmp (k, LT, tmp[2], tmp[0]); emit_insn (gen_addv8df3_mask (tmp[2], tmp[2], tmp[1], tmp[2], k)); emit_move_insn (operands[0], tmp[2]); DONE; })

;; Pack two DF vectors into one SF vector via two float_truncates and a
;; vec_concat.  Iterator suffix stripped here (VF2_512_256 template).
(define_expand "vec_pack_trunc_" [(set (match_dup 3) (float_truncate: (match_operand:VF2_512_256 1 "nonimmediate_operand"))) (set (match_dup 4) (float_truncate: (match_operand:VF2_512_256 2 "nonimmediate_operand"))) (set (match_operand: 0 "register_operand") (vec_concat: (match_dup 3) (match_dup 4)))] "TARGET_AVX" { operands[3] = gen_reg_rtx (mode); operands[4] = gen_reg_rtx (mode); })

;; Continues on the next source line with the "vec_pack_trunc_v2df" name.
(define_expand
;; Continuation: this "(define_expand" opener sits at the end of the
;; previous source line.  Pack two V2DF into one V4SF; with AVX (and not
;; preferring 128-bit) do one 256-bit concat+cvtpd2ps, otherwise two
;; cvtpd2ps plus a movlhps merge.
"vec_pack_trunc_v2df" [(match_operand:V4SF 0 "register_operand") (match_operand:V2DF 1 "vector_operand") (match_operand:V2DF 2 "vector_operand")] "TARGET_SSE2" { rtx tmp0, tmp1; if (TARGET_AVX && !TARGET_PREFER_AVX128 && optimize_insn_for_speed_p ()) { tmp0 = gen_reg_rtx (V4DFmode); tmp1 = force_reg (V2DFmode, operands[1]); emit_insn (gen_avx_vec_concatv4df (tmp0, tmp1, operands[2])); emit_insn (gen_avx_cvtpd2ps256 (operands[0], tmp0)); } else { tmp0 = gen_reg_rtx (V4SFmode); tmp1 = gen_reg_rtx (V4SFmode); emit_insn (gen_sse2_cvtpd2ps (tmp0, operands[1])); emit_insn (gen_sse2_cvtpd2ps (tmp1, operands[2])); emit_insn (gen_sse_movlhps (operands[0], tmp0, tmp1)); } DONE; })

;; Truncating signed DF->SI pack: convert each input then concatenate.
(define_expand "vec_pack_sfix_trunc_v8df" [(match_operand:V16SI 0 "register_operand") (match_operand:V8DF 1 "nonimmediate_operand") (match_operand:V8DF 2 "nonimmediate_operand")] "TARGET_AVX512F" { rtx r1, r2; r1 = gen_reg_rtx (V8SImode); r2 = gen_reg_rtx (V8SImode); emit_insn (gen_fix_truncv8dfv8si2 (r1, operands[1])); emit_insn (gen_fix_truncv8dfv8si2 (r2, operands[2])); emit_insn (gen_avx_vec_concatv16si (operands[0], r1, r2)); DONE; })

(define_expand "vec_pack_sfix_trunc_v4df" [(match_operand:V8SI 0 "register_operand") (match_operand:V4DF 1 "nonimmediate_operand") (match_operand:V4DF 2 "nonimmediate_operand")] "TARGET_AVX" { rtx r1, r2; r1 = gen_reg_rtx (V4SImode); r2 = gen_reg_rtx (V4SImode); emit_insn (gen_fix_truncv4dfv4si2 (r1, operands[1])); emit_insn (gen_fix_truncv4dfv4si2 (r2, operands[2])); emit_insn (gen_avx_vec_concatv8si (operands[0], r1, r2)); DONE; })

;; 128-bit case: two cvttpd2dq results each occupy the low 64 bits, so an
;; interleave-low of the DI views merges them into one V4SI.
(define_expand "vec_pack_sfix_trunc_v2df" [(match_operand:V4SI 0 "register_operand") (match_operand:V2DF 1 "vector_operand") (match_operand:V2DF 2 "vector_operand")] "TARGET_SSE2" { rtx tmp0, tmp1, tmp2; if (TARGET_AVX && !TARGET_PREFER_AVX128 && optimize_insn_for_speed_p ()) { tmp0 = gen_reg_rtx (V4DFmode); tmp1 = force_reg (V2DFmode, operands[1]); emit_insn (gen_avx_vec_concatv4df (tmp0, tmp1, operands[2])); emit_insn (gen_fix_truncv4dfv4si2 (operands[0], tmp0)); } else { tmp0 = gen_reg_rtx (V4SImode); tmp1 = gen_reg_rtx (V4SImode); tmp2 = gen_reg_rtx (V2DImode); emit_insn (gen_sse2_cvttpd2dq (tmp0, operands[1])); emit_insn (gen_sse2_cvttpd2dq (tmp1, operands[2])); emit_insn (gen_vec_interleave_lowv2di (tmp2, gen_lowpart (V2DImode, tmp0), gen_lowpart (V2DImode, tmp1))); emit_move_insn (operands[0], gen_lowpart (V4SImode, tmp2)); } DONE; })

;; Maps a DF vector mode to the SI vector mode of its packed result.
(define_mode_attr ssepackfltmode [(V8DF "V16SI") (V4DF "V8SI") (V2DF "V4SI")])

;; Unsigned DF->SI pack.  AVX512F has a native unsigned convert; otherwise
;; bias the inputs into signed range (ix86_expand_adjust_ufix_to_sfix_si),
;; do the signed pack, and XOR in the sign-flip correction vector.
;; NOTE(review): several <mode>/<ssepackfltmode> suffixes stripped here.
(define_expand "vec_pack_ufix_trunc_" [(match_operand: 0 "register_operand") (match_operand:VF2 1 "register_operand") (match_operand:VF2 2 "register_operand")] "TARGET_SSE2" { if (mode == V8DFmode) { rtx r1, r2; r1 = gen_reg_rtx (V8SImode); r2 = gen_reg_rtx (V8SImode); emit_insn (gen_fixuns_truncv8dfv8si2 (r1, operands[1])); emit_insn (gen_fixuns_truncv8dfv8si2 (r2, operands[2])); emit_insn (gen_avx_vec_concatv16si (operands[0], r1, r2)); } else { rtx tmp[7]; tmp[0] = ix86_expand_adjust_ufix_to_sfix_si (operands[1], &tmp[2]); tmp[1] = ix86_expand_adjust_ufix_to_sfix_si (operands[2], &tmp[3]); tmp[4] = gen_reg_rtx (mode); emit_insn (gen_vec_pack_sfix_trunc_ (tmp[4], tmp[0], tmp[1])); if (mode == V4SImode || TARGET_AVX2) { tmp[5] = gen_reg_rtx (mode); ix86_expand_vec_extract_even_odd (tmp[5], tmp[2], tmp[3], 0); } else { tmp[5] = gen_reg_rtx (V8SFmode); ix86_expand_vec_extract_even_odd (tmp[5], gen_lowpart (V8SFmode, tmp[2]), gen_lowpart (V8SFmode, tmp[3]), 0); tmp[5] = gen_lowpart (V8SImode, tmp[5]); } tmp[6] = expand_simple_binop (mode, XOR, tmp[4], tmp[5], operands[0], 0, OPTAB_DIRECT); if (tmp[6] != operands[0]) emit_move_insn (operands[0], tmp[6]); } DONE; })

;; Rounding (non-truncating) DF->SI packs, via cvtpd2dq.
(define_expand "avx512f_vec_pack_sfix_v8df" [(match_operand:V16SI 0 "register_operand") (match_operand:V8DF 1 "nonimmediate_operand") (match_operand:V8DF 2 "nonimmediate_operand")] "TARGET_AVX512F" { rtx r1, r2; r1 = gen_reg_rtx (V8SImode); r2 = gen_reg_rtx (V8SImode); emit_insn (gen_avx512f_cvtpd2dq512 (r1, operands[1])); emit_insn (gen_avx512f_cvtpd2dq512 (r2, operands[2])); emit_insn (gen_avx_vec_concatv16si (operands[0], r1, r2)); DONE; })

(define_expand "vec_pack_sfix_v4df" [(match_operand:V8SI 0 "register_operand") (match_operand:V4DF 1 "nonimmediate_operand") (match_operand:V4DF 2 "nonimmediate_operand")] "TARGET_AVX" { rtx r1, r2; r1 = gen_reg_rtx (V4SImode); r2 = gen_reg_rtx (V4SImode); emit_insn (gen_avx_cvtpd2dq256 (r1, operands[1])); emit_insn (gen_avx_cvtpd2dq256 (r2, operands[2])); emit_insn (gen_avx_vec_concatv8si (operands[0], r1, r2)); DONE; })

(define_expand "vec_pack_sfix_v2df" [(match_operand:V4SI 0 "register_operand") (match_operand:V2DF 1 "vector_operand") (match_operand:V2DF 2 "vector_operand")] "TARGET_SSE2" { rtx tmp0, tmp1, tmp2; if (TARGET_AVX && !TARGET_PREFER_AVX128 && optimize_insn_for_speed_p ()) { tmp0 = gen_reg_rtx (V4DFmode); tmp1 = force_reg (V2DFmode, operands[1]); emit_insn (gen_avx_vec_concatv4df (tmp0, tmp1, operands[2])); emit_insn (gen_avx_cvtpd2dq256 (operands[0], tmp0)); } else { tmp0 = gen_reg_rtx (V4SImode); tmp1 = gen_reg_rtx (V4SImode); tmp2 = gen_reg_rtx (V2DImode); emit_insn (gen_sse2_cvtpd2dq (tmp0, operands[1])); emit_insn (gen_sse2_cvtpd2dq (tmp1, operands[2])); emit_insn (gen_vec_interleave_lowv2di (tmp2, gen_lowpart (V2DImode, tmp0), gen_lowpart (V2DImode, tmp1))); emit_move_insn (operands[0], gen_lowpart (V4SImode, tmp2)); } DONE; })

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Parallel single-precision floating point element swizzling
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;; Expander wrapper that legitimizes operands before emitting sse_movhlps.
;; The C body continues on the next source line.
(define_expand "sse_movhlps_exp" [(set (match_operand:V4SF 0 "nonimmediate_operand") (vec_select:V4SF (vec_concat:V8SF (match_operand:V4SF 1 "nonimmediate_operand") (match_operand:V4SF 2 "nonimmediate_operand")) (parallel [(const_int 6) (const_int 7) (const_int 2) (const_int 3)])))] "TARGET_SSE" { rtx dst = ix86_fixup_binary_operands (UNKNOWN, V4SFmode, operands); emit_insn
;; Continuation of sse_movhlps_exp's C body from the previous source line.
(gen_sse_movhlps (dst, operands[1], operands[2])); /* Fix up the destination if needed. */ if (dst != operands[0]) emit_move_insn (operands[0], dst); DONE; })

;; NOTE(review): multi-alternative "@ ..." templates in this region have had
;; their internal newlines collapsed to spaces by extraction; each
;; alternative normally sits on its own line.  Preserved verbatim here.
(define_insn "sse_movhlps" [(set (match_operand:V4SF 0 "nonimmediate_operand" "=x,v,x,v,m") (vec_select:V4SF (vec_concat:V8SF (match_operand:V4SF 1 "nonimmediate_operand" " 0,v,0,v,0") (match_operand:V4SF 2 "nonimmediate_operand" " x,v,o,o,v")) (parallel [(const_int 6) (const_int 7) (const_int 2) (const_int 3)])))] "TARGET_SSE && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "@ movhlps\t{%2, %0|%0, %2} vmovhlps\t{%2, %1, %0|%0, %1, %2} movlps\t{%H2, %0|%0, %H2} vmovlps\t{%H2, %1, %0|%0, %1, %H2} %vmovhps\t{%2, %0|%q0, %2}" [(set_attr "isa" "noavx,avx,noavx,avx,*") (set_attr "type" "ssemov") (set_attr "prefix" "orig,maybe_evex,orig,maybe_evex,maybe_vex") (set_attr "mode" "V4SF,V4SF,V2SF,V2SF,V2SF")])

;; Expander wrapper for sse_movlhps, mirroring sse_movhlps_exp above.
(define_expand "sse_movlhps_exp" [(set (match_operand:V4SF 0 "nonimmediate_operand") (vec_select:V4SF (vec_concat:V8SF (match_operand:V4SF 1 "nonimmediate_operand") (match_operand:V4SF 2 "nonimmediate_operand")) (parallel [(const_int 0) (const_int 1) (const_int 4) (const_int 5)])))] "TARGET_SSE" { rtx dst = ix86_fixup_binary_operands (UNKNOWN, V4SFmode, operands); emit_insn (gen_sse_movlhps (dst, operands[1], operands[2])); /* Fix up the destination if needed. */ if (dst != operands[0]) emit_move_insn (operands[0], dst); DONE; })

(define_insn "sse_movlhps" [(set (match_operand:V4SF 0 "nonimmediate_operand" "=x,v,x,v,o") (vec_select:V4SF (vec_concat:V8SF (match_operand:V4SF 1 "nonimmediate_operand" " 0,v,0,v,0") (match_operand:V4SF 2 "nonimmediate_operand" " x,v,m,v,v")) (parallel [(const_int 0) (const_int 1) (const_int 4) (const_int 5)])))] "TARGET_SSE && ix86_binary_operator_ok (UNKNOWN, V4SFmode, operands)" "@ movlhps\t{%2, %0|%0, %2} vmovlhps\t{%2, %1, %0|%0, %1, %2} movhps\t{%2, %0|%0, %q2} vmovhps\t{%2, %1, %0|%0, %1, %q2} %vmovlps\t{%2, %H0|%H0, %2}" [(set_attr "isa" "noavx,avx,noavx,avx,*") (set_attr "type" "ssemov") (set_attr "prefix" "orig,maybe_evex,orig,maybe_evex,maybe_vex") (set_attr "mode" "V4SF,V4SF,V2SF,V2SF,V2SF")])

;; 512-bit unpckhps: interleaves the high pair of each 128-bit lane.
(define_insn "avx512f_unpckhps512" [(set (match_operand:V16SF 0 "register_operand" "=v") (vec_select:V16SF (vec_concat:V32SF (match_operand:V16SF 1 "register_operand" "v") (match_operand:V16SF 2 "nonimmediate_operand" "vm")) (parallel [(const_int 2) (const_int 18) (const_int 3) (const_int 19) (const_int 6) (const_int 22) (const_int 7) (const_int 23) (const_int 10) (const_int 26) (const_int 11) (const_int 27) (const_int 14) (const_int 30) (const_int 15) (const_int 31)])))] "TARGET_AVX512F" "vunpckhps\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "V16SF")])

;; Recall that the 256-bit unpck insns only shuffle within their lanes.
;; NOTE(review): the "TARGET_AVX && " conditions below lost a stripped
;; iterator condition (presumably <mask_avx512vl_condition>) -- verify.
(define_insn "avx_unpckhps256" [(set (match_operand:V8SF 0 "register_operand" "=v") (vec_select:V8SF (vec_concat:V16SF (match_operand:V8SF 1 "register_operand" "v") (match_operand:V8SF 2 "nonimmediate_operand" "vm")) (parallel [(const_int 2) (const_int 10) (const_int 3) (const_int 11) (const_int 6) (const_int 14) (const_int 7) (const_int 15)])))] "TARGET_AVX && " "vunpckhps\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "vex") (set_attr "mode" "V8SF")])

;; True cross-lane high interleave built from two in-lane unpcks plus a
;; final lane-recombining select.
(define_expand "vec_interleave_highv8sf" [(set (match_dup 3) (vec_select:V8SF (vec_concat:V16SF (match_operand:V8SF 1 "register_operand") (match_operand:V8SF 2 "nonimmediate_operand")) (parallel [(const_int 0) (const_int 8) (const_int 1) (const_int 9) (const_int 4) (const_int 12) (const_int 5) (const_int 13)]))) (set (match_dup 4) (vec_select:V8SF (vec_concat:V16SF (match_dup 1) (match_dup 2)) (parallel [(const_int 2) (const_int 10) (const_int 3) (const_int 11) (const_int 6) (const_int 14) (const_int 7) (const_int 15)]))) (set (match_operand:V8SF 0 "register_operand") (vec_select:V8SF (vec_concat:V16SF (match_dup 3) (match_dup 4)) (parallel [(const_int 4) (const_int 5) (const_int 6) (const_int 7) (const_int 12) (const_int 13) (const_int 14) (const_int 15)])))] "TARGET_AVX" { operands[3] = gen_reg_rtx (V8SFmode); operands[4] = gen_reg_rtx (V8SFmode); })

(define_insn "vec_interleave_highv4sf" [(set (match_operand:V4SF 0 "register_operand" "=x,v") (vec_select:V4SF (vec_concat:V8SF (match_operand:V4SF 1 "register_operand" "0,v") (match_operand:V4SF 2 "vector_operand" "xBm,vm")) (parallel [(const_int 2) (const_int 6) (const_int 3) (const_int 7)])))] "TARGET_SSE && " "@ unpckhps\t{%2, %0|%0, %2} vunpckhps\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sselog") (set_attr "prefix" "orig,vex") (set_attr "mode" "V4SF")])

;; 512-bit unpcklps: interleaves the low pair of each 128-bit lane.
(define_insn "avx512f_unpcklps512" [(set (match_operand:V16SF 0 "register_operand" "v") (match_operand:V16SF 2 "nonimmediate_operand" "vm")) (parallel [(const_int 0) (const_int 16) (const_int 1) (const_int 17) (const_int 4) (const_int 20) (const_int 5) (const_int 21) (const_int 8) (const_int 24) (const_int 9) (const_int 25) (const_int 12) (const_int 28) (const_int 13) (const_int 29)])))] "TARGET_AVX512F" "vunpcklps\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "V16SF")])

;; Recall that the 256-bit unpck insns only shuffle within their lanes.
(define_insn "avx_unpcklps256" [(set (match_operand:V8SF 0 "register_operand" "=v") (vec_select:V8SF (vec_concat:V16SF (match_operand:V8SF 1 "register_operand" "v") (match_operand:V8SF 2 "nonimmediate_operand" "vm")) (parallel [(const_int 0) (const_int 8) (const_int 1) (const_int 9) (const_int 4) (const_int 12) (const_int 5) (const_int 13)])))] "TARGET_AVX && " "vunpcklps\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "vex") (set_attr "mode" "V8SF")])

;; Masked 128-bit unpcklps (AVX512VL write-mask via operand 4, merge/zero
;; source in operand 3).
(define_insn "unpcklps128_mask" [(set (match_operand:V4SF 0 "register_operand" "=v") (vec_merge:V4SF (vec_select:V4SF (vec_concat:V8SF (match_operand:V4SF 1 "register_operand" "v") (match_operand:V4SF 2 "nonimmediate_operand" "vm")) (parallel [(const_int 0) (const_int 4) (const_int 1) (const_int 5)])) (match_operand:V4SF 3 "nonimm_or_0_operand" "0C") (match_operand:QI 4 "register_operand" "Yk")))] "TARGET_AVX512VL" "vunpcklps\t{%2, %1, %0%{%4%}%N3|%0%{%4%}%N3, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "V4SF")])

;; Cross-lane low interleave, same two-unpck-plus-recombine scheme as
;; vec_interleave_highv8sf but keeping lanes 0-3 / 8-11 of the pair.
(define_expand "vec_interleave_lowv8sf" [(set (match_dup 3) (vec_select:V8SF (vec_concat:V16SF (match_operand:V8SF 1 "register_operand") (match_operand:V8SF 2 "nonimmediate_operand")) (parallel [(const_int 0) (const_int 8) (const_int 1) (const_int 9) (const_int 4) (const_int 12) (const_int 5) (const_int 13)]))) (set (match_dup 4) (vec_select:V8SF (vec_concat:V16SF (match_dup 1) (match_dup 2)) (parallel [(const_int 2) (const_int 10) (const_int 3) (const_int 11) (const_int 6) (const_int 14) (const_int 7) (const_int 15)]))) (set (match_operand:V8SF 0 "register_operand") (vec_select:V8SF (vec_concat:V16SF (match_dup 3) (match_dup 4)) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 8) (const_int 9) (const_int 10) (const_int 11)])))] "TARGET_AVX" { operands[3] = gen_reg_rtx (V8SFmode); operands[4] = gen_reg_rtx (V8SFmode); })

(define_insn "vec_interleave_lowv4sf" [(set (match_operand:V4SF 0 "register_operand" "=x,v") (vec_select:V4SF (vec_concat:V8SF (match_operand:V4SF 1 "register_operand" "0,v") (match_operand:V4SF 2 "vector_operand" "xBm,vm")) (parallel [(const_int 0) (const_int 4) (const_int 1) (const_int 5)])))] "TARGET_SSE" "@ unpcklps\t{%2, %0|%0, %2} vunpcklps\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sselog") (set_attr "prefix" "orig,maybe_evex") (set_attr "mode" "V4SF")])

;; These are modeled with the same vec_concat as the others so that we
;; capture users of shufps that can use the new instructions
(define_insn "avx_movshdup256" [(set (match_operand:V8SF 0 "register_operand" "=v") (vec_select:V8SF (vec_concat:V16SF (match_operand:V8SF 1 "nonimmediate_operand" "vm") (match_dup 1)) (parallel [(const_int 1) (const_int 1) (const_int 3) (const_int 3) (const_int 5) (const_int 5) (const_int 7) (const_int 7)])))] "TARGET_AVX && " "vmovshdup\t{%1, %0|%0, %1}" [(set_attr "type" "sse") (set_attr "prefix" "vex") (set_attr "mode" "V8SF")])

(define_insn "sse3_movshdup" [(set (match_operand:V4SF 0 "register_operand" "=v") (vec_select:V4SF (vec_concat:V8SF (match_operand:V4SF 1 "vector_operand" "vBm") (match_dup 1)) (parallel [(const_int 1) (const_int 1) (const_int 7) (const_int 7)])))] "TARGET_SSE3 && " "%vmovshdup\t{%1, %0|%0, %1}" [(set_attr "type" "sse") (set_attr "prefix_rep" "1") (set_attr "prefix" "maybe_vex") (set_attr "mode" "V4SF")])

(define_insn "avx512f_movshdup512" [(set (match_operand:V16SF 0 "register_operand" "=v") (vec_select:V16SF (vec_concat:V32SF (match_operand:V16SF 1 "nonimmediate_operand" "vm") (match_dup 1)) (parallel [(const_int 1) (const_int 1) (const_int 3) (const_int 3) (const_int 5) (const_int 5) (const_int 7) (const_int 7) (const_int 9) (const_int 9) (const_int 11) (const_int 11) (const_int 13) (const_int 13) (const_int 15) (const_int 15)])))] "TARGET_AVX512F" "vmovshdup\t{%1, %0|%0, %1}" [(set_attr "type" "sse") (set_attr "prefix" "evex") (set_attr "mode" "V16SF")])

(define_insn "avx_movsldup256" [(set (match_operand:V8SF 0 "register_operand" "=v") (vec_select:V8SF (vec_concat:V16SF (match_operand:V8SF 1 "nonimmediate_operand" "vm") (match_dup 1)) (parallel [(const_int 0) (const_int 0) (const_int 2) (const_int 2) (const_int 4) (const_int 4) (const_int 6) (const_int 6)])))] "TARGET_AVX && " "vmovsldup\t{%1, %0|%0, %1}" [(set_attr "type" "sse") (set_attr "prefix" "vex") (set_attr "mode" "V8SF")])

(define_insn "sse3_movsldup" [(set (match_operand:V4SF 0 "register_operand" "=v") (vec_select:V4SF (vec_concat:V8SF (match_operand:V4SF 1 "vector_operand" "vBm") (match_dup 1)) (parallel [(const_int 0) (const_int 0) (const_int 6) (const_int 6)])))] "TARGET_SSE3 && " "%vmovsldup\t{%1, %0|%0, %1}" [(set_attr "type" "sse") (set_attr "prefix_rep" "1") (set_attr "prefix" "maybe_vex") (set_attr "mode" "V4SF")])

(define_insn "avx512f_movsldup512" [(set (match_operand:V16SF 0 "register_operand" "=v") (vec_select:V16SF (vec_concat:V32SF (match_operand:V16SF 1 "nonimmediate_operand" "vm") (match_dup 1)) (parallel [(const_int 0) (const_int 0) (const_int 2) (const_int 2) (const_int 4) (const_int 4) (const_int 6) (const_int 6) (const_int 8) (const_int 8) (const_int 10) (const_int 10) (const_int 12) (const_int 12) (const_int 14) (const_int 14)])))] "TARGET_AVX512F" "vmovsldup\t{%1, %0|%0, %1}" [(set_attr "type" "sse") (set_attr "prefix" "evex") (set_attr "mode" "V16SF")])

;; Decodes the 8-bit shufps immediate into eight per-element selectors for
;; avx_shufps256_1; operand list continues on the next source line.
(define_expand "avx_shufps256" [(match_operand:V8SF 0 "register_operand") (match_operand:V8SF 1
;; Continuation of avx_shufps256's operand list and preparation body from
;; the previous source line: splits the immediate into per-element indices.
"register_operand") (match_operand:V8SF 2 "nonimmediate_operand") (match_operand:SI 3 "const_int_operand")] "TARGET_AVX" { int mask = INTVAL (operands[3]); emit_insn (gen_avx_shufps256_1 (operands[0], operands[1], operands[2], GEN_INT ((mask >> 0) & 3), GEN_INT ((mask >> 2) & 3), GEN_INT (((mask >> 4) & 3) + 8), GEN_INT (((mask >> 6) & 3) + 8), GEN_INT (((mask >> 0) & 3) + 4), GEN_INT (((mask >> 2) & 3) + 4), GEN_INT (((mask >> 4) & 3) + 12), GEN_INT (((mask >> 6) & 3) + 12) )); DONE; })

;; One bit in mask selects 2 elements.
;; NOTE(review): the condition below reads "TARGET_AVX && && (..." -- a
;; stripped iterator condition clearly sat between the two "&&"; restore it
;; from the pristine sse.md.  The insn re-encodes the eight selectors back
;; into the vshufps immediate.
(define_insn "avx_shufps256_1" [(set (match_operand:V8SF 0 "register_operand" "=v") (vec_select:V8SF (vec_concat:V16SF (match_operand:V8SF 1 "register_operand" "v") (match_operand:V8SF 2 "nonimmediate_operand" "vm")) (parallel [(match_operand 3 "const_0_to_3_operand" ) (match_operand 4 "const_0_to_3_operand" ) (match_operand 5 "const_8_to_11_operand" ) (match_operand 6 "const_8_to_11_operand" ) (match_operand 7 "const_4_to_7_operand" ) (match_operand 8 "const_4_to_7_operand" ) (match_operand 9 "const_12_to_15_operand") (match_operand 10 "const_12_to_15_operand")])))] "TARGET_AVX && && (INTVAL (operands[3]) == (INTVAL (operands[7]) - 4) && INTVAL (operands[4]) == (INTVAL (operands[8]) - 4) && INTVAL (operands[5]) == (INTVAL (operands[9]) - 4) && INTVAL (operands[6]) == (INTVAL (operands[10]) - 4))" { int mask; mask = INTVAL (operands[3]); mask |= INTVAL (operands[4]) << 2; mask |= (INTVAL (operands[5]) - 8) << 4; mask |= (INTVAL (operands[6]) - 8) << 6; operands[3] = GEN_INT (mask); return "vshufps\t{%3, %2, %1, %0|%0, %1, %2, %3}"; } [(set_attr "type" "sseshuf") (set_attr "length_immediate" "1") (set_attr "prefix" "") (set_attr "mode" "V8SF")])

;; 128-bit shufps expander: decode the immediate into four selectors.
(define_expand "sse_shufps" [(match_operand:V4SF 0 "register_operand") (match_operand:V4SF 1 "register_operand") (match_operand:V4SF 2 "vector_operand") (match_operand:SI 3 "const_int_operand")] "TARGET_SSE" { int mask = INTVAL (operands[3]); emit_insn (gen_sse_shufps_v4sf (operands[0], operands[1], operands[2], GEN_INT ((mask >> 0) & 3), GEN_INT ((mask >> 2) & 3), GEN_INT (((mask >> 4) & 3) + 4), GEN_INT (((mask >> 6) & 3) + 4) )); DONE; })

;; Masked 128-bit shufps (AVX512VL): re-encode selectors into the immediate
;; and apply the k-mask from operand 8 with merge/zero source operand 7.
(define_insn "sse_shufps_v4sf_mask" [(set (match_operand:V4SF 0 "register_operand" "=v") (vec_merge:V4SF (vec_select:V4SF (vec_concat:V8SF (match_operand:V4SF 1 "register_operand" "v") (match_operand:V4SF 2 "nonimmediate_operand" "vm")) (parallel [(match_operand 3 "const_0_to_3_operand") (match_operand 4 "const_0_to_3_operand") (match_operand 5 "const_4_to_7_operand") (match_operand 6 "const_4_to_7_operand")])) (match_operand:V4SF 7 "nonimm_or_0_operand" "0C") (match_operand:QI 8 "register_operand" "Yk")))] "TARGET_AVX512VL" { int mask = 0; mask |= INTVAL (operands[3]) << 0; mask |= INTVAL (operands[4]) << 2; mask |= (INTVAL (operands[5]) - 4) << 4; mask |= (INTVAL (operands[6]) - 4) << 6; operands[3] = GEN_INT (mask); return "vshufps\t{%3, %2, %1, %0%{%8%}%N7|%0%{%8%}%N7, %1, %2, %3}"; } [(set_attr "type" "sseshuf") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "V4SF")])

;; Generic 128-bit shufps over VI4F_128 (int and float 4-element modes).
;; NOTE(review): name suffix and the vec_concat mode lost their iterator
;; text ("sse_shufps_", "(vec_concat: ").
(define_insn "sse_shufps_" [(set (match_operand:VI4F_128 0 "register_operand" "=x,v") (vec_select:VI4F_128 (vec_concat: (match_operand:VI4F_128 1 "register_operand" "0,v") (match_operand:VI4F_128 2 "vector_operand" "xBm,vm")) (parallel [(match_operand 3 "const_0_to_3_operand") (match_operand 4 "const_0_to_3_operand") (match_operand 5 "const_4_to_7_operand") (match_operand 6 "const_4_to_7_operand")])))] "TARGET_SSE" { int mask = 0; mask |= INTVAL (operands[3]) << 0; mask |= INTVAL (operands[4]) << 2; mask |= (INTVAL (operands[5]) - 4) << 4; mask |= (INTVAL (operands[6]) - 4) << 6; operands[3] = GEN_INT (mask); switch (which_alternative) { case 0: return "shufps\t{%3, %2, %0|%0, %2, %3}"; case 1: return "vshufps\t{%3, %2, %1, %0|%0, %1, %2, %3}"; default: gcc_unreachable (); } } [(set_attr "isa" "noavx,avx") (set_attr "type" "sseshuf") (set_attr "length_immediate" "1") (set_attr "prefix" "orig,maybe_evex") (set_attr "mode" "V4SF")])

;; Store/extract the high two SF elements of a V4SF.
(define_insn "sse_storehps" [(set (match_operand:V2SF 0 "nonimmediate_operand" "=m,v,v") (vec_select:V2SF (match_operand:V4SF 1 "nonimmediate_operand" "v,v,o") (parallel [(const_int 2) (const_int 3)])))] "TARGET_SSE && !(MEM_P (operands[0]) && MEM_P (operands[1]))" "@ %vmovhps\t{%1, %0|%q0, %1} %vmovhlps\t{%1, %d0|%d0, %1} %vmovlps\t{%H1, %d0|%d0, %H1}" [(set_attr "type" "ssemov") (set_attr "prefix" "maybe_vex") (set_attr "mode" "V2SF,V4SF,V2SF")])

;; Expander wrapper that legitimizes operands before emitting sse_loadhps.
(define_expand "sse_loadhps_exp" [(set (match_operand:V4SF 0 "nonimmediate_operand") (vec_concat:V4SF (vec_select:V2SF (match_operand:V4SF 1 "nonimmediate_operand") (parallel [(const_int 0) (const_int 1)])) (match_operand:V2SF 2 "nonimmediate_operand")))] "TARGET_SSE" { rtx dst = ix86_fixup_binary_operands (UNKNOWN, V4SFmode, operands); emit_insn (gen_sse_loadhps (dst, operands[1], operands[2])); /* Fix up the destination if needed. */ if (dst != operands[0]) emit_move_insn (operands[0], dst); DONE; })

;; Replace the high half of operand 1 with operand 2.
(define_insn "sse_loadhps" [(set (match_operand:V4SF 0 "nonimmediate_operand" "=x,v,x,v,o") (vec_concat:V4SF (vec_select:V2SF (match_operand:V4SF 1 "nonimmediate_operand" " 0,v,0,v,0") (parallel [(const_int 0) (const_int 1)])) (match_operand:V2SF 2 "nonimmediate_operand" " m,m,x,v,v")))] "TARGET_SSE" "@ movhps\t{%2, %0|%0, %q2} vmovhps\t{%2, %1, %0|%0, %1, %q2} movlhps\t{%2, %0|%0, %2} vmovlhps\t{%2, %1, %0|%0, %1, %2} %vmovlps\t{%2, %H0|%H0, %2}" [(set_attr "isa" "noavx,avx,noavx,avx,*") (set_attr "type" "ssemov") (set_attr "prefix" "orig,maybe_evex,orig,maybe_evex,maybe_vex") (set_attr "mode" "V2SF,V2SF,V4SF,V4SF,V2SF")])

;; Store/extract the low two SF elements of a V4SF.
(define_insn "sse_storelps" [(set (match_operand:V2SF 0 "nonimmediate_operand" "=m,v,v") (vec_select:V2SF (match_operand:V4SF 1 "nonimmediate_operand" " v,v,m") (parallel [(const_int 0) (const_int 1)])))] "TARGET_SSE && !(MEM_P (operands[0]) && MEM_P (operands[1]))" "@ %vmovlps\t{%1, %0|%q0, %1} %vmovaps\t{%1, %0|%0, %1} %vmovlps\t{%1, %d0|%d0, %q1}" [(set_attr "type" "ssemov") (set_attr "prefix" "maybe_vex") (set_attr "mode" "V2SF,V4SF,V2SF")])

;; Expander wrapper that legitimizes operands before emitting sse_loadlps.
(define_expand "sse_loadlps_exp" [(set (match_operand:V4SF 0 "nonimmediate_operand") (vec_concat:V4SF (match_operand:V2SF 2 "nonimmediate_operand") (vec_select:V2SF (match_operand:V4SF 1 "nonimmediate_operand") (parallel [(const_int 2) (const_int 3)]))))] "TARGET_SSE" { rtx dst = ix86_fixup_binary_operands (UNKNOWN, V4SFmode, operands); emit_insn (gen_sse_loadlps (dst, operands[1], operands[2])); /* Fix up the destination if needed. */ if (dst != operands[0]) emit_move_insn (operands[0], dst); DONE; })

;; Replace the low half of operand 1 with operand 2 (reg-reg case uses
;; shufps with the 0xe4 identity-high immediate).
(define_insn "sse_loadlps" [(set (match_operand:V4SF 0 "nonimmediate_operand" "=x,v,x,v,m") (vec_concat:V4SF (match_operand:V2SF 2 "nonimmediate_operand" " 0,v,m,m,v") (vec_select:V2SF (match_operand:V4SF 1 "nonimmediate_operand" " x,v,0,v,0") (parallel [(const_int 2) (const_int 3)]))))] "TARGET_SSE" "@ shufps\t{$0xe4, %1, %0|%0, %1, 0xe4} vshufps\t{$0xe4, %1, %2, %0|%0, %2, %1, 0xe4} movlps\t{%2, %0|%0, %q2} vmovlps\t{%2, %1, %0|%0, %1, %q2} %vmovlps\t{%2, %0|%q0, %2}" [(set_attr "isa" "noavx,avx,noavx,avx,*") (set_attr "type" "sseshuf,sseshuf,ssemov,ssemov,ssemov") (set (attr "length_immediate") (if_then_else (eq_attr "alternative" "0,1") (const_string "1") (const_string "*"))) (set_attr "prefix" "orig,maybe_evex,orig,maybe_evex,maybe_vex") (set_attr "mode" "V4SF,V4SF,V2SF,V2SF,V2SF")])

;; movss: merge element 0 of operand 2 into operand 1.
(define_insn "sse_movss" [(set (match_operand:V4SF 0 "register_operand" "=x,v") (vec_merge:V4SF (match_operand:V4SF 2 "register_operand" " x,v") (match_operand:V4SF 1 "register_operand" " 0,v") (const_int 1)))] "TARGET_SSE" "@ movss\t{%2, %0|%0, %2} vmovss\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "ssemov") (set_attr "prefix" "orig,maybe_evex") (set_attr "mode" "SF")])

;; Broadcast element 0 of a V4SF register (template/attrs continue on the
;; next source line).  Name suffix lost its iterator text in extraction.
(define_insn "avx2_vec_dup" [(set (match_operand:VF1_128_256 0 "register_operand" "=v") (vec_duplicate:VF1_128_256 (vec_select:SF (match_operand:V4SF 1 "register_operand" "v") (parallel [(const_int 0)]))))] "TARGET_AVX2"
;; Continuation: output template and attributes of the avx2_vec_dup insn
;; whose pattern ends on the previous source line.  The empty "mode" value
;; reflects a stripped iterator attribute.
"vbroadcastss\t{%1, %0|%0, %1}" [(set_attr "type" "sselog1") (set_attr "prefix" "maybe_evex") (set_attr "mode" "")])

;; Broadcast element 0 of a V8SF; %x1 forces the 128-bit register name.
(define_insn "avx2_vec_dupv8sf_1" [(set (match_operand:V8SF 0 "register_operand" "=v") (vec_duplicate:V8SF (vec_select:SF (match_operand:V8SF 1 "register_operand" "v") (parallel [(const_int 0)]))))] "TARGET_AVX2" "vbroadcastss\t{%x1, %0|%0, %x1}" [(set_attr "type" "sselog1") (set_attr "prefix" "maybe_evex") (set_attr "mode" "V8SF")])

;; 512-bit scalar broadcast; mnemonic suffix and modes lost iterator text.
(define_insn "avx512f_vec_dup_1" [(set (match_operand:VF_512 0 "register_operand" "=v") (vec_duplicate:VF_512 (vec_select: (match_operand:VF_512 1 "register_operand" "v") (parallel [(const_int 0)]))))] "TARGET_AVX512F" "vbroadcast\t{%x1, %0|%0, %x1}" [(set_attr "type" "sselog1") (set_attr "prefix" "evex") (set_attr "mode" "")])

;; Although insertps takes register source, we prefer
;; unpcklps with register source since it is shorter.
(define_insn "*vec_concatv2sf_sse4_1" [(set (match_operand:V2SF 0 "register_operand" "=Yr,*x, v,Yr,*x,v,v,*y ,*y") (vec_concat:V2SF (match_operand:SF 1 "nonimmediate_operand" " 0, 0,Yv, 0,0, v,m, 0 , m") (match_operand:SF 2 "nonimm_or_0_operand" " Yr,*x,Yv, m,m, m,C,*ym, C")))] "TARGET_SSE4_1 && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "@ unpcklps\t{%2, %0|%0, %2} unpcklps\t{%2, %0|%0, %2} vunpcklps\t{%2, %1, %0|%0, %1, %2} insertps\t{$0x10, %2, %0|%0, %2, 0x10} insertps\t{$0x10, %2, %0|%0, %2, 0x10} vinsertps\t{$0x10, %2, %1, %0|%0, %1, %2, 0x10} %vmovss\t{%1, %0|%0, %1} punpckldq\t{%2, %0|%0, %2} movd\t{%1, %0|%0, %1}" [(set (attr "isa") (cond [(eq_attr "alternative" "0,1,3,4") (const_string "noavx") (eq_attr "alternative" "2,5") (const_string "avx") ] (const_string "*"))) (set (attr "type") (cond [(eq_attr "alternative" "6") (const_string "ssemov") (eq_attr "alternative" "7") (const_string "mmxcvt") (eq_attr "alternative" "8") (const_string "mmxmov") ] (const_string "sselog"))) (set (attr "mmx_isa") (if_then_else (eq_attr "alternative" "7,8") (const_string "native") (const_string "*"))) (set (attr "prefix_data16") (if_then_else (eq_attr "alternative" "3,4") (const_string "1") (const_string "*"))) (set (attr "prefix_extra") (if_then_else (eq_attr "alternative" "3,4,5") (const_string "1") (const_string "*"))) (set (attr "length_immediate") (if_then_else (eq_attr "alternative" "3,4,5") (const_string "1") (const_string "*"))) (set (attr "prefix") (cond [(eq_attr "alternative" "2,5") (const_string "maybe_evex") (eq_attr "alternative" "6") (const_string "maybe_vex") ] (const_string "orig"))) (set_attr "mode" "V4SF,V4SF,V4SF,V4SF,V4SF,V4SF,SF,DI,DI")])

;; ??? In theory we can match memory for the MMX alternative, but allowing
;; vector_operand for operand 2 and *not* allowing memory for the SSE
;; alternatives pretty much forces the MMX alternative to be chosen.
(define_insn "*vec_concatv2sf_sse" [(set (match_operand:V2SF 0 "register_operand" "=x,x,*y,*y") (vec_concat:V2SF (match_operand:SF 1 "nonimmediate_operand" " 0,m, 0, m") (match_operand:SF 2 "reg_or_0_operand" " x,C,*y, C")))] "TARGET_SSE" "@ unpcklps\t{%2, %0|%0, %2} movss\t{%1, %0|%0, %1} punpckldq\t{%2, %0|%0, %2} movd\t{%1, %0|%0, %1}" [(set_attr "mmx_isa" "*,*,native,native") (set_attr "type" "sselog,ssemov,mmxcvt,mmxmov") (set_attr "mode" "V4SF,SF,DI,DI")])

;; Concatenate two V2SF halves into a V4SF.
(define_insn "*vec_concatv4sf" [(set (match_operand:V4SF 0 "register_operand" "=x,v,x,v") (vec_concat:V4SF (match_operand:V2SF 1 "register_operand" " 0,v,0,v") (match_operand:V2SF 2 "nonimmediate_operand" " x,v,m,m")))] "TARGET_SSE" "@ movlhps\t{%2, %0|%0, %2} vmovlhps\t{%2, %1, %0|%0, %1, %2} movhps\t{%2, %0|%0, %q2} vmovhps\t{%2, %1, %0|%0, %1, %q2}" [(set_attr "isa" "noavx,avx,noavx,avx") (set_attr "type" "ssemov") (set_attr "prefix" "orig,maybe_evex,orig,maybe_evex") (set_attr "mode" "V4SF,V4SF,V2SF,V2SF")])

;; V2SF value with a zero upper half: movq zero-extends a 64-bit load/move.
(define_insn "*vec_concatv4sf_0" [(set (match_operand:V4SF 0 "register_operand" "=v") (vec_concat:V4SF (match_operand:V2SF 1 "nonimmediate_operand" "vm") (match_operand:V2SF 2 "const0_operand" " C")))] "TARGET_SSE2" "%vmovq\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "maybe_vex") (set_attr "mode" "DF")])

;; Avoid combining registers from different units in a single alternative,
;; see comment above inline_secondary_memory_needed function in i386.cc
;; Insert into element 0 of a 4-element vector.  This define_insn runs past
;; the end of this chunk (its final set_attr is cut off mid-definition).
(define_insn "vec_set_0" [(set (match_operand:VI4F_128 0 "nonimmediate_operand" "=Yr,*x,v,v,v,x,x,v,Yr ,*x ,x ,m ,m ,m") (vec_merge:VI4F_128 (vec_duplicate:VI4F_128 (match_operand: 2 "general_operand" " Yr,*x,v,m,r ,m,x,v,*rm,*rm,*rm,!x,!*re,!*fF")) (match_operand:VI4F_128 1 "nonimm_or_0_operand" " C , C,C,C,C ,C,0,v,0 ,0 ,x ,0 ,0 ,0") (const_int 1)))] "TARGET_SSE" "@ insertps\t{$0xe, %2, %0|%0, %2, 0xe} insertps\t{$0xe, %2, %0|%0, %2, 0xe} vinsertps\t{$0xe, %2, %2, %0|%0, %2, %2, 0xe} %vmov\t{%2, %0|%0, %2} %vmovd\t{%2, %0|%0, %2} movss\t{%2, %0|%0, %2} movss\t{%2, %0|%0, %2} vmovss\t{%2, %1, %0|%0, %1, %2} pinsrd\t{$0, %2, %0|%0, %2, 0} pinsrd\t{$0, %2, %0|%0, %2, 0} vpinsrd\t{$0, %2, %1, %0|%0, %1, %2, 0} # # #" [(set (attr "isa") (cond [(eq_attr "alternative" "0,1,8,9") (const_string "sse4_noavx") (eq_attr "alternative" "2,7,10") (const_string "avx") (eq_attr "alternative" "3,4") (const_string "sse2") (eq_attr "alternative" "5,6") (const_string "noavx") ] (const_string "*"))) (set (attr "type") (cond [(eq_attr "alternative" "0,1,2,8,9,10") (const_string "sselog") (eq_attr "alternative" "12") (const_string "imov") (eq_attr "alternative" "13") (const_string "fmov") ] (const_string "ssemov"))) (set (attr "prefix_extra") (if_then_else (eq_attr "alternative" "8,9,10") (const_string "1") (const_string "*"))) (set (attr "length_immediate") (if_then_else (eq_attr "alternative" "8,9,10") (const_string "1") (const_string "*"))) (set (attr "prefix") (cond [(eq_attr "alternative" "0,1,5,6,8,9") (const_string "orig") (eq_attr "alternative" "2") (const_string "maybe_evex") (eq_attr "alternative" "3,4") (const_string "maybe_vex") (eq_attr "alternative" "7,10") (const_string "vex") ] (const_string "*"))) (set_attr "mode"
"SF,SF,SF,,SI,SF,SF,SF,TI,TI,TI,*,*,*") (set (attr "preferred_for_speed") (cond [(eq_attr "alternative" "4") (symbol_ref "TARGET_INTER_UNIT_MOVES_TO_VEC") ] (symbol_ref "true")))]) (define_insn "vec_set_0" [(set (match_operand:V8_128 0 "register_operand" "=v,v,v,x,x,Yr,*x,x,x,x,v,v") (vec_merge:V8_128 (vec_duplicate:V8_128 (match_operand: 2 "nonimmediate_operand" " r,m,v,r,m,Yr,*x,r,m,x,r,m")) (match_operand:V8_128 1 "reg_or_0_operand" " C,C,v,0,0,0 ,0 ,x,x,x,v,v") (const_int 1)))] "TARGET_SSE2" "@ vmovw\t{%k2, %0|%0, %k2} vmovw\t{%2, %0|%0, %2} vmovsh\t{%2, %1, %0|%0, %1, %2} pinsrw\t{$0, %k2, %0|%0, %k2, 0} pinsrw\t{$0, %2, %0|%0, %2, 0} pblendw\t{$1, %2, %0|%0, %2, 1} pblendw\t{$1, %2, %0|%0, %2, 1} vpinsrw\t{$0, %k2, %1, %0|%0, %1, %k2, 0} vpinsrw\t{$0, %2, %1, %0|%0, %1, %2, 0} vpblendw\t{$1, %2, %1, %0|%0, %1, %2, 1} vpinsrw\t{$0, %k2, %1, %0|%0, %1, %k2, 0} vpinsrw\t{$0, %2, %1, %0|%0, %1, %2, 0}" [(set (attr "isa") (cond [(eq_attr "alternative" "0,1,2") (const_string "avx512fp16") (eq_attr "alternative" "3") (const_string "noavx") (eq_attr "alternative" "4,5,6") (const_string "sse4_noavx") (eq_attr "alternative" "7,8,9") (const_string "avx") (eq_attr "alternative" "10,11") (const_string "avx512bw") ] (const_string "*"))) (set (attr "type") (if_then_else (eq_attr "alternative" "0,1,2,5,6,9") (const_string "ssemov") (const_string "sselog"))) (set (attr "prefix_data16") (if_then_else (eq_attr "alternative" "3,4") (const_string "1") (const_string "*"))) (set (attr "prefix_extra") (if_then_else (eq_attr "alternative" "5,6,7,8,9") (const_string "1") (const_string "*"))) (set (attr "length_immediate") (if_then_else (eq_attr "alternative" "0,1,2") (const_string "*") (const_string "1"))) (set (attr "prefix") (cond [(eq_attr "alternative" "0,1,2,10,11") (const_string "evex") (eq_attr "alternative" "7,8,9") (const_string "vex") ] (const_string "orig"))) (set (attr "mode") (if_then_else (eq_attr "alternative" "0,1,2") (const_string "HF") (const_string "TI"))) (set 
(attr "enabled") (cond [(and (not (match_test "mode == V8HFmode")) (eq_attr "alternative" "2")) (symbol_ref "false") ] (const_string "*")))]) ;; vmovw clears also the higer bits (define_insn "vec_set_0" [(set (match_operand:VI2F_256_512 0 "register_operand" "=v,v") (vec_merge:VI2F_256_512 (vec_duplicate:VI2F_256_512 (match_operand: 2 "nonimmediate_operand" "r,m")) (match_operand:VI2F_256_512 1 "const0_operand" "C,C") (const_int 1)))] "TARGET_AVX512FP16" "@ vmovw\t{%k2, %x0|%x0, %k2} vmovw\t{%2, %x0|%x0, %2}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "HF")]) (define_insn "avx512fp16_movsh" [(set (match_operand:V8HF 0 "register_operand" "=v") (vec_merge:V8HF (match_operand:V8HF 2 "register_operand" "v") (match_operand:V8HF 1 "register_operand" "v") (const_int 1)))] "TARGET_AVX512FP16" "vmovsh\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "HF")]) ;; A subset is vec_setv4sf. (define_insn "*vec_setv4sf_sse4_1" [(set (match_operand:V4SF 0 "register_operand" "=Yr,*x,v") (vec_merge:V4SF (vec_duplicate:V4SF (match_operand:SF 2 "nonimmediate_operand" "Yrm,*xm,vm")) (match_operand:V4SF 1 "register_operand" "0,0,v") (match_operand:SI 3 "const_int_operand")))] "TARGET_SSE4_1 && ((unsigned) exact_log2 (INTVAL (operands[3])) < GET_MODE_NUNITS (V4SFmode))" { operands[3] = GEN_INT (exact_log2 (INTVAL (operands[3])) << 4); switch (which_alternative) { case 0: case 1: return "insertps\t{%3, %2, %0|%0, %2, %3}"; case 2: return "vinsertps\t{%3, %2, %1, %0|%0, %1, %2, %3}"; default: gcc_unreachable (); } } [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "sselog") (set_attr "prefix_data16" "1,1,*") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "orig,orig,maybe_evex") (set_attr "mode" "V4SF")]) ;; All of vinsertps, vmovss, vmovd clear also the higher bits. 
(define_insn "vec_set_0" [(set (match_operand:VI4F_256_512 0 "register_operand" "=v,v,v") (vec_merge:VI4F_256_512 (vec_duplicate:VI4F_256_512 (match_operand: 2 "nonimmediate_operand" "v,m,r")) (match_operand:VI4F_256_512 1 "const0_operand" "C,C,C") (const_int 1)))] "TARGET_AVX" "@ vinsertps\t{$0xe, %2, %2, %x0|%x0, %2, %2, 0xe} vmov\t{%x2, %x0|%x0, %2} vmovd\t{%2, %x0|%x0, %2}" [(set (attr "type") (if_then_else (eq_attr "alternative" "0") (const_string "sselog") (const_string "ssemov"))) (set_attr "prefix" "maybe_evex") (set_attr "mode" "SF,,SI") (set (attr "preferred_for_speed") (cond [(eq_attr "alternative" "2") (symbol_ref "TARGET_INTER_UNIT_MOVES_TO_VEC") ] (symbol_ref "true")))]) (define_insn "sse4_1_insertps" [(set (match_operand:V4SF 0 "register_operand" "=Yr,*x,v") (unspec:V4SF [(match_operand:V4SF 2 "nonimmediate_operand" "Yrm,*xm,vm") (match_operand:V4SF 1 "register_operand" "0,0,v") (match_operand:SI 3 "const_0_to_255_operand" "n,n,n")] UNSPEC_INSERTPS))] "TARGET_SSE4_1" { if (MEM_P (operands[2])) { unsigned count_s = INTVAL (operands[3]) >> 6; if (count_s) operands[3] = GEN_INT (INTVAL (operands[3]) & 0x3f); operands[2] = adjust_address_nv (operands[2], SFmode, count_s * 4); } switch (which_alternative) { case 0: case 1: return "insertps\t{%3, %2, %0|%0, %2, %3}"; case 2: return "vinsertps\t{%3, %2, %1, %0|%0, %1, %2, %3}"; default: gcc_unreachable (); } } [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "sselog") (set_attr "prefix_data16" "1,1,*") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "orig,orig,maybe_evex") (set_attr "mode" "V4SF")]) (define_split [(set (match_operand:VI4F_128 0 "memory_operand") (vec_merge:VI4F_128 (vec_duplicate:VI4F_128 (match_operand: 1 "nonmemory_operand")) (match_dup 0) (const_int 1)))] "TARGET_SSE && reload_completed" [(set (match_dup 0) (match_dup 1))] "operands[0] = adjust_address (operands[0], mode, 0);") ;; Standard scalar operation patterns which preserve the rest of the ;; 
vector for combiner. (define_insn "vec_setv2df_0" [(set (match_operand:V2DF 0 "register_operand" "=x,v,x,v") (vec_merge:V2DF (vec_duplicate:V2DF (match_operand:DF 2 "nonimmediate_operand" " x,v,m,m")) (match_operand:V2DF 1 "register_operand" " 0,v,0,v") (const_int 1)))] "TARGET_SSE2" "@ movsd\t{%2, %0|%0, %2} vmovsd\t{%2, %1, %0|%0, %1, %2} movlpd\t{%2, %0|%0, %2} vmovlpd\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx,noavx,avx") (set_attr "type" "ssemov") (set_attr "mode" "DF")]) (define_expand "vec_set" [(match_operand:V_128 0 "register_operand") (match_operand: 1 "register_operand") (match_operand 2 "vec_setm_sse41_operand")] "TARGET_SSE" { if (CONST_INT_P (operands[2])) ix86_expand_vector_set (false, operands[0], operands[1], INTVAL (operands[2])); else ix86_expand_vector_set_var (operands[0], operands[1], operands[2]); DONE; }) (define_expand "vec_setv8hf" [(match_operand:V8HF 0 "register_operand") (match_operand:HF 1 "register_operand") (match_operand 2 "vec_setm_sse41_operand")] "TARGET_SSE" { if (CONST_INT_P (operands[2])) ix86_expand_vector_set (false, operands[0], operands[1], INTVAL (operands[2])); else ix86_expand_vector_set_var (operands[0], operands[1], operands[2]); DONE; }) (define_expand "vec_set" [(match_operand:V_256_512 0 "register_operand") (match_operand: 1 "register_operand") (match_operand 2 "vec_setm_avx2_operand")] "TARGET_AVX" { if (CONST_INT_P (operands[2])) ix86_expand_vector_set (false, operands[0], operands[1], INTVAL (operands[2])); else ix86_expand_vector_set_var (operands[0], operands[1], operands[2]); DONE; }) (define_insn_and_split "*vec_extractv4sf_0" [(set (match_operand:SF 0 "nonimmediate_operand" "=v,m,f,r") (vec_select:SF (match_operand:V4SF 1 "nonimmediate_operand" "vm,v,m,m") (parallel [(const_int 0)])))] "TARGET_SSE && !(MEM_P (operands[0]) && MEM_P (operands[1]))" "#" "&& reload_completed" [(set (match_dup 0) (match_dup 1))] "operands[1] = gen_lowpart (SFmode, operands[1]);") (define_insn_and_split 
"*sse4_1_extractps" [(set (match_operand:SF 0 "nonimmediate_operand" "=rm,rm,rm,Yv,Yv") (vec_select:SF (match_operand:V4SF 1 "register_operand" "Yr,*x,v,0,v") (parallel [(match_operand:SI 2 "const_0_to_3_operand" "n,n,n,n,n")])))] "TARGET_SSE4_1" "@ extractps\t{%2, %1, %0|%0, %1, %2} extractps\t{%2, %1, %0|%0, %1, %2} vextractps\t{%2, %1, %0|%0, %1, %2} # #" "&& reload_completed && SSE_REG_P (operands[0])" [(const_int 0)] { rtx dest = lowpart_subreg (V4SFmode, operands[0], SFmode); switch (INTVAL (operands[2])) { case 1: case 3: emit_insn (gen_sse_shufps_v4sf (dest, operands[1], operands[1], operands[2], operands[2], GEN_INT (INTVAL (operands[2]) + 4), GEN_INT (INTVAL (operands[2]) + 4))); break; case 2: emit_insn (gen_vec_interleave_highv4sf (dest, operands[1], operands[1])); break; default: /* 0 should be handled by the *vec_extractv4sf_0 pattern above. */ gcc_unreachable (); } DONE; } [(set_attr "isa" "noavx,noavx,avx,noavx,avx") (set_attr "type" "sselog,sselog,sselog,*,*") (set_attr "prefix_data16" "1,1,1,*,*") (set_attr "prefix_extra" "1,1,1,*,*") (set_attr "length_immediate" "1,1,1,*,*") (set_attr "prefix" "orig,orig,maybe_evex,*,*") (set_attr "mode" "V4SF,V4SF,V4SF,*,*")]) (define_insn_and_split "*vec_extractv4sf_mem" [(set (match_operand:SF 0 "register_operand" "=v,*r,f") (vec_select:SF (match_operand:V4SF 1 "memory_operand" "o,o,o") (parallel [(match_operand 2 "const_0_to_3_operand" "n,n,n")])))] "TARGET_SSE" "#" "&& reload_completed" [(set (match_dup 0) (match_dup 1))] { operands[1] = adjust_address (operands[1], SFmode, INTVAL (operands[2]) * 4); }) (define_mode_attr extract_type [(V16SF "avx512f") (V16SI "avx512f") (V8DF "avx512dq") (V8DI "avx512dq")]) (define_mode_attr extract_suf [(V16SF "32x4") (V16SI "32x4") (V8DF "64x2") (V8DI "64x2") (V8SF "32x4") (V8SI "32x4") (V4DF "64x2") (V4DI "64x2")]) (define_mode_iterator AVX512_VEC [(V8DF "TARGET_AVX512DQ") (V8DI "TARGET_AVX512DQ") V16SF V16SI]) (define_expand "_vextract_mask" [(match_operand: 0 
"nonimmediate_operand") (match_operand:AVX512_VEC 1 "register_operand") (match_operand:SI 2 "const_0_to_3_operand") (match_operand: 3 "nonimmediate_operand") (match_operand:QI 4 "register_operand")] "TARGET_AVX512F" { int mask; mask = INTVAL (operands[2]); rtx dest = operands[0]; if (MEM_P (operands[0]) && !rtx_equal_p (operands[0], operands[3])) dest = gen_reg_rtx (mode); if (mode == V16SImode || mode == V16SFmode) emit_insn (gen_avx512f_vextract32x4_1_mask (dest, operands[1], GEN_INT (mask * 4), GEN_INT (mask * 4 + 1), GEN_INT (mask * 4 + 2), GEN_INT (mask * 4 + 3), operands[3], operands[4])); else emit_insn (gen_avx512dq_vextract64x2_1_mask (dest, operands[1], GEN_INT (mask * 2), GEN_INT (mask * 2 + 1), operands[3], operands[4])); if (dest != operands[0]) emit_move_insn (operands[0], dest); DONE; }) (define_insn "avx512dq_vextract64x2_1_mask" [(set (match_operand: 0 "nonimmediate_operand" "=v,m") (vec_merge: (vec_select: (match_operand:V8FI 1 "register_operand" "v,v") (parallel [(match_operand 2 "const_0_to_7_operand") (match_operand 3 "const_0_to_7_operand")])) (match_operand: 4 "nonimm_or_0_operand" "0C,0") (match_operand:QI 5 "register_operand" "Yk,Yk")))] "TARGET_AVX512DQ && INTVAL (operands[2]) % 2 == 0 && INTVAL (operands[2]) == INTVAL (operands[3]) - 1 && (!MEM_P (operands[0]) || rtx_equal_p (operands[0], operands[4]))" { operands[2] = GEN_INT (INTVAL (operands[2]) >> 1); return "vextract64x2\t{%2, %1, %0%{%5%}%N4|%0%{%5%}%N4, %1, %2}"; } [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "*avx512dq_vextract64x2_1" [(set (match_operand: 0 "nonimmediate_operand" "=vm") (vec_select: (match_operand:V8FI 1 "register_operand" "v") (parallel [(match_operand 2 "const_0_to_7_operand") (match_operand 3 "const_0_to_7_operand")])))] "TARGET_AVX512DQ && INTVAL (operands[2]) % 2 == 0 && INTVAL (operands[2]) == INTVAL (operands[3]) - 1" { operands[2] = GEN_INT 
(INTVAL (operands[2]) >> 1); return "vextract64x2\t{%2, %1, %0|%0, %1, %2}"; } [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_split [(set (match_operand: 0 "nonimmediate_operand") (vec_select: (match_operand:V8FI 1 "register_operand") (parallel [(const_int 0) (const_int 1)])))] "TARGET_AVX512DQ && reload_completed && (TARGET_AVX512VL || REG_P (operands[0]) || !EXT_REX_SSE_REG_P (operands[1]))" [(set (match_dup 0) (match_dup 1))] { if (!TARGET_AVX512VL && REG_P (operands[0]) && EXT_REX_SSE_REG_P (operands[1])) operands[0] = lowpart_subreg (mode, operands[0], mode); else operands[1] = gen_lowpart (mode, operands[1]); }) (define_insn "avx512f_vextract32x4_1_mask" [(set (match_operand: 0 "nonimmediate_operand" "=v,m") (vec_merge: (vec_select: (match_operand:V16FI 1 "register_operand" "v,v") (parallel [(match_operand 2 "const_0_to_15_operand") (match_operand 3 "const_0_to_15_operand") (match_operand 4 "const_0_to_15_operand") (match_operand 5 "const_0_to_15_operand")])) (match_operand: 6 "nonimm_or_0_operand" "0C,0") (match_operand:QI 7 "register_operand" "Yk,Yk")))] "TARGET_AVX512F && INTVAL (operands[2]) % 4 == 0 && INTVAL (operands[2]) == INTVAL (operands[3]) - 1 && INTVAL (operands[3]) == INTVAL (operands[4]) - 1 && INTVAL (operands[4]) == INTVAL (operands[5]) - 1 && (!MEM_P (operands[0]) || rtx_equal_p (operands[0], operands[6]))" { operands[2] = GEN_INT (INTVAL (operands[2]) >> 2); return "vextract32x4\t{%2, %1, %0%{%7%}%N6|%0%{%7%}%N6, %1, %2}"; } [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "*avx512f_vextract32x4_1" [(set (match_operand: 0 "nonimmediate_operand" "=vm") (vec_select: (match_operand:V16FI 1 "register_operand" "v") (parallel [(match_operand 2 "const_0_to_15_operand") (match_operand 3 "const_0_to_15_operand") (match_operand 4 
"const_0_to_15_operand") (match_operand 5 "const_0_to_15_operand")])))] "TARGET_AVX512F && INTVAL (operands[2]) % 4 == 0 && INTVAL (operands[2]) == INTVAL (operands[3]) - 1 && INTVAL (operands[3]) == INTVAL (operands[4]) - 1 && INTVAL (operands[4]) == INTVAL (operands[5]) - 1" { operands[2] = GEN_INT (INTVAL (operands[2]) >> 2); return "vextract32x4\t{%2, %1, %0|%0, %1, %2}"; } [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_split [(set (match_operand: 0 "nonimmediate_operand") (vec_select: (match_operand:V16FI 1 "register_operand") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)])))] "TARGET_AVX512F && reload_completed && (TARGET_AVX512VL || REG_P (operands[0]) || !EXT_REX_SSE_REG_P (operands[1]))" [(set (match_dup 0) (match_dup 1))] { if (!TARGET_AVX512VL && REG_P (operands[0]) && EXT_REX_SSE_REG_P (operands[1])) operands[0] = lowpart_subreg (mode, operands[0], mode); else operands[1] = gen_lowpart (mode, operands[1]); }) (define_mode_attr extract_type_2 [(V16SF "avx512dq") (V16SI "avx512dq") (V8DF "avx512f") (V8DI "avx512f")]) (define_mode_attr extract_suf_2 [(V16SF "32x8") (V16SI "32x8") (V8DF "64x4") (V8DI "64x4")]) (define_mode_iterator AVX512_VEC_2 [(V16SF "TARGET_AVX512DQ") (V16SI "TARGET_AVX512DQ") V8DF V8DI]) (define_expand "_vextract_mask" [(match_operand: 0 "nonimmediate_operand") (match_operand:AVX512_VEC_2 1 "register_operand") (match_operand:SI 2 "const_0_to_1_operand") (match_operand: 3 "nonimmediate_operand") (match_operand:QI 4 "register_operand")] "TARGET_AVX512F" { rtx (*insn)(rtx, rtx, rtx, rtx); rtx dest = operands[0]; if (MEM_P (dest) && !rtx_equal_p (dest, operands[3])) dest = gen_reg_rtx (mode); switch (INTVAL (operands[2])) { case 0: insn = gen_vec_extract_lo__mask; break; case 1: insn = gen_vec_extract_hi__mask; break; default: gcc_unreachable (); } emit_insn (insn (dest, operands[1], operands[3], operands[4])); if 
(dest != operands[0]) emit_move_insn (operands[0], dest); DONE; }) (define_split [(set (match_operand: 0 "nonimmediate_operand") (vec_select: (match_operand:V8FI 1 "nonimmediate_operand") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)])))] "TARGET_AVX512F && !(MEM_P (operands[0]) && MEM_P (operands[1])) && reload_completed && (TARGET_AVX512VL || (REG_P (operands[0]) && !EXT_REX_SSE_REG_P (operands[1])))" [(set (match_dup 0) (match_dup 1))] "operands[1] = gen_lowpart (mode, operands[1]);") (define_insn "vec_extract_lo__mask" [(set (match_operand: 0 "nonimmediate_operand" "=v,m") (vec_merge: (vec_select: (match_operand:V8FI 1 "register_operand" "v,v") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)])) (match_operand: 2 "nonimm_or_0_operand" "0C,0") (match_operand:QI 3 "register_operand" "Yk,Yk")))] "TARGET_AVX512F && (!MEM_P (operands[0]) || rtx_equal_p (operands[0], operands[2]))" "vextract64x4\t{$0x0, %1, %0%{%3%}%N2|%0%{%3%}%N2, %1, 0x0}" [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "memory" "none,store") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "vec_extract_lo_" [(set (match_operand: 0 "nonimmediate_operand" "=v,vm,v") (vec_select: (match_operand:V8FI 1 "nonimmediate_operand" "v,v,vm") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)])))] "TARGET_AVX512F && !(MEM_P (operands[0]) && MEM_P (operands[1]))" { if (!TARGET_AVX512VL && !MEM_P (operands[1])) return "vextract64x4\t{$0x0, %1, %0|%0, %1, 0x0}"; else return "#"; } [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "memory" "none,store,load") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "vec_extract_hi__mask" [(set (match_operand: 0 "nonimmediate_operand" "=v,m") (vec_merge: (vec_select: (match_operand:V8FI 1 "register_operand" "v,v") (parallel [(const_int 4) (const_int 5) (const_int 6) (const_int 7)])) 
(match_operand: 2 "nonimm_or_0_operand" "0C,0") (match_operand:QI 3 "register_operand" "Yk,Yk")))] "TARGET_AVX512F && (!MEM_P (operands[0]) || rtx_equal_p (operands[0], operands[2]))" "vextract64x4\t{$0x1, %1, %0%{%3%}%N2|%0%{%3%}%N2, %1, 0x1}" [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "vec_extract_hi_" [(set (match_operand: 0 "nonimmediate_operand" "=vm") (vec_select: (match_operand:V8FI 1 "register_operand" "v") (parallel [(const_int 4) (const_int 5) (const_int 6) (const_int 7)])))] "TARGET_AVX512F" "vextract64x4\t{$0x1, %1, %0|%0, %1, 0x1}" [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "vec_extract_hi__mask" [(set (match_operand: 0 "nonimmediate_operand" "=v,m") (vec_merge: (vec_select: (match_operand:V16FI 1 "register_operand" "v,v") (parallel [(const_int 8) (const_int 9) (const_int 10) (const_int 11) (const_int 12) (const_int 13) (const_int 14) (const_int 15)])) (match_operand: 2 "nonimm_or_0_operand" "0C,0") (match_operand:QI 3 "register_operand" "Yk,Yk")))] "TARGET_AVX512DQ && (!MEM_P (operands[0]) || rtx_equal_p (operands[0], operands[2]))" "vextract32x8\t{$0x1, %1, %0%{%3%}%N2|%0%{%3%}%N2, %1, 0x1}" [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "vec_extract_hi_" [(set (match_operand: 0 "nonimmediate_operand" "=vm,vm") (vec_select: (match_operand:V16FI 1 "register_operand" "v,v") (parallel [(const_int 8) (const_int 9) (const_int 10) (const_int 11) (const_int 12) (const_int 13) (const_int 14) (const_int 15)])))] "TARGET_AVX512F" "@ vextract32x8\t{$0x1, %1, %0|%0, %1, 0x1} vextracti64x4\t{$0x1, %1, %0|%0, %1, 0x1}" [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "isa" "avx512dq,noavx512dq") (set_attr 
"length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_mode_iterator VI48F_256_DQ [V8SI V8SF (V4DI "TARGET_AVX512DQ") (V4DF "TARGET_AVX512DQ")]) (define_expand "avx512vl_vextractf128" [(match_operand: 0 "nonimmediate_operand") (match_operand:VI48F_256_DQ 1 "register_operand") (match_operand:SI 2 "const_0_to_1_operand") (match_operand: 3 "nonimm_or_0_operand") (match_operand:QI 4 "register_operand")] "TARGET_AVX512VL" { rtx (*insn)(rtx, rtx, rtx, rtx); rtx dest = operands[0]; if (MEM_P (dest) && (GET_MODE_SIZE (GET_MODE_INNER (mode)) == 4 /* For V8S[IF]mode there are maskm insns with =m and 0 constraints. */ ? !rtx_equal_p (dest, operands[3]) /* For V4D[IF]mode, hi insns don't allow memory, and lo insns have =m and 0C constraints. */ : (operands[2] != const0_rtx || (!rtx_equal_p (dest, operands[3]) && GET_CODE (operands[3]) != CONST_VECTOR)))) dest = gen_reg_rtx (mode); switch (INTVAL (operands[2])) { case 0: insn = gen_vec_extract_lo__mask; break; case 1: insn = gen_vec_extract_hi__mask; break; default: gcc_unreachable (); } emit_insn (insn (dest, operands[1], operands[3], operands[4])); if (dest != operands[0]) emit_move_insn (operands[0], dest); DONE; }) (define_expand "avx_vextractf128" [(match_operand: 0 "nonimmediate_operand") (match_operand:V_256H 1 "register_operand") (match_operand:SI 2 "const_0_to_1_operand")] "TARGET_AVX" { rtx (*insn)(rtx, rtx); switch (INTVAL (operands[2])) { case 0: insn = gen_vec_extract_lo_; break; case 1: insn = gen_vec_extract_hi_; break; default: gcc_unreachable (); } emit_insn (insn (operands[0], operands[1])); DONE; }) (define_insn "vec_extract_lo__mask" [(set (match_operand: 0 "nonimmediate_operand" "=v,m") (vec_merge: (vec_select: (match_operand:V16FI 1 "register_operand" "v,v") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)])) (match_operand: 2 "nonimm_or_0_operand" "0C,0") (match_operand:QI 3 "register_operand" "Yk,Yk")))] 
"TARGET_AVX512DQ && (!MEM_P (operands[0]) || rtx_equal_p (operands[0], operands[2]))" "vextract32x8\t{$0x0, %1, %0%{%3%}%N2|%0%{%3%}%N2, %1, 0x0}" [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "memory" "none,store") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "vec_extract_lo_" [(set (match_operand: 0 "nonimmediate_operand" "=v,v,m") (vec_select: (match_operand:V16FI 1 "nonimmediate_operand" "v,m,v") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)])))] "TARGET_AVX512F && !(MEM_P (operands[0]) && MEM_P (operands[1]))" { if (!TARGET_AVX512VL && !REG_P (operands[0]) && EXT_REX_SSE_REG_P (operands[1])) { if (TARGET_AVX512DQ) return "vextract32x8\t{$0x0, %1, %0|%0, %1, 0x0}"; else return "vextract64x4\t{$0x0, %1, %0|%0, %1, 0x0}"; } else return "#"; } [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "memory" "none,load,store") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_split [(set (match_operand: 0 "nonimmediate_operand") (vec_select: (match_operand:V16FI 1 "nonimmediate_operand") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)])))] "TARGET_AVX512F && !(MEM_P (operands[0]) && MEM_P (operands[1])) && reload_completed && (TARGET_AVX512VL || REG_P (operands[0]) || !EXT_REX_SSE_REG_P (operands[1]))" [(set (match_dup 0) (match_dup 1))] { if (!TARGET_AVX512VL && REG_P (operands[0]) && EXT_REX_SSE_REG_P (operands[1])) operands[0] = lowpart_subreg (mode, operands[0], mode); else operands[1] = gen_lowpart (mode, operands[1]); }) (define_insn "vec_extract_lo__mask" [(set (match_operand: 0 "nonimmediate_operand" "=v,m") (vec_merge: (vec_select: (match_operand:VI8F_256 1 "register_operand" "v,v") (parallel [(const_int 0) (const_int 1)])) (match_operand: 2 "nonimm_or_0_operand" "0C,0") 
(match_operand:QI 3 "register_operand" "Yk,Yk")))] "TARGET_AVX512DQ && TARGET_AVX512VL && (!MEM_P (operands[0]) || rtx_equal_p (operands[0], operands[2]))" "vextract64x2\t{$0x0, %1, %0%{%3%}%N2|%0%{%3%}%N2, %1, 0x0}" [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "memory" "none,store") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_insn "vec_extract_lo_" [(set (match_operand: 0 "nonimmediate_operand" "=vm,v") (vec_select: (match_operand:VI8F_256 1 "nonimmediate_operand" "v,vm") (parallel [(const_int 0) (const_int 1)])))] "TARGET_AVX && !(MEM_P (operands[0]) && MEM_P (operands[1]))" "#") (define_split [(set (match_operand: 0 "nonimmediate_operand") (vec_select: (match_operand:VI8F_256 1 "nonimmediate_operand") (parallel [(const_int 0) (const_int 1)])))] "TARGET_AVX && !(MEM_P (operands[0]) && MEM_P (operands[1])) && reload_completed" [(set (match_dup 0) (match_dup 1))] "operands[1] = gen_lowpart (mode, operands[1]);") (define_insn "vec_extract_hi__mask" [(set (match_operand: 0 "nonimmediate_operand" "=v,m") (vec_merge: (vec_select: (match_operand:VI8F_256 1 "register_operand" "v,v") (parallel [(const_int 2) (const_int 3)])) (match_operand: 2 "nonimm_or_0_operand" "0C,0") (match_operand:QI 3 "register_operand" "Yk,Yk")))] "TARGET_AVX512DQ && TARGET_AVX512VL && (!MEM_P (operands[0]) || rtx_equal_p (operands[0], operands[2]))" "vextract64x2\t{$0x1, %1, %0%{%3%}%N2|%0%{%3%}%N2, %1, 0x1}" [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "vex") (set_attr "mode" "")]) (define_insn "vec_extract_hi_" [(set (match_operand: 0 "nonimmediate_operand" "=vm") (vec_select: (match_operand:VI8F_256 1 "register_operand" "v") (parallel [(const_int 2) (const_int 3)])))] "TARGET_AVX" { if (TARGET_AVX512VL) { if (TARGET_AVX512DQ) return "vextract64x2\t{$0x1, %1, %0|%0, %1, 0x1}"; else return "vextract32x4\t{$0x1, %1, %0|%0, %1, 0x1}"; } else return 
"vextract\t{$0x1, %1, %0|%0, %1, 0x1}"; } [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "vex") (set_attr "mode" "")]) (define_split [(set (match_operand: 0 "nonimmediate_operand") (vec_select: (match_operand:VI4F_256 1 "nonimmediate_operand") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)])))] "TARGET_AVX && !(MEM_P (operands[0]) && MEM_P (operands[1])) && reload_completed" [(set (match_dup 0) (match_dup 1))] "operands[1] = gen_lowpart (mode, operands[1]);") (define_insn "vec_extract_lo__mask" [(set (match_operand: 0 "nonimmediate_operand" "=v,m") (vec_merge: (vec_select: (match_operand:VI4F_256 1 "register_operand" "v,v") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)])) (match_operand: 2 "nonimm_or_0_operand" "0C,0") (match_operand:QI 3 "register_operand" "Yk,Yk")))] "TARGET_AVX512VL && (!MEM_P (operands[0]) || rtx_equal_p (operands[0], operands[2]))" "vextract32x4\t{$0x0, %1, %0%{%3%}%N2|%0%{%3%}%N2, %1, 0x0}" [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "vec_extract_lo_" [(set (match_operand: 0 "nonimmediate_operand" "=vm,v") (vec_select: (match_operand:VI4F_256 1 "nonimmediate_operand" "v,vm") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)])))] "TARGET_AVX && !(MEM_P (operands[0]) && MEM_P (operands[1]))" "#" [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "vec_extract_hi__mask" [(set (match_operand: 0 "register_operand" "=v,m") (vec_merge: (vec_select: (match_operand:VI4F_256 1 "register_operand" "v,v") (parallel [(const_int 4) (const_int 5) (const_int 6) (const_int 7)])) (match_operand: 2 "nonimm_or_0_operand" "0C,0") (match_operand: 3 "register_operand" "Yk,Yk")))] "TARGET_AVX512VL && (!MEM_P (operands[0]) || 
rtx_equal_p (operands[0], operands[2]))" "vextract32x4\t{$0x1, %1, %0%{%3%}%N2|%0%{%3%}%N2, %1, 0x1}" [(set_attr "type" "sselog1") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "vec_extract_hi_" [(set (match_operand: 0 "nonimmediate_operand" "=xm, vm") (vec_select: (match_operand:VI4F_256 1 "register_operand" "x, v") (parallel [(const_int 4) (const_int 5) (const_int 6) (const_int 7)])))] "TARGET_AVX" "@ vextract\t{$0x1, %1, %0|%0, %1, 0x1} vextract32x4\t{$0x1, %1, %0|%0, %1, 0x1}" [(set_attr "isa" "*, avx512vl") (set_attr "prefix" "vex, evex") (set_attr "type" "sselog1") (set_attr "length_immediate" "1") (set_attr "mode" "")]) (define_insn_and_split "vec_extract_lo_" [(set (match_operand: 0 "nonimmediate_operand" "=v,v,m") (vec_select: (match_operand:V32_512 1 "nonimmediate_operand" "v,m,v") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7) (const_int 8) (const_int 9) (const_int 10) (const_int 11) (const_int 12) (const_int 13) (const_int 14) (const_int 15)])))] "TARGET_AVX512F && !(MEM_P (operands[0]) && MEM_P (operands[1]))" { if (TARGET_AVX512VL || REG_P (operands[0]) || !EXT_REX_SSE_REG_P (operands[1])) return "#"; else return "vextracti64x4\t{$0x0, %1, %0|%0, %1, 0x0}"; } "&& reload_completed && (TARGET_AVX512VL || REG_P (operands[0]) || !EXT_REX_SSE_REG_P (operands[1]))" [(set (match_dup 0) (match_dup 1))] { if (!TARGET_AVX512VL && REG_P (operands[0]) && EXT_REX_SSE_REG_P (operands[1])) operands[0] = lowpart_subreg (mode, operands[0], mode); else operands[1] = gen_lowpart (mode, operands[1]); } [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "memory" "none,load,store") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_insn "vec_extract_hi_" [(set (match_operand: 0 "nonimmediate_operand" "=vm") (vec_select: (match_operand:V32_512 1 "register_operand" "v") (parallel 
[(const_int 16) (const_int 17) (const_int 18) (const_int 19) (const_int 20) (const_int 21) (const_int 22) (const_int 23) (const_int 24) (const_int 25) (const_int 26) (const_int 27) (const_int 28) (const_int 29) (const_int 30) (const_int 31)])))] "TARGET_AVX512F" "vextracti64x4\t{$0x1, %1, %0|%0, %1, 0x1}" [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_insn_and_split "vec_extract_lo_" [(set (match_operand: 0 "nonimmediate_operand" "=v,m") (vec_select: (match_operand:V16_256 1 "nonimmediate_operand" "vm,v") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)])))] "TARGET_AVX && !(MEM_P (operands[0]) && MEM_P (operands[1]))" "#" "&& reload_completed" [(set (match_dup 0) (match_dup 1))] "operands[1] = gen_lowpart (mode, operands[1]);") (define_insn "vec_extract_hi_" [(set (match_operand: 0 "nonimmediate_operand" "=xm,vm,vm") (vec_select: (match_operand:V16_256 1 "register_operand" "x,v,v") (parallel [(const_int 8) (const_int 9) (const_int 10) (const_int 11) (const_int 12) (const_int 13) (const_int 14) (const_int 15)])))] "TARGET_AVX" "@ vextract%~128\t{$0x1, %1, %0|%0, %1, 0x1} vextracti32x4\t{$0x1, %1, %0|%0, %1, 0x1} vextracti32x4\t{$0x1, %g1, %0|%0, %g1, 0x1}" [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "isa" "*,avx512dq,avx512f") (set_attr "prefix" "vex,evex,evex") (set_attr "mode" "OI")]) (define_insn_and_split "vec_extract_lo_v64qi" [(set (match_operand:V32QI 0 "nonimmediate_operand" "=v,v,m") (vec_select:V32QI (match_operand:V64QI 1 "nonimmediate_operand" "v,m,v") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7) (const_int 8) (const_int 9) (const_int 10) (const_int 11) (const_int 12) (const_int 13) (const_int 14) (const_int 15) (const_int 16) (const_int 17) 
(const_int 18) (const_int 19) (const_int 20) (const_int 21) (const_int 22) (const_int 23) (const_int 24) (const_int 25) (const_int 26) (const_int 27) (const_int 28) (const_int 29) (const_int 30) (const_int 31)])))] "TARGET_AVX512F && !(MEM_P (operands[0]) && MEM_P (operands[1]))" { if (TARGET_AVX512VL || REG_P (operands[0]) || !EXT_REX_SSE_REG_P (operands[1])) return "#"; else return "vextracti64x4\t{$0x0, %1, %0|%0, %1, 0x0}"; } "&& reload_completed && (TARGET_AVX512VL || REG_P (operands[0]) || !EXT_REX_SSE_REG_P (operands[1]))" [(set (match_dup 0) (match_dup 1))] { if (!TARGET_AVX512VL && REG_P (operands[0]) && EXT_REX_SSE_REG_P (operands[1])) operands[0] = lowpart_subreg (V64QImode, operands[0], V32QImode); else operands[1] = gen_lowpart (V32QImode, operands[1]); } [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "memory" "none,load,store") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_insn "vec_extract_hi_v64qi" [(set (match_operand:V32QI 0 "nonimmediate_operand" "=vm") (vec_select:V32QI (match_operand:V64QI 1 "register_operand" "v") (parallel [(const_int 32) (const_int 33) (const_int 34) (const_int 35) (const_int 36) (const_int 37) (const_int 38) (const_int 39) (const_int 40) (const_int 41) (const_int 42) (const_int 43) (const_int 44) (const_int 45) (const_int 46) (const_int 47) (const_int 48) (const_int 49) (const_int 50) (const_int 51) (const_int 52) (const_int 53) (const_int 54) (const_int 55) (const_int 56) (const_int 57) (const_int 58) (const_int 59) (const_int 60) (const_int 61) (const_int 62) (const_int 63)])))] "TARGET_AVX512F" "vextracti64x4\t{$0x1, %1, %0|%0, %1, 0x1}" [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_insn_and_split "vec_extract_lo_v32qi" [(set (match_operand:V16QI 0 "nonimmediate_operand" "=v,m") (vec_select:V16QI (match_operand:V32QI 1 "nonimmediate_operand" 
"vm,v") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7) (const_int 8) (const_int 9) (const_int 10) (const_int 11) (const_int 12) (const_int 13) (const_int 14) (const_int 15)])))] "TARGET_AVX && !(MEM_P (operands[0]) && MEM_P (operands[1]))" "#" "&& reload_completed" [(set (match_dup 0) (match_dup 1))] "operands[1] = gen_lowpart (V16QImode, operands[1]);") (define_insn "vec_extract_hi_v32qi" [(set (match_operand:V16QI 0 "nonimmediate_operand" "=xm,vm,vm") (vec_select:V16QI (match_operand:V32QI 1 "register_operand" "x,v,v") (parallel [(const_int 16) (const_int 17) (const_int 18) (const_int 19) (const_int 20) (const_int 21) (const_int 22) (const_int 23) (const_int 24) (const_int 25) (const_int 26) (const_int 27) (const_int 28) (const_int 29) (const_int 30) (const_int 31)])))] "TARGET_AVX" "@ vextract%~128\t{$0x1, %1, %0|%0, %1, 0x1} vextracti32x4\t{$0x1, %1, %0|%0, %1, 0x1} vextracti32x4\t{$0x1, %g1, %0|%0, %g1, 0x1}" [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "isa" "*,avx512dq,avx512f") (set_attr "prefix" "vex,evex,evex") (set_attr "mode" "OI")]) ;; NB: *vec_extract_0 must be placed before *vec_extracthf. ;; Otherwise, it will be ignored.
;; NOTE(review): The patterns above implement the vec_extract_lo/vec_extract_hi
;; family -- extracting the low or high half of 256-bit vectors (to 128-bit)
;; and of 512-bit vectors (to 256-bit), in masked (AVX512VL, vextract*32x4 with
;; %{%3%} merge-masking) and unmasked forms.  Low-half extracts split after
;; reload into a plain gen_lowpart move; high-half extracts emit
;; vextract*/vextracti64x4.
;; NOTE(review): this text appears to have lost every angle-bracket iterator
;; substitution (pattern names end in a dangling "_", match_operand modes are
;; empty, and (set_attr "mode" "") has no value) -- presumably <mode>-style
;; placeholders were stripped by the extraction; compare against the upstream
;; gcc/config/i386/sse.md before trusting or building this text.
(define_insn_and_split "*vec_extract_0" [(set (match_operand:HF 0 "nonimmediate_operand" "=v,m,r") (vec_select:HF (match_operand:VF_AVX512FP16 1 "nonimmediate_operand" "vm,v,m") (parallel [(const_int 0)])))] "TARGET_AVX512FP16 && !(MEM_P (operands[0]) && MEM_P (operands[1]))" "#" "&& reload_completed" [(set (match_dup 0) (match_dup 1))] "operands[1] = gen_lowpart (HFmode, operands[1]);") (define_insn "*vec_extracthf" [(set (match_operand:HF 0 "register_sse4nonimm_operand" "=*r,m,x,v") (vec_select:HF (match_operand:V8HF 1 "register_operand" "v,v,0,v") (parallel [(match_operand:SI 2 "const_0_to_7_operand")])))] "TARGET_SSE2" { switch (which_alternative) { case 0: return "%vpextrw\t{%2, %1, %k0|%k0, %1, %2}"; case 1: return "%vpextrw\t{%2, %1, %0|%0, %1, %2}"; case 2: operands[2] = GEN_INT (INTVAL (operands[2]) * 2); return "psrldq\t{%2, %0|%0, %2}"; case 3: operands[2] = GEN_INT (INTVAL (operands[2]) * 2); return "vpsrldq\t{%2, %1, %0|%0, %1, %2}"; default: gcc_unreachable (); } } [(set_attr "isa" "*,sse4,noavx,avx") (set_attr "type" "sselog1,sselog1,sseishft1,sseishft1") (set_attr "prefix" "maybe_evex") (set_attr "mode" "TI")]) ;; Modes handled by vec_extract patterns. 
(define_mode_iterator VEC_EXTRACT_MODE [(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX") V16QI (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX") V8HI (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI (V32HF "TARGET_AVX512BW") (V16HF "TARGET_AVX") V8HF (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") V2DF (V4TI "TARGET_AVX512F") (V2TI "TARGET_AVX")]) (define_expand "vec_extract" [(match_operand: 0 "register_operand") (match_operand:VEC_EXTRACT_MODE 1 "register_operand") (match_operand 2 "const_int_operand")] "TARGET_SSE" { ix86_expand_vector_extract (false, operands[0], operands[1], INTVAL (operands[2])); DONE; }) (define_expand "vec_extract" [(match_operand: 0 "nonimmediate_operand") (match_operand:V_256_512 1 "register_operand") (match_operand 2 "const_0_to_1_operand")] "TARGET_AVX" { if (INTVAL (operands[2])) emit_insn (gen_vec_extract_hi_ (operands[0], operands[1])); else emit_insn (gen_vec_extract_lo_ (operands[0], operands[1])); DONE; }) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; Parallel double-precision floating point element swizzling ;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define_insn "avx512f_unpckhpd512" [(set (match_operand:V8DF 0 "register_operand" "=v") (vec_select:V8DF (vec_concat:V16DF (match_operand:V8DF 1 "register_operand" "v") (match_operand:V8DF 2 "nonimmediate_operand" "vm")) (parallel [(const_int 1) (const_int 9) (const_int 3) (const_int 11) (const_int 5) (const_int 13) (const_int 7) (const_int 15)])))] "TARGET_AVX512F" "vunpckhpd\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "V8DF")]) ;; Recall that the 256-bit unpck insns only shuffle within their lanes. 
;; High-half interleave for DF vectors.  avx_unpckhpd256 is the within-lane
;; 256-bit vunpckhpd; because unpck shuffles only within 128-bit lanes, the
;; vec_interleave_highv4df expander builds the true cross-lane interleave from
;; two unpck-style vec_selects into temporaries plus a final combining
;; vec_select.  The 128-bit masked form (avx512vl_unpckhpd128_mask) and the
;; plain vec_interleave_highv2df expander/insn follow; the latter's insn has
;; six alternatives covering unpckhpd, vunpckhpd, movddup-from-memory, and
;; movlpd/movhpd load/store forms, guarded by
;; ix86_vec_interleave_v2df_operator_ok.
(define_insn "avx_unpckhpd256" [(set (match_operand:V4DF 0 "register_operand" "=v") (vec_select:V4DF (vec_concat:V8DF (match_operand:V4DF 1 "register_operand" "v") (match_operand:V4DF 2 "nonimmediate_operand" "vm")) (parallel [(const_int 1) (const_int 5) (const_int 3) (const_int 7)])))] "TARGET_AVX && " "vunpckhpd\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "vex") (set_attr "mode" "V4DF")]) (define_expand "vec_interleave_highv4df" [(set (match_dup 3) (vec_select:V4DF (vec_concat:V8DF (match_operand:V4DF 1 "register_operand") (match_operand:V4DF 2 "nonimmediate_operand")) (parallel [(const_int 0) (const_int 4) (const_int 2) (const_int 6)]))) (set (match_dup 4) (vec_select:V4DF (vec_concat:V8DF (match_dup 1) (match_dup 2)) (parallel [(const_int 1) (const_int 5) (const_int 3) (const_int 7)]))) (set (match_operand:V4DF 0 "register_operand") (vec_select:V4DF (vec_concat:V8DF (match_dup 3) (match_dup 4)) (parallel [(const_int 2) (const_int 3) (const_int 6) (const_int 7)])))] "TARGET_AVX" { operands[3] = gen_reg_rtx (V4DFmode); operands[4] = gen_reg_rtx (V4DFmode); }) (define_insn "avx512vl_unpckhpd128_mask" [(set (match_operand:V2DF 0 "register_operand" "=v") (vec_merge:V2DF (vec_select:V2DF (vec_concat:V4DF (match_operand:V2DF 1 "register_operand" "v") (match_operand:V2DF 2 "nonimmediate_operand" "vm")) (parallel [(const_int 1) (const_int 3)])) (match_operand:V2DF 3 "nonimm_or_0_operand" "0C") (match_operand:QI 4 "register_operand" "Yk")))] "TARGET_AVX512VL" "vunpckhpd\t{%2, %1, %0%{%4%}%N3|%0%{%4%}%N3, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "V2DF")]) (define_expand "vec_interleave_highv2df" [(set (match_operand:V2DF 0 "register_operand") (vec_select:V2DF (vec_concat:V4DF (match_operand:V2DF 1 "nonimmediate_operand") (match_operand:V2DF 2 "nonimmediate_operand")) (parallel [(const_int 1) (const_int 3)])))] "TARGET_SSE2" { if (!ix86_vec_interleave_v2df_operator_ok (operands, 1)) operands[2] = 
force_reg (V2DFmode, operands[2]); }) (define_insn "*vec_interleave_highv2df" [(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,v,v,x,v,m") (vec_select:V2DF (vec_concat:V4DF (match_operand:V2DF 1 "nonimmediate_operand" " 0,v,o,o,o,v") (match_operand:V2DF 2 "nonimmediate_operand" " x,v,1,0,v,0")) (parallel [(const_int 1) (const_int 3)])))] "TARGET_SSE2 && ix86_vec_interleave_v2df_operator_ok (operands, 1)" "@ unpckhpd\t{%2, %0|%0, %2} vunpckhpd\t{%2, %1, %0|%0, %1, %2} %vmovddup\t{%H1, %0|%0, %H1} movlpd\t{%H1, %0|%0, %H1} vmovlpd\t{%H1, %2, %0|%0, %2, %H1} %vmovhpd\t{%1, %0|%q0, %1}" [(set_attr "isa" "noavx,avx,sse3,noavx,avx,*") (set_attr "type" "sselog,sselog,sselog,ssemov,ssemov,ssemov") (set (attr "prefix_data16") (if_then_else (eq_attr "alternative" "3,5") (const_string "1") (const_string "*"))) (set_attr "prefix" "orig,maybe_evex,maybe_vex,orig,maybe_evex,maybe_vex") (set_attr "mode" "V2DF,V2DF,DF,V1DF,V1DF,V1DF")]) (define_expand "avx512f_movddup512" [(set (match_operand:V8DF 0 "register_operand") (vec_select:V8DF (vec_concat:V16DF (match_operand:V8DF 1 "nonimmediate_operand") (match_dup 1)) (parallel [(const_int 0) (const_int 8) (const_int 2) (const_int 10) (const_int 4) (const_int 12) (const_int 6) (const_int 14)])))] "TARGET_AVX512F") (define_expand "avx512f_unpcklpd512" [(set (match_operand:V8DF 0 "register_operand") (vec_select:V8DF (vec_concat:V16DF (match_operand:V8DF 1 "register_operand") (match_operand:V8DF 2 "nonimmediate_operand")) (parallel [(const_int 0) (const_int 8) (const_int 2) (const_int 10) (const_int 4) (const_int 12) (const_int 6) (const_int 14)])))] "TARGET_AVX512F") (define_insn "*avx512f_unpcklpd512" [(set (match_operand:V8DF 0 "register_operand" "=v,v") (vec_select:V8DF (vec_concat:V16DF (match_operand:V8DF 1 "nonimmediate_operand" "vm, v") (match_operand:V8DF 2 "nonimmediate_operand" "1 ,vm")) (parallel [(const_int 0) (const_int 8) (const_int 2) (const_int 10) (const_int 4) (const_int 12) (const_int 6) (const_int 14)])))] 
;; Low-half interleave (vunpcklpd) and movddup patterns at 512/256/128 bits,
;; the cross-lane vec_interleave_lowv4df three-shuffle expander (unpck works
;; only within 128-bit lanes), the masked 128-bit form, vec_interleave_lowv2df
;; with its movddup/movhpd/movlpd alternatives, two post-reload splits that
;; turn a v2df broadcast-to-memory into two DF stores / a memory duplicate
;; into movddup, and the start of the vscalef and vpternlog pattern groups.
;; NOTE(review): dangling "TARGET_AVX && " conditions and empty pattern names
;; ("_scalef", "_vternlog_maskz") indicate stripped <iterator> placeholders.
"TARGET_AVX512F" "@ vmovddup\t{%1, %0|%0, %1} vunpcklpd\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "V8DF")]) ;; Recall that the 256-bit unpck insns only shuffle within their lanes. (define_expand "avx_movddup256" [(set (match_operand:V4DF 0 "register_operand") (vec_select:V4DF (vec_concat:V8DF (match_operand:V4DF 1 "nonimmediate_operand") (match_dup 1)) (parallel [(const_int 0) (const_int 4) (const_int 2) (const_int 6)])))] "TARGET_AVX && ") (define_expand "avx_unpcklpd256" [(set (match_operand:V4DF 0 "register_operand") (vec_select:V4DF (vec_concat:V8DF (match_operand:V4DF 1 "register_operand") (match_operand:V4DF 2 "nonimmediate_operand")) (parallel [(const_int 0) (const_int 4) (const_int 2) (const_int 6)])))] "TARGET_AVX && ") (define_insn "*avx_unpcklpd256" [(set (match_operand:V4DF 0 "register_operand" "=v,v") (vec_select:V4DF (vec_concat:V8DF (match_operand:V4DF 1 "nonimmediate_operand" " v,m") (match_operand:V4DF 2 "nonimmediate_operand" "vm,1")) (parallel [(const_int 0) (const_int 4) (const_int 2) (const_int 6)])))] "TARGET_AVX && " "@ vunpcklpd\t{%2, %1, %0|%0, %1, %2} vmovddup\t{%1, %0|%0, %1}" [(set_attr "type" "sselog") (set_attr "prefix" "vex") (set_attr "mode" "V4DF")]) (define_expand "vec_interleave_lowv4df" [(set (match_dup 3) (vec_select:V4DF (vec_concat:V8DF (match_operand:V4DF 1 "register_operand") (match_operand:V4DF 2 "nonimmediate_operand")) (parallel [(const_int 0) (const_int 4) (const_int 2) (const_int 6)]))) (set (match_dup 4) (vec_select:V4DF (vec_concat:V8DF (match_dup 1) (match_dup 2)) (parallel [(const_int 1) (const_int 5) (const_int 3) (const_int 7)]))) (set (match_operand:V4DF 0 "register_operand") (vec_select:V4DF (vec_concat:V8DF (match_dup 3) (match_dup 4)) (parallel [(const_int 0) (const_int 1) (const_int 4) (const_int 5)])))] "TARGET_AVX" { operands[3] = gen_reg_rtx (V4DFmode); operands[4] = gen_reg_rtx (V4DFmode); }) (define_insn "avx512vl_unpcklpd128_mask" [(set 
(match_operand:V2DF 0 "register_operand" "=v") (vec_merge:V2DF (vec_select:V2DF (vec_concat:V4DF (match_operand:V2DF 1 "register_operand" "v") (match_operand:V2DF 2 "nonimmediate_operand" "vm")) (parallel [(const_int 0) (const_int 2)])) (match_operand:V2DF 3 "nonimm_or_0_operand" "0C") (match_operand:QI 4 "register_operand" "Yk")))] "TARGET_AVX512VL" "vunpcklpd\t{%2, %1, %0%{%4%}%N3|%0%{%4%}%N3, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "V2DF")]) (define_expand "vec_interleave_lowv2df" [(set (match_operand:V2DF 0 "register_operand") (vec_select:V2DF (vec_concat:V4DF (match_operand:V2DF 1 "nonimmediate_operand") (match_operand:V2DF 2 "nonimmediate_operand")) (parallel [(const_int 0) (const_int 2)])))] "TARGET_SSE2" { if (!ix86_vec_interleave_v2df_operator_ok (operands, 0)) operands[1] = force_reg (V2DFmode, operands[1]); }) (define_insn "*vec_interleave_lowv2df" [(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,v,v,x,v,o") (vec_select:V2DF (vec_concat:V4DF (match_operand:V2DF 1 "nonimmediate_operand" " 0,v,m,0,v,0") (match_operand:V2DF 2 "nonimmediate_operand" " x,v,1,m,m,v")) (parallel [(const_int 0) (const_int 2)])))] "TARGET_SSE2 && ix86_vec_interleave_v2df_operator_ok (operands, 0)" "@ unpcklpd\t{%2, %0|%0, %2} vunpcklpd\t{%2, %1, %0|%0, %1, %2} %vmovddup\t{%1, %0|%0, %q1} movhpd\t{%2, %0|%0, %q2} vmovhpd\t{%2, %1, %0|%0, %1, %q2} %vmovlpd\t{%2, %H0|%H0, %2}" [(set_attr "isa" "noavx,avx,sse3,noavx,avx,*") (set_attr "type" "sselog,sselog,sselog,ssemov,ssemov,ssemov") (set (attr "prefix_data16") (if_then_else (eq_attr "alternative" "3,5") (const_string "1") (const_string "*"))) (set_attr "prefix" "orig,maybe_evex,maybe_vex,orig,maybe_evex,maybe_vex") (set_attr "mode" "V2DF,V2DF,DF,V1DF,V1DF,V1DF")]) (define_split [(set (match_operand:V2DF 0 "memory_operand") (vec_select:V2DF (vec_concat:V4DF (match_operand:V2DF 1 "register_operand") (match_dup 1)) (parallel [(const_int 0) (const_int 2)])))] "TARGET_SSE3 && 
reload_completed" [(const_int 0)] { rtx low = gen_lowpart (DFmode, operands[1]); emit_move_insn (adjust_address (operands[0], DFmode, 0), low); emit_move_insn (adjust_address (operands[0], DFmode, 8), low); DONE; }) (define_split [(set (match_operand:V2DF 0 "register_operand") (vec_select:V2DF (vec_concat:V4DF (match_operand:V2DF 1 "memory_operand") (match_dup 1)) (parallel [(match_operand:SI 2 "const_0_to_1_operand") (match_operand:SI 3 "const_int_operand")])))] "TARGET_SSE3 && INTVAL (operands[2]) + 2 == INTVAL (operands[3])" [(set (match_dup 0) (vec_duplicate:V2DF (match_dup 1)))] { operands[1] = adjust_address (operands[1], DFmode, INTVAL (operands[2]) * 8); }) (define_insn "avx512f_vmscalef" [(set (match_operand:VFH_128 0 "register_operand" "=v") (vec_merge:VFH_128 (unspec:VFH_128 [(match_operand:VFH_128 1 "register_operand" "v") (match_operand:VFH_128 2 "" "")] UNSPEC_SCALEF) (match_dup 1) (const_int 1)))] "TARGET_AVX512F" "vscalef\t{%2, %1, %0|%0, %1, %2}" [(set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "_scalef" [(set (match_operand:VFH_AVX512VL 0 "register_operand" "=v") (unspec:VFH_AVX512VL [(match_operand:VFH_AVX512VL 1 "register_operand" "v") (match_operand:VFH_AVX512VL 2 "nonimmediate_operand" "")] UNSPEC_SCALEF))] "TARGET_AVX512F" "vscalef\t{%2, %1, %0|%0, %1, %2}" [(set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "_vternlog_maskz" [(match_operand:VI48_AVX512VL 0 "register_operand") (match_operand:VI48_AVX512VL 1 "register_operand") (match_operand:VI48_AVX512VL 2 "register_operand") (match_operand:VI48_AVX512VL 3 "nonimmediate_operand") (match_operand:SI 4 "const_0_to_255_operand") (match_operand: 5 "register_operand")] "TARGET_AVX512F" { emit_insn (gen__vternlog_maskz_1 ( operands[0], operands[1], operands[2], operands[3], operands[4], CONST0_RTX (mode), operands[5])); DONE; }) (define_insn "_vternlog" [(set (match_operand:VI48_AVX512VL 0 "register_operand" "=v") (unspec:VI48_AVX512VL [(match_operand:VI48_AVX512VL 1 
;; vpternlog patterns: the base vternlog insn (imm8 truth table over three
;; operands), the *_vternlog_all variant (embedded broadcast disallowed for
;; HF elements -- not a real AVX512FP16 insn), three insn_and_split patterns
;; that recognize two nested binary logic ops sharing an operand and collapse
;; them into one vpternlog by computing the imm8 truth-table mask from the
;; canonical constants 0xF0/0xCC/0xAA (bitwise-inverted for NOT-wrapped
;; operands), the masked vternlog form, and the getexp/align patterns.
;; NOTE(review): this text is visibly corrupted -- C statements such as
;; "tmp1 = reg1 reg2;" and conditions like "( == 64 || TARGET_AVX512VL)" are
;; missing tokens (originally <logic_op>/<MODE_SIZE>-style iterator
;; substitutions), so it cannot build as-is; restore from upstream
;; gcc/config/i386/sse.md before use.
"register_operand" "0") (match_operand:VI48_AVX512VL 2 "register_operand" "v") (match_operand:VI48_AVX512VL 3 "bcst_vector_operand" "vmBr") (match_operand:SI 4 "const_0_to_255_operand")] UNSPEC_VTERNLOG))] "TARGET_AVX512F" "vpternlog\t{%4, %3, %2, %0|%0, %2, %3, %4}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "*_vternlog_all" [(set (match_operand:V 0 "register_operand" "=v") (unspec:V [(match_operand:V 1 "register_operand" "0") (match_operand:V 2 "register_operand" "v") (match_operand:V 3 "bcst_vector_operand" "vmBr") (match_operand:SI 4 "const_0_to_255_operand")] UNSPEC_VTERNLOG))] "TARGET_AVX512F /* Disallow embeded broadcast for vector HFmode since it's not real AVX512FP16 instruction. */ && (GET_MODE_SIZE (GET_MODE_INNER (mode)) >= 4 || GET_CODE (operands[3]) != VEC_DUPLICATE)" "vpternlog\t{%4, %3, %2, %0|%0, %2, %3, %4}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "")]) ;; There must be lots of other combinations like ;; ;; (any_logic:V ;; (any_logic:V op1 op2) ;; (any_logic:V op1 op3)) ;; ;; (any_logic:V ;; (any_logic:V ;; (any_logic:V op1, op2) ;; op3) ;; op1) ;; ;; and so on.
(define_code_iterator any_logic1 [and ior xor]) (define_code_iterator any_logic2 [and ior xor]) (define_code_attr logic_op [(and "&") (ior "|") (xor "^")]) (define_insn_and_split "*_vpternlog_1" [(set (match_operand:V 0 "register_operand") (any_logic:V (any_logic1:V (match_operand:V 1 "regmem_or_bitnot_regmem_operand") (match_operand:V 2 "regmem_or_bitnot_regmem_operand")) (any_logic2:V (match_operand:V 3 "regmem_or_bitnot_regmem_operand") (match_operand:V 4 "regmem_or_bitnot_regmem_operand"))))] "( == 64 || TARGET_AVX512VL) && ix86_pre_reload_split () && (rtx_equal_p (STRIP_UNARY (operands[1]), STRIP_UNARY (operands[4])) || rtx_equal_p (STRIP_UNARY (operands[2]), STRIP_UNARY (operands[4])) || rtx_equal_p (STRIP_UNARY (operands[1]), STRIP_UNARY (operands[3])) || rtx_equal_p (STRIP_UNARY (operands[2]), STRIP_UNARY (operands[3])))" "#" "&& 1" [(set (match_dup 0) (unspec:V [(match_dup 6) (match_dup 2) (match_dup 1) (match_dup 5)] UNSPEC_VTERNLOG))] { /* VPTERNLOGD reg6, reg2, reg1, imm8. */ int reg6 = 0xF0; int reg2 = 0xCC; int reg1 = 0xAA; int reg3 = 0; int reg4 = 0; int reg_mask, tmp1, tmp2; if (rtx_equal_p (STRIP_UNARY (operands[1]), STRIP_UNARY (operands[4]))) { reg4 = reg1; reg3 = reg6; operands[6] = operands[3]; } else if (rtx_equal_p (STRIP_UNARY (operands[2]), STRIP_UNARY (operands[4]))) { reg4 = reg2; reg3 = reg6; operands[6] = operands[3]; } else if (rtx_equal_p (STRIP_UNARY (operands[1]), STRIP_UNARY (operands[3]))) { reg4 = reg6; reg3 = reg1; operands[6] = operands[4]; } else { reg4 = reg6; reg3 = reg2; operands[6] = operands[4]; } reg1 = UNARY_P (operands[1]) ? ~reg1 : reg1; reg2 = UNARY_P (operands[2]) ? ~reg2 : reg2; reg3 = UNARY_P (operands[3]) ? ~reg3 : reg3; reg4 = UNARY_P (operands[4]) ? 
~reg4 : reg4; tmp1 = reg1 reg2; tmp2 = reg3 reg4; reg_mask = tmp1 tmp2; reg_mask &= 0xFF; operands[1] = STRIP_UNARY (operands[1]); operands[2] = STRIP_UNARY (operands[2]); operands[6] = STRIP_UNARY (operands[6]); if (!register_operand (operands[2], mode)) operands[2] = force_reg (mode, operands[2]); if (!register_operand (operands[6], mode)) operands[6] = force_reg (mode, operands[6]); operands[5] = GEN_INT (reg_mask); }) (define_insn_and_split "*_vpternlog_2" [(set (match_operand:V 0 "register_operand") (any_logic:V (any_logic1:V (any_logic2:V (match_operand:V 1 "regmem_or_bitnot_regmem_operand") (match_operand:V 2 "regmem_or_bitnot_regmem_operand")) (match_operand:V 3 "regmem_or_bitnot_regmem_operand")) (match_operand:V 4 "regmem_or_bitnot_regmem_operand")))] "( == 64 || TARGET_AVX512VL) && ix86_pre_reload_split () && (rtx_equal_p (STRIP_UNARY (operands[1]), STRIP_UNARY (operands[4])) || rtx_equal_p (STRIP_UNARY (operands[2]), STRIP_UNARY (operands[4])) || rtx_equal_p (STRIP_UNARY (operands[1]), STRIP_UNARY (operands[3])) || rtx_equal_p (STRIP_UNARY (operands[2]), STRIP_UNARY (operands[3])))" "#" "&& 1" [(set (match_dup 0) (unspec:V [(match_dup 6) (match_dup 2) (match_dup 1) (match_dup 5)] UNSPEC_VTERNLOG))] { /* VPTERNLOGD reg6, reg2, reg1, imm8. */ int reg6 = 0xF0; int reg2 = 0xCC; int reg1 = 0xAA; int reg3 = 0; int reg4 = 0; int reg_mask, tmp1, tmp2; if (rtx_equal_p (STRIP_UNARY (operands[1]), STRIP_UNARY (operands[4]))) { reg4 = reg1; reg3 = reg6; operands[6] = operands[3]; } else if (rtx_equal_p (STRIP_UNARY (operands[2]), STRIP_UNARY (operands[4]))) { reg4 = reg2; reg3 = reg6; operands[6] = operands[3]; } else if (rtx_equal_p (STRIP_UNARY (operands[1]), STRIP_UNARY (operands[3]))) { reg4 = reg6; reg3 = reg1; operands[6] = operands[4]; } else { reg4 = reg6; reg3 = reg2; operands[6] = operands[4]; } reg1 = UNARY_P (operands[1]) ? ~reg1 : reg1; reg2 = UNARY_P (operands[2]) ? ~reg2 : reg2; reg3 = UNARY_P (operands[3]) ? 
~reg3 : reg3; reg4 = UNARY_P (operands[4]) ? ~reg4 : reg4; tmp1 = reg1 reg2; tmp2 = tmp1 reg3; reg_mask = tmp2 reg4; reg_mask &= 0xFF; operands[1] = STRIP_UNARY (operands[1]); operands[2] = STRIP_UNARY (operands[2]); operands[6] = STRIP_UNARY (operands[6]); operands[5] = GEN_INT (reg_mask); if (!register_operand (operands[2], mode)) operands[2] = force_reg (mode, operands[2]); if (!register_operand (operands[6], mode)) operands[6] = force_reg (mode, operands[6]); }) (define_insn_and_split "*_vpternlog_3" [(set (match_operand:V 0 "register_operand") (any_logic:V (any_logic1:V (match_operand:V 1 "regmem_or_bitnot_regmem_operand") (match_operand:V 2 "regmem_or_bitnot_regmem_operand")) (match_operand:V 3 "regmem_or_bitnot_regmem_operand")))] "( == 64 || TARGET_AVX512VL) && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (unspec:V [(match_dup 3) (match_dup 2) (match_dup 1) (match_dup 4)] UNSPEC_VTERNLOG))] { /* VPTERNLOGD reg3, reg2, reg1, imm8. */ int reg3 = 0xF0; int reg2 = 0xCC; int reg1 = 0xAA; int reg_mask, tmp1; reg1 = UNARY_P (operands[1]) ? ~reg1 : reg1; reg2 = UNARY_P (operands[2]) ? ~reg2 : reg2; reg3 = UNARY_P (operands[3]) ? 
~reg3 : reg3; tmp1 = reg1 reg2; reg_mask = tmp1 reg3; reg_mask &= 0xFF; operands[1] = STRIP_UNARY (operands[1]); operands[2] = STRIP_UNARY (operands[2]); operands[3] = STRIP_UNARY (operands[3]); operands[4] = GEN_INT (reg_mask); if (!register_operand (operands[2], mode)) operands[2] = force_reg (mode, operands[2]); if (!register_operand (operands[3], mode)) operands[3] = force_reg (mode, operands[3]); }) (define_insn "_vternlog_mask" [(set (match_operand:VI48_AVX512VL 0 "register_operand" "=v") (vec_merge:VI48_AVX512VL (unspec:VI48_AVX512VL [(match_operand:VI48_AVX512VL 1 "register_operand" "0") (match_operand:VI48_AVX512VL 2 "register_operand" "v") (match_operand:VI48_AVX512VL 3 "bcst_vector_operand" "vmBr") (match_operand:SI 4 "const_0_to_255_operand")] UNSPEC_VTERNLOG) (match_dup 1) (match_operand: 5 "register_operand" "Yk")))] "TARGET_AVX512F" "vpternlog\t{%4, %3, %2, %0%{%5%}|%0%{%5%}, %2, %3, %4}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "_getexp" [(set (match_operand:VFH_AVX512VL 0 "register_operand" "=v") (unspec:VFH_AVX512VL [(match_operand:VFH_AVX512VL 1 "" "")] UNSPEC_GETEXP))] "TARGET_AVX512F" "vgetexp\t{%1, %0|%0, %1}"; [(set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "avx512f_sgetexp" [(set (match_operand:VFH_128 0 "register_operand" "=v") (vec_merge:VFH_128 (unspec:VFH_128 [(match_operand:VFH_128 1 "register_operand" "v") (match_operand:VFH_128 2 "" "")] UNSPEC_GETEXP) (match_dup 1) (const_int 1)))] "TARGET_AVX512F" "vgetexp\t{%2, %1, %0|%0, %1, %2}"; [(set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "_align" [(set (match_operand:VI48_AVX512VL 0 "register_operand" "=v") (unspec:VI48_AVX512VL [(match_operand:VI48_AVX512VL 1 "register_operand" "v") (match_operand:VI48_AVX512VL 2 "nonimmediate_operand" "vm") (match_operand:SI 3 "const_0_to_255_operand")] UNSPEC_ALIGN))] "TARGET_AVX512F" "valign\t{%3, %2, %1, %0|%0, %1, %2, %3}"; [(set_attr "prefix" "evex") (set_attr "mode" 
;; Element extract via valign (indices whose byte offset is a multiple of 16
;; use vextract of the 128-bit chunk instead), the shufps512/shufpd512 mask
;; expanders that decode the 8-bit shuffle immediate into explicit per-element
;; vec_select indices, the fixupimm family (plain, masked, maskz, and scalar
;; sfixupimm forms around UNSPEC_FIXUPIMM), and the rndscale patterns
;; (UNSPEC_ROUND with imm8).  avx512f_shufps512_1 re-encodes the expanded
;; indices back into the vshufps imm8; its condition checks every lane repeats
;; the first lane's selection (+4/+8/+12 offsets).
;; NOTE(review): empty attribute values like (set_attr "mode" "") and bare
;; names like "_fixupimm_maskz" again indicate stripped <iterator>
;; placeholders -- verify against upstream sse.md.
"")]) (define_mode_attr vec_extract_imm_predicate [(V16SF "const_0_to_15_operand") (V8SF "const_0_to_7_operand") (V16SI "const_0_to_15_operand") (V8SI "const_0_to_7_operand") (V8DF "const_0_to_7_operand") (V4DF "const_0_to_3_operand") (V8DI "const_0_to_7_operand") (V4DI "const_0_to_3_operand")]) (define_insn "*vec_extract_valign" [(set (match_operand: 0 "register_operand" "=v") (vec_select: (match_operand:V48_256_512_AVX512VL 1 "register_operand" "v") (parallel [(match_operand 2 "")])))] "TARGET_AVX512F && INTVAL(operands[2]) * GET_MODE_SIZE (mode) >= 16" { int byte_offset = INTVAL (operands[2]) * GET_MODE_SIZE (mode); if (byte_offset % 16 == 0) { operands[2] = GEN_INT (byte_offset / 16); if (byte_offset / 16 == 1) return "vextract\t{%2, %t1, %x0|%x0, %t1, %2}"; else return "vextract\t{%2, %1, %x0|%x0, %1, %2}"; } else return "valign\t{%2, %1, %1, %0|%0, %1, %1, %2}"; } [(set_attr "prefix" "maybe_evex") (set_attr "mode" "")]) (define_expand "avx512f_shufps512_mask" [(match_operand:V16SF 0 "register_operand") (match_operand:V16SF 1 "register_operand") (match_operand:V16SF 2 "nonimmediate_operand") (match_operand:SI 3 "const_0_to_255_operand") (match_operand:V16SF 4 "register_operand") (match_operand:HI 5 "register_operand")] "TARGET_AVX512F" { int mask = INTVAL (operands[3]); emit_insn (gen_avx512f_shufps512_1_mask (operands[0], operands[1], operands[2], GEN_INT ((mask >> 0) & 3), GEN_INT ((mask >> 2) & 3), GEN_INT (((mask >> 4) & 3) + 16), GEN_INT (((mask >> 6) & 3) + 16), GEN_INT (((mask >> 0) & 3) + 4), GEN_INT (((mask >> 2) & 3) + 4), GEN_INT (((mask >> 4) & 3) + 20), GEN_INT (((mask >> 6) & 3) + 20), GEN_INT (((mask >> 0) & 3) + 8), GEN_INT (((mask >> 2) & 3) + 8), GEN_INT (((mask >> 4) & 3) + 24), GEN_INT (((mask >> 6) & 3) + 24), GEN_INT (((mask >> 0) & 3) + 12), GEN_INT (((mask >> 2) & 3) + 12), GEN_INT (((mask >> 4) & 3) + 28), GEN_INT (((mask >> 6) & 3) + 28), operands[4], operands[5])); DONE; }) (define_expand "_fixupimm_maskz" [(match_operand:VF_AVX512VL 
0 "register_operand") (match_operand:VF_AVX512VL 1 "register_operand") (match_operand:VF_AVX512VL 2 "register_operand") (match_operand: 3 "") (match_operand:SI 4 "const_0_to_255_operand") (match_operand: 5 "register_operand")] "TARGET_AVX512F" { emit_insn (gen__fixupimm_maskz_1 ( operands[0], operands[1], operands[2], operands[3], operands[4], CONST0_RTX (mode), operands[5] )); DONE; }) (define_insn "_fixupimm" [(set (match_operand:VF_AVX512VL 0 "register_operand" "=v") (unspec:VF_AVX512VL [(match_operand:VF_AVX512VL 1 "register_operand" "0") (match_operand:VF_AVX512VL 2 "register_operand" "v") (match_operand: 3 "nonimmediate_operand" "") (match_operand:SI 4 "const_0_to_255_operand")] UNSPEC_FIXUPIMM))] "TARGET_AVX512F" "vfixupimm\t{%4, %3, %2, %0|%0, %2, %3, %4}"; [(set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "_fixupimm_mask" [(set (match_operand:VF_AVX512VL 0 "register_operand" "=v") (vec_merge:VF_AVX512VL (unspec:VF_AVX512VL [(match_operand:VF_AVX512VL 1 "register_operand" "0") (match_operand:VF_AVX512VL 2 "register_operand" "v") (match_operand: 3 "nonimmediate_operand" "") (match_operand:SI 4 "const_0_to_255_operand")] UNSPEC_FIXUPIMM) (match_dup 1) (match_operand: 5 "register_operand" "Yk")))] "TARGET_AVX512F" "vfixupimm\t{%4, %3, %2, %0%{%5%}|%0%{%5%}, %2, %3, %4}"; [(set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "avx512f_sfixupimm_maskz" [(match_operand:VF_128 0 "register_operand") (match_operand:VF_128 1 "register_operand") (match_operand:VF_128 2 "register_operand") (match_operand: 3 "") (match_operand:SI 4 "const_0_to_255_operand") (match_operand: 5 "register_operand")] "TARGET_AVX512F" { emit_insn (gen_avx512f_sfixupimm_maskz_1 ( operands[0], operands[1], operands[2], operands[3], operands[4], CONST0_RTX (mode), operands[5] )); DONE; }) (define_insn "avx512f_sfixupimm" [(set (match_operand:VF_128 0 "register_operand" "=v") (vec_merge:VF_128 (unspec:VF_128 [(match_operand:VF_128 1 "register_operand" "0") 
(match_operand:VF_128 2 "register_operand" "v") (match_operand: 3 "" "") (match_operand:SI 4 "const_0_to_255_operand")] UNSPEC_FIXUPIMM) (match_dup 2) (const_int 1)))] "TARGET_AVX512F" "vfixupimm\t{%4, %3, %2, %0|%0, %2, %3, %4}"; [(set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "avx512f_sfixupimm_mask" [(set (match_operand:VF_128 0 "register_operand" "=v") (vec_merge:VF_128 (vec_merge:VF_128 (unspec:VF_128 [(match_operand:VF_128 1 "register_operand" "0") (match_operand:VF_128 2 "register_operand" "v") (match_operand: 3 "" "") (match_operand:SI 4 "const_0_to_255_operand")] UNSPEC_FIXUPIMM) (match_dup 2) (const_int 1)) (match_dup 1) (match_operand: 5 "register_operand" "Yk")))] "TARGET_AVX512F" "vfixupimm\t{%4, %3, %2, %0%{%5%}|%0%{%5%}, %2, %3, %4}"; [(set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "_rndscale" [(set (match_operand:VFH_AVX512VL 0 "register_operand" "=v") (unspec:VFH_AVX512VL [(match_operand:VFH_AVX512VL 1 "nonimmediate_operand" "") (match_operand:SI 2 "const_0_to_255_operand")] UNSPEC_ROUND))] "TARGET_AVX512F" "vrndscale\t{%2, %1, %0|%0, %1, %2}" [(set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "avx512f_rndscale" [(set (match_operand:VFH_128 0 "register_operand" "=v") (vec_merge:VFH_128 (unspec:VFH_128 [(match_operand:VFH_128 2 "" "") (match_operand:SI 3 "const_0_to_255_operand")] UNSPEC_ROUND) (match_operand:VFH_128 1 "register_operand" "v") (const_int 1)))] "TARGET_AVX512F" "vrndscale\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "*avx512f_rndscale" [(set (match_operand:VFH_128 0 "register_operand" "=v") (vec_merge:VFH_128 (vec_duplicate:VFH_128 (unspec: [(match_operand: 2 "" "") (match_operand:SI 3 "const_0_to_255_operand")] UNSPEC_ROUND)) (match_operand:VFH_128 1 "register_operand" "v") (const_int 1)))] "TARGET_AVX512F" "vrndscale\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr 
"length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) ;; One bit in mask selects 2 elements. (define_insn "avx512f_shufps512_1" [(set (match_operand:V16SF 0 "register_operand" "=v") (vec_select:V16SF (vec_concat:V32SF (match_operand:V16SF 1 "register_operand" "v") (match_operand:V16SF 2 "nonimmediate_operand" "vm")) (parallel [(match_operand 3 "const_0_to_3_operand") (match_operand 4 "const_0_to_3_operand") (match_operand 5 "const_16_to_19_operand") (match_operand 6 "const_16_to_19_operand") (match_operand 7 "const_4_to_7_operand") (match_operand 8 "const_4_to_7_operand") (match_operand 9 "const_20_to_23_operand") (match_operand 10 "const_20_to_23_operand") (match_operand 11 "const_8_to_11_operand") (match_operand 12 "const_8_to_11_operand") (match_operand 13 "const_24_to_27_operand") (match_operand 14 "const_24_to_27_operand") (match_operand 15 "const_12_to_15_operand") (match_operand 16 "const_12_to_15_operand") (match_operand 17 "const_28_to_31_operand") (match_operand 18 "const_28_to_31_operand")])))] "TARGET_AVX512F && (INTVAL (operands[3]) == (INTVAL (operands[7]) - 4) && INTVAL (operands[4]) == (INTVAL (operands[8]) - 4) && INTVAL (operands[5]) == (INTVAL (operands[9]) - 4) && INTVAL (operands[6]) == (INTVAL (operands[10]) - 4) && INTVAL (operands[3]) == (INTVAL (operands[11]) - 8) && INTVAL (operands[4]) == (INTVAL (operands[12]) - 8) && INTVAL (operands[5]) == (INTVAL (operands[13]) - 8) && INTVAL (operands[6]) == (INTVAL (operands[14]) - 8) && INTVAL (operands[3]) == (INTVAL (operands[15]) - 12) && INTVAL (operands[4]) == (INTVAL (operands[16]) - 12) && INTVAL (operands[5]) == (INTVAL (operands[17]) - 12) && INTVAL (operands[6]) == (INTVAL (operands[18]) - 12))" { int mask; mask = INTVAL (operands[3]); mask |= INTVAL (operands[4]) << 2; mask |= (INTVAL (operands[5]) - 16) << 4; mask |= (INTVAL (operands[6]) - 16) << 6; operands[3] = GEN_INT (mask); return "vshufps\t{%3, %2, %1, %0|%0, %1, %2, %3}"; } [(set_attr "type" "sselog") 
(set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "V16SF")]) (define_expand "avx512f_shufpd512_mask" [(match_operand:V8DF 0 "register_operand") (match_operand:V8DF 1 "register_operand") (match_operand:V8DF 2 "nonimmediate_operand") (match_operand:SI 3 "const_0_to_255_operand") (match_operand:V8DF 4 "register_operand") (match_operand:QI 5 "register_operand")] "TARGET_AVX512F" { int mask = INTVAL (operands[3]); emit_insn (gen_avx512f_shufpd512_1_mask (operands[0], operands[1], operands[2], GEN_INT (mask & 1), GEN_INT (mask & 2 ? 9 : 8), GEN_INT (mask & 4 ? 3 : 2), GEN_INT (mask & 8 ? 11 : 10), GEN_INT (mask & 16 ? 5 : 4), GEN_INT (mask & 32 ? 13 : 12), GEN_INT (mask & 64 ? 7 : 6), GEN_INT (mask & 128 ? 15 : 14), operands[4], operands[5])); DONE; }) (define_insn "avx512f_shufpd512_1" [(set (match_operand:V8DF 0 "register_operand" "=v") (vec_select:V8DF (vec_concat:V16DF (match_operand:V8DF 1 "register_operand" "v") (match_operand:V8DF 2 "nonimmediate_operand" "vm")) (parallel [(match_operand 3 "const_0_to_1_operand") (match_operand 4 "const_8_to_9_operand") (match_operand 5 "const_2_to_3_operand") (match_operand 6 "const_10_to_11_operand") (match_operand 7 "const_4_to_5_operand") (match_operand 8 "const_12_to_13_operand") (match_operand 9 "const_6_to_7_operand") (match_operand 10 "const_14_to_15_operand")])))] "TARGET_AVX512F" { int mask; mask = INTVAL (operands[3]); mask |= (INTVAL (operands[4]) - 8) << 1; mask |= (INTVAL (operands[5]) - 2) << 2; mask |= (INTVAL (operands[6]) - 10) << 3; mask |= (INTVAL (operands[7]) - 4) << 4; mask |= (INTVAL (operands[8]) - 12) << 5; mask |= (INTVAL (operands[9]) - 6) << 6; mask |= (INTVAL (operands[10]) - 14) << 7; operands[3] = GEN_INT (mask); return "vshufpd\t{%3, %2, %1, %0|%0, %1, %2, %3}"; } [(set_attr "type" "sselog") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "V8DF")]) (define_expand "avx_shufpd256" [(match_operand:V4DF 0 "register_operand") (match_operand:V4DF 
;; NOTE(review): as elsewhere in this chunk, several angle-bracket
;; mode-iterator substitutions appear stripped (empty "TARGET_AVX && "
;; condition tails, "sse2_shufpd_" with no mode suffix, empty prefix
;; alternatives).  Confirm against upstream gcc/config/i386/sse.md;
;; code left byte-identical.
;;
;; Continuation of the avx_shufpd256 expand (header on the previous
;; line): splits the 4-bit vshufpd immediate into four vec_select
;; indices for avx_shufpd256_1, which rebuilds the immediate in its C
;; block.  Followed by the SSE2 sse2_shufpd expand / masked V2DF form,
;; and the punpckhqdq/punpcklqdq interleave patterns (comment in source:
;; these are shorter encodings than shufpd), each at 128/256/512-bit
;; widths.  sse2_storehpd stores/extracts element 1 of a V2DF, with a
;; post-reload split for the memory case and an SSE1-only fallback
;; (*vec_extractv2df_1_sse) using movhps/movhlps/movlps.
1 "register_operand") (match_operand:V4DF 2 "nonimmediate_operand") (match_operand:SI 3 "const_int_operand")] "TARGET_AVX" { int mask = INTVAL (operands[3]); emit_insn (gen_avx_shufpd256_1 (operands[0], operands[1], operands[2], GEN_INT (mask & 1), GEN_INT (mask & 2 ? 5 : 4), GEN_INT (mask & 4 ? 3 : 2), GEN_INT (mask & 8 ? 7 : 6) )); DONE; }) (define_insn "avx_shufpd256_1" [(set (match_operand:V4DF 0 "register_operand" "=v") (vec_select:V4DF (vec_concat:V8DF (match_operand:V4DF 1 "register_operand" "v") (match_operand:V4DF 2 "nonimmediate_operand" "vm")) (parallel [(match_operand 3 "const_0_to_1_operand") (match_operand 4 "const_4_to_5_operand") (match_operand 5 "const_2_to_3_operand") (match_operand 6 "const_6_to_7_operand")])))] "TARGET_AVX && " { int mask; mask = INTVAL (operands[3]); mask |= (INTVAL (operands[4]) - 4) << 1; mask |= (INTVAL (operands[5]) - 2) << 2; mask |= (INTVAL (operands[6]) - 6) << 3; operands[3] = GEN_INT (mask); return "vshufpd\t{%3, %2, %1, %0|%0, %1, %2, %3}"; } [(set_attr "type" "sseshuf") (set_attr "length_immediate" "1") (set_attr "prefix" "vex") (set_attr "mode" "V4DF")]) (define_expand "sse2_shufpd" [(match_operand:V2DF 0 "register_operand") (match_operand:V2DF 1 "register_operand") (match_operand:V2DF 2 "vector_operand") (match_operand:SI 3 "const_int_operand")] "TARGET_SSE2" { int mask = INTVAL (operands[3]); emit_insn (gen_sse2_shufpd_v2df (operands[0], operands[1], operands[2], GEN_INT (mask & 1), GEN_INT (mask & 2 ?
3 : 2) )); DONE; }) (define_insn "sse2_shufpd_v2df_mask" [(set (match_operand:V2DF 0 "register_operand" "=v") (vec_merge:V2DF (vec_select:V2DF (vec_concat:V4DF (match_operand:V2DF 1 "register_operand" "v") (match_operand:V2DF 2 "nonimmediate_operand" "vm")) (parallel [(match_operand 3 "const_0_to_1_operand") (match_operand 4 "const_2_to_3_operand")])) (match_operand:V2DF 5 "nonimm_or_0_operand" "0C") (match_operand:QI 6 "register_operand" "Yk")))] "TARGET_AVX512VL" { int mask; mask = INTVAL (operands[3]); mask |= (INTVAL (operands[4]) - 2) << 1; operands[3] = GEN_INT (mask); return "vshufpd\t{%3, %2, %1, %0%{%6%}%N5|%0%{%6%}%N5, %1, %2, %3}"; } [(set_attr "type" "sseshuf") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "V2DF")]) ;; punpcklqdq and punpckhqdq are shorter than shufpd. (define_insn "avx2_interleave_highv4di" [(set (match_operand:V4DI 0 "register_operand" "=v") (vec_select:V4DI (vec_concat:V8DI (match_operand:V4DI 1 "register_operand" "v") (match_operand:V4DI 2 "nonimmediate_operand" "vm")) (parallel [(const_int 1) (const_int 5) (const_int 3) (const_int 7)])))] "TARGET_AVX2 && " "vpunpckhqdq\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "vex") (set_attr "mode" "OI")]) (define_insn "avx512f_interleave_highv8di" [(set (match_operand:V8DI 0 "register_operand" "=v") (vec_select:V8DI (vec_concat:V16DI (match_operand:V8DI 1 "register_operand" "v") (match_operand:V8DI 2 "nonimmediate_operand" "vm")) (parallel [(const_int 1) (const_int 9) (const_int 3) (const_int 11) (const_int 5) (const_int 13) (const_int 7) (const_int 15)])))] "TARGET_AVX512F" "vpunpckhqdq\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_insn "vec_interleave_highv2di" [(set (match_operand:V2DI 0 "register_operand" "=x,v") (vec_select:V2DI (vec_concat:V4DI (match_operand:V2DI 1 "register_operand" "0,v") (match_operand:V2DI 2 "vector_operand" "xBm,vm")) (parallel
[(const_int 1) (const_int 3)])))] "TARGET_SSE2 && " "@ punpckhqdq\t{%2, %0|%0, %2} vpunpckhqdq\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sselog") (set_attr "prefix_data16" "1,*") (set_attr "prefix" "orig,") (set_attr "mode" "TI")]) (define_insn "avx2_interleave_lowv4di" [(set (match_operand:V4DI 0 "register_operand" "=v") (vec_select:V4DI (vec_concat:V8DI (match_operand:V4DI 1 "register_operand" "v") (match_operand:V4DI 2 "nonimmediate_operand" "vm")) (parallel [(const_int 0) (const_int 4) (const_int 2) (const_int 6)])))] "TARGET_AVX2 && " "vpunpcklqdq\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "vex") (set_attr "mode" "OI")]) (define_insn "avx512f_interleave_lowv8di" [(set (match_operand:V8DI 0 "register_operand" "=v") (vec_select:V8DI (vec_concat:V16DI (match_operand:V8DI 1 "register_operand" "v") (match_operand:V8DI 2 "nonimmediate_operand" "vm")) (parallel [(const_int 0) (const_int 8) (const_int 2) (const_int 10) (const_int 4) (const_int 12) (const_int 6) (const_int 14)])))] "TARGET_AVX512F" "vpunpcklqdq\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_insn "vec_interleave_lowv2di" [(set (match_operand:V2DI 0 "register_operand" "=x,v") (vec_select:V2DI (vec_concat:V4DI (match_operand:V2DI 1 "register_operand" "0,v") (match_operand:V2DI 2 "vector_operand" "xBm,vm")) (parallel [(const_int 0) (const_int 2)])))] "TARGET_SSE2 && " "@ punpcklqdq\t{%2, %0|%0, %2} vpunpcklqdq\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sselog") (set_attr "prefix_data16" "1,*") (set_attr "prefix" "orig,vex") (set_attr "mode" "TI")]) (define_insn "sse2_shufpd_" [(set (match_operand:VI8F_128 0 "register_operand" "=x,v") (vec_select:VI8F_128 (vec_concat: (match_operand:VI8F_128 1 "register_operand" "0,v") (match_operand:VI8F_128 2 "vector_operand" "xBm,vm")) (parallel [(match_operand 3 "const_0_to_1_operand") (match_operand 4
"const_2_to_3_operand")])))] "TARGET_SSE2" { int mask; mask = INTVAL (operands[3]); mask |= (INTVAL (operands[4]) - 2) << 1; operands[3] = GEN_INT (mask); switch (which_alternative) { case 0: return "shufpd\t{%3, %2, %0|%0, %2, %3}"; case 1: return "vshufpd\t{%3, %2, %1, %0|%0, %1, %2, %3}"; default: gcc_unreachable (); } } [(set_attr "isa" "noavx,avx") (set_attr "type" "sseshuf") (set_attr "length_immediate" "1") (set_attr "prefix" "orig,maybe_evex") (set_attr "mode" "V2DF")]) ;; Avoid combining registers from different units in a single alternative, ;; see comment above inline_secondary_memory_needed function in i386.cc (define_insn "sse2_storehpd" [(set (match_operand:DF 0 "nonimmediate_operand" "=m,x,Yv,x,*f,r") (vec_select:DF (match_operand:V2DF 1 "nonimmediate_operand" " v,0, v,o,o,o") (parallel [(const_int 1)])))] "TARGET_SSE2 && !(MEM_P (operands[0]) && MEM_P (operands[1]))" "@ %vmovhpd\t{%1, %0|%0, %1} unpckhpd\t%0, %0 vunpckhpd\t{%d1, %0|%0, %d1} # # #" [(set_attr "isa" "*,noavx,avx,*,*,*") (set_attr "type" "ssemov,sselog1,sselog1,ssemov,fmov,imov") (set (attr "prefix_data16") (if_then_else (and (eq_attr "alternative" "0") (not (match_test "TARGET_AVX"))) (const_string "1") (const_string "*"))) (set_attr "prefix" "maybe_vex,orig,maybe_evex,*,*,*") (set_attr "mode" "V1DF,V1DF,V2DF,DF,DF,DF")]) (define_split [(set (match_operand:DF 0 "register_operand") (vec_select:DF (match_operand:V2DF 1 "memory_operand") (parallel [(const_int 1)])))] "TARGET_SSE2 && reload_completed" [(set (match_dup 0) (match_dup 1))] "operands[1] = adjust_address (operands[1], DFmode, 8);") (define_insn "*vec_extractv2df_1_sse" [(set (match_operand:DF 0 "nonimmediate_operand" "=m,x,x") (vec_select:DF (match_operand:V2DF 1 "nonimmediate_operand" "x,x,o") (parallel [(const_int 1)])))] "!TARGET_SSE2 && TARGET_SSE && !(MEM_P (operands[0]) && MEM_P (operands[1]))" "@ movhps\t{%1, %0|%0, %1} movhlps\t{%1, %0|%0, %1} movlps\t{%H1, %0|%0, %H1}" [(set_attr "type" "ssemov") (set_attr "mode"
;; NOTE(review): this span continues the DF-element load/store patterns.
;; Alternative lists, per-alternative attr tables and the multi-line
;; "@ ..." assembler templates are exact-order-sensitive, so code is
;; left byte-identical; a few mode-iterator substitutions may have been
;; stripped during extraction -- confirm against upstream
;; gcc/config/i386/sse.md.
;;
;; sse2_storelpd extracts element 0 of a V2DF (movlpd or post-reload
;; split into a plain DF move via gen_lowpart); *vec_extractv2df_0_sse
;; is the pre-SSE2 fallback.  sse2_loadhpd/loadlpd replace the high/low
;; DF element of a V2DF, each with an _exp expand that calls
;; ix86_fixup_binary_operands and copies to the real destination if a
;; scratch was used, plus memory-destination splits.  sse2_movsd is the
;; vec_merge ... (const_int 1) element-0 merge.  vec_dupv2df broadcasts
;; a DF (unpcklpd / movddup); vec_concatv2df builds a V2DF from two DF
;; operands across SSE/AVX/AVX512VL alternatives.
"V2SF,V4SF,V2SF")]) ;; Avoid combining registers from different units in a single alternative, ;; see comment above inline_secondary_memory_needed function in i386.cc (define_insn "sse2_storelpd" [(set (match_operand:DF 0 "nonimmediate_operand" "=m,x,x,*f,r") (vec_select:DF (match_operand:V2DF 1 "nonimmediate_operand" " v,x,m,m,m") (parallel [(const_int 0)])))] "TARGET_SSE2 && !(MEM_P (operands[0]) && MEM_P (operands[1]))" "@ %vmovlpd\t{%1, %0|%0, %1} # # # #" [(set_attr "type" "ssemov,ssemov,ssemov,fmov,imov") (set (attr "prefix_data16") (if_then_else (eq_attr "alternative" "0") (const_string "1") (const_string "*"))) (set_attr "prefix" "maybe_vex") (set_attr "mode" "V1DF,DF,DF,DF,DF")]) (define_split [(set (match_operand:DF 0 "register_operand") (vec_select:DF (match_operand:V2DF 1 "nonimmediate_operand") (parallel [(const_int 0)])))] "TARGET_SSE2 && reload_completed" [(set (match_dup 0) (match_dup 1))] "operands[1] = gen_lowpart (DFmode, operands[1]);") (define_insn "*vec_extractv2df_0_sse" [(set (match_operand:DF 0 "nonimmediate_operand" "=m,x,x") (vec_select:DF (match_operand:V2DF 1 "nonimmediate_operand" "x,x,m") (parallel [(const_int 0)])))] "!TARGET_SSE2 && TARGET_SSE && !(MEM_P (operands[0]) && MEM_P (operands[1]))" "@ movlps\t{%1, %0|%0, %1} movaps\t{%1, %0|%0, %1} movlps\t{%1, %0|%0, %q1}" [(set_attr "type" "ssemov") (set_attr "mode" "V2SF,V4SF,V2SF")]) (define_expand "sse2_loadhpd_exp" [(set (match_operand:V2DF 0 "nonimmediate_operand") (vec_concat:V2DF (vec_select:DF (match_operand:V2DF 1 "nonimmediate_operand") (parallel [(const_int 0)])) (match_operand:DF 2 "nonimmediate_operand")))] "TARGET_SSE2" { rtx dst = ix86_fixup_binary_operands (UNKNOWN, V2DFmode, operands); emit_insn (gen_sse2_loadhpd (dst, operands[1], operands[2])); /* Fix up the destination if needed.
*/ if (dst != operands[0]) emit_move_insn (operands[0], dst); DONE; }) ;; Avoid combining registers from different units in a single alternative, ;; see comment above inline_secondary_memory_needed function in i386.cc (define_insn "sse2_loadhpd" [(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,v,x,v ,o,o ,o") (vec_concat:V2DF (vec_select:DF (match_operand:V2DF 1 "nonimmediate_operand" " 0,v,0,v ,0,0 ,0") (parallel [(const_int 0)])) (match_operand:DF 2 "nonimmediate_operand" " m,m,x,Yv,x,*f,r")))] "TARGET_SSE2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "@ movhpd\t{%2, %0|%0, %2} vmovhpd\t{%2, %1, %0|%0, %1, %2} unpcklpd\t{%2, %0|%0, %2} vunpcklpd\t{%2, %1, %0|%0, %1, %2} # # #" [(set_attr "isa" "noavx,avx,noavx,avx,*,*,*") (set_attr "type" "ssemov,ssemov,sselog,sselog,ssemov,fmov,imov") (set (attr "prefix_data16") (if_then_else (eq_attr "alternative" "0") (const_string "1") (const_string "*"))) (set_attr "prefix" "orig,maybe_evex,orig,maybe_evex,*,*,*") (set_attr "mode" "V1DF,V1DF,V2DF,V2DF,DF,DF,DF")]) (define_split [(set (match_operand:V2DF 0 "memory_operand") (vec_concat:V2DF (vec_select:DF (match_dup 0) (parallel [(const_int 0)])) (match_operand:DF 1 "register_operand")))] "TARGET_SSE2 && reload_completed" [(set (match_dup 0) (match_dup 1))] "operands[0] = adjust_address (operands[0], DFmode, 8);") (define_expand "sse2_loadlpd_exp" [(set (match_operand:V2DF 0 "nonimmediate_operand") (vec_concat:V2DF (match_operand:DF 2 "nonimmediate_operand") (vec_select:DF (match_operand:V2DF 1 "nonimmediate_operand") (parallel [(const_int 1)]))))] "TARGET_SSE2" { rtx dst = ix86_fixup_binary_operands (UNKNOWN, V2DFmode, operands); emit_insn (gen_sse2_loadlpd (dst, operands[1], operands[2])); /* Fix up the destination if needed.
*/ if (dst != operands[0]) emit_move_insn (operands[0], dst); DONE; }) ;; Avoid combining registers from different units in a single alternative, ;; see comment above inline_secondary_memory_needed function in i386.cc (define_insn "sse2_loadlpd" [(set (match_operand:V2DF 0 "nonimmediate_operand" "=v,x,v,x,v,x,x,v,m,m ,m") (vec_concat:V2DF (match_operand:DF 2 "nonimmediate_operand" "vm,m,m,x,v,0,0,v,x,*f,r") (vec_select:DF (match_operand:V2DF 1 "nonimm_or_0_operand" " C,0,v,0,v,x,o,o,0,0 ,0") (parallel [(const_int 1)]))))] "TARGET_SSE2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "@ %vmovq\t{%2, %0|%0, %2} movlpd\t{%2, %0|%0, %2} vmovlpd\t{%2, %1, %0|%0, %1, %2} movsd\t{%2, %0|%0, %2} vmovsd\t{%2, %1, %0|%0, %1, %2} shufpd\t{$2, %1, %0|%0, %1, 2} movhpd\t{%H1, %0|%0, %H1} vmovhpd\t{%H1, %2, %0|%0, %2, %H1} # # #" [(set_attr "isa" "*,noavx,avx,noavx,avx,noavx,noavx,avx,*,*,*") (set (attr "type") (cond [(eq_attr "alternative" "5") (const_string "sselog") (eq_attr "alternative" "9") (const_string "fmov") (eq_attr "alternative" "10") (const_string "imov") ] (const_string "ssemov"))) (set (attr "prefix_data16") (if_then_else (eq_attr "alternative" "1,6") (const_string "1") (const_string "*"))) (set (attr "length_immediate") (if_then_else (eq_attr "alternative" "5") (const_string "1") (const_string "*"))) (set (attr "prefix") (cond [(eq_attr "alternative" "0") (const_string "maybe_vex") (eq_attr "alternative" "1,3,5,6") (const_string "orig") (eq_attr "alternative" "2,4,7") (const_string "maybe_evex") ] (const_string "*"))) (set_attr "mode" "DF,V1DF,V1DF,V1DF,V1DF,V2DF,V1DF,V1DF,DF,DF,DF")]) (define_split [(set (match_operand:V2DF 0 "memory_operand") (vec_concat:V2DF (match_operand:DF 1 "register_operand") (vec_select:DF (match_dup 0) (parallel [(const_int 1)]))))] "TARGET_SSE2 && reload_completed" [(set (match_dup 0) (match_dup 1))] "operands[0] = adjust_address (operands[0], DFmode, 0);") (define_insn "sse2_movsd" [(set (match_operand:V2DF 0 "nonimmediate_operand"
"=x,v,x,v,m,x,x,v,o") (vec_merge:V2DF (match_operand:V2DF 2 "nonimmediate_operand" " x,v,m,m,v,0,0,v,0") (match_operand:V2DF 1 "nonimmediate_operand" " 0,v,0,v,0,x,o,o,v") (const_int 1)))] "TARGET_SSE2" "@ movsd\t{%2, %0|%0, %2} vmovsd\t{%2, %1, %0|%0, %1, %2} movlpd\t{%2, %0|%0, %q2} vmovlpd\t{%2, %1, %0|%0, %1, %q2} %vmovlpd\t{%2, %0|%q0, %2} shufpd\t{$2, %1, %0|%0, %1, 2} movhps\t{%H1, %0|%0, %H1} vmovhps\t{%H1, %2, %0|%0, %2, %H1} %vmovhps\t{%1, %H0|%H0, %1}" [(set_attr "isa" "noavx,avx,noavx,avx,*,noavx,noavx,avx,*") (set (attr "type") (if_then_else (eq_attr "alternative" "5") (const_string "sselog") (const_string "ssemov"))) (set (attr "prefix_data16") (if_then_else (and (eq_attr "alternative" "2,4") (not (match_test "TARGET_AVX"))) (const_string "1") (const_string "*"))) (set (attr "length_immediate") (if_then_else (eq_attr "alternative" "5") (const_string "1") (const_string "*"))) (set (attr "prefix") (cond [(eq_attr "alternative" "1,3,7") (const_string "maybe_evex") (eq_attr "alternative" "4,8") (const_string "maybe_vex") ] (const_string "orig"))) (set_attr "mode" "DF,DF,V1DF,V1DF,V1DF,V2DF,V1DF,V1DF,V1DF")]) (define_insn "vec_dupv2df" [(set (match_operand:V2DF 0 "register_operand" "=x,x,v") (vec_duplicate:V2DF (match_operand:DF 1 "nonimmediate_operand" " 0,xm,vm")))] "TARGET_SSE2 && " "@ unpcklpd\t%0, %0 %vmovddup\t{%1, %0|%0, %1} vmovddup\t{%1, %0|%0, %1}" [(set_attr "isa" "noavx,sse3,avx512vl") (set_attr "type" "sselog1") (set_attr "prefix" "orig,maybe_vex,evex") (set_attr "mode" "V2DF,DF,DF")]) (define_insn "vec_concatv2df" [(set (match_operand:V2DF 0 "register_operand" "=x,x,v,x,v,x,x, v,x,x") (vec_concat:V2DF (match_operand:DF 1 "nonimmediate_operand" " 0,x,v,m,m,0,x,vm,0,0") (match_operand:DF 2 "nonimm_or_0_operand" " x,x,v,1,1,m,m, C,x,m")))] "TARGET_SSE && (!(MEM_P (operands[1]) && MEM_P (operands[2])) || (TARGET_SSE3 && rtx_equal_p (operands[1], operands[2])))" "@ unpcklpd\t{%2, %0|%0, %2} vunpcklpd\t{%2, %1, %0|%0, %1, %2} vunpcklpd\t{%2, %1,
%0|%0, %1, %2} %vmovddup\t{%1, %0|%0, %1} vmovddup\t{%1, %0|%0, %1} movhpd\t{%2, %0|%0, %2} vmovhpd\t{%2, %1, %0|%0, %1, %2} %vmovq\t{%1, %0|%0, %1} movlhps\t{%2, %0|%0, %2} movhps\t{%2, %0|%0, %2}" [(set (attr "isa") (cond [(eq_attr "alternative" "0,5") (const_string "sse2_noavx") (eq_attr "alternative" "1,6") (const_string "avx") (eq_attr "alternative" "2,4") (const_string "avx512vl") (eq_attr "alternative" "3") (const_string "sse3") (eq_attr "alternative" "7") (const_string "sse2") ] (const_string "noavx"))) (set (attr "type") (if_then_else (eq_attr "alternative" "0,1,2,3,4") (const_string "sselog") (const_string "ssemov"))) (set (attr "prefix_data16") (if_then_else (eq_attr "alternative" "5") (const_string "1") (const_string "*"))) (set (attr "prefix") (cond [(eq_attr "alternative" "1,6") (const_string "vex") (eq_attr "alternative" "2,4") (const_string "evex") (eq_attr "alternative" "3,7") (const_string "maybe_vex") ] (const_string "orig"))) (set_attr "mode" "V2DF,V2DF,V2DF, DF, DF, V1DF,V1DF,DF,V4SF,V2SF")]) ;; vmovq clears also the higher bits.
(define_insn "vec_set_0" [(set (match_operand:VF2_512_256 0 "register_operand" "=v") (vec_merge:VF2_512_256 (vec_duplicate:VF2_512_256 (match_operand: 2 "nonimmediate_operand" "vm")) (match_operand:VF2_512_256 1 "const0_operand" "C") (const_int 1)))] "TARGET_AVX" "vmovq\t{%2, %x0|%x0, %2}" [(set_attr "type" "ssemov") (set_attr "prefix" "maybe_evex") (set_attr "mode" "DF")]) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; Parallel integer down-conversion operations ;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define_mode_iterator PMOV_DST_MODE_1 [V16QI V16HI V8SI V8HI]) (define_mode_attr pmov_src_mode [(V16QI "V16SI") (V16HI "V16SI") (V8SI "V8DI") (V8HI "V8DI")]) (define_mode_attr pmov_src_lower [(V16QI "v16si") (V16HI "v16si") (V8SI "v8di") (V8HI "v8di")]) (define_mode_attr pmov_suff_1 [(V16QI "db") (V16HI "dw") (V8SI "qd") (V8HI "qw")]) (define_expand "trunc2" [(set (match_operand:PMOV_DST_MODE_1 0 "nonimmediate_operand") (truncate:PMOV_DST_MODE_1 (match_operand: 1 "register_operand")))] "TARGET_AVX512F") (define_insn "*avx512f_2" [(set (match_operand:PMOV_DST_MODE_1 0 "nonimmediate_operand" "=v,m") (any_truncate:PMOV_DST_MODE_1 (match_operand: 1 "register_operand" "v,v")))] "TARGET_AVX512F" "vpmov\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "memory" "none,store") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn_and_split "*avx512bw_permvar_truncv16siv16hi_1" [(set (match_operand:V16HI 0 "nonimmediate_operand") (vec_select:V16HI (unspec:V32HI [(match_operand:V32HI 1 "register_operand") (match_operand:V32HI 2 "permvar_truncate_operand")] UNSPEC_VPERMVAR) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7) (const_int 8) (const_int 9) (const_int 10) (const_int 11) (const_int 12) (const_int 13) (const_int 14) (const_int 15)])))] "TARGET_AVX512BW && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) 
(truncate:V16HI (match_dup 1)))] "operands[1] = lowpart_subreg (V16SImode, operands[1], V32HImode);") (define_insn_and_split "*avx512bw_permvar_truncv16siv16hi_1_hf" [(set (match_operand:V16HF 0 "nonimmediate_operand") (vec_select:V16HF (subreg:V32HF (unspec:V32HI [(match_operand:V32HI 1 "register_operand") (match_operand:V32HI 2 "permvar_truncate_operand")] UNSPEC_VPERMVAR) 0) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7) (const_int 8) (const_int 9) (const_int 10) (const_int 11) (const_int 12) (const_int 13) (const_int 14) (const_int 15)])))] "TARGET_AVX512BW && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (truncate:V16HI (match_dup 1)))] { operands[0] = lowpart_subreg (V16HImode, operands[0], V16HFmode); operands[1] = lowpart_subreg (V16SImode, operands[1], V32HImode); }) (define_insn_and_split "*avx512f_permvar_truncv8siv8hi_1" [(set (match_operand:V8HI 0 "nonimmediate_operand") (vec_select:V8HI (unspec:V16HI [(match_operand:V16HI 1 "register_operand") (match_operand:V16HI 2 "permvar_truncate_operand")] UNSPEC_VPERMVAR) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)])))] "TARGET_AVX512VL && TARGET_AVX512BW && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (truncate:V8HI (match_dup 1)))] "operands[1] = lowpart_subreg (V8SImode, operands[1], V16HImode);") (define_insn_and_split "*avx512f_permvar_truncv8siv8hi_1_hf" [(set (match_operand:V8HF 0 "nonimmediate_operand") (vec_select:V8HF (subreg:V16HF (unspec:V16HI [(match_operand:V16HI 1 "register_operand") (match_operand:V16HI 2 "permvar_truncate_operand")] UNSPEC_VPERMVAR) 0) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)])))] "TARGET_AVX512VL && TARGET_AVX512BW && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (truncate:V8HI (match_dup 1)))] { operands[0] = 
lowpart_subreg (V8HImode, operands[0], V8HFmode); operands[1] = lowpart_subreg (V8SImode, operands[1], V16HImode); }) (define_insn_and_split "*avx512f_vpermvar_truncv8div8si_1" [(set (match_operand:V8SI 0 "nonimmediate_operand") (vec_select:V8SI (unspec:V16SI [(match_operand:V16SI 1 "register_operand") (match_operand:V16SI 2 "permvar_truncate_operand")] UNSPEC_VPERMVAR) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)])))] "TARGET_AVX512F && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (truncate:V8SI (match_dup 1)))] "operands[1] = lowpart_subreg (V8DImode, operands[1], V16SImode);") (define_insn "avx512f_2_mask" [(set (match_operand:PMOV_DST_MODE_1 0 "nonimmediate_operand" "=v,m") (vec_merge:PMOV_DST_MODE_1 (any_truncate:PMOV_DST_MODE_1 (match_operand: 1 "register_operand" "v,v")) (match_operand:PMOV_DST_MODE_1 2 "nonimm_or_0_operand" "0C,0") (match_operand: 3 "register_operand" "Yk,Yk")))] "TARGET_AVX512F" "vpmov\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssemov") (set_attr "memory" "none,store") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "avx512f_2_mask_store" [(set (match_operand:PMOV_DST_MODE_1 0 "memory_operand") (vec_merge:PMOV_DST_MODE_1 (any_truncate:PMOV_DST_MODE_1 (match_operand: 1 "register_operand")) (match_dup 0) (match_operand: 2 "register_operand")))] "TARGET_AVX512F") (define_expand "truncv32hiv32qi2" [(set (match_operand:V32QI 0 "nonimmediate_operand") (truncate:V32QI (match_operand:V32HI 1 "register_operand")))] "TARGET_AVX512BW") (define_insn "avx512bw_v32hiv32qi2" [(set (match_operand:V32QI 0 "nonimmediate_operand" "=v,m") (any_truncate:V32QI (match_operand:V32HI 1 "register_operand" "v,v")))] "TARGET_AVX512BW" "vpmovwb\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "memory" "none,store") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_insn_and_split "*avx512f_permvar_truncv32hiv32qi_1" [(set 
(match_operand:V32QI 0 "nonimmediate_operand") (vec_select:V32QI (unspec:V64QI [(match_operand:V64QI 1 "register_operand") (match_operand:V64QI 2 "permvar_truncate_operand")] UNSPEC_VPERMVAR) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7) (const_int 8) (const_int 9) (const_int 10) (const_int 11) (const_int 12) (const_int 13) (const_int 14) (const_int 15) (const_int 16) (const_int 17) (const_int 18) (const_int 19) (const_int 20) (const_int 21) (const_int 22) (const_int 23) (const_int 24) (const_int 25) (const_int 26) (const_int 27) (const_int 28) (const_int 29) (const_int 30) (const_int 31)])))] "TARGET_AVX512VBMI && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (truncate:V32QI (match_dup 1)))] "operands[1] = lowpart_subreg (V32HImode, operands[1], V64QImode);") (define_insn "avx512bw_v32hiv32qi2_mask" [(set (match_operand:V32QI 0 "nonimmediate_operand" "=v,m") (vec_merge:V32QI (any_truncate:V32QI (match_operand:V32HI 1 "register_operand" "v,v")) (match_operand:V32QI 2 "nonimm_or_0_operand" "0C,0") (match_operand:SI 3 "register_operand" "Yk,Yk")))] "TARGET_AVX512BW" "vpmovwb\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssemov") (set_attr "memory" "none,store") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_expand "avx512bw_v32hiv32qi2_mask_store" [(set (match_operand:V32QI 0 "nonimmediate_operand") (vec_merge:V32QI (any_truncate:V32QI (match_operand:V32HI 1 "register_operand")) (match_dup 0) (match_operand:SI 2 "register_operand")))] "TARGET_AVX512BW") (define_mode_iterator PMOV_DST_MODE_2 [V4SI V8HI (V16QI "TARGET_AVX512BW")]) (define_mode_attr pmov_suff_2 [(V16QI "wb") (V8HI "dw") (V4SI "qd")]) (define_expand "trunc2" [(set (match_operand:PMOV_DST_MODE_2 0 "nonimmediate_operand") (truncate:PMOV_DST_MODE_2 (match_operand: 1 "register_operand")))] "TARGET_AVX512VL") (define_insn "*avx512vl_2" [(set (match_operand:PMOV_DST_MODE_2 0 "nonimmediate_operand" 
"=v,m") (any_truncate:PMOV_DST_MODE_2 (match_operand: 1 "register_operand" "v,v")))] "TARGET_AVX512VL" "vpmov\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "memory" "none,store") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn_and_split "*avx512f_permvar_truncv16hiv16qi_1" [(set (match_operand:V16QI 0 "nonimmediate_operand") (vec_select:V16QI (unspec:V32QI [(match_operand:V32QI 1 "register_operand") (match_operand:V32QI 2 "permvar_truncate_operand")] UNSPEC_VPERMVAR) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7) (const_int 8) (const_int 9) (const_int 10) (const_int 11) (const_int 12) (const_int 13) (const_int 14) (const_int 15)])))] "TARGET_AVX512VL && TARGET_AVX512VBMI && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (truncate:V16QI (match_dup 1)))] "operands[1] = lowpart_subreg (V16HImode, operands[1], V32QImode);") (define_insn_and_split "*avx512f_permvar_truncv4div4si_1" [(set (match_operand:V4SI 0 "nonimmediate_operand") (vec_select:V4SI (unspec:V8SI [(match_operand:V8SI 1 "register_operand") (match_operand:V8SI 2 "permvar_truncate_operand")] UNSPEC_VPERMVAR) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)])))] "TARGET_AVX512VL && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (truncate:V4SI (match_dup 1)))] "operands[1] = lowpart_subreg (V4DImode, operands[1], V8SImode);") (define_insn "_2_mask" [(set (match_operand:PMOV_DST_MODE_2 0 "nonimmediate_operand" "=v,m") (vec_merge:PMOV_DST_MODE_2 (any_truncate:PMOV_DST_MODE_2 (match_operand: 1 "register_operand" "v,v")) (match_operand:PMOV_DST_MODE_2 2 "nonimm_or_0_operand" "0C,0") (match_operand: 3 "register_operand" "Yk,Yk")))] "TARGET_AVX512VL" "vpmov\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssemov") (set_attr "memory" "none,store") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "_2_mask_store" [(set (match_operand:PMOV_DST_MODE_2 0 
"nonimmediate_operand") (vec_merge:PMOV_DST_MODE_2 (any_truncate:PMOV_DST_MODE_2 (match_operand: 1 "register_operand")) (match_dup 0) (match_operand: 2 "register_operand")))] "TARGET_AVX512VL") (define_mode_iterator PMOV_SRC_MODE_3 [V4DI V2DI V8SI V4SI (V8HI "TARGET_AVX512BW")]) (define_mode_attr pmov_dst_3_lower [(V4DI "v4qi") (V2DI "v2qi") (V8SI "v8qi") (V4SI "v4qi") (V8HI "v8qi")]) (define_mode_attr pmov_dst_3 [(V4DI "V4QI") (V2DI "V2QI") (V8SI "V8QI") (V4SI "V4QI") (V8HI "V8QI")]) (define_mode_attr pmov_dst_zeroed_3 [(V4DI "V12QI") (V2DI "V14QI") (V8SI "V8QI") (V4SI "V12QI") (V8HI "V8QI")]) (define_mode_attr pmov_suff_3 [(V4DI "qb") (V2DI "qb") (V8SI "db") (V4SI "db") (V8HI "wb")]) (define_expand "trunc2" [(set (match_operand: 0 "register_operand") (truncate: (match_operand:PMOV_SRC_MODE_3 1 "register_operand")))] "TARGET_AVX512VL" { rtx op0 = gen_reg_rtx (V16QImode); emit_insn (gen_avx512vl_truncatevqi2 (op0, operands[1], CONST0_RTX (mode))); emit_move_insn (operands[0], lowpart_subreg (mode, op0, V16QImode)); DONE; }) (define_insn "avx512vl_vqi2" [(set (match_operand:V16QI 0 "register_operand" "=v") (vec_concat:V16QI (any_truncate: (match_operand:PMOV_SRC_MODE_3 1 "register_operand" "v")) (match_operand: 2 "const0_operand")))] "TARGET_AVX512VL" "vpmov\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn_and_split "*avx512f_pshufb_truncv8hiv8qi_1" [(set (match_operand:DI 0 "register_operand") (vec_select:DI (subreg:V2DI (unspec:V16QI [(match_operand:V16QI 1 "register_operand") (match_operand:V16QI 2 "pshufb_truncv8hiv8qi_operand")] UNSPEC_PSHUFB) 0) (parallel [(const_int 0)])))] "TARGET_AVX512VL && TARGET_AVX512BW && ix86_pre_reload_split ()" "#" "&& 1" [(const_int 0)] { rtx op1 = gen_reg_rtx (V8QImode); operands[1] = lowpart_subreg (V8HImode, operands[1], V16QImode); emit_insn (gen_truncv8hiv8qi2 (op1, operands[1])); emit_move_insn (operands[0], lowpart_subreg (DImode, op1, V8QImode)); DONE; }) 
;; V2DI->V2QI and (VI4_128_8_256)->V4QI truncation patterns: plain stores,
;; subreg-store splitters (rewriting an HI/SI memory store of a truncated
;; vector into a direct V2QI/V4QI store via adjust_address_nv), and masked
;; register/zero-masked forms that concatenate the truncated low part with
;; a zero vector to fill a full 128-bit destination.
;; NOTE(review): the missing <code>/<trunsuffix>-style substitutions were
;; stripped by extraction (e.g. "vpmov\t" lacks its qb/qw suffix attribute
;; in the VI4_128_8_256 patterns) — restore from upstream — TODO confirm.
(define_insn "*avx512vl_v2div2qi2_store_1" [(set (match_operand:V2QI 0 "memory_operand" "=m") (any_truncate:V2QI (match_operand:V2DI 1 "register_operand" "v")))] "TARGET_AVX512VL" "vpmovqb\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "memory" "store") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn_and_split "*avx512vl_v2div2qi2_store_2" [(set (match_operand:HI 0 "memory_operand") (subreg:HI (any_truncate:V2QI (match_operand:V2DI 1 "register_operand")) 0))] "TARGET_AVX512VL && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (any_truncate:V2QI (match_dup 1)))] "operands[0] = adjust_address_nv (operands[0], V2QImode, 0);") (define_insn "avx512vl_v2div2qi2_mask" [(set (match_operand:V16QI 0 "register_operand" "=v") (vec_concat:V16QI (vec_merge:V2QI (any_truncate:V2QI (match_operand:V2DI 1 "register_operand" "v")) (vec_select:V2QI (match_operand:V16QI 2 "nonimm_or_0_operand" "0C") (parallel [(const_int 0) (const_int 1)])) (match_operand:QI 3 "register_operand" "Yk")) (const_vector:V14QI [(const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0)])))] "TARGET_AVX512VL" "vpmovqb\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn "*avx512vl_v2div2qi2_mask_1" [(set (match_operand:V16QI 0 "register_operand" "=v") (vec_concat:V16QI (vec_merge:V2QI (any_truncate:V2QI (match_operand:V2DI 1 "register_operand" "v")) (const_vector:V2QI [(const_int 0) (const_int 0)]) (match_operand:QI 2 "register_operand" "Yk")) (const_vector:V14QI [(const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0)])))] "TARGET_AVX512VL" "vpmovqb\t{%1, %0%{%2%}%{z%}|%0%{%2%}%{z%}, %1}" [(set_attr "type" "ssemov") (set_attr 
"prefix" "evex") (set_attr "mode" "TI")]) (define_insn "*avx512vl_v2div2qi2_mask_store_1" [(set (match_operand:V2QI 0 "memory_operand" "=m") (vec_merge:V2QI (any_truncate:V2QI (match_operand:V2DI 1 "register_operand" "v")) (match_dup 0) (match_operand:QI 2 "register_operand" "Yk")))] "TARGET_AVX512VL" "vpmovqb\t{%1, %0%{%2%}|%0%{%2%}, %1}" [(set_attr "type" "ssemov") (set_attr "memory" "store") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn_and_split "avx512vl_v2div2qi2_mask_store_2" [(set (match_operand:HI 0 "memory_operand") (subreg:HI (vec_merge:V2QI (any_truncate:V2QI (match_operand:V2DI 1 "register_operand")) (vec_select:V2QI (subreg:V4QI (vec_concat:V2HI (match_dup 0) (const_int 0)) 0) (parallel [(const_int 0) (const_int 1)])) (match_operand:QI 2 "register_operand")) 0))] "TARGET_AVX512VL && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (vec_merge:V2QI (any_truncate:V2QI (match_dup 1)) (match_dup 0) (match_dup 2)))] "operands[0] = adjust_address_nv (operands[0], V2QImode, 0);") (define_insn "*avx512vl_v4qi2_store_1" [(set (match_operand:V4QI 0 "memory_operand" "=m") (any_truncate:V4QI (match_operand:VI4_128_8_256 1 "register_operand" "v")))] "TARGET_AVX512VL" "vpmov\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "memory" "store") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn_and_split "*avx512vl_v4qi2_store_2" [(set (match_operand:SI 0 "memory_operand") (subreg:SI (any_truncate:V4QI (match_operand:VI4_128_8_256 1 "register_operand")) 0))] "TARGET_AVX512VL && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (any_truncate:V4QI (match_dup 1)))] "operands[0] = adjust_address_nv (operands[0], V4QImode, 0);") (define_insn "avx512vl_v4qi2_mask" [(set (match_operand:V16QI 0 "register_operand" "=v") (vec_concat:V16QI (vec_merge:V4QI (any_truncate:V4QI (match_operand:VI4_128_8_256 1 "register_operand" "v")) (vec_select:V4QI (match_operand:V16QI 2 "nonimm_or_0_operand" "0C") (parallel [(const_int 0) 
(const_int 1) (const_int 2) (const_int 3)])) (match_operand:QI 3 "register_operand" "Yk")) (const_vector:V12QI [(const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0)])))] "TARGET_AVX512VL" "vpmov\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn "*avx512vl_v4qi2_mask_1" [(set (match_operand:V16QI 0 "register_operand" "=v") (vec_concat:V16QI (vec_merge:V4QI (any_truncate:V4QI (match_operand:VI4_128_8_256 1 "register_operand" "v")) (const_vector:V4QI [(const_int 0) (const_int 0) (const_int 0) (const_int 0)]) (match_operand:QI 2 "register_operand" "Yk")) (const_vector:V12QI [(const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0)])))] "TARGET_AVX512VL" "vpmov\t{%1, %0%{%2%}%{z%}|%0%{%2%}%{z%}, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn "*avx512vl_v4qi2_mask_store_1" [(set (match_operand:V4QI 0 "memory_operand" "=m") (vec_merge:V4QI (any_truncate:V4QI (match_operand:VI4_128_8_256 1 "register_operand" "v")) (match_dup 0) (match_operand:QI 2 "register_operand" "Yk")))] "TARGET_AVX512VL" "vpmov\t{%1, %0%{%2%}|%0%{%2%}, %1}" [(set_attr "type" "ssemov") (set_attr "memory" "store") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn_and_split "avx512vl_v4qi2_mask_store_2" [(set (match_operand:SI 0 "memory_operand") (subreg:SI (vec_merge:V4QI (any_truncate:V4QI (match_operand:VI4_128_8_256 1 "register_operand")) (vec_select:V4QI (subreg:V8QI (vec_concat:V2SI (match_dup 0) (const_int 0)) 0) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)])) (match_operand:QI 2 "register_operand")) 0))] "TARGET_AVX512VL && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (vec_merge:V4QI 
;; Continuation of the V4QI masked-store splitter, then the ->V8QI truncation
;; family (VI2_128_BW_4_256 sources: V8HI needs AVX512BW, V8SI does not),
;; the PMOV_SRC_MODE_4 iterators/attrs for ->HI-element truncations, and the
;; ->V4HI truncation family.  Same store/mask/mask-store shape as the V4QI
;; family above.  NOTE(review): the %t1/%g1 alternatives in the V4HI masked
;; store pick the 256- vs 512-bit source register name based on element size;
;; angle-bracket substitutions stripped by extraction — TODO restore upstream.
(any_truncate:V4QI (match_dup 1)) (match_dup 0) (match_dup 2)))] "operands[0] = adjust_address_nv (operands[0], V4QImode, 0);") (define_mode_iterator VI2_128_BW_4_256 [(V8HI "TARGET_AVX512BW") V8SI]) (define_insn "*avx512vl_v8qi2_store_1" [(set (match_operand:V8QI 0 "memory_operand" "=m") (any_truncate:V8QI (match_operand:VI2_128_BW_4_256 1 "register_operand" "v")))] "TARGET_AVX512VL" "vpmov\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "memory" "store") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn_and_split "*avx512vl_v8qi2_store_2" [(set (match_operand:DI 0 "memory_operand" "=m") (subreg:DI (any_truncate:V8QI (match_operand:VI2_128_BW_4_256 1 "register_operand" "v")) 0))] "TARGET_AVX512VL && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (any_truncate:V8QI (match_dup 1)))] "operands[0] = adjust_address_nv (operands[0], V8QImode, 0);") (define_insn "avx512vl_v8qi2_mask" [(set (match_operand:V16QI 0 "register_operand" "=v") (vec_concat:V16QI (vec_merge:V8QI (any_truncate:V8QI (match_operand:VI2_128_BW_4_256 1 "register_operand" "v")) (vec_select:V8QI (match_operand:V16QI 2 "nonimm_or_0_operand" "0C") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)])) (match_operand:QI 3 "register_operand" "Yk")) (const_vector:V8QI [(const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0)])))] "TARGET_AVX512VL" "vpmov\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn "*avx512vl_v8qi2_mask_1" [(set (match_operand:V16QI 0 "register_operand" "=v") (vec_concat:V16QI (vec_merge:V8QI (any_truncate:V8QI (match_operand:VI2_128_BW_4_256 1 "register_operand" "v")) (const_vector:V8QI [(const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0)]) (match_operand:QI 2 "register_operand" "Yk")) 
(const_vector:V8QI [(const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0)])))] "TARGET_AVX512VL" "vpmov\t{%1, %0%{%2%}%{z%}|%0%{%2%}%{z%}, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn "*avx512vl_v8qi2_mask_store_1" [(set (match_operand:V8QI 0 "memory_operand" "=m") (vec_merge:V8QI (any_truncate:V8QI (match_operand:VI2_128_BW_4_256 1 "register_operand" "v")) (match_dup 0) (match_operand:QI 2 "register_operand" "Yk")))] "TARGET_AVX512VL" "vpmov\t{%1, %0%{%2%}|%0%{%2%}, %1}" [(set_attr "type" "ssemov") (set_attr "memory" "store") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn_and_split "avx512vl_v8qi2_mask_store_2" [(set (match_operand:DI 0 "memory_operand") (subreg:DI (vec_merge:V8QI (any_truncate:V8QI (match_operand:VI2_128_BW_4_256 1 "register_operand")) (vec_select:V8QI (subreg:V16QI (vec_concat:V2DI (match_dup 0) (const_int 0)) 0) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)])) (match_operand:QI 2 "register_operand")) 0))] "TARGET_AVX512VL && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (vec_merge:V8QI (any_truncate:V8QI (match_dup 1)) (match_dup 0) (match_dup 2)))] "operands[0] = adjust_address_nv (operands[0], V8QImode, 0);") (define_mode_iterator PMOV_SRC_MODE_4 [V4DI V2DI V4SI]) (define_mode_attr pmov_dst_4 [(V4DI "V4HI") (V2DI "V2HI") (V4SI "V4HI")]) (define_mode_attr pmov_dst_4_lower [(V4DI "v4hi") (V2DI "v2hi") (V4SI "v4hi")]) (define_mode_attr pmov_dst_zeroed_4 [(V4DI "V4HI") (V2DI "V6HI") (V4SI "V4HI")]) (define_mode_attr pmov_suff_4 [(V4DI "qw") (V2DI "qw") (V4SI "dw")]) (define_expand "trunc2" [(set (match_operand: 0 "register_operand") (truncate: (match_operand:PMOV_SRC_MODE_4 1 "register_operand")))] "TARGET_AVX512VL" { rtx op0 = gen_reg_rtx (V8HImode); emit_insn (gen_avx512vl_truncatevhi2 (op0, operands[1], CONST0_RTX (mode))); 
emit_move_insn (operands[0], lowpart_subreg (mode, op0, V8HImode)); DONE; }) (define_insn "avx512vl_vhi2" [(set (match_operand:V8HI 0 "register_operand" "=v") (vec_concat:V8HI (any_truncate: (match_operand:PMOV_SRC_MODE_4 1 "register_operand" "v")) (match_operand: 2 "const0_operand")))] "TARGET_AVX512VL" "vpmov\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn_and_split "*avx512f_pshufb_truncv4siv4hi_1" [(set (match_operand:DI 0 "register_operand") (vec_select:DI (subreg:V2DI (unspec:V16QI [(match_operand:V16QI 1 "register_operand") (match_operand:V16QI 2 "pshufb_truncv4siv4hi_operand")] UNSPEC_PSHUFB) 0) (parallel [(const_int 0)])))] "TARGET_AVX512VL && ix86_pre_reload_split ()" "#" "&& 1" [(const_int 0)] { rtx op1 = gen_reg_rtx (V4HImode); operands[1] = lowpart_subreg (V4SImode, operands[1], V16QImode); emit_insn (gen_truncv4siv4hi2 (op1, operands[1])); emit_move_insn (operands[0], lowpart_subreg (DImode, op1, V4HImode)); DONE; }) (define_insn "*avx512vl_v4hi2_store_1" [(set (match_operand:V4HI 0 "memory_operand" "=m") (any_truncate:V4HI (match_operand:VI4_128_8_256 1 "register_operand" "v")))] "TARGET_AVX512VL" "vpmov\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "memory" "store") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn_and_split "*avx512vl_v4hi2_store_2" [(set (match_operand:DI 0 "memory_operand") (subreg:DI (any_truncate:V4HI (match_operand:VI4_128_8_256 1 "register_operand")) 0))] "TARGET_AVX512VL && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (any_truncate:V4HI (match_dup 1)))] "operands[0] = adjust_address_nv (operands[0], V4HImode, 0);") (define_insn "avx512vl_v4hi2_mask" [(set (match_operand:V8HI 0 "register_operand" "=v") (vec_concat:V8HI (vec_merge:V4HI (any_truncate:V4HI (match_operand:VI4_128_8_256 1 "register_operand" "v")) (vec_select:V4HI (match_operand:V8HI 2 "nonimm_or_0_operand" "0C") (parallel [(const_int 0) (const_int 1) (const_int 2) 
(const_int 3)])) (match_operand:QI 3 "register_operand" "Yk")) (const_vector:V4HI [(const_int 0) (const_int 0) (const_int 0) (const_int 0)])))] "TARGET_AVX512VL" "vpmov\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn "*avx512vl_v4hi2_mask_1" [(set (match_operand:V8HI 0 "register_operand" "=v") (vec_concat:V8HI (vec_merge:V4HI (any_truncate:V4HI (match_operand:VI4_128_8_256 1 "register_operand" "v")) (const_vector:V4HI [(const_int 0) (const_int 0) (const_int 0) (const_int 0)]) (match_operand:QI 2 "register_operand" "Yk")) (const_vector:V4HI [(const_int 0) (const_int 0) (const_int 0) (const_int 0)])))] "TARGET_AVX512VL" "vpmov\t{%1, %0%{%2%}%{z%}|%0%{%2%}%{z%}, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn "*avx512vl_v4hi2_mask_store_1" [(set (match_operand:V4HI 0 "memory_operand" "=m") (vec_merge:V4HI (any_truncate:V4HI (match_operand:VI4_128_8_256 1 "register_operand" "v")) (match_dup 0) (match_operand:QI 2 "register_operand" "Yk")))] "TARGET_AVX512VL" { if (GET_MODE_SIZE (GET_MODE_INNER (mode)) == 4) return "vpmov\t{%1, %0%{%2%}|%0%{%2%}, %t1}"; return "vpmov\t{%1, %0%{%2%}|%0%{%2%}, %g1}"; } [(set_attr "type" "ssemov") (set_attr "memory" "store") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn_and_split "avx512vl_v4hi2_mask_store_2" [(set (match_operand:DI 0 "memory_operand") (subreg:DI (vec_merge:V4HI (any_truncate:V4HI (match_operand:VI4_128_8_256 1 "register_operand")) (vec_select:V4HI (subreg:V8HI (vec_concat:V2DI (match_dup 0) (const_int 0)) 0) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)])) (match_operand:QI 2 "register_operand")) 0))] "TARGET_AVX512VL && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (vec_merge:V4HI (any_truncate:V4HI (match_dup 1)) (match_dup 0) (match_dup 2)))] "operands[0] = adjust_address_nv (operands[0], V4HImode, 0);") (define_insn 
;; V2DI->V2HI (vpmovqw) and V2DI->V2SI (vpmovqd) truncation families:
;; store insns, subreg-store splitters, masked register forms (truncated
;; lanes merged under a mask register, remainder zero-padded to 128 bits),
;; masked stores, and their splitters.  Then the start of the 512-bit
;; V8DI->V8QI (vpmovqb) family under TARGET_AVX512F.
;; NOTE(review): extraction stripped the angle-bracket substitutions; the
;; pshufd-truncate splitter's (const_int 0)(const_int 2)(const_int 2)(const_int 3)
;; selector is as upstream — only lane 0 of the result is consumed.
"*avx512vl_v2div2hi2_store_1" [(set (match_operand:V2HI 0 "memory_operand" "=m") (any_truncate:V2HI (match_operand:V2DI 1 "register_operand" "v")))] "TARGET_AVX512VL" "vpmovqw\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "memory" "store") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn_and_split "*avx512vl_v2div2hi2_store_2" [(set (match_operand:SI 0 "memory_operand") (subreg:SI (any_truncate:V2HI (match_operand:V2DI 1 "register_operand")) 0))] "TARGET_AVX512VL && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (any_truncate:V2HI (match_dup 1)))] "operands[0] = adjust_address_nv (operands[0], V2HImode, 0);") (define_insn "avx512vl_v2div2hi2_mask" [(set (match_operand:V8HI 0 "register_operand" "=v") (vec_concat:V8HI (vec_merge:V2HI (any_truncate:V2HI (match_operand:V2DI 1 "register_operand" "v")) (vec_select:V2HI (match_operand:V8HI 2 "nonimm_or_0_operand" "0C") (parallel [(const_int 0) (const_int 1)])) (match_operand:QI 3 "register_operand" "Yk")) (const_vector:V6HI [(const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0)])))] "TARGET_AVX512VL" "vpmovqw\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn "*avx512vl_v2div2hi2_mask_1" [(set (match_operand:V8HI 0 "register_operand" "=v") (vec_concat:V8HI (vec_merge:V2HI (any_truncate:V2HI (match_operand:V2DI 1 "register_operand" "v")) (const_vector:V2HI [(const_int 0) (const_int 0)]) (match_operand:QI 2 "register_operand" "Yk")) (const_vector:V6HI [(const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0)])))] "TARGET_AVX512VL" "vpmovqw\t{%1, %0%{%2%}%{z%}|%0%{%2%}%{z%}, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn "*avx512vl_v2div2hi2_mask_store_1" [(set (match_operand:V2HI 0 "memory_operand" "=m") (vec_merge:V2HI (any_truncate:V2HI (match_operand:V2DI 1 "register_operand" "v")) (match_dup 0) 
(match_operand:QI 2 "register_operand" "Yk")))] "TARGET_AVX512VL" "vpmovqw\t{%1, %0%{%2%}|%0%{%2%}, %g1}" [(set_attr "type" "ssemov") (set_attr "memory" "store") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn_and_split "avx512vl_v2div2hi2_mask_store_2" [(set (match_operand:SI 0 "memory_operand") (subreg:SI (vec_merge:V2HI (any_truncate:V2HI (match_operand:V2DI 1 "register_operand")) (vec_select:V2HI (subreg:V4HI (vec_concat:V2SI (match_dup 0) (const_int 0)) 0) (parallel [(const_int 0) (const_int 1)])) (match_operand:QI 2 "register_operand")) 0))] "TARGET_AVX512VL && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (vec_merge:V2HI (any_truncate:V2HI (match_dup 1)) (match_dup 0) (match_dup 2)))] "operands[0] = adjust_address_nv (operands[0], V2HImode, 0);") (define_expand "truncv2div2si2" [(set (match_operand:V2SI 0 "register_operand") (truncate:V2SI (match_operand:V2DI 1 "register_operand")))] "TARGET_AVX512VL" { rtx op0 = gen_reg_rtx (V4SImode); emit_insn (gen_avx512vl_truncatev2div2si2 (op0, operands[1], CONST0_RTX (V2SImode))); emit_move_insn (operands[0], lowpart_subreg (V2SImode, op0, V4SImode)); DONE; }) (define_insn "avx512vl_v2div2si2" [(set (match_operand:V4SI 0 "register_operand" "=v") (vec_concat:V4SI (any_truncate:V2SI (match_operand:V2DI 1 "register_operand" "v")) (match_operand:V2SI 2 "const0_operand")))] "TARGET_AVX512VL" "vpmovqd\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn_and_split "*avx512f_pshufd_truncv2div2si_1" [(set (match_operand:DI 0 "register_operand") (vec_select:DI (subreg:V2DI (vec_select:V4SI (match_operand:V4SI 1 "register_operand") (parallel [(const_int 0) (const_int 2) (const_int 2) (const_int 3)])) 0) (parallel [(const_int 0)])))] "TARGET_AVX512VL && ix86_pre_reload_split ()" "#" "&& 1" [(const_int 0)] { rtx op1 = gen_reg_rtx (V2SImode); operands[1] = lowpart_subreg (V2DImode, operands[1], V4SImode); emit_insn (gen_truncv2div2si2 (op1, 
operands[1])); emit_move_insn (operands[0], lowpart_subreg (DImode, op1, V2SImode)); DONE; }) (define_insn "*avx512vl_v2div2si2_store_1" [(set (match_operand:V2SI 0 "memory_operand" "=m") (any_truncate:V2SI (match_operand:V2DI 1 "register_operand" "v")))] "TARGET_AVX512VL" "vpmovqd\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "memory" "store") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn_and_split "*avx512vl_v2div2si2_store_2" [(set (match_operand:DI 0 "memory_operand") (subreg:DI (any_truncate:V2SI (match_operand:V2DI 1 "register_operand")) 0))] "TARGET_AVX512VL && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (any_truncate:V2SI (match_dup 1)))] "operands[0] = adjust_address_nv (operands[0], V2SImode, 0);") (define_insn "avx512vl_v2div2si2_mask" [(set (match_operand:V4SI 0 "register_operand" "=v") (vec_concat:V4SI (vec_merge:V2SI (any_truncate:V2SI (match_operand:V2DI 1 "register_operand" "v")) (vec_select:V2SI (match_operand:V4SI 2 "nonimm_or_0_operand" "0C") (parallel [(const_int 0) (const_int 1)])) (match_operand:QI 3 "register_operand" "Yk")) (const_vector:V2SI [(const_int 0) (const_int 0)])))] "TARGET_AVX512VL" "vpmovqd\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn "*avx512vl_v2div2si2_mask_1" [(set (match_operand:V4SI 0 "register_operand" "=v") (vec_concat:V4SI (vec_merge:V2SI (any_truncate:V2SI (match_operand:V2DI 1 "register_operand" "v")) (const_vector:V2SI [(const_int 0) (const_int 0)]) (match_operand:QI 2 "register_operand" "Yk")) (const_vector:V2SI [(const_int 0) (const_int 0)])))] "TARGET_AVX512VL" "vpmovqd\t{%1, %0%{%2%}%{z%}|%0%{%2%}%{z%}, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn "*avx512vl_v2div2si2_mask_store_1" [(set (match_operand:V2SI 0 "memory_operand" "=m") (vec_merge:V2SI (any_truncate:V2SI (match_operand:V2DI 1 "register_operand" "v")) (match_dup 0) 
(match_operand:QI 2 "register_operand" "Yk")))] "TARGET_AVX512VL" "vpmovqd\t{%1, %0%{%2%}|%0%{%2%}, %1}" [(set_attr "type" "ssemov") (set_attr "memory" "store") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn_and_split "avx512vl_v2div2si2_mask_store_2" [(set (match_operand:DI 0 "memory_operand") (subreg:DI (vec_merge:V2SI (any_truncate:V2SI (match_operand:V2DI 1 "register_operand")) (vec_select:V2SI (subreg:V4SI (vec_concat:V2DI (match_dup 0) (const_int 0)) 0) (parallel [(const_int 0) (const_int 1)])) (match_operand:QI 2 "register_operand")) 0))] "TARGET_AVX512VL && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (vec_merge:V2SI (any_truncate:V2SI (match_dup 1)) (match_dup 0) (match_dup 2)))] "operands[0] = adjust_address_nv (operands[0], V2SImode, 0);") (define_expand "truncv8div8qi2" [(set (match_operand:V8QI 0 "register_operand") (truncate:V8QI (match_operand:V8DI 1 "register_operand")))] "TARGET_AVX512F" { rtx op0 = gen_reg_rtx (V16QImode); emit_insn (gen_avx512f_truncatev8div16qi2 (op0, operands[1])); emit_move_insn (operands[0], lowpart_subreg (V8QImode, op0, V16QImode)); DONE; }) (define_insn "avx512f_v8div16qi2" [(set (match_operand:V16QI 0 "register_operand" "=v") (vec_concat:V16QI (any_truncate:V8QI (match_operand:V8DI 1 "register_operand" "v")) (const_vector:V8QI [(const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0)])))] "TARGET_AVX512F" "vpmovqb\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn "*avx512f_v8div16qi2_store_1" [(set (match_operand:V8QI 0 "memory_operand" "=m") (any_truncate:V8QI (match_operand:V8DI 1 "register_operand" "v")))] "TARGET_AVX512F" "vpmovqb\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "memory" "store") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn_and_split "*avx512f_v8div16qi2_store_2" [(set (match_operand:DI 0 "memory_operand") (subreg:DI 
;; Masked forms of the 512-bit V8DI->V8QI (vpmovqb) truncation, followed by
;; the "Parallel integral arithmetic" section opener: expanders for vector
;; negation (as 0 - x) and plus/minus, including the cond_ expander that
;; routes a vec_merge'd add/sub to the masked insn.
;; NOTE(review): pattern names such as "neg2"/"3" have lost their <mode>/
;; <insn> substitutions to extraction — restore from upstream sse.md.
(any_truncate:V8QI (match_operand:V8DI 1 "register_operand")) 0))] "TARGET_AVX512F && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (any_truncate:V8QI (match_dup 1)))] "operands[0] = adjust_address_nv (operands[0], V8QImode, 0);") (define_insn "avx512f_v8div16qi2_mask" [(set (match_operand:V16QI 0 "register_operand" "=v") (vec_concat:V16QI (vec_merge:V8QI (any_truncate:V8QI (match_operand:V8DI 1 "register_operand" "v")) (vec_select:V8QI (match_operand:V16QI 2 "nonimm_or_0_operand" "0C") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)])) (match_operand:QI 3 "register_operand" "Yk")) (const_vector:V8QI [(const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0)])))] "TARGET_AVX512F" "vpmovqb\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn "*avx512f_v8div16qi2_mask_1" [(set (match_operand:V16QI 0 "register_operand" "=v") (vec_concat:V16QI (vec_merge:V8QI (any_truncate:V8QI (match_operand:V8DI 1 "register_operand" "v")) (const_vector:V8QI [(const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0)]) (match_operand:QI 2 "register_operand" "Yk")) (const_vector:V8QI [(const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0) (const_int 0)])))] "TARGET_AVX512F" "vpmovqb\t{%1, %0%{%2%}%{z%}|%0%{%2%}%{z%}, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "TI")]) (define_insn "*avx512f_v8div16qi2_mask_store_1" [(set (match_operand:V8QI 0 "memory_operand" "=m") (vec_merge:V8QI (any_truncate:V8QI (match_operand:V8DI 1 "register_operand" "v")) (match_dup 0) (match_operand:QI 2 "register_operand" "Yk")))] "TARGET_AVX512F" "vpmovqb\t{%1, %0%{%2%}|%0%{%2%}, %1}" [(set_attr "type" "ssemov") (set_attr "memory" "store") (set_attr "prefix" "evex") 
(set_attr "mode" "TI")]) (define_insn_and_split "avx512f_v8div16qi2_mask_store_2" [(set (match_operand:DI 0 "memory_operand") (subreg:DI (vec_merge:V8QI (any_truncate:V8QI (match_operand:V8DI 1 "register_operand")) (vec_select:V8QI (subreg:V16QI (vec_concat:V2DI (match_dup 0) (const_int 0)) 0) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)])) (match_operand:QI 2 "register_operand")) 0))] "TARGET_AVX512F && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (vec_merge:V8QI (any_truncate:V8QI (match_dup 1)) (match_dup 0) (match_dup 2)))] "operands[0] = adjust_address_nv (operands[0], V8QImode, 0);") ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; Parallel integral arithmetic ;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define_expand "neg2" [(set (match_operand:VI_AVX2 0 "register_operand") (minus:VI_AVX2 (match_dup 2) (match_operand:VI_AVX2 1 "vector_operand")))] "TARGET_SSE2" "operands[2] = force_reg (mode, CONST0_RTX (mode));") (define_expand "3" [(set (match_operand:VI_AVX2 0 "register_operand") (plusminus:VI_AVX2 (match_operand:VI_AVX2 1 "vector_operand") (match_operand:VI_AVX2 2 "vector_operand")))] "TARGET_SSE2" "ix86_fixup_binary_operands_no_copy (, mode, operands);") (define_expand "cond_" [(set (match_operand:VI1248_AVX512VLBW 0 "register_operand") (vec_merge:VI1248_AVX512VLBW (plusminus:VI1248_AVX512VLBW (match_operand:VI1248_AVX512VLBW 2 "nonimmediate_operand") (match_operand:VI1248_AVX512VLBW 3 "nonimmediate_operand")) (match_operand:VI1248_AVX512VLBW 4 "nonimm_or_0_operand") (match_operand: 1 "register_operand")))] "TARGET_AVX512F" { emit_insn (gen_3_mask (operands[0], operands[2], operands[3], operands[4], operands[1])); DONE; }) (define_expand "3_mask" [(set (match_operand:VI48_AVX512VL 0 "register_operand") (vec_merge:VI48_AVX512VL (plusminus:VI48_AVX512VL (match_operand:VI48_AVX512VL 1 "nonimmediate_operand") 
;; Masked plus/minus expanders and insns: VI48 element sizes need AVX512F,
;; VI12 element sizes need AVX512BW.  Then the saturating add/sub
;; (padds*/psubs*/paddus*/psubus*) expander and insn for VI12 modes.
;; NOTE(review): the bare "&& &&" in the SSE2 conditions and the empty
;; constraint strings are artifacts of stripped <mask_*>/<comm>/<plusminus_insn>
;; substitutions — not valid conditions as written; restore from upstream.
(match_operand:VI48_AVX512VL 2 "nonimmediate_operand")) (match_operand:VI48_AVX512VL 3 "nonimm_or_0_operand") (match_operand: 4 "register_operand")))] "TARGET_AVX512F" "ix86_fixup_binary_operands_no_copy (, mode, operands);") (define_expand "3_mask" [(set (match_operand:VI12_AVX512VL 0 "register_operand") (vec_merge:VI12_AVX512VL (plusminus:VI12_AVX512VL (match_operand:VI12_AVX512VL 1 "nonimmediate_operand") (match_operand:VI12_AVX512VL 2 "nonimmediate_operand")) (match_operand:VI12_AVX512VL 3 "nonimm_or_0_operand") (match_operand: 4 "register_operand")))] "TARGET_AVX512BW" "ix86_fixup_binary_operands_no_copy (, mode, operands);") (define_insn "*3" [(set (match_operand:VI_AVX2 0 "register_operand" "=x,") (plusminus:VI_AVX2 (match_operand:VI_AVX2 1 "bcst_vector_operand" "0,") (match_operand:VI_AVX2 2 "bcst_vector_operand" "xBm,mBr")))] "TARGET_SSE2 && ix86_binary_operator_ok (, mode, operands)" "@ p\t{%2, %0|%0, %2} vp\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseiadd") (set_attr "prefix_data16" "1,*") (set_attr "prefix" "orig,maybe_evex") (set_attr "mode" "")]) (define_insn "*3_mask" [(set (match_operand:VI48_AVX512VL 0 "register_operand" "=v") (vec_merge:VI48_AVX512VL (plusminus:VI48_AVX512VL (match_operand:VI48_AVX512VL 1 "nonimmediate_operand" "v") (match_operand:VI48_AVX512VL 2 "nonimmediate_operand" "vm")) (match_operand:VI48_AVX512VL 3 "nonimm_or_0_operand" "0C") (match_operand: 4 "register_operand" "Yk")))] "TARGET_AVX512F && ix86_binary_operator_ok (, mode, operands)" "vp\t{%2, %1, %0%{%4%}%N3|%0%{%4%}%N3, %1, %2}" [(set_attr "type" "sseiadd") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "*3_mask" [(set (match_operand:VI12_AVX512VL 0 "register_operand" "=v") (vec_merge:VI12_AVX512VL (plusminus:VI12_AVX512VL (match_operand:VI12_AVX512VL 1 "nonimmediate_operand" "v") (match_operand:VI12_AVX512VL 2 "nonimmediate_operand" "vm")) (match_operand:VI12_AVX512VL 3 "nonimm_or_0_operand" "0C") (match_operand: 4 
"register_operand" "Yk")))] "TARGET_AVX512BW && ix86_binary_operator_ok (, mode, operands)" "vp\t{%2, %1, %0%{%4%}%N3|%0%{%4%}%N3, %1, %2}" [(set_attr "type" "sseiadd") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "_3" [(set (match_operand:VI12_AVX2_AVX512BW 0 "register_operand") (sat_plusminus:VI12_AVX2_AVX512BW (match_operand:VI12_AVX2_AVX512BW 1 "vector_operand") (match_operand:VI12_AVX2_AVX512BW 2 "vector_operand")))] "TARGET_SSE2 && && " "ix86_fixup_binary_operands_no_copy (, mode, operands);") (define_insn "*_3" [(set (match_operand:VI12_AVX2_AVX512BW 0 "register_operand" "=x,") (sat_plusminus:VI12_AVX2_AVX512BW (match_operand:VI12_AVX2_AVX512BW 1 "vector_operand" "0,") (match_operand:VI12_AVX2_AVX512BW 2 "vector_operand" "xBm,m")))] "TARGET_SSE2 && && && ix86_binary_operator_ok (, mode, operands)" "@ p\t{%2, %0|%0, %2} vp\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseiadd") (set_attr "prefix_data16" "1,*") (set_attr "prefix" "orig,maybe_evex") (set_attr "mode" "TI")]) ;; PR96906 - optimize psubusw compared to 0 into pminuw compared to op0. 
;; PR96906 split (us_minus==0 -> umin==op1), byte-multiply expanders that
;; lower QImode vector multiply via ix86_expand_vecop_qihi, word multiply
;; (pmullw) and high-part multiply (pmulhw/pmulhuw via any_extend), and the
;; even-lane widening unsigned multiplies (vpmuludq) for 512/256/128 bits.
;; The final V4SI expander is cut off at the end of this chunk.
;; NOTE(review): "mul3"/"mul3_highpart" and the dangling "&& &&" conditions
;; have lost their <mode>/<mask_*> substitutions to extraction.
(define_split [(set (match_operand:VI12_AVX2 0 "register_operand") (eq:VI12_AVX2 (us_minus:VI12_AVX2 (match_operand:VI12_AVX2 1 "vector_operand") (match_operand:VI12_AVX2 2 "vector_operand")) (match_operand:VI12_AVX2 3 "const0_operand")))] "TARGET_SSE2 && (mode != V8HImode || TARGET_SSE4_1) && ix86_binary_operator_ok (US_MINUS, mode, operands)" [(set (match_dup 4) (umin:VI12_AVX2 (match_dup 1) (match_dup 2))) (set (match_dup 0) (eq:VI12_AVX2 (match_dup 4) (match_dup 1)))] "operands[4] = gen_reg_rtx (mode);") (define_expand "mulv8qi3" [(set (match_operand:V8QI 0 "register_operand") (mult:V8QI (match_operand:V8QI 1 "register_operand") (match_operand:V8QI 2 "register_operand")))] "TARGET_AVX512VL && TARGET_AVX512BW && TARGET_64BIT" { ix86_expand_vecop_qihi (MULT, operands[0], operands[1], operands[2]); DONE; }) (define_expand "mul3" [(set (match_operand:VI1_AVX512 0 "register_operand") (mult:VI1_AVX512 (match_operand:VI1_AVX512 1 "register_operand") (match_operand:VI1_AVX512 2 "register_operand")))] "TARGET_SSE2" { ix86_expand_vecop_qihi (MULT, operands[0], operands[1], operands[2]); DONE; }) (define_expand "cond_mul" [(set (match_operand:VI2_AVX512VL 0 "register_operand") (vec_merge:VI2_AVX512VL (mult:VI2_AVX512VL (match_operand:VI2_AVX512VL 2 "vector_operand") (match_operand:VI2_AVX512VL 3 "vector_operand")) (match_operand:VI2_AVX512VL 4 "nonimm_or_0_operand") (match_operand: 1 "register_operand")))] "TARGET_AVX512BW" { emit_insn (gen_mul3_mask (operands[0], operands[2], operands[3], operands[4], operands[1])); DONE; }) (define_expand "mul3" [(set (match_operand:VI2_AVX2 0 "register_operand") (mult:VI2_AVX2 (match_operand:VI2_AVX2 1 "vector_operand") (match_operand:VI2_AVX2 2 "vector_operand")))] "TARGET_SSE2 && && " "ix86_fixup_binary_operands_no_copy (MULT, mode, operands);") (define_insn "*mul3" [(set (match_operand:VI2_AVX2 0 "register_operand" "=x,") (mult:VI2_AVX2 (match_operand:VI2_AVX2 1 "vector_operand" "%0,") (match_operand:VI2_AVX2 2 "vector_operand" 
"xBm,m")))] "TARGET_SSE2 && !(MEM_P (operands[1]) && MEM_P (operands[2])) && && " "@ pmullw\t{%2, %0|%0, %2} vpmullw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseimul") (set_attr "prefix_data16" "1,*") (set_attr "prefix" "orig,vex") (set_attr "mode" "")]) (define_expand "mul3_highpart" [(set (match_operand:VI2_AVX2 0 "register_operand") (truncate:VI2_AVX2 (lshiftrt: (mult: (any_extend: (match_operand:VI2_AVX2 1 "vector_operand")) (any_extend: (match_operand:VI2_AVX2 2 "vector_operand"))) (const_int 16))))] "TARGET_SSE2 && && " "ix86_fixup_binary_operands_no_copy (MULT, mode, operands);") (define_insn "*mul3_highpart" [(set (match_operand:VI2_AVX2 0 "register_operand" "=x,") (truncate:VI2_AVX2 (lshiftrt: (mult: (any_extend: (match_operand:VI2_AVX2 1 "vector_operand" "%0,")) (any_extend: (match_operand:VI2_AVX2 2 "vector_operand" "xBm,m"))) (const_int 16))))] "TARGET_SSE2 && !(MEM_P (operands[1]) && MEM_P (operands[2])) && && " "@ pmulhw\t{%2, %0|%0, %2} vpmulhw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseimul") (set_attr "prefix_data16" "1,*") (set_attr "prefix" "orig,vex") (set_attr "mode" "")]) (define_expand "vec_widen_umult_even_v16si" [(set (match_operand:V8DI 0 "register_operand") (mult:V8DI (zero_extend:V8DI (vec_select:V8SI (match_operand:V16SI 1 "nonimmediate_operand") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6) (const_int 8) (const_int 10) (const_int 12) (const_int 14)]))) (zero_extend:V8DI (vec_select:V8SI (match_operand:V16SI 2 "nonimmediate_operand") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6) (const_int 8) (const_int 10) (const_int 12) (const_int 14)])))))] "TARGET_AVX512F" "ix86_fixup_binary_operands_no_copy (MULT, V16SImode, operands);") (define_insn "*vec_widen_umult_even_v16si" [(set (match_operand:V8DI 0 "register_operand" "=v") (mult:V8DI (zero_extend:V8DI (vec_select:V8SI (match_operand:V16SI 1 "nonimmediate_operand" "%v") (parallel 
[(const_int 0) (const_int 2) (const_int 4) (const_int 6) (const_int 8) (const_int 10) (const_int 12) (const_int 14)]))) (zero_extend:V8DI (vec_select:V8SI (match_operand:V16SI 2 "nonimmediate_operand" "vm") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6) (const_int 8) (const_int 10) (const_int 12) (const_int 14)])))))] "TARGET_AVX512F && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "vpmuludq\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sseimul") (set_attr "prefix_extra" "1") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_expand "vec_widen_umult_even_v8si" [(set (match_operand:V4DI 0 "register_operand") (mult:V4DI (zero_extend:V4DI (vec_select:V4SI (match_operand:V8SI 1 "nonimmediate_operand") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6)]))) (zero_extend:V4DI (vec_select:V4SI (match_operand:V8SI 2 "nonimmediate_operand") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6)])))))] "TARGET_AVX2 && " "ix86_fixup_binary_operands_no_copy (MULT, V8SImode, operands);") (define_insn "*vec_widen_umult_even_v8si" [(set (match_operand:V4DI 0 "register_operand" "=v") (mult:V4DI (zero_extend:V4DI (vec_select:V4SI (match_operand:V8SI 1 "nonimmediate_operand" "%v") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6)]))) (zero_extend:V4DI (vec_select:V4SI (match_operand:V8SI 2 "nonimmediate_operand" "vm") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6)])))))] "TARGET_AVX2 && && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "vpmuludq\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sseimul") (set_attr "prefix" "maybe_evex") (set_attr "mode" "OI")]) (define_expand "vec_widen_umult_even_v4si" [(set (match_operand:V2DI 0 "register_operand") (mult:V2DI (zero_extend:V2DI (vec_select:V2SI (match_operand:V4SI 1 "vector_operand") (parallel [(const_int 0) (const_int 2)]))) (zero_extend:V2DI (vec_select:V2SI (match_operand:V4SI 2 "vector_operand") (parallel [(const_int 0) (const_int 2)])))))] 
"TARGET_SSE2 && " "ix86_fixup_binary_operands_no_copy (MULT, V4SImode, operands);") (define_insn "*vec_widen_umult_even_v4si" [(set (match_operand:V2DI 0 "register_operand" "=x,v") (mult:V2DI (zero_extend:V2DI (vec_select:V2SI (match_operand:V4SI 1 "vector_operand" "%0,v") (parallel [(const_int 0) (const_int 2)]))) (zero_extend:V2DI (vec_select:V2SI (match_operand:V4SI 2 "vector_operand" "xBm,vm") (parallel [(const_int 0) (const_int 2)])))))] "TARGET_SSE2 && && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "@ pmuludq\t{%2, %0|%0, %2} vpmuludq\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseimul") (set_attr "prefix_data16" "1,*") (set_attr "prefix" "orig,maybe_evex") (set_attr "mode" "TI")]) (define_expand "vec_widen_smult_even_v16si" [(set (match_operand:V8DI 0 "register_operand") (mult:V8DI (sign_extend:V8DI (vec_select:V8SI (match_operand:V16SI 1 "nonimmediate_operand") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6) (const_int 8) (const_int 10) (const_int 12) (const_int 14)]))) (sign_extend:V8DI (vec_select:V8SI (match_operand:V16SI 2 "nonimmediate_operand") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6) (const_int 8) (const_int 10) (const_int 12) (const_int 14)])))))] "TARGET_AVX512F" "ix86_fixup_binary_operands_no_copy (MULT, V16SImode, operands);") (define_insn "*vec_widen_smult_even_v16si" [(set (match_operand:V8DI 0 "register_operand" "=v") (mult:V8DI (sign_extend:V8DI (vec_select:V8SI (match_operand:V16SI 1 "nonimmediate_operand" "%v") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6) (const_int 8) (const_int 10) (const_int 12) (const_int 14)]))) (sign_extend:V8DI (vec_select:V8SI (match_operand:V16SI 2 "nonimmediate_operand" "vm") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6) (const_int 8) (const_int 10) (const_int 12) (const_int 14)])))))] "TARGET_AVX512F && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "vpmuldq\t{%2, %1, %0|%0, %1, %2}" [(set_attr 
"type" "sseimul") (set_attr "prefix_extra" "1") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_expand "vec_widen_smult_even_v8si" [(set (match_operand:V4DI 0 "register_operand") (mult:V4DI (sign_extend:V4DI (vec_select:V4SI (match_operand:V8SI 1 "nonimmediate_operand") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6)]))) (sign_extend:V4DI (vec_select:V4SI (match_operand:V8SI 2 "nonimmediate_operand") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6)])))))] "TARGET_AVX2 && " "ix86_fixup_binary_operands_no_copy (MULT, V8SImode, operands);") (define_insn "*vec_widen_smult_even_v8si" [(set (match_operand:V4DI 0 "register_operand" "=v") (mult:V4DI (sign_extend:V4DI (vec_select:V4SI (match_operand:V8SI 1 "nonimmediate_operand" "%v") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6)]))) (sign_extend:V4DI (vec_select:V4SI (match_operand:V8SI 2 "nonimmediate_operand" "vm") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6)])))))] "TARGET_AVX2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "vpmuldq\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sseimul") (set_attr "prefix_extra" "1") (set_attr "prefix" "vex") (set_attr "mode" "OI")]) (define_expand "sse4_1_mulv2siv2di3" [(set (match_operand:V2DI 0 "register_operand") (mult:V2DI (sign_extend:V2DI (vec_select:V2SI (match_operand:V4SI 1 "vector_operand") (parallel [(const_int 0) (const_int 2)]))) (sign_extend:V2DI (vec_select:V2SI (match_operand:V4SI 2 "vector_operand") (parallel [(const_int 0) (const_int 2)])))))] "TARGET_SSE4_1 && " "ix86_fixup_binary_operands_no_copy (MULT, V4SImode, operands);") (define_insn "*sse4_1_mulv2siv2di3" [(set (match_operand:V2DI 0 "register_operand" "=Yr,*x,v") (mult:V2DI (sign_extend:V2DI (vec_select:V2SI (match_operand:V4SI 1 "vector_operand" "%0,0,v") (parallel [(const_int 0) (const_int 2)]))) (sign_extend:V2DI (vec_select:V2SI (match_operand:V4SI 2 "vector_operand" "YrBm,*xBm,vm") (parallel [(const_int 0) 
(const_int 2)])))))] "TARGET_SSE4_1 && && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "@ pmuldq\t{%2, %0|%0, %2} pmuldq\t{%2, %0|%0, %2} vpmuldq\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "sseimul") (set_attr "prefix_data16" "1,1,*") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,vex") (set_attr "mode" "TI")]) (define_insn "avx512bw_pmaddwd512" [(set (match_operand: 0 "register_operand" "=v") (unspec: [(match_operand:VI2_AVX2 1 "register_operand" "v") (match_operand:VI2_AVX2 2 "nonimmediate_operand" "vm")] UNSPEC_PMADDWD512))] "TARGET_AVX512BW && " "vpmaddwd\t{%2, %1, %0|%0, %1, %2}"; [(set_attr "type" "sseiadd") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_expand "avx2_pmaddwd" [(set (match_operand:V8SI 0 "register_operand") (plus:V8SI (mult:V8SI (sign_extend:V8SI (vec_select:V8HI (match_operand:V16HI 1 "nonimmediate_operand") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6) (const_int 8) (const_int 10) (const_int 12) (const_int 14)]))) (sign_extend:V8SI (vec_select:V8HI (match_operand:V16HI 2 "nonimmediate_operand") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6) (const_int 8) (const_int 10) (const_int 12) (const_int 14)])))) (mult:V8SI (sign_extend:V8SI (vec_select:V8HI (match_dup 1) (parallel [(const_int 1) (const_int 3) (const_int 5) (const_int 7) (const_int 9) (const_int 11) (const_int 13) (const_int 15)]))) (sign_extend:V8SI (vec_select:V8HI (match_dup 2) (parallel [(const_int 1) (const_int 3) (const_int 5) (const_int 7) (const_int 9) (const_int 11) (const_int 13) (const_int 15)]))))))] "TARGET_AVX2" "ix86_fixup_binary_operands_no_copy (MULT, V16HImode, operands);") (define_insn "*avx2_pmaddwd" [(set (match_operand:V8SI 0 "register_operand" "=Yw") (plus:V8SI (mult:V8SI (sign_extend:V8SI (vec_select:V8HI (match_operand:V16HI 1 "nonimmediate_operand" "%Yw") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6) (const_int 8) (const_int 10) 
(const_int 12) (const_int 14)]))) (sign_extend:V8SI (vec_select:V8HI (match_operand:V16HI 2 "nonimmediate_operand" "Ywm") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6) (const_int 8) (const_int 10) (const_int 12) (const_int 14)])))) (mult:V8SI (sign_extend:V8SI (vec_select:V8HI (match_dup 1) (parallel [(const_int 1) (const_int 3) (const_int 5) (const_int 7) (const_int 9) (const_int 11) (const_int 13) (const_int 15)]))) (sign_extend:V8SI (vec_select:V8HI (match_dup 2) (parallel [(const_int 1) (const_int 3) (const_int 5) (const_int 7) (const_int 9) (const_int 11) (const_int 13) (const_int 15)]))))))] "TARGET_AVX2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "vpmaddwd\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sseiadd") (set_attr "prefix" "vex") (set_attr "mode" "OI")]) (define_expand "sse2_pmaddwd" [(set (match_operand:V4SI 0 "register_operand") (plus:V4SI (mult:V4SI (sign_extend:V4SI (vec_select:V4HI (match_operand:V8HI 1 "vector_operand") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6)]))) (sign_extend:V4SI (vec_select:V4HI (match_operand:V8HI 2 "vector_operand") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6)])))) (mult:V4SI (sign_extend:V4SI (vec_select:V4HI (match_dup 1) (parallel [(const_int 1) (const_int 3) (const_int 5) (const_int 7)]))) (sign_extend:V4SI (vec_select:V4HI (match_dup 2) (parallel [(const_int 1) (const_int 3) (const_int 5) (const_int 7)]))))))] "TARGET_SSE2" "ix86_fixup_binary_operands_no_copy (MULT, V8HImode, operands);") (define_insn "*sse2_pmaddwd" [(set (match_operand:V4SI 0 "register_operand" "=x,Yw") (plus:V4SI (mult:V4SI (sign_extend:V4SI (vec_select:V4HI (match_operand:V8HI 1 "vector_operand" "%0,Yw") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6)]))) (sign_extend:V4SI (vec_select:V4HI (match_operand:V8HI 2 "vector_operand" "xBm,Ywm") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6)])))) (mult:V4SI (sign_extend:V4SI (vec_select:V4HI 
(match_dup 1) (parallel [(const_int 1) (const_int 3) (const_int 5) (const_int 7)]))) (sign_extend:V4SI (vec_select:V4HI (match_dup 2) (parallel [(const_int 1) (const_int 3) (const_int 5) (const_int 7)]))))))] "TARGET_SSE2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "@ pmaddwd\t{%2, %0|%0, %2} vpmaddwd\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseiadd") (set_attr "atom_unit" "simul") (set_attr "prefix_data16" "1,*") (set_attr "prefix" "orig,vex") (set_attr "mode" "TI")]) (define_expand "cond_mul" [(set (match_operand:VI8_AVX512VL 0 "register_operand") (vec_merge:VI8_AVX512VL (mult:VI8_AVX512VL (match_operand:VI8_AVX512VL 2 "vector_operand") (match_operand:VI8_AVX512VL 3 "vector_operand")) (match_operand:VI8_AVX512VL 4 "nonimm_or_0_operand") (match_operand: 1 "register_operand")))] "TARGET_AVX512DQ" { emit_insn (gen_avx512dq_mul3_mask (operands[0], operands[2], operands[3], operands[4], operands[1])); DONE; }) (define_expand "avx512dq_mul3" [(set (match_operand:VI8_AVX512VL 0 "register_operand") (mult:VI8_AVX512VL (match_operand:VI8_AVX512VL 1 "bcst_vector_operand") (match_operand:VI8_AVX512VL 2 "bcst_vector_operand")))] "TARGET_AVX512DQ && " "ix86_fixup_binary_operands_no_copy (MULT, mode, operands);") (define_insn "*avx512dq_mul3" [(set (match_operand:VI8_AVX512VL 0 "register_operand" "=v") (mult:VI8_AVX512VL (match_operand:VI8_AVX512VL 1 "bcst_vector_operand" "%v") (match_operand:VI8_AVX512VL 2 "bcst_vector_operand" "vmBr")))] "TARGET_AVX512DQ && && ix86_binary_operator_ok (MULT, mode, operands)" { if (TARGET_DEST_FALSE_DEP_FOR_GLC && && !reg_mentioned_p (operands[0], operands[1]) && !reg_mentioned_p (operands[0], operands[2])) output_asm_insn ("vxorps\t%x0, %x0, %x0", operands); return "vpmullq\t{%2, %1, %0|%0, %1, %2}"; } [(set_attr "type" "sseimul") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "cond_mul" [(set (match_operand:VI4_AVX512VL 0 "register_operand") (vec_merge:VI4_AVX512VL (mult:VI4_AVX512VL 
(match_operand:VI4_AVX512VL 2 "vector_operand") (match_operand:VI4_AVX512VL 3 "vector_operand")) (match_operand:VI4_AVX512VL 4 "nonimm_or_0_operand") (match_operand: 1 "register_operand")))] "TARGET_AVX512F" { emit_insn (gen_mul3_mask (operands[0], operands[2], operands[3], operands[4], operands[1])); DONE; }) (define_expand "mul3" [(set (match_operand:VI4_AVX512F 0 "register_operand") (mult:VI4_AVX512F (match_operand:VI4_AVX512F 1 "general_vector_operand") (match_operand:VI4_AVX512F 2 "general_vector_operand")))] "TARGET_SSE2 && " { if (TARGET_SSE4_1) { if (!vector_operand (operands[1], mode)) operands[1] = force_reg (mode, operands[1]); if (!vector_operand (operands[2], mode)) operands[2] = force_reg (mode, operands[2]); ix86_fixup_binary_operands_no_copy (MULT, mode, operands); } else { ix86_expand_sse2_mulv4si3 (operands[0], operands[1], operands[2]); DONE; } }) (define_insn "*_mul3" [(set (match_operand:VI4_AVX512F 0 "register_operand" "=Yr,*x,v") (mult:VI4_AVX512F (match_operand:VI4_AVX512F 1 "bcst_vector_operand" "%0,0,v") (match_operand:VI4_AVX512F 2 "bcst_vector_operand" "YrBm,*xBm,vmBr")))] "TARGET_SSE4_1 && ix86_binary_operator_ok (MULT, mode, operands) && " "@ pmulld\t{%2, %0|%0, %2} pmulld\t{%2, %0|%0, %2} vpmulld\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "sseimul") (set_attr "prefix_extra" "1") (set_attr "prefix" "") (set_attr "btver2_decode" "vector,vector,vector") (set_attr "mode" "")]) (define_expand "mul3" [(set (match_operand:VI8_AVX2_AVX512F 0 "register_operand") (mult:VI8_AVX2_AVX512F (match_operand:VI8_AVX2_AVX512F 1 "register_operand") (match_operand:VI8_AVX2_AVX512F 2 "register_operand")))] "TARGET_SSE2" { ix86_expand_sse2_mulvxdi3 (operands[0], operands[1], operands[2]); DONE; }) (define_expand "vec_widen_mult_hi_" [(match_operand: 0 "register_operand") (any_extend: (match_operand:VI124_AVX2 1 "register_operand")) (match_operand:VI124_AVX2 2 "register_operand")] "TARGET_SSE2" { ix86_expand_mul_widen_hilo 
(operands[0], operands[1], operands[2], , true); DONE; }) (define_expand "vec_widen_mult_lo_" [(match_operand: 0 "register_operand") (any_extend: (match_operand:VI124_AVX2 1 "register_operand")) (match_operand:VI124_AVX2 2 "register_operand")] "TARGET_SSE2" { ix86_expand_mul_widen_hilo (operands[0], operands[1], operands[2], , false); DONE; }) ;; Most widen_mult_even_ can be handled directly from other ;; named patterns, but signed V4SI needs special help for plain SSE2. (define_expand "vec_widen_smult_even_v4si" [(match_operand:V2DI 0 "register_operand") (match_operand:V4SI 1 "vector_operand") (match_operand:V4SI 2 "vector_operand")] "TARGET_SSE2" { ix86_expand_mul_widen_evenodd (operands[0], operands[1], operands[2], false, false); DONE; }) (define_expand "vec_widen_mult_odd_" [(match_operand: 0 "register_operand") (any_extend: (match_operand:VI4_AVX512F 1 "general_vector_operand")) (match_operand:VI4_AVX512F 2 "general_vector_operand")] "TARGET_SSE2" { ix86_expand_mul_widen_evenodd (operands[0], operands[1], operands[2], , true); DONE; }) (define_mode_attr SDOT_PMADD_SUF [(V32HI "512v32hi") (V16HI "") (V8HI "")]) (define_mode_attr SDOT_VPDP_SUF [(V32HI "v16si") (V16HI "v8si") (V8HI "v4si")]) (define_expand "sdot_prod" [(match_operand: 0 "register_operand") (match_operand:VI2_AVX512VNNIBW 1 "register_operand") (match_operand:VI2_AVX512VNNIBW 2 "register_operand") (match_operand: 3 "register_operand")] "TARGET_SSE2" { /* Try with vnni instructions. */ if (( == 64 && TARGET_AVX512VNNI) || ( < 64 && ((TARGET_AVX512VNNI && TARGET_AVX512VL) || TARGET_AVXVNNI))) { operands[1] = lowpart_subreg (mode, operands[1], mode); operands[2] = lowpart_subreg (mode, operands[2], mode); emit_insn (gen_rtx_SET (operands[0], operands[3])); emit_insn (gen_vpdpwssd_ (operands[0], operands[3], operands[1], operands[2])); } /* Otherwise use pmaddwd + paddd. 
*/ else { rtx t = gen_reg_rtx (mode); emit_insn (gen__pmaddwd (t, operands[1], operands[2])); emit_insn (gen_rtx_SET (operands[0], gen_rtx_PLUS (mode, operands[3], t))); } DONE; }) ;; Normally we use widen_mul_even/odd, but combine can't quite get it all ;; back together when madd is available. (define_expand "sdot_prodv4si" [(match_operand:V2DI 0 "register_operand") (match_operand:V4SI 1 "register_operand") (match_operand:V4SI 2 "register_operand") (match_operand:V2DI 3 "register_operand")] "TARGET_XOP" { rtx t = gen_reg_rtx (V2DImode); emit_insn (gen_xop_pmacsdqh (t, operands[1], operands[2], operands[3])); emit_insn (gen_xop_pmacsdql (operands[0], operands[1], operands[2], t)); DONE; }) (define_expand "uavg3_ceil" [(set (match_operand:VI12_AVX2_AVX512BW 0 "register_operand") (truncate:VI12_AVX2_AVX512BW (lshiftrt: (plus: (plus: (zero_extend: (match_operand:VI12_AVX2_AVX512BW 1 "vector_operand")) (zero_extend: (match_operand:VI12_AVX2_AVX512BW 2 "vector_operand"))) (match_dup 3)) (const_int 1))))] "TARGET_SSE2" { operands[3] = CONST1_RTX(mode); ix86_fixup_binary_operands_no_copy (PLUS, mode, operands); }) (define_expand "usadv16qi" [(match_operand:V4SI 0 "register_operand") (match_operand:V16QI 1 "register_operand") (match_operand:V16QI 2 "vector_operand") (match_operand:V4SI 3 "vector_operand")] "TARGET_SSE2" { rtx t1 = gen_reg_rtx (V2DImode); rtx t2 = gen_reg_rtx (V4SImode); emit_insn (gen_sse2_psadbw (t1, operands[1], operands[2])); convert_move (t2, t1, 0); emit_insn (gen_addv4si3 (operands[0], t2, operands[3])); DONE; }) (define_expand "usadv32qi" [(match_operand:V8SI 0 "register_operand") (match_operand:V32QI 1 "register_operand") (match_operand:V32QI 2 "nonimmediate_operand") (match_operand:V8SI 3 "nonimmediate_operand")] "TARGET_AVX2" { rtx t1 = gen_reg_rtx (V4DImode); rtx t2 = gen_reg_rtx (V8SImode); emit_insn (gen_avx2_psadbw (t1, operands[1], operands[2])); convert_move (t2, t1, 0); emit_insn (gen_addv8si3 (operands[0], t2, operands[3])); DONE; }) 
(define_expand "usadv64qi" [(match_operand:V16SI 0 "register_operand") (match_operand:V64QI 1 "register_operand") (match_operand:V64QI 2 "nonimmediate_operand") (match_operand:V16SI 3 "nonimmediate_operand")] "TARGET_AVX512BW" { rtx t1 = gen_reg_rtx (V8DImode); rtx t2 = gen_reg_rtx (V16SImode); emit_insn (gen_avx512f_psadbw (t1, operands[1], operands[2])); convert_move (t2, t1, 0); emit_insn (gen_addv16si3 (operands[0], t2, operands[3])); DONE; }) (define_insn "ashr3" [(set (match_operand:VI248_AVX512BW_1 0 "register_operand" "=v,v") (ashiftrt:VI248_AVX512BW_1 (match_operand:VI248_AVX512BW_1 1 "nonimmediate_operand" "v,vm") (match_operand:DI 2 "nonmemory_operand" "v,N")))] "TARGET_AVX512VL" "vpsra\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sseishft") (set (attr "length_immediate") (if_then_else (match_operand 2 "const_int_operand") (const_string "1") (const_string "0"))) (set_attr "mode" "")]) (define_insn "ashr3" [(set (match_operand:VI24_AVX2 0 "register_operand" "=x,") (ashiftrt:VI24_AVX2 (match_operand:VI24_AVX2 1 "register_operand" "0,") (match_operand:DI 2 "nonmemory_operand" "xN,YwN")))] "TARGET_SSE2" "@ psra\t{%2, %0|%0, %2} vpsra\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseishft") (set (attr "length_immediate") (if_then_else (match_operand 2 "const_int_operand") (const_string "1") (const_string "0"))) (set_attr "prefix_data16" "1,*") (set_attr "prefix" "orig,vex") (set_attr "mode" "")]) (define_insn "ashr3" [(set (match_operand:VI248_AVX512BW_AVX512VL 0 "register_operand" "=v,v") (ashiftrt:VI248_AVX512BW_AVX512VL (match_operand:VI248_AVX512BW_AVX512VL 1 "nonimmediate_operand" "v,vm") (match_operand:DI 2 "nonmemory_operand" "v,N")))] "TARGET_AVX512F" "vpsra\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sseishft") (set (attr "length_immediate") (if_then_else (match_operand 2 "const_int_operand") (const_string "1") (const_string "0"))) (set_attr "mode" "")]) (define_expand "ashr3" [(set (match_operand:VI248_AVX512BW 0 
"register_operand") (ashiftrt:VI248_AVX512BW (match_operand:VI248_AVX512BW 1 "nonimmediate_operand") (match_operand:DI 2 "nonmemory_operand")))] "TARGET_AVX512F") (define_expand "ashrv4di3" [(set (match_operand:V4DI 0 "register_operand") (ashiftrt:V4DI (match_operand:V4DI 1 "nonimmediate_operand") (match_operand:DI 2 "nonmemory_operand")))] "TARGET_AVX2" { if (!TARGET_AVX512VL) { if (CONST_INT_P (operands[2]) && UINTVAL (operands[2]) >= 63) { rtx zero = force_reg (V4DImode, CONST0_RTX (V4DImode)); emit_insn (gen_avx2_gtv4di3 (operands[0], zero, operands[1])); DONE; } if (operands[2] == const0_rtx) { emit_move_insn (operands[0], operands[1]); DONE; } operands[1] = force_reg (V4DImode, operands[1]); if (CONST_INT_P (operands[2])) { vec_perm_builder sel (8, 8, 1); sel.quick_grow (8); rtx arg0, arg1; rtx op1 = lowpart_subreg (V8SImode, operands[1], V4DImode); rtx target = gen_reg_rtx (V8SImode); if (INTVAL (operands[2]) > 32) { arg0 = gen_reg_rtx (V8SImode); arg1 = gen_reg_rtx (V8SImode); emit_insn (gen_ashrv8si3 (arg1, op1, GEN_INT (31))); emit_insn (gen_ashrv8si3 (arg0, op1, GEN_INT (INTVAL (operands[2]) - 32))); sel[0] = 1; sel[1] = 9; sel[2] = 3; sel[3] = 11; sel[4] = 5; sel[5] = 13; sel[6] = 7; sel[7] = 15; } else if (INTVAL (operands[2]) == 32) { arg0 = op1; arg1 = gen_reg_rtx (V8SImode); emit_insn (gen_ashrv8si3 (arg1, op1, GEN_INT (31))); sel[0] = 1; sel[1] = 9; sel[2] = 3; sel[3] = 11; sel[4] = 5; sel[5] = 13; sel[6] = 7; sel[7] = 15; } else { arg0 = gen_reg_rtx (V4DImode); arg1 = gen_reg_rtx (V8SImode); emit_insn (gen_lshrv4di3 (arg0, operands[1], operands[2])); emit_insn (gen_ashrv8si3 (arg1, op1, operands[2])); arg0 = lowpart_subreg (V8SImode, arg0, V4DImode); sel[0] = 0; sel[1] = 9; sel[2] = 2; sel[3] = 11; sel[4] = 4; sel[5] = 13; sel[6] = 6; sel[7] = 15; } vec_perm_indices indices (sel, 2, 8); bool ok = targetm.vectorize.vec_perm_const (V8SImode, target, arg0, arg1, indices); gcc_assert (ok); emit_move_insn (operands[0], lowpart_subreg (V4DImode, target, 
V8SImode)); DONE; } rtx zero = force_reg (V4DImode, CONST0_RTX (V4DImode)); rtx zero_or_all_ones = gen_reg_rtx (V4DImode); emit_insn (gen_avx2_gtv4di3 (zero_or_all_ones, zero, operands[1])); rtx lshr_res = gen_reg_rtx (V4DImode); emit_insn (gen_lshrv4di3 (lshr_res, operands[1], operands[2])); rtx ashl_res = gen_reg_rtx (V4DImode); rtx amount; if (TARGET_64BIT) { amount = gen_reg_rtx (DImode); emit_insn (gen_subdi3 (amount, force_reg (DImode, GEN_INT (64)), operands[2])); } else { rtx temp = gen_reg_rtx (SImode); emit_insn (gen_subsi3 (temp, force_reg (SImode, GEN_INT (64)), lowpart_subreg (SImode, operands[2], DImode))); amount = gen_reg_rtx (V4SImode); emit_insn (gen_vec_setv4si_0 (amount, CONST0_RTX (V4SImode), temp)); } amount = lowpart_subreg (DImode, amount, GET_MODE (amount)); emit_insn (gen_ashlv4di3 (ashl_res, zero_or_all_ones, amount)); emit_insn (gen_iorv4di3 (operands[0], lshr_res, ashl_res)); DONE; } }) (define_insn "3" [(set (match_operand:VI248_AVX512BW_2 0 "register_operand" "=v,v") (any_lshift:VI248_AVX512BW_2 (match_operand:VI248_AVX512BW_2 1 "nonimmediate_operand" "v,vm") (match_operand:DI 2 "nonmemory_operand" "v,N")))] "TARGET_AVX512VL" "vp\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sseishft") (set (attr "length_immediate") (if_then_else (match_operand 2 "const_int_operand") (const_string "1") (const_string "0"))) (set_attr "mode" "")]) (define_insn "3" [(set (match_operand:VI248_AVX2 0 "register_operand" "=x,") (any_lshift:VI248_AVX2 (match_operand:VI248_AVX2 1 "register_operand" "0,") (match_operand:DI 2 "nonmemory_operand" "xN,YwN")))] "TARGET_SSE2" "@ p\t{%2, %0|%0, %2} vp\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseishft") (set (attr "length_immediate") (if_then_else (match_operand 2 "const_int_operand") (const_string "1") (const_string "0"))) (set_attr "prefix_data16" "1,*") (set_attr "prefix" "orig,vex") (set_attr "mode" "")]) (define_insn "3" [(set (match_operand:VI248_AVX512BW 0 "register_operand" 
"=v,v") (any_lshift:VI248_AVX512BW (match_operand:VI248_AVX512BW 1 "nonimmediate_operand" "v,m") (match_operand:DI 2 "nonmemory_operand" "vN,N")))] "TARGET_AVX512F" "vp\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sseishft") (set (attr "length_immediate") (if_then_else (match_operand 2 "const_int_operand") (const_string "1") (const_string "0"))) (set_attr "mode" "")]) ;; PR target/101796: Transfrom movl+vpbranchcastw+vpsravw to vpsraw ;; when COUNT is immediate. (define_split [(set (match_operand:VI248_AVX512BW 0 "register_operand") (any_shift:VI248_AVX512BW (match_operand:VI248_AVX512BW 1 "nonimmediate_operand") (match_operand:VI248_AVX512BW 2 "const_vector_duplicate_operand")))] "TARGET_AVX512F && GET_MODE_UNIT_BITSIZE (mode) > INTVAL (XVECEXP (operands[2], 0, 0))" [(set (match_dup 0) (any_shift:VI248_AVX512BW (match_dup 1) (match_dup 3)))] "operands[3] = XVECEXP (operands[2], 0, 0);") (define_expand "vec_shl_" [(set (match_dup 3) (ashift:V1TI (match_operand:V_128 1 "register_operand") (match_operand:SI 2 "const_0_to_255_mul_8_operand"))) (set (match_operand:V_128 0 "register_operand") (match_dup 4))] "TARGET_SSE2" { operands[1] = gen_lowpart (V1TImode, operands[1]); operands[3] = gen_reg_rtx (V1TImode); operands[4] = gen_lowpart (mode, operands[3]); }) (define_expand "vec_shr_" [(set (match_dup 3) (lshiftrt:V1TI (match_operand:V_128 1 "register_operand") (match_operand:SI 2 "const_0_to_255_mul_8_operand"))) (set (match_operand:V_128 0 "register_operand") (match_dup 4))] "TARGET_SSE2" { operands[1] = gen_lowpart (V1TImode, operands[1]); operands[3] = gen_reg_rtx (V1TImode); operands[4] = gen_lowpart (mode, operands[3]); }) (define_expand "ashlv1ti3" [(set (match_operand:V1TI 0 "register_operand") (ashift:V1TI (match_operand:V1TI 1 "register_operand") (match_operand:QI 2 "general_operand")))] "TARGET_SSE2 && TARGET_64BIT" { ix86_expand_v1ti_shift (ASHIFT, operands); DONE; }) (define_expand "lshrv1ti3" [(set (match_operand:V1TI 0 "register_operand") (lshiftrt:V1TI 
(match_operand:V1TI 1 "register_operand") (match_operand:QI 2 "general_operand")))] "TARGET_SSE2 && TARGET_64BIT" { ix86_expand_v1ti_shift (LSHIFTRT, operands); DONE; }) (define_expand "ashrv1ti3" [(set (match_operand:V1TI 0 "register_operand") (ashiftrt:V1TI (match_operand:V1TI 1 "register_operand") (match_operand:QI 2 "general_operand")))] "TARGET_SSE2 && TARGET_64BIT" { ix86_expand_v1ti_ashiftrt (operands); DONE; }) (define_expand "rotlv1ti3" [(set (match_operand:V1TI 0 "register_operand") (rotate:V1TI (match_operand:V1TI 1 "register_operand") (match_operand:QI 2 "general_operand")))] "TARGET_SSE2 && TARGET_64BIT" { ix86_expand_v1ti_rotate (ROTATE, operands); DONE; }) (define_expand "rotrv1ti3" [(set (match_operand:V1TI 0 "register_operand") (rotatert:V1TI (match_operand:V1TI 1 "register_operand") (match_operand:QI 2 "general_operand")))] "TARGET_SSE2 && TARGET_64BIT" { ix86_expand_v1ti_rotate (ROTATERT, operands); DONE; }) (define_insn "avx512bw_3" [(set (match_operand:VIMAX_AVX512VL 0 "register_operand" "=v") (any_lshift:VIMAX_AVX512VL (match_operand:VIMAX_AVX512VL 1 "nonimmediate_operand" "vm") (match_operand:SI 2 "const_0_to_255_mul_8_operand" "n")))] "TARGET_AVX512BW" { operands[2] = GEN_INT (INTVAL (operands[2]) / 8); return "vpdq\t{%2, %1, %0|%0, %1, %2}"; } [(set_attr "type" "sseishft") (set_attr "length_immediate" "1") (set_attr "prefix" "maybe_evex") (set_attr "mode" "")]) (define_insn "_3" [(set (match_operand:VIMAX_AVX2 0 "register_operand" "=x,Yw") (any_lshift:VIMAX_AVX2 (match_operand:VIMAX_AVX2 1 "register_operand" "0,Yw") (match_operand:SI 2 "const_0_to_255_mul_8_operand" "n,n")))] "TARGET_SSE2" { operands[2] = GEN_INT (INTVAL (operands[2]) / 8); switch (which_alternative) { case 0: return "pdq\t{%2, %0|%0, %2}"; case 1: return "vpdq\t{%2, %1, %0|%0, %1, %2}"; default: gcc_unreachable (); } } [(set_attr "isa" "noavx,avx") (set_attr "type" "sseishft") (set_attr "length_immediate" "1") (set_attr "atom_unit" "sishuf") (set_attr "prefix_data16" 
"1,*") (set_attr "prefix" "orig,vex") (set_attr "mode" "")]) (define_insn "_v" [(set (match_operand:VI48_AVX512VL 0 "register_operand" "=v") (any_rotate:VI48_AVX512VL (match_operand:VI48_AVX512VL 1 "register_operand" "v") (match_operand:VI48_AVX512VL 2 "nonimmediate_operand" "vm")))] "TARGET_AVX512F" "vpv\t{%2, %1, %0|%0, %1, %2}" [(set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "_" [(set (match_operand:VI48_AVX512VL 0 "register_operand" "=v") (any_rotate:VI48_AVX512VL (match_operand:VI48_AVX512VL 1 "nonimmediate_operand" "vm") (match_operand:SI 2 "const_0_to_255_operand")))] "TARGET_AVX512F" "vp\t{%2, %1, %0|%0, %1, %2}" [(set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "3" [(set (match_operand:VI124_256_AVX512F_AVX512BW 0 "register_operand") (maxmin:VI124_256_AVX512F_AVX512BW (match_operand:VI124_256_AVX512F_AVX512BW 1 "nonimmediate_operand") (match_operand:VI124_256_AVX512F_AVX512BW 2 "nonimmediate_operand")))] "TARGET_AVX2" "ix86_fixup_binary_operands_no_copy (, mode, operands);") (define_insn "*avx2_3" [(set (match_operand:VI124_256 0 "register_operand" "=") (maxmin:VI124_256 (match_operand:VI124_256 1 "nonimmediate_operand" "%") (match_operand:VI124_256 2 "nonimmediate_operand" "m")))] "TARGET_AVX2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "vp\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sseiadd") (set_attr "prefix_extra" "1") (set_attr "prefix" "vex") (set_attr "mode" "OI")]) (define_expand "cond_" [(set (match_operand:VI1248_AVX512VLBW 0 "register_operand") (vec_merge:VI1248_AVX512VLBW (maxmin:VI1248_AVX512VLBW (match_operand:VI1248_AVX512VLBW 2 "nonimmediate_operand") (match_operand:VI1248_AVX512VLBW 3 "nonimmediate_operand")) (match_operand:VI1248_AVX512VLBW 4 "nonimm_or_0_operand") (match_operand: 1 "register_operand")))] "TARGET_AVX512F" { emit_insn (gen_3_mask (operands[0], operands[2], operands[3], operands[4], operands[1])); DONE; }) (define_expand "3_mask" [(set (match_operand:VI48_AVX512VL 0 
"register_operand") (vec_merge:VI48_AVX512VL (maxmin:VI48_AVX512VL (match_operand:VI48_AVX512VL 1 "nonimmediate_operand") (match_operand:VI48_AVX512VL 2 "nonimmediate_operand")) (match_operand:VI48_AVX512VL 3 "nonimm_or_0_operand") (match_operand: 4 "register_operand")))] "TARGET_AVX512F" "ix86_fixup_binary_operands_no_copy (, mode, operands);") (define_insn "*avx512f_3" [(set (match_operand:VI48_AVX512VL 0 "register_operand" "=v") (maxmin:VI48_AVX512VL (match_operand:VI48_AVX512VL 1 "nonimmediate_operand" "%v") (match_operand:VI48_AVX512VL 2 "nonimmediate_operand" "vm")))] "TARGET_AVX512F && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "vp\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sseiadd") (set_attr "prefix_extra" "1") (set_attr "prefix" "maybe_evex") (set_attr "mode" "")]) (define_insn "3" [(set (match_operand:VI12_AVX512VL 0 "register_operand" "=v") (maxmin:VI12_AVX512VL (match_operand:VI12_AVX512VL 1 "register_operand" "v") (match_operand:VI12_AVX512VL 2 "nonimmediate_operand" "vm")))] "TARGET_AVX512BW" "vp\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sseiadd") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "3" [(set (match_operand:VI8_AVX2_AVX512F 0 "register_operand") (maxmin:VI8_AVX2_AVX512F (match_operand:VI8_AVX2_AVX512F 1 "register_operand") (match_operand:VI8_AVX2_AVX512F 2 "register_operand")))] "TARGET_SSE4_2" { if (TARGET_AVX512F && (mode == V8DImode || TARGET_AVX512VL)) ; else { enum rtx_code code; rtx xops[6]; bool ok; xops[0] = operands[0]; if ( == SMAX || == UMAX) { xops[1] = operands[1]; xops[2] = operands[2]; } else { xops[1] = operands[2]; xops[2] = operands[1]; } code = ( == UMAX || == UMIN) ? 
GTU : GT; xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]); xops[4] = operands[1]; xops[5] = operands[2]; ok = ix86_expand_int_vcond (xops); gcc_assert (ok); DONE; } }) (define_expand "3" [(set (match_operand:VI124_128 0 "register_operand") (smaxmin:VI124_128 (match_operand:VI124_128 1 "vector_operand") (match_operand:VI124_128 2 "vector_operand")))] "TARGET_SSE2" { if (TARGET_SSE4_1 || mode == V8HImode) ix86_fixup_binary_operands_no_copy (, mode, operands); else { rtx xops[6]; bool ok; xops[0] = operands[0]; operands[1] = force_reg (mode, operands[1]); operands[2] = force_reg (mode, operands[2]); if ( == SMAX) { xops[1] = operands[1]; xops[2] = operands[2]; } else { xops[1] = operands[2]; xops[2] = operands[1]; } xops[3] = gen_rtx_GT (VOIDmode, operands[1], operands[2]); xops[4] = operands[1]; xops[5] = operands[2]; ok = ix86_expand_int_vcond (xops); gcc_assert (ok); DONE; } }) (define_insn "*sse4_1_3" [(set (match_operand:VI14_128 0 "register_operand" "=Yr,*x,") (smaxmin:VI14_128 (match_operand:VI14_128 1 "vector_operand" "%0,0,") (match_operand:VI14_128 2 "vector_operand" "YrBm,*xBm,m")))] "TARGET_SSE4_1 && && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "@ p\t{%2, %0|%0, %2} p\t{%2, %0|%0, %2} vp\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "sseiadd") (set_attr "prefix_extra" "1,1,*") (set_attr "prefix" "orig,orig,vex") (set_attr "mode" "TI")]) (define_insn "*v8hi3" [(set (match_operand:V8HI 0 "register_operand" "=x,Yw") (smaxmin:V8HI (match_operand:V8HI 1 "vector_operand" "%0,Yw") (match_operand:V8HI 2 "vector_operand" "xBm,Ywm")))] "TARGET_SSE2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "@ pw\t{%2, %0|%0, %2} vpw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseiadd") (set_attr "prefix_data16" "1,*") (set_attr "prefix_extra" "*,1") (set_attr "prefix" "orig,vex") (set_attr "mode" "TI")]) (define_expand "3" [(set (match_operand:VI124_128 0 "register_operand") 
(umaxmin:VI124_128 (match_operand:VI124_128 1 "vector_operand") (match_operand:VI124_128 2 "vector_operand")))] "TARGET_SSE2" { if (TARGET_SSE4_1 || mode == V16QImode) ix86_fixup_binary_operands_no_copy (, mode, operands); else if ( == UMAX && mode == V8HImode) { rtx op0 = operands[0], op2 = operands[2], op3 = op0; operands[1] = force_reg (mode, operands[1]); if (rtx_equal_p (op3, op2)) op3 = gen_reg_rtx (V8HImode); emit_insn (gen_sse2_ussubv8hi3 (op3, operands[1], op2)); emit_insn (gen_addv8hi3 (op0, op3, op2)); DONE; } else { rtx xops[6]; bool ok; operands[1] = force_reg (mode, operands[1]); operands[2] = force_reg (mode, operands[2]); xops[0] = operands[0]; if ( == UMAX) { xops[1] = operands[1]; xops[2] = operands[2]; } else { xops[1] = operands[2]; xops[2] = operands[1]; } xops[3] = gen_rtx_GTU (VOIDmode, operands[1], operands[2]); xops[4] = operands[1]; xops[5] = operands[2]; ok = ix86_expand_int_vcond (xops); gcc_assert (ok); DONE; } }) (define_insn "*sse4_1_3" [(set (match_operand:VI24_128 0 "register_operand" "=Yr,*x,") (umaxmin:VI24_128 (match_operand:VI24_128 1 "vector_operand" "%0,0,") (match_operand:VI24_128 2 "vector_operand" "YrBm,*xBm,m")))] "TARGET_SSE4_1 && && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "@ p\t{%2, %0|%0, %2} p\t{%2, %0|%0, %2} vp\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "sseiadd") (set_attr "prefix_extra" "1,1,*") (set_attr "prefix" "orig,orig,vex") (set_attr "mode" "TI")]) (define_insn "*v16qi3" [(set (match_operand:V16QI 0 "register_operand" "=x,Yw") (umaxmin:V16QI (match_operand:V16QI 1 "vector_operand" "%0,Yw") (match_operand:V16QI 2 "vector_operand" "xBm,Ywm")))] "TARGET_SSE2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "@ pb\t{%2, %0|%0, %2} vpb\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseiadd") (set_attr "prefix_data16" "1,*") (set_attr "prefix_extra" "*,1") (set_attr "prefix" "orig,vex") (set_attr "mode" "TI")]) 
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; Parallel integral comparisons ;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define_insn "*avx2_eq3" [(set (match_operand:VI_256 0 "register_operand" "=x") (eq:VI_256 (match_operand:VI_256 1 "nonimmediate_operand" "%x") (match_operand:VI_256 2 "nonimmediate_operand" "xm")))] "TARGET_AVX2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "vpcmpeq\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "ssecmp") (set_attr "prefix_extra" "1") (set_attr "prefix" "vex") (set_attr "mode" "OI")]) (define_insn_and_split "*avx2_pcmp3_1" [(set (match_operand:VI_128_256 0 "register_operand") (vec_merge:VI_128_256 (match_operand:VI_128_256 1 "vector_all_ones_operand") (match_operand:VI_128_256 2 "const0_operand") (unspec: [(match_operand:VI_128_256 3 "nonimmediate_operand") (match_operand:VI_128_256 4 "nonimmediate_operand") (match_operand:SI 5 "const_0_to_7_operand")] UNSPEC_PCMP)))] "TARGET_AVX512VL && ix86_pre_reload_split () /* EQ is commutative. */ && ((INTVAL (operands[5]) == 0 && !(MEM_P (operands[3]) && MEM_P (operands[4]))) /* NLE aka GT, 3 must be register. */ || (INTVAL (operands[5]) == 6 && !MEM_P (operands[3])) /* LT, 4 must be register and we swap operands. */ || (INTVAL (operands[5]) == 1 && !MEM_P (operands[4])))" "#" "&& 1" [(const_int 0)] { if (INTVAL (operands[5]) == 1) std::swap (operands[3], operands[4]); enum rtx_code code = INTVAL (operands[5]) ? 
GT : EQ; emit_move_insn (operands[0], gen_rtx_fmt_ee (code, mode, operands[3], operands[4])); DONE; }) (define_insn_and_split "*avx2_pcmp3_2" [(set (match_operand:VI_128_256 0 "register_operand") (vec_merge:VI_128_256 (match_operand:VI_128_256 1 "vector_all_ones_operand") (match_operand:VI_128_256 2 "const0_operand") (not: (unspec: [(match_operand:VI_128_256 3 "nonimmediate_operand") (match_operand:VI_128_256 4 "nonimmediate_operand") (match_operand:SI 5 "const_0_to_7_operand")] UNSPEC_PCMP))))] "TARGET_AVX512VL && ix86_pre_reload_split () /* NE is commutative. */ && ((INTVAL (operands[5]) == 4 && !(MEM_P (operands[3]) && MEM_P (operands[4]))) /* LE, 3 must be register. */ || (INTVAL (operands[5]) == 2 && !MEM_P (operands[3])) /* NLT aka GE, 4 must be register and we swap operands. */ || (INTVAL (operands[5]) == 5 && !MEM_P (operands[4])))" "#" "&& 1" [(const_int 0)] { if (INTVAL (operands[5]) == 5) std::swap (operands[3], operands[4]); enum rtx_code code = INTVAL (operands[5]) != 4 ? GT : EQ; emit_move_insn (operands[0], gen_rtx_fmt_ee (code, mode, operands[3], operands[4])); DONE; }) (define_insn_and_split "*avx2_pcmp3_3" [(set (match_operand:VI1_AVX2 0 "register_operand") (vec_merge:VI1_AVX2 (match_operand:VI1_AVX2 1 "vector_operand") (match_operand:VI1_AVX2 2 "vector_operand") (unspec: [(match_operand:VI1_AVX2 3 "register_operand") (match_operand:VI1_AVX2 4 "const0_operand") (match_operand:SI 5 "const_0_to_7_operand")] UNSPEC_PCMP)))] "TARGET_AVX512VL && ix86_pre_reload_split () /* LT or GE 0 */ && ((INTVAL (operands[5]) == 1 && !MEM_P (operands[2])) || (INTVAL (operands[5]) == 5 && !MEM_P (operands[1])))" "#" "&& 1" [(set (match_dup 0) (unspec:VI1_AVX2 [(match_dup 2) (match_dup 1) (lt:VI1_AVX2 (match_dup 3) (match_dup 4))] UNSPEC_BLENDV))] { if (INTVAL (operands[5]) == 5) std::swap (operands[1], operands[2]); }) (define_insn_and_split "*avx2_pcmp3_4" [(set (match_operand:VI1_AVX2 0 "register_operand") (vec_merge:VI1_AVX2 (match_operand:VI1_AVX2 1 
"vector_operand") (match_operand:VI1_AVX2 2 "vector_operand") (unspec: [(subreg:VI1_AVX2 (not (match_operand 3 "register_operand")) 0) (match_operand:VI1_AVX2 4 "const0_operand") (match_operand:SI 5 "const_0_to_7_operand")] UNSPEC_PCMP)))] "TARGET_AVX512VL && ix86_pre_reload_split () && GET_MODE_CLASS (GET_MODE (operands[3])) == MODE_VECTOR_INT && GET_MODE_SIZE (GET_MODE (operands[3])) == /* LT or GE 0 */ && ((INTVAL (operands[5]) == 1 && !MEM_P (operands[1])) || (INTVAL (operands[5]) == 5 && !MEM_P (operands[2])))" "#" "&& 1" [(set (match_dup 0) (unspec:VI1_AVX2 [(match_dup 1) (match_dup 2) (lt:VI1_AVX2 (match_dup 3) (match_dup 4))] UNSPEC_BLENDV))] { if (INTVAL (operands[5]) == 1) std::swap (operands[1], operands[2]); operands[3] = gen_lowpart (mode, operands[3]); }) (define_insn_and_split "*avx2_pcmp3_5" [(set (match_operand:VI1_AVX2 0 "register_operand") (vec_merge:VI1_AVX2 (match_operand:VI1_AVX2 1 "vector_operand") (match_operand:VI1_AVX2 2 "vector_operand") (unspec: [(not:VI1_AVX2 (match_operand:VI1_AVX2 3 "register_operand")) (match_operand:VI1_AVX2 4 "const0_operand") (match_operand:SI 5 "const_0_to_7_operand")] UNSPEC_PCMP)))] "TARGET_AVX512VL && ix86_pre_reload_split () /* LT or GE 0 */ && ((INTVAL (operands[5]) == 1 && !MEM_P (operands[1])) || (INTVAL (operands[5]) == 5 && !MEM_P (operands[2])))" "#" "&& 1" [(set (match_dup 0) (unspec:VI1_AVX2 [(match_dup 1) (match_dup 2) (lt:VI1_AVX2 (match_dup 3) (match_dup 4))] UNSPEC_BLENDV))] { if (INTVAL (operands[5]) == 1) std::swap (operands[1], operands[2]); }) (define_expand "_eq3" [(set (match_operand: 0 "register_operand") (unspec: [(match_operand:VI12_AVX512VL 1 "nonimmediate_operand") (match_operand:VI12_AVX512VL 2 "nonimmediate_operand") (const_int 0)] UNSPEC_PCMP))] "TARGET_AVX512BW" "ix86_fixup_binary_operands_no_copy (EQ, mode, operands);") (define_expand "_eq3" [(set (match_operand: 0 "register_operand") (unspec: [(match_operand:VI48_AVX512VL 1 "nonimmediate_operand") (match_operand:VI48_AVX512VL 2 
"nonimmediate_operand") (const_int 0)] UNSPEC_PCMP))] "TARGET_AVX512F" "ix86_fixup_binary_operands_no_copy (EQ, mode, operands);") (define_insn "*sse4_1_eqv2di3" [(set (match_operand:V2DI 0 "register_operand" "=Yr,*x,x") (eq:V2DI (match_operand:V2DI 1 "vector_operand" "%0,0,x") (match_operand:V2DI 2 "vector_operand" "YrBm,*xBm,xm")))] "TARGET_SSE4_1 && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "@ pcmpeqq\t{%2, %0|%0, %2} pcmpeqq\t{%2, %0|%0, %2} vpcmpeqq\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssecmp") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,vex") (set_attr "mode" "TI")]) (define_insn "*sse2_eq3" [(set (match_operand:VI124_128 0 "register_operand" "=x,x") (eq:VI124_128 (match_operand:VI124_128 1 "vector_operand" "%0,x") (match_operand:VI124_128 2 "vector_operand" "xBm,xm")))] "TARGET_SSE2 && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "@ pcmpeq\t{%2, %0|%0, %2} vpcmpeq\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "ssecmp") (set_attr "prefix_data16" "1,*") (set_attr "prefix" "orig,vex") (set_attr "mode" "TI")]) (define_insn "sse4_2_gtv2di3" [(set (match_operand:V2DI 0 "register_operand" "=Yr,*x,x") (gt:V2DI (match_operand:V2DI 1 "register_operand" "0,0,x") (match_operand:V2DI 2 "vector_operand" "YrBm,*xBm,xm")))] "TARGET_SSE4_2" "@ pcmpgtq\t{%2, %0|%0, %2} pcmpgtq\t{%2, %0|%0, %2} vpcmpgtq\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssecmp") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,vex") (set_attr "mode" "TI")]) (define_insn "avx2_gt3" [(set (match_operand:VI_256 0 "register_operand" "=x") (gt:VI_256 (match_operand:VI_256 1 "register_operand" "x") (match_operand:VI_256 2 "nonimmediate_operand" "xm")))] "TARGET_AVX2" "vpcmpgt\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "ssecmp") (set_attr "prefix_extra" "1") (set_attr "prefix" "vex") (set_attr "mode" "OI")]) (define_expand "_gt3" [(set (match_operand: 0 
"register_operand") (unspec: [(match_operand:VI48_AVX512VL 1 "register_operand") (match_operand:VI48_AVX512VL 2 "nonimmediate_operand") (const_int 6)] UNSPEC_PCMP))] "TARGET_AVX512F") (define_expand "_gt3" [(set (match_operand: 0 "register_operand") (unspec: [(match_operand:VI12_AVX512VL 1 "register_operand") (match_operand:VI12_AVX512VL 2 "nonimmediate_operand") (const_int 6)] UNSPEC_PCMP))] "TARGET_AVX512BW") (define_insn "*sse2_gt3" [(set (match_operand:VI124_128 0 "register_operand" "=x,x") (gt:VI124_128 (match_operand:VI124_128 1 "register_operand" "0,x") (match_operand:VI124_128 2 "vector_operand" "xBm,xm")))] "TARGET_SSE2" "@ pcmpgt\t{%2, %0|%0, %2} vpcmpgt\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "ssecmp") (set_attr "prefix_data16" "1,*") (set_attr "prefix" "orig,vex") (set_attr "mode" "TI")]) (define_expand "vcond" [(set (match_operand:V_512 0 "register_operand") (if_then_else:V_512 (match_operator 3 "" [(match_operand:VI_AVX512BW 4 "nonimmediate_operand") (match_operand:VI_AVX512BW 5 "general_operand")]) (match_operand:V_512 1) (match_operand:V_512 2)))] "TARGET_AVX512F && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))" { bool ok = ix86_expand_int_vcond (operands); gcc_assert (ok); DONE; }) (define_expand "vcond" [(set (match_operand:V_256 0 "register_operand") (if_then_else:V_256 (match_operator 3 "" [(match_operand:VI_256 4 "nonimmediate_operand") (match_operand:VI_256 5 "general_operand")]) (match_operand:V_256 1) (match_operand:V_256 2)))] "TARGET_AVX2 && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))" { bool ok = ix86_expand_int_vcond (operands); gcc_assert (ok); DONE; }) (define_expand "vcond" [(set (match_operand:V_128 0 "register_operand") (if_then_else:V_128 (match_operator 3 "" [(match_operand:VI124_128 4 "vector_operand") (match_operand:VI124_128 5 "general_operand")]) (match_operand:V_128 1) (match_operand:V_128 2)))] "TARGET_SSE2 && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))" { bool ok = 
ix86_expand_int_vcond (operands); gcc_assert (ok); DONE; }) (define_expand "vcondv2di" [(set (match_operand:VI8F_128 0 "register_operand") (if_then_else:VI8F_128 (match_operator 3 "" [(match_operand:V2DI 4 "vector_operand") (match_operand:V2DI 5 "general_operand")]) (match_operand:VI8F_128 1) (match_operand:VI8F_128 2)))] "TARGET_SSE4_2" { bool ok = ix86_expand_int_vcond (operands); gcc_assert (ok); DONE; }) (define_expand "vcondu" [(set (match_operand:V_512 0 "register_operand") (if_then_else:V_512 (match_operator 3 "" [(match_operand:VI_AVX512BW 4 "nonimmediate_operand") (match_operand:VI_AVX512BW 5 "nonimmediate_operand")]) (match_operand:V_512 1 "general_operand") (match_operand:V_512 2 "general_operand")))] "TARGET_AVX512F && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))" { bool ok = ix86_expand_int_vcond (operands); gcc_assert (ok); DONE; }) (define_expand "vcondu" [(set (match_operand:V_256 0 "register_operand") (if_then_else:V_256 (match_operator 3 "" [(match_operand:VI_256 4 "nonimmediate_operand") (match_operand:VI_256 5 "nonimmediate_operand")]) (match_operand:V_256 1 "general_operand") (match_operand:V_256 2 "general_operand")))] "TARGET_AVX2 && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))" { bool ok = ix86_expand_int_vcond (operands); gcc_assert (ok); DONE; }) (define_expand "vcondu" [(set (match_operand:V_128 0 "register_operand") (if_then_else:V_128 (match_operator 3 "" [(match_operand:VI124_128 4 "vector_operand") (match_operand:VI124_128 5 "vector_operand")]) (match_operand:V_128 1 "general_operand") (match_operand:V_128 2 "general_operand")))] "TARGET_SSE2 && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))" { bool ok = ix86_expand_int_vcond (operands); gcc_assert (ok); DONE; }) (define_expand "vconduv2di" [(set (match_operand:VI8F_128 0 "register_operand") (if_then_else:VI8F_128 (match_operator 3 "" [(match_operand:V2DI 4 "vector_operand") (match_operand:V2DI 5 "vector_operand")]) (match_operand:VI8F_128 1 "general_operand") 
(match_operand:VI8F_128 2 "general_operand")))] "TARGET_SSE4_2" { bool ok = ix86_expand_int_vcond (operands); gcc_assert (ok); DONE; }) (define_expand "vcondu" [(set (match_operand:VF_AVX512FP16VL 0 "register_operand") (if_then_else:VF_AVX512FP16VL (match_operator 3 "" [(match_operand: 4 "vector_operand") (match_operand: 5 "vector_operand")]) (match_operand:VF_AVX512FP16VL 1 "general_operand") (match_operand:VF_AVX512FP16VL 2 "general_operand")))] "TARGET_AVX512FP16" { bool ok = ix86_expand_int_vcond (operands); gcc_assert (ok); DONE; }) (define_expand "vcondeqv2di" [(set (match_operand:VI8F_128 0 "register_operand") (if_then_else:VI8F_128 (match_operator 3 "" [(match_operand:V2DI 4 "vector_operand") (match_operand:V2DI 5 "general_operand")]) (match_operand:VI8F_128 1) (match_operand:VI8F_128 2)))] "TARGET_SSE4_1" { bool ok = ix86_expand_int_vcond (operands); gcc_assert (ok); DONE; }) (define_mode_iterator VEC_PERM_AVX2 [V16QI V8HI V4SI V2DI V4SF V2DF (V8HF "TARGET_AVX512FP16") (V32QI "TARGET_AVX2") (V16HI "TARGET_AVX2") (V8SI "TARGET_AVX2") (V4DI "TARGET_AVX2") (V8SF "TARGET_AVX2") (V4DF "TARGET_AVX2") (V16HF "TARGET_AVX512FP16") (V16SF "TARGET_AVX512F") (V8DF "TARGET_AVX512F") (V16SI "TARGET_AVX512F") (V8DI "TARGET_AVX512F") (V32HI "TARGET_AVX512BW") (V64QI "TARGET_AVX512VBMI") (V32HF "TARGET_AVX512FP16")]) (define_expand "vec_perm" [(match_operand:VEC_PERM_AVX2 0 "register_operand") (match_operand:VEC_PERM_AVX2 1 "register_operand") (match_operand:VEC_PERM_AVX2 2 "register_operand") (match_operand: 3 "register_operand")] "TARGET_SSSE3 || TARGET_AVX || TARGET_XOP" { ix86_expand_vec_perm (operands); DONE; }) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; Parallel bitwise logical operations ;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define_expand "one_cmpl2" [(set (match_operand:VI 0 "register_operand") (xor:VI (match_operand:VI 1 "vector_operand") (match_dup 2)))] "TARGET_SSE" { operands[2] = CONSTM1_RTX 
(mode); if (!TARGET_AVX512F) operands[2] = force_reg (mode, operands[2]); }) (define_insn "one_cmpl2" [(set (match_operand:VI 0 "register_operand" "=v,v") (xor:VI (match_operand:VI 1 "nonimmediate_operand" "v,m") (match_operand:VI 2 "vector_all_ones_operand" "BC,BC")))] "TARGET_AVX512F && (! || mode == SImode || mode == DImode)" { if (TARGET_AVX512VL) return "vpternlog\t{$0x55, %1, %0, %0|%0, %0, %1, 0x55}"; else return "vpternlog\t{$0x55, %g1, %g0, %g0|%g0, %g0, %g1, 0x55}"; } [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set (attr "mode") (if_then_else (match_test "TARGET_AVX512VL") (const_string "") (const_string "XI"))) (set (attr "enabled") (if_then_else (eq_attr "alternative" "1") (symbol_ref " == 64 || TARGET_AVX512VL") (const_int 1)))]) (define_expand "_andnot3" [(set (match_operand:VI_AVX2 0 "register_operand") (and:VI_AVX2 (not:VI_AVX2 (match_operand:VI_AVX2 1 "register_operand")) (match_operand:VI_AVX2 2 "vector_operand")))] "TARGET_SSE2") (define_expand "_andnot3_mask" [(set (match_operand:VI48_AVX512VL 0 "register_operand") (vec_merge:VI48_AVX512VL (and:VI48_AVX512VL (not:VI48_AVX512VL (match_operand:VI48_AVX512VL 1 "register_operand")) (match_operand:VI48_AVX512VL 2 "nonimmediate_operand")) (match_operand:VI48_AVX512VL 3 "nonimm_or_0_operand") (match_operand: 4 "register_operand")))] "TARGET_AVX512F") (define_expand "_andnot3_mask" [(set (match_operand:VI12_AVX512VL 0 "register_operand") (vec_merge:VI12_AVX512VL (and:VI12_AVX512VL (not:VI12_AVX512VL (match_operand:VI12_AVX512VL 1 "register_operand")) (match_operand:VI12_AVX512VL 2 "nonimmediate_operand")) (match_operand:VI12_AVX512VL 3 "nonimm_or_0_operand") (match_operand: 4 "register_operand")))] "TARGET_AVX512BW") (define_insn "*andnot3" [(set (match_operand:VI 0 "register_operand" "=x,x,v") (and:VI (not:VI (match_operand:VI 1 "vector_operand" "0,x,v")) (match_operand:VI 2 "bcst_vector_operand" "xBm,xm,vmBr")))] "TARGET_SSE" { char buf[64]; const char *ops; const char *tmp; const char 
*ssesuffix; switch (get_attr_mode (insn)) { case MODE_XI: gcc_assert (TARGET_AVX512F); /* FALLTHRU */ case MODE_OI: gcc_assert (TARGET_AVX2); /* FALLTHRU */ case MODE_TI: gcc_assert (TARGET_SSE2); tmp = "pandn"; switch (mode) { case E_V64QImode: case E_V32HImode: /* There is no vpandnb or vpandnw instruction, nor vpandn for 512-bit vectors. Use vpandnq instead. */ ssesuffix = "q"; break; case E_V16SImode: case E_V8DImode: ssesuffix = ""; break; case E_V8SImode: case E_V4DImode: case E_V4SImode: case E_V2DImode: ssesuffix = (TARGET_AVX512VL && which_alternative == 2 ? "" : ""); break; default: ssesuffix = TARGET_AVX512VL && which_alternative == 2 ? "q" : ""; } break; case MODE_V16SF: gcc_assert (TARGET_AVX512F); /* FALLTHRU */ case MODE_V8SF: gcc_assert (TARGET_AVX); /* FALLTHRU */ case MODE_V4SF: gcc_assert (TARGET_SSE); tmp = "andn"; ssesuffix = "ps"; break; default: gcc_unreachable (); } switch (which_alternative) { case 0: ops = "%s%s\t{%%2, %%0|%%0, %%2}"; break; case 1: case 2: ops = "v%s%s\t{%%2, %%1, %%0|%%0, %%1, %%2}"; break; default: gcc_unreachable (); } snprintf (buf, sizeof (buf), ops, tmp, ssesuffix); output_asm_insn (buf, operands); return ""; } [(set_attr "isa" "noavx,avx,avx") (set_attr "type" "sselog") (set (attr "prefix_data16") (if_then_else (and (eq_attr "alternative" "0") (eq_attr "mode" "TI")) (const_string "1") (const_string "*"))) (set_attr "prefix" "orig,vex,evex") (set (attr "mode") (cond [(match_test "TARGET_AVX2") (const_string "") (match_test "TARGET_AVX") (if_then_else (match_test " > 16") (const_string "V8SF") (const_string "")) (ior (not (match_test "TARGET_SSE2")) (match_test "optimize_function_for_size_p (cfun)")) (const_string "V4SF") ] (const_string "")))]) ;; PR target/100711: Split notl; vpbroadcastd; vpand as vpbroadcastd; vpandn (define_split [(set (match_operand:VI48_128 0 "register_operand") (and:VI48_128 (vec_duplicate:VI48_128 (not: (match_operand: 1 "register_operand"))) (match_operand:VI48_128 2 "vector_operand")))] 
"TARGET_SSE" [(set (match_dup 3) (vec_duplicate:VI48_128 (match_dup 1))) (set (match_dup 0) (and:VI48_128 (not:VI48_128 (match_dup 3)) (match_dup 2)))] "operands[3] = gen_reg_rtx (mode);") ;; PR target/100711: Split notl; vpbroadcastd; vpand as vpbroadcastd; vpandn (define_split [(set (match_operand:VI124_AVX2 0 "register_operand") (and:VI124_AVX2 (vec_duplicate:VI124_AVX2 (not: (match_operand: 1 "register_operand"))) (match_operand:VI124_AVX2 2 "vector_operand")))] "TARGET_AVX2" [(set (match_dup 3) (vec_duplicate:VI124_AVX2 (match_dup 1))) (set (match_dup 0) (and:VI124_AVX2 (not:VI124_AVX2 (match_dup 3)) (match_dup 2)))] "operands[3] = gen_reg_rtx (mode);") (define_insn "*andnot3_mask" [(set (match_operand:VI48_AVX512VL 0 "register_operand" "=v") (vec_merge:VI48_AVX512VL (and:VI48_AVX512VL (not:VI48_AVX512VL (match_operand:VI48_AVX512VL 1 "register_operand" "v")) (match_operand:VI48_AVX512VL 2 "nonimmediate_operand" "vm")) (match_operand:VI48_AVX512VL 3 "nonimm_or_0_operand" "0C") (match_operand: 4 "register_operand" "Yk")))] "TARGET_AVX512F" "vpandn\t{%2, %1, %0%{%4%}%N3|%0%{%4%}%N3, %1, %2}"; [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "3" [(set (match_operand:VI 0 "register_operand") (any_logic:VI (match_operand:VI 1 "nonimmediate_or_const_vector_operand") (match_operand:VI 2 "nonimmediate_or_const_vector_operand")))] "TARGET_SSE" { ix86_expand_vector_logical_operator (, mode, operands); DONE; }) (define_expand "cond_" [(set (match_operand:VI48_AVX512VL 0 "register_operand") (vec_merge:VI48_AVX512VL (any_logic:VI48_AVX512VL (match_operand:VI48_AVX512VL 2 "vector_operand") (match_operand:VI48_AVX512VL 3 "vector_operand")) (match_operand:VI48_AVX512VL 4 "nonimm_or_0_operand") (match_operand: 1 "register_operand")))] "TARGET_AVX512F" { emit_insn (gen_3_mask (operands[0], operands[2], operands[3], operands[4], operands[1])); DONE; }) (define_expand "3_mask" [(set (match_operand:VI48_AVX512VL 0 "register_operand") 
(vec_merge:VI48_AVX512VL (any_logic:VI48_AVX512VL (match_operand:VI48_AVX512VL 1 "bcst_vector_operand") (match_operand:VI48_AVX512VL 2 "bcst_vector_operand")) (match_operand:VI48_AVX512VL 3 "nonimm_or_0_operand") (match_operand: 4 "register_operand")))] "TARGET_AVX512F" "ix86_fixup_binary_operands_no_copy (, mode, operands);") (define_insn "*3" [(set (match_operand:VI48_AVX_AVX512F 0 "register_operand" "=x,x,v") (any_logic:VI48_AVX_AVX512F (match_operand:VI48_AVX_AVX512F 1 "bcst_vector_operand" "%0,x,v") (match_operand:VI48_AVX_AVX512F 2 "bcst_vector_operand" "xBm,xm,vmBr")))] "TARGET_SSE && && ix86_binary_operator_ok (, mode, operands)" { char buf[64]; const char *ops; const char *tmp; const char *ssesuffix; switch (get_attr_mode (insn)) { case MODE_XI: gcc_assert (TARGET_AVX512F); /* FALLTHRU */ case MODE_OI: gcc_assert (TARGET_AVX2); /* FALLTHRU */ case MODE_TI: gcc_assert (TARGET_SSE2); tmp = "p"; switch (mode) { case E_V16SImode: case E_V8DImode: ssesuffix = ""; break; case E_V8SImode: case E_V4DImode: case E_V4SImode: case E_V2DImode: ssesuffix = (TARGET_AVX512VL && ( || which_alternative == 2) ? 
"" : ""); break; default: gcc_unreachable (); } break; case MODE_V8SF: gcc_assert (TARGET_AVX); /* FALLTHRU */ case MODE_V4SF: gcc_assert (TARGET_SSE); tmp = ""; ssesuffix = "ps"; break; default: gcc_unreachable (); } switch (which_alternative) { case 0: if () ops = "v%s%s\t{%%2, %%0, %%0|%%0, %%0, %%2}"; else ops = "%s%s\t{%%2, %%0|%%0, %%2}"; break; case 1: case 2: ops = "v%s%s\t{%%2, %%1, %%0|%%0, %%1, %%2}"; break; default: gcc_unreachable (); } snprintf (buf, sizeof (buf), ops, tmp, ssesuffix); output_asm_insn (buf, operands); return ""; } [(set_attr "isa" "noavx,avx,avx") (set_attr "type" "sselog") (set (attr "prefix_data16") (if_then_else (and (eq_attr "alternative" "0") (eq_attr "mode" "TI")) (const_string "1") (const_string "*"))) (set_attr "prefix" ",evex") (set (attr "mode") (cond [(match_test "TARGET_AVX2") (const_string "") (match_test "TARGET_AVX") (if_then_else (match_test " > 16") (const_string "V8SF") (const_string "")) (ior (not (match_test "TARGET_SSE2")) (match_test "optimize_function_for_size_p (cfun)")) (const_string "V4SF") ] (const_string "")))]) (define_insn "*3" [(set (match_operand:VI12_AVX_AVX512F 0 "register_operand" "=x,x,v") (any_logic:VI12_AVX_AVX512F (match_operand:VI12_AVX_AVX512F 1 "vector_operand" "%0,x,v") (match_operand:VI12_AVX_AVX512F 2 "vector_operand" "xBm,xm,vm")))] "TARGET_SSE && !(MEM_P (operands[1]) && MEM_P (operands[2]))" { char buf[64]; const char *ops; const char *tmp; const char *ssesuffix; switch (get_attr_mode (insn)) { case MODE_XI: gcc_assert (TARGET_AVX512F); /* FALLTHRU */ case MODE_OI: gcc_assert (TARGET_AVX2); /* FALLTHRU */ case MODE_TI: gcc_assert (TARGET_SSE2); tmp = "p"; switch (mode) { case E_V64QImode: case E_V32HImode: ssesuffix = "q"; break; case E_V32QImode: case E_V16HImode: case E_V16QImode: case E_V8HImode: ssesuffix = TARGET_AVX512VL && which_alternative == 2 ? 
"q" : ""; break; default: gcc_unreachable (); } break; case MODE_V8SF: gcc_assert (TARGET_AVX); /* FALLTHRU */ case MODE_V4SF: gcc_assert (TARGET_SSE); tmp = ""; ssesuffix = "ps"; break; default: gcc_unreachable (); } switch (which_alternative) { case 0: ops = "%s%s\t{%%2, %%0|%%0, %%2}"; break; case 1: case 2: ops = "v%s%s\t{%%2, %%1, %%0|%%0, %%1, %%2}"; break; default: gcc_unreachable (); } snprintf (buf, sizeof (buf), ops, tmp, ssesuffix); output_asm_insn (buf, operands); return ""; } [(set_attr "isa" "noavx,avx,avx") (set_attr "type" "sselog") (set (attr "prefix_data16") (if_then_else (and (eq_attr "alternative" "0") (eq_attr "mode" "TI")) (const_string "1") (const_string "*"))) (set_attr "prefix" "orig,vex,evex") (set (attr "mode") (cond [(match_test "TARGET_AVX2") (const_string "") (match_test "TARGET_AVX") (if_then_else (match_test " > 16") (const_string "V8SF") (const_string "")) (ior (not (match_test "TARGET_SSE2")) (match_test "optimize_function_for_size_p (cfun)")) (const_string "V4SF") ] (const_string "")))]) (define_insn "v1ti3" [(set (match_operand:V1TI 0 "register_operand" "=x,x,v") (any_logic:V1TI (match_operand:V1TI 1 "register_operand" "%0,x,v") (match_operand:V1TI 2 "vector_operand" "xBm,xm,vm")))] "TARGET_SSE2" "@ p\t{%2, %0|%0, %2} vp\t{%2, %1, %0|%0, %1, %2} vpd\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx,avx512vl") (set_attr "prefix" "orig,vex,evex") (set_attr "prefix_data16" "1,*,*") (set_attr "type" "sselog") (set_attr "mode" "TI")]) (define_expand "one_cmplv1ti2" [(set (match_operand:V1TI 0 "register_operand") (xor:V1TI (match_operand:V1TI 1 "register_operand") (match_dup 2)))] "TARGET_SSE2" { operands[2] = force_reg (V1TImode, CONSTM1_RTX (V1TImode)); }) (define_mode_iterator AVX512ZEXTMASK [(DI "TARGET_AVX512BW") (SI "TARGET_AVX512BW") HI]) (define_insn "_testm3" [(set (match_operand: 0 "register_operand" "=k") (unspec: [(match_operand:VI1248_AVX512VLBW 1 "register_operand" "v") (match_operand:VI1248_AVX512VLBW 2 
"nonimmediate_operand" "vm")] UNSPEC_TESTM))] "TARGET_AVX512F" "vptestm\t{%2, %1, %0|%0, %1, %2}" [(set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "_testnm3" [(set (match_operand: 0 "register_operand" "=k") (unspec: [(match_operand:VI1248_AVX512VLBW 1 "register_operand" "v") (match_operand:VI1248_AVX512VLBW 2 "nonimmediate_operand" "vm")] UNSPEC_TESTNM))] "TARGET_AVX512F" "vptestnm\t{%2, %1, %0|%0, %1, %2}" [(set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "*_testm3_zext" [(set (match_operand:AVX512ZEXTMASK 0 "register_operand" "=k") (zero_extend:AVX512ZEXTMASK (unspec: [(match_operand:VI1248_AVX512VLBW 1 "register_operand" "v") (match_operand:VI1248_AVX512VLBW 2 "nonimmediate_operand" "vm")] UNSPEC_TESTM)))] "TARGET_AVX512BW && ( > GET_MODE_SIZE (mode))" "vptestm\t{%2, %1, %0|%0, %1, %2}" [(set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "*_testm3_zext_mask" [(set (match_operand:AVX512ZEXTMASK 0 "register_operand" "=k") (zero_extend:AVX512ZEXTMASK (and: (unspec: [(match_operand:VI1248_AVX512VLBW 1 "register_operand" "v") (match_operand:VI1248_AVX512VLBW 2 "nonimmediate_operand" "vm")] UNSPEC_TESTM) (match_operand: 3 "register_operand" "Yk"))))] "TARGET_AVX512BW && ( > GET_MODE_SIZE (mode))" "vptestm\t{%2, %1, %0%{%3%}|%0%{%3%}, %1, %2}" [(set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "*_testnm3_zext" [(set (match_operand:AVX512ZEXTMASK 0 "register_operand" "=k") (zero_extend:AVX512ZEXTMASK (unspec: [(match_operand:VI1248_AVX512VLBW 1 "register_operand" "v") (match_operand:VI1248_AVX512VLBW 2 "nonimmediate_operand" "vm")] UNSPEC_TESTNM)))] "TARGET_AVX512BW && ( > GET_MODE_SIZE (mode))" "vptestnm\t{%2, %1, %0|%0, %1, %2}" [(set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "*_testnm3_zext_mask" [(set (match_operand:AVX512ZEXTMASK 0 "register_operand" "=k") (zero_extend:AVX512ZEXTMASK (and: (unspec: [(match_operand:VI1248_AVX512VLBW 1 "register_operand" "v") (match_operand:VI1248_AVX512VLBW 2 
"nonimmediate_operand" "vm")] UNSPEC_TESTNM) (match_operand: 3 "register_operand" "Yk"))))] "TARGET_AVX512BW && ( > GET_MODE_SIZE (mode))" "vptestnm\t{%2, %1, %0%{%3%}|%0%{%3%}, %1, %2}" [(set_attr "prefix" "evex") (set_attr "mode" "")]) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; Parallel integral element swizzling ;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define_expand "vec_pack_trunc_" [(match_operand: 0 "register_operand") (match_operand:VI248_AVX2_8_AVX512F_24_AVX512BW 1 "register_operand") (match_operand:VI248_AVX2_8_AVX512F_24_AVX512BW 2 "register_operand")] "TARGET_SSE2" { rtx op1 = gen_lowpart (mode, operands[1]); rtx op2 = gen_lowpart (mode, operands[2]); ix86_expand_vec_extract_even_odd (operands[0], op1, op2, 0); DONE; }) (define_expand "vec_pack_trunc_qi" [(set (match_operand:HI 0 "register_operand") (ior:HI (ashift:HI (zero_extend:HI (match_operand:QI 2 "register_operand")) (const_int 8)) (zero_extend:HI (match_operand:QI 1 "register_operand"))))] "TARGET_AVX512F") (define_expand "vec_pack_trunc_" [(set (match_operand: 0 "register_operand") (ior: (ashift: (zero_extend: (match_operand:SWI24 2 "register_operand")) (match_dup 3)) (zero_extend: (match_operand:SWI24 1 "register_operand"))))] "TARGET_AVX512BW" { operands[3] = GEN_INT (GET_MODE_BITSIZE (mode)); }) (define_expand "vec_pack_sbool_trunc_qi" [(match_operand:QI 0 "register_operand") (match_operand:QI 1 "register_operand") (match_operand:QI 2 "register_operand") (match_operand:QI 3 "const_int_operand")] "TARGET_AVX512F" { HOST_WIDE_INT nunits = INTVAL (operands[3]); rtx mask, tem1, tem2; if (nunits != 8 && nunits != 4) FAIL; mask = gen_reg_rtx (QImode); emit_move_insn (mask, GEN_INT ((1 << (nunits / 2)) - 1)); tem1 = gen_reg_rtx (QImode); emit_insn (gen_kandqi (tem1, operands[1], mask)); if (TARGET_AVX512DQ) { tem2 = gen_reg_rtx (QImode); emit_insn (gen_kashiftqi (tem2, operands[2], GEN_INT (nunits / 2))); } else { tem2 = gen_reg_rtx 
(HImode); emit_insn (gen_kashifthi (tem2, lowpart_subreg (HImode, operands[2], QImode), GEN_INT (nunits / 2))); tem2 = lowpart_subreg (QImode, tem2, HImode); } emit_insn (gen_kiorqi (operands[0], tem1, tem2)); DONE; }) (define_insn "_packsswb" [(set (match_operand:VI1_AVX512 0 "register_operand" "=x,") (vec_concat:VI1_AVX512 (ss_truncate: (match_operand: 1 "register_operand" "0,")) (ss_truncate: (match_operand: 2 "vector_operand" "xBm,m"))))] "TARGET_SSE2 && && " "@ packsswb\t{%2, %0|%0, %2} vpacksswb\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sselog") (set_attr "prefix_data16" "1,*") (set_attr "prefix" "orig,") (set_attr "mode" "")]) (define_insn "_packssdw" [(set (match_operand:VI2_AVX2 0 "register_operand" "=x,") (vec_concat:VI2_AVX2 (ss_truncate: (match_operand: 1 "register_operand" "0,")) (ss_truncate: (match_operand: 2 "vector_operand" "xBm,m"))))] "TARGET_SSE2 && && " "@ packssdw\t{%2, %0|%0, %2} vpackssdw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sselog") (set_attr "prefix_data16" "1,*") (set_attr "prefix" "orig,") (set_attr "mode" "")]) (define_insn "_packuswb" [(set (match_operand:VI1_AVX512 0 "register_operand" "=x,") (vec_concat:VI1_AVX512 (us_truncate: (match_operand: 1 "register_operand" "0,")) (us_truncate: (match_operand: 2 "vector_operand" "xBm,m"))))] "TARGET_SSE2 && && " "@ packuswb\t{%2, %0|%0, %2} vpackuswb\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sselog") (set_attr "prefix_data16" "1,*") (set_attr "prefix" "orig,") (set_attr "mode" "")]) (define_insn "avx512bw_interleave_highv64qi" [(set (match_operand:V64QI 0 "register_operand" "=v") (vec_select:V64QI (vec_concat:V128QI (match_operand:V64QI 1 "register_operand" "v") (match_operand:V64QI 2 "nonimmediate_operand" "vm")) (parallel [(const_int 8) (const_int 72) (const_int 9) (const_int 73) (const_int 10) (const_int 74) (const_int 11) (const_int 75) (const_int 12) (const_int 76) (const_int 13) 
(const_int 77) (const_int 14) (const_int 78) (const_int 15) (const_int 79) (const_int 24) (const_int 88) (const_int 25) (const_int 89) (const_int 26) (const_int 90) (const_int 27) (const_int 91) (const_int 28) (const_int 92) (const_int 29) (const_int 93) (const_int 30) (const_int 94) (const_int 31) (const_int 95) (const_int 40) (const_int 104) (const_int 41) (const_int 105) (const_int 42) (const_int 106) (const_int 43) (const_int 107) (const_int 44) (const_int 108) (const_int 45) (const_int 109) (const_int 46) (const_int 110) (const_int 47) (const_int 111) (const_int 56) (const_int 120) (const_int 57) (const_int 121) (const_int 58) (const_int 122) (const_int 59) (const_int 123) (const_int 60) (const_int 124) (const_int 61) (const_int 125) (const_int 62) (const_int 126) (const_int 63) (const_int 127)])))] "TARGET_AVX512BW" "vpunpckhbw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_insn "avx2_interleave_highv32qi" [(set (match_operand:V32QI 0 "register_operand" "=Yw") (vec_select:V32QI (vec_concat:V64QI (match_operand:V32QI 1 "register_operand" "Yw") (match_operand:V32QI 2 "nonimmediate_operand" "Ywm")) (parallel [(const_int 8) (const_int 40) (const_int 9) (const_int 41) (const_int 10) (const_int 42) (const_int 11) (const_int 43) (const_int 12) (const_int 44) (const_int 13) (const_int 45) (const_int 14) (const_int 46) (const_int 15) (const_int 47) (const_int 24) (const_int 56) (const_int 25) (const_int 57) (const_int 26) (const_int 58) (const_int 27) (const_int 59) (const_int 28) (const_int 60) (const_int 29) (const_int 61) (const_int 30) (const_int 62) (const_int 31) (const_int 63)])))] "TARGET_AVX2 && && " "vpunpckhbw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "") (set_attr "mode" "OI")]) (define_insn "vec_interleave_highv16qi" [(set (match_operand:V16QI 0 "register_operand" "=x,Yw") (vec_select:V16QI (vec_concat:V32QI (match_operand:V16QI 1 "register_operand" "0,Yw") 
(match_operand:V16QI 2 "vector_operand" "xBm,Ywm")) (parallel [(const_int 8) (const_int 24) (const_int 9) (const_int 25) (const_int 10) (const_int 26) (const_int 11) (const_int 27) (const_int 12) (const_int 28) (const_int 13) (const_int 29) (const_int 14) (const_int 30) (const_int 15) (const_int 31)])))] "TARGET_SSE2 && && " "@ punpckhbw\t{%2, %0|%0, %2} vpunpckhbw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sselog") (set_attr "prefix_data16" "1,*") (set_attr "prefix" "orig,") (set_attr "mode" "TI")]) (define_insn "avx512bw_interleave_lowv64qi" [(set (match_operand:V64QI 0 "register_operand" "=v") (vec_select:V64QI (vec_concat:V128QI (match_operand:V64QI 1 "register_operand" "v") (match_operand:V64QI 2 "nonimmediate_operand" "vm")) (parallel [(const_int 0) (const_int 64) (const_int 1) (const_int 65) (const_int 2) (const_int 66) (const_int 3) (const_int 67) (const_int 4) (const_int 68) (const_int 5) (const_int 69) (const_int 6) (const_int 70) (const_int 7) (const_int 71) (const_int 16) (const_int 80) (const_int 17) (const_int 81) (const_int 18) (const_int 82) (const_int 19) (const_int 83) (const_int 20) (const_int 84) (const_int 21) (const_int 85) (const_int 22) (const_int 86) (const_int 23) (const_int 87) (const_int 32) (const_int 96) (const_int 33) (const_int 97) (const_int 34) (const_int 98) (const_int 35) (const_int 99) (const_int 36) (const_int 100) (const_int 37) (const_int 101) (const_int 38) (const_int 102) (const_int 39) (const_int 103) (const_int 48) (const_int 112) (const_int 49) (const_int 113) (const_int 50) (const_int 114) (const_int 51) (const_int 115) (const_int 52) (const_int 116) (const_int 53) (const_int 117) (const_int 54) (const_int 118) (const_int 55) (const_int 119)])))] "TARGET_AVX512BW" "vpunpcklbw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_insn "avx2_interleave_lowv32qi" [(set (match_operand:V32QI 0 "register_operand" "=Yw") 
(vec_select:V32QI (vec_concat:V64QI (match_operand:V32QI 1 "register_operand" "Yw") (match_operand:V32QI 2 "nonimmediate_operand" "Ywm")) (parallel [(const_int 0) (const_int 32) (const_int 1) (const_int 33) (const_int 2) (const_int 34) (const_int 3) (const_int 35) (const_int 4) (const_int 36) (const_int 5) (const_int 37) (const_int 6) (const_int 38) (const_int 7) (const_int 39) (const_int 16) (const_int 48) (const_int 17) (const_int 49) (const_int 18) (const_int 50) (const_int 19) (const_int 51) (const_int 20) (const_int 52) (const_int 21) (const_int 53) (const_int 22) (const_int 54) (const_int 23) (const_int 55)])))] "TARGET_AVX2 && && " "vpunpcklbw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "maybe_vex") (set_attr "mode" "OI")]) (define_insn "vec_interleave_lowv16qi" [(set (match_operand:V16QI 0 "register_operand" "=x,Yw") (vec_select:V16QI (vec_concat:V32QI (match_operand:V16QI 1 "register_operand" "0,Yw") (match_operand:V16QI 2 "vector_operand" "xBm,Ywm")) (parallel [(const_int 0) (const_int 16) (const_int 1) (const_int 17) (const_int 2) (const_int 18) (const_int 3) (const_int 19) (const_int 4) (const_int 20) (const_int 5) (const_int 21) (const_int 6) (const_int 22) (const_int 7) (const_int 23)])))] "TARGET_SSE2 && && " "@ punpcklbw\t{%2, %0|%0, %2} vpunpcklbw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sselog") (set_attr "prefix_data16" "1,*") (set_attr "prefix" "orig,vex") (set_attr "mode" "TI")]) (define_insn "avx512bw_interleave_high" [(set (match_operand:V32_512 0 "register_operand" "=v") (vec_select:V32_512 (vec_concat: (match_operand:V32_512 1 "register_operand" "v") (match_operand:V32_512 2 "nonimmediate_operand" "vm")) (parallel [(const_int 4) (const_int 36) (const_int 5) (const_int 37) (const_int 6) (const_int 38) (const_int 7) (const_int 39) (const_int 12) (const_int 44) (const_int 13) (const_int 45) (const_int 14) (const_int 46) (const_int 15) (const_int 47) (const_int 20) (const_int 52) 
(const_int 21) (const_int 53) (const_int 22) (const_int 54) (const_int 23) (const_int 55) (const_int 28) (const_int 60) (const_int 29) (const_int 61) (const_int 30) (const_int 62) (const_int 31) (const_int 63)])))] "TARGET_AVX512BW" "vpunpckhwd\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_insn "avx2_interleave_high" [(set (match_operand:V16_256 0 "register_operand" "=Yw") (vec_select:V16_256 (vec_concat: (match_operand:V16_256 1 "register_operand" "Yw") (match_operand:V16_256 2 "nonimmediate_operand" "Ywm")) (parallel [(const_int 4) (const_int 20) (const_int 5) (const_int 21) (const_int 6) (const_int 22) (const_int 7) (const_int 23) (const_int 12) (const_int 28) (const_int 13) (const_int 29) (const_int 14) (const_int 30) (const_int 15) (const_int 31)])))] "TARGET_AVX2 && && " "vpunpckhwd\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "maybe_evex") (set_attr "mode" "OI")]) (define_insn "vec_interleave_high" [(set (match_operand:V8_128 0 "register_operand" "=x,Yw") (vec_select:V8_128 (vec_concat: (match_operand:V8_128 1 "register_operand" "0,Yw") (match_operand:V8_128 2 "vector_operand" "xBm,Ywm")) (parallel [(const_int 4) (const_int 12) (const_int 5) (const_int 13) (const_int 6) (const_int 14) (const_int 7) (const_int 15)])))] "TARGET_SSE2 && && " "@ punpckhwd\t{%2, %0|%0, %2} vpunpckhwd\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sselog") (set_attr "prefix_data16" "1,*") (set_attr "prefix" "orig,maybe_vex") (set_attr "mode" "TI")]) (define_insn "avx512bw_interleave_low" [(set (match_operand:V32_512 0 "register_operand" "=v") (vec_select:V32_512 (vec_concat: (match_operand:V32_512 1 "register_operand" "v") (match_operand:V32_512 2 "nonimmediate_operand" "vm")) (parallel [(const_int 0) (const_int 32) (const_int 1) (const_int 33) (const_int 2) (const_int 34) (const_int 3) (const_int 35) (const_int 8) (const_int 40) (const_int 9) (const_int 41) 
(const_int 10) (const_int 42) (const_int 11) (const_int 43) (const_int 16) (const_int 48) (const_int 17) (const_int 49) (const_int 18) (const_int 50) (const_int 19) (const_int 51) (const_int 24) (const_int 56) (const_int 25) (const_int 57) (const_int 26) (const_int 58) (const_int 27) (const_int 59)])))] "TARGET_AVX512BW" "vpunpcklwd\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_insn "avx2_interleave_low" [(set (match_operand:V16_256 0 "register_operand" "=Yw") (vec_select:V16_256 (vec_concat: (match_operand:V16_256 1 "register_operand" "Yw") (match_operand:V16_256 2 "nonimmediate_operand" "Ywm")) (parallel [(const_int 0) (const_int 16) (const_int 1) (const_int 17) (const_int 2) (const_int 18) (const_int 3) (const_int 19) (const_int 8) (const_int 24) (const_int 9) (const_int 25) (const_int 10) (const_int 26) (const_int 11) (const_int 27)])))] "TARGET_AVX2 && && " "vpunpcklwd\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "maybe_evex") (set_attr "mode" "OI")]) (define_insn "vec_interleave_low" [(set (match_operand:V8_128 0 "register_operand" "=x,Yw") (vec_select:V8_128 (vec_concat: (match_operand:V8_128 1 "register_operand" "0,Yw") (match_operand:V8_128 2 "vector_operand" "xBm,Ywm")) (parallel [(const_int 0) (const_int 8) (const_int 1) (const_int 9) (const_int 2) (const_int 10) (const_int 3) (const_int 11)])))] "TARGET_SSE2 && && " "@ punpcklwd\t{%2, %0|%0, %2} vpunpcklwd\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sselog") (set_attr "prefix_data16" "1,*") (set_attr "prefix" "orig,maybe_evex") (set_attr "mode" "TI")]) (define_insn "avx2_interleave_highv8si" [(set (match_operand:V8SI 0 "register_operand" "=v") (vec_select:V8SI (vec_concat:V16SI (match_operand:V8SI 1 "register_operand" "v") (match_operand:V8SI 2 "nonimmediate_operand" "vm")) (parallel [(const_int 2) (const_int 10) (const_int 3) (const_int 11) (const_int 6) (const_int 14) 
(const_int 7) (const_int 15)])))] "TARGET_AVX2 && " "vpunpckhdq\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "maybe_evex") (set_attr "mode" "OI")]) (define_insn "avx512f_interleave_highv16si" [(set (match_operand:V16SI 0 "register_operand" "=v") (vec_select:V16SI (vec_concat:V32SI (match_operand:V16SI 1 "register_operand" "v") (match_operand:V16SI 2 "nonimmediate_operand" "vm")) (parallel [(const_int 2) (const_int 18) (const_int 3) (const_int 19) (const_int 6) (const_int 22) (const_int 7) (const_int 23) (const_int 10) (const_int 26) (const_int 11) (const_int 27) (const_int 14) (const_int 30) (const_int 15) (const_int 31)])))] "TARGET_AVX512F" "vpunpckhdq\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_insn "vec_interleave_highv4si" [(set (match_operand:V4SI 0 "register_operand" "=x,v") (vec_select:V4SI (vec_concat:V8SI (match_operand:V4SI 1 "register_operand" "0,v") (match_operand:V4SI 2 "vector_operand" "xBm,vm")) (parallel [(const_int 2) (const_int 6) (const_int 3) (const_int 7)])))] "TARGET_SSE2 && " "@ punpckhdq\t{%2, %0|%0, %2} vpunpckhdq\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sselog") (set_attr "prefix_data16" "1,*") (set_attr "prefix" "orig,maybe_vex") (set_attr "mode" "TI")]) (define_insn "avx2_interleave_lowv8si" [(set (match_operand:V8SI 0 "register_operand" "=v") (vec_select:V8SI (vec_concat:V16SI (match_operand:V8SI 1 "register_operand" "v") (match_operand:V8SI 2 "nonimmediate_operand" "vm")) (parallel [(const_int 0) (const_int 8) (const_int 1) (const_int 9) (const_int 4) (const_int 12) (const_int 5) (const_int 13)])))] "TARGET_AVX2 && " "vpunpckldq\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "maybe_evex") (set_attr "mode" "OI")]) (define_insn "avx512f_interleave_lowv16si" [(set (match_operand:V16SI 0 "register_operand" "=v") (vec_select:V16SI (vec_concat:V32SI (match_operand:V16SI 1 
"register_operand" "v") (match_operand:V16SI 2 "nonimmediate_operand" "vm")) (parallel [(const_int 0) (const_int 16) (const_int 1) (const_int 17) (const_int 4) (const_int 20) (const_int 5) (const_int 21) (const_int 8) (const_int 24) (const_int 9) (const_int 25) (const_int 12) (const_int 28) (const_int 13) (const_int 29)])))] "TARGET_AVX512F" "vpunpckldq\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_insn "vec_interleave_lowv4si" [(set (match_operand:V4SI 0 "register_operand" "=x,v") (vec_select:V4SI (vec_concat:V8SI (match_operand:V4SI 1 "register_operand" "0,v") (match_operand:V4SI 2 "vector_operand" "xBm,vm")) (parallel [(const_int 0) (const_int 4) (const_int 1) (const_int 5)])))] "TARGET_SSE2 && " "@ punpckldq\t{%2, %0|%0, %2} vpunpckldq\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sselog") (set_attr "prefix_data16" "1,*") (set_attr "prefix" "orig,vex") (set_attr "mode" "TI")]) (define_expand "vec_interleave_high" [(match_operand:VI_256 0 "register_operand") (match_operand:VI_256 1 "register_operand") (match_operand:VI_256 2 "nonimmediate_operand")] "TARGET_AVX2" { rtx t1 = gen_reg_rtx (mode); rtx t2 = gen_reg_rtx (mode); rtx t3 = gen_reg_rtx (V4DImode); emit_insn (gen_avx2_interleave_low (t1, operands[1], operands[2])); emit_insn (gen_avx2_interleave_high (t2, operands[1], operands[2])); emit_insn (gen_avx2_permv2ti (t3, gen_lowpart (V4DImode, t1), gen_lowpart (V4DImode, t2), GEN_INT (1 + (3 << 4)))); emit_move_insn (operands[0], gen_lowpart (mode, t3)); DONE; }) (define_expand "vec_interleave_low" [(match_operand:VI_256 0 "register_operand") (match_operand:VI_256 1 "register_operand") (match_operand:VI_256 2 "nonimmediate_operand")] "TARGET_AVX2" { rtx t1 = gen_reg_rtx (mode); rtx t2 = gen_reg_rtx (mode); rtx t3 = gen_reg_rtx (V4DImode); emit_insn (gen_avx2_interleave_low (t1, operands[1], operands[2])); emit_insn (gen_avx2_interleave_high (t2, operands[1], 
operands[2])); emit_insn (gen_avx2_permv2ti (t3, gen_lowpart (V4DImode, t1), gen_lowpart (V4DImode, t2), GEN_INT (0 + (2 << 4)))); emit_move_insn (operands[0], gen_lowpart (mode, t3)); DONE; }) ;; Modes handled by pinsr patterns. (define_mode_iterator PINSR_MODE [(V16QI "TARGET_SSE4_1") V8HI V8HF (V4SI "TARGET_SSE4_1") (V2DI "TARGET_SSE4_1 && TARGET_64BIT")]) (define_mode_attr sse2p4_1 [(V16QI "sse4_1") (V8HI "sse2") (V8HF "sse2") (V4SI "sse4_1") (V2DI "sse4_1")]) (define_mode_attr pinsr_evex_isa [(V16QI "avx512bw") (V8HI "avx512bw") (V8HF "avx512bw") (V4SI "avx512dq") (V2DI "avx512dq")]) ;; sse4_1_pinsrd must come before sse2_loadld since it is preferred. (define_insn "_pinsr" [(set (match_operand:PINSR_MODE 0 "register_operand" "=x,x,x,x,v,v,&x") (vec_merge:PINSR_MODE (vec_duplicate:PINSR_MODE (match_operand: 2 "nonimmediate_operand" "r,m,r,m,r,m,x")) (match_operand:PINSR_MODE 1 "register_operand" "0,0,x,x,v,v,x") (match_operand:SI 3 "const_int_operand")))] "TARGET_SSE2 && ((unsigned) exact_log2 (INTVAL (operands[3])) < GET_MODE_NUNITS (mode))" { HOST_WIDE_INT items = INTVAL (operands[3]); operands[3] = GEN_INT (exact_log2 (items)); switch (which_alternative) { case 0: if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (SImode)) return "pinsr\t{%3, %k2, %0|%0, %k2, %3}"; /* FALLTHRU */ case 1: return "pinsr\t{%3, %2, %0|%0, %2, %3}"; case 2: case 4: if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (SImode)) return "vpinsr\t{%3, %k2, %1, %0|%0, %1, %k2, %3}"; /* FALLTHRU */ case 3: case 5: return "vpinsr\t{%3, %2, %1, %0|%0, %1, %2, %3}"; case 6: /* This pattern needs to be shadowed with vec_set{v8hi,v8hf}_0. 
*/ gcc_assert (items > 1); return "#"; default: gcc_unreachable (); } } [(set_attr "isa" "noavx,noavx,avx,avx,,,avx2") (set_attr "type" "sselog") (set (attr "prefix_rex") (if_then_else (and (not (match_test "TARGET_AVX")) (match_test "GET_MODE_NUNITS (mode) == 2")) (const_string "1") (const_string "*"))) (set (attr "prefix_data16") (if_then_else (and (not (match_test "TARGET_AVX")) (match_test "GET_MODE_NUNITS (mode) == 8")) (const_string "1") (const_string "*"))) (set (attr "prefix_extra") (if_then_else (and (not (match_test "TARGET_AVX")) (match_test "GET_MODE_NUNITS (mode) == 8")) (const_string "*") (const_string "1"))) (set_attr "length_immediate" "1") (set_attr "prefix" "orig,orig,vex,vex,evex,evex,vex") (set_attr "mode" "TI") (set (attr "enabled") (cond [(and (not (match_test "GET_MODE_NUNITS (mode) == 8")) (eq_attr "alternative" "6")) (symbol_ref "false") ] (const_string "*")))]) ;; For TARGET_AVX2, implement insert from XMM reg with PBROADCASTW + PBLENDW. (define_split [(set (match_operand:V8_128 0 "sse_reg_operand") (vec_merge:V8_128 (vec_duplicate:V8_128 (match_operand: 2 "sse_reg_operand")) (match_operand:V8_128 1 "sse_reg_operand") (match_operand:SI 3 "const_int_operand")))] "TARGET_AVX2 && reload_completed && INTVAL (operands[3]) > 1 && ((unsigned) exact_log2 (INTVAL (operands[3])) < GET_MODE_NUNITS (mode))" [(set (match_dup 0) (vec_duplicate:V8_128 (match_dup 2))) (set (match_dup 0) (vec_merge:V8_128 (match_dup 0) (match_dup 1) (match_dup 3)))]) (define_expand "_vinsert_mask" [(match_operand:AVX512_VEC 0 "register_operand") (match_operand:AVX512_VEC 1 "register_operand") (match_operand: 2 "nonimmediate_operand") (match_operand:SI 3 "const_0_to_3_operand") (match_operand:AVX512_VEC 4 "register_operand") (match_operand: 5 "register_operand")] "TARGET_AVX512F" { int mask, selector; mask = INTVAL (operands[3]); selector = (GET_MODE_UNIT_SIZE (mode) == 4 ? 
0xFFFF ^ (0x000F << mask * 4) : 0xFF ^ (0x03 << mask * 2)); emit_insn (gen__vinsert_1_mask (operands[0], operands[1], operands[2], GEN_INT (selector), operands[4], operands[5])); DONE; }) (define_insn "*_vinsert_0" [(set (match_operand:AVX512_VEC 0 "register_operand" "=v,x,Yv") (vec_merge:AVX512_VEC (match_operand:AVX512_VEC 1 "reg_or_0_operand" "v,C,C") (vec_duplicate:AVX512_VEC (match_operand: 2 "nonimmediate_operand" "vm,xm,vm")) (match_operand:SI 3 "const_int_operand" "n,n,n")))] "TARGET_AVX512F && (INTVAL (operands[3]) == (GET_MODE_UNIT_SIZE (mode) == 4 ? 0xFFF0 : 0xFC))" { if (which_alternative == 0) return "vinsert\t{$0, %2, %1, %0|%0, %1, %2, 0}"; switch (mode) { case E_V8DFmode: if (misaligned_operand (operands[2], mode)) return "vmovupd\t{%2, %x0|%x0, %2}"; else return "vmovapd\t{%2, %x0|%x0, %2}"; case E_V16SFmode: if (misaligned_operand (operands[2], mode)) return "vmovups\t{%2, %x0|%x0, %2}"; else return "vmovaps\t{%2, %x0|%x0, %2}"; case E_V8DImode: if (misaligned_operand (operands[2], mode)) return which_alternative == 2 ? "vmovdqu64\t{%2, %x0|%x0, %2}" : "vmovdqu\t{%2, %x0|%x0, %2}"; else return which_alternative == 2 ? "vmovdqa64\t{%2, %x0|%x0, %2}" : "vmovdqa\t{%2, %x0|%x0, %2}"; case E_V16SImode: if (misaligned_operand (operands[2], mode)) return which_alternative == 2 ? "vmovdqu32\t{%2, %x0|%x0, %2}" : "vmovdqu\t{%2, %x0|%x0, %2}"; else return which_alternative == 2 ? 
"vmovdqa32\t{%2, %x0|%x0, %2}" : "vmovdqa\t{%2, %x0|%x0, %2}"; default: gcc_unreachable (); } } [(set_attr "type" "sselog,ssemov,ssemov") (set_attr "length_immediate" "1,0,0") (set_attr "prefix" "evex,vex,evex") (set_attr "mode" ",,")]) (define_insn "_vinsert_1" [(set (match_operand:AVX512_VEC 0 "register_operand" "=v") (vec_merge:AVX512_VEC (match_operand:AVX512_VEC 1 "register_operand" "v") (vec_duplicate:AVX512_VEC (match_operand: 2 "nonimmediate_operand" "vm")) (match_operand:SI 3 "const_int_operand" "n")))] "TARGET_AVX512F" { int mask; int selector = INTVAL (operands[3]); if (selector == (GET_MODE_UNIT_SIZE (mode) == 4 ? 0xFFF0 : 0xFC)) mask = 0; else if (selector == (GET_MODE_UNIT_SIZE (mode) == 4 ? 0xFF0F : 0xF3)) mask = 1; else if (selector == (GET_MODE_UNIT_SIZE (mode) == 4 ? 0xF0FF : 0xCF)) mask = 2; else if (selector == (GET_MODE_UNIT_SIZE (mode) == 4 ? 0x0FFF : 0x3F)) mask = 3; else gcc_unreachable (); operands[3] = GEN_INT (mask); return "vinsert\t{%3, %2, %1, %0|%0, %1, %2, %3}"; } [(set_attr "type" "sselog") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "_vinsert_mask" [(match_operand:AVX512_VEC_2 0 "register_operand") (match_operand:AVX512_VEC_2 1 "register_operand") (match_operand: 2 "nonimmediate_operand") (match_operand:SI 3 "const_0_to_1_operand") (match_operand:AVX512_VEC_2 4 "register_operand") (match_operand: 5 "register_operand")] "TARGET_AVX512F" { int mask = INTVAL (operands[3]); if (mask == 0) emit_insn (gen_vec_set_lo__mask (operands[0], operands[1], operands[2], operands[4], operands[5])); else emit_insn (gen_vec_set_hi__mask (operands[0], operands[1], operands[2], operands[4], operands[5])); DONE; }) (define_insn "vec_set_lo_" [(set (match_operand:V16FI 0 "register_operand" "=v") (vec_concat:V16FI (match_operand: 2 "nonimmediate_operand" "vm") (vec_select: (match_operand:V16FI 1 "register_operand" "v") (parallel [(const_int 8) (const_int 9) (const_int 10) (const_int 11) (const_int 
12) (const_int 13) (const_int 14) (const_int 15)]))))] "TARGET_AVX512DQ" "vinsert32x8\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}" [(set_attr "type" "sselog") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "vec_set_hi_" [(set (match_operand:V16FI 0 "register_operand" "=v") (vec_concat:V16FI (vec_select: (match_operand:V16FI 1 "register_operand" "v") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)])) (match_operand: 2 "nonimmediate_operand" "vm")))] "TARGET_AVX512DQ" "vinsert32x8\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}" [(set_attr "type" "sselog") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "vec_set_lo_" [(set (match_operand:V8FI 0 "register_operand" "=v") (vec_concat:V8FI (match_operand: 2 "nonimmediate_operand" "vm") (vec_select: (match_operand:V8FI 1 "register_operand" "v") (parallel [(const_int 4) (const_int 5) (const_int 6) (const_int 7)]))))] "TARGET_AVX512F" "vinsert64x4\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}" [(set_attr "type" "sselog") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_insn "vec_set_hi_" [(set (match_operand:V8FI 0 "register_operand" "=v") (vec_concat:V8FI (vec_select: (match_operand:V8FI 1 "register_operand" "v") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)])) (match_operand: 2 "nonimmediate_operand" "vm")))] "TARGET_AVX512F" "vinsert64x4\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}" [(set_attr "type" "sselog") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_expand "avx512dq_shuf_64x2_mask" [(match_operand:VI8F_256 0 "register_operand") (match_operand:VI8F_256 1 "register_operand") (match_operand:VI8F_256 2 "nonimmediate_operand") (match_operand:SI 3 "const_0_to_3_operand") (match_operand:VI8F_256 4 "register_operand") (match_operand:QI 5 "register_operand")] "TARGET_AVX512DQ" { 
int mask = INTVAL (operands[3]); emit_insn (gen_avx512dq_shuf_64x2_1_mask (operands[0], operands[1], operands[2], GEN_INT (((mask >> 0) & 1) * 2 + 0), GEN_INT (((mask >> 0) & 1) * 2 + 1), GEN_INT (((mask >> 1) & 1) * 2 + 4), GEN_INT (((mask >> 1) & 1) * 2 + 5), operands[4], operands[5])); DONE; }) (define_insn "avx512dq_shuf_64x2_1" [(set (match_operand:VI8F_256 0 "register_operand" "=v") (vec_select:VI8F_256 (vec_concat: (match_operand:VI8F_256 1 "register_operand" "v") (match_operand:VI8F_256 2 "nonimmediate_operand" "vm")) (parallel [(match_operand 3 "const_0_to_3_operand") (match_operand 4 "const_0_to_3_operand") (match_operand 5 "const_4_to_7_operand") (match_operand 6 "const_4_to_7_operand")])))] "TARGET_AVX512VL && (INTVAL (operands[3]) & 1) == 0 && INTVAL (operands[3]) == INTVAL (operands[4]) - 1 && (INTVAL (operands[5]) & 1) == 0 && INTVAL (operands[5]) == INTVAL (operands[6]) - 1" { int mask; mask = INTVAL (operands[3]) / 2; mask |= (INTVAL (operands[5]) - 4) / 2 << 1; operands[3] = GEN_INT (mask); return "vshuf64x2\t{%3, %2, %1, %0|%0, %1, %2, %3}"; } [(set_attr "type" "sselog") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_expand "avx512f_shuf_64x2_mask" [(match_operand:V8FI 0 "register_operand") (match_operand:V8FI 1 "register_operand") (match_operand:V8FI 2 "nonimmediate_operand") (match_operand:SI 3 "const_0_to_255_operand") (match_operand:V8FI 4 "register_operand") (match_operand:QI 5 "register_operand")] "TARGET_AVX512F" { int mask = INTVAL (operands[3]); emit_insn (gen_avx512f_shuf_64x2_1_mask (operands[0], operands[1], operands[2], GEN_INT (((mask >> 0) & 3) * 2), GEN_INT (((mask >> 0) & 3) * 2 + 1), GEN_INT (((mask >> 2) & 3) * 2), GEN_INT (((mask >> 2) & 3) * 2 + 1), GEN_INT (((mask >> 4) & 3) * 2 + 8), GEN_INT (((mask >> 4) & 3) * 2 + 9), GEN_INT (((mask >> 6) & 3) * 2 + 8), GEN_INT (((mask >> 6) & 3) * 2 + 9), operands[4], operands[5])); DONE; }) (define_insn "avx512f_shuf_64x2_1" [(set 
(match_operand:V8FI 0 "register_operand" "=v") (vec_select:V8FI (vec_concat: (match_operand:V8FI 1 "register_operand" "v") (match_operand:V8FI 2 "nonimmediate_operand" "vm")) (parallel [(match_operand 3 "const_0_to_7_operand") (match_operand 4 "const_0_to_7_operand") (match_operand 5 "const_0_to_7_operand") (match_operand 6 "const_0_to_7_operand") (match_operand 7 "const_8_to_15_operand") (match_operand 8 "const_8_to_15_operand") (match_operand 9 "const_8_to_15_operand") (match_operand 10 "const_8_to_15_operand")])))] "TARGET_AVX512F && (INTVAL (operands[3]) & 1) == 0 && INTVAL (operands[3]) == INTVAL (operands[4]) - 1 && (INTVAL (operands[5]) & 1) == 0 && INTVAL (operands[5]) == INTVAL (operands[6]) - 1 && (INTVAL (operands[7]) & 1) == 0 && INTVAL (operands[7]) == INTVAL (operands[8]) - 1 && (INTVAL (operands[9]) & 1) == 0 && INTVAL (operands[9]) == INTVAL (operands[10]) - 1" { int mask; mask = INTVAL (operands[3]) / 2; mask |= INTVAL (operands[5]) / 2 << 2; mask |= (INTVAL (operands[7]) - 8) / 2 << 4; mask |= (INTVAL (operands[9]) - 8) / 2 << 6; operands[3] = GEN_INT (mask); return "vshuf64x2\t{%3, %2, %1, %0|%0, %1, %2, %3}"; } [(set_attr "type" "sselog") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "*avx512f_shuf_64x2_1_1" [(set (match_operand:V8FI 0 "register_operand" "=v") (vec_select:V8FI (match_operand:V8FI 1 "register_operand" "v") (parallel [(match_operand 2 "const_0_to_7_operand") (match_operand 3 "const_0_to_7_operand") (match_operand 4 "const_0_to_7_operand") (match_operand 5 "const_0_to_7_operand") (match_operand 6 "const_0_to_7_operand") (match_operand 7 "const_0_to_7_operand") (match_operand 8 "const_0_to_7_operand") (match_operand 9 "const_0_to_7_operand")])))] "TARGET_AVX512F && (INTVAL (operands[2]) & 1) == 0 && INTVAL (operands[2]) == INTVAL (operands[3]) - 1 && (INTVAL (operands[4]) & 1) == 0 && INTVAL (operands[4]) == INTVAL (operands[5]) - 1 && (INTVAL (operands[6]) & 1) == 0 && INTVAL 
(operands[6]) == INTVAL (operands[7]) - 1 && (INTVAL (operands[8]) & 1) == 0 && INTVAL (operands[8]) == INTVAL (operands[9]) - 1" { int mask; mask = INTVAL (operands[2]) / 2; mask |= INTVAL (operands[4]) / 2 << 2; mask |= INTVAL (operands[6]) / 2 << 4; mask |= INTVAL (operands[8]) / 2 << 6; operands[2] = GEN_INT (mask); return "vshuf64x2\t{%2, %1, %1, %0|%0, %1, %1, %2}"; } [(set_attr "type" "sselog") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "avx512vl_shuf_32x4_mask" [(match_operand:VI4F_256 0 "register_operand") (match_operand:VI4F_256 1 "register_operand") (match_operand:VI4F_256 2 "nonimmediate_operand") (match_operand:SI 3 "const_0_to_3_operand") (match_operand:VI4F_256 4 "register_operand") (match_operand:QI 5 "register_operand")] "TARGET_AVX512VL" { int mask = INTVAL (operands[3]); emit_insn (gen_avx512vl_shuf_32x4_1_mask (operands[0], operands[1], operands[2], GEN_INT (((mask >> 0) & 1) * 4 + 0), GEN_INT (((mask >> 0) & 1) * 4 + 1), GEN_INT (((mask >> 0) & 1) * 4 + 2), GEN_INT (((mask >> 0) & 1) * 4 + 3), GEN_INT (((mask >> 1) & 1) * 4 + 8), GEN_INT (((mask >> 1) & 1) * 4 + 9), GEN_INT (((mask >> 1) & 1) * 4 + 10), GEN_INT (((mask >> 1) & 1) * 4 + 11), operands[4], operands[5])); DONE; }) (define_insn "avx512vl_shuf_32x4_1" [(set (match_operand:VI4F_256 0 "register_operand" "=v") (vec_select:VI4F_256 (vec_concat: (match_operand:VI4F_256 1 "register_operand" "v") (match_operand:VI4F_256 2 "nonimmediate_operand" "vm")) (parallel [(match_operand 3 "const_0_to_7_operand") (match_operand 4 "const_0_to_7_operand") (match_operand 5 "const_0_to_7_operand") (match_operand 6 "const_0_to_7_operand") (match_operand 7 "const_8_to_15_operand") (match_operand 8 "const_8_to_15_operand") (match_operand 9 "const_8_to_15_operand") (match_operand 10 "const_8_to_15_operand")])))] "TARGET_AVX512VL && (INTVAL (operands[3]) & 3) == 0 && INTVAL (operands[3]) == INTVAL (operands[4]) - 1 && INTVAL (operands[3]) == INTVAL 
;; NOTE(review): GCC machine description (sse.md), not assembler.  In this
;; extracted copy every angle-bracket iterator/attribute token (e.g.
;; <shuf_suffix>, <mask_name>, <sseinsnmode>) has been stripped -- visible
;; as empty strings like (set_attr "mode" "") and bare template names like
;; "vshuf32x4" -- TODO confirm against upstream gcc/config/i386/sse.md.
;; The first fragment below is the tail of a define_insn begun before this
;; chunk (its condition checks operands 3..10 form two 4-aligned runs of
;; consecutive indices, then folds them into a 2-field immediate); it is
;; followed by the avx512f_shuf_*_32x4_mask expander, which decomposes an
;; 8-bit immediate (four 2-bit lane selectors) into 16 explicit element
;; indices for gen_avx512f_shuf_*_32x4_1_mask, and by the start of the
;; avx512f_shuf_*_32x4_1 insn matching a vec_select of a vec_concat.
(operands[5]) - 2 && INTVAL (operands[3]) == INTVAL (operands[6]) - 3 && (INTVAL (operands[7]) & 3) == 0 && INTVAL (operands[7]) == INTVAL (operands[8]) - 1 && INTVAL (operands[7]) == INTVAL (operands[9]) - 2 && INTVAL (operands[7]) == INTVAL (operands[10]) - 3" { int mask; mask = INTVAL (operands[3]) / 4; mask |= (INTVAL (operands[7]) - 8) / 4 << 1; operands[3] = GEN_INT (mask); return "vshuf32x4\t{%3, %2, %1, %0|%0, %1, %2, %3}"; } [(set_attr "type" "sselog") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "avx512f_shuf_32x4_mask" [(match_operand:V16FI 0 "register_operand") (match_operand:V16FI 1 "register_operand") (match_operand:V16FI 2 "nonimmediate_operand") (match_operand:SI 3 "const_0_to_255_operand") (match_operand:V16FI 4 "register_operand") (match_operand:HI 5 "register_operand")] "TARGET_AVX512F" { int mask = INTVAL (operands[3]); emit_insn (gen_avx512f_shuf_32x4_1_mask (operands[0], operands[1], operands[2], GEN_INT (((mask >> 0) & 3) * 4), GEN_INT (((mask >> 0) & 3) * 4 + 1), GEN_INT (((mask >> 0) & 3) * 4 + 2), GEN_INT (((mask >> 0) & 3) * 4 + 3), GEN_INT (((mask >> 2) & 3) * 4), GEN_INT (((mask >> 2) & 3) * 4 + 1), GEN_INT (((mask >> 2) & 3) * 4 + 2), GEN_INT (((mask >> 2) & 3) * 4 + 3), GEN_INT (((mask >> 4) & 3) * 4 + 16), GEN_INT (((mask >> 4) & 3) * 4 + 17), GEN_INT (((mask >> 4) & 3) * 4 + 18), GEN_INT (((mask >> 4) & 3) * 4 + 19), GEN_INT (((mask >> 6) & 3) * 4 + 16), GEN_INT (((mask >> 6) & 3) * 4 + 17), GEN_INT (((mask >> 6) & 3) * 4 + 18), GEN_INT (((mask >> 6) & 3) * 4 + 19), operands[4], operands[5])); DONE; }) (define_insn "avx512f_shuf_32x4_1" [(set (match_operand:V16FI 0 "register_operand" "=v") (vec_select:V16FI (vec_concat: (match_operand:V16FI 1 "register_operand" "v") (match_operand:V16FI 2 "nonimmediate_operand" "vm")) (parallel [(match_operand 3 "const_0_to_15_operand") (match_operand 4 "const_0_to_15_operand") (match_operand 5 "const_0_to_15_operand") (match_operand 6 
;; Continuation of avx512f_shuf_*_32x4_1: the 16-entry parallel selector
;; (indices 3..10 select from operand 1, 11..18 from operand 2); the insn
;; condition requires each group of four indices to be 4-aligned and
;; consecutive, and the output block re-encodes the four group bases as a
;; 2-bit-per-field immediate for vshufi/vshuff 32x4.  The final pattern,
;; *avx512f_shuf_*_32x4_1_1, is the one-input variant (vec_select of a
;; single register) that emits the same instruction with %1 duplicated.
"const_0_to_15_operand") (match_operand 7 "const_0_to_15_operand") (match_operand 8 "const_0_to_15_operand") (match_operand 9 "const_0_to_15_operand") (match_operand 10 "const_0_to_15_operand") (match_operand 11 "const_16_to_31_operand") (match_operand 12 "const_16_to_31_operand") (match_operand 13 "const_16_to_31_operand") (match_operand 14 "const_16_to_31_operand") (match_operand 15 "const_16_to_31_operand") (match_operand 16 "const_16_to_31_operand") (match_operand 17 "const_16_to_31_operand") (match_operand 18 "const_16_to_31_operand")])))] "TARGET_AVX512F && (INTVAL (operands[3]) & 3) == 0 && INTVAL (operands[3]) == INTVAL (operands[4]) - 1 && INTVAL (operands[3]) == INTVAL (operands[5]) - 2 && INTVAL (operands[3]) == INTVAL (operands[6]) - 3 && (INTVAL (operands[7]) & 3) == 0 && INTVAL (operands[7]) == INTVAL (operands[8]) - 1 && INTVAL (operands[7]) == INTVAL (operands[9]) - 2 && INTVAL (operands[7]) == INTVAL (operands[10]) - 3 && (INTVAL (operands[11]) & 3) == 0 && INTVAL (operands[11]) == INTVAL (operands[12]) - 1 && INTVAL (operands[11]) == INTVAL (operands[13]) - 2 && INTVAL (operands[11]) == INTVAL (operands[14]) - 3 && (INTVAL (operands[15]) & 3) == 0 && INTVAL (operands[15]) == INTVAL (operands[16]) - 1 && INTVAL (operands[15]) == INTVAL (operands[17]) - 2 && INTVAL (operands[15]) == INTVAL (operands[18]) - 3" { int mask; mask = INTVAL (operands[3]) / 4; mask |= INTVAL (operands[7]) / 4 << 2; mask |= (INTVAL (operands[11]) - 16) / 4 << 4; mask |= (INTVAL (operands[15]) - 16) / 4 << 6; operands[3] = GEN_INT (mask); return "vshuf32x4\t{%3, %2, %1, %0|%0, %1, %2, %3}"; } [(set_attr "type" "sselog") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "*avx512f_shuf_32x4_1_1" [(set (match_operand:V16FI 0 "register_operand" "=v") (vec_select:V16FI (match_operand:V16FI 1 "register_operand" "v") (parallel [(match_operand 2 "const_0_to_15_operand") (match_operand 3 "const_0_to_15_operand") (match_operand 4 
;; Continuation of *avx512f_shuf_*_32x4_1_1 (operands 2..17 are the 16
;; selectors; same 4-aligned/consecutive constraints; assembly duplicates
;; the source: "vshuf...\t{%2, %1, %1, %0|...}"), then the start of the
;; avx512f_pshufdv3_mask expander.
"const_0_to_15_operand") (match_operand 5 "const_0_to_15_operand") (match_operand 6 "const_0_to_15_operand") (match_operand 7 "const_0_to_15_operand") (match_operand 8 "const_0_to_15_operand") (match_operand 9 "const_0_to_15_operand") (match_operand 10 "const_0_to_15_operand") (match_operand 11 "const_0_to_15_operand") (match_operand 12 "const_0_to_15_operand") (match_operand 13 "const_0_to_15_operand") (match_operand 14 "const_0_to_15_operand") (match_operand 15 "const_0_to_15_operand") (match_operand 16 "const_0_to_15_operand") (match_operand 17 "const_0_to_15_operand")])))] "TARGET_AVX512F && (INTVAL (operands[2]) & 3) == 0 && INTVAL (operands[2]) == INTVAL (operands[3]) - 1 && INTVAL (operands[2]) == INTVAL (operands[4]) - 2 && INTVAL (operands[2]) == INTVAL (operands[5]) - 3 && (INTVAL (operands[6]) & 3) == 0 && INTVAL (operands[6]) == INTVAL (operands[7]) - 1 && INTVAL (operands[6]) == INTVAL (operands[8]) - 2 && INTVAL (operands[6]) == INTVAL (operands[9]) - 3 && (INTVAL (operands[10]) & 3) == 0 && INTVAL (operands[10]) == INTVAL (operands[11]) - 1 && INTVAL (operands[10]) == INTVAL (operands[12]) - 2 && INTVAL (operands[10]) == INTVAL (operands[13]) - 3 && (INTVAL (operands[14]) & 3) == 0 && INTVAL (operands[14]) == INTVAL (operands[15]) - 1 && INTVAL (operands[14]) == INTVAL (operands[16]) - 2 && INTVAL (operands[14]) == INTVAL (operands[17]) - 3" { int mask; mask = INTVAL (operands[2]) / 4; mask |= INTVAL (operands[6]) / 4 << 2; mask |= INTVAL (operands[10]) / 4 << 4; mask |= INTVAL (operands[14]) / 4 << 6; operands[2] = GEN_INT (mask); return "vshuf32x4\t{%2, %1, %1, %0|%0, %1, %1, %2}"; } [(set_attr "type" "sselog") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "avx512f_pshufdv3_mask" [(match_operand:V16SI 0 "register_operand") (match_operand:V16SI 1 "nonimmediate_operand") (match_operand:SI 2 "const_0_to_255_operand") (match_operand:V16SI 3 "register_operand") (match_operand:HI 4 "register_operand")] 
"TARGET_AVX512F" { int mask = INTVAL (operands[2]); emit_insn (gen_avx512f_pshufd_1_mask (operands[0], operands[1], GEN_INT ((mask >> 0) & 3), GEN_INT ((mask >> 2) & 3), GEN_INT ((mask >> 4) & 3), GEN_INT ((mask >> 6) & 3), GEN_INT (((mask >> 0) & 3) + 4), GEN_INT (((mask >> 2) & 3) + 4), GEN_INT (((mask >> 4) & 3) + 4), GEN_INT (((mask >> 6) & 3) + 4), GEN_INT (((mask >> 0) & 3) + 8), GEN_INT (((mask >> 2) & 3) + 8), GEN_INT (((mask >> 4) & 3) + 8), GEN_INT (((mask >> 6) & 3) + 8), GEN_INT (((mask >> 0) & 3) + 12), GEN_INT (((mask >> 2) & 3) + 12), GEN_INT (((mask >> 4) & 3) + 12), GEN_INT (((mask >> 6) & 3) + 12), operands[3], operands[4])); DONE; }) (define_insn "avx512f_pshufd_1" [(set (match_operand:V16SI 0 "register_operand" "=v") (vec_select:V16SI (match_operand:V16SI 1 "nonimmediate_operand" "vm") (parallel [(match_operand 2 "const_0_to_3_operand") (match_operand 3 "const_0_to_3_operand") (match_operand 4 "const_0_to_3_operand") (match_operand 5 "const_0_to_3_operand") (match_operand 6 "const_4_to_7_operand") (match_operand 7 "const_4_to_7_operand") (match_operand 8 "const_4_to_7_operand") (match_operand 9 "const_4_to_7_operand") (match_operand 10 "const_8_to_11_operand") (match_operand 11 "const_8_to_11_operand") (match_operand 12 "const_8_to_11_operand") (match_operand 13 "const_8_to_11_operand") (match_operand 14 "const_12_to_15_operand") (match_operand 15 "const_12_to_15_operand") (match_operand 16 "const_12_to_15_operand") (match_operand 17 "const_12_to_15_operand")])))] "TARGET_AVX512F && INTVAL (operands[2]) + 4 == INTVAL (operands[6]) && INTVAL (operands[3]) + 4 == INTVAL (operands[7]) && INTVAL (operands[4]) + 4 == INTVAL (operands[8]) && INTVAL (operands[5]) + 4 == INTVAL (operands[9]) && INTVAL (operands[2]) + 8 == INTVAL (operands[10]) && INTVAL (operands[3]) + 8 == INTVAL (operands[11]) && INTVAL (operands[4]) + 8 == INTVAL (operands[12]) && INTVAL (operands[5]) + 8 == INTVAL (operands[13]) && INTVAL (operands[2]) + 12 == INTVAL (operands[14]) 
&& INTVAL (operands[3]) + 12 == INTVAL (operands[15]) && INTVAL (operands[4]) + 12 == INTVAL (operands[16]) && INTVAL (operands[5]) + 12 == INTVAL (operands[17])" { int mask = 0; mask |= INTVAL (operands[2]) << 0; mask |= INTVAL (operands[3]) << 2; mask |= INTVAL (operands[4]) << 4; mask |= INTVAL (operands[5]) << 6; operands[2] = GEN_INT (mask); return "vpshufd\t{%2, %1, %0|%0, %1, %2}"; } [(set_attr "type" "sselog1") (set_attr "prefix" "evex") (set_attr "length_immediate" "1") (set_attr "mode" "XI")]) (define_expand "avx512vl_pshufdv3_mask" [(match_operand:V8SI 0 "register_operand") (match_operand:V8SI 1 "nonimmediate_operand") (match_operand:SI 2 "const_0_to_255_operand") (match_operand:V8SI 3 "register_operand") (match_operand:QI 4 "register_operand")] "TARGET_AVX512VL" { int mask = INTVAL (operands[2]); emit_insn (gen_avx2_pshufd_1_mask (operands[0], operands[1], GEN_INT ((mask >> 0) & 3), GEN_INT ((mask >> 2) & 3), GEN_INT ((mask >> 4) & 3), GEN_INT ((mask >> 6) & 3), GEN_INT (((mask >> 0) & 3) + 4), GEN_INT (((mask >> 2) & 3) + 4), GEN_INT (((mask >> 4) & 3) + 4), GEN_INT (((mask >> 6) & 3) + 4), operands[3], operands[4])); DONE; }) (define_expand "avx2_pshufdv3" [(match_operand:V8SI 0 "register_operand") (match_operand:V8SI 1 "nonimmediate_operand") (match_operand:SI 2 "const_0_to_255_operand")] "TARGET_AVX2" { int mask = INTVAL (operands[2]); emit_insn (gen_avx2_pshufd_1 (operands[0], operands[1], GEN_INT ((mask >> 0) & 3), GEN_INT ((mask >> 2) & 3), GEN_INT ((mask >> 4) & 3), GEN_INT ((mask >> 6) & 3), GEN_INT (((mask >> 0) & 3) + 4), GEN_INT (((mask >> 2) & 3) + 4), GEN_INT (((mask >> 4) & 3) + 4), GEN_INT (((mask >> 6) & 3) + 4))); DONE; }) (define_insn "avx2_pshufd_1" [(set (match_operand:V8SI 0 "register_operand" "=v") (vec_select:V8SI (match_operand:V8SI 1 "nonimmediate_operand" "vm") (parallel [(match_operand 2 "const_0_to_3_operand") (match_operand 3 "const_0_to_3_operand") (match_operand 4 "const_0_to_3_operand") (match_operand 5 
"const_0_to_3_operand") (match_operand 6 "const_4_to_7_operand") (match_operand 7 "const_4_to_7_operand") (match_operand 8 "const_4_to_7_operand") (match_operand 9 "const_4_to_7_operand")])))] "TARGET_AVX2 && && INTVAL (operands[2]) + 4 == INTVAL (operands[6]) && INTVAL (operands[3]) + 4 == INTVAL (operands[7]) && INTVAL (operands[4]) + 4 == INTVAL (operands[8]) && INTVAL (operands[5]) + 4 == INTVAL (operands[9])" { int mask = 0; mask |= INTVAL (operands[2]) << 0; mask |= INTVAL (operands[3]) << 2; mask |= INTVAL (operands[4]) << 4; mask |= INTVAL (operands[5]) << 6; operands[2] = GEN_INT (mask); return "vpshufd\t{%2, %1, %0|%0, %1, %2}"; } [(set_attr "type" "sselog1") (set_attr "prefix" "maybe_evex") (set_attr "length_immediate" "1") (set_attr "mode" "OI")]) (define_expand "avx512vl_pshufd_mask" [(match_operand:V4SI 0 "register_operand") (match_operand:V4SI 1 "nonimmediate_operand") (match_operand:SI 2 "const_0_to_255_operand") (match_operand:V4SI 3 "register_operand") (match_operand:QI 4 "register_operand")] "TARGET_AVX512VL" { int mask = INTVAL (operands[2]); emit_insn (gen_sse2_pshufd_1_mask (operands[0], operands[1], GEN_INT ((mask >> 0) & 3), GEN_INT ((mask >> 2) & 3), GEN_INT ((mask >> 4) & 3), GEN_INT ((mask >> 6) & 3), operands[3], operands[4])); DONE; }) (define_expand "sse2_pshufd" [(match_operand:V4SI 0 "register_operand") (match_operand:V4SI 1 "vector_operand") (match_operand:SI 2 "const_int_operand")] "TARGET_SSE2" { int mask = INTVAL (operands[2]); emit_insn (gen_sse2_pshufd_1 (operands[0], operands[1], GEN_INT ((mask >> 0) & 3), GEN_INT ((mask >> 2) & 3), GEN_INT ((mask >> 4) & 3), GEN_INT ((mask >> 6) & 3))); DONE; }) (define_insn "sse2_pshufd_1" [(set (match_operand:V4SI 0 "register_operand" "=v") (vec_select:V4SI (match_operand:V4SI 1 "vector_operand" "vBm") (parallel [(match_operand 2 "const_0_to_3_operand") (match_operand 3 "const_0_to_3_operand") (match_operand 4 "const_0_to_3_operand") (match_operand 5 "const_0_to_3_operand")])))] 
"TARGET_SSE2 && " { int mask = 0; mask |= INTVAL (operands[2]) << 0; mask |= INTVAL (operands[3]) << 2; mask |= INTVAL (operands[4]) << 4; mask |= INTVAL (operands[5]) << 6; operands[2] = GEN_INT (mask); return "%vpshufd\t{%2, %1, %0|%0, %1, %2}"; } [(set_attr "type" "sselog1") (set_attr "prefix_data16" "1") (set_attr "prefix" "") (set_attr "length_immediate" "1") (set_attr "mode" "TI")]) (define_insn "avx512bw_pshuflwv32hi" [(set (match_operand:V32HI 0 "register_operand" "=v") (unspec:V32HI [(match_operand:V32HI 1 "nonimmediate_operand" "vm") (match_operand:SI 2 "const_0_to_255_operand" "n")] UNSPEC_PSHUFLW))] "TARGET_AVX512BW" "vpshuflw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_expand "avx512vl_pshuflwv3_mask" [(match_operand:V16HI 0 "register_operand") (match_operand:V16HI 1 "nonimmediate_operand") (match_operand:SI 2 "const_0_to_255_operand") (match_operand:V16HI 3 "register_operand") (match_operand:HI 4 "register_operand")] "TARGET_AVX512VL && TARGET_AVX512BW" { int mask = INTVAL (operands[2]); emit_insn (gen_avx2_pshuflw_1_mask (operands[0], operands[1], GEN_INT ((mask >> 0) & 3), GEN_INT ((mask >> 2) & 3), GEN_INT ((mask >> 4) & 3), GEN_INT ((mask >> 6) & 3), GEN_INT (((mask >> 0) & 3) + 8), GEN_INT (((mask >> 2) & 3) + 8), GEN_INT (((mask >> 4) & 3) + 8), GEN_INT (((mask >> 6) & 3) + 8), operands[3], operands[4])); DONE; }) (define_expand "avx2_pshuflwv3" [(match_operand:V16HI 0 "register_operand") (match_operand:V16HI 1 "nonimmediate_operand") (match_operand:SI 2 "const_0_to_255_operand")] "TARGET_AVX2" { int mask = INTVAL (operands[2]); emit_insn (gen_avx2_pshuflw_1 (operands[0], operands[1], GEN_INT ((mask >> 0) & 3), GEN_INT ((mask >> 2) & 3), GEN_INT ((mask >> 4) & 3), GEN_INT ((mask >> 6) & 3), GEN_INT (((mask >> 0) & 3) + 8), GEN_INT (((mask >> 2) & 3) + 8), GEN_INT (((mask >> 4) & 3) + 8), GEN_INT (((mask >> 6) & 3) + 8))); DONE; }) (define_insn "avx2_pshuflw_1" [(set 
;; NOTE(review): pshuflw/pshufhw pattern family (GCC machine description;
;; stripped iterator tokens visible as "&& &&" runs below -- left
;; byte-identical; confirm against upstream sse.md).
;; avx2_pshuflw_1: V16HI vec_select where only the low four words of each
;; 128-bit lane are shuffled (high words 4-7 / 12-15 fixed); condition
;; requires lane-1 selectors = lane-0 selectors + 8, output re-packs the
;; four 2-bit selectors into vpshuflw's immediate.  Followed by the
;; 128-bit masked expander avx512vl_pshuflw_mask and the sse2_pshuflw
;; expander (immediate decomposition) with the start of sse2_pshuflw_1.
(match_operand:V16HI 0 "register_operand" "=Yw") (vec_select:V16HI (match_operand:V16HI 1 "nonimmediate_operand" "Ywm") (parallel [(match_operand 2 "const_0_to_3_operand") (match_operand 3 "const_0_to_3_operand") (match_operand 4 "const_0_to_3_operand") (match_operand 5 "const_0_to_3_operand") (const_int 4) (const_int 5) (const_int 6) (const_int 7) (match_operand 6 "const_8_to_11_operand") (match_operand 7 "const_8_to_11_operand") (match_operand 8 "const_8_to_11_operand") (match_operand 9 "const_8_to_11_operand") (const_int 12) (const_int 13) (const_int 14) (const_int 15)])))] "TARGET_AVX2 && && && INTVAL (operands[2]) + 8 == INTVAL (operands[6]) && INTVAL (operands[3]) + 8 == INTVAL (operands[7]) && INTVAL (operands[4]) + 8 == INTVAL (operands[8]) && INTVAL (operands[5]) + 8 == INTVAL (operands[9])" { int mask = 0; mask |= INTVAL (operands[2]) << 0; mask |= INTVAL (operands[3]) << 2; mask |= INTVAL (operands[4]) << 4; mask |= INTVAL (operands[5]) << 6; operands[2] = GEN_INT (mask); return "vpshuflw\t{%2, %1, %0|%0, %1, %2}"; } [(set_attr "type" "sselog") (set_attr "prefix" "maybe_evex") (set_attr "length_immediate" "1") (set_attr "mode" "OI")]) (define_expand "avx512vl_pshuflw_mask" [(match_operand:V8HI 0 "register_operand") (match_operand:V8HI 1 "nonimmediate_operand") (match_operand:SI 2 "const_0_to_255_operand") (match_operand:V8HI 3 "register_operand") (match_operand:QI 4 "register_operand")] "TARGET_AVX512VL && TARGET_AVX512BW" { int mask = INTVAL (operands[2]); emit_insn (gen_sse2_pshuflw_1_mask (operands[0], operands[1], GEN_INT ((mask >> 0) & 3), GEN_INT ((mask >> 2) & 3), GEN_INT ((mask >> 4) & 3), GEN_INT ((mask >> 6) & 3), operands[3], operands[4])); DONE; }) (define_expand "sse2_pshuflw" [(match_operand:V8HI 0 "register_operand") (match_operand:V8HI 1 "vector_operand") (match_operand:SI 2 "const_int_operand")] "TARGET_SSE2" { int mask = INTVAL (operands[2]); emit_insn (gen_sse2_pshuflw_1 (operands[0], operands[1], GEN_INT ((mask >> 0) & 3), GEN_INT 
;; Tail of the sse2_pshuflw expander and the sse2_pshuflw_1 insn (low
;; four words shuffled, words 4-7 fixed; '"TARGET_SSE2 && && "' again
;; shows two stripped condition tokens).  Then avx2_pshufhwv3: the
;; high-word counterpart expander, whose selectors carry +4/+12 offsets
;; because pshufhw permutes words 4-7 of each lane.  Also defines the
;; unspec-based avx512bw_pshufhwv32hi and begins avx512vl_pshufhwv3_mask.
((mask >> 2) & 3), GEN_INT ((mask >> 4) & 3), GEN_INT ((mask >> 6) & 3))); DONE; }) (define_insn "sse2_pshuflw_1" [(set (match_operand:V8HI 0 "register_operand" "=Yw") (vec_select:V8HI (match_operand:V8HI 1 "vector_operand" "YwBm") (parallel [(match_operand 2 "const_0_to_3_operand") (match_operand 3 "const_0_to_3_operand") (match_operand 4 "const_0_to_3_operand") (match_operand 5 "const_0_to_3_operand") (const_int 4) (const_int 5) (const_int 6) (const_int 7)])))] "TARGET_SSE2 && && " { int mask = 0; mask |= INTVAL (operands[2]) << 0; mask |= INTVAL (operands[3]) << 2; mask |= INTVAL (operands[4]) << 4; mask |= INTVAL (operands[5]) << 6; operands[2] = GEN_INT (mask); return "%vpshuflw\t{%2, %1, %0|%0, %1, %2}"; } [(set_attr "type" "sselog") (set_attr "prefix_data16" "0") (set_attr "prefix_rep" "1") (set_attr "prefix" "maybe_vex") (set_attr "length_immediate" "1") (set_attr "mode" "TI")]) (define_expand "avx2_pshufhwv3" [(match_operand:V16HI 0 "register_operand") (match_operand:V16HI 1 "nonimmediate_operand") (match_operand:SI 2 "const_0_to_255_operand")] "TARGET_AVX2" { int mask = INTVAL (operands[2]); emit_insn (gen_avx2_pshufhw_1 (operands[0], operands[1], GEN_INT (((mask >> 0) & 3) + 4), GEN_INT (((mask >> 2) & 3) + 4), GEN_INT (((mask >> 4) & 3) + 4), GEN_INT (((mask >> 6) & 3) + 4), GEN_INT (((mask >> 0) & 3) + 12), GEN_INT (((mask >> 2) & 3) + 12), GEN_INT (((mask >> 4) & 3) + 12), GEN_INT (((mask >> 6) & 3) + 12))); DONE; }) (define_insn "avx512bw_pshufhwv32hi" [(set (match_operand:V32HI 0 "register_operand" "=v") (unspec:V32HI [(match_operand:V32HI 1 "nonimmediate_operand" "vm") (match_operand:SI 2 "const_0_to_255_operand" "n")] UNSPEC_PSHUFHW))] "TARGET_AVX512BW" "vpshufhw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_expand "avx512vl_pshufhwv3_mask" [(match_operand:V16HI 0 "register_operand") (match_operand:V16HI 1 "nonimmediate_operand") (match_operand:SI 2 "const_0_to_255_operand") 
;; Tail of avx512vl_pshufhwv3_mask (forwards merge source + mask), then
;; avx2_pshufhw_1: high-word shuffle insn (words 0-3 / 8-11 fixed); the
;; output subtracts 4 from each selector before packing the immediate
;; since pshufhw immediates are relative to word 4.  Begins
;; avx512vl_pshufhw_mask.
(match_operand:V16HI 3 "register_operand") (match_operand:HI 4 "register_operand")] "TARGET_AVX512VL && TARGET_AVX512BW" { int mask = INTVAL (operands[2]); emit_insn (gen_avx2_pshufhw_1_mask (operands[0], operands[1], GEN_INT (((mask >> 0) & 3) + 4), GEN_INT (((mask >> 2) & 3) + 4), GEN_INT (((mask >> 4) & 3) + 4), GEN_INT (((mask >> 6) & 3) + 4), GEN_INT (((mask >> 0) & 3) + 12), GEN_INT (((mask >> 2) & 3) + 12), GEN_INT (((mask >> 4) & 3) + 12), GEN_INT (((mask >> 6) & 3) + 12), operands[3], operands[4])); DONE; }) (define_insn "avx2_pshufhw_1" [(set (match_operand:V16HI 0 "register_operand" "=Yw") (vec_select:V16HI (match_operand:V16HI 1 "nonimmediate_operand" "Ywm") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (match_operand 2 "const_4_to_7_operand") (match_operand 3 "const_4_to_7_operand") (match_operand 4 "const_4_to_7_operand") (match_operand 5 "const_4_to_7_operand") (const_int 8) (const_int 9) (const_int 10) (const_int 11) (match_operand 6 "const_12_to_15_operand") (match_operand 7 "const_12_to_15_operand") (match_operand 8 "const_12_to_15_operand") (match_operand 9 "const_12_to_15_operand")])))] "TARGET_AVX2 && && && INTVAL (operands[2]) + 8 == INTVAL (operands[6]) && INTVAL (operands[3]) + 8 == INTVAL (operands[7]) && INTVAL (operands[4]) + 8 == INTVAL (operands[8]) && INTVAL (operands[5]) + 8 == INTVAL (operands[9])" { int mask = 0; mask |= (INTVAL (operands[2]) - 4) << 0; mask |= (INTVAL (operands[3]) - 4) << 2; mask |= (INTVAL (operands[4]) - 4) << 4; mask |= (INTVAL (operands[5]) - 4) << 6; operands[2] = GEN_INT (mask); return "vpshufhw\t{%2, %1, %0|%0, %1, %2}"; } [(set_attr "type" "sselog") (set_attr "prefix" "maybe_evex") (set_attr "length_immediate" "1") (set_attr "mode" "OI")]) (define_expand "avx512vl_pshufhw_mask" [(match_operand:V8HI 0 "register_operand") (match_operand:V8HI 1 "nonimmediate_operand") (match_operand:SI 2 "const_0_to_255_operand") (match_operand:V8HI 3 "register_operand") (match_operand:QI 4 
;; Tail of avx512vl_pshufhw_mask, the sse2_pshufhw expander and
;; sse2_pshufhw_1 insn (same -4 immediate rebasing; stripped condition
;; tokens again in '"TARGET_SSE2 && && "'), then the sse2_loadd expander
;; (vec_merge a scalar into element 0 of a zero vector) and the start of
;; sse2_loadld with its five register/memory alternatives.
"register_operand")] "TARGET_AVX512VL && TARGET_AVX512BW" { int mask = INTVAL (operands[2]); emit_insn (gen_sse2_pshufhw_1_mask (operands[0], operands[1], GEN_INT (((mask >> 0) & 3) + 4), GEN_INT (((mask >> 2) & 3) + 4), GEN_INT (((mask >> 4) & 3) + 4), GEN_INT (((mask >> 6) & 3) + 4), operands[3], operands[4])); DONE; }) (define_expand "sse2_pshufhw" [(match_operand:V8HI 0 "register_operand") (match_operand:V8HI 1 "vector_operand") (match_operand:SI 2 "const_int_operand")] "TARGET_SSE2" { int mask = INTVAL (operands[2]); emit_insn (gen_sse2_pshufhw_1 (operands[0], operands[1], GEN_INT (((mask >> 0) & 3) + 4), GEN_INT (((mask >> 2) & 3) + 4), GEN_INT (((mask >> 4) & 3) + 4), GEN_INT (((mask >> 6) & 3) + 4))); DONE; }) (define_insn "sse2_pshufhw_1" [(set (match_operand:V8HI 0 "register_operand" "=Yw") (vec_select:V8HI (match_operand:V8HI 1 "vector_operand" "YwBm") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (match_operand 2 "const_4_to_7_operand") (match_operand 3 "const_4_to_7_operand") (match_operand 4 "const_4_to_7_operand") (match_operand 5 "const_4_to_7_operand")])))] "TARGET_SSE2 && && " { int mask = 0; mask |= (INTVAL (operands[2]) - 4) << 0; mask |= (INTVAL (operands[3]) - 4) << 2; mask |= (INTVAL (operands[4]) - 4) << 4; mask |= (INTVAL (operands[5]) - 4) << 6; operands[2] = GEN_INT (mask); return "%vpshufhw\t{%2, %1, %0|%0, %1, %2}"; } [(set_attr "type" "sselog") (set_attr "prefix_rep" "1") (set_attr "prefix_data16" "0") (set_attr "prefix" "maybe_vex") (set_attr "length_immediate" "1") (set_attr "mode" "TI")]) (define_expand "sse2_loadd" [(set (match_operand:V4SI 0 "register_operand") (vec_merge:V4SI (vec_duplicate:V4SI (match_operand:SI 1 "nonimmediate_operand")) (match_dup 2) (const_int 1)))] "TARGET_SSE" "operands[2] = CONST0_RTX (V4SImode);") (define_insn "sse2_loadld" [(set (match_operand:V4SI 0 "register_operand" "=v,v,x,x,v") (vec_merge:V4SI (vec_duplicate:V4SI (match_operand:SI 2 "nonimmediate_operand" "m ,r ,m,x,v")) 
(match_operand:V4SI 1 "reg_or_0_operand" "C ,C ,C,0,v") (const_int 1)))] "TARGET_SSE" "@ %vmovd\t{%2, %0|%0, %2} %vmovd\t{%2, %0|%0, %2} movss\t{%2, %0|%0, %2} movss\t{%2, %0|%0, %2} vmovss\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "sse2,sse2,noavx,noavx,avx") (set_attr "type" "ssemov") (set_attr "prefix" "maybe_vex,maybe_vex,orig,orig,maybe_evex") (set_attr "mode" "TI,TI,V4SF,SF,SF") (set (attr "preferred_for_speed") (cond [(eq_attr "alternative" "1") (symbol_ref "TARGET_INTER_UNIT_MOVES_TO_VEC") ] (symbol_ref "true")))]) ;; QI and HI modes handled by pextr patterns. (define_mode_iterator PEXTR_MODE12 [(V16QI "TARGET_SSE4_1") V8HI]) (define_insn "*vec_extract" [(set (match_operand: 0 "register_sse4nonimm_operand" "=r,m") (vec_select: (match_operand:PEXTR_MODE12 1 "register_operand" "YW,YW") (parallel [(match_operand:SI 2 "const_0_to__operand")])))] "TARGET_SSE2" "@ %vpextr\t{%2, %1, %k0|%k0, %1, %2} %vpextr\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "*,sse4") (set_attr "type" "sselog1") (set_attr "prefix_data16" "1") (set (attr "prefix_extra") (if_then_else (and (eq_attr "alternative" "0,2") (eq (const_string "mode") (const_string "V8HImode"))) (const_string "*") (const_string "1"))) (set_attr "length_immediate" "1") (set_attr "prefix" "maybe_vex,maybe_vex") (set_attr "mode" "TI")]) (define_insn "*vec_extract_zext" [(set (match_operand:SWI48 0 "register_operand" "=r") (zero_extend:SWI48 (vec_select: (match_operand:PEXTR_MODE12 1 "register_operand" "YW") (parallel [(match_operand:SI 2 "const_0_to__operand")]))))] "TARGET_SSE2" "%vpextr\t{%2, %1, %k0|%k0, %1, %2}" [(set_attr "type" "sselog1") (set_attr "prefix_data16" "1") (set (attr "prefix_extra") (if_then_else (eq (const_string "mode") (const_string "V8HImode")) (const_string "*") (const_string "1"))) (set_attr "length_immediate" "1") (set_attr "prefix" "maybe_vex") (set_attr "mode" "TI")]) (define_insn "*vec_extractv16qi_zext" [(set (match_operand:HI 0 "register_operand" "=r") (zero_extend:HI (vec_select:QI 
(match_operand:V16QI 1 "register_operand" "YW") (parallel [(match_operand:SI 2 "const_0_to_15_operand")]))))] "TARGET_SSE4_1" "%vpextrb\t{%2, %1, %k0|%k0, %1, %2}" [(set_attr "type" "sselog1") (set_attr "prefix_data16" "1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "maybe_vex") (set_attr "mode" "TI")]) (define_insn "*vec_extract_mem" [(set (match_operand: 0 "register_operand" "=r") (vec_select: (match_operand:VI12_128 1 "memory_operand" "o") (parallel [(match_operand 2 "const_0_to__operand")])))] "TARGET_SSE" "#") (define_insn "*vec_extract_0" [(set (match_operand:SWI48 0 "nonimmediate_operand" "=r,r,v ,m") (vec_select:SWI48 (match_operand: 1 "nonimmediate_operand" "m ,v,vm,v") (parallel [(const_int 0)])))] "TARGET_SSE && !(MEM_P (operands[0]) && MEM_P (operands[1]))" "#" [(set_attr "isa" "*,sse2,*,*") (set (attr "preferred_for_speed") (cond [(eq_attr "alternative" "1") (symbol_ref "TARGET_INTER_UNIT_MOVES_FROM_VEC") ] (symbol_ref "true")))]) (define_insn "*vec_extractv2di_0_sse" [(set (match_operand:DI 0 "nonimmediate_operand" "=r,x ,m") (vec_select:DI (match_operand:V2DI 1 "nonimmediate_operand" " x,xm,x") (parallel [(const_int 0)])))] "TARGET_SSE && !TARGET_64BIT && !(MEM_P (operands[0]) && MEM_P (operands[1]))" "#" [(set_attr "isa" "sse4,*,*") (set (attr "preferred_for_speed") (cond [(eq_attr "alternative" "0") (symbol_ref "TARGET_INTER_UNIT_MOVES_FROM_VEC") ] (symbol_ref "true")))]) (define_split [(set (match_operand:DI 0 "general_reg_operand") (vec_select:DI (match_operand:V2DI 1 "register_operand") (parallel [(const_int 0)])))] "TARGET_SSE4_1 && !TARGET_64BIT && reload_completed" [(set (match_dup 2) (match_dup 4)) (set (match_dup 3) (vec_select:SI (match_dup 5) (parallel [(const_int 1)])))] { operands[4] = gen_lowpart (SImode, operands[1]); operands[5] = gen_lowpart (V4SImode, operands[1]); split_double_mode (DImode, &operands[0], 1, &operands[2], &operands[3]); }) (define_split [(set (match_operand:SWI48x 0 
"nonimmediate_operand") (vec_select:SWI48x (match_operand: 1 "register_operand") (parallel [(const_int 0)])))] "TARGET_SSE && reload_completed" [(set (match_dup 0) (match_dup 1))] "operands[1] = gen_lowpart (mode, operands[1]);") (define_insn "*vec_extractv4si_0_zext_sse4" [(set (match_operand:DI 0 "register_operand" "=r,x,v") (zero_extend:DI (vec_select:SI (match_operand:V4SI 1 "register_operand" "v,x,v") (parallel [(const_int 0)]))))] "TARGET_64BIT && TARGET_SSE4_1" "#" [(set_attr "isa" "*,*,avx512f") (set (attr "preferred_for_speed") (cond [(eq_attr "alternative" "0") (symbol_ref "TARGET_INTER_UNIT_MOVES_FROM_VEC") ] (symbol_ref "true")))]) (define_insn "*vec_extractv4si_0_zext" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (vec_select:SI (match_operand:V4SI 1 "register_operand" "x") (parallel [(const_int 0)]))))] "TARGET_64BIT && TARGET_SSE2 && TARGET_INTER_UNIT_MOVES_FROM_VEC" "#") (define_split [(set (match_operand:DI 0 "register_operand") (zero_extend:DI (vec_select:SI (match_operand:V4SI 1 "register_operand") (parallel [(const_int 0)]))))] "TARGET_SSE2 && reload_completed" [(set (match_dup 0) (zero_extend:DI (match_dup 1)))] "operands[1] = gen_lowpart (SImode, operands[1]);") (define_insn "*vec_extractv4si" [(set (match_operand:SI 0 "nonimmediate_operand" "=rm,rm,Yr,*x,Yw") (vec_select:SI (match_operand:V4SI 1 "register_operand" " x, v, 0, 0,Yw") (parallel [(match_operand:SI 2 "const_0_to_3_operand")])))] "TARGET_SSE4_1" { switch (which_alternative) { case 0: case 1: return "%vpextrd\t{%2, %1, %0|%0, %1, %2}"; case 2: case 3: operands[2] = GEN_INT (INTVAL (operands[2]) * 4); return "psrldq\t{%2, %0|%0, %2}"; case 4: operands[2] = GEN_INT (INTVAL (operands[2]) * 4); return "vpsrldq\t{%2, %1, %0|%0, %1, %2}"; default: gcc_unreachable (); } } [(set_attr "isa" "*,avx512dq,noavx,noavx,avx") (set_attr "type" "sselog1,sselog1,sseishft1,sseishft1,sseishft1") (set (attr "prefix_extra") (if_then_else (eq_attr "alternative" "0,1") (const_string 
"1") (const_string "*"))) (set_attr "length_immediate" "1") (set_attr "prefix" "maybe_vex,evex,orig,orig,maybe_vex") (set_attr "mode" "TI")]) (define_insn "*vec_extractv4si_zext" [(set (match_operand:DI 0 "register_operand" "=r,r") (zero_extend:DI (vec_select:SI (match_operand:V4SI 1 "register_operand" "x,v") (parallel [(match_operand:SI 2 "const_0_to_3_operand")]))))] "TARGET_64BIT && TARGET_SSE4_1" "%vpextrd\t{%2, %1, %k0|%k0, %1, %2}" [(set_attr "isa" "*,avx512dq") (set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "maybe_vex") (set_attr "mode" "TI")]) (define_insn "*vec_extractv4si_mem" [(set (match_operand:SI 0 "register_operand" "=x,r") (vec_select:SI (match_operand:V4SI 1 "memory_operand" "o,o") (parallel [(match_operand 2 "const_0_to_3_operand")])))] "TARGET_SSE" "#") (define_insn_and_split "*vec_extractv4si_zext_mem" [(set (match_operand:DI 0 "register_operand" "=x,r") (zero_extend:DI (vec_select:SI (match_operand:V4SI 1 "memory_operand" "o,o") (parallel [(match_operand:SI 2 "const_0_to_3_operand")]))))] "TARGET_64BIT && TARGET_SSE" "#" "&& reload_completed" [(set (match_dup 0) (zero_extend:DI (match_dup 1)))] { operands[1] = adjust_address (operands[1], SImode, INTVAL (operands[2]) * 4); }) (define_insn "*vec_extractv2di_1" [(set (match_operand:DI 0 "nonimmediate_operand" "=rm,rm,m,x,x,Yv,x,v,r") (vec_select:DI (match_operand:V2DI 1 "nonimmediate_operand" "x ,v ,v,0,x, v,x,o,o") (parallel [(const_int 1)])))] "TARGET_SSE && !(MEM_P (operands[0]) && MEM_P (operands[1]))" "@ %vpextrq\t{$1, %1, %0|%0, %1, 1} vpextrq\t{$1, %1, %0|%0, %1, 1} %vmovhps\t{%1, %0|%0, %1} psrldq\t{$8, %0|%0, 8} vpsrldq\t{$8, %1, %0|%0, %1, 8} vpsrldq\t{$8, %1, %0|%0, %1, 8} movhlps\t{%1, %0|%0, %1} # #" [(set (attr "isa") (cond [(eq_attr "alternative" "0") (const_string "x64_sse4") (eq_attr "alternative" "1") (const_string "x64_avx512dq") (eq_attr "alternative" "3") (const_string "sse2_noavx") (eq_attr "alternative" "4") 
(const_string "avx") (eq_attr "alternative" "5") (const_string "avx512bw") (eq_attr "alternative" "6") (const_string "noavx") (eq_attr "alternative" "8") (const_string "x64") ] (const_string "*"))) (set (attr "type") (cond [(eq_attr "alternative" "2,6,7") (const_string "ssemov") (eq_attr "alternative" "3,4,5") (const_string "sseishft1") (eq_attr "alternative" "8") (const_string "imov") ] (const_string "sselog1"))) (set (attr "length_immediate") (if_then_else (eq_attr "alternative" "0,1,3,4,5") (const_string "1") (const_string "*"))) (set (attr "prefix_rex") (if_then_else (eq_attr "alternative" "0,1") (const_string "1") (const_string "*"))) (set (attr "prefix_extra") (if_then_else (eq_attr "alternative" "0,1") (const_string "1") (const_string "*"))) (set_attr "prefix" "maybe_vex,evex,maybe_vex,orig,vex,evex,orig,*,*") (set_attr "mode" "TI,TI,V2SF,TI,TI,TI,V4SF,DI,DI")]) (define_split [(set (match_operand: 0 "register_operand") (vec_select: (match_operand:VI_128 1 "memory_operand") (parallel [(match_operand 2 "const_0_to__operand")])))] "TARGET_SSE && reload_completed" [(set (match_dup 0) (match_dup 1))] { int offs = INTVAL (operands[2]) * GET_MODE_SIZE (mode); operands[1] = adjust_address (operands[1], mode, offs); }) (define_insn "*vec_extractv2ti" [(set (match_operand:TI 0 "nonimmediate_operand" "=xm,vm") (vec_select:TI (match_operand:V2TI 1 "register_operand" "x,v") (parallel [(match_operand:SI 2 "const_0_to_1_operand")])))] "TARGET_AVX" "@ vextract%~128\t{%2, %1, %0|%0, %1, %2} vextracti32x4\t{%2, %g1, %0|%0, %g1, %2}" [(set_attr "type" "sselog") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "vex,evex") (set_attr "mode" "OI")]) (define_insn "*vec_extractv4ti" [(set (match_operand:TI 0 "nonimmediate_operand" "=vm") (vec_select:TI (match_operand:V4TI 1 "register_operand" "v") (parallel [(match_operand:SI 2 "const_0_to_3_operand")])))] "TARGET_AVX512F" "vextracti32x4\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") 
(set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_mode_iterator VEXTRACTI128_MODE [(V4TI "TARGET_AVX512F") V2TI]) (define_split [(set (match_operand:TI 0 "nonimmediate_operand") (vec_select:TI (match_operand:VEXTRACTI128_MODE 1 "register_operand") (parallel [(const_int 0)])))] "TARGET_AVX && reload_completed && (TARGET_AVX512VL || !EXT_REX_SSE_REG_P (operands[1]))" [(set (match_dup 0) (match_dup 1))] "operands[1] = gen_lowpart (TImode, operands[1]);") ;; Turn SImode or DImode extraction from arbitrary SSE/AVX/AVX512F ;; vector modes into vec_extract*. (define_split [(set (match_operand:SWI48x 0 "nonimmediate_operand") (subreg:SWI48x (match_operand 1 "register_operand") 0))] "can_create_pseudo_p () && REG_P (operands[1]) && VECTOR_MODE_P (GET_MODE (operands[1])) && ((TARGET_SSE && GET_MODE_SIZE (GET_MODE (operands[1])) == 16) || (TARGET_AVX && GET_MODE_SIZE (GET_MODE (operands[1])) == 32) || (TARGET_AVX512F && GET_MODE_SIZE (GET_MODE (operands[1])) == 64)) && (mode == SImode || TARGET_64BIT || MEM_P (operands[0]))" [(set (match_dup 0) (vec_select:SWI48x (match_dup 1) (parallel [(const_int 0)])))] { rtx tmp; switch (GET_MODE_SIZE (GET_MODE (operands[1]))) { case 64: if (mode == SImode) { tmp = gen_reg_rtx (V8SImode); emit_insn (gen_vec_extract_lo_v16si (tmp, gen_lowpart (V16SImode, operands[1]))); } else { tmp = gen_reg_rtx (V4DImode); emit_insn (gen_vec_extract_lo_v8di (tmp, gen_lowpart (V8DImode, operands[1]))); } operands[1] = tmp; /* FALLTHRU */ case 32: tmp = gen_reg_rtx (mode); if (mode == SImode) emit_insn (gen_vec_extract_lo_v8si (tmp, gen_lowpart (V8SImode, operands[1]))); else emit_insn (gen_vec_extract_lo_v4di (tmp, gen_lowpart (V4DImode, operands[1]))); operands[1] = tmp; break; case 16: operands[1] = gen_lowpart (mode, operands[1]); break; } }) (define_insn "*vec_concatv2si_sse4_1" [(set (match_operand:V2SI 0 "register_operand" "=Yr,*x, x, v,Yr,*x, v, v, *y,*y") (vec_concat:V2SI 
(match_operand:SI 1 "nonimmediate_operand" " 0, 0, x,Yv, 0, 0,Yv,rm, 0,rm") (match_operand:SI 2 "nonimm_or_0_operand" " rm,rm,rm,rm,Yr,*x,Yv, C,*ym, C")))] "TARGET_SSE4_1 && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "@ pinsrd\t{$1, %2, %0|%0, %2, 1} pinsrd\t{$1, %2, %0|%0, %2, 1} vpinsrd\t{$1, %2, %1, %0|%0, %1, %2, 1} vpinsrd\t{$1, %2, %1, %0|%0, %1, %2, 1} punpckldq\t{%2, %0|%0, %2} punpckldq\t{%2, %0|%0, %2} vpunpckldq\t{%2, %1, %0|%0, %1, %2} %vmovd\t{%1, %0|%0, %1} punpckldq\t{%2, %0|%0, %2} movd\t{%1, %0|%0, %1}" [(set_attr "isa" "noavx,noavx,avx,avx512dq,noavx,noavx,avx,*,*,*") (set (attr "mmx_isa") (if_then_else (eq_attr "alternative" "8,9") (const_string "native") (const_string "*"))) (set (attr "type") (cond [(eq_attr "alternative" "7") (const_string "ssemov") (eq_attr "alternative" "8") (const_string "mmxcvt") (eq_attr "alternative" "9") (const_string "mmxmov") ] (const_string "sselog"))) (set (attr "prefix_extra") (if_then_else (eq_attr "alternative" "0,1,2,3") (const_string "1") (const_string "*"))) (set (attr "length_immediate") (if_then_else (eq_attr "alternative" "0,1,2,3") (const_string "1") (const_string "*"))) (set_attr "prefix" "orig,orig,vex,evex,orig,orig,maybe_evex,maybe_vex,orig,orig") (set_attr "mode" "TI,TI,TI,TI,TI,TI,TI,TI,DI,DI")]) ;; ??? In theory we can match memory for the MMX alternative, but allowing ;; nonimmediate_operand for operand 2 and *not* allowing memory for the SSE ;; alternatives pretty much forces the MMX alternative to be chosen. 
;; Vector-concatenation patterns: build V2SI/V4SI/V2DI values from two
;; halves, with MMX, legacy-SSE and AVX/AVX-512 alternatives selected via
;; the "isa"/"mmx_isa" attributes.
;;   *vec_concatv2si    - pre-SSE4.1 V2SI concat (punpckldq/movd/unpcklps/movss).
;;   *vec_concatv4si    - V4SI from two V2SI halves (punpcklqdq/movlhps/movhps).
;;   *vec_concat..._0   - concat with a zero high half; emits a plain
;;                        %vmovq / movq2dq since those zero the upper bits.
;;   vec_concatv2di     - V2DI from two DIs; pinsrq alternatives need SSE4.1+
;;                        in 64-bit mode, the rest use punpcklqdq/movlhps/movhps.
;;   *vec_concatv2di_0  - DI plus zero; the GPR alternative falls back to
;;                        %vmovd when the assembler lacks inter-unit movq
;;                        (HAVE_AS_IX86_INTERUNIT_MOVQ).
;; NOTE(review): this region was mechanically re-flowed - original newlines
;; are collapsed and angle-bracketed iterator/attr references (e.g. the
;; <mode> suffix in "*vec_concat_0" and the missing modes in
;; "(match_operand: 1 ...)") appear to have been stripped during extraction.
;; Restore the exact text from the upstream machine description before use -
;; TODO confirm against the canonical file.
(define_insn "*vec_concatv2si" [(set (match_operand:V2SI 0 "register_operand" "=x,x ,x,x,*y,*y") (vec_concat:V2SI (match_operand:SI 1 "nonimmediate_operand" " 0,rm,0,m, 0,rm") (match_operand:SI 2 "reg_or_0_operand" " x,C ,x,C,*y,C")))] "TARGET_SSE && !TARGET_SSE4_1" "@ punpckldq\t{%2, %0|%0, %2} movd\t{%1, %0|%0, %1} unpcklps\t{%2, %0|%0, %2} movss\t{%1, %0|%0, %1} punpckldq\t{%2, %0|%0, %2} movd\t{%1, %0|%0, %1}" [(set_attr "isa" "sse2,sse2,*,*,*,*") (set_attr "mmx_isa" "*,*,*,*,native,native") (set_attr "type" "sselog,ssemov,sselog,ssemov,mmxcvt,mmxmov") (set_attr "mode" "TI,TI,V4SF,SF,DI,DI")]) (define_insn "*vec_concatv4si" [(set (match_operand:V4SI 0 "register_operand" "=x,v,x,x,v") (vec_concat:V4SI (match_operand:V2SI 1 "register_operand" " 0,v,0,0,v") (match_operand:V2SI 2 "nonimmediate_operand" " x,v,x,m,m")))] "TARGET_SSE" "@ punpcklqdq\t{%2, %0|%0, %2} vpunpcklqdq\t{%2, %1, %0|%0, %1, %2} movlhps\t{%2, %0|%0, %2} movhps\t{%2, %0|%0, %q2} vmovhps\t{%2, %1, %0|%0, %1, %q2}" [(set_attr "isa" "sse2_noavx,avx,noavx,noavx,avx") (set_attr "type" "sselog,sselog,ssemov,ssemov,ssemov") (set_attr "prefix" "orig,maybe_evex,orig,orig,maybe_evex") (set_attr "mode" "TI,TI,V4SF,V2SF,V2SF")]) (define_insn "*vec_concat_0" [(set (match_operand:VI124_128 0 "register_operand" "=v,x") (vec_concat:VI124_128 (match_operand: 1 "nonimmediate_operand" "vm,?!*y") (match_operand: 2 "const0_operand" " C,C")))] "TARGET_SSE2" "@ %vmovq\t{%1, %0|%0, %1} movq2dq\t{%1, %0|%0, %1}" [(set_attr "mmx_isa" "*,native") (set_attr "type" "ssemov") (set_attr "prefix" "maybe_vex,orig") (set_attr "mode" "TI")]) (define_insn "vec_concatv2di" [(set (match_operand:V2DI 0 "register_operand" "=Yr,*x,x ,v ,x,v ,x,x,v") (vec_concat:V2DI (match_operand:DI 1 "register_operand" " 0, 0,x ,Yv,0,Yv,0,0,v") (match_operand:DI 2 "nonimmediate_operand" " rm,rm,rm,rm,x,Yv,x,m,m")))] "TARGET_SSE" "@ pinsrq\t{$1, %2, %0|%0, %2, 1} pinsrq\t{$1, %2, %0|%0, %2, 1} vpinsrq\t{$1, %2, %1, %0|%0, %1, %2, 1} vpinsrq\t{$1, %2,
%1, %0|%0, %1, %2, 1} punpcklqdq\t{%2, %0|%0, %2} vpunpcklqdq\t{%2, %1, %0|%0, %1, %2} movlhps\t{%2, %0|%0, %2} movhps\t{%2, %0|%0, %2} vmovhps\t{%2, %1, %0|%0, %1, %2}" [(set (attr "isa") (cond [(eq_attr "alternative" "0,1") (const_string "x64_sse4_noavx") (eq_attr "alternative" "2") (const_string "x64_avx") (eq_attr "alternative" "3") (const_string "x64_avx512dq") (eq_attr "alternative" "4") (const_string "sse2_noavx") (eq_attr "alternative" "5,8") (const_string "avx") ] (const_string "noavx"))) (set (attr "type") (if_then_else (eq_attr "alternative" "0,1,2,3,4,5") (const_string "sselog") (const_string "ssemov"))) (set (attr "prefix_rex") (if_then_else (eq_attr "alternative" "0,1,2,3") (const_string "1") (const_string "*"))) (set (attr "prefix_extra") (if_then_else (eq_attr "alternative" "0,1,2,3") (const_string "1") (const_string "*"))) (set (attr "length_immediate") (if_then_else (eq_attr "alternative" "0,1,2,3") (const_string "1") (const_string "*"))) (set (attr "prefix") (cond [(eq_attr "alternative" "2") (const_string "vex") (eq_attr "alternative" "3") (const_string "evex") (eq_attr "alternative" "5,8") (const_string "maybe_evex") ] (const_string "orig"))) (set_attr "mode" "TI,TI,TI,TI,TI,TI,V4SF,V2SF,V2SF")]) (define_insn "*vec_concatv2di_0" [(set (match_operand:V2DI 0 "register_operand" "=v,v ,x") (vec_concat:V2DI (match_operand:DI 1 "nonimmediate_operand" " r,vm,?!*y") (match_operand:DI 2 "const0_operand" " C,C ,C")))] "TARGET_SSE2" "@ * return HAVE_AS_IX86_INTERUNIT_MOVQ ? \"%vmovq\t{%1, %0|%0, %1}\" : \"%vmovd\t{%1, %0|%0, %1}\"; %vmovq\t{%1, %0|%0, %1} movq2dq\t{%1, %0|%0, %1}" [(set_attr "isa" "x64,*,*") (set_attr "mmx_isa" "*,*,native") (set_attr "type" "ssemov") (set_attr "prefix_rex" "1,*,*") (set_attr "prefix" "maybe_vex,maybe_vex,orig") (set_attr "mode" "TI") (set (attr "preferred_for_speed") (cond [(eq_attr "alternative" "0") (symbol_ref "TARGET_INTER_UNIT_MOVES_TO_VEC") ] (symbol_ref "true")))]) ;; vmovq clears also the higher bits.
;; vec_set_0 and the vec_unpack expanders.
;;   vec_set_0                 - insert a scalar into element 0 of a
;;                               zeroed VI8 vector via vmovq (AVX+); the GPR
;;                               alternative is gated on x64 and on
;;                               TARGET_INTER_UNIT_MOVES_TO_VEC for speed.
;;   vec_unpacks{_lo,_hi}/vec_unpacku{_lo,_hi} - widening unpack expanders
;;                               that defer to ix86_expand_sse_unpack with
;;                               (unsigned_p, high_p) flags.
;;   vec_unpacks_sbool_*_qi    - AVX-512 mask-register (k-reg) variants; the
;;                               hi form shifts the mask right by nunits/2
;;                               (kshiftr), using an HImode detour when
;;                               AVX512DQ's byte kshift is unavailable.
;;   _uavg3 expander           - rounding unsigned average: the standard
;;                               (a + b + 1) >> 1 RTL shape that matches pavgb/pavgw.
;; NOTE(review): extraction has collapsed newlines and stripped the
;; angle-bracketed iterator suffixes (the expander names end in "_" and
;; conditions read "TARGET_SSE2 && && ", and "operands[] = CONST1_RTX(mode)"
;; is missing its index and <ssedoublemode>-style brackets).  The text must
;; be reconciled with the canonical machine description - TODO confirm.
(define_insn "vec_set_0" [(set (match_operand:VI8_AVX_AVX512F 0 "register_operand" "=v,v") (vec_merge:VI8_AVX_AVX512F (vec_duplicate:VI8_AVX_AVX512F (match_operand: 2 "nonimmediate_operand" "r,vm")) (match_operand:VI8_AVX_AVX512F 1 "const0_operand" "C,C") (const_int 1)))] "TARGET_AVX" "vmovq\t{%2, %x0|%x0, %2}" [(set_attr "isa" "x64,*") (set_attr "type" "ssemov") (set_attr "prefix_rex" "1,*") (set_attr "prefix" "maybe_evex") (set_attr "mode" "TI") (set (attr "preferred_for_speed") (cond [(eq_attr "alternative" "0") (symbol_ref "TARGET_INTER_UNIT_MOVES_TO_VEC") ] (symbol_ref "true")))]) (define_expand "vec_unpacks_lo_" [(match_operand: 0 "register_operand") (match_operand:VI124_AVX2_24_AVX512F_1_AVX512BW 1 "register_operand")] "TARGET_SSE2" "ix86_expand_sse_unpack (operands[0], operands[1], false, false); DONE;") (define_expand "vec_unpacks_hi_" [(match_operand: 0 "register_operand") (match_operand:VI124_AVX2_24_AVX512F_1_AVX512BW 1 "register_operand")] "TARGET_SSE2" "ix86_expand_sse_unpack (operands[0], operands[1], false, true); DONE;") (define_expand "vec_unpacku_lo_" [(match_operand: 0 "register_operand") (match_operand:VI124_AVX2_24_AVX512F_1_AVX512BW 1 "register_operand")] "TARGET_SSE2" "ix86_expand_sse_unpack (operands[0], operands[1], true, false); DONE;") (define_expand "vec_unpacks_sbool_lo_qi" [(match_operand:QI 0 "register_operand") (match_operand:QI 1 "register_operand") (match_operand:QI 2 "const_int_operand")] "TARGET_AVX512F" { if (INTVAL (operands[2]) != 8 && INTVAL (operands[2]) != 4) FAIL; emit_move_insn (operands[0], operands[1]); DONE; }) (define_expand "vec_unpacks_lo_hi" [(set (subreg:HI (match_operand:QI 0 "register_operand") 0) (match_operand:HI 1 "register_operand"))] "TARGET_AVX512F") (define_expand "vec_unpacks_lo_si" [(set (match_operand:HI 0 "register_operand") (subreg:HI (match_operand:SI 1 "register_operand") 0))] "TARGET_AVX512F") (define_expand "vec_unpacks_lo_di" [(set (match_operand:SI 0 "register_operand") (subreg:SI
(match_operand:DI 1 "register_operand") 0))] "TARGET_AVX512BW") (define_expand "vec_unpacku_hi_" [(match_operand: 0 "register_operand") (match_operand:VI124_AVX2_24_AVX512F_1_AVX512BW 1 "register_operand")] "TARGET_SSE2" "ix86_expand_sse_unpack (operands[0], operands[1], true, true); DONE;") (define_expand "vec_unpacks_sbool_hi_qi" [(match_operand:QI 0 "register_operand") (match_operand:QI 1 "register_operand") (match_operand:QI 2 "const_int_operand")] "TARGET_AVX512F" { HOST_WIDE_INT nunits = INTVAL (operands[2]); if (nunits != 8 && nunits != 4) FAIL; if (TARGET_AVX512DQ) emit_insn (gen_klshiftrtqi (operands[0], operands[1], GEN_INT (nunits / 2))); else { rtx tem = gen_reg_rtx (HImode); emit_insn (gen_klshiftrthi (tem, lowpart_subreg (HImode, operands[1], QImode), GEN_INT (nunits / 2))); emit_move_insn (operands[0], lowpart_subreg (QImode, tem, HImode)); } DONE; }) (define_expand "vec_unpacks_hi_hi" [(parallel [(set (subreg:HI (match_operand:QI 0 "register_operand") 0) (lshiftrt:HI (match_operand:HI 1 "register_operand") (const_int 8))) (unspec [(const_int 0)] UNSPEC_MASKOP)])] "TARGET_AVX512F") (define_expand "vec_unpacks_hi_" [(parallel [(set (subreg:SWI48x (match_operand: 0 "register_operand") 0) (lshiftrt:SWI48x (match_operand:SWI48x 1 "register_operand") (match_dup 2))) (unspec [(const_int 0)] UNSPEC_MASKOP)])] "TARGET_AVX512BW" "operands[2] = GEN_INT (GET_MODE_BITSIZE (mode));") ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; Miscellaneous ;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define_expand "_uavg3" [(set (match_operand:VI12_AVX2_AVX512BW 0 "register_operand") (truncate:VI12_AVX2_AVX512BW (lshiftrt: (plus: (plus: (zero_extend: (match_operand:VI12_AVX2_AVX512BW 1 "vector_operand")) (zero_extend: (match_operand:VI12_AVX2_AVX512BW 2 "vector_operand"))) (match_dup )) (const_int 1))))] "TARGET_SSE2 && && " { operands[] = CONST1_RTX(mode); ix86_fixup_binary_operands_no_copy (PLUS, mode, operands); })
(define_insn "*_uavg3" [(set (match_operand:VI12_AVX2_AVX512BW 0 "register_operand" "=x,") (truncate:VI12_AVX2_AVX512BW (lshiftrt: (plus: (plus: (zero_extend: (match_operand:VI12_AVX2_AVX512BW 1 "vector_operand" "%0,")) (zero_extend: (match_operand:VI12_AVX2_AVX512BW 2 "vector_operand" "xBm,m"))) (match_operand: "const1_operand")) (const_int 1))))] "TARGET_SSE2 && && && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "@ pavg\t{%2, %0|%0, %2} vpavg\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseiadd") (set_attr "prefix_data16" "1,*") (set_attr "prefix" "orig,") (set_attr "mode" "")]) ;; The correct representation for this is absolutely enormous, and ;; surely not generally useful. (define_insn "_psadbw" [(set (match_operand:VI8_AVX2_AVX512BW 0 "register_operand" "=x,YW") (unspec:VI8_AVX2_AVX512BW [(match_operand: 1 "register_operand" "0,YW") (match_operand: 2 "vector_operand" "xBm,YWm")] UNSPEC_PSADBW))] "TARGET_SSE2" "@ psadbw\t{%2, %0|%0, %2} vpsadbw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseiadd") (set_attr "atom_unit" "simul") (set_attr "prefix_data16" "1,*") (set_attr "prefix" "orig,maybe_evex") (set_attr "mode" "")]) (define_insn "_movmsk" [(set (match_operand:SI 0 "register_operand" "=r") (unspec:SI [(match_operand:VF_128_256 1 "register_operand" "x")] UNSPEC_MOVMSK))] "TARGET_SSE" "%vmovmsk\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "maybe_vex") (set_attr "mode" "")]) (define_insn "*_movmsk_ext" [(set (match_operand:DI 0 "register_operand" "=r") (any_extend:DI (unspec:SI [(match_operand:VF_128_256 1 "register_operand" "x")] UNSPEC_MOVMSK)))] "TARGET_64BIT && TARGET_SSE" "%vmovmsk\t{%1, %k0|%k0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "maybe_vex") (set_attr "mode" "")]) (define_insn_and_split "*_movmsk_lt" [(set (match_operand:SI 0 "register_operand" "=r") (unspec:SI [(lt:VF_128_256 (match_operand: 1 "register_operand" "x") (match_operand: 2 "const0_operand" 
"C"))] UNSPEC_MOVMSK))] "TARGET_SSE" "#" "&& reload_completed" [(set (match_dup 0) (unspec:SI [(match_dup 1)] UNSPEC_MOVMSK))] "operands[1] = gen_lowpart (mode, operands[1]);" [(set_attr "type" "ssemov") (set_attr "prefix" "maybe_vex") (set_attr "mode" "")]) (define_insn_and_split "*_movmsk_ext_lt" [(set (match_operand:DI 0 "register_operand" "=r") (any_extend:DI (unspec:SI [(lt:VF_128_256 (match_operand: 1 "register_operand" "x") (match_operand: 2 "const0_operand" "C"))] UNSPEC_MOVMSK)))] "TARGET_64BIT && TARGET_SSE" "#" "&& reload_completed" [(set (match_dup 0) (any_extend:DI (unspec:SI [(match_dup 1)] UNSPEC_MOVMSK)))] "operands[1] = gen_lowpart (mode, operands[1]);" [(set_attr "type" "ssemov") (set_attr "prefix" "maybe_vex") (set_attr "mode" "")]) (define_insn_and_split "*_movmsk_shift" [(set (match_operand:SI 0 "register_operand" "=r") (unspec:SI [(subreg:VF_128_256 (ashiftrt: (match_operand: 1 "register_operand" "x") (match_operand:QI 2 "const_int_operand" "n")) 0)] UNSPEC_MOVMSK))] "TARGET_SSE" "#" "&& reload_completed" [(set (match_dup 0) (unspec:SI [(match_dup 1)] UNSPEC_MOVMSK))] "operands[1] = gen_lowpart (mode, operands[1]);" [(set_attr "type" "ssemov") (set_attr "prefix" "maybe_vex") (set_attr "mode" "")]) (define_insn_and_split "*_movmsk_ext_shift" [(set (match_operand:DI 0 "register_operand" "=r") (any_extend:DI (unspec:SI [(subreg:VF_128_256 (ashiftrt: (match_operand: 1 "register_operand" "x") (match_operand:QI 2 "const_int_operand" "n")) 0)] UNSPEC_MOVMSK)))] "TARGET_64BIT && TARGET_SSE" "#" "&& reload_completed" [(set (match_dup 0) (any_extend:DI (unspec:SI [(match_dup 1)] UNSPEC_MOVMSK)))] "operands[1] = gen_lowpart (mode, operands[1]);" [(set_attr "type" "ssemov") (set_attr "prefix" "maybe_vex") (set_attr "mode" "")]) (define_insn "_pmovmskb" [(set (match_operand:SI 0 "register_operand" "=r") (unspec:SI [(match_operand:VI1_AVX2 1 "register_operand" "x")] UNSPEC_MOVMSK))] "TARGET_SSE2" "%vpmovmskb\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") 
(set (attr "prefix_data16") (if_then_else (match_test "TARGET_AVX") (const_string "*") (const_string "1"))) (set_attr "prefix" "maybe_vex") (set_attr "mode" "SI")]) (define_insn "*_pmovmskb_zext" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (unspec:SI [(match_operand:VI1_AVX2 1 "register_operand" "x")] UNSPEC_MOVMSK)))] "TARGET_64BIT && TARGET_SSE2" "%vpmovmskb\t{%1, %k0|%k0, %1}" [(set_attr "type" "ssemov") (set (attr "prefix_data16") (if_then_else (match_test "TARGET_AVX") (const_string "*") (const_string "1"))) (set_attr "prefix" "maybe_vex") (set_attr "mode" "SI")]) (define_insn "*sse2_pmovmskb_ext" [(set (match_operand:DI 0 "register_operand" "=r") (sign_extend:DI (unspec:SI [(match_operand:V16QI 1 "register_operand" "x")] UNSPEC_MOVMSK)))] "TARGET_64BIT && TARGET_SSE2" "%vpmovmskb\t{%1, %k0|%k0, %1}" [(set_attr "type" "ssemov") (set (attr "prefix_data16") (if_then_else (match_test "TARGET_AVX") (const_string "*") (const_string "1"))) (set_attr "prefix" "maybe_vex") (set_attr "mode" "SI")]) (define_insn_and_split "*sse2_pmovskb_zexthisi" [(set (match_operand:SI 0 "register_operand") (zero_extend:SI (subreg:HI (unspec:SI [(match_operand:V16QI 1 "register_operand")] UNSPEC_MOVMSK) 0)))] "TARGET_SSE2 && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (unspec:SI [(match_dup 1)] UNSPEC_MOVMSK))]) (define_split [(set (match_operand:SI 0 "register_operand") (zero_extend:SI (not:HI (subreg:HI (unspec:SI [(match_operand:V16QI 1 "register_operand")] UNSPEC_MOVMSK) 0))))] "TARGET_SSE2" [(set (match_dup 2) (unspec:SI [(match_dup 1)] UNSPEC_MOVMSK)) (set (match_dup 0) (xor:SI (match_dup 2) (const_int 65535)))] "operands[2] = gen_reg_rtx (SImode);") (define_split [(set (match_operand:SI 0 "register_operand") (unspec:SI [(not:VI1_AVX2 (match_operand:VI1_AVX2 1 "register_operand"))] UNSPEC_MOVMSK))] "TARGET_SSE2" [(set (match_dup 2) (unspec:SI [(match_dup 1)] UNSPEC_MOVMSK)) (set (match_dup 0) (match_dup 3))] { operands[2] = gen_reg_rtx 
(SImode); if (GET_MODE_NUNITS (mode) == 32) operands[3] = gen_rtx_NOT (SImode, operands[2]); else { operands[3] = gen_int_mode ((HOST_WIDE_INT_1 << GET_MODE_NUNITS (mode)) - 1, SImode); operands[3] = gen_rtx_XOR (SImode, operands[2], operands[3]); } }) (define_split [(set (match_operand:SI 0 "register_operand") (unspec:SI [(subreg:VI1_AVX2 (not (match_operand 1 "register_operand")) 0)] UNSPEC_MOVMSK))] "TARGET_SSE2 && GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_VECTOR_INT && GET_MODE_SIZE (GET_MODE (operands[1])) == " [(set (match_dup 2) (unspec:SI [(match_dup 1)] UNSPEC_MOVMSK)) (set (match_dup 0) (match_dup 3))] { operands[2] = gen_reg_rtx (SImode); operands[1] = gen_lowpart (mode, operands[1]); if (GET_MODE_NUNITS (mode) == 32) operands[3] = gen_rtx_NOT (SImode, operands[2]); else { operands[3] = gen_int_mode ((HOST_WIDE_INT_1 << GET_MODE_NUNITS (mode)) - 1, SImode); operands[3] = gen_rtx_XOR (SImode, operands[2], operands[3]); } }) (define_insn_and_split "*_pmovmskb_lt" [(set (match_operand:SI 0 "register_operand" "=r") (unspec:SI [(lt:VI1_AVX2 (match_operand:VI1_AVX2 1 "register_operand" "x") (match_operand:VI1_AVX2 2 "const0_operand" "C"))] UNSPEC_MOVMSK))] "TARGET_SSE2" "#" "&& 1" [(set (match_dup 0) (unspec:SI [(match_dup 1)] UNSPEC_MOVMSK))] "" [(set_attr "type" "ssemov") (set (attr "prefix_data16") (if_then_else (match_test "TARGET_AVX") (const_string "*") (const_string "1"))) (set_attr "prefix" "maybe_vex") (set_attr "mode" "SI")]) (define_insn_and_split "*_pmovmskb_zext_lt" [(set (match_operand:DI 0 "register_operand" "=r") (zero_extend:DI (unspec:SI [(lt:VI1_AVX2 (match_operand:VI1_AVX2 1 "register_operand" "x") (match_operand:VI1_AVX2 2 "const0_operand" "C"))] UNSPEC_MOVMSK)))] "TARGET_64BIT && TARGET_SSE2" "#" "&& 1" [(set (match_dup 0) (zero_extend:DI (unspec:SI [(match_dup 1)] UNSPEC_MOVMSK)))] "" [(set_attr "type" "ssemov") (set (attr "prefix_data16") (if_then_else (match_test "TARGET_AVX") (const_string "*") (const_string "1"))) (set_attr 
"prefix" "maybe_vex") (set_attr "mode" "SI")]) (define_insn_and_split "*sse2_pmovmskb_ext_lt" [(set (match_operand:DI 0 "register_operand" "=r") (sign_extend:DI (unspec:SI [(lt:V16QI (match_operand:V16QI 1 "register_operand" "x") (match_operand:V16QI 2 "const0_operand" "C"))] UNSPEC_MOVMSK)))] "TARGET_64BIT && TARGET_SSE2" "#" "&& 1" [(set (match_dup 0) (sign_extend:DI (unspec:SI [(match_dup 1)] UNSPEC_MOVMSK)))] "" [(set_attr "type" "ssemov") (set (attr "prefix_data16") (if_then_else (match_test "TARGET_AVX") (const_string "*") (const_string "1"))) (set_attr "prefix" "maybe_vex") (set_attr "mode" "SI")]) (define_expand "sse2_maskmovdqu" [(set (match_operand:V16QI 0 "memory_operand") (unspec:V16QI [(match_operand:V16QI 1 "register_operand") (match_operand:V16QI 2 "register_operand") (match_dup 0)] UNSPEC_MASKMOV))] "TARGET_SSE2") (define_insn "*sse2_maskmovdqu" [(set (mem:V16QI (match_operand:P 0 "register_operand" "D")) (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "x") (match_operand:V16QI 2 "register_operand" "x") (mem:V16QI (match_dup 0))] UNSPEC_MASKMOV))] "TARGET_SSE2" { /* We can't use %^ here due to ASM_OUTPUT_OPCODE processing that requires %v to be at the beginning of the opcode name. */ if (Pmode != word_mode) fputs ("\taddr32", asm_out_file); return "%vmaskmovdqu\t{%2, %1|%1, %2}"; } [(set_attr "type" "ssemov") (set_attr "prefix_data16" "1") (set (attr "length_address") (symbol_ref ("Pmode != word_mode"))) ;; The implicit %rdi operand confuses default length_vex computation. 
(set (attr "length_vex") (symbol_ref ("3 + REX_SSE_REGNO_P (REGNO (operands[2]))"))) (set_attr "prefix" "maybe_vex") (set_attr "znver1_decode" "vector") (set_attr "mode" "TI")]) (define_insn "sse_ldmxcsr" [(unspec_volatile [(match_operand:SI 0 "memory_operand" "m")] UNSPECV_LDMXCSR)] "TARGET_SSE" "%vldmxcsr\t%0" [(set_attr "type" "sse") (set_attr "atom_sse_attr" "mxcsr") (set_attr "prefix" "maybe_vex") (set_attr "memory" "load")]) (define_insn "sse_stmxcsr" [(set (match_operand:SI 0 "memory_operand" "=m") (unspec_volatile:SI [(const_int 0)] UNSPECV_STMXCSR))] "TARGET_SSE" "%vstmxcsr\t%0" [(set_attr "type" "sse") (set_attr "atom_sse_attr" "mxcsr") (set_attr "prefix" "maybe_vex") (set_attr "memory" "store")]) (define_insn "sse2_clflush" [(unspec_volatile [(match_operand 0 "address_operand" "p")] UNSPECV_CLFLUSH)] "TARGET_SSE2" "clflush\t%a0" [(set_attr "type" "sse") (set_attr "atom_sse_attr" "fence") (set_attr "memory" "unknown")]) ;; As per AMD and Intel ISA manuals, the first operand is extensions ;; and it goes to %ecx. The second operand received is hints and it goes ;; to %eax. (define_insn "sse3_mwait" [(unspec_volatile [(match_operand:SI 0 "register_operand" "c") (match_operand:SI 1 "register_operand" "a")] UNSPECV_MWAIT)] "TARGET_MWAIT" ;; 64bit version is "mwait %rax,%rcx". But only lower 32bits are used. ;; Since 32bit register operands are implicitly zero extended to 64bit, ;; we only need to set up 32bit registers. "mwait" [(set_attr "length" "3")]) (define_insn "@sse3_monitor_" [(unspec_volatile [(match_operand:P 0 "register_operand" "a") (match_operand:SI 1 "register_operand" "c") (match_operand:SI 2 "register_operand" "d")] UNSPECV_MONITOR)] "TARGET_MWAIT" ;; 64bit version is "monitor %rax,%rcx,%rdx". But only lower 32bits in ;; RCX and RDX are used. Since 32bit register operands are implicitly ;; zero extended to 64bit, we only need to set up 32bit registers. 
"%^monitor" [(set (attr "length") (symbol_ref ("(Pmode != word_mode) + 3")))]) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; SSSE3 instructions ;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define_code_iterator ssse3_plusminus [plus ss_plus minus ss_minus]) (define_insn "avx2_phwv16hi3" [(set (match_operand:V16HI 0 "register_operand" "=x") (ssse3_plusminus:V16HI (vec_select:V16HI (vec_concat:V32HI (match_operand:V16HI 1 "register_operand" "x") (match_operand:V16HI 2 "nonimmediate_operand" "xm")) (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6) (const_int 16) (const_int 18) (const_int 20) (const_int 22) (const_int 8) (const_int 10) (const_int 12) (const_int 14) (const_int 24) (const_int 26) (const_int 28) (const_int 30)])) (vec_select:V16HI (vec_concat:V32HI (match_dup 1) (match_dup 2)) (parallel [(const_int 1) (const_int 3) (const_int 5) (const_int 7) (const_int 17) (const_int 19) (const_int 21) (const_int 23) (const_int 9) (const_int 11) (const_int 13) (const_int 15) (const_int 25) (const_int 27) (const_int 29) (const_int 31)]))))] "TARGET_AVX2" "vphw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sseiadd") (set_attr "prefix_extra" "1") (set_attr "prefix" "vex") (set_attr "mode" "OI")]) (define_insn "ssse3_phwv8hi3" [(set (match_operand:V8HI 0 "register_operand" "=x,x") (ssse3_plusminus:V8HI (vec_select:V8HI (vec_concat:V16HI (match_operand:V8HI 1 "register_operand" "0,x") (match_operand:V8HI 2 "vector_operand" "xBm,xm")) (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6) (const_int 8) (const_int 10) (const_int 12) (const_int 14)])) (vec_select:V8HI (vec_concat:V16HI (match_dup 1) (match_dup 2)) (parallel [(const_int 1) (const_int 3) (const_int 5) (const_int 7) (const_int 9) (const_int 11) (const_int 13) (const_int 15)]))))] "TARGET_SSSE3" "@ phw\t{%2, %0|%0, %2} vphw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseiadd") (set_attr "atom_unit" 
"complex") (set_attr "prefix_data16" "1,*") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,vex") (set_attr "mode" "TI")]) (define_insn_and_split "ssse3_phwv4hi3" [(set (match_operand:V4HI 0 "register_operand" "=y,x,Yv") (ssse3_plusminus:V4HI (vec_select:V4HI (vec_concat:V8HI (match_operand:V4HI 1 "register_operand" "0,0,Yv") (match_operand:V4HI 2 "register_mmxmem_operand" "ym,x,Yv")) (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6)])) (vec_select:V4HI (vec_concat:V8HI (match_dup 1) (match_dup 2)) (parallel [(const_int 1) (const_int 3) (const_int 5) (const_int 7)]))))] "(TARGET_MMX || TARGET_MMX_WITH_SSE) && TARGET_SSSE3" "@ phw\t{%2, %0|%0, %2} # #" "TARGET_SSSE3 && reload_completed && SSE_REGNO_P (REGNO (operands[0]))" [(const_int 0)] { /* Generate SSE version of the operation. */ rtx op0 = lowpart_subreg (V8HImode, operands[0], GET_MODE (operands[0])); rtx op1 = lowpart_subreg (V8HImode, operands[1], GET_MODE (operands[1])); rtx op2 = lowpart_subreg (V8HImode, operands[2], GET_MODE (operands[2])); emit_insn (gen_ssse3_phwv8hi3 (op0, op1, op2)); ix86_move_vector_high_sse_to_mmx (op0); DONE; } [(set_attr "mmx_isa" "native,sse_noavx,avx") (set_attr "type" "sseiadd") (set_attr "atom_unit" "complex") (set_attr "prefix_extra" "1") (set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)")) (set_attr "mode" "DI,TI,TI")]) (define_insn "avx2_phdv8si3" [(set (match_operand:V8SI 0 "register_operand" "=x") (plusminus:V8SI (vec_select:V8SI (vec_concat:V16SI (match_operand:V8SI 1 "register_operand" "x") (match_operand:V8SI 2 "nonimmediate_operand" "xm")) (parallel [(const_int 0) (const_int 2) (const_int 8) (const_int 10) (const_int 4) (const_int 6) (const_int 12) (const_int 14)])) (vec_select:V8SI (vec_concat:V16SI (match_dup 1) (match_dup 2)) (parallel [(const_int 1) (const_int 3) (const_int 9) (const_int 11) (const_int 5) (const_int 7) (const_int 13) (const_int 15)]))))] "TARGET_AVX2" "vphd\t{%2, %1, %0|%0, %1, %2}" [(set_attr 
"type" "sseiadd") (set_attr "prefix_extra" "1") (set_attr "prefix" "vex") (set_attr "mode" "OI")]) (define_insn "ssse3_phdv4si3" [(set (match_operand:V4SI 0 "register_operand" "=x,x") (plusminus:V4SI (vec_select:V4SI (vec_concat:V8SI (match_operand:V4SI 1 "register_operand" "0,x") (match_operand:V4SI 2 "vector_operand" "xBm,xm")) (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6)])) (vec_select:V4SI (vec_concat:V8SI (match_dup 1) (match_dup 2)) (parallel [(const_int 1) (const_int 3) (const_int 5) (const_int 7)]))))] "TARGET_SSSE3" "@ phd\t{%2, %0|%0, %2} vphd\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseiadd") (set_attr "atom_unit" "complex") (set_attr "prefix_data16" "1,*") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,vex") (set_attr "mode" "TI")]) (define_insn_and_split "ssse3_phdv2si3" [(set (match_operand:V2SI 0 "register_operand" "=y,x,Yv") (plusminus:V2SI (vec_select:V2SI (vec_concat:V4SI (match_operand:V2SI 1 "register_operand" "0,0,Yv") (match_operand:V2SI 2 "register_mmxmem_operand" "ym,x,Yv")) (parallel [(const_int 0) (const_int 2)])) (vec_select:V2SI (vec_concat:V4SI (match_dup 1) (match_dup 2)) (parallel [(const_int 1) (const_int 3)]))))] "(TARGET_MMX || TARGET_MMX_WITH_SSE) && TARGET_SSSE3" "@ phd\t{%2, %0|%0, %2} # #" "TARGET_SSSE3 && reload_completed && SSE_REGNO_P (REGNO (operands[0]))" [(const_int 0)] { /* Generate SSE version of the operation. 
*/ rtx op0 = lowpart_subreg (V4SImode, operands[0], GET_MODE (operands[0])); rtx op1 = lowpart_subreg (V4SImode, operands[1], GET_MODE (operands[1])); rtx op2 = lowpart_subreg (V4SImode, operands[2], GET_MODE (operands[2])); emit_insn (gen_ssse3_phdv4si3 (op0, op1, op2)); ix86_move_vector_high_sse_to_mmx (op0); DONE; } [(set_attr "mmx_isa" "native,sse_noavx,avx") (set_attr "type" "sseiadd") (set_attr "atom_unit" "complex") (set_attr "prefix_extra" "1") (set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)")) (set_attr "mode" "DI,TI,TI")]) (define_insn "avx2_pmaddubsw256" [(set (match_operand:V16HI 0 "register_operand" "=Yw") (ss_plus:V16HI (mult:V16HI (zero_extend:V16HI (vec_select:V16QI (match_operand:V32QI 1 "register_operand" "Yw") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6) (const_int 8) (const_int 10) (const_int 12) (const_int 14) (const_int 16) (const_int 18) (const_int 20) (const_int 22) (const_int 24) (const_int 26) (const_int 28) (const_int 30)]))) (sign_extend:V16HI (vec_select:V16QI (match_operand:V32QI 2 "nonimmediate_operand" "Ywm") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6) (const_int 8) (const_int 10) (const_int 12) (const_int 14) (const_int 16) (const_int 18) (const_int 20) (const_int 22) (const_int 24) (const_int 26) (const_int 28) (const_int 30)])))) (mult:V16HI (zero_extend:V16HI (vec_select:V16QI (match_dup 1) (parallel [(const_int 1) (const_int 3) (const_int 5) (const_int 7) (const_int 9) (const_int 11) (const_int 13) (const_int 15) (const_int 17) (const_int 19) (const_int 21) (const_int 23) (const_int 25) (const_int 27) (const_int 29) (const_int 31)]))) (sign_extend:V16HI (vec_select:V16QI (match_dup 2) (parallel [(const_int 1) (const_int 3) (const_int 5) (const_int 7) (const_int 9) (const_int 11) (const_int 13) (const_int 15) (const_int 17) (const_int 19) (const_int 21) (const_int 23) (const_int 25) (const_int 27) (const_int 29) (const_int 31)]))))))] "TARGET_AVX2" 
"vpmaddubsw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sseiadd") (set_attr "prefix_extra" "1") (set_attr "prefix" "vex") (set_attr "mode" "OI")]) ;; The correct representation for this is absolutely enormous, and ;; surely not generally useful. (define_insn "avx512bw_pmaddubsw512" [(set (match_operand:VI2_AVX512VL 0 "register_operand" "=v") (unspec:VI2_AVX512VL [(match_operand: 1 "register_operand" "v") (match_operand: 2 "nonimmediate_operand" "vm")] UNSPEC_PMADDUBSW512))] "TARGET_AVX512BW" "vpmaddubsw\t{%2, %1, %0|%0, %1, %2}"; [(set_attr "type" "sseiadd") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_insn "avx512bw_umulhrswv32hi3" [(set (match_operand:V32HI 0 "register_operand" "=v") (truncate:V32HI (lshiftrt:V32SI (plus:V32SI (lshiftrt:V32SI (mult:V32SI (sign_extend:V32SI (match_operand:V32HI 1 "nonimmediate_operand" "%v")) (sign_extend:V32SI (match_operand:V32HI 2 "nonimmediate_operand" "vm"))) (const_int 14)) (const_vector:V32HI [(const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1) (const_int 1)])) (const_int 1))))] "TARGET_AVX512BW" "vpmulhrsw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sseimul") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_insn "ssse3_pmaddubsw128" [(set (match_operand:V8HI 0 "register_operand" "=x,Yw") (ss_plus:V8HI (mult:V8HI (zero_extend:V8HI (vec_select:V8QI (match_operand:V16QI 1 "register_operand" "0,Yw") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6) (const_int 8) (const_int 10) (const_int 12) (const_int 14)]))) (sign_extend:V8HI (vec_select:V8QI (match_operand:V16QI 2 "vector_operand" "xBm,Ywm") (parallel 
[(const_int 0) (const_int 2) (const_int 4) (const_int 6) (const_int 8) (const_int 10) (const_int 12) (const_int 14)])))) (mult:V8HI (zero_extend:V8HI (vec_select:V8QI (match_dup 1) (parallel [(const_int 1) (const_int 3) (const_int 5) (const_int 7) (const_int 9) (const_int 11) (const_int 13) (const_int 15)]))) (sign_extend:V8HI (vec_select:V8QI (match_dup 2) (parallel [(const_int 1) (const_int 3) (const_int 5) (const_int 7) (const_int 9) (const_int 11) (const_int 13) (const_int 15)]))))))] "TARGET_SSSE3" "@ pmaddubsw\t{%2, %0|%0, %2} vpmaddubsw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseiadd") (set_attr "atom_unit" "simul") (set_attr "prefix_data16" "1,*") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,vex") (set_attr "mode" "TI")]) (define_insn "ssse3_pmaddubsw" [(set (match_operand:V4HI 0 "register_operand" "=y,x,Yv") (ss_plus:V4HI (mult:V4HI (zero_extend:V4HI (vec_select:V4QI (match_operand:V8QI 1 "register_operand" "0,0,Yv") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6)]))) (sign_extend:V4HI (vec_select:V4QI (match_operand:V8QI 2 "register_mmxmem_operand" "ym,x,Yv") (parallel [(const_int 0) (const_int 2) (const_int 4) (const_int 6)])))) (mult:V4HI (zero_extend:V4HI (vec_select:V4QI (match_dup 1) (parallel [(const_int 1) (const_int 3) (const_int 5) (const_int 7)]))) (sign_extend:V4HI (vec_select:V4QI (match_dup 2) (parallel [(const_int 1) (const_int 3) (const_int 5) (const_int 7)]))))))] "(TARGET_MMX || TARGET_MMX_WITH_SSE) && TARGET_SSSE3" "@ pmaddubsw\t{%2, %0|%0, %2} pmaddubsw\t{%2, %0|%0, %2} vpmaddubsw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "*,noavx,avx") (set_attr "mmx_isa" "native,*,*") (set_attr "type" "sseiadd") (set_attr "atom_unit" "simul") (set_attr "prefix_extra" "1") (set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)")) (set_attr "mode" "DI,TI,TI")]) (define_mode_iterator PMULHRSW [V8HI (V16HI "TARGET_AVX2")]) (define_expand "_pmulhrsw3_mask" [(set 
;; NOTE(review): throughout this region the machine description's
;; angle-bracket substitutions (mode iterators/attributes such as <mode>
;; and <ssedoublemode>, name prefixes such as <ssse3_avx2>, and condition
;; attributes) appear to have been stripped by whatever produced this
;; text -- hence dangling "&& &&" in insn conditions, empty constraint
;; alternatives like "=x," and empty (set_attr "mode" "").  The code is
;; left byte-identical below; reconcile against upstream
;; gcc/config/i386/sse.md before relying on these patterns.
;;
;; SSSE3 pmulhrsw: truncating rounded multiply-high,
;;   dst = ((sext(a) * sext(b) >> 14) + 1) >> 1  per lane.
;; Below: the tail of a masked AVX512BW+AVX512VL expander (its opening
;; (set ...) is above this chunk -- do not edit in isolation), the
;; generic and smulhrs expanders (both materialize the rounding constant
;; via CONST1_RTX and canonicalize operands with
;; ix86_fixup_binary_operands_no_copy), the two-alternative (noavx/avx)
;; VI2_AVX2 insn, the MMX V4HI expanders/insn (with native-MMX "y"
;; alternative), and the start of the partial-vector V2HI expander
;; (continues on the next physical line).
(match_operand:PMULHRSW 0 "register_operand") (vec_merge:PMULHRSW (truncate:PMULHRSW (lshiftrt: (plus: (lshiftrt: (mult: (sign_extend: (match_operand:PMULHRSW 1 "nonimmediate_operand")) (sign_extend: (match_operand:PMULHRSW 2 "nonimmediate_operand"))) (const_int 14)) (match_dup 5)) (const_int 1))) (match_operand:PMULHRSW 3 "register_operand") (match_operand: 4 "register_operand")))] "TARGET_AVX512BW && TARGET_AVX512VL" { operands[5] = CONST1_RTX(mode); ix86_fixup_binary_operands_no_copy (MULT, mode, operands); }) (define_expand "_pmulhrsw3" [(set (match_operand:PMULHRSW 0 "register_operand") (truncate:PMULHRSW (lshiftrt: (plus: (lshiftrt: (mult: (sign_extend: (match_operand:PMULHRSW 1 "nonimmediate_operand")) (sign_extend: (match_operand:PMULHRSW 2 "nonimmediate_operand"))) (const_int 14)) (match_dup 3)) (const_int 1))))] "TARGET_SSSE3" { operands[3] = CONST1_RTX(mode); ix86_fixup_binary_operands_no_copy (MULT, mode, operands); }) (define_expand "smulhrs3" [(set (match_operand:VI2_AVX2 0 "register_operand") (truncate:VI2_AVX2 (lshiftrt: (plus: (lshiftrt: (mult: (sign_extend: (match_operand:VI2_AVX2 1 "nonimmediate_operand")) (sign_extend: (match_operand:VI2_AVX2 2 "nonimmediate_operand"))) (const_int 14)) (match_dup 3)) (const_int 1))))] "TARGET_SSSE3" { operands[3] = CONST1_RTX(mode); ix86_fixup_binary_operands_no_copy (MULT, mode, operands); }) (define_insn "*_pmulhrsw3" [(set (match_operand:VI2_AVX2 0 "register_operand" "=x,") (truncate:VI2_AVX2 (lshiftrt: (plus: (lshiftrt: (mult: (sign_extend: (match_operand:VI2_AVX2 1 "vector_operand" "%0,")) (sign_extend: (match_operand:VI2_AVX2 2 "vector_operand" "xBm,m"))) (const_int 14)) (match_operand:VI2_AVX2 3 "const1_operand")) (const_int 1))))] "TARGET_SSSE3 && && && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "@ pmulhrsw\t{%2, %0|%0, %2} vpmulhrsw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseimul") (set_attr "prefix_data16" "1,*") (set_attr "prefix_extra" "1") (set_attr "prefix" 
"orig,maybe_evex") (set_attr "mode" "")]) (define_expand "smulhrsv4hi3" [(set (match_operand:V4HI 0 "register_operand") (truncate:V4HI (lshiftrt:V4SI (plus:V4SI (lshiftrt:V4SI (mult:V4SI (sign_extend:V4SI (match_operand:V4HI 1 "register_operand")) (sign_extend:V4SI (match_operand:V4HI 2 "register_operand"))) (const_int 14)) (match_dup 3)) (const_int 1))))] "TARGET_MMX_WITH_SSE && TARGET_SSSE3" "operands[3] = CONST1_RTX(V4HImode);") (define_expand "ssse3_pmulhrswv4hi3" [(set (match_operand:V4HI 0 "register_operand") (truncate:V4HI (lshiftrt:V4SI (plus:V4SI (lshiftrt:V4SI (mult:V4SI (sign_extend:V4SI (match_operand:V4HI 1 "register_mmxmem_operand")) (sign_extend:V4SI (match_operand:V4HI 2 "register_mmxmem_operand"))) (const_int 14)) (match_dup 3)) (const_int 1))))] "(TARGET_MMX || TARGET_MMX_WITH_SSE) && TARGET_SSSE3" { operands[3] = CONST1_RTX(V4HImode); ix86_fixup_binary_operands_no_copy (MULT, V4HImode, operands); }) (define_insn "*ssse3_pmulhrswv4hi3" [(set (match_operand:V4HI 0 "register_operand" "=y,x,Yv") (truncate:V4HI (lshiftrt:V4SI (plus:V4SI (lshiftrt:V4SI (mult:V4SI (sign_extend:V4SI (match_operand:V4HI 1 "register_mmxmem_operand" "%0,0,Yv")) (sign_extend:V4SI (match_operand:V4HI 2 "register_mmxmem_operand" "ym,x,Yv"))) (const_int 14)) (match_operand:V4HI 3 "const1_operand")) (const_int 1))))] "(TARGET_MMX || TARGET_MMX_WITH_SSE) && TARGET_SSSE3 && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "@ pmulhrsw\t{%2, %0|%0, %2} pmulhrsw\t{%2, %0|%0, %2} vpmulhrsw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "*,noavx,avx") (set_attr "mmx_isa" "native,*,*") (set_attr "type" "sseimul") (set_attr "prefix_extra" "1") (set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)")) (set_attr "mode" "DI,TI,TI")]) (define_expand "smulhrsv2hi3" [(set (match_operand:V2HI 0 "register_operand") (truncate:V2HI (lshiftrt:V2SI (plus:V2SI (lshiftrt:V2SI (mult:V2SI (sign_extend:V2SI (match_operand:V2HI 1 "register_operand")) (sign_extend:V2SI (match_operand:V2HI 2 
"register_operand"))) (const_int 14)) (match_dup 3)) (const_int 1))))] "TARGET_SSSE3" "operands[3] = CONST1_RTX(V2HImode);") (define_insn "*smulhrsv2hi3" [(set (match_operand:V2HI 0 "register_operand" "=x,Yv") (truncate:V2HI (lshiftrt:V2SI (plus:V2SI (lshiftrt:V2SI (mult:V2SI (sign_extend:V2SI (match_operand:V2HI 1 "register_operand" "%0,Yv")) (sign_extend:V2SI (match_operand:V2HI 2 "register_operand" "x,Yv"))) (const_int 14)) (match_operand:V2HI 3 "const1_operand")) (const_int 1))))] "TARGET_SSSE3 && !(MEM_P (operands[1]) && MEM_P (operands[2]))" "@ pmulhrsw\t{%2, %0|%0, %2} vpmulhrsw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sseimul") (set_attr "prefix_extra" "1") (set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)")) (set_attr "mode" "TI")]) (define_insn "_pshufb3" [(set (match_operand:VI1_AVX512 0 "register_operand" "=x,") (unspec:VI1_AVX512 [(match_operand:VI1_AVX512 1 "register_operand" "0,") (match_operand:VI1_AVX512 2 "vector_operand" "xBm,m")] UNSPEC_PSHUFB))] "TARGET_SSSE3 && && " "@ pshufb\t{%2, %0|%0, %2} vpshufb\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sselog1") (set_attr "prefix_data16" "1,*") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,maybe_evex") (set_attr "btver2_decode" "vector") (set_attr "mode" "")]) (define_expand "ssse3_pshufbv8qi3" [(parallel [(set (match_operand:V8QI 0 "register_operand") (unspec:V8QI [(match_operand:V8QI 1 "register_operand") (match_operand:V8QI 2 "register_mmxmem_operand") (match_dup 3)] UNSPEC_PSHUFB)) (clobber (match_scratch:V4SI 4))])] "(TARGET_MMX || TARGET_MMX_WITH_SSE) && TARGET_SSSE3" { operands[3] = ix86_build_const_vector (V4SImode, true, gen_int_mode (0xf7f7f7f7, SImode)); }) (define_insn_and_split "*ssse3_pshufbv8qi3" [(set (match_operand:V8QI 0 "register_operand" "=y,x,Yv") (unspec:V8QI [(match_operand:V8QI 1 "register_operand" "0,0,Yv") (match_operand:V8QI 2 "register_mmxmem_operand" "ym,x,Yv") 
;; NOTE(review): angle-bracket iterator/attribute tokens (<...>) look
;; stripped from this region (e.g. the bare "( == 64 || TARGET_AVX512VL)"
;; and empty (set_attr "mode" "") below); compare against upstream
;; gcc/config/i386/sse.md before editing.  This comment sits inside the
;; *ssse3_pshufbv8qi3 pattern, whose opening is on the previous physical
;; line; ';' comments are legal anywhere whitespace is in md files.
;;
;; Below: the MMX pshufb insn-and-split (after reload into SSE regs it
;; emulates MMX pshufb by AND-masking bit 3 of each shuffle-control
;; byte, per the inline comment -- cf. the 0xf7f7f7f7 constant built by
;; its expander), the SSSE3/AVX2 psign insns (vector VI124_AVX2 and MMX
;; MMXMODEI forms), the masked AVX512BW vpalignr insn and the generic
;; palignr insn (both convert the bit-count immediate to bytes with
;; INTVAL/8 before printing), and the DI palignr insn-and-split that
;; emulates MMX palignr with an SSE psrldq on a V1TI value, swapping the
;; two 64-bit halves of the concat in the non-AVX case.
(match_operand:V4SI 4 "reg_or_const_vector_operand" "i,3,3")] UNSPEC_PSHUFB)) (clobber (match_scratch:V4SI 3 "=X,&x,&Yv"))] "(TARGET_MMX || TARGET_MMX_WITH_SSE) && TARGET_SSSE3" "@ pshufb\t{%2, %0|%0, %2} # #" "TARGET_SSSE3 && reload_completed && SSE_REGNO_P (REGNO (operands[0]))" [(set (match_dup 3) (and:V4SI (match_dup 3) (match_dup 2))) (set (match_dup 0) (unspec:V16QI [(match_dup 1) (match_dup 4)] UNSPEC_PSHUFB))] { /* Emulate MMX version of pshufb with SSE version by masking out the bit 3 of the shuffle control byte. */ operands[0] = lowpart_subreg (V16QImode, operands[0], GET_MODE (operands[0])); operands[1] = lowpart_subreg (V16QImode, operands[1], GET_MODE (operands[1])); operands[2] = lowpart_subreg (V4SImode, operands[2], GET_MODE (operands[2])); operands[4] = lowpart_subreg (V16QImode, operands[3], GET_MODE (operands[3])); } [(set_attr "mmx_isa" "native,sse_noavx,avx") (set_attr "prefix_extra" "1") (set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)")) (set_attr "mode" "DI,TI,TI")]) (define_insn "_psign3" [(set (match_operand:VI124_AVX2 0 "register_operand" "=x,x") (unspec:VI124_AVX2 [(match_operand:VI124_AVX2 1 "register_operand" "0,x") (match_operand:VI124_AVX2 2 "vector_operand" "xBm,xm")] UNSPEC_PSIGN))] "TARGET_SSSE3" "@ psign\t{%2, %0|%0, %2} vpsign\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sselog1") (set_attr "prefix_data16" "1,*") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,vex") (set_attr "mode" "")]) (define_insn "ssse3_psign3" [(set (match_operand:MMXMODEI 0 "register_operand" "=y,x,Yv") (unspec:MMXMODEI [(match_operand:MMXMODEI 1 "register_operand" "0,0,Yv") (match_operand:MMXMODEI 2 "register_mmxmem_operand" "ym,x,Yv")] UNSPEC_PSIGN))] "(TARGET_MMX || TARGET_MMX_WITH_SSE) && TARGET_SSSE3" "@ psign\t{%2, %0|%0, %2} psign\t{%2, %0|%0, %2} vpsign\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "*,noavx,avx") (set_attr "mmx_isa" "native,*,*") (set_attr "type" "sselog1") (set_attr 
"prefix_extra" "1") (set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)")) (set_attr "mode" "DI,TI,TI")]) (define_insn "_palignr_mask" [(set (match_operand:VI1_AVX512 0 "register_operand" "=v") (vec_merge:VI1_AVX512 (unspec:VI1_AVX512 [(match_operand:VI1_AVX512 1 "register_operand" "v") (match_operand:VI1_AVX512 2 "nonimmediate_operand" "vm") (match_operand:SI 3 "const_0_to_255_mul_8_operand" "n")] UNSPEC_PALIGNR) (match_operand:VI1_AVX512 4 "nonimm_or_0_operand" "0C") (match_operand: 5 "register_operand" "Yk")))] "TARGET_AVX512BW && ( == 64 || TARGET_AVX512VL)" { operands[3] = GEN_INT (INTVAL (operands[3]) / 8); return "vpalignr\t{%3, %2, %1, %0%{%5%}%N4|%0%{%5%}%N4, %1, %2, %3}"; } [(set_attr "type" "sseishft") (set_attr "atom_unit" "sishuf") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "_palignr" [(set (match_operand:SSESCALARMODE 0 "register_operand" "=x,") (unspec:SSESCALARMODE [(match_operand:SSESCALARMODE 1 "register_operand" "0,") (match_operand:SSESCALARMODE 2 "vector_operand" "xBm,m") (match_operand:SI 3 "const_0_to_255_mul_8_operand" "n,n")] UNSPEC_PALIGNR))] "TARGET_SSSE3" { operands[3] = GEN_INT (INTVAL (operands[3]) / 8); switch (which_alternative) { case 0: return "palignr\t{%3, %2, %0|%0, %2, %3}"; case 1: return "vpalignr\t{%3, %2, %1, %0|%0, %1, %2, %3}"; default: gcc_unreachable (); } } [(set_attr "isa" "noavx,avx") (set_attr "type" "sseishft") (set_attr "atom_unit" "sishuf") (set_attr "prefix_data16" "1,*") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "orig,vex") (set_attr "mode" "")]) (define_insn_and_split "ssse3_palignrdi" [(set (match_operand:DI 0 "register_operand" "=y,x,Yv") (unspec:DI [(match_operand:DI 1 "register_operand" "0,0,Yv") (match_operand:DI 2 "register_mmxmem_operand" "ym,x,Yv") (match_operand:SI 3 "const_0_to_255_mul_8_operand" "n,n,n")] UNSPEC_PALIGNR))] "(TARGET_MMX || 
TARGET_MMX_WITH_SSE) && TARGET_SSSE3" { switch (which_alternative) { case 0: operands[3] = GEN_INT (INTVAL (operands[3]) / 8); return "palignr\t{%3, %2, %0|%0, %2, %3}"; case 1: case 2: return "#"; default: gcc_unreachable (); } } "TARGET_SSSE3 && reload_completed && SSE_REGNO_P (REGNO (operands[0]))" [(set (match_dup 0) (lshiftrt:V1TI (match_dup 0) (match_dup 3)))] { /* Emulate MMX palignrdi with SSE psrldq. */ rtx op0 = lowpart_subreg (V2DImode, operands[0], GET_MODE (operands[0])); if (TARGET_AVX) emit_insn (gen_vec_concatv2di (op0, operands[2], operands[1])); else { /* NB: SSE can only concatenate OP0 and OP1 to OP0. */ emit_insn (gen_vec_concatv2di (op0, operands[1], operands[2])); /* Swap bits 0:63 with bits 64:127. */ rtx mask = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (4, GEN_INT (2), GEN_INT (3), GEN_INT (0), GEN_INT (1))); rtx op1 = lowpart_subreg (V4SImode, op0, GET_MODE (op0)); rtx op2 = gen_rtx_VEC_SELECT (V4SImode, op1, mask); emit_insn (gen_rtx_SET (op1, op2)); } operands[0] = lowpart_subreg (V1TImode, op0, GET_MODE (op0)); } [(set_attr "mmx_isa" "native,sse_noavx,avx") (set_attr "type" "sseishft") (set_attr "atom_unit" "sishuf") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)")) (set_attr "mode" "DI,TI,TI")]) ;; Mode iterator to handle singularity w/ absence of V2DI and V4DI ;; modes for abs instruction on pre AVX-512 targets. 
;; NOTE(review): <...> iterator/attribute tokens look stripped here
;; (bare "=" constraint, empty (set_attr "mode" ""), "mode ==" C tests
;; missing their leading <MODE>); compare upstream gcc/config/i386/sse.md
;; before editing.
;;
;; Integer abs: an iterator whose V2DI/V4DI entries require AVX512VL
;; (per the comment on the previous line, pre-AVX512 has no packed
;; 64-bit abs), the %vpabs insn over that iterator, two masked AVX512
;; vec_merge variants (VI48 modes under AVX512F, VI12 under AVX512BW),
;; and the generic abs expander, which diverts to
;; ix86_expand_sse2_abs () when SSSE3 is unavailable or for V2DI/V4DI
;; without AVX512VL.
(define_mode_iterator VI1248_AVX512VL_AVX512BW [(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX2") V16QI (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX2") V8HI (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX2") V4SI (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")]) (define_insn "*abs2" [(set (match_operand:VI1248_AVX512VL_AVX512BW 0 "register_operand" "=") (abs:VI1248_AVX512VL_AVX512BW (match_operand:VI1248_AVX512VL_AVX512BW 1 "vector_operand" "Bm")))] "TARGET_SSSE3" "%vpabs\t{%1, %0|%0, %1}" [(set_attr "type" "sselog1") (set_attr "prefix_data16" "1") (set_attr "prefix_extra" "1") (set_attr "prefix" "maybe_vex") (set_attr "mode" "")]) (define_insn "abs2_mask" [(set (match_operand:VI48_AVX512VL 0 "register_operand" "=v") (vec_merge:VI48_AVX512VL (abs:VI48_AVX512VL (match_operand:VI48_AVX512VL 1 "nonimmediate_operand" "vm")) (match_operand:VI48_AVX512VL 2 "nonimm_or_0_operand" "0C") (match_operand: 3 "register_operand" "Yk")))] "TARGET_AVX512F" "vpabs\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "sselog1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "abs2_mask" [(set (match_operand:VI12_AVX512VL 0 "register_operand" "=v") (vec_merge:VI12_AVX512VL (abs:VI12_AVX512VL (match_operand:VI12_AVX512VL 1 "nonimmediate_operand" "vm")) (match_operand:VI12_AVX512VL 2 "nonimm_or_0_operand" "0C") (match_operand: 3 "register_operand" "Yk")))] "TARGET_AVX512BW" "vpabs\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "sselog1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "abs2" [(set (match_operand:VI_AVX2 0 "register_operand") (abs:VI_AVX2 (match_operand:VI_AVX2 1 "vector_operand")))] "TARGET_SSE2" { if (!TARGET_SSSE3 || ((mode == V2DImode || mode == V4DImode) && !TARGET_AVX512VL)) { ix86_expand_sse2_abs (operands[0], operands[1]); DONE; } }) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; AMD SSE4A instructions ;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; 
;; NOTE(review): <...> iterator/attribute tokens look stripped in this
;; region (e.g. "movnt" without its <ssemodesuffix>, "const_0_to__operand"
;; missing <blendbits>); compare upstream gcc/config/i386/sse.md before
;; editing.
;;
;; AMD SSE4A: non-temporal scalar FP stores (movnt on MODEF and the
;; vec_select-of-element-0 VF_128 form), extrqi/extrq bit-field
;; extraction and insertqi/insertq insertion on V2DI (the *qi immediate
;; forms take two const_0_to_255 operands encoded as a 2-byte
;; immediate, length_immediate "2").  Then the start of Intel SSE4.1:
;; the <blendbits> immediate-width map and the blend / blendv float
;; patterns on VF_128_256 (legacy blendv encodings use the implicit
;; xmm0 selector, constraint "Yz"; the avx alternative is 3-operand).
(define_insn "sse4a_movnt" [(set (match_operand:MODEF 0 "memory_operand" "=m") (unspec:MODEF [(match_operand:MODEF 1 "register_operand" "x")] UNSPEC_MOVNT))] "TARGET_SSE4A" "movnt\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "mode" "")]) (define_insn "sse4a_vmmovnt" [(set (match_operand: 0 "memory_operand" "=m") (unspec: [(vec_select: (match_operand:VF_128 1 "register_operand" "x") (parallel [(const_int 0)]))] UNSPEC_MOVNT))] "TARGET_SSE4A" "movnt\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "mode" "")]) (define_insn "sse4a_extrqi" [(set (match_operand:V2DI 0 "register_operand" "=x") (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0") (match_operand 2 "const_0_to_255_operand") (match_operand 3 "const_0_to_255_operand")] UNSPEC_EXTRQI))] "TARGET_SSE4A" "extrq\t{%3, %2, %0|%0, %2, %3}" [(set_attr "type" "sse") (set_attr "prefix_data16" "1") (set_attr "length_immediate" "2") (set_attr "mode" "TI")]) (define_insn "sse4a_extrq" [(set (match_operand:V2DI 0 "register_operand" "=x") (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0") (match_operand:V16QI 2 "register_operand" "x")] UNSPEC_EXTRQ))] "TARGET_SSE4A" "extrq\t{%2, %0|%0, %2}" [(set_attr "type" "sse") (set_attr "prefix_data16" "1") (set_attr "mode" "TI")]) (define_insn "sse4a_insertqi" [(set (match_operand:V2DI 0 "register_operand" "=x") (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0") (match_operand:V2DI 2 "register_operand" "x") (match_operand 3 "const_0_to_255_operand") (match_operand 4 "const_0_to_255_operand")] UNSPEC_INSERTQI))] "TARGET_SSE4A" "insertq\t{%4, %3, %2, %0|%0, %2, %3, %4}" [(set_attr "type" "sseins") (set_attr "prefix_data16" "0") (set_attr "prefix_rep" "1") (set_attr "length_immediate" "2") (set_attr "mode" "TI")]) (define_insn "sse4a_insertq" [(set (match_operand:V2DI 0 "register_operand" "=x") (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0") (match_operand:V2DI 2 "register_operand" "x")] UNSPEC_INSERTQ))] "TARGET_SSE4A" 
"insertq\t{%2, %0|%0, %2}" [(set_attr "type" "sseins") (set_attr "prefix_data16" "0") (set_attr "prefix_rep" "1") (set_attr "mode" "TI")]) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; Intel SSE4.1 instructions ;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Mapping of immediate bits for blend instructions (define_mode_attr blendbits [(V8SF "255") (V4SF "15") (V4DF "15") (V2DF "3")]) (define_insn "_blend" [(set (match_operand:VF_128_256 0 "register_operand" "=Yr,*x,x") (vec_merge:VF_128_256 (match_operand:VF_128_256 2 "vector_operand" "YrBm,*xBm,xm") (match_operand:VF_128_256 1 "register_operand" "0,0,x") (match_operand:SI 3 "const_0_to__operand")))] "TARGET_SSE4_1" "@ blend\t{%3, %2, %0|%0, %2, %3} blend\t{%3, %2, %0|%0, %2, %3} vblend\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssemov") (set_attr "length_immediate" "1") (set_attr "prefix_data16" "1,1,*") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,vex") (set_attr "mode" "")]) (define_insn "_blendv" [(set (match_operand:VF_128_256 0 "register_operand" "=Yr,*x,x") (unspec:VF_128_256 [(match_operand:VF_128_256 1 "register_operand" "0,0,x") (match_operand:VF_128_256 2 "vector_operand" "YrBm,*xBm,xm") (match_operand:VF_128_256 3 "register_operand" "Yz,Yz,x")] UNSPEC_BLENDV))] "TARGET_SSE4_1" "@ blendv\t{%3, %2, %0|%0, %2, %3} blendv\t{%3, %2, %0|%0, %2, %3} vblendv\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssemov") (set_attr "length_immediate" "1") (set_attr "prefix_data16" "1,1,*") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,vex") (set_attr "btver2_decode" "vector,vector,vector") (set_attr "mode" "")]) ;; Also define scalar versions. These are used for conditional move. ;; Using subregs into vector modes causes register allocation lossage. 
;; NOTE(review): <...> iterator/attribute tokens look stripped in this
;; region (empty (const_string "") entries, bare (match_operand: ...)
;; with no mode); compare upstream gcc/config/i386/sse.md before editing.
;;
;; Scalar blendv on SF/DF used for conditional moves (register-only --
;; see the comment just below: the hardware instruction reads the full
;; 128 bits); its mode attr picks V4SF when optimizing for size or when
;; packed-single is preferred.  The *_lt insn-and-split folds a
;; (lt x 0) sign-mask directly into the blendv selector after reload;
;; the _ltint form does the same through an integer-mode subreg (using
;; the ssefltmodesuffix/ssefltvecmode attr maps defined here), and the
;; PR target/100738 _not_ltint form additionally absorbs a negated mask
;; by swapping the two data operands in the split.
;; These patterns do not allow memory operands because the native ;; instructions read the full 128-bits. (define_insn "sse4_1_blendv" [(set (match_operand:MODEF 0 "register_operand" "=Yr,*x,x") (unspec:MODEF [(match_operand:MODEF 1 "register_operand" "0,0,x") (match_operand:MODEF 2 "register_operand" "Yr,*x,x") (match_operand:MODEF 3 "register_operand" "Yz,Yz,x")] UNSPEC_BLENDV))] "TARGET_SSE4_1" { if (get_attr_mode (insn) == MODE_V4SF) return (which_alternative == 2 ? "vblendvps\t{%3, %2, %1, %0|%0, %1, %2, %3}" : "blendvps\t{%3, %2, %0|%0, %2, %3}"); else return (which_alternative == 2 ? "vblendv\t{%3, %2, %1, %0|%0, %1, %2, %3}" : "blendv\t{%3, %2, %0|%0, %2, %3}"); } [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssemov") (set_attr "length_immediate" "1") (set_attr "prefix_data16" "1,1,*") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,vex") (set_attr "btver2_decode" "vector,vector,vector") (set (attr "mode") (cond [(match_test "TARGET_AVX") (const_string "") (match_test "optimize_function_for_size_p (cfun)") (const_string "V4SF") (match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL") (const_string "V4SF") ] (const_string "")))]) (define_insn_and_split "*_blendv_lt" [(set (match_operand:VF_128_256 0 "register_operand" "=Yr,*x,x") (unspec:VF_128_256 [(match_operand:VF_128_256 1 "register_operand" "0,0,x") (match_operand:VF_128_256 2 "vector_operand" "YrBm,*xBm,xm") (lt:VF_128_256 (match_operand: 3 "register_operand" "Yz,Yz,x") (match_operand: 4 "const0_operand" "C,C,C"))] UNSPEC_BLENDV))] "TARGET_SSE4_1" "#" "&& reload_completed" [(set (match_dup 0) (unspec:VF_128_256 [(match_dup 1) (match_dup 2) (match_dup 3)] UNSPEC_BLENDV))] "operands[3] = gen_lowpart (mode, operands[3]);" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssemov") (set_attr "length_immediate" "1") (set_attr "prefix_data16" "1,1,*") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,vex") (set_attr "btver2_decode" "vector,vector,vector") (set_attr "mode" "")]) 
(define_mode_attr ssefltmodesuffix [(V2DI "pd") (V4DI "pd") (V4SI "ps") (V8SI "ps")]) (define_mode_attr ssefltvecmode [(V2DI "V2DF") (V4DI "V4DF") (V4SI "V4SF") (V8SI "V8SF")]) (define_insn_and_split "*_blendv_ltint" [(set (match_operand: 0 "register_operand" "=Yr,*x,x") (unspec: [(match_operand: 1 "register_operand" "0,0,x") (match_operand: 2 "vector_operand" "YrBm,*xBm,xm") (subreg: (lt:VI48_AVX (match_operand:VI48_AVX 3 "register_operand" "Yz,Yz,x") (match_operand:VI48_AVX 4 "const0_operand" "C,C,C")) 0)] UNSPEC_BLENDV))] "TARGET_SSE4_1" "#" "&& reload_completed" [(set (match_dup 0) (unspec: [(match_dup 1) (match_dup 2) (match_dup 3)] UNSPEC_BLENDV))] { operands[0] = gen_lowpart (mode, operands[0]); operands[1] = gen_lowpart (mode, operands[1]); operands[2] = gen_lowpart (mode, operands[2]); operands[3] = gen_lowpart (mode, operands[3]); } [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssemov") (set_attr "length_immediate" "1") (set_attr "prefix_data16" "1,1,*") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,vex") (set_attr "btver2_decode" "vector,vector,vector") (set_attr "mode" "")]) ;; PR target/100738: Transform vpcmpeqd + vpxor + vblendvps to vblendvps for inverted mask; (define_insn_and_split "*_blendv_not_ltint" [(set (match_operand: 0 "register_operand") (unspec: [(match_operand: 1 "register_operand") (match_operand: 2 "vector_operand") (subreg: (lt:VI48_AVX (subreg:VI48_AVX (not: (match_operand: 3 "register_operand")) 0) (match_operand:VI48_AVX 4 "const0_operand")) 0)] UNSPEC_BLENDV))] "TARGET_SSE4_1 && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (unspec: [(match_dup 2) (match_dup 1) (match_dup 3)] UNSPEC_BLENDV))] { operands[0] = gen_lowpart (mode, operands[0]); operands[1] = gen_lowpart (mode, operands[1]); operands[2] = force_reg (mode, gen_lowpart (mode, operands[2])); operands[3] = gen_lowpart (mode, operands[3]); }) (define_insn "_dp" [(set (match_operand:VF_128_256 0 "register_operand" "=Yr,*x,x") 
;; NOTE(review): <...> iterator/attribute tokens look stripped in this
;; region (dangling "&& &&", "GET_MODE_SIZE (...) == " with missing
;; <ssescalarnum>/<sseinsnmode> style operands); compare upstream
;; gcc/config/i386/sse.md before editing.  This comment sits inside the
;; _dp pattern, whose opening (set ...) is on the previous physical line.
;;
;; SSE4.1 continued: dpps/dppd dot product with an immediate selector
;; (operand 1 is commutative, "%0"), vmovntdqa non-temporal vector load,
;; mpsadbw, packusdw (unsigned-saturating narrowing vec_concat), and
;; pblendvb with its canonicalization splitters: two define_splits
;; rewrite a blendv whose selector is (not ...) -- directly or through a
;; same-size integer subreg -- by swapping the two data operands; the
;; *_lt insn-and-split folds a (lt x 0) sign-mask into the selector, and
;; the _lt_subreg_not variant strips a (not ...) under a subreg before
;; doing so.  Ends with the SSE4.1 pblendw vec_merge insn on V8_128.
(unspec:VF_128_256 [(match_operand:VF_128_256 1 "vector_operand" "%0,0,x") (match_operand:VF_128_256 2 "vector_operand" "YrBm,*xBm,xm") (match_operand:SI 3 "const_0_to_255_operand" "n,n,n")] UNSPEC_DP))] "TARGET_SSE4_1" "@ dp\t{%3, %2, %0|%0, %2, %3} dp\t{%3, %2, %0|%0, %2, %3} vdp\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssemul") (set_attr "length_immediate" "1") (set_attr "prefix_data16" "1,1,*") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,vex") (set_attr "btver2_decode" "vector,vector,vector") (set_attr "znver1_decode" "vector,vector,vector") (set_attr "mode" "")]) ;; Mode attribute used by `vmovntdqa' pattern (define_mode_attr vi8_sse4_1_avx2_avx512 [(V2DI "sse4_1") (V4DI "avx2") (V8DI "avx512f")]) (define_insn "_movntdqa" [(set (match_operand:VI8_AVX2_AVX512F 0 "register_operand" "=Yr,*x,v") (unspec:VI8_AVX2_AVX512F [(match_operand:VI8_AVX2_AVX512F 1 "memory_operand" "m,m,m")] UNSPEC_MOVNTDQA))] "TARGET_SSE4_1" "%vmovntdqa\t{%1, %0|%0, %1}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssemov") (set_attr "prefix_extra" "1,1,*") (set_attr "prefix" "orig,orig,maybe_evex") (set_attr "mode" "")]) (define_insn "_mpsadbw" [(set (match_operand:VI1_AVX2 0 "register_operand" "=Yr,*x,x") (unspec:VI1_AVX2 [(match_operand:VI1_AVX2 1 "register_operand" "0,0,x") (match_operand:VI1_AVX2 2 "vector_operand" "YrBm,*xBm,xm") (match_operand:SI 3 "const_0_to_255_operand" "n,n,n")] UNSPEC_MPSADBW))] "TARGET_SSE4_1" "@ mpsadbw\t{%3, %2, %0|%0, %2, %3} mpsadbw\t{%3, %2, %0|%0, %2, %3} vmpsadbw\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "sselog1") (set_attr "length_immediate" "1") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,vex") (set_attr "btver2_decode" "vector,vector,vector") (set_attr "znver1_decode" "vector,vector,vector") (set_attr "mode" "")]) (define_insn "_packusdw" [(set (match_operand:VI2_AVX2 0 "register_operand" "=Yr,*x,") (vec_concat:VI2_AVX2 
(us_truncate: (match_operand: 1 "register_operand" "0,0,")) (us_truncate: (match_operand: 2 "vector_operand" "YrBm,*xBm,m"))))] "TARGET_SSE4_1 && && " "@ packusdw\t{%2, %0|%0, %2} packusdw\t{%2, %0|%0, %2} vpackusdw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "sselog") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,") (set_attr "mode" "")]) (define_insn "_pblendvb" [(set (match_operand:VI1_AVX2 0 "register_operand" "=Yr,*x,x") (unspec:VI1_AVX2 [(match_operand:VI1_AVX2 1 "register_operand" "0,0,x") (match_operand:VI1_AVX2 2 "vector_operand" "YrBm,*xBm,xm") (match_operand:VI1_AVX2 3 "register_operand" "Yz,Yz,x")] UNSPEC_BLENDV))] "TARGET_SSE4_1" "@ pblendvb\t{%3, %2, %0|%0, %2, %3} pblendvb\t{%3, %2, %0|%0, %2, %3} vpblendvb\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "*,*,1") (set_attr "prefix" "orig,orig,vex") (set_attr "btver2_decode" "vector,vector,vector") (set_attr "mode" "")]) (define_split [(set (match_operand:VI1_AVX2 0 "register_operand") (unspec:VI1_AVX2 [(match_operand:VI1_AVX2 1 "vector_operand") (match_operand:VI1_AVX2 2 "register_operand") (not:VI1_AVX2 (match_operand:VI1_AVX2 3 "register_operand"))] UNSPEC_BLENDV))] "TARGET_SSE4_1" [(set (match_dup 0) (unspec:VI1_AVX2 [(match_dup 2) (match_dup 1) (match_dup 3)] UNSPEC_BLENDV))]) (define_split [(set (match_operand:VI1_AVX2 0 "register_operand") (unspec:VI1_AVX2 [(match_operand:VI1_AVX2 1 "vector_operand") (match_operand:VI1_AVX2 2 "register_operand") (subreg:VI1_AVX2 (not (match_operand 3 "register_operand")) 0)] UNSPEC_BLENDV))] "TARGET_SSE4_1 && GET_MODE_CLASS (GET_MODE (operands[3])) == MODE_VECTOR_INT && GET_MODE_SIZE (GET_MODE (operands[3])) == " [(set (match_dup 0) (unspec:VI1_AVX2 [(match_dup 2) (match_dup 1) (match_dup 4)] UNSPEC_BLENDV))] "operands[4] = gen_lowpart (mode, operands[3]);") (define_insn_and_split "*_pblendvb_lt" [(set 
(match_operand:VI1_AVX2 0 "register_operand" "=Yr,*x,x") (unspec:VI1_AVX2 [(match_operand:VI1_AVX2 1 "register_operand" "0,0,x") (match_operand:VI1_AVX2 2 "vector_operand" "YrBm,*xBm,xm") (lt:VI1_AVX2 (match_operand:VI1_AVX2 3 "register_operand" "Yz,Yz,x") (match_operand:VI1_AVX2 4 "const0_operand" "C,C,C"))] UNSPEC_BLENDV))] "TARGET_SSE4_1" "#" "&& 1" [(set (match_dup 0) (unspec:VI1_AVX2 [(match_dup 1) (match_dup 2) (match_dup 3)] UNSPEC_BLENDV))] "" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "*,*,1") (set_attr "prefix" "orig,orig,vex") (set_attr "btver2_decode" "vector,vector,vector") (set_attr "mode" "")]) (define_insn_and_split "*_pblendvb_lt_subreg_not" [(set (match_operand:VI1_AVX2 0 "register_operand") (unspec:VI1_AVX2 [(match_operand:VI1_AVX2 2 "vector_operand") (match_operand:VI1_AVX2 1 "register_operand") (lt:VI1_AVX2 (subreg:VI1_AVX2 (not (match_operand 3 "register_operand")) 0) (match_operand:VI1_AVX2 4 "const0_operand"))] UNSPEC_BLENDV))] "TARGET_SSE4_1 && GET_MODE_CLASS (GET_MODE (operands[3])) == MODE_VECTOR_INT && GET_MODE_SIZE (GET_MODE (operands[3])) == && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (unspec:VI1_AVX2 [(match_dup 1) (match_dup 2) (lt:VI1_AVX2 (match_dup 3) (match_dup 4))] UNSPEC_BLENDV))] "operands[3] = gen_lowpart (mode, operands[3]);") (define_insn "sse4_1_pblend" [(set (match_operand:V8_128 0 "register_operand" "=Yr,*x,x") (vec_merge:V8_128 (match_operand:V8_128 2 "vector_operand" "YrBm,*xBm,xm") (match_operand:V8_128 1 "register_operand" "0,0,x") (match_operand:SI 3 "const_0_to_255_operand" "n,n,n")))] "TARGET_SSE4_1" "@ pblendw\t{%3, %2, %0|%0, %2, %3} pblendw\t{%3, %2, %0|%0, %2, %3} vpblendw\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "orig,orig,vex") (set_attr "mode" "TI")]) ;; The builtin uses an 
;; NOTE(review): <...> iterator/attribute tokens look stripped in this
;; region, and the first text below ("8-bit immediate. Expand that.") is
;; the wrapped continuation of the ";; The builtin uses an" comment on
;; the previous physical line -- as re-wrapped here it sits OUTSIDE any
;; comment and will not parse; restore the original line layout (or its
;; ";;" prefix) from upstream gcc/config/i386/sse.md.
;;
;; AVX2 pblendw: the main expander duplicates the builtin's 8-bit mask
;; into both 128-bit halves (val << 8 | val); the _1 expander handles a
;; mask touching only one half by doing pblendw on that half and then
;; vpblendd with immediate 15 (low half) or 240 (high half) to merge
;; through V8SI subregs; the insn re-truncates the immediate to 8 bits
;; when printing.  Then the vpblendd insn, phminposuw, the AVX2
;; V16QI->V16HI sign/zero-extend insn, and two splitters that recognize
;; a pmovzx-shaped vec_select of a zero vec_concat (directly, or through
;; a VI248_256 zero-concat subreg) and rewrite it as a plain
;; zero_extend after reload.
8-bit immediate. Expand that. (define_expand "avx2_pblend" [(set (match_operand:V16_256 0 "register_operand") (vec_merge:V16_256 (match_operand:V16_256 2 "nonimmediate_operand") (match_operand:V16_256 1 "register_operand") (match_operand:SI 3 "const_0_to_255_operand")))] "TARGET_AVX2" { HOST_WIDE_INT val = INTVAL (operands[3]) & 0xff; operands[3] = GEN_INT (val << 8 | val); }) (define_expand "avx2_pblend_1" [(set (match_operand:V16_256 0 "register_operand") (vec_merge:V16_256 (match_operand:V16_256 2 "register_operand") (match_operand:V16_256 1 "register_operand") (match_operand:SI 3 "const_int_operand")))] "TARGET_AVX2 && !((INTVAL (operands[3]) & 0xff) && (INTVAL (operands[3]) & 0xff00))" { int mask = INTVAL (operands[3]); if (mask == 0) emit_move_insn (operands[0], operands[1]); else { rtx tmp = gen_reg_rtx (mode); rtx blendw_idx, blendd_idx; if (mask & 0xff) { blendw_idx = GEN_INT (mask & 0xff); blendd_idx = GEN_INT (15); } else { blendw_idx = GEN_INT (mask >> 8 & 0xff); blendd_idx = GEN_INT (240); } emit_insn (gen_avx2_pblend (tmp, operands[1], operands[2], blendw_idx)); operands[0] = lowpart_subreg (V8SImode, operands[0], mode); tmp = lowpart_subreg (V8SImode, tmp, mode); operands[1] = lowpart_subreg (V8SImode, operands[1], mode); emit_insn (gen_avx2_pblenddv8si (operands[0], operands[1], tmp, blendd_idx)); } DONE; }) (define_insn "*avx2_pblend" [(set (match_operand:V16_256 0 "register_operand" "=x") (vec_merge:V16_256 (match_operand:V16_256 2 "nonimmediate_operand" "xm") (match_operand:V16_256 1 "register_operand" "x") (match_operand:SI 3 "avx2_pblendw_operand" "n")))] "TARGET_AVX2" { operands[3] = GEN_INT (INTVAL (operands[3]) & 0xff); return "vpblendw\t{%3, %2, %1, %0|%0, %1, %2, %3}"; } [(set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "vex") (set_attr "mode" "OI")]) (define_insn "avx2_pblendd" [(set (match_operand:VI4_AVX2 0 "register_operand" "=x") (vec_merge:VI4_AVX2 (match_operand:VI4_AVX2 2 
"nonimmediate_operand" "xm") (match_operand:VI4_AVX2 1 "register_operand" "x") (match_operand:SI 3 "const_0_to_255_operand" "n")))] "TARGET_AVX2" "vpblendd\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "vex") (set_attr "mode" "")]) (define_insn "sse4_1_phminposuw" [(set (match_operand:V8HI 0 "register_operand" "=Yr,*x,x") (unspec:V8HI [(match_operand:V8HI 1 "vector_operand" "YrBm,*xBm,xm")] UNSPEC_PHMINPOSUW))] "TARGET_SSE4_1" "%vphminposuw\t{%1, %0|%0, %1}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,vex") (set_attr "mode" "TI")]) (define_insn "avx2_v16qiv16hi2" [(set (match_operand:V16HI 0 "register_operand" "=Yw") (any_extend:V16HI (match_operand:V16QI 1 "nonimmediate_operand" "Ywm")))] "TARGET_AVX2 && && " "vpmovbw\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "maybe_evex") (set_attr "mode" "OI")]) (define_insn_and_split "*avx2_zero_extendv16qiv16hi2_1" [(set (match_operand:V32QI 0 "register_operand" "=v") (vec_select:V32QI (vec_concat:V64QI (match_operand:V32QI 1 "nonimmediate_operand" "vm") (match_operand:V32QI 2 "const0_operand" "C")) (match_parallel 3 "pmovzx_parallel" [(match_operand 4 "const_int_operand" "n")])))] "TARGET_AVX2" "#" "&& reload_completed" [(set (match_dup 0) (zero_extend:V16HI (match_dup 1)))] { operands[0] = lowpart_subreg (V16HImode, operands[0], V32QImode); operands[1] = lowpart_subreg (V16QImode, operands[1], V32QImode); }) (define_insn_and_split "*avx2_zero_extendv16qiv16hi2_2" [(set (match_operand:V32QI 0 "register_operand" "=v") (vec_select:V32QI (vec_concat:V64QI (subreg:V32QI (vec_concat:VI248_256 (match_operand: 1 "nonimmediate_operand" "vm") (match_operand: 2 "const0_operand" "C")) 0) (match_operand:V32QI 3 "const0_operand" "C")) (match_parallel 4 "pmovzx_parallel" [(match_operand 5 "const_int_operand" 
;; NOTE(review): <...> iterator/attribute tokens look stripped in this
;; region (dangling "&& &&" conditions, pattern names like
;; "v16qiv16hi2" missing their <extsuffix>/<u> parts); compare upstream
;; gcc/config/i386/sse.md before editing.  This comment sits inside the
;; *avx2_zero_extendv16qiv16hi2_2 pattern begun on the previous
;; physical line.
;;
;; Below: the tail of that AVX2 splitter, the v16qiv16hi2 expander, the
;; AVX512BW V32QI->V32HI sign/zero-extend insn with its analogous
;; pmovzx splitter pair (*_1 on a direct zero vec_concat, *_2 through a
;; VI248_512 zero-concat subreg) and expander, then the SSE4.1
;; V8QI->V8HI extends: a register form selecting the low 8 bytes of a
;; V16QI, a memory form, a pre-reload split for a DI load
;; zero-concatenated into V16QI (rewritten to extend a V8QI memory
;; ref), and the opening of the *_3 zero-extend splitter (continues on
;; the next physical line).
"n")])))] "TARGET_AVX2" "#" "&& reload_completed" [(set (match_dup 0) (zero_extend:V16HI (match_dup 1)))] { operands[0] = lowpart_subreg (V16HImode, operands[0], V32QImode); operands[1] = lowpart_subreg (V16QImode, operands[1], mode); }) (define_expand "v16qiv16hi2" [(set (match_operand:V16HI 0 "register_operand") (any_extend:V16HI (match_operand:V16QI 1 "nonimmediate_operand")))] "TARGET_AVX2") (define_insn "avx512bw_v32qiv32hi2" [(set (match_operand:V32HI 0 "register_operand" "=v") (any_extend:V32HI (match_operand:V32QI 1 "nonimmediate_operand" "vm")))] "TARGET_AVX512BW" "vpmovbw\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_insn_and_split "*avx512bw_zero_extendv32qiv32hi2_1" [(set (match_operand:V64QI 0 "register_operand" "=v") (vec_select:V64QI (vec_concat:V128QI (match_operand:V64QI 1 "nonimmediate_operand" "vm") (match_operand:V64QI 2 "const0_operand" "C")) (match_parallel 3 "pmovzx_parallel" [(match_operand 4 "const_int_operand" "n")])))] "TARGET_AVX512BW" "#" "&& reload_completed" [(set (match_dup 0) (zero_extend:V32HI (match_dup 1)))] { operands[0] = lowpart_subreg (V32HImode, operands[0], V64QImode); operands[1] = lowpart_subreg (V32QImode, operands[1], V64QImode); }) (define_insn_and_split "*avx512bw_zero_extendv32qiv32hi2_2" [(set (match_operand:V64QI 0 "register_operand" "=v") (vec_select:V64QI (vec_concat:V128QI (subreg:V64QI (vec_concat:VI248_512 (match_operand: 1 "nonimmediate_operand" "vm") (match_operand: 2 "const0_operand" "C")) 0) (match_operand:V64QI 3 "const0_operand" "C")) (match_parallel 4 "pmovzx_parallel" [(match_operand 5 "const_int_operand" "n")])))] "TARGET_AVX512BW" "#" "&& reload_completed" [(set (match_dup 0) (zero_extend:V32HI (match_dup 1)))] { operands[0] = lowpart_subreg (V32HImode, operands[0], V64QImode); operands[1] = lowpart_subreg (V32QImode, operands[1], mode); }) (define_expand "v32qiv32hi2" [(set (match_operand:V32HI 0 
"register_operand") (any_extend:V32HI (match_operand:V32QI 1 "nonimmediate_operand")))] "TARGET_AVX512BW") (define_insn "sse4_1_v8qiv8hi2" [(set (match_operand:V8HI 0 "register_operand" "=Yr,*x,Yw") (any_extend:V8HI (vec_select:V8QI (match_operand:V16QI 1 "register_operand" "Yr,*x,Yw") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)]))))] "TARGET_SSE4_1 && && " "%vpmovbw\t{%1, %0|%0, %1}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,maybe_evex") (set_attr "mode" "TI")]) (define_insn "*sse4_1_v8qiv8hi2_1" [(set (match_operand:V8HI 0 "register_operand" "=Yr,*x,Yw") (any_extend:V8HI (match_operand:V8QI 1 "memory_operand" "m,m,m")))] "TARGET_SSE4_1 && && " "%vpmovbw\t{%1, %0|%0, %1}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,maybe_evex") (set_attr "mode" "TI")]) (define_insn_and_split "*sse4_1_v8qiv8hi2_2" [(set (match_operand:V8HI 0 "register_operand") (any_extend:V8HI (vec_select:V8QI (subreg:V16QI (vec_concat:V2DI (match_operand:DI 1 "memory_operand") (const_int 0)) 0) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)]))))] "TARGET_SSE4_1 && && && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (any_extend:V8HI (match_dup 1)))] "operands[1] = adjust_address_nv (operands[1], V8QImode, 0);") (define_insn_and_split "*sse4_1_zero_extendv8qiv8hi2_3" [(set (match_operand:V16QI 0 "register_operand" "=Yr,*x,Yw") (vec_select:V16QI (vec_concat:V32QI (match_operand:V16QI 1 "vector_operand" "YrBm,*xBm,Ywm") (match_operand:V16QI 2 "const0_operand" "C,C,C")) (match_parallel 3 "pmovzx_parallel" [(match_operand 4 "const_int_operand" "n,n,n")])))] "TARGET_SSE4_1" "#" "&& reload_completed" [(set (match_dup 0) (zero_extend:V8HI (vec_select:V8QI (match_dup 1) (parallel 
;; NOTE(review): <...> iterator/attribute tokens look stripped in this
;; region (dangling "&& &&", expander names like "v8qiv8hi2" missing
;; their <extsuffix>/<u> parts); compare upstream gcc/config/i386/sse.md
;; before editing.  This comment sits inside the
;; *sse4_1_zero_extendv8qiv8hi2_3 pattern begun on the previous
;; physical line.
;;
;; Below: the remainder of the *_3 splitter and the *_4 variant
;; (through a VI248_128 zero-concat subreg) -- both rewrite a
;; pmovzx-shaped shuffle as a plain zero_extend after reload, with a
;; direct memory-operand shortcut emitted in the C split body; the
;; v8qiv8hi2 expander, which forces a non-memory input into a register
;; and goes through a V16QI subreg; the AVX512F V16QI->V16SI extend
;; insn and expander; and the AVX2 V8QI->V8SI patterns (register
;; low-half select, memory form, pre-reload DI-concat split, and
;; expander).  The final insn (sse4_1 V4QI->V4SI) continues past the
;; end of this chunk -- do not edit it in isolation.
[(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)]))))] { operands[0] = lowpart_subreg (V8HImode, operands[0], V16QImode); if (MEM_P (operands[1])) { operands[1] = lowpart_subreg (V8QImode, operands[1], V16QImode); operands[1] = gen_rtx_ZERO_EXTEND (V8HImode, operands[1]); emit_insn (gen_rtx_SET (operands[0], operands[1])); DONE; } } [(set_attr "isa" "noavx,noavx,avx")]) (define_insn_and_split "*sse4_1_zero_extendv8qiv8hi2_4" [(set (match_operand:V16QI 0 "register_operand" "=Yr,*x,Yw") (vec_select:V16QI (vec_concat:V32QI (subreg:V16QI (vec_concat:VI248_128 (match_operand: 1 "vector_operand" "YrBm,*xBm,Ywm") (match_operand: 2 "const0_operand" "C,C,C")) 0) (match_operand:V16QI 3 "const0_operand" "C,C,C")) (match_parallel 4 "pmovzx_parallel" [(match_operand 5 "const_int_operand" "n,n,n")])))] "TARGET_SSE4_1" "#" "&& reload_completed" [(set (match_dup 0) (zero_extend:V8HI (vec_select:V8QI (match_dup 1) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)]))))] { operands[0] = lowpart_subreg (V8HImode, operands[0], V16QImode); if (MEM_P (operands[1])) { operands[1] = lowpart_subreg (V8QImode, operands[1], mode); operands[1] = gen_rtx_ZERO_EXTEND (V8HImode, operands[1]); emit_insn (gen_rtx_SET (operands[0], operands[1])); DONE; } operands[1] = lowpart_subreg (V16QImode, operands[1], mode); } [(set_attr "isa" "noavx,noavx,avx")]) (define_expand "v8qiv8hi2" [(set (match_operand:V8HI 0 "register_operand") (any_extend:V8HI (match_operand:V8QI 1 "nonimmediate_operand")))] "TARGET_SSE4_1" { if (!MEM_P (operands[1])) { rtx op1 = force_reg (V8QImode, operands[1]); op1 = lowpart_subreg (V16QImode, op1, V8QImode); emit_insn (gen_sse4_1_v8qiv8hi2 (operands[0], op1)); DONE; } }) (define_insn "avx512f_v16qiv16si2" [(set (match_operand:V16SI 0 "register_operand" "=v") (any_extend:V16SI (match_operand:V16QI 1 "nonimmediate_operand" "vm")))] 
"TARGET_AVX512F" "vpmovbd\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_expand "v16qiv16si2" [(set (match_operand:V16SI 0 "register_operand") (any_extend:V16SI (match_operand:V16QI 1 "nonimmediate_operand")))] "TARGET_AVX512F") (define_insn "avx2_v8qiv8si2" [(set (match_operand:V8SI 0 "register_operand" "=v") (any_extend:V8SI (vec_select:V8QI (match_operand:V16QI 1 "register_operand" "v") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)]))))] "TARGET_AVX2 && " "vpmovbd\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "maybe_evex") (set_attr "mode" "OI")]) (define_insn "*avx2_v8qiv8si2_1" [(set (match_operand:V8SI 0 "register_operand" "=v") (any_extend:V8SI (match_operand:V8QI 1 "memory_operand" "m")))] "TARGET_AVX2 && " "%vpmovbd\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "maybe_evex") (set_attr "mode" "OI")]) (define_insn_and_split "*avx2_v8qiv8si2_2" [(set (match_operand:V8SI 0 "register_operand") (any_extend:V8SI (vec_select:V8QI (subreg:V16QI (vec_concat:V2DI (match_operand:DI 1 "memory_operand") (const_int 0)) 0) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)]))))] "TARGET_AVX2 && && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (any_extend:V8SI (match_dup 1)))] "operands[1] = adjust_address_nv (operands[1], V8QImode, 0);") (define_expand "v8qiv8si2" [(set (match_operand:V8SI 0 "register_operand") (any_extend:V8SI (match_operand:V8QI 1 "nonimmediate_operand")))] "TARGET_AVX2" { if (!MEM_P (operands[1])) { rtx op1 = force_reg (V8QImode, operands[1]); op1 = lowpart_subreg (V16QImode, op1, V8QImode); emit_insn (gen_avx2_v8qiv8si2 (operands[0], op1)); DONE; } }) (define_insn "sse4_1_v4qiv4si2" [(set (match_operand:V4SI 0 "register_operand" "=Yr,*x,v") 
(any_extend:V4SI (vec_select:V4QI (match_operand:V16QI 1 "register_operand" "Yr,*x,v") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)]))))] "TARGET_SSE4_1 && " "%vpmovbd\t{%1, %0|%0, %1}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,maybe_evex") (set_attr "mode" "TI")]) (define_insn "*sse4_1_v4qiv4si2_1" [(set (match_operand:V4SI 0 "register_operand" "=Yr,*x,v") (any_extend:V4SI (match_operand:V4QI 1 "memory_operand" "m,m,m")))] "TARGET_SSE4_1 && " "%vpmovbd\t{%1, %0|%0, %1}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,maybe_evex") (set_attr "mode" "TI")]) (define_insn_and_split "*sse4_1_v4qiv4si2_2" [(set (match_operand:V4SI 0 "register_operand") (any_extend:V4SI (vec_select:V4QI (subreg:V16QI (vec_merge:V4SI (vec_duplicate:V4SI (match_operand:SI 1 "memory_operand")) (const_vector:V4SI [(const_int 0) (const_int 0) (const_int 0) (const_int 0)]) (const_int 1)) 0) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)]))))] "TARGET_SSE4_1 && && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (any_extend:V4SI (match_dup 1)))] "operands[1] = adjust_address_nv (operands[1], V4QImode, 0);") (define_expand "v4qiv4si2" [(set (match_operand:V4SI 0 "register_operand") (any_extend:V4SI (match_operand:V4QI 1 "nonimmediate_operand")))] "TARGET_SSE4_1" { if (!MEM_P (operands[1])) { rtx op1 = force_reg (V4QImode, operands[1]); op1 = lowpart_subreg (V16QImode, op1, V4QImode); emit_insn (gen_sse4_1_v4qiv4si2 (operands[0], op1)); DONE; } }) (define_insn "avx512f_v16hiv16si2" [(set (match_operand:V16SI 0 "register_operand" "=v") (any_extend:V16SI (match_operand:V16HI 1 "nonimmediate_operand" "vm")))] "TARGET_AVX512F" "vpmovwd\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_expand "v16hiv16si2" [(set (match_operand:V16SI 0 
"register_operand") (any_extend:V16SI (match_operand:V16HI 1 "nonimmediate_operand")))] "TARGET_AVX512F") (define_insn_and_split "avx512f_zero_extendv16hiv16si2_1" [(set (match_operand:V32HI 0 "register_operand" "=v") (vec_select:V32HI (vec_concat:V64HI (match_operand:V32HI 1 "nonimmediate_operand" "vm") (match_operand:V32HI 2 "const0_operand" "C")) (match_parallel 3 "pmovzx_parallel" [(match_operand 4 "const_int_operand" "n")])))] "TARGET_AVX512F" "#" "&& reload_completed" [(set (match_dup 0) (zero_extend:V16SI (match_dup 1)))] { operands[0] = lowpart_subreg (V16SImode, operands[0], V32HImode); operands[1] = lowpart_subreg (V16HImode, operands[1], V32HImode); }) (define_insn_and_split "*avx512f_zero_extendv16hiv16si2_2" [(set (match_operand:V32HI 0 "register_operand" "=v") (vec_select:V32HI (vec_concat:V64HI (subreg:V32HI (vec_concat:VI148_512 (match_operand: 1 "nonimmediate_operand" "vm") (match_operand: 2 "const0_operand" "C")) 0) (match_operand:V32HI 3 "const0_operand" "C")) (match_parallel 4 "pmovzx_parallel" [(match_operand 5 "const_int_operand" "n")])))] "TARGET_AVX512F" "#" "&& reload_completed" [(set (match_dup 0) (zero_extend:V16SI (match_dup 1)))] { operands[0] = lowpart_subreg (V16SImode, operands[0], V32HImode); operands[1] = lowpart_subreg (V16HImode, operands[1], mode); }) (define_insn "avx2_v8hiv8si2" [(set (match_operand:V8SI 0 "register_operand" "=v") (any_extend:V8SI (match_operand:V8HI 1 "nonimmediate_operand" "vm")))] "TARGET_AVX2 && " "vpmovwd\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "maybe_evex") (set_attr "mode" "OI")]) (define_expand "v8hiv8si2" [(set (match_operand:V8SI 0 "register_operand") (any_extend:V8SI (match_operand:V8HI 1 "nonimmediate_operand")))] "TARGET_AVX2") (define_insn_and_split "avx2_zero_extendv8hiv8si2_1" [(set (match_operand:V16HI 0 "register_operand" "=v") (vec_select:V16HI (vec_concat:V32HI (match_operand:V16HI 1 "nonimmediate_operand" "vm") (match_operand:V16HI 2 
"const0_operand" "C")) (match_parallel 3 "pmovzx_parallel" [(match_operand 4 "const_int_operand" "n")])))] "TARGET_AVX2" "#" "&& reload_completed" [(set (match_dup 0) (zero_extend:V8SI (match_dup 1)))] { operands[0] = lowpart_subreg (V8SImode, operands[0], V16HImode); operands[1] = lowpart_subreg (V8HImode, operands[1], V16HImode); }) (define_insn_and_split "*avx2_zero_extendv8hiv8si2_2" [(set (match_operand:V16HI 0 "register_operand" "=v") (vec_select:V16HI (vec_concat:V32HI (subreg:V16HI (vec_concat:VI148_256 (match_operand: 1 "nonimmediate_operand" "vm") (match_operand: 2 "const0_operand" "C")) 0) (match_operand:V16HI 3 "const0_operand" "C")) (match_parallel 4 "pmovzx_parallel" [(match_operand 5 "const_int_operand" "n")])))] "TARGET_AVX2" "#" "&& reload_completed" [(set (match_dup 0) (zero_extend:V8SI (match_dup 1)))] { operands[0] = lowpart_subreg (V8SImode, operands[0], V16HImode); operands[1] = lowpart_subreg (V8HImode, operands[1], mode); }) (define_insn "sse4_1_v4hiv4si2" [(set (match_operand:V4SI 0 "register_operand" "=Yr,*x,v") (any_extend:V4SI (vec_select:V4HI (match_operand:V8HI 1 "register_operand" "Yr,*x,v") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)]))))] "TARGET_SSE4_1 && " "%vpmovwd\t{%1, %0|%0, %1}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,maybe_evex") (set_attr "mode" "TI")]) (define_insn "*sse4_1_v4hiv4si2_1" [(set (match_operand:V4SI 0 "register_operand" "=Yr,*x,v") (any_extend:V4SI (match_operand:V4HI 1 "memory_operand" "m,m,m")))] "TARGET_SSE4_1 && " "%vpmovwd\t{%1, %0|%0, %1}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,maybe_evex") (set_attr "mode" "TI")]) (define_insn_and_split "*sse4_1_v4hiv4si2_2" [(set (match_operand:V4SI 0 "register_operand") (any_extend:V4SI (vec_select:V4HI (subreg:V8HI (vec_concat:V2DI (match_operand:DI 1 "memory_operand") (const_int 
0)) 0) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)]))))] "TARGET_SSE4_1 && && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (any_extend:V4SI (match_dup 1)))] "operands[1] = adjust_address_nv (operands[1], V4HImode, 0);") (define_expand "v4hiv4si2" [(set (match_operand:V4SI 0 "register_operand") (any_extend:V4SI (match_operand:V4HI 1 "nonimmediate_operand")))] "TARGET_SSE4_1" { if (!MEM_P (operands[1])) { rtx op1 = force_reg (V4HImode, operands[1]); op1 = lowpart_subreg (V8HImode, op1, V4HImode); emit_insn (gen_sse4_1_v4hiv4si2 (operands[0], op1)); DONE; } }) (define_insn_and_split "*sse4_1_zero_extendv4hiv4si2_3" [(set (match_operand:V8HI 0 "register_operand" "=Yr,*x,v") (vec_select:V8HI (vec_concat:V16HI (match_operand:V8HI 1 "vector_operand" "YrBm,*xBm,vm") (match_operand:V8HI 2 "const0_operand" "C,C,C")) (match_parallel 3 "pmovzx_parallel" [(match_operand 4 "const_int_operand" "n,n,n")])))] "TARGET_SSE4_1" "#" "&& reload_completed" [(set (match_dup 0) (zero_extend:V4SI (vec_select:V4HI (match_dup 1) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)]))))] { operands[0] = lowpart_subreg (V4SImode, operands[0], V8HImode); if (MEM_P (operands[1])) { operands[1] = lowpart_subreg (V4HImode, operands[1], V8HImode); operands[1] = gen_rtx_ZERO_EXTEND (V4SImode, operands[1]); emit_insn (gen_rtx_SET (operands[0], operands[1])); DONE; } } [(set_attr "isa" "noavx,noavx,avx")]) (define_insn_and_split "*sse4_1_zero_extendv4hiv4si2_4" [(set (match_operand:V8HI 0 "register_operand" "=Yr,*x,v") (vec_select:V8HI (vec_concat:V16HI (subreg:V8HI (vec_concat:VI148_128 (match_operand: 1 "vector_operand" "YrBm,*xBm,vm") (match_operand: 2 "const0_operand" "C,C,C")) 0) (match_operand:V8HI 3 "const0_operand" "C,C,C")) (match_parallel 4 "pmovzx_parallel" [(match_operand 5 "const_int_operand" "n,n,n")])))] "TARGET_SSE4_1" "#" "&& reload_completed" [(set (match_dup 0) (zero_extend:V4SI (vec_select:V4HI (match_dup 1) (parallel [(const_int 0) 
(const_int 1) (const_int 2) (const_int 3)]))))] { operands[0] = lowpart_subreg (V4SImode, operands[0], V8HImode); if (MEM_P (operands[1])) { operands[1] = lowpart_subreg (V4HImode, operands[1], mode); operands[1] = gen_rtx_ZERO_EXTEND (V4SImode, operands[1]); emit_insn (gen_rtx_SET (operands[0], operands[1])); DONE; } operands[1] = lowpart_subreg (V8HImode, operands[1], mode); } [(set_attr "isa" "noavx,noavx,avx")]) (define_insn "avx512f_v8qiv8di2" [(set (match_operand:V8DI 0 "register_operand" "=v") (any_extend:V8DI (vec_select:V8QI (match_operand:V16QI 1 "register_operand" "v") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)]))))] "TARGET_AVX512F" "vpmovbq\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_insn "*avx512f_v8qiv8di2_1" [(set (match_operand:V8DI 0 "register_operand" "=v") (any_extend:V8DI (match_operand:V8QI 1 "memory_operand" "m")))] "TARGET_AVX512F" "vpmovbq\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_insn_and_split "*avx512f_v8qiv8di2_2" [(set (match_operand:V8DI 0 "register_operand") (any_extend:V8DI (vec_select:V8QI (subreg:V16QI (vec_concat:V2DI (match_operand:DI 1 "memory_operand") (const_int 0)) 0) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)]))))] "TARGET_AVX512F && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (any_extend:V8DI (match_dup 1)))] "operands[1] = adjust_address_nv (operands[1], V8QImode, 0);") (define_expand "v8qiv8di2" [(set (match_operand:V8DI 0 "register_operand") (any_extend:V8DI (match_operand:V8QI 1 "nonimmediate_operand")))] "TARGET_AVX512F" { if (!MEM_P (operands[1])) { rtx op1 = force_reg (V8QImode, operands[1]); op1 = lowpart_subreg (V16QImode, op1, V8QImode); emit_insn (gen_avx512f_v8qiv8di2 (operands[0], op1)); DONE; } }) (define_insn 
"avx2_v4qiv4di2" [(set (match_operand:V4DI 0 "register_operand" "=v") (any_extend:V4DI (vec_select:V4QI (match_operand:V16QI 1 "register_operand" "v") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)]))))] "TARGET_AVX2 && " "vpmovbq\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "maybe_evex") (set_attr "mode" "OI")]) (define_insn "*avx2_v4qiv4di2_1" [(set (match_operand:V4DI 0 "register_operand" "=v") (any_extend:V4DI (match_operand:V4QI 1 "memory_operand" "m")))] "TARGET_AVX2 && " "vpmovbq\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "maybe_evex") (set_attr "mode" "OI")]) (define_insn_and_split "*avx2_v4qiv4di2_2" [(set (match_operand:V4DI 0 "register_operand") (any_extend:V4DI (vec_select:V4QI (subreg:V16QI (vec_merge:V4SI (vec_duplicate:V4SI (match_operand:SI 1 "memory_operand")) (const_vector:V4SI [(const_int 0) (const_int 0) (const_int 0) (const_int 0)]) (const_int 1)) 0) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)]))))] "TARGET_AVX2 && && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (any_extend:V4DI (match_dup 1)))] "operands[1] = adjust_address_nv (operands[1], V4QImode, 0);") (define_expand "v4qiv4di2" [(set (match_operand:V4DI 0 "register_operand") (any_extend:V4DI (match_operand:V4QI 1 "nonimmediate_operand")))] "TARGET_AVX2" { if (!MEM_P (operands[1])) { rtx op1 = force_reg (V4QImode, operands[1]); op1 = lowpart_subreg (V16QImode, operands[1], V4QImode); emit_insn (gen_avx2_v4qiv4di2 (operands[0], op1)); DONE; } }) (define_insn "sse4_1_v2qiv2di2" [(set (match_operand:V2DI 0 "register_operand" "=Yr,*x,v") (any_extend:V2DI (vec_select:V2QI (match_operand:V16QI 1 "register_operand" "Yr,*x,v") (parallel [(const_int 0) (const_int 1)]))))] "TARGET_SSE4_1 && " "%vpmovbq\t{%1, %0|%0, %1}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" 
"orig,orig,maybe_evex") (set_attr "mode" "TI")]) (define_expand "v2qiv2di2" [(set (match_operand:V2DI 0 "register_operand") (any_extend:V2DI (match_operand:V2QI 1 "register_operand")))] "TARGET_SSE4_1" { rtx op1 = force_reg (V2QImode, operands[1]); op1 = lowpart_subreg (V16QImode, op1, V2QImode); emit_insn (gen_sse4_1_v2qiv2di2 (operands[0], op1)); DONE; }) (define_insn "avx512f_v8hiv8di2" [(set (match_operand:V8DI 0 "register_operand" "=v") (any_extend:V8DI (match_operand:V8HI 1 "nonimmediate_operand" "vm")))] "TARGET_AVX512F" "vpmovwq\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_expand "v8hiv8di2" [(set (match_operand:V8DI 0 "register_operand") (any_extend:V8DI (match_operand:V8HI 1 "nonimmediate_operand")))] "TARGET_AVX512F") (define_insn "avx2_v4hiv4di2" [(set (match_operand:V4DI 0 "register_operand" "=v") (any_extend:V4DI (vec_select:V4HI (match_operand:V8HI 1 "register_operand" "v") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)]))))] "TARGET_AVX2 && " "vpmovwq\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "maybe_evex") (set_attr "mode" "OI")]) (define_insn "*avx2_v4hiv4di2_1" [(set (match_operand:V4DI 0 "register_operand" "=v") (any_extend:V4DI (match_operand:V4HI 1 "memory_operand" "m")))] "TARGET_AVX2 && " "vpmovwq\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "maybe_evex") (set_attr "mode" "OI")]) (define_insn_and_split "*avx2_v4hiv4di2_2" [(set (match_operand:V4DI 0 "register_operand") (any_extend:V4DI (vec_select:V4HI (subreg:V8HI (vec_concat:V2DI (match_operand:DI 1 "memory_operand") (const_int 0)) 0) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)]))))] "TARGET_AVX2 && && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (any_extend:V4DI (match_dup 1)))] "operands[1] = adjust_address_nv (operands[1], V4HImode, 0);") (define_expand "v4hiv4di2" [(set 
(match_operand:V4DI 0 "register_operand") (any_extend:V4DI (match_operand:V4HI 1 "nonimmediate_operand")))] "TARGET_AVX2" { if (!MEM_P (operands[1])) { rtx op1 = force_reg (V4HImode, operands[1]); op1 = lowpart_subreg (V8HImode, op1, V4HImode); emit_insn (gen_avx2_v4hiv4di2 (operands[0], op1)); DONE; } }) (define_insn "sse4_1_v2hiv2di2" [(set (match_operand:V2DI 0 "register_operand" "=Yr,*x,v") (any_extend:V2DI (vec_select:V2HI (match_operand:V8HI 1 "register_operand" "Yr,*x,v") (parallel [(const_int 0) (const_int 1)]))))] "TARGET_SSE4_1 && " "%vpmovwq\t{%1, %0|%0, %1}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,maybe_evex") (set_attr "mode" "TI")]) (define_insn "*sse4_1_v2hiv2di2_1" [(set (match_operand:V2DI 0 "register_operand" "=Yr,*x,v") (any_extend:V2DI (match_operand:V2HI 1 "memory_operand" "m,m,m")))] "TARGET_SSE4_1 && " "%vpmovwq\t{%1, %0|%0, %1}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,maybe_evex") (set_attr "mode" "TI")]) (define_insn_and_split "*sse4_1_v2hiv2di2_2" [(set (match_operand:V2DI 0 "register_operand") (any_extend:V2DI (vec_select:V2HI (subreg:V8HI (vec_merge:V4SI (vec_duplicate:V4SI (match_operand:SI 1 "memory_operand")) (const_vector:V4SI [(const_int 0) (const_int 0) (const_int 0) (const_int 0)]) (const_int 1)) 0) (parallel [(const_int 0) (const_int 1)]))))] "TARGET_SSE4_1 && && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (any_extend:V2DI (match_dup 1)))] "operands[1] = adjust_address_nv (operands[1], V2HImode, 0);") (define_expand "v2hiv2di2" [(set (match_operand:V2DI 0 "register_operand") (any_extend:V2DI (match_operand:V2HI 1 "nonimmediate_operand")))] "TARGET_SSE4_1" { if (!MEM_P (operands[1])) { rtx op1 = force_reg (V2HImode, operands[1]); op1 = lowpart_subreg (V8HImode, op1, V2HImode); emit_insn (gen_sse4_1_v2hiv2di2 (operands[0], op1)); DONE; } }) (define_insn 
"avx512f_<code>v8siv8di2<mask_name>"
  [(set (match_operand:V8DI 0 "register_operand" "=v")
	(any_extend:V8DI
	  (match_operand:V8SI 1 "nonimmediate_operand" "vm")))]
  "TARGET_AVX512F"
  "vpmov<extsuffix>dq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
  [(set_attr "type" "ssemov")
   (set_attr "prefix" "evex")
   (set_attr "mode" "XI")])

;; Recognize a pmovzx-style shuffle (interleave with zero, matched by
;; pmovzx_parallel) on V16SI and rewrite it as a plain zero_extend to
;; V8DI once registers are known.
;; NOTE(review): the <...> substitutions in this region were lost in
;; extraction and have been restored from the standard i386 iterator
;; naming -- confirm against upstream sse.md.
(define_insn_and_split "*avx512f_zero_extendv8siv8di2_1"
  [(set (match_operand:V16SI 0 "register_operand" "=v")
	(vec_select:V16SI
	  (vec_concat:V32SI
	    (match_operand:V16SI 1 "nonimmediate_operand" "vm")
	    (match_operand:V16SI 2 "const0_operand" "C"))
	  (match_parallel 3 "pmovzx_parallel"
	    [(match_operand 4 "const_int_operand" "n")])))]
  "TARGET_AVX512F"
  "#"
  "&& reload_completed"
  [(set (match_dup 0)
	(zero_extend:V8DI (match_dup 1)))]
{
  operands[0] = lowpart_subreg (V8DImode, operands[0], V16SImode);
  operands[1] = lowpart_subreg (V8SImode, operands[1], V16SImode);
})

;; As above, but the source half is itself a V8SI zero-padded to V16SI,
;; so operand 1 already has the mode the zero_extend wants.
(define_insn_and_split "*avx512f_zero_extendv8siv8di2_2"
  [(set (match_operand:V16SI 0 "register_operand" "=v")
	(vec_select:V16SI
	  (vec_concat:V32SI
	    (vec_concat:V16SI
	      (match_operand:V8SI 1 "nonimmediate_operand" "vm")
	      (match_operand:V8SI 2 "const0_operand" "C"))
	    (match_operand:V16SI 3 "const0_operand" "C"))
	  (match_parallel 4 "pmovzx_parallel"
	    [(match_operand 5 "const_int_operand" "n")])))]
  "TARGET_AVX512F"
  "#"
  "&& reload_completed"
  [(set (match_dup 0)
	(zero_extend:V8DI (match_dup 1)))]
{
  operands[0] = lowpart_subreg (V8DImode, operands[0], V16SImode);
})

(define_expand "<insn>v8siv8di2"
  [(set (match_operand:V8DI 0 "register_operand" "=v")
	(any_extend:V8DI
	  (match_operand:V8SI 1 "nonimmediate_operand" "vm")))]
  "TARGET_AVX512F")

;; V4SI -> V4DI extension (vpmovsxdq / vpmovzxdq).
(define_insn "avx2_<code>v4siv4di2<mask_name>"
  [(set (match_operand:V4DI 0 "register_operand" "=v")
	(any_extend:V4DI
	  (match_operand:V4SI 1 "nonimmediate_operand" "vm")))]
  "TARGET_AVX2 && <mask_avx512vl_condition>"
  "vpmov<extsuffix>dq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
  [(set_attr "type" "ssemov")
   (set_attr "prefix" "maybe_evex")
   (set_attr "prefix_extra" "1")
   (set_attr "mode" "OI")])

(define_insn_and_split "*avx2_zero_extendv4siv4di2_1"
  [(set (match_operand:V8SI 0 "register_operand" "=v")
	(vec_select:V8SI
(vec_concat:V16SI (match_operand:V8SI 1 "nonimmediate_operand" "vm") (match_operand:V8SI 2 "const0_operand" "C")) (match_parallel 3 "pmovzx_parallel" [(match_operand 4 "const_int_operand" "n")])))] "TARGET_AVX2" "#" "&& reload_completed" [(set (match_dup 0) (zero_extend:V4DI (match_dup 1)))] { operands[0] = lowpart_subreg (V4DImode, operands[0], V8SImode); operands[1] = lowpart_subreg (V4SImode, operands[1], V8SImode); }) (define_insn_and_split "*avx2_zero_extendv4siv4di2_2" [(set (match_operand:V8SI 0 "register_operand" "=v") (vec_select:V8SI (vec_concat:V16SI (vec_concat:V8SI (match_operand:V4SI 1 "nonimmediate_operand" "vm") (match_operand:V4SI 2 "const0_operand" "C")) (match_operand:V8SI 3 "const0_operand" "C")) (match_parallel 4 "pmovzx_parallel" [(match_operand 5 "const_int_operand" "n")])))] "TARGET_AVX2" "#" "&& reload_completed" [(set (match_dup 0) (zero_extend:V4DI (match_dup 1)))] { operands[0] = lowpart_subreg (V4DImode, operands[0], V8SImode); }) (define_expand "v4siv4di2" [(set (match_operand:V4DI 0 "register_operand") (any_extend:V4DI (match_operand:V4SI 1 "nonimmediate_operand")))] "TARGET_AVX2") (define_insn "sse4_1_v2siv2di2" [(set (match_operand:V2DI 0 "register_operand" "=Yr,*x,v") (any_extend:V2DI (vec_select:V2SI (match_operand:V4SI 1 "register_operand" "Yr,*x,v") (parallel [(const_int 0) (const_int 1)]))))] "TARGET_SSE4_1 && " "%vpmovdq\t{%1, %0|%0, %1}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,maybe_evex") (set_attr "mode" "TI")]) (define_insn "*sse4_1_v2siv2di2_1" [(set (match_operand:V2DI 0 "register_operand" "=Yr,*x,v") (any_extend:V2DI (match_operand:V2SI 1 "memory_operand" "m,m,m")))] "TARGET_SSE4_1 && " "%vpmovdq\t{%1, %0|%0, %1}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,maybe_evex") (set_attr "mode" "TI")]) (define_insn_and_split "*sse4_1_v2siv2di2_2" [(set 
(match_operand:V2DI 0 "register_operand") (any_extend:V2DI (vec_select:V2SI (subreg:V4SI (vec_concat:V2DI (match_operand:DI 1 "memory_operand") (const_int 0)) 0) (parallel [(const_int 0) (const_int 1)]))))] "TARGET_SSE4_1 && && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (any_extend:V2DI (match_dup 1)))] "operands[1] = adjust_address_nv (operands[1], V2SImode, 0);") (define_insn_and_split "*sse4_1_zero_extendv2siv2di2_3" [(set (match_operand:V4SI 0 "register_operand" "=Yr,*x,v") (vec_select:V4SI (vec_concat:V8SI (match_operand:V4SI 1 "vector_operand" "YrBm,*xBm,vm") (match_operand:V4SI 2 "const0_operand" "C,C,C")) (match_parallel 3 "pmovzx_parallel" [(match_operand 4 "const_int_operand" "n,n,n")])))] "TARGET_SSE4_1" "#" "&& reload_completed" [(set (match_dup 0) (zero_extend:V2DI (vec_select:V2SI (match_dup 1) (parallel [(const_int 0) (const_int 1)]))))] { operands[0] = lowpart_subreg (V2DImode, operands[0], V4SImode); if (MEM_P (operands[1])) { operands[1] = lowpart_subreg (V2SImode, operands[1], V4SImode); operands[1] = gen_rtx_ZERO_EXTEND (V2DImode, operands[1]); emit_insn (gen_rtx_SET (operands[0], operands[1])); DONE; } } [(set_attr "isa" "noavx,noavx,avx")]) (define_insn_and_split "*sse4_1_zero_extendv2siv2di2_4" [(set (match_operand:V4SI 0 "register_operand" "=Yr,*x,v") (vec_select:V4SI (vec_concat:V8SI (vec_concat:V4SI (match_operand:V2SI 1 "vector_operand" "YrBm, *xBm, vm") (match_operand:V2SI 2 "const0_operand" "C,C,C")) (match_operand:V4SI 3 "const0_operand" "C,C,C")) (match_parallel 4 "pmovzx_parallel" [(match_operand 5 "const_int_operand" "n,n,n")])))] "TARGET_SSE4_1" "#" "&& reload_completed" [(set (match_dup 0) (zero_extend:V2DI (vec_select:V2SI (match_dup 1) (parallel [(const_int 0) (const_int 1)]))))] { operands[0] = lowpart_subreg (V2DImode, operands[0], V4SImode); if (MEM_P (operands[1])) { operands[1] = gen_rtx_ZERO_EXTEND (V2DImode, operands[1]); emit_insn (gen_rtx_SET (operands[0], operands[1])); DONE; } operands[1] = 
lowpart_subreg (V4SImode, operands[1], V2SImode); } [(set_attr "isa" "noavx,noavx,avx")]) (define_expand "v2siv2di2" [(set (match_operand:V2DI 0 "register_operand") (any_extend:V2DI (match_operand:V2SI 1 "nonimmediate_operand")))] "TARGET_SSE4_1" { if (!MEM_P (operands[1])) { rtx op1 = force_reg (V2SImode, operands[1]); op1 = lowpart_subreg (V4SImode, op1, V2SImode); emit_insn (gen_sse4_1_v2siv2di2 (operands[0], op1)); DONE; } }) ;; ptestps/ptestpd are very similar to comiss and ucomiss when ;; setting FLAGS_REG. But it is not a really compare instruction. (define_insn "avx_vtest" [(set (reg:CC FLAGS_REG) (unspec:CC [(match_operand:VF_128_256 0 "register_operand" "x") (match_operand:VF_128_256 1 "nonimmediate_operand" "xm")] UNSPEC_VTESTP))] "TARGET_AVX" "vtest\t{%1, %0|%0, %1}" [(set_attr "type" "ssecomi") (set_attr "prefix_extra" "1") (set_attr "prefix" "vex") (set_attr "mode" "")]) ;; ptest is very similar to comiss and ucomiss when setting FLAGS_REG. ;; But it is not a really compare instruction. 
(define_insn "_ptest" [(set (reg:CC FLAGS_REG) (unspec:CC [(match_operand:V_AVX 0 "register_operand" "Yr, *x, x") (match_operand:V_AVX 1 "vector_operand" "YrBm, *xBm, xm")] UNSPEC_PTEST))] "TARGET_SSE4_1" "%vptest\t{%1, %0|%0, %1}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssecomi") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,vex") (set (attr "btver2_decode") (if_then_else (match_test "mode==OImode") (const_string "vector") (const_string "*"))) (set_attr "mode" "")]) (define_insn "ptesttf2" [(set (reg:CC FLAGS_REG) (unspec:CC [(match_operand:TF 0 "register_operand" "Yr, *x, x") (match_operand:TF 1 "vector_operand" "YrBm, *xBm, xm")] UNSPEC_PTEST))] "TARGET_SSE4_1" "%vptest\t{%1, %0|%0, %1}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssecomi") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,orig,vex") (set_attr "mode" "TI")]) (define_expand "nearbyint2" [(set (match_operand:VFH 0 "register_operand") (unspec:VFH [(match_operand:VFH 1 "vector_operand") (match_dup 2)] UNSPEC_ROUND))] "TARGET_SSE4_1" "operands[2] = GEN_INT (ROUND_MXCSR | ROUND_NO_EXC);") (define_expand "rint2" [(set (match_operand:VFH 0 "register_operand") (unspec:VFH [(match_operand:VFH 1 "vector_operand") (match_dup 2)] UNSPEC_ROUND))] "TARGET_SSE4_1" "operands[2] = GEN_INT (ROUND_MXCSR);") (define_insn "_round" [(set (match_operand:VF_128_256 0 "register_operand" "=Yr,*x,x") (unspec:VF_128_256 [(match_operand:VF_128_256 1 "vector_operand" "YrBm,*xBm,xm") (match_operand:SI 2 "const_0_to_15_operand" "n,n,n")] UNSPEC_ROUND))] "TARGET_SSE4_1" "%vround\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,noavx,avx") (set_attr "type" "ssecvt") (set_attr "prefix_data16" "1,1,*") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "orig,orig,vex") (set_attr "mode" "")]) (define_expand "_round_sfix" [(match_operand: 0 "register_operand") (match_operand:VF1_128_256 1 "vector_operand") (match_operand:SI 2 "const_0_to_15_operand")] 
"TARGET_SSE4_1" { rtx tmp = gen_reg_rtx (mode); emit_insn (gen__round (tmp, operands[1], operands[2])); emit_insn (gen_fix_trunc2 (operands[0], tmp)); DONE; }) (define_expand "avx512f_round512" [(match_operand:VF_512 0 "register_operand") (match_operand:VF_512 1 "nonimmediate_operand") (match_operand:SI 2 "const_0_to_15_operand")] "TARGET_AVX512F" { emit_insn (gen_avx512f_rndscale (operands[0], operands[1], operands[2])); DONE; }) (define_expand "avx512f_roundps512_sfix" [(match_operand:V16SI 0 "register_operand") (match_operand:V16SF 1 "nonimmediate_operand") (match_operand:SI 2 "const_0_to_15_operand")] "TARGET_AVX512F" { rtx tmp = gen_reg_rtx (V16SFmode); emit_insn (gen_avx512f_rndscalev16sf (tmp, operands[1], operands[2])); emit_insn (gen_fix_truncv16sfv16si2 (operands[0], tmp)); DONE; }) (define_expand "_round_vec_pack_sfix" [(match_operand: 0 "register_operand") (match_operand:VF2 1 "vector_operand") (match_operand:VF2 2 "vector_operand") (match_operand:SI 3 "const_0_to_15_operand")] "TARGET_SSE4_1" { rtx tmp0, tmp1; if (mode == V2DFmode && TARGET_AVX && !TARGET_PREFER_AVX128 && optimize_insn_for_speed_p ()) { rtx tmp2 = gen_reg_rtx (V4DFmode); tmp0 = gen_reg_rtx (V4DFmode); tmp1 = force_reg (V2DFmode, operands[1]); emit_insn (gen_avx_vec_concatv4df (tmp0, tmp1, operands[2])); emit_insn (gen_avx_roundpd256 (tmp2, tmp0, operands[3])); emit_insn (gen_fix_truncv4dfv4si2 (operands[0], tmp2)); } else { tmp0 = gen_reg_rtx (mode); tmp1 = gen_reg_rtx (mode); emit_insn (gen__round (tmp0, operands[1], operands[3])); emit_insn (gen__round (tmp1, operands[2], operands[3])); emit_insn (gen_vec_pack_sfix_trunc_ (operands[0], tmp0, tmp1)); } DONE; }) (define_insn "sse4_1_round" [(set (match_operand:VF_128 0 "register_operand" "=Yr,*x,x,v") (vec_merge:VF_128 (unspec:VF_128 [(match_operand:VF_128 2 "nonimmediate_operand" "Yrm,*xm,xm,vm") (match_operand:SI 3 "const_0_to_15_operand" "n,n,n,n")] UNSPEC_ROUND) (match_operand:VF_128 1 "register_operand" "0,0,x,v") (const_int 1)))] 
;; NOTE(review): throughout this chunk the angle-bracketed iterator/attr
;; references (<mode>, <ssescalarmodesuffix>, <iptr>, <MODE>mode, ...)
;; appear to have been stripped by text extraction: names such as
;; "round2" and the empty string in (set_attr "mode" "") are visibly
;; truncated, and the "@ ..." multi-alternative templates have lost
;; their newline separators.  All tokens below are kept verbatim;
;; verify each pattern against upstream gcc/config/i386/sse.md.

;; Tail of a rounding define_insn whose RTL pattern begins before this
;; chunk: enable condition, ROUND*/VROUND*/VRNDSCALE alternatives
;; (noavx, noavx, avx, avx512f) and attributes only.
"TARGET_SSE4_1"
  "@ round\t{%3, %2, %0|%0, %2, %3} round\t{%3, %2, %0|%0, %2, %3} vround\t{%3, %2, %1, %0|%0, %1, %2, %3} vrndscale\t{%3, %2, %1, %0|%0, %1, %2, %3}"
  [(set_attr "isa" "noavx,noavx,avx,avx512f")
   (set_attr "type" "ssecvt")
   (set_attr "length_immediate" "1")
   (set_attr "prefix_data16" "1,1,*,*")
   (set_attr "prefix_extra" "1")
   (set_attr "prefix" "orig,orig,vex,evex")
   (set_attr "mode" "")])

;; Scalar round: round scalar operand 2 with immediate rounding-control
;; operand 3, merge the result into the low element of operand 1
;; (vec_merge ... (const_int 1)).  Maps to roundss/roundsd or
;; vrndscaless/sd depending on ISA alternative.
(define_insn "*sse4_1_round"
  [(set (match_operand:VFH_128 0 "register_operand" "=Yr,*x,x,v")
	(vec_merge:VFH_128
	  (vec_duplicate:VFH_128
	    (unspec:
	      [(match_operand: 2 "nonimmediate_operand" "Yrm,*xm,xm,vm")
	       (match_operand:SI 3 "const_0_to_15_operand" "n,n,n,n")]
	      UNSPEC_ROUND))
	  (match_operand:VFH_128 1 "register_operand" "0,0,x,v")
	  (const_int 1)))]
  "TARGET_SSE4_1"
  "@ round\t{%3, %2, %0|%0, %2, %3} round\t{%3, %2, %0|%0, %2, %3} vround\t{%3, %2, %1, %0|%0, %1, %2, %3} vrndscale\t{%3, %2, %1, %0|%0, %1, %2, %3}"
  [(set_attr "isa" "noavx,noavx,avx,avx512f")
   (set_attr "type" "ssecvt")
   (set_attr "length_immediate" "1")
   (set_attr "prefix_data16" "1,1,*,*")
   (set_attr "prefix_extra" "1")
   (set_attr "prefix" "orig,orig,vex,evex")
   (set_attr "mode" "")])

;; round-to-nearest expander: implements round() as
;; trunc (x + copysign (nextafter (0.5, 0.0), x)); valid only when
;; FP traps are ignored (!flag_trapping_math).  nextafter(0.5,0) is
;; used instead of 0.5 to avoid double rounding on halfway cases.
(define_expand "round2"
  [(set (match_dup 3)
	(plus:VF (match_operand:VF 1 "register_operand")
		 (match_dup 2)))
   (set (match_operand:VF 0 "register_operand")
	(unspec:VF [(match_dup 3) (match_dup 4)] UNSPEC_ROUND))]
  "TARGET_SSE4_1 && !flag_trapping_math"
{
  machine_mode scalar_mode;
  const struct real_format *fmt;
  REAL_VALUE_TYPE pred_half, half_minus_pred_half;
  rtx half, vec_half;
  scalar_mode = GET_MODE_INNER (mode);
  /* load nextafter (0.5, 0.0) */
  fmt = REAL_MODE_FORMAT (scalar_mode);
  real_2expN (&half_minus_pred_half, -(fmt->p) - 1, scalar_mode);
  real_arithmetic (&pred_half, MINUS_EXPR, &dconsthalf, &half_minus_pred_half);
  half = const_double_from_real_value (pred_half, scalar_mode);
  vec_half = ix86_build_const_vector (mode, true, half);
  vec_half = force_reg (mode, vec_half);
  operands[2] = gen_reg_rtx (mode);
  emit_insn (gen_copysign3 (operands[2], vec_half, operands[1]));
  operands[3] = gen_reg_rtx (mode);
  operands[4] = GEN_INT (ROUND_TRUNC);
})

;; round() followed by conversion to a signed-integer vector.
(define_expand "round2_sfix"
  [(match_operand: 0 "register_operand")
   (match_operand:VF1 1 "register_operand")]
  "TARGET_SSE4_1 && !flag_trapping_math"
{
  rtx tmp = gen_reg_rtx (mode);
  emit_insn (gen_round2 (tmp, operands[1]));
  emit_insn (gen_fix_trunc2 (operands[0], tmp));
  DONE;
})

;; round() of two DF vectors, packed into one SI vector.  For V2DF on
;; AVX (and when 256-bit is not dispreferred) the two inputs are first
;; concatenated to V4DF so a single 256-bit round + truncating convert
;; can be used; otherwise each half is rounded and the results packed.
(define_expand "round2_vec_pack_sfix"
  [(match_operand: 0 "register_operand")
   (match_operand:VF2 1 "register_operand")
   (match_operand:VF2 2 "register_operand")]
  "TARGET_SSE4_1 && !flag_trapping_math"
{
  rtx tmp0, tmp1;
  if (mode == V2DFmode
      && TARGET_AVX && !TARGET_PREFER_AVX128 && optimize_insn_for_speed_p ())
    {
      rtx tmp2 = gen_reg_rtx (V4DFmode);
      tmp0 = gen_reg_rtx (V4DFmode);
      tmp1 = force_reg (V2DFmode, operands[1]);
      emit_insn (gen_avx_vec_concatv4df (tmp0, tmp1, operands[2]));
      emit_insn (gen_roundv4df2 (tmp2, tmp0));
      emit_insn (gen_fix_truncv4dfv4si2 (operands[0], tmp2));
    }
  else
    {
      tmp0 = gen_reg_rtx (mode);
      tmp1 = gen_reg_rtx (mode);
      emit_insn (gen_round2 (tmp0, operands[1]));
      emit_insn (gen_round2 (tmp1, operands[2]));
      emit_insn (gen_vec_pack_sfix_trunc_ (operands[0], tmp0, tmp1));
    }
  DONE;
})

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Intel SSE4.2 string/text processing instructions
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;; PCMPESTR combined pattern: a single insn that nominally produces all
;; three results (ECX index, XMM0 mask, flags).  Split before reload
;; into only the variants whose results are actually used, as shown by
;; REG_UNUSED notes on this insn.
(define_insn_and_split "sse4_2_pcmpestr"
  [(set (match_operand:SI 0 "register_operand" "=c,c")
	(unspec:SI
	  [(match_operand:V16QI 2 "register_operand" "x,x")
	   (match_operand:SI 3 "register_operand" "a,a")
	   (match_operand:V16QI 4 "nonimmediate_operand" "x,m")
	   (match_operand:SI 5 "register_operand" "d,d")
	   (match_operand:SI 6 "const_0_to_255_operand" "n,n")]
	  UNSPEC_PCMPESTR))
   (set (match_operand:V16QI 1 "register_operand" "=Yz,Yz")
	(unspec:V16QI
	  [(match_dup 2) (match_dup 3) (match_dup 4)
	   (match_dup 5) (match_dup 6)]
	  UNSPEC_PCMPESTR))
   (set (reg:CC FLAGS_REG)
	(unspec:CC
	  [(match_dup 2) (match_dup 3) (match_dup 4)
	   (match_dup 5) (match_dup 6)]
	  UNSPEC_PCMPESTR))]
  "TARGET_SSE4_2 && ix86_pre_reload_split ()"
  "#"
  "&& 1"
  [(const_int 0)]
{
  int ecx = !find_regno_note (curr_insn, REG_UNUSED, REGNO (operands[0]));
  int xmm0 = !find_regno_note (curr_insn, REG_UNUSED, REGNO (operands[1]));
  int flags = !find_regno_note (curr_insn, REG_UNUSED, FLAGS_REG);
  if (ecx)
    emit_insn (gen_sse4_2_pcmpestri (operands[0], operands[2], operands[3],
				     operands[4], operands[5], operands[6]));
  if (xmm0)
    emit_insn (gen_sse4_2_pcmpestrm (operands[1], operands[2], operands[3],
				     operands[4], operands[5], operands[6]));
  if (flags && !(ecx || xmm0))
    emit_insn (gen_sse4_2_pcmpestr_cconly (NULL, NULL, operands[2],
					   operands[3], operands[4],
					   operands[5], operands[6]));
  if (!(flags || ecx || xmm0))
    emit_note (NOTE_INSN_DELETED);
  DONE;
}
  [(set_attr "type" "sselog")
   (set_attr "prefix_data16" "1")
   (set_attr "prefix_extra" "1")
   (set_attr "length_immediate" "1")
   (set_attr "memory" "none,load")
   (set_attr "mode" "TI")])

;; pcmpestri: index result in ECX plus flags.
(define_insn "sse4_2_pcmpestri"
  [(set (match_operand:SI 0 "register_operand" "=c,c")
	(unspec:SI
	  [(match_operand:V16QI 1 "register_operand" "x,x")
	   (match_operand:SI 2 "register_operand" "a,a")
	   (match_operand:V16QI 3 "nonimmediate_operand" "x,m")
	   (match_operand:SI 4 "register_operand" "d,d")
	   (match_operand:SI 5 "const_0_to_255_operand" "n,n")]
	  UNSPEC_PCMPESTR))
   (set (reg:CC FLAGS_REG)
	(unspec:CC
	  [(match_dup 1) (match_dup 2) (match_dup 3)
	   (match_dup 4) (match_dup 5)]
	  UNSPEC_PCMPESTR))]
  "TARGET_SSE4_2"
  "%vpcmpestri\t{%5, %3, %1|%1, %3, %5}"
  [(set_attr "type" "sselog")
   (set_attr "prefix_data16" "1")
   (set_attr "prefix_extra" "1")
   (set_attr "prefix" "maybe_vex")
   (set_attr "length_immediate" "1")
   (set_attr "btver2_decode" "vector")
   (set_attr "memory" "none,load")
   (set_attr "mode" "TI")])

;; pcmpestrm: mask result in XMM0 (constraint Yz) plus flags.
(define_insn "sse4_2_pcmpestrm"
  [(set (match_operand:V16QI 0 "register_operand" "=Yz,Yz")
	(unspec:V16QI
	  [(match_operand:V16QI 1 "register_operand" "x,x")
	   (match_operand:SI 2 "register_operand" "a,a")
	   (match_operand:V16QI 3 "nonimmediate_operand" "x,m")
	   (match_operand:SI 4 "register_operand" "d,d")
	   (match_operand:SI 5 "const_0_to_255_operand" "n,n")]
	  UNSPEC_PCMPESTR))
   (set (reg:CC FLAGS_REG)
	(unspec:CC
	  [(match_dup 1) (match_dup 2) (match_dup 3)
	   (match_dup 4) (match_dup 5)]
	  UNSPEC_PCMPESTR))]
  "TARGET_SSE4_2"
  "%vpcmpestrm\t{%5, %3, %1|%1, %3, %5}"
  [(set_attr "type" "sselog")
   (set_attr "prefix_data16" "1")
   (set_attr "prefix_extra" "1")
   (set_attr "length_immediate" "1")
   (set_attr "prefix" "maybe_vex")
   (set_attr "btver2_decode" "vector")
   (set_attr "memory" "none,load")
   (set_attr "mode" "TI")])

;; pcmpestr when only the flags are wanted: index/mask destinations are
;; scratches, so either the ...i or ...m encoding may be chosen.
(define_insn "sse4_2_pcmpestr_cconly"
  [(set (reg:CC FLAGS_REG)
	(unspec:CC
	  [(match_operand:V16QI 2 "register_operand" "x,x,x,x")
	   (match_operand:SI 3 "register_operand" "a,a,a,a")
	   (match_operand:V16QI 4 "nonimmediate_operand" "x,m,x,m")
	   (match_operand:SI 5 "register_operand" "d,d,d,d")
	   (match_operand:SI 6 "const_0_to_255_operand" "n,n,n,n")]
	  UNSPEC_PCMPESTR))
   (clobber (match_scratch:V16QI 0 "=Yz,Yz,X,X"))
   (clobber (match_scratch:SI 1 "= X, X,c,c"))]
  "TARGET_SSE4_2"
  "@ %vpcmpestrm\t{%6, %4, %2|%2, %4, %6} %vpcmpestrm\t{%6, %4, %2|%2, %4, %6} %vpcmpestri\t{%6, %4, %2|%2, %4, %6} %vpcmpestri\t{%6, %4, %2|%2, %4, %6}"
  [(set_attr "type" "sselog")
   (set_attr "prefix_data16" "1")
   (set_attr "prefix_extra" "1")
   (set_attr "length_immediate" "1")
   (set_attr "memory" "none,load,none,load")
   (set_attr "btver2_decode" "vector,vector,vector,vector")
   (set_attr "prefix" "maybe_vex")
   (set_attr "mode" "TI")])

;; PCMPISTR combined pattern (implicit-length variant); same
;; split-by-used-results scheme as sse4_2_pcmpestr above.
(define_insn_and_split "sse4_2_pcmpistr"
  [(set (match_operand:SI 0 "register_operand" "=c,c")
	(unspec:SI
	  [(match_operand:V16QI 2 "register_operand" "x,x")
	   (match_operand:V16QI 3 "nonimmediate_operand" "x,m")
	   (match_operand:SI 4 "const_0_to_255_operand" "n,n")]
	  UNSPEC_PCMPISTR))
   (set (match_operand:V16QI 1 "register_operand" "=Yz,Yz")
	(unspec:V16QI
	  [(match_dup 2) (match_dup 3) (match_dup 4)]
	  UNSPEC_PCMPISTR))
   (set (reg:CC FLAGS_REG)
	(unspec:CC
	  [(match_dup 2) (match_dup 3) (match_dup 4)]
	  UNSPEC_PCMPISTR))]
  "TARGET_SSE4_2 && ix86_pre_reload_split ()"
  "#"
  "&& 1"
  [(const_int 0)]
{
  int ecx = !find_regno_note (curr_insn, REG_UNUSED, REGNO (operands[0]));
  int xmm0 = !find_regno_note (curr_insn, REG_UNUSED, REGNO (operands[1]));
  int flags = !find_regno_note (curr_insn, REG_UNUSED, FLAGS_REG);
  if (ecx)
    emit_insn (gen_sse4_2_pcmpistri (operands[0], operands[2],
				     operands[3], operands[4]));
  if (xmm0)
    emit_insn (gen_sse4_2_pcmpistrm (operands[1], operands[2],
				     operands[3], operands[4]));
  if (flags && !(ecx || xmm0))
    emit_insn (gen_sse4_2_pcmpistr_cconly (NULL, NULL, operands[2],
					   operands[3], operands[4]));
  if (!(flags || ecx || xmm0))
    emit_note (NOTE_INSN_DELETED);
  DONE;
}
  [(set_attr "type" "sselog")
   (set_attr "prefix_data16" "1")
   (set_attr "prefix_extra" "1")
   (set_attr "length_immediate" "1")
   (set_attr "memory" "none,load")
   (set_attr "mode" "TI")])

;; pcmpistri: index result in ECX plus flags.
(define_insn "sse4_2_pcmpistri"
  [(set (match_operand:SI 0 "register_operand" "=c,c")
	(unspec:SI
	  [(match_operand:V16QI 1 "register_operand" "x,x")
	   (match_operand:V16QI 2 "nonimmediate_operand" "x,m")
	   (match_operand:SI 3 "const_0_to_255_operand" "n,n")]
	  UNSPEC_PCMPISTR))
   (set (reg:CC FLAGS_REG)
	(unspec:CC
	  [(match_dup 1) (match_dup 2) (match_dup 3)]
	  UNSPEC_PCMPISTR))]
  "TARGET_SSE4_2"
  "%vpcmpistri\t{%3, %2, %1|%1, %2, %3}"
  [(set_attr "type" "sselog")
   (set_attr "prefix_data16" "1")
   (set_attr "prefix_extra" "1")
   (set_attr "length_immediate" "1")
   (set_attr "prefix" "maybe_vex")
   (set_attr "memory" "none,load")
   (set_attr "btver2_decode" "vector")
   (set_attr "mode" "TI")])

;; pcmpistrm: mask result in XMM0 plus flags.
(define_insn "sse4_2_pcmpistrm"
  [(set (match_operand:V16QI 0 "register_operand" "=Yz,Yz")
	(unspec:V16QI
	  [(match_operand:V16QI 1 "register_operand" "x,x")
	   (match_operand:V16QI 2 "nonimmediate_operand" "x,m")
	   (match_operand:SI 3 "const_0_to_255_operand" "n,n")]
	  UNSPEC_PCMPISTR))
   (set (reg:CC FLAGS_REG)
	(unspec:CC
	  [(match_dup 1) (match_dup 2) (match_dup 3)]
	  UNSPEC_PCMPISTR))]
  "TARGET_SSE4_2"
  "%vpcmpistrm\t{%3, %2, %1|%1, %2, %3}"
  [(set_attr "type" "sselog")
   (set_attr "prefix_data16" "1")
   (set_attr "prefix_extra" "1")
   (set_attr "length_immediate" "1")
   (set_attr "prefix" "maybe_vex")
   (set_attr "memory" "none,load")
   (set_attr "btver2_decode" "vector")
   (set_attr "mode" "TI")])

;; pcmpistr flags-only variant; destinations are scratches.
(define_insn "sse4_2_pcmpistr_cconly"
  [(set (reg:CC FLAGS_REG)
	(unspec:CC
	  [(match_operand:V16QI 2 "register_operand" "x,x,x,x")
	   (match_operand:V16QI 3 "nonimmediate_operand" "x,m,x,m")
	   (match_operand:SI 4 "const_0_to_255_operand" "n,n,n,n")]
	  UNSPEC_PCMPISTR))
   (clobber (match_scratch:V16QI 0 "=Yz,Yz,X,X"))
   (clobber (match_scratch:SI 1 "= X, X,c,c"))]
  "TARGET_SSE4_2"
  "@ %vpcmpistrm\t{%4, %3, %2|%2, %3, %4} %vpcmpistrm\t{%4, %3, %2|%2, %3, %4} %vpcmpistri\t{%4, %3, %2|%2, %3, %4} %vpcmpistri\t{%4, %3, %2|%2, %3, %4}"
  [(set_attr "type" "sselog")
   (set_attr "prefix_data16" "1")
   (set_attr "prefix_extra" "1")
   (set_attr "length_immediate" "1")
   (set_attr "memory" "none,load,none,load")
   (set_attr "prefix" "maybe_vex")
   (set_attr "btver2_decode" "vector,vector,vector,vector")
   (set_attr "mode" "TI")])

;; Packed float variants
;; Memory mode of the SF-element gather/scatter prefetch, keyed by the
;; index-vector mode.
(define_mode_attr GATHER_SCATTER_SF_MEM_MODE
  [(V8DI "V8SF") (V16SI "V16SF")])

;; AVX512PF gather prefetch, float elements: build the VSIB address
;; unspec (base, index vector, scale) for operand 5 at expand time.
(define_expand "avx512pf_gatherpfsf"
  [(unspec
     [(match_operand: 0 "register_operand")
      (mem:
	(match_par_dup 5
	  [(match_operand 2 "vsib_address_operand")
	   (match_operand:VI48_512 1 "register_operand")
	   (match_operand:SI 3 "const1248_operand")]))
      (match_operand:SI 4 "const_2_to_3_operand")]
     UNSPEC_GATHER_PREFETCH)]
  "TARGET_AVX512PF"
{
  operands[5]
    = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, operands[2], operands[1],
					operands[3]),
		      UNSPEC_VSIBADDR);
})

;; Masked vgatherpf0ps/vgatherpf1ps; operand 4 selects the hint level.
(define_insn "*avx512pf_gatherpfsf_mask"
  [(unspec
     [(match_operand: 0 "register_operand" "Yk")
      (match_operator: 5 "vsib_mem_operator"
	[(unspec:P
	   [(match_operand:P 2 "vsib_address_operand" "Tv")
	    (match_operand:VI48_512 1 "register_operand" "v")
	    (match_operand:SI 3 "const1248_operand" "n")]
	   UNSPEC_VSIBADDR)])
      (match_operand:SI 4 "const_2_to_3_operand" "n")]
     UNSPEC_GATHER_PREFETCH)]
  "TARGET_AVX512PF"
{
  switch (INTVAL (operands[4]))
    {
    case 3:
      /* %X5 so that we don't emit any *WORD PTR for -masm=intel, as
	 gas changed what it requires incompatibly.  */
      return "%M2vgatherpf0ps\t{%5%{%0%}|%X5%{%0%}}";
    case 2:
      return "%M2vgatherpf1ps\t{%5%{%0%}|%X5%{%0%}}";
    default:
      gcc_unreachable ();
    }
}
  [(set_attr "type" "sse")
   (set_attr "prefix" "evex")
   (set_attr "mode" "XI")])

;; Packed double variants
(define_expand "avx512pf_gatherpfdf"
  [(unspec
     [(match_operand: 0 "register_operand")
      (mem:V8DF
	(match_par_dup 5
	  [(match_operand 2 "vsib_address_operand")
	   (match_operand:VI4_256_8_512 1 "register_operand")
	   (match_operand:SI 3 "const1248_operand")]))
      (match_operand:SI 4 "const_2_to_3_operand")]
     UNSPEC_GATHER_PREFETCH)]
  "TARGET_AVX512PF"
{
  operands[5]
    = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, operands[2], operands[1],
					operands[3]),
		      UNSPEC_VSIBADDR);
})

;; Masked vgatherpf0pd/vgatherpf1pd.
(define_insn "*avx512pf_gatherpfdf_mask"
  [(unspec
     [(match_operand: 0 "register_operand" "Yk")
      (match_operator:V8DF 5 "vsib_mem_operator"
	[(unspec:P
	   [(match_operand:P 2 "vsib_address_operand" "Tv")
	    (match_operand:VI4_256_8_512 1 "register_operand" "v")
	    (match_operand:SI 3 "const1248_operand" "n")]
	   UNSPEC_VSIBADDR)])
      (match_operand:SI 4 "const_2_to_3_operand" "n")]
     UNSPEC_GATHER_PREFETCH)]
  "TARGET_AVX512PF"
{
  switch (INTVAL (operands[4]))
    {
    case 3:
      /* %X5 so that we don't emit any *WORD PTR for -masm=intel, as
	 gas changed what it requires incompatibly.  */
      return "%M2vgatherpf0pd\t{%5%{%0%}|%X5%{%0%}}";
    case 2:
      return "%M2vgatherpf1pd\t{%5%{%0%}|%X5%{%0%}}";
    default:
      gcc_unreachable ();
    }
}
  [(set_attr "type" "sse")
   (set_attr "prefix" "evex")
   (set_attr "mode" "XI")])

;; Packed float variants
;; Scatter prefetch accepts hint operand values {2,3,6,7} (T1/T0 with
;; or without the exclusive-ownership hint), hence const2367_operand.
(define_expand "avx512pf_scatterpfsf"
  [(unspec
     [(match_operand: 0 "register_operand")
      (mem:
	(match_par_dup 5
	  [(match_operand 2 "vsib_address_operand")
	   (match_operand:VI48_512 1 "register_operand")
	   (match_operand:SI 3 "const1248_operand")]))
      (match_operand:SI 4 "const2367_operand")]
     UNSPEC_SCATTER_PREFETCH)]
  "TARGET_AVX512PF"
{
  operands[5]
    = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, operands[2], operands[1],
					operands[3]),
		      UNSPEC_VSIBADDR);
})

;; Masked vscatterpf0ps/vscatterpf1ps.
(define_insn "*avx512pf_scatterpfsf_mask"
  [(unspec
     [(match_operand: 0 "register_operand" "Yk")
      (match_operator: 5 "vsib_mem_operator"
	[(unspec:P
	   [(match_operand:P 2 "vsib_address_operand" "Tv")
	    (match_operand:VI48_512 1 "register_operand" "v")
	    (match_operand:SI 3 "const1248_operand" "n")]
	   UNSPEC_VSIBADDR)])
      (match_operand:SI 4 "const2367_operand" "n")]
     UNSPEC_SCATTER_PREFETCH)]
  "TARGET_AVX512PF"
{
  switch (INTVAL (operands[4]))
    {
    case 3:
    case 7:
      /* %X5 so that we don't emit any *WORD PTR for -masm=intel, as
	 gas changed what it requires incompatibly.  */
      return "%M2vscatterpf0ps\t{%5%{%0%}|%X5%{%0%}}";
    case 2:
    case 6:
      return "%M2vscatterpf1ps\t{%5%{%0%}|%X5%{%0%}}";
    default:
      gcc_unreachable ();
    }
}
  [(set_attr "type" "sse")
   (set_attr "prefix" "evex")
   (set_attr "mode" "XI")])

;; Packed double variants
(define_expand "avx512pf_scatterpfdf"
  [(unspec
     [(match_operand: 0 "register_operand")
      (mem:V8DF
	(match_par_dup 5
	  [(match_operand 2 "vsib_address_operand")
	   (match_operand:VI4_256_8_512 1 "register_operand")
	   (match_operand:SI 3 "const1248_operand")]))
      (match_operand:SI 4 "const2367_operand")]
     UNSPEC_SCATTER_PREFETCH)]
  "TARGET_AVX512PF"
{
  operands[5]
    = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, operands[2], operands[1],
					operands[3]),
		      UNSPEC_VSIBADDR);
})

;; Masked vscatterpf0pd/vscatterpf1pd.
(define_insn "*avx512pf_scatterpfdf_mask"
  [(unspec
     [(match_operand: 0 "register_operand" "Yk")
      (match_operator:V8DF 5 "vsib_mem_operator"
	[(unspec:P
	   [(match_operand:P 2 "vsib_address_operand" "Tv")
	    (match_operand:VI4_256_8_512 1 "register_operand" "v")
	    (match_operand:SI 3 "const1248_operand" "n")]
	   UNSPEC_VSIBADDR)])
      (match_operand:SI 4 "const2367_operand" "n")]
     UNSPEC_SCATTER_PREFETCH)]
  "TARGET_AVX512PF"
{
  switch (INTVAL (operands[4]))
    {
    case 3:
    case 7:
      /* %X5 so that we don't emit any *WORD PTR for -masm=intel, as
	 gas changed what it requires incompatibly.  */
      return "%M2vscatterpf0pd\t{%5%{%0%}|%X5%{%0%}}";
    case 2:
    case 6:
      return "%M2vscatterpf1pd\t{%5%{%0%}|%X5%{%0%}}";
    default:
      gcc_unreachable ();
    }
}
  [(set_attr "type" "sse")
   (set_attr "prefix" "evex")
   (set_attr "mode" "XI")])

;; AVX512ER approximation insns: 2^x, reciprocal and rsqrt to 28 bits.
;; NOTE(review): operand-1 predicate strings are empty ("" ""), which
;; looks like another casualty of the stripped <round_saeonly_...>
;; attribute text -- confirm against upstream.
(define_insn "avx512er_exp2"
  [(set (match_operand:VF_512 0 "register_operand" "=v")
	(unspec:VF_512
	  [(match_operand:VF_512 1 "" "")]
	  UNSPEC_EXP2))]
  "TARGET_AVX512ER"
  "vexp2\t{%1, %0|%0, %1}"
  [(set_attr "prefix" "evex")
   (set_attr "type" "sse")
   (set_attr "mode" "")])

(define_insn "avx512er_rcp28"
  [(set (match_operand:VF_512 0 "register_operand" "=v")
	(unspec:VF_512
	  [(match_operand:VF_512 1 "" "")]
	  UNSPEC_RCP28))]
  "TARGET_AVX512ER"
  "vrcp28\t{%1, %0|%0, %1}"
  [(set_attr "prefix" "evex")
   (set_attr "type" "sse")
   (set_attr "mode" "")])

;; Scalar rcp28: result merged into low element of operand 2.
(define_insn "avx512er_vmrcp28"
  [(set (match_operand:VF_128 0 "register_operand" "=v")
	(vec_merge:VF_128
	  (unspec:VF_128
	    [(match_operand:VF_128 1 "" "")]
	    UNSPEC_RCP28)
	  (match_operand:VF_128 2 "register_operand" "v")
	  (const_int 1)))]
  "TARGET_AVX512ER"
  "vrcp28\t{%1, %2, %0|%0, %2, %1}"
  [(set_attr "length_immediate" "1")
   (set_attr "prefix" "evex")
   (set_attr "type" "sse")
   (set_attr "mode" "")])

(define_insn "avx512er_rsqrt28"
  [(set (match_operand:VF_512 0 "register_operand" "=v")
	(unspec:VF_512
	  [(match_operand:VF_512 1 "" "")]
	  UNSPEC_RSQRT28))]
  "TARGET_AVX512ER"
  "vrsqrt28\t{%1, %0|%0, %1}"
  [(set_attr "prefix" "evex")
   (set_attr "type" "sse")
   (set_attr "mode" "")])

;; Scalar rsqrt28: result merged into low element of operand 2.
(define_insn "avx512er_vmrsqrt28"
  [(set (match_operand:VF_128 0 "register_operand" "=v")
	(vec_merge:VF_128
	  (unspec:VF_128
	    [(match_operand:VF_128 1 "" "")]
	    UNSPEC_RSQRT28)
	  (match_operand:VF_128 2 "register_operand" "v")
	  (const_int 1)))]
  "TARGET_AVX512ER"
  "vrsqrt28\t{%1, %2, %0|%0, %2, %1}"
  [(set_attr "length_immediate" "1")
   (set_attr "type" "sse")
   (set_attr "prefix" "evex")
   (set_attr "mode" "")])

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; XOP instructions
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; NOTE(review): as elsewhere in this chunk, the angle-bracketed
;; iterator/attr references (<macs>, <madcs>, <mode>, <sse_suffix>, ...)
;; have been stripped by extraction, leaving truncated names such as
;; "xop_p" and "vprot\t".  Tokens are kept verbatim; verify against
;; upstream gcc/config/i386/sse.md.

;; Iterator over plain and signed-saturating add, with the matching
;; XOP mnemonic fragments (macs/macss, madcs/madcss).
(define_code_iterator xop_plus [plus ss_plus])
(define_code_attr macs [(plus "macs") (ss_plus "macss")])
(define_code_attr madcs [(plus "madcs") (ss_plus "madcss")])

;; XOP parallel integer multiply/add instructions.
;; vpmacs*/vpmacss*: op0 = op1 * op2 + op3 (element-wise, 16/32-bit).
(define_insn "xop_p"
  [(set (match_operand:VI24_128 0 "register_operand" "=x")
	(xop_plus:VI24_128
	  (mult:VI24_128
	    (match_operand:VI24_128 1 "nonimmediate_operand" "%x")
	    (match_operand:VI24_128 2 "nonimmediate_operand" "xm"))
	  (match_operand:VI24_128 3 "register_operand" "x")))]
  "TARGET_XOP"
  "vp\t{%3, %2, %1, %0|%0, %1, %2, %3}"
  [(set_attr "type" "ssemuladd")
   (set_attr "mode" "TI")])

;; vpmacsdql/vpmacssdql: widening multiply of the even (low) SI
;; elements to DI, plus accumulator.
(define_insn "xop_pdql"
  [(set (match_operand:V2DI 0 "register_operand" "=x")
	(xop_plus:V2DI
	  (mult:V2DI
	    (sign_extend:V2DI
	      (vec_select:V2SI
		(match_operand:V4SI 1 "nonimmediate_operand" "%x")
		(parallel [(const_int 0) (const_int 2)])))
	    (sign_extend:V2DI
	      (vec_select:V2SI
		(match_operand:V4SI 2 "nonimmediate_operand" "xm")
		(parallel [(const_int 0) (const_int 2)]))))
	  (match_operand:V2DI 3 "register_operand" "x")))]
  "TARGET_XOP"
  "vpdql\t{%3, %2, %1, %0|%0, %1, %2, %3}"
  [(set_attr "type" "ssemuladd")
   (set_attr "mode" "TI")])

;; vpmacsdqh/vpmacssdqh: same, for the odd (high) SI elements.
(define_insn "xop_pdqh"
  [(set (match_operand:V2DI 0 "register_operand" "=x")
	(xop_plus:V2DI
	  (mult:V2DI
	    (sign_extend:V2DI
	      (vec_select:V2SI
		(match_operand:V4SI 1 "nonimmediate_operand" "%x")
		(parallel [(const_int 1) (const_int 3)])))
	    (sign_extend:V2DI
	      (vec_select:V2SI
		(match_operand:V4SI 2 "nonimmediate_operand" "xm")
		(parallel [(const_int 1) (const_int 3)]))))
	  (match_operand:V2DI 3 "register_operand" "x")))]
  "TARGET_XOP"
  "vpdqh\t{%3, %2, %1, %0|%0, %1, %2, %3}"
  [(set_attr "type" "ssemuladd")
   (set_attr "mode" "TI")])

;; XOP parallel integer multiply/add instructions for the intrinisics
;; Odd-element HI->SI widening multiply plus accumulator
;; (vpmacswd/vpmacsswd family).
(define_insn "xop_pwd"
  [(set (match_operand:V4SI 0 "register_operand" "=x")
	(xop_plus:V4SI
	  (mult:V4SI
	    (sign_extend:V4SI
	      (vec_select:V4HI
		(match_operand:V8HI 1 "nonimmediate_operand" "%x")
		(parallel [(const_int 1) (const_int 3)
			   (const_int 5) (const_int 7)])))
	    (sign_extend:V4SI
	      (vec_select:V4HI
		(match_operand:V8HI 2 "nonimmediate_operand" "xm")
		(parallel [(const_int 1) (const_int 3)
			   (const_int 5) (const_int 7)]))))
	  (match_operand:V4SI 3 "register_operand" "x")))]
  "TARGET_XOP"
  "vpwd\t{%3, %2, %1, %0|%0, %1, %2, %3}"
  [(set_attr "type" "ssemuladd")
   (set_attr "mode" "TI")])

;; madd-style: sum of even-lane and odd-lane HI->SI products, plus
;; accumulator (vpmadcswd/vpmadcsswd family).  NOTE(review): the name
;; collides with the insn above only because the <madcs> attr was
;; stripped from it.
(define_insn "xop_pwd"
  [(set (match_operand:V4SI 0 "register_operand" "=x")
	(xop_plus:V4SI
	  (plus:V4SI
	    (mult:V4SI
	      (sign_extend:V4SI
		(vec_select:V4HI
		  (match_operand:V8HI 1 "nonimmediate_operand" "%x")
		  (parallel [(const_int 0) (const_int 2)
			     (const_int 4) (const_int 6)])))
	      (sign_extend:V4SI
		(vec_select:V4HI
		  (match_operand:V8HI 2 "nonimmediate_operand" "xm")
		  (parallel [(const_int 0) (const_int 2)
			     (const_int 4) (const_int 6)]))))
	    (mult:V4SI
	      (sign_extend:V4SI
		(vec_select:V4HI
		  (match_dup 1)
		  (parallel [(const_int 1) (const_int 3)
			     (const_int 5) (const_int 7)])))
	      (sign_extend:V4SI
		(vec_select:V4HI
		  (match_dup 2)
		  (parallel [(const_int 1) (const_int 3)
			     (const_int 5) (const_int 7)])))))
	  (match_operand:V4SI 3 "register_operand" "x")))]
  "TARGET_XOP"
  "vpwd\t{%3, %2, %1, %0|%0, %1, %2, %3}"
  [(set_attr "type" "ssemuladd")
   (set_attr "mode" "TI")])

;; XOP parallel XMM conditional moves
;; vpcmov: bitwise select — op0 = (op3 & op1) | (~op3 & op2).
(define_insn "xop_pcmov_"
  [(set (match_operand:V_128_256 0 "register_operand" "=x,x")
	(if_then_else:V_128_256
	  (match_operand:V_128_256 3 "nonimmediate_operand" "x,m")
	  (match_operand:V_128_256 1 "register_operand" "x,x")
	  (match_operand:V_128_256 2 "nonimmediate_operand" "xm,x")))]
  "TARGET_XOP"
  "vpcmov\t{%3, %2, %1, %0|%0, %1, %2, %3}"
  [(set_attr "type" "sse4arg")])

;; XOP horizontal add/subtract instructions
;; vphaddbw/vphaddubw: pairwise add of adjacent bytes, widened to HI.
(define_insn "xop_phaddbw"
  [(set (match_operand:V8HI 0 "register_operand" "=x")
	(plus:V8HI
	  (any_extend:V8HI
	    (vec_select:V8QI
	      (match_operand:V16QI 1 "nonimmediate_operand" "xm")
	      (parallel [(const_int 0) (const_int 2) (const_int 4)
			 (const_int 6) (const_int 8) (const_int 10)
			 (const_int 12) (const_int 14)])))
	  (any_extend:V8HI
	    (vec_select:V8QI
	      (match_dup 1)
	      (parallel [(const_int 1) (const_int 3) (const_int 5)
			 (const_int 7) (const_int 9) (const_int 11)
			 (const_int 13) (const_int 15)])))))]
  "TARGET_XOP"
  "vphaddbw\t{%1, %0|%0, %1}"
  [(set_attr "type" "sseiadd1")])

;; vphaddbd/vphaddubd: sum of each group of 4 bytes, widened to SI.
(define_insn "xop_phaddbd"
  [(set (match_operand:V4SI 0 "register_operand" "=x")
	(plus:V4SI
	  (plus:V4SI
	    (any_extend:V4SI
	      (vec_select:V4QI
		(match_operand:V16QI 1 "nonimmediate_operand" "xm")
		(parallel [(const_int 0) (const_int 4)
			   (const_int 8) (const_int 12)])))
	    (any_extend:V4SI
	      (vec_select:V4QI
		(match_dup 1)
		(parallel [(const_int 1) (const_int 5)
			   (const_int 9) (const_int 13)]))))
	  (plus:V4SI
	    (any_extend:V4SI
	      (vec_select:V4QI
		(match_dup 1)
		(parallel [(const_int 2) (const_int 6)
			   (const_int 10) (const_int 14)])))
	    (any_extend:V4SI
	      (vec_select:V4QI
		(match_dup 1)
		(parallel [(const_int 3) (const_int 7)
			   (const_int 11) (const_int 15)]))))))]
  "TARGET_XOP"
  "vphaddbd\t{%1, %0|%0, %1}"
  [(set_attr "type" "sseiadd1")])

;; vphaddbq/vphaddubq: sum of each group of 8 bytes, widened to DI.
(define_insn "xop_phaddbq"
  [(set (match_operand:V2DI 0 "register_operand" "=x")
	(plus:V2DI
	  (plus:V2DI
	    (plus:V2DI
	      (any_extend:V2DI
		(vec_select:V2QI
		  (match_operand:V16QI 1 "nonimmediate_operand" "xm")
		  (parallel [(const_int 0) (const_int 8)])))
	      (any_extend:V2DI
		(vec_select:V2QI
		  (match_dup 1)
		  (parallel [(const_int 1) (const_int 9)]))))
	    (plus:V2DI
	      (any_extend:V2DI
		(vec_select:V2QI
		  (match_dup 1)
		  (parallel [(const_int 2) (const_int 10)])))
	      (any_extend:V2DI
		(vec_select:V2QI
		  (match_dup 1)
		  (parallel [(const_int 3) (const_int 11)])))))
	  (plus:V2DI
	    (plus:V2DI
	      (any_extend:V2DI
		(vec_select:V2QI
		  (match_dup 1)
		  (parallel [(const_int 4) (const_int 12)])))
	      (any_extend:V2DI
		(vec_select:V2QI
		  (match_dup 1)
		  (parallel [(const_int 5) (const_int 13)]))))
	    (plus:V2DI
	      (any_extend:V2DI
		(vec_select:V2QI
		  (match_dup 1)
		  (parallel [(const_int 6) (const_int 14)])))
	      (any_extend:V2DI
		(vec_select:V2QI
		  (match_dup 1)
		  (parallel [(const_int 7) (const_int 15)])))))))]
  "TARGET_XOP"
  "vphaddbq\t{%1, %0|%0, %1}"
  [(set_attr "type" "sseiadd1")])

;; vphaddwd/vphadduwd: pairwise add of adjacent HI elements to SI.
(define_insn "xop_phaddwd"
  [(set (match_operand:V4SI 0 "register_operand" "=x")
	(plus:V4SI
	  (any_extend:V4SI
	    (vec_select:V4HI
	      (match_operand:V8HI 1 "nonimmediate_operand" "xm")
	      (parallel [(const_int 0) (const_int 2)
			 (const_int 4) (const_int 6)])))
	  (any_extend:V4SI
	    (vec_select:V4HI
	      (match_dup 1)
	      (parallel [(const_int 1) (const_int 3)
			 (const_int 5) (const_int 7)])))))]
  "TARGET_XOP"
  "vphaddwd\t{%1, %0|%0, %1}"
  [(set_attr "type" "sseiadd1")])

;; vphaddwq/vphadduwq: sum of each group of 4 HI elements to DI.
(define_insn "xop_phaddwq"
  [(set (match_operand:V2DI 0 "register_operand" "=x")
	(plus:V2DI
	  (plus:V2DI
	    (any_extend:V2DI
	      (vec_select:V2HI
		(match_operand:V8HI 1 "nonimmediate_operand" "xm")
		(parallel [(const_int 0) (const_int 4)])))
	    (any_extend:V2DI
	      (vec_select:V2HI
		(match_dup 1)
		(parallel [(const_int 1) (const_int 5)]))))
	  (plus:V2DI
	    (any_extend:V2DI
	      (vec_select:V2HI
		(match_dup 1)
		(parallel [(const_int 2) (const_int 6)])))
	    (any_extend:V2DI
	      (vec_select:V2HI
		(match_dup 1)
		(parallel [(const_int 3) (const_int 7)]))))))]
  "TARGET_XOP"
  "vphaddwq\t{%1, %0|%0, %1}"
  [(set_attr "type" "sseiadd1")])

;; vphadddq/vphaddudq: pairwise add of adjacent SI elements to DI.
(define_insn "xop_phadddq"
  [(set (match_operand:V2DI 0 "register_operand" "=x")
	(plus:V2DI
	  (any_extend:V2DI
	    (vec_select:V2SI
	      (match_operand:V4SI 1 "nonimmediate_operand" "xm")
	      (parallel [(const_int 0) (const_int 2)])))
	  (any_extend:V2DI
	    (vec_select:V2SI
	      (match_dup 1)
	      (parallel [(const_int 1) (const_int 3)])))))]
  "TARGET_XOP"
  "vphadddq\t{%1, %0|%0, %1}"
  [(set_attr "type" "sseiadd1")])

;; vphsubbw: even byte minus odd byte, sign-extended to HI.
(define_insn "xop_phsubbw"
  [(set (match_operand:V8HI 0 "register_operand" "=x")
	(minus:V8HI
	  (sign_extend:V8HI
	    (vec_select:V8QI
	      (match_operand:V16QI 1 "nonimmediate_operand" "xm")
	      (parallel [(const_int 0) (const_int 2) (const_int 4)
			 (const_int 6) (const_int 8) (const_int 10)
			 (const_int 12) (const_int 14)])))
	  (sign_extend:V8HI
	    (vec_select:V8QI
	      (match_dup 1)
	      (parallel [(const_int 1) (const_int 3) (const_int 5)
			 (const_int 7) (const_int 9) (const_int 11)
			 (const_int 13) (const_int 15)])))))]
  "TARGET_XOP"
  "vphsubbw\t{%1, %0|%0, %1}"
  [(set_attr "type" "sseiadd1")])

;; vphsubwd: even HI minus odd HI, sign-extended to SI.
(define_insn "xop_phsubwd"
  [(set (match_operand:V4SI 0 "register_operand" "=x")
	(minus:V4SI
	  (sign_extend:V4SI
	    (vec_select:V4HI
	      (match_operand:V8HI 1 "nonimmediate_operand" "xm")
	      (parallel [(const_int 0) (const_int 2)
			 (const_int 4) (const_int 6)])))
	  (sign_extend:V4SI
	    (vec_select:V4HI
	      (match_dup 1)
	      (parallel [(const_int 1) (const_int 3)
			 (const_int 5) (const_int 7)])))))]
  "TARGET_XOP"
  "vphsubwd\t{%1, %0|%0, %1}"
  [(set_attr "type" "sseiadd1")])

;; vphsubdq: even SI minus odd SI, sign-extended to DI.
(define_insn "xop_phsubdq"
  [(set (match_operand:V2DI 0 "register_operand" "=x")
	(minus:V2DI
	  (sign_extend:V2DI
	    (vec_select:V2SI
	      (match_operand:V4SI 1 "nonimmediate_operand" "xm")
	      (parallel [(const_int 0) (const_int 2)])))
	  (sign_extend:V2DI
	    (vec_select:V2SI
	      (match_dup 1)
	      (parallel [(const_int 1) (const_int 3)])))))]
  "TARGET_XOP"
  "vphsubdq\t{%1, %0|%0, %1}"
  [(set_attr "type" "sseiadd1")])

;; XOP permute instructions
;; vpperm: byte permute of the concatenation of ops 1 and 2, selector
;; in op 3.
(define_insn "xop_pperm"
  [(set (match_operand:V16QI 0 "register_operand" "=x,x")
	(unspec:V16QI
	  [(match_operand:V16QI 1 "register_operand" "x,x")
	   (match_operand:V16QI 2 "nonimmediate_operand" "x,m")
	   (match_operand:V16QI 3 "nonimmediate_operand" "xm,x")]
	  UNSPEC_XOP_PERMUTE))]
  "TARGET_XOP && !(MEM_P (operands[2]) && MEM_P (operands[3]))"
  "vpperm\t{%3, %2, %1, %0|%0, %1, %2, %3}"
  [(set_attr "type" "sse4arg")
   (set_attr "mode" "TI")])

;; XOP pack instructions that combine two vectors into a smaller vector
;; The (use ...) of the selector keeps it live: vpperm does the
;; truncating pack only with the right selector in operand 3.
(define_insn "xop_pperm_pack_v2di_v4si"
  [(set (match_operand:V4SI 0 "register_operand" "=x,x")
	(vec_concat:V4SI
	  (truncate:V2SI
	    (match_operand:V2DI 1 "register_operand" "x,x"))
	  (truncate:V2SI
	    (match_operand:V2DI 2 "nonimmediate_operand" "x,m"))))
   (use (match_operand:V16QI 3 "nonimmediate_operand" "xm,x"))]
  "TARGET_XOP && !(MEM_P (operands[2]) && MEM_P (operands[3]))"
  "vpperm\t{%3, %2, %1, %0|%0, %1, %2, %3}"
  [(set_attr "type" "sse4arg")
   (set_attr "mode" "TI")])

(define_insn "xop_pperm_pack_v4si_v8hi"
  [(set (match_operand:V8HI 0 "register_operand" "=x,x")
	(vec_concat:V8HI
	  (truncate:V4HI
	    (match_operand:V4SI 1 "register_operand" "x,x"))
	  (truncate:V4HI
	    (match_operand:V4SI 2 "nonimmediate_operand" "x,m"))))
   (use (match_operand:V16QI 3 "nonimmediate_operand" "xm,x"))]
  "TARGET_XOP && !(MEM_P (operands[2]) && MEM_P (operands[3]))"
  "vpperm\t{%3, %2, %1, %0|%0, %1, %2, %3}"
  [(set_attr "type" "sse4arg")
   (set_attr "mode" "TI")])

(define_insn "xop_pperm_pack_v8hi_v16qi"
  [(set (match_operand:V16QI 0 "register_operand" "=x,x")
	(vec_concat:V16QI
	  (truncate:V8QI
	    (match_operand:V8HI 1 "register_operand" "x,x"))
	  (truncate:V8QI
	    (match_operand:V8HI 2 "nonimmediate_operand" "x,m"))))
   (use (match_operand:V16QI 3 "nonimmediate_operand" "xm,x"))]
  "TARGET_XOP && !(MEM_P (operands[2]) && MEM_P (operands[3]))"
  "vpperm\t{%3, %2, %1, %0|%0, %1, %2, %3}"
  [(set_attr "type" "sse4arg")
   (set_attr "mode" "TI")])

;; XOP packed rotate instructions
;; Rotate-left by a scalar count: if the count is not an in-range
;; immediate, broadcast it into a vector and use the variable-count
;; vprot (xop_vrotl) form.  NOTE(review): the rtvec_alloc () call has
;; lost its element-count argument (was <ssescalarnum>).
(define_expand "rotl3"
  [(set (match_operand:VI_128 0 "register_operand")
	(rotate:VI_128
	  (match_operand:VI_128 1 "nonimmediate_operand")
	  (match_operand:SI 2 "general_operand")))]
  "TARGET_XOP"
{
  /* If we were given a scalar, convert it to parallel */
  if (! const_0_to__operand (operands[2], SImode))
    {
      rtvec vs = rtvec_alloc ();
      rtx par = gen_rtx_PARALLEL (mode, vs);
      rtx reg = gen_reg_rtx (mode);
      rtx op2 = operands[2];
      int i;
      if (GET_MODE (op2) != mode)
	{
	  op2 = gen_reg_rtx (mode);
	  convert_move (op2, operands[2], false);
	}
      for (i = 0; i < ; i++)
	RTVEC_ELT (vs, i) = op2;
      emit_insn (gen_vec_init (reg, par));
      emit_insn (gen_xop_vrotl3 (operands[0], operands[1], reg));
      DONE;
    }
})

;; Rotate-right: as above but negate the broadcast count first, since
;; hardware only has a (signed-count) rotate-left.
(define_expand "rotr3"
  [(set (match_operand:VI_128 0 "register_operand")
	(rotatert:VI_128
	  (match_operand:VI_128 1 "nonimmediate_operand")
	  (match_operand:SI 2 "general_operand")))]
  "TARGET_XOP"
{
  /* If we were given a scalar, convert it to parallel */
  if (! const_0_to__operand (operands[2], SImode))
    {
      rtvec vs = rtvec_alloc ();
      rtx par = gen_rtx_PARALLEL (mode, vs);
      rtx neg = gen_reg_rtx (mode);
      rtx reg = gen_reg_rtx (mode);
      rtx op2 = operands[2];
      int i;
      if (GET_MODE (op2) != mode)
	{
	  op2 = gen_reg_rtx (mode);
	  convert_move (op2, operands[2], false);
	}
      for (i = 0; i < ; i++)
	RTVEC_ELT (vs, i) = op2;
      emit_insn (gen_vec_init (reg, par));
      emit_insn (gen_neg2 (neg, reg));
      emit_insn (gen_xop_vrotl3 (operands[0], operands[1], neg));
      DONE;
    }
})

;; vprot with immediate count: rotate left.
(define_insn "xop_rotl3"
  [(set (match_operand:VI_128 0 "register_operand" "=x")
	(rotate:VI_128
	  (match_operand:VI_128 1 "nonimmediate_operand" "xm")
	  (match_operand:SI 2 "const_0_to__operand" "n")))]
  "TARGET_XOP"
  "vprot\t{%2, %1, %0|%0, %1, %2}"
  [(set_attr "type" "sseishft")
   (set_attr "length_immediate" "1")
   (set_attr "mode" "TI")])

;; Immediate rotate-right is emitted as rotate-left by (bits - count).
(define_insn "xop_rotr3"
  [(set (match_operand:VI_128 0 "register_operand" "=x")
	(rotatert:VI_128
	  (match_operand:VI_128 1 "nonimmediate_operand" "xm")
	  (match_operand:SI 2 "const_0_to__operand" "n")))]
  "TARGET_XOP"
{
  operands[3] = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (operands[2]));
  return \"vprot\t{%3, %1, %0|%0, %1, %3}\";
}
  [(set_attr "type" "sseishft")
   (set_attr "length_immediate" "1")
   (set_attr "mode" "TI")])

;; Variable per-element rotate right: negate counts, rotate left.
(define_expand "vrotr3"
  [(match_operand:VI_128 0 "register_operand")
   (match_operand:VI_128 1 "register_operand")
   (match_operand:VI_128 2 "register_operand")]
  "TARGET_XOP"
{
  rtx reg = gen_reg_rtx (mode);
  emit_insn (gen_neg2 (reg, operands[2]));
  emit_insn (gen_xop_vrotl3 (operands[0], operands[1], reg));
  DONE;
})

;; Variable per-element rotate left: direct mapping to xop_vrotl.
(define_expand "vrotl3"
  [(match_operand:VI_128 0 "register_operand")
   (match_operand:VI_128 1 "register_operand")
   (match_operand:VI_128 2 "register_operand")]
  "TARGET_XOP"
{
  emit_insn (gen_xop_vrotl3 (operands[0], operands[1], operands[2]));
  DONE;
})

;; vprot with per-element signed counts: positive count rotates left,
;; negative rotates right — expressed as an if_then_else on sign.
(define_insn "xop_vrotl3"
  [(set (match_operand:VI_128 0 "register_operand" "=x,x")
	(if_then_else:VI_128
	  (ge:VI_128
	    (match_operand:VI_128 2 "nonimmediate_operand" "x,m")
	    (const_int 0))
	  (rotate:VI_128
	    (match_operand:VI_128 1 "nonimmediate_operand" "xm,x")
	    (match_dup 2))
	  (rotatert:VI_128
	    (match_dup 1)
	    (neg:VI_128 (match_dup 2)))))]
  "TARGET_XOP && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
  "vprot\t{%2, %1, %0|%0, %1, %2}"
  [(set_attr "type" "sseishft")
   (set_attr "prefix_data16" "0")
   (set_attr "prefix_extra" "2")
   (set_attr "mode" "TI")])

;; XOP packed shift instructions.
;; Variable logical right shift for 8/16-bit elements: on XOP use
;; vpshl with negated counts; on AVX512BW+VL, V16QI is synthesized via
;; ix86_expand_vecop_qihi, V8HI matches a later pattern.
(define_expand "vlshr3"
  [(set (match_operand:VI12_128 0 "register_operand")
	(lshiftrt:VI12_128
	  (match_operand:VI12_128 1 "register_operand")
	  (match_operand:VI12_128 2 "nonimmediate_operand")))]
  "TARGET_XOP || (TARGET_AVX512BW && TARGET_AVX512VL)"
{
  if (TARGET_XOP)
    {
      rtx neg = gen_reg_rtx (mode);
      emit_insn (gen_neg2 (neg, operands[2]));
      emit_insn (gen_xop_shl3 (operands[0], operands[1], neg));
      DONE;
    }
  else if (mode == V16QImode)
    {
      ix86_expand_vecop_qihi (LSHIFTRT, operands[0], operands[1], operands[2]);
      DONE;
    }
})

;; Variable logical right shift for 32/64-bit elements: AVX2 has
;; native vpsrlv; XOP falls back to vpshl with negated counts.
(define_expand "vlshr3"
  [(set (match_operand:VI48_128 0 "register_operand")
	(lshiftrt:VI48_128
	  (match_operand:VI48_128 1 "register_operand")
	  (match_operand:VI48_128 2 "nonimmediate_operand")))]
  "TARGET_AVX2 || TARGET_XOP"
{
  if (!TARGET_AVX2)
    {
      rtx neg = gen_reg_rtx (mode);
      emit_insn (gen_neg2 (neg, operands[2]));
      emit_insn (gen_xop_shl3 (operands[0], operands[1], neg));
      DONE;
    }
})

;; AVX512BW wide 8/16-bit variable shifts; QI element modes are
;; synthesized through HI-element operations.
(define_expand "v3"
  [(set (match_operand:VI12_256_512_AVX512VL 0 "register_operand")
	(any_shift:VI12_256_512_AVX512VL
	  (match_operand:VI12_256_512_AVX512VL 1 "register_operand")
	  (match_operand:VI12_256_512_AVX512VL 2 "nonimmediate_operand")))]
  "TARGET_AVX512BW"
{
  if (mode == V32QImode || mode == V64QImode)
    {
      ix86_expand_vecop_qihi (, operands[0], operands[1], operands[2]);
      DONE;
    }
})

;; 64-bit-only V8QI variable shifts, synthesized via qihi expansion.
(define_expand "vv8qi3"
  [(set (match_operand:V8QI 0 "register_operand")
	(any_shift:V8QI
	  (match_operand:V8QI 1 "register_operand")
	  (match_operand:V8QI 2 "nonimmediate_operand")))]
  "TARGET_AVX512BW && TARGET_AVX512VL && TARGET_64BIT"
{
  ix86_expand_vecop_qihi (, operands[0], operands[1], operands[2]);
  DONE;
})
;; NOTE(review): as elsewhere in this chunk, angle-bracketed
;; iterator/attr references have been stripped (names like "vlshr3",
;; "xop_sha3", "3" are truncated).  Tokens kept verbatim; verify
;; against upstream gcc/config/i386/sse.md.

;; 512-bit and 256-bit variable logical right shifts: direct matches
;; for vpsrlv on AVX512F / AVX2, no expansion code needed.
(define_expand "vlshr3"
  [(set (match_operand:VI48_512 0 "register_operand")
	(lshiftrt:VI48_512
	  (match_operand:VI48_512 1 "register_operand")
	  (match_operand:VI48_512 2 "nonimmediate_operand")))]
  "TARGET_AVX512F")

(define_expand "vlshr3"
  [(set (match_operand:VI48_256 0 "register_operand")
	(lshiftrt:VI48_256
	  (match_operand:VI48_256 1 "register_operand")
	  (match_operand:VI48_256 2 "nonimmediate_operand")))]
  "TARGET_AVX2")

;; V8DI arithmetic right shift: native on AVX512F.
(define_expand "vashrv8di3"
  [(set (match_operand:V8DI 0 "register_operand")
	(ashiftrt:V8DI
	  (match_operand:V8DI 1 "register_operand")
	  (match_operand:V8DI 2 "nonimmediate_operand")))]
  "TARGET_AVX512F")

;; V4DI arithmetic right shift: AVX2 has no vpsravq, so without
;; AVX512VL synthesize it as (x >>l n ^ m >>l n) - (m >>l n) with m
;; the sign-bit mask — the standard logical-shift + sign-fixup trick.
(define_expand "vashrv4di3"
  [(set (match_operand:V4DI 0 "register_operand")
	(ashiftrt:V4DI
	  (match_operand:V4DI 1 "register_operand")
	  (match_operand:V4DI 2 "nonimmediate_operand")))]
  "TARGET_AVX2"
{
  if (!TARGET_AVX512VL)
    {
      rtx mask = ix86_build_signbit_mask (V4DImode, 1, 0);
      rtx t1 = gen_reg_rtx (V4DImode);
      rtx t2 = gen_reg_rtx (V4DImode);
      rtx t3 = gen_reg_rtx (V4DImode);
      emit_insn (gen_vlshrv4di3 (t1, operands[1], operands[2]));
      emit_insn (gen_vlshrv4di3 (t2, mask, operands[2]));
      emit_insn (gen_xorv4di3 (t3, t1, t2));
      emit_insn (gen_subv4di3 (operands[0], t3, t2));
      DONE;
    }
})

;; Variable arithmetic right shift for 8/16-bit elements: XOP vpsha
;; with negated counts, or AVX512BW+VL (V16QI via qihi synthesis).
(define_expand "vashr3"
  [(set (match_operand:VI12_128 0 "register_operand")
	(ashiftrt:VI12_128
	  (match_operand:VI12_128 1 "register_operand")
	  (match_operand:VI12_128 2 "nonimmediate_operand")))]
  "TARGET_XOP || (TARGET_AVX512BW && TARGET_AVX512VL)"
{
  if (TARGET_XOP)
    {
      rtx neg = gen_reg_rtx (mode);
      emit_insn (gen_neg2 (neg, operands[2]));
      emit_insn (gen_xop_sha3 (operands[0], operands[1], neg));
      DONE;
    }
  else if(mode == V16QImode)
    {
      ix86_expand_vecop_qihi (ASHIFTRT, operands[0],operands[1], operands[2]);
      DONE;
    }
})

;; V2DI arithmetic right shift: XOP vpshaq, or (without AVX512VL
;; vpsravq) the same logical-shift + sign-fixup trick as vashrv4di3.
(define_expand "vashrv2di3"
  [(set (match_operand:V2DI 0 "register_operand")
	(ashiftrt:V2DI
	  (match_operand:V2DI 1 "register_operand")
	  (match_operand:V2DI 2 "nonimmediate_operand")))]
  "TARGET_XOP || TARGET_AVX2"
{
  if (TARGET_XOP)
    {
      rtx neg = gen_reg_rtx (V2DImode);
      emit_insn (gen_negv2di2 (neg, operands[2]));
      emit_insn (gen_xop_shav2di3 (operands[0], operands[1], neg));
      DONE;
    }
  if (!TARGET_AVX512VL)
    {
      rtx mask = ix86_build_signbit_mask (V2DImode, 1, 0);
      rtx t1 = gen_reg_rtx (V2DImode);
      rtx t2 = gen_reg_rtx (V2DImode);
      rtx t3 = gen_reg_rtx (V2DImode);
      emit_insn (gen_vlshrv2di3 (t1, operands[1], operands[2]));
      emit_insn (gen_vlshrv2di3 (t2, mask, operands[2]));
      emit_insn (gen_xorv2di3 (t3, t1, t2));
      emit_insn (gen_subv2di3 (operands[0], t3, t2));
      DONE;
    }
})

;; V4SI arithmetic right shift: AVX2 vpsravd, XOP vpshad fallback.
(define_expand "vashrv4si3"
  [(set (match_operand:V4SI 0 "register_operand")
	(ashiftrt:V4SI
	  (match_operand:V4SI 1 "register_operand")
	  (match_operand:V4SI 2 "nonimmediate_operand")))]
  "TARGET_AVX2 || TARGET_XOP"
{
  if (!TARGET_AVX2)
    {
      rtx neg = gen_reg_rtx (V4SImode);
      emit_insn (gen_negv4si2 (neg, operands[2]));
      emit_insn (gen_xop_shav4si3 (operands[0], operands[1], neg));
      DONE;
    }
})

;; Native variable arithmetic right shifts, 512/256-bit SI elements.
(define_expand "vashrv16si3"
  [(set (match_operand:V16SI 0 "register_operand")
	(ashiftrt:V16SI
	  (match_operand:V16SI 1 "register_operand")
	  (match_operand:V16SI 2 "nonimmediate_operand")))]
  "TARGET_AVX512F")

(define_expand "vashrv8si3"
  [(set (match_operand:V8SI 0 "register_operand")
	(ashiftrt:V8SI
	  (match_operand:V8SI 1 "register_operand")
	  (match_operand:V8SI 2 "nonimmediate_operand")))]
  "TARGET_AVX2")

;; Variable left shift for 8/16-bit elements: XOP vpshl (counts used
;; as-is — positive shifts left), or AVX512BW+VL / qihi synthesis.
(define_expand "vashl3"
  [(set (match_operand:VI12_128 0 "register_operand")
	(ashift:VI12_128
	  (match_operand:VI12_128 1 "register_operand")
	  (match_operand:VI12_128 2 "nonimmediate_operand")))]
  "TARGET_XOP || (TARGET_AVX512BW && TARGET_AVX512VL)"
{
  if (TARGET_XOP)
    {
      emit_insn (gen_xop_sha3 (operands[0], operands[1], operands[2]));
      DONE;
    }
  else if (mode == V16QImode)
    {
      ix86_expand_vecop_qihi (ASHIFT, operands[0], operands[1], operands[2]);
      DONE;
    }
})

;; Variable left shift for 32/64-bit elements: AVX2 vpsllv, XOP vpsha
;; fallback (counts forced to a register).
(define_expand "vashl3"
  [(set (match_operand:VI48_128 0 "register_operand")
	(ashift:VI48_128
	  (match_operand:VI48_128 1 "register_operand")
	  (match_operand:VI48_128 2 "nonimmediate_operand")))]
  "TARGET_AVX2 || TARGET_XOP"
{
  if (!TARGET_AVX2)
    {
      operands[2] = force_reg (mode, operands[2]);
      emit_insn (gen_xop_sha3 (operands[0], operands[1], operands[2]));
      DONE;
    }
})

;; Native variable left shifts, 512/256-bit element modes.
(define_expand "vashl3"
  [(set (match_operand:VI48_512 0 "register_operand")
	(ashift:VI48_512
	  (match_operand:VI48_512 1 "register_operand")
	  (match_operand:VI48_512 2 "nonimmediate_operand")))]
  "TARGET_AVX512F")

(define_expand "vashl3"
  [(set (match_operand:VI48_256 0 "register_operand")
	(ashift:VI48_256
	  (match_operand:VI48_256 1 "register_operand")
	  (match_operand:VI48_256 2 "nonimmediate_operand")))]
  "TARGET_AVX2")

;; vpsha: per-element signed shift counts — positive shifts left,
;; negative shifts right arithmetically.
(define_insn "xop_sha3"
  [(set (match_operand:VI_128 0 "register_operand" "=x,x")
	(if_then_else:VI_128
	  (ge:VI_128
	    (match_operand:VI_128 2 "nonimmediate_operand" "x,m")
	    (const_int 0))
	  (ashift:VI_128
	    (match_operand:VI_128 1 "nonimmediate_operand" "xm,x")
	    (match_dup 2))
	  (ashiftrt:VI_128
	    (match_dup 1)
	    (neg:VI_128 (match_dup 2)))))]
  "TARGET_XOP && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
  "vpsha\t{%2, %1, %0|%0, %1, %2}"
  [(set_attr "type" "sseishft")
   (set_attr "prefix_data16" "0")
   (set_attr "prefix_extra" "2")
   (set_attr "mode" "TI")])

;; vpshl: as vpsha but negative counts shift right logically.
(define_insn "xop_shl3"
  [(set (match_operand:VI_128 0 "register_operand" "=x,x")
	(if_then_else:VI_128
	  (ge:VI_128
	    (match_operand:VI_128 2 "nonimmediate_operand" "x,m")
	    (const_int 0))
	  (ashift:VI_128
	    (match_operand:VI_128 1 "nonimmediate_operand" "xm,x")
	    (match_dup 2))
	  (lshiftrt:VI_128
	    (match_dup 1)
	    (neg:VI_128 (match_dup 2)))))]
  "TARGET_XOP && !(MEM_P (operands[1]) && MEM_P (operands[2]))"
  "vpshl\t{%2, %1, %0|%0, %1, %2}"
  [(set_attr "type" "sseishft")
   (set_attr "prefix_data16" "0")
   (set_attr "prefix_extra" "2")
   (set_attr "mode" "TI")])

;; QI-element shift by scalar count: on XOP broadcast the (possibly
;; negated, for right shifts) count into a V16QI and use vpshl/vpsha;
;; otherwise synthesize via HI-element operations.
(define_expand "3"
  [(set (match_operand:VI1_AVX512 0 "register_operand")
	(any_shift:VI1_AVX512
	  (match_operand:VI1_AVX512 1 "register_operand")
	  (match_operand:SI 2 "nonmemory_operand")))]
  "TARGET_SSE2"
{
  if (TARGET_XOP && mode == V16QImode)
    {
      bool negate = false;
      rtx (*gen) (rtx, rtx, rtx);
      rtx tmp, par;
      int i;
      if ( != ASHIFT)
	{
	  if (CONST_INT_P (operands[2]))
	    operands[2] = GEN_INT (-INTVAL (operands[2]));
	  else
	    negate = true;
	}
      par = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
      tmp = lowpart_subreg (QImode, operands[2], SImode);
      for (i = 0; i < 16; i++)
	XVECEXP (par, 0, i) = tmp;
      tmp = gen_reg_rtx (V16QImode);
      emit_insn (gen_vec_initv16qiqi (tmp, par));
      if (negate)
	emit_insn (gen_negv16qi2 (tmp, tmp));
      gen = ( == LSHIFTRT ? gen_xop_shlv16qi3 : gen_xop_shav16qi3);
      emit_insn (gen (operands[0], operands[1], tmp));
    }
  else
    ix86_expand_vecop_qihi (, operands[0], operands[1], operands[2]);
  DONE;
})

;; V2DI arithmetic right shift by a scalar count.  Without AVX512VL
;; vpsraq, special-case count >= 63 (compare-against-zero trick or
;; all-sign-bits), count 0 (plain move), and otherwise build the
;; result from V4SI shifts plus a shuffle.  NOTE(review): this
;; define_expand continues past the end of this chunk; the text below
;; is its beginning only, kept verbatim.
(define_expand "ashrv2di3"
  [(set (match_operand:V2DI 0 "register_operand")
	(ashiftrt:V2DI
	  (match_operand:V2DI 1 "register_operand")
	  (match_operand:DI 2 "nonmemory_operand")))]
  "TARGET_SSE2"
{
  if (!TARGET_AVX512VL)
    {
      if (TARGET_SSE4_2
	  && CONST_INT_P (operands[2])
	  && UINTVAL (operands[2]) >= 63)
	{
	  rtx zero = force_reg (V2DImode, CONST0_RTX (V2DImode));
	  emit_insn (gen_sse4_2_gtv2di3 (operands[0], zero, operands[1]));
	  DONE;
	}
      if (operands[2] == const0_rtx)
	{
	  emit_move_insn (operands[0], operands[1]);
	  DONE;
	}
      if (CONST_INT_P (operands[2])
	  && (!TARGET_XOP || UINTVAL (operands[2]) >= 63))
	{
	  vec_perm_builder sel (4, 4, 1);
	  sel.quick_grow (4);
	  rtx arg0, arg1;
	  rtx op1 = lowpart_subreg (V4SImode, operands[1], V2DImode);
	  rtx target = gen_reg_rtx (V4SImode);
	  if (UINTVAL (operands[2]) >= 63)
	    {
	      arg0 = arg1 = gen_reg_rtx (V4SImode);
	      emit_insn (gen_ashrv4si3 (arg0, op1, GEN_INT (31)));
	      sel[0] = 1; sel[1] = 1; sel[2] = 3; sel[3] = 3;
	    }
	  else if (INTVAL (operands[2]) > 32)
	    {
	      arg0 = gen_reg_rtx (V4SImode);
	      arg1 = gen_reg_rtx (V4SImode);
	      emit_insn (gen_ashrv4si3 (arg1, op1, GEN_INT (31)));
	      emit_insn (gen_ashrv4si3 (arg0, op1,
					GEN_INT (INTVAL (operands[2]) - 32)));
	      sel[0] = 1; sel[1] = 5; sel[2] = 3; sel[3] = 7;
	    }
	  else if (INTVAL (operands[2]) == 32)
	    {
	      arg0 = op1;
	      arg1 = gen_reg_rtx (V4SImode);
	      emit_insn (gen_ashrv4si3 (arg1, op1, GEN_INT (31)));
	      sel[0] = 1; sel[1] = 5; sel[2] = 3; sel[3] = 7;
	    }
	  else
	    {
	      arg0 = gen_reg_rtx (V2DImode);
	      arg1 = gen_reg_rtx (V4SImode);
	      emit_insn
(gen_lshrv2di3 (arg0, operands[1], operands[2])); emit_insn (gen_ashrv4si3 (arg1, op1, operands[2])); arg0 = lowpart_subreg (V4SImode, arg0, V2DImode); sel[0] = 0; sel[1] = 5; sel[2] = 2; sel[3] = 7; } vec_perm_indices indices (sel, arg0 != arg1 ? 2 : 1, 4); bool ok = targetm.vectorize.vec_perm_const (V4SImode, target, arg0, arg1, indices); gcc_assert (ok); emit_move_insn (operands[0], lowpart_subreg (V2DImode, target, V4SImode)); DONE; } if (!TARGET_XOP) { rtx zero = force_reg (V2DImode, CONST0_RTX (V2DImode)); rtx zero_or_all_ones; if (TARGET_SSE4_2) { zero_or_all_ones = gen_reg_rtx (V2DImode); emit_insn (gen_sse4_2_gtv2di3 (zero_or_all_ones, zero, operands[1])); } else { rtx temp = gen_reg_rtx (V4SImode); emit_insn (gen_ashrv4si3 (temp, lowpart_subreg (V4SImode, operands[1], V2DImode), GEN_INT (31))); zero_or_all_ones = gen_reg_rtx (V4SImode); emit_insn (gen_sse2_pshufd_1 (zero_or_all_ones, temp, const1_rtx, const1_rtx, GEN_INT (3), GEN_INT (3))); zero_or_all_ones = lowpart_subreg (V2DImode, zero_or_all_ones, V4SImode); } rtx lshr_res = gen_reg_rtx (V2DImode); emit_insn (gen_lshrv2di3 (lshr_res, operands[1], operands[2])); rtx ashl_res = gen_reg_rtx (V2DImode); rtx amount; if (TARGET_64BIT) { amount = gen_reg_rtx (DImode); emit_insn (gen_subdi3 (amount, force_reg (DImode, GEN_INT (64)), operands[2])); } else { rtx temp = gen_reg_rtx (SImode); emit_insn (gen_subsi3 (temp, force_reg (SImode, GEN_INT (64)), lowpart_subreg (SImode, operands[2], DImode))); amount = gen_reg_rtx (V4SImode); emit_insn (gen_vec_setv4si_0 (amount, CONST0_RTX (V4SImode), temp)); } amount = lowpart_subreg (DImode, amount, GET_MODE (amount)); emit_insn (gen_ashlv2di3 (ashl_res, zero_or_all_ones, amount)); emit_insn (gen_iorv2di3 (operands[0], lshr_res, ashl_res)); DONE; } rtx reg = gen_reg_rtx (V2DImode); rtx par; bool negate = false; int i; if (CONST_INT_P (operands[2])) operands[2] = GEN_INT (-INTVAL (operands[2])); else negate = true; par = gen_rtx_PARALLEL (V2DImode, rtvec_alloc (2)); 
for (i = 0; i < 2; i++) XVECEXP (par, 0, i) = operands[2]; emit_insn (gen_vec_initv2didi (reg, par)); if (negate) emit_insn (gen_negv2di2 (reg, reg)); emit_insn (gen_xop_shav2di3 (operands[0], operands[1], reg)); DONE; } }) ;; XOP FRCZ support (define_insn "xop_frcz2" [(set (match_operand:FMAMODE 0 "register_operand" "=x") (unspec:FMAMODE [(match_operand:FMAMODE 1 "nonimmediate_operand" "xm")] UNSPEC_FRCZ))] "TARGET_XOP" "vfrcz\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt1") (set_attr "mode" "")]) (define_expand "xop_vmfrcz2" [(set (match_operand:VF_128 0 "register_operand") (vec_merge:VF_128 (unspec:VF_128 [(match_operand:VF_128 1 "nonimmediate_operand")] UNSPEC_FRCZ) (match_dup 2) (const_int 1)))] "TARGET_XOP" "operands[2] = CONST0_RTX (mode);") (define_insn "*xop_vmfrcz2" [(set (match_operand:VF_128 0 "register_operand" "=x") (vec_merge:VF_128 (unspec:VF_128 [(match_operand:VF_128 1 "nonimmediate_operand" "xm")] UNSPEC_FRCZ) (match_operand:VF_128 2 "const0_operand") (const_int 1)))] "TARGET_XOP" "vfrcz\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt1") (set_attr "mode" "")]) (define_insn "xop_maskcmp3" [(set (match_operand:VI_128 0 "register_operand" "=x") (match_operator:VI_128 1 "ix86_comparison_int_operator" [(match_operand:VI_128 2 "register_operand" "x") (match_operand:VI_128 3 "nonimmediate_operand" "xm")]))] "TARGET_XOP" "vpcom%Y1\t{%3, %2, %0|%0, %2, %3}" [(set_attr "type" "sse4arg") (set_attr "prefix_data16" "0") (set_attr "prefix_rep" "0") (set_attr "prefix_extra" "2") (set_attr "length_immediate" "1") (set_attr "mode" "TI")]) (define_insn "xop_maskcmp_uns3" [(set (match_operand:VI_128 0 "register_operand" "=x") (match_operator:VI_128 1 "ix86_comparison_uns_operator" [(match_operand:VI_128 2 "register_operand" "x") (match_operand:VI_128 3 "nonimmediate_operand" "xm")]))] "TARGET_XOP" "vpcom%Y1u\t{%3, %2, %0|%0, %2, %3}" [(set_attr "type" "ssecmp") (set_attr "prefix_data16" "0") (set_attr "prefix_rep" "0") (set_attr "prefix_extra" "2") (set_attr 
"length_immediate" "1") (set_attr "mode" "TI")]) ;; Version of pcom*u* that is called from the intrinsics that allows pcomequ* ;; and pcomneu* not to be converted to the signed ones in case somebody needs ;; the exact instruction generated for the intrinsic. (define_insn "xop_maskcmp_uns23" [(set (match_operand:VI_128 0 "register_operand" "=x") (unspec:VI_128 [(match_operator:VI_128 1 "ix86_comparison_uns_operator" [(match_operand:VI_128 2 "register_operand" "x") (match_operand:VI_128 3 "nonimmediate_operand" "xm")])] UNSPEC_XOP_UNSIGNED_CMP))] "TARGET_XOP" "vpcom%Y1u\t{%3, %2, %0|%0, %2, %3}" [(set_attr "type" "ssecmp") (set_attr "prefix_data16" "0") (set_attr "prefix_extra" "2") (set_attr "length_immediate" "1") (set_attr "mode" "TI")]) ;; Pcomtrue and pcomfalse support. These are useless instructions, but are ;; being added here to be complete. (define_insn "xop_pcom_tf3" [(set (match_operand:VI_128 0 "register_operand" "=x") (unspec:VI_128 [(match_operand:VI_128 1 "register_operand" "x") (match_operand:VI_128 2 "nonimmediate_operand" "xm") (match_operand:SI 3 "const_int_operand" "n")] UNSPEC_XOP_TRUEFALSE))] "TARGET_XOP" { return ((INTVAL (operands[3]) != 0) ? 
"vpcomtrue\t{%2, %1, %0|%0, %1, %2}" : "vpcomfalse\t{%2, %1, %0|%0, %1, %2}"); } [(set_attr "type" "ssecmp") (set_attr "prefix_data16" "0") (set_attr "prefix_extra" "2") (set_attr "length_immediate" "1") (set_attr "mode" "TI")]) (define_insn "xop_vpermil23" [(set (match_operand:VF_128_256 0 "register_operand" "=x,x") (unspec:VF_128_256 [(match_operand:VF_128_256 1 "register_operand" "x,x") (match_operand:VF_128_256 2 "nonimmediate_operand" "x,m") (match_operand: 3 "nonimmediate_operand" "xm,x") (match_operand:SI 4 "const_0_to_3_operand" "n,n")] UNSPEC_VPERMIL2))] "TARGET_XOP" "vpermil2\t{%4, %3, %2, %1, %0|%0, %1, %2, %3, %4}" [(set_attr "type" "sse4arg") (set_attr "length_immediate" "1") (set_attr "mode" "")]) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; (define_insn "aesenc" [(set (match_operand:V2DI 0 "register_operand" "=x,x") (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0,x") (match_operand:V2DI 2 "vector_operand" "xBm,xm")] UNSPEC_AESENC))] "TARGET_AES" "@ aesenc\t{%2, %0|%0, %2} vaesenc\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,vex") (set_attr "btver2_decode" "double,double") (set_attr "mode" "TI")]) (define_insn "aesenclast" [(set (match_operand:V2DI 0 "register_operand" "=x,x") (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0,x") (match_operand:V2DI 2 "vector_operand" "xBm,xm")] UNSPEC_AESENCLAST))] "TARGET_AES" "@ aesenclast\t{%2, %0|%0, %2} vaesenclast\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,vex") (set_attr "btver2_decode" "double,double") (set_attr "mode" "TI")]) (define_insn "aesdec" [(set (match_operand:V2DI 0 "register_operand" "=x,x") (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0,x") (match_operand:V2DI 2 "vector_operand" "xBm,xm")] UNSPEC_AESDEC))] "TARGET_AES" "@ aesdec\t{%2, %0|%0, %2} 
vaesdec\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,vex") (set_attr "btver2_decode" "double,double") (set_attr "mode" "TI")]) (define_insn "aesdeclast" [(set (match_operand:V2DI 0 "register_operand" "=x,x") (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0,x") (match_operand:V2DI 2 "vector_operand" "xBm,xm")] UNSPEC_AESDECLAST))] "TARGET_AES" "@ aesdeclast\t{%2, %0|%0, %2} vaesdeclast\t{%2, %1, %0|%0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,vex") (set_attr "btver2_decode" "double,double") (set_attr "mode" "TI")]) (define_insn "aesimc" [(set (match_operand:V2DI 0 "register_operand" "=x") (unspec:V2DI [(match_operand:V2DI 1 "vector_operand" "xBm")] UNSPEC_AESIMC))] "TARGET_AES" "%vaesimc\t{%1, %0|%0, %1}" [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "prefix" "maybe_vex") (set_attr "mode" "TI")]) (define_insn "aeskeygenassist" [(set (match_operand:V2DI 0 "register_operand" "=x") (unspec:V2DI [(match_operand:V2DI 1 "vector_operand" "xBm") (match_operand:SI 2 "const_0_to_255_operand" "n")] UNSPEC_AESKEYGENASSIST))] "TARGET_AES" "%vaeskeygenassist\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "maybe_vex") (set_attr "mode" "TI")]) (define_insn "pclmulqdq" [(set (match_operand:V2DI 0 "register_operand" "=x,x") (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0,x") (match_operand:V2DI 2 "vector_operand" "xBm,xm") (match_operand:SI 3 "const_0_to_255_operand" "n,n")] UNSPEC_PCLMUL))] "TARGET_PCLMUL" "@ pclmulqdq\t{%3, %2, %0|%0, %2, %3} vpclmulqdq\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "isa" "noavx,avx") (set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "orig,vex") (set_attr "mode" "TI")]) (define_expand 
"avx_vzeroall" [(match_par_dup 0 [(const_int 0)])] "TARGET_AVX" { int nregs = TARGET_64BIT ? 16 : 8; int regno; operands[0] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nregs + 1)); XVECEXP (operands[0], 0, 0) = gen_rtx_UNSPEC_VOLATILE (VOIDmode, gen_rtvec (1, const0_rtx), UNSPECV_VZEROALL); for (regno = 0; regno < nregs; regno++) XVECEXP (operands[0], 0, regno + 1) = gen_rtx_SET (gen_rtx_REG (V8SImode, GET_SSE_REGNO (regno)), CONST0_RTX (V8SImode)); }) (define_insn "*avx_vzeroall" [(match_parallel 0 "vzeroall_operation" [(unspec_volatile [(const_int 0)] UNSPECV_VZEROALL)])] "TARGET_AVX" "vzeroall" [(set_attr "type" "sse") (set_attr "modrm" "0") (set_attr "memory" "none") (set_attr "prefix" "vex") (set_attr "btver2_decode" "vector") (set_attr "mode" "OI")]) ;; Clear the upper 128bits of AVX registers, equivalent to a NOP ;; if the upper 128bits are unused. Initially we expand the instructions ;; as though they had no effect on the SSE registers, but later add SETs and ;; CLOBBERs to the PARALLEL to model the real effect. 
;; NOTE(review): the <...> iterator substitutions were stripped by text
;; extraction here too (empty "(set_attr "mode" "")", bare
;; "(match_operand: 2 ...)", duplicate-looking "_permvar" names that were
;; "<avx2_avx512>_permvar<mode>" etc.) -- TODO restore from upstream sse.md.
;;
;; This span contains:
;;   - the avx_vzeroupper expander (modelled as a call with the special
;;     ABI_VZEROUPPER callee ABI, expanded via ix86_expand_avx_vzeroupper)
;;     and the matching vzeroupper insn;
;;   - pbroadcast_evex_isa: the ISA needed to EVEX-broadcast each element
;;     mode (avx512bw for byte/word/half-float, avx512f for dword/qword);
;;   - the avx2 vpbroadcast patterns (duplicate element 0 of a vector);
;;   - three permvar (UNSPEC_VPERMVAR) patterns for the 4/8-byte-element,
;;     byte-element (AVX512VBMI) and word-element (AVX512BW) mode sets.
;;     The first one emits a dependency-breaking vxorps on the destination
;;     when TARGET_DEST_FALSE_DEP_FOR_GLC applies.
(define_expand "avx_vzeroupper" [(parallel [(call (mem:QI (const_int 0)) (const_int 0)) (unspec [(const_int ABI_VZEROUPPER)] UNSPEC_CALLEE_ABI)])] "TARGET_AVX" { ix86_expand_avx_vzeroupper (); DONE; }) (define_insn "avx_vzeroupper_callee_abi" [(call (mem:QI (const_int 0)) (const_int 0)) (unspec [(const_int ABI_VZEROUPPER)] UNSPEC_CALLEE_ABI)] "TARGET_AVX" "vzeroupper" [(set_attr "type" "sse") (set_attr "modrm" "0") (set_attr "memory" "none") (set_attr "prefix" "vex") (set_attr "btver2_decode" "vector") (set_attr "mode" "OI")]) (define_mode_attr pbroadcast_evex_isa [(V64QI "avx512bw") (V32QI "avx512bw") (V16QI "avx512bw") (V32HI "avx512bw") (V16HI "avx512bw") (V8HI "avx512bw") (V16SI "avx512f") (V8SI "avx512f") (V4SI "avx512f") (V8DI "avx512f") (V4DI "avx512f") (V2DI "avx512f") (V32HF "avx512bw") (V16HF "avx512bw") (V8HF "avx512bw")]) (define_insn "avx2_pbroadcast" [(set (match_operand:VIHF 0 "register_operand" "=x,v") (vec_duplicate:VIHF (vec_select: (match_operand: 1 "nonimmediate_operand" "xm,vm") (parallel [(const_int 0)]))))] "TARGET_AVX2" "vpbroadcast\t{%1, %0|%0, %1}" [(set_attr "isa" "*,") (set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "vex,evex") (set_attr "mode" "")]) (define_insn "avx2_pbroadcast_1" [(set (match_operand:VIHF_256 0 "register_operand" "=x,x,v,v") (vec_duplicate:VIHF_256 (vec_select: (match_operand:VIHF_256 1 "nonimmediate_operand" "m,x,m,v") (parallel [(const_int 0)]))))] "TARGET_AVX2" "@ vpbroadcast\t{%1, %0|%0, %1} vpbroadcast\t{%x1, %0|%0, %x1} vpbroadcast\t{%1, %0|%0, %1} vpbroadcast\t{%x1, %0|%0, %x1}" [(set_attr "isa" "*,*,,") (set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "vex") (set_attr "mode" "")]) (define_insn "_permvar" [(set (match_operand:VI48F_256_512 0 "register_operand" "=v") (unspec:VI48F_256_512 [(match_operand:VI48F_256_512 1 "nonimmediate_operand" "vm") (match_operand: 2 "register_operand" "v")] UNSPEC_VPERMVAR))] "TARGET_AVX2 && " { if
(TARGET_DEST_FALSE_DEP_FOR_GLC && && !reg_mentioned_p (operands[0], operands[1]) && !reg_mentioned_p (operands[0], operands[2])) output_asm_insn ("vxorps\t%x0, %x0, %x0", operands); return "vperm\t{%1, %2, %0|%0, %2, %1}"; } [(set_attr "type" "sselog") (set_attr "prefix" "") (set_attr "mode" "")]) (define_insn "_permvar" [(set (match_operand:VI1_AVX512VL 0 "register_operand" "=v") (unspec:VI1_AVX512VL [(match_operand:VI1_AVX512VL 1 "nonimmediate_operand" "vm") (match_operand: 2 "register_operand" "v")] UNSPEC_VPERMVAR))] "TARGET_AVX512VBMI && " "vperm\t{%1, %2, %0|%0, %2, %1}" [(set_attr "type" "sselog") (set_attr "prefix" "") (set_attr "mode" "")]) (define_insn "_permvar" [(set (match_operand:VI2_AVX512VL 0 "register_operand" "=v") (unspec:VI2_AVX512VL [(match_operand:VI2_AVX512VL 1 "nonimmediate_operand" "vm") (match_operand: 2 "register_operand" "v")] UNSPEC_VPERMVAR))] "TARGET_AVX512BW && " "vperm\t{%1, %2, %0|%0, %2, %1}" [(set_attr "type" "sselog") (set_attr "prefix" "") (set_attr "mode" "")])
;; Recognize broadcast as a vec_select as produced by builtin_vec_perm.
;; If it so happens that the input is in memory, use vbroadcast.
;; Otherwise use vpermilp (and in the case of 256-bit modes, vperm2f128).
;; NOTE(review): <...> substitutions stripped by extraction (e.g.
;; "gen_avx_vperm2f1283" was "gen_avx_vperm2f128<mode>3", the empty
;; "rtx perm[];" array size and "for (i = 0; i < ;" bounds were
;; "<ssescalarnum>") -- TODO restore from upstream sse.md.
;;
;; This span contains:
;;   - *avx_vperm_broadcast_v4sf: a V4SF vec_select broadcast, emitted as
;;     vbroadcastss from memory or vpermilps from a register;
;;   - *avx_vperm_broadcast_<mode>: the 256-bit variant, split after reload
;;     into a vec_duplicate; for register inputs it shuffles the wanted
;;     element across a 128-bit lane (vpermilp) and then copies that lane
;;     to both halves (vperm2f128, or VBROADCASTSS/VSHUFF128 for EVEX-only
;;     registers, where no EVEX VPERM2F128 exists);
;;   - the vpermil immediate-form expanders for double (2-bit-per-element
;;     selector within a lane pair) and float (shared 8-bit selector per
;;     4-element lane) vectors, which rewrite the immediate into an
;;     explicit vec_select parallel.
(define_insn "*avx_vperm_broadcast_v4sf" [(set (match_operand:V4SF 0 "register_operand" "=v,v,v") (vec_select:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "m,o,v") (match_parallel 2 "avx_vbroadcast_operand" [(match_operand 3 "const_int_operand" "C,n,n")])))] "TARGET_AVX" { int elt = INTVAL (operands[3]); switch (which_alternative) { case 0: case 1: operands[1] = adjust_address_nv (operands[1], SFmode, elt * 4); return "vbroadcastss\t{%1, %0|%0, %k1}"; case 2: operands[2] = GEN_INT (elt * 0x55); return "vpermilps\t{%2, %1, %0|%0, %1, %2}"; default: gcc_unreachable (); } } [(set_attr "type" "ssemov,ssemov,sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "0,0,1") (set_attr "prefix" "maybe_evex") (set_attr "mode" "SF,SF,V4SF")]) (define_insn_and_split "*avx_vperm_broadcast_" [(set (match_operand:VF_256 0 "register_operand" "=v,v,v") (vec_select:VF_256 (match_operand:VF_256 1 "nonimmediate_operand" "m,o,?v") (match_parallel 2 "avx_vbroadcast_operand" [(match_operand 3 "const_int_operand" "C,n,n")])))] "TARGET_AVX && (mode != V4DFmode || !TARGET_AVX2 || operands[3] == const0_rtx)" "#" "&& reload_completed" [(set (match_dup 0) (vec_duplicate:VF_256 (match_dup 1)))] { rtx op0 = operands[0], op1 = operands[1]; int elt = INTVAL (operands[3]); if (REG_P (op1)) { int mask; if (TARGET_AVX2 && elt == 0) { emit_insn (gen_vec_dup (op0, gen_lowpart (mode, op1))); DONE; } /* Shuffle element we care about into all elements of the 128-bit lane. The other lane gets shuffled too, but we don't care. */ if (mode == V4DFmode) mask = (elt & 1 ? 15 : 0); else mask = (elt & 3) * 0x55; emit_insn (gen_avx_vpermil (op0, op1, GEN_INT (mask))); /* Shuffle the lane we care about into both lanes of the dest. */ mask = (elt / ( / 2)) * 0x11; if (EXT_REX_SSE_REG_P (op0)) { /* There is no EVEX VPERM2F128, but we can use either VBROADCASTSS or VSHUFF128.
*/ gcc_assert (mode == V8SFmode); if ((mask & 1) == 0) emit_insn (gen_avx2_vec_dupv8sf (op0, gen_lowpart (V4SFmode, op0))); else emit_insn (gen_avx512vl_shuf_f32x4_1 (op0, op0, op0, GEN_INT (4), GEN_INT (5), GEN_INT (6), GEN_INT (7), GEN_INT (12), GEN_INT (13), GEN_INT (14), GEN_INT (15))); DONE; } emit_insn (gen_avx_vperm2f1283 (op0, op0, op0, GEN_INT (mask))); DONE; } operands[1] = adjust_address (op1, mode, elt * GET_MODE_SIZE (mode)); }) (define_expand "_vpermil" [(set (match_operand:VF2 0 "register_operand") (vec_select:VF2 (match_operand:VF2 1 "nonimmediate_operand") (match_operand:SI 2 "const_0_to_255_operand")))] "TARGET_AVX && " { int mask = INTVAL (operands[2]); rtx perm[]; int i; for (i = 0; i < ; i = i + 2) { perm[i] = GEN_INT (((mask >> i) & 1) + i); perm[i + 1] = GEN_INT (((mask >> (i + 1)) & 1) + i); } operands[2] = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (, perm)); }) (define_expand "_vpermil" [(set (match_operand:VF1 0 "register_operand") (vec_select:VF1 (match_operand:VF1 1 "nonimmediate_operand") (match_operand:SI 2 "const_0_to_255_operand")))] "TARGET_AVX && " { int mask = INTVAL (operands[2]); rtx perm[]; int i; for (i = 0; i < ; i = i + 4) { perm[i] = GEN_INT (((mask >> 0) & 3) + i); perm[i + 1] = GEN_INT (((mask >> 2) & 3) + i); perm[i + 2] = GEN_INT (((mask >> 4) & 3) + i); perm[i + 3] = GEN_INT (((mask >> 6) & 3) + i); } operands[2] = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (, perm)); })
;; This pattern needs to come before the avx2_perm*/avx512f_perm*
;; patterns, as they have the same RTL representation (vpermilp*
;; being a subset of what vpermp* can do), but vpermilp* has shorter
;; latency as it never crosses lanes.
;; NOTE(review): <...> iterator/attribute substitutions stripped by
;; extraction throughout (dangling "&& &&" in insn conditions where a
;; "<mask_mode512bit_condition>"-style attribute stood, empty match_parallel
;; predicate names, identical-looking "_vec_dup" / "_vec_dup_gpr" pattern
;; names) -- TODO restore from upstream sse.md.
;;
;; This span contains:
;;   - *_vpermilp: matches a vec_select whose parallel is a valid
;;     vpermilp selector (avx_vpermilp_parallel) and re-encodes it as the
;;     immediate form;
;;   - avx2/avx512f perm expanders and _1 insns: decode an 8-bit immediate
;;     into four 2-bit element selectors (the 512-bit form repeats the
;;     pattern in the high half, enforced by the operands[6..9] checks);
;;     some emit a false-dependency-breaking vxorps under
;;     TARGET_DEST_FALSE_DEP_FOR_GLC;
;;   - avx2_permv2ti (vperm2i128) and a family of vec_duplicate broadcast
;;     insns (vpbroadcast / vbroadcastss/sd / vbroadcast*x* / vshuf*),
;;     including GPR-source variants;
;;   - vec_dupv4sf / *vec_dupv4si / *vec_dupv2di with SSE fallbacks;
;;   - splitters that turn a GPR-sourced vec_duplicate into
;;     vec_set + pbroadcast (AVX2) or duplicate-low-then-vec_concat
;;     (AVX without AVX2), and the avx_vbroadcastf128 patterns.
(define_insn "*_vpermilp" [(set (match_operand:VF 0 "register_operand" "=v") (vec_select:VF (match_operand:VF 1 "nonimmediate_operand" "vm") (match_parallel 2 "" [(match_operand 3 "const_int_operand")])))] "TARGET_AVX && && avx_vpermilp_parallel (operands[2], mode)" { int mask = avx_vpermilp_parallel (operands[2], mode) - 1; operands[2] = GEN_INT (mask); return "vpermil\t{%2, %1, %0|%0, %1, %2}"; } [(set_attr "type" "sselog") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "") (set_attr "mode" "")]) (define_expand "avx2_perm" [(match_operand:VI8F_256 0 "register_operand") (match_operand:VI8F_256 1 "nonimmediate_operand") (match_operand:SI 2 "const_0_to_255_operand")] "TARGET_AVX2" { int mask = INTVAL (operands[2]); emit_insn (gen_avx2_perm_1 (operands[0], operands[1], GEN_INT ((mask >> 0) & 3), GEN_INT ((mask >> 2) & 3), GEN_INT ((mask >> 4) & 3), GEN_INT ((mask >> 6) & 3))); DONE; }) (define_expand "avx512vl_perm_mask" [(match_operand:VI8F_256 0 "register_operand") (match_operand:VI8F_256 1 "nonimmediate_operand") (match_operand:SI 2 "const_0_to_255_operand") (match_operand:VI8F_256 3 "nonimm_or_0_operand") (match_operand: 4 "register_operand")] "TARGET_AVX512VL" { int mask = INTVAL (operands[2]); emit_insn (gen__perm_1_mask (operands[0], operands[1], GEN_INT ((mask >> 0) & 3), GEN_INT ((mask >> 2) & 3), GEN_INT ((mask >> 4) & 3), GEN_INT ((mask >> 6) & 3), operands[3], operands[4])); DONE; }) (define_insn "avx2_perm_1" [(set (match_operand:VI8F_256 0 "register_operand" "=v") (vec_select:VI8F_256 (match_operand:VI8F_256 1 "nonimmediate_operand" "vm") (parallel [(match_operand 2 "const_0_to_3_operand") (match_operand 3 "const_0_to_3_operand") (match_operand 4 "const_0_to_3_operand") (match_operand 5 "const_0_to_3_operand")])))] "TARGET_AVX2 && " { int mask = 0; mask |= INTVAL (operands[2]) << 0; mask |= INTVAL (operands[3]) << 2; mask |= INTVAL (operands[4]) << 4; mask |= INTVAL (operands[5]) << 6; operands[2] = GEN_INT (mask); if
(TARGET_DEST_FALSE_DEP_FOR_GLC && && !reg_mentioned_p (operands[0], operands[1])) output_asm_insn ("vxorps\t%x0, %x0, %x0", operands); return "vperm\t{%2, %1, %0|%0, %1, %2}"; } [(set_attr "type" "sselog") (set_attr "prefix" "") (set_attr "mode" "")]) (define_expand "avx512f_perm" [(match_operand:V8FI 0 "register_operand") (match_operand:V8FI 1 "nonimmediate_operand") (match_operand:SI 2 "const_0_to_255_operand")] "TARGET_AVX512F" { int mask = INTVAL (operands[2]); emit_insn (gen_avx512f_perm_1 (operands[0], operands[1], GEN_INT ((mask >> 0) & 3), GEN_INT ((mask >> 2) & 3), GEN_INT ((mask >> 4) & 3), GEN_INT ((mask >> 6) & 3), GEN_INT (((mask >> 0) & 3) + 4), GEN_INT (((mask >> 2) & 3) + 4), GEN_INT (((mask >> 4) & 3) + 4), GEN_INT (((mask >> 6) & 3) + 4))); DONE; }) (define_expand "avx512f_perm_mask" [(match_operand:V8FI 0 "register_operand") (match_operand:V8FI 1 "nonimmediate_operand") (match_operand:SI 2 "const_0_to_255_operand") (match_operand:V8FI 3 "nonimm_or_0_operand") (match_operand: 4 "register_operand")] "TARGET_AVX512F" { int mask = INTVAL (operands[2]); emit_insn (gen_avx512f_perm_1_mask (operands[0], operands[1], GEN_INT ((mask >> 0) & 3), GEN_INT ((mask >> 2) & 3), GEN_INT ((mask >> 4) & 3), GEN_INT ((mask >> 6) & 3), GEN_INT (((mask >> 0) & 3) + 4), GEN_INT (((mask >> 2) & 3) + 4), GEN_INT (((mask >> 4) & 3) + 4), GEN_INT (((mask >> 6) & 3) + 4), operands[3], operands[4])); DONE; }) (define_insn "avx512f_perm_1" [(set (match_operand:V8FI 0 "register_operand" "=v") (vec_select:V8FI (match_operand:V8FI 1 "nonimmediate_operand" "vm") (parallel [(match_operand 2 "const_0_to_3_operand") (match_operand 3 "const_0_to_3_operand") (match_operand 4 "const_0_to_3_operand") (match_operand 5 "const_0_to_3_operand") (match_operand 6 "const_4_to_7_operand") (match_operand 7 "const_4_to_7_operand") (match_operand 8 "const_4_to_7_operand") (match_operand 9 "const_4_to_7_operand")])))] "TARGET_AVX512F && && (INTVAL (operands[2]) == (INTVAL (operands[6]) - 4) &&
INTVAL (operands[3]) == (INTVAL (operands[7]) - 4) && INTVAL (operands[4]) == (INTVAL (operands[8]) - 4) && INTVAL (operands[5]) == (INTVAL (operands[9]) - 4))" { int mask = 0; mask |= INTVAL (operands[2]) << 0; mask |= INTVAL (operands[3]) << 2; mask |= INTVAL (operands[4]) << 4; mask |= INTVAL (operands[5]) << 6; operands[2] = GEN_INT (mask); if (TARGET_DEST_FALSE_DEP_FOR_GLC && && !reg_mentioned_p (operands[0], operands[1])) output_asm_insn ("vxorps\t%x0, %x0, %x0", operands); return "vperm\t{%2, %1, %0|%0, %1, %2}"; } [(set_attr "type" "sselog") (set_attr "prefix" "") (set_attr "mode" "")]) (define_insn "avx2_permv2ti" [(set (match_operand:V4DI 0 "register_operand" "=x") (unspec:V4DI [(match_operand:V4DI 1 "register_operand" "x") (match_operand:V4DI 2 "nonimmediate_operand" "xm") (match_operand:SI 3 "const_0_to_255_operand" "n")] UNSPEC_VPERMTI))] "TARGET_AVX2" "vperm2i128\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "type" "sselog") (set_attr "prefix" "vex") (set_attr "mode" "OI")]) (define_insn "avx2_vec_dupv4df" [(set (match_operand:V4DF 0 "register_operand" "=v") (vec_duplicate:V4DF (vec_select:DF (match_operand:V2DF 1 "register_operand" "v") (parallel [(const_int 0)]))))] "TARGET_AVX2" "vbroadcastsd\t{%1, %0|%0, %1}" [(set_attr "type" "sselog1") (set_attr "prefix" "maybe_evex") (set_attr "mode" "V4DF")]) (define_insn "_vec_dup_1" [(set (match_operand:VIHF_AVX512BW 0 "register_operand" "=v,v") (vec_duplicate:VIHF_AVX512BW (vec_select: (match_operand:VIHF_AVX512BW 1 "nonimmediate_operand" "v,m") (parallel [(const_int 0)]))))] "TARGET_AVX512F" "@ vpbroadcast\t{%x1, %0|%0, %x1} vpbroadcast\t{%x1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "_vec_dup" [(set (match_operand:V48_AVX512VL 0 "register_operand" "=v") (vec_duplicate:V48_AVX512VL (vec_select: (match_operand: 1 "nonimmediate_operand" "vm") (parallel [(const_int 0)]))))] "TARGET_AVX512F" { /* There is no DF broadcast (in AVX-512*) to 128b
register. Mimic it with integer variant. */ if (mode == V2DFmode) return "vpbroadcastq\t{%1, %0|%0, %q1}"; return "vbroadcast\t{%1, %0|%0, %1}"; } [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "_vec_dup" [(set (match_operand:VI12HF_AVX512VL 0 "register_operand" "=v") (vec_duplicate:VI12HF_AVX512VL (vec_select: (match_operand: 1 "nonimmediate_operand" "vm") (parallel [(const_int 0)]))))] "TARGET_AVX512BW" "vpbroadcast\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "avx512f_broadcast" [(set (match_operand:V16FI 0 "register_operand" "=v,v") (vec_duplicate:V16FI (match_operand: 1 "nonimmediate_operand" "v,m")))] "TARGET_AVX512F" "@ vshuf32x4\t{$0x0, %g1, %g1, %0|%0, %g1, %g1, 0x0} vbroadcast32x4\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "avx512f_broadcast" [(set (match_operand:V8FI 0 "register_operand" "=v,v") (vec_duplicate:V8FI (match_operand: 1 "nonimmediate_operand" "v,m")))] "TARGET_AVX512F" "@ vshuf64x2\t{$0x44, %g1, %g1, %0|%0, %g1, %g1, 0x44} vbroadcast64x4\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "_vec_dup_gpr" [(set (match_operand:VI12HF_AVX512VL 0 "register_operand" "=v,v") (vec_duplicate:VI12HF_AVX512VL (match_operand: 1 "nonimmediate_operand" "vm,r")))] "TARGET_AVX512BW" "@ vpbroadcast\t{%1, %0|%0, %1} vpbroadcast\t{%k1, %0|%0, %k1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "_vec_dup_gpr" [(set (match_operand:V48_AVX512VL 0 "register_operand" "=v,v") (vec_duplicate:V48_AVX512VL (match_operand: 1 "nonimmediate_operand" "vm,r")))] "TARGET_AVX512F" "vbroadcast\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "") (set (attr "enabled") (if_then_else (eq_attr "alternative" "1") (symbol_ref "GET_MODE_CLASS (mode) == MODE_INT && (mode !=
DImode || TARGET_64BIT)") (const_int 1)))]) (define_insn "vec_dupv4sf" [(set (match_operand:V4SF 0 "register_operand" "=v,v,x") (vec_duplicate:V4SF (match_operand:SF 1 "nonimmediate_operand" "Yv,m,0")))] "TARGET_SSE" "@ vshufps\t{$0, %1, %1, %0|%0, %1, %1, 0} vbroadcastss\t{%1, %0|%0, %1} shufps\t{$0, %0, %0|%0, %0, 0}" [(set_attr "isa" "avx,avx,noavx") (set_attr "type" "sseshuf1,ssemov,sseshuf1") (set_attr "length_immediate" "1,0,1") (set_attr "prefix_extra" "0,1,*") (set_attr "prefix" "maybe_evex,maybe_evex,orig") (set_attr "mode" "V4SF")]) (define_insn "*vec_dupv4si" [(set (match_operand:V4SI 0 "register_operand" "=v,v,x") (vec_duplicate:V4SI (match_operand:SI 1 "nonimmediate_operand" "Yv,m,0")))] "TARGET_SSE" "@ %vpshufd\t{$0, %1, %0|%0, %1, 0} vbroadcastss\t{%1, %0|%0, %1} shufps\t{$0, %0, %0|%0, %0, 0}" [(set_attr "isa" "sse2,avx,noavx") (set_attr "type" "sselog1,ssemov,sselog1") (set_attr "length_immediate" "1,0,1") (set_attr "prefix_extra" "0,1,*") (set_attr "prefix" "maybe_vex,maybe_evex,orig") (set_attr "mode" "TI,V4SF,V4SF") (set (attr "preferred_for_speed") (cond [(eq_attr "alternative" "1") (symbol_ref "!TARGET_INTER_UNIT_MOVES_TO_VEC") ] (symbol_ref "true")))]) (define_insn "*vec_dupv2di" [(set (match_operand:V2DI 0 "register_operand" "=x,v,v,x") (vec_duplicate:V2DI (match_operand:DI 1 "nonimmediate_operand" " 0,Yv,vm,0")))] "TARGET_SSE" "@ punpcklqdq\t%0, %0 vpunpcklqdq\t{%d1, %0|%0, %d1} %vmovddup\t{%1, %0|%0, %1} movlhps\t%0, %0" [(set_attr "isa" "sse2_noavx,avx,sse3,noavx") (set_attr "type" "sselog1,sselog1,sselog1,ssemov") (set_attr "prefix" "orig,maybe_evex,maybe_vex,orig") (set_attr "mode" "TI,TI,DF,V4SF")]) (define_insn "avx2_vbroadcasti128_" [(set (match_operand:VI_256 0 "register_operand" "=x,v,v") (vec_concat:VI_256 (match_operand: 1 "memory_operand" "m,m,m") (match_dup 1)))] "TARGET_AVX2" "@ vbroadcasti128\t{%1, %0|%0, %1} vbroadcast\t{%1, %0|%0, %1} vbroadcast32x4\t{%1, %0|%0, %1}" [(set_attr "isa" "*,avx512dq,avx512vl") (set_attr "type"
"ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "vex,evex,evex") (set_attr "mode" "OI")])
;; Modes handled by AVX vec_dup patterns.
(define_mode_iterator AVX_VEC_DUP_MODE [V8SI V8SF V4DI V4DF]) (define_mode_attr vecdupssescalarmodesuffix [(V8SF "ss") (V4DF "sd") (V8SI "ss") (V4DI "sd")])
;; Modes handled by AVX2 vec_dup patterns.
(define_mode_iterator AVX2_VEC_DUP_MODE [V32QI V16QI V16HI V8HI V8SI V4SI V16HF V8HF]) (define_insn "*vec_dup" [(set (match_operand:AVX2_VEC_DUP_MODE 0 "register_operand" "=x,x,v") (vec_duplicate:AVX2_VEC_DUP_MODE (match_operand: 1 "nonimmediate_operand" "m,x,$r")))] "TARGET_AVX2" "@ vbroadcast\t{%1, %0|%0, %1} vbroadcast\t{%x1, %0|%0, %x1} #" [(set_attr "isa" "*,*,noavx512vl") (set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "maybe_evex") (set_attr "mode" "") (set (attr "preferred_for_speed") (cond [(eq_attr "alternative" "2") (symbol_ref "TARGET_INTER_UNIT_MOVES_TO_VEC") ] (symbol_ref "true")))]) (define_insn "vec_dup" [(set (match_operand:AVX_VEC_DUP_MODE 0 "register_operand" "=x,x,x,v,x") (vec_duplicate:AVX_VEC_DUP_MODE (match_operand: 1 "nonimmediate_operand" "m,m,x,v,?x")))] "TARGET_AVX" "@ vbroadcast\t{%1, %0|%0, %1} vbroadcast\t{%1, %0|%0, %1} vbroadcast\t{%x1, %0|%0, %x1} vbroadcast\t{%x1, %g0|%g0, %x1} #" [(set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "maybe_evex") (set_attr "isa" "avx2,noavx2,avx2,avx512f,noavx2") (set_attr "mode" ",V8SF,,,V8SF")]) (define_split [(set (match_operand:AVX2_VEC_DUP_MODE 0 "register_operand") (vec_duplicate:AVX2_VEC_DUP_MODE (match_operand: 1 "register_operand")))] "TARGET_AVX2 /* Disable this splitter if avx512vl_vec_dup_gprv*[qhs]i insn is available, because then we can broadcast from GPRs directly. For V*[QH]I modes it requires both -mavx512vl and -mavx512bw, for V*SI mode it requires just -mavx512vl.
*/ && !(TARGET_AVX512VL && (TARGET_AVX512BW || mode == SImode)) && reload_completed && GENERAL_REG_P (operands[1])" [(const_int 0)] { emit_insn (gen_vec_setv4si_0 (gen_lowpart (V4SImode, operands[0]), CONST0_RTX (V4SImode), gen_lowpart (SImode, operands[1]))); emit_insn (gen_avx2_pbroadcast (operands[0], gen_lowpart (mode, operands[0]))); DONE; }) (define_split [(set (match_operand:AVX_VEC_DUP_MODE 0 "register_operand") (vec_duplicate:AVX_VEC_DUP_MODE (match_operand: 1 "register_operand")))] "TARGET_AVX && !TARGET_AVX2 && reload_completed" [(set (match_dup 2) (vec_duplicate: (match_dup 1))) (set (match_dup 0) (vec_concat:AVX_VEC_DUP_MODE (match_dup 2) (match_dup 2)))] "operands[2] = gen_lowpart (mode, operands[0]);") (define_insn "avx_vbroadcastf128_" [(set (match_operand:V_256H 0 "register_operand" "=x,x,x,v,v,v,v") (vec_concat:V_256H (match_operand: 1 "nonimmediate_operand" "m,0,?x,m,0,m,0") (match_dup 1)))] "TARGET_AVX" "@ vbroadcast\t{%1, %0|%0, %1} vinsert\t{$1, %1, %0, %0|%0, %0, %1, 1} vperm2\t{$0, %t1, %t1, %0|%0, %t1, %t1, 0} vbroadcast\t{%1, %0|%0, %1} vinsert\t{$1, %1, %0, %0|%0, %0, %1, 1} vbroadcast32x4\t{%1, %0|%0, %1} vinsert32x4\t{$1, %1, %0, %0|%0, %0, %1, 1}" [(set_attr "isa" "*,*,*,avx512dq,avx512dq,avx512vl,avx512vl") (set_attr "type" "ssemov,sselog1,sselog1,ssemov,sselog1,ssemov,sselog1") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "0,1,1,0,1,0,1") (set_attr "prefix" "vex,vex,vex,evex,evex,evex,evex") (set_attr "mode" "")])
;; For broadcast[i|f]32x2. Yes there is no v4sf version, only v4si.
;; NOTE(review): this region defines the 32x2/32x4/32x8/64x2 broadcast
;; insns (vshuf-or-vbroadcast alternatives), the AVX512CD mask broadcasts
;; (vpbroadcastmb2q / vpbroadcastmw2d), the variable vpermil pattern, the
;; VPERMI2 / VPERMI2I mode iterators, the vpermi2var / vpermt2var
;; expanders and insns (masked and maskz forms), and the avx_vperm2f128
;; expander.  Many pattern names and template strings look truncated
;; (iterator substitutions stripped, e.g. "rtx perm[]" with no array
;; size and "nelt = ," in the vperm2f128 body); verify every name and
;; template against the canonical machine description before editing.
(define_mode_iterator VI4F_BRCST32x2 [V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL") V16SF (V8SF "TARGET_AVX512VL")]) (define_mode_attr 64x2mode [(V8DF "V2DF") (V8DI "V2DI") (V4DI "V2DI") (V4DF "V2DF")]) (define_mode_attr 32x2mode [(V16SF "V2SF") (V16SI "V2SI") (V8SI "V2SI") (V8SF "V2SF") (V4SI "V2SI")]) (define_insn "avx512dq_broadcast" [(set (match_operand:VI4F_BRCST32x2 0 "register_operand" "=v") (vec_duplicate:VI4F_BRCST32x2 (vec_select:<32x2mode> (match_operand: 1 "nonimmediate_operand" "vm") (parallel [(const_int 0) (const_int 1)]))))] "TARGET_AVX512DQ" "vbroadcast32x2\t{%1, %0|%0, %q1}" [(set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "avx512vl_broadcast_1" [(set (match_operand:VI4F_256 0 "register_operand" "=v,v") (vec_duplicate:VI4F_256 (match_operand: 1 "nonimmediate_operand" "v,m")))] "TARGET_AVX512VL" "@ vshuf32x4\t{$0x0, %t1, %t1, %0|%0, %t1, %t1, 0x0} vbroadcast32x4\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "avx512dq_broadcast_1" [(set (match_operand:V16FI 0 "register_operand" "=v,v") (vec_duplicate:V16FI (match_operand: 1 "nonimmediate_operand" "v,m")))] "TARGET_AVX512DQ" "@ vshuf32x4\t{$0x44, %g1, %g1, %0|%0, %g1, %g1, 0x44} vbroadcast32x8\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) ;; For broadcast[i|f]64x2 (define_mode_iterator VI8F_BRCST64x2 [V8DI V8DF (V4DI "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")]) (define_insn "avx512dq_broadcast_1" [(set (match_operand:VI8F_BRCST64x2 0 "register_operand" "=v,v") (vec_duplicate:VI8F_BRCST64x2 (match_operand:<64x2mode> 1 "nonimmediate_operand" "v,m")))] "TARGET_AVX512DQ" "@ vshuf64x2\t{$0x0, %1, %1, %0|%0, %1, %1, 0x0} vbroadcast64x2\t{%1, %0|%0, %1}" [(set_attr "type" "ssemov") (set_attr "prefix_extra" "1") (set_attr "prefix" "evex") (set_attr "mode" 
"")]) (define_insn "avx512cd_maskb_vec_dup" [(set (match_operand:VI8_AVX512VL 0 "register_operand" "=v") (vec_duplicate:VI8_AVX512VL (zero_extend:DI (match_operand:QI 1 "register_operand" "k"))))] "TARGET_AVX512CD" "vpbroadcastmb2q\t{%1, %0|%0, %1}" [(set_attr "type" "mskmov") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_insn "avx512cd_maskw_vec_dup" [(set (match_operand:VI4_AVX512VL 0 "register_operand" "=v") (vec_duplicate:VI4_AVX512VL (zero_extend:SI (match_operand:HI 1 "register_operand" "k"))))] "TARGET_AVX512CD" "vpbroadcastmw2d\t{%1, %0|%0, %1}" [(set_attr "type" "mskmov") (set_attr "prefix" "evex") (set_attr "mode" "XI")]) (define_insn "_vpermilvar3" [(set (match_operand:VF 0 "register_operand" "=v") (unspec:VF [(match_operand:VF 1 "register_operand" "v") (match_operand: 2 "nonimmediate_operand" "vm")] UNSPEC_VPERMIL))] "TARGET_AVX && " "vpermil\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix_extra" "1") (set_attr "btver2_decode" "vector") (set_attr "prefix" "") (set_attr "mode" "")]) (define_mode_iterator VPERMI2 [V16SI V16SF V8DI V8DF (V8SI "TARGET_AVX512VL") (V8SF "TARGET_AVX512VL") (V4DI "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL") (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX512BW && TARGET_AVX512VL") (V8HI "TARGET_AVX512BW && TARGET_AVX512VL") (V64QI "TARGET_AVX512VBMI") (V32QI "TARGET_AVX512VBMI && TARGET_AVX512VL") (V16QI "TARGET_AVX512VBMI && TARGET_AVX512VL")]) (define_mode_iterator VPERMI2I [V16SI V8DI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL") (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL") (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX512BW && TARGET_AVX512VL") (V8HI "TARGET_AVX512BW && TARGET_AVX512VL") (V64QI "TARGET_AVX512VBMI") (V32QI "TARGET_AVX512VBMI && TARGET_AVX512VL") (V16QI "TARGET_AVX512VBMI && TARGET_AVX512VL")]) (define_expand "_vpermi2var3_mask" [(set (match_operand:VPERMI2 0 
"register_operand") (vec_merge:VPERMI2 (unspec:VPERMI2 [(match_operand: 2 "register_operand") (match_operand:VPERMI2 1 "register_operand") (match_operand:VPERMI2 3 "nonimmediate_operand")] UNSPEC_VPERMT2) (match_dup 5) (match_operand: 4 "register_operand")))] "TARGET_AVX512F" { operands[2] = force_reg (mode, operands[2]); operands[5] = gen_lowpart (mode, operands[2]); }) (define_insn "*_vpermi2var3_mask" [(set (match_operand:VPERMI2I 0 "register_operand" "=v") (vec_merge:VPERMI2I (unspec:VPERMI2I [(match_operand: 2 "register_operand" "0") (match_operand:VPERMI2I 1 "register_operand" "v") (match_operand:VPERMI2I 3 "nonimmediate_operand" "vm")] UNSPEC_VPERMT2) (match_dup 2) (match_operand: 4 "register_operand" "Yk")))] "TARGET_AVX512F" "vpermi2\t{%3, %1, %0%{%4%}|%0%{%4%}, %1, %3}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "*_vpermi2var3_mask" [(set (match_operand:VF_AVX512VL 0 "register_operand" "=v") (vec_merge:VF_AVX512VL (unspec:VF_AVX512VL [(match_operand: 2 "register_operand" "0") (match_operand:VF_AVX512VL 1 "register_operand" "v") (match_operand:VF_AVX512VL 3 "nonimmediate_operand" "vm")] UNSPEC_VPERMT2) (subreg:VF_AVX512VL (match_dup 2) 0) (match_operand: 4 "register_operand" "Yk")))] "TARGET_AVX512F" "vpermi2\t{%3, %1, %0%{%4%}|%0%{%4%}, %1, %3}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "_vpermt2var3_maskz" [(match_operand:VPERMI2 0 "register_operand") (match_operand: 1 "register_operand") (match_operand:VPERMI2 2 "register_operand") (match_operand:VPERMI2 3 "nonimmediate_operand") (match_operand: 4 "register_operand")] "TARGET_AVX512F" { emit_insn (gen__vpermt2var3_maskz_1 ( operands[0], operands[1], operands[2], operands[3], CONST0_RTX (mode), operands[4])); DONE; }) (define_insn "_vpermt2var3" [(set (match_operand:VPERMI2 0 "register_operand" "=v,v") (unspec:VPERMI2 [(match_operand: 1 "register_operand" "v,0") (match_operand:VPERMI2 2 "register_operand" 
"0,v") (match_operand:VPERMI2 3 "nonimmediate_operand" "vm,vm")] UNSPEC_VPERMT2))] "TARGET_AVX512F" "@ vpermt2\t{%3, %1, %0|%0, %1, %3} vpermi2\t{%3, %2, %0|%0, %2, %3}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "_vpermt2var3_mask" [(set (match_operand:VPERMI2 0 "register_operand" "=v") (vec_merge:VPERMI2 (unspec:VPERMI2 [(match_operand: 1 "register_operand" "v") (match_operand:VPERMI2 2 "register_operand" "0") (match_operand:VPERMI2 3 "nonimmediate_operand" "vm")] UNSPEC_VPERMT2) (match_dup 2) (match_operand: 4 "register_operand" "Yk")))] "TARGET_AVX512F" "vpermt2\t{%3, %1, %0%{%4%}|%0%{%4%}, %1, %3}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "avx_vperm2f1283" [(set (match_operand:AVX256MODE2P 0 "register_operand") (unspec:AVX256MODE2P [(match_operand:AVX256MODE2P 1 "register_operand") (match_operand:AVX256MODE2P 2 "nonimmediate_operand") (match_operand:SI 3 "const_0_to_255_operand")] UNSPEC_VPERMIL2F128))] "TARGET_AVX" { int mask = INTVAL (operands[3]); if ((mask & 0x88) == 0) { rtx perm[], t1, t2; int i, base, nelt = , nelt2 = nelt / 2; base = (mask & 3) * nelt2; for (i = 0; i < nelt2; ++i) perm[i] = GEN_INT (base + i); base = ((mask >> 4) & 3) * nelt2; for (i = 0; i < nelt2; ++i) perm[i + nelt2] = GEN_INT (base + i); t2 = gen_rtx_VEC_CONCAT (mode, operands[1], operands[2]); t1 = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, perm)); t2 = gen_rtx_VEC_SELECT (mode, t2, t1); t2 = gen_rtx_SET (operands[0], t2); emit_insn (t2); DONE; } }) ;; Note that bits 7 and 3 of the imm8 allow lanes to be zeroed, which ;; means that in order to represent this properly in rtl we'd have to ;; nest *another* vec_concat with a zero operand and do the select from ;; a 4x wide vector. That doesn't seem very nice. 
;; NOTE(review): this region covers the avx_vperm2f128 insns (full and
;; nozero forms), the ssse3 palignr permutation, the avx512vl/avx
;; vinsert expanders, the vec_set_lo_* / vec_set_hi_* 128-bit-lane
;; insert insns (including the v32qi variants), the vmaskmov
;; maskload/maskstore insns and expanders, the cbranch expander, the
;; avx cast insn-and-split, and the VEC_INIT mode iterators.  Template
;; strings with empty substitutions (bare "vinsert\t" / "vperm2\t" with
;; no element-size suffix) indicate stripped iterator attributes; treat
;; the text as damaged and confirm against the canonical machine
;; description before making functional changes.
(define_insn "*avx_vperm2f128_full" [(set (match_operand:AVX256MODE2P 0 "register_operand" "=x") (unspec:AVX256MODE2P [(match_operand:AVX256MODE2P 1 "register_operand" "x") (match_operand:AVX256MODE2P 2 "nonimmediate_operand" "xm") (match_operand:SI 3 "const_0_to_255_operand" "n")] UNSPEC_VPERMIL2F128))] "TARGET_AVX" "vperm2\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "type" "sselog") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "vex") (set_attr "mode" "")]) (define_insn "*avx_vperm2f128_nozero" [(set (match_operand:AVX256MODE2P 0 "register_operand" "=x") (vec_select:AVX256MODE2P (vec_concat: (match_operand:AVX256MODE2P 1 "register_operand" "x") (match_operand:AVX256MODE2P 2 "nonimmediate_operand" "xm")) (match_parallel 3 "" [(match_operand 4 "const_int_operand")])))] "TARGET_AVX && avx_vperm2f128_parallel (operands[3], mode)" { int mask = avx_vperm2f128_parallel (operands[3], mode) - 1; if (mask == 0x12) return "vinsert\t{$0, %x2, %1, %0|%0, %1, %x2, 0}"; if (mask == 0x20) return "vinsert\t{$1, %x2, %1, %0|%0, %1, %x2, 1}"; operands[3] = GEN_INT (mask); return "vperm2\t{%3, %2, %1, %0|%0, %1, %2, %3}"; } [(set_attr "type" "sselog") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "vex") (set_attr "mode" "")]) (define_insn "*ssse3_palignr_perm" [(set (match_operand:V_128 0 "register_operand" "=x,Yw") (vec_select:V_128 (match_operand:V_128 1 "register_operand" "0,Yw") (match_parallel 2 "palignr_operand" [(match_operand 3 "const_int_operand" "n,n")])))] "TARGET_SSSE3" { operands[2] = (GEN_INT (INTVAL (operands[3]) * GET_MODE_UNIT_SIZE (GET_MODE (operands[0])))); switch (which_alternative) { case 0: return "palignr\t{%2, %1, %0|%0, %1, %2}"; case 1: return "vpalignr\t{%2, %1, %1, %0|%0, %1, %1, %2}"; default: gcc_unreachable (); } } [(set_attr "isa" "noavx,avx") (set_attr "type" "sseishft") (set_attr "atom_unit" "sishuf") (set_attr "prefix_data16" "1,*") (set_attr "prefix_extra" "1") (set_attr 
"length_immediate" "1") (set_attr "prefix" "orig,maybe_evex")]) (define_expand "avx512vl_vinsert" [(match_operand:VI48F_256 0 "register_operand") (match_operand:VI48F_256 1 "register_operand") (match_operand: 2 "nonimmediate_operand") (match_operand:SI 3 "const_0_to_1_operand") (match_operand:VI48F_256 4 "register_operand") (match_operand: 5 "register_operand")] "TARGET_AVX512VL" { rtx (*insn)(rtx, rtx, rtx, rtx, rtx); switch (INTVAL (operands[3])) { case 0: insn = gen_vec_set_lo__mask; break; case 1: insn = gen_vec_set_hi__mask; break; default: gcc_unreachable (); } emit_insn (insn (operands[0], operands[1], operands[2], operands[4], operands[5])); DONE; }) (define_expand "avx_vinsertf128" [(match_operand:V_256 0 "register_operand") (match_operand:V_256 1 "register_operand") (match_operand: 2 "nonimmediate_operand") (match_operand:SI 3 "const_0_to_1_operand")] "TARGET_AVX" { rtx (*insn)(rtx, rtx, rtx); switch (INTVAL (operands[3])) { case 0: insn = gen_vec_set_lo_; break; case 1: insn = gen_vec_set_hi_; break; default: gcc_unreachable (); } emit_insn (insn (operands[0], operands[1], operands[2])); DONE; }) (define_insn "vec_set_lo_" [(set (match_operand:VI8F_256 0 "register_operand" "=v") (vec_concat:VI8F_256 (match_operand: 2 "nonimmediate_operand" "vm") (vec_select: (match_operand:VI8F_256 1 "register_operand" "v") (parallel [(const_int 2) (const_int 3)]))))] "TARGET_AVX && " { if (TARGET_AVX512DQ) return "vinsert64x2\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}"; else if (TARGET_AVX512VL) return "vinsert32x4\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}"; else return "vinsert\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}"; } [(set_attr "type" "sselog") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "vex") (set_attr "mode" "")]) (define_insn "vec_set_hi_" [(set (match_operand:VI8F_256 0 "register_operand" "=v") (vec_concat:VI8F_256 (vec_select: (match_operand:VI8F_256 1 "register_operand" "v") (parallel [(const_int 0) (const_int 1)])) (match_operand: 2 
"nonimmediate_operand" "vm")))] "TARGET_AVX && " { if (TARGET_AVX512DQ) return "vinsert64x2\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"; else if (TARGET_AVX512VL) return "vinsert32x4\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"; else return "vinsert\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"; } [(set_attr "type" "sselog") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "vex") (set_attr "mode" "")]) (define_insn "vec_set_lo_" [(set (match_operand:VI4F_256 0 "register_operand" "=v") (vec_concat:VI4F_256 (match_operand: 2 "nonimmediate_operand" "vm") (vec_select: (match_operand:VI4F_256 1 "register_operand" "v") (parallel [(const_int 4) (const_int 5) (const_int 6) (const_int 7)]))))] "TARGET_AVX" { if (TARGET_AVX512VL) return "vinsert32x4\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}"; else return "vinsert\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}"; } [(set_attr "type" "sselog") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "vex") (set_attr "mode" "")]) (define_insn "vec_set_hi_" [(set (match_operand:VI4F_256 0 "register_operand" "=v") (vec_concat:VI4F_256 (vec_select: (match_operand:VI4F_256 1 "register_operand" "v") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)])) (match_operand: 2 "nonimmediate_operand" "vm")))] "TARGET_AVX" { if (TARGET_AVX512VL) return "vinsert32x4\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"; else return "vinsert\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"; } [(set_attr "type" "sselog") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "vex") (set_attr "mode" "")]) (define_insn "vec_set_lo_" [(set (match_operand:V16_256 0 "register_operand" "=x,v") (vec_concat:V16_256 (match_operand: 2 "nonimmediate_operand" "xm,vm") (vec_select: (match_operand:V16_256 1 "register_operand" "x,v") (parallel [(const_int 8) (const_int 9) (const_int 10) (const_int 11) (const_int 12) (const_int 13) (const_int 14) (const_int 15)]))))] "TARGET_AVX" "@ vinsert%~128\t{$0x0, %2, %1, %0|%0, %1, %2, 
0x0} vinserti32x4\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}" [(set_attr "type" "sselog") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "vex,evex") (set_attr "mode" "OI")]) (define_insn "vec_set_hi_" [(set (match_operand:V16_256 0 "register_operand" "=x,v") (vec_concat:V16_256 (vec_select: (match_operand:V16_256 1 "register_operand" "x,v") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7)])) (match_operand: 2 "nonimmediate_operand" "xm,vm")))] "TARGET_AVX" "@ vinsert%~128\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1} vinserti32x4\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}" [(set_attr "type" "sselog") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "vex,evex") (set_attr "mode" "OI")]) (define_insn "vec_set_lo_v32qi" [(set (match_operand:V32QI 0 "register_operand" "=x,v") (vec_concat:V32QI (match_operand:V16QI 2 "nonimmediate_operand" "xm,v") (vec_select:V16QI (match_operand:V32QI 1 "register_operand" "x,v") (parallel [(const_int 16) (const_int 17) (const_int 18) (const_int 19) (const_int 20) (const_int 21) (const_int 22) (const_int 23) (const_int 24) (const_int 25) (const_int 26) (const_int 27) (const_int 28) (const_int 29) (const_int 30) (const_int 31)]))))] "TARGET_AVX" "@ vinsert%~128\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0} vinserti32x4\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}" [(set_attr "type" "sselog") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "vex,evex") (set_attr "mode" "OI")]) (define_insn "vec_set_hi_v32qi" [(set (match_operand:V32QI 0 "register_operand" "=x,v") (vec_concat:V32QI (vec_select:V16QI (match_operand:V32QI 1 "register_operand" "x,v") (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3) (const_int 4) (const_int 5) (const_int 6) (const_int 7) (const_int 8) (const_int 9) (const_int 10) (const_int 11) (const_int 12) (const_int 13) (const_int 14) (const_int 15)])) (match_operand:V16QI 
2 "nonimmediate_operand" "xm,vm")))] "TARGET_AVX" "@ vinsert%~128\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1} vinserti32x4\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}" [(set_attr "type" "sselog") (set_attr "prefix_extra" "1") (set_attr "length_immediate" "1") (set_attr "prefix" "vex,evex") (set_attr "mode" "OI")]) (define_insn "_maskload" [(set (match_operand:V48_AVX2 0 "register_operand" "=x") (unspec:V48_AVX2 [(match_operand: 2 "register_operand" "x") (match_operand:V48_AVX2 1 "memory_operand" "m")] UNSPEC_MASKMOV))] "TARGET_AVX" "vmaskmov\t{%1, %2, %0|%0, %2, %1}" [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "prefix" "vex") (set_attr "btver2_decode" "vector") (set_attr "mode" "")]) (define_insn "_maskstore" [(set (match_operand:V48_AVX2 0 "memory_operand" "+m") (unspec:V48_AVX2 [(match_operand: 1 "register_operand" "x") (match_operand:V48_AVX2 2 "register_operand" "x") (match_dup 0)] UNSPEC_MASKMOV))] "TARGET_AVX" "vmaskmov\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog1") (set_attr "prefix_extra" "1") (set_attr "prefix" "vex") (set_attr "btver2_decode" "vector") (set_attr "mode" "")]) (define_expand "maskload" [(set (match_operand:V48_AVX2 0 "register_operand") (unspec:V48_AVX2 [(match_operand: 2 "register_operand") (match_operand:V48_AVX2 1 "memory_operand")] UNSPEC_MASKMOV))] "TARGET_AVX") (define_expand "maskload" [(set (match_operand:V48H_AVX512VL 0 "register_operand") (vec_merge:V48H_AVX512VL (match_operand:V48H_AVX512VL 1 "memory_operand") (match_dup 0) (match_operand: 2 "register_operand")))] "TARGET_AVX512F") (define_expand "maskload" [(set (match_operand:VI12_AVX512VL 0 "register_operand") (vec_merge:VI12_AVX512VL (match_operand:VI12_AVX512VL 1 "memory_operand") (match_dup 0) (match_operand: 2 "register_operand")))] "TARGET_AVX512BW") (define_expand "maskstore" [(set (match_operand:V48_AVX2 0 "memory_operand") (unspec:V48_AVX2 [(match_operand: 2 "register_operand") (match_operand:V48_AVX2 1 "register_operand") (match_dup 0)] 
UNSPEC_MASKMOV))] "TARGET_AVX") (define_expand "maskstore" [(set (match_operand:V48H_AVX512VL 0 "memory_operand") (vec_merge:V48H_AVX512VL (match_operand:V48H_AVX512VL 1 "register_operand") (match_dup 0) (match_operand: 2 "register_operand")))] "TARGET_AVX512F") (define_expand "maskstore" [(set (match_operand:VI12_AVX512VL 0 "memory_operand") (vec_merge:VI12_AVX512VL (match_operand:VI12_AVX512VL 1 "register_operand") (match_dup 0) (match_operand: 2 "register_operand")))] "TARGET_AVX512BW") (define_expand "cbranch4" [(set (reg:CC FLAGS_REG) (compare:CC (match_operand:VI48_AVX 1 "register_operand") (match_operand:VI48_AVX 2 "nonimmediate_operand"))) (set (pc) (if_then_else (match_operator 0 "bt_comparison_operator" [(reg:CC FLAGS_REG) (const_int 0)]) (label_ref (match_operand 3)) (pc)))] "TARGET_SSE4_1" { ix86_expand_branch (GET_CODE (operands[0]), operands[1], operands[2], operands[3]); DONE; }) (define_insn_and_split "avx__" [(set (match_operand:AVX256MODE2P 0 "nonimmediate_operand" "=x,m") (vec_concat:AVX256MODE2P (match_operand: 1 "nonimmediate_operand" "xm,x") (unspec: [(const_int 0)] UNSPEC_CAST)))] "TARGET_AVX && !(MEM_P (operands[0]) && MEM_P (operands[1]))" "#" "&& reload_completed" [(set (match_dup 0) (match_dup 1))] { if (REG_P (operands[0])) operands[0] = gen_lowpart (mode, operands[0]); else operands[1] = lowpart_subreg (mode, operands[1], mode); }) ;; Modes handled by vec_init expanders. (define_mode_iterator VEC_INIT_MODE [(V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI (V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI (V32HF "TARGET_AVX512F") (V16HF "TARGET_AVX") V8HF (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2") (V4TI "TARGET_AVX512F") (V2TI "TARGET_AVX")]) ;; Likewise, but for initialization from half sized vectors. ;; Thus, these are all VEC_INIT_MODE modes except V2??. 
(define_mode_iterator VEC_INIT_HALF_MODE [(V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI (V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") (V32HF "TARGET_AVX512F") (V16HF "TARGET_AVX") V8HF (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") (V4TI "TARGET_AVX512F")]) (define_expand "vec_init" [(match_operand:VEC_INIT_MODE 0 "register_operand") (match_operand 1)] "TARGET_SSE" { ix86_expand_vector_init (false, operands[0], operands[1]); DONE; }) (define_expand "vec_init" [(match_operand:VEC_INIT_HALF_MODE 0 "register_operand") (match_operand 1)] "TARGET_SSE" { ix86_expand_vector_init (false, operands[0], operands[1]); DONE; }) (define_expand "cond_" [(set (match_operand:VI248_AVX512VLBW 0 "register_operand") (vec_merge:VI248_AVX512VLBW (any_shift:VI248_AVX512VLBW (match_operand:VI248_AVX512VLBW 2 "register_operand") (match_operand:VI248_AVX512VLBW 3 "nonimmediate_or_const_vec_dup_operand")) (match_operand:VI248_AVX512VLBW 4 "nonimm_or_0_operand") (match_operand: 1 "register_operand")))] "TARGET_AVX512F" { if (const_vec_duplicate_p (operands[3])) { operands[3] = unwrap_const_vec_duplicate (operands[3]); operands[3] = lowpart_subreg (DImode, operands[3], mode); emit_insn (gen_3_mask (operands[0], operands[2], operands[3], operands[4], operands[1])); } else emit_insn (gen__v_mask (operands[0], operands[2], operands[3], operands[4], operands[1])); DONE; }) (define_insn "_ashrv" [(set (match_operand:VI48_AVX512F_AVX512VL 0 "register_operand" "=v") (ashiftrt:VI48_AVX512F_AVX512VL (match_operand:VI48_AVX512F_AVX512VL 1 "register_operand" "v") (match_operand:VI48_AVX512F_AVX512VL 2 "nonimmediate_operand" "vm")))] "TARGET_AVX2 && " "vpsrav\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sseishft") (set_attr "prefix" "maybe_evex") (set_attr "mode" "")]) (define_insn "_ashrv" [(set (match_operand:VI2_AVX512VL 0 "register_operand" "=v") 
(ashiftrt:VI2_AVX512VL (match_operand:VI2_AVX512VL 1 "register_operand" "v") (match_operand:VI2_AVX512VL 2 "nonimmediate_operand" "vm")))] "TARGET_AVX512BW" "vpsravw\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sseishft") (set_attr "prefix" "maybe_evex") (set_attr "mode" "")]) (define_insn "_v" [(set (match_operand:VI48_AVX512F 0 "register_operand" "=v") (any_lshift:VI48_AVX512F (match_operand:VI48_AVX512F 1 "register_operand" "v") (match_operand:VI48_AVX512F 2 "nonimmediate_operand" "vm")))] "TARGET_AVX2 && " "vpv\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sseishft") (set_attr "prefix" "maybe_evex") (set_attr "mode" "")]) (define_insn "_v" [(set (match_operand:VI2_AVX512VL 0 "register_operand" "=v") (any_lshift:VI2_AVX512VL (match_operand:VI2_AVX512VL 1 "register_operand" "v") (match_operand:VI2_AVX512VL 2 "nonimmediate_operand" "vm")))] "TARGET_AVX512BW" "vpv\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sseishft") (set_attr "prefix" "maybe_evex") (set_attr "mode" "")]) (define_insn "avx_vec_concat" [(set (match_operand:V_256_512 0 "register_operand" "=x,v,x,Yv") (vec_concat:V_256_512 (match_operand: 1 "nonimmediate_operand" "x,v,xm,vm") (match_operand: 2 "nonimm_or_0_operand" "xm,vm,C,C")))] "TARGET_AVX && (operands[2] == CONST0_RTX (mode) || !MEM_P (operands[1]))" { switch (which_alternative) { case 0: return "vinsert\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"; case 1: if ( == 64) { if (TARGET_AVX512DQ && GET_MODE_SIZE (mode) == 4) return "vinsert32x8\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"; else return "vinsert64x4\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"; } else { if (TARGET_AVX512DQ && GET_MODE_SIZE (mode) == 8) return "vinsert64x2\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"; else return "vinsert32x4\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"; } case 2: case 3: switch (get_attr_mode (insn)) { case MODE_V16SF: if (misaligned_operand (operands[1], mode)) return "vmovups\t{%1, %t0|%t0, %1}"; else return "vmovaps\t{%1, %t0|%t0, %1}"; case MODE_V8DF: if (misaligned_operand (operands[1], 
mode)) return "vmovupd\t{%1, %t0|%t0, %1}"; else return "vmovapd\t{%1, %t0|%t0, %1}"; case MODE_V8SF: if (misaligned_operand (operands[1], mode)) return "vmovups\t{%1, %x0|%x0, %1}"; else return "vmovaps\t{%1, %x0|%x0, %1}"; case MODE_V4DF: if (misaligned_operand (operands[1], mode)) return "vmovupd\t{%1, %x0|%x0, %1}"; else return "vmovapd\t{%1, %x0|%x0, %1}"; case MODE_XI: if (misaligned_operand (operands[1], mode)) { if (which_alternative == 2) return "vmovdqu\t{%1, %t0|%t0, %1}"; else if (GET_MODE_SIZE (mode) == 8) return "vmovdqu64\t{%1, %t0|%t0, %1}"; else return "vmovdqu32\t{%1, %t0|%t0, %1}"; } else { if (which_alternative == 2) return "vmovdqa\t{%1, %t0|%t0, %1}"; else if (GET_MODE_SIZE (mode) == 8) return "vmovdqa64\t{%1, %t0|%t0, %1}"; else return "vmovdqa32\t{%1, %t0|%t0, %1}"; } case MODE_OI: if (misaligned_operand (operands[1], mode)) { if (which_alternative == 2) return "vmovdqu\t{%1, %x0|%x0, %1}"; else if (GET_MODE_SIZE (mode) == 8) return "vmovdqu64\t{%1, %x0|%x0, %1}"; else return "vmovdqu32\t{%1, %x0|%x0, %1}"; } else { if (which_alternative == 2) return "vmovdqa\t{%1, %x0|%x0, %1}"; else if (GET_MODE_SIZE (mode) == 8) return "vmovdqa64\t{%1, %x0|%x0, %1}"; else return "vmovdqa32\t{%1, %x0|%x0, %1}"; } default: gcc_unreachable (); } default: gcc_unreachable (); } } [(set_attr "type" "sselog,sselog,ssemov,ssemov") (set_attr "prefix_extra" "1,1,*,*") (set_attr "length_immediate" "1,1,*,*") (set_attr "prefix" "maybe_evex") (set_attr "mode" "")]) (define_insn_and_split "*vec_concat_0_1" [(set (match_operand:V 0 "register_operand") (vec_select:V (vec_concat: (match_operand:V 1 "nonimmediate_operand") (match_operand:V 2 "const0_operand")) (match_parallel 3 "movq_parallel" [(match_operand 4 "const_int_operand")])))] "TARGET_SSE2 && ix86_pre_reload_split ()" "#" "&& 1" [(set (match_dup 0) (vec_concat:V (match_dup 1) (match_dup 5)))] { operands[1] = gen_lowpart (mode, operands[1]); operands[5] = CONST0_RTX (mode); }) (define_insn "vcvtph2ps" [(set 
(match_operand:V4SF 0 "register_operand" "=v") (vec_select:V4SF (unspec:V8SF [(match_operand:V8HI 1 "register_operand" "v")] UNSPEC_VCVTPH2PS) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)])))] "TARGET_F16C || TARGET_AVX512VL" "vcvtph2ps\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "maybe_evex") (set_attr "mode" "V4SF")]) (define_insn "*vcvtph2ps_load" [(set (match_operand:V4SF 0 "register_operand" "=v") (unspec:V4SF [(match_operand:V4HI 1 "memory_operand" "m")] UNSPEC_VCVTPH2PS))] "TARGET_F16C || TARGET_AVX512VL" "vcvtph2ps\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "vex") (set_attr "mode" "V8SF")]) (define_insn "vcvtph2ps256" [(set (match_operand:V8SF 0 "register_operand" "=v") (unspec:V8SF [(match_operand:V8HI 1 "nonimmediate_operand" "vm")] UNSPEC_VCVTPH2PS))] "TARGET_F16C || TARGET_AVX512VL" "vcvtph2ps\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "vex") (set_attr "btver2_decode" "double") (set_attr "mode" "V8SF")]) (define_insn "avx512f_vcvtph2ps512" [(set (match_operand:V16SF 0 "register_operand" "=v") (unspec:V16SF [(match_operand:V16HI 1 "" "")] UNSPEC_VCVTPH2PS))] "TARGET_AVX512F" "vcvtph2ps\t{%1, %0|%0, %1}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "V16SF")]) (define_expand "vcvtps2ph_mask" [(set (match_operand:V8HI 0 "register_operand") (vec_merge:V8HI (vec_concat:V8HI (unspec:V4HI [(match_operand:V4SF 1 "register_operand") (match_operand:SI 2 "const_0_to_255_operand")] UNSPEC_VCVTPS2PH) (match_dup 5)) (match_operand:V8HI 3 "nonimm_or_0_operand") (match_operand:QI 4 "register_operand")))] "TARGET_AVX512VL" "operands[5] = CONST0_RTX (V4HImode);") (define_expand "vcvtps2ph" [(set (match_operand:V8HI 0 "register_operand") (vec_concat:V8HI (unspec:V4HI [(match_operand:V4SF 1 "register_operand") (match_operand:SI 2 "const_0_to_255_operand")] UNSPEC_VCVTPS2PH) (match_dup 3)))] "TARGET_F16C" "operands[3] = CONST0_RTX (V4HImode);") (define_insn 
"*vcvtps2ph" [(set (match_operand:V8HI 0 "register_operand" "=v") (vec_concat:V8HI (unspec:V4HI [(match_operand:V4SF 1 "register_operand" "v") (match_operand:SI 2 "const_0_to_255_operand" "N")] UNSPEC_VCVTPS2PH) (match_operand:V4HI 3 "const0_operand")))] "(TARGET_F16C || TARGET_AVX512VL) && " "vcvtps2ph\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "ssecvt") (set_attr "prefix" "maybe_evex") (set_attr "mode" "V4SF")]) (define_insn "*vcvtps2ph_store" [(set (match_operand:V4HI 0 "memory_operand" "=m") (unspec:V4HI [(match_operand:V4SF 1 "register_operand" "v") (match_operand:SI 2 "const_0_to_255_operand" "N")] UNSPEC_VCVTPS2PH))] "TARGET_F16C || TARGET_AVX512VL" "vcvtps2ph\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "ssecvt") (set_attr "prefix" "maybe_evex") (set_attr "mode" "V4SF")]) (define_insn "vcvtps2ph256" [(set (match_operand:V8HI 0 "register_operand" "=v") (unspec:V8HI [(match_operand:V8SF 1 "register_operand" "v") (match_operand:SI 2 "const_0_to_255_operand" "N")] UNSPEC_VCVTPS2PH))] "TARGET_F16C || TARGET_AVX512VL" "vcvtps2ph\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "ssecvt") (set_attr "prefix" "maybe_evex") (set_attr "btver2_decode" "vector") (set_attr "mode" "V8SF")]) (define_insn "*vcvtps2ph256" [(set (match_operand:V8HI 0 "memory_operand" "=m") (unspec:V8HI [(match_operand:V8SF 1 "register_operand" "v") (match_operand:SI 2 "const_0_to_255_operand" "N")] UNSPEC_VCVTPS2PH))] "TARGET_F16C || TARGET_AVX512VL" "vcvtps2ph\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "ssecvt") (set_attr "prefix" "maybe_evex") (set_attr "btver2_decode" "vector") (set_attr "mode" "V8SF")]) (define_insn "avx512f_vcvtps2ph512" [(set (match_operand:V16HI 0 "register_operand" "=v") (unspec:V16HI [(match_operand:V16SF 1 "register_operand" "v") (match_operand:SI 2 "const_0_to_255_operand" "N")] UNSPEC_VCVTPS2PH))] "TARGET_AVX512F" "vcvtps2ph\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "V16SF")]) (define_insn "*avx512f_vcvtps2ph512" 
[(set (match_operand:V16HI 0 "memory_operand" "=m") (unspec:V16HI [(match_operand:V16SF 1 "register_operand" "v") (match_operand:SI 2 "const_0_to_255_operand" "N")] UNSPEC_VCVTPS2PH))] "TARGET_AVX512F" "vcvtps2ph\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "ssecvt") (set_attr "prefix" "evex") (set_attr "mode" "V16SF")]) ;; For gather* insn patterns (define_mode_iterator VEC_GATHER_MODE [V2DI V2DF V4DI V4DF V4SI V4SF V8SI V8SF]) (define_mode_attr VEC_GATHER_IDXSI [(V2DI "V4SI") (V4DI "V4SI") (V8DI "V8SI") (V2DF "V4SI") (V4DF "V4SI") (V8DF "V8SI") (V4SI "V4SI") (V8SI "V8SI") (V16SI "V16SI") (V4SF "V4SI") (V8SF "V8SI") (V16SF "V16SI")]) (define_mode_attr VEC_GATHER_IDXDI [(V2DI "V2DI") (V4DI "V4DI") (V8DI "V8DI") (V2DF "V2DI") (V4DF "V4DI") (V8DF "V8DI") (V4SI "V2DI") (V8SI "V4DI") (V16SI "V8DI") (V4SF "V2DI") (V8SF "V4DI") (V16SF "V8DI")]) (define_mode_attr VEC_GATHER_SRCDI [(V2DI "V2DI") (V4DI "V4DI") (V8DI "V8DI") (V2DF "V2DF") (V4DF "V4DF") (V8DF "V8DF") (V4SI "V4SI") (V8SI "V4SI") (V16SI "V8SI") (V4SF "V4SF") (V8SF "V4SF") (V16SF "V8SF")]) (define_expand "avx2_gathersi" [(parallel [(set (match_operand:VEC_GATHER_MODE 0 "register_operand") (unspec:VEC_GATHER_MODE [(match_operand:VEC_GATHER_MODE 1 "register_operand") (mem: (match_par_dup 6 [(match_operand 2 "vsib_address_operand") (match_operand: 3 "register_operand") (match_operand:SI 5 "const1248_operand ")])) (mem:BLK (scratch)) (match_operand:VEC_GATHER_MODE 4 "register_operand")] UNSPEC_GATHER)) (clobber (match_scratch:VEC_GATHER_MODE 7))])] "TARGET_AVX2" { operands[6] = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, operands[2], operands[3], operands[5]), UNSPEC_VSIBADDR); }) (define_insn "*avx2_gathersi" [(set (match_operand:VEC_GATHER_MODE 0 "register_operand" "=&x") (unspec:VEC_GATHER_MODE [(match_operand:VEC_GATHER_MODE 2 "register_operand" "0") (match_operator: 7 "vsib_mem_operator" [(unspec:P [(match_operand:P 3 "vsib_address_operand" "Tv") (match_operand: 4 "register_operand" "x") (match_operand:SI 6 
;; NOTE(review): AVX2 VPGATHERD*/VPGATHERQ* define_insns ("*avx2_gathersi",
;; "*avx2_gathersi_2", the "avx2_gatherdi" expander and its "_2"/"_3"/"_4"
;; insn variants).  Each pattern wraps the VSIB address in UNSPEC_VSIBADDR,
;; clobbers the mask operand, and uses earlyclobber "=&x" on the destination
;; as the gather instructions require.  Angle-bracket mode/iterator
;; substitutions appear stripped by extraction (empty insn-name suffixes,
;; (set_attr "mode" "") etc.) -- verify against upstream.  Text kept
;; byte-identical.
"const1248_operand" "n")] UNSPEC_VSIBADDR)]) (mem:BLK (scratch)) (match_operand:VEC_GATHER_MODE 5 "register_operand" "1")] UNSPEC_GATHER)) (clobber (match_scratch:VEC_GATHER_MODE 1 "=&x"))] "TARGET_AVX2" "%M3vgatherd\t{%1, %7, %0|%0, %7, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "vex") (set_attr "mode" "")]) (define_insn "*avx2_gathersi_2" [(set (match_operand:VEC_GATHER_MODE 0 "register_operand" "=&x") (unspec:VEC_GATHER_MODE [(pc) (match_operator: 6 "vsib_mem_operator" [(unspec:P [(match_operand:P 2 "vsib_address_operand" "Tv") (match_operand: 3 "register_operand" "x") (match_operand:SI 5 "const1248_operand" "n")] UNSPEC_VSIBADDR)]) (mem:BLK (scratch)) (match_operand:VEC_GATHER_MODE 4 "register_operand" "1")] UNSPEC_GATHER)) (clobber (match_scratch:VEC_GATHER_MODE 1 "=&x"))] "TARGET_AVX2" "%M2vgatherd\t{%1, %6, %0|%0, %6, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "vex") (set_attr "mode" "")]) (define_expand "avx2_gatherdi" [(parallel [(set (match_operand:VEC_GATHER_MODE 0 "register_operand") (unspec:VEC_GATHER_MODE [(match_operand: 1 "register_operand") (mem: (match_par_dup 6 [(match_operand 2 "vsib_address_operand") (match_operand: 3 "register_operand") (match_operand:SI 5 "const1248_operand ")])) (mem:BLK (scratch)) (match_operand: 4 "register_operand")] UNSPEC_GATHER)) (clobber (match_scratch:VEC_GATHER_MODE 7))])] "TARGET_AVX2" { operands[6] = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, operands[2], operands[3], operands[5]), UNSPEC_VSIBADDR); }) (define_insn "*avx2_gatherdi" [(set (match_operand:VEC_GATHER_MODE 0 "register_operand" "=&x") (unspec:VEC_GATHER_MODE [(match_operand: 2 "register_operand" "0") (match_operator: 7 "vsib_mem_operator" [(unspec:P [(match_operand:P 3 "vsib_address_operand" "Tv") (match_operand: 4 "register_operand" "x") (match_operand:SI 6 "const1248_operand" "n")] UNSPEC_VSIBADDR)]) (mem:BLK (scratch)) (match_operand: 5 "register_operand" "1")] UNSPEC_GATHER)) (clobber (match_scratch:VEC_GATHER_MODE 1 "=&x"))] 
"TARGET_AVX2" "%M3vgatherq\t{%5, %7, %2|%2, %7, %5}" [(set_attr "type" "ssemov") (set_attr "prefix" "vex") (set_attr "mode" "")]) (define_insn "*avx2_gatherdi_2" [(set (match_operand:VEC_GATHER_MODE 0 "register_operand" "=&x") (unspec:VEC_GATHER_MODE [(pc) (match_operator: 6 "vsib_mem_operator" [(unspec:P [(match_operand:P 2 "vsib_address_operand" "Tv") (match_operand: 3 "register_operand" "x") (match_operand:SI 5 "const1248_operand" "n")] UNSPEC_VSIBADDR)]) (mem:BLK (scratch)) (match_operand: 4 "register_operand" "1")] UNSPEC_GATHER)) (clobber (match_scratch:VEC_GATHER_MODE 1 "=&x"))] "TARGET_AVX2" { if (mode != mode) return "%M2vgatherq\t{%4, %6, %x0|%x0, %6, %4}"; return "%M2vgatherq\t{%4, %6, %0|%0, %6, %4}"; } [(set_attr "type" "ssemov") (set_attr "prefix" "vex") (set_attr "mode" "")]) (define_insn "*avx2_gatherdi_3" [(set (match_operand: 0 "register_operand" "=&x") (vec_select: (unspec:VI4F_256 [(match_operand: 2 "register_operand" "0") (match_operator: 7 "vsib_mem_operator" [(unspec:P [(match_operand:P 3 "vsib_address_operand" "Tv") (match_operand: 4 "register_operand" "x") (match_operand:SI 6 "const1248_operand" "n")] UNSPEC_VSIBADDR)]) (mem:BLK (scratch)) (match_operand: 5 "register_operand" "1")] UNSPEC_GATHER) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)]))) (clobber (match_scratch:VI4F_256 1 "=&x"))] "TARGET_AVX2" "%M3vgatherq\t{%5, %7, %0|%0, %7, %5}" [(set_attr "type" "ssemov") (set_attr "prefix" "vex") (set_attr "mode" "")]) (define_insn "*avx2_gatherdi_4" [(set (match_operand: 0 "register_operand" "=&x") (vec_select: (unspec:VI4F_256 [(pc) (match_operator: 6 "vsib_mem_operator" [(unspec:P [(match_operand:P 2 "vsib_address_operand" "Tv") (match_operand: 3 "register_operand" "x") (match_operand:SI 5 "const1248_operand" "n")] UNSPEC_VSIBADDR)]) (mem:BLK (scratch)) (match_operand: 4 "register_operand" "1")] UNSPEC_GATHER) (parallel [(const_int 0) (const_int 1) (const_int 2) (const_int 3)]))) (clobber (match_scratch:VI4F_256 1 
;; NOTE(review): AVX-512F masked gather/scatter patterns and
;; compress/compressstore.  Gathers use a mask register ("Yk") that the
;; instruction zeroes, modeled as a clobbered match_scratch; scatters fold
;; the mask into the UNSPEC_VSIBADDR address unspec.  The %X operand
;; modifier comments explain suppressing "*WORD PTR" for -masm=intel due to
;; an incompatible gas change.  Angle-bracket substitutions (mask modes,
;; <avx512>/<ssemodesuffix> name pieces, (set_attr "mode" "")) appear
;; stripped by extraction -- verify against upstream.  Text kept
;; byte-identical.
"=&x"))] "TARGET_AVX2" "%M2vgatherq\t{%4, %6, %0|%0, %6, %4}" [(set_attr "type" "ssemov") (set_attr "prefix" "vex") (set_attr "mode" "")]) (define_expand "_gathersi" [(parallel [(set (match_operand:VI48F 0 "register_operand") (unspec:VI48F [(match_operand:VI48F 1 "register_operand") (match_operand: 4 "register_operand") (mem: (match_par_dup 6 [(match_operand 2 "vsib_address_operand") (match_operand: 3 "register_operand") (match_operand:SI 5 "const1248_operand")]))] UNSPEC_GATHER)) (clobber (match_scratch: 7))])] "TARGET_AVX512F" { operands[6] = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, operands[2], operands[3], operands[5]), UNSPEC_VSIBADDR); }) (define_insn "*avx512f_gathersi" [(set (match_operand:VI48F 0 "register_operand" "=&v") (unspec:VI48F [(match_operand:VI48F 1 "register_operand" "0") (match_operand: 7 "register_operand" "2") (match_operator: 6 "vsib_mem_operator" [(unspec:P [(match_operand:P 4 "vsib_address_operand" "Tv") (match_operand: 3 "register_operand" "v") (match_operand:SI 5 "const1248_operand" "n")] UNSPEC_VSIBADDR)])] UNSPEC_GATHER)) (clobber (match_scratch: 2 "=&Yk"))] "TARGET_AVX512F" ;; %X6 so that we don't emit any *WORD PTR for -masm=intel, as ;; gas changed what it requires incompatibly. "%M4vgatherd\t{%6, %0%{%2%}|%0%{%2%}, %X6}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "*avx512f_gathersi_2" [(set (match_operand:VI48F 0 "register_operand" "=&v") (unspec:VI48F [(pc) (match_operand: 6 "register_operand" "1") (match_operator: 5 "vsib_mem_operator" [(unspec:P [(match_operand:P 3 "vsib_address_operand" "Tv") (match_operand: 2 "register_operand" "v") (match_operand:SI 4 "const1248_operand" "n")] UNSPEC_VSIBADDR)])] UNSPEC_GATHER)) (clobber (match_scratch: 1 "=&Yk"))] "TARGET_AVX512F" ;; %X5 so that we don't emit any *WORD PTR for -masm=intel, as ;; gas changed what it requires incompatibly. 
"%M3vgatherd\t{%5, %0%{%1%}|%0%{%1%}, %X5}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "_gatherdi" [(parallel [(set (match_operand:VI48F 0 "register_operand") (unspec:VI48F [(match_operand: 1 "register_operand") (match_operand:QI 4 "register_operand") (mem: (match_par_dup 6 [(match_operand 2 "vsib_address_operand") (match_operand: 3 "register_operand") (match_operand:SI 5 "const1248_operand")]))] UNSPEC_GATHER)) (clobber (match_scratch:QI 7))])] "TARGET_AVX512F" { operands[6] = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, operands[2], operands[3], operands[5]), UNSPEC_VSIBADDR); }) (define_insn "*avx512f_gatherdi" [(set (match_operand:VI48F 0 "register_operand" "=&v") (unspec:VI48F [(match_operand: 1 "register_operand" "0") (match_operand:QI 7 "register_operand" "2") (match_operator: 6 "vsib_mem_operator" [(unspec:P [(match_operand:P 4 "vsib_address_operand" "Tv") (match_operand: 3 "register_operand" "v") (match_operand:SI 5 "const1248_operand" "n")] UNSPEC_VSIBADDR)])] UNSPEC_GATHER)) (clobber (match_scratch:QI 2 "=&Yk"))] "TARGET_AVX512F" ;; %X6 so that we don't emit any *WORD PTR for -masm=intel, as ;; gas changed what it requires incompatibly. "%M4vgatherq\t{%6, %1%{%2%}|%1%{%2%}, %X6}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "*avx512f_gatherdi_2" [(set (match_operand:VI48F 0 "register_operand" "=&v") (unspec:VI48F [(pc) (match_operand:QI 6 "register_operand" "1") (match_operator: 5 "vsib_mem_operator" [(unspec:P [(match_operand:P 3 "vsib_address_operand" "Tv") (match_operand: 2 "register_operand" "v") (match_operand:SI 4 "const1248_operand" "n")] UNSPEC_VSIBADDR)])] UNSPEC_GATHER)) (clobber (match_scratch:QI 1 "=&Yk"))] "TARGET_AVX512F" { /* %X5 so that we don't emit any *WORD PTR for -masm=intel, as gas changed what it requires incompatibly. 
*/ if (mode != mode) { if ( != 64) return "%M3vgatherq\t{%5, %x0%{%1%}|%x0%{%1%}, %X5}"; else return "%M3vgatherq\t{%5, %t0%{%1%}|%t0%{%1%}, %X5}"; } return "%M3vgatherq\t{%5, %0%{%1%}|%0%{%1%}, %X5}"; } [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "_scattersi" [(parallel [(set (mem:VI48F (match_par_dup 5 [(match_operand 0 "vsib_address_operand") (match_operand: 2 "register_operand") (match_operand:SI 4 "const1248_operand")])) (unspec:VI48F [(match_operand: 1 "register_operand") (match_operand:VI48F 3 "register_operand")] UNSPEC_SCATTER)) (clobber (match_scratch: 6))])] "TARGET_AVX512F" { operands[5] = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, operands[0], operands[2], operands[4], operands[1]), UNSPEC_VSIBADDR); }) (define_insn "*avx512f_scattersi" [(set (match_operator:VI48F 5 "vsib_mem_operator" [(unspec:P [(match_operand:P 0 "vsib_address_operand" "Tv") (match_operand: 2 "register_operand" "v") (match_operand:SI 4 "const1248_operand" "n") (match_operand: 6 "register_operand" "1")] UNSPEC_VSIBADDR)]) (unspec:VI48F [(match_dup 6) (match_operand:VI48F 3 "register_operand" "v")] UNSPEC_SCATTER)) (clobber (match_scratch: 1 "=&Yk"))] "TARGET_AVX512F" ;; %X5 so that we don't emit any *WORD PTR for -masm=intel, as ;; gas changed what it requires incompatibly. 
"%M0vscatterd\t{%3, %5%{%1%}|%X5%{%1%}, %3}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_expand "_scatterdi" [(parallel [(set (mem:VI48F (match_par_dup 5 [(match_operand 0 "vsib_address_operand") (match_operand: 2 "register_operand") (match_operand:SI 4 "const1248_operand")])) (unspec:VI48F [(match_operand:QI 1 "register_operand") (match_operand: 3 "register_operand")] UNSPEC_SCATTER)) (clobber (match_scratch:QI 6))])] "TARGET_AVX512F" { operands[5] = gen_rtx_UNSPEC (Pmode, gen_rtvec (4, operands[0], operands[2], operands[4], operands[1]), UNSPEC_VSIBADDR); }) (define_insn "*avx512f_scatterdi" [(set (match_operator:VI48F 5 "vsib_mem_operator" [(unspec:P [(match_operand:P 0 "vsib_address_operand" "Tv") (match_operand: 2 "register_operand" "v") (match_operand:SI 4 "const1248_operand" "n") (match_operand:QI 6 "register_operand" "1")] UNSPEC_VSIBADDR)]) (unspec:VI48F [(match_dup 6) (match_operand: 3 "register_operand" "v")] UNSPEC_SCATTER)) (clobber (match_scratch:QI 1 "=&Yk"))] "TARGET_AVX512F" ;; %X5 so that we don't emit any *WORD PTR for -masm=intel, as ;; gas changed what it requires incompatibly. 
"%M0vscatterq\t{%3, %5%{%1%}|%X5%{%1%}, %3}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "_compress_mask" [(set (match_operand:VI48F 0 "register_operand" "=v") (unspec:VI48F [(match_operand:VI48F 1 "register_operand" "v") (match_operand:VI48F 2 "nonimm_or_0_operand" "0C") (match_operand: 3 "register_operand" "Yk")] UNSPEC_COMPRESS))] "TARGET_AVX512F" "vcompress\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "compress_mask" [(set (match_operand:VI12_AVX512VLBW 0 "register_operand" "=v") (unspec:VI12_AVX512VLBW [(match_operand:VI12_AVX512VLBW 1 "register_operand" "v") (match_operand:VI12_AVX512VLBW 2 "nonimm_or_0_operand" "0C") (match_operand: 3 "register_operand" "Yk")] UNSPEC_COMPRESS))] "TARGET_AVX512VBMI2" "vpcompress\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "_compressstore_mask" [(set (match_operand:VI48F 0 "memory_operand" "=m") (unspec:VI48F [(match_operand:VI48F 1 "register_operand" "x") (match_dup 0) (match_operand: 2 "register_operand" "Yk")] UNSPEC_COMPRESS_STORE))] "TARGET_AVX512F" "vcompress\t{%1, %0%{%2%}|%0%{%2%}, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "memory" "store") (set_attr "mode" "")]) (define_insn "compressstore_mask" [(set (match_operand:VI12_AVX512VLBW 0 "memory_operand" "=m") (unspec:VI12_AVX512VLBW [(match_operand:VI12_AVX512VLBW 1 "register_operand" "x") (match_dup 0) (match_operand: 2 "register_operand" "Yk")] UNSPEC_COMPRESS_STORE))] "TARGET_AVX512VBMI2" "vpcompress\t{%1, %0%{%2%}|%0%{%2%}, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "memory" "store") (set_attr "mode" "")]) (define_expand "_expand_maskz" [(set (match_operand:VI48F 0 "register_operand") (unspec:VI48F [(match_operand:VI48F 1 "nonimmediate_operand") (match_operand:VI48F 2 "nonimm_or_0_operand") 
;; NOTE(review): VEXPAND/VPEXPAND masked expand patterns (including an
;; insn_and_split that turns constant-mask expands into masked loads or plain
;; moves), VRANGE, VFPCLASS, VGETMANT, VDBPSADBW, VPLZCNT (clz), VPCONFLICT,
;; and the first SHA1 message insns.  The TARGET_DEST_FALSE_DEP_FOR_GLC
;; branches emit "vxorps %x0,%x0,%x0" to break a false output dependency
;; before the real instruction.  Angle-bracket substitutions (iterator
;; suffixes, mask modes, CODE_FOR__load_mask etc.) appear stripped by
;; extraction -- verify against upstream.  Text kept byte-identical.
(match_operand: 3 "register_operand")] UNSPEC_EXPAND))] "TARGET_AVX512F" "operands[2] = CONST0_RTX (mode);") (define_insn "expand_mask" [(set (match_operand:VI48F 0 "register_operand" "=v,v") (unspec:VI48F [(match_operand:VI48F 1 "nonimmediate_operand" "v,m") (match_operand:VI48F 2 "nonimm_or_0_operand" "0C,0C") (match_operand: 3 "register_operand" "Yk,Yk")] UNSPEC_EXPAND))] "TARGET_AVX512F" "vexpand\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "memory" "none,load") (set_attr "mode" "")]) (define_insn "expand_mask" [(set (match_operand:VI12_AVX512VLBW 0 "register_operand" "=v,v") (unspec:VI12_AVX512VLBW [(match_operand:VI12_AVX512VLBW 1 "nonimmediate_operand" "v,m") (match_operand:VI12_AVX512VLBW 2 "nonimm_or_0_operand" "0C,0C") (match_operand: 3 "register_operand" "Yk,Yk")] UNSPEC_EXPAND))] "TARGET_AVX512VBMI2" "vexpand\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}" [(set_attr "type" "ssemov") (set_attr "prefix" "evex") (set_attr "memory" "none,load") (set_attr "mode" "")]) (define_insn_and_split "*expand_mask" [(set (match_operand:VI12_VI48F_AVX512VLBW 0 "register_operand") (unspec:VI12_VI48F_AVX512VLBW [(match_operand:VI12_VI48F_AVX512VLBW 1 "nonimmediate_operand") (match_operand:VI12_VI48F_AVX512VLBW 2 "nonimm_or_0_operand") (match_operand 3 "const_int_operand")] UNSPEC_EXPAND))] "ix86_pre_reload_split () && (TARGET_AVX512VBMI2 || GET_MODE_SIZE (GET_MODE_INNER (mode)) >= 4)" "#" "&& 1" [(const_int 0)] { unsigned HOST_WIDE_INT mask = INTVAL (operands[3]); bool has_zero = false; unsigned n = GET_MODE_NUNITS (mode), i; unsigned ones = 0; /* If all ones bits is in mask's lower part, get number of ones and assign it to ONES. */ for (i = 0; i != n; i++) { if ((mask & HOST_WIDE_INT_1U << i) && has_zero) break; /* Record first zero bit. 
*/ if (!(mask & HOST_WIDE_INT_1U << i) && !has_zero) { has_zero = true; ones = i; } } if (!has_zero) ones = n; if (i != n || (ones != 0 && ones != n)) { rtx reg = gen_reg_rtx (mode); emit_move_insn (reg, operands[3]); enum insn_code icode; if (i == n) /* For masks with all one bits in it's lower part, we can transform v{,p}expand* to vmovdq* with mask operand. */ icode = CODE_FOR__load_mask; else icode = CODE_FOR_expand_mask; emit_insn (GEN_FCN (icode) (operands[0], operands[1], operands[2], reg)); } else /* For ALL_MASK_ONES or CONST0_RTX mask, transform it to simple mov. */ emit_move_insn (operands[0], ones ? operands[1] : operands[2]); DONE; }) (define_expand "expand_maskz" [(set (match_operand:VI12_AVX512VLBW 0 "register_operand") (unspec:VI12_AVX512VLBW [(match_operand:VI12_AVX512VLBW 1 "nonimmediate_operand") (match_operand:VI12_AVX512VLBW 2 "nonimm_or_0_operand") (match_operand: 3 "register_operand")] UNSPEC_EXPAND))] "TARGET_AVX512VBMI2" "operands[2] = CONST0_RTX (mode);") (define_insn "avx512dq_rangep" [(set (match_operand:VF_AVX512VL 0 "register_operand" "=v") (unspec:VF_AVX512VL [(match_operand:VF_AVX512VL 1 "register_operand" "v") (match_operand:VF_AVX512VL 2 "" "") (match_operand:SI 3 "const_0_to_15_operand")] UNSPEC_RANGE))] "TARGET_AVX512DQ && " { if (TARGET_DEST_FALSE_DEP_FOR_GLC && && !reg_mentioned_p (operands[0], operands[1]) && !reg_mentioned_p (operands[0], operands[2])) output_asm_insn ("vxorps\t%x0, %x0, %x0", operands); return "vrange\t{%3, %2, %1, %0|%0, %1, %2, %3}"; } [(set_attr "type" "sse") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "avx512dq_ranges" [(set (match_operand:VF_128 0 "register_operand" "=v") (vec_merge:VF_128 (unspec:VF_128 [(match_operand:VF_128 1 "register_operand" "v") (match_operand:VF_128 2 "" "") (match_operand:SI 3 "const_0_to_15_operand")] UNSPEC_RANGE) (match_dup 1) (const_int 1)))] "TARGET_AVX512DQ" { if (TARGET_DEST_FALSE_DEP_FOR_GLC && && !reg_mentioned_p (operands[0], operands[1]) && 
!reg_mentioned_p (operands[0], operands[2])) output_asm_insn ("vxorps\t%x0, %x0, %x0", operands); return "vrange\t{%3, %2, %1, %0|%0, %1, %2, %3}"; } [(set_attr "type" "sse") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "avx512dq_fpclass" [(set (match_operand: 0 "register_operand" "=k") (unspec: [(match_operand:VFH_AVX512VL 1 "vector_operand" "vm") (match_operand 2 "const_0_to_255_operand" "n")] UNSPEC_FPCLASS))] "TARGET_AVX512DQ || VALID_AVX512FP16_REG_MODE(mode)" "vfpclass\t{%2, %1, %0|%0, %1, %2}"; [(set_attr "type" "sse") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "avx512dq_vmfpclass" [(set (match_operand: 0 "register_operand" "=k") (and: (unspec: [(match_operand:VFH_128 1 "nonimmediate_operand" "vm") (match_operand 2 "const_0_to_255_operand" "n")] UNSPEC_FPCLASS) (const_int 1)))] "TARGET_AVX512DQ || VALID_AVX512FP16_REG_MODE(mode)" "vfpclass\t{%2, %1, %0|%0, %1, %2}"; [(set_attr "type" "sse") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "_getmant" [(set (match_operand:VFH_AVX512VL 0 "register_operand" "=v") (unspec:VFH_AVX512VL [(match_operand:VFH_AVX512VL 1 "nonimmediate_operand" "") (match_operand:SI 2 "const_0_to_15_operand")] UNSPEC_GETMANT))] "TARGET_AVX512F" { if (TARGET_DEST_FALSE_DEP_FOR_GLC && && MEM_P (operands[1])) output_asm_insn ("vxorps\t%x0, %x0, %x0", operands); return "vgetmant\t{%2, %1, %0|%0, %1, %2}"; } [(set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "avx512f_vgetmant" [(set (match_operand:VFH_128 0 "register_operand" "=v") (vec_merge:VFH_128 (unspec:VFH_128 [(match_operand:VFH_128 1 "register_operand" "v") (match_operand:VFH_128 2 "" "") (match_operand:SI 3 "const_0_to_15_operand")] UNSPEC_GETMANT) (match_dup 1) (const_int 1)))] "TARGET_AVX512F" { if (TARGET_DEST_FALSE_DEP_FOR_GLC && && !reg_mentioned_p (operands[0], operands[1]) && !reg_mentioned_p (operands[0], operands[2])) output_asm_insn 
("vxorps\t%x0, %x0, %x0", operands); return "vgetmant\t{%3, %2, %1, %0|%0, %1, %2, %3}"; } [(set_attr "prefix" "evex") (set_attr "mode" "")]) ;; The correct representation for this is absolutely enormous, and ;; surely not generally useful. (define_insn "avx512bw_dbpsadbw" [(set (match_operand:VI2_AVX512VL 0 "register_operand" "=v") (unspec:VI2_AVX512VL [(match_operand: 1 "register_operand" "v") (match_operand: 2 "nonimmediate_operand" "vm") (match_operand:SI 3 "const_0_to_255_operand")] UNSPEC_DBPSADBW))] "TARGET_AVX512BW" "vdbpsadbw\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "type" "sselog1") (set_attr "length_immediate" "1") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "clz2" [(set (match_operand:VI48_AVX512VL 0 "register_operand" "=v") (clz:VI48_AVX512VL (match_operand:VI48_AVX512VL 1 "nonimmediate_operand" "vm")))] "TARGET_AVX512CD" "vplzcnt\t{%1, %0|%0, %1}" [(set_attr "type" "sse") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "conflict" [(set (match_operand:VI48_AVX512VL 0 "register_operand" "=v") (unspec:VI48_AVX512VL [(match_operand:VI48_AVX512VL 1 "nonimmediate_operand" "vm")] UNSPEC_CONFLICT))] "TARGET_AVX512CD" "vpconflict\t{%1, %0|%0, %1}" [(set_attr "type" "sse") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "sha1msg1" [(set (match_operand:V4SI 0 "register_operand" "=x") (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0") (match_operand:V4SI 2 "vector_operand" "xBm")] UNSPEC_SHA1MSG1))] "TARGET_SHA" "sha1msg1\t{%2, %0|%0, %2}" [(set_attr "type" "sselog1") (set_attr "mode" "TI")]) (define_insn "sha1msg2" [(set (match_operand:V4SI 0 "register_operand" "=x") (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0") (match_operand:V4SI 2 "vector_operand" "xBm")] UNSPEC_SHA1MSG2))] "TARGET_SHA" "sha1msg2\t{%2, %0|%0, %2}" [(set_attr "type" "sselog1") (set_attr "mode" "TI")]) (define_insn "sha1nexte" [(set (match_operand:V4SI 0 "register_operand" "=x") (unspec:V4SI [(match_operand:V4SI 1 
;; NOTE(review): SHA-NI insns (sha1nexte/sha1rnds4/sha256msg1/2,
;; sha256rnds2 with implicit %xmm0 via "Yz"), AVX-512 cast
;; insn_and_splits that reduce vec_concat-with-UNSPEC_CAST to a plain
;; lowpart move after reload, AVX512IFMA vpmadd52 expand/insn patterns
;; (note upstream's "vpamdd52" pattern-name typo is preserved here), the
;; AVX512VBMI vpmultishiftqb insn, and the IMOD4 (4x512-bit) move
;; patterns split into four 512-bit moves.  Angle-bracket substitutions
;; appear stripped by extraction -- verify against upstream.  Text kept
;; byte-identical.
"register_operand" "0") (match_operand:V4SI 2 "vector_operand" "xBm")] UNSPEC_SHA1NEXTE))] "TARGET_SHA" "sha1nexte\t{%2, %0|%0, %2}" [(set_attr "type" "sselog1") (set_attr "mode" "TI")]) (define_insn "sha1rnds4" [(set (match_operand:V4SI 0 "register_operand" "=x") (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0") (match_operand:V4SI 2 "vector_operand" "xBm") (match_operand:SI 3 "const_0_to_3_operand" "n")] UNSPEC_SHA1RNDS4))] "TARGET_SHA" "sha1rnds4\t{%3, %2, %0|%0, %2, %3}" [(set_attr "type" "sselog1") (set_attr "length_immediate" "1") (set_attr "mode" "TI")]) (define_insn "sha256msg1" [(set (match_operand:V4SI 0 "register_operand" "=x") (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0") (match_operand:V4SI 2 "vector_operand" "xBm")] UNSPEC_SHA256MSG1))] "TARGET_SHA" "sha256msg1\t{%2, %0|%0, %2}" [(set_attr "type" "sselog1") (set_attr "mode" "TI")]) (define_insn "sha256msg2" [(set (match_operand:V4SI 0 "register_operand" "=x") (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0") (match_operand:V4SI 2 "vector_operand" "xBm")] UNSPEC_SHA256MSG2))] "TARGET_SHA" "sha256msg2\t{%2, %0|%0, %2}" [(set_attr "type" "sselog1") (set_attr "mode" "TI")]) (define_insn "sha256rnds2" [(set (match_operand:V4SI 0 "register_operand" "=x") (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0") (match_operand:V4SI 2 "vector_operand" "xBm") (match_operand:V4SI 3 "register_operand" "Yz")] UNSPEC_SHA256RNDS2))] "TARGET_SHA" "sha256rnds2\t{%3, %2, %0|%0, %2, %3}" [(set_attr "type" "sselog1") (set_attr "length_immediate" "1") (set_attr "mode" "TI")]) (define_insn_and_split "avx512f__" [(set (match_operand:AVX512MODE2P 0 "nonimmediate_operand" "=x,m") (vec_concat:AVX512MODE2P (vec_concat: (match_operand: 1 "nonimmediate_operand" "xm,x") (unspec: [(const_int 0)] UNSPEC_CAST)) (unspec: [(const_int 0)] UNSPEC_CAST)))] "TARGET_AVX512F && !(MEM_P (operands[0]) && MEM_P (operands[1]))" "#" "&& reload_completed" [(set (match_dup 0) (match_dup 1))] { if (REG_P 
(operands[0])) operands[0] = gen_lowpart (mode, operands[0]); else operands[1] = lowpart_subreg (mode, operands[1], mode); }) (define_insn_and_split "avx512f__256" [(set (match_operand:AVX512MODE2P 0 "nonimmediate_operand" "=x,m") (vec_concat:AVX512MODE2P (match_operand: 1 "nonimmediate_operand" "xm,x") (unspec: [(const_int 0)] UNSPEC_CAST)))] "TARGET_AVX512F && !(MEM_P (operands[0]) && MEM_P (operands[1]))" "#" "&& reload_completed" [(set (match_dup 0) (match_dup 1))] { if (REG_P (operands[0])) operands[0] = gen_lowpart (mode, operands[0]); else operands[1] = lowpart_subreg (mode, operands[1], mode); }) (define_int_iterator VPMADD52 [UNSPEC_VPMADD52LUQ UNSPEC_VPMADD52HUQ]) (define_int_attr vpmadd52type [(UNSPEC_VPMADD52LUQ "luq") (UNSPEC_VPMADD52HUQ "huq")]) (define_expand "vpamdd52huq_maskz" [(match_operand:VI8_AVX512VL 0 "register_operand") (match_operand:VI8_AVX512VL 1 "register_operand") (match_operand:VI8_AVX512VL 2 "register_operand") (match_operand:VI8_AVX512VL 3 "nonimmediate_operand") (match_operand: 4 "register_operand")] "TARGET_AVX512IFMA" { emit_insn (gen_vpamdd52huq_maskz_1 ( operands[0], operands[1], operands[2], operands[3], CONST0_RTX (mode), operands[4])); DONE; }) (define_expand "vpamdd52luq_maskz" [(match_operand:VI8_AVX512VL 0 "register_operand") (match_operand:VI8_AVX512VL 1 "register_operand") (match_operand:VI8_AVX512VL 2 "register_operand") (match_operand:VI8_AVX512VL 3 "nonimmediate_operand") (match_operand: 4 "register_operand")] "TARGET_AVX512IFMA" { emit_insn (gen_vpamdd52luq_maskz_1 ( operands[0], operands[1], operands[2], operands[3], CONST0_RTX (mode), operands[4])); DONE; }) (define_insn "vpamdd52" [(set (match_operand:VI8_AVX512VL 0 "register_operand" "=v") (unspec:VI8_AVX512VL [(match_operand:VI8_AVX512VL 1 "register_operand" "0") (match_operand:VI8_AVX512VL 2 "register_operand" "v") (match_operand:VI8_AVX512VL 3 "nonimmediate_operand" "vm")] VPMADD52))] "TARGET_AVX512IFMA" "vpmadd52\t{%3, %2, %0|%0, %2, %3}" [(set_attr "type" 
"ssemuladd") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "vpamdd52_mask" [(set (match_operand:VI8_AVX512VL 0 "register_operand" "=v") (vec_merge:VI8_AVX512VL (unspec:VI8_AVX512VL [(match_operand:VI8_AVX512VL 1 "register_operand" "0") (match_operand:VI8_AVX512VL 2 "register_operand" "v") (match_operand:VI8_AVX512VL 3 "nonimmediate_operand" "vm")] VPMADD52) (match_dup 1) (match_operand: 4 "register_operand" "Yk")))] "TARGET_AVX512IFMA" "vpmadd52\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}" [(set_attr "type" "ssemuladd") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_insn "vpmultishiftqb" [(set (match_operand:VI1_AVX512VL 0 "register_operand" "=v") (unspec:VI1_AVX512VL [(match_operand:VI1_AVX512VL 1 "register_operand" "v") (match_operand:VI1_AVX512VL 2 "nonimmediate_operand" "vm")] UNSPEC_VPMULTISHIFT))] "TARGET_AVX512VBMI" "vpmultishiftqb\t{%2, %1, %0|%0, %1, %2}" [(set_attr "type" "sselog") (set_attr "prefix" "evex") (set_attr "mode" "")]) (define_mode_iterator IMOD4 [(V64SF "TARGET_AVX5124FMAPS") (V64SI "TARGET_AVX5124VNNIW")]) (define_mode_attr imod4_narrow [(V64SF "V16SF") (V64SI "V16SI")]) (define_expand "mov" [(set (match_operand:IMOD4 0 "nonimmediate_operand") (match_operand:IMOD4 1 "nonimm_or_0_operand"))] "TARGET_AVX512F" { ix86_expand_vector_move (mode, operands); DONE; }) (define_insn_and_split "*mov_internal" [(set (match_operand:IMOD4 0 "nonimmediate_operand" "=v,v ,m") (match_operand:IMOD4 1 "nonimm_or_0_operand" " C,vm,v"))] "TARGET_AVX512F && (register_operand (operands[0], mode) || register_operand (operands[1], mode))" "#" "&& reload_completed" [(const_int 0)] { rtx op0, op1; int i; for (i = 0; i < 4; i++) { op0 = simplify_subreg (mode, operands[0], mode, i * 64); op1 = simplify_subreg (mode, operands[1], mode, i * 64); emit_move_insn (op0, op1); } DONE; }) (define_insn "avx5124fmaddps_4fmaddps" [(set (match_operand:V16SF 0 "register_operand" "=v") (unspec:V16SF [(match_operand:V16SF 1 "register_operand" "0") 
;; NOTE(review): AVX5124FMAPS packed/scalar 4-iteration FMA insns
;; (v4fmaddps, v4fmaddss, v4fnmaddps, v4fnmaddss) with plain, _mask and
;; _maskz variants.  Each reads a V64SF register quadruple (%g modifier
;; selects the full zmm group) plus a 128-bit memory operand; masking is
;; modeled with vec_merge against operand 0 or a const0 vector.  Text kept
;; byte-identical; this chunk appears extraction-mangled (collapsed lines),
;; so compare with upstream before editing.
(match_operand:V64SF 2 "register_operand" "v") (match_operand:V4SF 3 "memory_operand" "m")] UNSPEC_VP4FMADD))] "TARGET_AVX5124FMAPS" "v4fmaddps\t{%3, %g2, %0|%0, %g2, %3}" [(set_attr ("type") ("ssemuladd")) (set_attr ("prefix") ("evex")) (set_attr ("mode") ("V16SF"))]) (define_insn "avx5124fmaddps_4fmaddps_mask" [(set (match_operand:V16SF 0 "register_operand" "=v") (vec_merge:V16SF (unspec:V16SF [(match_operand:V64SF 1 "register_operand" "v") (match_operand:V4SF 2 "memory_operand" "m")] UNSPEC_VP4FMADD) (match_operand:V16SF 3 "register_operand" "0") (match_operand:HI 4 "register_operand" "Yk")))] "TARGET_AVX5124FMAPS" "v4fmaddps\t{%2, %g1, %0%{%4%}|%0%{%4%}, %g1, %2}" [(set_attr ("type") ("ssemuladd")) (set_attr ("prefix") ("evex")) (set_attr ("mode") ("V16SF"))]) (define_insn "avx5124fmaddps_4fmaddps_maskz" [(set (match_operand:V16SF 0 "register_operand" "=v") (vec_merge:V16SF (unspec:V16SF [(match_operand:V16SF 1 "register_operand" "0") (match_operand:V64SF 2 "register_operand" "v") (match_operand:V4SF 3 "memory_operand" "m")] UNSPEC_VP4FMADD) (match_operand:V16SF 4 "const0_operand" "C") (match_operand:HI 5 "register_operand" "Yk")))] "TARGET_AVX5124FMAPS" "v4fmaddps\t{%3, %g2, %0%{%5%}%{z%}|%0%{%5%}%{z%}, %g2, %3}" [(set_attr ("type") ("ssemuladd")) (set_attr ("prefix") ("evex")) (set_attr ("mode") ("V16SF"))]) (define_insn "avx5124fmaddps_4fmaddss" [(set (match_operand:V4SF 0 "register_operand" "=v") (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "0") (match_operand:V64SF 2 "register_operand" "v") (match_operand:V4SF 3 "memory_operand" "m")] UNSPEC_VP4FMADD))] "TARGET_AVX5124FMAPS" "v4fmaddss\t{%3, %x2, %0|%0, %x2, %3}" [(set_attr ("type") ("ssemuladd")) (set_attr ("prefix") ("evex")) (set_attr ("mode") ("SF"))]) (define_insn "avx5124fmaddps_4fmaddss_mask" [(set (match_operand:V4SF 0 "register_operand" "=v") (vec_merge:V4SF (unspec:V4SF [(match_operand:V64SF 1 "register_operand" "v") (match_operand:V4SF 2 "memory_operand" "m")] UNSPEC_VP4FMADD) 
(match_operand:V4SF 3 "register_operand" "0") (match_operand:QI 4 "register_operand" "Yk")))] "TARGET_AVX5124FMAPS" "v4fmaddss\t{%2, %x1, %0%{%4%}|%0%{%4%}, %x1, %2}" [(set_attr ("type") ("ssemuladd")) (set_attr ("prefix") ("evex")) (set_attr ("mode") ("SF"))]) (define_insn "avx5124fmaddps_4fmaddss_maskz" [(set (match_operand:V4SF 0 "register_operand" "=v") (vec_merge:V4SF (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "0") (match_operand:V64SF 2 "register_operand" "v") (match_operand:V4SF 3 "memory_operand" "m")] UNSPEC_VP4FMADD) (match_operand:V4SF 4 "const0_operand" "C") (match_operand:QI 5 "register_operand" "Yk")))] "TARGET_AVX5124FMAPS" "v4fmaddss\t{%3, %x2, %0%{%5%}%{z%}|%0%{%5%}%{z%}, %x2, %3}" [(set_attr ("type") ("ssemuladd")) (set_attr ("prefix") ("evex")) (set_attr ("mode") ("SF"))]) (define_insn "avx5124fmaddps_4fnmaddps" [(set (match_operand:V16SF 0 "register_operand" "=v") (unspec:V16SF [(match_operand:V16SF 1 "register_operand" "0") (match_operand:V64SF 2 "register_operand" "v") (match_operand:V4SF 3 "memory_operand" "m")] UNSPEC_VP4FNMADD))] "TARGET_AVX5124FMAPS" "v4fnmaddps\t{%3, %g2, %0|%0, %g2, %3}" [(set_attr ("type") ("ssemuladd")) (set_attr ("prefix") ("evex")) (set_attr ("mode") ("V16SF"))]) (define_insn "avx5124fmaddps_4fnmaddps_mask" [(set (match_operand:V16SF 0 "register_operand" "=v") (vec_merge:V16SF (unspec:V16SF [(match_operand:V64SF 1 "register_operand" "v") (match_operand:V4SF 2 "memory_operand" "m")] UNSPEC_VP4FNMADD) (match_operand:V16SF 3 "register_operand" "0") (match_operand:HI 4 "register_operand" "Yk")))] "TARGET_AVX5124FMAPS" "v4fnmaddps\t{%2, %g1, %0%{%4%}|%0%{%4%}, %g1, %2}" [(set_attr ("type") ("ssemuladd")) (set_attr ("prefix") ("evex")) (set_attr ("mode") ("V16SF"))]) (define_insn "avx5124fmaddps_4fnmaddps_maskz" [(set (match_operand:V16SF 0 "register_operand" "=v") (vec_merge:V16SF (unspec:V16SF [(match_operand:V16SF 1 "register_operand" "0") (match_operand:V64SF 2 "register_operand" "v") (match_operand:V4SF 3 
"memory_operand" "m")] UNSPEC_VP4FNMADD) (match_operand:V16SF 4 "const0_operand" "C") (match_operand:HI 5 "register_operand" "Yk")))] "TARGET_AVX5124FMAPS" "v4fnmaddps\t{%3, %g2, %0%{%5%}%{z%}|%0%{%5%}%{z%}, %g2, %3}" [(set_attr ("type") ("ssemuladd")) (set_attr ("prefix") ("evex")) (set_attr ("mode") ("V16SF"))]) (define_insn "avx5124fmaddps_4fnmaddss" [(set (match_operand:V4SF 0 "register_operand" "=v") (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "0") (match_operand:V64SF 2 "register_operand" "v") (match_operand:V4SF 3 "memory_operand" "m")] UNSPEC_VP4FNMADD))] "TARGET_AVX5124FMAPS" "v4fnmaddss\t{%3, %x2, %0|%0, %x2, %3}" [(set_attr ("type") ("ssemuladd")) (set_attr ("prefix") ("evex")) (set_attr ("mode") ("SF"))]) (define_insn "avx5124fmaddps_4fnmaddss_mask" [(set (match_operand:V4SF 0 "register_operand" "=v") (vec_merge:V4SF (unspec:V4SF [(match_operand:V64SF 1 "register_operand" "v") (match_operand:V4SF 2 "memory_operand" "m")] UNSPEC_VP4FNMADD) (match_operand:V4SF 3 "register_operand" "0") (match_operand:QI 4 "register_operand" "Yk")))] "TARGET_AVX5124FMAPS" "v4fnmaddss\t{%2, %x1, %0%{%4%}|%0%{%4%}, %x1, %2}" [(set_attr ("type") ("ssemuladd")) (set_attr ("prefix") ("evex")) (set_attr ("mode") ("SF"))]) (define_insn "avx5124fmaddps_4fnmaddss_maskz" [(set (match_operand:V4SF 0 "register_operand" "=v") (vec_merge:V4SF (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "0") (match_operand:V64SF 2 "register_operand" "v") (match_operand:V4SF 3 "memory_operand" "m")] UNSPEC_VP4FNMADD) (match_operand:V4SF 4 "const0_operand" "C") (match_operand:QI 5 "register_operand" "Yk")))] "TARGET_AVX5124FMAPS" "v4fnmaddss\t{%3, %x2, %0%{%5%}%{z%}|%0%{%5%}%{z%}, %x2, %3}" [(set_attr ("type") ("ssemuladd")) (set_attr ("prefix") ("evex")) (set_attr ("mode") ("SF"))]) (define_insn "avx5124vnniw_vp4dpwssd" [(set (match_operand:V16SI 0 "register_operand" "=v") (unspec:V16SI [(match_operand:V16SI 1 "register_operand" "0") (match_operand:V64SI 2 "register_operand" "v") 
;; NOTE(review): AVX5124VNNIW vp4dpwssd/vp4dpwssds insns (plain, _mask,
;; _maskz -- same 4-register-group + 128-bit memory shape as the 4FMAPS
;; patterns above), the AVX512VPOPCNTDQ popcount expander/insn, and the
;; out-of-line register save/restore stubs that call/jump to
;; millicode-style helper symbols (match_parallel over "save_multiple" /
;; "restore_multiple" predicates).  Text kept byte-identical; the chunk
;; appears extraction-mangled (collapsed lines, stripped angle-bracket
;; substitutions) -- compare with upstream before editing.
(match_operand:V4SI 3 "memory_operand" "m")] UNSPEC_VP4DPWSSD))] "TARGET_AVX5124VNNIW" "vp4dpwssd\t{%3, %g2, %0|%0, %g2, %3}" [(set_attr ("type") ("ssemuladd")) (set_attr ("prefix") ("evex")) (set_attr ("mode") ("TI"))]) (define_insn "avx5124vnniw_vp4dpwssd_mask" [(set (match_operand:V16SI 0 "register_operand" "=v") (vec_merge:V16SI (unspec:V16SI [(match_operand:V64SI 1 "register_operand" "v") (match_operand:V4SI 2 "memory_operand" "m")] UNSPEC_VP4DPWSSD) (match_operand:V16SI 3 "register_operand" "0") (match_operand:HI 4 "register_operand" "Yk")))] "TARGET_AVX5124VNNIW" "vp4dpwssd\t{%2, %g1, %0%{%4%}|%0%{%4%}, %g1, %2}" [(set_attr ("type") ("ssemuladd")) (set_attr ("prefix") ("evex")) (set_attr ("mode") ("TI"))]) (define_insn "avx5124vnniw_vp4dpwssd_maskz" [(set (match_operand:V16SI 0 "register_operand" "=v") (vec_merge:V16SI (unspec:V16SI [(match_operand:V16SI 1 "register_operand" "0") (match_operand:V64SI 2 "register_operand" "v") (match_operand:V4SI 3 "memory_operand" "m")] UNSPEC_VP4DPWSSD) (match_operand:V16SI 4 "const0_operand" "C") (match_operand:HI 5 "register_operand" "Yk")))] "TARGET_AVX5124VNNIW" "vp4dpwssd\t{%3, %g2, %0%{%5%}%{z%}|%0%{%5%}%{z%}, %g2, %3}" [(set_attr ("type") ("ssemuladd")) (set_attr ("prefix") ("evex")) (set_attr ("mode") ("TI"))]) (define_insn "avx5124vnniw_vp4dpwssds" [(set (match_operand:V16SI 0 "register_operand" "=v") (unspec:V16SI [(match_operand:V16SI 1 "register_operand" "0") (match_operand:V64SI 2 "register_operand" "v") (match_operand:V4SI 3 "memory_operand" "m")] UNSPEC_VP4DPWSSDS))] "TARGET_AVX5124VNNIW" "vp4dpwssds\t{%3, %g2, %0|%0, %g2, %3}" [(set_attr ("type") ("ssemuladd")) (set_attr ("prefix") ("evex")) (set_attr ("mode") ("TI"))]) (define_insn "avx5124vnniw_vp4dpwssds_mask" [(set (match_operand:V16SI 0 "register_operand" "=v") (vec_merge:V16SI (unspec:V16SI [(match_operand:V64SI 1 "register_operand" "v") (match_operand:V4SI 2 "memory_operand" "m")] UNSPEC_VP4DPWSSDS) (match_operand:V16SI 3 "register_operand" "0") 
(match_operand:HI 4 "register_operand" "Yk")))] "TARGET_AVX5124VNNIW" "vp4dpwssds\t{%2, %g1, %0%{%4%}|%0%{%4%}, %g1, %2}" [(set_attr ("type") ("ssemuladd")) (set_attr ("prefix") ("evex")) (set_attr ("mode") ("TI"))]) (define_insn "avx5124vnniw_vp4dpwssds_maskz" [(set (match_operand:V16SI 0 "register_operand" "=v") (vec_merge:V16SI (unspec:V16SI [(match_operand:V16SI 1 "register_operand" "0") (match_operand:V64SI 2 "register_operand" "v") (match_operand:V4SI 3 "memory_operand" "m")] UNSPEC_VP4DPWSSDS) (match_operand:V16SI 4 "const0_operand" "C") (match_operand:HI 5 "register_operand" "Yk")))] "TARGET_AVX5124VNNIW" "vp4dpwssds\t{%3, %g2, %0%{%5%}%{z%}|%0%{%5%}%{z%}, %g2, %3}" [(set_attr ("type") ("ssemuladd")) (set_attr ("prefix") ("evex")) (set_attr ("mode") ("TI"))]) (define_expand "popcount2" [(set (match_operand:VI48_AVX512VL 0 "register_operand") (popcount:VI48_AVX512VL (match_operand:VI48_AVX512VL 1 "nonimmediate_operand")))] "TARGET_AVX512VPOPCNTDQ") (define_insn "vpopcount" [(set (match_operand:VI48_AVX512VL 0 "register_operand" "=v") (popcount:VI48_AVX512VL (match_operand:VI48_AVX512VL 1 "nonimmediate_operand" "vm")))] "TARGET_AVX512VPOPCNTDQ" "vpopcnt\t{%1, %0|%0, %1}") ;; Save multiple registers out-of-line. (define_insn "*save_multiple" [(match_parallel 0 "save_multiple" [(use (match_operand:P 1 "symbol_operand"))])] "TARGET_SSE && TARGET_64BIT" "call\t%P1") ;; Restore multiple registers out-of-line. (define_insn "*restore_multiple" [(match_parallel 0 "restore_multiple" [(use (match_operand:P 1 "symbol_operand"))])] "TARGET_SSE && TARGET_64BIT" "call\t%P1") ;; Restore multiple registers out-of-line and return. 
(define_insn "*restore_multiple_and_return" [(match_parallel 0 "restore_multiple" [(return) (use (match_operand:P 1 "symbol_operand")) (set (reg:DI SP_REG) (reg:DI R10_REG)) ])] "TARGET_SSE && TARGET_64BIT" "jmp\t%P1") ;; Restore multiple registers out-of-line when hard frame pointer is used, ;; perform the leave operation prior to returning (from the function). (define_insn "*restore_multiple_leave_return" [(match_parallel 0 "restore_multiple" [(return) (use (match_operand:P 1 "symbol_operand")) (set (reg:DI SP_REG) (plus:DI (reg:DI BP_REG) (const_int 8))) (set (reg:DI BP_REG) (mem:DI (reg:DI BP_REG))) (clobber (mem:BLK (scratch))) ])] "TARGET_SSE && TARGET_64BIT" "jmp\t%P1") (define_expand "popcount2" [(set (match_operand:VI12_AVX512VL 0 "register_operand" "=v") (popcount:VI12_AVX512VL (match_operand:VI12_AVX512VL 1 "nonimmediate_operand" "vm")))] "TARGET_AVX512BITALG") (define_insn "vpopcount" [(set (match_operand:VI12_AVX512VL 0 "register_operand" "=v") (popcount:VI12_AVX512VL (match_operand:VI12_AVX512VL 1 "nonimmediate_operand" "vm")))] "TARGET_AVX512BITALG" "vpopcnt\t{%1, %0|%0, %1}") (define_insn "vgf2p8affineinvqb_" [(set (match_operand:VI1_AVX512F 0 "register_operand" "=x,v") (unspec:VI1_AVX512F [(match_operand:VI1_AVX512F 1 "register_operand" "0,v") (match_operand:VI1_AVX512F 2 "vector_operand" "xBm,vm") (match_operand 3 "const_0_to_255_operand" "n,n")] UNSPEC_GF2P8AFFINEINV))] "TARGET_GFNI" "@ gf2p8affineinvqb\t{%3, %2, %0| %0, %2, %3} vgf2p8affineinvqb\t{%3, %2, %1, %0| %0, %1, %2, %3}" [(set_attr "isa" "noavx,avx") (set_attr "prefix_data16" "1,*") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,maybe_evex") (set_attr "mode" "")]) (define_insn "vgf2p8affineqb_" [(set (match_operand:VI1_AVX512F 0 "register_operand" "=x,v") (unspec:VI1_AVX512F [(match_operand:VI1_AVX512F 1 "register_operand" "0,v") (match_operand:VI1_AVX512F 2 "vector_operand" "xBm,vm") (match_operand 3 "const_0_to_255_operand" "n,n")] UNSPEC_GF2P8AFFINE))] "TARGET_GFNI" "@ 
gf2p8affineqb\t{%3, %2, %0| %0, %2, %3} vgf2p8affineqb\t{%3, %2, %1, %0| %0, %1, %2, %3}" [(set_attr "isa" "noavx,avx") (set_attr "prefix_data16" "1,*") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,maybe_evex") (set_attr "mode" "")]) (define_insn "vgf2p8mulb_" [(set (match_operand:VI1_AVX512F 0 "register_operand" "=x,v") (unspec:VI1_AVX512F [(match_operand:VI1_AVX512F 1 "register_operand" "%0,v") (match_operand:VI1_AVX512F 2 "vector_operand" "xBm,vm")] UNSPEC_GF2P8MUL))] "TARGET_GFNI" "@ gf2p8mulb\t{%2, %0| %0, %2} vgf2p8mulb\t{%2, %1, %0| %0, %1, %2}" [(set_attr "isa" "noavx,avx") (set_attr "prefix_data16" "1,*") (set_attr "prefix_extra" "1") (set_attr "prefix" "orig,maybe_evex") (set_attr "mode" "")]) (define_insn "vpshrd_" [(set (match_operand:VI248_AVX512VL 0 "register_operand" "=v") (unspec:VI248_AVX512VL [(match_operand:VI248_AVX512VL 1 "register_operand" "v") (match_operand:VI248_AVX512VL 2 "nonimmediate_operand" "vm") (match_operand:SI 3 "const_0_to_255_operand" "n")] UNSPEC_VPSHRD))] "TARGET_AVX512VBMI2" "vpshrd\t{%3, %2, %1, %0|%0, %1, %2, %3 }" [(set_attr ("prefix") ("evex"))]) (define_insn "vpshld_" [(set (match_operand:VI248_AVX512VL 0 "register_operand" "=v") (unspec:VI248_AVX512VL [(match_operand:VI248_AVX512VL 1 "register_operand" "v") (match_operand:VI248_AVX512VL 2 "nonimmediate_operand" "vm") (match_operand:SI 3 "const_0_to_255_operand" "n")] UNSPEC_VPSHLD))] "TARGET_AVX512VBMI2" "vpshld\t{%3, %2, %1, %0|%0, %1, %2, %3 }" [(set_attr ("prefix") ("evex"))]) (define_insn "vpshrdv_" [(set (match_operand:VI248_AVX512VL 0 "register_operand" "=v") (unspec:VI248_AVX512VL [(match_operand:VI248_AVX512VL 1 "register_operand" "0") (match_operand:VI248_AVX512VL 2 "register_operand" "v") (match_operand:VI248_AVX512VL 3 "nonimmediate_operand" "vm")] UNSPEC_VPSHRDV))] "TARGET_AVX512VBMI2" "vpshrdv\t{%3, %2, %0|%0, %2, %3 }" [(set_attr ("prefix") ("evex")) (set_attr "mode" "")]) (define_insn "vpshrdv__mask" [(set (match_operand:VI248_AVX512VL 0 
"register_operand" "=v") (vec_merge:VI248_AVX512VL (unspec:VI248_AVX512VL [(match_operand:VI248_AVX512VL 1 "register_operand" "0") (match_operand:VI248_AVX512VL 2 "register_operand" "v") (match_operand:VI248_AVX512VL 3 "nonimmediate_operand" "vm")] UNSPEC_VPSHRDV) (match_dup 1) (match_operand: 4 "register_operand" "Yk")))] "TARGET_AVX512VBMI2" "vpshrdv\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3 }" [(set_attr ("prefix") ("evex")) (set_attr "mode" "")]) (define_expand "vpshrdv__maskz" [(match_operand:VI248_AVX512VL 0 "register_operand") (match_operand:VI248_AVX512VL 1 "register_operand") (match_operand:VI248_AVX512VL 2 "register_operand") (match_operand:VI248_AVX512VL 3 "nonimmediate_operand") (match_operand: 4 "register_operand")] "TARGET_AVX512VBMI2" { emit_insn (gen_vpshrdv__maskz_1 (operands[0], operands[1], operands[2], operands[3], CONST0_RTX (mode), operands[4])); DONE; }) (define_insn "vpshrdv__maskz_1" [(set (match_operand:VI248_AVX512VL 0 "register_operand" "=v") (vec_merge:VI248_AVX512VL (unspec:VI248_AVX512VL [(match_operand:VI248_AVX512VL 1 "register_operand" "0") (match_operand:VI248_AVX512VL 2 "register_operand" "v") (match_operand:VI248_AVX512VL 3 "nonimmediate_operand" "vm")] UNSPEC_VPSHRDV) (match_operand:VI248_AVX512VL 4 "const0_operand" "C") (match_operand: 5 "register_operand" "Yk")))] "TARGET_AVX512VBMI2" "vpshrdv\t{%3, %2, %0%{%5%}%{z%}|%0%{%5%}%{z%}, %2, %3 }" [(set_attr ("prefix") ("evex")) (set_attr "mode" "")]) (define_insn "vpshldv_" [(set (match_operand:VI248_AVX512VL 0 "register_operand" "=v") (unspec:VI248_AVX512VL [(match_operand:VI248_AVX512VL 1 "register_operand" "0") (match_operand:VI248_AVX512VL 2 "register_operand" "v") (match_operand:VI248_AVX512VL 3 "nonimmediate_operand" "vm")] UNSPEC_VPSHLDV))] "TARGET_AVX512VBMI2" "vpshldv\t{%3, %2, %0|%0, %2, %3 }" [(set_attr ("prefix") ("evex")) (set_attr "mode" "")]) (define_insn "vpshldv__mask" [(set (match_operand:VI248_AVX512VL 0 "register_operand" "=v") (vec_merge:VI248_AVX512VL 
(unspec:VI248_AVX512VL [(match_operand:VI248_AVX512VL 1 "register_operand" "0") (match_operand:VI248_AVX512VL 2 "register_operand" "v") (match_operand:VI248_AVX512VL 3 "nonimmediate_operand" "vm")] UNSPEC_VPSHLDV) (match_dup 1) (match_operand: 4 "register_operand" "Yk")))] "TARGET_AVX512VBMI2" "vpshldv\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3 }" [(set_attr ("prefix") ("evex")) (set_attr "mode" "")]) (define_expand "vpshldv__maskz" [(match_operand:VI248_AVX512VL 0 "register_operand") (match_operand:VI248_AVX512VL 1 "register_operand") (match_operand:VI248_AVX512VL 2 "register_operand") (match_operand:VI248_AVX512VL 3 "nonimmediate_operand") (match_operand: 4 "register_operand")] "TARGET_AVX512VBMI2" { emit_insn (gen_vpshldv__maskz_1 (operands[0], operands[1], operands[2], operands[3], CONST0_RTX (mode), operands[4])); DONE; }) (define_insn "vpshldv__maskz_1" [(set (match_operand:VI248_AVX512VL 0 "register_operand" "=v") (vec_merge:VI248_AVX512VL (unspec:VI248_AVX512VL [(match_operand:VI248_AVX512VL 1 "register_operand" "0") (match_operand:VI248_AVX512VL 2 "register_operand" "v") (match_operand:VI248_AVX512VL 3 "nonimmediate_operand" "vm")] UNSPEC_VPSHLDV) (match_operand:VI248_AVX512VL 4 "const0_operand" "C") (match_operand: 5 "register_operand" "Yk")))] "TARGET_AVX512VBMI2" "vpshldv\t{%3, %2, %0%{%5%}%{z%}|%0%{%5%}%{z%}, %2, %3 }" [(set_attr ("prefix") ("evex")) (set_attr "mode" "")]) (define_mode_attr VI1SI [(V64QI "V16SI") (V32QI "V8SI") (V16QI "V4SI")]) (define_mode_attr vi1si [(V64QI "v16si") (V32QI "v8si") (V16QI "v4si")]) (define_expand "usdot_prod" [(match_operand: 0 "register_operand") (match_operand:VI1_AVX512VNNI 1 "register_operand") (match_operand:VI1_AVX512VNNI 2 "register_operand") (match_operand: 3 "register_operand")] "( == 64 ||((TARGET_AVX512VNNI && TARGET_AVX512VL) || TARGET_AVXVNNI))" { operands[1] = lowpart_subreg (mode, operands[1], mode); operands[2] = lowpart_subreg (mode, operands[2], mode); emit_insn (gen_rtx_SET (operands[0], operands[3])); 
emit_insn (gen_vpdpbusd_ (operands[0], operands[3],
			  operands[1], operands[2]));
  DONE;
})

;; AVX512VNNI unsigned-byte * signed-byte multiply with dword
;; accumulation, 512-bit form.  Operand 1 (the accumulator) is tied
;; to the destination.
(define_insn "vpdpbusd_v16si"
  [(set (match_operand:V16SI 0 "register_operand" "=v")
	(unspec:V16SI
	  [(match_operand:V16SI 1 "register_operand" "0")
	   (match_operand:V16SI 2 "register_operand" "v")
	   (match_operand:V16SI 3 "nonimmediate_operand" "vm")]
	  UNSPEC_VPMADDUBSWACCD))]
  "TARGET_AVX512VNNI"
  "vpdpbusd\t{%3, %2, %0|%0, %2, %3}"
  [(set_attr ("prefix") ("evex"))])

;; 128/256-bit vpdpbusd; alternative 0 is the AVX-VNNI (VEX, %{vex%}
;; prefix override) encoding, alternative 1 the AVX512VNNI+VL (EVEX)
;; one.  NOTE(review): the two "@" alternatives were newline-separated
;; upstream; collapsed here by extraction (same for the siblings below).
(define_insn "vpdpbusd_"
  [(set (match_operand:VI4_AVX2 0 "register_operand" "=x,v")
	(unspec:VI4_AVX2
	  [(match_operand:VI4_AVX2 1 "register_operand" "0,0")
	   (match_operand:VI4_AVX2 2 "register_operand" "x,v")
	   (match_operand:VI4_AVX2 3 "nonimmediate_operand" "xm,vm")]
	  UNSPEC_VPMADDUBSWACCD))]
  "TARGET_AVXVNNI || (TARGET_AVX512VNNI && TARGET_AVX512VL)"
  "@ %{vex%} vpdpbusd\t{%3, %2, %0|%0, %2, %3} vpdpbusd\t{%3, %2, %0|%0, %2, %3}"
  [(set_attr ("prefix") ("vex,evex"))
   (set_attr ("isa") ("avxvnni,avx512vnnivl"))])

;; Merge-masked vpdpbusd; unselected lanes keep the accumulator value
;; (match_dup 1).
(define_insn "vpdpbusd__mask"
  [(set (match_operand:VI4_AVX512VL 0 "register_operand" "=v")
	(vec_merge:VI4_AVX512VL
	  (unspec:VI4_AVX512VL
	    [(match_operand:VI4_AVX512VL 1 "register_operand" "0")
	     (match_operand:VI4_AVX512VL 2 "register_operand" "v")
	     (match_operand:VI4_AVX512VL 3 "nonimmediate_operand" "vm")]
	    UNSPEC_VPMADDUBSWACCD)
	  (match_dup 1)
	  (match_operand: 4 "register_operand" "Yk")))]
  "TARGET_AVX512VNNI"
  "vpdpbusd\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3 }"
  [(set_attr ("prefix") ("evex"))])

;; Zero-masked vpdpbusd expander.
(define_expand "vpdpbusd__maskz"
  [(match_operand:VI4_AVX512VL 0 "register_operand")
   (match_operand:VI4_AVX512VL 1 "register_operand")
   (match_operand:VI4_AVX512VL 2 "register_operand")
   (match_operand:VI4_AVX512VL 3 "nonimmediate_operand")
   (match_operand: 4 "register_operand")]
  "TARGET_AVX512VNNI"
{
  emit_insn (gen_vpdpbusd__maskz_1 (operands[0], operands[1],
				    operands[2], operands[3],
				    CONST0_RTX (mode), operands[4]));
  DONE;
})

;; Insn form of the zero-masked vpdpbusd.
(define_insn "vpdpbusd__maskz_1"
  [(set (match_operand:VI4_AVX512VL 0 "register_operand" "=v")
	(vec_merge:VI4_AVX512VL
	  (unspec:VI4_AVX512VL
	    [(match_operand:VI4_AVX512VL 1 "register_operand" "0")
	     (match_operand:VI4_AVX512VL 2 "register_operand" "v")
	     (match_operand:VI4_AVX512VL 3 "nonimmediate_operand" "vm")]
	    UNSPEC_VPMADDUBSWACCD)
	  (match_operand:VI4_AVX512VL 4 "const0_operand" "C")
	  (match_operand: 5 "register_operand" "Yk")))]
  "TARGET_AVX512VNNI"
  "vpdpbusd\t{%3, %2, %0%{%5%}%{z%}|%0%{%5%}%{z%}, %2, %3 }"
  [(set_attr ("prefix") ("evex"))])

;; Saturating variant vpdpbusds: same structure as the vpdpbusd family.
(define_insn "vpdpbusds_v16si"
  [(set (match_operand:V16SI 0 "register_operand" "=v")
	(unspec:V16SI
	  [(match_operand:V16SI 1 "register_operand" "0")
	   (match_operand:V16SI 2 "register_operand" "v")
	   (match_operand:V16SI 3 "nonimmediate_operand" "vm")]
	  UNSPEC_VPMADDUBSWACCSSD))]
  "TARGET_AVX512VNNI"
  "vpdpbusds\t{%3, %2, %0|%0, %2, %3}"
  [(set_attr ("prefix") ("evex"))])

(define_insn "vpdpbusds_"
  [(set (match_operand:VI4_AVX2 0 "register_operand" "=x,v")
	(unspec:VI4_AVX2
	  [(match_operand:VI4_AVX2 1 "register_operand" "0,0")
	   (match_operand:VI4_AVX2 2 "register_operand" "x,v")
	   (match_operand:VI4_AVX2 3 "nonimmediate_operand" "xm,vm")]
	  UNSPEC_VPMADDUBSWACCSSD))]
  "TARGET_AVXVNNI || (TARGET_AVX512VNNI && TARGET_AVX512VL)"
  "@ %{vex%} vpdpbusds\t{%3, %2, %0|%0, %2, %3} vpdpbusds\t{%3, %2, %0|%0, %2, %3}"
  [(set_attr ("prefix") ("vex,evex"))
   (set_attr ("isa") ("avxvnni,avx512vnnivl"))])

(define_insn "vpdpbusds__mask"
  [(set (match_operand:VI4_AVX512VL 0 "register_operand" "=v")
	(vec_merge:VI4_AVX512VL
	  (unspec:VI4_AVX512VL
	    [(match_operand:VI4_AVX512VL 1 "register_operand" "0")
	     (match_operand:VI4_AVX512VL 2 "register_operand" "v")
	     (match_operand:VI4_AVX512VL 3 "nonimmediate_operand" "vm")]
	    UNSPEC_VPMADDUBSWACCSSD)
	  (match_dup 1)
	  (match_operand: 4 "register_operand" "Yk")))]
  "TARGET_AVX512VNNI"
  "vpdpbusds\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3 }"
  [(set_attr ("prefix") ("evex"))])

(define_expand "vpdpbusds__maskz"
  [(match_operand:VI4_AVX512VL 0 "register_operand")
   (match_operand:VI4_AVX512VL 1 "register_operand")
   (match_operand:VI4_AVX512VL 2 "register_operand")
   (match_operand:VI4_AVX512VL 3 "nonimmediate_operand")
   (match_operand: 4 "register_operand")]
  "TARGET_AVX512VNNI"
{
  emit_insn (gen_vpdpbusds__maskz_1 (operands[0], operands[1],
				     operands[2], operands[3],
				     CONST0_RTX (mode), operands[4]));
  DONE;
})

(define_insn "vpdpbusds__maskz_1"
  [(set (match_operand:VI4_AVX512VL 0 "register_operand" "=v")
	(vec_merge:VI4_AVX512VL
	  (unspec:VI4_AVX512VL
	    [(match_operand:VI4_AVX512VL 1 "register_operand" "0")
	     (match_operand:VI4_AVX512VL 2 "register_operand" "v")
	     (match_operand:VI4_AVX512VL 3 "nonimmediate_operand" "vm")]
	    UNSPEC_VPMADDUBSWACCSSD)
	  (match_operand:VI4_AVX512VL 4 "const0_operand" "C")
	  (match_operand: 5 "register_operand" "Yk")))]
  "TARGET_AVX512VNNI"
  "vpdpbusds\t{%3, %2, %0%{%5%}%{z%}|%0%{%5%}%{z%}, %2, %3 }"
  [(set_attr ("prefix") ("evex"))])

;; Signed-word multiply with dword accumulation (vpdpwssd family),
;; same structure as vpdpbusd above.
(define_insn "vpdpwssd_v16si"
  [(set (match_operand:V16SI 0 "register_operand" "=v")
	(unspec:V16SI
	  [(match_operand:V16SI 1 "register_operand" "0")
	   (match_operand:V16SI 2 "register_operand" "v")
	   (match_operand:V16SI 3 "nonimmediate_operand" "vm")]
	  UNSPEC_VPMADDWDACCD))]
  "TARGET_AVX512VNNI"
  "vpdpwssd\t{%3, %2, %0|%0, %2, %3}"
  [(set_attr ("prefix") ("evex"))])

(define_insn "vpdpwssd_"
  [(set (match_operand:VI4_AVX2 0 "register_operand" "=x,v")
	(unspec:VI4_AVX2
	  [(match_operand:VI4_AVX2 1 "register_operand" "0,0")
	   (match_operand:VI4_AVX2 2 "register_operand" "x,v")
	   (match_operand:VI4_AVX2 3 "nonimmediate_operand" "xm,vm")]
	  UNSPEC_VPMADDWDACCD))]
  "TARGET_AVXVNNI || (TARGET_AVX512VNNI && TARGET_AVX512VL)"
  "@ %{vex%} vpdpwssd\t{%3, %2, %0|%0, %2, %3} vpdpwssd\t{%3, %2, %0|%0, %2, %3}"
  [(set_attr ("prefix") ("vex,evex"))
   (set_attr ("isa") ("avxvnni,avx512vnnivl"))])

(define_insn "vpdpwssd__mask"
  [(set (match_operand:VI4_AVX512VL 0 "register_operand" "=v")
	(vec_merge:VI4_AVX512VL
	  (unspec:VI4_AVX512VL
	    [(match_operand:VI4_AVX512VL 1 "register_operand" "0")
	     (match_operand:VI4_AVX512VL 2 "register_operand" "v")
	     (match_operand:VI4_AVX512VL 3 "nonimmediate_operand" "vm")]
	    UNSPEC_VPMADDWDACCD)
	  (match_dup 1)
	  (match_operand: 4 "register_operand" "Yk")))]
  "TARGET_AVX512VNNI"
  "vpdpwssd\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3 }"
  [(set_attr ("prefix") ("evex"))])

(define_expand "vpdpwssd__maskz"
  [(match_operand:VI4_AVX512VL 0 "register_operand")
   (match_operand:VI4_AVX512VL 1 "register_operand")
   (match_operand:VI4_AVX512VL 2 "register_operand")
   (match_operand:VI4_AVX512VL 3 "nonimmediate_operand")
   (match_operand: 4 "register_operand")]
  "TARGET_AVX512VNNI"
{
  emit_insn (gen_vpdpwssd__maskz_1 (operands[0], operands[1],
				    operands[2], operands[3],
				    CONST0_RTX (mode), operands[4]));
  DONE;
})

(define_insn "vpdpwssd__maskz_1"
  [(set (match_operand:VI4_AVX512VL 0 "register_operand" "=v")
	(vec_merge:VI4_AVX512VL
	  (unspec:VI4_AVX512VL
	    [(match_operand:VI4_AVX512VL 1 "register_operand" "0")
	     (match_operand:VI4_AVX512VL 2 "register_operand" "v")
	     (match_operand:VI4_AVX512VL 3 "nonimmediate_operand" "vm")]
	    UNSPEC_VPMADDWDACCD)
	  (match_operand:VI4_AVX512VL 4 "const0_operand" "C")
	  (match_operand: 5 "register_operand" "Yk")))]
  "TARGET_AVX512VNNI"
  "vpdpwssd\t{%3, %2, %0%{%5%}%{z%}|%0%{%5%}%{z%}, %2, %3 }"
  [(set_attr ("prefix") ("evex"))])

;; Saturating word dot-product (vpdpwssds family).
(define_insn "vpdpwssds_v16si"
  [(set (match_operand:V16SI 0 "register_operand" "=v")
	(unspec:V16SI
	  [(match_operand:V16SI 1 "register_operand" "0")
	   (match_operand:V16SI 2 "register_operand" "v")
	   (match_operand:V16SI 3 "nonimmediate_operand" "vm")]
	  UNSPEC_VPMADDWDACCSSD))]
  "TARGET_AVX512VNNI"
  "vpdpwssds\t{%3, %2, %0|%0, %2, %3}"
  [(set_attr ("prefix") ("evex"))])

(define_insn "vpdpwssds_"
  [(set (match_operand:VI4_AVX2 0 "register_operand" "=x,v")
	(unspec:VI4_AVX2
	  [(match_operand:VI4_AVX2 1 "register_operand" "0,0")
	   (match_operand:VI4_AVX2 2 "register_operand" "x,v")
	   (match_operand:VI4_AVX2 3 "nonimmediate_operand" "xm,vm")]
	  UNSPEC_VPMADDWDACCSSD))]
  "TARGET_AVXVNNI || (TARGET_AVX512VNNI && TARGET_AVX512VL)"
  "@ %{vex%} vpdpwssds\t{%3, %2, %0|%0, %2, %3} vpdpwssds\t{%3, %2, %0|%0, %2, %3}"
  [(set_attr ("prefix") ("vex,evex"))
   (set_attr ("isa") ("avxvnni,avx512vnnivl"))])
(define_insn "vpdpwssds__mask" [(set (match_operand:VI4_AVX512VL 0 "register_operand" "=v") (vec_merge:VI4_AVX512VL (unspec:VI4_AVX512VL [(match_operand:VI4_AVX512VL 1 "register_operand" "0") (match_operand:VI4_AVX512VL 2 "register_operand" "v") (match_operand:VI4_AVX512VL 3 "nonimmediate_operand" "vm")] UNSPEC_VPMADDWDACCSSD) (match_dup 1) (match_operand: 4 "register_operand" "Yk")))] "TARGET_AVX512VNNI" "vpdpwssds\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3 }" [(set_attr ("prefix") ("evex"))]) (define_expand "vpdpwssds__maskz" [(match_operand:VI4_AVX512VL 0 "register_operand") (match_operand:VI4_AVX512VL 1 "register_operand") (match_operand:VI4_AVX512VL 2 "register_operand") (match_operand:VI4_AVX512VL 3 "nonimmediate_operand") (match_operand: 4 "register_operand")] "TARGET_AVX512VNNI" { emit_insn (gen_vpdpwssds__maskz_1 (operands[0], operands[1], operands[2], operands[3], CONST0_RTX (mode), operands[4])); DONE; }) (define_insn "vpdpwssds__maskz_1" [(set (match_operand:VI4_AVX512VL 0 "register_operand" "=v") (vec_merge:VI4_AVX512VL (unspec:VI4_AVX512VL [(match_operand:VI4_AVX512VL 1 "register_operand" "0") (match_operand:VI4_AVX512VL 2 "register_operand" "v") (match_operand:VI4_AVX512VL 3 "nonimmediate_operand" "vm")] UNSPEC_VPMADDWDACCSSD) (match_operand:VI4_AVX512VL 4 "const0_operand" "C") (match_operand: 5 "register_operand" "Yk")))] "TARGET_AVX512VNNI" "vpdpwssds\t{%3, %2, %0%{%5%}%{z%}|%0%{%5%}%{z%}, %2, %3 }" [(set_attr ("prefix") ("evex"))]) (define_insn "vaesdec_" [(set (match_operand:VI1_AVX512VL_F 0 "register_operand" "=v") (unspec:VI1_AVX512VL_F [(match_operand:VI1_AVX512VL_F 1 "register_operand" "v") (match_operand:VI1_AVX512VL_F 2 "vector_operand" "vm")] UNSPEC_VAESDEC))] "TARGET_VAES" "vaesdec\t{%2, %1, %0|%0, %1, %2}" ) (define_insn "vaesdeclast_" [(set (match_operand:VI1_AVX512VL_F 0 "register_operand" "=v") (unspec:VI1_AVX512VL_F [(match_operand:VI1_AVX512VL_F 1 "register_operand" "v") (match_operand:VI1_AVX512VL_F 2 "vector_operand" "vm")] 
UNSPEC_VAESDECLAST))] "TARGET_VAES" "vaesdeclast\t{%2, %1, %0|%0, %1, %2}" ) (define_insn "vaesenc_" [(set (match_operand:VI1_AVX512VL_F 0 "register_operand" "=v") (unspec:VI1_AVX512VL_F [(match_operand:VI1_AVX512VL_F 1 "register_operand" "v") (match_operand:VI1_AVX512VL_F 2 "vector_operand" "vm")] UNSPEC_VAESENC))] "TARGET_VAES" "vaesenc\t{%2, %1, %0|%0, %1, %2}" ) (define_insn "vaesenclast_" [(set (match_operand:VI1_AVX512VL_F 0 "register_operand" "=v") (unspec:VI1_AVX512VL_F [(match_operand:VI1_AVX512VL_F 1 "register_operand" "v") (match_operand:VI1_AVX512VL_F 2 "vector_operand" "vm")] UNSPEC_VAESENCLAST))] "TARGET_VAES" "vaesenclast\t{%2, %1, %0|%0, %1, %2}" ) (define_insn "vpclmulqdq_" [(set (match_operand:VI8_FVL 0 "register_operand" "=v") (unspec:VI8_FVL [(match_operand:VI8_FVL 1 "register_operand" "v") (match_operand:VI8_FVL 2 "vector_operand" "vm") (match_operand:SI 3 "const_0_to_255_operand" "n")] UNSPEC_VPCLMULQDQ))] "TARGET_VPCLMULQDQ" "vpclmulqdq\t{%3, %2, %1, %0|%0, %1, %2, %3}" [(set_attr "mode" "DI")]) (define_insn "avx512vl_vpshufbitqmb" [(set (match_operand: 0 "register_operand" "=k") (unspec: [(match_operand:VI1_AVX512VLBW 1 "register_operand" "v") (match_operand:VI1_AVX512VLBW 2 "nonimmediate_operand" "vm")] UNSPEC_VPSHUFBIT))] "TARGET_AVX512BITALG" "vpshufbitqmb\t{%2, %1, %0|%0, %1, %2}" [(set_attr "prefix" "evex") (set_attr "mode" "")]) (define_mode_iterator VI48_AVX512VP2VL [V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL") (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")]) (define_mode_iterator MASK_DWI [P2QI P2HI]) (define_expand "mov" [(set (match_operand:MASK_DWI 0 "nonimmediate_operand") (match_operand:MASK_DWI 1 "nonimmediate_operand"))] "TARGET_AVX512VP2INTERSECT" { if (MEM_P (operands[0]) && MEM_P (operands[1])) operands[1] = force_reg (mode, operands[1]); }) (define_insn_and_split "*mov_internal" [(set (match_operand:MASK_DWI 0 "nonimmediate_operand" "=k,o") (match_operand:MASK_DWI 1 "nonimmediate_operand" "ko,k"))] 
"TARGET_AVX512VP2INTERSECT && !(MEM_P (operands[0]) && MEM_P (operands[1]))" "#" "&& reload_completed" [(set (match_dup 0) (match_dup 1)) (set (match_dup 2) (match_dup 3))] { split_double_mode (mode, &operands[0], 2, &operands[0], &operands[2]); }) (define_insn "avx512vp2intersect_2intersect" [(set (match_operand:P2QI 0 "register_operand" "=k") (unspec:P2QI [(match_operand:VI48_AVX512VP2VL 1 "register_operand" "v") (match_operand:VI48_AVX512VP2VL 2 "vector_operand" "vm")] UNSPEC_VP2INTERSECT))] "TARGET_AVX512VP2INTERSECT" "vp2intersect\t{%2, %1, %0|%0, %1, %2}" [(set_attr ("prefix") ("evex"))]) (define_insn "avx512vp2intersect_2intersectv16si" [(set (match_operand:P2HI 0 "register_operand" "=k") (unspec:P2HI [(match_operand:V16SI 1 "register_operand" "v") (match_operand:V16SI 2 "vector_operand" "vm")] UNSPEC_VP2INTERSECT))] "TARGET_AVX512VP2INTERSECT" "vp2intersectd\t{%2, %1, %0|%0, %1, %2}" [(set_attr ("prefix") ("evex"))]) (define_mode_iterator BF16 [V32HI (V16HI "TARGET_AVX512VL") (V8HI "TARGET_AVX512VL")]) ;; Converting from BF to SF (define_mode_attr bf16_cvt_2sf [(V32HI "V16SF") (V16HI "V8SF") (V8HI "V4SF")]) ;; Converting from SF to BF (define_mode_attr sf_cvt_bf16 [(V4SF "V8HI") (V8SF "V8HI") (V16SF "V16HI")]) ;; Mapping from BF to SF (define_mode_attr sf_bf16 [(V4SF "V8HI") (V8SF "V16HI") (V16SF "V32HI")]) (define_expand "avx512f_cvtne2ps2bf16__maskz" [(match_operand:BF16 0 "register_operand") (match_operand: 1 "register_operand") (match_operand: 2 "register_operand") (match_operand: 3 "register_operand")] "TARGET_AVX512BF16" { emit_insn (gen_avx512f_cvtne2ps2bf16__mask(operands[0], operands[1], operands[2], CONST0_RTX(mode), operands[3])); DONE; }) (define_insn "avx512f_cvtne2ps2bf16_" [(set (match_operand:BF16 0 "register_operand" "=v") (unspec:BF16 [(match_operand: 1 "register_operand" "v") (match_operand: 2 "register_operand" "v")] UNSPEC_VCVTNE2PS2BF16))] "TARGET_AVX512BF16" "vcvtne2ps2bf16\t{%2, %1, %0|%0, %1, %2}") (define_expand 
"avx512f_cvtneps2bf16__maskz" [(match_operand: 0 "register_operand") (match_operand:VF1_AVX512VL 1 "register_operand") (match_operand: 2 "register_operand")] "TARGET_AVX512BF16" { emit_insn (gen_avx512f_cvtneps2bf16__mask(operands[0], operands[1], CONST0_RTX(mode), operands[2])); DONE; }) (define_insn "avx512f_cvtneps2bf16_" [(set (match_operand: 0 "register_operand" "=v") (unspec: [(match_operand:VF1_AVX512VL 1 "register_operand" "v")] UNSPEC_VCVTNEPS2BF16))] "TARGET_AVX512BF16" "vcvtneps2bf16\t{%1, %0|%0, %1}") (define_expand "avx512f_dpbf16ps__maskz" [(match_operand:VF1_AVX512VL 0 "register_operand") (match_operand:VF1_AVX512VL 1 "register_operand") (match_operand: 2 "register_operand") (match_operand: 3 "register_operand") (match_operand: 4 "register_operand")] "TARGET_AVX512BF16" { emit_insn (gen_avx512f_dpbf16ps__maskz_1(operands[0], operands[1], operands[2], operands[3], CONST0_RTX(mode), operands[4])); DONE; }) (define_insn "avx512f_dpbf16ps_" [(set (match_operand:VF1_AVX512VL 0 "register_operand" "=v") (unspec:VF1_AVX512VL [(match_operand:VF1_AVX512VL 1 "register_operand" "0") (match_operand: 2 "register_operand" "v") (match_operand: 3 "register_operand" "v")] UNSPEC_VDPBF16PS))] "TARGET_AVX512BF16" "vdpbf16ps\t{%3, %2, %0|%0, %2, %3}") (define_insn "avx512f_dpbf16ps__mask" [(set (match_operand:VF1_AVX512VL 0 "register_operand" "=v") (vec_merge:VF1_AVX512VL (unspec:VF1_AVX512VL [(match_operand:VF1_AVX512VL 1 "register_operand" "0") (match_operand: 2 "register_operand" "v") (match_operand: 3 "register_operand" "v")] UNSPEC_VDPBF16PS) (match_dup 1) (match_operand: 4 "register_operand" "Yk")))] "TARGET_AVX512BF16" "vdpbf16ps\t{%3, %2, %0%{%4%}|%0%{%4%}, %2, %3}") ;; KEYLOCKER (define_insn "loadiwkey" [(unspec_volatile:V2DI [(match_operand:V2DI 0 "register_operand" "v") (match_operand:V2DI 1 "register_operand" "v") (match_operand:V2DI 2 "register_operand" "Yz") (match_operand:SI 3 "register_operand" "a")] UNSPECV_LOADIWKEY) (clobber (reg:CC FLAGS_REG))] 
"TARGET_KL" "loadiwkey\t{%0, %1|%1, %0}" [(set_attr "type" "other")]) (define_expand "encodekey128u32" [(match_par_dup 2 [(set (match_operand:SI 0 "register_operand") (unspec_volatile:SI [(match_operand:SI 1 "register_operand") (reg:V2DI XMM0_REG)] UNSPECV_ENCODEKEY128U32))])] "TARGET_KL" { rtx xmm_regs[7]; rtx tmp_unspec; unsigned i; /* parallel rtx for encodekey128 predicate */ operands[2] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (8)); for (i = 0; i < 7; i++) xmm_regs[i] = gen_rtx_REG (V2DImode, GET_SSE_REGNO (i)); tmp_unspec = gen_rtx_UNSPEC_VOLATILE (SImode, gen_rtvec (2, operands[1], xmm_regs[0]), UNSPECV_ENCODEKEY128U32); XVECEXP (operands[2], 0, 0) = gen_rtx_SET (operands[0], tmp_unspec); tmp_unspec = gen_rtx_UNSPEC_VOLATILE (V2DImode, gen_rtvec (1, const0_rtx), UNSPECV_ENCODEKEY128U32); for (i = 0; i < 3; i++) XVECEXP (operands[2], 0, i + 1) = gen_rtx_SET (xmm_regs[i], tmp_unspec); for (i = 4; i < 7; i++) XVECEXP (operands[2], 0, i) = gen_rtx_SET (xmm_regs[i], CONST0_RTX (V2DImode)); XVECEXP (operands[2], 0, 7) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG)); }) (define_insn "*encodekey128u32" [(match_parallel 2 "encodekey128_operation" [(set (match_operand:SI 0 "register_operand" "=r") (unspec_volatile:SI [(match_operand:SI 1 "register_operand" "r") (reg:V2DI XMM0_REG)] UNSPECV_ENCODEKEY128U32))])] "TARGET_KL" "encodekey128\t{%1, %0|%0, %1}" [(set_attr "type" "other")]) (define_expand "encodekey256u32" [(match_par_dup 2 [(set (match_operand:SI 0 "register_operand") (unspec_volatile:SI [(match_operand:SI 1 "register_operand") (reg:V2DI XMM0_REG) (reg:V2DI XMM1_REG)] UNSPECV_ENCODEKEY256U32))])] "TARGET_KL" { rtx xmm_regs[7]; rtx tmp_unspec; unsigned i; /* parallel rtx for encodekey256 predicate */ operands[2] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (9)); for (i = 0; i < 7; i++) xmm_regs[i] = gen_rtx_REG (V2DImode, GET_SSE_REGNO (i)); tmp_unspec = gen_rtx_UNSPEC_VOLATILE (SImode, gen_rtvec (3, operands[1], xmm_regs[0], xmm_regs[1]), 
UNSPECV_ENCODEKEY256U32); XVECEXP (operands[2], 0, 0) = gen_rtx_SET (operands[0], tmp_unspec); tmp_unspec = gen_rtx_UNSPEC_VOLATILE (V2DImode, gen_rtvec (1, const0_rtx), UNSPECV_ENCODEKEY256U32); for (i = 0; i < 4; i++) XVECEXP (operands[2], 0, i + 1) = gen_rtx_SET (xmm_regs[i], tmp_unspec); for (i = 4; i < 7; i++) XVECEXP (operands[2], 0, i + 1) = gen_rtx_SET (xmm_regs[i], CONST0_RTX (V2DImode)); XVECEXP (operands[2], 0, 8) = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, FLAGS_REG)); }) (define_insn "*encodekey256u32" [(match_parallel 2 "encodekey256_operation" [(set (match_operand:SI 0 "register_operand" "=r") (unspec_volatile:SI [(match_operand:SI 1 "register_operand" "r") (reg:V2DI XMM0_REG) (reg:V2DI XMM1_REG)] UNSPECV_ENCODEKEY256U32))])] "TARGET_KL" "encodekey256\t{%1, %0|%0, %1}" [(set_attr "type" "other")]) (define_int_iterator AESDECENCKL [UNSPECV_AESDEC128KLU8 UNSPECV_AESDEC256KLU8 UNSPECV_AESENC128KLU8 UNSPECV_AESENC256KLU8]) (define_int_attr aesklvariant [(UNSPECV_AESDEC128KLU8 "dec128kl") (UNSPECV_AESDEC256KLU8 "dec256kl") (UNSPECV_AESENC128KLU8 "enc128kl") (UNSPECV_AESENC256KLU8 "enc256kl")]) (define_insn "aesu8" [(set (match_operand:V2DI 0 "register_operand" "=v") (unspec_volatile:V2DI [(match_operand:V2DI 1 "register_operand" "0") (match_operand:BLK 2 "memory_operand" "m")] AESDECENCKL)) (set (reg:CCZ FLAGS_REG) (unspec_volatile:CCZ [(match_dup 1) (match_dup 2)] AESDECENCKL))] "TARGET_KL" "aes\t{%2, %0|%0, %2}" [(set_attr "type" "other")]) (define_int_iterator AESDECENCWIDEKL [UNSPECV_AESDECWIDE128KLU8 UNSPECV_AESDECWIDE256KLU8 UNSPECV_AESENCWIDE128KLU8 UNSPECV_AESENCWIDE256KLU8]) (define_int_attr aeswideklvariant [(UNSPECV_AESDECWIDE128KLU8 "decwide128kl") (UNSPECV_AESDECWIDE256KLU8 "decwide256kl") (UNSPECV_AESENCWIDE128KLU8 "encwide128kl") (UNSPECV_AESENCWIDE256KLU8 "encwide256kl")]) (define_int_attr AESWIDEKLVARIANT [(UNSPECV_AESDECWIDE128KLU8 "AESDECWIDE128KLU8") (UNSPECV_AESDECWIDE256KLU8 "AESDECWIDE256KLU8") (UNSPECV_AESENCWIDE128KLU8 
"AESENCWIDE128KLU8") (UNSPECV_AESENCWIDE256KLU8 "AESENCWIDE256KLU8")]) (define_expand "aesu8" [(match_par_dup 1 [(set (reg:CCZ FLAGS_REG) (unspec_volatile:CCZ [(match_operand:BLK 0 "memory_operand")] AESDECENCWIDEKL))])] "TARGET_WIDEKL" { rtx tmp_unspec; unsigned i; /* parallel rtx for widekl predicate */ operands[1] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (9)); tmp_unspec = gen_rtx_UNSPEC_VOLATILE (CCZmode, gen_rtvec (1, operands[0]), UNSPECV_); XVECEXP (operands[1], 0, 0) = gen_rtx_SET (gen_rtx_REG (CCZmode, FLAGS_REG), tmp_unspec); for (i = 0; i < 8; i++) { rtx xmm_reg = gen_rtx_REG (V2DImode, GET_SSE_REGNO (i)); tmp_unspec = gen_rtx_UNSPEC_VOLATILE (V2DImode, gen_rtvec (1, xmm_reg), UNSPECV_); XVECEXP (operands[1], 0, i + 1) = gen_rtx_SET (xmm_reg, tmp_unspec); } }) (define_insn "*aesu8" [(match_parallel 1 "aeswidekl_operation" [(set (reg:CCZ FLAGS_REG) (unspec_volatile:CCZ [(match_operand:BLK 0 "memory_operand" "m")] AESDECENCWIDEKL))])] "TARGET_WIDEKL" "aes\t%0" [(set_attr "type" "other")]) ;; Modes handled by broadcast patterns. NB: Allow V64QI and V32HI with ;; TARGET_AVX512F since ix86_expand_vector_init_duplicate can expand ;; without TARGET_AVX512BW which is used by memset vector broadcast ;; expander to XI with: ;; vmovd %edi, %xmm15 ;; vpbroadcastb %xmm15, %ymm15 ;; vinserti64x4 $0x1, %ymm15, %zmm15, %zmm15 (define_mode_iterator INT_BROADCAST_MODE [(V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI (V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI (V8DI "TARGET_AVX512F && TARGET_64BIT") (V4DI "TARGET_AVX && TARGET_64BIT") (V2DI "TARGET_64BIT")]) ;; Broadcast from an integer. NB: Enable broadcast only if we can move ;; from GPR to SSE register directly. 
(define_expand "vec_duplicate" [(set (match_operand:INT_BROADCAST_MODE 0 "register_operand") (vec_duplicate:INT_BROADCAST_MODE (match_operand: 1 "nonimmediate_operand")))] "TARGET_SSE2 && TARGET_INTER_UNIT_MOVES_TO_VEC" { if (!ix86_expand_vector_init_duplicate (false, GET_MODE (operands[0]), operands[0], operands[1])) gcc_unreachable (); DONE; })