Diffstat (limited to 'gcc/config/i386/sse.md')
-rw-r--r--   gcc/config/i386/sse.md   1398
1 file changed, 717 insertions(+), 681 deletions(-)
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index b280676..ec74f93 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -279,63 +279,63 @@
;; All vector modes including V?TImode, used in move patterns.
(define_mode_iterator VMOVE
- [(V64QI "TARGET_AVX512F && TARGET_EVEX512") (V32QI "TARGET_AVX") V16QI
- (V32HI "TARGET_AVX512F && TARGET_EVEX512") (V16HI "TARGET_AVX") V8HI
- (V16SI "TARGET_AVX512F && TARGET_EVEX512") (V8SI "TARGET_AVX") V4SI
- (V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX") V2DI
- (V4TI "TARGET_AVX512F && TARGET_EVEX512") (V2TI "TARGET_AVX") V1TI
- (V32HF "TARGET_AVX512F && TARGET_EVEX512") (V16HF "TARGET_AVX") V8HF
- (V32BF "TARGET_AVX512F && TARGET_EVEX512") (V16BF "TARGET_AVX") V8BF
- (V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX") V4SF
- (V8DF "TARGET_AVX512F && TARGET_EVEX512") (V4DF "TARGET_AVX") V2DF])
+ [(V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI
+ (V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI
+ (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
+ (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI
+ (V4TI "TARGET_AVX512F") (V2TI "TARGET_AVX") V1TI
+ (V32HF "TARGET_AVX512F") (V16HF "TARGET_AVX") V8HF
+ (V32BF "TARGET_AVX512F") (V16BF "TARGET_AVX") V8BF
+ (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
+ (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") V2DF])
;; All AVX-512{F,VL} vector modes without HF. Supposed TARGET_AVX512F baseline.
(define_mode_iterator V48_AVX512VL
- [(V16SI "TARGET_EVEX512") (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
- (V8DI "TARGET_EVEX512") (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")
- (V16SF "TARGET_EVEX512") (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
- (V8DF "TARGET_EVEX512") (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
+ [V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
+ V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")
+ V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
+ V8DF (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
(define_mode_iterator V48_256_512_AVX512VL
- [(V16SI "TARGET_EVEX512") (V8SI "TARGET_AVX512VL")
- (V8DI "TARGET_EVEX512") (V4DI "TARGET_AVX512VL")
- (V16SF "TARGET_EVEX512") (V8SF "TARGET_AVX512VL")
- (V8DF "TARGET_EVEX512") (V4DF "TARGET_AVX512VL")])
+ [V16SI (V8SI "TARGET_AVX512VL")
+ V8DI (V4DI "TARGET_AVX512VL")
+ V16SF (V8SF "TARGET_AVX512VL")
+ V8DF (V4DF "TARGET_AVX512VL")])
;; All AVX-512{F,VL} vector modes. Supposed TARGET_AVX512F baseline.
(define_mode_iterator V48H_AVX512VL
- [(V16SI "TARGET_EVEX512") (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
- (V8DI "TARGET_EVEX512") (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")
- (V32HF "TARGET_AVX512FP16 && TARGET_EVEX512")
+ [V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
+ V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")
+ (V32HF "TARGET_AVX512FP16")
(V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
(V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
- (V16SF "TARGET_EVEX512") (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
- (V8DF "TARGET_EVEX512") (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
+ V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
+ V8DF (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
;; 1,2 byte AVX-512{BW,VL} vector modes. Supposed TARGET_AVX512BW baseline.
(define_mode_iterator VI12_AVX512VL
- [(V64QI "TARGET_EVEX512") (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")
- (V32HI "TARGET_EVEX512") (V16HI "TARGET_AVX512VL") (V8HI "TARGET_AVX512VL")])
+ [V64QI (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")
+ V32HI (V16HI "TARGET_AVX512VL") (V8HI "TARGET_AVX512VL")])
(define_mode_iterator VI12HFBF_AVX512VL
- [(V64QI "TARGET_EVEX512") (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")
- (V32HI "TARGET_EVEX512") (V16HI "TARGET_AVX512VL") (V8HI "TARGET_AVX512VL")
- (V32HF "TARGET_EVEX512") (V16HF "TARGET_AVX512VL") (V8HF "TARGET_AVX512VL")
- (V32BF "TARGET_EVEX512") (V16BF "TARGET_AVX512VL") (V8BF "TARGET_AVX512VL")])
+ [V64QI (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")
+ V32HI (V16HI "TARGET_AVX512VL") (V8HI "TARGET_AVX512VL")
+ V32HF (V16HF "TARGET_AVX512VL") (V8HF "TARGET_AVX512VL")
+ V32BF (V16BF "TARGET_AVX512VL") (V8BF "TARGET_AVX512VL")])
(define_mode_iterator VI1_AVX512VL
- [(V64QI "TARGET_EVEX512") (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")])
+ [V64QI (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")])
;; All vector modes
(define_mode_iterator V
- [(V64QI "TARGET_AVX512F && TARGET_EVEX512") (V32QI "TARGET_AVX") V16QI
- (V32HI "TARGET_AVX512F && TARGET_EVEX512") (V16HI "TARGET_AVX") V8HI
- (V16SI "TARGET_AVX512F && TARGET_EVEX512") (V8SI "TARGET_AVX") V4SI
- (V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX") V2DI
- (V32HF "TARGET_AVX512F && TARGET_EVEX512") (V16HF "TARGET_AVX") V8HF
- (V32BF "TARGET_AVX512F && TARGET_EVEX512") (V16BF "TARGET_AVX") V8BF
- (V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX") V4SF
- (V8DF "TARGET_AVX512F && TARGET_EVEX512") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
+ [(V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI
+ (V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI
+ (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
+ (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI
+ (V32HF "TARGET_AVX512F") (V16HF "TARGET_AVX") V8HF
+ (V32BF "TARGET_AVX512F") (V16BF "TARGET_AVX") V8BF
+ (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
+ (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
;; All 128bit vector modes
(define_mode_iterator V_128
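Note: the hunk above only drops the TARGET_EVEX512 clause from these iterator entries, so the 512-bit modes end up gated by TARGET_AVX512F (the narrower modes keep their TARGET_AVX/SSE gates). As a hedged, user-level illustration, assuming only that TARGET_AVX512F corresponds to compiling with -mavx512f and using a made-up function name, a 512-bit integer operation such as the following relies on exactly the V16SI entries relaxed here:

/* Hypothetical illustration: a V16SImode addition that the relaxed
   iterators allow under plain AVX512F.  Intrinsics are the standard
   ones from immintrin.h. */
#include <immintrin.h>

__m512i
add16si (__m512i a, __m512i b)
{
  return _mm512_add_epi32 (a, b);
}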
@@ -352,54 +352,44 @@
;; All 512bit vector modes
(define_mode_iterator V_512
- [(V64QI "TARGET_EVEX512") (V32HI "TARGET_EVEX512")
- (V16SI "TARGET_EVEX512") (V8DI "TARGET_EVEX512")
- (V16SF "TARGET_EVEX512") (V8DF "TARGET_EVEX512")
- (V32HF "TARGET_EVEX512") (V32BF "TARGET_EVEX512")])
+ [V64QI V32HI V16SI V8DI
+ V16SF V8DF V32HF V32BF])
;; All 256bit and 512bit vector modes
(define_mode_iterator V_256_512
[V32QI V16HI V16HF V16BF V8SI V4DI V8SF V4DF
- (V64QI "TARGET_AVX512F && TARGET_EVEX512")
- (V32HI "TARGET_AVX512F && TARGET_EVEX512")
- (V32HF "TARGET_AVX512F && TARGET_EVEX512")
- (V32BF "TARGET_AVX512F && TARGET_EVEX512")
- (V16SI "TARGET_AVX512F && TARGET_EVEX512")
- (V8DI "TARGET_AVX512F && TARGET_EVEX512")
- (V16SF "TARGET_AVX512F && TARGET_EVEX512")
- (V8DF "TARGET_AVX512F && TARGET_EVEX512")])
+ (V64QI "TARGET_AVX512F") (V32HI "TARGET_AVX512F")
+ (V32HF "TARGET_AVX512F") (V32BF "TARGET_AVX512F")
+ (V16SI "TARGET_AVX512F") (V8DI "TARGET_AVX512F")
+ (V16SF "TARGET_AVX512F") (V8DF "TARGET_AVX512F")])
;; All vector float modes
(define_mode_iterator VF
- [(V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX") V4SF
- (V8DF "TARGET_AVX512F && TARGET_EVEX512") (V4DF "TARGET_AVX")
+ [(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
+ (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX")
(V2DF "TARGET_SSE2")])
(define_mode_iterator VF1_VF2_AVX512DQ
- [(V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX") V4SF
- (V8DF "TARGET_AVX512DQ && TARGET_EVEX512")
+ [(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
+ (V8DF "TARGET_AVX512DQ")
(V4DF "TARGET_AVX512DQ && TARGET_AVX512VL")
(V2DF "TARGET_AVX512DQ && TARGET_AVX512VL")])
-(define_mode_iterator VF1_VF2_AVX10_2
- [(V16SF "TARGET_AVX10_2") V8SF V4SF
- (V8DF "TARGET_AVX10_2") V4DF V2DF])
-
(define_mode_iterator VFH
- [(V32HF "TARGET_AVX512FP16 && TARGET_EVEX512")
+ [(V32HF "TARGET_AVX512FP16")
(V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
(V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
- (V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX") V4SF
- (V8DF "TARGET_AVX512F && TARGET_EVEX512") (V4DF "TARGET_AVX")
+ (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
+ (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX")
(V2DF "TARGET_SSE2")])
(define_mode_iterator VF_BHSD
- [(V32HF "TARGET_AVX512FP16 && TARGET_EVEX512")
+ [(V32HF "TARGET_AVX512FP16")
(V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
(V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
- (V16SF "TARGET_AVX512F && TARGET_EVEX512")
+ (V16SF "TARGET_AVX512F")
(V8SF "TARGET_AVX") V4SF
- (V8DF "TARGET_AVX512F && TARGET_EVEX512")
+ (V8DF "TARGET_AVX512F")
(V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")
(V32BF "TARGET_AVX10_2")
(V16BF "TARGET_AVX10_2")
@@ -408,12 +398,12 @@
;; 128-, 256- and 512-bit float vector modes for bitwise operations
(define_mode_iterator VFB
- [(V32BF "TARGET_AVX512F && TARGET_EVEX512")
+ [(V32BF "TARGET_AVX512F")
(V16BF "TARGET_AVX") (V8BF "TARGET_SSE2")
- (V32HF "TARGET_AVX512F && TARGET_EVEX512")
+ (V32HF "TARGET_AVX512F")
(V16HF "TARGET_AVX") (V8HF "TARGET_SSE2")
- (V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX") V4SF
- (V8DF "TARGET_AVX512F && TARGET_EVEX512")
+ (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
+ (V8DF "TARGET_AVX512F")
(V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
;; 128- and 256-bit float vector modes
@@ -430,44 +420,39 @@
;; All SFmode vector float modes
(define_mode_iterator VF1
- [(V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX") V4SF])
+ [(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF])
(define_mode_iterator VF1_AVX2
- [(V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX2") V4SF])
+ [(V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX2") V4SF])
;; 128- and 256-bit SF vector modes
(define_mode_iterator VF1_128_256
[(V8SF "TARGET_AVX") V4SF])
(define_mode_iterator VF1_128_256VL
- [(V8SF "TARGET_EVEX512") (V4SF "TARGET_AVX512VL")])
+ [V8SF (V4SF "TARGET_AVX512VL")])
;; All DFmode vector float modes
(define_mode_iterator VF2
- [(V8DF "TARGET_AVX512F && TARGET_EVEX512") (V4DF "TARGET_AVX") V2DF])
-
-(define_mode_iterator VF2_AVX10_2
- [(V8DF "TARGET_AVX10_2") V4DF V2DF])
+ [(V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") V2DF])
;; All DFmode & HFmode & BFmode vector float modes
(define_mode_iterator VF2HB
- [(V32BF "TARGET_AVX10_2")
- (V16BF "TARGET_AVX10_2")
- (V8BF "TARGET_AVX10_2")
- (V32HF "TARGET_AVX512FP16 && TARGET_EVEX512")
+ [(V32BF "TARGET_AVX10_2") (V16BF "TARGET_AVX10_2")
+ (V8BF "TARGET_AVX10_2") (V32HF "TARGET_AVX512FP16")
(V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
(V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
- (V8DF "TARGET_AVX512F && TARGET_EVEX512") (V4DF "TARGET_AVX") V2DF])
+ (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") V2DF])
;; 128- and 256-bit DF vector modes
(define_mode_iterator VF2_128_256
[(V4DF "TARGET_AVX") V2DF])
(define_mode_iterator VF2_512_256
- [(V8DF "TARGET_AVX512F && TARGET_EVEX512") V4DF])
+ [(V8DF "TARGET_AVX512F") V4DF])
(define_mode_iterator VF2_512_256VL
- [(V8DF "TARGET_EVEX512") (V4DF "TARGET_AVX512VL")])
+ [V8DF (V4DF "TARGET_AVX512VL")])
;; All 128bit vector SF/DF modes
(define_mode_iterator VF_128
@@ -484,116 +469,102 @@
;; All 512bit vector float modes
(define_mode_iterator VF_512
- [(V16SF "TARGET_EVEX512") (V8DF "TARGET_EVEX512")])
+ [V16SF V8DF])
;; All 512bit vector float modes for bitwise operations
(define_mode_iterator VFB_512
- [(V32BF "TARGET_EVEX512")
- (V32HF "TARGET_EVEX512")
- (V16SF "TARGET_EVEX512")
- (V8DF "TARGET_EVEX512")])
+ [V32BF V32HF V16SF V8DF])
(define_mode_iterator V24F_128
[V4SF V8HF V8BF])
(define_mode_iterator VI48_AVX512VL
- [(V16SI "TARGET_EVEX512") (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
- (V8DI "TARGET_EVEX512") (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])
+ [V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
+ V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])
(define_mode_iterator VI1248_AVX512VLBW
- [(V64QI "TARGET_AVX512BW && TARGET_EVEX512")
+ [(V64QI "TARGET_AVX512BW")
(V32QI "TARGET_AVX512VL && TARGET_AVX512BW")
(V16QI "TARGET_AVX512VL && TARGET_AVX512BW")
- (V32HI "TARGET_AVX512BW && TARGET_EVEX512")
+ (V32HI "TARGET_AVX512BW")
(V16HI "TARGET_AVX512VL && TARGET_AVX512BW")
(V8HI "TARGET_AVX512VL && TARGET_AVX512BW")
- (V16SI "TARGET_EVEX512") (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
- (V8DI "TARGET_EVEX512") (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])
-
-(define_mode_iterator VI1248_AVX10_2
- [(V64QI "TARGET_AVX10_2") V32QI V16QI
- (V32HI "TARGET_AVX10_2") V16HI V8HI
- (V16SI "TARGET_AVX10_2") V8SI V4SI
- (V8DI "TARGET_AVX10_2") V4DI V2DI])
+ V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
+ V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])
(define_mode_iterator VF_AVX512VL
- [(V16SF "TARGET_EVEX512") (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
- (V8DF "TARGET_EVEX512") (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
+ [V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
+ V8DF (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
(define_mode_iterator VFH_AVX512VL
- [(V32HF "TARGET_AVX512FP16 && TARGET_EVEX512")
+ [(V32HF "TARGET_AVX512FP16")
(V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
(V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
- (V16SF "TARGET_EVEX512") (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
- (V8DF "TARGET_EVEX512") (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
+ V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
+ V8DF (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
+
+(define_mode_iterator V48_AVX512VL_4
+ [(V4SF "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")
+ (V4SI "TARGET_AVX512VL") (V4DI "TARGET_AVX512VL")])
+
+(define_mode_iterator VI48_AVX512VL_4
+ [(V4SI "TARGET_AVX512VL") (V4DI "TARGET_AVX512VL")])
-(define_mode_iterator VFH_AVX10_2
- [(V32HF "TARGET_AVX10_2") V16HF V8HF
- (V16SF "TARGET_AVX10_2") V8SF V4SF
- (V8DF "TARGET_AVX10_2") V4DF V2DF])
+(define_mode_iterator V8_AVX512VL_2
+ [(V2DF "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])
(define_mode_iterator VF2_AVX512VL
- [(V8DF "TARGET_EVEX512") (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
+ [V8DF (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
(define_mode_iterator VF1_AVX512VL
- [(V16SF "TARGET_EVEX512") (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")])
+ [V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")])
(define_mode_iterator VF1_AVX512BW
- [(V16SF "TARGET_AVX512BW && TARGET_EVEX512") (V8SF "TARGET_AVX2") V4SF])
-
-(define_mode_iterator VF1_AVX10_2
- [(V16SF "TARGET_AVX10_2") V8SF V4SF])
+ [(V16SF "TARGET_AVX512BW") (V8SF "TARGET_AVX2") V4SF])
(define_mode_iterator VHFBF
- [(V32HF "TARGET_EVEX512") V16HF V8HF
- (V32BF "TARGET_EVEX512") V16BF V8BF])
+ [V32HF V16HF V8HF V32BF V16BF V8BF])
(define_mode_iterator VHFBF_256 [V16HF V16BF])
(define_mode_iterator VHFBF_128 [V8HF V8BF])
(define_mode_iterator VHF_AVX512VL
- [(V32HF "TARGET_EVEX512") (V16HF "TARGET_AVX512VL") (V8HF "TARGET_AVX512VL")])
+ [V32HF (V16HF "TARGET_AVX512VL") (V8HF "TARGET_AVX512VL")])
(define_mode_iterator VHFBF_AVX512VL
- [(V32HF "TARGET_EVEX512") (V16HF "TARGET_AVX512VL") (V8HF "TARGET_AVX512VL")
- (V32BF "TARGET_EVEX512") (V16BF "TARGET_AVX512VL") (V8BF "TARGET_AVX512VL")])
-
-(define_mode_iterator VHF_AVX10_2
- [(V32HF "TARGET_AVX10_2") V16HF V8HF])
+ [V32HF (V16HF "TARGET_AVX512VL") (V8HF "TARGET_AVX512VL")
+ V32BF (V16BF "TARGET_AVX512VL") (V8BF "TARGET_AVX512VL")])
-(define_mode_iterator VBF_AVX10_2
- [(V32BF "TARGET_AVX10_2") V16BF V8BF])
+(define_mode_iterator VBF
+ [V32BF V16BF V8BF])
;; All vector integer modes
(define_mode_iterator VI
- [(V16SI "TARGET_AVX512F && TARGET_EVEX512")
- (V8DI "TARGET_AVX512F && TARGET_EVEX512")
- (V64QI "TARGET_AVX512BW && TARGET_EVEX512") (V32QI "TARGET_AVX") V16QI
- (V32HI "TARGET_AVX512BW && TARGET_EVEX512") (V16HI "TARGET_AVX") V8HI
+ [(V16SI "TARGET_AVX512F") (V8DI "TARGET_AVX512F")
+ (V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX") V16QI
+ (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX") V8HI
(V8SI "TARGET_AVX") V4SI
(V4DI "TARGET_AVX") V2DI])
;; All vector integer and HF modes
(define_mode_iterator VIHFBF
- [(V16SI "TARGET_AVX512F && TARGET_EVEX512")
- (V8DI "TARGET_AVX512F && TARGET_EVEX512")
- (V64QI "TARGET_AVX512BW && TARGET_EVEX512") (V32QI "TARGET_AVX") V16QI
- (V32HI "TARGET_AVX512BW && TARGET_EVEX512") (V16HI "TARGET_AVX") V8HI
- (V8SI "TARGET_AVX") V4SI
- (V4DI "TARGET_AVX") V2DI
- (V32HF "TARGET_AVX512BW && TARGET_EVEX512") (V16HF "TARGET_AVX") V8HF
- (V32BF "TARGET_AVX512BW && TARGET_EVEX512") (V16BF "TARGET_AVX") V8BF])
+ [(V16SI "TARGET_AVX512F") (V8DI "TARGET_AVX512F")
+ (V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX") V16QI
+ (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX") V8HI
+ (V8SI "TARGET_AVX") V4SI (V4DI "TARGET_AVX") V2DI
+ (V32HF "TARGET_AVX512BW") (V16HF "TARGET_AVX") V8HF
+ (V32BF "TARGET_AVX512BW") (V16BF "TARGET_AVX") V8BF])
(define_mode_iterator VI_AVX2
- [(V64QI "TARGET_AVX512BW && TARGET_EVEX512") (V32QI "TARGET_AVX2") V16QI
- (V32HI "TARGET_AVX512BW && TARGET_EVEX512") (V16HI "TARGET_AVX2") V8HI
- (V16SI "TARGET_AVX512F && TARGET_EVEX512") (V8SI "TARGET_AVX2") V4SI
- (V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX2") V2DI])
+ [(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX2") V16QI
+ (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX2") V8HI
+ (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX2") V4SI
+ (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX2") V2DI])
(define_mode_iterator VI_AVX_AVX512F
- [(V64QI "TARGET_AVX512F && TARGET_EVEX512") (V32QI "TARGET_AVX") V16QI
- (V32HI "TARGET_AVX512F && TARGET_EVEX512") (V16HI "TARGET_AVX") V8HI
- (V16SI "TARGET_AVX512F && TARGET_EVEX512") (V8SI "TARGET_AVX") V4SI
- (V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX") V2DI])
+ [(V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI
+ (V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI
+ (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
+ (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI])
;; All QImode vector integer modes
(define_mode_iterator VI1
@@ -611,56 +582,50 @@
(V8SI "TARGET_AVX") (V4DI "TARGET_AVX")])
(define_mode_iterator VI8
- [(V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX") V2DI])
-
-(define_mode_iterator VI8_AVX10_2
- [(V8DI "TARGET_AVX10_2") V4DI V2DI])
+ [(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI])
(define_mode_iterator VI8_FVL
- [(V8DI "TARGET_AVX512F && TARGET_EVEX512") V4DI (V2DI "TARGET_AVX512VL")])
+ [(V8DI "TARGET_AVX512F") V4DI (V2DI "TARGET_AVX512VL")])
(define_mode_iterator VI8_AVX512VL
- [(V8DI "TARGET_EVEX512") (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])
+ [V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])
(define_mode_iterator VI8_256_512
- [(V8DI "TARGET_EVEX512") (V4DI "TARGET_AVX512VL")])
+ [V8DI (V4DI "TARGET_AVX512VL")])
(define_mode_iterator VI1_AVX2
[(V32QI "TARGET_AVX2") V16QI])
(define_mode_iterator VI1_AVX512
- [(V64QI "TARGET_AVX512BW && TARGET_EVEX512") (V32QI "TARGET_AVX2") V16QI])
+ [(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX2") V16QI])
(define_mode_iterator VI1_AVX512F
- [(V64QI "TARGET_AVX512F && TARGET_EVEX512") (V32QI "TARGET_AVX") V16QI])
+ [(V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI])
(define_mode_iterator VI1_AVX512VNNI
- [(V64QI "TARGET_AVX512VNNI && TARGET_EVEX512") (V32QI "TARGET_AVX2") V16QI])
+ [(V64QI "TARGET_AVX512VNNI") (V32QI "TARGET_AVX2") V16QI])
(define_mode_iterator VI1_AVX512VNNIBW
- [(V64QI "(TARGET_AVX512BW || TARGET_AVX512VNNI) && TARGET_EVEX512")
+ [(V64QI "TARGET_AVX512BW || TARGET_AVX512VNNI")
(V32QI "TARGET_AVX2") V16QI])
(define_mode_iterator VI12_256_512_AVX512VL
- [(V64QI "TARGET_EVEX512") (V32QI "TARGET_AVX512VL")
- (V32HI "TARGET_EVEX512") (V16HI "TARGET_AVX512VL")])
+ [V64QI (V32QI "TARGET_AVX512VL")
+ V32HI (V16HI "TARGET_AVX512VL")])
(define_mode_iterator VI2_AVX2
[(V16HI "TARGET_AVX2") V8HI])
(define_mode_iterator VI2_AVX2_AVX512BW
- [(V32HI "TARGET_AVX512BW && TARGET_EVEX512") (V16HI "TARGET_AVX2") V8HI])
+ [(V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX2") V8HI])
(define_mode_iterator VI2_AVX512F
- [(V32HI "TARGET_AVX512F && TARGET_EVEX512") (V16HI "TARGET_AVX2") V8HI])
+ [(V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX2") V8HI])
(define_mode_iterator VI2_AVX512VNNIBW
- [(V32HI "(TARGET_AVX512BW || TARGET_AVX512VNNI) && TARGET_EVEX512")
+ [(V32HI "TARGET_AVX512BW || TARGET_AVX512VNNI")
(V16HI "TARGET_AVX2") V8HI])
-(define_mode_iterator VI2_AVX10_2
- [(V32HI "TARGET_AVX10_2") V16HI V8HI])
-
(define_mode_iterator VI4_AVX
[(V8SI "TARGET_AVX") V4SI])
@@ -668,65 +633,64 @@
[(V8SI "TARGET_AVX2") V4SI])
(define_mode_iterator VI4_AVX512F
- [(V16SI "TARGET_AVX512F && TARGET_EVEX512") (V8SI "TARGET_AVX2") V4SI])
+ [(V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX2") V4SI])
(define_mode_iterator VI4_AVX512VL
- [(V16SI "TARGET_EVEX512") (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")])
+ [V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")])
(define_mode_iterator VI4_AVX10_2
[(V16SI "TARGET_AVX10_2") V8SI V4SI])
(define_mode_iterator VI48_AVX512F_AVX512VL
- [V4SI V8SI (V16SI "TARGET_AVX512F && TARGET_EVEX512")
+ [V4SI V8SI (V16SI "TARGET_AVX512F")
(V2DI "TARGET_AVX512VL") (V4DI "TARGET_AVX512VL")
- (V8DI "TARGET_AVX512F && TARGET_EVEX512")])
+ (V8DI "TARGET_AVX512F")])
(define_mode_iterator VI2_AVX512VL
- [(V8HI "TARGET_AVX512VL") (V16HI "TARGET_AVX512VL") (V32HI "TARGET_EVEX512")])
+ [(V8HI "TARGET_AVX512VL") (V16HI "TARGET_AVX512VL") V32HI])
(define_mode_iterator VI2HFBF_AVX512VL
- [(V8HI "TARGET_AVX512VL") (V16HI "TARGET_AVX512VL") (V32HI "TARGET_EVEX512")
- (V8HF "TARGET_AVX512VL") (V16HF "TARGET_AVX512VL") (V32HF "TARGET_EVEX512")
- (V8BF "TARGET_AVX512VL") (V16BF "TARGET_AVX512VL") (V32BF "TARGET_EVEX512")])
+ [(V8HI "TARGET_AVX512VL") (V16HI "TARGET_AVX512VL") V32HI
+ (V8HF "TARGET_AVX512VL") (V16HF "TARGET_AVX512VL") V32HF
+ (V8BF "TARGET_AVX512VL") (V16BF "TARGET_AVX512VL") V32BF])
(define_mode_iterator VI2H_AVX512VL
- [(V8HI "TARGET_AVX512VL") (V16HI "TARGET_AVX512VL") (V32HI "TARGET_EVEX512")
- (V8SI "TARGET_AVX512VL") (V16SI "TARGET_EVEX512")
- (V8DI "TARGET_EVEX512")])
+ [(V8HI "TARGET_AVX512VL") (V16HI "TARGET_AVX512VL") V32HI
+ (V8SI "TARGET_AVX512VL") V16SI V8DI])
(define_mode_iterator VI1_AVX512VL_F
- [V32QI (V16QI "TARGET_AVX512VL") (V64QI "TARGET_AVX512F && TARGET_EVEX512")])
+ [V32QI (V16QI "TARGET_AVX512VL") (V64QI "TARGET_AVX512F")])
(define_mode_iterator VI8_AVX2_AVX512BW
- [(V8DI "TARGET_AVX512BW && TARGET_EVEX512") (V4DI "TARGET_AVX2") V2DI])
+ [(V8DI "TARGET_AVX512BW") (V4DI "TARGET_AVX2") V2DI])
(define_mode_iterator VI8_AVX2
[(V4DI "TARGET_AVX2") V2DI])
(define_mode_iterator VI8_AVX2_AVX512F
- [(V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX2") V2DI])
+ [(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX2") V2DI])
(define_mode_iterator VI8_AVX_AVX512F
- [(V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX")])
+ [(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX")])
(define_mode_iterator VI4_128_8_256
[V4SI V4DI])
;; All V8D* modes
(define_mode_iterator V8FI
- [(V8DF "TARGET_EVEX512") (V8DI "TARGET_EVEX512")])
+ [V8DF V8DI])
;; All V16S* modes
(define_mode_iterator V16FI
- [(V16SF "TARGET_EVEX512") (V16SI "TARGET_EVEX512")])
+ [V16SF V16SI])
;; ??? We should probably use TImode instead.
(define_mode_iterator VIMAX_AVX2_AVX512BW
- [(V4TI "TARGET_AVX512BW && TARGET_EVEX512") (V2TI "TARGET_AVX2") V1TI])
+ [(V4TI "TARGET_AVX512BW") (V2TI "TARGET_AVX2") V1TI])
;; Suppose TARGET_AVX512BW as baseline
(define_mode_iterator VIMAX_AVX512VL
- [(V4TI "TARGET_EVEX512") (V2TI "TARGET_AVX512VL") (V1TI "TARGET_AVX512VL")])
+ [V4TI (V2TI "TARGET_AVX512VL") (V1TI "TARGET_AVX512VL")])
(define_mode_iterator VIMAX_AVX2
[(V2TI "TARGET_AVX2") V1TI])
@@ -736,17 +700,17 @@
(V16HI "TARGET_AVX2") V8HI])
(define_mode_iterator VI12_AVX2_AVX512BW
- [(V64QI "TARGET_AVX512BW && TARGET_EVEX512") (V32QI "TARGET_AVX2") V16QI
- (V32HI "TARGET_AVX512BW && TARGET_EVEX512") (V16HI "TARGET_AVX2") V8HI])
+ [(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX2") V16QI
+ (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX2") V8HI])
(define_mode_iterator VI24_AVX2
[(V16HI "TARGET_AVX2") V8HI
(V8SI "TARGET_AVX2") V4SI])
(define_mode_iterator VI124_AVX2_24_AVX512F_1_AVX512BW
- [(V64QI "TARGET_AVX512BW && TARGET_EVEX512") (V32QI "TARGET_AVX2") V16QI
- (V32HI "TARGET_AVX512F && TARGET_EVEX512") (V16HI "TARGET_AVX2") V8HI
- (V16SI "TARGET_AVX512F && TARGET_EVEX512") (V8SI "TARGET_AVX2") V4SI])
+ [(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX2") V16QI
+ (V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX2") V8HI
+ (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX2") V4SI])
(define_mode_iterator VI124_AVX2
[(V32QI "TARGET_AVX2") V16QI
@@ -754,17 +718,17 @@
(V8SI "TARGET_AVX2") V4SI])
(define_mode_iterator VI248_AVX512VL
- [(V32HI "TARGET_EVEX512") (V16SI "TARGET_EVEX512") (V8DI "TARGET_EVEX512")
+ [V32HI V16SI V8DI
(V16HI "TARGET_AVX512VL") (V8SI "TARGET_AVX512VL")
(V4DI "TARGET_AVX512VL") (V8HI "TARGET_AVX512VL")
(V4SI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])
(define_mode_iterator VI248_AVX512VLBW
- [(V32HI "TARGET_AVX512BW && TARGET_EVEX512")
+ [(V32HI "TARGET_AVX512BW")
(V16HI "TARGET_AVX512VL && TARGET_AVX512BW")
(V8HI "TARGET_AVX512VL && TARGET_AVX512BW")
- (V16SI "TARGET_EVEX512") (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
- (V8DI "TARGET_EVEX512") (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])
+ V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
+ V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")])
(define_mode_iterator VI48_AVX2
[(V8SI "TARGET_AVX2") V4SI
@@ -776,17 +740,16 @@
(V4DI "TARGET_AVX2") V2DI])
(define_mode_iterator VI248_AVX2_8_AVX512F_24_AVX512BW
- [(V32HI "TARGET_AVX512BW && TARGET_EVEX512") (V16HI "TARGET_AVX2") V8HI
- (V16SI "TARGET_AVX512BW && TARGET_EVEX512") (V8SI "TARGET_AVX2") V4SI
- (V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX2") V2DI])
+ [(V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX2") V8HI
+ (V16SI "TARGET_AVX512BW") (V8SI "TARGET_AVX2") V4SI
+ (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX2") V2DI])
(define_mode_iterator VI248_AVX512BW
- [(V32HI "TARGET_AVX512BW && TARGET_EVEX512") (V16SI "TARGET_EVEX512")
- (V8DI "TARGET_EVEX512")])
+ [(V32HI "TARGET_AVX512BW") V16SI V8DI])
(define_mode_iterator VI248_AVX512BW_AVX512VL
- [(V32HI "TARGET_AVX512BW && TARGET_EVEX512")
- (V4DI "TARGET_AVX512VL") (V16SI "TARGET_EVEX512") (V8DI "TARGET_EVEX512")])
+ [(V32HI "TARGET_AVX512BW")
+ (V4DI "TARGET_AVX512VL") V16SI V8DI])
;; Suppose TARGET_AVX512VL as baseline
(define_mode_iterator VI248_AVX512BW_1
@@ -800,16 +763,16 @@
V4DI V2DI])
(define_mode_iterator VI48_AVX512F
- [(V16SI "TARGET_AVX512F && TARGET_EVEX512") V8SI V4SI
- (V8DI "TARGET_AVX512F && TARGET_EVEX512") V4DI V2DI])
+ [(V16SI "TARGET_AVX512F") V8SI V4SI
+ (V8DI "TARGET_AVX512F") V4DI V2DI])
(define_mode_iterator VI48_AVX_AVX512F
- [(V16SI "TARGET_AVX512F && TARGET_EVEX512") (V8SI "TARGET_AVX") V4SI
- (V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX") V2DI])
+ [(V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
+ (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI])
(define_mode_iterator VI12_AVX_AVX512F
- [(V64QI "TARGET_AVX512F && TARGET_EVEX512") (V32QI "TARGET_AVX") V16QI
- (V32HI "TARGET_AVX512F && TARGET_EVEX512") (V16HI "TARGET_AVX") V8HI])
+ [(V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI
+ (V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI])
(define_mode_iterator V48_128_256
[V4SF V2DF
@@ -950,10 +913,10 @@
(define_mode_iterator VI248_128 [V8HI V4SI V2DI])
(define_mode_iterator VI248_256 [V16HI V8SI V4DI])
(define_mode_iterator VI248_512
- [(V32HI "TARGET_EVEX512") (V16SI "TARGET_EVEX512") (V8DI "TARGET_EVEX512")])
+ [V32HI V16SI V8DI])
(define_mode_iterator VI48_128 [V4SI V2DI])
(define_mode_iterator VI148_512
- [(V64QI "TARGET_EVEX512") (V16SI "TARGET_EVEX512") (V8DI "TARGET_EVEX512")])
+ [V64QI V16SI V8DI])
(define_mode_iterator VI148_256 [V32QI V8SI V4DI])
(define_mode_iterator VI148_128 [V16QI V4SI V2DI])
@@ -961,75 +924,62 @@
(define_mode_iterator VI124_256 [V32QI V16HI V8SI])
(define_mode_iterator VI124_256_AVX512F_AVX512BW
[V32QI V16HI V8SI
- (V64QI "TARGET_AVX512BW && TARGET_EVEX512")
- (V32HI "TARGET_AVX512BW && TARGET_EVEX512")
- (V16SI "TARGET_AVX512F && TARGET_EVEX512")])
+ (V64QI "TARGET_AVX512BW") (V32HI "TARGET_AVX512BW")
+ (V16SI "TARGET_AVX512F")])
(define_mode_iterator VI48_256 [V8SI V4DI])
(define_mode_iterator VI48_512
- [(V16SI "TARGET_EVEX512") (V8DI "TARGET_EVEX512")])
+ [V16SI V8DI])
(define_mode_iterator VI4_256_8_512 [V8SI V8DI])
(define_mode_iterator VI_AVX512BW
- [(V16SI "TARGET_EVEX512") (V8DI "TARGET_EVEX512")
- (V32HI "TARGET_AVX512BW && TARGET_EVEX512")
- (V64QI "TARGET_AVX512BW && TARGET_EVEX512")])
+ [V16SI V8DI
+ (V32HI "TARGET_AVX512BW") (V64QI "TARGET_AVX512BW")])
(define_mode_iterator VIHFBF_AVX512BW
- [(V16SI "TARGET_EVEX512") (V8DI "TARGET_EVEX512")
- (V32HI "TARGET_AVX512BW && TARGET_EVEX512")
- (V64QI "TARGET_AVX512BW && TARGET_EVEX512")
- (V32HF "TARGET_AVX512BW && TARGET_EVEX512")
- (V32BF "TARGET_AVX512BW && TARGET_EVEX512")])
+ [V16SI V8DI
+ (V32HI "TARGET_AVX512BW") (V64QI "TARGET_AVX512BW")
+ (V32HF "TARGET_AVX512BW") (V32BF "TARGET_AVX512BW")])
;; Int-float size matches
(define_mode_iterator VI2F_256_512
- [V16HI (V32HI "TARGET_EVEX512")
- V16HF (V32HF "TARGET_EVEX512")
- V16BF (V32BF "TARGET_EVEX512")])
+ [V16HI V32HI V16HF V32HF V16BF V32BF])
(define_mode_iterator VI4F_128 [V4SI V4SF])
(define_mode_iterator VI8F_128 [V2DI V2DF])
(define_mode_iterator VI4F_256 [V8SI V8SF])
(define_mode_iterator VI8F_256 [V4DI V4DF])
(define_mode_iterator VI4F_256_512
- [V8SI V8SF
- (V16SI "TARGET_AVX512F && TARGET_EVEX512")
- (V16SF "TARGET_AVX512F && TARGET_EVEX512")])
+ [V8SI V8SF (V16SI "TARGET_AVX512F") (V16SF "TARGET_AVX512F")])
(define_mode_iterator VI48F_256_512
[V8SI V8SF
- (V16SI "TARGET_AVX512F && TARGET_EVEX512")
- (V16SF "TARGET_AVX512F && TARGET_EVEX512")
- (V8DI "TARGET_AVX512F && TARGET_EVEX512")
- (V8DF "TARGET_AVX512F && TARGET_EVEX512")
- (V4DI "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")])
+ (V16SI "TARGET_AVX512F") (V16SF "TARGET_AVX512F")
+ (V8DI "TARGET_AVX512F") (V8DF "TARGET_AVX512F")
+ (V4DI "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")])
(define_mode_iterator VF48H_AVX512VL
- [(V8DF "TARGET_EVEX512") (V16SF "TARGET_EVEX512") (V8SF "TARGET_AVX512VL")])
+ [V8DF V16SF (V8SF "TARGET_AVX512VL")])
(define_mode_iterator VF48_128
[V2DF V4SF])
(define_mode_iterator VI48F
- [(V16SI "TARGET_EVEX512") (V16SF "TARGET_EVEX512")
- (V8DI "TARGET_EVEX512") (V8DF "TARGET_EVEX512")
+ [V16SI V16SF V8DI V8DF
(V8SI "TARGET_AVX512VL") (V8SF "TARGET_AVX512VL")
(V4DI "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")
(V4SI "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
(V2DI "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
(define_mode_iterator VI12_VI48F_AVX512VL
- [(V16SI "TARGET_AVX512F && TARGET_EVEX512")
- (V16SF "TARGET_AVX512F && TARGET_EVEX512")
- (V8DI "TARGET_AVX512F && TARGET_EVEX512")
- (V8DF "TARGET_AVX512F && TARGET_EVEX512")
+ [(V16SI "TARGET_AVX512F") (V16SF "TARGET_AVX512F")
+ (V8DI "TARGET_AVX512F") (V8DF "TARGET_AVX512F")
(V8SI "TARGET_AVX512VL") (V8SF "TARGET_AVX512VL")
(V4DI "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")
(V4SI "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
(V2DI "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")
- (V64QI "TARGET_EVEX512") (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")
- (V32HI "TARGET_EVEX512") (V16HI "TARGET_AVX512VL") (V8HI "TARGET_AVX512VL")])
+ V64QI (V16QI "TARGET_AVX512VL") (V32QI "TARGET_AVX512VL")
+ V32HI (V16HI "TARGET_AVX512VL") (V8HI "TARGET_AVX512VL")])
(define_mode_iterator VI48F_256 [V8SI V8SF V4DI V4DF])
(define_mode_iterator V8_128 [V8HI V8HF V8BF])
(define_mode_iterator V16_256 [V16HI V16HF V16BF])
(define_mode_iterator V32_512
- [(V32HI "TARGET_EVEX512") (V32HF "TARGET_EVEX512") (V32BF "TARGET_EVEX512")])
+ [V32HI V32HF V32BF])
;; Mapping from float mode to required SSE level
(define_mode_attr sse
@@ -1441,7 +1391,7 @@
;; Mix-n-match
(define_mode_iterator AVX256MODE2P [V8SI V8SF V4DF])
(define_mode_iterator AVX512MODE2P
- [(V16SI "TARGET_EVEX512") (V16SF "TARGET_EVEX512") (V8DF "TARGET_EVEX512")])
+ [V16SI V16SF V8DF])
;; Mapping for dbpsabbw modes
(define_mode_attr dbpsadbwmode
@@ -1639,6 +1589,44 @@
"&& 1"
[(set (match_dup 0) (match_dup 1))])
+(define_insn_and_split "*<avx512>_load<mode>mask_and15"
+ [(set (match_operand:V48_AVX512VL_4 0 "register_operand" "=v")
+ (vec_merge:V48_AVX512VL_4
+ (unspec:V48_AVX512VL_4
+ [(match_operand:V48_AVX512VL_4 1 "memory_operand" "m")]
+ UNSPEC_MASKLOAD)
+ (match_operand:V48_AVX512VL_4 2 "nonimm_or_0_operand" "0C")
+ (and:QI
+ (match_operand:QI 3 "register_operand" "Yk")
+ (const_int 15))))]
+ "TARGET_AVX512F"
+ "#"
+ "&& 1"
+ [(set (match_dup 0)
+ (vec_merge:V48_AVX512VL_4
+ (unspec:V48_AVX512VL_4 [(match_dup 1)] UNSPEC_MASKLOAD)
+ (match_dup 2)
+ (match_dup 3)))])
+
+(define_insn_and_split "*<avx512>_load<mode>mask_and3"
+ [(set (match_operand:V8_AVX512VL_2 0 "register_operand" "=v")
+ (vec_merge:V8_AVX512VL_2
+ (unspec:V8_AVX512VL_2
+ [(match_operand:V8_AVX512VL_2 1 "memory_operand" "m")]
+ UNSPEC_MASKLOAD)
+ (match_operand:V8_AVX512VL_2 2 "nonimm_or_0_operand" "0C")
+ (and:QI
+ (match_operand:QI 3 "register_operand" "Yk")
+ (const_int 3))))]
+ "TARGET_AVX512F"
+ "#"
+ "&& 1"
+ [(set (match_dup 0)
+ (vec_merge:V8_AVX512VL_2
+ (unspec:V8_AVX512VL_2 [(match_dup 1)] UNSPEC_MASKLOAD)
+ (match_dup 2)
+ (match_dup 3)))])
+
(define_expand "<avx512>_load<mode>_mask"
[(set (match_operand:VI12_AVX512VL 0 "register_operand")
(vec_merge:VI12_AVX512VL
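The new *<avx512>_load<mode>mask_and15 and *<avx512>_load<mode>mask_and3 splitters added above recognize that a 4-element (respectively 2-element) masked load only consumes the low 4 (respectively 2) bits of the QImode mask, so an explicit AND of the mask with 15 (or 3) is redundant and the split simply re-emits the plain masked load. A hedged C-level sketch of the kind of source this targets, with an invented function name and the standard AVX512VL intrinsic:

/* Hypothetical illustration: only bits 0..3 of k select V4DF elements,
   so the "& 15" contributes nothing and the splitter above lets the
   AND disappear from the masked load. */
#include <immintrin.h>

__m256d
load_lo4 (const double *p, __mmask8 k)
{
  return _mm256_maskz_loadu_pd (k & 15, p);
}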
@@ -2049,11 +2037,9 @@
(define_mode_iterator STORENT_MODE
[(DI "TARGET_SSE2 && TARGET_64BIT") (SI "TARGET_SSE2")
(SF "TARGET_SSE4A") (DF "TARGET_SSE4A")
- (V8DI "TARGET_AVX512F && TARGET_EVEX512")
- (V4DI "TARGET_AVX") (V2DI "TARGET_SSE2")
- (V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX") V4SF
- (V8DF "TARGET_AVX512F && TARGET_EVEX512")
- (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
+ (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") (V2DI "TARGET_SSE2")
+ (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
+ (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
(define_expand "storent<mode>"
[(set (match_operand:STORENT_MODE 0 "memory_operand")
@@ -2857,10 +2843,10 @@
})
(define_expand "div<mode>3"
- [(set (match_operand:VBF_AVX10_2 0 "register_operand")
- (div:VBF_AVX10_2
- (match_operand:VBF_AVX10_2 1 "register_operand")
- (match_operand:VBF_AVX10_2 2 "vector_operand")))]
+ [(set (match_operand:VBF 0 "register_operand")
+ (div:VBF
+ (match_operand:VBF 1 "register_operand")
+ (match_operand:VBF 2 "vector_operand")))]
"TARGET_AVX10_2"
{
if (TARGET_RECIP_VEC_DIV
@@ -3897,15 +3883,12 @@
(define_mode_iterator REDUC_PLUS_MODE
[(V4DF "TARGET_AVX") (V8SF "TARGET_AVX")
(V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
- (V8DF "TARGET_AVX512F && TARGET_EVEX512")
- (V16SF "TARGET_AVX512F && TARGET_EVEX512")
- (V32HF "TARGET_AVX512FP16 && TARGET_AVX512VL && TARGET_EVEX512")
+ (V8DF "TARGET_AVX512F") (V16SF "TARGET_AVX512F")
+ (V32HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
(V32QI "TARGET_AVX") (V16HI "TARGET_AVX")
(V8SI "TARGET_AVX") (V4DI "TARGET_AVX")
- (V64QI "TARGET_AVX512F && TARGET_EVEX512")
- (V32HI "TARGET_AVX512F && TARGET_EVEX512")
- (V16SI "TARGET_AVX512F && TARGET_EVEX512")
- (V8DI "TARGET_AVX512F && TARGET_EVEX512")])
+ (V64QI "TARGET_AVX512F") (V32HI "TARGET_AVX512F")
+ (V16SI "TARGET_AVX512F") (V8DI "TARGET_AVX512F")])
(define_expand "reduc_plus_scal_<mode>"
[(plus:REDUC_PLUS_MODE
@@ -3948,13 +3931,11 @@
(V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
(V8SI "TARGET_AVX2") (V4DI "TARGET_AVX2")
(V8SF "TARGET_AVX") (V4DF "TARGET_AVX")
- (V64QI "TARGET_AVX512BW && TARGET_EVEX512")
- (V32HF "TARGET_AVX512FP16 && TARGET_AVX512VL && TARGET_EVEX512")
- (V32HI "TARGET_AVX512BW && TARGET_EVEX512")
- (V16SI "TARGET_AVX512F && TARGET_EVEX512")
- (V8DI "TARGET_AVX512F && TARGET_EVEX512")
- (V16SF "TARGET_AVX512F && TARGET_EVEX512")
- (V8DF "TARGET_AVX512F && TARGET_EVEX512")])
+ (V64QI "TARGET_AVX512BW")
+ (V32HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
+ (V32HI "TARGET_AVX512BW") (V16SI "TARGET_AVX512F")
+ (V8DI "TARGET_AVX512F") (V16SF "TARGET_AVX512F")
+ (V8DF "TARGET_AVX512F")])
(define_expand "reduc_<code>_scal_<mode>"
[(smaxmin:REDUC_SMINMAX_MODE
@@ -4063,10 +4044,8 @@
(define_mode_iterator REDUC_ANY_LOGIC_MODE
[(V32QI "TARGET_AVX") (V16HI "TARGET_AVX")
(V8SI "TARGET_AVX") (V4DI "TARGET_AVX")
- (V64QI "TARGET_AVX512F && TARGET_EVEX512")
- (V32HI "TARGET_AVX512F && TARGET_EVEX512")
- (V16SI "TARGET_AVX512F && TARGET_EVEX512")
- (V8DI "TARGET_AVX512F && TARGET_EVEX512")])
+ (V64QI "TARGET_AVX512F") (V32HI "TARGET_AVX512F")
+ (V16SI "TARGET_AVX512F") (V8DI "TARGET_AVX512F")])
(define_expand "reduc_<code>_scal_<mode>"
[(any_logic:REDUC_ANY_LOGIC_MODE
@@ -4410,7 +4389,7 @@
(unspec:<V48H_AVX512VL:avx512fmaskmode>
[(match_operand:V48H_AVX512VL 1 "nonimmediate_operand" "v")
(match_operand:V48H_AVX512VL 2 "nonimmediate_operand" "vm")
- (match_operand:SI 3 "const_0_to_7_operand" "n")]
+ (match_operand:SI 3 "<cmp_imm_predicate>" "n")]
UNSPEC_PCMP)))]
"TARGET_AVX512F
&& (!VALID_MASK_AVX512BW_MODE (<SWI248x:MODE>mode) || TARGET_AVX512BW)
@@ -4428,7 +4407,7 @@
(unspec:<V48H_AVX512VL:avx512fmaskmode>
[(match_operand:V48H_AVX512VL 1 "nonimmediate_operand")
(match_operand:V48H_AVX512VL 2 "nonimmediate_operand")
- (match_operand:SI 3 "const_0_to_7_operand")]
+ (match_operand:SI 3 "<cmp_imm_predicate>")]
UNSPEC_PCMP)))
(set (match_operand:<V48H_AVX512VL:avx512fmaskmode> 4 "register_operand")
(unspec:<V48H_AVX512VL:avx512fmaskmode>
@@ -4469,7 +4448,8 @@
(match_operand:V48H_AVX512VL 2 "nonimmediate_operand")
(match_operand:SI 3 "<cmp_imm_predicate>" "n")]
UNSPEC_PCMP)))]
- "TARGET_AVX512F && ix86_pre_reload_split ()"
+ "TARGET_AVX512F && GET_MODE_NUNITS (<MODE>mode) >= 8
+ && ix86_pre_reload_split ()"
"#"
"&& 1"
[(set (match_dup 0)
@@ -4480,6 +4460,70 @@
UNSPEC_PCMP))]
"operands[4] = GEN_INT (INTVAL (operands[3]) ^ 4);")
+(define_insn "*<avx512>_cmp<mode>3_and15"
+ [(set (match_operand:QI 0 "register_operand" "=k")
+ (and:QI
+ (unspec:QI
+ [(match_operand:V48_AVX512VL_4 1 "nonimmediate_operand" "v")
+ (match_operand:V48_AVX512VL_4 2 "nonimmediate_operand" "vm")
+ (match_operand:SI 3 "<cmp_imm_predicate>" "n")]
+ UNSPEC_PCMP)
+ (const_int 15)))]
+ "TARGET_AVX512F"
+ "v<ssecmpintprefix>cmp<ssemodesuffix>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssecmp")
+ (set_attr "length_immediate" "1")
+ (set_attr "prefix" "evex")
+ (set_attr "mode" "<sseinsnmode>")])
+
+(define_insn "*<avx512>_ucmp<mode>3_and15"
+ [(set (match_operand:QI 0 "register_operand" "=k")
+ (and:QI
+ (unspec:QI
+ [(match_operand:VI48_AVX512VL_4 1 "nonimmediate_operand" "v")
+ (match_operand:VI48_AVX512VL_4 2 "nonimmediate_operand" "vm")
+ (match_operand:SI 3 "const_0_to_7_operand" "n")]
+ UNSPEC_UNSIGNED_PCMP)
+ (const_int 15)))]
+ "TARGET_AVX512F"
+ "vpcmpu<ssemodesuffix>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssecmp")
+ (set_attr "length_immediate" "1")
+ (set_attr "prefix" "evex")
+ (set_attr "mode" "<sseinsnmode>")])
+
+(define_insn "*<avx512>_cmp<mode>3_and3"
+ [(set (match_operand:QI 0 "register_operand" "=k")
+ (and:QI
+ (unspec:QI
+ [(match_operand:V8_AVX512VL_2 1 "nonimmediate_operand" "v")
+ (match_operand:V8_AVX512VL_2 2 "nonimmediate_operand" "vm")
+ (match_operand:SI 3 "<cmp_imm_predicate>" "n")]
+ UNSPEC_PCMP)
+ (const_int 3)))]
+ "TARGET_AVX512F"
+ "v<ssecmpintprefix>cmp<ssemodesuffix>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssecmp")
+ (set_attr "length_immediate" "1")
+ (set_attr "prefix" "evex")
+ (set_attr "mode" "<sseinsnmode>")])
+
+(define_insn "*avx512vl_ucmpv2di3_and3"
+ [(set (match_operand:QI 0 "register_operand" "=k")
+ (and:QI
+ (unspec:QI
+ [(match_operand:V2DI 1 "nonimmediate_operand" "v")
+ (match_operand:V2DI 2 "nonimmediate_operand" "vm")
+ (match_operand:SI 3 "const_0_to_7_operand" "n")]
+ UNSPEC_UNSIGNED_PCMP)
+ (const_int 3)))]
+ "TARGET_AVX512F"
+ "vpcmpuq\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssecmp")
+ (set_attr "length_immediate" "1")
+ (set_attr "prefix" "evex")
+ (set_attr "mode" "TI")])
+
(define_insn "<avx512>_cmp<mode>3<mask_scalar_merge_name>"
[(set (match_operand:<avx512fmaskmode> 0 "register_operand" "=k")
(unspec:<avx512fmaskmode>
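Similarly, the new *<avx512>_cmp<mode>3_and15, *<avx512>_ucmp<mode>3_and15, *<avx512>_cmp<mode>3_and3 and *avx512vl_ucmpv2di3_and3 patterns above match an AND of the comparison mask with 15 or 3: a compare of a 4- or 2-element vector can set at most that many mask bits, so the masking is a no-op and a bare vcmp/vpcmp instruction suffices. A hedged C-level sketch, with an invented function name and the standard AVX512VL intrinsic:

/* Hypothetical illustration: _mm256_cmp_pd_mask compares four doubles,
   so bits 4..7 of the result are already zero and the "& 15" can be
   folded into the bare vcmppd by the pattern above. */
#include <immintrin.h>

__mmask8
cmp_lo4 (__m256d a, __m256d b)
{
  return _mm256_cmp_pd_mask (a, b, _CMP_LT_OQ) & 15;
}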
@@ -4762,7 +4806,8 @@
(match_operand:VI48_AVX512VL 2 "nonimmediate_operand")
(match_operand:SI 3 "const_0_to_7_operand")]
UNSPEC_UNSIGNED_PCMP)))]
- "TARGET_AVX512F && ix86_pre_reload_split ()"
+ "TARGET_AVX512F && ix86_pre_reload_split ()
+ && GET_MODE_NUNITS (<MODE>mode) >= 8"
"#"
"&& 1"
[(set (match_dup 0)
@@ -4923,8 +4968,8 @@
(define_expand "vec_cmp<mode><avx512fmaskmodelower>"
[(set (match_operand:<avx512fmaskmode> 0 "register_operand")
(match_operator:<avx512fmaskmode> 1 ""
- [(match_operand:VBF_AVX10_2 2 "register_operand")
- (match_operand:VBF_AVX10_2 3 "nonimmediate_operand")]))]
+ [(match_operand:VBF 2 "register_operand")
+ (match_operand:VBF 3 "nonimmediate_operand")]))]
"TARGET_AVX10_2"
{
bool ok = ix86_expand_mask_vec_cmp (operands[0], GET_CODE (operands[1]),
@@ -5142,7 +5187,7 @@
(define_expand "vcond_mask_<mode><sseintvecmodelower>"
[(set (match_operand:VI_256_AVX2 0 "register_operand")
(vec_merge:VI_256_AVX2
- (match_operand:VI_256_AVX2 1 "nonimmediate_operand")
+ (match_operand:VI_256_AVX2 1 "nonimm_or_0_or_1s_operand")
(match_operand:VI_256_AVX2 2 "nonimm_or_0_operand")
(match_operand:<sseintvecmode> 3 "register_operand")))]
"TARGET_AVX"
@@ -5155,7 +5200,7 @@
(define_expand "vcond_mask_<mode><sseintvecmodelower>"
[(set (match_operand:VI_128 0 "register_operand")
(vec_merge:VI_128
- (match_operand:VI_128 1 "vector_operand")
+ (match_operand:VI_128 1 "vector_or_0_or_1s_operand")
(match_operand:VI_128 2 "nonimm_or_0_operand")
(match_operand:<sseintvecmode> 3 "register_operand")))]
"TARGET_SSE2"
@@ -5168,7 +5213,7 @@
(define_expand "vcond_mask_v1tiv1ti"
[(set (match_operand:V1TI 0 "register_operand")
(vec_merge:V1TI
- (match_operand:V1TI 1 "vector_operand")
+ (match_operand:V1TI 1 "vector_or_0_or_1s_operand")
(match_operand:V1TI 2 "nonimm_or_0_operand")
(match_operand:V1TI 3 "register_operand")))]
"TARGET_SSE2"
@@ -5181,7 +5226,7 @@
(define_expand "vcond_mask_<mode><sseintvecmodelower>"
[(set (match_operand:VF_256 0 "register_operand")
(vec_merge:VF_256
- (match_operand:VF_256 1 "nonimmediate_operand")
+ (match_operand:VF_256 1 "nonimm_or_0_or_1s_operand")
(match_operand:VF_256 2 "nonimm_or_0_operand")
(match_operand:<sseintvecmode> 3 "register_operand")))]
"TARGET_AVX"
@@ -5194,7 +5239,7 @@
(define_expand "vcond_mask_<mode><sseintvecmodelower>"
[(set (match_operand:VF_128 0 "register_operand")
(vec_merge:VF_128
- (match_operand:VF_128 1 "vector_operand")
+ (match_operand:VF_128 1 "vector_or_0_or_1s_operand")
(match_operand:VF_128 2 "nonimm_or_0_operand")
(match_operand:<sseintvecmode> 3 "register_operand")))]
"TARGET_SSE"
@@ -5573,7 +5618,7 @@
output_asm_insn (buf, operands);
return "";
}
- [(set_attr "isa" "noavx,avx,avx512vl,avx512f_512")
+ [(set_attr "isa" "noavx,avx,avx512vl,avx512f")
(set_attr "type" "sselog")
(set_attr "prefix" "orig,vex,evex,evex")
(set (attr "mode")
@@ -5630,7 +5675,7 @@
output_asm_insn (buf, operands);
return "";
}
- [(set_attr "isa" "noavx,avx_noavx512vl,avx512vl,avx512f_512")
+ [(set_attr "isa" "noavx,avx_noavx512f,avx512vl,avx512f")
(set_attr "addr" "*,gpr16,*,*")
(set_attr "type" "sselog")
(set (attr "prefix_data16")
@@ -5703,7 +5748,7 @@
output_asm_insn (buf, operands);
return "";
}
- [(set_attr "isa" "noavx,avx,avx512vl,avx512f_512")
+ [(set_attr "isa" "noavx,avx,avx512vl,avx512f")
(set_attr "type" "sselog")
(set_attr "prefix" "orig,vex,evex,evex")
(set (attr "mode")
@@ -5765,7 +5810,7 @@
output_asm_insn (buf, operands);
return "";
}
- [(set_attr "isa" "noavx,avx,avx512vl,avx512f_512")
+ [(set_attr "isa" "noavx,avx,avx512vl,avx512f")
(set_attr "type" "sselog")
(set (attr "prefix_data16")
(if_then_else
@@ -5811,15 +5856,10 @@
(V2DF "TARGET_FMA || TARGET_FMA4 || TARGET_AVX512VL")
(V8SF "TARGET_FMA || TARGET_FMA4 || TARGET_AVX512VL")
(V4DF "TARGET_FMA || TARGET_FMA4 || TARGET_AVX512VL")
- (V16SF "TARGET_AVX512F && TARGET_EVEX512")
- (V8DF "TARGET_AVX512F && TARGET_EVEX512")
- (HF "TARGET_AVX512FP16")
- (V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
- (V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
- (V32HF "TARGET_AVX512FP16 && TARGET_EVEX512")
- (V8BF "TARGET_AVX10_2")
- (V16BF "TARGET_AVX10_2")
- (V32BF "TARGET_AVX10_2")])
+ (V16SF "TARGET_AVX512F") (V8DF "TARGET_AVX512F")
+ (HF "TARGET_AVX512FP16") (V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
+ (V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL") (V32HF "TARGET_AVX512FP16")
+ (V8BF "TARGET_AVX10_2") (V16BF "TARGET_AVX10_2") (V32BF "TARGET_AVX10_2")])
(define_expand "fma<mode>4"
[(set (match_operand:FMAMODEM 0 "register_operand")
@@ -5857,8 +5897,7 @@
(V2DF "TARGET_FMA || TARGET_FMA4 || TARGET_AVX512VL")
(V8SF "TARGET_FMA || TARGET_FMA4 || TARGET_AVX512VL")
(V4DF "TARGET_FMA || TARGET_FMA4 || TARGET_AVX512VL")
- (V16SF "TARGET_AVX512F && TARGET_EVEX512")
- (V8DF "TARGET_AVX512F && TARGET_EVEX512")])
+ (V16SF "TARGET_AVX512F") (V8DF "TARGET_AVX512F")])
(define_mode_iterator FMAMODE
[SF DF V4SF V2DF V8SF V4DF])
@@ -5928,14 +5967,12 @@
;; Suppose AVX-512F as baseline
(define_mode_iterator VFH_SF_AVX512VL
- [(V32HF "TARGET_AVX512FP16 && TARGET_EVEX512")
+ [(V32HF "TARGET_AVX512FP16")
(V16HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
(V8HF "TARGET_AVX512FP16 && TARGET_AVX512VL")
(HF "TARGET_AVX512FP16")
- SF (V16SF "TARGET_EVEX512")
- (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
- DF (V8DF "TARGET_EVEX512")
- (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
+ SF V16SF (V8SF "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
+ DF V8DF (V4DF "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")])
(define_insn "<sd_mask_codefor>fma_fmadd_<mode><sd_maskz_name><round_name>"
[(set (match_operand:VFH_SF_AVX512VL 0 "register_operand" "=v,v,v")
@@ -8683,7 +8720,7 @@
(unspec:V16SI
[(match_operand:V16SF 1 "<round_nimm_predicate>" "<round_constraint>")]
UNSPEC_FIX_NOTRUNC))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vcvtps2dq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -8751,7 +8788,7 @@
(unspec:V16SI
[(match_operand:V16SF 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
UNSPEC_VCVTT_U))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vcvttps2<vcvtt_suffix>dq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -8761,7 +8798,7 @@
[(set (match_operand:V16SI 0 "register_operand" "=v")
(any_fix:V16SI
(match_operand:V16SF 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vcvttps2<fixsuffix>dq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -9349,7 +9386,7 @@
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)]))))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vcvtdq2pd\t{%t1, %0|%0, %t1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -9385,7 +9422,7 @@
(unspec:V8SI
[(match_operand:V8DF 1 "<round_nimm_predicate>" "<round_constraint>")]
UNSPEC_FIX_NOTRUNC))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vcvtpd2dq\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -9544,7 +9581,7 @@
(unspec:V8SI
[(match_operand:V8DF 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
UNSPEC_VCVTT_U))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vcvttpd2<vcvtt_suffix>dq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -9554,7 +9591,7 @@
[(set (match_operand:V8SI 0 "register_operand" "=v")
(any_fix:V8SI
(match_operand:V8DF 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vcvttpd2<fixsuffix>dq\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -10070,7 +10107,7 @@
[(set (match_operand:V8SF 0 "register_operand" "=v")
(float_truncate:V8SF
(match_operand:V8DF 1 "<round_nimm_predicate>" "<round_constraint>")))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vcvtpd2ps\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -10232,7 +10269,7 @@
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)]))))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vcvtps2pd\t{%t1, %0|%0, %t1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -10438,7 +10475,7 @@
(set (match_operand:V8DF 0 "register_operand")
(float_extend:V8DF
(match_dup 2)))]
-"TARGET_AVX512F && TARGET_EVEX512"
+"TARGET_AVX512F"
"operands[2] = gen_reg_rtx (V8SFmode);")
(define_expand "vec_unpacks_lo_v4sf"
@@ -10576,7 +10613,7 @@
(set (match_operand:V8DF 0 "register_operand")
(float:V8DF
(match_dup 2)))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"operands[2] = gen_reg_rtx (V8SImode);")
(define_expand "vec_unpacks_float_lo_v16si"
@@ -10588,7 +10625,7 @@
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)]))))]
- "TARGET_AVX512F && TARGET_EVEX512")
+ "TARGET_AVX512F")
(define_expand "vec_unpacku_float_hi_v4si"
[(set (match_dup 5)
@@ -10684,7 +10721,7 @@
(define_expand "vec_unpacku_float_hi_v16si"
[(match_operand:V8DF 0 "register_operand")
(match_operand:V16SI 1 "register_operand")]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
{
REAL_VALUE_TYPE TWO32r;
rtx k, x, tmp[4];
@@ -10733,7 +10770,7 @@
(define_expand "vec_unpacku_float_lo_v16si"
[(match_operand:V8DF 0 "register_operand")
(match_operand:V16SI 1 "nonimmediate_operand")]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
{
REAL_VALUE_TYPE TWO32r;
rtx k, x, tmp[3];
@@ -10827,7 +10864,7 @@
[(match_operand:V16SI 0 "register_operand")
(match_operand:V8DF 1 "nonimmediate_operand")
(match_operand:V8DF 2 "nonimmediate_operand")]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
{
rtx r1, r2;
@@ -10942,7 +10979,7 @@
[(match_operand:V16SI 0 "register_operand")
(match_operand:V8DF 1 "nonimmediate_operand")
(match_operand:V8DF 2 "nonimmediate_operand")]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
{
rtx r1, r2;
@@ -11135,7 +11172,7 @@
(const_int 11) (const_int 27)
(const_int 14) (const_int 30)
(const_int 15) (const_int 31)])))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vunpckhps\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -11223,7 +11260,7 @@
(const_int 9) (const_int 25)
(const_int 12) (const_int 28)
(const_int 13) (const_int 29)])))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vunpcklps\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -11363,7 +11400,7 @@
(const_int 11) (const_int 11)
(const_int 13) (const_int 13)
(const_int 15) (const_int 15)])))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vmovshdup\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "sse")
(set_attr "prefix" "evex")
@@ -11416,7 +11453,7 @@
(const_int 10) (const_int 10)
(const_int 12) (const_int 12)
(const_int 14) (const_int 14)])))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vmovsldup\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "sse")
(set_attr "prefix" "evex")
@@ -12376,9 +12413,7 @@
(V8SF "32x4") (V8SI "32x4") (V4DF "64x2") (V4DI "64x2")])
(define_mode_iterator AVX512_VEC
- [(V8DF "TARGET_AVX512DQ && TARGET_EVEX512")
- (V8DI "TARGET_AVX512DQ && TARGET_EVEX512")
- (V16SF "TARGET_EVEX512") (V16SI "TARGET_EVEX512")])
+ [(V8DF "TARGET_AVX512DQ") (V8DI "TARGET_AVX512DQ") V16SF V16SI])
(define_expand "<extract_type>_vextract<shuffletype><extract_suf>_mask"
[(match_operand:<ssequartermode> 0 "nonimmediate_operand")
@@ -12547,9 +12582,7 @@
[(V16SF "32x8") (V16SI "32x8") (V8DF "64x4") (V8DI "64x4")])
(define_mode_iterator AVX512_VEC_2
- [(V16SF "TARGET_AVX512DQ && TARGET_EVEX512")
- (V16SI "TARGET_AVX512DQ && TARGET_EVEX512")
- (V8DF "TARGET_EVEX512") (V8DI "TARGET_EVEX512")])
+ [(V16SF "TARGET_AVX512DQ") (V16SI "TARGET_AVX512DQ") V8DF V8DI])
(define_expand "<extract_type_2>_vextract<shuffletype><extract_suf_2>_mask"
[(match_operand:<ssehalfvecmode> 0 "nonimmediate_operand")
@@ -12723,7 +12756,7 @@
lo insns have =m and 0C constraints. */
: (operands[2] != const0_rtx
|| (!rtx_equal_p (dest, operands[3])
- && GET_CODE (operands[3]) != CONST_VECTOR))))
+ && !CONST_VECTOR_P (operands[3])))))
dest = gen_reg_rtx (<ssehalfvecmode>mode);
switch (INTVAL (operands[2]))
{
@@ -13110,7 +13143,7 @@
(const_int 26) (const_int 27)
(const_int 28) (const_int 29)
(const_int 30) (const_int 31)])))]
- "TARGET_AVX512F && TARGET_EVEX512
+ "TARGET_AVX512F
&& !(MEM_P (operands[0]) && MEM_P (operands[1]))"
{
if (TARGET_AVX512VL
@@ -13159,7 +13192,7 @@
(const_int 58) (const_int 59)
(const_int 60) (const_int 61)
(const_int 62) (const_int 63)])))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vextracti64x4\t{$0x1, %1, %0|%0, %1, 0x1}"
[(set_attr "type" "sselog1")
(set_attr "length_immediate" "1")
@@ -13257,15 +13290,15 @@
;; Modes handled by vec_extract patterns.
(define_mode_iterator VEC_EXTRACT_MODE
- [(V64QI "TARGET_AVX512BW && TARGET_EVEX512") (V32QI "TARGET_AVX") V16QI
- (V32HI "TARGET_AVX512BW && TARGET_EVEX512") (V16HI "TARGET_AVX") V8HI
- (V16SI "TARGET_AVX512F && TARGET_EVEX512") (V8SI "TARGET_AVX") V4SI
- (V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX") V2DI
- (V32HF "TARGET_AVX512BW && TARGET_EVEX512") (V16HF "TARGET_AVX") V8HF
- (V32BF "TARGET_AVX512BW && TARGET_EVEX512") (V16BF "TARGET_AVX") V8BF
- (V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX") V4SF
- (V8DF "TARGET_AVX512F && TARGET_EVEX512") (V4DF "TARGET_AVX") V2DF
- (V4TI "TARGET_AVX512F && TARGET_EVEX512") (V2TI "TARGET_AVX")])
+ [(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX") V16QI
+ (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX") V8HI
+ (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
+ (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI
+ (V32HF "TARGET_AVX512BW") (V16HF "TARGET_AVX") V8HF
+ (V32BF "TARGET_AVX512BW") (V16BF "TARGET_AVX") V8BF
+ (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
+ (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") V2DF
+ (V4TI "TARGET_AVX512F") (V2TI "TARGET_AVX")])
(define_expand "vec_extract<mode><ssescalarmodelower>"
[(match_operand:<ssescalarmode> 0 "register_operand")
@@ -13307,7 +13340,7 @@
(const_int 3) (const_int 11)
(const_int 5) (const_int 13)
(const_int 7) (const_int 15)])))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vunpckhpd\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -13421,9 +13454,9 @@
(const_int 2) (const_int 10)
(const_int 4) (const_int 12)
(const_int 6) (const_int 14)])))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vmovddup\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
- [(set_attr "type" "sselog1")
+ [(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
(set_attr "mode" "V8DF")])
@@ -13437,7 +13470,7 @@
(const_int 2) (const_int 10)
(const_int 4) (const_int 12)
(const_int 6) (const_int 14)])))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vunpcklpd\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -13454,7 +13487,7 @@
(const_int 2) (const_int 6)])))]
"TARGET_AVX && <mask_avx512vl_condition>"
"vmovddup\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
- [(set_attr "type" "sselog1")
+ [(set_attr "type" "ssemov")
(set_attr "prefix" "<mask_prefix>")
(set_attr "mode" "V4DF")])
@@ -13649,7 +13682,7 @@
(match_operand:SI 4 "const_0_to_255_operand")]
UNSPEC_VTERNLOG))]
"(<MODE_SIZE> == 64 || TARGET_AVX512VL
- || (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256))
+ || (TARGET_AVX512F && !TARGET_PREFER_AVX256))
/* Disallow embeded broadcast for vector HFmode since
it's not real AVX512FP16 instruction. */
&& (GET_MODE_SIZE (GET_MODE_INNER (<MODE>mode)) >= 4
@@ -13731,7 +13764,7 @@
[(set (match_operand:V 0 "register_operand")
(match_operand:V 1 "ternlog_operand"))]
"(<MODE_SIZE> == 64 || TARGET_AVX512VL
- || (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256))
+ || (TARGET_AVX512F && !TARGET_PREFER_AVX256))
&& ix86_pre_reload_split ()"
"#"
"&& 1"
@@ -13761,7 +13794,7 @@
(match_operand:V 3 "regmem_or_bitnot_regmem_operand")
(match_operand:V 4 "regmem_or_bitnot_regmem_operand"))))]
"(<MODE_SIZE> == 64 || TARGET_AVX512VL
- || (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256))
+ || (TARGET_AVX512F && !TARGET_PREFER_AVX256))
&& ix86_pre_reload_split ()
&& (rtx_equal_p (STRIP_UNARY (operands[1]),
STRIP_UNARY (operands[4]))
@@ -13846,7 +13879,7 @@
(match_operand:V 3 "regmem_or_bitnot_regmem_operand"))
(match_operand:V 4 "regmem_or_bitnot_regmem_operand")))]
"(<MODE_SIZE> == 64 || TARGET_AVX512VL
- || (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256))
+ || (TARGET_AVX512F && !TARGET_PREFER_AVX256))
&& ix86_pre_reload_split ()
&& (rtx_equal_p (STRIP_UNARY (operands[1]),
STRIP_UNARY (operands[4]))
@@ -13930,7 +13963,7 @@
(match_operand:V 2 "regmem_or_bitnot_regmem_operand"))
(match_operand:V 3 "regmem_or_bitnot_regmem_operand")))]
"(<MODE_SIZE> == 64 || TARGET_AVX512VL
- || (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256))
+ || (TARGET_AVX512F && !TARGET_PREFER_AVX256))
&& ix86_pre_reload_split ()"
"#"
"&& 1"
@@ -14080,7 +14113,7 @@
(match_operand:SI 3 "const_0_to_255_operand")
(match_operand:V16SF 4 "register_operand")
(match_operand:HI 5 "register_operand")]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
{
int mask = INTVAL (operands[3]);
emit_insn (gen_avx512f_shufps512_1_mask (operands[0], operands[1], operands[2],
@@ -14267,7 +14300,7 @@
(match_operand 16 "const_12_to_15_operand")
(match_operand 17 "const_28_to_31_operand")
(match_operand 18 "const_28_to_31_operand")])))]
- "TARGET_AVX512F && TARGET_EVEX512
+ "TARGET_AVX512F
&& (INTVAL (operands[3]) == (INTVAL (operands[7]) - 4)
&& INTVAL (operands[4]) == (INTVAL (operands[8]) - 4)
&& INTVAL (operands[5]) == (INTVAL (operands[9]) - 4)
@@ -14302,7 +14335,7 @@
(match_operand:SI 3 "const_0_to_255_operand")
(match_operand:V8DF 4 "register_operand")
(match_operand:QI 5 "register_operand")]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
{
int mask = INTVAL (operands[3]);
emit_insn (gen_avx512f_shufpd512_1_mask (operands[0], operands[1], operands[2],
@@ -14332,7 +14365,7 @@
(match_operand 8 "const_12_to_13_operand")
(match_operand 9 "const_6_to_7_operand")
(match_operand 10 "const_14_to_15_operand")])))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
{
int mask;
mask = INTVAL (operands[3]);
@@ -14464,7 +14497,7 @@
(const_int 3) (const_int 11)
(const_int 5) (const_int 13)
(const_int 7) (const_int 15)])))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vpunpckhqdq\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -14514,7 +14547,7 @@
(const_int 2) (const_int 10)
(const_int 4) (const_int 12)
(const_int 6) (const_int 14)])))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vpunpcklqdq\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -14880,7 +14913,7 @@
(set_attr "mode" "V2DF,DF,V8DF")
(set (attr "enabled")
(cond [(eq_attr "alternative" "2")
- (symbol_ref "TARGET_AVX512F && TARGET_EVEX512
+ (symbol_ref "TARGET_AVX512F
&& !TARGET_AVX512VL && !TARGET_PREFER_AVX256")
(match_test "<mask_avx512vl_condition>")
(const_string "*")
@@ -14965,13 +14998,13 @@
[(set (match_operand:PMOV_DST_MODE_1 0 "nonimmediate_operand")
(truncate:PMOV_DST_MODE_1
(match_operand:<pmov_src_mode> 1 "register_operand")))]
- "TARGET_AVX512F && TARGET_EVEX512")
+ "TARGET_AVX512F")
(define_insn "*avx512f_<code><pmov_src_lower><mode>2"
[(set (match_operand:PMOV_DST_MODE_1 0 "nonimmediate_operand" "=v,m")
(any_truncate:PMOV_DST_MODE_1
(match_operand:<pmov_src_mode> 1 "register_operand" "v,v")))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vpmov<trunsuffix><pmov_suff_1>\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "memory" "none,store")
@@ -14993,7 +15026,7 @@
(const_int 10) (const_int 11)
(const_int 12) (const_int 13)
(const_int 14) (const_int 15)])))]
- "TARGET_AVX512BW && TARGET_EVEX512 && ix86_pre_reload_split ()"
+ "TARGET_AVX512BW && ix86_pre_reload_split ()"
"#"
"&& 1"
[(set (match_dup 0)
@@ -15018,7 +15051,7 @@
(const_int 10) (const_int 11)
(const_int 12) (const_int 13)
(const_int 14) (const_int 15)])))]
- "TARGET_AVX512BW && TARGET_EVEX512 && ix86_pre_reload_split ()"
+ "TARGET_AVX512BW && ix86_pre_reload_split ()"
"#"
"&& 1"
[(set (match_dup 0)
@@ -15102,7 +15135,7 @@
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)])))]
- "TARGET_AVX512F && TARGET_EVEX512 && ix86_pre_reload_split ()"
+ "TARGET_AVX512F && ix86_pre_reload_split ()"
"#"
"&& 1"
[(set (match_dup 0)
@@ -15118,7 +15151,7 @@
(match_operand:<pmov_src_mode> 1 "register_operand" "v,v"))
(match_operand:PMOV_DST_MODE_1 2 "nonimm_or_0_operand" "0C,0")
(match_operand:<avx512fmaskmode> 3 "register_operand" "Yk,Yk")))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vpmov<trunsuffix><pmov_suff_1>\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"
[(set_attr "type" "ssemov")
(set_attr "memory" "none,store")
@@ -15132,19 +15165,19 @@
(match_operand:<pmov_src_mode> 1 "register_operand"))
(match_dup 0)
(match_operand:<avx512fmaskmode> 2 "register_operand")))]
- "TARGET_AVX512F && TARGET_EVEX512")
+ "TARGET_AVX512F")
(define_expand "truncv32hiv32qi2"
[(set (match_operand:V32QI 0 "nonimmediate_operand")
(truncate:V32QI
(match_operand:V32HI 1 "register_operand")))]
- "TARGET_AVX512BW && TARGET_EVEX512")
+ "TARGET_AVX512BW")
(define_insn "avx512bw_<code>v32hiv32qi2"
[(set (match_operand:V32QI 0 "nonimmediate_operand" "=v,m")
(any_truncate:V32QI
(match_operand:V32HI 1 "register_operand" "v,v")))]
- "TARGET_AVX512BW && TARGET_EVEX512"
+ "TARGET_AVX512BW"
"vpmov<trunsuffix>wb\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "memory" "none,store")
@@ -15174,7 +15207,7 @@
(const_int 26) (const_int 27)
(const_int 28) (const_int 29)
(const_int 30) (const_int 31)])))]
- "TARGET_AVX512VBMI && TARGET_EVEX512 && ix86_pre_reload_split ()"
+ "TARGET_AVX512VBMI && ix86_pre_reload_split ()"
"#"
"&& 1"
[(set (match_dup 0)
@@ -15190,7 +15223,7 @@
(match_operand:V32HI 1 "register_operand" "v,v"))
(match_operand:V32QI 2 "nonimm_or_0_operand" "0C,0")
(match_operand:SI 3 "register_operand" "Yk,Yk")))]
- "TARGET_AVX512BW && TARGET_EVEX512"
+ "TARGET_AVX512BW"
"vpmov<trunsuffix>wb\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"
[(set_attr "type" "ssemov")
(set_attr "memory" "none,store")
@@ -15204,7 +15237,7 @@
(match_operand:V32HI 1 "register_operand"))
(match_dup 0)
(match_operand:SI 2 "register_operand")))]
- "TARGET_AVX512BW && TARGET_EVEX512")
+ "TARGET_AVX512BW")
(define_mode_iterator PMOV_DST_MODE_2
[V4SI V8HI (V16QI "TARGET_AVX512BW")])
@@ -16062,7 +16095,7 @@
[(set (match_operand:V8QI 0 "register_operand")
(truncate:V8QI
(match_operand:V8DI 1 "register_operand")))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
{
rtx op0 = gen_reg_rtx (V16QImode);
@@ -16082,7 +16115,7 @@
(const_int 0) (const_int 0)
(const_int 0) (const_int 0)
(const_int 0) (const_int 0)])))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vpmov<trunsuffix>qb\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
@@ -16092,7 +16125,7 @@
[(set (match_operand:V8QI 0 "memory_operand" "=m")
(any_truncate:V8QI
(match_operand:V8DI 1 "register_operand" "v")))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vpmov<trunsuffix>qb\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "memory" "store")
@@ -16104,7 +16137,7 @@
(subreg:DI
(any_truncate:V8QI
(match_operand:V8DI 1 "register_operand")) 0))]
- "TARGET_AVX512F && TARGET_EVEX512 && ix86_pre_reload_split ()"
+ "TARGET_AVX512F && ix86_pre_reload_split ()"
"#"
"&& 1"
[(set (match_dup 0)
@@ -16128,7 +16161,7 @@
(const_int 0) (const_int 0)
(const_int 0) (const_int 0)
(const_int 0) (const_int 0)])))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vpmov<trunsuffix>qb\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
@@ -16149,7 +16182,7 @@
(const_int 0) (const_int 0)
(const_int 0) (const_int 0)
(const_int 0) (const_int 0)])))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vpmov<trunsuffix>qb\t{%1, %0%{%2%}%{z%}|%0%{%2%}%{z%}, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
@@ -16162,7 +16195,7 @@
(match_operand:V8DI 1 "register_operand" "v"))
(match_dup 0)
(match_operand:QI 2 "register_operand" "Yk")))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vpmov<trunsuffix>qb\t{%1, %0%{%2%}|%0%{%2%}, %1}"
[(set_attr "type" "ssemov")
(set_attr "memory" "store")
@@ -16174,7 +16207,7 @@
(any_truncate:V8QI
(match_operand:V8DI 1 "register_operand"))
(match_operand:QI 2 "register_operand")]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
{
operands[0] = adjust_address_nv (operands[0], V8QImode, 0);
emit_insn (gen_avx512f_<code>v8div16qi2_mask_store_1 (operands[0],
@@ -16431,7 +16464,7 @@
(const_int 4) (const_int 6)
(const_int 8) (const_int 10)
(const_int 12) (const_int 14)])))))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"ix86_fixup_binary_operands_no_copy (MULT, V16SImode, operands);")
(define_insn "*vec_widen_umult_even_v16si<mask_name>"
@@ -16451,7 +16484,7 @@
(const_int 4) (const_int 6)
(const_int 8) (const_int 10)
(const_int 12) (const_int 14)])))))]
- "TARGET_AVX512F && TARGET_EVEX512
+ "TARGET_AVX512F
&& !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"vpmuludq\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sseimul")
@@ -16547,7 +16580,7 @@
(const_int 4) (const_int 6)
(const_int 8) (const_int 10)
(const_int 12) (const_int 14)])))))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"ix86_fixup_binary_operands_no_copy (MULT, V16SImode, operands);")
(define_insn "*vec_widen_smult_even_v16si<mask_name>"
@@ -16567,7 +16600,7 @@
(const_int 4) (const_int 6)
(const_int 8) (const_int 10)
(const_int 12) (const_int 14)])))))]
- "TARGET_AVX512F && TARGET_EVEX512
+ "TARGET_AVX512F
&& !(MEM_P (operands[1]) && MEM_P (operands[2]))"
"vpmuldq\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sseimul")
@@ -16969,7 +17002,7 @@
"TARGET_SSE2"
{
/* Try with vnni instructions. */
- if ((<MODE_SIZE> == 64 && TARGET_AVX512VNNI && TARGET_EVEX512)
+ if ((<MODE_SIZE> == 64 && TARGET_AVX512VNNI)
|| (<MODE_SIZE> < 64
&& ((TARGET_AVX512VNNI && TARGET_AVX512VL) || TARGET_AVXVNNI)))
{
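The expander body in the hunk above falls back to VNNI dot-product instructions when they are available; with the TARGET_EVEX512 test dropped, the 512-bit path now only requires TARGET_AVX512VNNI. As a rough illustration (the function name, element types and flags below are mine, not taken from the patch), a widening multiply-accumulate reduction of this shape is what such dot_prod expanders serve, e.g. when compiled with -O3 -mavx512vnni:

/* Hypothetical example: a reduction the vectorizer can map onto a
   dot_prod pattern (vpdpwssd with VNNI, pmaddwd-based sequences
   otherwise).  */
int
dot16 (const short *a, const short *b, int n)
{
  int sum = 0;
  for (int i = 0; i < n; i++)
    sum += a[i] * b[i];
  return sum;
}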
@@ -17062,7 +17095,7 @@
(match_operand:V64QI 1 "register_operand")
(match_operand:V64QI 2 "nonimmediate_operand")
(match_operand:V16SI 3 "nonimmediate_operand")]
- "TARGET_AVX512BW && TARGET_EVEX512"
+ "TARGET_AVX512BW"
{
rtx t1 = gen_reg_rtx (V8DImode);
rtx t2 = gen_reg_rtx (V16SImode);
@@ -18300,13 +18333,10 @@
(V8SI "TARGET_AVX2") (V4DI "TARGET_AVX2")
(V8SF "TARGET_AVX2") (V4DF "TARGET_AVX2")
(V16HF "TARGET_AVX512FP16")
- (V16SF "TARGET_AVX512F && TARGET_EVEX512")
- (V8DF "TARGET_AVX512F && TARGET_EVEX512")
- (V16SI "TARGET_AVX512F && TARGET_EVEX512")
- (V8DI "TARGET_AVX512F && TARGET_EVEX512")
- (V32HI "TARGET_AVX512BW && TARGET_EVEX512")
- (V64QI "TARGET_AVX512VBMI && TARGET_EVEX512")
- (V32HF "TARGET_AVX512FP16 && TARGET_EVEX512")])
+ (V16SF "TARGET_AVX512F") (V8DF "TARGET_AVX512F")
+ (V16SI "TARGET_AVX512F") (V8DI "TARGET_AVX512F")
+ (V32HI "TARGET_AVX512BW") (V64QI "TARGET_AVX512VBMI")
+ (V32HF "TARGET_AVX512FP16")])
(define_expand "vec_perm<mode>"
[(match_operand:VEC_PERM_AVX2 0 "register_operand")
@@ -18333,7 +18363,7 @@
{
operands[2] = CONSTM1_RTX (<MODE>mode);
- if (!TARGET_AVX512F || (!TARGET_AVX512VL && !TARGET_EVEX512))
+ if (!TARGET_AVX512F)
operands[2] = force_reg (<MODE>mode, operands[2]);
})
@@ -18342,7 +18372,6 @@
(xor:VI (match_operand:VI 1 "bcst_vector_operand" " 0, m,Br")
(match_operand:VI 2 "vector_all_ones_operand" "BC,BC,BC")))]
"TARGET_AVX512F
- && (<MODE_SIZE> == 64 || TARGET_AVX512VL || TARGET_EVEX512)
&& (!<mask_applied>
|| <ssescalarmode>mode == SImode
|| <ssescalarmode>mode == DImode)"
@@ -18409,7 +18438,7 @@
(match_operand:VI 2 "vector_all_ones_operand" "BC,BC,BC")))
(unspec [(match_operand:VI 3 "register_operand" "0,0,0")]
UNSPEC_INSN_FALSE_DEP)]
- "TARGET_AVX512F && (<MODE_SIZE> == 64 || TARGET_AVX512VL || TARGET_EVEX512)"
+ "TARGET_AVX512F"
{
if (TARGET_AVX512VL)
return "vpternlog<ternlogsuffix>\t{$0x55, %1, %0, %0<mask_operand3>|%0<mask_operand3>, %0, %1, 0x55}";
@@ -18433,7 +18462,7 @@
(not:<ssescalarmode>
(match_operand:<ssescalarmode> 1 "nonimmediate_operand"))))]
"<MODE_SIZE> == 64 || TARGET_AVX512VL
- || (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256)"
+ || (TARGET_AVX512F && !TARGET_PREFER_AVX256)"
[(set (match_dup 0)
(xor:VI48_AVX512F
(vec_duplicate:VI48_AVX512F (match_dup 1))
@@ -18587,8 +18616,7 @@
(symbol_ref "<MODE_SIZE> == 64 || TARGET_AVX512VL")
(eq_attr "alternative" "4")
(symbol_ref "<MODE_SIZE> == 64 || TARGET_AVX512VL
- || (TARGET_AVX512F && TARGET_EVEX512
- && !TARGET_PREFER_AVX256)")
+ || (TARGET_AVX512F && !TARGET_PREFER_AVX256)")
]
(const_string "*")))])
@@ -18632,7 +18660,7 @@
(match_operand:<ssescalarmode> 1 "nonimmediate_operand")))
(match_operand:VI 2 "vector_operand")))]
"<MODE_SIZE> == 64 || TARGET_AVX512VL
- || (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256)"
+ || (TARGET_AVX512F && !TARGET_PREFER_AVX256)"
[(set (match_dup 3)
(vec_duplicate:VI (match_dup 1)))
(set (match_dup 0)
@@ -18647,7 +18675,7 @@
(match_operand:<ssescalarmode> 1 "nonimmediate_operand")))
(match_operand:VI 2 "vector_operand")))]
"<MODE_SIZE> == 64 || TARGET_AVX512VL
- || (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256)"
+ || (TARGET_AVX512F && !TARGET_PREFER_AVX256)"
[(set (match_dup 3)
(vec_duplicate:VI (match_dup 1)))
(set (match_dup 0)
@@ -18941,7 +18969,7 @@
(match_operand:VI 1 "bcst_vector_operand" "0,m, 0,vBr"))
(match_operand:VI 2 "bcst_vector_operand" "m,0,vBr, 0")))]
"(<MODE_SIZE> == 64 || TARGET_AVX512VL
- || (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256))
+ || (TARGET_AVX512F && !TARGET_PREFER_AVX256))
&& (register_operand (operands[1], <MODE>mode)
|| register_operand (operands[2], <MODE>mode))"
{
@@ -18974,7 +19002,7 @@
(match_operand:VI 1 "bcst_vector_operand" "%0, 0")
(match_operand:VI 2 "bcst_vector_operand" " m,vBr"))))]
"(<MODE_SIZE> == 64 || TARGET_AVX512VL
- || (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256))
+ || (TARGET_AVX512F && !TARGET_PREFER_AVX256))
&& (register_operand (operands[1], <MODE>mode)
|| register_operand (operands[2], <MODE>mode))"
{
@@ -19005,7 +19033,7 @@
(not:VI (match_operand:VI 1 "bcst_vector_operand" "%0, 0"))
(not:VI (match_operand:VI 2 "bcst_vector_operand" "m,vBr"))))]
"(<MODE_SIZE> == 64 || TARGET_AVX512VL
- || (TARGET_AVX512F && TARGET_EVEX512 && !TARGET_PREFER_AVX256))
+ || (TARGET_AVX512F && !TARGET_PREFER_AVX256))
&& (register_operand (operands[1], <MODE>mode)
|| register_operand (operands[2], <MODE>mode))"
{
@@ -19027,7 +19055,7 @@
(const_string "*")))])
(define_mode_iterator AVX512ZEXTMASK
- [(DI "TARGET_AVX512BW && TARGET_EVEX512") (SI "TARGET_AVX512BW") HI])
+ [(DI "TARGET_AVX512BW") (SI "TARGET_AVX512BW") HI])
(define_insn "<avx512>_testm<mode>3<mask_scalar_merge_name>"
[(set (match_operand:<avx512fmaskmode> 0 "register_operand" "=k")
@@ -19276,7 +19304,7 @@
(const_int 60) (const_int 61)
(const_int 62) (const_int 63)])))]
- "TARGET_AVX512BW && TARGET_EVEX512"
+ "TARGET_AVX512BW"
"vpacksswb\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "<mask_prefix>")
@@ -19345,7 +19373,7 @@
(const_int 14) (const_int 15)
(const_int 28) (const_int 29)
(const_int 30) (const_int 31)])))]
- "TARGET_AVX512BW && TARGET_EVEX512"
+ "TARGET_AVX512BW"
"vpackssdw\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "<mask_prefix>")
@@ -19407,7 +19435,7 @@
(const_int 61) (const_int 125)
(const_int 62) (const_int 126)
(const_int 63) (const_int 127)])))]
- "TARGET_AVX512BW && TARGET_EVEX512"
+ "TARGET_AVX512BW"
"vpunpckhbw\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -19503,7 +19531,7 @@
(const_int 53) (const_int 117)
(const_int 54) (const_int 118)
(const_int 55) (const_int 119)])))]
- "TARGET_AVX512BW && TARGET_EVEX512"
+ "TARGET_AVX512BW"
"vpunpcklbw\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -19727,7 +19755,7 @@
(const_int 11) (const_int 27)
(const_int 14) (const_int 30)
(const_int 15) (const_int 31)])))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vpunpckhdq\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -19782,7 +19810,7 @@
(const_int 9) (const_int 25)
(const_int 12) (const_int 28)
(const_int 13) (const_int 29)])))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vpunpckldq\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -20488,7 +20516,7 @@
(match_operand:SI 2 "const_0_to_255_operand")
(match_operand:V16SI 3 "register_operand")
(match_operand:HI 4 "register_operand")]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
{
int mask = INTVAL (operands[2]);
emit_insn (gen_avx512f_pshufd_1_mask (operands[0], operands[1],
@@ -20532,7 +20560,7 @@
(match_operand 15 "const_12_to_15_operand")
(match_operand 16 "const_12_to_15_operand")
(match_operand 17 "const_12_to_15_operand")])))]
- "TARGET_AVX512F && TARGET_EVEX512
+ "TARGET_AVX512F
&& INTVAL (operands[2]) + 4 == INTVAL (operands[6])
&& INTVAL (operands[3]) + 4 == INTVAL (operands[7])
&& INTVAL (operands[4]) + 4 == INTVAL (operands[8])
@@ -20698,7 +20726,7 @@
[(match_operand:V32HI 1 "nonimmediate_operand" "vm")
(match_operand:SI 2 "const_0_to_255_operand")]
UNSPEC_PSHUFLW))]
- "TARGET_AVX512BW && TARGET_EVEX512"
+ "TARGET_AVX512BW"
"vpshuflw\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -20874,7 +20902,7 @@
[(match_operand:V32HI 1 "nonimmediate_operand" "vm")
(match_operand:SI 2 "const_0_to_255_operand")]
UNSPEC_PSHUFHW))]
- "TARGET_AVX512BW && TARGET_EVEX512"
+ "TARGET_AVX512BW"
"vpshufhw\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix" "evex")
@@ -21408,7 +21436,7 @@
(match_operand:V4TI 1 "register_operand" "v")
(parallel
[(match_operand:SI 2 "const_0_to_3_operand")])))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vextracti32x4\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "length_immediate" "1")
@@ -21416,7 +21444,7 @@
(set_attr "mode" "XI")])
(define_mode_iterator VEXTRACTI128_MODE
- [(V4TI "TARGET_AVX512F && TARGET_EVEX512") V2TI])
+ [(V4TI "TARGET_AVX512F") V2TI])
(define_split
[(set (match_operand:TI 0 "nonimmediate_operand")
@@ -21439,7 +21467,7 @@
&& VECTOR_MODE_P (GET_MODE (operands[1]))
&& ((TARGET_SSE && GET_MODE_SIZE (GET_MODE (operands[1])) == 16)
|| (TARGET_AVX && GET_MODE_SIZE (GET_MODE (operands[1])) == 32)
- || (TARGET_AVX512F && TARGET_EVEX512
+ || (TARGET_AVX512F
&& GET_MODE_SIZE (GET_MODE (operands[1])) == 64))
&& (<MODE>mode == SImode || TARGET_64BIT || MEM_P (operands[0]))"
[(set (match_dup 0) (vec_select:SWI48x (match_dup 1)
@@ -21701,6 +21729,19 @@
(const_string "orig")))
(set_attr "mode" "TI,TI,TI,TI,TI,TI,V4SF,V2SF,V2SF")])
+;; Eliminate redundancy caused by
+;; /* Special case TImode to 128-bit vector conversions via V2DI. */
+;; in ix86_expand_vector_move
+
+(define_split
+ [(set (match_operand:V2DI 0 "register_operand")
+ (vec_concat:V2DI
+ (subreg:DI (match_operand:TI 1 "register_operand") 0)
+ (subreg:DI (match_dup 1) 8)))]
+ "TARGET_SSE2 && ix86_pre_reload_split ()"
+ [(set (match_dup 0)
+ (subreg:V2DI (match_dup 1) 0))])
+
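The new split above folds a vec_concat of the two DImode halves of one TImode register back into a single V2DI subreg move. As a sketch of the kind of source that reaches the TImode-to-128-bit-vector path mentioned in the comment (the function and the memcpy-based reinterpretation are illustrative assumptions, not taken from the patch):

/* Hypothetical example: reinterpreting a 128-bit scalar as a vector.
   ix86_expand_vector_move special-cases TImode -> 128-bit vector moves
   via V2DI, which could previously leave a redundant
   (vec_concat:V2DI (subreg:DI x 0) (subreg:DI x 8)); the split rewrites
   it as a plain (subreg:V2DI x 0).  */
#include <immintrin.h>

__m128i
ti_to_vec (unsigned __int128 x)
{
  __m128i v;
  __builtin_memcpy (&v, &x, sizeof (v));
  return v;
}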
(define_insn "*vec_concatv2di_0"
[(set (match_operand:V2DI 0 "register_operand" "=v,v ,x")
(vec_concat:V2DI
@@ -22814,7 +22855,7 @@
(const_int 1) (const_int 1)
(const_int 1) (const_int 1)]))
(const_int 1))))]
- "TARGET_AVX512BW && TARGET_EVEX512"
+ "TARGET_AVX512BW"
"vpmulhrsw\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "type" "sseimul")
(set_attr "prefix" "evex")
@@ -23328,10 +23369,10 @@
;; Mode iterator to handle singularity w/ absence of V2DI and V4DI
;; modes for abs instruction on pre AVX-512 targets.
(define_mode_iterator VI1248_AVX512VL_AVX512BW
- [(V64QI "TARGET_AVX512BW && TARGET_EVEX512") (V32QI "TARGET_AVX2") V16QI
- (V32HI "TARGET_AVX512BW && TARGET_EVEX512") (V16HI "TARGET_AVX2") V8HI
- (V16SI "TARGET_AVX512F && TARGET_EVEX512") (V8SI "TARGET_AVX2") V4SI
- (V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX512VL")
+ [(V64QI "TARGET_AVX512BW") (V32QI "TARGET_AVX2") V16QI
+ (V32HI "TARGET_AVX512BW") (V16HI "TARGET_AVX2") V8HI
+ (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX2") V4SI
+ (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX512VL")
(V2DI "TARGET_AVX512VL")])
(define_insn "*abs<mode>2"
@@ -24159,7 +24200,7 @@
[(set (match_operand:V32HI 0 "register_operand" "=v")
(any_extend:V32HI
(match_operand:V32QI 1 "nonimmediate_operand" "vm")))]
- "TARGET_AVX512BW && TARGET_EVEX512"
+ "TARGET_AVX512BW"
"vpmov<extsuffix>bw\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
@@ -24173,7 +24214,7 @@
(match_operand:V64QI 2 "const0_operand"))
(match_parallel 3 "pmovzx_parallel"
[(match_operand 4 "const_int_operand")])))]
- "TARGET_AVX512BW && TARGET_EVEX512"
+ "TARGET_AVX512BW"
"#"
"&& reload_completed"
[(set (match_dup 0) (zero_extend:V32HI (match_dup 1)))]
@@ -24193,7 +24234,7 @@
(match_operand:V64QI 3 "const0_operand"))
(match_parallel 4 "pmovzx_parallel"
[(match_operand 5 "const_int_operand")])))]
- "TARGET_AVX512BW && TARGET_EVEX512"
+ "TARGET_AVX512BW"
"#"
"&& reload_completed"
[(set (match_dup 0) (zero_extend:V32HI (match_dup 1)))]
@@ -24206,7 +24247,7 @@
[(set (match_operand:V32HI 0 "register_operand")
(any_extend:V32HI
(match_operand:V32QI 1 "nonimmediate_operand")))]
- "TARGET_AVX512BW && TARGET_EVEX512")
+ "TARGET_AVX512BW")
(define_insn "sse4_1_<code>v8qiv8hi2<mask_name>"
[(set (match_operand:V8HI 0 "register_operand" "=Yr,*x,Yw")
@@ -24354,7 +24395,7 @@
[(set (match_operand:V16SI 0 "register_operand" "=v")
(any_extend:V16SI
(match_operand:V16QI 1 "nonimmediate_operand" "vm")))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vpmov<extsuffix>bd\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
@@ -24364,7 +24405,7 @@
[(set (match_operand:V16SI 0 "register_operand")
(any_extend:V16SI
(match_operand:V16QI 1 "nonimmediate_operand")))]
- "TARGET_AVX512F && TARGET_EVEX512")
+ "TARGET_AVX512F")
(define_insn "avx2_<code>v8qiv8si2<mask_name>"
[(set (match_operand:V8SI 0 "register_operand" "=v")
@@ -24497,7 +24538,7 @@
[(set (match_operand:V16SI 0 "register_operand" "=v")
(any_extend:V16SI
(match_operand:V16HI 1 "nonimmediate_operand" "vm")))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vpmov<extsuffix>wd\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
@@ -24507,7 +24548,7 @@
[(set (match_operand:V16SI 0 "register_operand")
(any_extend:V16SI
(match_operand:V16HI 1 "nonimmediate_operand")))]
- "TARGET_AVX512F && TARGET_EVEX512")
+ "TARGET_AVX512F")
(define_insn_and_split "avx512f_zero_extendv16hiv16si2_1"
[(set (match_operand:V32HI 0 "register_operand" "=v")
@@ -24517,7 +24558,7 @@
(match_operand:V32HI 2 "const0_operand"))
(match_parallel 3 "pmovzx_parallel"
[(match_operand 4 "const_int_operand")])))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"#"
"&& reload_completed"
[(set (match_dup 0) (zero_extend:V16SI (match_dup 1)))]
@@ -24741,7 +24782,7 @@
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)]))))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vpmov<extsuffix>bq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
@@ -24751,7 +24792,7 @@
[(set (match_operand:V8DI 0 "register_operand" "=v")
(any_extend:V8DI
(match_operand:V8QI 1 "memory_operand" "m")))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vpmov<extsuffix>bq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
@@ -24769,7 +24810,7 @@
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)]))))]
- "TARGET_AVX512F && TARGET_EVEX512 && ix86_pre_reload_split ()"
+ "TARGET_AVX512F && ix86_pre_reload_split ()"
"#"
"&& 1"
[(set (match_dup 0)
@@ -24780,7 +24821,7 @@
[(set (match_operand:V8DI 0 "register_operand")
(any_extend:V8DI
(match_operand:V8QI 1 "nonimmediate_operand")))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
{
if (!MEM_P (operands[1]))
{
@@ -24922,7 +24963,7 @@
[(set (match_operand:V8DI 0 "register_operand" "=v")
(any_extend:V8DI
(match_operand:V8HI 1 "nonimmediate_operand" "vm")))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vpmov<extsuffix>wq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
@@ -24932,7 +24973,7 @@
[(set (match_operand:V8DI 0 "register_operand")
(any_extend:V8DI
(match_operand:V8HI 1 "nonimmediate_operand")))]
- "TARGET_AVX512F && TARGET_EVEX512")
+ "TARGET_AVX512F")
(define_insn "avx2_<code>v4hiv4di2<mask_name>"
[(set (match_operand:V4DI 0 "register_operand" "=v")
@@ -25059,7 +25100,7 @@
[(set (match_operand:V8DI 0 "register_operand" "=v")
(any_extend:V8DI
(match_operand:V8SI 1 "nonimmediate_operand" "vm")))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vpmov<extsuffix>dq\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "evex")
@@ -25073,7 +25114,7 @@
(match_operand:V16SI 2 "const0_operand"))
(match_parallel 3 "pmovzx_parallel"
[(match_operand 4 "const_int_operand")])))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"#"
"&& reload_completed"
[(set (match_dup 0) (zero_extend:V8DI (match_dup 1)))]
@@ -25092,7 +25133,7 @@
(match_operand:V16SI 3 "const0_operand"))
(match_parallel 4 "pmovzx_parallel"
[(match_operand 5 "const_int_operand")])))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"#"
"&& reload_completed"
[(set (match_dup 0) (zero_extend:V8DI (match_dup 1)))]
@@ -25104,7 +25145,7 @@
[(set (match_operand:V8DI 0 "register_operand" "=v")
(any_extend:V8DI
(match_operand:V8SI 1 "nonimmediate_operand" "vm")))]
- "TARGET_AVX512F && TARGET_EVEX512")
+ "TARGET_AVX512F")
(define_insn "avx2_<code>v4siv4di2<mask_name>"
[(set (match_operand:V4DI 0 "register_operand" "=v")
@@ -25505,7 +25546,7 @@
[(match_operand:V16SI 0 "register_operand")
(match_operand:V16SF 1 "nonimmediate_operand")
(match_operand:SI 2 "const_0_to_15_operand")]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
{
rtx tmp = gen_reg_rtx (V16SFmode);
emit_insn (gen_avx512f_rndscalev16sf (tmp, operands[1], operands[2]));
@@ -26723,7 +26764,7 @@
(ashiftrt:V8DI
(match_operand:V8DI 1 "register_operand")
(match_operand:V8DI 2 "nonimmediate_operand")))]
- "TARGET_AVX512F && TARGET_EVEX512")
+ "TARGET_AVX512F")
(define_expand "vashrv4di3"
[(set (match_operand:V4DI 0 "register_operand")
@@ -26814,7 +26855,7 @@
[(set (match_operand:V16SI 0 "register_operand")
(ashiftrt:V16SI (match_operand:V16SI 1 "register_operand")
(match_operand:V16SI 2 "nonimmediate_operand")))]
- "TARGET_AVX512F && TARGET_EVEX512")
+ "TARGET_AVX512F")
(define_expand "vashrv8si3"
[(set (match_operand:V8SI 0 "register_operand")
@@ -27257,12 +27298,12 @@
(set_attr "mode" "OI")])
(define_mode_attr pbroadcast_evex_isa
- [(V64QI "avx512bw_512") (V32QI "avx512bw") (V16QI "avx512bw")
- (V32HI "avx512bw_512") (V16HI "avx512bw") (V8HI "avx512bw")
- (V16SI "avx512f_512") (V8SI "avx512f") (V4SI "avx512f")
- (V8DI "avx512f_512") (V4DI "avx512f") (V2DI "avx512f")
- (V32HF "avx512bw_512") (V16HF "avx512bw") (V8HF "avx512bw")
- (V32BF "avx512bw_512") (V16BF "avx512bw") (V8BF "avx512bw")])
+ [(V64QI "avx512bw") (V32QI "avx512bw") (V16QI "avx512bw")
+ (V32HI "avx512bw") (V16HI "avx512bw") (V8HI "avx512bw")
+ (V16SI "avx512f") (V8SI "avx512f") (V4SI "avx512f")
+ (V8DI "avx512f") (V4DI "avx512f") (V2DI "avx512f")
+ (V32HF "avx512bw") (V16HF "avx512bw") (V8HF "avx512bw")
+ (V32BF "avx512bw") (V16BF "avx512bw") (V8BF "avx512bw")])
(define_insn "avx2_pbroadcast<mode>"
[(set (match_operand:VIHFBF 0 "register_operand" "=x,v")
@@ -27806,7 +27847,7 @@
(set (attr "enabled")
(if_then_else (eq_attr "alternative" "1")
(symbol_ref "TARGET_AVX512F && !TARGET_AVX512VL
- && TARGET_EVEX512 && !TARGET_PREFER_AVX256")
+ && !TARGET_PREFER_AVX256")
(const_string "*")))])
(define_insn "*vec_dupv4si"
@@ -27834,7 +27875,7 @@
(set (attr "enabled")
(if_then_else (eq_attr "alternative" "1")
(symbol_ref "TARGET_AVX512F && !TARGET_AVX512VL
- && TARGET_EVEX512 && !TARGET_PREFER_AVX256")
+ && !TARGET_PREFER_AVX256")
(const_string "*")))])
(define_insn "*vec_dupv2di"
@@ -27849,7 +27890,7 @@
%vmovddup\t{%1, %0|%0, %1}
movlhps\t%0, %0"
[(set_attr "isa" "sse2_noavx,avx,avx512f,sse3,noavx")
- (set_attr "type" "sselog1,sselog1,ssemov,sselog1,ssemov")
+ (set_attr "type" "sselog1,sselog1,ssemov,ssemov,ssemov")
(set_attr "prefix" "orig,maybe_evex,evex,maybe_vex,orig")
(set (attr "mode")
(cond [(and (eq_attr "alternative" "2")
@@ -27865,8 +27906,7 @@
(if_then_else
(eq_attr "alternative" "2")
(symbol_ref "TARGET_AVX512VL
- || (TARGET_AVX512F && TARGET_EVEX512
- && !TARGET_PREFER_AVX256)")
+ || (TARGET_AVX512F && !TARGET_PREFER_AVX256)")
(const_string "*")))])
(define_insn "avx2_vbroadcasti128_<mode>"
@@ -27946,7 +27986,7 @@
[(set_attr "type" "ssemov")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "maybe_evex")
- (set_attr "isa" "avx2,noavx2,avx2,avx512f_512,noavx2")
+ (set_attr "isa" "avx2,noavx2,avx2,avx512f,noavx2")
(set_attr "mode" "<sseinsnmode>,V8SF,<sseinsnmode>,<sseinsnmode>,V8SF")])
(define_split
@@ -28010,8 +28050,8 @@
;; For broadcast[i|f]32x2. Yes there is no v4sf version, only v4si.
(define_mode_iterator VI4F_BRCST32x2
- [(V16SI "TARGET_EVEX512") (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
- (V16SF "TARGET_EVEX512") (V8SF "TARGET_AVX512VL")])
+ [V16SI (V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
+ V16SF (V8SF "TARGET_AVX512VL")])
(define_mode_attr 64x2mode
[(V8DF "V2DF") (V8DI "V2DI") (V4DI "V2DI") (V4DF "V2DF")])
@@ -28061,8 +28101,7 @@
;; For broadcast[i|f]64x2
(define_mode_iterator VI8F_BRCST64x2
- [(V8DI "TARGET_EVEX512") (V8DF "TARGET_EVEX512")
- (V4DI "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")])
+ [V8DI V8DF (V4DI "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")])
(define_insn "<mask_codefor>avx512dq_broadcast<mode><mask_name>_1"
[(set (match_operand:VI8F_BRCST64x2 0 "register_operand" "=v,v")
@@ -28118,27 +28157,26 @@
(set_attr "mode" "<sseinsnmode>")])
(define_mode_iterator VPERMI2
- [(V16SI "TARGET_EVEX512") (V16SF "TARGET_EVEX512")
- (V8DI "TARGET_EVEX512") (V8DF "TARGET_EVEX512")
+ [V16SI V16SF V8DI V8DF
(V8SI "TARGET_AVX512VL") (V8SF "TARGET_AVX512VL")
(V4DI "TARGET_AVX512VL") (V4DF "TARGET_AVX512VL")
(V4SI "TARGET_AVX512VL") (V4SF "TARGET_AVX512VL")
(V2DI "TARGET_AVX512VL") (V2DF "TARGET_AVX512VL")
- (V32HI "TARGET_AVX512BW && TARGET_EVEX512")
+ (V32HI "TARGET_AVX512BW")
(V16HI "TARGET_AVX512BW && TARGET_AVX512VL")
(V8HI "TARGET_AVX512BW && TARGET_AVX512VL")
- (V64QI "TARGET_AVX512VBMI && TARGET_EVEX512")
+ (V64QI "TARGET_AVX512VBMI")
(V32QI "TARGET_AVX512VBMI && TARGET_AVX512VL")
(V16QI "TARGET_AVX512VBMI && TARGET_AVX512VL")])
(define_mode_iterator VPERMI2I
- [(V16SI "TARGET_EVEX512") (V8DI "TARGET_EVEX512")
+ [V16SI V8DI
(V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")
(V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")
- (V32HI "TARGET_AVX512BW && TARGET_EVEX512")
+ (V32HI "TARGET_AVX512BW")
(V16HI "TARGET_AVX512BW && TARGET_AVX512VL")
(V8HI "TARGET_AVX512BW && TARGET_AVX512VL")
- (V64QI "TARGET_AVX512VBMI && TARGET_EVEX512")
+ (V64QI "TARGET_AVX512VBMI")
(V32QI "TARGET_AVX512VBMI && TARGET_AVX512VL")
(V16QI "TARGET_AVX512VBMI && TARGET_AVX512VL")])
@@ -28813,29 +28851,28 @@
;; Modes handled by vec_init expanders.
(define_mode_iterator VEC_INIT_MODE
- [(V64QI "TARGET_AVX512F && TARGET_EVEX512") (V32QI "TARGET_AVX") V16QI
- (V32HI "TARGET_AVX512F && TARGET_EVEX512") (V16HI "TARGET_AVX") V8HI
- (V16SI "TARGET_AVX512F && TARGET_EVEX512") (V8SI "TARGET_AVX") V4SI
- (V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX") V2DI
- (V32HF "TARGET_AVX512F && TARGET_EVEX512") (V16HF "TARGET_AVX") V8HF
- (V32BF "TARGET_AVX512F && TARGET_EVEX512") (V16BF "TARGET_AVX") V8BF
- (V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX") V4SF
- (V8DF "TARGET_AVX512F && TARGET_EVEX512")
- (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")
- (V4TI "TARGET_AVX512F && TARGET_EVEX512") (V2TI "TARGET_AVX")])
+ [(V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI
+ (V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI
+ (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
+ (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX") V2DI
+ (V32HF "TARGET_AVX512F") (V16HF "TARGET_AVX") V8HF
+ (V32BF "TARGET_AVX512F") (V16BF "TARGET_AVX") V8BF
+ (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
+ (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")
+ (V4TI "TARGET_AVX512F") (V2TI "TARGET_AVX")])
;; Likewise, but for initialization from half sized vectors.
;; Thus, these are all VEC_INIT_MODE modes except V2??.
(define_mode_iterator VEC_INIT_HALF_MODE
- [(V64QI "TARGET_AVX512F && TARGET_EVEX512") (V32QI "TARGET_AVX") V16QI
- (V32HI "TARGET_AVX512F && TARGET_EVEX512") (V16HI "TARGET_AVX") V8HI
- (V16SI "TARGET_AVX512F && TARGET_EVEX512") (V8SI "TARGET_AVX") V4SI
- (V8DI "TARGET_AVX512F && TARGET_EVEX512") (V4DI "TARGET_AVX")
- (V32HF "TARGET_AVX512F && TARGET_EVEX512") (V16HF "TARGET_AVX") V8HF
- (V32BF "TARGET_AVX512F && TARGET_EVEX512") (V16BF "TARGET_AVX") V8BF
- (V16SF "TARGET_AVX512F && TARGET_EVEX512") (V8SF "TARGET_AVX") V4SF
- (V8DF "TARGET_AVX512F && TARGET_EVEX512") (V4DF "TARGET_AVX")
- (V4TI "TARGET_AVX512F && TARGET_EVEX512")])
+ [(V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI
+ (V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI
+ (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
+ (V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX")
+ (V32HF "TARGET_AVX512F") (V16HF "TARGET_AVX") V8HF
+ (V32BF "TARGET_AVX512F") (V16BF "TARGET_AVX") V8BF
+ (V16SF "TARGET_AVX512F") (V8SF "TARGET_AVX") V4SF
+ (V8DF "TARGET_AVX512F") (V4DF "TARGET_AVX")
+ (V4TI "TARGET_AVX512F")])
(define_expand "vec_init<mode><ssescalarmodelower>"
[(match_operand:VEC_INIT_MODE 0 "register_operand")
@@ -29096,7 +29133,7 @@
(unspec:V16SF
[(match_operand:V16HI 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
UNSPEC_VCVTPH2PS))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vcvtph2ps\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -29186,7 +29223,7 @@
UNSPEC_VCVTPS2PH)
(match_operand:V16HI 3 "nonimm_or_0_operand")
(match_operand:HI 4 "register_operand")))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
{
int round = INTVAL (operands[2]);
/* Separate {sae} from rounding control imm,
@@ -29205,7 +29242,7 @@
[(match_operand:V16SF 1 "register_operand" "v")
(match_operand:SI 2 "const_0_to_255_operand")]
UNSPEC_VCVTPS2PH))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vcvtps2ph\t{%2, <round_saeonly_mask_op3>%1, %0<mask_operand3>|%0<mask_operand3>, %1<round_saeonly_mask_op3>, %2}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -29217,7 +29254,7 @@
[(match_operand:V16SF 1 "register_operand" "v")
(match_operand:SI 2 "const_0_to_255_operand")]
UNSPEC_VCVTPS2PH))]
- "TARGET_AVX512F && TARGET_EVEX512"
+ "TARGET_AVX512F"
"vcvtps2ph\t{%2, %1, %0<merge_mask_operand3>|%0<merge_mask_operand3>, %1, %2}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "evex")
@@ -30196,7 +30233,7 @@
(match_operand:V8DI 2 "register_operand" "v")
(match_operand:V8DI 3 "nonimmediate_operand" "vm")]
VPMADD52))]
- "TARGET_AVX512IFMA && TARGET_EVEX512"
+ "TARGET_AVX512IFMA"
"vpmadd52<vpmadd52type>\t{%3, %2, %0|%0, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "prefix" "evex")
@@ -30567,7 +30604,7 @@
(match_operand:V16SI 2 "register_operand" "v")
(match_operand:V16SI 3 "nonimmediate_operand" "vm")]
UNSPEC_VPDPBUSD))]
- "TARGET_AVX512VNNI && TARGET_EVEX512"
+ "TARGET_AVX512VNNI"
"vpdpbusd\t{%3, %2, %0|%0, %2, %3}"
[(set_attr ("prefix") ("evex"))])
@@ -30636,7 +30673,7 @@
(match_operand:V16SI 2 "register_operand" "v")
(match_operand:V16SI 3 "nonimmediate_operand" "vm")]
UNSPEC_VPDPBUSDS))]
- "TARGET_AVX512VNNI && TARGET_EVEX512"
+ "TARGET_AVX512VNNI"
"vpdpbusds\t{%3, %2, %0|%0, %2, %3}"
[(set_attr ("prefix") ("evex"))])
@@ -30705,7 +30742,7 @@
(match_operand:V16SI 2 "register_operand" "v")
(match_operand:V16SI 3 "nonimmediate_operand" "vm")]
UNSPEC_VPDPWSSD))]
- "TARGET_AVX512VNNI && TARGET_EVEX512"
+ "TARGET_AVX512VNNI"
"vpdpwssd\t{%3, %2, %0|%0, %2, %3}"
[(set_attr ("prefix") ("evex"))])
@@ -30774,7 +30811,7 @@
(match_operand:V16SI 2 "register_operand" "v")
(match_operand:V16SI 3 "nonimmediate_operand" "vm")]
UNSPEC_VPDPWSSDS))]
- "TARGET_AVX512VNNI && TARGET_EVEX512"
+ "TARGET_AVX512VNNI"
"vpdpwssds\t{%3, %2, %0|%0, %2, %3}"
[(set_attr ("prefix") ("evex"))])
@@ -30930,8 +30967,7 @@
(set_attr "mode" "<sseinsnmode>")])
(define_mode_iterator VI48_AVX512VP2VL
- [(V8DI "TARGET_EVEX512")
- (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")
+ [V8DI (V4DI "TARGET_AVX512VL") (V2DI "TARGET_AVX512VL")
(V8SI "TARGET_AVX512VL") (V4SI "TARGET_AVX512VL")])
(define_mode_iterator MASK_DWI [P2QI P2HI])
@@ -30973,12 +31009,12 @@
(unspec:P2HI [(match_operand:V16SI 1 "register_operand" "v")
(match_operand:V16SI 2 "vector_operand" "vm")]
UNSPEC_VP2INTERSECT))]
- "TARGET_AVX512VP2INTERSECT && TARGET_EVEX512"
+ "TARGET_AVX512VP2INTERSECT"
"vp2intersectd\t{%2, %1, %0|%0, %1, %2}"
[(set_attr ("prefix") ("evex"))])
(define_mode_iterator VF_AVX512BF16VL
- [(V32BF "TARGET_EVEX512") (V16BF "TARGET_AVX512VL") (V8BF "TARGET_AVX512VL")])
+ [V32BF (V16BF "TARGET_AVX512VL") (V8BF "TARGET_AVX512VL")])
;; Converting from BF to SF
(define_mode_attr bf16_cvt_2sf
[(V32BF "V16SF") (V16BF "V8SF") (V8BF "V4SF")])
@@ -31098,7 +31134,7 @@
"vcvtneps2bf16{x}\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}")
(define_mode_iterator VF1_AVX512_256
- [(V16SF "TARGET_EVEX512") (V8SF "TARGET_AVX512VL")])
+ [V16SF (V8SF "TARGET_AVX512VL")])
(define_expand "avx512f_cvtneps2bf16_<mode>_maskz"
[(match_operand:<sf_cvt_bf16> 0 "register_operand")
@@ -31144,7 +31180,7 @@
[(set (match_operand:V16BF 0 "register_operand")
(float_truncate:V16BF
(match_operand:V16SF 1 "nonimmediate_operand")))]
- "TARGET_AVX512BW && TARGET_EVEX512
+ "TARGET_AVX512BW
&& !HONOR_NANS (BFmode) && !flag_rounding_math
&& (flag_unsafe_math_optimizations || TARGET_AVX512BF16)"
{
@@ -31428,10 +31464,10 @@
;; vinserti64x4 $0x1, %ymm15, %zmm15, %zmm15
(define_mode_iterator INT_BROADCAST_MODE
- [(V64QI "TARGET_AVX512F && TARGET_EVEX512") (V32QI "TARGET_AVX") V16QI
- (V32HI "TARGET_AVX512F && TARGET_EVEX512") (V16HI "TARGET_AVX") V8HI
- (V16SI "TARGET_AVX512F && TARGET_EVEX512") (V8SI "TARGET_AVX") V4SI
- (V8DI "TARGET_AVX512F && TARGET_EVEX512 && TARGET_64BIT")
+ [(V64QI "TARGET_AVX512F") (V32QI "TARGET_AVX") V16QI
+ (V32HI "TARGET_AVX512F") (V16HI "TARGET_AVX") V8HI
+ (V16SI "TARGET_AVX512F") (V8SI "TARGET_AVX") V4SI
+ (V8DI "TARGET_AVX512F && TARGET_64BIT")
(V4DI "TARGET_AVX && TARGET_64BIT") (V2DI "TARGET_64BIT")])
;; Broadcast from an integer. NB: Enable broadcast only if we can move
@@ -31705,8 +31741,8 @@
(set_attr "mode" "<sseinsnmode>")])
(define_insn "avx10_2_cvt2ps2phx_<mode><mask_name><round_name>"
- [(set (match_operand:VHF_AVX10_2 0 "register_operand" "=v")
- (vec_concat:VHF_AVX10_2
+ [(set (match_operand:VHF_AVX512VL 0 "register_operand" "=v")
+ (vec_concat:VHF_AVX512VL
(float_truncate:<ssehalfvecmode>
(match_operand:<ssePSmode> 2 "<round_nimm_predicate>" "<round_constraint>"))
(float_truncate:<ssehalfvecmode>
@@ -31730,8 +31766,8 @@
(define_insn "vcvt<convertfp8_pack><mode><mask_name>"
[(set (match_operand:<ssebvecmode> 0 "register_operand" "=v")
(unspec:<ssebvecmode>
- [(match_operand:VHF_AVX10_2 1 "register_operand" "v")
- (match_operand:VHF_AVX10_2 2 "nonimmediate_operand" "vm")]
+ [(match_operand:VHF_AVX512VL 1 "register_operand" "v")
+ (match_operand:VHF_AVX512VL 2 "nonimmediate_operand" "vm")]
UNSPEC_CONVERTFP8_PACK))]
"TARGET_AVX10_2"
"vcvt<convertfp8_pack>\t{%2, %1, %0<mask_operand3>|%0<mask_operand2>, %1, %2}"
@@ -31814,7 +31850,7 @@
[(set_attr "prefix" "evex")])
(define_mode_iterator VHF_AVX10_2_2
- [(V32HF "TARGET_AVX10_2") V16HF])
+ [V32HF V16HF])
(define_insn "vcvt<biasph2fp8_pack><mode><mask_name>"
[(set (match_operand:<ssebvecmode_2> 0 "register_operand" "=v")
@@ -31911,8 +31947,8 @@
[(set_attr "prefix" "evex")])
(define_insn "vcvthf82ph<mode><mask_name>"
- [(set (match_operand:VHF_AVX10_2 0 "register_operand" "=v")
- (unspec:VHF_AVX10_2
+ [(set (match_operand:VHF_AVX512VL 0 "register_operand" "=v")
+ (unspec:VHF_AVX512VL
[(match_operand:<ssebvecmode_2> 1 "nonimmediate_operand" "vm")]
UNSPEC_VCVTHF82PH))]
"TARGET_AVX10_2"
@@ -31934,8 +31970,8 @@
(define_expand "usdot_prod<sseunpackmodelower><mode>"
[(match_operand:<sseunpackmode> 0 "register_operand")
- (match_operand:VI2_AVX10_2 1 "register_operand")
- (match_operand:VI2_AVX10_2 2 "register_operand")
+ (match_operand:VI2_AVX512F 1 "register_operand")
+ (match_operand:VI2_AVX512F 2 "register_operand")
(match_operand:<sseunpackmode> 3 "register_operand")]
"TARGET_AVXVNNIINT16 || TARGET_AVX10_2"
{
@@ -31952,8 +31988,8 @@
(define_expand "udot_prod<sseunpackmodelower><mode>"
[(match_operand:<sseunpackmode> 0 "register_operand")
- (match_operand:VI2_AVX10_2 1 "register_operand")
- (match_operand:VI2_AVX10_2 2 "register_operand")
+ (match_operand:VI2_AVX512F 1 "register_operand")
+ (match_operand:VI2_AVX512F 2 "register_operand")
(match_operand:<sseunpackmode> 3 "register_operand")]
"TARGET_AVXVNNIINT16 || TARGET_AVX10_2"
{
@@ -32032,23 +32068,23 @@
[(set_attr "prefix" "evex")])
(define_insn "vdpphps_<mode>"
- [(set (match_operand:VF1_AVX10_2 0 "register_operand" "=v")
- (unspec:VF1_AVX10_2
- [(match_operand:VF1_AVX10_2 1 "register_operand" "0")
- (match_operand:VF1_AVX10_2 2 "register_operand" "v")
- (match_operand:VF1_AVX10_2 3 "nonimmediate_operand" "vm")]
+ [(set (match_operand:VF1_AVX512VL 0 "register_operand" "=v")
+ (unspec:VF1_AVX512VL
+ [(match_operand:VF1_AVX512VL 1 "register_operand" "0")
+ (match_operand:VF1_AVX512VL 2 "register_operand" "v")
+ (match_operand:VF1_AVX512VL 3 "nonimmediate_operand" "vm")]
UNSPEC_VDPPHPS))]
"TARGET_AVX10_2"
"vdpphps\t{%3, %2, %0|%0, %2, %3}"
[(set_attr "prefix" "evex")])
(define_insn "vdpphps_<mode>_mask"
- [(set (match_operand:VF1_AVX10_2 0 "register_operand" "=v")
- (vec_merge:VF1_AVX10_2
- (unspec:VF1_AVX10_2
- [(match_operand:VF1_AVX10_2 1 "register_operand" "0")
- (match_operand:VF1_AVX10_2 2 "register_operand" "v")
- (match_operand:VF1_AVX10_2 3 "nonimmediate_operand" "vm")]
+ [(set (match_operand:VF1_AVX512VL 0 "register_operand" "=v")
+ (vec_merge:VF1_AVX512VL
+ (unspec:VF1_AVX512VL
+ [(match_operand:VF1_AVX512VL 1 "register_operand" "0")
+ (match_operand:VF1_AVX512VL 2 "register_operand" "v")
+ (match_operand:VF1_AVX512VL 3 "nonimmediate_operand" "vm")]
UNSPEC_VDPPHPS)
(match_dup 1)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
@@ -32057,10 +32093,10 @@
[(set_attr "prefix" "evex")])
(define_expand "vdpphps_<mode>_maskz"
- [(match_operand:VF1_AVX10_2 0 "register_operand")
- (match_operand:VF1_AVX10_2 1 "register_operand")
- (match_operand:VF1_AVX10_2 2 "register_operand")
- (match_operand:VF1_AVX10_2 3 "nonimmediate_operand")
+ [(match_operand:VF1_AVX512VL 0 "register_operand")
+ (match_operand:VF1_AVX512VL 1 "register_operand")
+ (match_operand:VF1_AVX512VL 2 "register_operand")
+ (match_operand:VF1_AVX512VL 3 "nonimmediate_operand")
(match_operand:<avx512fmaskmode> 4 "register_operand")]
"TARGET_AVX10_2"
{
@@ -32070,60 +32106,60 @@
})
(define_insn "vdpphps_<mode>_maskz_1"
- [(set (match_operand:VF1_AVX10_2 0 "register_operand" "=v")
- (vec_merge:VF1_AVX10_2
- (unspec:VF1_AVX10_2
- [(match_operand:VF1_AVX10_2 1 "register_operand" "0")
- (match_operand:VF1_AVX10_2 2 "register_operand" "v")
- (match_operand:VF1_AVX10_2 3 "nonimmediate_operand" "vm")]
+ [(set (match_operand:VF1_AVX512VL 0 "register_operand" "=v")
+ (vec_merge:VF1_AVX512VL
+ (unspec:VF1_AVX512VL
+ [(match_operand:VF1_AVX512VL 1 "register_operand" "0")
+ (match_operand:VF1_AVX512VL 2 "register_operand" "v")
+ (match_operand:VF1_AVX512VL 3 "nonimmediate_operand" "vm")]
UNSPEC_VDPPHPS)
- (match_operand:VF1_AVX10_2 4 "const0_operand" "C")
+ (match_operand:VF1_AVX512VL 4 "const0_operand" "C")
(match_operand:<avx512fmaskmode> 5 "register_operand" "Yk")))]
"TARGET_AVX10_2"
"vdpphps\t{%3, %2, %0%{%5%}%N4|%0%{%5%}%N4, %2, %3}"
[(set_attr "prefix" "evex")])
(define_insn "avx10_2_scalefbf16_<mode><mask_name>"
- [(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v")
- (unspec:VBF_AVX10_2
- [(match_operand:VBF_AVX10_2 1 "register_operand" "v")
- (match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm")]
+ [(set (match_operand:VBF 0 "register_operand" "=v")
+ (unspec:VBF
+ [(match_operand:VBF 1 "register_operand" "v")
+ (match_operand:VBF 2 "nonimmediate_operand" "vm")]
UNSPEC_VSCALEFBF16))]
"TARGET_AVX10_2"
"vscalefbf16\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "prefix" "evex")])
(define_expand "<code><mode>3"
- [(set (match_operand:VBF_AVX10_2 0 "register_operand")
- (smaxmin:VBF_AVX10_2
- (match_operand:VBF_AVX10_2 1 "register_operand")
- (match_operand:VBF_AVX10_2 2 "nonimmediate_operand")))]
+ [(set (match_operand:VBF 0 "register_operand")
+ (smaxmin:VBF
+ (match_operand:VBF 1 "register_operand")
+ (match_operand:VBF 2 "nonimmediate_operand")))]
"TARGET_AVX10_2")
(define_insn "avx10_2_<code>bf16_<mode><mask_name>"
- [(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v")
- (smaxmin:VBF_AVX10_2
- (match_operand:VBF_AVX10_2 1 "register_operand" "v")
- (match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm")))]
+ [(set (match_operand:VBF 0 "register_operand" "=v")
+ (smaxmin:VBF
+ (match_operand:VBF 1 "register_operand" "v")
+ (match_operand:VBF 2 "nonimmediate_operand" "vm")))]
"TARGET_AVX10_2"
"v<maxmin_float>bf16\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "prefix" "evex")
(set_attr "mode" "<MODE>")])
(define_insn "avx10_2_<insn>bf16_<mode><mask_name>"
- [(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v")
- (plusminusmultdiv:VBF_AVX10_2
- (match_operand:VBF_AVX10_2 1 "register_operand" "v")
- (match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm")))]
+ [(set (match_operand:VBF 0 "register_operand" "=v")
+ (plusminusmultdiv:VBF
+ (match_operand:VBF 1 "register_operand" "v")
+ (match_operand:VBF 2 "nonimmediate_operand" "vm")))]
"TARGET_AVX10_2"
"v<insn>bf16\t{%2, %1, %0<mask_operand3>|%0<mask_operand3>, %1, %2}"
[(set_attr "prefix" "evex")])
(define_expand "avx10_2_fmaddbf16_<mode>_maskz"
- [(match_operand:VBF_AVX10_2 0 "register_operand")
- (match_operand:VBF_AVX10_2 1 "nonimmediate_operand")
- (match_operand:VBF_AVX10_2 2 "nonimmediate_operand")
- (match_operand:VBF_AVX10_2 3 "nonimmediate_operand")
+ [(match_operand:VBF 0 "register_operand")
+ (match_operand:VBF 1 "nonimmediate_operand")
+ (match_operand:VBF 2 "nonimmediate_operand")
+ (match_operand:VBF 3 "nonimmediate_operand")
(match_operand:<avx512fmaskmode> 4 "register_operand")]
"TARGET_AVX10_2"
{
@@ -32135,11 +32171,11 @@
})
(define_insn "avx10_2_fmaddbf16_<mode><sd_maskz_name>"
- [(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v,v,v")
- (fma:VBF_AVX10_2
- (match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "%0,0,v")
- (match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm,v,vm")
- (match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "v,vm,0")))]
+ [(set (match_operand:VBF 0 "register_operand" "=v,v,v")
+ (fma:VBF
+ (match_operand:VBF 1 "nonimmediate_operand" "%0,0,v")
+ (match_operand:VBF 2 "nonimmediate_operand" "vm,v,vm")
+ (match_operand:VBF 3 "nonimmediate_operand" "v,vm,0")))]
"TARGET_AVX10_2"
"@
vfmadd132bf16\t{%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2}
@@ -32150,12 +32186,12 @@
(set_attr "mode" "<sseinsnmode>")])
(define_insn "avx10_2_fmaddbf16_<mode>_mask"
- [(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v,v")
- (vec_merge:VBF_AVX10_2
- (fma:VBF_AVX10_2
- (match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "0,0")
- (match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm,v")
- (match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "v,vm"))
+ [(set (match_operand:VBF 0 "register_operand" "=v,v")
+ (vec_merge:VBF
+ (fma:VBF
+ (match_operand:VBF 1 "nonimmediate_operand" "0,0")
+ (match_operand:VBF 2 "nonimmediate_operand" "vm,v")
+ (match_operand:VBF 3 "nonimmediate_operand" "v,vm"))
(match_dup 1)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk,Yk")))]
"TARGET_AVX10_2"
@@ -32167,12 +32203,12 @@
(set_attr "mode" "<sseinsnmode>")])
(define_insn "avx10_2_fmaddbf16_<mode>_mask3"
- [(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v")
- (vec_merge:VBF_AVX10_2
- (fma:VBF_AVX10_2
- (match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "%v")
- (match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm")
- (match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "0"))
+ [(set (match_operand:VBF 0 "register_operand" "=v")
+ (vec_merge:VBF
+ (fma:VBF
+ (match_operand:VBF 1 "nonimmediate_operand" "%v")
+ (match_operand:VBF 2 "nonimmediate_operand" "vm")
+ (match_operand:VBF 3 "nonimmediate_operand" "0"))
(match_dup 3)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
"TARGET_AVX10_2"
@@ -32182,10 +32218,10 @@
(set_attr "mode" "<sseinsnmode>")])
(define_expand "avx10_2_fnmaddbf16_<mode>_maskz"
- [(match_operand:VBF_AVX10_2 0 "register_operand")
- (match_operand:VBF_AVX10_2 1 "nonimmediate_operand")
- (match_operand:VBF_AVX10_2 2 "nonimmediate_operand")
- (match_operand:VBF_AVX10_2 3 "nonimmediate_operand")
+ [(match_operand:VBF 0 "register_operand")
+ (match_operand:VBF 1 "nonimmediate_operand")
+ (match_operand:VBF 2 "nonimmediate_operand")
+ (match_operand:VBF 3 "nonimmediate_operand")
(match_operand:<avx512fmaskmode> 4 "register_operand")]
"TARGET_AVX10_2"
{
@@ -32197,12 +32233,12 @@
})
(define_insn "avx10_2_fnmaddbf16_<mode><sd_maskz_name>"
- [(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v,v,v")
- (fma:VBF_AVX10_2
- (neg:VBF_AVX10_2
- (match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "%0,0,v"))
- (match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm,v,vm")
- (match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "v,vm,0")))]
+ [(set (match_operand:VBF 0 "register_operand" "=v,v,v")
+ (fma:VBF
+ (neg:VBF
+ (match_operand:VBF 1 "nonimmediate_operand" "%0,0,v"))
+ (match_operand:VBF 2 "nonimmediate_operand" "vm,v,vm")
+ (match_operand:VBF 3 "nonimmediate_operand" "v,vm,0")))]
"TARGET_AVX10_2"
"@
vfnmadd132bf16\t{%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2}
@@ -32213,13 +32249,13 @@
(set_attr "mode" "<sseinsnmode>")])
(define_insn "avx10_2_fnmaddbf16_<mode>_mask"
- [(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v,v")
- (vec_merge:VBF_AVX10_2
- (fma:VBF_AVX10_2
- (neg:VBF_AVX10_2
- (match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "0,0"))
- (match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm,v")
- (match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "v,vm"))
+ [(set (match_operand:VBF 0 "register_operand" "=v,v")
+ (vec_merge:VBF
+ (fma:VBF
+ (neg:VBF
+ (match_operand:VBF 1 "nonimmediate_operand" "0,0"))
+ (match_operand:VBF 2 "nonimmediate_operand" "vm,v")
+ (match_operand:VBF 3 "nonimmediate_operand" "v,vm"))
(match_dup 1)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk,Yk")))]
"TARGET_AVX10_2"
@@ -32231,13 +32267,13 @@
(set_attr "mode" "<sseinsnmode>")])
(define_insn "avx10_2_fnmaddbf16_<mode>_mask3"
- [(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v")
- (vec_merge:VBF_AVX10_2
- (fma:VBF_AVX10_2
- (neg:VBF_AVX10_2
- (match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "%v"))
- (match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm")
- (match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "0"))
+ [(set (match_operand:VBF 0 "register_operand" "=v")
+ (vec_merge:VBF
+ (fma:VBF
+ (neg:VBF
+ (match_operand:VBF 1 "nonimmediate_operand" "%v"))
+ (match_operand:VBF 2 "nonimmediate_operand" "vm")
+ (match_operand:VBF 3 "nonimmediate_operand" "0"))
(match_dup 3)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
"TARGET_AVX10_2"
@@ -32247,10 +32283,10 @@
(set_attr "mode" "<sseinsnmode>")])
(define_expand "avx10_2_fmsubbf16_<mode>_maskz"
- [(match_operand:VBF_AVX10_2 0 "register_operand")
- (match_operand:VBF_AVX10_2 1 "nonimmediate_operand")
- (match_operand:VBF_AVX10_2 2 "nonimmediate_operand")
- (match_operand:VBF_AVX10_2 3 "nonimmediate_operand")
+ [(match_operand:VBF 0 "register_operand")
+ (match_operand:VBF 1 "nonimmediate_operand")
+ (match_operand:VBF 2 "nonimmediate_operand")
+ (match_operand:VBF 3 "nonimmediate_operand")
(match_operand:<avx512fmaskmode> 4 "register_operand")]
"TARGET_AVX10_2"
{
@@ -32262,12 +32298,12 @@
})
(define_insn "avx10_2_fmsubbf16_<mode><sd_maskz_name>"
- [(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v,v,v")
- (fma:VBF_AVX10_2
- (match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "%0,0,v")
- (match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm,v,vm")
- (neg:VBF_AVX10_2
- (match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "v,vm,0"))))]
+ [(set (match_operand:VBF 0 "register_operand" "=v,v,v")
+ (fma:VBF
+ (match_operand:VBF 1 "nonimmediate_operand" "%0,0,v")
+ (match_operand:VBF 2 "nonimmediate_operand" "vm,v,vm")
+ (neg:VBF
+ (match_operand:VBF 3 "nonimmediate_operand" "v,vm,0"))))]
"TARGET_AVX10_2"
"@
vfmsub132bf16\t{%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2}
@@ -32278,13 +32314,13 @@
(set_attr "mode" "<sseinsnmode>")])
(define_insn "avx10_2_fmsubbf16_<mode>_mask"
- [(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v,v")
- (vec_merge:VBF_AVX10_2
- (fma:VBF_AVX10_2
- (match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "0,0")
- (match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm,v")
- (neg:VBF_AVX10_2
- (match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "v,vm")))
+ [(set (match_operand:VBF 0 "register_operand" "=v,v")
+ (vec_merge:VBF
+ (fma:VBF
+ (match_operand:VBF 1 "nonimmediate_operand" "0,0")
+ (match_operand:VBF 2 "nonimmediate_operand" "vm,v")
+ (neg:VBF
+ (match_operand:VBF 3 "nonimmediate_operand" "v,vm")))
(match_dup 1)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk,Yk")))]
"TARGET_AVX10_2"
@@ -32296,13 +32332,13 @@
(set_attr "mode" "<sseinsnmode>")])
(define_insn "avx10_2_fmsubbf16_<mode>_mask3"
- [(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v")
- (vec_merge:VBF_AVX10_2
- (fma:VBF_AVX10_2
- (match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "%v")
- (match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm")
- (neg:VBF_AVX10_2
- (match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "0")))
+ [(set (match_operand:VBF 0 "register_operand" "=v")
+ (vec_merge:VBF
+ (fma:VBF
+ (match_operand:VBF 1 "nonimmediate_operand" "%v")
+ (match_operand:VBF 2 "nonimmediate_operand" "vm")
+ (neg:VBF
+ (match_operand:VBF 3 "nonimmediate_operand" "0")))
(match_dup 3)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
"TARGET_AVX10_2"
@@ -32312,10 +32348,10 @@
(set_attr "mode" "<sseinsnmode>")])
(define_expand "avx10_2_fnmsubbf16_<mode>_maskz"
- [(match_operand:VBF_AVX10_2 0 "register_operand")
- (match_operand:VBF_AVX10_2 1 "nonimmediate_operand")
- (match_operand:VBF_AVX10_2 2 "nonimmediate_operand")
- (match_operand:VBF_AVX10_2 3 "nonimmediate_operand")
+ [(match_operand:VBF 0 "register_operand")
+ (match_operand:VBF 1 "nonimmediate_operand")
+ (match_operand:VBF 2 "nonimmediate_operand")
+ (match_operand:VBF 3 "nonimmediate_operand")
(match_operand:<avx512fmaskmode> 4 "register_operand")]
"TARGET_AVX10_2"
{
@@ -32327,13 +32363,13 @@
})
(define_insn "avx10_2_fnmsubbf16_<mode><sd_maskz_name>"
- [(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v,v,v")
- (fma:VBF_AVX10_2
- (neg:VBF_AVX10_2
- (match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "%0,0,v"))
- (match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm,v,vm")
- (neg:VBF_AVX10_2
- (match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "v,vm,0"))))]
+ [(set (match_operand:VBF 0 "register_operand" "=v,v,v")
+ (fma:VBF
+ (neg:VBF
+ (match_operand:VBF 1 "nonimmediate_operand" "%0,0,v"))
+ (match_operand:VBF 2 "nonimmediate_operand" "vm,v,vm")
+ (neg:VBF
+ (match_operand:VBF 3 "nonimmediate_operand" "v,vm,0"))))]
"TARGET_AVX10_2"
"@
vfnmsub132bf16\t{%2, %3, %0<sd_mask_op4>|%0<sd_mask_op4>, %3, %2}
@@ -32344,14 +32380,14 @@
(set_attr "mode" "<sseinsnmode>")])
(define_insn "avx10_2_fnmsubbf16_<mode>_mask"
- [(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v,v")
- (vec_merge:VBF_AVX10_2
- (fma:VBF_AVX10_2
- (neg:VBF_AVX10_2
- (match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "0,0"))
- (match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm,v")
- (neg:VBF_AVX10_2
- (match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "v,vm")))
+ [(set (match_operand:VBF 0 "register_operand" "=v,v")
+ (vec_merge:VBF
+ (fma:VBF
+ (neg:VBF
+ (match_operand:VBF 1 "nonimmediate_operand" "0,0"))
+ (match_operand:VBF 2 "nonimmediate_operand" "vm,v")
+ (neg:VBF
+ (match_operand:VBF 3 "nonimmediate_operand" "v,vm")))
(match_dup 1)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk,Yk")))]
"TARGET_AVX10_2"
@@ -32363,14 +32399,14 @@
(set_attr "mode" "<sseinsnmode>")])
(define_insn "avx10_2_fnmsubbf16_<mode>_mask3"
- [(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v")
- (vec_merge:VBF_AVX10_2
- (fma:VBF_AVX10_2
- (neg:VBF_AVX10_2
- (match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "%v"))
- (match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm")
- (neg:VBF_AVX10_2
- (match_operand:VBF_AVX10_2 3 "nonimmediate_operand" "0")))
+ [(set (match_operand:VBF 0 "register_operand" "=v")
+ (vec_merge:VBF
+ (fma:VBF
+ (neg:VBF
+ (match_operand:VBF 1 "nonimmediate_operand" "%v"))
+ (match_operand:VBF 2 "nonimmediate_operand" "vm")
+ (neg:VBF
+ (match_operand:VBF 3 "nonimmediate_operand" "0")))
(match_dup 3)
(match_operand:<avx512fmaskmode> 4 "register_operand" "Yk")))]
"TARGET_AVX10_2"
@@ -32380,35 +32416,35 @@
(set_attr "mode" "<sseinsnmode>")])
(define_insn "avx10_2_rsqrtbf16_<mode><mask_name>"
- [(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v")
- (unspec:VBF_AVX10_2
- [(match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "vm")]
+ [(set (match_operand:VBF 0 "register_operand" "=v")
+ (unspec:VBF
+ [(match_operand:VBF 1 "nonimmediate_operand" "vm")]
UNSPEC_RSQRT))]
"TARGET_AVX10_2"
"vrsqrtbf16\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "prefix" "evex")])
(define_insn "avx10_2_sqrtbf16_<mode><mask_name>"
- [(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v")
- (sqrt:VBF_AVX10_2
- (match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "vm")))]
+ [(set (match_operand:VBF 0 "register_operand" "=v")
+ (sqrt:VBF
+ (match_operand:VBF 1 "nonimmediate_operand" "vm")))]
"TARGET_AVX10_2"
"vsqrtbf16\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "prefix" "evex")])
(define_insn "avx10_2_rcpbf16_<mode><mask_name>"
- [(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v")
- (unspec:VBF_AVX10_2
- [(match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "vm")]
+ [(set (match_operand:VBF 0 "register_operand" "=v")
+ (unspec:VBF
+ [(match_operand:VBF 1 "nonimmediate_operand" "vm")]
UNSPEC_RCP))]
"TARGET_AVX10_2"
"vrcpbf16\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
[(set_attr "prefix" "evex")])
(define_insn "avx10_2_getexpbf16_<mode><mask_name>"
- [(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v")
- (unspec:VBF_AVX10_2
- [(match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "vm")]
+ [(set (match_operand:VBF 0 "register_operand" "=v")
+ (unspec:VBF
+ [(match_operand:VBF 1 "nonimmediate_operand" "vm")]
UNSPEC_GETEXP))]
"TARGET_AVX10_2"
"vgetexpbf16\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
@@ -32425,9 +32461,9 @@
(UNSPEC_VGETMANTBF16 "getmant")])

(define_insn "avx10_2_<bf16immop>bf16_<mode><mask_name>"
- [(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v")
- (unspec:VBF_AVX10_2
- [(match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "vm")
+ [(set (match_operand:VBF 0 "register_operand" "=v")
+ (unspec:VBF
+ [(match_operand:VBF 1 "nonimmediate_operand" "vm")
(match_operand:SI 2 "const_0_to_255_operand")]
BF16IMMOP))]
"TARGET_AVX10_2"
@@ -32437,7 +32473,7 @@
(define_insn "avx10_2_fpclassbf16_<mode><mask_scalar_merge_name>"
[(set (match_operand:<avx512fmaskmode> 0 "register_operand" "=k")
(unspec:<avx512fmaskmode>
- [(match_operand:VBF_AVX10_2 1 "nonimmediate_operand" "vm")
+ [(match_operand:VBF 1 "nonimmediate_operand" "vm")
(match_operand 2 "const_0_to_255_operand")]
UNSPEC_VFPCLASSBF16))]
"TARGET_AVX10_2"
@@ -32447,8 +32483,8 @@
(define_insn "avx10_2_cmpbf16_<mode><mask_scalar_merge_name>"
[(set (match_operand:<avx512fmaskmode> 0 "register_operand" "=k")
(unspec:<avx512fmaskmode>
- [(match_operand:VBF_AVX10_2 1 "register_operand" "v")
- (match_operand:VBF_AVX10_2 2 "nonimmediate_operand" "vm")
+ [(match_operand:VBF 1 "register_operand" "v")
+ (match_operand:VBF 2 "nonimmediate_operand" "vm")
(match_operand 3 "const_0_to_31_operand" "n")]
UNSPEC_PCMP))]
"TARGET_AVX10_2"
@@ -32486,7 +32522,7 @@
(define_insn "avx10_2_cvt<sat_cvt_trunc_prefix>bf162i<sat_cvt_sign_prefix>bs<mode><mask_name>"
[(set (match_operand:<sseintvecmode> 0 "register_operand" "=v")
(unspec:<sseintvecmode>
- [(match_operand:VBF_AVX10_2 1 "vector_operand" "vm")]
+ [(match_operand:VBF 1 "vector_operand" "vm")]
UNSPEC_CVT_BF16_IBS_ITER))]
"TARGET_AVX10_2"
"vcvt<sat_cvt_trunc_prefix>bf162i<sat_cvt_sign_prefix>bs\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"
@@ -32501,7 +32537,7 @@
(define_insn "avx10_2_cvtph2i<sat_cvt_sign_prefix>bs<mode><mask_name><round_name>"
[(set (match_operand:<sseintvecmode> 0 "register_operand" "=v")
(unspec:<sseintvecmode>
- [(match_operand:VHF_AVX10_2 1 "<round_nimm_predicate>" "<round_constraint>")]
+ [(match_operand:VHF_AVX512VL 1 "<round_nimm_predicate>" "<round_constraint>")]
UNSPEC_CVT_PH_IBS_ITER))]
"TARGET_AVX10_2 && <round_mode512bit_condition>"
"vcvtph2i<sat_cvt_sign_prefix>bs\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
@@ -32516,7 +32552,7 @@
(define_insn "avx10_2_cvttph2i<sat_cvt_sign_prefix>bs<mode><mask_name><round_saeonly_name>"
[(set (match_operand:<sseintvecmode> 0 "register_operand" "=v")
(unspec:<sseintvecmode>
- [(match_operand:VHF_AVX10_2 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
+ [(match_operand:VHF_AVX512VL 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
UNSPEC_CVTT_PH_IBS_ITER))]
"TARGET_AVX10_2 && <round_saeonly_mode512bit_condition>"
"vcvttph2i<sat_cvt_sign_prefix>bs\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
@@ -32531,7 +32567,7 @@
(define_insn "avx10_2_cvtps2i<sat_cvt_sign_prefix>bs<mode><mask_name><round_name>"
[(set (match_operand:<sseintvecmode> 0 "register_operand" "=v")
(unspec:<sseintvecmode>
- [(match_operand:VF1_AVX10_2 1 "<round_nimm_predicate>" "<round_constraint>")]
+ [(match_operand:VF1_AVX512VL 1 "<round_nimm_predicate>" "<round_constraint>")]
UNSPEC_CVT_PS_IBS_ITER))]
"TARGET_AVX10_2 && <round_mode512bit_condition>"
"vcvtps2i<sat_cvt_sign_prefix>bs\t{<round_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_mask_op2>}"
@@ -32546,7 +32582,7 @@
(define_insn "avx10_2_cvttps2i<sat_cvt_sign_prefix>bs<mode><mask_name><round_saeonly_name>"
[(set (match_operand:<sseintvecmode> 0 "register_operand" "=v")
(unspec:<sseintvecmode>
- [(match_operand:VF1_AVX10_2 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
+ [(match_operand:VF1_AVX512VL 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
UNSPEC_CVTT_PS_IBS_ITER))]
"TARGET_AVX10_2 && <round_saeonly_mode512bit_condition>"
"vcvttps2i<sat_cvt_sign_prefix>bs\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
@@ -32565,7 +32601,7 @@
(define_insn "avx10_2_vcvtt<castmode>2<sat_cvt_sign_prefix>dqs<mode><mask_name><round_saeonly_name>"
[(set (match_operand:<VEC_GATHER_IDXSI> 0 "register_operand" "=v")
(unspec:<VEC_GATHER_IDXSI>
- [(match_operand:VF1_VF2_AVX10_2 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
+ [(match_operand:VF 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
UNSPEC_SAT_CVT_DS_SIGN_ITER))]
"TARGET_AVX10_2 && <round_saeonly_mode512bit_condition>"
"vcvtt<castmode>2<sat_cvt_sign_prefix>dqs<pd2dqssuff>\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
@@ -32576,7 +32612,7 @@
(define_insn "avx10_2_vcvttpd2<sat_cvt_sign_prefix>qqs<mode><mask_name><round_saeonly_name>"
[(set (match_operand:<VEC_GATHER_IDXDI> 0 "register_operand" "=v")
(unspec:<VEC_GATHER_IDXDI>
- [(match_operand:VF2_AVX10_2 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
+ [(match_operand:VF2 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
UNSPEC_SAT_CVT_DS_SIGN_ITER))]
"TARGET_AVX10_2 && <round_saeonly_mode512bit_condition>"
"vcvttpd2<sat_cvt_sign_prefix>qqs\t{<round_saeonly_mask_op2>%1, %0<mask_operand2>|%0<mask_operand2>, %1<round_saeonly_mask_op2>}"
@@ -32585,8 +32621,8 @@
(set_attr "mode" "<sseinsnmode>")])
(define_insn "avx10_2_vcvttps2<sat_cvt_sign_prefix>qqs<mode><mask_name><round_saeonly_name>"
- [(set (match_operand:VI8_AVX10_2 0 "register_operand" "=v")
- (unspec:VI8_AVX10_2
+ [(set (match_operand:VI8 0 "register_operand" "=v")
+ (unspec:VI8
[(match_operand:<vpckfloat_temp_mode> 1 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")]
UNSPEC_SAT_CVT_DS_SIGN_ITER))]
"TARGET_AVX10_2 && <round_saeonly_mode512bit_condition>"
@@ -32622,10 +32658,10 @@
(set_attr "mode" "<MODE>")])
(define_insn "avx10_2_minmaxbf16_<mode><mask_name>"
- [(set (match_operand:VBF_AVX10_2 0 "register_operand" "=v")
- (unspec:VBF_AVX10_2
- [(match_operand:VBF_AVX10_2 1 "register_operand" "v")
- (match_operand:VBF_AVX10_2 2 "bcst_vector_operand" "vmBr")
+ [(set (match_operand:VBF 0 "register_operand" "=v")
+ (unspec:VBF
+ [(match_operand:VBF 1 "register_operand" "v")
+ (match_operand:VBF 2 "bcst_vector_operand" "vmBr")
(match_operand:SI 3 "const_0_to_255_operand")]
UNSPEC_MINMAXBF16))]
"TARGET_AVX10_2"
@@ -32634,10 +32670,10 @@
(set_attr "mode" "<sseinsnmode>")])
(define_insn "avx10_2_minmaxp<mode><mask_name><round_saeonly_name>"
- [(set (match_operand:VFH_AVX10_2 0 "register_operand" "=v")
- (unspec:VFH_AVX10_2
- [(match_operand:VFH_AVX10_2 1 "register_operand" "v")
- (match_operand:VFH_AVX10_2 2 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")
+ [(set (match_operand:VFH_AVX512VL 0 "register_operand" "=v")
+ (unspec:VFH_AVX512VL
+ [(match_operand:VFH_AVX512VL 1 "register_operand" "v")
+ (match_operand:VFH_AVX512VL 2 "<round_saeonly_nimm_predicate>" "<round_saeonly_constraint>")
(match_operand:SI 3 "const_0_to_255_operand")]
UNSPEC_MINMAX))]
"TARGET_AVX10_2"
@@ -32661,9 +32697,9 @@
(set_attr "mode" "<ssescalarmode>")])
(define_insn "avx10_2_vmovrs<ssemodesuffix><mode><mask_name>"
- [(set (match_operand:VI1248_AVX10_2 0 "register_operand" "=v")
- (unspec:VI1248_AVX10_2
- [(match_operand:VI1248_AVX10_2 1 "memory_operand" "m")]
+ [(set (match_operand:VI1248_AVX512VLBW 0 "register_operand" "=v")
+ (unspec:VI1248_AVX512VLBW
+ [(match_operand:VI1248_AVX512VLBW 1 "memory_operand" "m")]
UNSPEC_VMOVRS))]
"TARGET_AVX10_2 && TARGET_MOVRS"
"vmovrs<ssemodesuffix>\t{%1, %0<mask_operand2>|%0<mask_operand2>, %1}"