;; GCC machine description for SSE instructions
;; Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
;; Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.
(define_c_enum "unspec" [
;; SSE
UNSPEC_MOVNT
UNSPEC_MOVU
;; SSE3
UNSPEC_LDDQU
;; SSSE3
UNSPEC_PSHUFB
UNSPEC_PSIGN
UNSPEC_PALIGNR
;; For SSE4A support
UNSPEC_EXTRQI
UNSPEC_EXTRQ
UNSPEC_INSERTQI
UNSPEC_INSERTQ
;; For SSE4.1 support
UNSPEC_BLENDV
UNSPEC_INSERTPS
UNSPEC_DP
UNSPEC_MOVNTDQA
UNSPEC_MPSADBW
UNSPEC_PHMINPOSUW
UNSPEC_PTEST
;; For SSE4.2 support
UNSPEC_PCMPESTR
UNSPEC_PCMPISTR
;; For FMA4 support
UNSPEC_FMADDSUB
UNSPEC_XOP_UNSIGNED_CMP
UNSPEC_XOP_TRUEFALSE
UNSPEC_XOP_PERMUTE
UNSPEC_FRCZ
;; For AES support
UNSPEC_AESENC
UNSPEC_AESENCLAST
UNSPEC_AESDEC
UNSPEC_AESDECLAST
UNSPEC_AESIMC
UNSPEC_AESKEYGENASSIST
;; For PCLMUL support
UNSPEC_PCLMUL
;; For AVX support
UNSPEC_PCMP
UNSPEC_VPERMIL
UNSPEC_VPERMIL2
UNSPEC_VPERMIL2F128
UNSPEC_CAST
UNSPEC_VTESTP
UNSPEC_VCVTPH2PS
UNSPEC_VCVTPS2PH
;; For AVX2 support
UNSPEC_VPERMVAR
UNSPEC_VPERMTI
UNSPEC_GATHER
UNSPEC_VSIBADDR
])
(define_c_enum "unspecv" [
UNSPECV_LDMXCSR
UNSPECV_STMXCSR
UNSPECV_CLFLUSH
UNSPECV_MONITOR
UNSPECV_MWAIT
UNSPECV_VZEROALL
UNSPECV_VZEROUPPER
])
;; All vector modes including V?TImode, used in move patterns.
(define_mode_iterator V16
[(V32QI "TARGET_AVX") V16QI
(V16HI "TARGET_AVX") V8HI
(V8SI "TARGET_AVX") V4SI
(V4DI "TARGET_AVX") V2DI
(V2TI "TARGET_AVX") V1TI
(V8SF "TARGET_AVX") V4SF
(V4DF "TARGET_AVX") V2DF])
;; All vector modes
(define_mode_iterator V
[(V32QI "TARGET_AVX") V16QI
(V16HI "TARGET_AVX") V8HI
(V8SI "TARGET_AVX") V4SI
(V4DI "TARGET_AVX") V2DI
(V8SF "TARGET_AVX") V4SF
(V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
;; All 128bit vector modes
(define_mode_iterator V_128
[V16QI V8HI V4SI V2DI V4SF (V2DF "TARGET_SSE2")])
;; All 256bit vector modes
(define_mode_iterator V_256
[V32QI V16HI V8SI V4DI V8SF V4DF])
;; All vector float modes
(define_mode_iterator VF
[(V8SF "TARGET_AVX") V4SF
(V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
;; All SFmode vector float modes
(define_mode_iterator VF1
[(V8SF "TARGET_AVX") V4SF])
;; All DFmode vector float modes
(define_mode_iterator VF2
[(V4DF "TARGET_AVX") V2DF])
;; All 128bit vector float modes
(define_mode_iterator VF_128
[V4SF (V2DF "TARGET_SSE2")])
;; All 256bit vector float modes
(define_mode_iterator VF_256
[V8SF V4DF])
;; All vector integer modes
(define_mode_iterator VI
[(V32QI "TARGET_AVX") V16QI
(V16HI "TARGET_AVX") V8HI
(V8SI "TARGET_AVX") V4SI
(V4DI "TARGET_AVX") V2DI])
(define_mode_iterator VI_AVX2
[(V32QI "TARGET_AVX2") V16QI
(V16HI "TARGET_AVX2") V8HI
(V8SI "TARGET_AVX2") V4SI
(V4DI "TARGET_AVX2") V2DI])
;; All QImode vector integer modes
(define_mode_iterator VI1
[(V32QI "TARGET_AVX") V16QI])
;; All DImode vector integer modes
(define_mode_iterator VI8
[(V4DI "TARGET_AVX") V2DI])
(define_mode_iterator VI1_AVX2
[(V32QI "TARGET_AVX2") V16QI])
(define_mode_iterator VI2_AVX2
[(V16HI "TARGET_AVX2") V8HI])
(define_mode_iterator VI4_AVX2
[(V8SI "TARGET_AVX2") V4SI])
(define_mode_iterator VI8_AVX2
[(V4DI "TARGET_AVX2") V2DI])
;; ??? We should probably use TImode instead.
(define_mode_iterator VIMAX_AVX2
[(V2TI "TARGET_AVX2") V1TI])
;; ??? This should probably be dropped in favor of VIMAX_AVX2.
(define_mode_iterator SSESCALARMODE
[(V2TI "TARGET_AVX2") TI])
(define_mode_iterator VI12_AVX2
[(V32QI "TARGET_AVX2") V16QI
(V16HI "TARGET_AVX2") V8HI])
(define_mode_iterator VI24_AVX2
[(V16HI "TARGET_AVX2") V8HI
(V8SI "TARGET_AVX2") V4SI])
(define_mode_iterator VI124_AVX2
[(V32QI "TARGET_AVX2") V16QI
(V16HI "TARGET_AVX2") V8HI
(V8SI "TARGET_AVX2") V4SI])
(define_mode_iterator VI248_AVX2
[(V16HI "TARGET_AVX2") V8HI
(V8SI "TARGET_AVX2") V4SI
(V4DI "TARGET_AVX2") V2DI])
(define_mode_iterator VI48_AVX2
[(V8SI "TARGET_AVX2") V4SI
(V4DI "TARGET_AVX2") V2DI])
(define_mode_iterator V48_AVX2
[V4SF V2DF
V8SF V4DF
(V4SI "TARGET_AVX2") (V2DI "TARGET_AVX2")
(V8SI "TARGET_AVX2") (V4DI "TARGET_AVX2")])
(define_mode_attr sse2_avx2
[(V16QI "sse2") (V32QI "avx2")
(V8HI "sse2") (V16HI "avx2")
(V4SI "sse2") (V8SI "avx2")
(V2DI "sse2") (V4DI "avx2")
(V1TI "sse2") (V2TI "avx2")])
(define_mode_attr ssse3_avx2
[(V16QI "ssse3") (V32QI "avx2")
(V8HI "ssse3") (V16HI "avx2")
(V4SI "ssse3") (V8SI "avx2")
(V2DI "ssse3") (V4DI "avx2")
(TI "ssse3") (V2TI "avx2")])
(define_mode_attr sse4_1_avx2
[(V16QI "sse4_1") (V32QI "avx2")
(V8HI "sse4_1") (V16HI "avx2")
(V4SI "sse4_1") (V8SI "avx2")
(V2DI "sse4_1") (V4DI "avx2")])
(define_mode_attr avx_avx2
[(V4SF "avx") (V2DF "avx")
(V8SF "avx") (V4DF "avx")
(V4SI "avx2") (V2DI "avx2")
(V8SI "avx2") (V4DI "avx2")])
(define_mode_attr vec_avx2
[(V16QI "vec") (V32QI "avx2")
(V8HI "vec") (V16HI "avx2")
(V4SI "vec") (V8SI "avx2")
(V2DI "vec") (V4DI "avx2")])
(define_mode_attr ssedoublemode
[(V16HI "V16SI") (V8HI "V8SI")])
(define_mode_attr ssebytemode
[(V4DI "V32QI") (V2DI "V16QI")])
;; All 128bit vector integer modes
(define_mode_iterator VI_128 [V16QI V8HI V4SI V2DI])
;; All 256bit vector integer modes
(define_mode_iterator VI_256 [V32QI V16HI V8SI V4DI])
;; Random 128bit vector integer mode combinations
(define_mode_iterator VI12_128 [V16QI V8HI])
(define_mode_iterator VI14_128 [V16QI V4SI])
(define_mode_iterator VI124_128 [V16QI V8HI V4SI])
(define_mode_iterator VI128_128 [V16QI V8HI V2DI])
(define_mode_iterator VI24_128 [V8HI V4SI])
(define_mode_iterator VI248_128 [V8HI V4SI V2DI])
(define_mode_iterator VI48_128 [V4SI V2DI])
;; Random 256bit vector integer mode combinations
(define_mode_iterator VI124_256 [V32QI V16HI V8SI])
(define_mode_iterator VI48_256 [V8SI V4DI])
;; Int-float size matches
(define_mode_iterator VI4F_128 [V4SI V4SF])
(define_mode_iterator VI8F_128 [V2DI V2DF])
(define_mode_iterator VI4F_256 [V8SI V8SF])
(define_mode_iterator VI8F_256 [V4DI V4DF])
;; Mapping from float mode to required SSE level
(define_mode_attr sse
[(SF "sse") (DF "sse2")
(V4SF "sse") (V2DF "sse2")
(V8SF "avx") (V4DF "avx")])
(define_mode_attr sse2
[(V16QI "sse2") (V32QI "avx")
(V2DI "sse2") (V4DI "avx")])
(define_mode_attr sse3
[(V16QI "sse3") (V32QI "avx")])
(define_mode_attr sse4_1
[(V4SF "sse4_1") (V2DF "sse4_1")
(V8SF "avx") (V4DF "avx")])
(define_mode_attr avxsizesuffix
[(V32QI "256") (V16HI "256") (V8SI "256") (V4DI "256")
(V16QI "") (V8HI "") (V4SI "") (V2DI "")
(V8SF "256") (V4DF "256")
(V4SF "") (V2DF "")])
;; SSE instruction mode
(define_mode_attr sseinsnmode
[(V32QI "OI") (V16HI "OI") (V8SI "OI") (V4DI "OI") (V2TI "OI")
(V16QI "TI") (V8HI "TI") (V4SI "TI") (V2DI "TI") (V1TI "TI")
(V8SF "V8SF") (V4DF "V4DF")
(V4SF "V4SF") (V2DF "V2DF")
(TI "TI")])
;; Mapping of vector float modes to an integer mode of the same size
(define_mode_attr sseintvecmode
[(V8SF "V8SI") (V4DF "V4DI")
(V4SF "V4SI") (V2DF "V2DI")
(V8SI "V8SI") (V4DI "V4DI")
(V4SI "V4SI") (V2DI "V2DI")
(V16HI "V16HI") (V8HI "V8HI")
(V32QI "V32QI") (V16QI "V16QI")])
(define_mode_attr sseintvecmodelower
[(V8SF "v8si") (V4DF "v4di")
(V4SF "v4si") (V2DF "v2di")
(V8SI "v8si") (V4DI "v4di")
(V4SI "v4si") (V2DI "v2di")
(V16HI "v16hi") (V8HI "v8hi")
(V32QI "v32qi") (V16QI "v16qi")])
;; Mapping of vector modes to a vector mode of double size
(define_mode_attr ssedoublevecmode
[(V32QI "V64QI") (V16HI "V32HI") (V8SI "V16SI") (V4DI "V8DI")
(V16QI "V32QI") (V8HI "V16HI") (V4SI "V8SI") (V2DI "V4DI")
(V8SF "V16SF") (V4DF "V8DF")
(V4SF "V8SF") (V2DF "V4DF")])
;; Mapping of vector modes to a vector mode of half size
(define_mode_attr ssehalfvecmode
[(V32QI "V16QI") (V16HI "V8HI") (V8SI "V4SI") (V4DI "V2DI")
(V16QI "V8QI") (V8HI "V4HI") (V4SI "V2SI")
(V8SF "V4SF") (V4DF "V2DF")
(V4SF "V2SF")])
;; Mapping of vector modes to packed single mode of the same size
(define_mode_attr ssePSmode
[(V32QI "V8SF") (V16QI "V4SF")
(V16HI "V8SF") (V8HI "V4SF")
(V8SI "V8SF") (V4SI "V4SF")
(V4DI "V8SF") (V2DI "V4SF")
(V2TI "V8SF") (V1TI "V4SF")
(V8SF "V8SF") (V4SF "V4SF")
(V4DF "V8SF") (V2DF "V4SF")])
;; Mapping of vector modes back to the scalar modes
(define_mode_attr ssescalarmode
[(V32QI "QI") (V16HI "HI") (V8SI "SI") (V4DI "DI")
(V16QI "QI") (V8HI "HI") (V4SI "SI") (V2DI "DI")
(V8SF "SF") (V4DF "DF")
(V4SF "SF") (V2DF "DF")])
;; Number of scalar elements in each vector type
(define_mode_attr ssescalarnum
[(V32QI "32") (V16HI "16") (V8SI "8") (V4DI "4")
(V16QI "16") (V8HI "8") (V4SI "4") (V2DI "2")
(V8SF "8") (V4DF "4")
(V4SF "4") (V2DF "2")])
;; SSE prefix for integer vector modes
(define_mode_attr sseintprefix
[(V2DI "p") (V2DF "")
(V4DI "p") (V4DF "")
(V4SI "p") (V4SF "")
(V8SI "p") (V8SF "")])
;; SSE scalar suffix for vector modes
(define_mode_attr ssescalarmodesuffix
[(SF "ss") (DF "sd")
(V8SF "ss") (V4DF "sd")
(V4SF "ss") (V2DF "sd")
(V8SI "ss") (V4DI "sd")
(V4SI "d")])
;; Pack/unpack vector modes
(define_mode_attr sseunpackmode
[(V16QI "V8HI") (V8HI "V4SI") (V4SI "V2DI")
(V32QI "V16HI") (V16HI "V8SI") (V8SI "V4DI")])
(define_mode_attr ssepackmode
[(V8HI "V16QI") (V4SI "V8HI") (V2DI "V4SI")
(V16HI "V32QI") (V8SI "V16HI") (V4DI "V8SI")])
;; Mapping of the max integer size for xop rotate immediate constraint
(define_mode_attr sserotatemax
[(V16QI "7") (V8HI "15") (V4SI "31") (V2DI "63")])
;; Mapping of mode to cast intrinsic name
(define_mode_attr castmode [(V8SI "si") (V8SF "ps") (V4DF "pd")])
;; Instruction suffix for sign and zero extensions.
(define_code_attr extsuffix [(sign_extend "sx") (zero_extend "zx")])
;; i128 for integer vectors and TARGET_AVX2, f128 otherwise.
(define_mode_attr i128
[(V8SF "f128") (V4DF "f128") (V32QI "%~128") (V16HI "%~128")
(V8SI "%~128") (V4DI "%~128")])
;; Mix-n-match
(define_mode_iterator AVX256MODE2P [V8SI V8SF V4DF])
;; Mapping of immediate bits for blend instructions
(define_mode_attr blendbits
[(V8SF "255") (V4SF "15") (V4DF "15") (V2DF "3")])
;; Patterns whose name begins with "sse{,2,3}_" are invoked by intrinsics.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Move patterns
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; All of these patterns are enabled for SSE1 as well as SSE2.
;; This is essential for maintaining stable calling conventions.
(define_expand "mov"
[(set (match_operand:V16 0 "nonimmediate_operand")
(match_operand:V16 1 "nonimmediate_operand"))]
"TARGET_SSE"
{
ix86_expand_vector_move (<MODE>mode, operands);
DONE;
})
(define_insn "*mov_internal"
[(set (match_operand:V16 0 "nonimmediate_operand" "=x,x ,m")
(match_operand:V16 1 "nonimmediate_or_sse_const_operand" "C ,xm,x"))]
"TARGET_SSE
&& (register_operand (operands[0], mode)
|| register_operand (operands[1], mode))"
{
switch (which_alternative)
{
case 0:
return standard_sse_constant_opcode (insn, operands[1]);
case 1:
case 2:
switch (get_attr_mode (insn))
{
case MODE_V8SF:
case MODE_V4SF:
if (TARGET_AVX
&& (misaligned_operand (operands[0], <MODE>mode)
|| misaligned_operand (operands[1], <MODE>mode)))
return "vmovups\t{%1, %0|%0, %1}";
else
return "%vmovaps\t{%1, %0|%0, %1}";
case MODE_V4DF:
case MODE_V2DF:
if (TARGET_AVX
&& (misaligned_operand (operands[0], <MODE>mode)
|| misaligned_operand (operands[1], <MODE>mode)))
return "vmovupd\t{%1, %0|%0, %1}";
else
return "%vmovapd\t{%1, %0|%0, %1}";
case MODE_OI:
case MODE_TI:
if (TARGET_AVX
&& (misaligned_operand (operands[0], <MODE>mode)
|| misaligned_operand (operands[1], <MODE>mode)))
return "vmovdqu\t{%1, %0|%0, %1}";
else
return "%vmovdqa\t{%1, %0|%0, %1}";
default:
gcc_unreachable ();
}
default:
gcc_unreachable ();
}
}
[(set_attr "type" "sselog1,ssemov,ssemov")
(set_attr "prefix" "maybe_vex")
(set (attr "mode")
(cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
(const_string "")
(and (eq_attr "alternative" "2")
(match_test "TARGET_SSE_TYPELESS_STORES"))
(const_string "")
(match_test "TARGET_AVX")
(const_string "")
(ior (not (match_test "TARGET_SSE2"))
(match_test "optimize_function_for_size_p (cfun)"))
(const_string "V4SF")
(and (eq_attr "alternative" "0")
(match_test "TARGET_SSE_LOAD0_BY_PXOR"))
(const_string "TI")
]
(const_string "")))])
(define_insn "sse2_movq128"
[(set (match_operand:V2DI 0 "register_operand" "=x")
(vec_concat:V2DI
(vec_select:DI
(match_operand:V2DI 1 "nonimmediate_operand" "xm")
(parallel [(const_int 0)]))
(const_int 0)))]
"TARGET_SSE2"
"%vmovq\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
;; Move a DI from a 32-bit register pair (e.g. %edx:%eax) to an xmm.
;; We'd rather avoid this entirely; if the 32-bit reg pair was loaded
;; from memory, we'd prefer to load the memory directly into the %xmm
;; register. To facilitate this happy circumstance, this pattern won't
;; split until after register allocation. If the 64-bit value didn't
;; come from memory, this is the best we can do. This is much better
;; than storing %edx:%eax into a stack temporary and loading an %xmm
;; from there.
(define_insn_and_split "movdi_to_sse"
[(parallel
[(set (match_operand:V4SI 0 "register_operand" "=?x,x")
(subreg:V4SI (match_operand:DI 1 "nonimmediate_operand" "r,m") 0))
(clobber (match_scratch:V4SI 2 "=&x,X"))])]
"!TARGET_64BIT && TARGET_SSE2 && TARGET_INTER_UNIT_MOVES"
"#"
"&& reload_completed"
[(const_int 0)]
{
if (register_operand (operands[1], DImode))
{
/* The DImode arrived in a pair of integral registers (e.g. %edx:%eax).
Assemble the 64-bit DImode value in an xmm register. */
emit_insn (gen_sse2_loadld (operands[0], CONST0_RTX (V4SImode),
gen_rtx_SUBREG (SImode, operands[1], 0)));
emit_insn (gen_sse2_loadld (operands[2], CONST0_RTX (V4SImode),
gen_rtx_SUBREG (SImode, operands[1], 4)));
emit_insn (gen_vec_interleave_lowv4si (operands[0], operands[0],
operands[2]));
}
else if (memory_operand (operands[1], DImode))
emit_insn (gen_vec_concatv2di (gen_lowpart (V2DImode, operands[0]),
operands[1], const0_rtx));
else
gcc_unreachable ();
})
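;; For the register alternative, the split above emits roughly:
;;	movd	%eax, %xmm0
;;	movd	%edx, %xmm1
;;	punpckldq %xmm1, %xmm0
;; i.e. two zero-extending SImode loads into xmm registers followed by
;; a low-part interleave reassembling the 64-bit value.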
(define_split
[(set (match_operand:V4SF 0 "register_operand")
(match_operand:V4SF 1 "zero_extended_scalar_load_operand"))]
"TARGET_SSE && reload_completed"
[(set (match_dup 0)
(vec_merge:V4SF
(vec_duplicate:V4SF (match_dup 1))
(match_dup 2)
(const_int 1)))]
{
operands[1] = simplify_gen_subreg (SFmode, operands[1], V4SFmode, 0);
operands[2] = CONST0_RTX (V4SFmode);
})
(define_split
[(set (match_operand:V2DF 0 "register_operand")
(match_operand:V2DF 1 "zero_extended_scalar_load_operand"))]
"TARGET_SSE2 && reload_completed"
[(set (match_dup 0) (vec_concat:V2DF (match_dup 1) (match_dup 2)))]
{
operands[1] = simplify_gen_subreg (DFmode, operands[1], V2DFmode, 0);
operands[2] = CONST0_RTX (DFmode);
})
(define_expand "push1"
[(match_operand:V16 0 "register_operand")]
"TARGET_SSE"
{
ix86_expand_push (<MODE>mode, operands[0]);
DONE;
})
(define_expand "movmisalign"
[(set (match_operand:V16 0 "nonimmediate_operand")
(match_operand:V16 1 "nonimmediate_operand"))]
"TARGET_SSE"
{
ix86_expand_vector_move_misalign (<MODE>mode, operands);
DONE;
})
(define_insn "_movu"
[(set (match_operand:VF 0 "nonimmediate_operand" "=x,m")
(unspec:VF
[(match_operand:VF 1 "nonimmediate_operand" "xm,x")]
UNSPEC_MOVU))]
"TARGET_SSE && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
{
switch (get_attr_mode (insn))
{
case MODE_V8SF:
case MODE_V4SF:
return "%vmovups\t{%1, %0|%0, %1}";
default:
return "%vmovu\t{%1, %0|%0, %1}";
}
}
[(set_attr "type" "ssemov")
(set_attr "movu" "1")
(set_attr "prefix" "maybe_vex")
(set (attr "mode")
(cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
(const_string "")
(and (eq_attr "alternative" "1")
(match_test "TARGET_SSE_TYPELESS_STORES"))
(const_string "")
(match_test "TARGET_AVX")
(const_string "")
(match_test "optimize_function_for_size_p (cfun)")
(const_string "V4SF")
]
(const_string "")))])
(define_insn "_movdqu"
[(set (match_operand:VI1 0 "nonimmediate_operand" "=x,m")
(unspec:VI1 [(match_operand:VI1 1 "nonimmediate_operand" "xm,x")]
UNSPEC_MOVU))]
"TARGET_SSE2 && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
{
switch (get_attr_mode (insn))
{
case MODE_V8SF:
case MODE_V4SF:
return "%vmovups\t{%1, %0|%0, %1}";
default:
return "%vmovdqu\t{%1, %0|%0, %1}";
}
}
[(set_attr "type" "ssemov")
(set_attr "movu" "1")
(set (attr "prefix_data16")
(if_then_else
(match_test "TARGET_AVX")
(const_string "*")
(const_string "1")))
(set_attr "prefix" "maybe_vex")
(set (attr "mode")
(cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
(const_string "")
(and (eq_attr "alternative" "1")
(match_test "TARGET_SSE_TYPELESS_STORES"))
(const_string "")
(match_test "TARGET_AVX")
(const_string "")
(match_test "optimize_function_for_size_p (cfun)")
(const_string "V4SF")
]
(const_string "")))])
(define_insn "_lddqu"
[(set (match_operand:VI1 0 "register_operand" "=x")
(unspec:VI1 [(match_operand:VI1 1 "memory_operand" "m")]
UNSPEC_LDDQU))]
"TARGET_SSE3"
"%vlddqu\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "movu" "1")
(set (attr "prefix_data16")
(if_then_else
(match_test "TARGET_AVX")
(const_string "*")
(const_string "0")))
(set (attr "prefix_rep")
(if_then_else
(match_test "TARGET_AVX")
(const_string "*")
(const_string "1")))
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "")])
(define_insn "sse2_movnti"
[(set (match_operand:SWI48 0 "memory_operand" "=m")
(unspec:SWI48 [(match_operand:SWI48 1 "register_operand" "r")]
UNSPEC_MOVNT))]
"TARGET_SSE2"
"movnti\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix_data16" "0")
(set_attr "mode" "")])
(define_insn "_movnt"
[(set (match_operand:VF 0 "memory_operand" "=m")
(unspec:VF [(match_operand:VF 1 "register_operand" "x")]
UNSPEC_MOVNT))]
"TARGET_SSE"
"%vmovnt\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "")])
(define_insn "_movnt"
[(set (match_operand:VI8 0 "memory_operand" "=m")
(unspec:VI8 [(match_operand:VI8 1 "register_operand" "x")]
UNSPEC_MOVNT))]
"TARGET_SSE2"
"%vmovntdq\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecvt")
(set (attr "prefix_data16")
(if_then_else
(match_test "TARGET_AVX")
(const_string "*")
(const_string "1")))
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "")])
; Expand patterns for non-temporal stores. At the moment, only those
; that directly map to insns are defined; it would be possible to
; define patterns for other modes that would expand to several insns.
;; Modes handled by storent patterns.
(define_mode_iterator STORENT_MODE
[(DI "TARGET_SSE2 && TARGET_64BIT") (SI "TARGET_SSE2")
(SF "TARGET_SSE4A") (DF "TARGET_SSE4A")
(V4DI "TARGET_AVX") (V2DI "TARGET_SSE2")
(V8SF "TARGET_AVX") V4SF
(V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
(define_expand "storent"
[(set (match_operand:STORENT_MODE 0 "memory_operand")
(unspec:STORENT_MODE
[(match_operand:STORENT_MODE 1 "register_operand")]
UNSPEC_MOVNT))]
"TARGET_SSE")
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Parallel floating point arithmetic
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(define_expand "2"
[(set (match_operand:VF 0 "register_operand")
(absneg:VF
(match_operand:VF 1 "register_operand")))]
"TARGET_SSE"
"ix86_expand_fp_absneg_operator (, mode, operands); DONE;")
(define_insn_and_split "*absneg2"
[(set (match_operand:VF 0 "register_operand" "=x,x,x,x")
(match_operator:VF 3 "absneg_operator"
[(match_operand:VF 1 "nonimmediate_operand" "0, xm,x, m")]))
(use (match_operand:VF 2 "nonimmediate_operand" "xm,0, xm,x"))]
"TARGET_SSE"
"#"
"&& reload_completed"
[(const_int 0)]
{
enum rtx_code absneg_op;
rtx op1, op2;
rtx t;
if (TARGET_AVX)
{
if (MEM_P (operands[1]))
op1 = operands[2], op2 = operands[1];
else
op1 = operands[1], op2 = operands[2];
}
else
{
op1 = operands[0];
if (rtx_equal_p (operands[0], operands[1]))
op2 = operands[2];
else
op2 = operands[1];
}
absneg_op = GET_CODE (operands[3]) == NEG ? XOR : AND;
t = gen_rtx_fmt_ee (absneg_op, <MODE>mode, op1, op2);
t = gen_rtx_SET (VOIDmode, operands[0], t);
emit_insn (t);
DONE;
}
[(set_attr "isa" "noavx,noavx,avx,avx")])
(define_expand "3"
[(set (match_operand:VF 0 "register_operand")
(plusminus:VF
(match_operand:VF 1 "nonimmediate_operand")
(match_operand:VF 2 "nonimmediate_operand")))]
"TARGET_SSE"
"ix86_fixup_binary_operands_no_copy (, mode, operands);")
(define_insn "*3"
[(set (match_operand:VF 0 "register_operand" "=x,x")
(plusminus:VF
(match_operand:VF 1 "nonimmediate_operand" "0,x")
(match_operand:VF 2 "nonimmediate_operand" "xm,xm")))]
"TARGET_SSE && ix86_binary_operator_ok (, mode, operands)"
"@
<plusminus_mnemonic><ssemodesuffix>\t{%2, %0|%0, %2}
v<plusminus_mnemonic><ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseadd")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "")])
(define_insn "_vm3"
[(set (match_operand:VF_128 0 "register_operand" "=x,x")
(vec_merge:VF_128
(plusminus:VF_128
(match_operand:VF_128 1 "register_operand" "0,x")
(match_operand:VF_128 2 "nonimmediate_operand" "xm,xm"))
(match_dup 1)
(const_int 1)))]
"TARGET_SSE"
"@
<plusminus_mnemonic><ssescalarmodesuffix>\t{%2, %0|%0, %2}
v<plusminus_mnemonic><ssescalarmodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseadd")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "")])
(define_expand "mul3"
[(set (match_operand:VF 0 "register_operand")
(mult:VF
(match_operand:VF 1 "nonimmediate_operand")
(match_operand:VF 2 "nonimmediate_operand")))]
"TARGET_SSE"
"ix86_fixup_binary_operands_no_copy (MULT, mode, operands);")
(define_insn "*mul3"
[(set (match_operand:VF 0 "register_operand" "=x,x")
(mult:VF
(match_operand:VF 1 "nonimmediate_operand" "%0,x")
(match_operand:VF 2 "nonimmediate_operand" "xm,xm")))]
"TARGET_SSE && ix86_binary_operator_ok (MULT, mode, operands)"
"@
mul<ssemodesuffix>\t{%2, %0|%0, %2}
vmul<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "ssemul")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "")])
(define_insn "_vmmul3"
[(set (match_operand:VF_128 0 "register_operand" "=x,x")
(vec_merge:VF_128
(mult:VF_128
(match_operand:VF_128 1 "register_operand" "0,x")
(match_operand:VF_128 2 "nonimmediate_operand" "xm,xm"))
(match_dup 1)
(const_int 1)))]
"TARGET_SSE"
"@
mul<ssescalarmodesuffix>\t{%2, %0|%0, %2}
vmul<ssescalarmodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "ssemul")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "")])
(define_expand "div3"
[(set (match_operand:VF2 0 "register_operand")
(div:VF2 (match_operand:VF2 1 "register_operand")
(match_operand:VF2 2 "nonimmediate_operand")))]
"TARGET_SSE2"
"ix86_fixup_binary_operands_no_copy (DIV, mode, operands);")
(define_expand "div3"
[(set (match_operand:VF1 0 "register_operand")
(div:VF1 (match_operand:VF1 1 "register_operand")
(match_operand:VF1 2 "nonimmediate_operand")))]
"TARGET_SSE"
{
ix86_fixup_binary_operands_no_copy (DIV, <MODE>mode, operands);
if (TARGET_SSE_MATH
&& TARGET_RECIP_VEC_DIV
&& !optimize_insn_for_size_p ()
&& flag_finite_math_only && !flag_trapping_math
&& flag_unsafe_math_optimizations)
{
ix86_emit_swdivsf (operands[0], operands[1], operands[2], <MODE>mode);
DONE;
}
})
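;; ix86_emit_swdivsf approximates a/b with the hardware reciprocal plus
;; one Newton-Raphson step; a minimal sketch of the math (the exact
;; instruction sequence lives in i386.c):
;;	x0 = rcpps (b)
;;	a/b ~= a * x0 * (2.0 - b * x0)
;; which is only valid under the unsafe-math flags tested above.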
(define_insn "_div3"
[(set (match_operand:VF 0 "register_operand" "=x,x")
(div:VF
(match_operand:VF 1 "register_operand" "0,x")
(match_operand:VF 2 "nonimmediate_operand" "xm,xm")))]
"TARGET_SSE"
"@
div<ssemodesuffix>\t{%2, %0|%0, %2}
vdiv<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "ssediv")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "")])
(define_insn "_vmdiv3"
[(set (match_operand:VF_128 0 "register_operand" "=x,x")
(vec_merge:VF_128
(div:VF_128
(match_operand:VF_128 1 "register_operand" "0,x")
(match_operand:VF_128 2 "nonimmediate_operand" "xm,xm"))
(match_dup 1)
(const_int 1)))]
"TARGET_SSE"
"@
div<ssescalarmodesuffix>\t{%2, %0|%0, %2}
vdiv<ssescalarmodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "ssediv")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "")])
(define_insn "_rcp2"
[(set (match_operand:VF1 0 "register_operand" "=x")
(unspec:VF1
[(match_operand:VF1 1 "nonimmediate_operand" "xm")] UNSPEC_RCP))]
"TARGET_SSE"
"%vrcpps\t{%1, %0|%0, %1}"
[(set_attr "type" "sse")
(set_attr "atom_sse_attr" "rcp")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "")])
(define_insn "sse_vmrcpv4sf2"
[(set (match_operand:V4SF 0 "register_operand" "=x,x")
(vec_merge:V4SF
(unspec:V4SF [(match_operand:V4SF 1 "nonimmediate_operand" "xm,xm")]
UNSPEC_RCP)
(match_operand:V4SF 2 "register_operand" "0,x")
(const_int 1)))]
"TARGET_SSE"
"@
rcpss\t{%1, %0|%0, %1}
vrcpss\t{%1, %2, %0|%0, %2, %1}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sse")
(set_attr "atom_sse_attr" "rcp")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "SF")])
(define_expand "sqrt2"
[(set (match_operand:VF2 0 "register_operand")
(sqrt:VF2 (match_operand:VF2 1 "nonimmediate_operand")))]
"TARGET_SSE2")
(define_expand "sqrt2"
[(set (match_operand:VF1 0 "register_operand")
(sqrt:VF1 (match_operand:VF1 1 "nonimmediate_operand")))]
"TARGET_SSE"
{
if (TARGET_SSE_MATH
&& TARGET_RECIP_VEC_SQRT
&& !optimize_insn_for_size_p ()
&& flag_finite_math_only && !flag_trapping_math
&& flag_unsafe_math_optimizations)
{
ix86_emit_swsqrtsf (operands[0], operands[1], <MODE>mode, false);
DONE;
}
})
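;; Likewise, ix86_emit_swsqrtsf builds on rsqrtps; a sketch of the
;; refinement (see i386.c for the exact sequence):
;;	x0 = rsqrtps (a)
;;	sqrt(a) ~= a * x0 * (1.5 - 0.5 * a * x0 * x0)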
(define_insn "_sqrt2"
[(set (match_operand:VF 0 "register_operand" "=x")
(sqrt:VF (match_operand:VF 1 "nonimmediate_operand" "xm")))]
"TARGET_SSE"
"%vsqrt\t{%1, %0|%0, %1}"
[(set_attr "type" "sse")
(set_attr "atom_sse_attr" "sqrt")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "")])
(define_insn "_vmsqrt2"
[(set (match_operand:VF_128 0 "register_operand" "=x,x")
(vec_merge:VF_128
(sqrt:VF_128
(match_operand:VF_128 1 "nonimmediate_operand" "xm,xm"))
(match_operand:VF_128 2 "register_operand" "0,x")
(const_int 1)))]
"TARGET_SSE"
"@
sqrt<ssescalarmodesuffix>\t{%1, %0|%0, %1}
vsqrt<ssescalarmodesuffix>\t{%1, %2, %0|%0, %2, %1}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sse")
(set_attr "atom_sse_attr" "sqrt")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "")])
(define_expand "rsqrt2"
[(set (match_operand:VF1 0 "register_operand")
(unspec:VF1
[(match_operand:VF1 1 "nonimmediate_operand")] UNSPEC_RSQRT))]
"TARGET_SSE_MATH"
{
ix86_emit_swsqrtsf (operands[0], operands[1], <MODE>mode, true);
DONE;
})
(define_insn "_rsqrt2"
[(set (match_operand:VF1 0 "register_operand" "=x")
(unspec:VF1
[(match_operand:VF1 1 "nonimmediate_operand" "xm")] UNSPEC_RSQRT))]
"TARGET_SSE"
"%vrsqrtps\t{%1, %0|%0, %1}"
[(set_attr "type" "sse")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "")])
(define_insn "sse_vmrsqrtv4sf2"
[(set (match_operand:V4SF 0 "register_operand" "=x,x")
(vec_merge:V4SF
(unspec:V4SF [(match_operand:V4SF 1 "nonimmediate_operand" "xm,xm")]
UNSPEC_RSQRT)
(match_operand:V4SF 2 "register_operand" "0,x")
(const_int 1)))]
"TARGET_SSE"
"@
rsqrtss\t{%1, %0|%0, %1}
vrsqrtss\t{%1, %2, %0|%0, %2, %1}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sse")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "SF")])
;; ??? For !flag_finite_math_only, the representation with SMIN/SMAX
;; isn't really correct, as those rtl operators aren't defined when
;; applied to NaNs. Hopefully the optimizers won't get too smart on us.
(define_expand "3"
[(set (match_operand:VF 0 "register_operand")
(smaxmin:VF
(match_operand:VF 1 "nonimmediate_operand")
(match_operand:VF 2 "nonimmediate_operand")))]
"TARGET_SSE"
{
if (!flag_finite_math_only)
operands[1] = force_reg (<MODE>mode, operands[1]);
ix86_fixup_binary_operands_no_copy (<CODE>, <MODE>mode, operands);
})
(define_insn "*3_finite"
[(set (match_operand:VF 0 "register_operand" "=x,x")
(smaxmin:VF
(match_operand:VF 1 "nonimmediate_operand" "%0,x")
(match_operand:VF 2 "nonimmediate_operand" "xm,xm")))]
"TARGET_SSE && flag_finite_math_only
&& ix86_binary_operator_ok (, mode, operands)"
"@
<maxmin_float><ssemodesuffix>\t{%2, %0|%0, %2}
v<maxmin_float><ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseadd")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "")])
(define_insn "*3"
[(set (match_operand:VF 0 "register_operand" "=x,x")
(smaxmin:VF
(match_operand:VF 1 "register_operand" "0,x")
(match_operand:VF 2 "nonimmediate_operand" "xm,xm")))]
"TARGET_SSE && !flag_finite_math_only"
"@
<maxmin_float><ssemodesuffix>\t{%2, %0|%0, %2}
v<maxmin_float><ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseadd")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "")])
(define_insn "_vm3"
[(set (match_operand:VF_128 0 "register_operand" "=x,x")
(vec_merge:VF_128
(smaxmin:VF_128
(match_operand:VF_128 1 "register_operand" "0,x")
(match_operand:VF_128 2 "nonimmediate_operand" "xm,xm"))
(match_dup 1)
(const_int 1)))]
"TARGET_SSE"
"@
<maxmin_float><ssescalarmodesuffix>\t{%2, %0|%0, %2}
v<maxmin_float><ssescalarmodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sse")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "")])
;; These versions of the min/max patterns implement exactly the operations
;; min = (op1 < op2 ? op1 : op2)
;; max = (!(op1 < op2) ? op1 : op2)
;; Their operands are not commutative, and thus they may be used in the
;; presence of -0.0 and NaN.
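;; For example, min (-0.0, +0.0) yields +0.0 because -0.0 < +0.0 is
;; false, and min (NaN, x) yields x for the same reason: whenever the
;; compare is false the second operand is returned, which is exactly
;; the hardware minps/minpd behavior.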
(define_insn "*ieee_smin3"
[(set (match_operand:VF 0 "register_operand" "=x,x")
(unspec:VF
[(match_operand:VF 1 "register_operand" "0,x")
(match_operand:VF 2 "nonimmediate_operand" "xm,xm")]
UNSPEC_IEEE_MIN))]
"TARGET_SSE"
"@
min<ssemodesuffix>\t{%2, %0|%0, %2}
vmin<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseadd")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "")])
(define_insn "*ieee_smax3"
[(set (match_operand:VF 0 "register_operand" "=x,x")
(unspec:VF
[(match_operand:VF 1 "register_operand" "0,x")
(match_operand:VF 2 "nonimmediate_operand" "xm,xm")]
UNSPEC_IEEE_MAX))]
"TARGET_SSE"
"@
max<ssemodesuffix>\t{%2, %0|%0, %2}
vmax<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseadd")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "")])
(define_insn "avx_addsubv4df3"
[(set (match_operand:V4DF 0 "register_operand" "=x")
(vec_merge:V4DF
(plus:V4DF
(match_operand:V4DF 1 "register_operand" "x")
(match_operand:V4DF 2 "nonimmediate_operand" "xm"))
(minus:V4DF (match_dup 1) (match_dup 2))
(const_int 10)))]
"TARGET_AVX"
"vaddsubpd\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseadd")
(set_attr "prefix" "vex")
(set_attr "mode" "V4DF")])
(define_insn "sse3_addsubv2df3"
[(set (match_operand:V2DF 0 "register_operand" "=x,x")
(vec_merge:V2DF
(plus:V2DF
(match_operand:V2DF 1 "register_operand" "0,x")
(match_operand:V2DF 2 "nonimmediate_operand" "xm,xm"))
(minus:V2DF (match_dup 1) (match_dup 2))
(const_int 2)))]
"TARGET_SSE3"
"@
addsubpd\t{%2, %0|%0, %2}
vaddsubpd\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseadd")
(set_attr "atom_unit" "complex")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "V2DF")])
(define_insn "avx_addsubv8sf3"
[(set (match_operand:V8SF 0 "register_operand" "=x")
(vec_merge:V8SF
(plus:V8SF
(match_operand:V8SF 1 "register_operand" "x")
(match_operand:V8SF 2 "nonimmediate_operand" "xm"))
(minus:V8SF (match_dup 1) (match_dup 2))
(const_int 170)))]
"TARGET_AVX"
"vaddsubps\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseadd")
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
(define_insn "sse3_addsubv4sf3"
[(set (match_operand:V4SF 0 "register_operand" "=x,x")
(vec_merge:V4SF
(plus:V4SF
(match_operand:V4SF 1 "register_operand" "0,x")
(match_operand:V4SF 2 "nonimmediate_operand" "xm,xm"))
(minus:V4SF (match_dup 1) (match_dup 2))
(const_int 10)))]
"TARGET_SSE3"
"@
addsubps\t{%2, %0|%0, %2}
vaddsubps\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseadd")
(set_attr "prefix" "orig,vex")
(set_attr "prefix_rep" "1,*")
(set_attr "mode" "V4SF")])
(define_insn "avx_hv4df3"
[(set (match_operand:V4DF 0 "register_operand" "=x")
(vec_concat:V4DF
(vec_concat:V2DF
(plusminus:DF
(vec_select:DF
(match_operand:V4DF 1 "register_operand" "x")
(parallel [(const_int 0)]))
(vec_select:DF (match_dup 1) (parallel [(const_int 1)])))
(plusminus:DF
(vec_select:DF
(match_operand:V4DF 2 "nonimmediate_operand" "xm")
(parallel [(const_int 0)]))
(vec_select:DF (match_dup 2) (parallel [(const_int 1)]))))
(vec_concat:V2DF
(plusminus:DF
(vec_select:DF (match_dup 1) (parallel [(const_int 2)]))
(vec_select:DF (match_dup 1) (parallel [(const_int 3)])))
(plusminus:DF
(vec_select:DF (match_dup 2) (parallel [(const_int 2)]))
(vec_select:DF (match_dup 2) (parallel [(const_int 3)]))))))]
"TARGET_AVX"
"vhpd\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseadd")
(set_attr "prefix" "vex")
(set_attr "mode" "V4DF")])
(define_insn "sse3_hv2df3"
[(set (match_operand:V2DF 0 "register_operand" "=x,x")
(vec_concat:V2DF
(plusminus:DF
(vec_select:DF
(match_operand:V2DF 1 "register_operand" "0,x")
(parallel [(const_int 0)]))
(vec_select:DF (match_dup 1) (parallel [(const_int 1)])))
(plusminus:DF
(vec_select:DF
(match_operand:V2DF 2 "nonimmediate_operand" "xm,xm")
(parallel [(const_int 0)]))
(vec_select:DF (match_dup 2) (parallel [(const_int 1)])))))]
"TARGET_SSE3"
"@
h<plusminus_mnemonic>pd\t{%2, %0|%0, %2}
vh<plusminus_mnemonic>pd\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseadd")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "V2DF")])
(define_insn "avx_hv8sf3"
[(set (match_operand:V8SF 0 "register_operand" "=x")
(vec_concat:V8SF
(vec_concat:V4SF
(vec_concat:V2SF
(plusminus:SF
(vec_select:SF
(match_operand:V8SF 1 "register_operand" "x")
(parallel [(const_int 0)]))
(vec_select:SF (match_dup 1) (parallel [(const_int 1)])))
(plusminus:SF
(vec_select:SF (match_dup 1) (parallel [(const_int 2)]))
(vec_select:SF (match_dup 1) (parallel [(const_int 3)]))))
(vec_concat:V2SF
(plusminus:SF
(vec_select:SF
(match_operand:V8SF 2 "nonimmediate_operand" "xm")
(parallel [(const_int 0)]))
(vec_select:SF (match_dup 2) (parallel [(const_int 1)])))
(plusminus:SF
(vec_select:SF (match_dup 2) (parallel [(const_int 2)]))
(vec_select:SF (match_dup 2) (parallel [(const_int 3)])))))
(vec_concat:V4SF
(vec_concat:V2SF
(plusminus:SF
(vec_select:SF (match_dup 1) (parallel [(const_int 4)]))
(vec_select:SF (match_dup 1) (parallel [(const_int 5)])))
(plusminus:SF
(vec_select:SF (match_dup 1) (parallel [(const_int 6)]))
(vec_select:SF (match_dup 1) (parallel [(const_int 7)]))))
(vec_concat:V2SF
(plusminus:SF
(vec_select:SF (match_dup 2) (parallel [(const_int 4)]))
(vec_select:SF (match_dup 2) (parallel [(const_int 5)])))
(plusminus:SF
(vec_select:SF (match_dup 2) (parallel [(const_int 6)]))
(vec_select:SF (match_dup 2) (parallel [(const_int 7)])))))))]
"TARGET_AVX"
"vhps\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseadd")
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
(define_insn "sse3_hv4sf3"
[(set (match_operand:V4SF 0 "register_operand" "=x,x")
(vec_concat:V4SF
(vec_concat:V2SF
(plusminus:SF
(vec_select:SF
(match_operand:V4SF 1 "register_operand" "0,x")
(parallel [(const_int 0)]))
(vec_select:SF (match_dup 1) (parallel [(const_int 1)])))
(plusminus:SF
(vec_select:SF (match_dup 1) (parallel [(const_int 2)]))
(vec_select:SF (match_dup 1) (parallel [(const_int 3)]))))
(vec_concat:V2SF
(plusminus:SF
(vec_select:SF
(match_operand:V4SF 2 "nonimmediate_operand" "xm,xm")
(parallel [(const_int 0)]))
(vec_select:SF (match_dup 2) (parallel [(const_int 1)])))
(plusminus:SF
(vec_select:SF (match_dup 2) (parallel [(const_int 2)]))
(vec_select:SF (match_dup 2) (parallel [(const_int 3)]))))))]
"TARGET_SSE3"
"@
h<plusminus_mnemonic>ps\t{%2, %0|%0, %2}
vh<plusminus_mnemonic>ps\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseadd")
(set_attr "atom_unit" "complex")
(set_attr "prefix" "orig,vex")
(set_attr "prefix_rep" "1,*")
(set_attr "mode" "V4SF")])
(define_expand "reduc_splus_v4df"
[(match_operand:V4DF 0 "register_operand")
(match_operand:V4DF 1 "register_operand")]
"TARGET_AVX"
{
rtx tmp = gen_reg_rtx (V4DFmode);
rtx tmp2 = gen_reg_rtx (V4DFmode);
emit_insn (gen_avx_haddv4df3 (tmp, operands[1], operands[1]));
emit_insn (gen_avx_vperm2f128v4df3 (tmp2, tmp, tmp, GEN_INT (1)));
emit_insn (gen_addv4df3 (operands[0], tmp, tmp2));
DONE;
})
(define_expand "reduc_splus_v2df"
[(match_operand:V2DF 0 "register_operand")
(match_operand:V2DF 1 "register_operand")]
"TARGET_SSE3"
{
emit_insn (gen_sse3_haddv2df3 (operands[0], operands[1], operands[1]));
DONE;
})
(define_expand "reduc_splus_v8sf"
[(match_operand:V8SF 0 "register_operand")
(match_operand:V8SF 1 "register_operand")]
"TARGET_AVX"
{
rtx tmp = gen_reg_rtx (V8SFmode);
rtx tmp2 = gen_reg_rtx (V8SFmode);
emit_insn (gen_avx_haddv8sf3 (tmp, operands[1], operands[1]));
emit_insn (gen_avx_haddv8sf3 (tmp2, tmp, tmp));
emit_insn (gen_avx_vperm2f128v8sf3 (tmp, tmp2, tmp2, GEN_INT (1)));
emit_insn (gen_addv8sf3 (operands[0], tmp, tmp2));
DONE;
})
(define_expand "reduc_splus_v4sf"
[(match_operand:V4SF 0 "register_operand")
(match_operand:V4SF 1 "register_operand")]
"TARGET_SSE"
{
if (TARGET_SSE3)
{
rtx tmp = gen_reg_rtx (V4SFmode);
emit_insn (gen_sse3_haddv4sf3 (tmp, operands[1], operands[1]));
emit_insn (gen_sse3_haddv4sf3 (operands[0], tmp, tmp));
}
else
ix86_expand_reduc (gen_addv4sf3, operands[0], operands[1]);
DONE;
})
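;; Worked example for the SSE3 path with x = {a, b, c, d}:
;;	haddps x, x -> t = {a+b, c+d, a+b, c+d}
;;	haddps t, t -> {a+b+c+d, a+b+c+d, a+b+c+d, a+b+c+d}
;; so two horizontal adds leave the full sum in every element.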
;; Modes handled by reduc_sm{in,ax}* patterns.
(define_mode_iterator REDUC_SMINMAX_MODE
[(V32QI "TARGET_AVX2") (V16HI "TARGET_AVX2")
(V8SI "TARGET_AVX2") (V4DI "TARGET_AVX2")
(V8SF "TARGET_AVX") (V4DF "TARGET_AVX")
(V4SF "TARGET_SSE")])
(define_expand "reduc__"
[(smaxmin:REDUC_SMINMAX_MODE
(match_operand:REDUC_SMINMAX_MODE 0 "register_operand")
(match_operand:REDUC_SMINMAX_MODE 1 "register_operand"))]
""
{
ix86_expand_reduc (gen_<code><mode>3, operands[0], operands[1]);
DONE;
})
(define_expand "reduc__"
[(umaxmin:VI_256
(match_operand:VI_256 0 "register_operand")
(match_operand:VI_256 1 "register_operand"))]
"TARGET_AVX2"
{
ix86_expand_reduc (gen_<code><mode>3, operands[0], operands[1]);
DONE;
})
(define_expand "reduc_umin_v8hi"
[(umin:V8HI
(match_operand:V8HI 0 "register_operand")
(match_operand:V8HI 1 "register_operand"))]
"TARGET_SSE4_1"
{
ix86_expand_reduc (gen_uminv8hi3, operands[0], operands[1]);
DONE;
})
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Parallel floating point comparisons
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(define_insn "avx_cmp3"
[(set (match_operand:VF 0 "register_operand" "=x")
(unspec:VF
[(match_operand:VF 1 "register_operand" "x")
(match_operand:VF 2 "nonimmediate_operand" "xm")
(match_operand:SI 3 "const_0_to_31_operand" "n")]
UNSPEC_PCMP))]
"TARGET_AVX"
"vcmp\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssecmp")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "")])
(define_insn "avx_vmcmp3"
[(set (match_operand:VF_128 0 "register_operand" "=x")
(vec_merge:VF_128
(unspec:VF_128
[(match_operand:VF_128 1 "register_operand" "x")
(match_operand:VF_128 2 "nonimmediate_operand" "xm")
(match_operand:SI 3 "const_0_to_31_operand" "n")]
UNSPEC_PCMP)
(match_dup 1)
(const_int 1)))]
"TARGET_AVX"
"vcmp\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssecmp")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "")])
(define_insn "*_maskcmp3_comm"
[(set (match_operand:VF 0 "register_operand" "=x,x")
(match_operator:VF 3 "sse_comparison_operator"
[(match_operand:VF 1 "register_operand" "%0,x")
(match_operand:VF 2 "nonimmediate_operand" "xm,xm")]))]
"TARGET_SSE
&& GET_RTX_CLASS (GET_CODE (operands[3])) == RTX_COMM_COMPARE"
"@
cmp%D3<ssemodesuffix>\t{%2, %0|%0, %2}
vcmp%D3<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "ssecmp")
(set_attr "length_immediate" "1")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "")])
(define_insn "_maskcmp3"
[(set (match_operand:VF 0 "register_operand" "=x,x")
(match_operator:VF 3 "sse_comparison_operator"
[(match_operand:VF 1 "register_operand" "0,x")
(match_operand:VF 2 "nonimmediate_operand" "xm,xm")]))]
"TARGET_SSE"
"@
cmp%D3<ssemodesuffix>\t{%2, %0|%0, %2}
vcmp%D3<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "ssecmp")
(set_attr "length_immediate" "1")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "")])
(define_insn "_vmmaskcmp3"
[(set (match_operand:VF_128 0 "register_operand" "=x,x")
(vec_merge:VF_128
(match_operator:VF_128 3 "sse_comparison_operator"
[(match_operand:VF_128 1 "register_operand" "0,x")
(match_operand:VF_128 2 "nonimmediate_operand" "xm,xm")])
(match_dup 1)
(const_int 1)))]
"TARGET_SSE"
"@
cmp%D3<ssescalarmodesuffix>\t{%2, %0|%0, %2}
vcmp%D3<ssescalarmodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "ssecmp")
(set_attr "length_immediate" "1,*")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "")])
(define_insn "_comi"
[(set (reg:CCFP FLAGS_REG)
(compare:CCFP
(vec_select:MODEF
(match_operand: 0 "register_operand" "x")
(parallel [(const_int 0)]))
(vec_select:MODEF
(match_operand: 1 "nonimmediate_operand" "xm")
(parallel [(const_int 0)]))))]
"SSE_FLOAT_MODE_P (mode)"
"%vcomi\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecomi")
(set_attr "prefix" "maybe_vex")
(set_attr "prefix_rep" "0")
(set (attr "prefix_data16")
(if_then_else (eq_attr "mode" "DF")
(const_string "1")
(const_string "0")))
(set_attr "mode" "")])
(define_insn "_ucomi"
[(set (reg:CCFPU FLAGS_REG)
(compare:CCFPU
(vec_select:MODEF
(match_operand: 0 "register_operand" "x")
(parallel [(const_int 0)]))
(vec_select:MODEF
(match_operand: 1 "nonimmediate_operand" "xm")
(parallel [(const_int 0)]))))]
"SSE_FLOAT_MODE_P (mode)"
"%vucomi\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecomi")
(set_attr "prefix" "maybe_vex")
(set_attr "prefix_rep" "0")
(set (attr "prefix_data16")
(if_then_else (eq_attr "mode" "DF")
(const_string "1")
(const_string "0")))
(set_attr "mode" "")])
(define_expand "vcond"
[(set (match_operand:V_256 0 "register_operand")
(if_then_else:V_256
(match_operator 3 ""
[(match_operand:VF_256 4 "nonimmediate_operand")
(match_operand:VF_256 5 "nonimmediate_operand")])
(match_operand:V_256 1 "general_operand")
(match_operand:V_256 2 "general_operand")))]
"TARGET_AVX
&& (GET_MODE_NUNITS (<V_256:MODE>mode)
== GET_MODE_NUNITS (<VF_256:MODE>mode))"
{
bool ok = ix86_expand_fp_vcond (operands);
gcc_assert (ok);
DONE;
})
(define_expand "vcond"
[(set (match_operand:V_128 0 "register_operand")
(if_then_else:V_128
(match_operator 3 ""
[(match_operand:VF_128 4 "nonimmediate_operand")
(match_operand:VF_128 5 "nonimmediate_operand")])
(match_operand:V_128 1 "general_operand")
(match_operand:V_128 2 "general_operand")))]
"TARGET_SSE
&& (GET_MODE_NUNITS (<V_128:MODE>mode)
== GET_MODE_NUNITS (<VF_128:MODE>mode))"
{
bool ok = ix86_expand_fp_vcond (operands);
gcc_assert (ok);
DONE;
})
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Parallel floating point logical operations
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(define_insn "_andnot3"
[(set (match_operand:VF 0 "register_operand" "=x,x")
(and:VF
(not:VF
(match_operand:VF 1 "register_operand" "0,x"))
(match_operand:VF 2 "nonimmediate_operand" "xm,xm")))]
"TARGET_SSE"
{
static char buf[32];
const char *ops;
const char *suffix;
switch (get_attr_mode (insn))
{
case MODE_V8SF:
case MODE_V4SF:
suffix = "ps";
break;
default:
suffix = "";
}
switch (which_alternative)
{
case 0:
ops = "andn%s\t{%%2, %%0|%%0, %%2}";
break;
case 1:
ops = "vandn%s\t{%%2, %%1, %%0|%%0, %%1, %%2}";
break;
default:
gcc_unreachable ();
}
snprintf (buf, sizeof (buf), ops, suffix);
return buf;
}
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sselog")
(set_attr "prefix" "orig,vex")
(set (attr "mode")
(cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
(const_string "")
(match_test "TARGET_AVX")
(const_string "")
(match_test "optimize_function_for_size_p (cfun)")
(const_string "V4SF")
]
(const_string "")))])
(define_expand "3"
[(set (match_operand:VF 0 "register_operand")
(any_logic:VF
(match_operand:VF 1 "nonimmediate_operand")
(match_operand:VF 2 "nonimmediate_operand")))]
"TARGET_SSE"
"ix86_fixup_binary_operands_no_copy (, mode, operands);")
(define_insn "*3"
[(set (match_operand:VF 0 "register_operand" "=x,x")
(any_logic:VF
(match_operand:VF 1 "nonimmediate_operand" "%0,x")
(match_operand:VF 2 "nonimmediate_operand" "xm,xm")))]
"TARGET_SSE && ix86_binary_operator_ok (, mode, operands)"
{
static char buf[32];
const char *ops;
const char *suffix;
switch (get_attr_mode (insn))
{
case MODE_V8SF:
case MODE_V4SF:
suffix = "ps";
break;
default:
suffix = "";
}
switch (which_alternative)
{
case 0:
ops = "%s\t{%%2, %%0|%%0, %%2}";
break;
case 1:
ops = "v%s\t{%%2, %%1, %%0|%%0, %%1, %%2}";
break;
default:
gcc_unreachable ();
}
snprintf (buf, sizeof (buf), ops, suffix);
return buf;
}
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sselog")
(set_attr "prefix" "orig,vex")
(set (attr "mode")
(cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
(const_string "")
(match_test "TARGET_AVX")
(const_string "")
(match_test "optimize_function_for_size_p (cfun)")
(const_string "V4SF")
]
(const_string "")))])
(define_expand "copysign3"
[(set (match_dup 4)
(and:VF
(not:VF (match_dup 3))
(match_operand:VF 1 "nonimmediate_operand")))
(set (match_dup 5)
(and:VF (match_dup 3)
(match_operand:VF 2 "nonimmediate_operand")))
(set (match_operand:VF 0 "register_operand")
(ior:VF (match_dup 4) (match_dup 5)))]
"TARGET_SSE"
{
operands[3] = ix86_build_signbit_mask (<MODE>mode, 1, 0);
operands[4] = gen_reg_rtx (<MODE>mode);
operands[5] = gen_reg_rtx (<MODE>mode);
})
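;; This is the classic bitwise identity
;;	copysign (x, y) = (x & ~signmask) | (y & signmask)
;; with operand 3 holding the sign-bit mask built by
;; ix86_build_signbit_mask and operands 4/5 the two intermediates.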
;; Also define scalar versions. These are used for abs, neg, and
;; conditional move. Using subregs into vector modes causes register
;; allocation lossage. These patterns do not allow memory operands
;; because the native instructions read the full 128-bits.
(define_insn "*andnot3"
[(set (match_operand:MODEF 0 "register_operand" "=x,x")
(and:MODEF
(not:MODEF
(match_operand:MODEF 1 "register_operand" "0,x"))
(match_operand:MODEF 2 "register_operand" "x,x")))]
"SSE_FLOAT_MODE_P (mode)"
{
static char buf[32];
const char *ops;
const char *suffix
= (get_attr_mode (insn) == MODE_V4SF) ? "ps" : "";
switch (which_alternative)
{
case 0:
ops = "andn%s\t{%%2, %%0|%%0, %%2}";
break;
case 1:
ops = "vandn%s\t{%%2, %%1, %%0|%%0, %%1, %%2}";
break;
default:
gcc_unreachable ();
}
snprintf (buf, sizeof (buf), ops, suffix);
return buf;
}
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sselog")
(set_attr "prefix" "orig,vex")
(set (attr "mode")
(cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
(const_string "V4SF")
(match_test "TARGET_AVX")
(const_string "")
(match_test "optimize_function_for_size_p (cfun)")
(const_string "V4SF")
]
(const_string "")))])
(define_insn "*andnottf3"
[(set (match_operand:TF 0 "register_operand" "=x,x")
(and:TF
(not:TF (match_operand:TF 1 "register_operand" "0,x"))
(match_operand:TF 2 "nonimmediate_operand" "xm,xm")))]
"TARGET_SSE"
{
static char buf[32];
const char *ops;
const char *tmp
= (get_attr_mode (insn) == MODE_V4SF) ? "andnps" : "pandn";
switch (which_alternative)
{
case 0:
ops = "%s\t{%%2, %%0|%%0, %%2}";
break;
case 1:
ops = "v%s\t{%%2, %%1, %%0|%%0, %%1, %%2}";
break;
default:
gcc_unreachable ();
}
snprintf (buf, sizeof (buf), ops, tmp);
return buf;
}
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sselog")
(set (attr "prefix_data16")
(if_then_else
(and (eq_attr "alternative" "0")
(eq_attr "mode" "TI"))
(const_string "1")
(const_string "*")))
(set_attr "prefix" "orig,vex")
(set (attr "mode")
(cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
(const_string "V4SF")
(match_test "TARGET_AVX")
(const_string "TI")
(ior (not (match_test "TARGET_SSE2"))
(match_test "optimize_function_for_size_p (cfun)"))
(const_string "V4SF")
]
(const_string "TI")))])
(define_insn "*3"
[(set (match_operand:MODEF 0 "register_operand" "=x,x")
(any_logic:MODEF
(match_operand:MODEF 1 "register_operand" "%0,x")
(match_operand:MODEF 2 "register_operand" "x,x")))]
"SSE_FLOAT_MODE_P (mode)"
{
static char buf[32];
const char *ops;
const char *suffix
= (get_attr_mode (insn) == MODE_V4SF) ? "ps" : "";
switch (which_alternative)
{
case 0:
ops = "%s\t{%%2, %%0|%%0, %%2}";
break;
case 1:
ops = "v%s\t{%%2, %%1, %%0|%%0, %%1, %%2}";
break;
default:
gcc_unreachable ();
}
snprintf (buf, sizeof (buf), ops, suffix);
return buf;
}
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sselog")
(set_attr "prefix" "orig,vex")
(set (attr "mode")
(cond [(match_test "TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL")
(const_string "V4SF")
(match_test "TARGET_AVX")
(const_string "")
(match_test "optimize_function_for_size_p (cfun)")
(const_string "V4SF")
]
(const_string "")))])
(define_expand "