;; GCC machine description for SSE instructions
;; Copyright (C) 2005, 2006, 2007, 2008
;; Free Software Foundation, Inc.
;;
;; This file is part of GCC.
;;
;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
;;
;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.
;; 16 byte integral modes handled by SSE, minus TImode, which gets
;; special-cased for TARGET_64BIT.
(define_mode_iterator SSEMODEI [V16QI V8HI V4SI V2DI])
;; All 16-byte vector modes handled by SSE
(define_mode_iterator SSEMODE [V16QI V8HI V4SI V2DI V4SF V2DF])
;; Mix-n-match
(define_mode_iterator SSEMODE12 [V16QI V8HI])
(define_mode_iterator SSEMODE24 [V8HI V4SI])
(define_mode_iterator SSEMODE14 [V16QI V4SI])
(define_mode_iterator SSEMODE124 [V16QI V8HI V4SI])
(define_mode_iterator SSEMODE248 [V8HI V4SI V2DI])
(define_mode_iterator SSEMODE1248 [V16QI V8HI V4SI V2DI])
(define_mode_iterator SSEMODEF4 [SF DF V4SF V2DF])
(define_mode_iterator SSEMODEF2P [V4SF V2DF])
;; Mapping from float mode to required SSE level
(define_mode_attr sse [(SF "sse") (DF "sse2") (V4SF "sse") (V2DF "sse2")])
;; Mapping from integer vector mode to mnemonic suffix
(define_mode_attr ssevecsize [(V16QI "b") (V8HI "w") (V4SI "d") (V2DI "q")])
;; Mapping of the sse5 suffix
(define_mode_attr ssemodesuffixf4 [(SF "ss") (DF "sd") (V4SF "ps") (V2DF "pd")])
(define_mode_attr ssemodesuffixf2s [(SF "ss") (DF "sd") (V4SF "ss") (V2DF "sd")])
(define_mode_attr ssemodesuffixf2c [(V4SF "s") (V2DF "d")])
;; Mapping of the max integer size for sse5 rotate immediate constraint
(define_mode_attr sserotatemax [(V16QI "7") (V8HI "15") (V4SI "31") (V2DI "63")])
;; Mapping of vector modes back to the scalar modes
(define_mode_attr ssescalarmode [(V4SF "SF") (V2DF "DF")])
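;; As a rough illustration of how the iterators and attributes expand: a
;; pattern named "<sse>_vmadd<mode>3" over SSEMODEF2P is instantiated as
;; "sse_vmaddv4sf3" for V4SF and "sse2_vmaddv2df3" for V2DF, and an output
;; template such as "adds<ssemodesuffixf2c>" prints "addss" and "addsd"
;; respectively.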
;; Patterns whose name begins with "sse{,2,3}_" are invoked by intrinsics.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Move patterns
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; All of these patterns are enabled for SSE1 as well as SSE2.
;; This is essential for maintaining stable calling conventions.
(define_expand "mov"
[(set (match_operand:SSEMODEI 0 "nonimmediate_operand" "")
(match_operand:SSEMODEI 1 "nonimmediate_operand" ""))]
"TARGET_SSE"
{
ix86_expand_vector_move (<MODE>mode, operands);
DONE;
})
(define_insn "*mov_internal"
[(set (match_operand:SSEMODEI 0 "nonimmediate_operand" "=x,x ,m")
(match_operand:SSEMODEI 1 "nonimmediate_or_sse_const_operand" "C ,xm,x"))]
"TARGET_SSE
&& (register_operand (operands[0], <MODE>mode)
|| register_operand (operands[1], <MODE>mode))"
{
switch (which_alternative)
{
case 0:
return standard_sse_constant_opcode (insn, operands[1]);
case 1:
case 2:
if (get_attr_mode (insn) == MODE_V4SF)
return "movaps\t{%1, %0|%0, %1}";
else
return "movdqa\t{%1, %0|%0, %1}";
default:
gcc_unreachable ();
}
}
[(set_attr "type" "sselog1,ssemov,ssemov")
(set (attr "mode")
(if_then_else
(ior (ior (ne (symbol_ref "optimize_size") (const_int 0))
(eq (symbol_ref "TARGET_SSE2") (const_int 0)))
(and (eq_attr "alternative" "2")
(ne (symbol_ref "TARGET_SSE_TYPELESS_STORES")
(const_int 0))))
(const_string "V4SF")
(const_string "TI")))])
;; Move a DI from a 32-bit register pair (e.g. %edx:%eax) to an xmm.
;; We'd rather avoid this entirely; if the 32-bit reg pair was loaded
;; from memory, we'd prefer to load the memory directly into the %xmm
;; register. To facilitate this happy circumstance, this pattern won't
;; split until after register allocation. If the 64-bit value didn't
;; come from memory, this is the best we can do. This is much better
;; than storing %edx:%eax into a stack temporary and loading an %xmm
;; from there.
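;; For a register source, the split below emits roughly the sequence
;;   movd %eax, %xmm0 ; movd %edx, %xmm1 ; punpckldq %xmm1, %xmm0
;; (register names purely illustrative).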
(define_insn_and_split "movdi_to_sse"
[(parallel
[(set (match_operand:V4SI 0 "register_operand" "=?x,x")
(subreg:V4SI (match_operand:DI 1 "nonimmediate_operand" "r,m") 0))
(clobber (match_scratch:V4SI 2 "=&x,X"))])]
"!TARGET_64BIT && TARGET_SSE2 && TARGET_INTER_UNIT_MOVES"
"#"
"&& reload_completed"
[(const_int 0)]
{
if (register_operand (operands[1], DImode))
{
/* The DImode arrived in a pair of integral registers (e.g. %edx:%eax).
Assemble the 64-bit DImode value in an xmm register. */
emit_insn (gen_sse2_loadld (operands[0], CONST0_RTX (V4SImode),
gen_rtx_SUBREG (SImode, operands[1], 0)));
emit_insn (gen_sse2_loadld (operands[2], CONST0_RTX (V4SImode),
gen_rtx_SUBREG (SImode, operands[1], 4)));
emit_insn (gen_sse2_punpckldq (operands[0], operands[0], operands[2]));
}
else if (memory_operand (operands[1], DImode))
emit_insn (gen_vec_concatv2di (gen_lowpart (V2DImode, operands[0]), operands[1], const0_rtx));
else
gcc_unreachable ();
})
(define_expand "mov"
[(set (match_operand:SSEMODEF2P 0 "nonimmediate_operand" "")
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" ""))]
"TARGET_SSE"
{
ix86_expand_vector_move (<MODE>mode, operands);
DONE;
})
(define_insn "*movv4sf_internal"
[(set (match_operand:V4SF 0 "nonimmediate_operand" "=x,x,m")
(match_operand:V4SF 1 "nonimmediate_or_sse_const_operand" "C,xm,x"))]
"TARGET_SSE
&& (register_operand (operands[0], V4SFmode)
|| register_operand (operands[1], V4SFmode))"
{
switch (which_alternative)
{
case 0:
return standard_sse_constant_opcode (insn, operands[1]);
case 1:
case 2:
return "movaps\t{%1, %0|%0, %1}";
default:
gcc_unreachable ();
}
}
[(set_attr "type" "sselog1,ssemov,ssemov")
(set_attr "mode" "V4SF")])
(define_split
[(set (match_operand:V4SF 0 "register_operand" "")
(match_operand:V4SF 1 "zero_extended_scalar_load_operand" ""))]
"TARGET_SSE && reload_completed"
[(set (match_dup 0)
(vec_merge:V4SF
(vec_duplicate:V4SF (match_dup 1))
(match_dup 2)
(const_int 1)))]
{
operands[1] = simplify_gen_subreg (SFmode, operands[1], V4SFmode, 0);
operands[2] = CONST0_RTX (V4SFmode);
})
(define_insn "*movv2df_internal"
[(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,x,m")
(match_operand:V2DF 1 "nonimmediate_or_sse_const_operand" "C,xm,x"))]
"TARGET_SSE
&& (register_operand (operands[0], V2DFmode)
|| register_operand (operands[1], V2DFmode))"
{
switch (which_alternative)
{
case 0:
return standard_sse_constant_opcode (insn, operands[1]);
case 1:
case 2:
if (get_attr_mode (insn) == MODE_V4SF)
return "movaps\t{%1, %0|%0, %1}";
else
return "movapd\t{%1, %0|%0, %1}";
default:
gcc_unreachable ();
}
}
[(set_attr "type" "sselog1,ssemov,ssemov")
(set (attr "mode")
(if_then_else
(ior (ior (ne (symbol_ref "optimize_size") (const_int 0))
(eq (symbol_ref "TARGET_SSE2") (const_int 0)))
(and (eq_attr "alternative" "2")
(ne (symbol_ref "TARGET_SSE_TYPELESS_STORES")
(const_int 0))))
(const_string "V4SF")
(const_string "V2DF")))])
(define_split
[(set (match_operand:V2DF 0 "register_operand" "")
(match_operand:V2DF 1 "zero_extended_scalar_load_operand" ""))]
"TARGET_SSE2 && reload_completed"
[(set (match_dup 0) (vec_concat:V2DF (match_dup 1) (match_dup 2)))]
{
operands[1] = simplify_gen_subreg (DFmode, operands[1], V2DFmode, 0);
operands[2] = CONST0_RTX (DFmode);
})
(define_expand "push1"
[(match_operand:SSEMODE 0 "register_operand" "")]
"TARGET_SSE"
{
ix86_expand_push (<MODE>mode, operands[0]);
DONE;
})
(define_expand "movmisalign"
[(set (match_operand:SSEMODE 0 "nonimmediate_operand" "")
(match_operand:SSEMODE 1 "nonimmediate_operand" ""))]
"TARGET_SSE"
{
ix86_expand_vector_move_misalign (<MODE>mode, operands);
DONE;
})
(define_insn "sse_movups"
[(set (match_operand:V4SF 0 "nonimmediate_operand" "=x,m")
(unspec:V4SF [(match_operand:V4SF 1 "nonimmediate_operand" "xm,x")]
UNSPEC_MOVU))]
"TARGET_SSE && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
"movups\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "mode" "V2DF")])
(define_insn "sse2_movupd"
[(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,m")
(unspec:V2DF [(match_operand:V2DF 1 "nonimmediate_operand" "xm,x")]
UNSPEC_MOVU))]
"TARGET_SSE2 && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
"movupd\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "mode" "V2DF")])
(define_insn "sse2_movdqu"
[(set (match_operand:V16QI 0 "nonimmediate_operand" "=x,m")
(unspec:V16QI [(match_operand:V16QI 1 "nonimmediate_operand" "xm,x")]
UNSPEC_MOVU))]
"TARGET_SSE2 && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
"movdqu\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "prefix_data16" "1")
(set_attr "mode" "TI")])
(define_insn "_movnt"
[(set (match_operand:SSEMODEF2P 0 "memory_operand" "=m")
(unspec:SSEMODEF2P
[(match_operand:SSEMODEF2P 1 "register_operand" "x")]
UNSPEC_MOVNT))]
"SSE_VEC_FLOAT_MODE_P (mode)"
"movntp\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "mode" "")])
(define_insn "sse2_movntv2di"
[(set (match_operand:V2DI 0 "memory_operand" "=m")
(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "x")]
UNSPEC_MOVNT))]
"TARGET_SSE2"
"movntdq\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix_data16" "1")
(set_attr "mode" "TI")])
(define_insn "sse2_movntsi"
[(set (match_operand:SI 0 "memory_operand" "=m")
(unspec:SI [(match_operand:SI 1 "register_operand" "r")]
UNSPEC_MOVNT))]
"TARGET_SSE2"
"movnti\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecvt")
(set_attr "mode" "V2DF")])
(define_insn "sse3_lddqu"
[(set (match_operand:V16QI 0 "register_operand" "=x")
(unspec:V16QI [(match_operand:V16QI 1 "memory_operand" "m")]
UNSPEC_LDDQU))]
"TARGET_SSE3"
"lddqu\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix_rep" "1")
(set_attr "mode" "TI")])
; Expand patterns for non-temporal stores. At the moment, only those
; that directly map to insns are defined; it would be possible to
; define patterns for other modes that would expand to several insns.
(define_expand "storent"
[(set (match_operand:SSEMODEF2P 0 "memory_operand" "")
(unspec:SSEMODEF2P
[(match_operand:SSEMODEF2P 1 "register_operand" "")]
UNSPEC_MOVNT))]
"SSE_VEC_FLOAT_MODE_P (mode)"
"")
(define_expand "storent"
[(set (match_operand:MODEF 0 "memory_operand" "")
(unspec:MODEF
[(match_operand:MODEF 1 "register_operand" "")]
UNSPEC_MOVNT))]
"TARGET_SSE4A"
"")
(define_expand "storentv2di"
[(set (match_operand:V2DI 0 "memory_operand" "")
(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "")]
UNSPEC_MOVNT))]
"TARGET_SSE2"
"")
(define_expand "storentsi"
[(set (match_operand:SI 0 "memory_operand" "")
(unspec:SI [(match_operand:SI 1 "register_operand" "")]
UNSPEC_MOVNT))]
"TARGET_SSE2"
"")
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Parallel floating point arithmetic
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(define_expand "neg2"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "")
(neg:SSEMODEF2P (match_operand:SSEMODEF2P 1 "register_operand" "")))]
"SSE_VEC_FLOAT_MODE_P (mode)"
"ix86_expand_fp_absneg_operator (NEG, mode, operands); DONE;")
(define_expand "abs2"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "")
(abs:SSEMODEF2P (match_operand:SSEMODEF2P 1 "register_operand" "")))]
"SSE_VEC_FLOAT_MODE_P (mode)"
"ix86_expand_fp_absneg_operator (ABS, mode, operands); DONE;")
(define_expand "add3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "")
(plus:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "")))]
"SSE_VEC_FLOAT_MODE_P (mode)"
"ix86_fixup_binary_operands_no_copy (PLUS, mode, operands);")
(define_insn "*add3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(plus:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "%0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "xm")))]
"SSE_VEC_FLOAT_MODE_P (mode)
&& ix86_binary_operator_ok (PLUS, mode, operands)"
"addp\t{%2, %0|%0, %2}"
[(set_attr "type" "sseadd")
(set_attr "mode" "")])
(define_insn "_vmadd3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(vec_merge:SSEMODEF2P
(plus:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "register_operand" "0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "xm"))
(match_dup 1)
(const_int 1)))]
"SSE_VEC_FLOAT_MODE_P (mode)
&& ix86_binary_operator_ok (PLUS, V4SFmode, operands)"
"adds\t{%2, %0|%0, %2}"
[(set_attr "type" "sseadd")
(set_attr "mode" "")])
(define_expand "sub3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "")
(minus:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "register_operand" "")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "")))]
"SSE_VEC_FLOAT_MODE_P (mode)"
"ix86_fixup_binary_operands_no_copy (MINUS, mode, operands);")
(define_insn "*sub3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(minus:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "register_operand" "0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "xm")))]
"SSE_VEC_FLOAT_MODE_P (mode)"
"subp\t{%2, %0|%0, %2}"
[(set_attr "type" "sseadd")
(set_attr "mode" "")])
(define_insn "_vmsub3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(vec_merge:SSEMODEF2P
(minus:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "register_operand" "0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "xm"))
(match_dup 1)
(const_int 1)))]
"SSE_VEC_FLOAT_MODE_P (mode)"
"subs\t{%2, %0|%0, %2}"
[(set_attr "type" "sseadd")
(set_attr "mode" "")])
(define_expand "mul3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "")
(mult:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "")))]
"SSE_VEC_FLOAT_MODE_P (mode)"
"ix86_fixup_binary_operands_no_copy (MULT, mode, operands);")
(define_insn "*mul3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(mult:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "%0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "xm")))]
"SSE_VEC_FLOAT_MODE_P (mode)
&& ix86_binary_operator_ok (MULT, mode, operands)"
"mulp\t{%2, %0|%0, %2}"
[(set_attr "type" "ssemul")
(set_attr "mode" "")])
(define_insn "_vmmul3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(vec_merge:SSEMODEF2P
(mult:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "register_operand" "0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "xm"))
(match_dup 1)
(const_int 1)))]
"SSE_VEC_FLOAT_MODE_P (mode)
&& ix86_binary_operator_ok (MULT, mode, operands)"
"muls\t{%2, %0|%0, %2}"
[(set_attr "type" "ssemul")
(set_attr "mode" "")])
(define_expand "divv4sf3"
[(set (match_operand:V4SF 0 "register_operand" "")
(div:V4SF (match_operand:V4SF 1 "register_operand" "")
(match_operand:V4SF 2 "nonimmediate_operand" "")))]
"TARGET_SSE"
{
ix86_fixup_binary_operands_no_copy (DIV, V4SFmode, operands);
if (TARGET_SSE_MATH && TARGET_RECIP && !optimize_size
&& flag_finite_math_only && !flag_trapping_math
&& flag_unsafe_math_optimizations)
{
ix86_emit_swdivsf (operands[0], operands[1],
operands[2], V4SFmode);
DONE;
}
})
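;; The ix86_emit_swdivsf path above replaces the division with, roughly, a
;; reciprocal estimate (rcpps) refined by a Newton-Raphson step, which is why
;; it is only used under the unsafe/finite-math flags checked above.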
(define_expand "divv2df3"
[(set (match_operand:V2DF 0 "register_operand" "")
(div:V2DF (match_operand:V2DF 1 "register_operand" "")
(match_operand:V2DF 2 "nonimmediate_operand" "")))]
"TARGET_SSE2"
"ix86_fixup_binary_operands_no_copy (DIV, V2DFmode, operands);")
(define_insn "_div3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(div:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "register_operand" "0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "xm")))]
"SSE_VEC_FLOAT_MODE_P (mode)"
"divp\t{%2, %0|%0, %2}"
[(set_attr "type" "ssediv")
(set_attr "mode" "")])
(define_insn "_vmdiv3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(vec_merge:SSEMODEF2P
(div:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "register_operand" "0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "xm"))
(match_dup 1)
(const_int 1)))]
"SSE_VEC_FLOAT_MODE_P (mode)"
"divs\t{%2, %0|%0, %2}"
[(set_attr "type" "ssediv")
(set_attr "mode" "")])
(define_insn "sse_rcpv4sf2"
[(set (match_operand:V4SF 0 "register_operand" "=x")
(unspec:V4SF
[(match_operand:V4SF 1 "nonimmediate_operand" "xm")] UNSPEC_RCP))]
"TARGET_SSE"
"rcpps\t{%1, %0|%0, %1}"
[(set_attr "type" "sse")
(set_attr "mode" "V4SF")])
(define_insn "sse_vmrcpv4sf2"
[(set (match_operand:V4SF 0 "register_operand" "=x")
(vec_merge:V4SF
(unspec:V4SF [(match_operand:V4SF 1 "nonimmediate_operand" "xm")]
UNSPEC_RCP)
(match_operand:V4SF 2 "register_operand" "0")
(const_int 1)))]
"TARGET_SSE"
"rcpss\t{%1, %0|%0, %1}"
[(set_attr "type" "sse")
(set_attr "mode" "SF")])
(define_expand "sqrtv4sf2"
[(set (match_operand:V4SF 0 "register_operand" "")
(sqrt:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "")))]
"TARGET_SSE"
{
if (TARGET_SSE_MATH && TARGET_RECIP && !optimize_size
&& flag_finite_math_only && !flag_trapping_math
&& flag_unsafe_math_optimizations)
{
ix86_emit_swsqrtsf (operands[0], operands[1], V4SFmode, 0);
DONE;
}
})
(define_insn "sse_sqrtv4sf2"
[(set (match_operand:V4SF 0 "register_operand" "=x")
(sqrt:V4SF (match_operand:V4SF 1 "nonimmediate_operand" "xm")))]
"TARGET_SSE"
"sqrtps\t{%1, %0|%0, %1}"
[(set_attr "type" "sse")
(set_attr "mode" "V4SF")])
(define_insn "sqrtv2df2"
[(set (match_operand:V2DF 0 "register_operand" "=x")
(sqrt:V2DF (match_operand:V2DF 1 "nonimmediate_operand" "xm")))]
"TARGET_SSE2"
"sqrtpd\t{%1, %0|%0, %1}"
[(set_attr "type" "sse")
(set_attr "mode" "V2DF")])
(define_insn "_vmsqrt2"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(vec_merge:SSEMODEF2P
(sqrt:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "xm"))
(match_operand:SSEMODEF2P 2 "register_operand" "0")
(const_int 1)))]
"SSE_VEC_FLOAT_MODE_P (mode)"
"sqrts\t{%1, %0|%0, %1}"
[(set_attr "type" "sse")
(set_attr "mode" "")])
(define_expand "rsqrtv4sf2"
[(set (match_operand:V4SF 0 "register_operand" "")
(unspec:V4SF
[(match_operand:V4SF 1 "nonimmediate_operand" "")] UNSPEC_RSQRT))]
"TARGET_SSE_MATH"
{
ix86_emit_swsqrtsf (operands[0], operands[1], V4SFmode, 1);
DONE;
})
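;; ix86_emit_swsqrtsf computes, roughly, a reciprocal square root estimate
;; (rsqrtps) refined by a Newton-Raphson step; the final argument selects
;; rsqrt (nonzero, as here) versus sqrt (zero, as in sqrtv4sf2 above).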
(define_insn "sse_rsqrtv4sf2"
[(set (match_operand:V4SF 0 "register_operand" "=x")
(unspec:V4SF
[(match_operand:V4SF 1 "nonimmediate_operand" "xm")] UNSPEC_RSQRT))]
"TARGET_SSE"
"rsqrtps\t{%1, %0|%0, %1}"
[(set_attr "type" "sse")
(set_attr "mode" "V4SF")])
(define_insn "sse_vmrsqrtv4sf2"
[(set (match_operand:V4SF 0 "register_operand" "=x")
(vec_merge:V4SF
(unspec:V4SF [(match_operand:V4SF 1 "nonimmediate_operand" "xm")]
UNSPEC_RSQRT)
(match_operand:V4SF 2 "register_operand" "0")
(const_int 1)))]
"TARGET_SSE"
"rsqrtss\t{%1, %0|%0, %1}"
[(set_attr "type" "sse")
(set_attr "mode" "SF")])
;; ??? For !flag_finite_math_only, the representation with SMIN/SMAX
;; isn't really correct, as those rtl operators aren't defined when
;; applied to NaNs. Hopefully the optimizers won't get too smart on us.
(define_expand "smin3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "")
(smin:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "")))]
"SSE_VEC_FLOAT_MODE_P (mode)"
{
if (!flag_finite_math_only)
operands[1] = force_reg (<MODE>mode, operands[1]);
ix86_fixup_binary_operands_no_copy (SMIN, <MODE>mode, operands);
})
(define_insn "*smin3_finite"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(smin:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "%0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "xm")))]
"SSE_VEC_FLOAT_MODE_P (mode) && flag_finite_math_only
&& ix86_binary_operator_ok (SMIN, mode, operands)"
"minp\t{%2, %0|%0, %2}"
[(set_attr "type" "sseadd")
(set_attr "mode" "")])
(define_insn "*smin3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(smin:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "register_operand" "0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "xm")))]
"SSE_VEC_FLOAT_MODE_P (mode)"
"minp\t{%2, %0|%0, %2}"
[(set_attr "type" "sseadd")
(set_attr "mode" "")])
(define_insn "_vmsmin3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(vec_merge:SSEMODEF2P
(smin:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "register_operand" "0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "xm"))
(match_dup 1)
(const_int 1)))]
"SSE_VEC_FLOAT_MODE_P (mode)"
"mins\t{%2, %0|%0, %2}"
[(set_attr "type" "sse")
(set_attr "mode" "")])
(define_expand "smax3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "")
(smax:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "")))]
"SSE_VEC_FLOAT_MODE_P (mode)"
{
if (!flag_finite_math_only)
operands[1] = force_reg (<MODE>mode, operands[1]);
ix86_fixup_binary_operands_no_copy (SMAX, <MODE>mode, operands);
})
(define_insn "*smax3_finite"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(smax:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "%0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "xm")))]
"SSE_VEC_FLOAT_MODE_P (mode) && flag_finite_math_only
&& ix86_binary_operator_ok (SMAX, mode, operands)"
"maxp\t{%2, %0|%0, %2}"
[(set_attr "type" "sseadd")
(set_attr "mode" "")])
(define_insn "*smax3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(smax:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "register_operand" "0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "xm")))]
"SSE_VEC_FLOAT_MODE_P (mode)"
"maxp\t{%2, %0|%0, %2}"
[(set_attr "type" "sseadd")
(set_attr "mode" "")])
(define_insn "_vmsmax3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(vec_merge:SSEMODEF2P
(smax:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "register_operand" "0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "xm"))
(match_dup 1)
(const_int 1)))]
"SSE_VEC_FLOAT_MODE_P (mode)"
"maxs\t{%2, %0|%0, %2}"
[(set_attr "type" "sseadd")
(set_attr "mode" "")])
;; These versions of the min/max patterns implement exactly the operations
;; min = (op1 < op2 ? op1 : op2)
;; max = (!(op1 < op2) ? op1 : op2)
;; Their operands are not commutative, and thus they may be used in the
;; presence of -0.0 and NaN.
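;; (When either input is a NaN the comparison is false and op2 is returned,
;; which matches what the minp*/maxp* hardware instructions do.)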
(define_insn "*ieee_smin3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(unspec:SSEMODEF2P
[(match_operand:SSEMODEF2P 1 "register_operand" "0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "xm")]
UNSPEC_IEEE_MIN))]
"SSE_VEC_FLOAT_MODE_P (mode)"
"minp\t{%2, %0|%0, %2}"
[(set_attr "type" "sseadd")
(set_attr "mode" "")])
(define_insn "*ieee_smax3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(unspec:SSEMODEF2P
[(match_operand:SSEMODEF2P 1 "register_operand" "0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "xm")]
UNSPEC_IEEE_MAX))]
"SSE_VEC_FLOAT_MODE_P (mode)"
"maxp\t{%2, %0|%0, %2}"
[(set_attr "type" "sseadd")
(set_attr "mode" "")])
(define_insn "sse3_addsubv4sf3"
[(set (match_operand:V4SF 0 "register_operand" "=x")
(vec_merge:V4SF
(plus:V4SF
(match_operand:V4SF 1 "register_operand" "0")
(match_operand:V4SF 2 "nonimmediate_operand" "xm"))
(minus:V4SF (match_dup 1) (match_dup 2))
(const_int 5)))]
"TARGET_SSE3"
"addsubps\t{%2, %0|%0, %2}"
[(set_attr "type" "sseadd")
(set_attr "prefix_rep" "1")
(set_attr "mode" "V4SF")])
(define_insn "sse3_addsubv2df3"
[(set (match_operand:V2DF 0 "register_operand" "=x")
(vec_merge:V2DF
(plus:V2DF
(match_operand:V2DF 1 "register_operand" "0")
(match_operand:V2DF 2 "nonimmediate_operand" "xm"))
(minus:V2DF (match_dup 1) (match_dup 2))
(const_int 1)))]
"TARGET_SSE3"
"addsubpd\t{%2, %0|%0, %2}"
[(set_attr "type" "sseadd")
(set_attr "mode" "V2DF")])
(define_insn "sse3_haddv4sf3"
[(set (match_operand:V4SF 0 "register_operand" "=x")
(vec_concat:V4SF
(vec_concat:V2SF
(plus:SF
(vec_select:SF
(match_operand:V4SF 1 "register_operand" "0")
(parallel [(const_int 0)]))
(vec_select:SF (match_dup 1) (parallel [(const_int 1)])))
(plus:SF
(vec_select:SF (match_dup 1) (parallel [(const_int 2)]))
(vec_select:SF (match_dup 1) (parallel [(const_int 3)]))))
(vec_concat:V2SF
(plus:SF
(vec_select:SF
(match_operand:V4SF 2 "nonimmediate_operand" "xm")
(parallel [(const_int 0)]))
(vec_select:SF (match_dup 2) (parallel [(const_int 1)])))
(plus:SF
(vec_select:SF (match_dup 2) (parallel [(const_int 2)]))
(vec_select:SF (match_dup 2) (parallel [(const_int 3)]))))))]
"TARGET_SSE3"
"haddps\t{%2, %0|%0, %2}"
[(set_attr "type" "sseadd")
(set_attr "prefix_rep" "1")
(set_attr "mode" "V4SF")])
(define_insn "sse3_haddv2df3"
[(set (match_operand:V2DF 0 "register_operand" "=x")
(vec_concat:V2DF
(plus:DF
(vec_select:DF
(match_operand:V2DF 1 "register_operand" "0")
(parallel [(const_int 0)]))
(vec_select:DF (match_dup 1) (parallel [(const_int 1)])))
(plus:DF
(vec_select:DF
(match_operand:V2DF 2 "nonimmediate_operand" "xm")
(parallel [(const_int 0)]))
(vec_select:DF (match_dup 2) (parallel [(const_int 1)])))))]
"TARGET_SSE3"
"haddpd\t{%2, %0|%0, %2}"
[(set_attr "type" "sseadd")
(set_attr "mode" "V2DF")])
(define_insn "sse3_hsubv4sf3"
[(set (match_operand:V4SF 0 "register_operand" "=x")
(vec_concat:V4SF
(vec_concat:V2SF
(minus:SF
(vec_select:SF
(match_operand:V4SF 1 "register_operand" "0")
(parallel [(const_int 0)]))
(vec_select:SF (match_dup 1) (parallel [(const_int 1)])))
(minus:SF
(vec_select:SF (match_dup 1) (parallel [(const_int 2)]))
(vec_select:SF (match_dup 1) (parallel [(const_int 3)]))))
(vec_concat:V2SF
(minus:SF
(vec_select:SF
(match_operand:V4SF 2 "nonimmediate_operand" "xm")
(parallel [(const_int 0)]))
(vec_select:SF (match_dup 2) (parallel [(const_int 1)])))
(minus:SF
(vec_select:SF (match_dup 2) (parallel [(const_int 2)]))
(vec_select:SF (match_dup 2) (parallel [(const_int 3)]))))))]
"TARGET_SSE3"
"hsubps\t{%2, %0|%0, %2}"
[(set_attr "type" "sseadd")
(set_attr "prefix_rep" "1")
(set_attr "mode" "V4SF")])
(define_insn "sse3_hsubv2df3"
[(set (match_operand:V2DF 0 "register_operand" "=x")
(vec_concat:V2DF
(minus:DF
(vec_select:DF
(match_operand:V2DF 1 "register_operand" "0")
(parallel [(const_int 0)]))
(vec_select:DF (match_dup 1) (parallel [(const_int 1)])))
(minus:DF
(vec_select:DF
(match_operand:V2DF 2 "nonimmediate_operand" "xm")
(parallel [(const_int 0)]))
(vec_select:DF (match_dup 2) (parallel [(const_int 1)])))))]
"TARGET_SSE3"
"hsubpd\t{%2, %0|%0, %2}"
[(set_attr "type" "sseadd")
(set_attr "mode" "V2DF")])
(define_expand "reduc_splus_v4sf"
[(match_operand:V4SF 0 "register_operand" "")
(match_operand:V4SF 1 "register_operand" "")]
"TARGET_SSE"
{
if (TARGET_SSE3)
{
rtx tmp = gen_reg_rtx (V4SFmode);
emit_insn (gen_sse3_haddv4sf3 (tmp, operands[1], operands[1]));
emit_insn (gen_sse3_haddv4sf3 (operands[0], tmp, tmp));
}
else
ix86_expand_reduc_v4sf (gen_addv4sf3, operands[0], operands[1]);
DONE;
})
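;; Illustration of the SSE3 path above: for operand 1 = {a, b, c, d} the
;; first haddps yields {a+b, c+d, a+b, c+d} and the second leaves the full
;; sum in every element, including element 0 of operand 0.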
(define_expand "reduc_splus_v2df"
[(match_operand:V2DF 0 "register_operand" "")
(match_operand:V2DF 1 "register_operand" "")]
"TARGET_SSE3"
{
emit_insn (gen_sse3_haddv2df3 (operands[0], operands[1], operands[1]));
DONE;
})
(define_expand "reduc_smax_v4sf"
[(match_operand:V4SF 0 "register_operand" "")
(match_operand:V4SF 1 "register_operand" "")]
"TARGET_SSE"
{
ix86_expand_reduc_v4sf (gen_smaxv4sf3, operands[0], operands[1]);
DONE;
})
(define_expand "reduc_smin_v4sf"
[(match_operand:V4SF 0 "register_operand" "")
(match_operand:V4SF 1 "register_operand" "")]
"TARGET_SSE"
{
ix86_expand_reduc_v4sf (gen_sminv4sf3, operands[0], operands[1]);
DONE;
})
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Parallel floating point comparisons
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(define_insn "_maskcmp3"
[(set (match_operand:SSEMODEF4 0 "register_operand" "=x")
(match_operator:SSEMODEF4 3 "sse_comparison_operator"
[(match_operand:SSEMODEF4 1 "register_operand" "0")
(match_operand:SSEMODEF4 2 "nonimmediate_operand" "xm")]))]
"(SSE_FLOAT_MODE_P (mode) || SSE_VEC_FLOAT_MODE_P (mode))
&& !TARGET_SSE5"
"cmp%D3\t{%2, %0|%0, %2}"
[(set_attr "type" "ssecmp")
(set_attr "mode" "")])
(define_insn "_vmmaskcmp3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(vec_merge:SSEMODEF2P
(match_operator:SSEMODEF2P 3 "sse_comparison_operator"
[(match_operand:SSEMODEF2P 1 "register_operand" "0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "xm")])
(match_dup 1)
(const_int 1)))]
"SSE_VEC_FLOAT_MODE_P (mode) && !TARGET_SSE5"
"cmp%D3s\t{%2, %0|%0, %2}"
[(set_attr "type" "ssecmp")
(set_attr "mode" "")])
(define_insn "_comi"
[(set (reg:CCFP FLAGS_REG)
(compare:CCFP
(vec_select:MODEF
(match_operand: 0 "register_operand" "x")
(parallel [(const_int 0)]))
(vec_select:MODEF
(match_operand: 1 "nonimmediate_operand" "xm")
(parallel [(const_int 0)]))))]
"SSE_FLOAT_MODE_P (mode)"
"comis\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecomi")
(set_attr "mode" "")])
(define_insn "_ucomi"
[(set (reg:CCFPU FLAGS_REG)
(compare:CCFPU
(vec_select:MODEF
(match_operand: 0 "register_operand" "x")
(parallel [(const_int 0)]))
(vec_select:MODEF
(match_operand: 1 "nonimmediate_operand" "xm")
(parallel [(const_int 0)]))))]
"SSE_FLOAT_MODE_P (mode)"
"ucomis\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecomi")
(set_attr "mode" "")])
(define_expand "vcond"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "")
(if_then_else:SSEMODEF2P
(match_operator 3 ""
[(match_operand:SSEMODEF2P 4 "nonimmediate_operand" "")
(match_operand:SSEMODEF2P 5 "nonimmediate_operand" "")])
(match_operand:SSEMODEF2P 1 "general_operand" "")
(match_operand:SSEMODEF2P 2 "general_operand" "")))]
"SSE_VEC_FLOAT_MODE_P (mode)"
{
if (ix86_expand_fp_vcond (operands))
DONE;
else
FAIL;
})
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Parallel floating point logical operations
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(define_expand "and3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "")
(and:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "")))]
"SSE_VEC_FLOAT_MODE_P (mode)"
"ix86_fixup_binary_operands_no_copy (AND, mode, operands);")
(define_insn "*and3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(and:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "%0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "xm")))]
"SSE_VEC_FLOAT_MODE_P (mode)
&& ix86_binary_operator_ok (AND, V4SFmode, operands)"
"andp\t{%2, %0|%0, %2}"
[(set_attr "type" "sselog")
(set_attr "mode" "")])
(define_insn "_nand3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(and:SSEMODEF2P
(not:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "register_operand" "0"))
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "xm")))]
"SSE_VEC_FLOAT_MODE_P (mode)"
"andnp\t{%2, %0|%0, %2}"
[(set_attr "type" "sselog")
(set_attr "mode" "")])
(define_expand "ior3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "")
(ior:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "")))]
"SSE_VEC_FLOAT_MODE_P (mode)"
"ix86_fixup_binary_operands_no_copy (IOR, mode, operands);")
(define_insn "*ior3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(ior:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "%0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "xm")))]
"SSE_VEC_FLOAT_MODE_P (mode)
&& ix86_binary_operator_ok (IOR, mode, operands)"
"orp\t{%2, %0|%0, %2}"
[(set_attr "type" "sselog")
(set_attr "mode" "")])
(define_expand "xor3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "")
(xor:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "")))]
"SSE_VEC_FLOAT_MODE_P (mode)"
"ix86_fixup_binary_operands_no_copy (XOR, mode, operands);")
(define_insn "*xor3"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(xor:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "%0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "xm")))]
"SSE_VEC_FLOAT_MODE_P (mode)
&& ix86_binary_operator_ok (XOR, mode, operands)"
"xorp\t{%2, %0|%0, %2}"
[(set_attr "type" "sselog")
(set_attr "mode" "")])
;; Also define scalar versions. These are used for abs, neg, and
;; conditional move. Using subregs into vector modes causes register
;; allocation lossage. These patterns do not allow memory operands
;; because the native instructions read the full 128-bits.
(define_insn "*and3"
[(set (match_operand:MODEF 0 "register_operand" "=x")
(and:MODEF
(match_operand:MODEF 1 "register_operand" "0")
(match_operand:MODEF 2 "register_operand" "x")))]
"SSE_FLOAT_MODE_P (mode)"
"andp\t{%2, %0|%0, %2}"
[(set_attr "type" "sselog")
(set_attr "mode" "")])
(define_insn "*nand3"
[(set (match_operand:MODEF 0 "register_operand" "=x")
(and:MODEF
(not:MODEF
(match_operand:MODEF 1 "register_operand" "0"))
(match_operand:MODEF 2 "register_operand" "x")))]
"SSE_FLOAT_MODE_P (mode)"
"andnp\t{%2, %0|%0, %2}"
[(set_attr "type" "sselog")
(set_attr "mode" "")])
(define_insn "*ior3"
[(set (match_operand:MODEF 0 "register_operand" "=x")
(ior:MODEF
(match_operand:MODEF 1 "register_operand" "0")
(match_operand:MODEF 2 "register_operand" "x")))]
"SSE_FLOAT_MODE_P (mode)"
"orp\t{%2, %0|%0, %2}"
[(set_attr "type" "sselog")
(set_attr "mode" "")])
(define_insn "*xor3"
[(set (match_operand:MODEF 0 "register_operand" "=x")
(xor:MODEF
(match_operand:MODEF 1 "register_operand" "0")
(match_operand:MODEF 2 "register_operand" "x")))]
"SSE_FLOAT_MODE_P (mode)"
"xorp\t{%2, %0|%0, %2}"
[(set_attr "type" "sselog")
(set_attr "mode" "")])
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; SSE5 floating point multiply/accumulate instructions. This includes the
;; scalar versions of the instructions as well as the vector versions.
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; In order to match (*a * *b) + *c, particularly when vectorizing, allow
;; combine to generate a multiply/add with two memory references. We then
;; split this insn into a load of the destination register from one of the
;; memory operands, followed by the multiply/add. If we don't manage to split
;; the insn, reload will generate the appropriate moves. The reason this is
;; needed is that combine
;; has already folded one of the memory references into both the multiply and
;; add insns, and it can't generate a new pseudo. I.e.:
;; (set (reg1) (mem (addr1)))
;; (set (reg2) (mult (reg1) (mem (addr2))))
;; (set (reg3) (plus (reg2) (mem (addr3))))
(define_insn "sse5_fmadd4"
[(set (match_operand:SSEMODEF4 0 "register_operand" "=x,x,x,x")
(plus:SSEMODEF4
(mult:SSEMODEF4
(match_operand:SSEMODEF4 1 "nonimmediate_operand" "%0,0,x,xm")
(match_operand:SSEMODEF4 2 "nonimmediate_operand" "x,xm,xm,x"))
(match_operand:SSEMODEF4 3 "nonimmediate_operand" "xm,x,0,0")))]
"TARGET_SSE5 && TARGET_FUSED_MADD
&& ix86_sse5_valid_op_p (operands, insn, 4, true, 2)"
"fmadd\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "")])
;; Split fmadd with two memory operands into a load and the fmadd.
(define_split
[(set (match_operand:SSEMODEF4 0 "register_operand" "")
(plus:SSEMODEF4
(mult:SSEMODEF4
(match_operand:SSEMODEF4 1 "nonimmediate_operand" "")
(match_operand:SSEMODEF4 2 "nonimmediate_operand" ""))
(match_operand:SSEMODEF4 3 "nonimmediate_operand" "")))]
"TARGET_SSE5
&& !ix86_sse5_valid_op_p (operands, insn, 4, true, 1)
&& ix86_sse5_valid_op_p (operands, insn, 4, true, 2)
&& !reg_mentioned_p (operands[0], operands[1])
&& !reg_mentioned_p (operands[0], operands[2])
&& !reg_mentioned_p (operands[0], operands[3])"
[(const_int 0)]
{
ix86_expand_sse5_multiple_memory (operands, 4, <MODE>mode);
emit_insn (gen_sse5_fmadd<mode>4 (operands[0], operands[1],
operands[2], operands[3]));
DONE;
})
;; For the scalar operations, use operand1 for the upper words that aren't
;; modified, so restrict the forms that are generated.
;; Scalar version of fmadd
(define_insn "sse5_vmfmadd4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x")
(vec_merge:SSEMODEF2P
(plus:SSEMODEF2P
(mult:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "0,0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm"))
(match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x"))
(match_dup 1)
(const_int 1)))]
"TARGET_SSE5 && TARGET_FUSED_MADD
&& ix86_sse5_valid_op_p (operands, insn, 4, true, 1)"
"fmadd\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "")])
;; Floating multiply and subtract
;; Allow two memory operands the same as fmadd
(define_insn "sse5_fmsub4"
[(set (match_operand:SSEMODEF4 0 "register_operand" "=x,x,x,x")
(minus:SSEMODEF4
(mult:SSEMODEF4
(match_operand:SSEMODEF4 1 "nonimmediate_operand" "%0,0,x,xm")
(match_operand:SSEMODEF4 2 "nonimmediate_operand" "x,xm,xm,x"))
(match_operand:SSEMODEF4 3 "nonimmediate_operand" "xm,x,0,0")))]
"TARGET_SSE5 && TARGET_FUSED_MADD
&& ix86_sse5_valid_op_p (operands, insn, 4, true, 2)"
"fmsub\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "")])
;; Split fmsub with two memory operands into a load and the fmsub.
(define_split
[(set (match_operand:SSEMODEF4 0 "register_operand" "")
(minus:SSEMODEF4
(mult:SSEMODEF4
(match_operand:SSEMODEF4 1 "nonimmediate_operand" "")
(match_operand:SSEMODEF4 2 "nonimmediate_operand" ""))
(match_operand:SSEMODEF4 3 "nonimmediate_operand" "")))]
"TARGET_SSE5
&& !ix86_sse5_valid_op_p (operands, insn, 4, true, 1)
&& ix86_sse5_valid_op_p (operands, insn, 4, true, 2)
&& !reg_mentioned_p (operands[0], operands[1])
&& !reg_mentioned_p (operands[0], operands[2])
&& !reg_mentioned_p (operands[0], operands[3])"
[(const_int 0)]
{
ix86_expand_sse5_multiple_memory (operands, 4, <MODE>mode);
emit_insn (gen_sse5_fmsub<mode>4 (operands[0], operands[1],
operands[2], operands[3]));
DONE;
})
;; For the scalar operations, use operand1 for the upper words that aren't
;; modified, so restrict the forms that are generated.
;; Scalar version of fmsub
(define_insn "sse5_vmfmsub4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x")
(vec_merge:SSEMODEF2P
(minus:SSEMODEF2P
(mult:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "0,0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm"))
(match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x"))
(match_dup 1)
(const_int 1)))]
"TARGET_SSE5 && TARGET_FUSED_MADD
&& ix86_sse5_valid_op_p (operands, insn, 4, true, 1)"
"fmsub\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "")])
;; Floating point negative multiply and add
;; Rewrite (- (a * b) + c) into the canonical form: c - (a * b)
;; Note operands are out of order to simplify call to ix86_sse5_valid_op_p
;; Allow two memory operands to help in optimizing.
(define_insn "sse5_fnmadd4"
[(set (match_operand:SSEMODEF4 0 "register_operand" "=x,x,x,x")
(minus:SSEMODEF4
(match_operand:SSEMODEF4 3 "nonimmediate_operand" "xm,x,0,0")
(mult:SSEMODEF4
(match_operand:SSEMODEF4 1 "nonimmediate_operand" "%0,0,x,xm")
(match_operand:SSEMODEF4 2 "nonimmediate_operand" "x,xm,xm,x"))))]
"TARGET_SSE5 && TARGET_FUSED_MADD
&& ix86_sse5_valid_op_p (operands, insn, 4, true, 2)"
"fnmadd\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "")])
;; Split fnmadd with two memory operands into a load and the fnmadd.
(define_split
[(set (match_operand:SSEMODEF4 0 "register_operand" "")
(minus:SSEMODEF4
(match_operand:SSEMODEF4 3 "nonimmediate_operand" "")
(mult:SSEMODEF4
(match_operand:SSEMODEF4 1 "nonimmediate_operand" "")
(match_operand:SSEMODEF4 2 "nonimmediate_operand" ""))))]
"TARGET_SSE5
&& !ix86_sse5_valid_op_p (operands, insn, 4, true, 1)
&& ix86_sse5_valid_op_p (operands, insn, 4, true, 2)
&& !reg_mentioned_p (operands[0], operands[1])
&& !reg_mentioned_p (operands[0], operands[2])
&& !reg_mentioned_p (operands[0], operands[3])"
[(const_int 0)]
{
ix86_expand_sse5_multiple_memory (operands, 4, <MODE>mode);
emit_insn (gen_sse5_fnmadd<mode>4 (operands[0], operands[1],
operands[2], operands[3]));
DONE;
})
;; For the scalar operations, use operand1 for the upper words that aren't
;; modified, so restrict the forms that are generated.
;; Scalar version of fnmadd
(define_insn "sse5_vmfnmadd4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x")
(vec_merge:SSEMODEF2P
(minus:SSEMODEF2P
(match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x")
(mult:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "0,0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm")))
(match_dup 1)
(const_int 1)))]
"TARGET_SSE5 && TARGET_FUSED_MADD
&& ix86_sse5_valid_op_p (operands, insn, 4, true, 1)"
"fnmadd\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "")])
;; Floating point negative multiply and subtract
;; Rewrite (- (a * b) - c) into the canonical form: ((-a) * b) - c
;; Allow 2 memory operands to help with optimization
(define_insn "sse5_fnmsub4"
[(set (match_operand:SSEMODEF4 0 "register_operand" "=x,x")
(minus:SSEMODEF4
(mult:SSEMODEF4
(neg:SSEMODEF4
(match_operand:SSEMODEF4 1 "nonimmediate_operand" "0,0"))
(match_operand:SSEMODEF4 2 "nonimmediate_operand" "x,xm"))
(match_operand:SSEMODEF4 3 "nonimmediate_operand" "xm,x")))]
"TARGET_SSE5 && TARGET_FUSED_MADD
&& ix86_sse5_valid_op_p (operands, insn, 4, true, 2)"
"fnmsub\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "")])
;; Split fnmsub with two memory operands into a load and the fnmsub.
(define_split
[(set (match_operand:SSEMODEF4 0 "register_operand" "")
(minus:SSEMODEF4
(mult:SSEMODEF4
(neg:SSEMODEF4
(match_operand:SSEMODEF4 1 "nonimmediate_operand" ""))
(match_operand:SSEMODEF4 2 "nonimmediate_operand" ""))
(match_operand:SSEMODEF4 3 "nonimmediate_operand" "")))]
"TARGET_SSE5
&& !ix86_sse5_valid_op_p (operands, insn, 4, true, 1)
&& ix86_sse5_valid_op_p (operands, insn, 4, true, 2)
&& !reg_mentioned_p (operands[0], operands[1])
&& !reg_mentioned_p (operands[0], operands[2])
&& !reg_mentioned_p (operands[0], operands[3])"
[(const_int 0)]
{
ix86_expand_sse5_multiple_memory (operands, 4, <MODE>mode);
emit_insn (gen_sse5_fnmsub<mode>4 (operands[0], operands[1],
operands[2], operands[3]));
DONE;
})
;; For the scalar operations, use operand1 for the upper words that aren't
;; modified, so restrict the forms that are generated.
;; Scalar version of fnmsub
(define_insn "sse5_vmfnmsub4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x")
(vec_merge:SSEMODEF2P
(minus:SSEMODEF2P
(mult:SSEMODEF2P
(neg:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "0,0"))
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm"))
(match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x"))
(match_dup 1)
(const_int 1)))]
"TARGET_SSE5 && TARGET_FUSED_MADD
&& ix86_sse5_valid_op_p (operands, insn, 4, true, 2)"
"fnmsub\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "")])
;; The same instructions using an UNSPEC to allow the intrinsic to be used
;; even if the user used -mno-fused-madd
;; Parallel instructions. During instruction generation, just default
;; to registers, and let combine later build the appropriate instruction.
(define_expand "sse5i_fmadd4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "")
(unspec:SSEMODEF2P
[(plus:SSEMODEF2P
(mult:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "register_operand" "")
(match_operand:SSEMODEF2P 2 "register_operand" ""))
(match_operand:SSEMODEF2P 3 "register_operand" ""))]
UNSPEC_SSE5_INTRINSIC))]
"TARGET_SSE5"
{
/* If we have -mfused-madd, emit the normal insn rather than the UNSPEC */
if (TARGET_FUSED_MADD)
{
emit_insn (gen_sse5_fmadd<mode>4 (operands[0], operands[1],
operands[2], operands[3]));
DONE;
}
})
(define_insn "*sse5i_fmadd4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x,x,x")
(unspec:SSEMODEF2P
[(plus:SSEMODEF2P
(mult:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "%0,0,x,xm")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm,xm,x"))
(match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x,0,0"))]
UNSPEC_SSE5_INTRINSIC))]
"TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, true, 1)"
"fmadd\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "")])
(define_expand "sse5i_fmsub4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "")
(unspec:SSEMODEF2P
[(minus:SSEMODEF2P
(mult:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "register_operand" "")
(match_operand:SSEMODEF2P 2 "register_operand" ""))
(match_operand:SSEMODEF2P 3 "register_operand" ""))]
UNSPEC_SSE5_INTRINSIC))]
"TARGET_SSE5"
{
/* If we have -mfused-madd, emit the normal insn rather than the UNSPEC */
if (TARGET_FUSED_MADD)
{
emit_insn (gen_sse5_fmsub<mode>4 (operands[0], operands[1],
operands[2], operands[3]));
DONE;
}
})
(define_insn "*sse5i_fmsub4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x,x,x")
(unspec:SSEMODEF2P
[(minus:SSEMODEF2P
(mult:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "register_operand" "%0,0,x,xm")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm,xm,x"))
(match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x,0,0"))]
UNSPEC_SSE5_INTRINSIC))]
"TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, true, 1)"
"fmsub\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "")])
;; Rewrite (- (a * b) + c) into the canonical form: c - (a * b)
;; Note operands are out of order to simplify call to ix86_sse5_valid_op_p
(define_expand "sse5i_fnmadd4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "")
(unspec:SSEMODEF2P
[(minus:SSEMODEF2P
(match_operand:SSEMODEF2P 3 "register_operand" "")
(mult:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "register_operand" "")
(match_operand:SSEMODEF2P 2 "register_operand" "")))]
UNSPEC_SSE5_INTRINSIC))]
"TARGET_SSE5"
{
/* If we have -mfused-madd, emit the normal insn rather than the UNSPEC */
if (TARGET_FUSED_MADD)
{
emit_insn (gen_sse5_fnmadd<mode>4 (operands[0], operands[1],
operands[2], operands[3]));
DONE;
}
})
(define_insn "*sse5i_fnmadd4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x,x,x")
(unspec:SSEMODEF2P
[(minus:SSEMODEF2P
(match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x,0,0")
(mult:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "%0,0,x,xm")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm,xm,x")))]
UNSPEC_SSE5_INTRINSIC))]
"TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, true, 1)"
"fnmadd\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "")])
;; Rewrite (- (a * b) - c) into the canonical form: ((-a) * b) - c
(define_expand "sse5i_fnmsub4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "")
(unspec:SSEMODEF2P
[(minus:SSEMODEF2P
(mult:SSEMODEF2P
(neg:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "register_operand" ""))
(match_operand:SSEMODEF2P 2 "register_operand" ""))
(match_operand:SSEMODEF2P 3 "register_operand" ""))]
UNSPEC_SSE5_INTRINSIC))]
"TARGET_SSE5"
{
/* If we have -mfused-madd, emit the normal insn rather than the UNSPEC */
if (TARGET_FUSED_MADD)
{
emit_insn (gen_sse5_fnmsub<mode>4 (operands[0], operands[1],
operands[2], operands[3]));
DONE;
}
})
(define_insn "*sse5i_fnmsub4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x,x,x")
(unspec:SSEMODEF2P
[(minus:SSEMODEF2P
(mult:SSEMODEF2P
(neg:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "%0,0,x,xm"))
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm,xm,x"))
(match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x,0,0"))]
UNSPEC_SSE5_INTRINSIC))]
"TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, true, 1)"
"fnmsub\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "")])
;; Scalar instructions
(define_expand "sse5i_vmfmadd4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "")
(unspec:SSEMODEF2P
[(vec_merge:SSEMODEF2P
(plus:SSEMODEF2P
(mult:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "register_operand" "")
(match_operand:SSEMODEF2P 2 "register_operand" ""))
(match_operand:SSEMODEF2P 3 "register_operand" ""))
(match_dup 1)
(const_int 0))]
UNSPEC_SSE5_INTRINSIC))]
"TARGET_SSE5"
{
/* If we have -mfused-madd, emit the normal insn rather than the UNSPEC */
if (TARGET_FUSED_MADD)
{
emit_insn (gen_sse5_vmfmadd<mode>4 (operands[0], operands[1],
operands[2], operands[3]));
DONE;
}
})
;; For the scalar operations, use operand1 for the upper words that aren't
;; modified, so restrict the forms that are accepted.
(define_insn "*sse5i_vmfmadd4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x")
(unspec:SSEMODEF2P
[(vec_merge:SSEMODEF2P
(plus:SSEMODEF2P
(mult:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "register_operand" "0,0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm"))
(match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x"))
(match_dup 0)
(const_int 0))]
UNSPEC_SSE5_INTRINSIC))]
"TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, true, 1)"
"fmadd\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "")])
(define_expand "sse5i_vmfmsub4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "")
(unspec:SSEMODEF2P
[(vec_merge:SSEMODEF2P
(minus:SSEMODEF2P
(mult:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "register_operand" "")
(match_operand:SSEMODEF2P 2 "register_operand" ""))
(match_operand:SSEMODEF2P 3 "register_operand" ""))
(match_dup 0)
(const_int 1))]
UNSPEC_SSE5_INTRINSIC))]
"TARGET_SSE5"
{
/* If we have -mfused-madd, emit the normal insn rather than the UNSPEC */
if (TARGET_FUSED_MADD)
{
emit_insn (gen_sse5_vmfmsub<mode>4 (operands[0], operands[1],
operands[2], operands[3]));
DONE;
}
})
(define_insn "*sse5i_vmfmsub4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x")
(unspec:SSEMODEF2P
[(vec_merge:SSEMODEF2P
(minus:SSEMODEF2P
(mult:SSEMODEF2P
(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "0,0")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm"))
(match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x"))
(match_dup 1)
(const_int 1))]
UNSPEC_SSE5_INTRINSIC))]
"TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, true, 1)"
"fmsub\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "")])
;; Note operands are out of order to simplify call to ix86_sse5_valid_op_p
(define_expand "sse5i_vmfnmadd