author    Uros Bizjak <uros@gcc.gnu.org>    2011-04-16 10:34:30 +0200
committer Uros Bizjak <uros@gcc.gnu.org>    2011-04-16 10:34:30 +0200
commit    e81b856471b18de439f9157932ee8aa0fa190928
tree      6f5c049832df3c28a43f28b8c73499a92cae6ec8 /gcc/config
parent    271d3f8d24a5b54ce5f1462fa1f6aa48b0d28d89
sse.md (V16): New mode iterator.
* config/i386/sse.md (V16): New mode iterator.
(VI1, VI8): Ditto.
(AVXMODEQI, AVXMODEDI): Remove.
(sse2, sse3): New mode attributes.
(mov<mode>): Use V16 mode iterator.
(*mov<mode>_internal): Merge with *avx_mov<mode>_internal.
(push<mode>1): Use V16 mode iterator.
(movmisalign<mode>): Ditto.
(<sse>_movu<ssemodesuffix><avxmodesuffix>): Merge from
<sse>_movu<ssemodesuffix> and avx_movu<ssemodesuffix><avxmodesuffix>.
(*<sse>_movu<ssemodesuffix><avxmodesuffix>): Merge from
*<sse>_movu<ssemodesuffix> and *avx_movu<ssemodesuffix><avxmodesuffix>.
(<sse2>_movdqu<avxmodesuffix>): Merge from sse2_movdqu and
avx_movdqu<avxmodesuffix>.
(*<sse2>_movdqu<avxmodesuffix>): Merge from *sse2_movdqu and
*avx_movdqu<avxmodesuffix>.
(<sse3>_lddqu<avxmodesuffix>): Merge from sse3_lddqu and
avx_lddqu<avxmodesuffix>.
(<sse>_movnt<mode>): Merge with avx_movnt<AVXMODEF2P:mode>.
(<sse2>_movnt<mode>): Merge from sse2_movntv2di and
avx_movnt<AVXMODEDI:mode>.
* config/i386/i386.c (ix86_expand_vector_move_misalign): Update for
renamed sse_movups, sse2_movupd and sse2_movdqu patterns.

testsuite/ChangeLog:

* gcc.target/i386/avx256-unaligned-load-1.c: Update scan patterns.
* gcc.target/i386/avx256-unaligned-load-2.c: Ditto.
* gcc.target/i386/avx256-unaligned-load-3.c: Ditto.
* gcc.target/i386/avx256-unaligned-store-1.c: Ditto.
* gcc.target/i386/avx256-unaligned-store-2.c: Ditto.
* gcc.target/i386/avx256-unaligned-store-3.c: Ditto.

From-SVN: r172580
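For readers new to machine-description mode iterators: a condition string
attached to a mode inside define_mode_iterator is ANDed into the condition
of every pattern instantiated for that mode, which is how the new V16
iterator keeps the 256-bit modes gated on AVX. A minimal sketch of the
mechanism (VSAMPLE is an invented name standing in for V16, not an iterator
from this patch; the trailing comments describe the effective result, not
literal generator output):

;; Sketch only: VSAMPLE is hypothetical, standing in for V16.
;; The "TARGET_AVX" string on V32QI is ANDed into that mode's
;; instantiation; the bare V16QI entry adds no extra test.
(define_mode_iterator VSAMPLE [(V32QI "TARGET_AVX") V16QI])

(define_expand "mov<mode>"
  [(set (match_operand:VSAMPLE 0 "nonimmediate_operand" "")
        (match_operand:VSAMPLE 1 "nonimmediate_operand" ""))]
  "TARGET_SSE"
{
  ix86_expand_vector_move (<MODE>mode, operands);
  DONE;
})

;; Effectively yields two expanders:
;;   movv16qi, enabled for TARGET_SSE
;;   movv32qi, enabled for TARGET_SSE && TARGET_AVX
;; so one V16 pattern in the diff below replaces the separate
;; SSEMODE16 and AVX256MODE copies.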
Diffstat (limited to 'gcc/config')
-rw-r--r--  gcc/config/i386/i386.c |  10
-rw-r--r--  gcc/config/i386/sse.md | 385
2 files changed, 150 insertions(+), 245 deletions(-)
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 6218702..36320d8 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -15769,12 +15769,12 @@ ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
{
op0 = gen_lowpart (V4SFmode, op0);
op1 = gen_lowpart (V4SFmode, op1);
- emit_insn (gen_avx_movups (op0, op1));
+ emit_insn (gen_sse_movups (op0, op1));
return;
}
op0 = gen_lowpart (V16QImode, op0);
op1 = gen_lowpart (V16QImode, op1);
- emit_insn (gen_avx_movdqu (op0, op1));
+ emit_insn (gen_sse2_movdqu (op0, op1));
break;
case 32:
op0 = gen_lowpart (V32QImode, op0);
@@ -15792,7 +15792,7 @@ ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
switch (mode)
{
case V4SFmode:
- emit_insn (gen_avx_movups (op0, op1));
+ emit_insn (gen_sse_movups (op0, op1));
break;
case V8SFmode:
ix86_avx256_split_vector_move_misalign (op0, op1);
@@ -15802,10 +15802,10 @@ ix86_expand_vector_move_misalign (enum machine_mode mode, rtx operands[])
{
op0 = gen_lowpart (V4SFmode, op0);
op1 = gen_lowpart (V4SFmode, op1);
- emit_insn (gen_avx_movups (op0, op1));
+ emit_insn (gen_sse_movups (op0, op1));
return;
}
- emit_insn (gen_avx_movupd (op0, op1));
+ emit_insn (gen_sse2_movupd (op0, op1));
break;
case V4DFmode:
ix86_avx256_split_vector_move_misalign (op0, op1);
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index fabd35b..40e3972 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -18,6 +18,15 @@
;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.
+;; All vector modes including V1TImode.
+(define_mode_iterator V16
+ [(V32QI "TARGET_AVX") V16QI
+ (V16HI "TARGET_AVX") V8HI
+ (V8SI "TARGET_AVX") V4SI
+ (V4DI "TARGET_AVX") V2DI
+ V1TI
+ (V8SF "TARGET_AVX") V4SF
+ (V4DF "TARGET_AVX") V2DF])
;; All vector float modes
(define_mode_iterator VF
@@ -43,6 +52,14 @@
(V8SI "TARGET_AVX") V4SI
(V4DI "TARGET_AVX") V2DI])
+;; All QImode vector integer modes
+(define_mode_iterator VI1
+ [(V32QI "TARGET_AVX") V16QI])
+
+;; All DImode vector integer modes
+(define_mode_iterator VI8
+ [(V4DI "TARGET_AVX") V2DI])
+
;; All 128bit vector integer modes
(define_mode_iterator VI_128 [V16QI V8HI V4SI V2DI])
@@ -59,22 +76,13 @@
;; All 16-byte vector modes handled by SSE
(define_mode_iterator SSEMODE [V16QI V8HI V4SI V2DI V4SF V2DF])
-(define_mode_iterator SSEMODE16 [V16QI V8HI V4SI V2DI V1TI V4SF V2DF])
;; All 32-byte vector modes handled by AVX
(define_mode_iterator AVX256MODE [V32QI V16HI V8SI V4DI V8SF V4DF])
-;; All QI vector modes handled by AVX
-(define_mode_iterator AVXMODEQI [V32QI V16QI])
-
-;; All DI vector modes handled by AVX
-(define_mode_iterator AVXMODEDI [V4DI V2DI])
-
;; All vector modes handled by AVX
(define_mode_iterator AVXMODE
[V16QI V8HI V4SI V2DI V4SF V2DF V32QI V16HI V8SI V4DI V8SF V4DF])
-(define_mode_iterator AVXMODE16
- [V16QI V8HI V4SI V2DI V1TI V4SF V2DF V32QI V16HI V8SI V4DI V8SF V4DF])
;; Mix-n-match
(define_mode_iterator SSEMODE124 [V16QI V8HI V4SI])
@@ -107,8 +115,8 @@
;; Modes handled by storent patterns.
(define_mode_iterator STORENT_MODE
[(SF "TARGET_SSE4A") (DF "TARGET_SSE4A")
- (SI "TARGET_SSE2") (V2DI "TARGET_SSE2") (V2DF "TARGET_SSE2")
- (V4SF "TARGET_SSE")
+ (SI "TARGET_SSE2") (V2DI "TARGET_SSE2")
+ (V4SF "TARGET_SSE") (V2DF "TARGET_SSE2")
(V4DF "TARGET_AVX") (V8SF "TARGET_AVX")])
;; Modes handled by vector extract patterns.
@@ -124,6 +132,13 @@
(V4SF "sse") (V2DF "sse2")
(V8SF "avx") (V4DF "avx")])
+(define_mode_attr sse2
+ [(V16QI "sse2") (V32QI "avx")
+ (V2DI "sse2") (V4DI "avx")])
+
+(define_mode_attr sse3
+ [(V16QI "sse3") (V32QI "avx")])
+
(define_mode_attr sse4_1
[(V4SF "sse4_1") (V2DF "sse4_1")
(V8SF "avx") (V4DF "avx")])
@@ -192,19 +207,22 @@
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; All of these patterns are enabled for SSE1 as well as SSE2.
+;; This is essential for maintaining stable calling conventions.
+
(define_expand "mov<mode>"
- [(set (match_operand:AVX256MODE 0 "nonimmediate_operand" "")
- (match_operand:AVX256MODE 1 "nonimmediate_operand" ""))]
- "TARGET_AVX"
+ [(set (match_operand:V16 0 "nonimmediate_operand" "")
+ (match_operand:V16 1 "nonimmediate_operand" ""))]
+ "TARGET_SSE"
{
ix86_expand_vector_move (<MODE>mode, operands);
DONE;
})
-(define_insn "*avx_mov<mode>_internal"
- [(set (match_operand:AVXMODE16 0 "nonimmediate_operand" "=x,x ,m")
- (match_operand:AVXMODE16 1 "nonimmediate_or_sse_const_operand" "C ,xm,x"))]
- "TARGET_AVX
+(define_insn "*mov<mode>_internal"
+ [(set (match_operand:V16 0 "nonimmediate_operand" "=x,x ,m")
+ (match_operand:V16 1 "nonimmediate_or_sse_const_operand" "C ,xm,x"))]
+ "TARGET_SSE
&& (register_operand (operands[0], <MODE>mode)
|| register_operand (operands[1], <MODE>mode))"
{
@@ -218,85 +236,51 @@
{
case MODE_V8SF:
case MODE_V4SF:
- if (misaligned_operand (operands[0], <MODE>mode)
- || misaligned_operand (operands[1], <MODE>mode))
+ if (TARGET_AVX
+ && (misaligned_operand (operands[0], <MODE>mode)
+ || misaligned_operand (operands[1], <MODE>mode)))
return "vmovups\t{%1, %0|%0, %1}";
else
- return "vmovaps\t{%1, %0|%0, %1}";
+ return "%vmovaps\t{%1, %0|%0, %1}";
+
case MODE_V4DF:
case MODE_V2DF:
- if (misaligned_operand (operands[0], <MODE>mode)
- || misaligned_operand (operands[1], <MODE>mode))
+ if (TARGET_AVX
+ && (misaligned_operand (operands[0], <MODE>mode)
+ || misaligned_operand (operands[1], <MODE>mode)))
return "vmovupd\t{%1, %0|%0, %1}";
else if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "vmovaps\t{%1, %0|%0, %1}";
+ return "%vmovaps\t{%1, %0|%0, %1}";
else
- return "vmovapd\t{%1, %0|%0, %1}";
- default:
- if (misaligned_operand (operands[0], <MODE>mode)
- || misaligned_operand (operands[1], <MODE>mode))
+ return "%vmovapd\t{%1, %0|%0, %1}";
+
+ case MODE_OI:
+ case MODE_TI:
+ if (TARGET_AVX
+ && (misaligned_operand (operands[0], <MODE>mode)
+ || misaligned_operand (operands[1], <MODE>mode)))
return "vmovdqu\t{%1, %0|%0, %1}";
else if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "vmovaps\t{%1, %0|%0, %1}";
+ return "%vmovaps\t{%1, %0|%0, %1}";
else
- return "vmovdqa\t{%1, %0|%0, %1}";
- }
- default:
- gcc_unreachable ();
- }
-}
- [(set_attr "type" "sselog1,ssemov,ssemov")
- (set_attr "prefix" "vex")
- (set_attr "mode" "<avxvecmode>")])
-
-;; All of these patterns are enabled for SSE1 as well as SSE2.
-;; This is essential for maintaining stable calling conventions.
-
-(define_expand "mov<mode>"
- [(set (match_operand:SSEMODE16 0 "nonimmediate_operand" "")
- (match_operand:SSEMODE16 1 "nonimmediate_operand" ""))]
- "TARGET_SSE"
-{
- ix86_expand_vector_move (<MODE>mode, operands);
- DONE;
-})
+ return "%vmovdqa\t{%1, %0|%0, %1}";
-(define_insn "*mov<mode>_internal"
- [(set (match_operand:SSEMODE16 0 "nonimmediate_operand" "=x,x ,m")
- (match_operand:SSEMODE16 1 "nonimmediate_or_sse_const_operand" "C ,xm,x"))]
- "TARGET_SSE
- && (register_operand (operands[0], <MODE>mode)
- || register_operand (operands[1], <MODE>mode))"
-{
- switch (which_alternative)
- {
- case 0:
- return standard_sse_constant_opcode (insn, operands[1]);
- case 1:
- case 2:
- switch (get_attr_mode (insn))
- {
- case MODE_V4SF:
- return "movaps\t{%1, %0|%0, %1}";
- case MODE_V2DF:
- if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "movaps\t{%1, %0|%0, %1}";
- else
- return "movapd\t{%1, %0|%0, %1}";
default:
- if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "movaps\t{%1, %0|%0, %1}";
- else
- return "movdqa\t{%1, %0|%0, %1}";
+ gcc_unreachable ();
}
default:
gcc_unreachable ();
}
}
[(set_attr "type" "sselog1,ssemov,ssemov")
+ (set_attr "prefix" "maybe_vex")
(set (attr "mode")
- (cond [(ior (ior (ne (symbol_ref "optimize_function_for_size_p (cfun)") (const_int 0))
- (eq (symbol_ref "TARGET_SSE2") (const_int 0)))
+ (cond [(ne (symbol_ref "TARGET_AVX") (const_int 0))
+ (const_string "<avxvecmode>")
+ (ior (ior
+ (ne (symbol_ref "optimize_function_for_size_p (cfun)")
+ (const_int 0))
+ (eq (symbol_ref "TARGET_SSE2") (const_int 0)))
(and (eq_attr "alternative" "2")
(ne (symbol_ref "TARGET_SSE_TYPELESS_STORES")
(const_int 0))))
@@ -308,6 +292,19 @@
]
(const_string "TI")))])
+(define_insn "sse2_movq128"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (vec_concat:V2DI
+ (vec_select:DI
+ (match_operand:V2DI 1 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)]))
+ (const_int 0)))]
+ "TARGET_SSE2"
+ "%vmovq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix" "maybe_vex")
+ (set_attr "mode" "TI")])
+
;; Move a DI from a 32-bit register pair (e.g. %edx:%eax) to an xmm.
;; We'd rather avoid this entirely; if the 32-bit reg pair was loaded
;; from memory, we'd prefer to load the memory directly into the %xmm
@@ -370,15 +367,7 @@
})
(define_expand "push<mode>1"
- [(match_operand:AVX256MODE 0 "register_operand" "")]
- "TARGET_AVX"
-{
- ix86_expand_push (<MODE>mode, operands[0]);
- DONE;
-})
-
-(define_expand "push<mode>1"
- [(match_operand:SSEMODE16 0 "register_operand" "")]
+ [(match_operand:V16 0 "register_operand" "")]
"TARGET_SSE"
{
ix86_expand_push (<MODE>mode, operands[0]);
@@ -386,169 +375,84 @@
})
(define_expand "movmisalign<mode>"
- [(set (match_operand:AVX256MODE 0 "nonimmediate_operand" "")
- (match_operand:AVX256MODE 1 "nonimmediate_operand" ""))]
- "TARGET_AVX"
-{
- ix86_expand_vector_move_misalign (<MODE>mode, operands);
- DONE;
-})
-
-(define_expand "movmisalign<mode>"
- [(set (match_operand:SSEMODE16 0 "nonimmediate_operand" "")
- (match_operand:SSEMODE16 1 "nonimmediate_operand" ""))]
+ [(set (match_operand:V16 0 "nonimmediate_operand" "")
+ (match_operand:V16 1 "nonimmediate_operand" ""))]
"TARGET_SSE"
{
ix86_expand_vector_move_misalign (<MODE>mode, operands);
DONE;
})
-(define_expand "avx_movu<ssemodesuffix><avxmodesuffix>"
- [(set (match_operand:AVXMODEF2P 0 "nonimmediate_operand" "")
- (unspec:AVXMODEF2P
- [(match_operand:AVXMODEF2P 1 "nonimmediate_operand" "")]
+(define_expand "<sse>_movu<ssemodesuffix><avxmodesuffix>"
+ [(set (match_operand:VF 0 "nonimmediate_operand" "")
+ (unspec:VF
+ [(match_operand:VF 1 "nonimmediate_operand" "")]
UNSPEC_MOVU))]
- "AVX_VEC_FLOAT_MODE_P (<MODE>mode)"
+ ""
{
if (MEM_P (operands[0]) && MEM_P (operands[1]))
operands[1] = force_reg (<MODE>mode, operands[1]);
})
-(define_insn "*avx_movu<ssemodesuffix><avxmodesuffix>"
- [(set (match_operand:AVXMODEF2P 0 "nonimmediate_operand" "=x,m")
- (unspec:AVXMODEF2P
- [(match_operand:AVXMODEF2P 1 "nonimmediate_operand" "xm,x")]
+(define_insn "*<sse>_movu<ssemodesuffix><avxmodesuffix>"
+ [(set (match_operand:VF 0 "nonimmediate_operand" "=x,m")
+ (unspec:VF
+ [(match_operand:VF 1 "nonimmediate_operand" "xm,x")]
UNSPEC_MOVU))]
- "AVX_VEC_FLOAT_MODE_P (<MODE>mode)
- && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
- "vmovu<ssemodesuffix>\t{%1, %0|%0, %1}"
+ "!(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "%vmovu<ssemodesuffix>\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "movu" "1")
- (set_attr "prefix" "vex")
- (set_attr "mode" "<MODE>")])
-
-(define_insn "sse2_movq128"
- [(set (match_operand:V2DI 0 "register_operand" "=x")
- (vec_concat:V2DI
- (vec_select:DI
- (match_operand:V2DI 1 "nonimmediate_operand" "xm")
- (parallel [(const_int 0)]))
- (const_int 0)))]
- "TARGET_SSE2"
- "%vmovq\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssemov")
(set_attr "prefix" "maybe_vex")
- (set_attr "mode" "TI")])
-
-(define_expand "<sse>_movu<ssemodesuffix>"
- [(set (match_operand:SSEMODEF2P 0 "nonimmediate_operand" "")
- (unspec:SSEMODEF2P
- [(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "")]
- UNSPEC_MOVU))]
- "SSE_VEC_FLOAT_MODE_P (<MODE>mode)"
-{
- if (MEM_P (operands[0]) && MEM_P (operands[1]))
- operands[1] = force_reg (<MODE>mode, operands[1]);
-})
-
-(define_insn "*<sse>_movu<ssemodesuffix>"
- [(set (match_operand:SSEMODEF2P 0 "nonimmediate_operand" "=x,m")
- (unspec:SSEMODEF2P
- [(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "xm,x")]
- UNSPEC_MOVU))]
- "SSE_VEC_FLOAT_MODE_P (<MODE>mode)
- && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
- "movu<ssemodesuffix>\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssemov")
- (set_attr "movu" "1")
(set_attr "mode" "<MODE>")])
-(define_expand "avx_movdqu<avxmodesuffix>"
- [(set (match_operand:AVXMODEQI 0 "nonimmediate_operand" "")
- (unspec:AVXMODEQI
- [(match_operand:AVXMODEQI 1 "nonimmediate_operand" "")]
- UNSPEC_MOVU))]
- "TARGET_AVX"
+(define_expand "<sse2>_movdqu<avxmodesuffix>"
+ [(set (match_operand:VI1 0 "nonimmediate_operand" "")
+ (unspec:VI1 [(match_operand:VI1 1 "nonimmediate_operand" "")]
+ UNSPEC_MOVU))]
+ "TARGET_SSE2"
{
if (MEM_P (operands[0]) && MEM_P (operands[1]))
operands[1] = force_reg (<MODE>mode, operands[1]);
})
-(define_insn "*avx_movdqu<avxmodesuffix>"
- [(set (match_operand:AVXMODEQI 0 "nonimmediate_operand" "=x,m")
- (unspec:AVXMODEQI
- [(match_operand:AVXMODEQI 1 "nonimmediate_operand" "xm,x")]
- UNSPEC_MOVU))]
- "TARGET_AVX && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
- "vmovdqu\t{%1, %0|%0, %1}"
+(define_insn "*<sse2>_movdqu<avxmodesuffix>"
+ [(set (match_operand:VI1 0 "nonimmediate_operand" "=x,m")
+ (unspec:VI1 [(match_operand:VI1 1 "nonimmediate_operand" "xm,x")]
+ UNSPEC_MOVU))]
+ "TARGET_SSE2 && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "%vmovdqu\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "movu" "1")
- (set_attr "prefix" "vex")
+ (set (attr "prefix_data16")
+ (if_then_else
+ (ne (symbol_ref "TARGET_AVX") (const_int 0))
+ (const_string "*")
+ (const_string "1")))
+ (set_attr "prefix" "maybe_vex")
(set_attr "mode" "<avxvecmode>")])
-(define_expand "sse2_movdqu"
- [(set (match_operand:V16QI 0 "nonimmediate_operand" "")
- (unspec:V16QI [(match_operand:V16QI 1 "nonimmediate_operand" "")]
- UNSPEC_MOVU))]
- "TARGET_SSE2"
-{
- if (MEM_P (operands[0]) && MEM_P (operands[1]))
- operands[1] = force_reg (V16QImode, operands[1]);
-})
-
-(define_insn "*sse2_movdqu"
- [(set (match_operand:V16QI 0 "nonimmediate_operand" "=x,m")
- (unspec:V16QI [(match_operand:V16QI 1 "nonimmediate_operand" "xm,x")]
- UNSPEC_MOVU))]
- "TARGET_SSE2 && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
- "movdqu\t{%1, %0|%0, %1}"
+(define_insn "<sse3>_lddqu<avxmodesuffix>"
+ [(set (match_operand:VI1 0 "register_operand" "=x")
+ (unspec:VI1 [(match_operand:VI1 1 "memory_operand" "m")]
+ UNSPEC_LDDQU))]
+ "TARGET_SSE3"
+ "%vlddqu\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "movu" "1")
- (set_attr "prefix_data16" "1")
- (set_attr "mode" "TI")])
-
-(define_insn "avx_movnt<mode>"
- [(set (match_operand:AVXMODEF2P 0 "memory_operand" "=m")
- (unspec:AVXMODEF2P
- [(match_operand:AVXMODEF2P 1 "register_operand" "x")]
- UNSPEC_MOVNT))]
- "AVX_VEC_FLOAT_MODE_P (<MODE>mode)"
- "vmovnt<ssemodesuffix>\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssemov")
- (set_attr "prefix" "vex")
- (set_attr "mode" "<MODE>")])
-
-(define_insn "<sse>_movnt<mode>"
- [(set (match_operand:SSEMODEF2P 0 "memory_operand" "=m")
- (unspec:SSEMODEF2P
- [(match_operand:SSEMODEF2P 1 "register_operand" "x")]
- UNSPEC_MOVNT))]
- "SSE_VEC_FLOAT_MODE_P (<MODE>mode)"
- "movnt<ssemodesuffix>\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssemov")
- (set_attr "mode" "<MODE>")])
-
-(define_insn "avx_movnt<mode>"
- [(set (match_operand:AVXMODEDI 0 "memory_operand" "=m")
- (unspec:AVXMODEDI
- [(match_operand:AVXMODEDI 1 "register_operand" "x")]
- UNSPEC_MOVNT))]
- "TARGET_AVX"
- "vmovntdq\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssecvt")
- (set_attr "prefix" "vex")
+ (set (attr "prefix_data16")
+ (if_then_else
+ (ne (symbol_ref "TARGET_AVX") (const_int 0))
+ (const_string "*")
+ (const_string "0")))
+ (set (attr "prefix_rep")
+ (if_then_else
+ (ne (symbol_ref "TARGET_AVX") (const_int 0))
+ (const_string "*")
+ (const_string "1")))
+ (set_attr "prefix" "maybe_vex")
(set_attr "mode" "<avxvecmode>")])
-(define_insn "sse2_movntv2di"
- [(set (match_operand:V2DI 0 "memory_operand" "=m")
- (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "x")]
- UNSPEC_MOVNT))]
- "TARGET_SSE2"
- "movntdq\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssemov")
- (set_attr "prefix_data16" "1")
- (set_attr "mode" "TI")])
-
(define_insn "sse2_movntsi"
[(set (match_operand:SI 0 "memory_operand" "=m")
(unspec:SI [(match_operand:SI 1 "register_operand" "r")]
@@ -559,30 +463,31 @@
(set_attr "prefix_data16" "0")
(set_attr "mode" "V2DF")])
-(define_insn "avx_lddqu<avxmodesuffix>"
- [(set (match_operand:AVXMODEQI 0 "register_operand" "=x")
- (unspec:AVXMODEQI
- [(match_operand:AVXMODEQI 1 "memory_operand" "m")]
- UNSPEC_LDDQU))]
- "TARGET_AVX"
- "vlddqu\t{%1, %0|%0, %1}"
+(define_insn "<sse>_movnt<mode>"
+ [(set (match_operand:VF 0 "memory_operand" "=m")
+ (unspec:VF [(match_operand:VF 1 "register_operand" "x")]
+ UNSPEC_MOVNT))]
+ "TARGET_SSE"
+ "%vmovnt<ssemodesuffix>\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix" "maybe_vex")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "<sse2>_movnt<mode>"
+ [(set (match_operand:VI8 0 "memory_operand" "=m")
+ (unspec:VI8 [(match_operand:VI8 1 "register_operand" "x")]
+ UNSPEC_MOVNT))]
+ "TARGET_SSE2"
+ "%vmovntdq\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecvt")
- (set_attr "movu" "1")
- (set_attr "prefix" "vex")
+ (set (attr "prefix_data16")
+ (if_then_else
+ (ne (symbol_ref "TARGET_AVX") (const_int 0))
+ (const_string "*")
+ (const_string "1")))
+ (set_attr "prefix" "maybe_vex")
(set_attr "mode" "<avxvecmode>")])
-(define_insn "sse3_lddqu"
- [(set (match_operand:V16QI 0 "register_operand" "=x")
- (unspec:V16QI [(match_operand:V16QI 1 "memory_operand" "m")]
- UNSPEC_LDDQU))]
- "TARGET_SSE3"
- "lddqu\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssemov")
- (set_attr "movu" "1")
- (set_attr "prefix_data16" "0")
- (set_attr "prefix_rep" "1")
- (set_attr "mode" "TI")])
-
; Expand patterns for non-temporal stores. At the moment, only those
; that directly map to insns are defined; it would be possible to
; define patterns for other modes that would expand to several insns.
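The merged templates above rely on two related idioms, summarized here as a
hedged note rather than text from the commit: the "%v" marker in an output
template makes the i386 assembler printer emit a "v" opcode prefix only when
TARGET_AVX is enabled, and the "maybe_vex" prefix attribute records that the
insn is VEX-encoded exactly in that case. A simplified stand-in pattern (not
one from this patch) showing the effect:

;; Sketch only: a simplified stand-in pattern, not one from this patch.
(define_insn "*sample_movaps"
  [(set (match_operand:V4SF 0 "register_operand" "=x")
        (match_operand:V4SF 1 "nonimmediate_operand" "xm"))]
  "TARGET_SSE"
  ;; One template, two encodings:
  ;;   with -mavx:    vmovaps %xmm1, %xmm0  (VEX-encoded)
  ;;   without -mavx: movaps  %xmm1, %xmm0  (legacy SSE encoding)
  "%vmovaps\t{%1, %0|%0, %1}"
  [(set_attr "type" "ssemov")
   (set_attr "prefix" "maybe_vex")
   (set_attr "mode" "V4SF")])

This is why the commit can drop the duplicated AVX-only patterns with
(set_attr "prefix" "vex"): one shared pattern stays usable on plain SSE
hardware while automatically picking up the VEX form under AVX.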