author     Richard Henderson <rth@redhat.com>       2005-01-19 22:47:28 -0800
committer  Richard Henderson <rth@gcc.gnu.org>      2005-01-19 22:47:28 -0800
commit     f75959a6a36a08908bfb66af87f67b00e056a4c4 (patch)
tree       78c8580bac2ffbd2c711f0a2f348e76472dc1e0d /gcc
parent     b100079f2f901d64b6a0f3f996ba3a6d96e508c6 (diff)
re PR target/19511 (ICE in reload_cse_simplify_operands, at postreload.c:391)
	PR target/19511
	* config/i386/i386.c (ix86_preferred_reload_class): Return a proper
	subclass of the input class.
	(ix86_secondary_memory_needed): Always true for cross-MMX classes.
	Always true for cross-SSE1 classes.  Rationalize conditionals.
	* config/i386/i386.h (SSE_CLASS_P, MMX_CLASS_P): Use straight equality.
	* config/i386/i386.md (movsi_1): Add MMX/SSE zeros.  Fix alternatives
	for SSE1.  Don't check TARGET_INTER_UNIT_MOVES.
	(movdi_2): Add MMX/SSE zeros.
	(movdi_1_rex64): Likewise.  Don't check TARGET_INTER_UNIT_MOVES.
	(movsf_1): Don't check TARGET_INTER_UNIT_MOVES.
	(zero_extendsidi2_32, zero_extendsidi2_rex64): Likewise.
	(movsi_1_nointernunit, movdi_1_rex64_nointerunit): Remove.
	(movsf_1_nointerunit, zero_extendsidi2_32_1): Remove.
	(zero_extendsidi2_rex64_1): Remove.
	(MOV0 peephole): Check GENERAL_REG_P.

From-SVN: r93948
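For readers skimming the i386.c hunk, the following is a condensed, standalone sketch of the decision order this patch gives ix86_secondary_memory_needed. The function name and its boolean/size parameters are invented for illustration only; they stand in for the real TARGET_SSE2, TARGET_INTER_UNIT_MOVES, optimize_size, GET_MODE_SIZE and UNITS_PER_WORD checks that appear in the diff below, and the real hook additionally validates the register classes (the strict/abort path) and takes enum reg_class arguments.

    /* Condensed sketch (not the committed GCC code) of the reworked
       cross-class decision logic.  Parameter names are hypothetical.  */
    #include <stdbool.h>

    static bool
    cross_class_needs_memory_sketch (bool cross_fp, bool cross_mmx,
                                     bool cross_sse, bool have_sse2,
                                     bool inter_unit_moves_ok,
                                     bool optimizing_for_size,
                                     unsigned mode_size, unsigned word_size,
                                     bool mode_is_sf_or_df)
    {
      /* x87 <-> anything else always goes through memory.  */
      if (cross_fp)
        return true;

      /* Cross-MMX moves claim to need memory so the register allocator
         avoids MMX registers unless they are really needed (see the ???
         comment in the patch).  */
      if (cross_mmx)
        return true;

      if (cross_sse)
        {
          if (!have_sse2)
            return true;   /* SSE1 has no direct moves to other classes.  */
          if (!inter_unit_moves_ok && !optimizing_for_size)
            return true;   /* Inter-unit moves deemed too expensive.  */
          if (mode_size > word_size)
            return true;   /* movd/movq handle at most a word.  */
          if (mode_is_sf_or_df)
            return true;   /* Move patterns lack these alternatives.  */
        }

      return false;
    }

The point of the restructuring, per the hunk, is that each bail-out condition becomes a separate, commented early return instead of one nested boolean expression.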
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog             19
-rw-r--r--  gcc/config/i386/i386.c    48
-rw-r--r--  gcc/config/i386/i386.h     4
-rw-r--r--  gcc/config/i386/i386.md  309
4 files changed, 133 insertions, 247 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index ca86ebb..68d704d 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,24 @@
2005-01-19 Richard Henderson <rth@redhat.com>
+ PR target/19511
+ * config/i386/i386.c (ix86_preferred_reload_class): Return a proper
+ subclass of the input class.
+ (ix86_secondary_memory_needed): Always true for cross-MMX classes.
+ Always true for cross-SSE1 classes. Rationalize conditionals.
+ * config/i386/i386.h (SSE_CLASS_P, MMX_CLASS_P): Use straight equality.
+ * config/i386/i386.md (movsi_1): Add MMX/SSE zeros. Fix alternatives
+ for SSE1. Don't check TARGET_INTER_UNIT_MOVES.
+ (movdi_2): Add MMX/SSE zeros.
+ (movdi_1_rex64): Likewise. Don't check TARGET_INTER_UNIT_MOVES.
+ (movsf_1): Don't check TARGET_INTER_UNIT_MOVES.
+ (zero_extendsidi2_32, zero_extendsidi2_rex64): Likewise.
+ (movsi_1_nointernunit, movdi_1_rex64_nointerunit): Remove.
+ (movsf_1_nointerunit, zero_extendsidi2_32_1): Remove.
+ (zero_extendsidi2_rex64_1): Remove.
+ (MOV0 peephole): Check GENERAL_REG_P.
+
+2005-01-19 Richard Henderson <rth@redhat.com>
+
PR target/19427
* config/i386/i386.c (ix86_expand_vector_set): Fix third and fourth
shufps elements.
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 800cd52..a4ed0f7 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -14599,6 +14599,8 @@ ix86_free_from_memory (enum machine_mode mode)
enum reg_class
ix86_preferred_reload_class (rtx x, enum reg_class class)
{
+ if (class == NO_REGS)
+ return NO_REGS;
if (GET_CODE (x) == CONST_VECTOR && x != CONST0_RTX (GET_MODE (x)))
return NO_REGS;
if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) != VOIDmode)
@@ -14618,7 +14620,7 @@ ix86_preferred_reload_class (rtx x, enum reg_class class)
}
/* General regs can load everything. */
if (reg_class_subset_p (class, GENERAL_REGS))
- return GENERAL_REGS;
+ return class;
/* In case we haven't resolved FLOAT or SSE yet, give up. */
if (MAYBE_FLOAT_CLASS_P (class) || MAYBE_SSE_CLASS_P (class))
return NO_REGS;
@@ -14640,6 +14642,7 @@ ix86_preferred_reload_class (rtx x, enum reg_class class)
When STRICT is false, we are being called from REGISTER_MOVE_COST, so do not
enforce these sanity checks. */
+
int
ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
enum machine_mode mode, int strict)
@@ -14653,21 +14656,50 @@ ix86_secondary_memory_needed (enum reg_class class1, enum reg_class class2,
{
if (strict)
abort ();
- else
- return 1;
+ return true;
}
- return (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2)
- || ((SSE_CLASS_P (class1) != SSE_CLASS_P (class2)
- || MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
- && ((mode != SImode && (mode != DImode || !TARGET_64BIT))
- || (!TARGET_INTER_UNIT_MOVES && !optimize_size))));
+
+ if (FLOAT_CLASS_P (class1) != FLOAT_CLASS_P (class2))
+ return true;
+
+ /* ??? This is a lie. We do have moves between mmx/general, and for
+ mmx/sse2. But by saying we need secondary memory we discourage the
+ register allocator from using the mmx registers unless needed. */
+ if (MMX_CLASS_P (class1) != MMX_CLASS_P (class2))
+ return true;
+
+ if (SSE_CLASS_P (class1) != SSE_CLASS_P (class2))
+ {
+ /* SSE1 doesn't have any direct moves from other classes. */
+ if (!TARGET_SSE2)
+ return true;
+
+ /* If the target says that inter-unit moves are more expensive
+ than moving through memory, then don't generate them. */
+ if (!TARGET_INTER_UNIT_MOVES && !optimize_size)
+ return true;
+
+ /* Between SSE and general, we have moves no larger than word size. */
+ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
+ return true;
+
+ /* ??? For the cost of one register reformat penalty, we could use
+ the same instructions to move SFmode and DFmode data, but the
+ relevant move patterns don't support those alternatives. */
+ if (mode == SFmode || mode == DFmode)
+ return true;
+ }
+
+ return false;
}
+
/* Return the cost of moving data from a register in class CLASS1 to
one in class CLASS2.
It is not required that the cost always equal 2 when FROM is the same as TO;
on some machines it is expensive to move between registers if they are not
general registers. */
+
int
ix86_register_move_cost (enum machine_mode mode, enum reg_class class1,
enum reg_class class2)
diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h
index 9a199f9..5168da8 100644
--- a/gcc/config/i386/i386.h
+++ b/gcc/config/i386/i386.h
@@ -1307,9 +1307,9 @@ enum reg_class
#define FLOAT_CLASS_P(CLASS) \
reg_class_subset_p ((CLASS), FLOAT_REGS)
#define SSE_CLASS_P(CLASS) \
- reg_class_subset_p ((CLASS), SSE_REGS)
+ ((CLASS) == SSE_REGS)
#define MMX_CLASS_P(CLASS) \
- reg_class_subset_p ((CLASS), MMX_REGS)
+ ((CLASS) == MMX_REGS)
#define MAYBE_INTEGER_CLASS_P(CLASS) \
reg_classes_intersect_p ((CLASS), GENERAL_REGS)
#define MAYBE_FLOAT_CLASS_P(CLASS) \
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index 949c392..380836e 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -1126,59 +1126,35 @@
(define_insn "*movsi_1"
[(set (match_operand:SI 0 "nonimmediate_operand"
- "=r ,m ,!*y,!rm,!*y,!*x,!rm,!*x")
+ "=r ,m ,*y,*y,?rm,?*y,*x,*x,?r,m ,?*Y,*x")
(match_operand:SI 1 "general_operand"
- "rinm,rin,*y ,*y ,rm ,*x ,*x ,rm"))]
- "(TARGET_INTER_UNIT_MOVES || optimize_size)
- && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "rinm,rin,C ,*y,*y ,rm ,C ,*x,*Y,*x,r ,m "))]
+ "!(MEM_P (operands[0]) && MEM_P (operands[1]))"
{
switch (get_attr_type (insn))
{
- case TYPE_SSEMOV:
+ case TYPE_SSELOG1:
if (get_attr_mode (insn) == MODE_TI)
- return "movdqa\t{%1, %0|%0, %1}";
- return "movd\t{%1, %0|%0, %1}";
+ return "pxor\t%0, %0";
+ return "xorps\t%0, %0";
- case TYPE_MMXMOV:
- if (get_attr_mode (insn) == MODE_DI)
- return "movq\t{%1, %0|%0, %1}";
- return "movd\t{%1, %0|%0, %1}";
-
- case TYPE_LEA:
- return "lea{l}\t{%1, %0|%0, %1}";
-
- default:
- if (flag_pic && !LEGITIMATE_PIC_OPERAND_P (operands[1]))
- abort();
- return "mov{l}\t{%1, %0|%0, %1}";
- }
-}
- [(set (attr "type")
- (cond [(eq_attr "alternative" "2,3,4")
- (const_string "mmxmov")
- (eq_attr "alternative" "5,6,7")
- (const_string "ssemov")
- (and (ne (symbol_ref "flag_pic") (const_int 0))
- (match_operand:SI 1 "symbolic_operand" ""))
- (const_string "lea")
- ]
- (const_string "imov")))
- (set_attr "mode" "SI,SI,DI,SI,SI,TI,SI,SI")])
-
-(define_insn "*movsi_1_nointernunit"
- [(set (match_operand:SI 0 "nonimmediate_operand"
- "=r ,m ,!*y,!m,!*y,!*x,!m,!*x")
- (match_operand:SI 1 "general_operand"
- "rinm,rin,*y ,*y,m ,*x ,*x,m"))]
- "(!TARGET_INTER_UNIT_MOVES && !optimize_size)
- && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
-{
- switch (get_attr_type (insn))
- {
case TYPE_SSEMOV:
- if (get_attr_mode (insn) == MODE_TI)
- return "movdqa\t{%1, %0|%0, %1}";
- return "movd\t{%1, %0|%0, %1}";
+ switch (get_attr_mode (insn))
+ {
+ case MODE_TI:
+ return "movdqa\t{%1, %0|%0, %1}";
+ case MODE_V4SF:
+ return "movaps\t{%1, %0|%0, %1}";
+ case MODE_SI:
+ return "movd\t{%1, %0|%0, %1}";
+ case MODE_SF:
+ return "movss\t{%1, %0|%0, %1}";
+ default:
+ gcc_unreachable ();
+ }
+
+ case TYPE_MMXADD:
+ return "pxor\t%0, %0";
case TYPE_MMXMOV:
if (get_attr_mode (insn) == MODE_DI)
@@ -1195,16 +1171,32 @@
}
}
[(set (attr "type")
- (cond [(eq_attr "alternative" "2,3,4")
+ (cond [(eq_attr "alternative" "2")
+ (const_string "mmxadd")
+ (eq_attr "alternative" "3,4,5")
(const_string "mmxmov")
- (eq_attr "alternative" "5,6,7")
+ (eq_attr "alternative" "6")
+ (const_string "sselog1")
+ (eq_attr "alternative" "7,8,9,10,11")
(const_string "ssemov")
(and (ne (symbol_ref "flag_pic") (const_int 0))
(match_operand:SI 1 "symbolic_operand" ""))
(const_string "lea")
]
(const_string "imov")))
- (set_attr "mode" "SI,SI,DI,SI,SI,TI,SI,SI")])
+ (set (attr "mode")
+ (cond [(eq_attr "alternative" "2,3")
+ (const_string "DI")
+ (eq_attr "alternative" "6,7")
+ (if_then_else
+ (eq (symbol_ref "TARGET_SSE2") (const_int 0))
+ (const_string "V4SF")
+ (const_string "TI"))
+ (and (eq_attr "alternative" "8,9,10,11")
+ (eq (symbol_ref "TARGET_SSE2") (const_int 0)))
+ (const_string "SF")
+ ]
+ (const_string "SI")))])
;; Stores and loads of ax to arbitrary constant address.
;; We fake an second form of instruction to force reload to load address
@@ -1903,24 +1895,26 @@
(define_insn "*movdi_2"
[(set (match_operand:DI 0 "nonimmediate_operand"
- "=r ,o ,m*y,*y,m ,*Y,*Y,m ,*x,*x")
+ "=r ,o ,*y,m*y,*y,*Y,m ,*Y,*Y,*x,m ,*x,*x")
(match_operand:DI 1 "general_operand"
- "riFo,riF,*y ,m ,*Y,*Y,m ,*x,*x,m "))]
- "!TARGET_64BIT
- && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "riFo,riF,C ,*y ,m ,C ,*Y,*Y,m ,C ,*x,*x,m "))]
+ "!TARGET_64BIT && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
"@
#
#
+ pxor\t%0, %0
movq\t{%1, %0|%0, %1}
movq\t{%1, %0|%0, %1}
+ pxor\t%0, %0
movq\t{%1, %0|%0, %1}
movdqa\t{%1, %0|%0, %1}
movq\t{%1, %0|%0, %1}
+ xorps\t%0, %0
movlps\t{%1, %0|%0, %1}
movaps\t{%1, %0|%0, %1}
movlps\t{%1, %0|%0, %1}"
- [(set_attr "type" "*,*,mmx,mmx,ssemov,ssemov,ssemov,ssemov,ssemov,ssemov")
- (set_attr "mode" "DI,DI,DI,DI,DI,TI,DI,V2SF,V4SF,V2SF")])
+ [(set_attr "type" "*,*,mmxadd,mmxmov,mmxmov,sselog1,ssemov,ssemov,ssemov,sselog1,ssemov,ssemov,ssemov")
+ (set_attr "mode" "DI,DI,DI,DI,DI,TI,DI,TI,DI,V4SF,V2SF,V4SF,V2SF")])
(define_split
[(set (match_operand:DI 0 "push_operand" "")
@@ -1942,17 +1936,15 @@
(define_insn "*movdi_1_rex64"
[(set (match_operand:DI 0 "nonimmediate_operand"
- "=r,r ,r,mr,!mr,!*y,!rm,!*y,!*x,!rm,!*x,!*x,!*y")
+ "=r,r ,r,m ,!m,*y,*y,?rm,?*y,*x,*x,?rm,?*x,?*x,?*y")
(match_operand:DI 1 "general_operand"
- "Z ,rem,i,re,n ,*y ,*y ,rm ,*x ,*x ,rm ,*y ,*x"))]
- "TARGET_64BIT
- && (TARGET_INTER_UNIT_MOVES || optimize_size)
- && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "Z ,rem,i,re,n ,C ,*y,*y ,rm ,C ,*x,*x ,rm ,*y ,*x"))]
+ "TARGET_64BIT && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
{
switch (get_attr_type (insn))
{
case TYPE_SSECVT:
- if (which_alternative == 11)
+ if (which_alternative == 13)
return "movq2dq\t{%1, %0|%0, %1}";
else
return "movdq2q\t{%1, %0|%0, %1}";
@@ -1966,6 +1958,9 @@
if (GENERAL_REG_P (operands[0]) || GENERAL_REG_P (operands[1]))
return "movd\t{%1, %0|%0, %1}";
return "movq\t{%1, %0|%0, %1}";
+ case TYPE_SSELOG1:
+ case TYPE_MMXADD:
+ return "pxor\t%0, %0";
case TYPE_MULTI:
return "#";
case TYPE_LEA:
@@ -1982,11 +1977,15 @@
}
}
[(set (attr "type")
- (cond [(eq_attr "alternative" "5,6,7")
+ (cond [(eq_attr "alternative" "5")
+ (const_string "mmxadd")
+ (eq_attr "alternative" "6,7,8")
(const_string "mmxmov")
- (eq_attr "alternative" "8,9,10")
+ (eq_attr "alternative" "9")
+ (const_string "sselog1")
+ (eq_attr "alternative" "10,11,12")
(const_string "ssemov")
- (eq_attr "alternative" "11,12")
+ (eq_attr "alternative" "13,14")
(const_string "ssecvt")
(eq_attr "alternative" "4")
(const_string "multi")
@@ -1995,57 +1994,9 @@
(const_string "lea")
]
(const_string "imov")))
- (set_attr "modrm" "*,0,0,*,*,*,*,*,*,*,*,*,*")
- (set_attr "length_immediate" "*,4,8,*,*,*,*,*,*,*,*,*,*")
- (set_attr "mode" "SI,DI,DI,DI,SI,DI,DI,DI,TI,DI,DI,DI,DI")])
-
-(define_insn "*movdi_1_rex64_nointerunit"
- [(set (match_operand:DI 0 "nonimmediate_operand"
- "=r,r ,r,mr,!mr,!*y,!m,!*y,!*Y,!m,!*Y")
- (match_operand:DI 1 "general_operand"
- "Z,rem,i,re,n ,*y ,*y,m ,*Y ,*Y,m"))]
- "TARGET_64BIT
- && (!TARGET_INTER_UNIT_MOVES && !optimize_size)
- && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
-{
- switch (get_attr_type (insn))
- {
- case TYPE_SSEMOV:
- if (get_attr_mode (insn) == MODE_TI)
- return "movdqa\t{%1, %0|%0, %1}";
- /* FALLTHRU */
- case TYPE_MMXMOV:
- return "movq\t{%1, %0|%0, %1}";
- case TYPE_MULTI:
- return "#";
- case TYPE_LEA:
- return "lea{q}\t{%a1, %0|%0, %a1}";
- default:
- if (flag_pic && !LEGITIMATE_PIC_OPERAND_P (operands[1]))
- abort ();
- if (get_attr_mode (insn) == MODE_SI)
- return "mov{l}\t{%k1, %k0|%k0, %k1}";
- else if (which_alternative == 2)
- return "movabs{q}\t{%1, %0|%0, %1}";
- else
- return "mov{q}\t{%1, %0|%0, %1}";
- }
-}
- [(set (attr "type")
- (cond [(eq_attr "alternative" "5,6,7")
- (const_string "mmxmov")
- (eq_attr "alternative" "8,9,10")
- (const_string "ssemov")
- (eq_attr "alternative" "4")
- (const_string "multi")
- (and (ne (symbol_ref "flag_pic") (const_int 0))
- (match_operand:DI 1 "symbolic_operand" ""))
- (const_string "lea")
- ]
- (const_string "imov")))
- (set_attr "modrm" "*,0,0,*,*,*,*,*,*,*,*")
- (set_attr "length_immediate" "*,4,8,*,*,*,*,*,*,*,*")
- (set_attr "mode" "SI,DI,DI,DI,SI,DI,DI,DI,TI,DI,DI")])
+ (set_attr "modrm" "*,0,0,*,*,*,*,*,*,*,*,*,*,*,*")
+ (set_attr "length_immediate" "*,4,8,*,*,*,*,*,*,*,*,*,*,*,*")
+ (set_attr "mode" "SI,DI,DI,DI,SI,DI,DI,DI,DI,TI,TI,DI,DI,DI,DI")])
;; Stores and loads of ax to arbitrary constant address.
;; We fake an second form of instruction to force reload to load address
@@ -2305,98 +2256,7 @@
"=f#xr,m ,f#xr,r#xf ,m ,x#rf,x#rf,x#rf ,m ,!*y,!rm,!*y")
(match_operand:SF 1 "general_operand"
"fm#rx,f#rx,G ,rmF#fx,Fr#fx,C ,x ,xm#rf,x#rf,rm ,*y ,*y"))]
- "(TARGET_INTER_UNIT_MOVES || optimize_size)
- && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
- && (reload_in_progress || reload_completed
- || (ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_LARGE)
- || GET_CODE (operands[1]) != CONST_DOUBLE
- || memory_operand (operands[0], SFmode))"
-{
- switch (which_alternative)
- {
- case 0:
- return output_387_reg_move (insn, operands);
-
- case 1:
- if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
- return "fstp%z0\t%y0";
- else
- return "fst%z0\t%y0";
-
- case 2:
- return standard_80387_constant_opcode (operands[1]);
-
- case 3:
- case 4:
- return "mov{l}\t{%1, %0|%0, %1}";
- case 5:
- if (get_attr_mode (insn) == MODE_TI)
- return "pxor\t%0, %0";
- else
- return "xorps\t%0, %0";
- case 6:
- if (get_attr_mode (insn) == MODE_V4SF)
- return "movaps\t{%1, %0|%0, %1}";
- else
- return "movss\t{%1, %0|%0, %1}";
- case 7:
- case 8:
- return "movss\t{%1, %0|%0, %1}";
-
- case 9:
- case 10:
- return "movd\t{%1, %0|%0, %1}";
-
- case 11:
- return "movq\t{%1, %0|%0, %1}";
-
- default:
- abort();
- }
-}
- [(set_attr "type" "fmov,fmov,fmov,imov,imov,ssemov,ssemov,ssemov,ssemov,mmxmov,mmxmov,mmxmov")
- (set (attr "mode")
- (cond [(eq_attr "alternative" "3,4,9,10")
- (const_string "SI")
- (eq_attr "alternative" "5")
- (if_then_else
- (and (and (ne (symbol_ref "TARGET_SSE_LOAD0_BY_PXOR")
- (const_int 0))
- (ne (symbol_ref "TARGET_SSE2")
- (const_int 0)))
- (eq (symbol_ref "optimize_size")
- (const_int 0)))
- (const_string "TI")
- (const_string "V4SF"))
- /* For architectures resolving dependencies on
- whole SSE registers use APS move to break dependency
- chains, otherwise use short move to avoid extra work.
-
- Do the same for architectures resolving dependencies on
- the parts. While in DF mode it is better to always handle
- just register parts, the SF mode is different due to lack
- of instructions to load just part of the register. It is
- better to maintain the whole registers in single format
- to avoid problems on using packed logical operations. */
- (eq_attr "alternative" "6")
- (if_then_else
- (ior (ne (symbol_ref "TARGET_SSE_PARTIAL_REG_DEPENDENCY")
- (const_int 0))
- (ne (symbol_ref "TARGET_SSE_SPLIT_REGS")
- (const_int 0)))
- (const_string "V4SF")
- (const_string "SF"))
- (eq_attr "alternative" "11")
- (const_string "DI")]
- (const_string "SF")))])
-
-(define_insn "*movsf_1_nointerunit"
- [(set (match_operand:SF 0 "nonimmediate_operand"
- "=f#xr,m ,f#xr,r#xf ,m ,x#rf,x#rf,x#rf ,m ,!*y,!m,!*y")
- (match_operand:SF 1 "general_operand"
- "fm#rx,f#rx,G ,rmF#fx,Fr#fx,C ,x ,xm#rf,x#rf,m ,*y,*y"))]
- "(!TARGET_INTER_UNIT_MOVES && !optimize_size)
- && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ "!(MEM_P (operands[0]) && MEM_P (operands[1]))
&& (reload_in_progress || reload_completed
|| (ix86_cmodel == CM_MEDIUM || ix86_cmodel == CM_LARGE)
|| GET_CODE (operands[1]) != CONST_DOUBLE
@@ -3273,24 +3133,10 @@
")
(define_insn "zero_extendsidi2_32"
- [(set (match_operand:DI 0 "nonimmediate_operand" "=r,?r,?*o,!?y,!?Y")
- (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "0,rm,r,m,m")))
- (clobber (reg:CC FLAGS_REG))]
- "!TARGET_64BIT && !TARGET_INTER_UNIT_MOVES"
- "@
- #
- #
- #
- movd\t{%1, %0|%0, %1}
- movd\t{%1, %0|%0, %1}"
- [(set_attr "mode" "SI,SI,SI,DI,TI")
- (set_attr "type" "multi,multi,multi,mmxmov,ssemov")])
-
-(define_insn "*zero_extendsidi2_32_1"
- [(set (match_operand:DI 0 "nonimmediate_operand" "=r,?r,?*o,!?y,!?Y")
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,?r,?*o,?*y,?*Y")
(zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "0,rm,r,rm,rm")))
(clobber (reg:CC FLAGS_REG))]
- "!TARGET_64BIT && TARGET_INTER_UNIT_MOVES"
+ "!TARGET_64BIT"
"@
#
#
@@ -3301,21 +3147,9 @@
(set_attr "type" "multi,multi,multi,mmxmov,ssemov")])
(define_insn "zero_extendsidi2_rex64"
- [(set (match_operand:DI 0 "nonimmediate_operand" "=r,o,!?y,!?Y")
- (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "rm,0,m,m")))]
- "TARGET_64BIT && !TARGET_INTER_UNIT_MOVES"
- "@
- mov\t{%k1, %k0|%k0, %k1}
- #
- movd\t{%1, %0|%0, %1}
- movd\t{%1, %0|%0, %1}"
- [(set_attr "type" "imovx,imov,mmxmov,ssemov")
- (set_attr "mode" "SI,DI,DI,TI")])
-
-(define_insn "*zero_extendsidi2_rex64_1"
- [(set (match_operand:DI 0 "nonimmediate_operand" "=r,o,!?y,!*?")
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,o,?*y,?*Y")
(zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "rm,0,rm,rm")))]
- "TARGET_64BIT && TARGET_INTER_UNIT_MOVES"
+ "TARGET_64BIT"
"@
mov\t{%k1, %k0|%k0, %k1}
#
@@ -18534,6 +18368,7 @@
|| GET_MODE (operands[0]) == SImode
|| (GET_MODE (operands[0]) == DImode && TARGET_64BIT))
&& (! TARGET_USE_MOV0 || optimize_size)
+ && GENERAL_REG_P (operands[0])
&& peep2_regno_dead_p (0, FLAGS_REG)"
[(parallel [(set (match_dup 0) (const_int 0))
(clobber (reg:CC FLAGS_REG))])]