Diffstat:
-rw-r--r--  gcc/ChangeLog               |  45
-rw-r--r--  gcc/config/arm/arm.c        |  99
-rw-r--r--  gcc/config/arm/arm.h        |  13
-rw-r--r--  gcc/config/arm/arm.md       | 219
-rw-r--r--  gcc/config/arm/xm-netbsd.h  |   2
5 files changed, 256 insertions(+), 122 deletions(-)
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 1bb6c99..2c8d22c 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,48 @@
+Sat Feb 6 11:17:03 1999 Richard Earnshaw <rearnsha@arm.com>
+
+ Support for ARM9
+ * config/arm/arm.c (all_procs): Add arm9 and arm9tdmi.
+ * config/arm/arm.h (TARGET_CPU_arm9, TARGET_CPU_arm9tdmi): Define.
+ (TARGET_CPU_DEFAULT): Rework to support ARM9.
+ (CPP_CPU_ARCH_SPEC): Likewise.
+ (enum processor_type): Likewise.
+ * config/arm/arm.md (attr cpu): Add arm9.
+
+ General scheduling changes
+ * config/arm/arm.c (MAX_INSNS_SKIPPED): Delete.
+ (max_insns_skipped): New variable.
+ (arm_override_options): If generating hard floating point code for
+ the FPA, emit code for version 3.
+ When optimizing for space, don't synthesize constants.
+ Rework several flags based on the requested processor and optimization
+ level.
+ (use_return_insn): New argument iscond, all callers changed. Don't
+ use a return insn if it will be conditional and that would be
+ expensive; e.g. on StrongARM.
+ (arm_adjust_cost): Anti- and output- dependencies normally have no
+ cost.
+ (load_multiple_sequence): Newer ARMs don't benefit from ldm if
+ the sequence is short.
+ (final_prescan_insn): Use max_insns_skipped instead of
+ MAX_INSNS_SKIPPED. Note whether we will make a return instruction
+ conditional, and avoid this if it would be expensive.
+ * config/arm/arm.md (scheduling attributes and function units):
+ Rewrite to better describe ARM8, 9 and StrongARM.
+
+ * config/arm/arm.md (*movhi_insn_littleend): Make op0 predicate
+ s_register_operand.
+ (*ifcompare_plus_move): Use arm_rhs_operand in place of
+ arm_rhsm_operand. Rework constraints.
+ (*if_plus_move): Likewise.
+ (*ifcompare_move_plus): Likewise.
+ (*if_move_plus): Likewise.
+ (*ifcompare_arith_move): Likewise.
+ (*if_arith_move): Likewise.
+ (*ifcompare_move_arith): Likewise.
+ (*if_move_arith): Likewise.
+
+ * config/arm/xm-netbsd.h: Don't include arm/xm-arm.h.
+
1999-02-05 Michael Meissner <meissner@cygnus.com>
* loop.c (check_dbra_loop): A store using an address giv for which
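For illustration only (this snippet is not part of the patch), the conditional-execution tuning described in the ChangeLog above is easiest to see on a small leaf function: the "then" arm below is a couple of instructions ending in a return, so final_prescan_insn may convert it to conditionally executed code, but only when the arm fits within max_insns_skipped (3 on StrongARM, 5 otherwise, 6 when optimizing for size) and use_return_insn (TRUE) says a conditional return is not expensive. The function and argument names here are made up.

/* Illustrative sketch, not from the patch.  */
int
saturate (int x, int limit)
{
  if (x > limit)
    return limit;   /* short arm: candidate for a conditionalised return */
  return x;
}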
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index 96ade94..c1a6e57 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -1,8 +1,8 @@
-/* Output routines for GCC for ARM/RISCiX.
+/* Output routines for GCC for ARM.
Copyright (C) 1991, 93, 94, 95, 96, 97, 98, 1999 Free Software Foundation, Inc.
Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
and Martin Simmons (@harleqn.co.uk).
- More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
+ More major hacks by Richard Earnshaw (rearnsha@arm.com).
This file is part of GNU CC.
@@ -41,7 +41,7 @@ Boston, MA 02111-1307, USA. */
/* The maximum number of insns skipped which will be conditionalised if
possible. */
-#define MAX_INSNS_SKIPPED 5
+static int max_insns_skipped = 5;
/* Some function declarations. */
extern FILE *asm_out_file;
@@ -200,6 +200,11 @@ static struct processors all_procs[] =
| FL_ARCH4)},
{"arm810", PROCESSOR_ARM8, (FL_FAST_MULT | FL_MODE32 | FL_MODE26
| FL_ARCH4)},
+ /* The next two are the same, but arm9 only exists in the thumb variant */
+ {"arm9", PROCESSOR_ARM9, (FL_FAST_MULT | FL_MODE32 | FL_ARCH4
+ | FL_THUMB)},
+ {"arm9tdmi", PROCESSOR_ARM9, (FL_FAST_MULT | FL_MODE32 | FL_ARCH4
+ | FL_THUMB)},
{"strongarm", PROCESSOR_STARM, (FL_FAST_MULT | FL_MODE32 | FL_MODE26
| FL_ARCH4)},
{"strongarm110", PROCESSOR_STARM, (FL_FAST_MULT | FL_MODE32 | FL_MODE26
@@ -362,13 +367,53 @@ arm_override_options ()
target_flags &= ~ARM_FLAG_THUMB;
}
- if (TARGET_FPE && arm_fpu != FP_HARD)
- arm_fpu = FP_SOFT2;
+ if (TARGET_FPE && arm_fpu == FP_HARD)
+ arm_fpu = FP_SOFT3;
+
+ /* If optimizing for space, don't synthesize constants */
+ if (optimize_size)
+ arm_constant_limit = 1;
+
+ /* Override a few things based on the tuning parameters. */
+ switch (arm_cpu)
+ {
+ case PROCESSOR_ARM2:
+ case PROCESSOR_ARM3:
+ /* For arm2/3 there is no need to do any scheduling if there is
+ only a floating point emulator, or we are doing software
+ floating-point. */
+ if (TARGET_SOFT_FLOAT || arm_fpu != FP_HARD)
+ flag_schedule_insns = flag_schedule_insns_after_reload = 0;
+ break;
+
+ case PROCESSOR_ARM6:
+ case PROCESSOR_ARM7:
+ break;
+
+ case PROCESSOR_ARM8:
+ case PROCESSOR_ARM9:
+ /* For these processors, it never costs more than 2 cycles to load a
+ constant, and the load scheduler may well reduce that to 1. */
+ arm_constant_limit = 1;
+ break;
+
+ case PROCESSOR_STARM:
+ /* Same as above */
+ arm_constant_limit = 1;
+ /* StrongARM has early execution of branches, so a sequence that is worth
+ skipping is shorter. */
+ max_insns_skipped = 3;
+ break;
+
+ default:
+ fatal ("Unknown cpu type selected");
+ break;
+ }
- /* For arm2/3 there is no need to do any scheduling if there is only
- a floating point emulator, or we are doing software floating-point. */
- if ((TARGET_SOFT_FLOAT || arm_fpu != FP_HARD) && arm_cpu == PROCESSOR_ARM2)
- flag_schedule_insns = flag_schedule_insns_after_reload = 0;
+ /* If optimizing for size, bump the number of instructions that we
+ are prepared to conditionally execute (even on a StrongARM). */
+ if (optimize_size)
+ max_insns_skipped = 6;
arm_prog_mode = TARGET_APCS_32 ? PROG_MODE_PROG32 : PROG_MODE_PROG26;
@@ -383,11 +428,11 @@ arm_override_options ()
}
}
-
/* Return 1 if it is possible to return using a single instruction */
int
-use_return_insn ()
+use_return_insn (iscond)
+ int iscond;
{
int regno;
@@ -398,8 +443,12 @@ use_return_insn ()
return 0;
/* Can't be done if interworking with Thumb, and any registers have been
- stacked */
- if (TARGET_THUMB_INTERWORK)
+ stacked. Similarly, on StrongARM, conditional returns are expensive
+ if they aren't taken and registers have been stacked. */
+ if (iscond && arm_cpu == PROCESSOR_STARM && frame_pointer_needed)
+ return 0;
+ else if ((iscond && arm_cpu == PROCESSOR_STARM)
+ || TARGET_THUMB_INTERWORK)
for (regno = 0; regno < 16; regno++)
if (regs_ever_live[regno] && ! call_used_regs[regno])
return 0;
@@ -1604,6 +1653,11 @@ arm_adjust_cost (insn, link, dep, cost)
{
rtx i_pat, d_pat;
+ /* XXX This is not strictly true for the FPA. */
+ if (REG_NOTE_KIND(link) == REG_DEP_ANTI
+ || REG_NOTE_KIND(link) == REG_DEP_OUTPUT)
+ return 0;
+
if ((i_pat = single_set (insn)) != NULL
&& GET_CODE (SET_SRC (i_pat)) == MEM
&& (d_pat = single_set (dep)) != NULL
@@ -2537,6 +2591,13 @@ load_multiple_sequence (operands, nops, regs, base, load_offset)
if (unsorted_offsets[order[nops - 1]] == -4)
return 4; /* ldmdb */
+ /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm if
+ the offset isn't small enough */
+ if (nops == 2
+ && (arm_cpu == PROCESSOR_ARM8 || arm_cpu == PROCESSOR_ARM9
+ || arm_cpu == PROCESSOR_STARM))
+ return 0;
+
/* Can't do it without setting up the offset, only do this if it takes
no more than one insn. */
return (const_ok_for_arm (unsorted_offsets[order[0]])
@@ -5009,7 +5070,7 @@ output_func_epilogue (f, frame_size)
int volatile_func = (optimize > 0
&& TREE_THIS_VOLATILE (current_function_decl));
- if (use_return_insn() && return_used_this_function)
+ if (use_return_insn (FALSE) && return_used_this_function)
{
if ((frame_size + current_function_outgoing_args_size) != 0
&& !(frame_pointer_needed || TARGET_APCS))
@@ -5830,7 +5891,7 @@ final_prescan_insn (insn, opvec, noperands)
insns are okay, and the label or unconditional branch to the same
label is not too far away, succeed. */
for (insns_skipped = 0;
- !fail && !succeed && insns_skipped++ < MAX_INSNS_SKIPPED;)
+ !fail && !succeed && insns_skipped++ < max_insns_skipped;)
{
rtx scanbody;
@@ -5892,7 +5953,7 @@ final_prescan_insn (insn, opvec, noperands)
this_insn = next_nonnote_insn (this_insn);
if (this_insn && this_insn == label
- && insns_skipped < MAX_INSNS_SKIPPED)
+ && insns_skipped < max_insns_skipped)
{
if (jump_clobbers)
{
@@ -5927,6 +5988,12 @@ final_prescan_insn (insn, opvec, noperands)
else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
fail = TRUE;
}
+ /* Fail if a conditional return is undesirable (e.g. on a
+ StrongARM), but still allow this if optimizing for size. */
+ else if (GET_CODE (scanbody) == RETURN
+ && ! use_return_insn (TRUE)
+ && ! optimize_size)
+ fail = TRUE;
else if (GET_CODE (scanbody) == RETURN
&& seeking_return)
{
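The arm_adjust_cost change above treats anti- and output dependencies as free (the comment notes this is not strictly true for the FPA); only true dependencies still carry a scheduling cost. A rough, hypothetical illustration of the three kinds of dependence, not taken from the patch:

/* Hypothetical example.  The add truly depends on the load of *p; the
   first store must merely not move before that load (anti-dependence);
   the second store must merely stay after the first (output dependence).
   Only the first of these now costs anything when scheduling for ARM.  */
int
deps (int *p, int a)
{
  int t = *p + a;   /* true dependence: the add needs the loaded value   */
  *p = a;           /* anti-dependence: write after the read of *p       */
  *p = t;           /* output dependence: write after the write above    */
  return t;
}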
diff --git a/gcc/config/arm/arm.h b/gcc/config/arm/arm.h
index a76571f..439ebb6 100644
--- a/gcc/config/arm/arm.h
+++ b/gcc/config/arm/arm.h
@@ -53,6 +53,8 @@ Boston, MA 02111-1307, USA. */
#define TARGET_CPU_arm810 0x0020
#define TARGET_CPU_strongarm 0x0040
#define TARGET_CPU_strongarm110 0x0040
+#define TARGET_CPU_arm9 0x0080
+#define TARGET_CPU_arm9tdmi 0x0080
/* Configure didn't specify */
#define TARGET_CPU_generic 0x8000
@@ -95,7 +97,7 @@ extern int frame_pointer_needed;
#if TARGET_CPU_DEFAULT == TARGET_CPU_arm7m
#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_3M__"
#else
-#if TARGET_CPU_DEFAULT == TARGET_CPU_arm7tdmi
+#if TARGET_CPU_DEFAULT == TARGET_CPU_arm7tdmi || TARGET_CPU_DEFAULT == TARGET_CPU_arm9
#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_4T__"
#else
#if TARGET_CPU_DEFAULT == TARGET_CPU_arm8 || TARGET_CPU_DEFAULT == TARGET_CPU_arm810 || TARGET_CPU_DEFAULT == TARGET_CPU_strongarm
@@ -140,6 +142,8 @@ Unrecognized value in TARGET_CPU_DEFAULT.
%{march=arm7tdmi:-D__ARM_ARCH_4T__} \
%{march=arm8:-D__ARM_ARCH_4__} \
%{march=arm810:-D__ARM_ARCH_4__} \
+%{march=arm9:-D__ARM_ARCH_4T__} \
+%{march=arm9tdmi:-D__ARM_ARCH_4T__} \
%{march=strongarm:-D__ARM_ARCH_4__} \
%{march=strongarm110:-D__ARM_ARCH_4__} \
%{march=armv2:-D__ARM_ARCH_2__} \
@@ -167,6 +171,8 @@ Unrecognized value in TARGET_CPU_DEFAULT.
%{mcpu=arm7tdmi:-D__ARM_ARCH_4T__} \
%{mcpu=arm8:-D__ARM_ARCH_4__} \
%{mcpu=arm810:-D__ARM_ARCH_4__} \
+ %{mcpu=arm9:-D__ARM_ARCH_4T__} \
+ %{mcpu=arm9tdmi:-D__ARM_ARCH_4T__} \
%{mcpu=strongarm:-D__ARM_ARCH_4__} \
%{mcpu=strongarm110:-D__ARM_ARCH_4__} \
%{!mcpu*:%{!m6:%{!m2:%{!m3:%(cpp_cpu_arch_default)}}}}} \
@@ -427,6 +433,7 @@ enum processor_type
PROCESSOR_ARM6,
PROCESSOR_ARM7,
PROCESSOR_ARM8,
+ PROCESSOR_ARM9,
PROCESSOR_STARM,
PROCESSOR_NONE /* NOTE: This must be last, since it doesn't
appear in the attr_cpu list */
@@ -1165,7 +1172,7 @@ do { \
/* Determine if the epilogue should be output as RTL.
You should override this if you define FUNCTION_EXTRA_EPILOGUE. */
-#define USE_RETURN_INSN use_return_insn ()
+#define USE_RETURN_INSN(ISCOND) use_return_insn (ISCOND)
/* Definitions for register eliminations.
@@ -2050,7 +2057,7 @@ do { \
fully defined yet. */
void arm_override_options (/* void */);
-int use_return_insn (/* void */);
+int use_return_insn (/* int */);
int const_ok_for_arm (/* HOST_WIDE_INT */);
int const_ok_for_op (/* HOST_WIDE_INT, enum rtx_code,
enum machine_mode */);
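With the spec additions above, -mcpu=arm9 and -mcpu=arm9tdmi (and the corresponding -march= values) define __ARM_ARCH_4T__, the same macro already used for arm7tdmi. A hypothetical user-level use of that macro, not part of the patch:

/* Hypothetical example: architecture 4T parts have BX (and Thumb), so
   code can key its interworking path off the macro the new specs define.  */
#ifdef __ARM_ARCH_4T__
#  define HAVE_BX 1
#else
#  define HAVE_BX 0
#endif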
diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md
index c717d45..96843f0 100644
--- a/gcc/config/arm/arm.md
+++ b/gcc/config/arm/arm.md
@@ -1,8 +1,8 @@
-;;- Machine description for Advanced RISC Machines' ARM for GNU compiler
+;;- Machine description for ARM for GNU compiler
;; Copyright (C) 1991, 93-98, 1999 Free Software Foundation, Inc.
;; Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
;; and Martin Simmons (@harleqn.co.uk).
-;; More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
+;; More major hacks by Richard Earnshaw (rearnsha@arm.com).
;; This file is part of GNU CC.
@@ -45,11 +45,9 @@
; by the -mapcs-{32,26} flag, and possibly the -mcpu=... option.
(define_attr "prog_mode" "prog26,prog32" (const (symbol_ref "arm_prog_mode")))
-; CPU attribute is used to determine whether condition codes are clobbered
-; by a call insn: on the arm6 they are if in 32-bit addressing mode; on the
-; arm2 and arm3 the condition codes are restored by the return.
-
-(define_attr "cpu" "arm2,arm3,arm6,arm7,arm8,st_arm"
+; CPU attribute is used to determine the best instruction mix for performance
+; on the named processor.
+(define_attr "cpu" "arm2,arm3,arm6,arm7,arm8,arm9,st_arm"
(const (symbol_ref "arm_cpu_attr")))
; Floating Point Unit. If we only have floating point emulation, then there
@@ -104,7 +102,7 @@
; Load scheduling, set from the cpu characteristic
(define_attr "ldsched" "no,yes"
- (if_then_else (eq_attr "cpu" "arm8,st_arm")
+ (if_then_else (eq_attr "cpu" "arm8,arm9,st_arm")
(const_string "yes")
(const_string "no")))
@@ -135,6 +133,15 @@
(const_string "clob") (const_string "nocond"))
(const_string "nocond")))
+; Only model the write buffer for ARM6 and ARM7. Earlier processors don't
+; have one. Later ones, such as StrongARM, have write-back caches, so don't
+; suffer blockages enough to warrant modelling this (and it can adversely
+; affect the schedule).
+(define_attr "model_wbuf" "no,yes"
+ (if_then_else (eq_attr "cpu" "arm6,arm7")
+ (const_string "yes")
+ (const_string "no")))
+
(define_attr "write_conflict" "no,yes"
(if_then_else (eq_attr "type"
"block,float_em,f_load,f_store,f_mem_r,r_mem_f,call,load")
@@ -194,59 +201,85 @@
(define_function_unit "fpa_mem" 1 0 (and (eq_attr "fpu" "fpa")
(eq_attr "type" "f_load")) 3 1)
-(define_function_unit "write_buf" 1 2 (eq_attr "type" "store1") 5 3)
-(define_function_unit "write_buf" 1 2 (eq_attr "type" "store2") 7 4)
-(define_function_unit "write_buf" 1 2 (eq_attr "type" "store3") 9 5)
-(define_function_unit "write_buf" 1 2 (eq_attr "type" "store4") 11 6)
-(define_function_unit "write_buf" 1 2 (eq_attr "type" "r_mem_f") 5 3)
-
-;; The write_blockage unit models (partially), the fact that writes will stall
+;;--------------------------------------------------------------------
+;; Write buffer
+;;--------------------------------------------------------------------
+;; Strictly we should model a 4-deep write buffer for ARM7xx based chips
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store1,r_mem_f")) 5 3)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store2")) 7 4)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store3")) 9 5)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store4")) 11 6)
+
+;;--------------------------------------------------------------------
+;; Write blockage unit
+;;--------------------------------------------------------------------
+;; The write_blockage unit models (partially) the fact that reads will stall
;; until the write buffer empties.
-
-(define_function_unit "write_blockage" 1 0 (eq_attr "type" "store1") 5 5
- [(eq_attr "write_conflict" "yes")])
-(define_function_unit "write_blockage" 1 0 (eq_attr "type" "store2") 7 7
+;; The f_mem_r and r_mem_f could also block, but they are to the stack,
+;; so we don't model them here
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store1")) 5 5
[(eq_attr "write_conflict" "yes")])
-(define_function_unit "write_blockage" 1 0 (eq_attr "type" "store3") 9 9
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store2")) 7 7
[(eq_attr "write_conflict" "yes")])
-(define_function_unit "write_blockage" 1 0 (eq_attr "type" "store4") 11 11
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store3")) 9 9
[(eq_attr "write_conflict" "yes")])
-(define_function_unit "write_blockage" 1 0 (eq_attr "type" "r_mem_f") 5 5
+(define_function_unit "write_blockage" 1 0
+ (and (eq_attr "model_wbuf" "yes") (eq_attr "type" "store4")) 11 11
[(eq_attr "write_conflict" "yes")])
-(define_function_unit "write_blockage" 1 0
- (eq_attr "write_conflict" "yes") 1 1)
-
+(define_function_unit "write_blockage" 1 0
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "write_conflict" "yes")) 1 1)
+
+;;--------------------------------------------------------------------
+;; Core unit
+;;--------------------------------------------------------------------
+;; Everything must spend at least one cycle in the core unit
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "yes") (eq_attr "type" "store1")) 1 1)
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "yes") (eq_attr "type" "load")) 2 1)
-(define_function_unit "core" 1 1 (eq_attr "core_cycles" "single") 1 1)
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "!yes") (eq_attr "type" "load,store1")) 2 2)
-(define_function_unit "core" 1 1
- (and (eq_attr "ldsched" "yes") (eq_attr "type" "load")) 1 1)
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_load")) 3 3)
-(define_function_unit "core" 1 1
- (and (eq_attr "ldsched" "!yes") (eq_attr "type" "load")) 2 2)
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_store")) 4 4)
-(define_function_unit "core" 1 1 (eq_attr "type" "mult") 16 16)
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "r_mem_f")) 6 6)
-(define_function_unit "core" 1 1
- (and (eq_attr "ldsched" "yes") (eq_attr "type" "store1")) 1 1)
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_mem_r")) 7 7)
-(define_function_unit "core" 1 1
- (and (eq_attr "ldsched" "!yes") (eq_attr "type" "store1")) 2 2)
+(define_function_unit "core" 1 0
+ (and (eq_attr "cpu" "!arm8,st_arm") (eq_attr "type" "mult")) 16 16)
-(define_function_unit "core" 1 1 (eq_attr "type" "store2") 3 3)
+(define_function_unit "core" 1 0
+ (and (eq_attr "cpu" "arm8") (eq_attr "type" "mult")) 4 4)
-(define_function_unit "core" 1 1 (eq_attr "type" "store3") 4 4)
+(define_function_unit "core" 1 0
+ (and (eq_attr "cpu" "st_arm") (eq_attr "type" "mult")) 3 2)
-(define_function_unit "core" 1 1 (eq_attr "type" "store4") 5 5)
+(define_function_unit "core" 1 0 (eq_attr "type" "store2") 3 3)
-(define_function_unit "core" 1 1
- (and (eq_attr "core_cycles" "multi")
- (eq_attr "type" "!mult,load,store2,store3,store4")) 32 32)
-
-(define_function_unit "loader" 1 0
- (and (eq_attr "ldsched" "yes") (eq_attr "type" "load")) 2 1)
+(define_function_unit "core" 1 0 (eq_attr "type" "store3") 4 4)
+(define_function_unit "core" 1 0 (eq_attr "type" "store4") 5 5)
;; Note: For DImode insns, there is normally no reason why operands should
;; not be in the same register, what we don't want is for something being
@@ -3048,7 +3081,7 @@
[(set_attr "type" "*,*,load,store1")])
(define_insn "*movhi_insn_littleend"
- [(set (match_operand:HI 0 "general_operand" "=r,r,r")
+ [(set (match_operand:HI 0 "s_register_operand" "=r,r,r")
(match_operand:HI 1 "general_operand" "rI,K,m"))]
"! arm_arch4
&& ! BYTES_BIG_ENDIAN
@@ -4252,7 +4285,7 @@
;; Often the return insn will be the same as loading from memory, so set attr
(define_insn "return"
[(return)]
- "USE_RETURN_INSN"
+ "USE_RETURN_INSN(FALSE)"
"*
{
extern int arm_ccfsm_state;
@@ -4272,7 +4305,7 @@
[(match_operand 1 "cc_register" "") (const_int 0)])
(return)
(pc)))]
- "USE_RETURN_INSN"
+ "USE_RETURN_INSN(TRUE)"
"*
{
extern int arm_ccfsm_state;
@@ -4293,7 +4326,7 @@
[(match_operand 1 "cc_register" "") (const_int 0)])
(pc)
(return)))]
- "USE_RETURN_INSN"
+ "USE_RETURN_INSN(TRUE)"
"*
{
extern int arm_ccfsm_state;
@@ -4895,7 +4928,7 @@
(plus:SI
(match_operand:SI 2 "s_register_operand" "r,r")
(match_operand:SI 3 "arm_add_operand" "rIL,rIL"))
- (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")))
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")))
(clobber (reg:CC 24))]
""
"#"
@@ -4903,32 +4936,30 @@
(set_attr "length" "8,12")])
(define_insn "*if_plus_move"
- [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r")
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r")
(if_then_else:SI
(match_operator 4 "comparison_operator"
[(match_operand 5 "cc_register" "") (const_int 0)])
(plus:SI
- (match_operand:SI 2 "s_register_operand" "r,r,r,r,r,r")
- (match_operand:SI 3 "arm_add_operand" "rI,L,rI,L,rI,L"))
- (match_operand:SI 1 "arm_rhsm_operand" "0,0,?rI,?rI,m,m")))]
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L,rI,L"))
+ (match_operand:SI 1 "arm_rhs_operand" "0,0,?rI,?rI")))]
""
"@
add%d4\\t%0, %2, %3
sub%d4\\t%0, %2, #%n3
add%d4\\t%0, %2, %3\;mov%D4\\t%0, %1
- sub%d4\\t%0, %2, #%n3\;mov%D4\\t%0, %1
- add%d4\\t%0, %2, %3\;ldr%D4\\t%0, %1
- sub%d4\\t%0, %2, #%n3\;ldr%D4\\t%0, %1"
+ sub%d4\\t%0, %2, #%n3\;mov%D4\\t%0, %1"
[(set_attr "conds" "use")
- (set_attr "length" "4,4,8,8,8,8")
- (set_attr "type" "*,*,*,*,load,load")])
+ (set_attr "length" "4,4,8,8")
+ (set_attr "type" "*,*,*,*")])
(define_insn "*ifcompare_move_plus"
[(set (match_operand:SI 0 "s_register_operand" "=r,r")
(if_then_else:SI (match_operator 6 "comparison_operator"
[(match_operand:SI 4 "s_register_operand" "r,r")
(match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
- (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")
(plus:SI
(match_operand:SI 2 "s_register_operand" "r,r")
(match_operand:SI 3 "arm_add_operand" "rIL,rIL"))))
@@ -4939,25 +4970,23 @@
(set_attr "length" "8,12")])
(define_insn "*if_move_plus"
- [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r")
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r")
(if_then_else:SI
(match_operator 4 "comparison_operator"
[(match_operand 5 "cc_register" "") (const_int 0)])
- (match_operand:SI 1 "arm_rhsm_operand" "0,0,?rI,?rI,m,m")
+ (match_operand:SI 1 "arm_rhs_operand" "0,0,?rI,?rI")
(plus:SI
- (match_operand:SI 2 "s_register_operand" "r,r,r,r,r,r")
- (match_operand:SI 3 "arm_add_operand" "rI,L,rI,L,rI,L"))))]
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L,rI,L"))))]
""
"@
add%D4\\t%0, %2, %3
sub%D4\\t%0, %2, #%n3
add%D4\\t%0, %2, %3\;mov%d4\\t%0, %1
- sub%D4\\t%0, %2, #%n3\;mov%d4\\t%0, %1
- add%D4\\t%0, %2, %3\;ldr%d4\\t%0, %1
- sub%D4\\t%0, %2, #%n3\;ldr%d4\\t%0, %1"
+ sub%D4\\t%0, %2, #%n3\;mov%d4\\t%0, %1"
[(set_attr "conds" "use")
- (set_attr "length" "4,4,8,8,8,8")
- (set_attr "type" "*,*,*,*,load,load")])
+ (set_attr "length" "4,4,8,8")
+ (set_attr "type" "*,*,*,*")])
(define_insn "*ifcompare_arith_arith"
[(set (match_operand:SI 0 "s_register_operand" "=r")
@@ -4999,7 +5028,7 @@
(match_operator:SI 7 "shiftable_operator"
[(match_operand:SI 4 "s_register_operand" "r,r")
(match_operand:SI 5 "arm_rhs_operand" "rI,rI")])
- (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")))
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")))
(clobber (reg:CC 24))]
""
"*
@@ -5025,40 +5054,34 @@
output_asm_insn (\"cmp\\t%2, %3\", operands);
output_asm_insn (\"%I7%d6\\t%0, %4, %5\", operands);
if (which_alternative != 0)
- {
- if (GET_CODE (operands[1]) == MEM)
- return \"ldr%D6\\t%0, %1\";
- else
- return \"mov%D6\\t%0, %1\";
- }
+ return \"mov%D6\\t%0, %1\";
return \"\";
"
[(set_attr "conds" "clob")
(set_attr "length" "8,12")])
(define_insn "*if_arith_move"
- [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
(if_then_else:SI (match_operator 4 "comparison_operator"
[(match_operand 6 "cc_register" "") (const_int 0)])
(match_operator:SI 5 "shiftable_operator"
- [(match_operand:SI 2 "s_register_operand" "r,r,r")
- (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI")])
- (match_operand:SI 1 "arm_rhsm_operand" "0,?rI,m")))]
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")))]
""
"@
%I5%d4\\t%0, %2, %3
- %I5%d4\\t%0, %2, %3\;mov%D4\\t%0, %1
- %I5%d4\\t%0, %2, %3\;ldr%D4\\t%0, %1"
+ %I5%d4\\t%0, %2, %3\;mov%D4\\t%0, %1"
[(set_attr "conds" "use")
- (set_attr "length" "4,8,8")
- (set_attr "type" "*,*,load")])
+ (set_attr "length" "4,8")
+ (set_attr "type" "*,*")])
(define_insn "*ifcompare_move_arith"
[(set (match_operand:SI 0 "s_register_operand" "=r,r")
(if_then_else:SI (match_operator 6 "comparison_operator"
[(match_operand:SI 4 "s_register_operand" "r,r")
(match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
- (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")
(match_operator:SI 7 "shiftable_operator"
[(match_operand:SI 2 "s_register_operand" "r,r")
(match_operand:SI 3 "arm_rhs_operand" "rI,rI")])))
@@ -5088,34 +5111,28 @@
output_asm_insn (\"cmp\\t%4, %5\", operands);
if (which_alternative != 0)
- {
- if (GET_CODE (operands[1]) == MEM)
- output_asm_insn (\"ldr%d6\\t%0, %1\", operands);
- else
- output_asm_insn (\"mov%d6\\t%0, %1\", operands);
- }
+ output_asm_insn (\"mov%d6\\t%0, %1\", operands);
return \"%I7%D6\\t%0, %2, %3\";
"
[(set_attr "conds" "clob")
(set_attr "length" "8,12")])
(define_insn "*if_move_arith"
- [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
(if_then_else:SI
(match_operator 4 "comparison_operator"
[(match_operand 6 "cc_register" "") (const_int 0)])
- (match_operand:SI 1 "arm_rhsm_operand" "0,?rI,m")
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")
(match_operator:SI 5 "shiftable_operator"
- [(match_operand:SI 2 "s_register_operand" "r,r,r")
- (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI")])))]
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])))]
""
"@
%I5%D4\\t%0, %2, %3
- %I5%D4\\t%0, %2, %3\;mov%d4\\t%0, %1
- %I5%D4\\t%0, %2, %3\;ldr%d4\\t%0, %1"
+ %I5%D4\\t%0, %2, %3\;mov%d4\\t%0, %1"
[(set_attr "conds" "use")
- (set_attr "length" "4,8,8")
- (set_attr "type" "*,*,load")])
+ (set_attr "length" "4,8")
+ (set_attr "type" "*,*")])
(define_insn "*ifcompare_move_not"
[(set (match_operand:SI 0 "s_register_operand" "=r,r")
@@ -5975,7 +5992,7 @@
(match_operand:SI 1 "general_operand" "g"))
(clobber (reg:SI 14))])
(return)]
- "(GET_CODE (operands[0]) == SYMBOL_REF && USE_RETURN_INSN
+ "(GET_CODE (operands[0]) == SYMBOL_REF && USE_RETURN_INSN(FALSE)
&& !get_frame_size () && !current_function_calls_alloca
&& !frame_pointer_needed && !current_function_args_size)"
"*
@@ -6003,7 +6020,7 @@
(match_operand:SI 2 "general_operand" "g")))
(clobber (reg:SI 14))])
(return)]
- "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN
+ "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN(FALSE)
&& !get_frame_size () && !current_function_calls_alloca
&& !frame_pointer_needed && !current_function_args_size)"
"*
@@ -6035,7 +6052,7 @@
(clobber (reg:SI 14))])
(use (match_dup 0))
(return)]
- "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN
+ "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN(FALSE)
&& !get_frame_size () && !current_function_calls_alloca
&& !frame_pointer_needed && !current_function_args_size)"
"*
diff --git a/gcc/config/arm/xm-netbsd.h b/gcc/config/arm/xm-netbsd.h
index ea9a64e..622709c 100644
--- a/gcc/config/arm/xm-netbsd.h
+++ b/gcc/config/arm/xm-netbsd.h
@@ -1,7 +1,5 @@
/* Configuration for GCC for ARM running NetBSD as host. */
-#include <arm/xm-arm.h>
-
#ifndef SYS_SIGLIST_DECLARED
#define SYS_SIGLIST_DECLARED
#endif