aboutsummaryrefslogtreecommitdiff
path: root/gcc
diff options
context:
space:
mode:
authorNick Clifton <nickc@redhat.com>2000-12-04 00:23:35 +0000
committerNick Clifton <nickc@gcc.gnu.org>2000-12-04 00:23:35 +0000
commitd19fb8e32531c558578d0fbeef2ef6b23b077be1 (patch)
tree5e7f41866ef7dde9494ac0d08c878aab7b148bbd /gcc
parentaccc8668e30178fb889abac3b3ab820ebfec258e (diff)
downloadgcc-d19fb8e32531c558578d0fbeef2ef6b23b077be1.zip
gcc-d19fb8e32531c558578d0fbeef2ef6b23b077be1.tar.gz
gcc-d19fb8e32531c558578d0fbeef2ef6b23b077be1.tar.bz2
Add support for XScale target
Add support for StrongARM target From-SVN: r37984
Diffstat (limited to 'gcc')
-rw-r--r--gcc/ChangeLog20
-rw-r--r--gcc/config.gcc40
-rw-r--r--gcc/config/arm/arm-protos.h6
-rw-r--r--gcc/config/arm/arm.c243
-rw-r--r--gcc/config/arm/arm.h154
-rw-r--r--gcc/config/arm/arm.md110
-rw-r--r--gcc/config/arm/strongarm-pe.h26
-rw-r--r--gcc/config/arm/t-strongarm-coff35
-rw-r--r--gcc/config/arm/t-strongarm-elf39
-rw-r--r--gcc/config/arm/t-strongarm-pe38
-rw-r--r--gcc/config/arm/t-xscale-coff51
-rw-r--r--gcc/config/arm/t-xscale-elf51
-rw-r--r--gcc/config/arm/xscale-coff.h35
-rw-r--r--gcc/config/arm/xscale-elf.h38
14 files changed, 811 insertions, 75 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 73ddb4c..c74983a 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,23 @@
+2000-12-03 Nick Clifton <nickc@redhat.com>
+
+ * config.gcc: Add support for StrongARM targets.
+ * config/arm/t-strongarm-elf: New file.
+ * config/arm/t-strongarm-coff: New file.
+ * config/arm/t-strongarm-pe: New file.
+ * config/arm/strongarm-pe.h: New file.
+
+2000-12-03 Nick Clifton <nickc@redhat.com>
+
+ * NEWS: Mention XScale has been added.
+ * config.gcc: Add support for XScale targets.
+ * config/arm/arm.h: Add support for XScale processor.
+ * config/arm/arm.c: Add support for XScale processor.
+ * config/arm/arm.md: Add support for XScale processor.
+ * config/arm/t-xscale-elf: New file.
+ * config/arm/t-xscale-coff: New file.
+ * config/arm/xscale-elf.h: New file.
+ * config/arm/xscale-coff.h: New file.
+
2000-12-03 Richard Henderson <rth@redhat.com>
* bb-reorder.c (reorder_basic_blocks): Don't check for EH edges
diff --git a/gcc/config.gcc b/gcc/config.gcc
index 91e2295..5d1727b 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -213,6 +213,9 @@ strongarm*-*-*)
arm*-*-*)
cpu_type=arm
;;
+xscale-*-*)
+ cpu_type=arm
+ ;;
c*-convex-*)
cpu_type=convex
;;
@@ -3114,6 +3117,28 @@ sparc64-*-linux*) # 64-bit Sparc's running GNU/Linux
fi
float_format=sparc
;;
+strongarm-*-elf*)
+ tm_file=arm/strongarm-elf.h
+ tmake_file=arm/t-strongarm-elf
+ out_file=arm/arm.c
+ xm_file=arm/xm-arm.h
+ md_file=arm/arm.md
+ ;;
+strongarm-*-coff*)
+ tm_file=arm/strongarm-coff.h
+ tmake_file=arm/t-strongarm-coff
+ out_file=arm/arm.c
+ xm_file=arm/xm-arm.h
+ md_file=arm/arm.md
+ ;;
+strongarm-*-pe)
+ tm_file=arm/strongarm-pe.h
+ tmake_file=arm/t-strongarm-pe
+ out_file=arm/arm.c
+ xm_file=arm/xm-arm.h
+ md_file=arm/arm.md
+ extra_objs=pe.o
+ ;;
thumb*-*-*)
{ echo "config.gcc: error:
*** The Thumb targets have been deprecated. The equivalent
@@ -3185,6 +3210,20 @@ we32k-att-sysv*)
xm_file="${xm_file} xm-svr3"
use_collect2=yes
;;
+xscale-*-elf)
+ tm_file=arm/xscale-elf.h
+ tmake_file=arm/t-xscale-elf
+ out_file=arm/arm.c
+ xm_file=arm/xm-arm.h
+ md_file=arm/arm.md
+ ;;
+xscale-*-coff)
+ tm_file=arm/xscale-coff.h
+ tmake_file=arm/t-xscale-coff
+ out_file=arm/arm.c
+ xm_file=arm/xm-arm.h
+ md_file=arm/arm.md
+ ;;
*)
echo "Configuration $machine not supported" 1>&2
exit 1
@@ -3280,6 +3319,7 @@ arm*-*-*)
xarm[236789] | xarm250 | xarm[67][01]0 \
| xarm7m | xarm7dm | xarm7dmi | xarm[79]tdmi \
| xarm7100 | xarm7500 | xarm7500fe | xarm810 \
+ | xxscale \
| xstrongarm | xstrongarm110 | xstrongarm1100)
target_cpu_default2="TARGET_CPU_$with_cpu"
;;
diff --git a/gcc/config/arm/arm-protos.h b/gcc/config/arm/arm-protos.h
index f205a01..d0be97c 100644
--- a/gcc/config/arm/arm-protos.h
+++ b/gcc/config/arm/arm-protos.h
@@ -197,6 +197,12 @@ extern void arm_mark_dllexport PARAMS ((tree));
extern void arm_mark_dllimport PARAMS ((tree));
#endif
+extern void arm_init_builtins PARAMS ((void));
+#if defined (TREE_CODE) && defined (RTX_CODE)
+extern rtx arm_expand_builtin PARAMS ((tree, rtx, rtx,
+ enum machine_mode, int));
+#endif
+
#ifdef _C_PRAGMA_H /* included from code that cares about pragmas */
extern void arm_pr_long_calls PARAMS ((cpp_reader *));
extern void arm_pr_no_long_calls PARAMS ((cpp_reader *));
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index 6e14350..32c772a 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -146,6 +146,8 @@ int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;
#define FL_THUMB (1 << 6) /* Thumb aware */
#define FL_LDSCHED (1 << 7) /* Load scheduling necessary */
#define FL_STRONG (1 << 8) /* StrongARM */
+#define FL_ARCH5E (1 << 9) /* El Segundo extensions to v5 */
+#define FL_XSCALE (1 << 10) /* XScale */
/* The bits in this mask specify which instructions we are
allowed to generate. */
@@ -175,6 +177,9 @@ int arm_ld_sched = 0;
/* Nonzero if this chip is a StrongARM. */
int arm_is_strong = 0;
+/* Nonzero if this chip is an XScale. */
+int arm_is_xscale = 0;
+
/* Nonzero if this chip is a an ARM6 or an ARM7. */
int arm_is_6_or_7 = 0;
@@ -269,6 +274,7 @@ static struct processors all_cores[] =
{"strongarm", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
{"strongarm110", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
{"strongarm1100", FL_MODE26 | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_LDSCHED | FL_STRONG },
+ {"xscale", FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_LDSCHED | FL_STRONG | FL_XSCALE | FL_ARCH5 },
{NULL, 0}
};
@@ -286,6 +292,8 @@ static struct processors all_architectures[] =
implementations that support it, so we will leave it out for now. */
{ "armv4t", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB },
{ "armv5", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 },
+ { "armv5t", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 },
+ { "armv5te", FL_CO_PROC | FL_MODE32 | FL_FAST_MULT | FL_ARCH4 | FL_THUMB | FL_ARCH5 | FL_ARCH5E },
{ NULL, 0 }
};
@@ -382,6 +390,7 @@ arm_override_options ()
{ TARGET_CPU_arm810, "arm810" },
{ TARGET_CPU_arm9, "arm9" },
{ TARGET_CPU_strongarm, "strongarm" },
+ { TARGET_CPU_xscale, "xscale" },
{ TARGET_CPU_generic, "arm" },
{ 0, 0 }
};
@@ -516,7 +525,13 @@ arm_override_options ()
/* warning ("ignoring -mapcs-frame because -mthumb was used."); */
target_flags &= ~ARM_FLAG_APCS_FRAME;
}
-
+
+ if (TARGET_HARD_FLOAT && (tune_flags & FL_XSCALE))
+ {
+ warning ("XScale does not support hardware FP instructions.");
+ target_flags |= ARM_FLAG_SOFT_FLOAT;
+ }
+
/* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
from here where no function is being compiled currently. */
if ((target_flags & (THUMB_FLAG_LEAF_BACKTRACE | THUMB_FLAG_BACKTRACE))
@@ -576,6 +591,7 @@ arm_override_options ()
arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
arm_is_strong = (tune_flags & FL_STRONG) != 0;
thumb_code = (TARGET_ARM == 0);
+ arm_is_xscale = (tune_flags & FL_XSCALE) != 0;
arm_is_6_or_7 = (((tune_flags & (FL_MODE26 | FL_MODE32))
&& !(tune_flags & FL_ARCH4))) != 0;
@@ -651,6 +667,9 @@ arm_override_options ()
if (optimize_size || (tune_flags & FL_LDSCHED))
arm_constant_limit = 1;
+ if (arm_is_xscale)
+ arm_constant_limit = 2;
+
/* If optimizing for size, bump the number of instructions that we
are prepared to conditionally execute (even on a StrongARM).
Otherwise for the StrongARM, which has early execution of branches,
@@ -1718,7 +1737,7 @@ arm_encode_call_attribute (decl, flag)
{
const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
int len = strlen (str);
- const char * newstr;
+ char * newstr;
if (TREE_CODE (decl) != FUNCTION_DECL)
return;
@@ -2001,7 +2020,7 @@ legitimize_pic_address (orig, mode, reg)
emit_insn (gen_pic_load_addr_arm (address, orig));
else
emit_insn (gen_pic_load_addr_thumb (address, orig));
-
+
pic_ref = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, address);
emit_move_insn (address, pic_ref);
@@ -2475,6 +2494,47 @@ arm_adjust_cost (insn, link, dep, cost)
{
rtx i_pat, d_pat;
+ /* Some true dependencies can have a higher cost depending
+ on precisely how certain input operands are used. */
+ if (arm_is_xscale
+ && REG_NOTE_KIND (link) == 0
+ && recog_memoized (insn) >= 0
+ && recog_memoized (dep) >= 0)
+ {
+ int shift_opnum = get_attr_shift (insn);
+ enum attr_type attr_type = get_attr_type (dep);
+
+ /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
+ operand for INSN. If we have a shifted input operand and the
+ instruction we depend on is another ALU instruction, then we may
+ have to account for an additional stall. */
+ if (shift_opnum != 0 && attr_type == TYPE_NORMAL)
+ {
+ rtx shifted_operand;
+ int opno;
+
+ /* Get the shifted operand. */
+ extract_insn (insn);
+ shifted_operand = recog_data.operand[shift_opnum];
+
+ /* Iterate over all the operands in DEP. If we write an operand
+ that overlaps with SHIFTED_OPERAND, then we have to increase the
+ cost of this dependency. */
+ extract_insn (dep);
+ preprocess_constraints ();
+ for (opno = 0; opno < recog_data.n_operands; opno++)
+ {
+ /* We can ignore strict inputs. */
+ if (recog_data.operand_type[opno] == OP_IN)
+ continue;
+
+ if (reg_overlap_mentioned_p (recog_data.operand[opno],
+ shifted_operand))
+ return 2;
+ }
+ }
+ }
+
/* XXX This is not strictly true for the FPA. */
if (REG_NOTE_KIND (link) == REG_DEP_ANTI
|| REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
@@ -3822,7 +3882,7 @@ arm_valid_machine_decl_attribute (decl, attr, args)
if (is_attribute_p ("naked", attr))
return TREE_CODE (decl) == FUNCTION_DECL;
-
+
#ifdef ARM_PE
if (is_attribute_p ("interfacearm", attr))
return TREE_CODE (decl) == FUNCTION_DECL;
@@ -3863,6 +3923,58 @@ arm_gen_load_multiple (base_regno, count, from, up, write_back, unchanging_p,
int sign = up ? 1 : -1;
rtx mem;
+ /* XScale has load-store double instructions, but they have stricter
+ alignment requirements than load-store multiple, so we can not
+ use them.
+
+ For XScale ldm requires 2 + NREGS cycles to complete and blocks
+ the pipeline until completion.
+
+ NREGS CYCLES
+ 1 3
+ 2 4
+ 3 5
+ 4 6
+
+ An ldr instruction takes 1-3 cycles, but does not block the
+ pipeline.
+
+ NREGS CYCLES
+ 1 1-3
+ 2 2-6
+ 3 3-9
+ 4 4-12
+
+ Best case ldr will always win. However, the more ldr instructions
+ we issue, the less likely we are to be able to schedule them well.
+ Using ldr instructions also increases code size.
+
+ As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
+ for counts of 3 or 4 regs. */
+ if (arm_is_xscale && count <= 2 && ! optimize_size)
+ {
+ rtx seq;
+
+ start_sequence ();
+
+ for (i = 0; i < count; i++)
+ {
+ mem = gen_rtx_MEM (SImode, plus_constant (from, i * 4 * sign));
+ RTX_UNCHANGING_P (mem) = unchanging_p;
+ MEM_IN_STRUCT_P (mem) = in_struct_p;
+ MEM_SCALAR_P (mem) = scalar_p;
+ emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
+ }
+
+ if (write_back)
+ emit_move_insn (from, plus_constant (from, count * 4 * sign));
+
+ seq = gen_sequence ();
+ end_sequence ();
+
+ return seq;
+ }
+
result = gen_rtx_PARALLEL (VOIDmode,
rtvec_alloc (count + (write_back ? 1 : 0)));
if (write_back)
@@ -3904,6 +4016,32 @@ arm_gen_store_multiple (base_regno, count, to, up, write_back, unchanging_p,
int sign = up ? 1 : -1;
rtx mem;
+ /* See arm_gen_load_multiple for discussion of
+ the pros/cons of ldm/stm usage for XScale. */
+ if (arm_is_xscale && count <= 2 && ! optimize_size)
+ {
+ rtx seq;
+
+ start_sequence ();
+
+ for (i = 0; i < count; i++)
+ {
+ mem = gen_rtx_MEM (SImode, plus_constant (to, i * 4 * sign));
+ RTX_UNCHANGING_P (mem) = unchanging_p;
+ MEM_IN_STRUCT_P (mem) = in_struct_p;
+ MEM_SCALAR_P (mem) = scalar_p;
+ emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
+ }
+
+ if (write_back)
+ emit_move_insn (to, plus_constant (to, count * 4 * sign));
+
+ seq = gen_sequence ();
+ end_sequence ();
+
+ return seq;
+ }
+
result = gen_rtx_PARALLEL (VOIDmode,
rtvec_alloc (count + (write_back ? 1 : 0)));
if (write_back)
@@ -4145,6 +4283,7 @@ arm_gen_rotated_half_load (memref)
If we are unable to support a dominance comparison we return CC mode.
This will then fail to match for the RTL expressions that generate this
call. */
+
static enum machine_mode
select_dominance_cc_mode (x, y, cond_or)
rtx x;
@@ -5583,7 +5722,6 @@ arm_reorg (first)
/* Scan all the insns and record the operands that will need fixing. */
for (insn = next_nonnote_insn (first); insn; insn = next_nonnote_insn (insn))
{
-
if (GET_CODE (insn) == BARRIER)
push_minipool_barrier (insn, address);
else if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN
@@ -7357,7 +7495,7 @@ arm_expand_prologue ()
rtx insn;
rtx ip_rtx;
int fp_offset = 0;
-
+
/* Naked functions don't have prologues. */
if (arm_naked_function_p (current_function_decl))
@@ -8379,6 +8517,99 @@ arm_debugger_arg_offset (value, addr)
return value;
}
+#define def_builtin(NAME, TYPE, CODE) \
+ builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL_PTR)
+
+void
+arm_init_builtins ()
+{
+ tree endlink = tree_cons (NULL_TREE, void_type_node, NULL_TREE);
+ tree int_endlink = tree_cons (NULL_TREE, integer_type_node, endlink);
+ tree pchar_type_node = build_pointer_type (char_type_node);
+
+ tree int_ftype_int, void_ftype_pchar;
+
+ /* void func (void *) */
+ void_ftype_pchar
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, pchar_type_node, endlink));
+
+ /* int func (int) */
+ int_ftype_int
+ = build_function_type (integer_type_node, int_endlink);
+
+ /* Initialize arm V5 builtins. */
+ if (arm_arch5)
+ {
+ def_builtin ("__builtin_clz", int_ftype_int, ARM_BUILTIN_CLZ);
+ def_builtin ("__builtin_prefetch", void_ftype_pchar,
+ ARM_BUILTIN_PREFETCH);
+ }
+}
+
+/* Expand an expression EXP that calls a built-in function,
+ with result going to TARGET if that's convenient
+ (and in mode MODE if that's convenient).
+ SUBTARGET may be used as the target for computing one of EXP's operands.
+ IGNORE is nonzero if the value is to be ignored. */
+
+rtx
+arm_expand_builtin (exp, target, subtarget, mode, ignore)
+ tree exp;
+ rtx target;
+ rtx subtarget ATTRIBUTE_UNUSED;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+ int ignore ATTRIBUTE_UNUSED;
+{
+ enum insn_code icode;
+ tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
+ tree arglist = TREE_OPERAND (exp, 1);
+ tree arg0;
+ rtx op0, pat;
+ enum machine_mode tmode, mode0;
+ int fcode = DECL_FUNCTION_CODE (fndecl);
+
+ switch (fcode)
+ {
+ default:
+ break;
+
+ case ARM_BUILTIN_CLZ:
+ icode = CODE_FOR_clz;
+ arg0 = TREE_VALUE (arglist);
+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ tmode = insn_data[icode].operand[0].mode;
+ mode0 = insn_data[icode].operand[1].mode;
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+ pat = GEN_FCN (icode) (target, op0);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+
+ case ARM_BUILTIN_PREFETCH:
+ icode = CODE_FOR_prefetch;
+ arg0 = TREE_VALUE (arglist);
+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+
+ op0 = gen_rtx_MEM (SImode, copy_to_mode_reg (Pmode, op0));
+
+ pat = GEN_FCN (icode) (op0);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+ }
+
+ /* @@@ Should really do something sensible here. */
+ return NULL_RTX;
+}
/* Recursively search through all of the blocks in a function
checking to see if any of the variables created in that
diff --git a/gcc/config/arm/arm.h b/gcc/config/arm/arm.h
index c1a3215..4390204 100644
--- a/gcc/config/arm/arm.h
+++ b/gcc/config/arm/arm.h
@@ -48,6 +48,7 @@ Boston, MA 02111-1307, USA. */
#define TARGET_CPU_strongarm1100 0x0040
#define TARGET_CPU_arm9 0x0080
#define TARGET_CPU_arm9tdmi 0x0080
+#define TARGET_CPU_xscale 0x0100
/* Configure didn't specify. */
#define TARGET_CPU_generic 0x8000
@@ -115,12 +116,16 @@ extern int current_function_anonymous_args;
#if TARGET_CPU_DEFAULT == TARGET_CPU_arm8 || TARGET_CPU_DEFAULT == TARGET_CPU_arm810 || TARGET_CPU_DEFAULT == TARGET_CPU_strongarm || TARGET_CPU_DEFAULT == TARGET_CPU_strongarm110 || TARGET_CPU_DEFAULT == TARGET_CPU_strongarm1100
#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_4__"
#else
+#if TARGET_CPU_DEFAULT == TARGET_CPU_xscale
+#define CPP_ARCH_DEFAULT_SPEC "-D__ARM_ARCH_5TE__ -D__XSCALE__"
+#else
Unrecognized value in TARGET_CPU_DEFAULT.
#endif
#endif
#endif
#endif
#endif
+#endif
#ifndef CPP_PREDEFINES
#define CPP_PREDEFINES "-Acpu=arm -Amachine=arm"
@@ -161,6 +166,8 @@ Unrecognized value in TARGET_CPU_DEFAULT.
%{march=strongarm:-D__ARM_ARCH_4__} \
%{march=strongarm110:-D__ARM_ARCH_4__} \
%{march=strongarm1100:-D__ARM_ARCH_4__} \
+%{march=xscale:-D__ARM_ARCH_5TE__} \
+%{march=xscale:-D__XSCALE__} \
%{march=armv2:-D__ARM_ARCH_2__} \
%{march=armv2a:-D__ARM_ARCH_2__} \
%{march=armv3:-D__ARM_ARCH_3__} \
@@ -198,6 +205,8 @@ Unrecognized value in TARGET_CPU_DEFAULT.
%{mcpu=strongarm:-D__ARM_ARCH_4__} \
%{mcpu=strongarm110:-D__ARM_ARCH_4__} \
%{mcpu=strongarm1100:-D__ARM_ARCH_4__} \
+ %{mcpu=xscale:-D__ARM_ARCH_5TE__} \
+ %{mcpu=xscale:-D__XSCALE__} \
%{!mcpu*:%(cpp_cpu_arch_default)}} \
"
@@ -560,6 +569,9 @@ extern int thumb_code;
/* Nonzero if this chip is a StrongARM. */
extern int arm_is_strong;
+/* Nonzero if this chip is an XScale. */
+extern int arm_is_xscale;
+
/* Nonzero if this chip is a an ARM6 or an ARM7. */
extern int arm_is_6_or_7;
@@ -696,9 +708,12 @@ extern int arm_is_6_or_7;
#define BIGGEST_ALIGNMENT 32
/* Make strings word-aligned so strcpy from constants will be faster. */
-#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
- (TREE_CODE (EXP) == STRING_CST \
- && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+#define CONSTANT_ALIGNMENT_FACTOR (TARGET_THUMB || ! arm_is_xscale ? 1 : 2)
+
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ ((TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD * CONSTANT_ALIGNMENT_FACTOR) \
+ ? BITS_PER_WORD * CONSTANT_ALIGNMENT_FACTOR : (ALIGN))
/* Setting STRUCTURE_SIZE_BOUNDARY to 32 produces more efficient code, but the
value set in previous versions of this toolchain was 8, which produces more
@@ -2050,63 +2065,63 @@ typedef struct
floating SYMBOL_REF to the constant pool. Allow REG-only and
AUTINC-REG if handling TImode or HImode. Other symbol refs must be
forced though a static cell to ensure addressability. */
-#define ARM_GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
-{ \
- if (ARM_BASE_REGISTER_RTX_P (X)) \
- goto LABEL; \
- else if ((GET_CODE (X) == POST_INC || GET_CODE (X) == PRE_DEC) \
- && GET_CODE (XEXP (X, 0)) == REG \
- && ARM_REG_OK_FOR_BASE_P (XEXP (X, 0))) \
- goto LABEL; \
- else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
- && (GET_CODE (X) == LABEL_REF \
- || (GET_CODE (X) == CONST \
- && GET_CODE (XEXP ((X), 0)) == PLUS \
- && GET_CODE (XEXP (XEXP ((X), 0), 0)) == LABEL_REF \
- && GET_CODE (XEXP (XEXP ((X), 0), 1)) == CONST_INT))) \
- goto LABEL; \
- else if ((MODE) == TImode) \
- ; \
- else if ((MODE) == DImode || (TARGET_SOFT_FLOAT && (MODE) == DFmode)) \
- { \
- if (GET_CODE (X) == PLUS && ARM_BASE_REGISTER_RTX_P (XEXP (X, 0)) \
- && GET_CODE (XEXP (X, 1)) == CONST_INT) \
- { \
- HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
- if (val == 4 || val == -4 || val == -8) \
- goto LABEL; \
- } \
- } \
- else if (GET_CODE (X) == PLUS) \
- { \
- rtx xop0 = XEXP (X, 0); \
- rtx xop1 = XEXP (X, 1); \
- \
- if (ARM_BASE_REGISTER_RTX_P (xop0)) \
- ARM_GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop0), xop1, LABEL); \
- else if (ARM_BASE_REGISTER_RTX_P (xop1)) \
- ARM_GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop1), xop0, LABEL); \
- } \
- /* Reload currently can't handle MINUS, so disable this for now */ \
- /* else if (GET_CODE (X) == MINUS) \
- { \
- rtx xop0 = XEXP (X,0); \
- rtx xop1 = XEXP (X,1); \
- \
- if (ARM_BASE_REGISTER_RTX_P (xop0)) \
- ARM_GO_IF_LEGITIMATE_INDEX (MODE, -1, xop1, LABEL); \
- } */ \
- else if (GET_MODE_CLASS (MODE) != MODE_FLOAT \
- && GET_CODE (X) == SYMBOL_REF \
- && CONSTANT_POOL_ADDRESS_P (X) \
- && ! (flag_pic \
- && symbol_mentioned_p (get_pool_constant (X)))) \
- goto LABEL; \
- else if ((GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_DEC) \
- && (GET_MODE_SIZE (MODE) <= 4) \
- && GET_CODE (XEXP (X, 0)) == REG \
- && ARM_REG_OK_FOR_BASE_P (XEXP (X, 0))) \
- goto LABEL; \
+#define ARM_GO_IF_LEGITIMATE_ADDRESS(MODE, X, LABEL) \
+{ \
+ if (ARM_BASE_REGISTER_RTX_P (X)) \
+ goto LABEL; \
+ else if ((GET_CODE (X) == POST_INC || GET_CODE (X) == PRE_DEC) \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && ARM_REG_OK_FOR_BASE_P (XEXP (X, 0))) \
+ goto LABEL; \
+ else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
+ && (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP ((X), 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP ((X), 0), 0)) == LABEL_REF \
+ && GET_CODE (XEXP (XEXP ((X), 0), 1)) == CONST_INT)))\
+ goto LABEL; \
+ else if ((MODE) == TImode) \
+ ; \
+ else if ((MODE) == DImode || (TARGET_SOFT_FLOAT && (MODE) == DFmode)) \
+ { \
+ if (GET_CODE (X) == PLUS && ARM_BASE_REGISTER_RTX_P (XEXP (X, 0)) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ if (val == 4 || val == -4 || val == -8) \
+ goto LABEL; \
+ } \
+ } \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ rtx xop0 = XEXP (X, 0); \
+ rtx xop1 = XEXP (X, 1); \
+ \
+ if (ARM_BASE_REGISTER_RTX_P (xop0)) \
+ ARM_GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop0), xop1, LABEL); \
+ else if (ARM_BASE_REGISTER_RTX_P (xop1)) \
+ ARM_GO_IF_LEGITIMATE_INDEX (MODE, REGNO (xop1), xop0, LABEL); \
+ } \
+ /* Reload currently can't handle MINUS, so disable this for now */ \
+ /* else if (GET_CODE (X) == MINUS) \
+ { \
+ rtx xop0 = XEXP (X,0); \
+ rtx xop1 = XEXP (X,1); \
+ \
+ if (ARM_BASE_REGISTER_RTX_P (xop0)) \
+ ARM_GO_IF_LEGITIMATE_INDEX (MODE, -1, xop1, LABEL); \
+ } */ \
+ else if (GET_MODE_CLASS (MODE) != MODE_FLOAT \
+ && GET_CODE (X) == SYMBOL_REF \
+ && CONSTANT_POOL_ADDRESS_P (X) \
+ && ! (flag_pic \
+ && symbol_mentioned_p (get_pool_constant (X)))) \
+ goto LABEL; \
+ else if ((GET_CODE (X) == PRE_INC || GET_CODE (X) == POST_DEC) \
+ && (GET_MODE_SIZE (MODE) <= 4) \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && ARM_REG_OK_FOR_BASE_P (XEXP (X, 0))) \
+ goto LABEL; \
}
/* ---------------------thumb version----------------------------------*/
@@ -2355,6 +2370,9 @@ typedef struct
in one reasonably fast instruction. */
#define MOVE_MAX 4
+#undef MOVE_RATIO
+#define MOVE_RATIO (arm_is_xscale ? 4 : 2)
+
/* Define if operations between registers always perform the operation
on the full register even if a narrower mode is specified. */
#define WORD_REGISTER_OPERATIONS
@@ -2924,4 +2942,20 @@ extern int making_const_table;
#define SPECIAL_MODE_PREDICATES \
"cc_register", "dominant_cc_register",
+enum arm_builtins
+{
+ ARM_BUILTIN_CLZ,
+ ARM_BUILTIN_PREFETCH,
+ ARM_BUILTIN_MAX
+};
+
+#define MD_INIT_BUILTINS \
+ do \
+ { \
+ arm_init_builtins (); \
+ } \
+ while (0)
+
+#define MD_EXPAND_BUILTIN(EXP, TARGET, SUBTARGET, MODE, IGNORE) \
+ arm_expand_builtin ((EXP), (TARGET), (SUBTARGET), (MODE), (IGNORE))
#endif /* __ARM_H__ */
diff --git a/gcc/config/arm/arm.md b/gcc/config/arm/arm.md
index 3cb7dfa..5971cfa 100644
--- a/gcc/config/arm/arm.md
+++ b/gcc/config/arm/arm.md
@@ -69,6 +69,11 @@
; scheduling decisions for the load unit and the multiplier.
(define_attr "is_strongarm" "no,yes" (const (symbol_ref "arm_is_strong")))
+;; Operand number of an input operand that is shifted. Zero if the
+;; given instruction does not shift one of its input operands.
+(define_attr "is_xscale" "no,yes" (const (symbol_ref "arm_is_xscale")))
+(define_attr "shift" "" (const_int 0))
+
; Floating Point Unit. If we only have floating point emulation, then there
; is no point in scheduling the floating point insns. (Well, for best
; performance we should try and group them together).
@@ -291,6 +296,18 @@
(define_function_unit "core" 1 0
(and (eq_attr "ldsched" "yes") (eq_attr "type" "load")) 2 1)
+;; We do not need to conditionalize the define_function_unit immediately
+;; above. This one will be ignored for anything other than xscale
+;; compiles and for xscale compiles it provides a larger delay
+;; and the scheduler will DTRT.
+;; FIXME: this test needs to be revamped to not depend on this feature
+;; of the scheduler.
+
+(define_function_unit "core" 1 0
+ (and (and (eq_attr "ldsched" "yes") (eq_attr "type" "load"))
+ (eq_attr "is_xscale" "yes"))
+ 3 1)
+
(define_function_unit "core" 1 0
(and (eq_attr "ldsched" "!yes") (eq_attr "type" "load,store1")) 2 2)
@@ -1121,7 +1138,7 @@
(const_int 0)))
(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
(mult:SI (match_dup 2) (match_dup 1)))]
- "TARGET_ARM"
+ "TARGET_ARM && !arm_is_xscale"
"mul%?s\\t%0, %2, %1"
[(set_attr "conds" "set")
(set_attr "type" "mult")]
@@ -1134,7 +1151,7 @@
(match_operand:SI 1 "s_register_operand" "%?r,0"))
(const_int 0)))
(clobber (match_scratch:SI 0 "=&r,&r"))]
- "TARGET_ARM"
+ "TARGET_ARM && !arm_is_xscale"
"mul%?s\\t%0, %2, %1"
[(set_attr "conds" "set")
(set_attr "type" "mult")]
@@ -1165,7 +1182,7 @@
(set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r,&r")
(plus:SI (mult:SI (match_dup 2) (match_dup 1))
(match_dup 3)))]
- "TARGET_ARM"
+ "TARGET_ARM && !arm_is_xscale"
"mla%?s\\t%0, %2, %1, %3"
[(set_attr "conds" "set")
(set_attr "type" "mult")]
@@ -1180,7 +1197,7 @@
(match_operand:SI 3 "s_register_operand" "?r,r,0,0"))
(const_int 0)))
(clobber (match_scratch:SI 0 "=&r,&r,&r,&r"))]
- "TARGET_ARM"
+ "TARGET_ARM && !arm_is_xscale"
"mla%?s\\t%0, %2, %1, %3"
[(set_attr "conds" "set")
(set_attr "type" "mult")]
@@ -1226,7 +1243,7 @@
;; Unnamed template to match long long unsigned multiply-accumlate (umlal)
(define_insn "*umulsidi3adddi"
- [(set (match_operand:DI 0 "s_register_operand" "=&r")
+ [(set (match_operand:DI 0 "s_register_operand" "+&r")
(plus:DI
(mult:DI
(zero_extend:DI (match_operand:SI 2 "s_register_operand" "%r"))
@@ -1268,6 +1285,41 @@
(set_attr "predicable" "yes")]
)
+(define_insn "mulhisi3"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "s_register_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 2 "s_register_operand" "r"))))]
+ "TARGET_ARM && arm_is_xscale"
+ "smulbb%?\\t%0,%1,%2"
+ [(set_attr "type" "mult")]
+)
+
+(define_insn "*mulhisi3addsi"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 2 "s_register_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 3 "s_register_operand" "r")))))]
+ "TARGET_ARM && arm_is_xscale"
+ "smlabb%?\\t%0,%2,%3,%1"
+ [(set_attr "type" "mult")]
+)
+
+(define_insn "*mulhidi3adddi"
+ [(set (match_operand:DI 0 "s_register_operand" "=r")
+ (plus:DI
+ (match_operand:DI 1 "s_register_operand" "0")
+ (mult:DI (sign_extend:DI
+ (match_operand:HI 2 "s_register_operand" "%r"))
+ (sign_extend:DI
+ (match_operand:HI 3 "s_register_operand" "r")))))]
+ "TARGET_ARM && arm_is_xscale"
+ "smlalbb%?\\t%Q0, %R0, %2, %3"
+[(set_attr "type" "mult")])
+
(define_insn "mulsf3"
[(set (match_operand:SF 0 "s_register_operand" "=f")
(mult:SF (match_operand:SF 1 "s_register_operand" "f")
@@ -2003,6 +2055,7 @@
"TARGET_ARM"
"bic%?\\t%0, %1, %2%S4"
[(set_attr "predicable" "yes")
+ (set_attr "shift" "2")
]
)
@@ -2503,6 +2556,7 @@
"TARGET_ARM"
"mov%?\\t%0, %1%S3"
[(set_attr "predicable" "yes")
+ (set_attr "shift" "1")
]
)
@@ -2517,6 +2571,7 @@
"TARGET_ARM"
"mov%?s\\t%0, %1%S3"
[(set_attr "conds" "set")
+ (set_attr "shift" "1")
]
)
@@ -2530,6 +2585,7 @@
"TARGET_ARM"
"mov%?s\\t%0, %1%S3"
[(set_attr "conds" "set")
+ (set_attr "shift" "1")
]
)
@@ -2541,6 +2597,7 @@
"TARGET_ARM"
"mvn%?\\t%0, %1%S3"
[(set_attr "predicable" "yes")
+ (set_attr "shift" "1")
]
)
@@ -2555,6 +2612,7 @@
"TARGET_ARM"
"mvn%?s\\t%0, %1%S3"
[(set_attr "conds" "set")
+ (set_attr "shift" "1")
]
)
@@ -2568,7 +2626,8 @@
"TARGET_ARM"
"mvn%?s\\t%0, %1%S3"
[(set_attr "conds" "set")
- ]
+ (set_attr "shift" "1")
+ ]
)
;; We don't really have extzv, but defining this using shifts helps
@@ -2713,6 +2772,7 @@
cmp\\t%0, #0\;rsblt\\t%0, %0, #0
eor%?\\t%0, %1, %1, asr #31\;sub%?\\t%0, %0, %1, asr #31"
[(set_attr "conds" "clob,*")
+ (set_attr "shift" "1")
;; predicable can't be set based on the variant, so left as no
(set_attr "length" "8")]
)
@@ -2726,6 +2786,7 @@
cmp\\t%0, #0\;rsbgt\\t%0, %0, #0
eor%?\\t%0, %1, %1, asr #31\;rsb%?\\t%0, %0, %1, asr #31"
[(set_attr "conds" "clob,*")
+ (set_attr "shift" "1")
;; predicable can't be set based on the variant, so left as no
(set_attr "length" "8")]
)
@@ -3056,6 +3117,7 @@
return \"mov%?\\t%R0, %Q0, asr #31\";
"
[(set_attr "length" "8")
+ (set_attr "shift" "1")
(set_attr "predicable" "yes")]
)
@@ -5471,6 +5533,7 @@
"TARGET_ARM"
"cmp%?\\t%0, %1%S3"
[(set_attr "conds" "set")
+ (set_attr "shift" "1")
]
)
@@ -5483,6 +5546,7 @@
"TARGET_ARM"
"cmp%?\\t%0, %1%S3"
[(set_attr "conds" "set")
+ (set_attr "shift" "1")
]
)
@@ -5495,6 +5559,7 @@
"TARGET_ARM"
"cmn%?\\t%0, %1%S3"
[(set_attr "conds" "set")
+ (set_attr "shift" "1")
]
)
@@ -6728,6 +6793,7 @@
"TARGET_ARM"
"%i1%?\\t%0, %2, %4%S3"
[(set_attr "predicable" "yes")
+ (set_attr "shift" "4")
]
)
@@ -6745,6 +6811,7 @@
"TARGET_ARM"
"%i1%?s\\t%0, %2, %4%S3"
[(set_attr "conds" "set")
+ (set_attr "shift" "4")
]
)
@@ -6760,6 +6827,7 @@
"TARGET_ARM"
"%i1%?s\\t%0, %2, %4%S3"
[(set_attr "conds" "set")
+ (set_attr "shift" "4")
]
)
@@ -6772,6 +6840,7 @@
"TARGET_ARM"
"sub%?\\t%0, %1, %3%S2"
[(set_attr "predicable" "yes")
+ (set_attr "shift" "3")
]
)
@@ -6789,6 +6858,7 @@
"TARGET_ARM"
"sub%?s\\t%0, %1, %3%S2"
[(set_attr "conds" "set")
+ (set_attr "shift" "3")
]
)
@@ -6804,6 +6874,7 @@
"TARGET_ARM"
"sub%?s\\t%0, %1, %3%S2"
[(set_attr "conds" "set")
+ (set_attr "shift" "3")
]
)
@@ -6848,12 +6919,13 @@
(plus:SI (plus:SI (match_op_dup 5 [(match_dup 3) (match_dup 4)])
(match_dup 1))
(match_dup 2)))]
- "TARGET_ARM && reload_in_progress"
+ "TARGET_ARM && reload_in_progress && !arm_is_xscale"
"*
output_add_immediate (operands);
return \"add%?s\\t%0, %0, %3%S5\";
"
[(set_attr "conds" "set")
+ (set_attr "shift" "3")
(set_attr "length" "20")]
)
@@ -6868,12 +6940,13 @@
(match_operand:SI 2 "const_int_operand" "n"))
(const_int 0)))
(clobber (match_scratch:SI 0 "=&r"))]
- "TARGET_ARM && reload_in_progress"
+ "TARGET_ARM && reload_in_progress && !arm_is_xscale"
"*
output_add_immediate (operands);
return \"add%?s\\t%0, %0, %3%S5\";
"
[(set_attr "conds" "set")
+ (set_attr "shift" "3")
(set_attr "length" "20")]
)
@@ -6908,7 +6981,7 @@
(set (match_operand:SI 0 "" "=&r")
(plus:SI (plus:SI (mult:SI (match_dup 3) (match_dup 4)) (match_dup 1))
(match_dup 2)))]
- "TARGET_ARM && reload_in_progress"
+ "TARGET_ARM && reload_in_progress && !arm_is_xscale"
"*
output_add_immediate (operands);
output_asm_insn (\"mla%?s\\t%0, %3, %4, %0\", operands);
@@ -7615,6 +7688,7 @@
mov%D5\\t%0, %1\;mov%d5\\t%0, %2%S4
mvn%D5\\t%0, #%B1\;mov%d5\\t%0, %2%S4"
[(set_attr "conds" "use")
+ (set_attr "shift" "2")
(set_attr "length" "4,8,8")]
)
@@ -7650,6 +7724,7 @@
mov%d5\\t%0, %1\;mov%D5\\t%0, %2%S4
mvn%d5\\t%0, #%B1\;mov%D5\\t%0, %2%S4"
[(set_attr "conds" "use")
+ (set_attr "shift" "2")
(set_attr "length" "4,8,8")]
)
@@ -7686,6 +7761,7 @@
"TARGET_ARM"
"mov%d5\\t%0, %1%S6\;mov%D5\\t%0, %3%S7"
[(set_attr "conds" "use")
+ (set_attr "shift" "1")
(set_attr "length" "8")]
)
@@ -8912,6 +8988,22 @@
[(set_attr "length" "2")]
)
+;; V5 Instructions,
+
+(define_insn "clz"
+ [(set (match_operand:SI 0 "s_register_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "s_register_operand" "r")] 128))]
+ "TARGET_ARM"
+ "clz\\t%0,%1")
+
+;; XScale instructions.
+
+(define_insn "prefetch"
+ [(unspec_volatile
+ [(match_operand:SI 0 "offsettable_memory_operand" "o")] 129)]
+ "TARGET_ARM"
+ "pld\\t%0")
+
;; General predication pattern
(define_cond_exec
diff --git a/gcc/config/arm/strongarm-pe.h b/gcc/config/arm/strongarm-pe.h
new file mode 100644
index 0000000..4ee08b0
--- /dev/null
+++ b/gcc/config/arm/strongarm-pe.h
@@ -0,0 +1,26 @@
+/* Definitions of target machine for GNU compiler, for ARM with PE obj format.
+ Copyright (C) 1999 Free Software Foundation, Inc.
+ Contributed by Doug Evans (dje@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "arm/strongarm-coff.h"
+#include "arm/pe.h"
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (StrongARM/PE)", stderr);
diff --git a/gcc/config/arm/t-strongarm-coff b/gcc/config/arm/t-strongarm-coff
new file mode 100644
index 0000000..269ee33
--- /dev/null
+++ b/gcc/config/arm/t-strongarm-coff
@@ -0,0 +1,35 @@
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = arm/lib1funcs.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _bb_init_func
+
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+MULTILIB_OPTIONS = mlittle-endian/mbig-endian mhard-float/msoft-float
+MULTILIB_DIRNAMES = le be fpu soft
+MULTILIB_MATCHES =
+EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# Currently there is a bug somewhere in GCC's alias analysis
+# or scheduling code that is breaking _fpmul_parts in libgcc1.c.
+# Disabling function inlining is a workaround for this problem.
+TARGET_LIBGCC2_CFLAGS = -Dinhibit_libc -fno-inline
diff --git a/gcc/config/arm/t-strongarm-elf b/gcc/config/arm/t-strongarm-elf
new file mode 100644
index 0000000..a0f009b
--- /dev/null
+++ b/gcc/config/arm/t-strongarm-elf
@@ -0,0 +1,39 @@
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = arm/lib1funcs.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _bb_init_func
+
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+MULTILIB_OPTIONS = mlittle-endian/mbig-endian mhard-float/msoft-float
+MULTILIB_DIRNAMES = le be fpu soft
+MULTILIB_EXCEPTIONS =
+MULTILIB_MATCHES = mbig-endian=mbe mlittle-endian=mle
+EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o
+
+# If EXTRA_MULTILIB_PARTS is not defined above then define EXTRA_PARTS here
+# EXTRA_PARTS = crtbegin.o crtend.o
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# Currently there is a bug somewhere in GCC's alias analysis
+# or scheduling code that is breaking _fpmul_parts in libgcc1.c.
+# Disabling function inlining is a workaround for this problem.
+TARGET_LIBGCC2_CFLAGS = -Dinhibit_libc -fno-inline
diff --git a/gcc/config/arm/t-strongarm-pe b/gcc/config/arm/t-strongarm-pe
new file mode 100644
index 0000000..e487e5d
--- /dev/null
+++ b/gcc/config/arm/t-strongarm-pe
@@ -0,0 +1,38 @@
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = arm/lib1funcs.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _bb_init_func
+
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+pe.o: $(srcdir)/config/arm/pe.c
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(srcdir)/config/arm/pe.c
+
+MULTILIB_OPTIONS = mhard-float/msoft-float
+MULTILIB_DIRNAMES = fpu soft
+MULTILIB_MATCHES =
+EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# Currently there is a bug somewhere in GCC's alias analysis
+# or scheduling code that is breaking _fpmul_parts in libgcc1.c.
+# Disabling function inlining is a workaround for this problem.
+TARGET_LIBGCC2_CFLAGS = -Dinhibit_libc -fno-inline
diff --git a/gcc/config/arm/t-xscale-coff b/gcc/config/arm/t-xscale-coff
new file mode 100644
index 0000000..ad84fad
--- /dev/null
+++ b/gcc/config/arm/t-xscale-coff
@@ -0,0 +1,51 @@
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = arm/lib1funcs.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _bb_init_func _call_via_rX _interwork_call_via_rX
+
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+MULTILIB_OPTIONS = mlittle-endian/mbig-endian
+MULTILIB_DIRNAMES = le be
+MULTILIB_EXCEPTIONS =
+MULTILIB_MATCHES = mbig-endian=mbe mlittle-endian=mle
+
+# Note XScale does not support 26 bit APCS.
+# Note XScale does not support hard FP
+
+MULTILIB_OPTIONS += mno-thumb-interwork/mthumb-interwork
+MULTILIB_DIRNAMES += normal interwork
+
+MULTILIB_OPTIONS += marm/mthumb
+MULTILIB_DIRNAMES += arm thumb
+MULTILIB_EXCEPTIONS += *mhard-float/*mthumb*
+
+# CYGNUS LOCAL nickc/redundant multilibs
+MULTILIB_REDUNDANT_DIRS = interwork/thumb=thumb
+# END CYGNUS LOCAL
+
+
+EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# Currently there is a bug somewhere in GCC's alias analysis
+# or scheduling code that is breaking _fpmul_parts in libgcc1.c.
+# Disabling function inlining is a workaround for this problem.
+TARGET_LIBGCC2_CFLAGS = -Dinhibit_libc -fno-inline
diff --git a/gcc/config/arm/t-xscale-elf b/gcc/config/arm/t-xscale-elf
new file mode 100644
index 0000000..ad84fad
--- /dev/null
+++ b/gcc/config/arm/t-xscale-elf
@@ -0,0 +1,51 @@
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = arm/lib1funcs.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _bb_init_func _call_via_rX _interwork_call_via_rX
+
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+MULTILIB_OPTIONS = mlittle-endian/mbig-endian
+MULTILIB_DIRNAMES = le be
+MULTILIB_EXCEPTIONS =
+MULTILIB_MATCHES = mbig-endian=mbe mlittle-endian=mle
+
+# Note XScale does not support 26 bit APCS.
+# Note XScale does not support hard FP
+
+MULTILIB_OPTIONS += mno-thumb-interwork/mthumb-interwork
+MULTILIB_DIRNAMES += normal interwork
+
+MULTILIB_OPTIONS += marm/mthumb
+MULTILIB_DIRNAMES += arm thumb
+MULTILIB_EXCEPTIONS += *mhard-float/*mthumb*
+
+# CYGNUS LOCAL nickc/redundant multilibs
+MULTILIB_REDUNDANT_DIRS = interwork/thumb=thumb
+# END CYGNUS LOCAL
+
+
+EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# Currently there is a bug somewhere in GCC's alias analysis
+# or scheduling code that is breaking _fpmul_parts in libgcc1.c.
+# Disabling function inlining is a workaround for this problem.
+TARGET_LIBGCC2_CFLAGS = -Dinhibit_libc -fno-inline
diff --git a/gcc/config/arm/xscale-coff.h b/gcc/config/arm/xscale-coff.h
new file mode 100644
index 0000000..8ce2e9b
--- /dev/null
+++ b/gcc/config/arm/xscale-coff.h
@@ -0,0 +1,35 @@
+/* Definitions for XScale systems using COFF
+ Copyright (C) 1999, 2000 Free Software Foundation, Inc.
+ Contributed by Catherine Moore <clm@cygnus.com>
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Run-time Target Specification. */
+#ifndef SUBTARGET_CPU_DEFAULT
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_xscale
+#endif
+
+#define SUBTARGET_EXTRA_ASM_SPEC "%{!mcpu=*:-mxscale}"
+
+#define MULTILIB_DEFAULTS \
+ { "mlittle-endian", "mno-thumb-interwork", "marm" }
+
+#include "coff.h"
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (XScale/COFF)", stderr);
diff --git a/gcc/config/arm/xscale-elf.h b/gcc/config/arm/xscale-elf.h
new file mode 100644
index 0000000..3718d11
--- /dev/null
+++ b/gcc/config/arm/xscale-elf.h
@@ -0,0 +1,38 @@
+/* Definitions for XScale architectures using ELF
+ Copyright (C) 1999, 2000 Free Software Foundation, Inc.
+ Contributed by Catherine Moore <clm@cygnus.com>
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Run-time Target Specification. */
+#ifndef TARGET_VERSION
+#define TARGET_VERSION fputs (" (XScale/ELF non-Linux)", stderr);
+#endif
+
+#ifndef SUBTARGET_CPU_DEFAULT
+#define SUBTARGET_CPU_DEFAULT TARGET_CPU_xscale
+#endif
+
+#define SUBTARGET_EXTRA_ASM_SPEC "%{!mcpu=*:-mxscale}"
+
+#ifndef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS \
+ { "mlittle-endian", "mno-thumb-interwork", "marm" }
+#endif
+
+#include "unknown-elf.h"