author     Nick Clifton <nickc@cygnus.com>     1998-03-25 10:44:07 +0000
committer  Nick Clifton <nickc@gcc.gnu.org>    1998-03-25 10:44:07 +0000
commit     e98e406fcc1d898d8d59d8a65395631e698be55e (patch)
tree       ad9018810e7eb3c6870ac03f1343c3f5d53837fb
parent     8cf619daa1ecd43d122b22621fde7f6271a9cb05 (diff)
Add support for ARM's Thumb instruction set.
From-SVN: r18822
-rw-r--r--   ChangeLog                        5
-rwxr-xr-x   config.sub                       5
-rw-r--r--   configure.in                     3
-rw-r--r--   gcc/ChangeLog                   14
-rwxr-xr-x   gcc/config.sub                   3
-rw-r--r--   gcc/config/arm/lib1thumb.asm   702
-rw-r--r--   gcc/config/arm/t-thumb          32
-rw-r--r--   gcc/config/arm/tcoff.h         192
-rw-r--r--   gcc/config/arm/thumb.c        1965
-rw-r--r--   gcc/config/arm/thumb.h        1102
-rw-r--r--   gcc/config/arm/thumb.md       1144
-rwxr-xr-x   gcc/configure                   10
-rw-r--r--   gcc/configure.in                10
13 files changed, 5185 insertions, 2 deletions
diff --git a/ChangeLog b/ChangeLog
index 1a94dad..d6817a0 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,8 @@
+Wed Mar 25 10:04:18 1998 Nick Clifton <nickc@cygnus.com>
+
+ * configure.in: Add thumb-coff target.
+ * config.sub: Add thumb-coff target.
+
Fri Mar 20 09:32:14 1998 Manfred Hollstein <manfred@s-direktnet.de>
* Makefile.in (install-gcc): Don't specify LANGUAGES here.
diff --git a/config.sub b/config.sub
index 406b445..75a0a13 100755
--- a/config.sub
+++ b/config.sub
@@ -173,6 +173,9 @@ case $basic_machine in
m88110 | m680[01234]0 | m683?2 | m68360 | z8k | v70 | h8500 | w65) # CYGNUS LOCAL
basic_machine=$basic_machine-unknown
;;
+ thumb)
+ basic_machine=$basic_machine-unknown
+ ;;
mips64vr4300 | mips64vr4300el) # CYGNUS LOCAL jsmith/vr4300
basic_machine=$basic_machine-unknown
;;
@@ -216,6 +219,8 @@ case $basic_machine in
;;
m88110-* | m680[01234]0-* | m683?2-* | m68360-* | z8k-* | h8500-* | d10v-*) # CYGNUS LOCAL
;;
+ thumb-*)
+ ;;
mips64vr4300-* | mips64vr4300el-*) # CYGNUS LOCAL jsmith/vr4300
;;
mips64vr4100-* | mips64vr4100el-*) # CYGNUS LOCAL jsmith/vr4100
diff --git a/configure.in b/configure.in
index 5580e949..21c0b4b 100644
--- a/configure.in
+++ b/configure.in
@@ -540,6 +540,9 @@ case "${target}" in
arm-*-riscix*)
noconfigdirs="$noconfigdirs ld target-libgloss"
;;
+ thumb-*-coff)
+ noconfigdirs="$noconfigdirs target-libgloss"
+ ;;
d10v-*-*)
noconfigdirs="$noconfigdirs target-librx target-libg++ target-libstdc++ target-libio target-libgloss"
;;
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 790ccf4..9e95bc2 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,17 @@
+Wed Mar 25 10:05:19 1998 Nick Clifton <nickc@cygnus.com>
+
+ * config/arm/thumb.c: New File. Support for ARM's Thumb
+ instruction set.
+ * config/arm/thumb.h: New File. Thumb definitions.
+ * config/arm/thumb.md: New File. Thumb machine description.
+ * config/arm/tcoff.h: New File. Thumb COFF support.
+ * config/arm/t-thumb: New File. Thumb makefile fragment.
+ * config/arm/lib1thumb.asm: New File. Thumb libgcc support functions.
+
+ * configure.in: Add Thumb-coff target.
+ * configure: Add Thumb-coff target.
+ * config.sub: Add Thumb-coff target.
+
Wed Mar 25 10:30:32 1998 Jim Wilson <wilson@cygnus.com>
* loop.c (scan_loop): Initialize move_insn_first to zero.
diff --git a/gcc/config.sub b/gcc/config.sub
index 446235b..dd52e87 100755
--- a/gcc/config.sub
+++ b/gcc/config.sub
@@ -159,6 +159,9 @@ case $basic_machine in
| sparc | sparclet | sparclite | sparc64 | v850)
basic_machine=$basic_machine-unknown
;;
+ thumb | thumbel)
+ basic_machine=$basic_machine-unknown
+ ;;
# We use `pc' rather than `unknown'
# because (1) that's what they normally are, and
# (2) the word "unknown" tends to confuse beginning users.
diff --git a/gcc/config/arm/lib1thumb.asm b/gcc/config/arm/lib1thumb.asm
new file mode 100644
index 0000000..8df1ae7
--- /dev/null
+++ b/gcc/config/arm/lib1thumb.asm
@@ -0,0 +1,702 @@
+@ libgcc1 routines for ARM cpu.
+@ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk)
+
+/* Copyright (C) 1995, 1996 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file with other programs, and to distribute
+those programs without any restriction coming from the use of this
+file. (The General Public License restrictions do apply in other
+respects; for example, they cover modification of the file, and
+distribution when not linked into another program.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you link this library with other files,
+ some of which are compiled with GCC, to produce an executable,
+ this library does not by itself cause the resulting executable
+ to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License. */
+
+ .code 16
+
+#ifndef __USER_LABEL_PREFIX__
+#error __USER_LABEL_PREFIX__ not defined
+#endif
+
+#define RET mov pc, lr
+
+/* ANSI concatenation macros. */
+
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+/* Use the right prefix for global labels. */
+
+#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
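+
+@ For example, with a user label prefix of "_" (as tcoff.h sets for
+@ this target), SYM (__udivsi3) expands to ___udivsi3; with an empty
+@ prefix it stays __udivsi3.  The two-level CONCAT is needed so that
+@ __USER_LABEL_PREFIX__ is expanded before the ## pasting happens.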
+
+work .req r4 @ XXXX is this safe ?
+
+#ifdef L_udivsi3
+
+dividend .req r0
+divisor .req r1
+result .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+
+ .text
+ .globl SYM (__udivsi3)
+ .align 0
+ .thumb_func
+SYM (__udivsi3):
+ cmp divisor, #0
+ beq Ldiv0
+ mov curbit, #1
+ mov result, #0
+
+ push { work }
+ cmp dividend, divisor
+ bcc Lgot_result
+
+ @ Load the constant 0x10000000 into our work register
+ mov work, #1
+ lsl work, #28
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, work
+ bcs Lbignum
+ cmp divisor, dividend
+ bcs Lbignum
+ lsl divisor, #4
+ lsl curbit, #4
+ b Loop1
+
+Lbignum:
+ @ Set work to 0x80000000
+ lsl work, #3
+Loop2:
+ @ For very big divisors, we must shift it a bit at a time, or
+ @ we will be in danger of overflowing.
+ cmp divisor, work
+ bcs Loop3
+ cmp divisor, dividend
+ bcs Loop3
+ lsl divisor, #1
+ lsl curbit, #1
+ b Loop2
+
+Loop3:
+ @ Test for possible subtractions, and note which bits
+ @ are done in the result. On the final pass, this may subtract
+ @ too much from the dividend, but the result will be ok, since the
+ @ "bit" will have been shifted out at the bottom.
+ cmp dividend, divisor
+ bcc Over1
+ sub dividend, dividend, divisor
+ orr result, result, curbit
+Over1:
+ lsr work, divisor, #1
+ cmp dividend, work
+ bcc Over2
+ sub dividend, dividend, work
+ lsr work, curbit, #1
+ orr result, work
+Over2:
+ lsr work, divisor, #2
+ cmp dividend, work
+ bcc Over3
+ sub dividend, dividend, work
+ lsr work, curbit, #2
+ orr result, work
+Over3:
+ lsr work, divisor, #3
+ cmp dividend, work
+ bcc Over4
+ sub dividend, dividend, work
+ lsr work, curbit, #3
+ orr result, work
+Over4:
+ cmp dividend, #0 @ Early termination?
+ beq Lgot_result
+ lsr curbit, #4 @ No, any more bits to do?
+ beq Lgot_result
+ lsr divisor, #4
+ b Loop3
+Lgot_result:
+ mov r0, result
+ pop { work }
+ RET
+
+Ldiv0:
+ push { lr }
+ bl SYM (__div0)
+ mov r0, #0 @ about as wrong as it could be
+ pop { pc }
+
+#endif /* L_udivsi3 */
+
+#ifdef L_umodsi3
+
+dividend .req r0
+divisor .req r1
+overdone .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+ .text
+ .globl SYM (__umodsi3)
+ .align 0
+ .thumb_func
+SYM (__umodsi3):
+ cmp divisor, #0
+ beq Ldiv0
+ mov curbit, #1
+ cmp dividend, divisor
+ bcs Over1
+ RET
+
+Over1:
+ @ Load the constant 0x10000000 into our work register
+ push { work }
+ mov work, #1
+ lsl work, #28
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, work
+ bcs Lbignum
+ cmp divisor, dividend
+ bcs Lbignum
+ lsl divisor, #4
+ lsl curbit, #4
+ b Loop1
+
+Lbignum:
+ @ Set work to 0x80000000
+ lsl work, #3
+Loop2:
+ @ For very big divisors, we must shift it a bit at a time, or
+ @ we will be in danger of overflowing.
+ cmp divisor, work
+ bcs Loop3
+ cmp divisor, dividend
+ bcs Loop3
+ lsl divisor, #1
+ lsl curbit, #1
+ b Loop2
+
+Loop3:
+	@ Test for possible subtractions.  On the final pass, this may
+	@ subtract too much from the dividend, so keep track of which
+	@ subtractions are done; we can fix them up afterwards.
+ mov overdone, #0
+ cmp dividend, divisor
+ bcc Over2
+ sub dividend, dividend, divisor
+Over2:
+ lsr work, divisor, #1
+ cmp dividend, work
+ bcc Over3
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #1
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over3:
+ lsr work, divisor, #2
+ cmp dividend, work
+ bcc Over4
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #2
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over4:
+ lsr work, divisor, #3
+ cmp dividend, work
+ bcc Over5
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #3
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over5:
+ mov ip, curbit
+ cmp dividend, #0 @ Early termination?
+ beq Over6
+ lsr curbit, #4 @ No, any more bits to do?
+ beq Over6
+ lsr divisor, #4
+ b Loop3
+
+Over6:
+	@ Any subtractions that we should not have done will be recorded in
+	@ the top three bits of "overdone".  Exactly which ones were not
+	@ needed is governed by the position of the bit, stored in ip.
+ @ If we terminated early, because dividend became zero,
+ @ then none of the below will match, since the bit in ip will not be
+ @ in the bottom nibble.
+
+ mov work, #0xe
+ lsl work, #28
+ and overdone, work
+ bne Over7
+ pop { work }
+ RET @ No fixups needed
+Over7:
+ mov curbit, ip
+ mov work, #3
+ ror curbit, work
+ tst overdone, curbit
+ beq Over8
+ lsr work, divisor, #3
+ add dividend, dividend, work
+Over8:
+ mov curbit, ip
+ mov work, #2
+ ror curbit, work
+ tst overdone, curbit
+ beq Over9
+ lsr work, divisor, #2
+ add dividend, dividend, work
+Over9:
+ mov curbit, ip
+ mov work, #1
+ ror curbit, work
+ tst overdone, curbit
+ beq Over10
+ lsr work, divisor, #1
+ add dividend, dividend, work
+Over10:
+ pop { work }
+ RET
+
+Ldiv0:
+ push { lr }
+ bl SYM (__div0)
+ mov r0, #0 @ about as wrong as it could be
+ pop { pc }
+
+#endif /* L_umodsi3 */
+
+#ifdef L_divsi3
+
+dividend .req r0
+divisor .req r1
+result .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+ .text
+ .globl SYM (__divsi3)
+ .align 0
+ .thumb_func
+SYM (__divsi3):
+ cmp divisor, #0
+ beq Ldiv0
+
+ push { work }
+ mov work, dividend
+ eor work, divisor @ Save the sign of the result.
+ mov ip, work
+ mov curbit, #1
+ mov result, #0
+ cmp divisor, #0
+ bpl Over1
+ neg divisor, divisor @ Loops below use unsigned.
+Over1:
+ cmp dividend, #0
+ bpl Over2
+ neg dividend, dividend
+Over2:
+ cmp dividend, divisor
+ bcc Lgot_result
+
+ mov work, #1
+ lsl work, #28
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, work
+	bcs	Lbignum
+	cmp	divisor, dividend
+	bcs	Lbignum
+ lsl divisor, #4
+ lsl curbit, #4
+ b Loop1
+
+Lbignum:
+ @ For very big divisors, we must shift it a bit at a time, or
+ @ we will be in danger of overflowing.
+ lsl work, #3
+Loop2:
+ cmp divisor, work
+	bcs	Loop3
+	cmp	divisor, dividend
+	bcs	Loop3
+ lsl divisor, #1
+ lsl curbit, #1
+ b Loop2
+
+Loop3:
+ @ Test for possible subtractions, and note which bits
+ @ are done in the result. On the final pass, this may subtract
+ @ too much from the dividend, but the result will be ok, since the
+ @ "bit" will have been shifted out at the bottom.
+ cmp dividend, divisor
+	bcc	Over3
+ sub dividend, dividend, divisor
+ orr result, result, curbit
+Over3:
+ lsr work, divisor, #1
+ cmp dividend, work
+	bcc	Over4
+ sub dividend, dividend, work
+ lsr work, curbit, #1
+ orr result, work
+Over4:
+ lsr work, divisor, #2
+ cmp dividend, work
+	bcc	Over5
+ sub dividend, dividend, work
+ lsr work, curbit, #2
+ orr result, result, work
+Over5:
+ lsr work, divisor, #3
+ cmp dividend, work
+	bcc	Over6
+ sub dividend, dividend, work
+ lsr work, curbit, #3
+ orr result, result, work
+Over6:
+ cmp dividend, #0 @ Early termination?
+	beq	Lgot_result
+	lsr	curbit,  #4		@ No, any more bits to do?
+	beq	Lgot_result
+ lsr divisor, #4
+ b Loop3
+
+Lgot_result:
+ mov r0, result
+ mov work, ip
+ cmp work, #0
+	bpl	Over7
+ neg r0, r0
+Over7:
+ pop { work }
+ RET
+
+Ldiv0:
+ push { lr }
+ bl SYM (__div0)
+ mov r0, #0 @ about as wrong as it could be
+ pop { pc }
+
+#endif /* L_divsi3 */
+
+#ifdef L_modsi3
+
+dividend .req r0
+divisor .req r1
+overdone .req r2
+curbit .req r3
+ip .req r12
+sp .req r13
+lr .req r14
+pc .req r15
+ .text
+ .globl SYM (__modsi3)
+ .align 0
+ .thumb_func
+SYM (__modsi3):
+ mov curbit, #1
+ cmp divisor, #0
+ beq Ldiv0
+	bpl	Over1
+ neg divisor, divisor @ Loops below use unsigned.
+Over1:
+ push { work }
+ @ Need to save the sign of the dividend, unfortunately, we need
+ @ ip later on. Must do this after saving the original value of
+ @ the work register, because we will pop this value off first.
+ push { dividend }
+ cmp dividend, #0
+	bpl	Over2
+ neg dividend, dividend
+Over2:
+ cmp dividend, divisor
+ bcc Lgot_result
+ mov work, #1
+ lsl work, #28
+Loop1:
+ @ Unless the divisor is very big, shift it up in multiples of
+ @ four bits, since this is the amount of unwinding in the main
+ @ division loop. Continue shifting until the divisor is
+ @ larger than the dividend.
+ cmp divisor, work
+ bcs Lbignum
+ cmp divisor, dividend
+ bcs Lbignum
+ lsl divisor, #4
+ lsl curbit, #4
+ b Loop1
+
+Lbignum:
+ @ Set work to 0x80000000
+ lsl work, #3
+Loop2:
+ @ For very big divisors, we must shift it a bit at a time, or
+ @ we will be in danger of overflowing.
+ cmp divisor, work
+ bcs Loop3
+ cmp divisor, dividend
+ bcs Loop3
+ lsl divisor, #1
+ lsl curbit, #1
+ b Loop2
+
+Loop3:
+	@ Test for possible subtractions.  On the final pass, this may
+	@ subtract too much from the dividend, so keep track of which
+	@ subtractions are done; we can fix them up afterwards.
+ mov overdone, #0
+ cmp dividend, divisor
+ bcc Over3
+ sub dividend, dividend, divisor
+Over3:
+ lsr work, divisor, #1
+ cmp dividend, work
+ bcc Over4
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #1
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over4:
+ lsr work, divisor, #2
+ cmp dividend, work
+ bcc Over5
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #2
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over5:
+ lsr work, divisor, #3
+ cmp dividend, work
+ bcc Over6
+ sub dividend, dividend, work
+ mov ip, curbit
+ mov work, #3
+ ror curbit, work
+ orr overdone, curbit
+ mov curbit, ip
+Over6:
+ mov ip, curbit
+ cmp dividend, #0 @ Early termination?
+ beq Over7
+ lsr curbit, #4 @ No, any more bits to do?
+ beq Over7
+ lsr divisor, #4
+ b Loop3
+
+Over7:
+	@ Any subtractions that we should not have done will be recorded in
+	@ the top three bits of "overdone".  Exactly which ones were not
+	@ needed is governed by the position of the bit, stored in ip.
+ @ If we terminated early, because dividend became zero,
+ @ then none of the below will match, since the bit in ip will not be
+ @ in the bottom nibble.
+ mov work, #0xe
+ lsl work, #28
+ and overdone, work
+ beq Lgot_result
+
+ mov curbit, ip
+ mov work, #3
+ ror curbit, work
+ tst overdone, curbit
+ beq Over8
+ lsr work, divisor, #3
+ add dividend, dividend, work
+Over8:
+ mov curbit, ip
+ mov work, #2
+ ror curbit, work
+ tst overdone, curbit
+ beq Over9
+ lsr work, divisor, #2
+ add dividend, dividend, work
+Over9:
+ mov curbit, ip
+ mov work, #1
+ ror curbit, work
+ tst overdone, curbit
+ beq Lgot_result
+ lsr work, divisor, #1
+ add dividend, dividend, work
+Lgot_result:
+ pop { work }
+ cmp work, #0
+ bpl Over10
+ neg dividend, dividend
+Over10:
+ pop { work }
+ RET
+
+Ldiv0:
+ push { lr }
+ bl SYM (__div0)
+ mov r0, #0 @ about as wrong as it could be
+ pop { pc }
+
+#endif /* L_modsi3 */
+
+#ifdef L_dvmd_tls
+
+ .globl SYM (__div0)
+ .align 0
+ .thumb_func
+SYM (__div0):
+ RET
+
+#endif /* L_dvmd_tls */
+
+
+#ifdef L_call_via_rX
+
+/* These labels & instructions are used by the Arm/Thumb interworking code.
+ The address of function to be called is loaded into a register and then
+ one of these labels is called via a BL instruction. This puts the
+ return address into the link register with the bottom bit set, and the
+ code here switches to the correct mode before executing the function. */
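+
+@ A sketch of the expected call sequence (the label "target" is
+@ hypothetical):
+@
+@	ldr	r3, =target		@ callee address, bit 0 set for Thumb
+@	bl	SYM (_call_via_r3)	@ lr := return address, bit 0 set
+@
+@ The bx below then switches to ARM or Thumb state according to bit 0
+@ of r3, and the callee's own return comes straight back to the
+@ original caller, in Thumb state.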
+
+ .text
+ .align 0
+
+ .globl SYM (_call_via_r0)
+ .thumb_func
+SYM (_call_via_r0):
+ bx r0
+ nop
+
+ .globl SYM (_call_via_r1)
+ .thumb_func
+SYM (_call_via_r1):
+ bx r1
+ nop
+
+ .globl SYM (_call_via_r2)
+ .thumb_func
+SYM (_call_via_r2):
+ bx r2
+ nop
+
+ .globl SYM (_call_via_r3)
+ .thumb_func
+SYM (_call_via_r3):
+ bx r3
+ nop
+
+ .globl SYM (_call_via_r4)
+ .thumb_func
+SYM (_call_via_r4):
+ bx r4
+ nop
+
+ .globl SYM (_call_via_r5)
+ .thumb_func
+SYM (_call_via_r5):
+ bx r5
+ nop
+
+ .globl SYM (_call_via_r6)
+ .thumb_func
+SYM (_call_via_r6):
+ bx r6
+ nop
+
+ .globl SYM (_call_via_r7)
+ .thumb_func
+SYM (_call_via_r7):
+ bx r7
+ nop
+
+ .globl SYM (_call_via_r8)
+ .thumb_func
+SYM (_call_via_r8):
+ bx r8
+ nop
+
+ .globl SYM (_call_via_r9)
+ .thumb_func
+SYM (_call_via_r9):
+ bx r9
+ nop
+
+ .globl SYM (_call_via_sl)
+ .thumb_func
+SYM (_call_via_sl):
+ bx sl
+ nop
+
+ .globl SYM (_call_via_fp)
+ .thumb_func
+SYM (_call_via_fp):
+ bx fp
+ nop
+
+ .globl SYM (_call_via_ip)
+ .thumb_func
+SYM (_call_via_ip):
+ bx ip
+ nop
+
+ .globl SYM (_call_via_sp)
+ .thumb_func
+SYM (_call_via_sp):
+ bx sp
+ nop
+
+ .globl SYM (_call_via_lr)
+ .thumb_func
+SYM (_call_via_lr):
+ bx lr
+ nop
+
+#endif /* L_call_via_rX */
diff --git a/gcc/config/arm/t-thumb b/gcc/config/arm/t-thumb
new file mode 100644
index 0000000..1701258
--- /dev/null
+++ b/gcc/config/arm/t-thumb
@@ -0,0 +1,32 @@
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = arm/lib1thumb.asm
+LIB1ASMFUNCS = _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls _call_via_rX
+# adddi3/subdi3 added to machine description
+#LIB1ASMFUNCS = _adddi3 _subdi3 _udivsi3 _divsi3 _umodsi3 _modsi3 _dvmd_tls
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#ifndef __ARMEB__' >> fp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
+ echo '#endif' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#ifndef __ARMEB__' > dp-bit.c
+ echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
+ echo '#define FLOAT_WORD_ORDER_MISMATCH' >> dp-bit.c
+ echo '#endif' >> dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+# Avoid building a duplicate set of libraries for the default endian-ness.
+MULTILIB_OPTIONS = mlittle-endian/mbig-endian mno-thumb-interwork/mthumb-interwork
+MULTILIB_DIRNAMES = le be normal interwork
+MULTILIB_MATCHES = mbig-endian=mbe mlittle-endian=mle
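+
+# Presumably this yields multilib directories such as le (-mlittle-endian),
+# be (-mbig-endian), interwork (-mthumb-interwork), and combinations
+# like be/interwork for -mbig-endian -mthumb-interwork.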
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gcc/config/arm/tcoff.h b/gcc/config/arm/tcoff.h
new file mode 100644
index 0000000..5fabe5d
--- /dev/null
+++ b/gcc/config/arm/tcoff.h
@@ -0,0 +1,192 @@
+/* Definitions of target machine for GNU compiler,
+ for Thumb with COFF obj format.
+ Copyright (C) 1995, 1996 Free Software Foundation, Inc.
+ Derived from arm/coff.h originally by Doug Evans (dje@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "arm/thumb.h"
+
+/* Run-time Target Specification. */
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (Thumb/coff)", stderr)
+
+#define MULTILIB_DEFAULTS { "mlittle-endian" }
+
+/* Setting this to 32 produces more efficient code, but the value set in previous
+ versions of this toolchain was 8, which produces more compact structures. The
+ command line option -mstructure_size_boundary=<n> can be used to change this
+ value. */
+#undef STRUCTURE_SIZE_BOUNDARY
+#define STRUCTURE_SIZE_BOUNDARY arm_structure_size_boundary
+
+extern int arm_structure_size_boundary;
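+
+/* For example, with the boundary at 32, sizeof (struct { char c; })
+   is rounded up to 4 bytes, whereas the old value of 8 left it at 1;
+   -mstructure_size_boundary=8 restores the compact layout.  */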
+
+/* This is COFF, but prefer stabs. */
+#define SDB_DEBUGGING_INFO
+
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+#include "dbxcoff.h"
+
+/* Note - it is important that these definitions match those in semi.h for the ARM port. */
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+
+/* A C statement to output assembler commands which will identify the
+ object file as having been compiled with GNU CC (or another GNU
+ compiler). */
+#define ASM_IDENTIFY_GCC(STREAM) \
+ fprintf (STREAM, "%sgcc2_compiled.:\n", LOCAL_LABEL_PREFIX )
+
+#undef ASM_FILE_START
+#define ASM_FILE_START(STREAM) \
+do { \
+ extern char *version_string; \
+ fprintf ((STREAM), "%s Generated by gcc %s for Thumb/coff\n", \
+ ASM_COMMENT_START, version_string); \
+ fprintf ((STREAM), ASM_APP_OFF); \
+} while (0)
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+#define ASM_OUTPUT_SECTION_NAME(STREAM, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (STREAM, "\t.section %s,\"x\"\n", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (STREAM, "\t.section %s,\"\"\n", (NAME)); \
+ else \
+ fprintf (STREAM, "\t.section %s,\"w\"\n", (NAME)); \
+} while (0)
+
+/* Support the ctors/dtors and other sections. */
+
+#undef INIT_SECTION_ASM_OP
+
+/* Define this macro if jump tables (for `tablejump' insns) should be
+ output in the text section, along with the assembler instructions.
+ Otherwise, the readonly data section is used. */
+#define JUMP_TABLES_IN_TEXT_SECTION
+
+#undef READONLY_DATA_SECTION
+#define READONLY_DATA_SECTION rdata_section
+#undef RDATA_SECTION_ASM_OP
+#define RDATA_SECTION_ASM_OP "\t.section .rdata"
+
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP "\t.section .ctors,\"x\""
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP "\t.section .dtors,\"x\""
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS SUBTARGET_EXTRA_SECTIONS in_rdata, in_ctors, in_dtors
+
+#define SUBTARGET_EXTRA_SECTIONS
+
+/* A list of extra section function definitions. */
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ RDATA_SECTION_FUNCTION \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION \
+ SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define SUBTARGET_EXTRA_SECTION_FUNCTIONS
+
+#define RDATA_SECTION_FUNCTION \
+void \
+rdata_section () \
+{ \
+ if (in_section != in_rdata) \
+ { \
+ fprintf (asm_out_file, "%s\n", RDATA_SECTION_ASM_OP); \
+ in_section = in_rdata; \
+ } \
+}
+
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+/* Support the ctors/dtors sections for g++. */
+
+#define INT_ASM_OP ".word"
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#undef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(STREAM,NAME) \
+do { \
+ ctors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#undef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(STREAM,NAME) \
+do { \
+ dtors_section (); \
+ fprintf (STREAM, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, "\n"); \
+} while (0)
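+
+/* For a constructor named foo the above should emit something like:
+
+	.section .ctors,"x"
+	.word	 _foo
+
+   with the leading underscore supplied via USER_LABEL_PREFIX.  */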
+
+/* __CTOR_LIST__ and __DTOR_LIST__ must be defined by the linker script. */
+#define CTOR_LISTS_DEFINED_EXTERNALLY
+
+#undef DO_GLOBAL_CTORS_BODY
+#undef DO_GLOBAL_DTORS_BODY
+
+/* The ARM development system has atexit and doesn't have _exit,
+ so define this for now. */
+#define HAVE_ATEXIT
+
+/* The ARM development system defines __main. */
+#define NAME__MAIN "__gccmain"
+#define SYMBOL__MAIN __gccmain
diff --git a/gcc/config/arm/thumb.c b/gcc/config/arm/thumb.c
new file mode 100644
index 0000000..1a2eb18
--- /dev/null
+++ b/gcc/config/arm/thumb.c
@@ -0,0 +1,1965 @@
+/* Output routines for GCC for ARM/Thumb
+ Copyright (C) 1996 Cygnus Software Technologies Ltd
+ The basis of this contribution was generated by
+ Richard Earnshaw, Advanced RISC Machines Ltd
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <stdio.h>
+#include <string.h>
+#include "config.h"
+#include "rtl.h"
+#include "hard-reg-set.h"
+#include "regs.h"
+#include "output.h"
+#include "insn-flags.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "tree.h"
+#include "expr.h"
+
+
+int current_function_anonymous_args = 0;
+
+/* Used to parse -mstructure_size_boundary command line option. */
+char * structure_size_string = NULL;
+int arm_structure_size_boundary = 32; /* Used to be 8 */
+
+
+/* Predicates */
+int
+reload_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ int regno = true_regnum (op);
+
+ return (! CONSTANT_P (op)
+ && (regno == -1
+ || (GET_CODE (op) == REG
+ && REGNO (op) >= FIRST_PSEUDO_REGISTER)));
+}
+
+/* Return nonzero if op is suitable for the RHS of a cmp instruction. */
+int
+thumb_cmp_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return ((GET_CODE (op) == CONST_INT
+ && (unsigned HOST_WIDE_INT) (INTVAL (op)) < 256)
+ || register_operand (op, mode));
+}
+
+int
+thumb_shiftable_const (val)
+ HOST_WIDE_INT val;
+{
+ unsigned HOST_WIDE_INT x = val;
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ return 1;
+
+ return 0;
+}
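+
+/* For example, 0x00ff0000 (0xff << 16) is shiftable, but 0x101 is
+   not, since no single 8-bit field shifted left can cover both of
+   its set bits.  */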
+
+int
+thumb_trivial_epilogue ()
+{
+ int regno;
+
+ /* ??? If this function ever returns 1, we get a function without any
+ epilogue at all. It appears that the intent was to cause a "return"
+ insn to be emitted, but that does not happen. */
+ return 0;
+
+#if 0
+ if (get_frame_size ()
+ || current_function_outgoing_args_size
+ || current_function_pretend_args_size)
+ return 0;
+
+ for (regno = 8; regno < 13; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ return 0;
+
+ return 1;
+#endif
+}
+
+
+/* Routines for handling the constant pool */
+/* This is unashamedly hacked from the version in sh.c, since the problem is
+ extremely similar. */
+
+/* Thumb instructions cannot load a large constant into a register;
+   constants have to come from a pc relative load.  The reference of a pc
+   relative load instruction must be less than 1k in front of the instruction.
+ This means that we often have to dump a constant inside a function, and
+ generate code to branch around it.
+
+ It is important to minimize this, since the branches will slow things
+ down and make things bigger.
+
+ Worst case code looks like:
+
+ ldr rn, L1
+ b L2
+ align
+ L1: .long value
+ L2:
+ ..
+
+ ldr rn, L3
+ b L4
+ align
+ L3: .long value
+ L4:
+ ..
+
+ We fix this by performing a scan before scheduling, which notices which
+ instructions need to have their operands fetched from the constant table
+ and builds the table.
+
+
+ The algorithm is:
+
+   Scan to find an instruction which needs a pcrel move.  Look forward and
+   find the last barrier which is within MAX_COUNT bytes of the requirement;
+   if there isn't one, make one.  Process all the instructions between
+   the find and the barrier.
+
+ In the above example, we can tell that L3 is within 1k of L1, so
+ the first move can be shrunk from the 2 insn+constant sequence into
+ just 1 insn, and the constant moved to L3 to make:
+
+ ldr rn, L1
+ ..
+ ldr rn, L3
+ b L4
+ align
+ L1: .long value
+ L3: .long value
+ L4:
+
+ Then the second move becomes the target for the shortening process.
+
+ */
+
+typedef struct
+{
+ rtx value; /* Value in table */
+ HOST_WIDE_INT next_offset;
+ enum machine_mode mode; /* Mode of value */
+} pool_node;
+
+/* The maximum number of constants that can fit into one pool, since
+ the pc relative range is 0...1020 bytes and constants are at least 4
+ bytes long */
+
+#define MAX_POOL_SIZE (1020/4)
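+
+/* 1020 reachable bytes divided by 4 bytes per entry gives at most
+   255 pool entries: the Thumb pc-relative load encodes an 8-bit
+   word offset, hence the 0...1020 byte range above.  */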
+static pool_node pool_vector[MAX_POOL_SIZE];
+static int pool_size;
+static rtx pool_vector_label;
+
+/* Add a constant to the pool and return its label. */
+
+static HOST_WIDE_INT
+add_constant (x, mode)
+ rtx x;
+ enum machine_mode mode;
+{
+ int i;
+ rtx lab;
+ HOST_WIDE_INT offset;
+
+ if (mode == SImode && GET_CODE (x) == MEM && CONSTANT_P (XEXP (x, 0))
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
+ x = get_pool_constant (XEXP (x, 0));
+
+ /* First see if we've already got it */
+
+ for (i = 0; i < pool_size; i++)
+ {
+ if (x->code == pool_vector[i].value->code
+ && mode == pool_vector[i].mode)
+ {
+ if (x->code == CODE_LABEL)
+ {
+ if (XINT (x, 3) != XINT (pool_vector[i].value, 3))
+ continue;
+ }
+ if (rtx_equal_p (x, pool_vector[i].value))
+ return pool_vector[i].next_offset - GET_MODE_SIZE (mode);
+ }
+ }
+
+ /* Need a new one */
+
+ pool_vector[pool_size].next_offset = GET_MODE_SIZE (mode);
+ offset = 0;
+ if (pool_size == 0)
+ pool_vector_label = gen_label_rtx ();
+ else
+ pool_vector[pool_size].next_offset
+ += (offset = pool_vector[pool_size - 1].next_offset);
+
+ pool_vector[pool_size].value = x;
+ pool_vector[pool_size].mode = mode;
+ pool_size++;
+ return offset;
+}
+
+/* Output the literal table */
+
+static void
+dump_table (scan)
+ rtx scan;
+{
+ int i;
+
+ scan = emit_label_after (gen_label_rtx (), scan);
+ scan = emit_insn_after (gen_align_4 (), scan);
+ scan = emit_label_after (pool_vector_label, scan);
+
+ for (i = 0; i < pool_size; i++)
+ {
+ pool_node *p = pool_vector + i;
+
+ switch (GET_MODE_SIZE (p->mode))
+ {
+ case 4:
+ scan = emit_insn_after (gen_consttable_4 (p->value), scan);
+ break;
+
+ case 8:
+ scan = emit_insn_after (gen_consttable_8 (p->value), scan);
+ break;
+
+ default:
+ abort ();
+ break;
+ }
+ }
+
+ scan = emit_insn_after (gen_consttable_end (), scan);
+ scan = emit_barrier_after (scan);
+ pool_size = 0;
+}
+
+/* Nonzero if the src operand needs to be fixed up.  */
+static int
+fixit (src, mode)
+ rtx src;
+ enum machine_mode mode;
+{
+ return ((CONSTANT_P (src)
+ && (GET_CODE (src) != CONST_INT
+ || ! (CONST_OK_FOR_LETTER_P (INTVAL (src), 'I')
+ || CONST_OK_FOR_LETTER_P (INTVAL (src), 'J')
+ || (mode != DImode
+ && CONST_OK_FOR_LETTER_P (INTVAL (src), 'K')))))
+ || (mode == SImode && GET_CODE (src) == MEM
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (src, 0))));
+}
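+
+/* Presumably 'I', 'J' and 'K' cover the immediates that the Thumb
+   mov/add/sub encodings can build directly, so e.g. a move of 255
+   is left alone while a 32-bit address or 0x12345 is rewritten to
+   load from the literal pool.  */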
+
+/* Find the last barrier less than MAX_COUNT bytes from FROM, or create one. */
+
+#define MAX_COUNT_SI 1000
+
+static rtx
+find_barrier (from)
+ rtx from;
+{
+ int count = 0;
+ rtx found_barrier = 0;
+ rtx label;
+
+ while (from && count < MAX_COUNT_SI)
+ {
+ if (GET_CODE (from) == BARRIER)
+ return from;
+
+ /* Count the length of this insn */
+ if (GET_CODE (from) == INSN
+ && GET_CODE (PATTERN (from)) == SET
+ && CONSTANT_P (SET_SRC (PATTERN (from)))
+ && CONSTANT_POOL_ADDRESS_P (SET_SRC (PATTERN (from))))
+ {
+ rtx src = SET_SRC (PATTERN (from));
+ count += 2;
+ }
+ else
+ count += get_attr_length (from);
+
+ from = NEXT_INSN (from);
+ }
+
+ /* We didn't find a barrier in time to
+ dump our stuff, so we'll make one */
+ label = gen_label_rtx ();
+
+ if (from)
+ from = PREV_INSN (from);
+ else
+ from = get_last_insn ();
+
+ /* Walk back to be just before any jump */
+ while (GET_CODE (from) == JUMP_INSN
+ || GET_CODE (from) == NOTE
+ || GET_CODE (from) == CODE_LABEL)
+ from = PREV_INSN (from);
+
+ from = emit_jump_insn_after (gen_jump (label), from);
+ JUMP_LABEL (from) = label;
+ found_barrier = emit_barrier_after (from);
+ emit_label_after (label, found_barrier);
+ return found_barrier;
+}
+
+/* Nonzero if the insn is a move instruction which needs to be fixed.  */
+
+static int
+broken_move (insn)
+ rtx insn;
+{
+ if (!INSN_DELETED_P (insn)
+ && GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SET)
+ {
+ rtx pat = PATTERN (insn);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ if (dst == pc_rtx)
+ return 0;
+ return fixit (src, mode);
+ }
+ return 0;
+}
+
+#ifdef DBX_DEBUGGING_INFO
+
+/* Recursively search through all of the blocks in a function
+ checking to see if any of the variables created in that
+ function match the RTX called 'orig'. If they do then
+ replace them with the RTX called 'new'. */
+
+static void
+replace_symbols_in_block (tree block, rtx orig, rtx new)
+{
+ for (; block; block = BLOCK_CHAIN (block))
+ {
+ tree sym;
+
+ if (! TREE_USED (block))
+ continue;
+
+ for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
+ {
+ if ( (DECL_NAME (sym) == 0 && TREE_CODE (sym) != TYPE_DECL)
+ || DECL_IGNORED_P (sym)
+ || TREE_CODE (sym) != VAR_DECL
+ || DECL_EXTERNAL (sym)
+ || ! rtx_equal_p (DECL_RTL (sym), orig)
+ )
+ continue;
+
+ DECL_RTL (sym) = new;
+ }
+
+ replace_symbols_in_block (BLOCK_SUBBLOCKS (block), orig, new);
+ }
+}
+#endif
+
+void
+thumb_reorg (first)
+ rtx first;
+{
+ rtx insn;
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ if (broken_move (insn))
+ {
+ /* This is a broken move instruction, scan ahead looking for
+ a barrier to stick the constant table behind */
+ rtx scan;
+ rtx barrier = find_barrier (insn);
+
+ /* Now find all the moves between the points and modify them */
+ for (scan = insn; scan != barrier; scan = NEXT_INSN (scan))
+ {
+ if (broken_move (scan))
+ {
+ /* This is a broken move instruction, add it to the pool */
+ rtx pat = PATTERN (scan);
+ rtx src = SET_SRC (pat);
+ rtx dst = SET_DEST (pat);
+ enum machine_mode mode = GET_MODE (dst);
+ HOST_WIDE_INT offset;
+ rtx newinsn;
+ rtx newsrc;
+
+ /* If this is an HImode constant load, convert it into
+ an SImode constant load. Since the register is always
+ 32 bits this is safe. We have to do this, since the
+ load pc-relative instruction only does a 32-bit load. */
+ if (mode == HImode)
+ {
+ mode = SImode;
+ if (GET_CODE (dst) != REG)
+ abort ();
+ PUT_MODE (dst, SImode);
+ }
+
+ offset = add_constant (src, mode);
+ newsrc = gen_rtx (MEM, mode,
+ plus_constant (gen_rtx (LABEL_REF,
+ VOIDmode,
+ pool_vector_label),
+ offset));
+
+ /* Build a jump insn wrapper around the move instead
+ of an ordinary insn, because we want to have room for
+ the target label rtx in fld[7], which an ordinary
+ insn doesn't have. */
+ newinsn = emit_jump_insn_after (gen_rtx (SET, VOIDmode,
+ dst, newsrc), scan);
+ JUMP_LABEL (newinsn) = pool_vector_label;
+
+ /* But it's still an ordinary insn */
+ PUT_CODE (newinsn, INSN);
+
+#ifdef DBX_DEBUGGING_INFO
+ /* If debugging information is going to be emitted then we must
+	     make sure that any references to symbols which are removed by
+ the above code are also removed in the descriptions of the
+ function's variables. Failure to do this means that the
+ debugging information emitted could refer to symbols which
+	     are not emitted by output_constant_pool() because
+ mark_constant_pool() never sees them as being used. */
+
+ if (optimize > 0 /* These are the tests used in output_constant_pool() */
+ && flag_expensive_optimizations /* to decide if the constant pool will be marked. */
+ && write_symbols == DBX_DEBUG /* Only necessary if debugging info is being emitted. */
+ && GET_CODE (src) == MEM /* Only necessary for references to memory ... */
+ && GET_CODE (XEXP (src, 0)) == SYMBOL_REF) /* ... whose address is given by a symbol. */
+ {
+ replace_symbols_in_block (DECL_INITIAL (current_function_decl), src, newsrc);
+ }
+#endif
+
+ /* Kill old insn */
+ delete_insn (scan);
+ scan = newinsn;
+ }
+ }
+ dump_table (barrier);
+ }
+ }
+}
+
+
+/* Routines for generating rtl */
+
+void
+thumb_expand_movstrqi (operands)
+ rtx *operands;
+{
+ rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
+ rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+ HOST_WIDE_INT len = INTVAL (operands[2]);
+ HOST_WIDE_INT offset = 0;
+
+ while (len >= 12)
+ {
+ emit_insn (gen_movmem12b (out, in));
+ len -= 12;
+ }
+ if (len >= 8)
+ {
+ emit_insn (gen_movmem8b (out, in));
+ len -= 8;
+ }
+ if (len >= 4)
+ {
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (reg, gen_rtx (MEM, SImode, in)));
+ emit_insn (gen_movsi (gen_rtx (MEM, SImode, out), reg));
+ len -= 4;
+ offset += 4;
+ }
+ if (len >= 2)
+ {
+ rtx reg = gen_reg_rtx (HImode);
+ emit_insn (gen_movhi (reg, gen_rtx (MEM, HImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movhi (gen_rtx (MEM, HImode, plus_constant (out, offset)),
+ reg));
+ len -= 2;
+ offset += 2;
+ }
+ if (len)
+ {
+ rtx reg = gen_reg_rtx (QImode);
+ emit_insn (gen_movqi (reg, gen_rtx (MEM, QImode,
+ plus_constant (in, offset))));
+ emit_insn (gen_movqi (gen_rtx (MEM, QImode, plus_constant (out, offset)),
+ reg));
+ }
+}
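+
+/* E.g. a 15 byte copy becomes one 12-byte block move, one halfword
+   copy and one byte copy (12 + 2 + 1).  Note that OFFSET is not
+   advanced for the block moves, which suggests the movmem patterns
+   update the pointer registers themselves.  */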
+
+
+/* Routines for reloading */
+
+void
+thumb_reload_out_si (operands)
+ rtx operands;
+{
+ abort ();
+}
+
+
+/* Routines for emitting code */
+
+void
+final_prescan_insn (insn)
+ rtx insn;
+{
+ extern int *insn_addresses;
+
+ if (flag_print_asm_name)
+ fprintf (asm_out_file, "%s 0x%04x\n", ASM_COMMENT_START,
+ insn_addresses[INSN_UID (insn)]);
+}
+
+
+static void thumb_pushpop ( FILE *, int, int ); /* Forward declaration. */
+
+#ifdef __GNUC__
+inline
+#endif
+static int
+number_of_first_bit_set (mask)
+ int mask;
+{
+ int bit;
+
+ for (bit = 0;
+ (mask & (1 << bit)) == 0;
+ ++ bit)
+ continue;
+
+ return bit;
+}
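+
+/* E.g. number_of_first_bit_set (0x0c) returns 2.  The caller must
+   ensure that MASK is nonzero, otherwise the loop runs off the end
+   of the word.  */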
+
+#define ARG_1_REGISTER 0
+#define ARG_2_REGISTER 1
+#define ARG_3_REGISTER 2
+#define ARG_4_REGISTER 3
+#define WORK_REGISTER 7
+#define FRAME_POINTER 11
+#define IP_REGISTER 12
+#define STACK_POINTER STACK_POINTER_REGNUM
+#define LINK_REGISTER 14
+#define PROGRAM_COUNTER 15
+
+/* Generate code to return from a thumb function.
+ If 'reg_containing_return_addr' is -1, then the
+ address is actually on the stack, at the stack
+ pointer. */
+
+static void
+thumb_exit (f, reg_containing_return_addr)
+ FILE * f;
+ int reg_containing_return_addr;
+{
+ int regs_available_for_popping;
+ int regs_to_pop;
+ int pops_needed;
+ int reg;
+ int available;
+ int required;
+ int mode;
+ int size;
+ int restore_a4 = FALSE;
+
+ /* Compute the registers we need to pop. */
+
+ regs_to_pop = 0;
+ pops_needed = 0;
+
+ if (reg_containing_return_addr == -1)
+ {
+ regs_to_pop |= 1 << LINK_REGISTER;
+ ++ pops_needed;
+ }
+
+ if (TARGET_BACKTRACE)
+ {
+ /* Restore frame pointer and stack pointer. */
+
+ regs_to_pop |= (1 << FRAME_POINTER) | (1 << STACK_POINTER);
+ pops_needed += 2;
+ }
+
+ /* If there is nothing to pop then just emit the BX instruction and return. */
+
+ if (pops_needed == 0)
+ {
+ asm_fprintf (f, "\tbx\t%s\n", reg_names[ reg_containing_return_addr ]);
+
+ return;
+ }
+
+ /* Otherwise if we are not supporting interworking and we have not created
+ a backtrace structure then just pop the return address straight into the PC. */
+
+ else if (! TARGET_THUMB_INTERWORK && ! TARGET_BACKTRACE)
+ {
+ asm_fprintf (f, "\tpop\t{pc}\n" );
+
+ return;
+ }
+
+ /* Find out how many of the (return) argument registers we can corrupt. */
+
+ regs_available_for_popping = 0;
+
+#ifdef RTX_CODE
+  /* Where possible, deduce the registers used from the function's return
+     value.  This is more reliable than examining regs_ever_live[] because that
+ will be set if the register is ever used in the function, not just if
+ the register is used to hold a return value. */
+
+ if (current_function_return_rtx != 0)
+ {
+ mode = GET_MODE (current_function_return_rtx);
+ }
+ else
+#endif
+ {
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+ }
+
+ size = GET_MODE_SIZE (mode);
+
+ if (size == 0)
+ {
+ /* In a void function we can use any argument register.
+ In a function that returns a structure on the stack
+ we can use the second and third argument registers. */
+
+ if (mode == VOIDmode)
+ regs_available_for_popping = (1 << ARG_1_REGISTER) | (1 << ARG_2_REGISTER) | (1 << ARG_3_REGISTER);
+ else
+ regs_available_for_popping = (1 << ARG_2_REGISTER) | (1 << ARG_3_REGISTER);
+ }
+ else if (size <= 4) regs_available_for_popping = (1 << ARG_2_REGISTER) | (1 << ARG_3_REGISTER);
+ else if (size <= 8) regs_available_for_popping = (1 << ARG_3_REGISTER);
+
+ /* Match registers to be popped with registers into which we pop them. */
+
+ for (available = regs_available_for_popping,
+ required = regs_to_pop;
+ required != 0 && available != 0;
+ available &= ~(available & - available),
+ required &= ~(required & - required))
+ -- pops_needed;
+
+ /* If we have any popping registers left over, remove them. */
+
+ if (available > 0)
+ regs_available_for_popping &= ~ available;
+
+ /* Otherwise if we need another popping register we can use
+ the fourth argument register. */
+
+ else if (pops_needed)
+ {
+ /* If we have not found any free argument registers and
+ reg a4 contains the return address, we must move it. */
+
+ if (regs_available_for_popping == 0 && reg_containing_return_addr == ARG_4_REGISTER)
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names[ LINK_REGISTER ],
+ reg_names[ ARG_4_REGISTER ]);
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+ else if (size > 12)
+ {
+ /* Register a4 is being used to hold part of the return value,
+ but we have dire need of a free, low register. */
+
+ restore_a4 = TRUE;
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names[ IP_REGISTER ],
+ reg_names[ ARG_4_REGISTER ]);
+ }
+
+ if (reg_containing_return_addr != ARG_4_REGISTER)
+ {
+ /* The fourth argument register is available. */
+
+ regs_available_for_popping |= 1 << ARG_4_REGISTER;
+
+ -- pops_needed;
+ }
+ }
+
+ /* Pop as many registers as we can. */
+
+ thumb_pushpop (f, regs_available_for_popping, FALSE );
+
+ /* Process the registers we popped. */
+
+ if (reg_containing_return_addr == -1)
+ {
+ /* The return address was popped into the lowest numbered register. */
+
+ regs_to_pop &= ~ (1 << LINK_REGISTER);
+
+ reg_containing_return_addr = number_of_first_bit_set (regs_available_for_popping);
+
+      /* Remove this register from the mask of available registers, so that
+         the return address will not be corrupted by further pops.  */
+
+ regs_available_for_popping &= ~ (1 << reg_containing_return_addr);
+ }
+
+ /* If we popped other registers then handle them here. */
+
+ if (regs_available_for_popping)
+ {
+ int frame_pointer;
+
+ /* Work out which register currently contains the frame pointer. */
+
+ frame_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the correct place. */
+
+ asm_fprintf (f, "\tmov\tfp, %s\n", reg_names[ frame_pointer ]);
+
+ /* (Temporarily) remove it from the mask of popped registers. */
+
+ regs_available_for_popping &= ~ (1 << frame_pointer);
+ regs_to_pop &= ~ (1 << FRAME_POINTER);
+
+ if (regs_available_for_popping)
+ {
+ int stack_pointer;
+
+	  /* We popped the stack pointer as well; find the register that contains it.  */
+
+ stack_pointer = number_of_first_bit_set (regs_available_for_popping);
+
+ /* Move it into the stack register. */
+
+ asm_fprintf (f, "\tmov\tsp, %s\n", reg_names[ stack_pointer ]);
+
+ /* At this point we have popped all necessary registers, so
+ do not worry about restoring regs_available_for_popping
+ to its correct value:
+
+ assert (pops_needed == 0)
+ assert (regs_available_for_popping == (1 << stack_frame_pointer))
+ assert (regs_to_pop == (1 << STACK_POINTER)) */
+ }
+ else
+ {
+	  /* Since we have just moved the popped value into the frame
+ pointer, the popping register is available for reuse, and
+ we know that we still have the stack pointer left to pop. */
+
+ regs_available_for_popping |= (1 << frame_pointer);
+ }
+ }
+
+ /* If we still have registers left on the stack, but we no longer
+ have any registers into which we can pop them, then we must
+ move the return address into the link register and make
+ available the register that contained it. */
+
+ if (regs_available_for_popping == 0 && pops_needed > 0)
+ {
+ regs_available_for_popping |= 1 << reg_containing_return_addr;
+
+ asm_fprintf (f, "\tmov\t%s, %s\n",
+ reg_names[ LINK_REGISTER ],
+ reg_names[ reg_containing_return_addr ]);
+
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+
+ /* If we have registers left on the stack then pop some more.
+ We know that we will only be popping one register here for
+ the following reasons:
+
+ 1. We know that at most we want to pop LR, FP and SP.
+ 2. We have already popped at least one register.
+ 3. If there were 3 registers available for popping then
+ we have already popped all three of the registers.
+ 4. If there were 2 registers available for popping then
+ we have already popped LR and FP, so there can only
+ be one register left on the stack: SP. And since we
+	   had two registers available for popping we will have
+	   left the LR in one of those registers, leaving
+	   only one register for popping the SP.
+ 5. If there was only 1 register available for popping
+ then we can only be popping one register here. */
+
+ if (pops_needed > 0)
+ {
+ int popped_into;
+ int move_to;
+
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ /* We have popped either FP or SP. Move whichever one
+ it is into the correct register. */
+
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+ move_to = number_of_first_bit_set (regs_to_pop);
+
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[ move_to ], reg_names[ popped_into ]);
+
+ regs_to_pop &= ~ (1 << move_to);
+
+ -- pops_needed;
+ }
+
+ /* If we still have not popped everything then we must have
+ only had one register available to us and we are now
+ popping the SP. */
+
+ if (pops_needed > 0)
+ {
+ int popped_into;
+
+ thumb_pushpop (f, regs_available_for_popping, FALSE);
+
+ popped_into = number_of_first_bit_set (regs_available_for_popping);
+
+ asm_fprintf (f, "\tmov\tsp, %s\n", reg_names[ popped_into ]);
+
+ /*
+ assert (regs_to_pop == (1 << STACK_POINTER))
+ assert (pops_needed == 1)
+ */
+ }
+
+ /* If necessary restore the a4 register. */
+ if (restore_a4)
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[ LINK_REGISTER ], reg_names[ ARG_4_REGISTER ]);
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[ ARG_4_REGISTER ], reg_names[ IP_REGISTER ]);
+ reg_containing_return_addr = LINK_REGISTER;
+ }
+
+ /* Return to caller. */
+
+ asm_fprintf (f, "\tbx\t%s\n", reg_names[ reg_containing_return_addr ]);
+}
+
+/* Emit code to push or pop registers to or from the stack. */
+
+static void
+thumb_pushpop (f, mask, push)
+ FILE *f;
+ int mask;
+ int push;
+{
+ int regno;
+ int lo_mask = mask & 0xFF;
+
+ if (lo_mask == 0 && ! push && (mask & (1 << 15)))
+ {
+      /* Special case.  Do not generate a POP PC statement here; do it in thumb_exit().  */
+
+ thumb_exit (f, -1);
+ return;
+ }
+
+ asm_fprintf (f, "\t%s\t{", push ? "push" : "pop");
+
+ /* Look at the low registers first. */
+
+ for (regno = 0; regno < 8; regno ++, lo_mask >>= 1)
+ {
+ if (lo_mask & 1)
+ {
+ asm_fprintf (f, reg_names[regno]);
+
+ if ((lo_mask & ~1) != 0)
+ asm_fprintf (f, ", ");
+ }
+ }
+
+ if (push && (mask & (1 << 14)))
+ {
+ /* Catch pushing the LR. */
+
+ if (mask & 0xFF)
+ asm_fprintf (f, ", ");
+
+ asm_fprintf (f, "%s", reg_names[14]);
+ }
+ else if (!push && (mask & (1 << 15)))
+ {
+ /* Catch popping the PC. */
+
+ if (TARGET_THUMB_INTERWORK || TARGET_BACKTRACE)
+ {
+	  /* The PC is never popped directly; instead
+	     it is popped into r3 and then BX is used.  */
+
+ asm_fprintf (f, "}\n");
+
+ thumb_exit (f, -1);
+
+ return;
+ }
+ else
+ {
+ if (mask & 0xFF)
+ asm_fprintf (f, ", ");
+
+ asm_fprintf (f, "%s", reg_names[15]);
+ }
+ }
+
+ asm_fprintf (f, "}\n");
+}
+
+/* Returns non-zero if the current function contains a far jump */
+
+int
+far_jump_used_p (void)
+{
+ rtx insn;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ if (GET_CODE (insn) == JUMP_INSN
+ /* Ignore tablejump patterns. */
+ && GET_CODE (PATTERN (insn)) != ADDR_VEC
+ && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
+ && get_attr_far_jump (insn) == FAR_JUMP_YES)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int return_used_this_function = 0;
+
+char *
+output_return ()
+{
+ int regno;
+ int live_regs_mask = 0;
+
+ return_used_this_function = 1;
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (live_regs_mask == 0)
+ {
+ if (leaf_function_p () && ! far_jump_used_p())
+ {
+ thumb_exit (asm_out_file, 14);
+ }
+ else if (TARGET_THUMB_INTERWORK || TARGET_BACKTRACE)
+ {
+ thumb_exit (asm_out_file, -1);
+ }
+ else
+ asm_fprintf (asm_out_file, "\tpop\t{pc}\n");
+ }
+ else
+ {
+ asm_fprintf (asm_out_file, "\tpop\t{");
+
+ for (regno = 0; live_regs_mask; regno ++, live_regs_mask >>= 1)
+ if (live_regs_mask & 1)
+ {
+ asm_fprintf (asm_out_file, reg_names[regno]);
+ if (live_regs_mask & ~1)
+ asm_fprintf (asm_out_file, ", ");
+ }
+
+ if (TARGET_THUMB_INTERWORK || TARGET_BACKTRACE)
+ {
+ asm_fprintf (asm_out_file, "}\n");
+ thumb_exit (asm_out_file, -1);
+ }
+ else
+ asm_fprintf (asm_out_file, ", pc}\n");
+ }
+
+ return "";
+}
+
+void
+thumb_function_prologue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ int amount = frame_size + current_function_outgoing_args_size;
+ int live_regs_mask = 0;
+ int high_regs_pushed = 0;
+ int store_arg_regs = 0;
+ int regno;
+
+ if (current_function_anonymous_args && current_function_pretend_args_size)
+ store_arg_regs = 1;
+
+ if (current_function_pretend_args_size)
+ {
+ if (store_arg_regs)
+ {
+ asm_fprintf (f, "\tpush\t{");
+ for (regno = 4 - current_function_pretend_args_size / 4 ; regno < 4;
+ regno++)
+ asm_fprintf (f, "%s%s", reg_names[regno], regno == 3 ? "" : ", ");
+ asm_fprintf (f, "}\n");
+ }
+ else
+ asm_fprintf (f, "\tsub\t%Rsp, %Rsp, #%d\n",
+ current_function_pretend_args_size);
+ }
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (live_regs_mask || ! leaf_function_p () || far_jump_used_p())
+ live_regs_mask |= 1 << 14;
+
+ if (TARGET_BACKTRACE)
+ {
+ char * name;
+ int offset;
+ int work_register = 0;
+
+
+ /* We have been asked to create a stack backtrace structure.
+ The code looks like this:
+
+ 0 .align 2
+ 0 func:
+ 0 sub SP, #16 Reserve space for 4 registers.
+ 2 push {R7} Get a work register.
+ 4 add R7, SP, #20 Get the stack pointer before the push.
+ 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
+ 8 mov R7, PC Get hold of the start of this code plus 12.
+ 10 str R7, [SP, #16] Store it.
+ 12 mov R7, FP Get hold of the current frame pointer.
+ 14 str R7, [SP, #4] Store it.
+ 16 mov R7, LR Get hold of the current return address.
+ 18 str R7, [SP, #12] Store it.
+ 20 add R7, SP, #16 Point at the start of the backtrace structure.
+ 22 mov FP, R7 Put this value into the frame pointer. */
+
+ if ((live_regs_mask & 0xFF) == 0)
+ {
+ /* See if the a4 register is free. */
+
+ if (regs_ever_live[ 3 ] == 0)
+ work_register = 3;
+ else /* We must push a register of our own */
+ live_regs_mask |= (1 << 7);
+ }
+
+ if (work_register == 0)
+ {
+ /* Select a register from the list that will be pushed to use as our work register. */
+
+ for (work_register = 8; work_register--;)
+ if ((1 << work_register) & live_regs_mask)
+ break;
+ }
+
+ name = reg_names[ work_register ];
+
+ asm_fprintf (f, "\tsub\tsp, sp, #16\t@ Create stack backtrace structure\n");
+
+ if (live_regs_mask)
+ thumb_pushpop (f, live_regs_mask, 1);
+
+ for (offset = 0, work_register = 1 << 15; work_register; work_register >>= 1)
+ if (work_register & live_regs_mask)
+ offset += 4;
+
+ asm_fprintf (f, "\tadd\t%s, sp, #%d\n",
+ name, offset + 16 + current_function_pretend_args_size);
+
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 4);
+
+ /* Make sure that the instruction fetching the PC is in the right place
+ to calculate "start of backtrace creation code + 12". */
+
+ if (live_regs_mask)
+ {
+ asm_fprintf (f, "\tmov\t%s, pc\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 12);
+ asm_fprintf (f, "\tmov\t%s, fp\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset);
+ }
+ else
+ {
+ asm_fprintf (f, "\tmov\t%s, fp\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset);
+ asm_fprintf (f, "\tmov\t%s, pc\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 12);
+ }
+
+ asm_fprintf (f, "\tmov\t%s, lr\n", name);
+ asm_fprintf (f, "\tstr\t%s, [sp, #%d]\n", name, offset + 8);
+ asm_fprintf (f, "\tadd\t%s, sp, #%d\n", name, offset + 12);
+ asm_fprintf (f, "\tmov\tfp, %s\t\t@ Backtrace structure created\n", name);
+ }
+ else if (live_regs_mask)
+ thumb_pushpop (f, live_regs_mask, 1);
+
+ for (regno = 8; regno < 13; regno++)
+ {
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ high_regs_pushed++;
+ }
+
+ if (high_regs_pushed)
+ {
+ int pushable_regs = 0;
+ int mask = live_regs_mask & 0xff;
+ int next_hi_reg;
+
+ for (next_hi_reg = 12; next_hi_reg > 7; next_hi_reg--)
+ {
+ if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg])
+ break;
+ }
+
+ pushable_regs = mask;
+
+ if (pushable_regs == 0)
+ {
+ /* desperation time -- this probably will never happen */
+ if (regs_ever_live[3] || ! call_used_regs[3])
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[12], reg_names[3]);
+ mask = 1 << 3;
+ }
+
+ while (high_regs_pushed > 0)
+ {
+ for (regno = 7; regno >= 0; regno--)
+ {
+ if (mask & (1 << regno))
+ {
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[regno],
+ reg_names[next_hi_reg]);
+ high_regs_pushed--;
+ if (high_regs_pushed)
+ for (next_hi_reg--; next_hi_reg > 7; next_hi_reg--)
+ {
+ if (regs_ever_live[next_hi_reg]
+ && ! call_used_regs[next_hi_reg])
+ break;
+ }
+ else
+ {
+ mask &= ~ ((1 << regno) - 1);
+ break;
+ }
+ }
+ }
+ thumb_pushpop (f, mask, 1);
+ }
+
+ if (pushable_regs == 0 && (regs_ever_live[3] || ! call_used_regs[3]))
+ asm_fprintf (f, "\tmov\t%s, %s\n", reg_names[3], reg_names[12]);
+ }
+}
+
+void
+thumb_expand_prologue ()
+{
+ HOST_WIDE_INT amount = (get_frame_size ()
+ + current_function_outgoing_args_size);
+ int regno;
+ int live_regs_mask;
+
+ if (amount)
+ {
+ live_regs_mask = 0;
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ if (amount < 512)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (-amount)));
+ else
+ {
+ rtx reg, spare;
+
+ if ((live_regs_mask & 0xff) == 0) /* Very unlikely */
+ emit_insn (gen_movsi (spare = gen_rtx (REG, SImode, 12),
+ reg = gen_rtx (REG, SImode, 4)));
+ else
+ {
+ for (regno = 0; regno < 8; regno++)
+ if (live_regs_mask & (1 << regno))
+ break;
+ reg = gen_rtx (REG, SImode, regno);
+ }
+
+ emit_insn (gen_movsi (reg, GEN_INT (-amount)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+ if ((live_regs_mask & 0xff) == 0)
+ emit_insn (gen_movsi (reg, spare));
+ }
+ }
+
+ if (frame_pointer_needed)
+ {
+ if (current_function_outgoing_args_size)
+ {
+ rtx offset = GEN_INT (current_function_outgoing_args_size);
+
+ if (current_function_outgoing_args_size < 1024)
+ emit_insn (gen_addsi3 (frame_pointer_rtx, stack_pointer_rtx,
+ offset));
+ else
+ {
+ emit_insn (gen_movsi (frame_pointer_rtx, offset));
+ emit_insn (gen_addsi3 (frame_pointer_rtx, frame_pointer_rtx,
+ stack_pointer_rtx));
+ }
+ }
+ else
+ emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));
+ }
+
+ /* if (profile_flag || profile_block_flag) */
+ emit_insn (gen_blockage ());
+}
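+
+/* Illustrative note (not from the original source): a frame smaller
+   than 512 bytes is allocated above with a single subtract from sp;
+   larger frames first load the negated size into a scratch low
+   register (saving r4 in ip when no low register is free) and add
+   that register to sp. */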
+
+void
+thumb_expand_epilogue ()
+{
+ HOST_WIDE_INT amount = (get_frame_size ()
+ + current_function_outgoing_args_size);
+ int regno;
+
+ if (amount)
+ {
+ if (amount < 512)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (amount)));
+ else
+ {
+ rtx reg = gen_rtx (REG, SImode, 3); /* Always free in the epilogue */
+
+ emit_insn (gen_movsi (reg, GEN_INT (amount)));
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
+ }
+ /* if (profile_flag || profile_block_flag) */
+ emit_insn (gen_blockage ());
+ }
+}
+
+void
+thumb_function_epilogue (f, frame_size)
+ FILE *f;
+ int frame_size;
+{
+ /* ??? Probably not safe to set this here, since it assumes that a
+ function will be emitted as assembly immediately after we generate
+ RTL for it. This does not happen for inline functions. */
+ return_used_this_function = 0;
+#if 0 /* TODO : comment not really needed */
+ fprintf (f, "%s THUMB Epilogue\n", ASM_COMMENT_START);
+#endif
+}
+
+/* The bits which aren't usefully expanded as rtl. */
+char *
+thumb_unexpanded_epilogue ()
+{
+ int regno;
+ int live_regs_mask = 0;
+ int high_regs_pushed = 0;
+ int leaf_function = leaf_function_p ();
+ int had_to_push_lr;
+
+ if (return_used_this_function)
+ return "";
+
+ for (regno = 0; regno < 8; regno++)
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ live_regs_mask |= 1 << regno;
+
+ for (regno = 8; regno < 13; regno++)
+ {
+ if (regs_ever_live[regno] && ! call_used_regs[regno])
+ high_regs_pushed++;
+ }
+
+  /* The prologue may have pushed some high registers to use as
+     work registers, e.g. the testsuite file:
+ gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
+ compiles to produce:
+ push {r4, r5, r6, r7, lr}
+ mov r7, r9
+ mov r6, r8
+ push {r6, r7}
+     as part of the prologue. We have to undo that pushing here. */
+
+ if (high_regs_pushed)
+ {
+ int mask = live_regs_mask;
+ int next_hi_reg;
+ int size;
+ int mode;
+
+#ifdef RTX_CODE
+      /* See if we can deduce the registers used from the function's
+	 return value. This is more reliable than examining
+	 regs_ever_live[] because that will be set if the register is
+	 ever used in the function, not just if the register is used to
+	 hold a return value. */
+
+ if (current_function_return_rtx != 0)
+ {
+ mode = GET_MODE (current_function_return_rtx);
+ }
+ else
+#endif
+ {
+ mode = DECL_MODE (DECL_RESULT (current_function_decl));
+ }
+
+ size = GET_MODE_SIZE (mode);
+
+      /* Unless we are returning a type whose size is greater than
+	 12 bytes, register r3 is available. */
+ if (size < 13)
+ mask |= 1 << 3;
+
+ if (mask == 0)
+ {
+ /* Oh dear! We have no low registers into which we can pop high registers! */
+
+ fatal ("No low registers available for popping high registers");
+ }
+
+ for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
+ if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg])
+ break;
+
+ while (high_regs_pushed)
+ {
+ /* Find low register(s) into which the high register(s) can be popped. */
+
+ for (regno = 0; regno < 8; regno++)
+ {
+ if (mask & (1 << regno))
+ high_regs_pushed--;
+ if (high_regs_pushed == 0)
+ break;
+ }
+
+ mask &= (2 << regno) - 1; /* A noop if regno == 8 */
+
+ /* Pop the values into the low register(s). */
+
+ thumb_pushpop (asm_out_file, mask, 0);
+
+ /* Move the value(s) into the high registers. */
+
+ for (regno = 0; regno < 8; regno++)
+ {
+ if (mask & (1 << regno))
+ {
+ asm_fprintf (asm_out_file, "\tmov\t%s, %s\n",
+ reg_names[next_hi_reg], reg_names[regno]);
+ for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
+ if (regs_ever_live[next_hi_reg] &&
+ ! call_used_regs[next_hi_reg])
+ break;
+ }
+ }
+ }
+ }
+
+ had_to_push_lr = (live_regs_mask || ! leaf_function || far_jump_used_p());
+
+ if (had_to_push_lr)
+ {
+ live_regs_mask |= 1 << PROGRAM_COUNTER;
+ }
+
+  if (TARGET_BACKTRACE
+      && (live_regs_mask & 0xFF) == 0
+      && regs_ever_live[ARG_4_REGISTER] != 0)
+ {
+ /* The stack backtrace structure creation code had to
+ push R7 in order to get a work register, so we pop
+ it now. */
+
+ live_regs_mask |= (1 << WORK_REGISTER);
+ }
+
+ if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
+ {
+ /* Either no argument registers were pushed or a backtrace
+ structure was created which includes an adjusted stack
+ pointer, so just pop everything. */
+
+ if (live_regs_mask)
+ thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
+
+      /* We have either just popped the return address into the
+	 PC or it was kept in LR for the entire function. */
+
+ if (! had_to_push_lr)
+ thumb_exit (asm_out_file, LINK_REGISTER);
+ }
+ else
+ {
+ /* Pop everything but the return address. */
+
+ live_regs_mask &= ~ (1 << PROGRAM_COUNTER);
+
+ if (live_regs_mask)
+ thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
+
+ if (had_to_push_lr)
+ {
+ /* Get the return address into a temporary register. */
+
+ thumb_pushpop (asm_out_file, 1 << ARG_4_REGISTER, 0);
+ }
+
+ /* Remove the argument registers that were pushed onto the stack. */
+
+ asm_fprintf (asm_out_file, "\tadd\t%s, %s, #%d\n",
+ reg_names[STACK_POINTER],
+ reg_names[STACK_POINTER],
+ current_function_pretend_args_size);
+
+ thumb_exit (asm_out_file, had_to_push_lr ? ARG_4_REGISTER : LINK_REGISTER);
+ }
+
+ return "";
+}
+
+/* Handle the case of a double word load into a low register from
+ a computed memory address. The computed address may involve a
+ register which is overwritten by the load. */
+
+char *
+thumb_load_double_from_address (operands)
+ rtx * operands;
+{
+ rtx addr;
+ rtx base;
+ rtx offset;
+ rtx arg1;
+ rtx arg2;
+
+ if (GET_CODE (operands[0]) != REG)
+ fatal ("thumb_load_double_from_address: destination is not a register");
+
+ if (GET_CODE (operands[1]) != MEM)
+ fatal ("thumb_load_double_from_address: source is not a computed memory address");
+
+ /* Get the memory address. */
+
+ addr = XEXP (operands[1], 0);
+
+ /* Work out how the memory address is computed. */
+
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ if (REGNO (operands[0]) == REGNO (addr))
+ {
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ break;
+
+ case CONST:
+ /* Compute <address> + 4 for the high order load. */
+
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ break;
+
+ case PLUS:
+ arg1 = XEXP (addr, 0);
+ arg2 = XEXP (addr, 1);
+
+ if (CONSTANT_P (arg1))
+ base = arg2, offset = arg1;
+ else
+ base = arg1, offset = arg2;
+
+ if (GET_CODE (base) != REG)
+ fatal ("thumb_load_double_from_address: base is not a register");
+
+ /* Catch the case of <address> = <reg> + <reg> */
+
+ if (GET_CODE (offset) == REG)
+ {
+ int reg_offset = REGNO (offset);
+ int reg_base = REGNO (base);
+ int reg_dest = REGNO (operands[0]);
+
+ /* Add the base and offset registers together into the higher destination register. */
+
+	  fprintf (asm_out_file, "\tadd\t%s, %s, %s\t\t%s created by thumb_load_double_from_address\n",
+ reg_names[ reg_dest + 1 ],
+ reg_names[ reg_base ],
+ reg_names[ reg_offset ],
+ ASM_COMMENT_START);
+
+ /* Load the lower destination register from the address in the higher destination register. */
+
+	  fprintf (asm_out_file, "\tldr\t%s, [%s, #0]\t\t%s created by thumb_load_double_from_address\n",
+ reg_names[ reg_dest ],
+ reg_names[ reg_dest + 1],
+ ASM_COMMENT_START);
+
+ /* Load the higher destination register from its own address plus 4. */
+
+	  fprintf (asm_out_file, "\tldr\t%s, [%s, #4]\t\t%s created by thumb_load_double_from_address\n",
+ reg_names[ reg_dest + 1 ],
+ reg_names[ reg_dest + 1 ],
+ ASM_COMMENT_START);
+ }
+ else
+ {
+ /* Compute <address> + 4 for the high order load. */
+
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ /* If the computed address is held in the low order register
+ then load the high order register first, otherwise always
+ load the low order register first. */
+
+ if (REGNO (operands[0]) == REGNO (base))
+ {
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ else
+ {
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ }
+ }
+ break;
+
+ case LABEL_REF:
+ /* With no registers to worry about we can just load the value directly. */
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[1], 0), 4));
+
+ output_asm_insn ("ldr\t%H0, %2\t\t%@ created by thumb_load_double_from_address", operands);
+ output_asm_insn ("ldr\t%0, %1\t\t%@ created by thumb_load_double_from_address", operands);
+ break;
+
+ default:
+ debug_rtx (operands[1]);
+ fatal ("thumb_load_double_from_address: Unhandled address calculation");
+ break;
+ }
+
+ return "";
+}
+
+char *
+output_move_mem_multiple (n, operands)
+ int n;
+ rtx *operands;
+{
+ rtx tmp;
+
+ switch (n)
+ {
+ case 2:
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ output_asm_insn ("ldmia\t%1!, {%2, %3}", operands);
+ output_asm_insn ("stmia\t%0!, {%2, %3}", operands);
+ break;
+
+ case 3:
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ if (REGNO (operands[3]) > REGNO (operands[4]))
+ {
+ tmp = operands[3];
+ operands[3] = operands[4];
+ operands[4] = tmp;
+ }
+ if (REGNO (operands[2]) > REGNO (operands[3]))
+ {
+ tmp = operands[2];
+ operands[2] = operands[3];
+ operands[3] = tmp;
+ }
+ output_asm_insn ("ldmia\t%1!, {%2, %3, %4}", operands);
+ output_asm_insn ("stmia\t%0!, {%2, %3, %4}", operands);
+ break;
+
+ default:
+ abort ();
+ }
+
+ return "";
+}
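+
+/* Note (illustrative, not from the original source): ldmia/stmia
+   register lists must name registers in ascending order, which is
+   what the compare-and-swaps above guarantee (a bubble sort in the
+   three register case).  E.g. scratch registers r4, r2, r3 are
+   emitted as "ldmia r1!, {r2, r3, r4}; stmia r0!, {r2, r3, r4}". */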
+
+
+int
+thumb_epilogue_size ()
+{
+ return 42; /* The answer to .... */
+}
+
+static char *conds[] =
+{
+ "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le"
+};
+
+static char *
+thumb_condition_code (x, invert)
+ rtx x;
+ int invert;
+{
+ int val;
+
+ switch (GET_CODE (x))
+ {
+ case EQ: val = 0; break;
+ case NE: val = 1; break;
+ case GEU: val = 2; break;
+ case LTU: val = 3; break;
+ case GTU: val = 8; break;
+ case LEU: val = 9; break;
+ case GE: val = 10; break;
+ case LT: val = 11; break;
+ case GT: val = 12; break;
+ case LE: val = 13; break;
+ default:
+ abort ();
+ }
+
+ return conds[val ^ invert];
+}
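+
+/* Illustrative example (not from the original source): for a GE
+   comparison val is 10, so thumb_condition_code (x, 0) returns "ge"
+   while thumb_condition_code (x, 1) returns conds[10 ^ 1], i.e. "lt".
+   Each condition and its inverse occupy adjacent table slots, which
+   is why the XOR with invert works. */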
+
+void
+thumb_print_operand (f, x, code)
+ FILE *f;
+ rtx x;
+ int code;
+{
+ if (code)
+ {
+ switch (code)
+ {
+ case '@':
+ fputs (ASM_COMMENT_START, f);
+ return;
+
+ case 'D':
+ if (x)
+ fputs (thumb_condition_code (x, 1), f);
+ return;
+
+ case 'd':
+ if (x)
+ fputs (thumb_condition_code (x, 0), f);
+ return;
+
+ /* An explanation of the 'Q', 'R' and 'H' register operands:
+
+ In a pair of registers containing a DI or DF value the 'Q'
+ operand returns the register number of the register containing
+	 the least significant part of the value. The 'R' operand returns
+ the register number of the register containing the most
+ significant part of the value.
+
+ The 'H' operand returns the higher of the two register numbers.
+ On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
+	 same as the 'Q' operand, since the most significant part of the
+ value is held in the lower number register. The reverse is true
+ on systems where WORDS_BIG_ENDIAN is false.
+
+ The purpose of these operands is to distinguish between cases
+ where the endian-ness of the values is important (for example
+ when they are added together), and cases where the endian-ness
+ is irrelevant, but the order of register operations is important.
+ For example when loading a value from memory into a register
+ pair, the endian-ness does not matter. Provided that the value
+ from the lower memory address is put into the lower numbered
+ register, and the value from the higher address is put into the
+ higher numbered register, the load will work regardless of whether
+ the value being loaded is big-wordian or little-wordian. The
+ order of the two register loads can matter however, if the address
+ of the memory location is actually held in one of the registers
+ being overwritten by the load. */
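+
+	/* Illustrative example (not in the original comment): with
+	   WORDS_BIG_ENDIAN false and a DImode value in r2/r3, '%Q'
+	   prints r2 (the least significant word), '%R' prints r3 (the
+	   most significant word), and '%H' also prints r3, the higher
+	   numbered register of the pair. */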
+ case 'Q':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0)], f);
+ return;
+
+ case 'R':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1)], f);
+ return;
+
+ case 'H':
+ if (REGNO (x) > 15)
+ abort ();
+ fputs (reg_names[REGNO (x) + 1], f);
+ return;
+
+ default:
+ abort ();
+ }
+ }
+ if (GET_CODE (x) == REG)
+ fputs (reg_names[REGNO (x)], f);
+ else if (GET_CODE (x) == MEM)
+ output_address (XEXP (x, 0));
+ else if (GET_CODE (x) == CONST_INT)
+ {
+ fputc ('#', f);
+ output_addr_const (f, x);
+ }
+ else
+ abort ();
+}
+
+#ifdef AOF_ASSEMBLER
+int arm_text_section_count = 1;
+
+char *
+aof_text_section (in_readonly)
+ int in_readonly;
+{
+ static char buf[100];
+ if (in_readonly)
+ return "";
+ sprintf (buf, "\tCODE16\n\tAREA |C$$code%d|, CODE, READONLY",
+ arm_text_section_count++);
+ return buf;
+}
+
+static int arm_data_section_count = 1;
+
+char *
+aof_data_section ()
+{
+ static char buf[100];
+ sprintf (buf, "\tAREA |C$$data%d|, DATA", arm_data_section_count++);
+ return buf;
+}
+
+/* The AOF thumb assembler is religiously strict about declarations of
+ imported and exported symbols, so that it is impossible to declare a
+   function as imported near the beginning of the file, and then to export
+ it later on. It is, however, possible to delay the decision until all
+ the functions in the file have been compiled. To get around this, we
+ maintain a list of the imports and exports, and delete from it any that
+ are subsequently defined. At the end of compilation we spit the
+ remainder of the list out before the END directive. */
+
+struct import
+{
+ struct import *next;
+ char *name;
+};
+
+static struct import *imports_list = NULL;
+
+void
+thumb_aof_add_import (name)
+ char *name;
+{
+ struct import *new;
+
+ for (new = imports_list; new; new = new->next)
+ if (new->name == name)
+ return;
+
+ new = (struct import *) xmalloc (sizeof (struct import));
+ new->next = imports_list;
+ imports_list = new;
+ new->name = name;
+}
+
+void
+thumb_aof_delete_import (name)
+ char *name;
+{
+ struct import **old;
+
+ for (old = &imports_list; *old; old = & (*old)->next)
+ {
+ if ((*old)->name == name)
+ {
+ *old = (*old)->next;
+ return;
+ }
+ }
+}
+
+void
+thumb_aof_dump_imports (f)
+ FILE *f;
+{
+ while (imports_list)
+ {
+ fprintf (f, "\tIMPORT\t");
+ assemble_name (f, imports_list->name);
+ fputc ('\n', f);
+ imports_list = imports_list->next;
+ }
+}
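+
+/* Illustrative example (not from the original source): a file that
+   calls foo and bar but defines only bar will have foo alone left on
+   the imports list, so the loop above emits just "IMPORT foo" ahead
+   of the END directive. */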
+#endif
+
+/* Decide whether a type should be returned in memory (true)
+ or in a register (false). This is called by the macro
+ RETURN_IN_MEMORY. */
+
+int
+thumb_return_in_memory (type)
+ tree type;
+{
+ if (! AGGREGATE_TYPE_P (type))
+ {
+ /* All simple types are returned in registers. */
+
+ return 0;
+ }
+ else if (int_size_in_bytes (type) > 4)
+ {
+ /* All structures/unions bigger than one word are returned in memory. */
+
+ return 1;
+ }
+ else if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ tree field;
+
+ /* For a struct the APCS says that we must return in a register if
+ every addressable element has an offset of zero. For practical
+ purposes this means that the structure can have at most one non-
+ bit-field element and that this element must be the first one in
+ the structure. */
+
+ /* Find the first field, ignoring non FIELD_DECL things which will
+ have been created by C++. */
+ for (field = TYPE_FIELDS (type);
+ field && TREE_CODE (field) != FIELD_DECL;
+ field = TREE_CHAIN (field))
+ continue;
+
+ if (field == NULL)
+ return 0; /* An empty structure. Allowed by an extension to ANSI C. */
+
+ /* Now check the remaining fields, if any. */
+ for (field = TREE_CHAIN (field); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (! DECL_BIT_FIELD_TYPE (field))
+ return 1;
+ }
+
+ return 0;
+ }
+ else if (TREE_CODE (type) == UNION_TYPE)
+ {
+ tree field;
+
+ /* Unions can be returned in registers if every element is
+ integral, or can be returned in an integer register. */
+
+ for (field = TYPE_FIELDS (type);
+ field;
+ field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (RETURN_IN_MEMORY (TREE_TYPE (field)))
+ return 1;
+ }
+
+ return 0;
+ }
+ /* XXX Not sure what should be done for other aggregates, so put them in
+ memory. */
+ return 1;
+}
+
+void
+thumb_override_options ()
+{
+ if (structure_size_string != NULL)
+ {
+ int size = strtol (structure_size_string, NULL, 0);
+
+ if (size == 8 || size == 32)
+ arm_structure_size_boundary = size;
+ else
+ warning ("Structure size boundary can only be set to 8 or 32");
+ }
+}
diff --git a/gcc/config/arm/thumb.h b/gcc/config/arm/thumb.h
new file mode 100644
index 0000000..6121866
--- /dev/null
+++ b/gcc/config/arm/thumb.h
@@ -0,0 +1,1102 @@
+/* Definitions of target machine for GNU compiler, for ARM/Thumb.
+   Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+ The basis of this contribution was generated by
+ Richard Earnshaw, Advanced RISC Machines Ltd
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* ??? The files thumb.{c,h,md} are all seriously lacking comments. */
+
+/* ??? The files thumb.{c,h,md} need to be reviewed by an experienced
+ gcc hacker in their entirety. */
+
+/* ??? The files thumb.{c,h,md} and tcoff.h are all separate from the arm
+ files, which will lead to many maintenance problems. These files are
+ likely missing all bug fixes made to the arm port since they diverged. */
+
+/* ??? Many patterns in the md file accept operands that will require a
+ reload. These should be eliminated if possible by tightening the
+ predicates and/or constraints. This will give faster/smaller code. */
+
+/* ??? There is no pattern for the TST instruction.  Check for other unsupported
+ instructions. */
+
+/* Run Time Target Specifications */
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dthumb -D__thumb -Acpu(arm) -Amachine(arm)"
+#endif
+
+#ifndef CPP_SPEC
+#define CPP_SPEC "\
+%{mbig-endian:-D__ARMEB__ -D__THUMBEB__} \
+%{mbe:-D__ARMEB__ -D__THUMBEB__} \
+%{!mbe: %{!mbig-endian:-D__ARMEL__ -D__THUMBEL__}} \
+"
+#endif
+
+#define ASM_SPEC "-marm7tdmi %{mthumb-interwork:-mthumb-interwork} %{mbig-endian:-EB}"
+#define LINK_SPEC "%{mbig-endian:-EB} -X"
+
+#define TARGET_VERSION fputs (" (ARM/THUMB:generic)", stderr);
+
+/* Nonzero if we should compile with BYTES_BIG_ENDIAN set to 1. */
+#define THUMB_FLAG_BIG_END (0x0001)
+#define THUMB_FLAG_BACKTRACE (0x0002)
+#define THUMB_FLAG_LEAF_BACKTRACE (0x0004)
+#define ARM_FLAG_THUMB (0x1000) /* same as in arm.h */
+
+/* Run-time compilation parameters selecting different hardware/software subsets. */
+extern int target_flags;
+#define TARGET_DEFAULT 0 /* ARM_FLAG_THUMB */
+#define TARGET_BIG_END (target_flags & THUMB_FLAG_BIG_END)
+#define TARGET_THUMB_INTERWORK (target_flags & ARM_FLAG_THUMB)
+#define TARGET_BACKTRACE (leaf_function_p() \
+ ? (target_flags & THUMB_FLAG_LEAF_BACKTRACE) \
+ : (target_flags & THUMB_FLAG_BACKTRACE))
+
+#define TARGET_SWITCHES \
+{ \
+ {"big-endian", THUMB_FLAG_BIG_END}, \
+ {"little-endian", -THUMB_FLAG_BIG_END}, \
+ {"thumb-interwork", ARM_FLAG_THUMB}, \
+ {"no-thumb-interwork", -ARM_FLAG_THUMB}, \
+ {"tpcs-frame", THUMB_FLAG_BACKTRACE}, \
+ {"no-tpcs-frame", -THUMB_FLAG_BACKTRACE}, \
+ {"tpcs-leaf-frame", THUMB_FLAG_LEAF_BACKTRACE}, \
+ {"no-tpcs-leaf-frame", -THUMB_FLAG_LEAF_BACKTRACE}, \
+ {"", TARGET_DEFAULT} \
+}
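+
+/* Illustrative usage (the driver name below is assumed): each switch
+   above gains an "m" prefix on the command line, so
+       thumb-coff-gcc -mbig-endian -mtpcs-frame -c foo.c
+   selects big-endian code with TPCS backtrace frames for non-leaf
+   functions. */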
+
+#define TARGET_OPTIONS \
+{ \
+ { "structure-size-boundary=", & structure_size_string }, \
+}
+
+#define REGISTER_PREFIX ""
+
+#define CAN_DEBUG_WITHOUT_FP 1
+
+#define ASM_APP_ON ""
+#define ASM_APP_OFF "\t.code\t16\n"
+
+/* Output a gap. In fact we fill it with nulls. */
+#define ASM_OUTPUT_SKIP(STREAM, NBYTES) \
+ fprintf ((STREAM), "\t.space\t%u\n", (NBYTES))
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+#define ASM_OUTPUT_ALIGN(STREAM,LOG) \
+{ \
+ fprintf (STREAM, "\t.align\t%d\n", (LOG)); \
+}
+
+/* Output a common block */
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf ((STREAM), "\t.comm\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fprintf((STREAM), ", %d\t%s %d\n", (ROUNDED), (ASM_COMMENT_START), (SIZE)))
+
+#define ASM_GENERATE_INTERNAL_LABEL(STRING,PREFIX,NUM) \
+ sprintf ((STRING), "*%s%s%d", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM))
+
+/* This is how to output an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+#define ASM_OUTPUT_INTERNAL_LABEL(STREAM,PREFIX,NUM) \
+ fprintf ((STREAM), "%s%s%d:\n", (LOCAL_LABEL_PREFIX), (PREFIX), (NUM))
+
+/* This is how to output a label which precedes a jumptable. Since
+ instructions are 2 bytes, we need explicit alignment here. */
+
+#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,JUMPTABLE) \
+ do { \
+ ASM_OUTPUT_ALIGN (FILE, 2); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, PREFIX, NUM); \
+ } while (0)
+
+/* This says how to define a local common symbol (i.e., not visible to
+   the linker). */
+#define ASM_OUTPUT_LOCAL(STREAM, NAME, SIZE, ROUNDED) \
+ (fprintf((STREAM),"\n\t.lcomm\t"), \
+ assemble_name((STREAM),(NAME)), \
+ fprintf((STREAM),",%u\n",(SIZE)))
+
+/* Output a reference to a label. */
+#define ASM_OUTPUT_LABELREF(STREAM,NAME) \
+ fprintf ((STREAM), "%s%s", USER_LABEL_PREFIX, (NAME))
+
+/* This is how to output an assembler line for a numeric constant byte. */
+#define ASM_OUTPUT_BYTE(STREAM,VALUE) \
+ fprintf ((STREAM), "\t.byte\t0x%x\n", (VALUE))
+
+#define ASM_OUTPUT_INT(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.word\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_SHORT(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.short\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_CHAR(STREAM,VALUE) \
+{ \
+ fprintf (STREAM, "\t.byte\t"); \
+ output_addr_const (STREAM, (VALUE)); \
+ fprintf (STREAM, "\n"); \
+}
+
+#define ASM_OUTPUT_LONG_DOUBLE(STREAM,VALUE) \
+do { char dstr[30]; \
+ long l[3]; \
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.20g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx,0x%lx,0x%lx\t%s long double %s\n", \
+ l[0], l[1], l[2], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_DOUBLE(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l[2]; \
+ REAL_VALUE_TO_TARGET_DOUBLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.14g", dstr); \
+ fprintf (STREAM, "\t.long 0x%lx, 0x%lx\t%s double %s\n", l[0], \
+ l[1], ASM_COMMENT_START, dstr); \
+ } while (0)
+
+#define ASM_OUTPUT_FLOAT(STREAM, VALUE) \
+do { char dstr[30]; \
+ long l; \
+ REAL_VALUE_TO_TARGET_SINGLE (VALUE, l); \
+ REAL_VALUE_TO_DECIMAL (VALUE, "%.7g", dstr); \
+ fprintf (STREAM, "\t.word 0x%lx\t%s float %s\n", l, \
+ ASM_COMMENT_START, dstr); \
+   } while (0)
+
+/* Define results of standard character escape sequences. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+/* This is how to output a string. */
+#define ASM_OUTPUT_ASCII(STREAM, STRING, LEN) \
+do { \
+ register int i, c, len = (LEN), cur_pos = 17; \
+ register unsigned char *string = (unsigned char *)(STRING); \
+ fprintf ((STREAM), "\t.ascii\t\""); \
+ for (i = 0; i < len; i++) \
+ { \
+ register int c = string[i]; \
+ \
+ switch (c) \
+ { \
+ case '\"': \
+ case '\\': \
+ putc ('\\', (STREAM)); \
+ putc (c, (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_NEWLINE: \
+ fputs ("\\n", (STREAM)); \
+ if (i+1 < len \
+ && (((c = string[i+1]) >= '\040' && c <= '~') \
+ || c == TARGET_TAB)) \
+ cur_pos = 32767; /* break right here */ \
+ else \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_TAB: \
+ fputs ("\\t", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_FF: \
+ fputs ("\\f", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_BS: \
+ fputs ("\\b", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ case TARGET_CR: \
+ fputs ("\\r", (STREAM)); \
+ cur_pos += 2; \
+ break; \
+ \
+ default: \
+ if (c >= ' ' && c < 0177) \
+ { \
+ putc (c, (STREAM)); \
+ cur_pos++; \
+ } \
+ else \
+ { \
+ fprintf ((STREAM), "\\%03o", c); \
+ cur_pos += 4; \
+ } \
+ } \
+ \
+ if (cur_pos > 72 && i+1 < len) \
+ { \
+ cur_pos = 17; \
+ fprintf ((STREAM), "\"\n\t.ascii\t\""); \
+ } \
+ } \
+ fprintf ((STREAM), "\"\n"); \
+} while (0)
+
+/* Output and Generation of Labels */
+#define ASM_OUTPUT_LABEL(STREAM,NAME) \
+ (assemble_name ((STREAM), (NAME)), \
+ fprintf ((STREAM), ":\n"))
+
+#define ASM_GLOBALIZE_LABEL(STREAM,NAME) \
+ (fprintf ((STREAM), "\t.globl\t"), \
+ assemble_name ((STREAM), (NAME)), \
+ fputc ('\n', (STREAM)))
+
+/* Construct a private name. */
+#define ASM_FORMAT_PRIVATE_NAME(OUTVAR,NAME,NUMBER) \
+ ((OUTVAR) = (char *) alloca (strlen (NAME) + 10), \
+ sprintf ((OUTVAR), "%s.%d", (NAME), (NUMBER)))
+
+/* Switch to the text or data segment. */
+#define TEXT_SECTION_ASM_OP ".text"
+#define DATA_SECTION_ASM_OP ".data"
+#define BSS_SECTION_ASM_OP ".bss"
+
+/* The assembler's names for the registers. */
+#ifndef REGISTER_NAMES
+#define REGISTER_NAMES \
+{ \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc", "ap" \
+}
+#endif
+
+#ifndef ADDITIONAL_REGISTER_NAMES
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ {"a1", 0}, \
+ {"a2", 1}, \
+ {"a3", 2}, \
+ {"a4", 3}, \
+ {"v1", 4}, \
+ {"v2", 5}, \
+ {"v3", 6}, \
+ {"v4", 7}, \
+ {"v5", 8}, \
+ {"v6", 9}, \
+ {"sb", 9}, \
+ {"v7", 10}, \
+ {"r10", 10}, /* sl */ \
+ {"r11", 11}, /* fp */ \
+ {"r12", 12}, /* ip */ \
+ {"r13", 13}, /* sp */ \
+ {"r14", 14}, /* lr */ \
+ {"r15", 15} /* pc */ \
+}
+#endif
+
+/* The assembler's parentheses characters. */
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+#ifndef ASM_COMMENT_START
+#define ASM_COMMENT_START "@"
+#endif
+
+/* Output an element of a dispatch table. */
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM,VALUE) \
+ fprintf (STREAM, "\t.word\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE))
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM,BODY,VALUE,REL) \
+ fprintf (STREAM, "\tb\t%sL%d\n", (LOCAL_LABEL_PREFIX), (VALUE))
+
+/* Storage Layout */
+
+/* Define this if the most significant bit is lowest numbered in
+ instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 0
+
+/* Define this if most significant byte of a word is the lowest
+ numbered. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
+
+#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN)
+
+/* LIBGCC2_WORDS_BIG_ENDIAN has to be a constant, so we define this based
+   on processor pre-defines when compiling libgcc2.c. */
+#if defined(__THUMBEB__) && !defined(__THUMBEL__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+#define FLOAT_WORDS_BIG_ENDIAN 1
+
+#define BITS_PER_UNIT 8
+#define BITS_PER_WORD 32
+
+#define UNITS_PER_WORD 4
+
+#define POINTER_SIZE 32
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+{ \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < 4) \
+ { \
+ (UNSIGNEDP) = 1; \
+ (MODE) = SImode; \
+ } \
+}
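+
+/* E.g. (illustrative): a QImode or HImode integer is promoted to an
+   unsigned SImode value here, matching the zero extension that the
+   ldrb/ldrh load instructions perform (see LOAD_EXTEND_OP below). */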
+
+#define PARM_BOUNDARY 32
+#define STACK_BOUNDARY 32
+
+#define FUNCTION_BOUNDARY 32
+#define BIGGEST_ALIGNMENT 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+#define EMPTY_FIELD_BOUNDARY 32
+
+#define STRUCTURE_SIZE_BOUNDARY 32
+
+/* Used when parsing command line option -mstructure_size_boundary. */
+extern char * structure_size_string;
+
+#define STRICT_ALIGNMENT 1
+
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+
+/* Layout of Source Language Data Types */
+
+#define DEFAULT_SIGNED_CHAR 0
+
+
+/* Register Usage */
+
+/* Note there are 16 hard registers on the Thumb. We invent a 17th register
+ which is assigned to ARG_POINTER_REGNUM, but this is later removed by
+ elimination passes in the compiler. */
+#define FIRST_PSEUDO_REGISTER 17
+
+/* ??? This is questionable. */
+#define FIXED_REGISTERS \
+{ \
+ 0,0,0,0, \
+ 0,0,0,0, \
+ 0,0,0,1, \
+ 0,1,1,1,1 \
+}
+
+/* ??? This is questionable. */
+#define CALL_USED_REGISTERS \
+{ \
+ 1,1,1,1, \
+ 0,0,0,0, \
+ 0,0,0,1, \
+ 1,1,1,1,1 \
+}
+
+#define HARD_REGNO_NREGS(REGNO,MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \
+ / UNITS_PER_WORD)
+
+/* ??? Probably should only allow DImode/DFmode in even numbered registers. */
+#define HARD_REGNO_MODE_OK(REGNO,MODE) \
+  ((GET_MODE_SIZE (MODE) > UNITS_PER_WORD) ? ((REGNO) < 7) : 1)
+
+#define MODES_TIEABLE_P(MODE1,MODE2) 1
+
+enum reg_class
+{
+ NO_REGS,
+ LO_REGS,
+ STACK_REG,
+ BASE_REGS,
+ HI_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define GENERAL_REGS ALL_REGS
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "LO_REGS", \
+ "STACK_REG", \
+ "BASE_REGS", \
+ "HI_REGS", \
+ "ALL_REGS" \
+}
+
+#define REG_CLASS_CONTENTS \
+{ \
+ 0x00000, \
+ 0x000ff, \
+ 0x02000, \
+ 0x020ff, \
+ 0x0ff00, \
+ 0x1ffff, \
+}
+
+#define REGNO_REG_CLASS(REGNO) \
+ ((REGNO) == STACK_POINTER_REGNUM ? STACK_REG \
+ : (REGNO) < 8 ? LO_REGS \
+ : HI_REGS)
+
+#define BASE_REG_CLASS BASE_REGS
+
+#define INDEX_REG_CLASS LO_REGS
+
+/* When SMALL_REGISTER_CLASSES is nonzero, the compiler allows
+ registers explicitly used in the rtl to be used as spill registers
+ but prevents the compiler from extending the lifetime of these
+ registers. */
+
+#define SMALL_REGISTER_CLASSES 1
+
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C) == 'l' ? LO_REGS \
+ : (C) == 'h' ? HI_REGS \
+ : (C) == 'b' ? BASE_REGS \
+ : (C) == 'k' ? STACK_REG \
+ : NO_REGS)
+
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+ ((REGNO) < 8 \
+ || (REGNO) == STACK_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] < 8 \
+ || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM)
+
+#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
+ ((REGNO) < 8 \
+ || (unsigned) reg_renumber[REGNO] < 8 \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && ((REGNO) == STACK_POINTER_REGNUM \
+ || (unsigned) reg_renumber[REGNO] == STACK_POINTER_REGNUM)))
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+ ((REGNO) < 8 \
+ || (unsigned) reg_renumber[REGNO] < 8)
+
+/* ??? This looks suspiciously wrong. */
+/* We need to leave BASE_REGS reloads alone, in order to avoid caller_save
+ lossage. Caller_saves requests a BASE_REGS reload (caller_save_spill_class)
+ and then later we verify that one was allocated. If PREFERRED_RELOAD_CLASS
+ says to allocate a LO_REGS spill instead, then this mismatch gives an
+ abort. Alternatively, this could be fixed by modifying BASE_REG_CLASS
+   to be LO_REGS instead of BASE_REGS. It is not clear what effect this
+ change would have. */
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ ((CLASS) == BASE_REGS ? (CLASS) \
+ : LO_REGS)
+/*
+ ((CONSTANT_P ((X)) && GET_CODE ((X)) != CONST_INT \
+ && ! CONSTANT_POOL_ADDRESS_P((X))) ? NO_REGS \
+ : (GET_CODE ((X)) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL ((X)) > 255) ? NO_REGS \
+ : LO_REGS) */
+
+/* Must leave BASE_REGS reloads alone, see comment above. */
+#define SECONDARY_RELOAD_CLASS(CLASS,MODE,X) \
+ ((CLASS) != LO_REGS && (CLASS) != BASE_REGS \
+ ? ((true_regnum (X) == -1 ? LO_REGS \
+ : (true_regnum (X) + HARD_REGNO_NREGS (0, MODE) > 8) ? LO_REGS \
+ : NO_REGS)) \
+ : NO_REGS)
+
+#define CLASS_MAX_NREGS(CLASS,MODE) HARD_REGNO_NREGS(0,(MODE))
+
+int thumb_shiftable_const ();
+
+#define CONST_OK_FOR_LETTER_P(VAL,C) \
+ ((C) == 'I' ? (unsigned HOST_WIDE_INT) (VAL) < 256 \
+ : (C) == 'J' ? (VAL) > -256 && (VAL) <= 0 \
+ : (C) == 'K' ? thumb_shiftable_const (VAL) \
+ : (C) == 'L' ? (VAL) > -8 && (VAL) < 8 \
+ : (C) == 'M' ? ((unsigned HOST_WIDE_INT) (VAL) < 1024 \
+ && ((VAL) & 3) == 0) \
+ : (C) == 'N' ? ((unsigned HOST_WIDE_INT) (VAL) < 32) \
+ : (C) == 'O' ? ((VAL) >= -508 && (VAL) <= 508) \
+ : 0)
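+
+/* Illustrative readings of the letters above (not from the original
+   source): 'I' matches the 8-bit mov/add immediates 0..255, 'M' the
+   word-aligned "add rd, sp, #imm" offsets 0..1020, and 'O' the
+   +/-508 range used when adjusting the stack pointer. */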
+
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VAL,C) 0
+
+#define EXTRA_CONSTRAINT(X,C) \
+ ((C) == 'Q' ? (GET_CODE (X) == MEM \
+ && GET_CODE (XEXP (X, 0)) == LABEL_REF) : 0)
+
+/* Stack Layout and Calling Conventions */
+
+#define STACK_GROWS_DOWNWARD 1
+
+/* #define FRAME_GROWS_DOWNWARD 1 */
+
+/* #define ARGS_GROW_DOWNWARD 1 */
+
+#define STARTING_FRAME_OFFSET 0
+
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Registers that address the stack frame */
+
+#define STACK_POINTER_REGNUM 13 /* Defined by the TPCS. */
+
+#define FRAME_POINTER_REGNUM 7 /* TPCS defines this as 11 but it does not really mean it. */
+
+#define ARG_POINTER_REGNUM 16 /* A fake hard register that is eliminated later on. */
+
+#define STATIC_CHAIN_REGNUM 9
+
+#define FRAME_POINTER_REQUIRED 0
+
+#define ELIMINABLE_REGS \
+{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
+
+/* On the Thumb we always want to perform the eliminations as we
+ actually only have one real register pointing to the stashed
+ variables: the stack pointer, and we never use the frame pointer. */
+#define CAN_ELIMINATE(FROM,TO) 1
+
+/* Note: This macro must match the code in thumb_function_prologue() in thumb.c. */
+#define INITIAL_ELIMINATION_OFFSET(FROM,TO,OFFSET) \
+{ \
+ (OFFSET) = 0; \
+ if ((FROM) == ARG_POINTER_REGNUM) \
+ { \
+ int count_regs = 0; \
+ int regno; \
+ (OFFSET) += get_frame_size (); \
+ for (regno = 8; regno < 13; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ count_regs++; \
+ if (count_regs) \
+ (OFFSET) += 4 * count_regs; \
+ count_regs = 0; \
+ for (regno = 0; regno < 8; regno++) \
+ if (regs_ever_live[regno] && ! call_used_regs[regno]) \
+ count_regs++; \
+ if (count_regs || ! leaf_function_p () || far_jump_used_p()) \
+ (OFFSET) += 4 * (count_regs + 1); \
+ if (TARGET_BACKTRACE) { \
+ if ((count_regs & 0xFF) == 0 && (regs_ever_live[3] != 0)) \
+ (OFFSET) += 20; \
+ else \
+ (OFFSET) += 16; } \
+ } \
+ if ((TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) += current_function_outgoing_args_size; \
+}
+
+/* Passing Arguments on the stack */
+
+#define PROMOTE_PROTOTYPES 1
+
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+#define FUNCTION_ARG(CUM,MODE,TYPE,NAMED) \
+ ((NAMED) ? ((CUM) >= 16 ? 0 : gen_rtx (REG, (MODE), (CUM) / 4)) \
+ : 0)
+
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM,MODE,TYPE,NAMED) \
+ (((CUM) < 16 && (CUM) + (((MODE) == BLKmode) \
+ ? int_size_in_bytes (TYPE) \
+ : HARD_REGNO_NREGS (0, (MODE)) * 4) > 16) \
+ ? 4 - (CUM) / 4 : 0)
+
+#define CUMULATIVE_ARGS int
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ ((CUM) = ((FNTYPE) && aggregate_value_p (TREE_TYPE (FNTYPE))) ? 4 : 0)
+
+#define FUNCTION_ARG_ADVANCE(CUM,MODE,TYPE,NAMED) \
+ (CUM) += ((((MODE) == BLKmode) \
+ ? int_size_in_bytes (TYPE) \
+ : GET_MODE_SIZE (MODE)) + 3) & ~3
+
+#define FUNCTION_ARG_REGNO_P(REGNO) \
+  ((REGNO) >= 0 && (REGNO) <= 3)
+
+#define FUNCTION_VALUE(VALTYPE,FUNC) gen_rtx (REG, TYPE_MODE (VALTYPE), 0)
+
+#define LIBCALL_VALUE(MODE) gen_rtx (REG, (MODE), 0)
+
+#define FUNCTION_VALUE_REGNO_P(REGNO) ((REGNO) == 0)
+
+/* How large values are returned.  A C expression which can inhibit
+   the returning of certain function values in registers, based on
+   the type of value. */
+#define RETURN_IN_MEMORY(TYPE) thumb_return_in_memory (TYPE)
+
+/* Define DEFAULT_PCC_STRUCT_RETURN to 1 if all structure and union return
+ values must be in memory. On the ARM, they need only do so if larger
+ than a word, or if they contain elements offset from zero in the struct. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+
+#define STRUCT_VALUE_REGNUM 0
+
+#define FUNCTION_PROLOGUE(FILE,SIZE) thumb_function_prologue((FILE),(SIZE))
+
+#define FUNCTION_EPILOGUE(FILE,SIZE) thumb_function_epilogue((FILE),(SIZE))
+
+/* Generating code for profiling */
+#define FUNCTION_PROFILER(STREAM,LABELNO) \
+{ \
+  fprintf ((STREAM), "\tmov\tip, lr\n"); \
+ fprintf ((STREAM), "\tbl\tmcount\n"); \
+ fprintf ((STREAM), "\t.word\tLP%d\n", (LABELNO)); \
+}
+
+/* Implementing the Varargs Macros */
+
+#define SETUP_INCOMING_VARARGS(CUM,MODE,TYPE,PRETEND_SIZE,NO_RTL) \
+{ \
+ extern int current_function_anonymous_args; \
+ current_function_anonymous_args = 1; \
+ if ((CUM) < 16) \
+ (PRETEND_SIZE) = 16 - (CUM); \
+}
+
+/* Trampolines for nested functions */
+
+/* Output assembler code for a block containing the constant parts of
+ a trampoline, leaving space for the variable parts.
+
+ On the Thumb we always switch into ARM mode to execute the trampoline.
+   Why? Because it is easier. This code will always be branched to via
+ a BX instruction and since the compiler magically generates the address
+ of the function the linker has no opportunity to ensure that the
+ bottom bit is set. Thus the processor will be in ARM mode when it
+ reaches this code. So we duplicate the ARM trampoline code and add
+ a switch into Thumb mode as well.
+
+ On the ARM, (if r8 is the static chain regnum, and remembering that
+ referencing pc adds an offset of 8) the trampoline looks like:
+ ldr r8, [pc, #0]
+ ldr pc, [pc]
+ .word static chain value
+ .word function's address
+ ??? FIXME: When the trampoline returns, r8 will be clobbered. */
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf ((FILE), "\t.code 32\n"); \
+ fprintf ((FILE), ".Ltrampoline_start:\n"); \
+ fprintf ((FILE), "\tldr\t%s, [%spc, #8]\n", \
+ reg_names[STATIC_CHAIN_REGNUM], REGISTER_PREFIX); \
+ fprintf ((FILE), "\tldr\t%sip, [%spc, #8]\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\torr\t%sip, %sip, #1\n", \
+ REGISTER_PREFIX, REGISTER_PREFIX); \
+ fprintf ((FILE), "\tbx\t%sip\n", REGISTER_PREFIX); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.word\t0\n"); \
+ fprintf ((FILE), "\t.code 16\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+#define TRAMPOLINE_SIZE 24
+
+/* Alignment required for a trampoline in units. */
+#define TRAMPOLINE_ALIGN 4
+
+#define INITIALIZE_TRAMPOLINE(ADDR,FNADDR,CHAIN) \
+{ \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 16)), \
+ (CHAIN)); \
+ emit_move_insn (gen_rtx (MEM, SImode, plus_constant ((ADDR), 20)), \
+ (FNADDR)); \
+}
+
+
+/* Implicit Calls to Library Routines */
+
+#define TARGET_MEM_FUNCTIONS 1
+
+#define OVERRIDE_OPTIONS thumb_override_options ()
+
+
+/* Addressing Modes */
+
+#define HAVE_POST_INCREMENT 1
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (X))
+
+#define MAX_REGS_PER_ADDRESS 2
+
+#ifdef REG_OK_STRICT
+
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+
+#define REG_MODE_OK_FOR_BASE_P(X,MODE) \
+ REGNO_MODE_OK_FOR_BASE_P (REGNO (X), MODE)
+
+#else /* REG_OK_STRICT */
+
+#define REG_OK_FOR_BASE_P(X) \
+ (REGNO (X) < 8 || REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == arg_pointer_rtx \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#define REG_MODE_OK_FOR_BASE_P(X,MODE) \
+ (REGNO (X) < 8 \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER \
+ || (GET_MODE_SIZE (MODE) >= 4 \
+ && (REGNO (X) == STACK_POINTER_REGNUM \
+ || (X) == arg_pointer_rtx)))
+
+#define REG_OK_FOR_INDEX_P(X) \
+ (REGNO (X) < 8 \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)
+
+#endif /* REG_OK_STRICT */
+
+/* In a REG+REG address, both must be INDEX registers. */
+#define REG_OK_FOR_INDEXED_BASE_P(X) REG_OK_FOR_INDEX_P(X)
+
+#define LEGITIMATE_OFFSET(MODE,VAL) \
+(GET_MODE_SIZE (MODE) == 1 ? ((unsigned HOST_WIDE_INT) (VAL) < 32) \
+ : GET_MODE_SIZE (MODE) == 2 ? ((unsigned HOST_WIDE_INT) (VAL) < 64 \
+ && ((VAL) & 1) == 0) \
+ : ((VAL) >= 0 && ((VAL) + GET_MODE_SIZE (MODE)) <= 128 \
+ && ((VAL) & 3) == 0))
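+
+/* Illustrative readings of the macro above: byte accesses allow
+   offsets 0..31, halfword accesses even offsets up to 62, and word
+   (or larger) accesses word-aligned offsets that keep the whole
+   access within 128 bytes. */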
+
+/* The AP may be eliminated to either the SP or the FP, so we use the
+ least common denominator, e.g. SImode, and offsets from 0 to 64. */
+
+/* ??? Verify whether the above is the right approach. */
+
+/* ??? Also, the FP may be eliminated to the SP, so perhaps that
+ needs special handling also. */
+
+/* ??? Look at how the mips16 port solves this problem. It probably uses
+ better ways to solve some of these problems. */
+
+/* Although it is not incorrect, we don't accept QImode and HImode
+   addresses based on the frame pointer or arg pointer until the
+   reload pass starts.  This is so that eliminating such addresses
+   into stack based ones won't produce impossible code. */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE,X,WIN) \
+{ \
+ /* ??? Not clear if this is right. Experiment. */ \
+ if (GET_MODE_SIZE (MODE) < 4 \
+ && ! (reload_in_progress || reload_completed) \
+ && (reg_mentioned_p (frame_pointer_rtx, X) \
+ || reg_mentioned_p (arg_pointer_rtx, X) \
+ || reg_mentioned_p (virtual_incoming_args_rtx, X) \
+ || reg_mentioned_p (virtual_outgoing_args_rtx, X) \
+ || reg_mentioned_p (virtual_stack_dynamic_rtx, X) \
+ || reg_mentioned_p (virtual_stack_vars_rtx, X))) \
+ ; \
+ /* Accept any base register. SP only in SImode or larger. */ \
+ else if (GET_CODE (X) == REG && REG_MODE_OK_FOR_BASE_P(X, MODE)) \
+ goto WIN; \
+ /* This is PC relative data before MACHINE_DEPENDENT_REORG runs. */ \
+ else if (GET_MODE_SIZE (MODE) >= 4 && CONSTANT_P (X) \
+ && CONSTANT_POOL_ADDRESS_P (X)) \
+ goto WIN; \
+ /* This is PC relative data after MACHINE_DEPENDENT_REORG runs. */ \
+ else if (GET_MODE_SIZE (MODE) >= 4 && reload_completed \
+ && (GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST \
+ && GET_CODE (XEXP (X, 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (X, 0), 0)) == LABEL_REF \
+ && GET_CODE (XEXP (XEXP (X, 0), 1)) == CONST_INT))) \
+ goto WIN; \
+ /* Post-inc indexing only supported for SImode and larger. */ \
+ else if (GET_CODE (X) == POST_INC && GET_MODE_SIZE (MODE) >= 4 \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 0))) \
+ goto WIN; \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ /* REG+REG address can be any two index registers. */ \
+ /* ??? Normally checking the mode here is wrong, since it isn't \
+ impossible to use REG+REG with DFmode. However, the movdf \
+ pattern requires offsettable addresses, and REG+REG is not \
+ offsettable, so it must be rejected somehow. Trying to use \
+ 'o' fails, because offsettable_address_p does a QImode check. \
+ QImode is not valid for stack addresses, and has a smaller \
+ range for non-stack bases, and this causes valid addresses \
+ to be rejected. So we just eliminate REG+REG here by checking \
+ the mode. */ \
+ /* We also disallow FRAME+REG addressing since we know that FRAME \
+ will be replaced with STACK, and SP relative addressing only \
+ permits SP+OFFSET. */ \
+ if (GET_MODE_SIZE (MODE) <= 4 \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == REG \
+ && REGNO (XEXP (X, 0)) != FRAME_POINTER_REGNUM \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+ && REG_OK_FOR_INDEX_P (XEXP (X, 1))) \
+ goto WIN; \
+ /* REG+const has 5-7 bit offset for non-SP registers. */ \
+ else if (GET_CODE (XEXP (X, 0)) == REG \
+ && (REG_OK_FOR_INDEX_P (XEXP (X, 0)) \
+ || XEXP (X, 0) == arg_pointer_rtx) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && LEGITIMATE_OFFSET (MODE, INTVAL (XEXP (X, 1)))) \
+ goto WIN; \
+ /* REG+const has 10 bit offset for SP, but only SImode and \
+ larger is supported. */ \
+ /* ??? Should probably check for DI/DFmode overflow here \
+ just like GO_IF_LEGITIMATE_OFFSET does. */ \
+ else if (GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) == STACK_POINTER_REGNUM \
+ && GET_MODE_SIZE (MODE) >= 4 \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL (XEXP (X, 1)) < 1024 \
+ && (INTVAL (XEXP (X, 1)) & 3) == 0) \
+ goto WIN; \
+ } \
+}
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL)
+
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN)
+
+#define LEGITIMATE_CONSTANT_P(X) \
+ (GET_CODE (X) == CONST_INT \
+ || GET_CODE (X) == CONST_DOUBLE \
+ || CONSTANT_ADDRESS_P (X))
+
+
+/* Condition Code Status */
+
+#define NOTICE_UPDATE_CC(EXP,INSN) \
+{ \
+ if (get_attr_conds ((INSN)) != CONDS_UNCHANGED) \
+ CC_STATUS_INIT; \
+}
+
+
+/* Describing Relative Costs of Operations */
+
+#define SLOW_BYTE_ACCESS 0
+
+#define SLOW_UNALIGNED_ACCESS 1
+
+#define NO_FUNCTION_CSE 1
+
+#define NO_RECURSIVE_FUNCTION_CSE 1
+
+#define REGISTER_MOVE_COST(FROM,TO) \
+ (((FROM) == HI_REGS || (TO) == HI_REGS) ? 4 : 2)
+
+#define MEMORY_MOVE_COST(M,CLASS,IN) \
+ ((GET_MODE_SIZE(M) < 4 ? 8 : 2 * GET_MODE_SIZE(M)) * (CLASS == LO_REGS ? 1 : 2))
+
+/* This will allow better space optimization when compiling with -O */
+#define BRANCH_COST (optimize > 1 ? 1 : 0)
+
+#define RTX_COSTS(X,CODE,OUTER) \
+ case MULT: \
+ if (GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ int cycles = 0; \
+ unsigned HOST_WIDE_INT i = INTVAL (XEXP (X, 1)); \
+ while (i) \
+ { \
+ i >>= 2; \
+ cycles++; \
+ } \
+ return COSTS_N_INSNS (2) + cycles; \
+ } \
+ return COSTS_N_INSNS (1) + 16; \
+ case ASHIFT: case ASHIFTRT: case LSHIFTRT: case ROTATERT: \
+ case PLUS: case MINUS: case COMPARE: case NEG: case NOT: \
+ return COSTS_N_INSNS (1); \
+ case SET: \
+ return (COSTS_N_INSNS (1) \
+ + 4 * ((GET_CODE (SET_SRC (X)) == MEM) \
+ + GET_CODE (SET_DEST (X)) == MEM))
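+
+/* Illustrative note on the MULT case above (not from the original
+   source): the loop models an early-terminating multiplier that
+   retires roughly two bits of the constant per cycle, so a multiply
+   by 100 (binary 1100100) costs COSTS_N_INSNS (2) + 4. */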
+
+#define CONST_COSTS(X,CODE,OUTER) \
+ case CONST_INT: \
+ if ((OUTER) == SET) \
+ { \
+ if ((unsigned HOST_WIDE_INT) INTVAL (X) < 256) \
+ return 0; \
+ if (thumb_shiftable_const (INTVAL (X))) \
+ return COSTS_N_INSNS (2); \
+ return COSTS_N_INSNS (3); \
+ } \
+ else if (OUTER == PLUS \
+ && INTVAL (X) < 256 && INTVAL (X) > -256) \
+ return 0; \
+ else if (OUTER == COMPARE \
+ && (unsigned HOST_WIDE_INT) INTVAL (X) < 256) \
+ return 0; \
+ else if (OUTER == ASHIFT || OUTER == ASHIFTRT \
+ || OUTER == LSHIFTRT) \
+ return 0; \
+ return COSTS_N_INSNS (2); \
+ case CONST: \
+ case CONST_DOUBLE: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return COSTS_N_INSNS(3);
+
+#define ADDRESS_COST(X) \
+ ((GET_CODE (X) == REG \
+ || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT)) \
+ ? 1 : 2)
+
+
+/* Position Independent Code */
+
+#define PRINT_OPERAND(STREAM,X,CODE) \
+ thumb_print_operand((STREAM), (X), (CODE))
+
+#define PRINT_OPERAND_ADDRESS(STREAM,X) \
+{ \
+ if (GET_CODE ((X)) == REG) \
+ fprintf ((STREAM), "[%s]", reg_names[REGNO ((X))]); \
+ else if (GET_CODE ((X)) == POST_INC) \
+ fprintf ((STREAM), "%s!", reg_names[REGNO (XEXP (X, 0))]); \
+ else if (GET_CODE ((X)) == PLUS) \
+ { \
+ if (GET_CODE (XEXP ((X), 1)) == CONST_INT) \
+ fprintf ((STREAM), "[%s, #%d]", \
+ reg_names[REGNO (XEXP ((X), 0))], \
+ (int) INTVAL (XEXP ((X), 1))); \
+ else \
+ fprintf ((STREAM), "[%s, %s]", \
+ reg_names[REGNO (XEXP ((X), 0))], \
+ reg_names[REGNO (XEXP ((X), 1))]); \
+ } \
+ else \
+ output_addr_const ((STREAM), (X)); \
+}
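+
+/* Illustrative outputs of the macro above: a plain register prints as
+   "[r3]", a post-increment as "r3!", reg+const as "[r3, #8]" and
+   reg+reg as "[r3, r4]". */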
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) ((CODE) == '@')
+
+/* Emit a special directive when defining a function name.
+   This is used by the assembler to assist with interworking. */
+#define ASM_DECLARE_FUNCTION_NAME(file, name, decl) \
+ fprintf (file, ".thumb_func\n") ; \
+ ASM_OUTPUT_LABEL (file, name)
+
+#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
+ asm_fprintf ((STREAM), "\tpush {%R%s}\n", reg_names[(REGNO)])
+
+#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
+ fprintf ((STREAM), "\tpop {%R%s}\n", reg_names[(REGNO)])
+
+#define FINAL_PRESCAN_INSN(INSN,OPVEC,NOPERANDS) \
+ final_prescan_insn((INSN))
+
+/* Controlling Debugging Information Format */
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* Specific options for DBX Output */
+
+#define DBX_DEBUGGING_INFO 1
+
+#define DEFAULT_GDB_EXTENSIONS 1
+
+
+/* Cross Compilation and Floating Point */
+
+#define REAL_ARITHMETIC
+
+
+/* Miscellaneous Parameters */
+
+#define PREDICATE_CODES \
+ {"thumb_cmp_operand", {SUBREG, REG, CONST_INT}},
+
+#define CASE_VECTOR_MODE Pmode
+
+#define WORD_REGISTER_OPERATIONS
+
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+#define MOVE_MAX 4
+
+#define TRULY_NOOP_TRUNCATION(OUTPREC,INPREC) 1
+
+#define STORE_FLAG_VALUE 1
+
+#define Pmode SImode
+
+#define FUNCTION_MODE SImode
+
+#define DOLLARS_IN_IDENTIFIERS 0
+
+#define NO_DOLLAR_IN_LABEL 1
+
+#define HAVE_ATEXIT
+
+/* The literal pool needs to reside in the text area due to the
+ limited PC addressing range: */
+#define MACHINE_DEPENDENT_REORG(INSN) thumb_reorg ((INSN))
+
+
+/* Options specific to Thumb */
+
+/* True if a return instruction can be used in this function. */
+int thumb_trivial_epilogue ();
+#define USE_RETURN (reload_completed && thumb_trivial_epilogue ())
+
+extern char *thumb_unexpanded_epilogue ();
+extern char *output_move_mem_multiple ();
+extern char *thumb_load_double_from_address ();
+extern char *output_return ();
+extern int far_jump_used_p();
diff --git a/gcc/config/arm/thumb.md b/gcc/config/arm/thumb.md
new file mode 100644
index 0000000..1e0ee38
--- /dev/null
+++ b/gcc/config/arm/thumb.md
@@ -0,0 +1,1144 @@
+;; thumb.md Machine description for ARM/Thumb processors
+;; Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+;; The basis of this contribution was generated by
+;; Richard Earnshaw, Advanced RISC Machines Ltd
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;; LENGTH of an instruction is 2 bytes
+(define_attr "length" "" (const_int 2))
+
+;; CONDS is set to UNCHANGED when an insn does not affect the condition codes
+;; Most insns change the condition codes
+(define_attr "conds" "changed,unchanged" (const_string "changed"))
+
+;; FAR_JUMP is "yes" if a BL instruction is used to generate a branch to a
+;; distant label.
+(define_attr "far_jump" "yes,no" (const_string "no"))
+
+;; Start with move insns
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SImode, operands[1]);
+ }
+")
+
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,l,l,l,>,l,m,*r,*h")
+ (match_operand:SI 1 "general_operand" "l,I,J,K,>,l,mi,l,*h,*r"))]
+ "register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)"
+ "@
+ add\\t%0, %1, #0
+ mov\\t%0, %1
+ #
+ #
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1"
+[(set_attr "length" "2,2,4,4,2,2,2,2,2,2")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "thumb_shiftable_const (INTVAL (operands[1]))"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (ashift:SI (match_dup 0) (match_dup 2)))]
+ "
+{
+ unsigned HOST_WIDE_INT val = INTVAL (operands[1]);
+ unsigned HOST_WIDE_INT mask = 0xff;
+ int i;
+ for (i = 0; i < 25; i++)
+ if ((val & (mask << i)) == val)
+ break;
+
+ if (i == 0)
+ FAIL;
+
+ operands[1] = GEN_INT (val >> i);
+ operands[2] = GEN_INT (i);
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "INTVAL (operands[1]) < 0 && INTVAL (operands[1]) > -256"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (neg:SI (match_dup 0)))]
+ "
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+")
+
+;;(define_expand "reload_outsi"
+;; [(set (match_operand:SI 2 "register_operand" "=&l")
+;; (match_operand:SI 1 "register_operand" "h"))
+;; (set (match_operand:SI 0 "reload_memory_operand" "=o")
+;; (match_dup 2))]
+;; ""
+;; "
+;;/* thumb_reload_out_si (operands);
+;; DONE; */
+;;")
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (HImode, operands[1]);
+
+ /* ??? We shouldn't really get invalid addresses here, but this can
+ happen if we are passed a SP (never OK for HImode/QImode) or virtual
+ register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
+ relative address. */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+ /* Handle loading a large integer during reload */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movhi_insn"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:HI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrh\\t%0, %1
+ strh\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (QImode, operands[1]);
+
+      /* ??? We shouldn't really get invalid addresses here, but this can
+	 happen if we are passed an SP-relative address (never OK for
+	 HImode/QImode) or a virtual-register-relative address (rejected
+	 by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode).  */
+ /* ??? This should perhaps be fixed elsewhere, for instance, in
+ fixup_stack_1, by checking for other kinds of invalid addresses,
+ e.g. a bare reference to a virtual register. This may confuse the
+ alpha though, which must handle this case differently. */
+ if (GET_CODE (operands[0]) == MEM
+ && ! memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[0], 0));
+ operands[0] = change_address (operands[0], VOIDmode, temp);
+ }
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
+ }
+  /* Handle loading a large integer during reload.  */
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* Writing a constant to memory needs a scratch, which should
+ be handled with SECONDARY_RELOADs. */
+ if (GET_CODE (operands[0]) != REG)
+ abort ();
+
+ operands[0] = gen_rtx (SUBREG, SImode, operands[0], 0);
+ emit_insn (gen_movsi (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ (match_operand:QI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ "register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)"
+ "@
+ add\\t%0, %1, #0
+ ldrb\\t%0, %1
+ strb\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DImode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdf_insn pattern.
+;;; ??? The 'i' constraint looks funny, but thumb_reorg should always
+;;; replace it with a memory reference.
+(define_insn "*movdi_insn"
+ [(set (match_operand:DI 0 "general_operand" "=l,l,l,l,>,l,m,*r")
+ (match_operand:DI 1 "general_operand" "l,I,J,>,l,mi,l,*r"))]
+ "register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"mov\\t%Q0, %1\;mov\\t%R0, #0\";
+ case 2:
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+ return \"mov\\t%Q0, %1\;neg\\t%Q0, %Q0\;asr\\t%R0, %Q0, #31\";
+ case 3:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 4:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 5:
+ return thumb_load_double_from_address (operands);
+ case 6:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 7:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+}"[(set_attr "length" "4,4,6,2,2,6,4,4")])
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (DFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+;;; ??? This was originally identical to the movdi_insn pattern.
+;;; ??? The 'F' constraint looks funny, but thumb_reorg should always
+;;; replace it with a memory reference.
+(define_insn "*movdf_insn"
+ [(set (match_operand:DF 0 "general_operand" "=l,l,>,l,m,*r")
+ (match_operand:DF 1 "general_operand" "l,>,l,mF,l,*r"))]
+ "register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode)"
+ "*
+ switch (which_alternative)
+ {
+ case 0:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"add\\t%0, %1, #0\;add\\t%H0, %H1, #0\";
+ return \"add\\t%H0, %H1, #0\;add\\t%0, %1, #0\";
+ case 1:
+ return \"ldmia\\t%1, {%0, %H0}\";
+ case 2:
+ return \"stmia\\t%0, {%1, %H1}\";
+ case 3:
+ return thumb_load_double_from_address (operands);
+ case 4:
+ operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
+ return \"\";
+ case 5:
+ if (REGNO (operands[1]) == REGNO (operands[0]) + 1)
+ return \"mov\\t%0, %1\;mov\\t%H0, %H1\";
+ return \"mov\\t%H0, %H1\;mov\\t%0, %1\";
+ }
+"[(set_attr "length" "4,2,2,6,4,4")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+ if (! (reload_in_progress || reload_completed))
+ {
+ if (GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (SFmode, operands[1]);
+ }
+")
+
+;;; ??? This should have alternatives for constants.
+(define_insn "*movsf_insn"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=l,l,>,l,m,*r,*h")
+ (match_operand:SF 1 "general_operand" "l,>,l,mF,l,*h,*r"))]
+ "register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode)"
+ "@
+ add\\t%0, %1, #0
+ ldmia\\t%1, {%0}
+ stmia\\t%0, {%1}
+ ldr\\t%0, %1
+ str\\t%1, %0
+ mov\\t%0, %1
+ mov\\t%0, %1")
+
+;; Widening move insns
+
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendhisi2_insn"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ ""
+ "ldrh\\t%0, %1")
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_lshrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*zero_extendqisi2_insn"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldrb\\t%0, %1")
+
+(define_expand "extendhisi2"
+ [(parallel [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))
+ (clobber (match_scratch:SI 2 ""))])]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (16)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (16)));
+ DONE;
+ }
+")
+
+(define_insn "*extendhisi2_insn"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))
+ (clobber (match_scratch:SI 2 "=&l"))]
+ ""
+ "*
+{
+ rtx ops[4];
+ /* This code used to try to use 'V', and fix the address only if it was
+ offsettable, but this fails for e.g. REG+48 because 48 is outside the
+ range of QImode offsets, and offsettable_address_p does a QImode
+ address check. */
+
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ }
+ if (GET_CODE (ops[2]) == REG)
+ return \"ldrsh\\t%0, %1\";
+
+ ops[0] = operands[0];
+ ops[3] = operands[2];
+ output_asm_insn (\"mov\\t%3, %2\;ldrsh\\t%0, [%1, %3]\", ops);
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_expand "extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != MEM)
+ {
+ rtx temp = gen_reg_rtx (SImode);
+
+ operands[1] = force_reg (QImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ emit_insn (gen_ashlsi3 (temp, operands[1], GEN_INT (24)));
+ emit_insn (gen_ashrsi3 (operands[0], temp, GEN_INT (24)));
+ DONE;
+ }
+")
+
+(define_insn "*extendqisi2_insn"
+ [(set (match_operand:SI 0 "register_operand" "=l,&l")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "V,m")))]
+ ""
+ "*
+{
+ rtx ops[3];
+
+ if (which_alternative == 0)
+ return \"ldrsb\\t%0, %1\";
+ ops[0] = operands[0];
+ if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
+ {
+ ops[1] = XEXP (XEXP (operands[1], 0), 0);
+ ops[2] = XEXP (XEXP (operands[1], 0), 1);
+ }
+ else
+ {
+ ops[1] = XEXP (operands[1], 0);
+ ops[2] = const0_rtx;
+ }
+ output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
+ return \"\";
+}"
+[(set_attr "length" "2,4")])
+
+;; We don't really have extzv, but defining this using shifts helps
+;; to reduce register pressure later on.
+
+(define_expand "extzv"
+ [(set (match_dup 4)
+ (ashift:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))
+ (set (match_operand:SI 0 "register_operand" "")
+ (lshiftrt:SI (match_dup 4)
+ (match_operand:SI 3 "const_int_operand" "")))]
+ ""
+ "
+{
+ HOST_WIDE_INT lshift = 32 - INTVAL (operands[2]) - INTVAL (operands[3]);
+ HOST_WIDE_INT rshift = 32 - INTVAL (operands[2]);
+ operands[3] = GEN_INT (rshift);
+ if (lshift == 0)
+ {
+ emit_insn (gen_lshrsi3 (operands[0], operands[1], operands[3]));
+ DONE;
+ }
+ operands[2] = GEN_INT (lshift);
+ operands[4] = gen_reg_rtx (SImode);
+}
+")
+
+;; Block-move insns
+
+(define_expand "movstrqi"
+ [(match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" "")
+ (match_operand:SI 2 "" "")
+ (match_operand:SI 3 "const_int_operand" "")]
+ ""
+ "
+ if (INTVAL (operands[3]) != 4
+ || GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) > 48)
+ FAIL;
+
+ thumb_expand_movstrqi (operands);
+ DONE;
+")
+
+(define_insn "movmem12b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 8)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 12)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 12)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))
+ (clobber (match_scratch:SI 4 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (3, operands);"
+[(set_attr "length" "4")])
+
+(define_insn "movmem8b"
+ [(set (mem:SI (match_operand:SI 0 "register_operand" "+&l"))
+ (mem:SI (match_operand:SI 1 "register_operand" "+&l")))
+ (set (mem:SI (plus:SI (match_dup 0) (const_int 4)))
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_dup 0) (plus:SI (match_dup 0) (const_int 8)))
+ (set (match_dup 1) (plus:SI (match_dup 1) (const_int 8)))
+ (clobber (match_scratch:SI 2 "=&l"))
+ (clobber (match_scratch:SI 3 "=&l"))]
+ ""
+ "* return output_move_mem_multiple (2, operands);"
+[(set_attr "length" "4")])
+
+;; Arithmetic insns
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "register_operand" "=l")
+ (plus:DI (match_operand:DI 1 "register_operand" "%0")
+ (match_operand:DI 2 "register_operand" "l")))]
+ ""
+ "add\\t%Q0, %Q0, %Q2\;adc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+;; Register class 'k' contains only the stack pointer register.  Trying to
+;; reload it will always fail catastrophically, so never allow those
+;; alternatives to match if reloading is needed.
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l,l,l,*r,*h,l,!k")
+ (plus:SI (match_operand:SI 1 "register_operand" "%0,0,l,*0,*0,!k,!k")
+ (match_operand:SI 2 "nonmemory_operand" "I,J,lL,*h,*r,!M,!O")))]
+ ""
+ "*
+ static char *asms[] =
+{
+ \"add\\t%0, %0, %2\",
+ \"sub\\t%0, %0, #%n2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %0, %2\",
+ \"add\\t%0, %1, %2\",
+ \"add\\t%0, %1, %2\"
+};
+ if (which_alternative == 2 && GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) < 0)
+ return \"sub\\t%0, %1, #%n2\";
+ return asms[which_alternative];
+")
+
+; Reloading and frame pointer elimination can sometimes cause this
+; optimization to be missed.
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (match_operand:SI 1 "const_int_operand" "M"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_operand:SI 2 "register_operand" "k")))]
+ "REGNO (operands[2]) == STACK_POINTER_REGNUM
+ && (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) < 1024
+ && (INTVAL (operands[1]) & 3) == 0"
+ "add\\t%0, %2, %1")
+
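+;; i.e. (illustrative) the pair
+;;   mov r0, #32 ; add r0, r0, sp
+;; collapses into the single instruction
+;;   add r0, sp, #32
+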
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "register_operand" "=l")
+ (minus:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:DI 2 "register_operand" "l")))]
+ ""
+ "sub\\t%Q0, %Q0, %Q2\;sbc\\t%R0, %R0, %R2"
+[(set_attr "conds" "changed")
+ (set_attr "length" "8")])
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (minus:SI (match_operand:SI 1 "register_operand" "l")
+ (match_operand:SI 2 "register_operand" "l")))]
+ ""
+ "sub\\t%0, %1, %2")
+
+;; We must ensure that one input matches the output, and that the other input
+;; does not match the output. Using 0 satisfies the first, and using &
+;; satisfies the second. Unfortunately, this fails when operands 1 and 2
+;; are the same, because reload will make operand 0 match operand 1 without
+;; realizing that this conflicts with operand 2. We fix this by adding another
+;; alternative to match this case, and then `reload' it ourselves. This
+;; alternative must come first.
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=&l,&l,&l")
+ (mult:SI (match_operand:SI 1 "register_operand" "%l,*h,0")
+ (match_operand:SI 2 "register_operand" "l,l,l")))]
+ ""
+ "*
+{
+ if (which_alternative < 2)
+ return \"mov\\t%0, %1\;mul\\t%0, %0, %2\";
+ else
+ return \"mul\\t%0, %0, %2\";
+}"
+ [(set_attr "length" "4,4,2")])
+
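+;; For example, when operand 1 is already in the destination register
+;; (alternative 2) a bare multiply suffices:
+;;   mul r0, r0, r2
+;; otherwise the source is copied into place first:
+;;   mov r0, r1 ; mul r0, r0, r2
+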
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (neg:SI (match_operand:SI 1 "register_operand" "l")))]
+ ""
+ "neg\\t%0, %1")
+
+;; Logical insns
+
+(define_expand "andsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (and:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[2]) != CONST_INT)
+ operands[2] = force_reg (SImode, operands[2]);
+ else
+ {
+ int i;
+ if (((unsigned HOST_WIDE_INT) ~ INTVAL (operands[2])) < 256)
+ {
+ operands[2] = force_reg (SImode, GEN_INT (~INTVAL (operands[2])));
+ emit_insn (gen_bicsi3 (operands[0], operands[2], operands[1]));
+ DONE;
+ }
+
+ for (i = 9; i <= 31; i++)
+ if ((((HOST_WIDE_INT) 1) << i) - 1 == INTVAL (operands[2]))
+ {
+ emit_insn (gen_extzv (operands[0], operands[1], GEN_INT (i),
+ const0_rtx));
+ DONE;
+ }
+ else if ((((HOST_WIDE_INT) 1) << i) - 1 == ~ INTVAL (operands[2]))
+ {
+ rtx shift = GEN_INT (i);
+ rtx reg = gen_reg_rtx (SImode);
+ emit_insn (gen_lshrsi3 (reg, operands[1], shift));
+ emit_insn (gen_ashlsi3 (operands[0], reg, shift));
+ DONE;
+ }
+
+ operands[2] = force_reg (SImode, operands[2]);
+ }
+")
+
+(define_insn "*andsi3_insn"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (and:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "register_operand" "l")))]
+ ""
+ "and\\t%0, %0, %2")
+
+(define_insn "bicsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (and:SI (not:SI (match_operand:SI 1 "register_operand" "l"))
+ (match_operand:SI 2 "register_operand" "0")))]
+ ""
+ "bic\\t%0, %0, %1")
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (ior:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "register_operand" "l")))]
+ ""
+ "orr\\t%0, %0, %2")
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (xor:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "register_operand" "l")))]
+ ""
+ "eor\\t%0, %0, %2")
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (not:SI (match_operand:SI 1 "register_operand" "l")))]
+ ""
+ "mvn\\t%0, %1")
+
+;; Shift and rotation insns
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l,l")
+ (ashift:SI (match_operand:SI 1 "register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsl\\t%0, %1, %2
+ lsl\\t%0, %0, %2")
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l,l")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ asr\\t%0, %1, %2
+ asr\\t%0, %0, %2")
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l,l")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "l,0")
+ (match_operand:SI 2 "nonmemory_operand" "N,l")))]
+ ""
+ "@
+ lsr\\t%0, %1, %2
+ lsr\\t%0, %0, %2")
+
+(define_insn "rotrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (rotatert:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "l")))]
+ ""
+ "ror\\t%0, %0, %2")
+
+;; Comparison insns
+
+(define_expand "cmpsi"
+ [(set (cc0) (compare (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "nonmemory_operand" "")))]
+ ""
+ "
+ if (GET_CODE (operands[1]) != REG && GET_CODE (operands[1]) != SUBREG)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || (unsigned HOST_WIDE_INT) (INTVAL (operands[1])) >= 256)
+ {
+ if (GET_CODE (operands[1]) != CONST_INT
+ || INTVAL (operands[1]) < -255
+ || INTVAL (operands[1]) > 0)
+ operands[1] = force_reg (SImode, operands[1]);
+ else
+ {
+ operands[1] = force_reg (SImode,
+ GEN_INT (- INTVAL (operands[1])));
+ emit_insn (gen_cmnsi (operands[0], operands[1]));
+ DONE;
+ }
+ }
+ }
+")
+
+(define_insn "*cmpsi_insn"
+ [(set (cc0) (compare (match_operand:SI 0 "register_operand" "l,*r,*h")
+ (match_operand:SI 1 "thumb_cmp_operand" "lI,*h,*r")))]
+ ""
+ "@
+ cmp\\t%0, %1
+ cmp\\t%0, %1
+ cmp\\t%0, %1")
+
+(define_insn "tstsi"
+ [(set (cc0) (match_operand:SI 0 "register_operand" "l"))]
+ ""
+ "cmp\\t%0, #0")
+
+(define_insn "cmnsi"
+ [(set (cc0) (compare (match_operand:SI 0 "register_operand" "l")
+ (neg:SI (match_operand:SI 1 "register_operand" "l"))))]
+ ""
+ "cmn\\t%0, %1")
+
+;; Jump insns
+
+(define_insn "jump"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+ if (get_attr_length (insn) == 2)
+ return \"b\\t%l0\";
+ return \"bl\\t%l0\\t%@ far jump\";
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "4")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2048))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 2)
+ (const_int 4)))])
+
+
+(define_expand "beq"
+ [(set (pc) (if_then_else (eq (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bne"
+ [(set (pc) (if_then_else (ne (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bge"
+ [(set (pc) (if_then_else (ge (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "ble"
+ [(set (pc) (if_then_else (le (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgt"
+ [(set (pc) (if_then_else (gt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "blt"
+ [(set (pc) (if_then_else (lt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgeu"
+ [(set (pc) (if_then_else (geu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bleu"
+ [(set (pc) (if_then_else (leu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bgtu"
+ [(set (pc) (if_then_else (gtu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_expand "bltu"
+ [(set (pc) (if_then_else (ltu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_insn "*cond_branch"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%d1\\t%l0\\t%@cond_branch\";
+ case 4: return \"b%D1\\t.LCB%=\;b\\t%l0\\t%@long jump\\n.LCB%=:\";
+ default: return \"b%D1\\t.LCB%=\;bl\\t%l0\\t%@far jump\\n.LCB%=:\";
+ }
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
+
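+;; The three length cases above correspond roughly to (for a beq to .L2):
+;;   beq  .L2                          @ 2 bytes, target in range
+;;   bne  .LCB0 ; b  .L2 ; .LCB0:     @ 4 bytes
+;;   bne  .LCB0 ; bl .L2 ; .LCB0:     @ 6 bytes, far jump
+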
+(define_insn "*cond_branch_reversed"
+ [(set (pc) (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+ switch (get_attr_length (insn))
+ {
+ case 2: return \"b%D1\\t%l0\\t%@cond_branch_reversed\";
+ case 4: return \"b%d1\\t.LCBR%=\;b\\t%l0\\t%@long jump\\n.LCBR%=:\";
+ default: return \"b%d1\\t.LCBR%=\;bl\\t%l0\\t%@far jump\\n.LCBR%=:\";
+ }
+"[(set (attr "far_jump")
+ (if_then_else (eq_attr "length" "6")
+ (const_string "yes")
+ (const_string "no")))
+ (set (attr "length")
+ (if_then_else
+ (and (ge (minus (match_dup 0) (pc)) (const_int -252))
+ (le (minus (match_dup 0) (pc)) (const_int 254)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (match_dup 0) (pc)) (const_int -2044))
+ (le (minus (match_dup 0) (pc)) (const_int 2044)))
+ (const_int 4)
+ (const_int 6))))])
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "register_operand" "l*r"))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "tablejump"
+ [(set (pc) (match_operand:SI 0 "register_operand" "l*r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "mov\\tpc, %0")
+
+(define_insn "return"
+ [(return)]
+ "USE_RETURN"
+ "* return output_return ();"
+[(set_attr "length" "18")])
+
+;; Call insns
+
+(define_expand "call"
+ [(call (match_operand:SI 0 "memory_operand" "")
+ (match_operand 1 "" ""))]
+ ""
+ "")
+
+(define_insn "*call_indirect"
+ [(call (mem:SI (match_operand:SI 0 "register_operand" "l*r"))
+ (match_operand 1 "" ""))]
+ "TARGET_THUMB_INTERWORK"
+ "bl\\t__call_via_%0"
+[(set_attr "length" "4")])
+
+
+(define_insn "*call_indirect"
+ [(call (mem:SI (match_operand:SI 0 "register_operand" "l*r"))
+ (match_operand 1 "" ""))]
+ "! TARGET_THUMB_INTERWORK"
+ "bl\\t__call_via_%0"
+[(set_attr "length" "4")])
+;; This used to be "mov\\tlr,pc\;bx\\t%0", but that sequence does not set
+;; the bottom (Thumb) bit of lr.
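+;; Each __call_via_<reg> helper (provided by lib1thumb.asm) is assumed to
+;; be essentially just a "bx <reg>"; reaching it via bl leaves lr holding
+;; the return address with the bottom bit set.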
+
+(define_expand "call_value"
+ [(set (match_operand 0 "" "")
+ (call (match_operand 1 "memory_operand" "")
+ (match_operand 2 "" "")))]
+ ""
+ "")
+
+(define_insn "*call_value_indirect"
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
+ (match_operand 2 "" "")))]
+ "TARGET_THUMB_INTERWORK"
+ "bl\\t__call_via_%1"
+[(set_attr "length" "4")])
+
+(define_insn "*call_value_indirect"
+ [(set (match_operand 0 "" "=l")
+ (call (mem:SI (match_operand:SI 1 "register_operand" "l*r"))
+ (match_operand 2 "" "")))]
+ "! TARGET_THUMB_INTERWORK"
+ "bl\\t__call_via_%1"
+[(set_attr "length" "4")])
+;; This used to be "mov\\tlr,pc\;bx\\t%1", but that sequence does not set
+;; the bottom (Thumb) bit of lr.
+
+(define_insn "*call_insn"
+ [(call (mem:SI (match_operand:SI 0 "" "i"))
+ (match_operand:SI 1 "" ""))]
+ "GET_CODE (operands[0]) == SYMBOL_REF"
+ "bl\\t%a0"
+[(set_attr "length" "4")])
+
+(define_insn "*call_value_insn"
+ [(set (match_operand 0 "register_operand" "=l")
+ (call (mem:SI (match_operand 1 "" "i"))
+ (match_operand 2 "" "")))]
+ "GET_CODE (operands[1]) == SYMBOL_REF"
+ "bl\\t%a1"
+[(set_attr "length" "4")])
+
+;; An untyped call pattern is not required, since all functions return in r0.
+
+;; Miscellaneous patterns
+
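+;; "mov r8, r8" is the conventional Thumb no-op: a high-register move,
+;; which does not affect the condition codes.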
+(define_insn "nop"
+ [(clobber (const_int 0))]
+ ""
+ "mov\\tr8, r8")
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ ""
+ [(set_attr "length" "0")])
+
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+ "
+ thumb_expand_prologue ();
+ DONE;
+")
+
+(define_expand "epilogue"
+ [(unspec_volatile [(const_int 0)] 1)]
+ "! thumb_trivial_epilogue ()"
+ "
+ thumb_expand_epilogue ();
+")
+
+(define_insn "*epilogue_insns"
+ [(unspec_volatile [(const_int 0)] 1)]
+ ""
+ "*
+ return thumb_unexpanded_epilogue ();
+"
+[(set_attr "length" "42")])
+
+;; Special patterns for dealing with the constant pool
+
+(define_insn "consttable_4"
+ [(unspec_volatile [(match_operand 0 "" "")] 2)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 4, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "4")])
+
+(define_insn "consttable_8"
+ [(unspec_volatile [(match_operand 0 "" "")] 3)]
+ ""
+ "*
+{
+ switch (GET_MODE_CLASS (GET_MODE (operands[0])))
+ {
+ case MODE_FLOAT:
+ {
+ union real_extract u;
+ bcopy ((char *) &CONST_DOUBLE_LOW (operands[0]), (char *) &u, sizeof u);
+ assemble_real (u.d, GET_MODE (operands[0]));
+ break;
+ }
+ default:
+ assemble_integer (operands[0], 8, 1);
+ break;
+ }
+ return \"\";
+}"
+[(set_attr "length" "8")])
+
+(define_insn "consttable_end"
+ [(unspec_volatile [(const_int 0)] 4)]
+ ""
+ "*
+ /* Nothing to do (currently). */
+ return \"\";
+")
+
+(define_insn "align_4"
+ [(unspec_volatile [(const_int 0)] 5)]
+ ""
+ "*
+ assemble_align (32);
+ return \"\";
+")
diff --git a/gcc/configure b/gcc/configure
index 09c1d80..90b4309 100755
--- a/gcc/configure
+++ b/gcc/configure
@@ -4168,6 +4168,13 @@ for machine in $build $host $target; do
# ;;
# tahoe-*-bsd*) # tahoe running BSD
# ;;
+ thumb-*-coff* | thumbel-*-coff*)
+ tm_file=arm/tcoff.h
+ out_file=arm/thumb.c
+ xm_file=arm/xm-thumb.h
+ md_file=arm/thumb.md
+ tmake_file=arm/t-thumb
+ ;;
# This hasn't been upgraded to GCC 2.
# tron-*-*)
# cpu_type=gmicro
@@ -4445,7 +4452,8 @@ then extra_headers=; fi
if [ x"$xm_file" = x ]
then xm_file=$cpu_type/xm-$cpu_type.h; fi
-md_file=$cpu_type/$cpu_type.md
+if [ x$md_file = x ]
+then md_file=$cpu_type/$cpu_type.md; fi
if [ x$out_file = x ]
then out_file=$cpu_type/$cpu_type.c; fi
diff --git a/gcc/configure.in b/gcc/configure.in
index 29176b8..0a0fc41 100644
--- a/gcc/configure.in
+++ b/gcc/configure.in
@@ -2485,6 +2485,13 @@ for machine in $build $host $target; do
# ;;
# tahoe-*-bsd*) # tahoe running BSD
# ;;
+ thumb-*-coff* | thumbel-*-coff*)
+ tm_file=arm/tcoff.h
+ out_file=arm/thumb.c
+ xm_file=arm/xm-thumb.h
+ md_file=arm/thumb.md
+ tmake_file=arm/t-thumb
+ ;;
# This hasn't been upgraded to GCC 2.
# tron-*-*)
# cpu_type=gmicro
@@ -2762,7 +2769,8 @@ then extra_headers=; fi
if [[ x"$xm_file" = x ]]
then xm_file=$cpu_type/xm-$cpu_type.h; fi
-md_file=$cpu_type/$cpu_type.md
+if [[ x$md_file = x ]]
+then md_file=$cpu_type/$cpu_type.md; fi
if [[ x$out_file = x ]]
then out_file=$cpu_type/$cpu_type.c; fi