author		Richard Earnshaw <rearnsha@arm.com>	2009-01-13 14:31:56 +0000
committer	Richard Earnshaw <rearnsha@gcc.gnu.org>	2009-01-13 14:31:56 +0000
commit		ff128632643c1a931d3dd0f1f464dc19c2b65504 (patch)
tree		7992941d9a3dabf25e0767f4cd187a14d79363a2 /gcc
parent		d5a0a47beef90da73f9f105c30ed380dce36e26b (diff)
arm.c (output_move_double): Don't synthesize thumb-2 ldrd/strd with two 32-bit instructions.

	* arm.c (output_move_double): Don't synthesize thumb-2 ldrd/strd with
	two 32-bit instructions.

From-SVN: r143339
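In outline, the patched branch of output_move_double reduces to the decision sketched below as a small stand-alone C program rather than the real GCC code (emit_ldrd, emit_ldr_pair and move_double are illustrative names only): on Thumb-2 the ldrd encoding is always usable here, so the two-32-bit-load fallback is kept only for ARM-mode constant offsets outside ldrd's -255..255 immediate range, as large IWMMXT offsets can be.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins; the real code emits text via output_asm_insn.  */
static void emit_ldrd (void)
{
  puts ("ldrd\trlo, [base, #off]!");	/* one doubleword load */
}

static void emit_ldr_pair (void)
{
  puts ("ldr\trlo, [base, #off]!");	/* low word */
  puts ("ldr\trhi, [base, #4]");	/* high word, cf. %H0 in the patch */
}

/* The condition mirrors the patched test: Thumb-2, a non-constant
   offset, or a small constant offset all take the single-insn path.  */
static void move_double (bool target_thumb2, bool const_offset, long offset)
{
  if (target_thumb2
      || !const_offset
      || (offset > -256 && offset < 256))
    emit_ldrd ();
  else
    emit_ldr_pair ();
}

int main (void)
{
  move_double (false, true, 1024);	/* ARM mode, large offset: ldr pair */
  move_double (true, true, 1024);	/* Thumb-2: single ldrd */
  return 0;
}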
Diffstat (limited to 'gcc')
-rw-r--r--	gcc/ChangeLog	5
-rw-r--r--	gcc/config/arm/arm.c	58
2 files changed, 33 insertions, 30 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 7bb4789..d5c7e9e 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,10 @@
2009-01-13 Richard Earnshaw <rearnsha@arm.com>
+ * arm.c (output_move_double): Don't synthesize thumb-2 ldrd/strd with
+ two 32-bit instructions.
+
+2009-01-13 Richard Earnshaw <rearnsha@arm.com>
+
* arm.c (struct processors): Pass for speed down into cost helper
functions.
(const_ok_for_op): Handle COMPARE and inequality nodes.
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index 8c2aa5e..209682b 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -6377,7 +6377,7 @@ neon_valid_immediate (rtx op, enum machine_mode mode, int inverse,
break; \
}
- unsigned int i, elsize, idx = 0, n_elts = CONST_VECTOR_NUNITS (op);
+ unsigned int i, elsize = 0, idx = 0, n_elts = CONST_VECTOR_NUNITS (op);
unsigned int innersize = GET_MODE_SIZE (GET_MODE_INNER (mode));
unsigned char bytes[16];
int immtype = -1, matches;
@@ -10391,36 +10391,36 @@ output_move_double (rtx *operands)
}
else
{
- /* IWMMXT allows offsets larger than ldrd can handle,
- fix these up with a pair of ldr. */
- if (GET_CODE (otherops[2]) == CONST_INT
- && (INTVAL(otherops[2]) <= -256
- || INTVAL(otherops[2]) >= 256))
+ /* Use a single insn if we can.
+ FIXME: IWMMXT allows offsets larger than ldrd can
+ handle, fix these up with a pair of ldr. */
+ if (TARGET_THUMB2
+ || GET_CODE (otherops[2]) != CONST_INT
+ || (INTVAL (otherops[2]) > -256
+ && INTVAL (otherops[2]) < 256))
+ output_asm_insn ("ldr%(d%)\t%0, [%1, %2]!", otherops);
+ else
{
output_asm_insn ("ldr%?\t%0, [%1, %2]!", otherops);
- otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
- output_asm_insn ("ldr%?\t%0, [%1, #4]", otherops);
+ output_asm_insn ("ldr%?\t%H0, [%1, #4]", otherops);
}
- else
- output_asm_insn ("ldr%(d%)\t%0, [%1, %2]!", otherops);
}
}
else
{
- /* IWMMXT allows offsets larger than ldrd can handle,
+ /* Use a single insn if we can.
+ FIXME: IWMMXT allows offsets larger than ldrd can handle,
fix these up with a pair of ldr. */
- if (GET_CODE (otherops[2]) == CONST_INT
- && (INTVAL(otherops[2]) <= -256
- || INTVAL(otherops[2]) >= 256))
+ if (TARGET_THUMB2
+ || GET_CODE (otherops[2]) != CONST_INT
+ || (INTVAL (otherops[2]) > -256
+ && INTVAL (otherops[2]) < 256))
+ output_asm_insn ("ldr%(d%)\t%0, [%1], %2", otherops);
+ else
{
- otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
- output_asm_insn ("ldr%?\t%0, [%1, #4]", otherops);
- otherops[0] = operands[0];
+ output_asm_insn ("ldr%?\t%H0, [%1, #4]", otherops);
output_asm_insn ("ldr%?\t%0, [%1], %2", otherops);
}
- else
- /* We only allow constant increments, so this is safe. */
- output_asm_insn ("ldr%(d%)\t%0, [%1], %2", otherops);
}
break;
@@ -10474,6 +10474,7 @@ output_move_double (rtx *operands)
operands[1] = otherops[0];
if (TARGET_LDRD
&& (GET_CODE (otherops[2]) == REG
+ || TARGET_THUMB2
|| (GET_CODE (otherops[2]) == CONST_INT
&& INTVAL (otherops[2]) > -256
&& INTVAL (otherops[2]) < 256)))
@@ -10586,23 +10587,19 @@ output_move_double (rtx *operands)
/* IWMMXT allows offsets larger than ldrd can handle,
fix these up with a pair of ldr. */
- if (GET_CODE (otherops[2]) == CONST_INT
+ if (!TARGET_THUMB2
+ && GET_CODE (otherops[2]) == CONST_INT
&& (INTVAL(otherops[2]) <= -256
|| INTVAL(otherops[2]) >= 256))
{
- rtx reg1;
- reg1 = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
{
output_asm_insn ("ldr%?\t%0, [%1, %2]!", otherops);
- otherops[0] = reg1;
- output_asm_insn ("ldr%?\t%0, [%1, #4]", otherops);
+ output_asm_insn ("ldr%?\t%H0, [%1, #4]", otherops);
}
else
{
- otherops[0] = reg1;
- output_asm_insn ("ldr%?\t%0, [%1, #4]", otherops);
- otherops[0] = operands[1];
+ output_asm_insn ("ldr%?\t%H0, [%1, #4]", otherops);
output_asm_insn ("ldr%?\t%0, [%1], %2", otherops);
}
}
@@ -10637,6 +10634,7 @@ output_move_double (rtx *operands)
}
if (TARGET_LDRD
&& (GET_CODE (otherops[2]) == REG
+ || TARGET_THUMB2
|| (GET_CODE (otherops[2]) == CONST_INT
&& INTVAL (otherops[2]) > -256
&& INTVAL (otherops[2]) < 256)))
@@ -10650,9 +10648,9 @@ output_move_double (rtx *operands)
default:
otherops[0] = adjust_address (operands[0], SImode, 4);
- otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
+ otherops[1] = operands[1];
output_asm_insn ("str%?\t%1, %0", operands);
- output_asm_insn ("str%?\t%1, %0", otherops);
+ output_asm_insn ("str%?\t%H1, %0", otherops);
}
}
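One recurring detail in the hunks above: the new templates use %H0/%H1 where the old code built an explicit high-word register with gen_rtx_REG (SImode, 1 + reg0). In the ARM backend's operand printer, %H emits the register numbered one above the operand's, i.e. the second half of a doubleword pair. A minimal model of that behaviour (print_high_half and the register table are illustrative, not the GCC implementation):

#include <stdio.h>

static const char *reg_names[] =
  { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7" };

/* Model of the '%H' modifier: given the low register of a pair,
   print the high one (register number + 1).  */
static void print_high_half (FILE *stream, int regno)
{
  fprintf (stream, "%s", reg_names[regno + 1]);
}

int main (void)
{
  /* "ldr %H0, [%1, #4]" with operand 0 in r4 loads the high word
     into r5, the second insn of the synthesized pair.  */
  printf ("ldr\t");
  print_high_half (stdout, 4);
  printf (", [r6, #4]\n");
  return 0;
}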