author     Richard Earnshaw <rearnsha@arm.com>       2019-10-31 16:07:09 +0000
committer  Richard Earnshaw <rearnsha@gcc.gnu.org>   2019-10-31 16:07:09 +0000
commit     a2f9e6e3d98efadc420e0b8327a3f45c3f65689b
tree       f67c73040697b238524982807f04a014984b094e
parent     d84b9ad53fed75590c3ebc6e38bd49db3b201128
[arm] Avoid using negative offsets for 'immediate' addresses when compiling for Thumb2
Thumb2 code now uses the Arm implementation of legitimize_address.
That code has a case to handle addresses that are absolute CONST_INT
values, a common idiom in deeply embedded targets (e.g.
void *p = (void*)0x12345678;).  Since Thumb2 can only encode very
small negative immediate offsets from a base register, while positive
offsets can be much larger, we want to avoid forming a CSE base that
will then be used with a negative offset.
This was originally reported upstream in
https://gcc.gnu.org/ml/gcc-help/2019-10/msg00122.html
For example,
#include <stdint.h>

void test1(void) {
  volatile uint32_t * const p = (uint32_t *) 0x43fe1800;
  p[3] = 1;
  p[4] = 2;
  p[1] = 3;
  p[7] = 4;
  p[0] = 6;
}
With the new code, instead of
        ldr     r3, .L2
        subw    r2, r3, #2035
        movs    r1, #1
        str     r1, [r2]
        subw    r2, r3, #2031
        movs    r1, #2
        str     r1, [r2]
        subw    r2, r3, #2043
        movs    r1, #3
        str     r1, [r2]
        subw    r2, r3, #2019
        movs    r1, #4
        subw    r3, r3, #2047
        str     r1, [r2]
        movs    r2, #6
        str     r2, [r3]
        bx      lr
We now get
        ldr     r3, .L2
        movs    r2, #1
        str     r2, [r3, #2060]
        movs    r2, #2
        str     r2, [r3, #2064]
        movs    r2, #3
        str     r2, [r3, #2052]
        movs    r2, #4
        str     r2, [r3, #2076]
        movs    r2, #6
        str     r2, [r3, #2048]
        bx      lr
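To see where those constants come from, here is a minimal standalone
sketch (not GCC code; main and the printf calls are illustration only,
but the arithmetic mirrors the base/index split in
arm_legitimize_address) applied to &p[3] from the example above:

#include <stdint.h>
#include <stdio.h>

int main (void)
{
  const int bits = 12;                /* SImode gets a 12-bit index.  */
  int32_t mask = (1 << bits) - 1;     /* 0xfff  */
  int32_t addr = 0x43fe1800 + 3 * 4;  /* address of p[3]: 0x43fe180c  */
  int32_t base = addr & ~mask;        /* 0x43fe1000: the shared CSE base  */
  int32_t index = addr & mask;        /* 0x80c = 2060: str r2, [r3, #2060]  */

  /* The old heuristic fired because base has more than (32 - bits)/2
     bits set, so it set the low bits of the base and made the index
     negative; Thumb2 cannot encode such an offset in a str, hence the
     subw instructions in the first listing.  */
  int32_t old_base = base | mask;     /* 0x43fe1fff  */
  int32_t old_index = index - mask;   /* 2060 - 4095 = -2035: subw #2035  */

  printf ("new: base=%#x, index=%d\n", (unsigned) base, index);
  printf ("old: base=%#x, index=%d\n", (unsigned) old_base, old_index);
  return 0;
}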
* config/arm/arm.c (arm_legitimize_address): Don't form negative
offsets from a CONST_INT address when TARGET_THUMB2.
From-SVN: r277677
 gcc/ChangeLog        |  5
 gcc/config/arm/arm.c | 17
 2 files changed, 15 insertions, 7 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index e520cf0..66b7a14 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,10 @@
 2019-10-31  Richard Earnshaw  <rearnsha@arm.com>
 
+	* config/arm/arm.c (arm_legitimize_address): Don't form negative offsets
+	from a CONST_INT address when TARGET_THUMB2.
+
+2019-10-31  Richard Earnshaw  <rearnsha@arm.com>
+
 	* config/arm/arm.md (add_not_cin): New insn.
 	(add_not_shift_cin): Likewise.
 
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index 3c83b7f..eddd3ca 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -9039,17 +9039,20 @@ arm_legitimize_address (rtx x, rtx orig_x, machine_mode mode)
       HOST_WIDE_INT mask, base, index;
       rtx base_reg;
 
-      /* ldr and ldrb can use a 12-bit index, ldrsb and the rest can only
-	 use a 8-bit index.  So let's use a 12-bit index for SImode only and
-	 hope that arm_gen_constant will enable ldrb to use more bits.  */
+      /* LDR and LDRB can use a 12-bit index, ldrsb and the rest can
+	 only use a 8-bit index.  So let's use a 12-bit index for
+	 SImode only and hope that arm_gen_constant will enable LDRB
+	 to use more bits.  */
       bits = (mode == SImode) ? 12 : 8;
       mask = (1 << bits) - 1;
       base = INTVAL (x) & ~mask;
       index = INTVAL (x) & mask;
-      if (bit_count (base & 0xffffffff) > (32 - bits)/2)
+      if (TARGET_ARM && bit_count (base & 0xffffffff) > (32 - bits)/2)
 	{
-	  /* It'll most probably be more efficient to generate the base
-	     with more bits set and use a negative index instead.  */
+	  /* It'll most probably be more efficient to generate the
+	     base with more bits set and use a negative index instead.
+	     Don't do this for Thumb as negative offsets are much more
+	     limited.  */
 	  base |= mask;
 	  index -= mask;
 	}
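The difference in the listings above can be reproduced with any Thumb2
target at -O2; the exact compiler and CPU below are an assumption, not
part of the commit:

        arm-none-eabi-gcc -O2 -mcpu=cortex-m4 -S test1.c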