 gcc/ChangeLog                |  4 ++++
 gcc/config/aarch64/aarch64.h | 10 ++--------
 2 files changed, 6 insertions(+), 8 deletions(-)
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index f955514..ce26404 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,7 @@
+2020-01-20  Wilco Dijkstra  <wdijkstr@arm.com>
+
+	* config/aarch64/aarch64.h (SLOW_BYTE_ACCESS): Set to 1.
+
 2020-01-20  Richard Sandiford  <richard.sandiford@arm.com>
 
 	* config/aarch64/aarch64-sve-builtins-base.cc
diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
index eac2d62..342fe29 100644
--- a/gcc/config/aarch64/aarch64.h
+++ b/gcc/config/aarch64/aarch64.h
@@ -1006,14 +1006,8 @@ typedef struct
    if given data not on the nominal alignment.  */
 #define STRICT_ALIGNMENT	TARGET_STRICT_ALIGN
 
-/* Define this macro to be non-zero if accessing less than a word of
-   memory is no faster than accessing a word of memory, i.e., if such
-   accesses require more than one instruction or if there is no
-   difference in cost.
-   Although there's no difference in instruction count or cycles,
-   in AArch64 we don't want to expand to a sub-word to a 64-bit access
-   if we don't have to, for power-saving reasons.  */
-#define SLOW_BYTE_ACCESS 0
+/* Enable wide bitfield accesses for more efficient bitfield code.  */
+#define SLOW_BYTE_ACCESS 1
 
 #define NO_FUNCTION_CSE 1
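
For context, an illustrative sketch of the kind of code this setting targets (not part of the commit; the struct and function below are hypothetical, not code from the GCC tree). With SLOW_BYTE_ACCESS set to a non-zero value, GCC may access a bit-field in its full declared container width, so adjacent bit-field reads can share one wide load instead of issuing separate narrow loads:

/* Hypothetical example: two bit-fields sharing one 32-bit container.  */
#include <stdint.h>

struct flags
{
  uint32_t kind  : 4;
  uint32_t state : 4;
  uint32_t count : 8;
};

/* With SLOW_BYTE_ACCESS == 1, the compiler is free to load the 32-bit
   container once and extract both fields from it, rather than
   performing two separate narrow loads.  */
uint32_t
sum_fields (const struct flags *f)
{
  return f->kind + f->state;
}

The removed comment preferred narrow accesses for power-saving reasons; the new setting instead favours merging bit-field accesses into wider loads, which is what the new comment means by "more efficient bitfield code".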