author    | Wilco Dijkstra <wdijkstr@arm.com> | 2020-01-17 14:27:14 +0000
committer | Wilco Dijkstra <wdijkstr@arm.com> | 2020-01-17 14:28:30 +0000
commit    | 6ed8c923325c9b2fcb30996fa14582ac136d9329 (patch)
tree      | a0da09c53e25fefd8d423f07a018e214d213ab50
parent    | eff9c61dfb082cb3ea26f354d795e4098ec76866 (diff)
[AArch64] Enable compare branch fusion
Enable the most basic form of compare-branch fusion since various CPUs
support it. This has no measurable effect on cores which don't support
branch fusion, but increases fusion opportunities on cores which do.
gcc/
* config/aarch64/aarch64.c (generic_tunings): Add branch fusion.
(neoversen1_tunings): Likewise.
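
The change itself is just two tuning-table edits: each CPU's tune_params carries a fusible_ops bitmask, and the scheduler's macro-fusion hook (TARGET_SCHED_MACRO_FUSION_PAIR_P) only keeps an instruction pair adjacent when the corresponding bit is set. The code below is a minimal, self-contained sketch of that bitmask pattern, not the real GCC definitions; the enum values and the trimmed-down tune_params_sketch struct are hypothetical stand-ins for aarch64_fusion_pairs and tune_params.

/* Minimal sketch of the fusible_ops bitmask pattern (not the real GCC code).
   Each supported fusion pair gets one bit; a core's tuning enables a pair by
   setting that bit in its fusible_ops mask.  */
#include <stdbool.h>
#include <stdio.h>

enum fusion_pairs
{
  FUSE_NOTHING    = 0,
  FUSE_AES_AESMC  = 1 << 0,	/* AES round + mix-columns pair.  */
  FUSE_CMP_BRANCH = 1 << 1,	/* Compare + conditional branch.  */
};

struct tune_params_sketch
{
  const char *name;
  unsigned int fusible_ops;	/* Bitmask of enabled fusion pairs.  */
};

/* What this patch effectively does: OR the compare/branch bit into the
   masks used by the generic and Neoverse N1 tunings.  */
static const struct tune_params_sketch generic_sketch =
  { "generic", FUSE_AES_AESMC | FUSE_CMP_BRANCH };

/* The macro-fusion hook tests the bit before keeping a compare and its
   consuming branch adjacent in the schedule.  */
static bool
fusion_enabled_p (const struct tune_params_sketch *tune,
		  enum fusion_pairs pair)
{
  return (tune->fusible_ops & pair) != 0;
}

int
main (void)
{
  printf ("cmp+branch fusion on %s: %d\n", generic_sketch.name,
	  fusion_enabled_p (&generic_sketch, FUSE_CMP_BRANCH));
  return 0;
}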
-rw-r--r-- | gcc/ChangeLog                | 5
-rw-r--r-- | gcc/config/aarch64/aarch64.c | 4
2 files changed, 7 insertions, 2 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 06bcb81..debd4d7 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,10 @@
 2020-01-17  Wilco Dijkstra  <wdijkstr@arm.com>
 
+	* config/aarch64/aarch64.c (generic_tunings): Add branch fusion.
+	(neoversen1_tunings): Likewise.
+
+2020-01-17  Wilco Dijkstra  <wdijkstr@arm.com>
+
 	PR target/92692
 	* config/aarch64/aarch64.c (aarch64_split_compare_and_swap)
 	Add assert to ensure prolog has been emitted.
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index ea9d6a1..fb2e102 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -726,7 +726,7 @@ static const struct tune_params generic_tunings =
   SVE_NOT_IMPLEMENTED, /* sve_width */
   4, /* memmov_cost */
   2, /* issue_rate */
-  (AARCH64_FUSE_AES_AESMC), /* fusible_ops */
+  (AARCH64_FUSE_AES_AESMC | AARCH64_FUSE_CMP_BRANCH), /* fusible_ops */
   "16:12", /* function_align. */
   "4", /* jump_align. */
   "8", /* loop_align. */
@@ -1130,7 +1130,7 @@ static const struct tune_params neoversen1_tunings =
   SVE_NOT_IMPLEMENTED, /* sve_width */
   4, /* memmov_cost */
   3, /* issue_rate */
-  AARCH64_FUSE_AES_AESMC, /* fusible_ops */
+  (AARCH64_FUSE_AES_AESMC | AARCH64_FUSE_CMP_BRANCH), /* fusible_ops */
   "32:16", /* function_align. */
   "32:16", /* jump_align. */
   "32:16", /* loop_align. */
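
For reference, the pair this flag covers is an integer compare followed by a conditional branch on its flags. The function below is a hypothetical example, not part of the patch: its loop bound check typically compiles on AArch64 to a compare immediately followed by a conditional branch, and with AARCH64_FUSE_CMP_BRANCH set in generic_tunings and neoversen1_tunings the scheduler now tries to keep those two instructions adjacent, so cores that fuse the pair can issue it as one.

/* Hypothetical example, not taken from the patch.  The loop back edge
   typically becomes a "cmp wN, wM" followed by a conditional branch;
   keeping the two adjacent lets fusing cores treat them as one op.  */
unsigned int
count_matches (const int *p, int n, int key)
{
  unsigned int count = 0;
  for (int i = 0; i < n; i++)	/* compare + branch pair at the back edge.  */
    if (p[i] == key)
      count++;
  return count;
}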