author     Wang Pengcheng <wangpengcheng.pp@bytedance.com>   2024-10-23 23:11:53 -0600
committer  Jeff Law <jlaw@ventanamicro.com>                  2024-10-23 23:11:53 -0600
commit     078f7c4f1fcf4d7099d855afb02dbaf71bebddbf (patch)
tree       cc9a928c8e4f2b697041cef5d61da99001cca8f1 /gcc
parent     a616b7e1db7319c587b9c65fe9548c59c67d1234 (diff)
[PATCH] RISC-V: override alignment of function/jump/loop
Just like what AArch64 has done.

Signed-off-by: Wang Pengcheng <wangpengcheng.pp@bytedance.com>

gcc/ChangeLog:

	* config/riscv/riscv.cc (struct riscv_tune_param): Add new tune
	options.
	(riscv_override_options_internal): Override the default alignment
	when not optimizing for size.
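For readers who want to see the override rule in isolation, below is a minimal
standalone sketch (not GCC source): the tune_align and align_opts structs, the
maybe_apply_function_align helper, and the "8"/"4" values are illustrative
stand-ins for riscv_tune_param, gcc_options and a real tuning model. It only
demonstrates the rule the patch adds: a tune-provided alignment string is used
when the corresponding -falign-* flag is enabled, no explicit value was given,
and we are not optimizing for size.

/* Standalone sketch of the override rule (names and values are
   illustrative, not GCC code).  */
#include <cstdio>

struct tune_align
{
  const char *function_align;	/* stand-in for riscv_tune_param fields.  */
  const char *jump_align;
  const char *loop_align;
};

struct align_opts
{
  bool optimize_size;		   /* -Os in effect?  */
  bool flag_align_functions;	   /* -falign-functions enabled?  */
  const char *str_align_functions; /* explicit -falign-functions=N, if any.  */
};

/* Apply the tune default only when the user enabled the flag, gave no
   explicit value, and is not optimizing for size.  */
static void
maybe_apply_function_align (const tune_align &tune, align_opts &opts)
{
  if (!opts.optimize_size
      && opts.flag_align_functions
      && !opts.str_align_functions)
    opts.str_align_functions = tune.function_align;
}

int
main ()
{
  tune_align tune = { "8", "4", "4" };	       /* hypothetical tune defaults.  */
  align_opts opts = { false, true, nullptr };  /* e.g. -O2, no explicit value.  */
  maybe_apply_function_align (tune, opts);
  std::printf ("effective -falign-functions=%s\n",
	       opts.str_align_functions ? opts.str_align_functions : "(none)");
  return 0;
}

With the values above this prints "effective -falign-functions=8"; with
optimize_size set, or with an explicit str_align_functions, the tune default
is left alone, mirroring the behaviour added in riscv_override_options_internal.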
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/config/riscv/riscv.cc | 15
1 file changed, 15 insertions, 0 deletions
diff --git a/gcc/config/riscv/riscv.cc b/gcc/config/riscv/riscv.cc
index 3ac4023..7d6fc14 100644
--- a/gcc/config/riscv/riscv.cc
+++ b/gcc/config/riscv/riscv.cc
@@ -295,6 +295,9 @@ struct riscv_tune_param
   bool overlap_op_by_pieces;
   unsigned int fusible_ops;
   const struct cpu_vector_cost *vec_costs;
+  const char *function_align = nullptr;
+  const char *jump_align = nullptr;
+  const char *loop_align = nullptr;
 };
@@ -10283,6 +10286,18 @@ riscv_override_options_internal (struct gcc_options *opts)
	       ? &optimize_size_tune_info
	       : cpu->tune_param;
 
+  /* If not optimizing for size, set the default
+     alignment to what the target wants. */
+  if (!opts->x_optimize_size)
+    {
+      if (opts->x_flag_align_loops && !opts->x_str_align_loops)
+	opts->x_str_align_loops = tune_param->loop_align;
+      if (opts->x_flag_align_jumps && !opts->x_str_align_jumps)
+	opts->x_str_align_jumps = tune_param->jump_align;
+      if (opts->x_flag_align_functions && !opts->x_str_align_functions)
+	opts->x_str_align_functions = tune_param->function_align;
+    }
+
   /* Use -mtune's setting for slow_unaligned_access, even when optimizing
      for size. For architectures that trap and emulate unaligned accesses,
      the performance cost is too great, even for -Os. Similarly, if