Diffstat (limited to 'gcc/config/aarch64/aarch64-protos.h')
 gcc/config/aarch64/aarch64-protos.h | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
index c935e7b..38c307c 100644
--- a/gcc/config/aarch64/aarch64-protos.h
+++ b/gcc/config/aarch64/aarch64-protos.h
@@ -290,8 +290,8 @@ struct sve_vec_cost : simd_vec_cost
   /* The cost of a gather load instruction.  The x32 value is for loads
      of 32-bit elements and the x64 value is for loads of 64-bit elements.  */
-  const int gather_load_x32_cost;
-  const int gather_load_x64_cost;
+  const unsigned int gather_load_x32_cost;
+  const unsigned int gather_load_x64_cost;
 
   /* Additional loop initialization cost of using a gather load instruction.  The x32
      value is for loads of 32-bit elements and the x64 value is for loads of
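For context, the fields widened to unsigned int above are selected by element width in the SVE cost model. A minimal sketch of that selection, using a hypothetical helper name (the real logic lives in aarch64.cc):

/* Hypothetical illustration only: choose the per-element gather-load
   cost from an sve_vec_cost, following the x32/x64 split documented
   in the comment above.  */
static unsigned int
sketch_gather_load_cost (const sve_vec_cost *costs, unsigned int elt_bits)
{
  return elt_bits == 32 ? costs->gather_load_x32_cost
			: costs->gather_load_x64_cost;
}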
@@ -933,6 +933,7 @@ char *aarch64_output_simd_mov_imm (rtx, unsigned);
char *aarch64_output_simd_orr_imm (rtx, unsigned);
char *aarch64_output_simd_and_imm (rtx, unsigned);
char *aarch64_output_simd_xor_imm (rtx, unsigned);
+char *aarch64_output_fmov (rtx);
char *aarch64_output_sve_mov_immediate (rtx);
char *aarch64_output_sve_ptrues (rtx);
@@ -946,8 +947,10 @@ bool aarch64_parallel_select_half_p (machine_mode, rtx);
bool aarch64_pars_overlap_p (rtx, rtx);
bool aarch64_simd_scalar_immediate_valid_for_move (rtx, scalar_int_mode);
bool aarch64_simd_shift_imm_p (rtx, machine_mode, bool);
+bool aarch64_sve_valid_pred_p (rtx, machine_mode);
bool aarch64_sve_ptrue_svpattern_p (rtx, struct simd_immediate_info *);
bool aarch64_simd_valid_and_imm (rtx);
+bool aarch64_simd_valid_and_imm_fmov (rtx, unsigned int * = NULL);
bool aarch64_simd_valid_mov_imm (rtx);
bool aarch64_simd_valid_orr_imm (rtx);
bool aarch64_simd_valid_xor_imm (rtx);
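The new aarch64_simd_valid_and_imm_fmov predicate takes an optional out-parameter, defaulted to NULL in the declaration above. A hedged caller sketch, assuming the pointer receives extra encoding information when non-null; the actual semantics are defined in aarch64.cc:

/* Hypothetical usage sketch, not GCC's actual code.  */
unsigned int info;
if (aarch64_simd_valid_and_imm_fmov (op, &info))
  {
    /* The AND immediate can be emitted as an FMOV; use INFO as needed.  */
  }
else if (aarch64_simd_valid_and_imm (op))
  {
    /* Fall back to the plain vector AND-immediate form.  */
  }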
@@ -1026,6 +1029,9 @@ rtx aarch64_ptrue_reg (machine_mode, unsigned int);
rtx aarch64_ptrue_reg (machine_mode, machine_mode);
rtx aarch64_pfalse_reg (machine_mode);
bool aarch64_sve_same_pred_for_ptest_p (rtx *, rtx *);
+rtx aarch64_sve_packed_pred (machine_mode);
+rtx aarch64_sve_fp_pred (machine_mode, rtx *);
+rtx aarch64_sve_emit_masked_fp_pred (machine_mode, rtx);
void aarch64_emit_load_store_through_mode (rtx, rtx, machine_mode);
bool aarch64_expand_maskloadstore (rtx *, machine_mode);
void aarch64_emit_sve_pred_move (rtx, rtx, rtx);
@@ -1034,6 +1040,7 @@ bool aarch64_maybe_expand_sve_subreg_move (rtx, rtx);
rtx aarch64_replace_reg_mode (rtx, machine_mode);
void aarch64_split_sve_subreg_move (rtx, rtx, rtx);
void aarch64_expand_prologue (void);
+void aarch64_decompose_vec_struct_index (machine_mode, rtx *, rtx *, bool);
void aarch64_expand_vector_init (rtx, rtx);
void aarch64_sve_expand_vector_init_subvector (rtx, rtx);
void aarch64_sve_expand_vector_init (rtx, rtx);
@@ -1129,6 +1136,8 @@ bool aarch64_general_check_builtin_call (location_t, vec<location_t>,
                                         unsigned int, tree, unsigned int,
                                         tree *);
+bool aarch64_cb_rhs (rtx_code op_code, rtx rhs);
+
namespace aarch64 {
void report_non_ice (location_t, tree, unsigned int);
void report_out_of_range (location_t, tree, unsigned int, HOST_WIDE_INT,
@@ -1265,6 +1274,7 @@ void aarch64_expand_reversed_crc_using_pmull (scalar_mode, scalar_mode, rtx *);
void aarch64_expand_fp_spaceship (rtx, rtx, rtx, rtx);
+extern bool aarch64_pacret_enabled ();
extern bool aarch64_gcs_enabled ();
extern unsigned aarch64_data_alignment (const_tree exp, unsigned align);