author    Richard Sandiford <rsandifo@nildram.co.uk>    2007-10-18 17:27:19 +0000
committer Richard Sandiford <rsandifo@gcc.gnu.org>      2007-10-18 17:27:19 +0000
commit    ab77a036ff36a414ba6c54d03216d03cf0797591 (patch)
tree      f05aabb5b85370f32f7719356a62c20480111231 /gcc
parent    4ec4ca36f8b0265728e15504294c8c31d1b75ac4 (diff)
mips.h: Move variable declarations to end of file and enclose them all in #ifndef USED_FOR_TARGET
gcc/
	* config/mips/mips.h: Move variable declarations to end of file and
	enclose them all in #ifndef USED_FOR_TARGET.
	* config/mips/mips.c: Reorder functions into more logical groups,
	and so that callees appear before callers.  Put the targetm
	initializer at the end of the file.  Remove forward static
	declarations where possible.
	(mips_init_builtins): Add "static" to definition.
	(mips_expand_builtin, mips_mode_rep_extended): Likewise.

From-SVN: r129452
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog              11
-rw-r--r--  gcc/config/mips/mips.c   9879
-rw-r--r--  gcc/config/mips/mips.h     62
3 files changed, 4898 insertions, 5054 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 39901e4..35724af 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,16 @@
2007-10-18 Richard Sandiford <rsandifo@nildram.co.uk>
+ * config/mips/mips.h: Move variable declarations to end of file and
+ enclose them all in #ifndef USED_FOR_TARGET.
+ * config/mips/mips.c: Reorder functions into more logical groups,
+ and so that callees appear before callers. Put the targetm
+ initializer at the end of the file. Remove forward static
+ declarations where possible.
+ (mips_init_builtins): Add "static" to definition.
+ (mips_expand_builtin, mips_mode_rep_extended): Likewise.
+
+2007-10-18 Richard Sandiford <rsandifo@nildram.co.uk>
+
* config/mips/mips.c (TARGET_MIN_ANCHOR_OFFSET): Delete.
(TARGET_MAX_ANCHOR_OFFSET): Delete.
(mips_cannot_force_const_mem): Don't check TARGET_HAVE_TLS.
diff --git a/gcc/config/mips/mips.c b/gcc/config/mips/mips.c
index c460cca..e56d045 100644
--- a/gcc/config/mips/mips.c
+++ b/gcc/config/mips/mips.c
@@ -278,162 +278,6 @@ static const char *const mips_fp_conditions[] = {
MIPS_FP_CONDITIONS (STRINGIFY)
};
-/* A function to save or store a register. The first argument is the
- register and the second is the stack slot. */
-typedef void (*mips_save_restore_fn) (rtx, rtx);
-
-struct mips16_constant;
-struct mips_arg_info;
-struct mips_address_info;
-struct mips_integer_op;
-struct mips_sim;
-
-static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
-static bool mips_classify_address (struct mips_address_info *, rtx,
- enum machine_mode, int);
-static bool mips_cannot_force_const_mem (rtx);
-static bool mips_use_blocks_for_constant_p (enum machine_mode, const_rtx);
-static int mips_symbol_insns (enum mips_symbol_type, enum machine_mode);
-static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
-static rtx mips_force_temporary (rtx, rtx);
-static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
-static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
-static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
-static unsigned int mips_build_lower (struct mips_integer_op *,
- unsigned HOST_WIDE_INT);
-static unsigned int mips_build_integer (struct mips_integer_op *,
- unsigned HOST_WIDE_INT);
-static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
-static int m16_check_op (rtx, int, int, int);
-static bool mips_rtx_costs (rtx, int, int, int *);
-static int mips_address_cost (rtx);
-static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
-static bool mips_load_call_address (rtx, rtx, int);
-static bool mips_function_ok_for_sibcall (tree, tree);
-static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
-static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
-static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
-static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
- tree, int, struct mips_arg_info *);
-static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
-static void mips_set_architecture (const struct mips_cpu_info *);
-static void mips_set_tune (const struct mips_cpu_info *);
-static bool mips_handle_option (size_t, const char *, int);
-static struct machine_function *mips_init_machine_status (void);
-static void print_operand_reloc (FILE *, rtx, enum mips_symbol_context,
- const char **);
-static void mips_file_start (void);
-static int mips_small_data_pattern_1 (rtx *, void *);
-static int mips_rewrite_small_data_1 (rtx *, void *);
-static bool mips_function_has_gp_insn (void);
-static unsigned int mips_global_pointer (void);
-static bool mips_save_reg_p (unsigned int);
-static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
- mips_save_restore_fn);
-static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
-static void mips_output_cplocal (void);
-static void mips_emit_loadgp (void);
-static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
-static void mips_set_frame_expr (rtx);
-static rtx mips_frame_set (rtx, rtx);
-static void mips_save_reg (rtx, rtx);
-static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
-static void mips_restore_reg (rtx, rtx);
-static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
- HOST_WIDE_INT, tree);
-static section *mips_select_rtx_section (enum machine_mode, rtx,
- unsigned HOST_WIDE_INT);
-static section *mips_function_rodata_section (tree);
-static bool mips_in_small_data_p (const_tree);
-static bool mips_use_anchors_for_symbol_p (const_rtx);
-static int mips_fpr_return_fields (const_tree, tree *);
-static bool mips_return_in_msb (const_tree);
-static rtx mips_return_fpr_pair (enum machine_mode mode,
- enum machine_mode mode1, HOST_WIDE_INT,
- enum machine_mode mode2, HOST_WIDE_INT);
-static rtx mips16_gp_pseudo_reg (void);
-static void mips16_fp_args (FILE *, int, int);
-static void build_mips16_function_stub (FILE *);
-static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
-static void dump_constants (struct mips16_constant *, rtx);
-static int mips16_insn_length (rtx);
-static int mips16_rewrite_pool_refs (rtx *, void *);
-static void mips16_lay_out_constants (void);
-static void mips_sim_reset (struct mips_sim *);
-static void mips_sim_init (struct mips_sim *, state_t);
-static void mips_sim_next_cycle (struct mips_sim *);
-static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
-static int mips_sim_wait_regs_2 (rtx *, void *);
-static void mips_sim_wait_regs_1 (rtx *, void *);
-static void mips_sim_wait_regs (struct mips_sim *, rtx);
-static void mips_sim_wait_units (struct mips_sim *, rtx);
-static void mips_sim_wait_insn (struct mips_sim *, rtx);
-static void mips_sim_record_set (rtx, const_rtx, void *);
-static void mips_sim_issue_insn (struct mips_sim *, rtx);
-static void mips_sim_issue_nop (struct mips_sim *);
-static void mips_sim_finish_insn (struct mips_sim *, rtx);
-static void vr4130_avoid_branch_rt_conflict (rtx);
-static void vr4130_align_insns (void);
-static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
-static void mips_avoid_hazards (void);
-static void mips_reorg (void);
-static bool mips_strict_matching_cpu_name_p (const char *, const char *);
-static bool mips_matching_cpu_name_p (const char *, const char *);
-static const struct mips_cpu_info *mips_parse_cpu (const char *);
-static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
-static bool mips_return_in_memory (const_tree, const_tree);
-static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
-static void mips_macc_chains_record (rtx);
-static void mips_macc_chains_reorder (rtx *, int);
-static void vr4130_true_reg_dependence_p_1 (rtx, const_rtx, void *);
-static bool vr4130_true_reg_dependence_p (rtx);
-static bool vr4130_swap_insns_p (rtx, rtx);
-static void vr4130_reorder (rtx *, int);
-static void mips_promote_ready (rtx *, int, int);
-static void mips_sched_init (FILE *, int, int);
-static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
-static int mips_variable_issue (FILE *, int, rtx, int);
-static int mips_adjust_cost (rtx, rtx, rtx, int);
-static int mips_issue_rate (void);
-static int mips_multipass_dfa_lookahead (void);
-static void mips_init_libfuncs (void);
-static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
- tree, int *, int);
-static tree mips_build_builtin_va_list (void);
-static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
-static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
- const_tree, bool);
-static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
- const_tree, bool);
-static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
- tree, bool);
-static bool mips_valid_pointer_mode (enum machine_mode);
-static bool mips_scalar_mode_supported_p (enum machine_mode);
-static bool mips_vector_mode_supported_p (enum machine_mode);
-static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree, unsigned int);
-static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
-static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
-static void mips_init_builtins (void);
-static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree, bool);
-static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
- enum insn_code, enum mips_fp_condition,
- rtx, tree);
-static rtx mips_expand_builtin_compare (enum mips_builtin_type,
- enum insn_code, enum mips_fp_condition,
- rtx, tree);
-static rtx mips_expand_builtin_bposge (enum mips_builtin_type, rtx);
-static void mips_encode_section_info (tree, rtx, int);
-static void mips_extra_live_on_entry (bitmap);
-static int mips_comp_type_attributes (const_tree, const_tree);
-static void mips_set_mips16_mode (int);
-static void mips_insert_attributes (tree, tree *);
-static tree mips_merge_decl_attributes (tree, tree);
-static void mips_set_current_function (tree);
-static int mips_mode_rep_extended (enum machine_mode, enum machine_mode);
-static bool mips_offset_within_alignment_p (rtx, HOST_WIDE_INT);
-static void mips_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
-static rtx mips_dwarf_register_span (rtx);
-
/* Structure to be filled in by compute_frame_size with register
save masks, and offsets for the current function. */
@@ -584,6 +428,9 @@ int mips_section_threshold = -1;
/* Count the number of .file directives, so that .loc is up to date. */
int num_source_filenames = 0;
+/* Name of the file containing the current function. */
+const char *current_function_file = "";
+
/* Count the number of sdb related labels are generated (to find block
start and end boundaries). */
int sdb_label_count = 0;
@@ -591,8 +438,9 @@ int sdb_label_count = 0;
/* Next label # for each statement for Silicon Graphics IRIS systems. */
int sym_lineno = 0;
-/* Name of the file containing the current function. */
-const char *current_function_file = "";
+/* Map GCC register number to debugger register number. */
+int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
+int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
/* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
int set_noreorder;
@@ -617,6 +465,9 @@ const struct mips_cpu_info *mips_tune_info;
/* Which instruction set architecture to use. */
int mips_isa;
+/* The architecture selected by -mipsN. */
+static const struct mips_cpu_info *mips_isa_info;
+
/* Which ABI to use. */
int mips_abi = MIPS_ABI_DEFAULT;
@@ -628,20 +479,17 @@ static int mips_base_target_flags;
/* The mips16 command-line target flags only. */
static bool mips_base_mips16;
/* Similar copies of option settings. */
+static int mips_flag_delayed_branch; /* flag_delayed_branch */
static int mips_base_schedule_insns; /* flag_schedule_insns */
static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
static int mips_base_align_loops; /* align_loops */
static int mips_base_align_jumps; /* align_jumps */
static int mips_base_align_functions; /* align_functions */
-static GTY(()) int mips16_flipper;
/* The -mtext-loads setting. */
enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES;
-/* The architecture selected by -mipsN. */
-static const struct mips_cpu_info *mips_isa_info;
-
/* If TRUE, we split addresses into their high and low parts in the RTL. */
int mips_split_addresses;
@@ -655,13 +503,6 @@ char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
/* List of all MIPS punctuation characters used by print_operand. */
char mips_print_operand_punct[256];
-/* Map GCC register number to debugger register number. */
-int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
-int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
-
-/* A copy of the original flag_delayed_branch: see override_options. */
-static int mips_flag_delayed_branch;
-
static GTY (()) int mips_output_filename_first_time = 1;
/* mips_split_p[X] is true if symbols of type X can be split by
@@ -1210,185 +1051,68 @@ static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
DEFAULT_COSTS
},
};
-
-/* If a MIPS16e SAVE or RESTORE instruction saves or restores register
- mips16e_s2_s8_regs[X], it must also save the registers in indexes
- X + 1 onwards. Likewise mips16e_a0_a3_regs. */
-static const unsigned char mips16e_s2_s8_regs[] = {
- 30, 23, 22, 21, 20, 19, 18
-};
-static const unsigned char mips16e_a0_a3_regs[] = {
- 4, 5, 6, 7
-};
-
-/* A list of the registers that can be saved by the MIPS16e SAVE instruction,
- ordered from the uppermost in memory to the lowest in memory. */
-static const unsigned char mips16e_save_restore_regs[] = {
- 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
-};
-/* Initialize the GCC target structure. */
-#undef TARGET_ASM_ALIGNED_HI_OP
-#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
-#undef TARGET_ASM_ALIGNED_SI_OP
-#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
-#undef TARGET_ASM_ALIGNED_DI_OP
-#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
-
-#undef TARGET_ASM_FUNCTION_PROLOGUE
-#define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
-#undef TARGET_ASM_FUNCTION_EPILOGUE
-#define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
-#undef TARGET_ASM_SELECT_RTX_SECTION
-#define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
-#undef TARGET_ASM_FUNCTION_RODATA_SECTION
-#define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
-
-#undef TARGET_SCHED_INIT
-#define TARGET_SCHED_INIT mips_sched_init
-#undef TARGET_SCHED_REORDER
-#define TARGET_SCHED_REORDER mips_sched_reorder
-#undef TARGET_SCHED_REORDER2
-#define TARGET_SCHED_REORDER2 mips_sched_reorder
-#undef TARGET_SCHED_VARIABLE_ISSUE
-#define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
-#undef TARGET_SCHED_ADJUST_COST
-#define TARGET_SCHED_ADJUST_COST mips_adjust_cost
-#undef TARGET_SCHED_ISSUE_RATE
-#define TARGET_SCHED_ISSUE_RATE mips_issue_rate
-#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
-#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
- mips_multipass_dfa_lookahead
-
-#undef TARGET_DEFAULT_TARGET_FLAGS
-#define TARGET_DEFAULT_TARGET_FLAGS \
- (TARGET_DEFAULT \
- | TARGET_CPU_DEFAULT \
- | TARGET_ENDIAN_DEFAULT \
- | TARGET_FP_EXCEPTIONS_DEFAULT \
- | MASK_CHECK_ZERO_DIV \
- | MASK_FUSED_MADD)
-#undef TARGET_HANDLE_OPTION
-#define TARGET_HANDLE_OPTION mips_handle_option
-
-#undef TARGET_FUNCTION_OK_FOR_SIBCALL
-#define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
-
-#undef TARGET_INSERT_ATTRIBUTES
-#define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
-#undef TARGET_MERGE_DECL_ATTRIBUTES
-#define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
-#undef TARGET_SET_CURRENT_FUNCTION
-#define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
-
-#undef TARGET_VALID_POINTER_MODE
-#define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
-#undef TARGET_RTX_COSTS
-#define TARGET_RTX_COSTS mips_rtx_costs
-#undef TARGET_ADDRESS_COST
-#define TARGET_ADDRESS_COST mips_address_cost
-
-#undef TARGET_IN_SMALL_DATA_P
-#define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
-
-#undef TARGET_MACHINE_DEPENDENT_REORG
-#define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
-
-#undef TARGET_ASM_FILE_START
-#define TARGET_ASM_FILE_START mips_file_start
-#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
-#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
-
-#undef TARGET_INIT_LIBFUNCS
-#define TARGET_INIT_LIBFUNCS mips_init_libfuncs
-
-#undef TARGET_BUILD_BUILTIN_VA_LIST
-#define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
-#undef TARGET_GIMPLIFY_VA_ARG_EXPR
-#define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
-
-#undef TARGET_PROMOTE_FUNCTION_ARGS
-#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
-#undef TARGET_PROMOTE_FUNCTION_RETURN
-#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
-#undef TARGET_PROMOTE_PROTOTYPES
-#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
-
-#undef TARGET_RETURN_IN_MEMORY
-#define TARGET_RETURN_IN_MEMORY mips_return_in_memory
-#undef TARGET_RETURN_IN_MSB
-#define TARGET_RETURN_IN_MSB mips_return_in_msb
-
-#undef TARGET_ASM_OUTPUT_MI_THUNK
-#define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
-#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
-#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
-
-#undef TARGET_SETUP_INCOMING_VARARGS
-#define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
-#undef TARGET_STRICT_ARGUMENT_NAMING
-#define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
-#undef TARGET_MUST_PASS_IN_STACK
-#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
-#undef TARGET_PASS_BY_REFERENCE
-#define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
-#undef TARGET_CALLEE_COPIES
-#define TARGET_CALLEE_COPIES mips_callee_copies
-#undef TARGET_ARG_PARTIAL_BYTES
-#define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
-
-#undef TARGET_MODE_REP_EXTENDED
-#define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
-
-#undef TARGET_VECTOR_MODE_SUPPORTED_P
-#define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
-
-#undef TARGET_SCALAR_MODE_SUPPORTED_P
-#define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
-
-#undef TARGET_INIT_BUILTINS
-#define TARGET_INIT_BUILTINS mips_init_builtins
-#undef TARGET_EXPAND_BUILTIN
-#define TARGET_EXPAND_BUILTIN mips_expand_builtin
-
-#undef TARGET_HAVE_TLS
-#define TARGET_HAVE_TLS HAVE_AS_TLS
-
-#undef TARGET_CANNOT_FORCE_CONST_MEM
-#define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
+/* Use a hash table to keep track of implicit mips16/nomips16 attributes
+ for -mflip_mips16. It maps decl names onto a boolean mode setting. */
-#undef TARGET_ENCODE_SECTION_INFO
-#define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
+struct mflip_mips16_entry GTY (()) {
+ const char *name;
+ bool mips16_p;
+};
+static GTY ((param_is (struct mflip_mips16_entry))) htab_t mflip_mips16_htab;
-#undef TARGET_ATTRIBUTE_TABLE
-#define TARGET_ATTRIBUTE_TABLE mips_attribute_table
-/* All our function attributes are related to how out-of-line copies should
- be compiled or called. They don't in themselves prevent inlining. */
-#undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
-#define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
+/* Hash table callbacks for mflip_mips16_htab. */
-#undef TARGET_EXTRA_LIVE_ON_ENTRY
-#define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
+static hashval_t
+mflip_mips16_htab_hash (const void *entry)
+{
+ return htab_hash_string (((const struct mflip_mips16_entry *) entry)->name);
+}
-#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
-#define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
-#undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
-#define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
+static int
+mflip_mips16_htab_eq (const void *entry, const void *name)
+{
+ return strcmp (((const struct mflip_mips16_entry *) entry)->name,
+ (const char *) name) == 0;
+}
-#undef TARGET_COMP_TYPE_ATTRIBUTES
-#define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
+static GTY(()) int mips16_flipper;
-#ifdef HAVE_AS_DTPRELWORD
-#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
-#define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
-#endif
+/* DECL is a function that needs a default "mips16" or "nomips16" attribute
+ for -mflip-mips16. Return true if it should use "mips16" and false if
+ it should use "nomips16". */
-#undef TARGET_DWARF_REGISTER_SPAN
-#define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span
+static bool
+mflip_mips16_use_mips16_p (tree decl)
+{
+ struct mflip_mips16_entry *entry;
+ const char *name;
+ hashval_t hash;
+ void **slot;
-struct gcc_target targetm = TARGET_INITIALIZER;
+ /* Use the opposite of the command-line setting for anonymous decls. */
+ if (!DECL_NAME (decl))
+ return !mips_base_mips16;
+ if (!mflip_mips16_htab)
+ mflip_mips16_htab = htab_create_ggc (37, mflip_mips16_htab_hash,
+ mflip_mips16_htab_eq, NULL);
+ name = IDENTIFIER_POINTER (DECL_NAME (decl));
+ hash = htab_hash_string (name);
+ slot = htab_find_slot_with_hash (mflip_mips16_htab, name, hash, INSERT);
+ entry = (struct mflip_mips16_entry *) *slot;
+ if (!entry)
+ {
+ mips16_flipper = !mips16_flipper;
+ entry = GGC_NEW (struct mflip_mips16_entry);
+ entry->name = name;
+ entry->mips16_p = mips16_flipper ? !mips_base_mips16 : mips_base_mips16;
+ *slot = entry;
+ }
+ return entry->mips16_p;
+}
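
A minimal stand-alone sketch of the alternation scheme above, assuming a plain linked list in place of the GTY hash table; flip_entry and flip_use_mips16_p are illustrative names, not part of the patch. The point is that the first query for a given function name flips the global toggle, while later queries for the same name return the cached answer, so duplicate declarations of one function always agree:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-ins for mflip_mips16_entry and mips16_flipper.  */
struct flip_entry {
  const char *name;        /* assumed to outlive the table */
  bool mips16_p;
  struct flip_entry *next;
};

static struct flip_entry *flip_entries;
static bool flipper;

/* Return the setting to use for NAME; BASE_MIPS16 plays the role of
   mips_base_mips16.  The toggle only advances the first time NAME is seen.  */
static bool
flip_use_mips16_p (const char *name, bool base_mips16)
{
  struct flip_entry *e;

  for (e = flip_entries; e; e = e->next)
    if (strcmp (e->name, name) == 0)
      return e->mips16_p;

  flipper = !flipper;
  e = malloc (sizeof (*e));
  e->name = name;
  e->mips16_p = flipper ? !base_mips16 : base_mips16;
  e->next = flip_entries;
  flip_entries = e;
  return e->mips16_p;
}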
+
/* Predicates to test for presence of "near" and "far"/"long_call"
attributes on the given TYPE. */
@@ -1419,6 +1143,27 @@ mips_nomips16_decl_p (const_tree decl)
return lookup_attribute ("nomips16", DECL_ATTRIBUTES (decl)) != NULL;
}
+/* Return true if function DECL is a MIPS16 function. Return the ambient
+ setting if DECL is null. */
+
+static bool
+mips_use_mips16_mode_p (tree decl)
+{
+ if (decl)
+ {
+ /* Nested functions must use the same frame pointer as their
+ parent and must therefore use the same ISA mode. */
+ tree parent = decl_function_context (decl);
+ if (parent)
+ decl = parent;
+ if (mips_mips16_decl_p (decl))
+ return true;
+ if (mips_nomips16_decl_p (decl))
+ return false;
+ }
+ return mips_base_mips16;
+}
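
As a usage note (illustrative source, not from the patch): the per-function "mips16" and "nomips16" attributes that feed this predicate look like the snippet below; a function with neither attribute simply inherits the -mips16/-mno-mips16 command-line default.

/* Always compiled as MIPS16 code, regardless of the command line.  */
int __attribute__ ((mips16))
add16 (int x, int y)
{
  return x + y;
}

/* Always compiled as standard MIPS code.  */
int __attribute__ ((nomips16))
add32 (int x, int y)
{
  return x + y;
}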
+
/* Return 0 if the attributes for two types are incompatible, 1 if they
are compatible, and 2 if they are nearly compatible (which causes a
warning to be generated). */
@@ -1438,6 +1183,64 @@ mips_comp_type_attributes (const_tree type1, const_tree type2)
return 1;
}
+
+/* Implement TARGET_INSERT_ATTRIBUTES. */
+
+static void
+mips_insert_attributes (tree decl, tree *attributes)
+{
+ const char *name;
+ bool mips16_p, nomips16_p;
+
+ /* Check for "mips16" and "nomips16" attributes. */
+ mips16_p = lookup_attribute ("mips16", *attributes) != NULL;
+ nomips16_p = lookup_attribute ("nomips16", *attributes) != NULL;
+ if (TREE_CODE (decl) != FUNCTION_DECL)
+ {
+ if (mips16_p)
+ error ("%qs attribute only applies to functions", "mips16");
+ if (nomips16_p)
+ error ("%qs attribute only applies to functions", "nomips16");
+ }
+ else
+ {
+ mips16_p |= mips_mips16_decl_p (decl);
+ nomips16_p |= mips_nomips16_decl_p (decl);
+ if (mips16_p || nomips16_p)
+ {
+ /* DECL cannot be simultaneously mips16 and nomips16. */
+ if (mips16_p && nomips16_p)
+ error ("%qs cannot have both %<mips16%> and "
+ "%<nomips16%> attributes",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ }
+ else if (TARGET_FLIP_MIPS16 && !DECL_ARTIFICIAL (decl))
+ {
+ /* Implement -mflip-mips16. If DECL has neither a "nomips16" nor a
+ "mips16" attribute, arbitrarily pick one. We must pick the same
+ setting for duplicate declarations of a function. */
+ name = mflip_mips16_use_mips16_p (decl) ? "mips16" : "nomips16";
+ *attributes = tree_cons (get_identifier (name), NULL, *attributes);
+ }
+ }
+}
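
For reference, the diagnostics above correspond to source along these lines (hypothetical snippets, both meant to be rejected):

int bad_var __attribute__ ((mips16));      /* "mips16" attribute only applies to functions */

void __attribute__ ((mips16, nomips16))
bad_fn (void);                             /* cannot have both mips16 and nomips16 */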
+
+/* Implement TARGET_MERGE_DECL_ATTRIBUTES. */
+
+static tree
+mips_merge_decl_attributes (tree olddecl, tree newdecl)
+{
+ /* The decls' "mips16" and "nomips16" attributes must match exactly. */
+ if (mips_mips16_decl_p (olddecl) != mips_mips16_decl_p (newdecl))
+ error ("%qs redeclared with conflicting %qs attributes",
+ IDENTIFIER_POINTER (DECL_NAME (newdecl)), "mips16");
+ if (mips_nomips16_decl_p (olddecl) != mips_nomips16_decl_p (newdecl))
+ error ("%qs redeclared with conflicting %qs attributes",
+ IDENTIFIER_POINTER (DECL_NAME (newdecl)), "nomips16");
+
+ return merge_attributes (DECL_ATTRIBUTES (olddecl),
+ DECL_ATTRIBUTES (newdecl));
+}
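
Likewise, the exact-match rule here rejects a redeclaration that adds or drops one of the attributes, for example (hypothetical snippet):

void f (void);
void __attribute__ ((mips16)) f (void);    /* redeclared with conflicting "mips16" attributes */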
/* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
@@ -1457,6 +1260,118 @@ mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
}
}
+static unsigned int mips_build_integer (struct mips_integer_op *,
+ unsigned HOST_WIDE_INT);
+
+/* Subroutine of mips_build_integer (with the same interface).
+ Assume that the final action in the sequence should be a left shift. */
+
+static unsigned int
+mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
+{
+ unsigned int i, shift;
+
+ /* Shift VALUE right until its lowest bit is set. Shift arithmetically
+ since signed numbers are easier to load than unsigned ones. */
+ shift = 0;
+ while ((value & 1) == 0)
+ value /= 2, shift++;
+
+ i = mips_build_integer (codes, value);
+ codes[i].code = ASHIFT;
+ codes[i].value = shift;
+ return i + 1;
+}
+
+
+/* As for mips_build_shift, but assume that the final action will be
+ an IOR or PLUS operation. */
+
+static unsigned int
+mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
+{
+ unsigned HOST_WIDE_INT high;
+ unsigned int i;
+
+ high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
+ if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
+ {
+ /* The constant is too complex to load with a simple lui/ori pair
+ so our goal is to clear as many trailing zeros as possible.
+ In this case, we know bit 16 is set and that the low 16 bits
+ form a negative number. If we subtract that number from VALUE,
+ we will clear at least the lowest 17 bits, maybe more. */
+ i = mips_build_integer (codes, CONST_HIGH_PART (value));
+ codes[i].code = PLUS;
+ codes[i].value = CONST_LOW_PART (value);
+ }
+ else
+ {
+ i = mips_build_integer (codes, high);
+ codes[i].code = IOR;
+ codes[i].value = value & 0xffff;
+ }
+ return i + 1;
+}
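
A stand-alone check of the special case above, assuming the usual definitions of CONST_HIGH_PART (round VALUE to the nearest multiple of 0x10000) and CONST_LOW_PART (the signed 16-bit remainder). For 0x123458000, bits 15 and 16 are both set, so the code first builds 0x123460000, which has 17 trailing zero bits, and then adds -0x8000:

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint64_t value = UINT64_C (0x123458000);

  /* Illustrative equivalents of CONST_HIGH_PART/CONST_LOW_PART.  */
  uint64_t high = (value + 0x8000) & ~UINT64_C (0xffff);
  int64_t low = (int64_t) (value - high);

  assert ((value & 0x18000) == 0x18000);    /* bit 16 set, low half negative */
  assert (high == UINT64_C (0x123460000));  /* 17 trailing zeros */
  assert (low == -0x8000);                  /* fits a signed 16-bit immediate */
  assert (high + (uint64_t) low == value);  /* the PLUS step recreates VALUE */
  return 0;
}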
+
+
+/* Fill CODES with a sequence of rtl operations to load VALUE.
+ Return the number of operations needed. */
+
+static unsigned int
+mips_build_integer (struct mips_integer_op *codes,
+ unsigned HOST_WIDE_INT value)
+{
+ if (SMALL_OPERAND (value)
+ || SMALL_OPERAND_UNSIGNED (value)
+ || LUI_OPERAND (value))
+ {
+ /* The value can be loaded with a single instruction. */
+ codes[0].code = UNKNOWN;
+ codes[0].value = value;
+ return 1;
+ }
+ else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
+ {
+ /* Either the constant is a simple LUI/ORI combination or its
+ lowest bit is set. We don't want to shift in this case. */
+ return mips_build_lower (codes, value);
+ }
+ else if ((value & 0xffff) == 0)
+ {
+ /* The constant will need at least three actions. The lowest
+ 16 bits are clear, so the final action will be a shift. */
+ return mips_build_shift (codes, value);
+ }
+ else
+ {
+ /* The final action could be a shift, add or inclusive OR.
+ Rather than use a complex condition to select the best
+ approach, try both mips_build_shift and mips_build_lower
+ and pick the one that gives the shortest sequence.
+ Note that this case is only used once per constant. */
+ struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
+ unsigned int cost, alt_cost;
+
+ cost = mips_build_shift (codes, value);
+ alt_cost = mips_build_lower (alt_codes, value);
+ if (alt_cost < cost)
+ {
+ memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
+ cost = alt_cost;
+ }
+ return cost;
+ }
+}
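
To make the decision tree concrete: 0x1234abcd cannot be loaded in one instruction, but its low bit is set, so mips_build_lower applies and the result is the familiar two-operation lui/ori pair. A small sketch with simplified, 32-bit-only stand-ins for the predicates (the real macros live in mips.h):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SMALL_OP(v)   ((int32_t) (v) >= -0x8000 && (int32_t) (v) < 0x8000)
#define SMALL_OP_U(v) ((uint32_t) (v) < 0x10000)
#define LUI_OP(v)     (((v) & 0xffff) == 0 && SMALL_OP_U ((v) >> 16))

int
main (void)
{
  uint32_t value = 0x1234abcd;

  /* Not a single-instruction constant...  */
  assert (!SMALL_OP (value) && !SMALL_OP_U (value) && !LUI_OP (value));
  /* ...but the low bit is set, so the high part becomes a lui and the
     low half is ORed in.  */
  assert ((value & 1) != 0);
  assert (LUI_OP (value & ~0xffffu));
  printf ("lui  $t0, 0x%x\n", (unsigned) ((value & ~0xffffu) >> 16));
  printf ("ori  $t0, $t0, 0x%x\n", (unsigned) (value & 0xffff));
  return 0;
}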
+
+/* Return true if X is a thread-local symbol. */
+
+static bool
+mips_tls_operand_p (rtx x)
+{
+ return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
+}
+
/* Return true if SYMBOL_REF X is associated with a global symbol
(in the STB_GLOBAL sense). */
@@ -1495,6 +1410,19 @@ mips_rtx_constant_in_small_data_p (enum machine_mode mode)
&& GET_MODE_SIZE (mode) <= mips_section_threshold);
}
+/* Return true if X should not be moved directly into register $25.
+ We need this because many versions of GAS will treat "la $25,foo" as
+ part of a call sequence and so allow a global "foo" to be lazily bound. */
+
+bool
+mips_dangerous_for_la25_p (rtx x)
+{
+ return (!TARGET_EXPLICIT_RELOCS
+ && TARGET_USE_GOT
+ && GET_CODE (x) == SYMBOL_REF
+ && mips_global_symbol_p (x));
+}
+
/* Return the method that should be used to access SYMBOL_REF or
LABEL_REF X in context CONTEXT. */
@@ -1700,182 +1628,6 @@ mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
}
gcc_unreachable ();
}
-
-
-/* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
-
-int
-mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
-{
- if (!HARD_REGISTER_NUM_P (regno))
- {
- if (!strict)
- return true;
- regno = reg_renumber[regno];
- }
-
- /* These fake registers will be eliminated to either the stack or
- hard frame pointer, both of which are usually valid base registers.
- Reload deals with the cases where the eliminated form isn't valid. */
- if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
- return true;
-
- /* In mips16 mode, the stack pointer can only address word and doubleword
- values, nothing smaller. There are two problems here:
-
- (a) Instantiating virtual registers can introduce new uses of the
- stack pointer. If these virtual registers are valid addresses,
- the stack pointer should be too.
-
- (b) Most uses of the stack pointer are not made explicit until
- FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
- We don't know until that stage whether we'll be eliminating to the
- stack pointer (which needs the restriction) or the hard frame
- pointer (which doesn't).
-
- All in all, it seems more consistent to only enforce this restriction
- during and after reload. */
- if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
- return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
-
- return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
-}
-
-
-/* Return true if X is a valid base register for the given mode.
- Allow only hard registers if STRICT. */
-
-static bool
-mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
-{
- if (!strict && GET_CODE (x) == SUBREG)
- x = SUBREG_REG (x);
-
- return (REG_P (x)
- && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
-}
-
-
-/* Return true if X is a valid address for machine mode MODE. If it is,
- fill in INFO appropriately. STRICT is true if we should only accept
- hard base registers. */
-
-static bool
-mips_classify_address (struct mips_address_info *info, rtx x,
- enum machine_mode mode, int strict)
-{
- switch (GET_CODE (x))
- {
- case REG:
- case SUBREG:
- info->type = ADDRESS_REG;
- info->reg = x;
- info->offset = const0_rtx;
- return mips_valid_base_register_p (info->reg, mode, strict);
-
- case PLUS:
- info->type = ADDRESS_REG;
- info->reg = XEXP (x, 0);
- info->offset = XEXP (x, 1);
- return (mips_valid_base_register_p (info->reg, mode, strict)
- && const_arith_operand (info->offset, VOIDmode));
-
- case LO_SUM:
- info->type = ADDRESS_LO_SUM;
- info->reg = XEXP (x, 0);
- info->offset = XEXP (x, 1);
- /* We have to trust the creator of the LO_SUM to do something vaguely
- sane. Target-independent code that creates a LO_SUM should also
- create and verify the matching HIGH. Target-independent code that
- adds an offset to a LO_SUM must prove that the offset will not
- induce a carry. Failure to do either of these things would be
- a bug, and we are not required to check for it here. The MIPS
- backend itself should only create LO_SUMs for valid symbolic
- constants, with the high part being either a HIGH or a copy
- of _gp. */
- info->symbol_type
- = mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
- return (mips_valid_base_register_p (info->reg, mode, strict)
- && mips_symbol_insns (info->symbol_type, mode) > 0
- && mips_lo_relocs[info->symbol_type] != 0);
-
- case CONST_INT:
- /* Small-integer addresses don't occur very often, but they
- are legitimate if $0 is a valid base register. */
- info->type = ADDRESS_CONST_INT;
- return !TARGET_MIPS16 && SMALL_INT (x);
-
- case CONST:
- case LABEL_REF:
- case SYMBOL_REF:
- info->type = ADDRESS_SYMBOLIC;
- return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
- &info->symbol_type)
- && mips_symbol_insns (info->symbol_type, mode) > 0
- && !mips_split_p[info->symbol_type]);
-
- default:
- return false;
- }
-}
-
-/* Return true if X is a thread-local symbol. */
-
-static bool
-mips_tls_operand_p (rtx x)
-{
- return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
-}
-
-/* Return true if X can not be forced into a constant pool. */
-
-static int
-mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
-{
- return mips_tls_operand_p (*x);
-}
-
-/* Return true if X can not be forced into a constant pool. */
-
-static bool
-mips_cannot_force_const_mem (rtx x)
-{
- rtx base, offset;
-
- if (!TARGET_MIPS16)
- {
- /* As an optimization, reject constants that mips_legitimize_move
- can expand inline.
-
- Suppose we have a multi-instruction sequence that loads constant C
- into register R. If R does not get allocated a hard register, and
- R is used in an operand that allows both registers and memory
- references, reload will consider forcing C into memory and using
- one of the instruction's memory alternatives. Returning false
- here will force it to use an input reload instead. */
- if (GET_CODE (x) == CONST_INT)
- return true;
-
- split_const (x, &base, &offset);
- if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
- return true;
- }
-
- if (for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
- return true;
-
- return false;
-}
-
-/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
- constants when we're using a per-function constant pool. */
-
-static bool
-mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
- const_rtx x ATTRIBUTE_UNUSED)
-{
- return !TARGET_MIPS16_PCREL_LOADS;
-}
/* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
single instruction. We rely on the fact that, in the worst case,
@@ -2000,6 +1752,186 @@ mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
}
+/* Return true if X can not be forced into a constant pool. */
+
+static int
+mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
+{
+ return mips_tls_operand_p (*x);
+}
+
+/* Return true if X can not be forced into a constant pool. */
+
+static bool
+mips_cannot_force_const_mem (rtx x)
+{
+ rtx base, offset;
+
+ if (!TARGET_MIPS16)
+ {
+ /* As an optimization, reject constants that mips_legitimize_move
+ can expand inline.
+
+ Suppose we have a multi-instruction sequence that loads constant C
+ into register R. If R does not get allocated a hard register, and
+ R is used in an operand that allows both registers and memory
+ references, reload will consider forcing C into memory and using
+ one of the instruction's memory alternatives. Returning false
+ here will force it to use an input reload instead. */
+ if (GET_CODE (x) == CONST_INT)
+ return true;
+
+ split_const (x, &base, &offset);
+ if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
+ return true;
+ }
+
+ if (for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
+ return true;
+
+ return false;
+}
+
+/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
+ constants when we're using a per-function constant pool. */
+
+static bool
+mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
+ const_rtx x ATTRIBUTE_UNUSED)
+{
+ return !TARGET_MIPS16_PCREL_LOADS;
+}
+
+/* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
+
+int
+mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
+{
+ if (!HARD_REGISTER_NUM_P (regno))
+ {
+ if (!strict)
+ return true;
+ regno = reg_renumber[regno];
+ }
+
+ /* These fake registers will be eliminated to either the stack or
+ hard frame pointer, both of which are usually valid base registers.
+ Reload deals with the cases where the eliminated form isn't valid. */
+ if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
+ return true;
+
+ /* In mips16 mode, the stack pointer can only address word and doubleword
+ values, nothing smaller. There are two problems here:
+
+ (a) Instantiating virtual registers can introduce new uses of the
+ stack pointer. If these virtual registers are valid addresses,
+ the stack pointer should be too.
+
+ (b) Most uses of the stack pointer are not made explicit until
+ FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
+ We don't know until that stage whether we'll be eliminating to the
+ stack pointer (which needs the restriction) or the hard frame
+ pointer (which doesn't).
+
+ All in all, it seems more consistent to only enforce this restriction
+ during and after reload. */
+ if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
+ return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
+
+ return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
+}
+
+
+/* Return true if X is a valid base register for the given mode.
+ Allow only hard registers if STRICT. */
+
+static bool
+mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
+{
+ if (!strict && GET_CODE (x) == SUBREG)
+ x = SUBREG_REG (x);
+
+ return (REG_P (x)
+ && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
+}
+
+
+/* Return true if X is a valid address for machine mode MODE. If it is,
+ fill in INFO appropriately. STRICT is true if we should only accept
+ hard base registers. */
+
+static bool
+mips_classify_address (struct mips_address_info *info, rtx x,
+ enum machine_mode mode, int strict)
+{
+ switch (GET_CODE (x))
+ {
+ case REG:
+ case SUBREG:
+ info->type = ADDRESS_REG;
+ info->reg = x;
+ info->offset = const0_rtx;
+ return mips_valid_base_register_p (info->reg, mode, strict);
+
+ case PLUS:
+ info->type = ADDRESS_REG;
+ info->reg = XEXP (x, 0);
+ info->offset = XEXP (x, 1);
+ return (mips_valid_base_register_p (info->reg, mode, strict)
+ && const_arith_operand (info->offset, VOIDmode));
+
+ case LO_SUM:
+ info->type = ADDRESS_LO_SUM;
+ info->reg = XEXP (x, 0);
+ info->offset = XEXP (x, 1);
+ /* We have to trust the creator of the LO_SUM to do something vaguely
+ sane. Target-independent code that creates a LO_SUM should also
+ create and verify the matching HIGH. Target-independent code that
+ adds an offset to a LO_SUM must prove that the offset will not
+ induce a carry. Failure to do either of these things would be
+ a bug, and we are not required to check for it here. The MIPS
+ backend itself should only create LO_SUMs for valid symbolic
+ constants, with the high part being either a HIGH or a copy
+ of _gp. */
+ info->symbol_type
+ = mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
+ return (mips_valid_base_register_p (info->reg, mode, strict)
+ && mips_symbol_insns (info->symbol_type, mode) > 0
+ && mips_lo_relocs[info->symbol_type] != 0);
+
+ case CONST_INT:
+ /* Small-integer addresses don't occur very often, but they
+ are legitimate if $0 is a valid base register. */
+ info->type = ADDRESS_CONST_INT;
+ return !TARGET_MIPS16 && SMALL_INT (x);
+
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ info->type = ADDRESS_SYMBOLIC;
+ return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
+ &info->symbol_type)
+ && mips_symbol_insns (info->symbol_type, mode) > 0
+ && !mips_split_p[info->symbol_type]);
+
+ default:
+ return false;
+ }
+}
+
+/* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
+ returns a nonzero value if X is a legitimate address for a memory
+ operand of the indicated MODE. STRICT is nonzero if this function
+ is called during reload. */
+
+bool
+mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
+{
+ struct mips_address_info addr;
+
+ return mips_classify_address (&addr, x, mode, strict);
+}
+
/* Return true if X is a legitimate $sp-based address for mode MODE. */
bool
@@ -2012,6 +1944,26 @@ mips_stack_address_p (rtx x, enum machine_mode mode)
&& addr.reg == stack_pointer_rtx);
}
+/* Return true if ADDR matches the pattern for the lwxs load scaled indexed
+ address instruction. */
+
+static bool
+mips_lwxs_address_p (rtx addr)
+{
+ if (ISA_HAS_LWXS
+ && GET_CODE (addr) == PLUS
+ && REG_P (XEXP (addr, 1)))
+ {
+ rtx offset = XEXP (addr, 0);
+ if (GET_CODE (offset) == MULT
+ && REG_P (XEXP (offset, 0))
+ && GET_CODE (XEXP (offset, 1)) == CONST_INT
+ && INTVAL (XEXP (offset, 1)) == 4)
+ return true;
+ }
+ return false;
+}
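
The pattern being matched is the scaled-index word load, i.e. address = base + index * 4. In source terms it corresponds to something like the function below, which on targets where ISA_HAS_LWXS holds can use a single indexed load rather than a separate shift and add (illustrative example, not from the patch):

/* base[index] with 4-byte elements: the (plus (mult reg 4) reg) shape
   that mips_lwxs_address_p accepts.  */
int
load_scaled (const int *base, int index)
{
  return base[index];
}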
+
/* Return true if a value at OFFSET bytes from BASE can be accessed
using an unextended mips16 instruction. MODE is the mode of the
value.
@@ -2204,19 +2156,6 @@ mips_idiv_insns (void)
return count;
}
-/* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
- returns a nonzero value if X is a legitimate address for a memory
- operand of the indicated MODE. STRICT is nonzero if this function
- is called during reload. */
-
-bool
-mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
-{
- struct mips_address_info addr;
-
- return mips_classify_address (&addr, x, mode, strict);
-}
-
/* Emit a move from SRC to DEST. Assume that the move expanders can
handle all moves if !can_create_pseudo_p (). The distinction is
important because, unlike emit_move_insn, the move expanders know
@@ -2231,6 +2170,15 @@ mips_emit_move (rtx dest, rtx src)
: emit_move_insn_1 (dest, src));
}
+/* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
+
+static void
+mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
+{
+ emit_insn (gen_rtx_SET (VOIDmode, target,
+ gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
+}
+
/* Copy VALUE to a register and return that register. If new pseudos
are allowed, copy it into a new register, otherwise use DEST. */
@@ -2246,6 +2194,47 @@ mips_force_temporary (rtx dest, rtx value)
}
}
+/* If we can access small data directly (using gp-relative relocation
+ operators) return the small data pointer, otherwise return null.
+
+ For each mips16 function which refers to GP relative symbols, we
+ use a pseudo register, initialized at the start of the function, to
+ hold the $gp value. */
+
+static rtx
+mips16_gp_pseudo_reg (void)
+{
+ if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
+ cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
+
+ /* Don't initialize the pseudo register if we are being called from
+ the tree optimizers' cost-calculation routines. */
+ if (!cfun->machine->initialized_mips16_gp_pseudo_p
+ && (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl))
+ {
+ rtx insn, scan;
+
+ /* We want to initialize this to a value which gcc will believe
+ is constant. */
+ insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
+
+ push_topmost_sequence ();
+ /* We need to emit the initialization after the FUNCTION_BEG
+ note, so that it will be integrated. */
+ for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
+ if (NOTE_P (scan)
+ && NOTE_KIND (scan) == NOTE_INSN_FUNCTION_BEG)
+ break;
+ if (scan == NULL_RTX)
+ scan = get_insns ();
+ insn = emit_insn_after (insn, scan);
+ pop_topmost_sequence ();
+
+ cfun->machine->initialized_mips16_gp_pseudo_p = true;
+ }
+
+ return cfun->machine->mips16_gp_pseudo_rtx;
+}
/* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
it appears in a MEM of that mode. Return true if ADDR is a legitimate
@@ -2523,108 +2512,6 @@ mips_legitimize_address (rtx *xloc, enum machine_mode mode)
}
-/* Subroutine of mips_build_integer (with the same interface).
- Assume that the final action in the sequence should be a left shift. */
-
-static unsigned int
-mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
-{
- unsigned int i, shift;
-
- /* Shift VALUE right until its lowest bit is set. Shift arithmetically
- since signed numbers are easier to load than unsigned ones. */
- shift = 0;
- while ((value & 1) == 0)
- value /= 2, shift++;
-
- i = mips_build_integer (codes, value);
- codes[i].code = ASHIFT;
- codes[i].value = shift;
- return i + 1;
-}
-
-
-/* As for mips_build_shift, but assume that the final action will be
- an IOR or PLUS operation. */
-
-static unsigned int
-mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
-{
- unsigned HOST_WIDE_INT high;
- unsigned int i;
-
- high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
- if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
- {
- /* The constant is too complex to load with a simple lui/ori pair
- so our goal is to clear as many trailing zeros as possible.
- In this case, we know bit 16 is set and that the low 16 bits
- form a negative number. If we subtract that number from VALUE,
- we will clear at least the lowest 17 bits, maybe more. */
- i = mips_build_integer (codes, CONST_HIGH_PART (value));
- codes[i].code = PLUS;
- codes[i].value = CONST_LOW_PART (value);
- }
- else
- {
- i = mips_build_integer (codes, high);
- codes[i].code = IOR;
- codes[i].value = value & 0xffff;
- }
- return i + 1;
-}
-
-
-/* Fill CODES with a sequence of rtl operations to load VALUE.
- Return the number of operations needed. */
-
-static unsigned int
-mips_build_integer (struct mips_integer_op *codes,
- unsigned HOST_WIDE_INT value)
-{
- if (SMALL_OPERAND (value)
- || SMALL_OPERAND_UNSIGNED (value)
- || LUI_OPERAND (value))
- {
- /* The value can be loaded with a single instruction. */
- codes[0].code = UNKNOWN;
- codes[0].value = value;
- return 1;
- }
- else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
- {
- /* Either the constant is a simple LUI/ORI combination or its
- lowest bit is set. We don't want to shift in this case. */
- return mips_build_lower (codes, value);
- }
- else if ((value & 0xffff) == 0)
- {
- /* The constant will need at least three actions. The lowest
- 16 bits are clear, so the final action will be a shift. */
- return mips_build_shift (codes, value);
- }
- else
- {
- /* The final action could be a shift, add or inclusive OR.
- Rather than use a complex condition to select the best
- approach, try both mips_build_shift and mips_build_lower
- and pick the one that gives the shortest sequence.
- Note that this case is only used once per constant. */
- struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
- unsigned int cost, alt_cost;
-
- cost = mips_build_shift (codes, value);
- alt_cost = mips_build_lower (alt_codes, value);
- if (alt_cost < cost)
- {
- memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
- cost = alt_cost;
- }
- return cost;
- }
-}
-
-
/* Load VALUE into DEST, using TEMP as a temporary register if need be. */
void
@@ -2749,6 +2636,86 @@ mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
return false;
}
+/* Return true if X in context CONTEXT is a small data address that can
+ be rewritten as a LO_SUM. */
+
+static bool
+mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
+{
+ enum mips_symbol_type symbol_type;
+
+ return (TARGET_EXPLICIT_RELOCS
+ && mips_symbolic_constant_p (x, context, &symbol_type)
+ && symbol_type == SYMBOL_GP_RELATIVE);
+}
+
+
+/* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
+ containing MEM, or null if none. */
+
+static int
+mips_small_data_pattern_1 (rtx *loc, void *data)
+{
+ enum mips_symbol_context context;
+
+ if (GET_CODE (*loc) == LO_SUM)
+ return -1;
+
+ if (MEM_P (*loc))
+ {
+ if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
+ return 1;
+ return -1;
+ }
+
+ context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
+ return mips_rewrite_small_data_p (*loc, context);
+}
+
+/* Return true if OP refers to small data symbols directly, not through
+ a LO_SUM. */
+
+bool
+mips_small_data_pattern_p (rtx op)
+{
+ return for_each_rtx (&op, mips_small_data_pattern_1, 0);
+}
+
+/* A for_each_rtx callback, used by mips_rewrite_small_data.
+ DATA is the containing MEM, or null if none. */
+
+static int
+mips_rewrite_small_data_1 (rtx *loc, void *data)
+{
+ enum mips_symbol_context context;
+
+ if (MEM_P (*loc))
+ {
+ for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
+ return -1;
+ }
+
+ context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
+ if (mips_rewrite_small_data_p (*loc, context))
+ *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
+
+ if (GET_CODE (*loc) == LO_SUM)
+ return -1;
+
+ return 0;
+}
+
+/* If possible, rewrite OP so that it refers to small data using
+ explicit relocations. */
+
+rtx
+mips_rewrite_small_data (rtx op)
+{
+ op = copy_insn (op);
+ for_each_rtx (&op, mips_rewrite_small_data_1, 0);
+ return op;
+}
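
As a rough illustration of what the rewriting buys (assuming the variable is small enough to be placed in the small data section, e.g. under the default -G threshold): a reference like the one below can then be turned into a single $gp-relative access via a %gp_rel LO_SUM instead of materialising the full address first.

/* Small enough for .sdata under typical -G settings, so the load in
   read_counter can become one gp-relative instruction.  */
int counter;

int
read_counter (void)
{
  return counter;
}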
+
/* We need a lot of little routines to check constant values on the
mips16. These are used to figure out how long the instruction will
be. It would be much better to do this using constraints, but
@@ -2859,26 +2826,6 @@ m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
}
-/* Return true if ADDR matches the pattern for the lwxs load scaled indexed
- address instruction. */
-
-static bool
-mips_lwxs_address_p (rtx addr)
-{
- if (ISA_HAS_LWXS
- && GET_CODE (addr) == PLUS
- && REG_P (XEXP (addr, 1)))
- {
- rtx offset = XEXP (addr, 0);
- if (GET_CODE (offset) == MULT
- && REG_P (XEXP (offset, 0))
- && GET_CODE (XEXP (offset, 1)) == CONST_INT
- && INTVAL (XEXP (offset, 1)) == 4)
- return true;
- }
- return false;
-}
-
/* The cost of loading values from the constant pool. It should be
larger than the cost of any constant we want to synthesize inline. */
@@ -3721,37 +3668,6 @@ mips_output_move (rtx dest, rtx src)
gcc_unreachable ();
}
-/* Restore $gp from its save slot. Valid only when using o32 or
- o64 abicalls. */
-
-void
-mips_restore_gp (void)
-{
- rtx address, slot;
-
- gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
-
- address = mips_add_offset (pic_offset_table_rtx,
- frame_pointer_needed
- ? hard_frame_pointer_rtx
- : stack_pointer_rtx,
- current_function_outgoing_args_size);
- slot = gen_rtx_MEM (Pmode, address);
-
- mips_emit_move (pic_offset_table_rtx, slot);
- if (!TARGET_EXPLICIT_RELOCS)
- emit_insn (gen_blockage ());
-}
-
-/* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
-
-static void
-mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
-{
- emit_insn (gen_rtx_SET (VOIDmode, target,
- gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
-}
-
/* Return true if CMP1 is a suitable second operand for relational
operator CODE. See also the *sCC patterns in mips.md. */
@@ -4099,391 +4015,6 @@ mips_gen_conditional_trap (rtx *operands)
operands[1]));
}
-/* Return true if function DECL is a MIPS16 function. Return the ambient
- setting if DECL is null. */
-
-static bool
-mips_use_mips16_mode_p (tree decl)
-{
- if (decl)
- {
- /* Nested functions must use the same frame pointer as their
- parent and must therefore use the same ISA mode. */
- tree parent = decl_function_context (decl);
- if (parent)
- decl = parent;
- if (mips_mips16_decl_p (decl))
- return true;
- if (mips_nomips16_decl_p (decl))
- return false;
- }
- return mips_base_mips16;
-}
-
-/* Return true if calls to X can use R_MIPS_CALL* relocations. */
-
-static bool
-mips_ok_for_lazy_binding_p (rtx x)
-{
- return (TARGET_USE_GOT
- && GET_CODE (x) == SYMBOL_REF
- && !mips_symbol_binds_local_p (x));
-}
-
-/* Load function address ADDR into register DEST. SIBCALL_P is true
- if the address is needed for a sibling call. Return true if we
- used an explicit lazy-binding sequence. */
-
-static bool
-mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
-{
- /* If we're generating PIC, and this call is to a global function,
- try to allow its address to be resolved lazily. This isn't
- possible if TARGET_CALL_SAVED_GP since the value of $gp on entry
- to the stub would be our caller's gp, not ours. */
- if (TARGET_EXPLICIT_RELOCS
- && !(sibcall_p && TARGET_CALL_SAVED_GP)
- && mips_ok_for_lazy_binding_p (addr))
- {
- rtx high, lo_sum_symbol;
-
- high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
- addr, SYMBOL_GOTOFF_CALL);
- lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
- if (Pmode == SImode)
- emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
- else
- emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
- return true;
- }
- else
- {
- mips_emit_move (dest, addr);
- return false;
- }
-}
-
-
-/* Expand a call or call_value instruction. RESULT is where the
- result will go (null for calls), ADDR is the address of the
- function, ARGS_SIZE is the size of the arguments and AUX is
- the value passed to us by mips_function_arg. SIBCALL_P is true
- if we are expanding a sibling call, false if we're expanding
- a normal call. */
-
-void
-mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
-{
- rtx orig_addr, pattern, insn;
- bool lazy_p;
-
- orig_addr = addr;
- lazy_p = false;
- if (!call_insn_operand (addr, VOIDmode))
- {
- addr = gen_reg_rtx (Pmode);
- lazy_p = mips_load_call_address (addr, orig_addr, sibcall_p);
- }
-
- if (TARGET_MIPS16
- && TARGET_HARD_FLOAT_ABI
- && build_mips16_call_stub (result, addr, args_size,
- aux == 0 ? 0 : (int) GET_MODE (aux)))
- return;
-
- if (result == 0)
- pattern = (sibcall_p
- ? gen_sibcall_internal (addr, args_size)
- : gen_call_internal (addr, args_size));
- else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
- {
- rtx reg1, reg2;
-
- reg1 = XEXP (XVECEXP (result, 0, 0), 0);
- reg2 = XEXP (XVECEXP (result, 0, 1), 0);
- pattern =
- (sibcall_p
- ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
- : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
- }
- else
- pattern = (sibcall_p
- ? gen_sibcall_value_internal (result, addr, args_size)
- : gen_call_value_internal (result, addr, args_size));
-
- insn = emit_call_insn (pattern);
-
- /* Lazy-binding stubs require $gp to be valid on entry. We also pretend
- that they use FAKE_CALL_REGNO; see the load_call<mode> patterns for
- details. */
- if (lazy_p)
- {
- use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
- use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
- gen_rtx_REG (Pmode, FAKE_CALL_REGNO));
- }
-}
-
-
-/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
-
-static bool
-mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
-{
- if (!TARGET_SIBCALLS)
- return false;
-
- /* We can't do a sibcall if the called function is a MIPS16 function
- because there is no direct "jx" instruction equivalent to "jalx" to
- switch the ISA mode. */
- if (mips_use_mips16_mode_p (decl))
- return false;
-
- /* ...and when -minterlink-mips16 is in effect, assume that external
- functions could be MIPS16 ones unless an attribute explicitly
- tells us otherwise. We only care about cases where the sibling
- and normal calls would both be direct. */
- if (TARGET_INTERLINK_MIPS16
- && decl
- && DECL_EXTERNAL (decl)
- && !mips_nomips16_decl_p (decl)
- && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
- return false;
-
- /* Otherwise OK. */
- return true;
-}
-
-/* Emit code to move general operand SRC into condition-code
- register DEST. SCRATCH is a scratch TFmode float register.
- The sequence is:
-
- FP1 = SRC
- FP2 = 0.0f
- DEST = FP2 < FP1
-
- where FP1 and FP2 are single-precision float registers
- taken from SCRATCH. */
-
-void
-mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
-{
- rtx fp1, fp2;
-
- /* Change the source to SFmode. */
- if (MEM_P (src))
- src = adjust_address (src, SFmode, 0);
- else if (REG_P (src) || GET_CODE (src) == SUBREG)
- src = gen_rtx_REG (SFmode, true_regnum (src));
-
- fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
- fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
-
- mips_emit_move (copy_rtx (fp1), src);
- mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
- emit_insn (gen_slt_sf (dest, fp2, fp1));
-}
-
-/* Emit code to change the current function's return address to
- ADDRESS. SCRATCH is available as a scratch register, if needed.
- ADDRESS and SCRATCH are both word-mode GPRs. */
-
-void
-mips_set_return_address (rtx address, rtx scratch)
-{
- rtx slot_address;
-
- compute_frame_size (get_frame_size ());
- gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
- slot_address = mips_add_offset (scratch, stack_pointer_rtx,
- cfun->machine->frame.gp_sp_offset);
-
- mips_emit_move (gen_rtx_MEM (GET_MODE (address), slot_address), address);
-}
-
-/* Emit straight-line code to move LENGTH bytes from SRC to DEST.
- Assume that the areas do not overlap. */
-
-static void
-mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
-{
- HOST_WIDE_INT offset, delta;
- unsigned HOST_WIDE_INT bits;
- int i;
- enum machine_mode mode;
- rtx *regs;
-
- /* Work out how many bits to move at a time. If both operands have
- half-word alignment, it is usually better to move in half words.
- For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
- and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
- Otherwise move word-sized chunks. */
- if (MEM_ALIGN (src) == BITS_PER_WORD / 2
- && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
- bits = BITS_PER_WORD / 2;
- else
- bits = BITS_PER_WORD;
-
- mode = mode_for_size (bits, MODE_INT, 0);
- delta = bits / BITS_PER_UNIT;
-
- /* Allocate a buffer for the temporary registers. */
- regs = alloca (sizeof (rtx) * length / delta);
-
- /* Load as many BITS-sized chunks as possible. Use a normal load if
- the source has enough alignment, otherwise use left/right pairs. */
- for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
- {
- regs[i] = gen_reg_rtx (mode);
- if (MEM_ALIGN (src) >= bits)
- mips_emit_move (regs[i], adjust_address (src, mode, offset));
- else
- {
- rtx part = adjust_address (src, BLKmode, offset);
- if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
- gcc_unreachable ();
- }
- }
-
- /* Copy the chunks to the destination. */
- for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
- if (MEM_ALIGN (dest) >= bits)
- mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
- else
- {
- rtx part = adjust_address (dest, BLKmode, offset);
- if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
- gcc_unreachable ();
- }
-
- /* Mop up any left-over bytes. */
- if (offset < length)
- {
- src = adjust_address (src, BLKmode, offset);
- dest = adjust_address (dest, BLKmode, offset);
- move_by_pieces (dest, src, length - offset,
- MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
- }
-}
-
-#define MAX_MOVE_REGS 4
-#define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
-
-
-/* Helper function for doing a loop-based block operation on memory
- reference MEM. Each iteration of the loop will operate on LENGTH
- bytes of MEM.
-
- Create a new base register for use within the loop and point it to
- the start of MEM. Create a new memory reference that uses this
- register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
-
-static void
-mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
- rtx *loop_reg, rtx *loop_mem)
-{
- *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
-
- /* Although the new mem does not refer to a known location,
- it does keep up to LENGTH bytes of alignment. */
- *loop_mem = change_address (mem, BLKmode, *loop_reg);
- set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
-}
-
-
-/* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
- per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
- memory regions do not overlap. */
-
-static void
-mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
-{
- rtx label, src_reg, dest_reg, final_src;
- HOST_WIDE_INT leftover;
-
- leftover = length % MAX_MOVE_BYTES;
- length -= leftover;
-
- /* Create registers and memory references for use within the loop. */
- mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
- mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
-
- /* Calculate the value that SRC_REG should have after the last iteration
- of the loop. */
- final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
- 0, 0, OPTAB_WIDEN);
-
- /* Emit the start of the loop. */
- label = gen_label_rtx ();
- emit_label (label);
-
- /* Emit the loop body. */
- mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
-
- /* Move on to the next block. */
- mips_emit_move (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
- mips_emit_move (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
-
- /* Emit the loop condition. */
- if (Pmode == DImode)
- emit_insn (gen_cmpdi (src_reg, final_src));
- else
- emit_insn (gen_cmpsi (src_reg, final_src));
- emit_jump_insn (gen_bne (label));
-
- /* Mop up any left-over bytes. */
- if (leftover)
- mips_block_move_straight (dest, src, leftover);
-}
-
-
-/* Expand a loop of synci insns for the address range [BEGIN, END). */
-
-void
-mips_expand_synci_loop (rtx begin, rtx end)
-{
- rtx inc, label, cmp, cmp_result;
-
- /* Load INC with the cache line size (rdhwr INC,$1). */
- inc = gen_reg_rtx (SImode);
- emit_insn (gen_rdhwr (inc, const1_rtx));
-
- /* Loop back to here. */
- label = gen_label_rtx ();
- emit_label (label);
-
- emit_insn (gen_synci (begin));
-
- cmp = gen_reg_rtx (Pmode);
- mips_emit_binary (GTU, cmp, begin, end);
-
- mips_emit_binary (PLUS, begin, begin, inc);
-
- cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
- emit_jump_insn (gen_condjump (cmp_result, label));
-}
-
-/* Expand a movmemsi instruction. */
-
-bool
-mips_expand_block_move (rtx dest, rtx src, rtx length)
-{
- if (GET_CODE (length) == CONST_INT)
- {
- if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
- {
- mips_block_move_straight (dest, src, INTVAL (length));
- return true;
- }
- else if (optimize)
- {
- mips_block_move_loop (dest, src, INTVAL (length));
- return true;
- }
- }
- return false;
-}
-
/* Argument support functions. */
/* Initialize CUMULATIVE_ARGS for a function. */
@@ -4625,7 +4156,6 @@ mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
info->stack_words = num_words - info->reg_words;
}
-
/* INFO describes an argument that is passed in a single-register value.
Return the register it uses, assuming that FPRs are available if
HARD_FLOAT_P. */
@@ -4644,34 +4174,10 @@ mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
return FP_ARG_FIRST + info->reg_offset;
}
-/* Implement FUNCTION_ARG_ADVANCE. */
-
-void
-function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
- tree type, int named)
+static bool
+mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
{
- struct mips_arg_info info;
-
- mips_arg_info (cum, mode, type, named, &info);
-
- if (!info.fpr_p)
- cum->gp_reg_found = true;
-
- /* See the comment above the cumulative args structure in mips.h
- for an explanation of what this code does. It assumes the O32
- ABI, which passes at most 2 arguments in float registers. */
- if (cum->arg_number < 2 && info.fpr_p)
- cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
-
- if (mips_abi != ABI_EABI || !info.fpr_p)
- cum->num_gprs = info.reg_offset + info.reg_words;
- else if (info.reg_words > 0)
- cum->num_fprs += MAX_FPRS_PER_FMT;
-
- if (info.stack_words > 0)
- cum->stack_words = info.stack_offset + info.stack_words;
-
- cum->arg_number++;
+ return !TARGET_OLDABI;
}
/* Implement FUNCTION_ARG. */
@@ -4801,6 +4307,35 @@ function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
}
+/* Implement FUNCTION_ARG_ADVANCE. */
+
+void
+function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ tree type, int named)
+{
+ struct mips_arg_info info;
+
+ mips_arg_info (cum, mode, type, named, &info);
+
+ if (!info.fpr_p)
+ cum->gp_reg_found = true;
+
+ /* See the comment above the cumulative args structure in mips.h
+ for an explanation of what this code does. It assumes the O32
+ ABI, which passes at most 2 arguments in float registers. */
+ if (cum->arg_number < 2 && info.fpr_p)
+ cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
+
+ if (mips_abi != ABI_EABI || !info.fpr_p)
+ cum->num_gprs = info.reg_offset + info.reg_words;
+ else if (info.reg_words > 0)
+ cum->num_fprs += MAX_FPRS_PER_FMT;
+
+ if (info.stack_words > 0)
+ cum->stack_words = info.stack_offset + info.stack_words;
+
+ cum->arg_number++;
+}
/* Implement TARGET_ARG_PARTIAL_BYTES. */
@@ -4887,6 +4422,237 @@ mips_pad_reg_upward (enum machine_mode mode, tree type)
to stack arguments. */
return mips_pad_arg_upward (mode, type);
}
+
+
+/* Return nonzero when an argument must be passed by reference. */
+
+static bool
+mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
+ enum machine_mode mode, const_tree type,
+ bool named ATTRIBUTE_UNUSED)
+{
+ if (mips_abi == ABI_EABI)
+ {
+ int size;
+
+ /* ??? How should SCmode be handled? */
+ if (mode == DImode || mode == DFmode
+ || mode == DQmode || mode == UDQmode
+ || mode == DAmode || mode == UDAmode)
+ return 0;
+
+ size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
+ return size == -1 || size > UNITS_PER_WORD;
+ }
+ else
+ {
+ /* If we have a variable-sized parameter, we have no choice. */
+ return targetm.calls.must_pass_in_stack (mode, type);
+ }
+}
+
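A minimal standalone sketch of the EABI test above, assuming a 32-bit target (UNITS_PER_WORD of 4) and leaving out both the DImode/DFmode/fixed-point exceptions and the must_pass_in_stack path used by the other ABIs; the function name and the sample sizes are illustrative only.

#include <stdbool.h>
#include <stdio.h>

#define UNITS_PER_WORD 4   /* assumed 32-bit target */

/* EABI rule from above: pass by reference when the size is unknown (-1)
   or larger than one word.  */
static bool
eabi_pass_by_reference (long size)
{
  return size == -1 || size > UNITS_PER_WORD;
}

int
main (void)
{
  printf ("4-byte int:     %d\n", eabi_pass_by_reference (4));    /* 0 */
  printf ("16-byte struct: %d\n", eabi_pass_by_reference (16));   /* 1 */
  printf ("unknown size:   %d\n", eabi_pass_by_reference (-1));   /* 1 */
  return 0;
}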
+static bool
+mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ const_tree type ATTRIBUTE_UNUSED, bool named)
+{
+ return mips_abi == ABI_EABI && named;
+}
+
+/* See whether VALTYPE is a record whose fields should be returned in
+ floating-point registers. If so, return the number of fields and
+ list them in FIELDS (which should have two elements). Return 0
+ otherwise.
+
+ For n32 & n64, a structure with one or two fields is returned in
+ floating-point registers as long as every field has a floating-point
+ type. */
+
+static int
+mips_fpr_return_fields (const_tree valtype, tree *fields)
+{
+ tree field;
+ int i;
+
+ if (!TARGET_NEWABI)
+ return 0;
+
+ if (TREE_CODE (valtype) != RECORD_TYPE)
+ return 0;
+
+ i = 0;
+ for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+
+ if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
+ return 0;
+
+ if (i == 2)
+ return 0;
+
+ fields[i++] = field;
+ }
+ return i;
+}
+
+
+/* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
+ a value in the most significant part of $2/$3 if:
+
+ - the target is big-endian;
+
+ - the value has a structure or union type (we generalize this to
+ cover aggregates from other languages too); and
+
+ - the structure is not returned in floating-point registers. */
+
+static bool
+mips_return_in_msb (const_tree valtype)
+{
+ tree fields[2];
+
+ return (TARGET_NEWABI
+ && TARGET_BIG_ENDIAN
+ && AGGREGATE_TYPE_P (valtype)
+ && mips_fpr_return_fields (valtype, fields) == 0);
+}
+
+
+/* Return true if the function return value MODE will get returned in a
+ floating-point register. */
+
+static bool
+mips_return_mode_in_fpr_p (enum machine_mode mode)
+{
+ return ((GET_MODE_CLASS (mode) == MODE_FLOAT
+ || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
+ || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
+ && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
+}
+
+/* Return a composite value in a pair of floating-point registers.
+ MODE1 and OFFSET1 are the mode and byte offset for the first value,
+ likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
+ complete value.
+
+ For n32 & n64, $f0 always holds the first value and $f2 the second.
+ Otherwise the values are packed together as closely as possible. */
+
+static rtx
+mips_return_fpr_pair (enum machine_mode mode,
+ enum machine_mode mode1, HOST_WIDE_INT offset1,
+ enum machine_mode mode2, HOST_WIDE_INT offset2)
+{
+ int inc;
+
+ inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
+ return gen_rtx_PARALLEL
+ (mode,
+ gen_rtvec (2,
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (mode1, FP_RETURN),
+ GEN_INT (offset1)),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (mode2, FP_RETURN + inc),
+ GEN_INT (offset2))));
+
+}
+
+
+/* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
+ VALTYPE is the return type and MODE is VOIDmode. For libcalls,
+ VALTYPE is null and MODE is the mode of the return value. */
+
+rtx
+mips_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
+ enum machine_mode mode)
+{
+ if (valtype)
+ {
+ tree fields[2];
+ int unsignedp;
+
+ mode = TYPE_MODE (valtype);
+ unsignedp = TYPE_UNSIGNED (valtype);
+
+ /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
+ true, we must promote the mode just as PROMOTE_MODE does. */
+ mode = promote_mode (valtype, mode, &unsignedp, 1);
+
+ /* Handle structures whose fields are returned in $f0/$f2. */
+ switch (mips_fpr_return_fields (valtype, fields))
+ {
+ case 1:
+ return gen_rtx_REG (mode, FP_RETURN);
+
+ case 2:
+ return mips_return_fpr_pair (mode,
+ TYPE_MODE (TREE_TYPE (fields[0])),
+ int_byte_position (fields[0]),
+ TYPE_MODE (TREE_TYPE (fields[1])),
+ int_byte_position (fields[1]));
+ }
+
+ /* If a value is passed in the most significant part of a register, see
+ whether we have to round the mode up to a whole number of words. */
+ if (mips_return_in_msb (valtype))
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (valtype);
+ if (size % UNITS_PER_WORD != 0)
+ {
+ size += UNITS_PER_WORD - size % UNITS_PER_WORD;
+ mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
+ }
+ }
+
+ /* For EABI, the class of return register depends entirely on MODE.
+ For example, "struct { some_type x; }" and "union { some_type x; }"
+ are returned in the same way as a bare "some_type" would be.
+ Other ABIs only use FPRs for scalar, complex or vector types. */
+ if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
+ return gen_rtx_REG (mode, GP_RETURN);
+ }
+
+ if (!TARGET_MIPS16)
+ {
+ /* Handle long doubles for n32 & n64. */
+ if (mode == TFmode)
+ return mips_return_fpr_pair (mode,
+ DImode, 0,
+ DImode, GET_MODE_SIZE (mode) / 2);
+
+ if (mips_return_mode_in_fpr_p (mode))
+ {
+ if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
+ return mips_return_fpr_pair (mode,
+ GET_MODE_INNER (mode), 0,
+ GET_MODE_INNER (mode),
+ GET_MODE_SIZE (mode) / 2);
+ else
+ return gen_rtx_REG (mode, FP_RETURN);
+ }
+ }
+
+ return gen_rtx_REG (mode, GP_RETURN);
+}
+
+/* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
+ all BLKmode objects are returned in memory. Under the new (N32 and
+ 64-bit MIPS ABIs) small structures are returned in a register.
+ Objects with varying size must still be returned in memory, of
+ course. */
+
+static bool
+mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
+{
+ if (TARGET_OLDABI)
+ return (TYPE_MODE (type) == BLKmode);
+ else
+ return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
+ || (int_size_in_bytes (type) == -1));
+}
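A standalone sketch of the decision rule above; old_abi, blkmode and the UNITS_PER_WORD value of 4 are stand-ins for TARGET_OLDABI, TYPE_MODE (type) == BLKmode and a 32-bit target, chosen only for illustration.

#include <stdbool.h>
#include <stdio.h>

#define UNITS_PER_WORD 4   /* assumed 32-bit target */

/* old_abi/blkmode model TARGET_OLDABI and TYPE_MODE (type) == BLKmode;
   size models int_size_in_bytes (type), with -1 meaning "variable".  */
static bool
return_in_memory (bool old_abi, bool blkmode, long size)
{
  if (old_abi)
    return blkmode;                                /* o32/o64 rule */
  return size > 2 * UNITS_PER_WORD || size == -1;  /* n32/n64 rule */
}

int
main (void)
{
  printf ("n32, 8-byte struct:  %d\n", return_in_memory (false, true, 8));   /* 0 */
  printf ("n32, 12-byte struct: %d\n", return_in_memory (false, true, 12));  /* 1 */
  printf ("o32, BLKmode struct: %d\n", return_in_memory (true, true, 8));    /* 1 */
  return 0;
}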
static void
mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
@@ -5294,6 +5060,887 @@ mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
return addr;
}
+/* We keep a list of functions for which we have already built stubs
+ in build_mips16_call_stub. */
+
+struct mips16_stub
+{
+ struct mips16_stub *next;
+ char *name;
+ int fpret;
+};
+
+static struct mips16_stub *mips16_stubs;
+
+/* Return a two-character string representing a function floating-point
+ return mode, used to name MIPS16 function stubs. */
+
+static const char *
+mips16_call_stub_mode_suffix (enum machine_mode mode)
+{
+ if (mode == SFmode)
+ return "sf";
+ else if (mode == DFmode)
+ return "df";
+ else if (mode == SCmode)
+ return "sc";
+ else if (mode == DCmode)
+ return "dc";
+ else if (mode == V2SFmode)
+ return "df";
+ else
+ gcc_unreachable ();
+}
+
+/* Write out code to move floating point arguments in or out of
+ general registers. Output the instructions to FILE. FP_CODE is
+ the code describing which arguments are present (see the comment at
+ the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
+ we are copying from the floating point registers. */
+
+static void
+mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
+{
+ const char *s;
+ int gparg, fparg;
+ unsigned int f;
+ CUMULATIVE_ARGS cum;
+
+ /* This code only works for the original 32-bit ABI and the O64 ABI. */
+ gcc_assert (TARGET_OLDABI);
+
+ if (from_fp_p)
+ s = "mfc1";
+ else
+ s = "mtc1";
+
+ init_cumulative_args (&cum, NULL, NULL);
+
+ for (f = (unsigned int) fp_code; f != 0; f >>= 2)
+ {
+ enum machine_mode mode;
+ struct mips_arg_info info;
+
+ if ((f & 3) == 1)
+ mode = SFmode;
+ else if ((f & 3) == 2)
+ mode = DFmode;
+ else
+ gcc_unreachable ();
+
+ mips_arg_info (&cum, mode, NULL, true, &info);
+ gparg = mips_arg_regno (&info, false);
+ fparg = mips_arg_regno (&info, true);
+
+ if (mode == SFmode)
+ fprintf (file, "\t%s\t%s,%s\n", s,
+ reg_names[gparg], reg_names[fparg]);
+ else if (TARGET_64BIT)
+ fprintf (file, "\td%s\t%s,%s\n", s,
+ reg_names[gparg], reg_names[fparg]);
+ else if (ISA_HAS_MXHC1)
+ /* -mips32r2 -mfp64 */
+ fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n",
+ s,
+ reg_names[gparg + (WORDS_BIG_ENDIAN ? 1 : 0)],
+ reg_names[fparg],
+ from_fp_p ? "mfhc1" : "mthc1",
+ reg_names[gparg + (WORDS_BIG_ENDIAN ? 0 : 1)],
+ reg_names[fparg]);
+ else if (TARGET_BIG_ENDIAN)
+ fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
+ reg_names[gparg], reg_names[fparg + 1], s,
+ reg_names[gparg + 1], reg_names[fparg]);
+ else
+ fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
+ reg_names[gparg], reg_names[fparg], s,
+ reg_names[gparg + 1], reg_names[fparg + 1]);
+
+ function_arg_advance (&cum, mode, NULL, true);
+ }
+}
+
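As a standalone illustration of the FP_CODE encoding that function_arg_advance builds and this loop decodes (two bits per argument, 1 for float and 2 for double, least significant bits first, and only the first two arguments under the o32 rules described in mips.h), assuming a hypothetical prototype foo (float, double):

#include <stdio.h>

int
main (void)
{
  /* For foo (float, double): 1 << 0 for the float, 2 << 2 for the double.  */
  unsigned int fp_code = 1 + (2 << 2);   /* == 9 */
  unsigned int f;

  /* The same decode loop as in mips16_fp_args above.  */
  for (f = fp_code; f != 0; f >>= 2)
    printf ("%s\n", (f & 3) == 1 ? "float" : "double");
  return 0;
}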
+/* Build a mips16 function stub. This is used for functions which
+ take arguments in the floating point registers. It is 32-bit code
+ that moves the floating point args into the general registers, and
+ then jumps to the 16-bit code. */
+
+static void
+build_mips16_function_stub (FILE *file)
+{
+ const char *fnname;
+ char *secname, *stubname;
+ tree stubid, stubdecl;
+ int need_comma;
+ unsigned int f;
+
+ fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
+ fnname = targetm.strip_name_encoding (fnname);
+ secname = (char *) alloca (strlen (fnname) + 20);
+ sprintf (secname, ".mips16.fn.%s", fnname);
+ stubname = (char *) alloca (strlen (fnname) + 20);
+ sprintf (stubname, "__fn_stub_%s", fnname);
+ stubid = get_identifier (stubname);
+ stubdecl = build_decl (FUNCTION_DECL, stubid,
+ build_function_type (void_type_node, NULL_TREE));
+ DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
+ DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
+
+ fprintf (file, "\t# Stub function for %s (", current_function_name ());
+ need_comma = 0;
+ for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
+ {
+ fprintf (file, "%s%s",
+ need_comma ? ", " : "",
+ (f & 3) == 1 ? "float" : "double");
+ need_comma = 1;
+ }
+ fprintf (file, ")\n");
+
+ fprintf (file, "\t.set\tnomips16\n");
+ switch_to_section (function_section (stubdecl));
+ ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
+
+ /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
+ within a .ent, and we cannot emit another .ent. */
+ if (!FUNCTION_NAME_ALREADY_DECLARED)
+ {
+ fputs ("\t.ent\t", file);
+ assemble_name (file, stubname);
+ fputs ("\n", file);
+ }
+
+ assemble_name (file, stubname);
+ fputs (":\n", file);
+
+ /* We don't want the assembler to insert any nops here. */
+ fprintf (file, "\t.set\tnoreorder\n");
+
+ mips16_fp_args (file, current_function_args_info.fp_code, 1);
+
+ fprintf (asm_out_file, "\t.set\tnoat\n");
+ fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
+ assemble_name (file, fnname);
+ fprintf (file, "\n");
+ fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
+ fprintf (asm_out_file, "\t.set\tat\n");
+
+ /* Unfortunately, we can't fill the jump delay slot. We can't fill
+ with one of the mfc1 instructions, because the result is not
+ available for one instruction, so if the very first instruction
+ in the function refers to the register, it will see the wrong
+ value. */
+ fprintf (file, "\tnop\n");
+
+ fprintf (file, "\t.set\treorder\n");
+
+ if (!FUNCTION_NAME_ALREADY_DECLARED)
+ {
+ fputs ("\t.end\t", file);
+ assemble_name (file, stubname);
+ fputs ("\n", file);
+ }
+
+ switch_to_section (function_section (current_function_decl));
+}
+
+/* Emit code to return a double value from a mips16 stub. GPREG is the
+ first GP reg to use, FPREG is the first FP reg to use. */
+
+static void
+mips16_fpret_double (int gpreg, int fpreg)
+{
+ if (TARGET_64BIT)
+ fprintf (asm_out_file, "\tdmfc1\t%s,%s\n",
+ reg_names[gpreg], reg_names[fpreg]);
+ else if (TARGET_FLOAT64)
+ {
+ fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
+ reg_names[gpreg + WORDS_BIG_ENDIAN],
+ reg_names[fpreg]);
+ fprintf (asm_out_file, "\tmfhc1\t%s,%s\n",
+ reg_names[gpreg + !WORDS_BIG_ENDIAN],
+ reg_names[fpreg]);
+ }
+ else
+ {
+ if (TARGET_BIG_ENDIAN)
+ {
+ fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
+ reg_names[gpreg + 0],
+ reg_names[fpreg + 1]);
+ fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
+ reg_names[gpreg + 1],
+ reg_names[fpreg + 0]);
+ }
+ else
+ {
+ fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
+ reg_names[gpreg + 0],
+ reg_names[fpreg + 0]);
+ fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
+ reg_names[gpreg + 1],
+ reg_names[fpreg + 1]);
+ }
+ }
+}
+
+/* Build a call stub for a mips16 call. A stub is needed if we are
+ passing any floating point values which should go into the floating
+ point registers. If we are, and the call turns out to be to a
+ 32-bit function, the stub will be used to move the values into the
+ floating point registers before calling the 32-bit function. The
+ linker will magically adjust the function call to either the 16-bit
+ function or the 32-bit stub, depending upon where the function call
+ is actually defined.
+
+ Similarly, we need a stub if the return value might come back in a
+ floating point register.
+
+ RETVAL is the location of the return value, or null if this is
+ a call rather than a call_value. FN is the address of the
+ function and ARG_SIZE is the size of the arguments. FP_CODE
+ is the code built by function_arg. This function returns a nonzero
+ value if it builds the call instruction itself. */
+
+int
+build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
+{
+ int fpret = 0;
+ const char *fnname;
+ char *secname, *stubname;
+ struct mips16_stub *l;
+ tree stubid, stubdecl;
+ int need_comma;
+ unsigned int f;
+ rtx insn;
+
+ /* We don't need to do anything if we aren't in mips16 mode, or if
+ we were invoked with the -msoft-float option. */
+ if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
+ return 0;
+
+ /* Figure out whether the value might come back in a floating point
+ register. */
+ if (retval)
+ fpret = mips_return_mode_in_fpr_p (GET_MODE (retval));
+
+ /* We don't need to do anything if there were no floating point
+ arguments and the value will not be returned in a floating point
+ register. */
+ if (fp_code == 0 && ! fpret)
+ return 0;
+
+ /* We don't need to do anything if this is a call to a special
+ mips16 support function. */
+ if (GET_CODE (fn) == SYMBOL_REF
+ && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
+ return 0;
+
+  /* This code will only work for the o32 and o64 ABIs.  The other ABIs
+ require more sophisticated support. */
+ gcc_assert (TARGET_OLDABI);
+
+ /* If we're calling via a function pointer, then we must always call
+ via a stub. There are magic stubs provided in libgcc.a for each
+ of the required cases. Each of them expects the function address
+ to arrive in register $2. */
+
+ if (GET_CODE (fn) != SYMBOL_REF)
+ {
+ char buf[30];
+ tree id;
+ rtx stub_fn, insn;
+
+      /* ??? If this code is modified to support other ABIs, we need
+ to handle PARALLEL return values here. */
+
+ if (fpret)
+ sprintf (buf, "__mips16_call_stub_%s_%d",
+ mips16_call_stub_mode_suffix (GET_MODE (retval)),
+ fp_code);
+ else
+ sprintf (buf, "__mips16_call_stub_%d",
+ fp_code);
+
+ id = get_identifier (buf);
+ stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
+
+ mips_emit_move (gen_rtx_REG (Pmode, 2), fn);
+
+ if (retval == NULL_RTX)
+ insn = gen_call_internal (stub_fn, arg_size);
+ else
+ insn = gen_call_value_internal (retval, stub_fn, arg_size);
+ insn = emit_call_insn (insn);
+
+ /* Put the register usage information on the CALL. */
+ CALL_INSN_FUNCTION_USAGE (insn) =
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
+ CALL_INSN_FUNCTION_USAGE (insn));
+
+ /* If we are handling a floating point return value, we need to
+ save $18 in the function prologue. Putting a note on the
+ call will mean that df_regs_ever_live_p ($18) will be true if the
+ call is not eliminated, and we can check that in the prologue
+ code. */
+ if (fpret)
+ CALL_INSN_FUNCTION_USAGE (insn) =
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_USE (VOIDmode,
+ gen_rtx_REG (word_mode, 18)),
+ CALL_INSN_FUNCTION_USAGE (insn));
+
+ /* Return 1 to tell the caller that we've generated the call
+ insn. */
+ return 1;
+ }
+
+ /* We know the function we are going to call. If we have already
+ built a stub, we don't need to do anything further. */
+
+ fnname = targetm.strip_name_encoding (XSTR (fn, 0));
+ for (l = mips16_stubs; l != NULL; l = l->next)
+ if (strcmp (l->name, fnname) == 0)
+ break;
+
+ if (l == NULL)
+ {
+ /* Build a special purpose stub. When the linker sees a
+ function call in mips16 code, it will check where the target
+ is defined. If the target is a 32-bit call, the linker will
+ search for the section defined here. It can tell which
+ symbol this section is associated with by looking at the
+ relocation information (the name is unreliable, since this
+ might be a static function). If such a section is found, the
+ linker will redirect the call to the start of the magic
+ section.
+
+ If the function does not return a floating point value, the
+ special stub section is named
+ .mips16.call.FNNAME
+
+ If the function does return a floating point value, the stub
+ section is named
+ .mips16.call.fp.FNNAME
+ */
+
+ secname = (char *) alloca (strlen (fnname) + 40);
+ sprintf (secname, ".mips16.call.%s%s",
+ fpret ? "fp." : "",
+ fnname);
+ stubname = (char *) alloca (strlen (fnname) + 20);
+ sprintf (stubname, "__call_stub_%s%s",
+ fpret ? "fp_" : "",
+ fnname);
+ stubid = get_identifier (stubname);
+ stubdecl = build_decl (FUNCTION_DECL, stubid,
+ build_function_type (void_type_node, NULL_TREE));
+ DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
+ DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
+
+ fprintf (asm_out_file, "\t# Stub function to call %s%s (",
+ (fpret
+ ? (GET_MODE (retval) == SFmode ? "float " : "double ")
+ : ""),
+ fnname);
+ need_comma = 0;
+ for (f = (unsigned int) fp_code; f != 0; f >>= 2)
+ {
+ fprintf (asm_out_file, "%s%s",
+ need_comma ? ", " : "",
+ (f & 3) == 1 ? "float" : "double");
+ need_comma = 1;
+ }
+ fprintf (asm_out_file, ")\n");
+
+ fprintf (asm_out_file, "\t.set\tnomips16\n");
+ assemble_start_function (stubdecl, stubname);
+
+ if (!FUNCTION_NAME_ALREADY_DECLARED)
+ {
+ fputs ("\t.ent\t", asm_out_file);
+ assemble_name (asm_out_file, stubname);
+ fputs ("\n", asm_out_file);
+
+ assemble_name (asm_out_file, stubname);
+ fputs (":\n", asm_out_file);
+ }
+
+ /* We build the stub code by hand. That's the only way we can
+ do it, since we can't generate 32-bit code during a 16-bit
+ compilation. */
+
+ /* We don't want the assembler to insert any nops here. */
+ fprintf (asm_out_file, "\t.set\tnoreorder\n");
+
+ mips16_fp_args (asm_out_file, fp_code, 0);
+
+ if (! fpret)
+ {
+ fprintf (asm_out_file, "\t.set\tnoat\n");
+ fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
+ fnname);
+ fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
+ fprintf (asm_out_file, "\t.set\tat\n");
+ /* Unfortunately, we can't fill the jump delay slot. We
+ can't fill with one of the mtc1 instructions, because the
+ result is not available for one instruction, so if the
+ very first instruction in the function refers to the
+ register, it will see the wrong value. */
+ fprintf (asm_out_file, "\tnop\n");
+ }
+ else
+ {
+ fprintf (asm_out_file, "\tmove\t%s,%s\n",
+ reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
+ fprintf (asm_out_file, "\tjal\t%s\n", fnname);
+ /* As above, we can't fill the delay slot. */
+ fprintf (asm_out_file, "\tnop\n");
+ switch (GET_MODE (retval))
+ {
+ case SCmode:
+ fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
+ reg_names[GP_REG_FIRST + 3],
+ reg_names[FP_REG_FIRST + MAX_FPRS_PER_FMT]);
+	      /* Fall through.  */
+ case SFmode:
+ fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
+ reg_names[GP_REG_FIRST + 2],
+ reg_names[FP_REG_FIRST + 0]);
+ if (GET_MODE (retval) == SCmode && TARGET_64BIT)
+ {
+ /* On 64-bit targets, complex floats are returned in
+ a single GPR, such that "sd" on a suitably-aligned
+ target would store the value correctly. */
+ fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
+ reg_names[GP_REG_FIRST + 2 + TARGET_LITTLE_ENDIAN],
+ reg_names[GP_REG_FIRST + 2 + TARGET_LITTLE_ENDIAN]);
+ fprintf (asm_out_file, "\tor\t%s,%s,%s\n",
+ reg_names[GP_REG_FIRST + 2],
+ reg_names[GP_REG_FIRST + 2],
+ reg_names[GP_REG_FIRST + 3]);
+ }
+ break;
+
+ case DCmode:
+ mips16_fpret_double (GP_REG_FIRST + 2 + (8 / UNITS_PER_WORD),
+ FP_REG_FIRST + MAX_FPRS_PER_FMT);
+	      /* Fall through.  */
+ case DFmode:
+ case V2SFmode:
+ mips16_fpret_double (GP_REG_FIRST + 2, FP_REG_FIRST + 0);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
+ /* As above, we can't fill the delay slot. */
+ fprintf (asm_out_file, "\tnop\n");
+ }
+
+ fprintf (asm_out_file, "\t.set\treorder\n");
+
+#ifdef ASM_DECLARE_FUNCTION_SIZE
+ ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
+#endif
+
+ if (!FUNCTION_NAME_ALREADY_DECLARED)
+ {
+ fputs ("\t.end\t", asm_out_file);
+ assemble_name (asm_out_file, stubname);
+ fputs ("\n", asm_out_file);
+ }
+
+ /* Record this stub. */
+ l = (struct mips16_stub *) xmalloc (sizeof *l);
+ l->name = xstrdup (fnname);
+ l->fpret = fpret;
+ l->next = mips16_stubs;
+ mips16_stubs = l;
+ }
+
+ /* If we expect a floating point return value, but we've built a
+ stub which does not expect one, then we're in trouble. We can't
+ use the existing stub, because it won't handle the floating point
+ value. We can't build a new stub, because the linker won't know
+ which stub to use for the various calls in this object file.
+ Fortunately, this case is illegal, since it means that a function
+ was declared in two different ways in a single compilation. */
+ if (fpret && ! l->fpret)
+ error ("cannot handle inconsistent calls to %qs", fnname);
+
+ if (retval == NULL_RTX)
+ insn = gen_call_internal_direct (fn, arg_size);
+ else
+ insn = gen_call_value_internal_direct (retval, fn, arg_size);
+ insn = emit_call_insn (insn);
+
+ /* If we are calling a stub which handles a floating point return
+ value, we need to arrange to save $18 in the prologue. We do
+ this by marking the function call as using the register. The
+ prologue will later see that it is used, and emit code to save
+ it. */
+ if (l->fpret)
+ CALL_INSN_FUNCTION_USAGE (insn) =
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
+ CALL_INSN_FUNCTION_USAGE (insn));
+
+ /* Return 1 to tell the caller that we've generated the call
+ insn. */
+ return 1;
+}
+
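A standalone sketch of the stub and section names the code above constructs for a hypothetical callee "foo", using snprintf and fixed buffers in place of the alloca'd buffers of the real code:

#include <stdio.h>

int
main (void)
{
  const char *fnname = "foo";   /* hypothetical callee */
  int fpret = 1;                /* value comes back in an FPR */
  char secname[64], stubname[64];

  /* Same format strings as build_mips16_call_stub above.  */
  snprintf (secname, sizeof secname, ".mips16.call.%s%s",
            fpret ? "fp." : "", fnname);
  snprintf (stubname, sizeof stubname, "__call_stub_%s%s",
            fpret ? "fp_" : "", fnname);
  printf ("%s\n%s\n", secname, stubname);   /* .mips16.call.fp.foo
					       __call_stub_fp_foo */
  return 0;
}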
+/* Return true if calls to X can use R_MIPS_CALL* relocations. */
+
+static bool
+mips_ok_for_lazy_binding_p (rtx x)
+{
+ return (TARGET_USE_GOT
+ && GET_CODE (x) == SYMBOL_REF
+ && !mips_symbol_binds_local_p (x));
+}
+
+/* Load function address ADDR into register DEST. SIBCALL_P is true
+ if the address is needed for a sibling call. Return true if we
+ used an explicit lazy-binding sequence. */
+
+static bool
+mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
+{
+ /* If we're generating PIC, and this call is to a global function,
+ try to allow its address to be resolved lazily. This isn't
+ possible if TARGET_CALL_SAVED_GP since the value of $gp on entry
+ to the stub would be our caller's gp, not ours. */
+ if (TARGET_EXPLICIT_RELOCS
+ && !(sibcall_p && TARGET_CALL_SAVED_GP)
+ && mips_ok_for_lazy_binding_p (addr))
+ {
+ rtx high, lo_sum_symbol;
+
+ high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
+ addr, SYMBOL_GOTOFF_CALL);
+ lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
+ if (Pmode == SImode)
+ emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
+ else
+ emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
+ return true;
+ }
+ else
+ {
+ mips_emit_move (dest, addr);
+ return false;
+ }
+}
+
+
+/* Expand a call or call_value instruction. RESULT is where the
+ result will go (null for calls), ADDR is the address of the
+ function, ARGS_SIZE is the size of the arguments and AUX is
+ the value passed to us by mips_function_arg. SIBCALL_P is true
+ if we are expanding a sibling call, false if we're expanding
+ a normal call. */
+
+void
+mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
+{
+ rtx orig_addr, pattern, insn;
+ bool lazy_p;
+
+ orig_addr = addr;
+ lazy_p = false;
+ if (!call_insn_operand (addr, VOIDmode))
+ {
+ addr = gen_reg_rtx (Pmode);
+ lazy_p = mips_load_call_address (addr, orig_addr, sibcall_p);
+ }
+
+ if (TARGET_MIPS16
+ && TARGET_HARD_FLOAT_ABI
+ && build_mips16_call_stub (result, addr, args_size,
+ aux == 0 ? 0 : (int) GET_MODE (aux)))
+ return;
+
+ if (result == 0)
+ pattern = (sibcall_p
+ ? gen_sibcall_internal (addr, args_size)
+ : gen_call_internal (addr, args_size));
+ else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
+ {
+ rtx reg1, reg2;
+
+ reg1 = XEXP (XVECEXP (result, 0, 0), 0);
+ reg2 = XEXP (XVECEXP (result, 0, 1), 0);
+ pattern =
+ (sibcall_p
+ ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
+ : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
+ }
+ else
+ pattern = (sibcall_p
+ ? gen_sibcall_value_internal (result, addr, args_size)
+ : gen_call_value_internal (result, addr, args_size));
+
+ insn = emit_call_insn (pattern);
+
+ /* Lazy-binding stubs require $gp to be valid on entry. We also pretend
+ that they use FAKE_CALL_REGNO; see the load_call<mode> patterns for
+ details. */
+ if (lazy_p)
+ {
+ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
+ use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
+ gen_rtx_REG (Pmode, FAKE_CALL_REGNO));
+ }
+}
+
+
+/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
+
+static bool
+mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
+{
+ if (!TARGET_SIBCALLS)
+ return false;
+
+ /* We can't do a sibcall if the called function is a MIPS16 function
+ because there is no direct "jx" instruction equivalent to "jalx" to
+ switch the ISA mode. */
+ if (mips_use_mips16_mode_p (decl))
+ return false;
+
+ /* ...and when -minterlink-mips16 is in effect, assume that external
+ functions could be MIPS16 ones unless an attribute explicitly
+ tells us otherwise. We only care about cases where the sibling
+ and normal calls would both be direct. */
+ if (TARGET_INTERLINK_MIPS16
+ && decl
+ && DECL_EXTERNAL (decl)
+ && !mips_nomips16_decl_p (decl)
+ && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
+ return false;
+
+ /* Otherwise OK. */
+ return true;
+}
+
+/* Emit code to move general operand SRC into condition-code
+ register DEST. SCRATCH is a scratch TFmode float register.
+ The sequence is:
+
+ FP1 = SRC
+ FP2 = 0.0f
+ DEST = FP2 < FP1
+
+ where FP1 and FP2 are single-precision float registers
+ taken from SCRATCH. */
+
+void
+mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
+{
+ rtx fp1, fp2;
+
+ /* Change the source to SFmode. */
+ if (MEM_P (src))
+ src = adjust_address (src, SFmode, 0);
+ else if (REG_P (src) || GET_CODE (src) == SUBREG)
+ src = gen_rtx_REG (SFmode, true_regnum (src));
+
+ fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
+ fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
+
+ mips_emit_move (copy_rtx (fp1), src);
+ mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
+ emit_insn (gen_slt_sf (dest, fp2, fp1));
+}
+
+/* Emit straight-line code to move LENGTH bytes from SRC to DEST.
+ Assume that the areas do not overlap. */
+
+static void
+mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
+{
+ HOST_WIDE_INT offset, delta;
+ unsigned HOST_WIDE_INT bits;
+ int i;
+ enum machine_mode mode;
+ rtx *regs;
+
+ /* Work out how many bits to move at a time. If both operands have
+ half-word alignment, it is usually better to move in half words.
+ For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
+ and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
+ Otherwise move word-sized chunks. */
+ if (MEM_ALIGN (src) == BITS_PER_WORD / 2
+ && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
+ bits = BITS_PER_WORD / 2;
+ else
+ bits = BITS_PER_WORD;
+
+ mode = mode_for_size (bits, MODE_INT, 0);
+ delta = bits / BITS_PER_UNIT;
+
+ /* Allocate a buffer for the temporary registers. */
+ regs = alloca (sizeof (rtx) * length / delta);
+
+ /* Load as many BITS-sized chunks as possible. Use a normal load if
+ the source has enough alignment, otherwise use left/right pairs. */
+ for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
+ {
+ regs[i] = gen_reg_rtx (mode);
+ if (MEM_ALIGN (src) >= bits)
+ mips_emit_move (regs[i], adjust_address (src, mode, offset));
+ else
+ {
+ rtx part = adjust_address (src, BLKmode, offset);
+ if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
+ gcc_unreachable ();
+ }
+ }
+
+ /* Copy the chunks to the destination. */
+ for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
+ if (MEM_ALIGN (dest) >= bits)
+ mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
+ else
+ {
+ rtx part = adjust_address (dest, BLKmode, offset);
+ if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
+ gcc_unreachable ();
+ }
+
+ /* Mop up any left-over bytes. */
+ if (offset < length)
+ {
+ src = adjust_address (src, BLKmode, offset);
+ dest = adjust_address (dest, BLKmode, offset);
+ move_by_pieces (dest, src, length - offset,
+ MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
+ }
+}
+
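A standalone sketch of the chunking arithmetic described in the comment above, assuming a 32-bit target (BITS_PER_WORD of 32) and half-word-aligned operands; the concrete length and alignments are illustrative only.

#include <stdio.h>

#define BITS_PER_UNIT 8
#define BITS_PER_WORD 32   /* assumed 32-bit target */

int
main (void)
{
  long length = 11;                          /* bytes to copy */
  unsigned src_align = 16, dest_align = 16;  /* alignment in bits */
  unsigned bits;
  long delta;

  /* Same choice as above: half words only when both sides have exactly
     half-word alignment, otherwise full words.  */
  if (src_align == BITS_PER_WORD / 2 && dest_align == BITS_PER_WORD / 2)
    bits = BITS_PER_WORD / 2;
  else
    bits = BITS_PER_WORD;
  delta = bits / BITS_PER_UNIT;

  printf ("%ld chunks of %ld bytes, %ld bytes left for move_by_pieces\n",
          length / delta, delta, length % delta);   /* 5, 2, 1 */
  return 0;
}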
+#define MAX_MOVE_REGS 4
+#define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
+
+
+/* Helper function for doing a loop-based block operation on memory
+ reference MEM. Each iteration of the loop will operate on LENGTH
+ bytes of MEM.
+
+ Create a new base register for use within the loop and point it to
+ the start of MEM. Create a new memory reference that uses this
+ register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
+
+static void
+mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
+ rtx *loop_reg, rtx *loop_mem)
+{
+ *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
+
+ /* Although the new mem does not refer to a known location,
+ it does keep up to LENGTH bytes of alignment. */
+ *loop_mem = change_address (mem, BLKmode, *loop_reg);
+ set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
+}
+
+
+/* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
+ per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
+ memory regions do not overlap. */
+
+static void
+mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
+{
+ rtx label, src_reg, dest_reg, final_src;
+ HOST_WIDE_INT leftover;
+
+ leftover = length % MAX_MOVE_BYTES;
+ length -= leftover;
+
+ /* Create registers and memory references for use within the loop. */
+ mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
+ mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
+
+ /* Calculate the value that SRC_REG should have after the last iteration
+ of the loop. */
+ final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
+ 0, 0, OPTAB_WIDEN);
+
+ /* Emit the start of the loop. */
+ label = gen_label_rtx ();
+ emit_label (label);
+
+ /* Emit the loop body. */
+ mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
+
+ /* Move on to the next block. */
+ mips_emit_move (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
+ mips_emit_move (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
+
+ /* Emit the loop condition. */
+ if (Pmode == DImode)
+ emit_insn (gen_cmpdi (src_reg, final_src));
+ else
+ emit_insn (gen_cmpsi (src_reg, final_src));
+ emit_jump_insn (gen_bne (label));
+
+ /* Mop up any left-over bytes. */
+ if (leftover)
+ mips_block_move_straight (dest, src, leftover);
+}
+
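A standalone sketch of how the loop above splits LENGTH into full MAX_MOVE_BYTES iterations plus a straight-line tail, again assuming UNITS_PER_WORD of 4 purely for illustration:

#include <stdio.h>

#define UNITS_PER_WORD 4   /* assumed 32-bit target */
#define MAX_MOVE_REGS 4
#define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)

int
main (void)
{
  long length = 100;
  long leftover = length % MAX_MOVE_BYTES;

  printf ("%d loop iterations of %d bytes, then %ld straight-line bytes\n",
          (int) ((length - leftover) / MAX_MOVE_BYTES), MAX_MOVE_BYTES,
          leftover);   /* 6 iterations of 16 bytes, then 4 bytes */
  return 0;
}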
+/* Expand a movmemsi instruction. */
+
+bool
+mips_expand_block_move (rtx dest, rtx src, rtx length)
+{
+ if (GET_CODE (length) == CONST_INT)
+ {
+ if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
+ {
+ mips_block_move_straight (dest, src, INTVAL (length));
+ return true;
+ }
+ else if (optimize)
+ {
+ mips_block_move_loop (dest, src, INTVAL (length));
+ return true;
+ }
+ }
+ return false;
+}
+
+
+/* Expand a loop of synci insns for the address range [BEGIN, END). */
+
+void
+mips_expand_synci_loop (rtx begin, rtx end)
+{
+ rtx inc, label, cmp, cmp_result;
+
+ /* Load INC with the cache line size (rdhwr INC,$1). */
+ inc = gen_reg_rtx (SImode);
+ emit_insn (gen_rdhwr (inc, const1_rtx));
+
+ /* Loop back to here. */
+ label = gen_label_rtx ();
+ emit_label (label);
+
+ emit_insn (gen_synci (begin));
+
+ cmp = gen_reg_rtx (Pmode);
+ mips_emit_binary (GTU, cmp, begin, end);
+
+ mips_emit_binary (PLUS, begin, begin, inc);
+
+ cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
+ emit_jump_insn (gen_condjump (cmp_result, label));
+}
+
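A C-level model of the instruction sequence this expander emits: flush one cache line per iteration until the flushed address passes END. The fixed 32-byte line size and the addresses are illustrative assumptions; at run time rdhwr supplies the real line size and synci does the flushing.

#include <stdbool.h>
#include <stdio.h>

int
main (void)
{
  unsigned long begin = 0x1000, end = 0x1048;
  unsigned long inc = 32;   /* cache line size; rdhwr $1 at run time */
  bool past_end;

  do
    {
      printf ("synci 0(%#lx)\n", begin);   /* stands in for the synci insn */
      past_end = begin > end;              /* the GTU comparison above */
      begin += inc;
    }
  while (!past_end);
  return 0;
}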
/* Return true if it is possible to use left/right accesses for a
bitfield of WIDTH bits starting BITPOS bits into *OP. When
returning true, update *OP, *LEFT and *RIGHT as follows:
@@ -5469,34 +6116,7 @@ mips_use_ins_ext_p (rtx op, rtx size, rtx position)
return true;
}
-
-/* Set up globals to generate code for the ISA or processor
- described by INFO. */
-
-static void
-mips_set_architecture (const struct mips_cpu_info *info)
-{
- if (info != 0)
- {
- mips_arch_info = info;
- mips_arch = info->cpu;
- mips_isa = info->isa;
- }
-}
-
-
-/* Likewise for tuning. */
-
-static void
-mips_set_tune (const struct mips_cpu_info *info)
-{
- if (info != 0)
- {
- mips_tune_info = info;
- mips_tune = info->cpu;
- }
-}
-
+
/* Initialize mips_split_addresses from the associated command-line
settings.
@@ -5632,830 +6252,6 @@ mips_init_relocs (void)
mips_lo_relocs[SYMBOL_HALF] = "%half(";
}
-static GTY(()) int was_mips16_p = -1;
-
-/* Set up the target-dependent global state so that it matches the
- current function's ISA mode. */
-
-static void
-mips_set_mips16_mode (int mips16_p)
-{
- if (mips16_p == was_mips16_p)
- return;
-
- /* Restore base settings of various flags. */
- target_flags = mips_base_target_flags;
- align_loops = mips_base_align_loops;
- align_jumps = mips_base_align_jumps;
- align_functions = mips_base_align_functions;
- flag_schedule_insns = mips_base_schedule_insns;
- flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
- flag_move_loop_invariants = mips_base_move_loop_invariants;
- flag_delayed_branch = mips_flag_delayed_branch;
-
- if (mips16_p)
- {
- /* Select mips16 instruction set. */
- target_flags |= MASK_MIPS16;
-
- /* Don't run the scheduler before reload, since it tends to
- increase register pressure. */
- flag_schedule_insns = 0;
-
- /* Don't do hot/cold partitioning. The constant layout code expects
- the whole function to be in a single section. */
- flag_reorder_blocks_and_partition = 0;
-
- /* Don't move loop invariants, because it tends to increase
- register pressure. It also introduces an extra move in cases
- where the constant is the first operand in a two-operand binary
-	 instruction, or when it forms a register argument to a function
- call. */
- flag_move_loop_invariants = 0;
-
- /* Silently disable -mexplicit-relocs since it doesn't apply
-	 to mips16 code.  Even so, it would be overly pedantic to warn
- about "-mips16 -mexplicit-relocs", especially given that
- we use a %gprel() operator. */
- target_flags &= ~MASK_EXPLICIT_RELOCS;
-
- /* Experiments suggest we get the best overall results from using
- the range of an unextended lw or sw. Code that makes heavy use
- of byte or short accesses can do better with ranges of 0...31
- and 0...63 respectively, but most code is sensitive to the range
- of lw and sw instead. */
- targetm.min_anchor_offset = 0;
- targetm.max_anchor_offset = 127;
-
- if (flag_pic || TARGET_ABICALLS)
- sorry ("MIPS16 PIC");
- }
- else
- {
- /* Reset to select base non-mips16 ISA. */
- target_flags &= ~MASK_MIPS16;
-
- /* When using explicit relocs, we call dbr_schedule from within
- mips_reorg. */
- if (TARGET_EXPLICIT_RELOCS)
- flag_delayed_branch = 0;
-
- /* Provide default values for align_* for 64-bit targets. */
- if (TARGET_64BIT)
- {
- if (align_loops == 0)
- align_loops = 8;
- if (align_jumps == 0)
- align_jumps = 8;
- if (align_functions == 0)
- align_functions = 8;
- }
-
- targetm.min_anchor_offset = -32768;
- targetm.max_anchor_offset = 32767;
- }
-
- /* (Re)initialize mips target internals for new ISA. */
- mips_init_split_addresses ();
- mips_init_relocs ();
-
- if (was_mips16_p >= 0)
- /* Reinitialize target-dependent state. */
- target_reinit ();
-
- was_mips16_p = TARGET_MIPS16;
-}
-
-/* Use a hash table to keep track of implicit mips16/nomips16 attributes
- for -mflip_mips16. It maps decl names onto a boolean mode setting. */
-
-struct mflip_mips16_entry GTY (()) {
- const char *name;
- bool mips16_p;
-};
-static GTY ((param_is (struct mflip_mips16_entry))) htab_t mflip_mips16_htab;
-
-/* Hash table callbacks for mflip_mips16_htab. */
-
-static hashval_t
-mflip_mips16_htab_hash (const void *entry)
-{
- return htab_hash_string (((const struct mflip_mips16_entry *) entry)->name);
-}
-
-static int
-mflip_mips16_htab_eq (const void *entry, const void *name)
-{
- return strcmp (((const struct mflip_mips16_entry *) entry)->name,
- (const char *) name) == 0;
-}
-
-/* DECL is a function that needs a default "mips16" or "nomips16" attribute
- for -mflip-mips16. Return true if it should use "mips16" and false if
- it should use "nomips16". */
-
-static bool
-mflip_mips16_use_mips16_p (tree decl)
-{
- struct mflip_mips16_entry *entry;
- const char *name;
- hashval_t hash;
- void **slot;
-
- /* Use the opposite of the command-line setting for anonymous decls. */
- if (!DECL_NAME (decl))
- return !mips_base_mips16;
-
- if (!mflip_mips16_htab)
- mflip_mips16_htab = htab_create_ggc (37, mflip_mips16_htab_hash,
- mflip_mips16_htab_eq, NULL);
-
- name = IDENTIFIER_POINTER (DECL_NAME (decl));
- hash = htab_hash_string (name);
- slot = htab_find_slot_with_hash (mflip_mips16_htab, name, hash, INSERT);
- entry = (struct mflip_mips16_entry *) *slot;
- if (!entry)
- {
- mips16_flipper = !mips16_flipper;
- entry = GGC_NEW (struct mflip_mips16_entry);
- entry->name = name;
- entry->mips16_p = mips16_flipper ? !mips_base_mips16 : mips_base_mips16;
- *slot = entry;
- }
- return entry->mips16_p;
-}
-
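A standalone model of the flip-flop logic above, with a small linear-search table standing in for the GC-allocated hash table and an assumed -mno-mips16 baseline; it shows that a name seen before keeps its setting while each new name alternates.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Tiny linear-search stand-in for mflip_mips16_htab.  */
struct entry { const char *name; bool mips16_p; };
static struct entry table[16];
static int n_entries;
static bool mips16_flipper;
static const bool base_mips16 = false;   /* assumed -mno-mips16 baseline */

static bool
flip_use_mips16_p (const char *name)
{
  int i;

  for (i = 0; i < n_entries; i++)
    if (strcmp (table[i].name, name) == 0)
      return table[i].mips16_p;

  /* First time we see NAME: alternate the setting, then remember it.  */
  mips16_flipper = !mips16_flipper;
  table[n_entries].name = name;
  table[n_entries].mips16_p = mips16_flipper ? !base_mips16 : base_mips16;
  return table[n_entries++].mips16_p;
}

int
main (void)
{
  printf ("f: %d\n", flip_use_mips16_p ("f"));   /* 1 */
  printf ("g: %d\n", flip_use_mips16_p ("g"));   /* 0 */
  printf ("f: %d\n", flip_use_mips16_p ("f"));   /* 1 again: stable per name */
  return 0;
}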
-/* Implement TARGET_INSERT_ATTRIBUTES. */
-
-static void
-mips_insert_attributes (tree decl, tree *attributes)
-{
- const char *name;
- bool mips16_p, nomips16_p;
-
- /* Check for "mips16" and "nomips16" attributes. */
- mips16_p = lookup_attribute ("mips16", *attributes) != NULL;
- nomips16_p = lookup_attribute ("nomips16", *attributes) != NULL;
- if (TREE_CODE (decl) != FUNCTION_DECL)
- {
- if (mips16_p)
- error ("%qs attribute only applies to functions", "mips16");
- if (nomips16_p)
- error ("%qs attribute only applies to functions", "nomips16");
- }
- else
- {
- mips16_p |= mips_mips16_decl_p (decl);
- nomips16_p |= mips_nomips16_decl_p (decl);
- if (mips16_p || nomips16_p)
- {
- /* DECL cannot be simultaneously mips16 and nomips16. */
- if (mips16_p && nomips16_p)
- error ("%qs cannot have both %<mips16%> and "
- "%<nomips16%> attributes",
- IDENTIFIER_POINTER (DECL_NAME (decl)));
- }
- else if (TARGET_FLIP_MIPS16 && !DECL_ARTIFICIAL (decl))
- {
- /* Implement -mflip-mips16. If DECL has neither a "nomips16" nor a
- "mips16" attribute, arbitrarily pick one. We must pick the same
- setting for duplicate declarations of a function. */
- name = mflip_mips16_use_mips16_p (decl) ? "mips16" : "nomips16";
- *attributes = tree_cons (get_identifier (name), NULL, *attributes);
- }
- }
-}
-
-/* Implement TARGET_MERGE_DECL_ATTRIBUTES. */
-
-static tree
-mips_merge_decl_attributes (tree olddecl, tree newdecl)
-{
- /* The decls' "mips16" and "nomips16" attributes must match exactly. */
- if (mips_mips16_decl_p (olddecl) != mips_mips16_decl_p (newdecl))
- error ("%qs redeclared with conflicting %qs attributes",
- IDENTIFIER_POINTER (DECL_NAME (newdecl)), "mips16");
- if (mips_nomips16_decl_p (olddecl) != mips_nomips16_decl_p (newdecl))
- error ("%qs redeclared with conflicting %qs attributes",
- IDENTIFIER_POINTER (DECL_NAME (newdecl)), "nomips16");
-
- return merge_attributes (DECL_ATTRIBUTES (olddecl),
- DECL_ATTRIBUTES (newdecl));
-}
-
-/* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
- function should use the MIPS16 ISA and switch modes accordingly. */
-
-static void
-mips_set_current_function (tree fndecl)
-{
- mips_set_mips16_mode (mips_use_mips16_mode_p (fndecl));
-}
-
-/* Implement TARGET_HANDLE_OPTION. */
-
-static bool
-mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
-{
- switch (code)
- {
- case OPT_mabi_:
- if (strcmp (arg, "32") == 0)
- mips_abi = ABI_32;
- else if (strcmp (arg, "o64") == 0)
- mips_abi = ABI_O64;
- else if (strcmp (arg, "n32") == 0)
- mips_abi = ABI_N32;
- else if (strcmp (arg, "64") == 0)
- mips_abi = ABI_64;
- else if (strcmp (arg, "eabi") == 0)
- mips_abi = ABI_EABI;
- else
- return false;
- return true;
-
- case OPT_march_:
- case OPT_mtune_:
- return mips_parse_cpu (arg) != 0;
-
- case OPT_mips:
- mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
- return mips_isa_info != 0;
-
- case OPT_mno_flush_func:
- mips_cache_flush_func = NULL;
- return true;
-
- case OPT_mcode_readable_:
- if (strcmp (arg, "yes") == 0)
- mips_code_readable = CODE_READABLE_YES;
- else if (strcmp (arg, "pcrel") == 0)
- mips_code_readable = CODE_READABLE_PCREL;
- else if (strcmp (arg, "no") == 0)
- mips_code_readable = CODE_READABLE_NO;
- else
- return false;
- return true;
-
- default:
- return true;
- }
-}
-
-/* Set up the threshold for data to go into the small data area, instead
- of the normal data area, and detect any conflicts in the switches. */
-
-void
-override_options (void)
-{
- int i, start, regno;
- enum machine_mode mode;
-
-#ifdef SUBTARGET_OVERRIDE_OPTIONS
- SUBTARGET_OVERRIDE_OPTIONS;
-#endif
-
- mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
-
- /* The following code determines the architecture and register size.
- Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
- The GAS and GCC code should be kept in sync as much as possible. */
-
- if (mips_arch_string != 0)
- mips_set_architecture (mips_parse_cpu (mips_arch_string));
-
- if (mips_isa_info != 0)
- {
- if (mips_arch_info == 0)
- mips_set_architecture (mips_isa_info);
- else if (mips_arch_info->isa != mips_isa_info->isa)
- error ("-%s conflicts with the other architecture options, "
- "which specify a %s processor",
- mips_isa_info->name,
- mips_cpu_info_from_isa (mips_arch_info->isa)->name);
- }
-
- if (mips_arch_info == 0)
- {
-#ifdef MIPS_CPU_STRING_DEFAULT
- mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
-#else
- mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
-#endif
- }
-
- if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
- error ("-march=%s is not compatible with the selected ABI",
- mips_arch_info->name);
-
- /* Optimize for mips_arch, unless -mtune selects a different processor. */
- if (mips_tune_string != 0)
- mips_set_tune (mips_parse_cpu (mips_tune_string));
-
- if (mips_tune_info == 0)
- mips_set_tune (mips_arch_info);
-
- /* Set cost structure for the processor. */
- if (optimize_size)
- mips_cost = &mips_rtx_cost_optimize_size;
- else
- mips_cost = &mips_rtx_cost_data[mips_tune];
-
- /* If the user hasn't specified a branch cost, use the processor's
- default. */
- if (mips_branch_cost == 0)
- mips_branch_cost = mips_cost->branch_cost;
-
- if ((target_flags_explicit & MASK_64BIT) != 0)
- {
- /* The user specified the size of the integer registers. Make sure
- it agrees with the ABI and ISA. */
- if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
- error ("-mgp64 used with a 32-bit processor");
- else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
- error ("-mgp32 used with a 64-bit ABI");
- else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
- error ("-mgp64 used with a 32-bit ABI");
- }
- else
- {
- /* Infer the integer register size from the ABI and processor.
- Restrict ourselves to 32-bit registers if that's all the
- processor has, or if the ABI cannot handle 64-bit registers. */
- if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
- target_flags &= ~MASK_64BIT;
- else
- target_flags |= MASK_64BIT;
- }
-
- if ((target_flags_explicit & MASK_FLOAT64) != 0)
- {
- /* Really, -mfp32 and -mfp64 are ornamental options. There's
- only one right answer here. */
- if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
- error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
- else if (!TARGET_64BIT && TARGET_FLOAT64
- && !(ISA_HAS_MXHC1 && mips_abi == ABI_32))
- error ("-mgp32 and -mfp64 can only be combined if the target"
- " supports the mfhc1 and mthc1 instructions");
- else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
- error ("unsupported combination: %s", "-mfp64 -msingle-float");
- }
- else
- {
- /* -msingle-float selects 32-bit float registers. Otherwise the
- float registers should be the same size as the integer ones. */
- if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
- target_flags |= MASK_FLOAT64;
- else
- target_flags &= ~MASK_FLOAT64;
- }
-
- /* End of code shared with GAS. */
-
- if ((target_flags_explicit & MASK_LONG64) == 0)
- {
- if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
- target_flags |= MASK_LONG64;
- else
- target_flags &= ~MASK_LONG64;
- }
-
- if (!TARGET_OLDABI)
- flag_pcc_struct_return = 0;
-
- if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
- {
- /* If neither -mbranch-likely nor -mno-branch-likely was given
- on the command line, set MASK_BRANCHLIKELY based on the target
- architecture and tuning flags. Annulled delay slots are a
- size win, so we only consider the processor-specific tuning
- for !optimize_size. */
- if (ISA_HAS_BRANCHLIKELY
- && (optimize_size
- || (mips_tune_info->tune_flags & PTF_AVOID_BRANCHLIKELY) == 0))
- target_flags |= MASK_BRANCHLIKELY;
- else
- target_flags &= ~MASK_BRANCHLIKELY;
- }
- else if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
- warning (0, "the %qs architecture does not support branch-likely"
- " instructions", mips_arch_info->name);
-
- /* The effect of -mabicalls isn't defined for the EABI. */
- if (mips_abi == ABI_EABI && TARGET_ABICALLS)
- {
- error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
- target_flags &= ~MASK_ABICALLS;
- }
-
- /* MIPS16 cannot generate PIC yet. */
- if (TARGET_MIPS16 && (flag_pic || TARGET_ABICALLS))
- {
- sorry ("MIPS16 PIC");
- target_flags &= ~MASK_ABICALLS;
- flag_pic = flag_pie = flag_shlib = 0;
- }
-
- if (TARGET_ABICALLS)
- /* We need to set flag_pic for executables as well as DSOs
- because we may reference symbols that are not defined in
- the final executable. (MIPS does not use things like
- copy relocs, for example.)
-
- Also, there is a body of code that uses __PIC__ to distinguish
- between -mabicalls and -mno-abicalls code. */
- flag_pic = 1;
-
- /* -mvr4130-align is a "speed over size" optimization: it usually produces
- faster code, but at the expense of more nops. Enable it at -O3 and
- above. */
- if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
- target_flags |= MASK_VR4130_ALIGN;
-
- /* Prefer a call to memcpy over inline code when optimizing for size,
- though see MOVE_RATIO in mips.h. */
- if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
- target_flags |= MASK_MEMCPY;
-
- /* If we have a nonzero small-data limit, check that the -mgpopt
- setting is consistent with the other target flags. */
- if (mips_section_threshold > 0)
- {
- if (!TARGET_GPOPT)
- {
- if (!TARGET_MIPS16 && !TARGET_EXPLICIT_RELOCS)
- error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
-
- TARGET_LOCAL_SDATA = false;
- TARGET_EXTERN_SDATA = false;
- }
- else
- {
- if (TARGET_VXWORKS_RTP)
- warning (0, "cannot use small-data accesses for %qs", "-mrtp");
-
- if (TARGET_ABICALLS)
- warning (0, "cannot use small-data accesses for %qs",
- "-mabicalls");
- }
- }
-
-#ifdef MIPS_TFMODE_FORMAT
- REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
-#endif
-
- /* Make sure that the user didn't turn off paired single support when
- MIPS-3D support is requested. */
- if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
- && !TARGET_PAIRED_SINGLE_FLOAT)
- error ("-mips3d requires -mpaired-single");
-
- /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
- if (TARGET_MIPS3D)
- target_flags |= MASK_PAIRED_SINGLE_FLOAT;
-
- /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
- and TARGET_HARD_FLOAT_ABI are both true. */
- if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI))
- error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
-
- /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
- enabled. */
- if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
- error ("-mips3d/-mpaired-single must be used with -mips64");
-
- /* If TARGET_DSPR2, enable MASK_DSP. */
- if (TARGET_DSPR2)
- target_flags |= MASK_DSP;
-
- mips_print_operand_punct['?'] = 1;
- mips_print_operand_punct['#'] = 1;
- mips_print_operand_punct['/'] = 1;
- mips_print_operand_punct['&'] = 1;
- mips_print_operand_punct['!'] = 1;
- mips_print_operand_punct['*'] = 1;
- mips_print_operand_punct['@'] = 1;
- mips_print_operand_punct['.'] = 1;
- mips_print_operand_punct['('] = 1;
- mips_print_operand_punct[')'] = 1;
- mips_print_operand_punct['['] = 1;
- mips_print_operand_punct[']'] = 1;
- mips_print_operand_punct['<'] = 1;
- mips_print_operand_punct['>'] = 1;
- mips_print_operand_punct['{'] = 1;
- mips_print_operand_punct['}'] = 1;
- mips_print_operand_punct['^'] = 1;
- mips_print_operand_punct['$'] = 1;
- mips_print_operand_punct['+'] = 1;
- mips_print_operand_punct['~'] = 1;
- mips_print_operand_punct['|'] = 1;
- mips_print_operand_punct['-'] = 1;
-
- /* Set up array to map GCC register number to debug register number.
- Ignore the special purpose register numbers. */
-
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- {
- mips_dbx_regno[i] = INVALID_REGNUM;
- if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
- mips_dwarf_regno[i] = i;
- else
- mips_dwarf_regno[i] = INVALID_REGNUM;
- }
-
- start = GP_DBX_FIRST - GP_REG_FIRST;
- for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
- mips_dbx_regno[i] = i + start;
-
- start = FP_DBX_FIRST - FP_REG_FIRST;
- for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
- mips_dbx_regno[i] = i + start;
-
- /* HI and LO debug registers use big-endian ordering. */
- mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
- mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
- mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
- mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
- for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
- {
- mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
- mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
- }
-
- /* Set up array giving whether a given register can hold a given mode. */
-
- for (mode = VOIDmode;
- mode != MAX_MACHINE_MODE;
- mode = (enum machine_mode) ((int)mode + 1))
- {
- register int size = GET_MODE_SIZE (mode);
- register enum mode_class class = GET_MODE_CLASS (mode);
-
- for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
- {
- register int temp;
-
- if (mode == CCV2mode)
- temp = (ISA_HAS_8CC
- && ST_REG_P (regno)
- && (regno - ST_REG_FIRST) % 2 == 0);
-
- else if (mode == CCV4mode)
- temp = (ISA_HAS_8CC
- && ST_REG_P (regno)
- && (regno - ST_REG_FIRST) % 4 == 0);
-
- else if (mode == CCmode)
- {
- if (! ISA_HAS_8CC)
- temp = (regno == FPSW_REGNUM);
- else
- temp = (ST_REG_P (regno) || GP_REG_P (regno)
- || FP_REG_P (regno));
- }
-
- else if (GP_REG_P (regno))
- temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
-
- else if (FP_REG_P (regno))
- temp = ((((regno % MAX_FPRS_PER_FMT) == 0)
- || (MIN_FPRS_PER_FMT == 1
- && size <= UNITS_PER_FPREG))
- && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
- || class == MODE_VECTOR_FLOAT)
- && size <= UNITS_PER_FPVALUE)
- /* Allow integer modes that fit into a single
- register. We need to put integers into FPRs
- when using instructions like cvt and trunc.
- We can't allow sizes smaller than a word,
- the FPU has no appropriate load/store
- instructions for those. */
- || (class == MODE_INT
- && size >= MIN_UNITS_PER_WORD
- && size <= UNITS_PER_FPREG)
- /* Allow TFmode for CCmode reloads. */
- || (ISA_HAS_8CC && mode == TFmode)));
-
- else if (ACC_REG_P (regno))
- temp = ((INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode))
- && size <= UNITS_PER_WORD * 2
- && (size <= UNITS_PER_WORD
- || regno == MD_REG_FIRST
- || (DSP_ACC_REG_P (regno)
- && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)));
-
- else if (ALL_COP_REG_P (regno))
- temp = (class == MODE_INT && size <= UNITS_PER_WORD);
- else
- temp = 0;
-
- mips_hard_regno_mode_ok[(int)mode][regno] = temp;
- }
- }
-
- /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
- initialized yet, so we can't use that here. */
- gpr_mode = TARGET_64BIT ? DImode : SImode;
-
- /* Function to allocate machine-dependent function status. */
- init_machine_status = &mips_init_machine_status;
-
- /* Default to working around R4000 errata only if the processor
- was selected explicitly. */
- if ((target_flags_explicit & MASK_FIX_R4000) == 0
- && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
- target_flags |= MASK_FIX_R4000;
-
- /* Default to working around R4400 errata only if the processor
- was selected explicitly. */
- if ((target_flags_explicit & MASK_FIX_R4400) == 0
- && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
- target_flags |= MASK_FIX_R4400;
-
- /* Save base state of options. */
- mips_base_mips16 = TARGET_MIPS16;
- mips_base_target_flags = target_flags;
- mips_base_schedule_insns = flag_schedule_insns;
- mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
- mips_base_move_loop_invariants = flag_move_loop_invariants;
- mips_base_align_loops = align_loops;
- mips_base_align_jumps = align_jumps;
- mips_base_align_functions = align_functions;
- mips_flag_delayed_branch = flag_delayed_branch;
-
- /* Now select the mips16 or 32-bit instruction set, as requested. */
- mips_set_mips16_mode (mips_base_mips16);
-}
-
-/* Swap the register information for registers I and I + 1, which
- currently have the wrong endianness. Note that the registers'
- fixedness and call-clobberedness might have been set on the
- command line. */
-
-static void
-mips_swap_registers (unsigned int i)
-{
- int tmpi;
- const char *tmps;
-
-#define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
-#define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
-
- SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
- SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
- SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
- SWAP_STRING (reg_names[i], reg_names[i + 1]);
-
-#undef SWAP_STRING
-#undef SWAP_INT
-}
-
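
An illustrative sketch, not part of the patch: the comma-expression SWAP_INT/SWAP_STRING idiom used by mips_swap_registers above, applied to small stand-in arrays instead of GCC's fixed_regs/reg_names tables.

#include <stdio.h>

int
main (void)
{
  int fixed[2] = { 1, 0 };
  const char *names[2] = { "hi", "lo" };
  int tmpi;
  const char *tmps;

  /* Each macro expands to a single comma expression, so a swap can be
     written wherever an expression is expected.  */
#define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
#define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)

  SWAP_INT (fixed[0], fixed[1]);
  SWAP_STRING (names[0], names[1]);

  printf ("%d %d %s %s\n", fixed[0], fixed[1], names[0], names[1]);
  return 0;
}
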
-/* Implement CONDITIONAL_REGISTER_USAGE. */
-
-void
-mips_conditional_register_usage (void)
-{
- if (!ISA_HAS_DSP)
- {
- int regno;
-
- for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
- fixed_regs[regno] = call_used_regs[regno] = 1;
- }
- if (!TARGET_HARD_FLOAT)
- {
- int regno;
-
- for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
- fixed_regs[regno] = call_used_regs[regno] = 1;
- for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
- fixed_regs[regno] = call_used_regs[regno] = 1;
- }
- else if (! ISA_HAS_8CC)
- {
- int regno;
-
- /* We only have a single condition code register. We
- implement this by hiding all the condition code registers,
- and generating RTL that refers directly to ST_REG_FIRST. */
- for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
- fixed_regs[regno] = call_used_regs[regno] = 1;
- }
- /* In mips16 mode, we permit the $t temporary registers to be used
- for reload. We prohibit the unused $s registers, since they
- are caller saved, and saving them via a mips16 register would
- probably waste more time than just reloading the value. */
- if (TARGET_MIPS16)
- {
- fixed_regs[18] = call_used_regs[18] = 1;
- fixed_regs[19] = call_used_regs[19] = 1;
- fixed_regs[20] = call_used_regs[20] = 1;
- fixed_regs[21] = call_used_regs[21] = 1;
- fixed_regs[22] = call_used_regs[22] = 1;
- fixed_regs[23] = call_used_regs[23] = 1;
- fixed_regs[26] = call_used_regs[26] = 1;
- fixed_regs[27] = call_used_regs[27] = 1;
- fixed_regs[30] = call_used_regs[30] = 1;
- }
- /* fp20-23 are now caller saved. */
- if (mips_abi == ABI_64)
- {
- int regno;
- for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
- call_really_used_regs[regno] = call_used_regs[regno] = 1;
- }
- /* Odd registers from fp21 to fp31 are now caller saved. */
- if (mips_abi == ABI_N32)
- {
- int regno;
- for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
- call_really_used_regs[regno] = call_used_regs[regno] = 1;
- }
- /* Make sure that double-register accumulator values are correctly
- ordered for the current endianness. */
- if (TARGET_LITTLE_ENDIAN)
- {
- int regno;
- mips_swap_registers (MD_REG_FIRST);
- for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
- mips_swap_registers (regno);
- }
-}
-
-/* Allocate a chunk of memory for per-function machine-dependent data. */
-static struct machine_function *
-mips_init_machine_status (void)
-{
- return ((struct machine_function *)
- ggc_alloc_cleared (sizeof (struct machine_function)));
-}
-
-/* On the mips16, we want to allocate $24 (T_REG) before other
- registers for instructions for which it is possible. This helps
- avoid shuffling registers around in order to set up for an xor,
- encouraging the compiler to use a cmp instead. */
-
-void
-mips_order_regs_for_local_alloc (void)
-{
- register int i;
-
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- reg_alloc_order[i] = i;
-
- if (TARGET_MIPS16)
- {
- /* It really doesn't matter where we put register 0, since it is
- a fixed register anyhow. */
- reg_alloc_order[0] = 24;
- reg_alloc_order[24] = 0;
- }
-}
-
-
-/* The MIPS debug format wants all automatic variables and arguments
- to be in terms of the virtual frame pointer (stack pointer before
- any adjustment in the function), while the MIPS 3.0 linker wants
- the frame pointer to be the stack pointer after the initial
- adjustment. So, we do the adjustment here. The arg pointer (which
- is eliminated) points to the virtual frame pointer, while the frame
- pointer (which may be eliminated) points to the stack pointer after
- the initial adjustments. */
-
-HOST_WIDE_INT
-mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
-{
- rtx offset2 = const0_rtx;
- rtx reg = eliminate_constant_term (addr, &offset2);
-
- if (offset == 0)
- offset = INTVAL (offset2);
-
- if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
- || reg == hard_frame_pointer_rtx)
- {
- HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
- ? compute_frame_size (get_frame_size ())
- : cfun->machine->frame.total_size;
-
- /* MIPS16 frame is smaller */
- if (frame_pointer_needed && TARGET_MIPS16)
- frame_size -= cfun->machine->frame.args_size;
-
- offset = offset - frame_size;
- }
-
- /* sdbout_parms does not want this to crash for unrecognized cases. */
-#if 0
- else if (reg != arg_pointer_rtx)
- fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
- addr);
-#endif
-
- return offset;
-}
-
/* If OP is an UNSPEC address, return the address to which it refers,
otherwise return OP itself. */
@@ -6470,6 +6266,27 @@ mips_strip_unspec_address (rtx op)
return op;
}
+/* Print symbolic operand OP, which is part of a HIGH or LO_SUM
+ in context CONTEXT. RELOCS is the array of relocations to use. */
+
+static void
+print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
+ const char **relocs)
+{
+ enum mips_symbol_type symbol_type;
+ const char *p;
+
+ symbol_type = mips_classify_symbolic_expression (op, context);
+ if (relocs[symbol_type] == 0)
+ fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
+
+ fputs (relocs[symbol_type], file);
+ output_addr_const (file, mips_strip_unspec_address (op));
+ for (p = relocs[symbol_type]; *p != 0; p++)
+ if (*p == '(')
+ fputc (')', file);
+}
+
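
An illustrative sketch, not part of the patch: the printing pattern in print_operand_reloc above, with a plain string standing in for the rtx operand and a hypothetical emit_reloc helper standing in for the relocs table lookup. The prefix is printed, then the operand, then one closing parenthesis for every '(' that appeared in the prefix.

#include <stdio.h>

static void
emit_reloc (FILE *file, const char *reloc, const char *symbol)
{
  const char *p;

  fputs (reloc, file);           /* the relocation operator, e.g. "%lo("  */
  fputs (symbol, file);          /* the symbolic operand  */
  for (p = reloc; *p != 0; p++)  /* balance every '(' in the prefix  */
    if (*p == '(')
      fputc (')', file);
}

int
main (void)
{
  emit_reloc (stdout, "%lo(", "symbol+4");   /* prints "%lo(symbol+4)"  */
  fputc ('\n', stdout);
  return 0;
}
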
/* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
@@ -6821,28 +6638,6 @@ print_operand (FILE *file, rtx op, int letter)
else
output_addr_const (file, mips_strip_unspec_address (op));
}
-
-
-/* Print symbolic operand OP, which is part of a HIGH or LO_SUM
- in context CONTEXT. RELOCS is the array of relocations to use. */
-
-static void
-print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
- const char **relocs)
-{
- enum mips_symbol_type symbol_type;
- const char *p;
-
- symbol_type = mips_classify_symbolic_expression (op, context);
- if (relocs[symbol_type] == 0)
- fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
-
- fputs (relocs[symbol_type], file);
- output_addr_const (file, mips_strip_unspec_address (op));
- for (p = relocs[symbol_type]; *p != 0; p++)
- if (*p == '(')
- fputc (')', file);
-}
/* Output address operand X to FILE. */
@@ -6877,6 +6672,195 @@ print_operand_address (FILE *file, rtx x)
gcc_unreachable ();
}
+/* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
+ FIRST is true if this is the first time handling this decl. */
+
+static void
+mips_encode_section_info (tree decl, rtx rtl, int first)
+{
+ default_encode_section_info (decl, rtl, first);
+
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ {
+ rtx symbol = XEXP (rtl, 0);
+ tree type = TREE_TYPE (decl);
+
+ if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
+ || mips_far_type_p (type))
+ SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
+ }
+}
+
+/* Implement TARGET_SELECT_RTX_SECTION. */
+
+static section *
+mips_select_rtx_section (enum machine_mode mode, rtx x,
+ unsigned HOST_WIDE_INT align)
+{
+ /* ??? Consider using mergeable small data sections. */
+ if (mips_rtx_constant_in_small_data_p (mode))
+ return get_named_section (NULL, ".sdata", 0);
+
+ return default_elf_select_rtx_section (mode, x, align);
+}
+
+/* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
+
+ The complication here is that, with the combination TARGET_ABICALLS
+ && !TARGET_GPWORD, jump tables will use absolute addresses, and should
+ therefore not be included in the read-only part of a DSO. Handle such
+ cases by selecting a normal data section instead of a read-only one.
+ The logic apes that in default_function_rodata_section. */
+
+static section *
+mips_function_rodata_section (tree decl)
+{
+ if (!TARGET_ABICALLS || TARGET_GPWORD)
+ return default_function_rodata_section (decl);
+
+ if (decl && DECL_SECTION_NAME (decl))
+ {
+ const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
+ if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
+ {
+ char *rname = ASTRDUP (name);
+ rname[14] = 'd';
+ return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
+ }
+ else if (flag_function_sections && flag_data_sections
+ && strncmp (name, ".text.", 6) == 0)
+ {
+ char *rname = ASTRDUP (name);
+ memcpy (rname + 1, "data", 4);
+ return get_section (rname, SECTION_WRITE, decl);
+ }
+ }
+ return data_section;
+}
+
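
An illustrative sketch, not part of the patch: the section-name rewriting in mips_function_rodata_section above, with plain strdup standing in for ASTRDUP and the section flags omitted. ".gnu.linkonce.t.foo" has the 't' at index 14 overwritten with 'd'; ".text.foo" has "text" overwritten with "data".

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
writable_twin_name (const char *name)
{
  char *rname = strdup (name);

  if (strncmp (name, ".gnu.linkonce.t.", 16) == 0)
    rname[14] = 'd';               /* .gnu.linkonce.t.foo -> .gnu.linkonce.d.foo  */
  else if (strncmp (name, ".text.", 6) == 0)
    memcpy (rname + 1, "data", 4); /* .text.foo -> .data.foo  */
  return rname;
}

int
main (void)
{
  char *a = writable_twin_name (".gnu.linkonce.t.foo");
  char *b = writable_twin_name (".text.foo");

  printf ("%s\n%s\n", a, b);
  free (a);
  free (b);
  return 0;
}
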
+/* Implement TARGET_IN_SMALL_DATA_P. This function controls whether
+ locally-defined objects go in a small data section. It also controls
+ the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
+ mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses. */
+
+static bool
+mips_in_small_data_p (const_tree decl)
+{
+ HOST_WIDE_INT size;
+
+ if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
+ return false;
+
+ /* We don't yet generate small-data references for -mabicalls or
+ VxWorks RTP code. See the related -G handling in override_options. */
+ if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
+ return false;
+
+ if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
+ {
+ const char *name;
+
+ /* Reject anything that isn't in a known small-data section. */
+ name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
+ if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
+ return false;
+
+ /* If a symbol is defined externally, the assembler will use the
+ usual -G rules when deciding how to implement macros. */
+ if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
+ return true;
+ }
+ else if (TARGET_EMBEDDED_DATA)
+ {
+ /* Don't put constants into the small data section: we want them
+ to be in ROM rather than RAM. */
+ if (TREE_CODE (decl) != VAR_DECL)
+ return false;
+
+ if (TREE_READONLY (decl)
+ && !TREE_SIDE_EFFECTS (decl)
+ && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
+ return false;
+ }
+
+ /* Enforce -mlocal-sdata. */
+ if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
+ return false;
+
+ /* Enforce -mextern-sdata. */
+ if (!TARGET_EXTERN_SDATA && DECL_P (decl))
+ {
+ if (DECL_EXTERNAL (decl))
+ return false;
+ if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
+ return false;
+ }
+
+ size = int_size_in_bytes (TREE_TYPE (decl));
+ return (size > 0 && size <= mips_section_threshold);
+}
+
+/* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
+ anchors for small data: the GP register acts as an anchor in that
+ case. We also don't want to use them for PC-relative accesses,
+ where the PC acts as an anchor. */
+
+static bool
+mips_use_anchors_for_symbol_p (const_rtx symbol)
+{
+ switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
+ {
+ case SYMBOL_PC_RELATIVE:
+ case SYMBOL_GP_RELATIVE:
+ return false;
+
+ default:
+ return default_use_anchors_for_symbol_p (symbol);
+ }
+}
+
+/* The MIPS debug format wants all automatic variables and arguments
+ to be in terms of the virtual frame pointer (stack pointer before
+ any adjustment in the function), while the MIPS 3.0 linker wants
+ the frame pointer to be the stack pointer after the initial
+ adjustment. So, we do the adjustment here. The arg pointer (which
+ is eliminated) points to the virtual frame pointer, while the frame
+ pointer (which may be eliminated) points to the stack pointer after
+ the initial adjustments. */
+
+HOST_WIDE_INT
+mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
+{
+ rtx offset2 = const0_rtx;
+ rtx reg = eliminate_constant_term (addr, &offset2);
+
+ if (offset == 0)
+ offset = INTVAL (offset2);
+
+ if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
+ || reg == hard_frame_pointer_rtx)
+ {
+ HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
+ ? compute_frame_size (get_frame_size ())
+ : cfun->machine->frame.total_size;
+
+ /* MIPS16 frame is smaller */
+ if (frame_pointer_needed && TARGET_MIPS16)
+ frame_size -= cfun->machine->frame.args_size;
+
+ offset = offset - frame_size;
+ }
+
+ /* sdbout_parms does not want this to crash for unrecognized cases. */
+#if 0
+ else if (reg != arg_pointer_rtx)
+ fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
+ addr);
+#endif
+
+ return offset;
+}
+
/* When using assembler macros, keep track of all of small-data externs
so that mips_file_end can emit the appropriate declarations for them.
@@ -6954,6 +6938,54 @@ mips_output_filename (FILE *stream, const char *name)
putc ('\n', stream);
}
}
+
+/* MIPS implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
+
+static void
+mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
+{
+ switch (size)
+ {
+ case 4:
+ fputs ("\t.dtprelword\t", file);
+ break;
+
+ case 8:
+ fputs ("\t.dtpreldword\t", file);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ output_addr_const (file, x);
+ fputs ("+0x8000", file);
+}
+
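
An illustrative sketch, not part of the patch: the directive selection in mips_output_dwarf_dtprel above, with a plain string standing in for the rtx operand. A 4-byte value gets .dtprelword, an 8-byte value .dtpreldword (the real code rejects any other size), and the operand is emitted with the same +0x8000 bias.

#include <stdio.h>

static void
output_dtprel (FILE *file, int size, const char *sym)
{
  fputs (size == 8 ? "\t.dtpreldword\t" : "\t.dtprelword\t", file);
  fputs (sym, file);
  fputs ("+0x8000\n", file);   /* same bias as the patch emits  */
}

int
main (void)
{
  output_dtprel (stdout, 4, "tls_var");
  output_dtprel (stdout, 8, "tls_var");
  return 0;
}
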
+/* Implement TARGET_DWARF_REGISTER_SPAN. */
+
+static rtx
+mips_dwarf_register_span (rtx reg)
+{
+ rtx high, low;
+ enum machine_mode mode;
+
+ /* By default, GCC maps increasing register numbers to increasing
+ memory locations, but paired FPRs are always little-endian,
+ regardless of the prevailing endianness. */
+ mode = GET_MODE (reg);
+ if (FP_REG_P (REGNO (reg))
+ && TARGET_BIG_ENDIAN
+ && MAX_FPRS_PER_FMT > 1
+ && GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
+ {
+ gcc_assert (GET_MODE_SIZE (mode) == UNITS_PER_HWFPVALUE);
+ high = mips_subword (reg, true);
+ low = mips_subword (reg, false);
+ return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, high, low));
+ }
+
+ return NULL_RTX;
+}
/* Output an ASCII string, in a space-saving way. PREFIX is the string
that should be written before the opening quote, such as "\t.ascii\t"
@@ -6997,65 +7029,6 @@ mips_output_ascii (FILE *stream, const char *string_param, size_t len,
}
fprintf (stream, "\"\n");
}
-
-/* Implement TARGET_ASM_FILE_START. */
-
-static void
-mips_file_start (void)
-{
- default_file_start ();
-
- if (!TARGET_IRIX)
- {
- /* Generate a special section to describe the ABI switches used to
- produce the resultant binary. This used to be done by the assembler
- setting bits in the ELF header's flags field, but we have run out of
- bits. GDB needs this information in order to be able to correctly
- debug these binaries. See the function mips_gdbarch_init() in
- gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
- causes unnecessary IRIX 6 ld warnings. */
- const char * abi_string = NULL;
-
- switch (mips_abi)
- {
- case ABI_32: abi_string = "abi32"; break;
- case ABI_N32: abi_string = "abiN32"; break;
- case ABI_64: abi_string = "abi64"; break;
- case ABI_O64: abi_string = "abiO64"; break;
- case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
- default:
- gcc_unreachable ();
- }
- /* Note - we use fprintf directly rather than calling switch_to_section
- because in this way we can avoid creating an allocated section. We
- do not want this section to take up any space in the running
- executable. */
- fprintf (asm_out_file, "\t.section .mdebug.%s\n\t.previous\n",
- abi_string);
-
- /* There is no ELF header flag to distinguish long32 forms of the
- EABI from long64 forms. Emit a special section to help tools
- such as GDB. Do the same for o64, which is sometimes used with
- -mlong64. */
- if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
- fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n"
- "\t.previous\n", TARGET_LONG64 ? 64 : 32);
-
-#ifdef HAVE_AS_GNU_ATTRIBUTE
- fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
- TARGET_HARD_FLOAT_ABI ? (TARGET_DOUBLE_FLOAT ? 1 : 2) : 3);
-#endif
- }
-
- /* Generate the pseudo ops that System V.4 wants. */
- if (TARGET_ABICALLS)
- fprintf (asm_out_file, "\t.abicalls\n");
-
- if (flag_verbose_asm)
- fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
- ASM_COMMENT_START,
- mips_section_threshold, mips_arch_info->name, mips_isa);
-}
#ifdef BSS_SECTION_ASM_OP
/* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
@@ -7078,32 +7051,30 @@ mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
}
#endif
-/* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
- elfos.h version, but we also need to handle -muninit-const-in-rodata. */
+/* Emit either a label, .comm, or .lcomm directive. When using assembler
+ macros, mark the symbol as written so that mips_file_end won't emit an
+ .extern for it. STREAM is the output file, NAME is the name of the
+ symbol, INIT_STRING is the string that should be written before the
+ symbol and FINAL_STRING is the string that should be written after it.
+ FINAL_STRING is a printf() format that consumes the remaining arguments. */
void
-mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
- unsigned HOST_WIDE_INT size,
- unsigned int align)
+mips_declare_object (FILE *stream, const char *name, const char *init_string,
+ const char *final_string, ...)
{
- /* If the target wants uninitialized const declarations in
- .rdata then don't put them in .comm. */
- if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
- && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
- && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
- {
- if (TREE_PUBLIC (decl) && DECL_NAME (decl))
- targetm.asm_out.globalize_label (stream, name);
+ va_list ap;
- switch_to_section (readonly_data_section);
- ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
- mips_declare_object (stream, name, "",
- ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
- size);
+ fputs (init_string, stream);
+ assemble_name (stream, name);
+ va_start (ap, final_string);
+ vfprintf (stream, final_string, ap);
+ va_end (ap);
+
+ if (!TARGET_EXPLICIT_RELOCS)
+ {
+ tree name_tree = get_identifier (name);
+ TREE_ASM_WRITTEN (name_tree) = 1;
}
- else
- mips_declare_common_object (stream, name, "\n\t.comm\t",
- size, align, true);
}
/* Declare a common object of SIZE bytes using asm directive INIT_STRING.
@@ -7130,30 +7101,32 @@ mips_declare_common_object (FILE *stream, const char *name,
size, align / BITS_PER_UNIT);
}
-/* Emit either a label, .comm, or .lcomm directive. When using assembler
- macros, mark the symbol as written so that mips_file_end won't emit an
- .extern for it. STREAM is the output file, NAME is the name of the
- symbol, INIT_STRING is the string that should be written before the
- symbol and FINAL_STRING is the string that should be written after it.
- FINAL_STRING is a printf() format that consumes the remaining arguments. */
+/* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
+ elfos.h version, but we also need to handle -muninit-const-in-rodata. */
void
-mips_declare_object (FILE *stream, const char *name, const char *init_string,
- const char *final_string, ...)
+mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
+ unsigned HOST_WIDE_INT size,
+ unsigned int align)
{
- va_list ap;
-
- fputs (init_string, stream);
- assemble_name (stream, name);
- va_start (ap, final_string);
- vfprintf (stream, final_string, ap);
- va_end (ap);
-
- if (!TARGET_EXPLICIT_RELOCS)
+ /* If the target wants uninitialized const declarations in
+ .rdata then don't put them in .comm. */
+ if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
+ && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
+ && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
{
- tree name_tree = get_identifier (name);
- TREE_ASM_WRITTEN (name_tree) = 1;
+ if (TREE_PUBLIC (decl) && DECL_NAME (decl))
+ targetm.asm_out.globalize_label (stream, name);
+
+ switch_to_section (readonly_data_section);
+ ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
+ mips_declare_object (stream, name, "",
+ ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
+ size);
}
+ else
+ mips_declare_common_object (stream, name, "\n\t.comm\t",
+ size, align, true);
}
#ifdef ASM_OUTPUT_SIZE_DIRECTIVE
@@ -7206,84 +7179,551 @@ mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
}
#endif
-/* Return true if X in context CONTEXT is a small data address that can
- be rewritten as a LO_SUM. */
+/* Implement TARGET_ASM_FILE_START. */
-static bool
-mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
+static void
+mips_file_start (void)
{
- enum mips_symbol_type symbol_type;
+ default_file_start ();
- return (TARGET_EXPLICIT_RELOCS
- && mips_symbolic_constant_p (x, context, &symbol_type)
- && symbol_type == SYMBOL_GP_RELATIVE);
+ if (!TARGET_IRIX)
+ {
+ /* Generate a special section to describe the ABI switches used to
+ produce the resultant binary. This used to be done by the assembler
+ setting bits in the ELF header's flags field, but we have run out of
+ bits. GDB needs this information in order to be able to correctly
+ debug these binaries. See the function mips_gdbarch_init() in
+ gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
+ causes unnecessary IRIX 6 ld warnings. */
+ const char * abi_string = NULL;
+
+ switch (mips_abi)
+ {
+ case ABI_32: abi_string = "abi32"; break;
+ case ABI_N32: abi_string = "abiN32"; break;
+ case ABI_64: abi_string = "abi64"; break;
+ case ABI_O64: abi_string = "abiO64"; break;
+ case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
+ default:
+ gcc_unreachable ();
+ }
+ /* Note - we use fprintf directly rather than calling switch_to_section
+ because in this way we can avoid creating an allocated section. We
+ do not want this section to take up any space in the running
+ executable. */
+ fprintf (asm_out_file, "\t.section .mdebug.%s\n\t.previous\n",
+ abi_string);
+
+ /* There is no ELF header flag to distinguish long32 forms of the
+ EABI from long64 forms. Emit a special section to help tools
+ such as GDB. Do the same for o64, which is sometimes used with
+ -mlong64. */
+ if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
+ fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n"
+ "\t.previous\n", TARGET_LONG64 ? 64 : 32);
+
+#ifdef HAVE_AS_GNU_ATTRIBUTE
+ fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
+ TARGET_HARD_FLOAT_ABI ? (TARGET_DOUBLE_FLOAT ? 1 : 2) : 3);
+#endif
+ }
+
+ /* Generate the pseudo ops that System V.4 wants. */
+ if (TARGET_ABICALLS)
+ fprintf (asm_out_file, "\t.abicalls\n");
+
+ if (flag_verbose_asm)
+ fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
+ ASM_COMMENT_START,
+ mips_section_threshold, mips_arch_info->name, mips_isa);
}
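
An illustrative sketch, not part of the patch: how the ".gnu_attribute 4" value in mips_file_start above is chosen, with two bool parameters standing in for TARGET_HARD_FLOAT_ABI and TARGET_DOUBLE_FLOAT. The ternary yields 1 for a hard-float ABI with doubles, 2 for a single-float-only hard-float ABI, and 3 otherwise.

#include <stdbool.h>
#include <stdio.h>

static int
fp_abi_attribute (bool hard_float_abi, bool double_float)
{
  /* Mirrors the ternary used for the ".gnu_attribute 4, %d" line.  */
  return hard_float_abi ? (double_float ? 1 : 2) : 3;
}

int
main (void)
{
  printf ("\t.gnu_attribute 4, %d\n", fp_abi_attribute (true, true));
  printf ("\t.gnu_attribute 4, %d\n", fp_abi_attribute (true, false));
  printf ("\t.gnu_attribute 4, %d\n", fp_abi_attribute (false, false));
  return 0;
}
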
+
+
+/* Make the last instruction frame related and note that it performs
+ the operation described by FRAME_PATTERN. */
+static void
+mips_set_frame_expr (rtx frame_pattern)
+{
+ rtx insn;
-/* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
- containing MEM, or null if none. */
+ insn = get_last_insn ();
+ RTX_FRAME_RELATED_P (insn) = 1;
+ REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+ frame_pattern,
+ REG_NOTES (insn));
+}
-static int
-mips_small_data_pattern_1 (rtx *loc, void *data)
+
+/* Return a frame-related rtx that stores REG at MEM.
+ REG must be a single register. */
+
+static rtx
+mips_frame_set (rtx mem, rtx reg)
{
- enum mips_symbol_context context;
+ rtx set;
- if (GET_CODE (*loc) == LO_SUM)
- return -1;
+ /* If we're saving the return address register and the dwarf return
+ address column differs from the hard register number, adjust the
+ note reg to refer to the former. */
+ if (REGNO (reg) == GP_REG_FIRST + 31
+ && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
+ reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
- if (MEM_P (*loc))
+ set = gen_rtx_SET (VOIDmode, mem, reg);
+ RTX_FRAME_RELATED_P (set) = 1;
+
+ return set;
+}
+
+/* If a MIPS16e SAVE or RESTORE instruction saves or restores register
+ mips16e_s2_s8_regs[X], it must also save the registers in indexes
+ X + 1 onwards. Likewise mips16e_a0_a3_regs. */
+static const unsigned char mips16e_s2_s8_regs[] = {
+ 30, 23, 22, 21, 20, 19, 18
+};
+static const unsigned char mips16e_a0_a3_regs[] = {
+ 4, 5, 6, 7
+};
+
+/* A list of the registers that can be saved by the MIPS16e SAVE instruction,
+ ordered from the uppermost in memory to the lowest in memory. */
+static const unsigned char mips16e_save_restore_regs[] = {
+ 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
+};
+
+/* Return the index of the lowest X in the range [0, SIZE) for which
+ bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
+
+static unsigned int
+mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
+ unsigned int size)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ if (BITSET_P (mask, regs[i]))
+ break;
+
+ return i;
+}
+
+/* *MASK_PTR is a mask of general purpose registers and *GP_REG_SIZE_PTR
+ is the number of bytes that they occupy. If *MASK_PTR contains REGS[X]
+ for some X in [0, SIZE), adjust *MASK_PTR and *GP_REG_SIZE_PTR so that
+ the same is true for all indexes (X, SIZE). */
+
+static void
+mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
+ unsigned int size, HOST_WIDE_INT *gp_reg_size_ptr)
+{
+ unsigned int i;
+
+ i = mips16e_find_first_register (*mask_ptr, regs, size);
+ for (i++; i < size; i++)
+ if (!BITSET_P (*mask_ptr, regs[i]))
+ {
+ *gp_reg_size_ptr += GET_MODE_SIZE (gpr_mode);
+ *mask_ptr |= 1 << regs[i];
+ }
+}
+
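
An illustrative sketch, not part of the patch: the mask-rounding rule implemented by mips16e_find_first_register and mips16e_mask_registers above, with a 4-byte save slot and an example mask assumed for the demonstration. Once the first register in the list is present in the mask, every later list entry is forced into the mask as well, and the extra save space is totted up.

#include <stdio.h>

#define BITSET_P(MASK, BIT) (((MASK) >> (BIT)) & 1)

static const unsigned char s2_s8_regs[] = { 30, 23, 22, 21, 20, 19, 18 };

static unsigned int
round_up_mask (unsigned int mask, const unsigned char *regs,
               unsigned int size, unsigned int *extra_bytes)
{
  unsigned int i;

  /* Find the first listed register that is already in the mask...  */
  for (i = 0; i < size; i++)
    if (BITSET_P (mask, regs[i]))
      break;

  /* ...then force every later listed register into the mask too,
     counting one (assumed 4-byte) save slot per added register.  */
  for (i++; i < size; i++)
    if (!BITSET_P (mask, regs[i]))
      {
        *extra_bytes += 4;
        mask |= 1u << regs[i];
      }
  return mask;
}

int
main (void)
{
  unsigned int extra = 0;
  unsigned int mask = round_up_mask (1u << 22, s2_s8_regs, 7, &extra);

  printf ("mask = 0x%x, extra = %u bytes\n", mask, extra);  /* 0x7c0000, 16  */
  return 0;
}
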
+/* Return a simplified form of X using the register values in REG_VALUES.
+ REG_VALUES[R] is the last value assigned to hard register R, or null
+ if R has not been modified.
+
+ This function is rather limited, but is good enough for our purposes. */
+
+static rtx
+mips16e_collect_propagate_value (rtx x, rtx *reg_values)
+{
+ rtx x0, x1;
+
+ x = avoid_constant_pool_reference (x);
+
+ if (UNARY_P (x))
{
- if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
- return 1;
- return -1;
+ x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
+ return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
+ x0, GET_MODE (XEXP (x, 0)));
}
- context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
- return mips_rewrite_small_data_p (*loc, context);
+ if (ARITHMETIC_P (x))
+ {
+ x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
+ x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
+ return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
+ }
+
+ if (REG_P (x)
+ && reg_values[REGNO (x)]
+ && !rtx_unstable_p (reg_values[REGNO (x)]))
+ return reg_values[REGNO (x)];
+
+ return x;
}
-/* Return true if OP refers to small data symbols directly, not through
- a LO_SUM. */
+/* Return true if (set DEST SRC) stores an argument register into its
+ caller-allocated save slot, storing the number of that argument
+ register in *REGNO_PTR if so. REG_VALUES is as for
+ mips16e_collect_propagate_value. */
-bool
-mips_small_data_pattern_p (rtx op)
+static bool
+mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
+ unsigned int *regno_ptr)
{
- return for_each_rtx (&op, mips_small_data_pattern_1, 0);
+ unsigned int argno, regno;
+ HOST_WIDE_INT offset, required_offset;
+ rtx addr, base;
+
+ /* Check that this is a word-mode store. */
+ if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
+ return false;
+
+ /* Check that the register being saved is an unmodified argument
+ register. */
+ regno = REGNO (src);
+ if (regno < GP_ARG_FIRST || regno > GP_ARG_LAST || reg_values[regno])
+ return false;
+ argno = regno - GP_ARG_FIRST;
+
+ /* Check whether the address is an appropriate stack pointer or
+ frame pointer access. The frame pointer is offset from the
+ stack pointer by the size of the outgoing arguments. */
+ addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
+ mips_split_plus (addr, &base, &offset);
+ required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
+ if (base == hard_frame_pointer_rtx)
+ required_offset -= cfun->machine->frame.args_size;
+ else if (base != stack_pointer_rtx)
+ return false;
+ if (offset != required_offset)
+ return false;
+
+ *regno_ptr = regno;
+ return true;
}
-
-/* A for_each_rtx callback, used by mips_rewrite_small_data.
- DATA is the containing MEM, or null if none. */
-static int
-mips_rewrite_small_data_1 (rtx *loc, void *data)
+/* A subroutine of mips_expand_prologue, called only when generating
+ MIPS16e SAVE instructions. Search the start of the function for any
+ instructions that save argument registers into their caller-allocated
+ save slots. Delete such instructions and return a value N such that
+ saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
+ instructions redundant. */
+
+static unsigned int
+mips16e_collect_argument_saves (void)
{
- enum mips_symbol_context context;
+ rtx reg_values[FIRST_PSEUDO_REGISTER];
+ rtx insn, next, set, dest, src;
+ unsigned int nargs, regno;
- if (MEM_P (*loc))
+ push_topmost_sequence ();
+ nargs = 0;
+ memset (reg_values, 0, sizeof (reg_values));
+ for (insn = get_insns (); insn; insn = next)
{
- for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
- return -1;
+ next = NEXT_INSN (insn);
+ if (NOTE_P (insn))
+ continue;
+
+ if (!INSN_P (insn))
+ break;
+
+ set = PATTERN (insn);
+ if (GET_CODE (set) != SET)
+ break;
+
+ dest = SET_DEST (set);
+ src = SET_SRC (set);
+ if (mips16e_collect_argument_save_p (dest, src, reg_values, &regno))
+ {
+ if (!BITSET_P (cfun->machine->frame.mask, regno))
+ {
+ delete_insn (insn);
+ nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
+ }
+ }
+ else if (REG_P (dest) && GET_MODE (dest) == word_mode)
+ reg_values[REGNO (dest)]
+ = mips16e_collect_propagate_value (src, reg_values);
+ else
+ break;
}
+ pop_topmost_sequence ();
- context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
- if (mips_rewrite_small_data_p (*loc, context))
- *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
+ return nargs;
+}
- if (GET_CODE (*loc) == LO_SUM)
- return -1;
+/* Return a move between register REGNO and memory location SP + OFFSET.
+ Make the move a load if RESTORE_P, otherwise make it a frame-related
+ store. */
- return 0;
+static rtx
+mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
+ unsigned int regno)
+{
+ rtx reg, mem;
+
+ mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
+ reg = gen_rtx_REG (SImode, regno);
+ return (restore_p
+ ? gen_rtx_SET (VOIDmode, reg, mem)
+ : mips_frame_set (mem, reg));
}
-/* If possible, rewrite OP so that it refers to small data using
- explicit relocations. */
+/* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
+ The instruction must:
-rtx
-mips_rewrite_small_data (rtx op)
+ - Allocate or deallocate SIZE bytes in total; SIZE is known
+ to be nonzero.
+
+ - Save or restore as many registers in *MASK_PTR as possible.
+ The instruction saves the first registers at the top of the
+ allocated area, with the other registers below it.
+
+ - Save NARGS argument registers above the allocated area.
+
+ (NARGS is always zero if RESTORE_P.)
+
+ The SAVE and RESTORE instructions cannot save and restore all general
+ registers, so there may be some registers left over for the caller to
+ handle. Destructively modify *MASK_PTR so that it contains the registers
+ that still need to be saved or restored. The caller can save these
+ registers in the memory immediately below *OFFSET_PTR, which is a
+ byte offset from the bottom of the allocated stack area. */
+
+static rtx
+mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
+ HOST_WIDE_INT *offset_ptr, unsigned int nargs,
+ HOST_WIDE_INT size)
{
- op = copy_insn (op);
- for_each_rtx (&op, mips_rewrite_small_data_1, 0);
- return op;
+ rtx pattern, set;
+ HOST_WIDE_INT offset, top_offset;
+ unsigned int i, regno;
+ int n;
+
+ gcc_assert (cfun->machine->frame.fp_reg_size == 0);
+
+ /* Calculate the number of elements in the PARALLEL. We need one element
+ for the stack adjustment, one for each argument register save, and one
+ for each additional register move. */
+ n = 1 + nargs;
+ for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
+ if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
+ n++;
+
+ /* Create the final PARALLEL. */
+ pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
+ n = 0;
+
+ /* Add the stack pointer adjustment. */
+ set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+ plus_constant (stack_pointer_rtx,
+ restore_p ? size : -size));
+ RTX_FRAME_RELATED_P (set) = 1;
+ XVECEXP (pattern, 0, n++) = set;
+
+ /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
+ top_offset = restore_p ? size : 0;
+
+ /* Save the arguments. */
+ for (i = 0; i < nargs; i++)
+ {
+ offset = top_offset + i * GET_MODE_SIZE (gpr_mode);
+ set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
+ XVECEXP (pattern, 0, n++) = set;
+ }
+
+ /* Then fill in the other register moves. */
+ offset = top_offset;
+ for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
+ {
+ regno = mips16e_save_restore_regs[i];
+ if (BITSET_P (*mask_ptr, regno))
+ {
+ offset -= UNITS_PER_WORD;
+ set = mips16e_save_restore_reg (restore_p, offset, regno);
+ XVECEXP (pattern, 0, n++) = set;
+ *mask_ptr &= ~(1 << regno);
+ }
+ }
+
+ /* Tell the caller what offset it should use for the remaining registers. */
+ *offset_ptr = size + (offset - top_offset) + size;
+
+ gcc_assert (n == XVECLEN (pattern, 0));
+
+ return pattern;
+}
+
+/* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
+ pointer. Return true if PATTERN matches the kind of instruction
+ generated by mips16e_build_save_restore. If INFO is nonnull,
+ initialize it when returning true. */
+
+bool
+mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
+ struct mips16e_save_restore_info *info)
+{
+ unsigned int i, nargs, mask;
+ HOST_WIDE_INT top_offset, save_offset, offset, extra;
+ rtx set, reg, mem, base;
+ int n;
+
+ if (!GENERATE_MIPS16E_SAVE_RESTORE)
+ return false;
+
+ /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
+ top_offset = adjust > 0 ? adjust : 0;
+
+ /* Interpret all other members of the PARALLEL. */
+ save_offset = top_offset - GET_MODE_SIZE (gpr_mode);
+ mask = 0;
+ nargs = 0;
+ i = 0;
+ for (n = 1; n < XVECLEN (pattern, 0); n++)
+ {
+ /* Check that we have a SET. */
+ set = XVECEXP (pattern, 0, n);
+ if (GET_CODE (set) != SET)
+ return false;
+
+ /* Check that the SET is a load (if restoring) or a store
+ (if saving). */
+ mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
+ if (!MEM_P (mem))
+ return false;
+
+ /* Check that the address is the sum of the stack pointer and a
+ possibly-zero constant offset. */
+ mips_split_plus (XEXP (mem, 0), &base, &offset);
+ if (base != stack_pointer_rtx)
+ return false;
+
+ /* Check that SET's other operand is a register. */
+ reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
+ if (!REG_P (reg))
+ return false;
+
+ /* Check for argument saves. */
+ if (offset == top_offset + nargs * GET_MODE_SIZE (gpr_mode)
+ && REGNO (reg) == GP_ARG_FIRST + nargs)
+ nargs++;
+ else if (offset == save_offset)
+ {
+ while (mips16e_save_restore_regs[i++] != REGNO (reg))
+ if (i == ARRAY_SIZE (mips16e_save_restore_regs))
+ return false;
+
+ mask |= 1 << REGNO (reg);
+ save_offset -= GET_MODE_SIZE (gpr_mode);
+ }
+ else
+ return false;
+ }
+
+ /* Check that the restrictions on register ranges are met. */
+ extra = 0;
+ mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
+ ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
+ mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
+ ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
+ if (extra != 0)
+ return false;
+
+ /* Make sure that the topmost argument register is not saved twice.
+ The checks above ensure that the same is then true for the other
+ argument registers. */
+ if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
+ return false;
+
+ /* Pass back information, if requested. */
+ if (info)
+ {
+ info->nargs = nargs;
+ info->mask = mask;
+ info->size = (adjust > 0 ? adjust : -adjust);
+ }
+
+ return true;
+}
+
+/* Add a MIPS16e SAVE or RESTORE register-range argument to string S
+ for the register range [MIN_REG, MAX_REG]. Return a pointer to
+ the null terminator. */
+
+static char *
+mips16e_add_register_range (char *s, unsigned int min_reg,
+ unsigned int max_reg)
+{
+ if (min_reg != max_reg)
+ s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
+ else
+ s += sprintf (s, ",%s", reg_names[min_reg]);
+ return s;
+}
+
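
An illustrative sketch, not part of the patch: the register-range formatting done by mips16e_add_register_range and used by mips16e_output_save_restore below, with register names hard-coded as strings. A single register prints as ",$reg", a wider range as ",$lo-$hi", appended to a static buffer after the mnemonic and frame size.

#include <stdio.h>
#include <string.h>

static char *
add_register_range (char *s, const char *min_name, const char *max_name)
{
  if (strcmp (min_name, max_name) != 0)
    s += sprintf (s, ",%s-%s", min_name, max_name);
  else
    s += sprintf (s, ",%s", min_name);
  return s;
}

int
main (void)
{
  static char buffer[300];
  char *s;

  s = strcpy (buffer, "save\t32");           /* mnemonic and frame size first  */
  s += strlen (s);
  s = add_register_range (s, "$16", "$17");  /* contiguous range  */
  s = add_register_range (s, "$31", "$31");  /* single register  */
  printf ("%s\n", buffer);                   /* save 32,$16-$17,$31  */
  return 0;
}
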
+/* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
+ PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
+
+const char *
+mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
+{
+ static char buffer[300];
+
+ struct mips16e_save_restore_info info;
+ unsigned int i, end;
+ char *s;
+
+ /* Parse the pattern. */
+ if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
+ gcc_unreachable ();
+
+ /* Add the mnemonic. */
+ s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
+ s += strlen (s);
+
+ /* Save the arguments. */
+ if (info.nargs > 1)
+ s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
+ reg_names[GP_ARG_FIRST + info.nargs - 1]);
+ else if (info.nargs == 1)
+ s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
+
+ /* Emit the amount of stack space to allocate or deallocate. */
+ s += sprintf (s, "%d", (int) info.size);
+
+ /* Save or restore $16. */
+ if (BITSET_P (info.mask, 16))
+ s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
+
+ /* Save or restore $17. */
+ if (BITSET_P (info.mask, 17))
+ s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
+
+ /* Save or restore registers in the range $s2...$s8, which
+ mips16e_s2_s8_regs lists in decreasing order. Note that this
+ is a software register range; the hardware registers are not
+ numbered consecutively. */
+ end = ARRAY_SIZE (mips16e_s2_s8_regs);
+ i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
+ if (i < end)
+ s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
+ mips16e_s2_s8_regs[i]);
+
+ /* Save or restore registers in the range $a0...$a3. */
+ end = ARRAY_SIZE (mips16e_a0_a3_regs);
+ i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
+ if (i < end)
+ s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
+ mips16e_a0_a3_regs[end - 1]);
+
+ /* Save or restore $31. */
+ if (BITSET_P (info.mask, 31))
+ s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
+
+ return buffer;
}
/* Return true if the current function has an insn that implicitly
@@ -7371,39 +7811,6 @@ mips_global_pointer (void)
return GLOBAL_POINTER_REGNUM;
}
-
-/* Return true if the function return value MODE will get returned in a
- floating-point register. */
-
-static bool
-mips_return_mode_in_fpr_p (enum machine_mode mode)
-{
- return ((GET_MODE_CLASS (mode) == MODE_FLOAT
- || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
- || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
- && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
-}
-
-/* Return a two-character string representing a function floating-point
- return mode, used to name MIPS16 function stubs. */
-
-static const char *
-mips16_call_stub_mode_suffix (enum machine_mode mode)
-{
- if (mode == SFmode)
- return "sf";
- else if (mode == DFmode)
- return "df";
- else if (mode == SCmode)
- return "sc";
- else if (mode == DCmode)
- return "dc";
- else if (mode == V2SFmode)
- return "df";
- else
- gcc_unreachable ();
-}
-
/* Return true if the current function returns its value in a floating-point
register in MIPS16 mode. */
@@ -7463,42 +7870,6 @@ mips_save_reg_p (unsigned int regno)
return false;
}
-/* Return the index of the lowest X in the range [0, SIZE) for which
- bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
-
-static unsigned int
-mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
- unsigned int size)
-{
- unsigned int i;
-
- for (i = 0; i < size; i++)
- if (BITSET_P (mask, regs[i]))
- break;
-
- return i;
-}
-
-/* *MASK_PTR is a mask of general purpose registers and *GP_REG_SIZE_PTR
- is the number of bytes that they occupy. If *MASK_PTR contains REGS[X]
- for some X in [0, SIZE), adjust *MASK_PTR and *GP_REG_SIZE_PTR so that
- the same is true for all indexes (X, SIZE). */
-
-static void
-mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
- unsigned int size, HOST_WIDE_INT *gp_reg_size_ptr)
-{
- unsigned int i;
-
- i = mips16e_find_first_register (*mask_ptr, regs, size);
- for (i++; i < size; i++)
- if (!BITSET_P (*mask_ptr, regs[i]))
- {
- *gp_reg_size_ptr += GET_MODE_SIZE (gpr_mode);
- *mask_ptr |= 1 << regs[i];
- }
-}
-
/* Return the bytes needed to compute the frame pointer from the current
stack pointer. SIZE is the size (in bytes) of the local variables.
@@ -7702,6 +8073,24 @@ compute_frame_size (HOST_WIDE_INT size)
/* Ok, we're done. */
return total_size;
}
+
+/* Return the style of GP load sequence that is being used for the
+ current function. */
+
+enum mips_loadgp_style
+mips_current_loadgp_style (void)
+{
+ if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
+ return LOADGP_NONE;
+
+ if (TARGET_RTP_PIC)
+ return LOADGP_RTP;
+
+ if (TARGET_ABSOLUTE_ABICALLS)
+ return LOADGP_ABSOLUTE;
+
+ return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
+}
/* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
pointer or argument pointer. TO is either the stack pointer or
@@ -7736,8 +8125,19 @@ mips_initial_elimination_offset (int from, int to)
return offset;
}
+/* Implement TARGET_EXTRA_LIVE_ON_ENTRY. Some code models use the incoming
+ value of PIC_FUNCTION_ADDR_REGNUM to set up the global pointer. */
+
+static void
+mips_extra_live_on_entry (bitmap regs)
+{
+ if (TARGET_USE_GOT && !TARGET_ABSOLUTE_ABICALLS)
+ bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
+}
+
/* Implement RETURN_ADDR_RTX. Note, we do not support moving
back to a previous frame. */
+
rtx
mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
{
@@ -7746,7 +8146,50 @@ mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
}
+
+/* Emit code to change the current function's return address to
+ ADDRESS. SCRATCH is available as a scratch register, if needed.
+ ADDRESS and SCRATCH are both word-mode GPRs. */
+
+void
+mips_set_return_address (rtx address, rtx scratch)
+{
+ rtx slot_address;
+
+ compute_frame_size (get_frame_size ());
+ gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
+ slot_address = mips_add_offset (scratch, stack_pointer_rtx,
+ cfun->machine->frame.gp_sp_offset);
+
+ mips_emit_move (gen_rtx_MEM (GET_MODE (address), slot_address), address);
+}
+
+/* Restore $gp from its save slot. Valid only when using o32 or
+ o64 abicalls. */
+
+void
+mips_restore_gp (void)
+{
+ rtx address, slot;
+
+ gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
+
+ address = mips_add_offset (pic_offset_table_rtx,
+ frame_pointer_needed
+ ? hard_frame_pointer_rtx
+ : stack_pointer_rtx,
+ current_function_outgoing_args_size);
+ slot = gen_rtx_MEM (Pmode, address);
+
+ mips_emit_move (pic_offset_table_rtx, slot);
+ if (!TARGET_EXPLICIT_RELOCS)
+ emit_insn (gen_blockage ());
+}
+/* A function to save or store a register. The first argument is the
+ register and the second is the stack slot. */
+typedef void (*mips_save_restore_fn) (rtx, rtx);
+
/* Use FN to save or restore register REGNO. MODE is the register's
mode and OFFSET is the offset of its save slot from the current
stack pointer. */
@@ -7813,69 +8256,6 @@ mips_output_cplocal (void)
output_asm_insn (".cplocal %+", 0);
}
-/* Return the style of GP load sequence that is being used for the
- current function. */
-
-enum mips_loadgp_style
-mips_current_loadgp_style (void)
-{
- if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
- return LOADGP_NONE;
-
- if (TARGET_RTP_PIC)
- return LOADGP_RTP;
-
- if (TARGET_ABSOLUTE_ABICALLS)
- return LOADGP_ABSOLUTE;
-
- return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
-}
-
-/* The __gnu_local_gp symbol. */
-
-static GTY(()) rtx mips_gnu_local_gp;
-
-/* If we're generating n32 or n64 abicalls, emit instructions
- to set up the global pointer. */
-
-static void
-mips_emit_loadgp (void)
-{
- rtx addr, offset, incoming_address, base, index;
-
- switch (mips_current_loadgp_style ())
- {
- case LOADGP_ABSOLUTE:
- if (mips_gnu_local_gp == NULL)
- {
- mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
- SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
- }
- emit_insn (gen_loadgp_absolute (mips_gnu_local_gp));
- break;
-
- case LOADGP_NEWABI:
- addr = XEXP (DECL_RTL (current_function_decl), 0);
- offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
- incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
- emit_insn (gen_loadgp_newabi (offset, incoming_address));
- if (!TARGET_EXPLICIT_RELOCS)
- emit_insn (gen_loadgp_blockage ());
- break;
-
- case LOADGP_RTP:
- base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
- index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
- emit_insn (gen_loadgp_rtp (base, index));
- if (!TARGET_EXPLICIT_RELOCS)
- emit_insn (gen_loadgp_blockage ());
- break;
-
- default:
- break;
- }
-}
-
/* Set up the stack and frame (if desired) for the function. */
static void
@@ -7975,45 +8355,42 @@ mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
explicit relocs or assembler macros. */
mips_output_cplocal ();
}
-
-/* Make the last instruction frame related and note that it performs
- the operation described by FRAME_PATTERN. */
-
-static void
-mips_set_frame_expr (rtx frame_pattern)
-{
- rtx insn;
-
- insn = get_last_insn ();
- RTX_FRAME_RELATED_P (insn) = 1;
- REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
- frame_pattern,
- REG_NOTES (insn));
-}
+/* Do any necessary cleanup after a function to restore stack, frame,
+ and regs. */
-/* Return a frame-related rtx that stores REG at MEM.
- REG must be a single register. */
+#define RA_MASK BITMASK_HIGH /* 1 << 31 */
-static rtx
-mips_frame_set (rtx mem, rtx reg)
+static void
+mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
- rtx set;
+ /* Reinstate the normal $gp. */
+ SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
+ mips_output_cplocal ();
- /* If we're saving the return address register and the dwarf return
- address column differs from the hard register number, adjust the
- note reg to refer to the former. */
- if (REGNO (reg) == GP_REG_FIRST + 31
- && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
- reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
+ if (cfun->machine->all_noreorder_p)
+ {
+ /* Avoid using %>%) since it adds excess whitespace. */
+ output_asm_insn (".set\tmacro", 0);
+ output_asm_insn (".set\treorder", 0);
+ set_noreorder = set_nomacro = 0;
+ }
- set = gen_rtx_SET (VOIDmode, mem, reg);
- RTX_FRAME_RELATED_P (set) = 1;
+ if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
+ {
+ const char *fnname;
- return set;
+ /* Get the function name the same way that toplev.c does before calling
+ assemble_start_function. This is needed so that the name used here
+ exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
+ fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
+ fputs ("\t.end\t", file);
+ assemble_name (file, fnname);
+ fputs ("\n", file);
+ }
}
-
-
+
/* Save register REG to MEM. Make the instruction frame-related. */
static void
@@ -8051,402 +8428,49 @@ mips_save_reg (rtx reg, rtx mem)
}
}
-/* Return a move between register REGNO and memory location SP + OFFSET.
- Make the move a load if RESTORE_P, otherwise make it a frame-related
- store. */
-
-static rtx
-mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
- unsigned int regno)
-{
- rtx reg, mem;
-
- mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
- reg = gen_rtx_REG (SImode, regno);
- return (restore_p
- ? gen_rtx_SET (VOIDmode, reg, mem)
- : mips_frame_set (mem, reg));
-}
-
-/* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
- The instruction must:
-
- - Allocate or deallocate SIZE bytes in total; SIZE is known
- to be nonzero.
-
- - Save or restore as many registers in *MASK_PTR as possible.
- The instruction saves the first registers at the top of the
- allocated area, with the other registers below it.
-
- - Save NARGS argument registers above the allocated area.
-
- (NARGS is always zero if RESTORE_P.)
-
- The SAVE and RESTORE instructions cannot save and restore all general
- registers, so there may be some registers left over for the caller to
- handle. Destructively modify *MASK_PTR so that it contains the registers
- that still need to be saved or restored. The caller can save these
- registers in the memory immediately below *OFFSET_PTR, which is a
- byte offset from the bottom of the allocated stack area. */
-
-static rtx
-mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
- HOST_WIDE_INT *offset_ptr, unsigned int nargs,
- HOST_WIDE_INT size)
-{
- rtx pattern, set;
- HOST_WIDE_INT offset, top_offset;
- unsigned int i, regno;
- int n;
-
- gcc_assert (cfun->machine->frame.fp_reg_size == 0);
-
- /* Calculate the number of elements in the PARALLEL. We need one element
- for the stack adjustment, one for each argument register save, and one
- for each additional register move. */
- n = 1 + nargs;
- for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
- if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
- n++;
-
- /* Create the final PARALLEL. */
- pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
- n = 0;
-
- /* Add the stack pointer adjustment. */
- set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- plus_constant (stack_pointer_rtx,
- restore_p ? size : -size));
- RTX_FRAME_RELATED_P (set) = 1;
- XVECEXP (pattern, 0, n++) = set;
-
- /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
- top_offset = restore_p ? size : 0;
-
- /* Save the arguments. */
- for (i = 0; i < nargs; i++)
- {
- offset = top_offset + i * GET_MODE_SIZE (gpr_mode);
- set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
- XVECEXP (pattern, 0, n++) = set;
- }
-
- /* Then fill in the other register moves. */
- offset = top_offset;
- for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
- {
- regno = mips16e_save_restore_regs[i];
- if (BITSET_P (*mask_ptr, regno))
- {
- offset -= UNITS_PER_WORD;
- set = mips16e_save_restore_reg (restore_p, offset, regno);
- XVECEXP (pattern, 0, n++) = set;
- *mask_ptr &= ~(1 << regno);
- }
- }
-
- /* Tell the caller what offset it should use for the remaining registers. */
- *offset_ptr = size + (offset - top_offset);
-
- gcc_assert (n == XVECLEN (pattern, 0));
+/* The __gnu_local_gp symbol. */
- return pattern;
-}
+static GTY(()) rtx mips_gnu_local_gp;
-/* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
- pointer. Return true if PATTERN matches the kind of instruction
- generated by mips16e_build_save_restore. If INFO is nonnull,
- initialize it when returning true. */
+/* If we're generating n32 or n64 abicalls, emit instructions
+ to set up the global pointer. */
-bool
-mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
- struct mips16e_save_restore_info *info)
+static void
+mips_emit_loadgp (void)
{
- unsigned int i, nargs, mask;
- HOST_WIDE_INT top_offset, save_offset, offset, extra;
- rtx set, reg, mem, base;
- int n;
-
- if (!GENERATE_MIPS16E_SAVE_RESTORE)
- return false;
-
- /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
- top_offset = adjust > 0 ? adjust : 0;
+ rtx addr, offset, incoming_address, base, index;
- /* Interpret all other members of the PARALLEL. */
- save_offset = top_offset - GET_MODE_SIZE (gpr_mode);
- mask = 0;
- nargs = 0;
- i = 0;
- for (n = 1; n < XVECLEN (pattern, 0); n++)
+ switch (mips_current_loadgp_style ())
{
- /* Check that we have a SET. */
- set = XVECEXP (pattern, 0, n);
- if (GET_CODE (set) != SET)
- return false;
-
- /* Check that the SET is a load (if restoring) or a store
- (if saving). */
- mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
- if (!MEM_P (mem))
- return false;
-
- /* Check that the address is the sum of the stack pointer and a
- possibly-zero constant offset. */
- mips_split_plus (XEXP (mem, 0), &base, &offset);
- if (base != stack_pointer_rtx)
- return false;
-
- /* Check that SET's other operand is a register. */
- reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
- if (!REG_P (reg))
- return false;
-
- /* Check for argument saves. */
- if (offset == top_offset + nargs * GET_MODE_SIZE (gpr_mode)
- && REGNO (reg) == GP_ARG_FIRST + nargs)
- nargs++;
- else if (offset == save_offset)
+ case LOADGP_ABSOLUTE:
+ if (mips_gnu_local_gp == NULL)
{
- while (mips16e_save_restore_regs[i++] != REGNO (reg))
- if (i == ARRAY_SIZE (mips16e_save_restore_regs))
- return false;
-
- mask |= 1 << REGNO (reg);
- save_offset -= GET_MODE_SIZE (gpr_mode);
+ mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
+ SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
}
- else
- return false;
- }
-
- /* Check that the restrictions on register ranges are met. */
- extra = 0;
- mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
- ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
- mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
- ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
- if (extra != 0)
- return false;
-
- /* Make sure that the topmost argument register is not saved twice.
- The checks above ensure that the same is then true for the other
- argument registers. */
- if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
- return false;
-
- /* Pass back information, if requested. */
- if (info)
- {
- info->nargs = nargs;
- info->mask = mask;
- info->size = (adjust > 0 ? adjust : -adjust);
- }
-
- return true;
-}
-
-/* Add a MIPS16e SAVE or RESTORE register-range argument to string S
- for the register range [MIN_REG, MAX_REG]. Return a pointer to
- the null terminator. */
-
-static char *
-mips16e_add_register_range (char *s, unsigned int min_reg,
- unsigned int max_reg)
-{
- if (min_reg != max_reg)
- s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
- else
- s += sprintf (s, ",%s", reg_names[min_reg]);
- return s;
-}
-
-/* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
- PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
-
-const char *
-mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
-{
- static char buffer[300];
-
- struct mips16e_save_restore_info info;
- unsigned int i, end;
- char *s;
-
- /* Parse the pattern. */
- if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
- gcc_unreachable ();
-
- /* Add the mnemonic. */
- s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
- s += strlen (s);
-
- /* Save the arguments. */
- if (info.nargs > 1)
- s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
- reg_names[GP_ARG_FIRST + info.nargs - 1]);
- else if (info.nargs == 1)
- s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
-
- /* Emit the amount of stack space to allocate or deallocate. */
- s += sprintf (s, "%d", (int) info.size);
-
- /* Save or restore $16. */
- if (BITSET_P (info.mask, 16))
- s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
-
- /* Save or restore $17. */
- if (BITSET_P (info.mask, 17))
- s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
-
- /* Save or restore registers in the range $s2...$s8, which
- mips16e_s2_s8_regs lists in decreasing order. Note that this
- is a software register range; the hardware registers are not
- numbered consecutively. */
- end = ARRAY_SIZE (mips16e_s2_s8_regs);
- i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
- if (i < end)
- s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
- mips16e_s2_s8_regs[i]);
-
- /* Save or restore registers in the range $a0...$a3. */
- end = ARRAY_SIZE (mips16e_a0_a3_regs);
- i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
- if (i < end)
- s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
- mips16e_a0_a3_regs[end - 1]);
-
- /* Save or restore $31. */
- if (BITSET_P (info.mask, 31))
- s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
-
- return buffer;
-}
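/* As a rough illustration of the string built above (assuming the default
   "$4"-style register names, two argument registers, a 32-byte adjustment
   and a mask containing $16, $17 and $31), the emitted assembly would be
   something like:

	save	$4-$5,32,$16,$17,$31

   i.e. the mnemonic, an optional argument-register range, the stack
   adjustment, and then the saved or restored registers.  */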
-
-/* Return a simplified form of X using the register values in REG_VALUES.
- REG_VALUES[R] is the last value assigned to hard register R, or null
- if R has not been modified.
-
- This function is rather limited, but is good enough for our purposes. */
-
-static rtx
-mips16e_collect_propagate_value (rtx x, rtx *reg_values)
-{
- rtx x0, x1;
-
- x = avoid_constant_pool_reference (x);
-
- if (UNARY_P (x))
- {
- x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
- return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
- x0, GET_MODE (XEXP (x, 0)));
- }
-
- if (ARITHMETIC_P (x))
- {
- x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
- x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
- return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
- }
-
- if (REG_P (x)
- && reg_values[REGNO (x)]
- && !rtx_unstable_p (reg_values[REGNO (x)]))
- return reg_values[REGNO (x)];
-
- return x;
-}
-
-/* Return true if (set DEST SRC) stores an argument register into its
- caller-allocated save slot, storing the number of that argument
- register in *REGNO_PTR if so. REG_VALUES is as for
- mips16e_collect_propagate_value. */
-
-static bool
-mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
- unsigned int *regno_ptr)
-{
- unsigned int argno, regno;
- HOST_WIDE_INT offset, required_offset;
- rtx addr, base;
-
- /* Check that this is a word-mode store. */
- if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
- return false;
-
- /* Check that the register being saved is an unmodified argument
- register. */
- regno = REGNO (src);
- if (regno < GP_ARG_FIRST || regno > GP_ARG_LAST || reg_values[regno])
- return false;
- argno = regno - GP_ARG_FIRST;
-
- /* Check whether the address is an appropriate stack pointer or
- frame pointer access. The frame pointer is offset from the
- stack pointer by the size of the outgoing arguments. */
- addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
- mips_split_plus (addr, &base, &offset);
- required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
- if (base == hard_frame_pointer_rtx)
- required_offset -= cfun->machine->frame.args_size;
- else if (base != stack_pointer_rtx)
- return false;
- if (offset != required_offset)
- return false;
-
- *regno_ptr = regno;
- return true;
-}
-
-/* A subroutine of mips_expand_prologue, called only when generating
- MIPS16e SAVE instructions. Search the start of the function for any
- instructions that save argument registers into their caller-allocated
- save slots. Delete such instructions and return a value N such that
- saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
- instructions redundant. */
-
-static unsigned int
-mips16e_collect_argument_saves (void)
-{
- rtx reg_values[FIRST_PSEUDO_REGISTER];
- rtx insn, next, set, dest, src;
- unsigned int nargs, regno;
-
- push_topmost_sequence ();
- nargs = 0;
- memset (reg_values, 0, sizeof (reg_values));
- for (insn = get_insns (); insn; insn = next)
- {
- next = NEXT_INSN (insn);
- if (NOTE_P (insn))
- continue;
+ emit_insn (gen_loadgp_absolute (mips_gnu_local_gp));
+ break;
- if (!INSN_P (insn))
- break;
+ case LOADGP_NEWABI:
+ addr = XEXP (DECL_RTL (current_function_decl), 0);
+ offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
+ incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
+ emit_insn (gen_loadgp_newabi (offset, incoming_address));
+ if (!TARGET_EXPLICIT_RELOCS)
+ emit_insn (gen_loadgp_blockage ());
+ break;
- set = PATTERN (insn);
- if (GET_CODE (set) != SET)
- break;
+ case LOADGP_RTP:
+ base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
+ index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
+ emit_insn (gen_loadgp_rtp (base, index));
+ if (!TARGET_EXPLICIT_RELOCS)
+ emit_insn (gen_loadgp_blockage ());
+ break;
- dest = SET_DEST (set);
- src = SET_SRC (set);
- if (mips16e_collect_argument_save_p (dest, src, reg_values, &regno))
- {
- if (!BITSET_P (cfun->machine->frame.mask, regno))
- {
- delete_insn (insn);
- nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
- }
- }
- else if (REG_P (dest) && GET_MODE (dest) == word_mode)
- reg_values[REGNO (dest)]
- = mips16e_collect_propagate_value (src, reg_values);
- else
- break;
+ default:
+ break;
}
- pop_topmost_sequence ();
-
- return nargs;
}
/* Expand the prologue into a bunch of separate insns. */
@@ -8586,41 +8610,6 @@ mips_expand_prologue (void)
emit_insn (gen_blockage ());
}
-/* Do any necessary cleanup after a function to restore stack, frame,
- and regs. */
-
-#define RA_MASK BITMASK_HIGH /* 1 << 31 */
-
-static void
-mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
- HOST_WIDE_INT size ATTRIBUTE_UNUSED)
-{
- /* Reinstate the normal $gp. */
- SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
- mips_output_cplocal ();
-
- if (cfun->machine->all_noreorder_p)
- {
- /* Avoid using %>%) since it adds excess whitespace. */
- output_asm_insn (".set\tmacro", 0);
- output_asm_insn (".set\treorder", 0);
- set_noreorder = set_nomacro = 0;
- }
-
- if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
- {
- const char *fnname;
-
- /* Get the function name the same way that toplev.c does before calling
- assemble_start_function. This is needed so that the name used here
- exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
- fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
- fputs ("\t.end\t", file);
- assemble_name (file, fnname);
- fputs ("\n", file);
- }
-}
-
/* Emit instructions to restore register REG from slot MEM. */
static void
@@ -8836,462 +8825,46 @@ mips_can_use_return_insn (void)
return compute_frame_size (get_frame_size ()) == 0;
}
-/* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
- in order to avoid duplicating too much logic from elsewhere. */
+/* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
+ by UNITS_PER_FPREG. The size of FP status registers is always 4, because
+ they only hold condition code modes, and CCmode is always considered to
+ be 4 bytes wide. All other registers are word sized. */
-static void
-mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
- HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
- tree function)
+unsigned int
+mips_hard_regno_nregs (int regno, enum machine_mode mode)
{
- rtx this, temp1, temp2, insn, fnaddr;
- bool use_sibcall_p;
-
- /* Pretend to be a post-reload pass while generating rtl. */
- reload_completed = 1;
-
- /* Mark the end of the (empty) prologue. */
- emit_note (NOTE_INSN_PROLOGUE_END);
-
- /* Determine if we can use a sibcall to call FUNCTION directly. */
- fnaddr = XEXP (DECL_RTL (function), 0);
- use_sibcall_p = (mips_function_ok_for_sibcall (function, NULL)
- && const_call_insn_operand (fnaddr, Pmode));
-
- /* Determine if we need to load FNADDR from the GOT. */
- if (!use_sibcall_p)
- switch (mips_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA))
- {
- case SYMBOL_GOT_PAGE_OFST:
- case SYMBOL_GOT_DISP:
- /* Pick a global pointer. Use a call-clobbered register if
- TARGET_CALL_SAVED_GP. */
- cfun->machine->global_pointer =
- TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
- SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
-
- /* Set up the global pointer for n32 or n64 abicalls. */
- mips_emit_loadgp ();
- break;
-
- default:
- break;
- }
-
- /* We need two temporary registers in some cases. */
- temp1 = gen_rtx_REG (Pmode, 2);
- temp2 = gen_rtx_REG (Pmode, 3);
-
- /* Find out which register contains the "this" pointer. */
- if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
- this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
- else
- this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
-
- /* Add DELTA to THIS. */
- if (delta != 0)
- {
- rtx offset = GEN_INT (delta);
- if (!SMALL_OPERAND (delta))
- {
- mips_emit_move (temp1, offset);
- offset = temp1;
- }
- emit_insn (gen_add3_insn (this, this, offset));
- }
-
- /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
- if (vcall_offset != 0)
- {
- rtx addr;
-
- /* Set TEMP1 to *THIS. */
- mips_emit_move (temp1, gen_rtx_MEM (Pmode, this));
-
- /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
- addr = mips_add_offset (temp2, temp1, vcall_offset);
-
- /* Load the offset and add it to THIS. */
- mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
- emit_insn (gen_add3_insn (this, this, temp1));
- }
-
- /* Jump to the target function. Use a sibcall if direct jumps are
- allowed, otherwise load the address into a register first. */
- if (use_sibcall_p)
- {
- insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
- SIBLING_CALL_P (insn) = 1;
- }
+ if (ST_REG_P (regno))
+ return ((GET_MODE_SIZE (mode) + 3) / 4);
+ else if (! FP_REG_P (regno))
+ return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
else
- {
- /* This is messy. gas treats "la $25,foo" as part of a call
- sequence and may allow a global "foo" to be lazily bound.
- The general move patterns therefore reject this combination.
-
- In this context, lazy binding would actually be OK
- for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
- TARGET_CALL_SAVED_GP; see mips_load_call_address.
- We must therefore load the address via a temporary
- register if mips_dangerous_for_la25_p.
-
- If we jump to the temporary register rather than $25, the assembler
- can use the move insn to fill the jump's delay slot. */
- if (TARGET_USE_PIC_FN_ADDR_REG
- && !mips_dangerous_for_la25_p (fnaddr))
- temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
- mips_load_call_address (temp1, fnaddr, true);
-
- if (TARGET_USE_PIC_FN_ADDR_REG
- && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
- mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
- emit_jump_insn (gen_indirect_jump (temp1));
- }
-
- /* Run just enough of rest_of_compilation. This sequence was
- "borrowed" from alpha.c. */
- insn = get_insns ();
- insn_locators_alloc ();
- split_all_insns_noflow ();
- mips16_lay_out_constants ();
- shorten_branches (insn);
- final_start_function (insn, file, 1);
- final (insn, file, 1);
- final_end_function ();
-
- /* Clean up the vars set above. Note that final_end_function resets
- the global pointer for us. */
- reload_completed = 0;
-}
-
-/* Implement TARGET_SELECT_RTX_SECTION. */
-
-static section *
-mips_select_rtx_section (enum machine_mode mode, rtx x,
- unsigned HOST_WIDE_INT align)
-{
- /* ??? Consider using mergeable small data sections. */
- if (mips_rtx_constant_in_small_data_p (mode))
- return get_named_section (NULL, ".sdata", 0);
-
- return default_elf_select_rtx_section (mode, x, align);
-}
-
-/* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
-
- The complication here is that, with the combination TARGET_ABICALLS
- && !TARGET_GPWORD, jump tables will use absolute addresses, and should
- therefore not be included in the read-only part of a DSO. Handle such
- cases by selecting a normal data section instead of a read-only one.
- The logic apes that in default_function_rodata_section. */
-
-static section *
-mips_function_rodata_section (tree decl)
-{
- if (!TARGET_ABICALLS || TARGET_GPWORD)
- return default_function_rodata_section (decl);
-
- if (decl && DECL_SECTION_NAME (decl))
- {
- const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
- if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
- {
- char *rname = ASTRDUP (name);
- rname[14] = 'd';
- return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
- }
- else if (flag_function_sections && flag_data_sections
- && strncmp (name, ".text.", 6) == 0)
- {
- char *rname = ASTRDUP (name);
- memcpy (rname + 1, "data", 4);
- return get_section (rname, SECTION_WRITE, decl);
- }
- }
- return data_section;
-}
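/* Concretely, when TARGET_ABICALLS && !TARGET_GPWORD: a jump table for a
   function placed in ".gnu.linkonce.t.foo" is redirected to
   ".gnu.linkonce.d.foo", one in ".text.foo" (with -ffunction-sections
   -fdata-sections) goes to ".data.foo", and anything else falls back to
   the plain data section, exactly as the code above spells out.  */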
-
-/* Implement TARGET_IN_SMALL_DATA_P. This function controls whether
- locally-defined objects go in a small data section. It also controls
- the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
- mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses. */
-
-static bool
-mips_in_small_data_p (const_tree decl)
-{
- HOST_WIDE_INT size;
-
- if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
- return false;
-
- /* We don't yet generate small-data references for -mabicalls or
- VxWorks RTP code. See the related -G handling in override_options. */
- if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
- return false;
-
- if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
- {
- const char *name;
-
- /* Reject anything that isn't in a known small-data section. */
- name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
- if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
- return false;
-
- /* If a symbol is defined externally, the assembler will use the
- usual -G rules when deciding how to implement macros. */
- if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
- return true;
- }
- else if (TARGET_EMBEDDED_DATA)
- {
- /* Don't put constants into the small data section: we want them
- to be in ROM rather than RAM. */
- if (TREE_CODE (decl) != VAR_DECL)
- return false;
-
- if (TREE_READONLY (decl)
- && !TREE_SIDE_EFFECTS (decl)
- && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
- return false;
- }
-
- /* Enforce -mlocal-sdata. */
- if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
- return false;
-
- /* Enforce -mextern-sdata. */
- if (!TARGET_EXTERN_SDATA && DECL_P (decl))
- {
- if (DECL_EXTERNAL (decl))
- return false;
- if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
- return false;
- }
-
- size = int_size_in_bytes (TREE_TYPE (decl));
- return (size > 0 && size <= mips_section_threshold);
-}
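/* For example, assuming the usual small-data threshold of 8 bytes: a
   global "int counter;" passes the size check above and is put in .sbss,
   so it can later be reached with a single %gp_rel(counter)($gp) access
   instead of a two-instruction absolute address sequence.  */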
-
-/* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
- anchors for small data: the GP register acts as an anchor in that
- case. We also don't want to use them for PC-relative accesses,
- where the PC acts as an anchor. */
-
-static bool
-mips_use_anchors_for_symbol_p (const_rtx symbol)
-{
- switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
- {
- case SYMBOL_PC_RELATIVE:
- case SYMBOL_GP_RELATIVE:
- return false;
-
- default:
- return default_use_anchors_for_symbol_p (symbol);
- }
-}
-
-/* See whether VALTYPE is a record whose fields should be returned in
- floating-point registers. If so, return the number of fields and
- list them in FIELDS (which should have two elements). Return 0
- otherwise.
-
- For n32 & n64, a structure with one or two fields is returned in
- floating-point registers as long as every field has a floating-point
- type. */
-
-static int
-mips_fpr_return_fields (const_tree valtype, tree *fields)
-{
- tree field;
- int i;
-
- if (!TARGET_NEWABI)
- return 0;
-
- if (TREE_CODE (valtype) != RECORD_TYPE)
- return 0;
-
- i = 0;
- for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
- {
- if (TREE_CODE (field) != FIELD_DECL)
- continue;
-
- if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
- return 0;
-
- if (i == 2)
- return 0;
-
- fields[i++] = field;
- }
- return i;
-}
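/* For example, under n32/n64 a return type of "struct { double a; double b; }"
   passes the checks above and comes back in $f0/$f2, whereas
   "struct { double a; int b; }" fails the REAL_TYPE test, so this function
   returns 0 and the value comes back in the integer result registers.  */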
-
-
-/* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
- a value in the most significant part of $2/$3 if:
-
- - the target is big-endian;
-
- - the value has a structure or union type (we generalize this to
- cover aggregates from other languages too); and
-
- - the structure is not returned in floating-point registers. */
-
-static bool
-mips_return_in_msb (const_tree valtype)
-{
- tree fields[2];
-
- return (TARGET_NEWABI
- && TARGET_BIG_ENDIAN
- && AGGREGATE_TYPE_P (valtype)
- && mips_fpr_return_fields (valtype, fields) == 0);
-}
-
-
-/* Return a composite value in a pair of floating-point registers.
- MODE1 and OFFSET1 are the mode and byte offset for the first value,
- likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
- complete value.
-
- For n32 & n64, $f0 always holds the first value and $f2 the second.
- Otherwise the values are packed together as closely as possible. */
-
-static rtx
-mips_return_fpr_pair (enum machine_mode mode,
- enum machine_mode mode1, HOST_WIDE_INT offset1,
- enum machine_mode mode2, HOST_WIDE_INT offset2)
-{
- int inc;
-
- inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
- return gen_rtx_PARALLEL
- (mode,
- gen_rtvec (2,
- gen_rtx_EXPR_LIST (VOIDmode,
- gen_rtx_REG (mode1, FP_RETURN),
- GEN_INT (offset1)),
- gen_rtx_EXPR_LIST (VOIDmode,
- gen_rtx_REG (mode2, FP_RETURN + inc),
- GEN_INT (offset2))));
-
+ return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
}
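/* A quick check of the arithmetic above, assuming a 32-bit configuration
   with UNITS_PER_WORD == 4 and UNITS_PER_FPREG == 4:

     CCmode (4 bytes) in an ST_REG:  (4 + 3) / 4      == 1 register
     DImode (8 bytes) in a GPR:      (8 + 4 - 1) / 4  == 2 registers
     DFmode (8 bytes) in an FPR:     (8 + 4 - 1) / 4  == 2 registers
                                     (1 when UNITS_PER_FPREG == 8).  */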
+/* Implement CLASS_MAX_NREGS.
-/* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
- VALTYPE is the return type and MODE is VOIDmode. For libcalls,
- VALTYPE is null and MODE is the mode of the return value. */
-
-rtx
-mips_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
- enum machine_mode mode)
-{
- if (valtype)
- {
- tree fields[2];
- int unsignedp;
-
- mode = TYPE_MODE (valtype);
- unsignedp = TYPE_UNSIGNED (valtype);
-
- /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
- true, we must promote the mode just as PROMOTE_MODE does. */
- mode = promote_mode (valtype, mode, &unsignedp, 1);
-
- /* Handle structures whose fields are returned in $f0/$f2. */
- switch (mips_fpr_return_fields (valtype, fields))
- {
- case 1:
- return gen_rtx_REG (mode, FP_RETURN);
-
- case 2:
- return mips_return_fpr_pair (mode,
- TYPE_MODE (TREE_TYPE (fields[0])),
- int_byte_position (fields[0]),
- TYPE_MODE (TREE_TYPE (fields[1])),
- int_byte_position (fields[1]));
- }
-
- /* If a value is passed in the most significant part of a register, see
- whether we have to round the mode up to a whole number of words. */
- if (mips_return_in_msb (valtype))
- {
- HOST_WIDE_INT size = int_size_in_bytes (valtype);
- if (size % UNITS_PER_WORD != 0)
- {
- size += UNITS_PER_WORD - size % UNITS_PER_WORD;
- mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
- }
- }
-
- /* For EABI, the class of return register depends entirely on MODE.
- For example, "struct { some_type x; }" and "union { some_type x; }"
- are returned in the same way as a bare "some_type" would be.
- Other ABIs only use FPRs for scalar, complex or vector types. */
- if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
- return gen_rtx_REG (mode, GP_RETURN);
- }
-
- if (!TARGET_MIPS16)
- {
- /* Handle long doubles for n32 & n64. */
- if (mode == TFmode)
- return mips_return_fpr_pair (mode,
- DImode, 0,
- DImode, GET_MODE_SIZE (mode) / 2);
-
- if (mips_return_mode_in_fpr_p (mode))
- {
- if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
- return mips_return_fpr_pair (mode,
- GET_MODE_INNER (mode), 0,
- GET_MODE_INNER (mode),
- GET_MODE_SIZE (mode) / 2);
- else
- return gen_rtx_REG (mode, FP_RETURN);
- }
- }
+ - UNITS_PER_FPREG controls the number of registers needed by FP_REGS.
- return gen_rtx_REG (mode, GP_RETURN);
-}
+ - ST_REGS always hold CCmode values, and CCmode values are
+ considered to be 4 bytes wide.
-/* Return nonzero when an argument must be passed by reference. */
+ All other register classes are covered by UNITS_PER_WORD. Note that
+ this is true even for unions of integer and float registers when the
+ latter are smaller than the former. The only supported combination
+ in which this occurs is -mgp64 -msingle-float, which has 64-bit
+ words but 32-bit float registers. A word-based calculation is correct
+ in that case since -msingle-float disallows multi-FPR values. */
-static bool
-mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
- enum machine_mode mode, const_tree type,
- bool named ATTRIBUTE_UNUSED)
+int
+mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
+ enum machine_mode mode)
{
- if (mips_abi == ABI_EABI)
- {
- int size;
-
- /* ??? How should SCmode be handled? */
- if (mode == DImode || mode == DFmode
- || mode == DQmode || mode == UDQmode
- || mode == DAmode || mode == UDAmode)
- return 0;
-
- size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
- return size == -1 || size > UNITS_PER_WORD;
- }
+ if (class == ST_REGS)
+ return (GET_MODE_SIZE (mode) + 3) / 4;
+ else if (class == FP_REGS)
+ return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
else
- {
- /* If we have a variable-sized parameter, we have no choice. */
- return targetm.calls.must_pass_in_stack (mode, type);
- }
-}
-
-static bool
-mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
- enum machine_mode mode ATTRIBUTE_UNUSED,
- const_tree type ATTRIBUTE_UNUSED, bool named)
-{
- return mips_abi == ABI_EABI && named;
+ return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
}
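/* A worked instance of the -mgp64 -msingle-float case described above
   (UNITS_PER_WORD == 8, 32-bit float registers): for a class containing
   both integer and float registers, DImode gives (8 + 8 - 1) / 8 == 1,
   which is right for the GPRs, and the FPRs never matter because
   -msingle-float keeps multi-FPR values out of them.  */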
/* Return true if registers of class CLASS cannot change from mode FROM
@@ -9332,19 +8905,6 @@ mips_cannot_change_mode_class (enum machine_mode from ATTRIBUTE_UNUSED,
|| GET_MODE_SIZE (to) > 4));
}
-/* Return true if X should not be moved directly into register $25.
- We need this because many versions of GAS will treat "la $25,foo" as
- part of a call sequence and so allow a global "foo" to be lazily bound. */
-
-bool
-mips_dangerous_for_la25_p (rtx x)
-{
- return (!TARGET_EXPLICIT_RELOCS
- && TARGET_USE_GOT
- && GET_CODE (x) == SYMBOL_REF
- && mips_global_symbol_p (x));
-}
-
/* Return true if moves in mode MODE can use the FPU's mov.fmt instruction. */
static bool
@@ -9387,6 +8947,89 @@ mips_preferred_reload_class (rtx x, enum reg_class class)
return class;
}
+/* Return a number assessing the cost of moving a register in class
+ FROM to class TO. The classes are expressed using the enumeration
+ values such as `GENERAL_REGS'. A value of 2 is the default; other
+ values are interpreted relative to that.
+
+ It is not required that the cost always equal 2 when FROM is the
+ same as TO; on some machines it is expensive to move between
+ registers if they are not general registers.
+
+ If reload sees an insn consisting of a single `set' between two
+ hard registers, and if `REGISTER_MOVE_COST' applied to their
+ classes returns a value of 2, reload does not check to ensure that
+ the constraints of the insn are met. Setting a cost of other than
+ 2 will allow reload to verify that the constraints are met. You
+ should do this if the `movM' pattern's constraints do not allow
+ such copying.
+
+ ??? We make the cost of moving from HI/LO into general
+ registers the same as for one of moving general registers to
+ HI/LO for TARGET_MIPS16 in order to prevent allocating a
+ pseudo to HI/LO. This might hurt optimizations though, it
+ isn't clear if it is wise. And it might not work in all cases. We
+ could solve the DImode LO reg problem by using a multiply, just
+ like reload_{in,out}si. We could solve the SImode/HImode HI reg
+ problem by using divide instructions. divu puts the remainder in
+ the HI reg, so doing a divide by -1 will move the value in the HI
+ reg for all values except -1. We could handle that case by using a
+ signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
+ a compare/branch to test the input value to see which instruction
+ we need to use. This gets pretty messy, but it is feasible. */
+
+int
+mips_register_move_cost (enum machine_mode mode,
+ enum reg_class to, enum reg_class from)
+{
+ if (TARGET_MIPS16)
+ {
+ if (reg_class_subset_p (from, GENERAL_REGS)
+ && reg_class_subset_p (to, GENERAL_REGS))
+ {
+ if (reg_class_subset_p (from, M16_REGS)
+ || reg_class_subset_p (to, M16_REGS))
+ return 2;
+ /* Two MOVEs. */
+ return 4;
+ }
+ }
+ else if (reg_class_subset_p (from, GENERAL_REGS))
+ {
+ if (reg_class_subset_p (to, GENERAL_REGS))
+ return 2;
+ if (reg_class_subset_p (to, FP_REGS))
+ return 4;
+ if (reg_class_subset_p (to, ALL_COP_AND_GR_REGS))
+ return 5;
+ if (reg_class_subset_p (to, ACC_REGS))
+ return 6;
+ }
+ else if (reg_class_subset_p (to, GENERAL_REGS))
+ {
+ if (reg_class_subset_p (from, FP_REGS))
+ return 4;
+ if (reg_class_subset_p (from, ST_REGS))
+ /* LUI followed by MOVF. */
+ return 4;
+ if (reg_class_subset_p (from, ALL_COP_AND_GR_REGS))
+ return 5;
+ if (reg_class_subset_p (from, ACC_REGS))
+ return 6;
+ }
+ else if (reg_class_subset_p (from, FP_REGS))
+ {
+ if (reg_class_subset_p (to, FP_REGS)
+ && mips_mode_ok_for_mov_fmt_p (mode))
+ return 4;
+ if (reg_class_subset_p (to, ST_REGS))
+ /* An expensive sequence. */
+ return 8;
+ }
+
+ return 12;
+}
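/* Read relative to the baseline cost of 2 for a GPR-to-GPR move: the
   numbers above make a GPR<->FPR copy cost two ordinary moves, a
   coprocessor or accumulator transfer slightly more, an FPR-to-ST_REG
   sequence four moves' worth, and anything not listed a conservative 12.
   They are relative weights for the allocator, not cycle counts.  */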
+
/* This function returns the register class required for a secondary
register when copying between one of the registers in CLASS, and X,
using MODE. If IN_P is nonzero, the copy is going from X to the
@@ -9473,30 +9116,15 @@ mips_secondary_reload_class (enum reg_class class,
return NO_REGS;
}
-/* Implement CLASS_MAX_NREGS.
-
- - UNITS_PER_FPREG controls the number of registers needed by FP_REGS.
-
- - ST_REGS are always hold CCmode values, and CCmode values are
- considered to be 4 bytes wide.
-
- All other register classes are covered by UNITS_PER_WORD. Note that
- this is true even for unions of integer and float registers when the
- latter are smaller than the former. The only supported combination
- in which case this occurs is -mgp64 -msingle-float, which has 64-bit
- words but 32-bit float registers. A word-based calculation is correct
- in that case since -msingle-float disallows multi-FPR values. */
+/* On 64-bit targets, SImode values are represented in registers as
+   sign-extended DImode values. */
-int
-mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
- enum machine_mode mode)
+static int
+mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
{
- if (class == ST_REGS)
- return (GET_MODE_SIZE (mode) + 3) / 4;
- else if (class == FP_REGS)
- return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
- else
- return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+ if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
+ return SIGN_EXTEND;
+
+ return UNKNOWN;
}
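/* Concretely, on a 64-bit MIPS target a 32-bit load such as "lw" already
   leaves its result sign-extended to 64 bits, so for

       int x = *p;
       long long y = x;

   the widening from SImode to DImode needs no extra instruction; returning
   SIGN_EXTEND above is what lets the optimizers exploit that.  */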
static bool
@@ -9541,1359 +9169,6 @@ mips_scalar_mode_supported_p (enum machine_mode mode)
return default_scalar_mode_supported_p (mode);
}
-
-/* If we can access small data directly (using gp-relative relocation
- operators) return the small data pointer, otherwise return null.
-
- For each mips16 function which refers to GP relative symbols, we
- use a pseudo register, initialized at the start of the function, to
- hold the $gp value. */
-
-static rtx
-mips16_gp_pseudo_reg (void)
-{
- if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
- cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
-
- /* Don't initialize the pseudo register if we are being called from
- the tree optimizers' cost-calculation routines. */
- if (!cfun->machine->initialized_mips16_gp_pseudo_p
- && (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl))
- {
- rtx insn, scan;
-
- /* We want to initialize this to a value which gcc will believe
- is constant. */
- insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
-
- push_topmost_sequence ();
- /* We need to emit the initialization after the FUNCTION_BEG
- note, so that it will be integrated. */
- for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
- if (NOTE_P (scan)
- && NOTE_KIND (scan) == NOTE_INSN_FUNCTION_BEG)
- break;
- if (scan == NULL_RTX)
- scan = get_insns ();
- insn = emit_insn_after (insn, scan);
- pop_topmost_sequence ();
-
- cfun->machine->initialized_mips16_gp_pseudo_p = true;
- }
-
- return cfun->machine->mips16_gp_pseudo_rtx;
-}
-
-/* Write out code to move floating point arguments in or out of
- general registers. Output the instructions to FILE. FP_CODE is
- the code describing which arguments are present (see the comment at
- the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
- we are copying from the floating point registers. */
-
-static void
-mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
-{
- const char *s;
- int gparg, fparg;
- unsigned int f;
- CUMULATIVE_ARGS cum;
-
- /* This code only works for the original 32-bit ABI and the O64 ABI. */
- gcc_assert (TARGET_OLDABI);
-
- if (from_fp_p)
- s = "mfc1";
- else
- s = "mtc1";
-
- init_cumulative_args (&cum, NULL, NULL);
-
- for (f = (unsigned int) fp_code; f != 0; f >>= 2)
- {
- enum machine_mode mode;
- struct mips_arg_info info;
-
- if ((f & 3) == 1)
- mode = SFmode;
- else if ((f & 3) == 2)
- mode = DFmode;
- else
- gcc_unreachable ();
-
- mips_arg_info (&cum, mode, NULL, true, &info);
- gparg = mips_arg_regno (&info, false);
- fparg = mips_arg_regno (&info, true);
-
- if (mode == SFmode)
- fprintf (file, "\t%s\t%s,%s\n", s,
- reg_names[gparg], reg_names[fparg]);
- else if (TARGET_64BIT)
- fprintf (file, "\td%s\t%s,%s\n", s,
- reg_names[gparg], reg_names[fparg]);
- else if (ISA_HAS_MXHC1)
- /* -mips32r2 -mfp64 */
- fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n",
- s,
- reg_names[gparg + (WORDS_BIG_ENDIAN ? 1 : 0)],
- reg_names[fparg],
- from_fp_p ? "mfhc1" : "mthc1",
- reg_names[gparg + (WORDS_BIG_ENDIAN ? 0 : 1)],
- reg_names[fparg]);
- else if (TARGET_BIG_ENDIAN)
- fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
- reg_names[gparg], reg_names[fparg + 1], s,
- reg_names[gparg + 1], reg_names[fparg]);
- else
- fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
- reg_names[gparg], reg_names[fparg], s,
- reg_names[gparg + 1], reg_names[fparg + 1]);
-
- function_arg_advance (&cum, mode, NULL, true);
- }
-}
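/* For a single "float" argument under o32, the loop above typically emits
   something like "mfc1 $4,$f12" (when copying out of the FPRs) or
   "mtc1 $4,$f12" (when copying into them); "double" arguments use the
   paired mfhc1/mthc1 or two-instruction forms shown in the branches.  */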
-
-/* Build a mips16 function stub. This is used for functions which
- take arguments in the floating point registers. It is 32-bit code
- that moves the floating point args into the general registers, and
- then jumps to the 16-bit code. */
-
-static void
-build_mips16_function_stub (FILE *file)
-{
- const char *fnname;
- char *secname, *stubname;
- tree stubid, stubdecl;
- int need_comma;
- unsigned int f;
-
- fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
- fnname = targetm.strip_name_encoding (fnname);
- secname = (char *) alloca (strlen (fnname) + 20);
- sprintf (secname, ".mips16.fn.%s", fnname);
- stubname = (char *) alloca (strlen (fnname) + 20);
- sprintf (stubname, "__fn_stub_%s", fnname);
- stubid = get_identifier (stubname);
- stubdecl = build_decl (FUNCTION_DECL, stubid,
- build_function_type (void_type_node, NULL_TREE));
- DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
- DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
-
- fprintf (file, "\t# Stub function for %s (", current_function_name ());
- need_comma = 0;
- for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
- {
- fprintf (file, "%s%s",
- need_comma ? ", " : "",
- (f & 3) == 1 ? "float" : "double");
- need_comma = 1;
- }
- fprintf (file, ")\n");
-
- fprintf (file, "\t.set\tnomips16\n");
- switch_to_section (function_section (stubdecl));
- ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
-
- /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
- within a .ent, and we cannot emit another .ent. */
- if (!FUNCTION_NAME_ALREADY_DECLARED)
- {
- fputs ("\t.ent\t", file);
- assemble_name (file, stubname);
- fputs ("\n", file);
- }
-
- assemble_name (file, stubname);
- fputs (":\n", file);
-
- /* We don't want the assembler to insert any nops here. */
- fprintf (file, "\t.set\tnoreorder\n");
-
- mips16_fp_args (file, current_function_args_info.fp_code, 1);
-
- fprintf (asm_out_file, "\t.set\tnoat\n");
- fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
- assemble_name (file, fnname);
- fprintf (file, "\n");
- fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
- fprintf (asm_out_file, "\t.set\tat\n");
-
- /* Unfortunately, we can't fill the jump delay slot. We can't fill
- with one of the mfc1 instructions, because the result is not
- available for one instruction, so if the very first instruction
- in the function refers to the register, it will see the wrong
- value. */
- fprintf (file, "\tnop\n");
-
- fprintf (file, "\t.set\treorder\n");
-
- if (!FUNCTION_NAME_ALREADY_DECLARED)
- {
- fputs ("\t.end\t", file);
- assemble_name (file, stubname);
- fputs ("\n", file);
- }
-
- switch_to_section (function_section (current_function_decl));
-}
-
-/* We keep a list of functions for which we have already built stubs
- in build_mips16_call_stub. */
-
-struct mips16_stub
-{
- struct mips16_stub *next;
- char *name;
- int fpret;
-};
-
-static struct mips16_stub *mips16_stubs;
-
-/* Emit code to return a double value from a mips16 stub. GPREG is the
- first GP reg to use, FPREG is the first FP reg to use. */
-
-static void
-mips16_fpret_double (int gpreg, int fpreg)
-{
- if (TARGET_64BIT)
- fprintf (asm_out_file, "\tdmfc1\t%s,%s\n",
- reg_names[gpreg], reg_names[fpreg]);
- else if (TARGET_FLOAT64)
- {
- fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
- reg_names[gpreg + WORDS_BIG_ENDIAN],
- reg_names[fpreg]);
- fprintf (asm_out_file, "\tmfhc1\t%s,%s\n",
- reg_names[gpreg + !WORDS_BIG_ENDIAN],
- reg_names[fpreg]);
- }
- else
- {
- if (TARGET_BIG_ENDIAN)
- {
- fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
- reg_names[gpreg + 0],
- reg_names[fpreg + 1]);
- fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
- reg_names[gpreg + 1],
- reg_names[fpreg + 0]);
- }
- else
- {
- fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
- reg_names[gpreg + 0],
- reg_names[fpreg + 0]);
- fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
- reg_names[gpreg + 1],
- reg_names[fpreg + 1]);
- }
- }
-}
-
-/* Build a call stub for a mips16 call. A stub is needed if we are
- passing any floating point values which should go into the floating
- point registers. If we are, and the call turns out to be to a
- 32-bit function, the stub will be used to move the values into the
- floating point registers before calling the 32-bit function. The
- linker will magically adjust the function call to either the 16-bit
- function or the 32-bit stub, depending upon where the function call
- is actually defined.
-
- Similarly, we need a stub if the return value might come back in a
- floating point register.
-
- RETVAL is the location of the return value, or null if this is
- a call rather than a call_value. FN is the address of the
- function and ARG_SIZE is the size of the arguments. FP_CODE
- is the code built by function_arg. This function returns a nonzero
- value if it builds the call instruction itself. */
-
-int
-build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
-{
- int fpret = 0;
- const char *fnname;
- char *secname, *stubname;
- struct mips16_stub *l;
- tree stubid, stubdecl;
- int need_comma;
- unsigned int f;
- rtx insn;
-
- /* We don't need to do anything if we aren't in mips16 mode, or if
- we were invoked with the -msoft-float option. */
- if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
- return 0;
-
- /* Figure out whether the value might come back in a floating point
- register. */
- if (retval)
- fpret = mips_return_mode_in_fpr_p (GET_MODE (retval));
-
- /* We don't need to do anything if there were no floating point
- arguments and the value will not be returned in a floating point
- register. */
- if (fp_code == 0 && ! fpret)
- return 0;
-
- /* We don't need to do anything if this is a call to a special
- mips16 support function. */
- if (GET_CODE (fn) == SYMBOL_REF
- && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
- return 0;
-
- /* This code will only work for o32 and o64 abis. The other ABI's
- require more sophisticated support. */
- gcc_assert (TARGET_OLDABI);
-
- /* If we're calling via a function pointer, then we must always call
- via a stub. There are magic stubs provided in libgcc.a for each
- of the required cases. Each of them expects the function address
- to arrive in register $2. */
-
- if (GET_CODE (fn) != SYMBOL_REF)
- {
- char buf[30];
- tree id;
- rtx stub_fn, insn;
-
- /* ??? If this code is modified to support other ABI's, we need
- to handle PARALLEL return values here. */
-
- if (fpret)
- sprintf (buf, "__mips16_call_stub_%s_%d",
- mips16_call_stub_mode_suffix (GET_MODE (retval)),
- fp_code);
- else
- sprintf (buf, "__mips16_call_stub_%d",
- fp_code);
-
- id = get_identifier (buf);
- stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
-
- mips_emit_move (gen_rtx_REG (Pmode, 2), fn);
-
- if (retval == NULL_RTX)
- insn = gen_call_internal (stub_fn, arg_size);
- else
- insn = gen_call_value_internal (retval, stub_fn, arg_size);
- insn = emit_call_insn (insn);
-
- /* Put the register usage information on the CALL. */
- CALL_INSN_FUNCTION_USAGE (insn) =
- gen_rtx_EXPR_LIST (VOIDmode,
- gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
- CALL_INSN_FUNCTION_USAGE (insn));
-
- /* If we are handling a floating point return value, we need to
- save $18 in the function prologue. Putting a note on the
- call will mean that df_regs_ever_live_p ($18) will be true if the
- call is not eliminated, and we can check that in the prologue
- code. */
- if (fpret)
- CALL_INSN_FUNCTION_USAGE (insn) =
- gen_rtx_EXPR_LIST (VOIDmode,
- gen_rtx_USE (VOIDmode,
- gen_rtx_REG (word_mode, 18)),
- CALL_INSN_FUNCTION_USAGE (insn));
-
- /* Return 1 to tell the caller that we've generated the call
- insn. */
- return 1;
- }
-
- /* We know the function we are going to call. If we have already
- built a stub, we don't need to do anything further. */
-
- fnname = targetm.strip_name_encoding (XSTR (fn, 0));
- for (l = mips16_stubs; l != NULL; l = l->next)
- if (strcmp (l->name, fnname) == 0)
- break;
-
- if (l == NULL)
- {
- /* Build a special purpose stub. When the linker sees a
- function call in mips16 code, it will check where the target
- is defined. If the target is a 32-bit call, the linker will
- search for the section defined here. It can tell which
- symbol this section is associated with by looking at the
- relocation information (the name is unreliable, since this
- might be a static function). If such a section is found, the
- linker will redirect the call to the start of the magic
- section.
-
- If the function does not return a floating point value, the
- special stub section is named
- .mips16.call.FNNAME
-
- If the function does return a floating point value, the stub
- section is named
- .mips16.call.fp.FNNAME
- */
-
- secname = (char *) alloca (strlen (fnname) + 40);
- sprintf (secname, ".mips16.call.%s%s",
- fpret ? "fp." : "",
- fnname);
- stubname = (char *) alloca (strlen (fnname) + 20);
- sprintf (stubname, "__call_stub_%s%s",
- fpret ? "fp_" : "",
- fnname);
- stubid = get_identifier (stubname);
- stubdecl = build_decl (FUNCTION_DECL, stubid,
- build_function_type (void_type_node, NULL_TREE));
- DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
- DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
-
- fprintf (asm_out_file, "\t# Stub function to call %s%s (",
- (fpret
- ? (GET_MODE (retval) == SFmode ? "float " : "double ")
- : ""),
- fnname);
- need_comma = 0;
- for (f = (unsigned int) fp_code; f != 0; f >>= 2)
- {
- fprintf (asm_out_file, "%s%s",
- need_comma ? ", " : "",
- (f & 3) == 1 ? "float" : "double");
- need_comma = 1;
- }
- fprintf (asm_out_file, ")\n");
-
- fprintf (asm_out_file, "\t.set\tnomips16\n");
- assemble_start_function (stubdecl, stubname);
-
- if (!FUNCTION_NAME_ALREADY_DECLARED)
- {
- fputs ("\t.ent\t", asm_out_file);
- assemble_name (asm_out_file, stubname);
- fputs ("\n", asm_out_file);
-
- assemble_name (asm_out_file, stubname);
- fputs (":\n", asm_out_file);
- }
-
- /* We build the stub code by hand. That's the only way we can
- do it, since we can't generate 32-bit code during a 16-bit
- compilation. */
-
- /* We don't want the assembler to insert any nops here. */
- fprintf (asm_out_file, "\t.set\tnoreorder\n");
-
- mips16_fp_args (asm_out_file, fp_code, 0);
-
- if (! fpret)
- {
- fprintf (asm_out_file, "\t.set\tnoat\n");
- fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
- fnname);
- fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
- fprintf (asm_out_file, "\t.set\tat\n");
- /* Unfortunately, we can't fill the jump delay slot. We
- can't fill with one of the mtc1 instructions, because the
- result is not available for one instruction, so if the
- very first instruction in the function refers to the
- register, it will see the wrong value. */
- fprintf (asm_out_file, "\tnop\n");
- }
- else
- {
- fprintf (asm_out_file, "\tmove\t%s,%s\n",
- reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
- fprintf (asm_out_file, "\tjal\t%s\n", fnname);
- /* As above, we can't fill the delay slot. */
- fprintf (asm_out_file, "\tnop\n");
- switch (GET_MODE (retval))
- {
- case SCmode:
- fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
- reg_names[GP_REG_FIRST + 3],
- reg_names[FP_REG_FIRST + MAX_FPRS_PER_FMT]);
- /* Fall through. */
- case SFmode:
- fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
- reg_names[GP_REG_FIRST + 2],
- reg_names[FP_REG_FIRST + 0]);
- if (GET_MODE (retval) == SCmode && TARGET_64BIT)
- {
- /* On 64-bit targets, complex floats are returned in
- a single GPR, such that "sd" on a suitably-aligned
- target would store the value correctly. */
- fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
- reg_names[GP_REG_FIRST + 2 + TARGET_LITTLE_ENDIAN],
- reg_names[GP_REG_FIRST + 2 + TARGET_LITTLE_ENDIAN]);
- fprintf (asm_out_file, "\tor\t%s,%s,%s\n",
- reg_names[GP_REG_FIRST + 2],
- reg_names[GP_REG_FIRST + 2],
- reg_names[GP_REG_FIRST + 3]);
- }
- break;
-
- case DCmode:
- mips16_fpret_double (GP_REG_FIRST + 2 + (8 / UNITS_PER_WORD),
- FP_REG_FIRST + MAX_FPRS_PER_FMT);
- /* Fall through. */
- case DFmode:
- case V2SFmode:
- mips16_fpret_double (GP_REG_FIRST + 2, FP_REG_FIRST + 0);
- break;
-
- default:
- gcc_unreachable ();
- }
- fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
- /* As above, we can't fill the delay slot. */
- fprintf (asm_out_file, "\tnop\n");
- }
-
- fprintf (asm_out_file, "\t.set\treorder\n");
-
-#ifdef ASM_DECLARE_FUNCTION_SIZE
- ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
-#endif
-
- if (!FUNCTION_NAME_ALREADY_DECLARED)
- {
- fputs ("\t.end\t", asm_out_file);
- assemble_name (asm_out_file, stubname);
- fputs ("\n", asm_out_file);
- }
-
- /* Record this stub. */
- l = (struct mips16_stub *) xmalloc (sizeof *l);
- l->name = xstrdup (fnname);
- l->fpret = fpret;
- l->next = mips16_stubs;
- mips16_stubs = l;
- }
-
- /* If we expect a floating point return value, but we've built a
- stub which does not expect one, then we're in trouble. We can't
- use the existing stub, because it won't handle the floating point
- value. We can't build a new stub, because the linker won't know
- which stub to use for the various calls in this object file.
- Fortunately, this case is illegal, since it means that a function
- was declared in two different ways in a single compilation. */
- if (fpret && ! l->fpret)
- error ("cannot handle inconsistent calls to %qs", fnname);
-
- if (retval == NULL_RTX)
- insn = gen_call_internal_direct (fn, arg_size);
- else
- insn = gen_call_value_internal_direct (retval, fn, arg_size);
- insn = emit_call_insn (insn);
-
- /* If we are calling a stub which handles a floating point return
- value, we need to arrange to save $18 in the prologue. We do
- this by marking the function call as using the register. The
- prologue will later see that it is used, and emit code to save
- it. */
- if (l->fpret)
- CALL_INSN_FUNCTION_USAGE (insn) =
- gen_rtx_EXPR_LIST (VOIDmode,
- gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
- CALL_INSN_FUNCTION_USAGE (insn));
-
- /* Return 1 to tell the caller that we've generated the call
- insn. */
- return 1;
-}
-
-/* An entry in the mips16 constant pool. VALUE is the pool constant,
- MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
-
-struct mips16_constant {
- struct mips16_constant *next;
- rtx value;
- rtx label;
- enum machine_mode mode;
-};
-
-/* Information about an incomplete mips16 constant pool. FIRST is the
- first constant, HIGHEST_ADDRESS is the highest address that the first
- byte of the pool can have, and INSN_ADDRESS is the current instruction
- address. */
-
-struct mips16_constant_pool {
- struct mips16_constant *first;
- int highest_address;
- int insn_address;
-};
-
-/* Add constant VALUE to POOL and return its label. MODE is the
- value's mode (used for CONST_INTs, etc.). */
-
-static rtx
-add_constant (struct mips16_constant_pool *pool,
- rtx value, enum machine_mode mode)
-{
- struct mips16_constant **p, *c;
- bool first_of_size_p;
-
- /* See whether the constant is already in the pool. If so, return the
- existing label, otherwise leave P pointing to the place where the
- constant should be added.
-
- Keep the pool sorted in increasing order of mode size so that we can
- reduce the number of alignments needed. */
- first_of_size_p = true;
- for (p = &pool->first; *p != 0; p = &(*p)->next)
- {
- if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
- return (*p)->label;
- if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
- break;
- if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
- first_of_size_p = false;
- }
-
- /* In the worst case, the constant needed by the earliest instruction
- will end up at the end of the pool. The entire pool must then be
- accessible from that instruction.
-
- When adding the first constant, set the pool's highest address to
- the address of the first out-of-range byte. Adjust this address
- downwards each time a new constant is added. */
- if (pool->first == 0)
- /* For pc-relative lw, addiu and daddiu instructions, the base PC value
- is the address of the instruction with the lowest two bits clear.
- The base PC value for ld has the lowest three bits clear. Assume
- the worst case here. */
- pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
- pool->highest_address -= GET_MODE_SIZE (mode);
- if (first_of_size_p)
- /* Take into account the worst possible padding due to alignment. */
- pool->highest_address -= GET_MODE_SIZE (mode) - 1;
-
- /* Create a new entry. */
- c = (struct mips16_constant *) xmalloc (sizeof *c);
- c->value = value;
- c->mode = mode;
- c->label = gen_label_rtx ();
- c->next = *p;
- *p = c;
-
- return c->label;
-}
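/* To make the bookkeeping above concrete, assume a 32-bit target
   (UNITS_PER_WORD == 4) and a first constant added when
   pool->insn_address == 0x100: the pool's highest acceptable address
   starts at 0x100 - 2 + 0x8000 == 0x80fe, and adding an SImode constant
   then subtracts its 4 bytes plus 3 bytes of worst-case alignment
   padding, leaving 0x80f7.  */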
-
-/* Output constant VALUE after instruction INSN and return the last
- instruction emitted. MODE is the mode of the constant. */
-
-static rtx
-dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
-{
- if (SCALAR_INT_MODE_P (mode)
- || ALL_SCALAR_FRACT_MODE_P (mode)
- || ALL_SCALAR_ACCUM_MODE_P (mode))
- {
- rtx size = GEN_INT (GET_MODE_SIZE (mode));
- return emit_insn_after (gen_consttable_int (value, size), insn);
- }
-
- if (SCALAR_FLOAT_MODE_P (mode))
- return emit_insn_after (gen_consttable_float (value), insn);
-
- if (VECTOR_MODE_P (mode))
- {
- int i;
-
- for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
- insn = dump_constants_1 (GET_MODE_INNER (mode),
- CONST_VECTOR_ELT (value, i), insn);
- return insn;
- }
-
- gcc_unreachable ();
-}
-
-
-/* Dump out the constants in CONSTANTS after INSN. */
-
-static void
-dump_constants (struct mips16_constant *constants, rtx insn)
-{
- struct mips16_constant *c, *next;
- int align;
-
- align = 0;
- for (c = constants; c != NULL; c = next)
- {
- /* If necessary, increase the alignment of PC. */
- if (align < GET_MODE_SIZE (c->mode))
- {
- int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
- insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
- }
- align = GET_MODE_SIZE (c->mode);
-
- insn = emit_label_after (c->label, insn);
- insn = dump_constants_1 (c->mode, c->value, insn);
-
- next = c->next;
- free (c);
- }
-
- emit_barrier_after (insn);
-}
-
-/* Return the length of instruction INSN. */
-
-static int
-mips16_insn_length (rtx insn)
-{
- if (JUMP_P (insn))
- {
- rtx body = PATTERN (insn);
- if (GET_CODE (body) == ADDR_VEC)
- return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
- if (GET_CODE (body) == ADDR_DIFF_VEC)
- return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
- }
- return get_attr_length (insn);
-}
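/* For example, a HImode ADDR_DIFF_VEC jump table with ten case labels
   counts as 2 * 10 == 20 bytes here; ordinary instructions just use
   get_attr_length.  */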
-
-/* If *X is a symbolic constant that refers to the constant pool, add
- the constant to POOL and rewrite *X to use the constant's label. */
-
-static void
-mips16_rewrite_pool_constant (struct mips16_constant_pool *pool, rtx *x)
-{
- rtx base, offset, label;
-
- split_const (*x, &base, &offset);
- if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
- {
- label = add_constant (pool, get_pool_constant (base),
- get_pool_mode (base));
- base = gen_rtx_LABEL_REF (Pmode, label);
- *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
- }
-}
-
-/* This structure is used to communicate with mips16_rewrite_pool_refs.
- INSN is the instruction we're rewriting and POOL points to the current
- constant pool. */
-struct mips16_rewrite_pool_refs_info {
- rtx insn;
- struct mips16_constant_pool *pool;
-};
-
-/* Rewrite *X so that constant pool references refer to the constant's
- label instead. DATA points to a mips16_rewrite_pool_refs_info
- structure. */
-
-static int
-mips16_rewrite_pool_refs (rtx *x, void *data)
-{
- struct mips16_rewrite_pool_refs_info *info = data;
-
- if (force_to_mem_operand (*x, Pmode))
- {
- rtx mem = force_const_mem (GET_MODE (*x), *x);
- validate_change (info->insn, x, mem, false);
- }
-
- if (MEM_P (*x))
- {
- mips16_rewrite_pool_constant (info->pool, &XEXP (*x, 0));
- return -1;
- }
-
- if (TARGET_MIPS16_TEXT_LOADS)
- mips16_rewrite_pool_constant (info->pool, x);
-
- return GET_CODE (*x) == CONST ? -1 : 0;
-}
-
-/* Build MIPS16 constant pools. */
-
-static void
-mips16_lay_out_constants (void)
-{
- struct mips16_constant_pool pool;
- struct mips16_rewrite_pool_refs_info info;
- rtx insn, barrier;
-
- if (!TARGET_MIPS16_PCREL_LOADS)
- return;
-
- barrier = 0;
- memset (&pool, 0, sizeof (pool));
- for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
- {
- /* Rewrite constant pool references in INSN. */
- if (INSN_P (insn))
- {
- info.insn = insn;
- info.pool = &pool;
- for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &info);
- }
-
- pool.insn_address += mips16_insn_length (insn);
-
- if (pool.first != NULL)
- {
- /* If there are no natural barriers between the first user of
- the pool and the highest acceptable address, we'll need to
- create a new instruction to jump around the constant pool.
- In the worst case, this instruction will be 4 bytes long.
-
- If it's too late to do this transformation after INSN,
- do it immediately before INSN. */
- if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
- {
- rtx label, jump;
-
- label = gen_label_rtx ();
-
- jump = emit_jump_insn_before (gen_jump (label), insn);
- JUMP_LABEL (jump) = label;
- LABEL_NUSES (label) = 1;
- barrier = emit_barrier_after (jump);
-
- emit_label_after (label, barrier);
- pool.insn_address += 4;
- }
-
- /* See whether the constant pool is now out of range of the first
- user. If so, output the constants after the previous barrier.
- Note that any instructions between BARRIER and INSN (inclusive)
- will use negative offsets to refer to the pool. */
- if (pool.insn_address > pool.highest_address)
- {
- dump_constants (pool.first, barrier);
- pool.first = NULL;
- barrier = 0;
- }
- else if (BARRIER_P (insn))
- barrier = insn;
- }
- }
- dump_constants (pool.first, get_last_insn ());
-}
-
-/* A temporary variable used by for_each_rtx callbacks, etc. */
-static rtx mips_sim_insn;
-
-/* A structure representing the state of the processor pipeline.
- Used by the mips_sim_* family of functions. */
-struct mips_sim {
- /* The maximum number of instructions that can be issued in a cycle.
- (Caches mips_issue_rate.) */
- unsigned int issue_rate;
-
- /* The current simulation time. */
- unsigned int time;
-
- /* How many more instructions can be issued in the current cycle. */
- unsigned int insns_left;
-
- /* LAST_SET[X].INSN is the last instruction to set register X.
- LAST_SET[X].TIME is the time at which that instruction was issued.
- INSN is null if no instruction has yet set register X. */
- struct {
- rtx insn;
- unsigned int time;
- } last_set[FIRST_PSEUDO_REGISTER];
-
- /* The pipeline's current DFA state. */
- state_t dfa_state;
-};
-
-/* Reset STATE to the initial simulation state. */
-
-static void
-mips_sim_reset (struct mips_sim *state)
-{
- state->time = 0;
- state->insns_left = state->issue_rate;
- memset (&state->last_set, 0, sizeof (state->last_set));
- state_reset (state->dfa_state);
-}
-
-/* Initialize STATE before its first use. DFA_STATE points to an
- allocated but uninitialized DFA state. */
-
-static void
-mips_sim_init (struct mips_sim *state, state_t dfa_state)
-{
- state->issue_rate = mips_issue_rate ();
- state->dfa_state = dfa_state;
- mips_sim_reset (state);
-}
-
-/* Advance STATE by one clock cycle. */
-
-static void
-mips_sim_next_cycle (struct mips_sim *state)
-{
- state->time++;
- state->insns_left = state->issue_rate;
- state_transition (state->dfa_state, 0);
-}
-
-/* Advance simulation state STATE until instruction INSN can read
- register REG. */
-
-static void
-mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
-{
- unsigned int i;
-
- for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
- if (state->last_set[REGNO (reg) + i].insn != 0)
- {
- unsigned int t;
-
- t = state->last_set[REGNO (reg) + i].time;
- t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
- while (state->time < t)
- mips_sim_next_cycle (state);
- }
-}
-
-/* A for_each_rtx callback. If *X is a register, advance simulation state
- DATA until mips_sim_insn can read the register's value. */
-
-static int
-mips_sim_wait_regs_2 (rtx *x, void *data)
-{
- if (REG_P (*x))
- mips_sim_wait_reg (data, mips_sim_insn, *x);
- return 0;
-}
-
-/* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
-
-static void
-mips_sim_wait_regs_1 (rtx *x, void *data)
-{
- for_each_rtx (x, mips_sim_wait_regs_2, data);
-}
-
-/* Advance simulation state STATE until all of INSN's register
- dependencies are satisfied. */
-
-static void
-mips_sim_wait_regs (struct mips_sim *state, rtx insn)
-{
- mips_sim_insn = insn;
- note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
-}
-
-/* Advance simulation state STATE until the units required by
- instruction INSN are available. */
-
-static void
-mips_sim_wait_units (struct mips_sim *state, rtx insn)
-{
- state_t tmp_state;
-
- tmp_state = alloca (state_size ());
- while (state->insns_left == 0
- || (memcpy (tmp_state, state->dfa_state, state_size ()),
- state_transition (tmp_state, insn) >= 0))
- mips_sim_next_cycle (state);
-}
-
-/* Advance simulation state STATE until INSN is ready to issue. */
-
-static void
-mips_sim_wait_insn (struct mips_sim *state, rtx insn)
-{
- mips_sim_wait_regs (state, insn);
- mips_sim_wait_units (state, insn);
-}
-
-/* mips_sim_insn has just set X. Update the LAST_SET array
- in simulation state DATA. */
-
-static void
-mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
-{
- struct mips_sim *state;
- unsigned int i;
-
- state = data;
- if (REG_P (x))
- for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
- {
- state->last_set[REGNO (x) + i].insn = mips_sim_insn;
- state->last_set[REGNO (x) + i].time = state->time;
- }
-}
-
-/* Issue instruction INSN in scheduler state STATE. Assume that INSN
- can issue immediately (i.e., that mips_sim_wait_insn has already
- been called). */
-
-static void
-mips_sim_issue_insn (struct mips_sim *state, rtx insn)
-{
- state_transition (state->dfa_state, insn);
- state->insns_left--;
-
- mips_sim_insn = insn;
- note_stores (PATTERN (insn), mips_sim_record_set, state);
-}
-
-/* Simulate issuing a NOP in state STATE. */
-
-static void
-mips_sim_issue_nop (struct mips_sim *state)
-{
- if (state->insns_left == 0)
- mips_sim_next_cycle (state);
- state->insns_left--;
-}
-
-/* Update simulation state STATE so that it's ready to accept the instruction
- after INSN. INSN should be part of the main rtl chain, not a member of a
- SEQUENCE. */
-
-static void
-mips_sim_finish_insn (struct mips_sim *state, rtx insn)
-{
- /* If INSN is a jump with an implicit delay slot, simulate a nop. */
- if (JUMP_P (insn))
- mips_sim_issue_nop (state);
-
- switch (GET_CODE (SEQ_BEGIN (insn)))
- {
- case CODE_LABEL:
- case CALL_INSN:
- /* We can't predict the processor state after a call or label. */
- mips_sim_reset (state);
- break;
-
- case JUMP_INSN:
- /* The delay slots of branch likely instructions are only executed
- when the branch is taken. Therefore, if the caller has simulated
- the delay slot instruction, STATE does not really reflect the state
- of the pipeline for the instruction after the delay slot. Also,
- branch likely instructions tend to incur a penalty when not taken,
- so there will probably be an extra delay between the branch and
- the instruction after the delay slot. */
- if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
- mips_sim_reset (state);
- break;
-
- default:
- break;
- }
-}
-
-/* The VR4130 pipeline issues aligned pairs of instructions together,
- but it stalls the second instruction if it depends on the first.
- In order to cut down the amount of logic required, this dependence
- check is not based on a full instruction decode. Instead, any non-SPECIAL
- instruction is assumed to modify the register specified by bits 20-16
- (which is usually the "rt" field).
-
- In beq, beql, bne and bnel instructions, the rt field is actually an
- input, so we can end up with a false dependence between the branch
- and its delay slot. If this situation occurs in instruction INSN,
- try to avoid it by swapping rs and rt. */
-
-static void
-vr4130_avoid_branch_rt_conflict (rtx insn)
-{
- rtx first, second;
-
- first = SEQ_BEGIN (insn);
- second = SEQ_END (insn);
- if (JUMP_P (first)
- && NONJUMP_INSN_P (second)
- && GET_CODE (PATTERN (first)) == SET
- && GET_CODE (SET_DEST (PATTERN (first))) == PC
- && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
- {
- /* Check for the right kind of condition. */
- rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
- if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
- && REG_P (XEXP (cond, 0))
- && REG_P (XEXP (cond, 1))
- && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
- && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
- {
- /* SECOND mentions the rt register but not the rs register. */
- rtx tmp = XEXP (cond, 0);
- XEXP (cond, 0) = XEXP (cond, 1);
- XEXP (cond, 1) = tmp;
- }
- }
-}
-
-/* Implement -mvr4130-align. Go through each basic block and simulate the
- processor pipeline. If we find that a pair of instructions could execute
- in parallel, and the first of those instructions is not 8-byte aligned,
- insert a nop to make it aligned. */
-
-static void
-vr4130_align_insns (void)
-{
- struct mips_sim state;
- rtx insn, subinsn, last, last2, next;
- bool aligned_p;
-
- dfa_start ();
-
- /* LAST is the last instruction before INSN to have a nonzero length.
- LAST2 is the last such instruction before LAST. */
- last = 0;
- last2 = 0;
-
- /* ALIGNED_P is true if INSN is known to be at an aligned address. */
- aligned_p = true;
-
- mips_sim_init (&state, alloca (state_size ()));
- for (insn = get_insns (); insn != 0; insn = next)
- {
- unsigned int length;
-
- next = NEXT_INSN (insn);
-
- /* See the comment above vr4130_avoid_branch_rt_conflict for details.
- This isn't really related to the alignment pass, but we do it on
- the fly to avoid a separate instruction walk. */
- vr4130_avoid_branch_rt_conflict (insn);
-
- if (USEFUL_INSN_P (insn))
- FOR_EACH_SUBINSN (subinsn, insn)
- {
- mips_sim_wait_insn (&state, subinsn);
-
- /* If we want this instruction to issue in parallel with the
- previous one, make sure that the previous instruction is
- aligned. There are several reasons why this isn't worthwhile
- when the second instruction is a call:
-
- - Calls are less likely to be performance critical.
- - There's a good chance that the delay slot can execute
- in parallel with the call.
- - The return address would then be unaligned.
-
- In general, if we're going to insert a nop between instructions
- X and Y, it's better to insert it immediately after X. That
- way, if the nop makes Y aligned, it will also align any labels
- between X and Y. */
- if (state.insns_left != state.issue_rate
- && !CALL_P (subinsn))
- {
- if (subinsn == SEQ_BEGIN (insn) && aligned_p)
- {
- /* SUBINSN is the first instruction in INSN and INSN is
- aligned. We want to align the previous instruction
- instead, so insert a nop between LAST2 and LAST.
-
- Note that LAST could be either a single instruction
- or a branch with a delay slot. In the latter case,
- LAST, like INSN, is already aligned, but the delay
- slot must have some extra delay that stops it from
- issuing at the same time as the branch. We therefore
- insert a nop before the branch in order to align its
- delay slot. */
- emit_insn_after (gen_nop (), last2);
- aligned_p = false;
- }
- else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
- {
- /* SUBINSN is the delay slot of INSN, but INSN is
- currently unaligned. Insert a nop between
- LAST and INSN to align it. */
- emit_insn_after (gen_nop (), last);
- aligned_p = true;
- }
- }
- mips_sim_issue_insn (&state, subinsn);
- }
- mips_sim_finish_insn (&state, insn);
-
- /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
- length = get_attr_length (insn);
- if (length > 0)
- {
- /* If the instruction is an asm statement or multi-instruction
- mips.md pattern, the length is only an estimate. Insert an
- 8-byte alignment after it so that the following instructions
- can be handled correctly. */
- if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
- && (recog_memoized (insn) < 0 || length >= 8))
- {
- next = emit_insn_after (gen_align (GEN_INT (3)), insn);
- next = NEXT_INSN (next);
- mips_sim_next_cycle (&state);
- aligned_p = true;
- }
- else if (length & 4)
- aligned_p = !aligned_p;
- last2 = last;
- last = insn;
- }
-
- /* See whether INSN is an aligned label. */
- if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
- aligned_p = true;
- }
- dfa_finish ();
-}
-
-/* Subroutine of mips_reorg. If there is a hazard between INSN
- and a previous instruction, avoid it by inserting nops after
- instruction AFTER.
-
- *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
- this point. If *DELAYED_REG is non-null, INSN must wait a cycle
- before using the value of that register. *HILO_DELAY counts the
- number of instructions since the last hilo hazard (that is,
- the number of instructions since the last mflo or mfhi).
-
- After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
- for the next instruction.
-
- LO_REG is an rtx for the LO register, used in dependence checking. */
-
-static void
-mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
- rtx *delayed_reg, rtx lo_reg)
-{
- rtx pattern, set;
- int nops, ninsns, hazard_set;
-
- if (!INSN_P (insn))
- return;
-
- pattern = PATTERN (insn);
-
- /* Do not put the whole function in .set noreorder if it contains
- an asm statement. We don't know whether there will be hazards
- between the asm statement and the gcc-generated code. */
- if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
- cfun->machine->all_noreorder_p = false;
-
- /* Ignore zero-length instructions (barriers and the like). */
- ninsns = get_attr_length (insn) / 4;
- if (ninsns == 0)
- return;
-
- /* Work out how many nops are needed. Note that we only care about
- registers that are explicitly mentioned in the instruction's pattern.
- It doesn't matter that calls use the argument registers or that they
- clobber hi and lo. */
- if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
- nops = 2 - *hilo_delay;
- else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
- nops = 1;
- else
- nops = 0;
-
- /* Insert the nops between this instruction and the previous one.
- Each new nop takes us further from the last hilo hazard. */
- *hilo_delay += nops;
- while (nops-- > 0)
- emit_insn_after (gen_hazard_nop (), after);
-
- /* Set up the state for the next instruction. */
- *hilo_delay += ninsns;
- *delayed_reg = 0;
- if (INSN_CODE (insn) >= 0)
- switch (get_attr_hazard (insn))
- {
- case HAZARD_NONE:
- break;
-
- case HAZARD_HILO:
- *hilo_delay = 0;
- break;
-
- case HAZARD_DELAY:
- hazard_set = (int) get_attr_hazard_set (insn);
- if (hazard_set == 0)
- set = single_set (insn);
- else
- {
- gcc_assert (GET_CODE (PATTERN (insn)) == PARALLEL);
- set = XVECEXP (PATTERN (insn), 0, hazard_set - 1);
- }
- gcc_assert (set && GET_CODE (set) == SET);
- *delayed_reg = SET_DEST (set);
- break;
- }
-}
-
-
-/* Go through the instruction stream and insert nops where necessary.
- See if the whole function can then be put into .set noreorder &
- .set nomacro. */
-
-static void
-mips_avoid_hazards (void)
-{
- rtx insn, last_insn, lo_reg, delayed_reg;
- int hilo_delay, i;
-
- /* Force all instructions to be split into their final form. */
- split_all_insns_noflow ();
-
- /* Recalculate instruction lengths without taking nops into account. */
- cfun->machine->ignore_hazard_length_p = true;
- shorten_branches (get_insns ());
-
- cfun->machine->all_noreorder_p = true;
-
- /* Profiled functions can't be all noreorder because the profiler
- support uses assembler macros. */
- if (current_function_profile)
- cfun->machine->all_noreorder_p = false;
-
- /* Code compiled with -mfix-vr4120 can't be all noreorder because
- we rely on the assembler to work around some errata. */
- if (TARGET_FIX_VR4120)
- cfun->machine->all_noreorder_p = false;
-
- /* The same is true for -mfix-vr4130 if we might generate mflo or
- mfhi instructions. Note that we avoid using mflo and mfhi if
- the VR4130 macc and dmacc instructions are available instead;
- see the *mfhilo_{si,di}_macc patterns. */
- if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
- cfun->machine->all_noreorder_p = false;
-
- last_insn = 0;
- hilo_delay = 2;
- delayed_reg = 0;
- lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
-
- for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
- if (INSN_P (insn))
- {
- if (GET_CODE (PATTERN (insn)) == SEQUENCE)
- for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
- mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
- &hilo_delay, &delayed_reg, lo_reg);
- else
- mips_avoid_hazard (last_insn, insn, &hilo_delay,
- &delayed_reg, lo_reg);
-
- last_insn = insn;
- }
-}
-
-
-/* Implement TARGET_MACHINE_DEPENDENT_REORG. */
-
-static void
-mips_reorg (void)
-{
- mips16_lay_out_constants ();
- if (TARGET_EXPLICIT_RELOCS)
- {
- if (mips_flag_delayed_branch)
- dbr_schedule (get_insns ());
- mips_avoid_hazards ();
- if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
- vr4130_align_insns ();
- }
-}
-
/* This function does three things:
- Register the special divsi3 and modsi3 functions if -mfix-vr4120.
@@ -10957,89 +9232,6 @@ mips_init_libfuncs (void)
gofast_maybe_init_libfuncs ();
}
-/* Return a number assessing the cost of moving a register in class
- FROM to class TO. The classes are expressed using the enumeration
- values such as `GENERAL_REGS'. A value of 2 is the default; other
- values are interpreted relative to that.
-
- It is not required that the cost always equal 2 when FROM is the
- same as TO; on some machines it is expensive to move between
- registers if they are not general registers.
-
- If reload sees an insn consisting of a single `set' between two
- hard registers, and if `REGISTER_MOVE_COST' applied to their
- classes returns a value of 2, reload does not check to ensure that
- the constraints of the insn are met. Setting a cost of other than
- 2 will allow reload to verify that the constraints are met. You
- should do this if the `movM' pattern's constraints do not allow
- such copying.
-
- ??? We make the cost of moving from HI/LO into general
- registers the same as the cost of moving general registers to
- HI/LO for TARGET_MIPS16 in order to prevent allocating a
- pseudo to HI/LO. This might hurt optimizations, though; it
- isn't clear if it is wise, and it might not work in all cases. We
- could solve the DImode LO reg problem by using a multiply, just
- like reload_{in,out}si. We could solve the SImode/HImode HI reg
- problem by using divide instructions. divu puts the remainder in
- the HI reg, so doing a divide by -1 will move the value in the HI
- reg for all values except -1. We could handle that case by using a
- signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
- a compare/branch to test the input value to see which instruction
- we need to use. This gets pretty messy, but it is feasible. */
-
-int
-mips_register_move_cost (enum machine_mode mode,
- enum reg_class to, enum reg_class from)
-{
- if (TARGET_MIPS16)
- {
- if (reg_class_subset_p (from, GENERAL_REGS)
- && reg_class_subset_p (to, GENERAL_REGS))
- {
- if (reg_class_subset_p (from, M16_REGS)
- || reg_class_subset_p (to, M16_REGS))
- return 2;
- /* Two MOVEs. */
- return 4;
- }
- }
- else if (reg_class_subset_p (from, GENERAL_REGS))
- {
- if (reg_class_subset_p (to, GENERAL_REGS))
- return 2;
- if (reg_class_subset_p (to, FP_REGS))
- return 4;
- if (reg_class_subset_p (to, ALL_COP_AND_GR_REGS))
- return 5;
- if (reg_class_subset_p (to, ACC_REGS))
- return 6;
- }
- else if (reg_class_subset_p (to, GENERAL_REGS))
- {
- if (reg_class_subset_p (from, FP_REGS))
- return 4;
- if (reg_class_subset_p (from, ST_REGS))
- /* LUI followed by MOVF. */
- return 4;
- if (reg_class_subset_p (from, ALL_COP_AND_GR_REGS))
- return 5;
- if (reg_class_subset_p (from, ACC_REGS))
- return 6;
- }
- else if (reg_class_subset_p (from, FP_REGS))
- {
- if (reg_class_subset_p (to, FP_REGS)
- && mips_mode_ok_for_mov_fmt_p (mode))
- return 4;
- if (reg_class_subset_p (to, ST_REGS))
- /* An expensive sequence. */
- return 8;
- }
-
- return 12;
-}
-
/* Return the length of INSN. LENGTH is the initial length computed by
attributes in the machine-description file. */
@@ -11349,175 +9541,145 @@ mips_output_division (const char *division, rtx *operands)
return s;
}
-/* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
- with a final "000" replaced by "k". Ignore case.
-
- Note: this function is shared between GCC and GAS. */
+/* Return true if INSN is a multiply-add or multiply-subtract
+ instruction and PREV assigns to the accumulator operand. */
-static bool
-mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
+bool
+mips_linked_madd_p (rtx prev, rtx insn)
{
- while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
- given++, canonical++;
-
- return ((*given == 0 && *canonical == 0)
- || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
-}
-
+ rtx x;
-/* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
- CPU name. We've traditionally allowed a lot of variation here.
+ x = single_set (insn);
+ if (x == 0)
+ return false;
- Note: this function is shared between GCC and GAS. */
+ x = SET_SRC (x);
-static bool
-mips_matching_cpu_name_p (const char *canonical, const char *given)
-{
- /* First see if the name matches exactly, or with a final "000"
- turned into "k". */
- if (mips_strict_matching_cpu_name_p (canonical, given))
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == MULT
+ && reg_set_p (XEXP (x, 1), prev))
return true;
- /* If not, try comparing based on numerical designation alone.
- See if GIVEN is an unadorned number, or 'r' followed by a number. */
- if (TOLOWER (*given) == 'r')
- given++;
- if (!ISDIGIT (*given))
- return false;
-
- /* Skip over some well-known prefixes in the canonical name,
- hoping to find a number there too. */
- if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
- canonical += 2;
- else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
- canonical += 2;
- else if (TOLOWER (canonical[0]) == 'r')
- canonical += 1;
+ if (GET_CODE (x) == MINUS
+ && GET_CODE (XEXP (x, 1)) == MULT
+ && reg_set_p (XEXP (x, 0), prev))
+ return true;
- return mips_strict_matching_cpu_name_p (canonical, given);
+ return false;
}
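
A side note on the CPU-name matching helpers removed above: per their comments, "r4000", "4000" and even "4k" may all name the same processor. The following standalone sketch (illustrative only; the toy_* names are mine, not GCC's) mirrors the two matching steps — strict matching with a final "000" replaced by "k", then a retry with the "vr"/"rm"/"r" prefixes stripped:

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Strict match: case-insensitive equality, or CANONICAL ending in "000"
   where GIVEN ends in "k" instead (so "4k" can stand for "4000").  */
static bool
toy_strict_match (const char *canonical, const char *given)
{
  while (*given != 0
	 && tolower ((unsigned char) *given)
	    == tolower ((unsigned char) *canonical))
    given++, canonical++;

  return ((*given == 0 && *canonical == 0)
	  || (strcmp (canonical, "000") == 0
	      && tolower ((unsigned char) given[0]) == 'k'
	      && given[1] == 0));
}

/* Loose match: also accept a bare number, or 'r' plus a number, against
   a canonical name carrying a "vr", "rm" or "r" prefix.  */
static bool
toy_match (const char *canonical, const char *given)
{
  if (toy_strict_match (canonical, given))
    return true;

  if (tolower ((unsigned char) *given) == 'r')
    given++;
  if (!isdigit ((unsigned char) *given))
    return false;

  if (tolower ((unsigned char) canonical[0]) == 'v'
      && tolower ((unsigned char) canonical[1]) == 'r')
    canonical += 2;
  else if (tolower ((unsigned char) canonical[0]) == 'r'
	   && tolower ((unsigned char) canonical[1]) == 'm')
    canonical += 2;
  else if (tolower ((unsigned char) canonical[0]) == 'r')
    canonical += 1;

  return toy_strict_match (canonical, given);
}

int
main (void)
{
  /* All three print 1: "r4000", "4000" and "4k" name the same CPU.  */
  printf ("%d %d %d\n",
	  toy_match ("r4000", "r4000"),
	  toy_match ("r4000", "4000"),
	  toy_match ("r4000", "4k"));
  return 0;
}
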
+/* Implements a store data bypass check. We need this because the cprestore
+ pattern is type store, but defined using an UNSPEC. This UNSPEC causes the
+ default routine to abort. We just return false for that case. */
+/* ??? Should try to give a better result here than assuming false. */
-/* Return the mips_cpu_info entry for the processor or ISA given
- by CPU_STRING. Return null if the string isn't recognized.
-
- A similar function exists in GAS. */
-
-static const struct mips_cpu_info *
-mips_parse_cpu (const char *cpu_string)
+int
+mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
{
- unsigned int i;
- const char *s;
-
- /* In the past, we allowed upper-case CPU names, but it doesn't
- work well with the multilib machinery. */
- for (s = cpu_string; *s != 0; s++)
- if (ISUPPER (*s))
- {
- warning (0, "the cpu name must be lower case");
- break;
- }
+ if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
+ return false;
- /* 'from-abi' selects the most compatible architecture for the given
- ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
- EABIs, we have to decide whether we're using the 32-bit or 64-bit
- version. Look first at the -mgp options, if given, otherwise base
- the choice on MASK_64BIT in TARGET_DEFAULT. */
- if (strcasecmp (cpu_string, "from-abi") == 0)
- return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
- : ABI_NEEDS_64BIT_REGS ? 3
- : (TARGET_64BIT ? 3 : 1));
+ return ! store_data_bypass_p (out_insn, in_insn);
+}
+
+/* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
+ dependencies have no cost, except on the 20Kc where output-dependence
+ is treated like input-dependence. */
- /* 'default' has traditionally been a no-op. Probably not very useful. */
- if (strcasecmp (cpu_string, "default") == 0)
+static int
+mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
+ rtx dep ATTRIBUTE_UNUSED, int cost)
+{
+ if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
+ && TUNE_20KC)
+ return cost;
+ if (REG_NOTE_KIND (link) != 0)
return 0;
-
- for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
- if (mips_matching_cpu_name_p (mips_cpu_info_table[i].name, cpu_string))
- return mips_cpu_info_table + i;
-
- return 0;
+ return cost;
}
+/* Return the number of instructions that can be issued per cycle. */
-/* Return the processor associated with the given ISA level, or null
- if the ISA isn't valid. */
-
-static const struct mips_cpu_info *
-mips_cpu_info_from_isa (int isa)
+static int
+mips_issue_rate (void)
{
- unsigned int i;
+ switch (mips_tune)
+ {
+ case PROCESSOR_74KC:
+ case PROCESSOR_74KF2_1:
+ case PROCESSOR_74KF1_1:
+ case PROCESSOR_74KF3_2:
+ /* The 74k is not strictly a quad-issue CPU, but can be seen as one
+ by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
+ but in reality only a maximum of 3 insns can be issued as the
+ floating point load/stores also require a slot in the AGEN pipe. */
+ return 4;
- for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
- if (mips_cpu_info_table[i].isa == isa)
- return mips_cpu_info_table + i;
+ case PROCESSOR_20KC:
+ case PROCESSOR_R4130:
+ case PROCESSOR_R5400:
+ case PROCESSOR_R5500:
+ case PROCESSOR_R7000:
+ case PROCESSOR_R9000:
+ return 2;
- return 0;
-}
-
-/* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
- by UNITS_PER_FPREG. The size of FP status registers is always 4, because
- they only hold condition code modes, and CCmode is always considered to
- be 4 bytes wide. All other registers are word sized. */
+ case PROCESSOR_SB1:
+ case PROCESSOR_SB1A:
+ /* This is actually 4, but we get better performance if we claim 3.
+ This is partly because of unwanted speculative code motion with the
+ larger number, and partly because in most common cases we can't
+ reach the theoretical max of 4. */
+ return 3;
-unsigned int
-mips_hard_regno_nregs (int regno, enum machine_mode mode)
-{
- if (ST_REG_P (regno))
- return ((GET_MODE_SIZE (mode) + 3) / 4);
- else if (! FP_REG_P (regno))
- return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
- else
- return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
+ default:
+ return 1;
+ }
}
-/* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
- all BLKmode objects are returned in memory. Under the new (N32 and
- 64-bit MIPS ABIs) small structures are returned in a register.
- Objects with varying size must still be returned in memory, of
- course. */
+/* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
+ be as wide as the scheduling freedom in the DFA. */
-static bool
-mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
+static int
+mips_multipass_dfa_lookahead (void)
{
- if (TARGET_OLDABI)
- return (TYPE_MODE (type) == BLKmode);
- else
- return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
- || (int_size_in_bytes (type) == -1));
-}
+ /* Can schedule up to 4 of the 6 function units in any one cycle. */
+ if (TUNE_SB1)
+ return 4;
-static bool
-mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
-{
- return !TARGET_OLDABI;
+ return 0;
}
-/* Return true if INSN is a multiply-add or multiply-subtract
- instruction and PREV assigns to the accumulator operand. */
+/* Remove the instruction at index LOWER from ready queue READY and
+ reinsert it in front of the instruction at index HIGHER. LOWER must
+ be <= HIGHER. */
-bool
-mips_linked_madd_p (rtx prev, rtx insn)
+static void
+mips_promote_ready (rtx *ready, int lower, int higher)
{
- rtx x;
-
- x = single_set (insn);
- if (x == 0)
- return false;
-
- x = SET_SRC (x);
+ rtx new_head;
+ int i;
- if (GET_CODE (x) == PLUS
- && GET_CODE (XEXP (x, 0)) == MULT
- && reg_set_p (XEXP (x, 1), prev))
- return true;
+ new_head = ready[lower];
+ for (i = lower; i < higher; i++)
+ ready[i] = ready[i + 1];
+ ready[i] = new_head;
+}
- if (GET_CODE (x) == MINUS
- && GET_CODE (XEXP (x, 1)) == MULT
- && reg_set_p (XEXP (x, 0), prev))
- return true;
+/* If the priority of the instruction at POS2 in the ready queue READY
+ is within LIMIT units of that of the instruction at POS1, swap the
+ instructions if POS2 is not already less than POS1. */
- return false;
+static void
+mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
+{
+ if (pos1 < pos2
+ && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
+ {
+ rtx temp;
+ temp = ready[pos1];
+ ready[pos1] = ready[pos2];
+ ready[pos2] = temp;
+ }
}
/* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
@@ -11660,39 +9822,6 @@ vr4130_reorder (rtx *ready, int nready)
mips_promote_ready (ready, nready - 2, nready - 1);
}
-/* Remove the instruction at index LOWER from ready queue READY and
- reinsert it in front of the instruction at index HIGHER. LOWER must
- be <= HIGHER. */
-
-static void
-mips_promote_ready (rtx *ready, int lower, int higher)
-{
- rtx new_head;
- int i;
-
- new_head = ready[lower];
- for (i = lower; i < higher; i++)
- ready[i] = ready[i + 1];
- ready[i] = new_head;
-}
-
-/* If the priority of the instruction at POS2 in the ready queue READY
- is within LIMIT units of that of the instruction at POS1, swap the
- instructions if POS2 is not already less than POS1. */
-
-static void
-mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
-{
- if (pos1 < pos2
- && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
- {
- rtx temp;
- temp = ready[pos1];
- ready[pos1] = ready[pos2];
- ready[pos2] = temp;
- }
-}
-
/* Record whether last 74k AGEN instruction was a load or store. */
static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
@@ -11766,7 +9895,7 @@ mips_74k_agen_reorder (rtx *ready, int nready)
break;
}
}
-
+
/* Implement TARGET_SCHED_INIT. */
static void
@@ -11823,87 +9952,6 @@ mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
return more;
}
-/* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
- dependencies have no cost, except on the 20Kc where output-dependence
- is treated like input-dependence. */
-
-static int
-mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
- rtx dep ATTRIBUTE_UNUSED, int cost)
-{
- if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
- && TUNE_20KC)
- return cost;
- if (REG_NOTE_KIND (link) != 0)
- return 0;
- return cost;
-}
-
-/* Return the number of instructions that can be issued per cycle. */
-
-static int
-mips_issue_rate (void)
-{
- switch (mips_tune)
- {
- case PROCESSOR_74KC:
- case PROCESSOR_74KF2_1:
- case PROCESSOR_74KF1_1:
- case PROCESSOR_74KF3_2:
- /* The 74k is not strictly a quad-issue CPU, but can be seen as one
- by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
- but in reality only a maximum of 3 insns can be issued as the
- floating point load/stores also require a slot in the AGEN pipe. */
- return 4;
-
- case PROCESSOR_20KC:
- case PROCESSOR_R4130:
- case PROCESSOR_R5400:
- case PROCESSOR_R5500:
- case PROCESSOR_R7000:
- case PROCESSOR_R9000:
- return 2;
-
- case PROCESSOR_SB1:
- case PROCESSOR_SB1A:
- /* This is actually 4, but we get better performance if we claim 3.
- This is partly because of unwanted speculative code motion with the
- larger number, and partly because in most common cases we can't
- reach the theoretical max of 4. */
- return 3;
-
- default:
- return 1;
- }
-}
-
-/* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
- be as wide as the scheduling freedom in the DFA. */
-
-static int
-mips_multipass_dfa_lookahead (void)
-{
- /* Can schedule up to 4 of the 6 function units in any one cycle. */
- if (TUNE_SB1)
- return 4;
-
- return 0;
-}
-
-/* Implements a store data bypass check. We need this because the cprestore
- pattern is type store, but defined using an UNSPEC. This UNSPEC causes the
- default routine to abort. We just return false for that case. */
-/* ??? Should try to give a better result here than assuming false. */
-
-int
-mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
-{
- if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
- return false;
-
- return ! store_data_bypass_p (out_insn, in_insn);
-}
-
/* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
return the first operand of the associated "pref" or "prefx" insn. */
@@ -12254,118 +10302,9 @@ static const struct bdesc_map bdesc_arrays[] =
MASK_64BIT }
};
-/* Take the argument ARGNUM of the arglist of EXP and convert it into a form
- suitable for input operand OP of instruction ICODE. Return the value. */
-
-static rtx
-mips_prepare_builtin_arg (enum insn_code icode,
- unsigned int op, tree exp, unsigned int argnum)
-{
- rtx value;
- enum machine_mode mode;
-
- value = expand_normal (CALL_EXPR_ARG (exp, argnum));
- mode = insn_data[icode].operand[op].mode;
- if (!insn_data[icode].operand[op].predicate (value, mode))
- {
- value = copy_to_mode_reg (mode, value);
- /* Check the predicate again. */
- if (!insn_data[icode].operand[op].predicate (value, mode))
- {
- error ("invalid argument to builtin function");
- return const0_rtx;
- }
- }
-
- return value;
-}
-
-/* Return an rtx suitable for output operand OP of instruction ICODE.
- If TARGET is non-null, try to use it where possible. */
-
-static rtx
-mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
-{
- enum machine_mode mode;
-
- mode = insn_data[icode].operand[op].mode;
- if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
- target = gen_reg_rtx (mode);
-
- return target;
-}
-
-/* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
-
-rtx
-mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
- enum machine_mode mode ATTRIBUTE_UNUSED,
- int ignore ATTRIBUTE_UNUSED)
-{
- enum insn_code icode;
- enum mips_builtin_type type;
- tree fndecl;
- unsigned int fcode;
- const struct builtin_description *bdesc;
- const struct bdesc_map *m;
-
- fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
- fcode = DECL_FUNCTION_CODE (fndecl);
-
- if (TARGET_MIPS16)
- {
- error ("built-in function %qs not supported for MIPS16",
- IDENTIFIER_POINTER (DECL_NAME (fndecl)));
- return const0_rtx;
- }
-
- bdesc = NULL;
- for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
- {
- if (fcode < m->size)
- {
- bdesc = m->bdesc;
- icode = bdesc[fcode].icode;
- type = bdesc[fcode].builtin_type;
- break;
- }
- fcode -= m->size;
- }
- if (bdesc == NULL)
- return 0;
-
- switch (type)
- {
- case MIPS_BUILTIN_DIRECT:
- return mips_expand_builtin_direct (icode, target, exp, true);
-
- case MIPS_BUILTIN_DIRECT_NO_TARGET:
- return mips_expand_builtin_direct (icode, target, exp, false);
-
- case MIPS_BUILTIN_MOVT:
- case MIPS_BUILTIN_MOVF:
- return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
- target, exp);
-
- case MIPS_BUILTIN_CMP_ANY:
- case MIPS_BUILTIN_CMP_ALL:
- case MIPS_BUILTIN_CMP_UPPER:
- case MIPS_BUILTIN_CMP_LOWER:
- case MIPS_BUILTIN_CMP_SINGLE:
- return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
- target, exp);
-
- case MIPS_BUILTIN_BPOSGE32:
- return mips_expand_builtin_bposge (type, target);
-
- default:
- return 0;
- }
-}
-
/* Init builtin functions. This is called from TARGET_INIT_BUILTINS. */
-void
+static void
mips_init_builtins (void)
{
const struct builtin_description *d;
@@ -12633,6 +10572,47 @@ mips_init_builtins (void)
}
}
+/* Take the argument ARGNUM of the arglist of EXP and convert it into a form
+ suitable for input operand OP of instruction ICODE. Return the value. */
+
+static rtx
+mips_prepare_builtin_arg (enum insn_code icode,
+ unsigned int op, tree exp, unsigned int argnum)
+{
+ rtx value;
+ enum machine_mode mode;
+
+ value = expand_normal (CALL_EXPR_ARG (exp, argnum));
+ mode = insn_data[icode].operand[op].mode;
+ if (!insn_data[icode].operand[op].predicate (value, mode))
+ {
+ value = copy_to_mode_reg (mode, value);
+ /* Check the predicate again. */
+ if (!insn_data[icode].operand[op].predicate (value, mode))
+ {
+ error ("invalid argument to builtin function");
+ return const0_rtx;
+ }
+ }
+
+ return value;
+}
+
+/* Return an rtx suitable for output operand OP of instruction ICODE.
+ If TARGET is non-null, try to use it where possible. */
+
+static rtx
+mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
+{
+ enum machine_mode mode;
+
+ mode = insn_data[icode].operand[op].mode;
+ if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
+ target = gen_reg_rtx (mode);
+
+ return target;
+}
+
/* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
.md pattern and CALL is the function expr with arguments. TARGET,
if nonnull, suggests a good place to put the result.
@@ -12824,93 +10804,1954 @@ mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
return mips_builtin_branch_and_move (condition, target,
const1_rtx, const0_rtx);
}
+
+/* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
+
+static rtx
+mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ int ignore ATTRIBUTE_UNUSED)
+{
+ enum insn_code icode;
+ enum mips_builtin_type type;
+ tree fndecl;
+ unsigned int fcode;
+ const struct builtin_description *bdesc;
+ const struct bdesc_map *m;
+
+ fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
+ fcode = DECL_FUNCTION_CODE (fndecl);
+
+ if (TARGET_MIPS16)
+ {
+ error ("built-in function %qs not supported for MIPS16",
+ IDENTIFIER_POINTER (DECL_NAME (fndecl)));
+ return const0_rtx;
+ }
+
+ bdesc = NULL;
+ for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
+ {
+ if (fcode < m->size)
+ {
+ bdesc = m->bdesc;
+ icode = bdesc[fcode].icode;
+ type = bdesc[fcode].builtin_type;
+ break;
+ }
+ fcode -= m->size;
+ }
+ if (bdesc == NULL)
+ return 0;
+
+ switch (type)
+ {
+ case MIPS_BUILTIN_DIRECT:
+ return mips_expand_builtin_direct (icode, target, exp, true);
+
+ case MIPS_BUILTIN_DIRECT_NO_TARGET:
+ return mips_expand_builtin_direct (icode, target, exp, false);
+
+ case MIPS_BUILTIN_MOVT:
+ case MIPS_BUILTIN_MOVF:
+ return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
+ target, exp);
+
+ case MIPS_BUILTIN_CMP_ANY:
+ case MIPS_BUILTIN_CMP_ALL:
+ case MIPS_BUILTIN_CMP_UPPER:
+ case MIPS_BUILTIN_CMP_LOWER:
+ case MIPS_BUILTIN_CMP_SINGLE:
+ return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
+ target, exp);
+
+ case MIPS_BUILTIN_BPOSGE32:
+ return mips_expand_builtin_bposge (type, target);
+
+ default:
+ return 0;
+ }
+}
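
The dispatch above relies on DECL_FUNCTION_CODE being a running index across all of the bdesc_arrays tables, so the loop peels off m->size at each step until it lands in the right sub-array. A minimal standalone model of that lookup (hypothetical toy_* names, not the GCC structures):

#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins for the per-array descriptor tables.  */
struct toy_desc { const char *name; };

static const struct toy_desc dsp_descs[] = { { "dsp0" }, { "dsp1" } };
static const struct toy_desc ps_descs[]  = { { "ps0" }, { "ps1" }, { "ps2" } };

struct toy_map { const struct toy_desc *descs; size_t size; };

static const struct toy_map toy_arrays[] = {
  { dsp_descs, sizeof dsp_descs / sizeof dsp_descs[0] },
  { ps_descs,  sizeof ps_descs  / sizeof ps_descs[0]  }
};

/* Find the descriptor for a flattened function code, mirroring the
   "fcode -= m->size" walk in mips_expand_builtin.  */
static const struct toy_desc *
toy_lookup (unsigned int fcode)
{
  const struct toy_map *m;

  for (m = toy_arrays;
       m < &toy_arrays[sizeof toy_arrays / sizeof toy_arrays[0]];
       m++)
    {
      if (fcode < m->size)
	return &m->descs[fcode];
      fcode -= m->size;
    }
  return NULL;
}

int
main (void)
{
  /* Code 3 skips the two-entry first table and selects "ps1".  */
  printf ("%s\n", toy_lookup (3)->name);
  return 0;
}
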
-/* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
- FIRST is true if this is the first time handling this decl. */
+/* An entry in the mips16 constant pool. VALUE is the pool constant,
+ MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
+
+struct mips16_constant {
+ struct mips16_constant *next;
+ rtx value;
+ rtx label;
+ enum machine_mode mode;
+};
+
+/* Information about an incomplete mips16 constant pool. FIRST is the
+ first constant, HIGHEST_ADDRESS is the highest address that the first
+ byte of the pool can have, and INSN_ADDRESS is the current instruction
+ address. */
+
+struct mips16_constant_pool {
+ struct mips16_constant *first;
+ int highest_address;
+ int insn_address;
+};
+
+/* Add constant VALUE to POOL and return its label. MODE is the
+ value's mode (used for CONST_INTs, etc.). */
+
+static rtx
+add_constant (struct mips16_constant_pool *pool,
+ rtx value, enum machine_mode mode)
+{
+ struct mips16_constant **p, *c;
+ bool first_of_size_p;
+
+ /* See whether the constant is already in the pool. If so, return the
+ existing label, otherwise leave P pointing to the place where the
+ constant should be added.
+
+ Keep the pool sorted in increasing order of mode size so that we can
+ reduce the number of alignments needed. */
+ first_of_size_p = true;
+ for (p = &pool->first; *p != 0; p = &(*p)->next)
+ {
+ if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
+ return (*p)->label;
+ if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
+ break;
+ if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
+ first_of_size_p = false;
+ }
+
+ /* In the worst case, the constant needed by the earliest instruction
+ will end up at the end of the pool. The entire pool must then be
+ accessible from that instruction.
+
+ When adding the first constant, set the pool's highest address to
+ the address of the first out-of-range byte. Adjust this address
+ downwards each time a new constant is added. */
+ if (pool->first == 0)
+ /* For pc-relative lw, addiu and daddiu instructions, the base PC value
+ is the address of the instruction with the lowest two bits clear.
+ The base PC value for ld has the lowest three bits clear. Assume
+ the worst case here. */
+ pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
+ pool->highest_address -= GET_MODE_SIZE (mode);
+ if (first_of_size_p)
+ /* Take into account the worst possible padding due to alignment. */
+ pool->highest_address -= GET_MODE_SIZE (mode) - 1;
+
+ /* Create a new entry. */
+ c = (struct mips16_constant *) xmalloc (sizeof *c);
+ c->value = value;
+ c->mode = mode;
+ c->label = gen_label_rtx ();
+ c->next = *p;
+ *p = c;
+
+ return c->label;
+}
+
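The highest_address arithmetic above can be checked with concrete numbers. Here is a minimal sketch of the same worst-case computation, with plain integers standing in for the rtl bookkeeping; the 0x8000 reach and the UNITS_PER_WORD - 2 base-PC adjustment are taken straight from the comment above, and UNITS_PER_WORD == 8 is an assumed 64-bit configuration:

#include <stdio.h>

/* Worst-case reach of a MIPS16 pc-relative load: the instruction
   address minus the base-PC rounding (UNITS_PER_WORD - 2), plus the
   0x8000 signed-offset range.  */
static int
toy_first_pool_limit (int insn_address, int units_per_word)
{
  return insn_address - (units_per_word - 2) + 0x8000;
}

int
main (void)
{
  int highest = toy_first_pool_limit (0x100, 8);  /* 0x80fa */

  /* Adding an SImode (4-byte) constant shrinks the limit by its size,
     plus worst-case alignment padding of size - 1 because it is the
     first constant of that size.  */
  highest -= 4;      /* the constant itself */
  highest -= 4 - 1;  /* possible padding before it */

  printf ("pool limit after one SImode constant: 0x%x\n", highest);  /* 0x80f3 */
  return 0;
}
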
+/* Output constant VALUE after instruction INSN and return the last
+ instruction emitted. MODE is the mode of the constant. */
+
+static rtx
+dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
+{
+ if (SCALAR_INT_MODE_P (mode)
+ || ALL_SCALAR_FRACT_MODE_P (mode)
+ || ALL_SCALAR_ACCUM_MODE_P (mode))
+ {
+ rtx size = GEN_INT (GET_MODE_SIZE (mode));
+ return emit_insn_after (gen_consttable_int (value, size), insn);
+ }
+
+ if (SCALAR_FLOAT_MODE_P (mode))
+ return emit_insn_after (gen_consttable_float (value), insn);
+
+ if (VECTOR_MODE_P (mode))
+ {
+ int i;
+
+ for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
+ insn = dump_constants_1 (GET_MODE_INNER (mode),
+ CONST_VECTOR_ELT (value, i), insn);
+ return insn;
+ }
+
+ gcc_unreachable ();
+}
+
+
+/* Dump out the constants in CONSTANTS after INSN. */
static void
-mips_encode_section_info (tree decl, rtx rtl, int first)
+dump_constants (struct mips16_constant *constants, rtx insn)
{
- default_encode_section_info (decl, rtl, first);
+ struct mips16_constant *c, *next;
+ int align;
- if (TREE_CODE (decl) == FUNCTION_DECL)
+ align = 0;
+ for (c = constants; c != NULL; c = next)
{
- rtx symbol = XEXP (rtl, 0);
- tree type = TREE_TYPE (decl);
+ /* If necessary, increase the alignment of PC. */
+ if (align < GET_MODE_SIZE (c->mode))
+ {
+ int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
+ insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
+ }
+ align = GET_MODE_SIZE (c->mode);
- if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
- || mips_far_type_p (type))
- SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
+ insn = emit_label_after (c->label, insn);
+ insn = dump_constants_1 (c->mode, c->value, insn);
+
+ next = c->next;
+ free (c);
}
+
+ emit_barrier_after (insn);
}
-/* Implement TARGET_EXTRA_LIVE_ON_ENTRY. Some code models use the incoming
- value of PIC_FUNCTION_ADDR_REGNUM to set up the global pointer. */
+/* Return the length of instruction INSN. */
+
+static int
+mips16_insn_length (rtx insn)
+{
+ if (JUMP_P (insn))
+ {
+ rtx body = PATTERN (insn);
+ if (GET_CODE (body) == ADDR_VEC)
+ return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
+ if (GET_CODE (body) == ADDR_DIFF_VEC)
+ return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
+ }
+ return get_attr_length (insn);
+}
+
+/* If *X is a symbolic constant that refers to the constant pool, add
+ the constant to POOL and rewrite *X to use the constant's label. */
static void
-mips_extra_live_on_entry (bitmap regs)
+mips16_rewrite_pool_constant (struct mips16_constant_pool *pool, rtx *x)
{
- if (TARGET_USE_GOT && !TARGET_ABSOLUTE_ABICALLS)
- bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
+ rtx base, offset, label;
+
+ split_const (*x, &base, &offset);
+ if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
+ {
+ label = add_constant (pool, get_pool_constant (base),
+ get_pool_mode (base));
+ base = gen_rtx_LABEL_REF (Pmode, label);
+ *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
+ }
}
-/* SImode values are represented as sign-extended to DImode. */
+/* This structure is used to communicate with mips16_rewrite_pool_refs.
+ INSN is the instruction we're rewriting and POOL points to the current
+ constant pool. */
+struct mips16_rewrite_pool_refs_info {
+ rtx insn;
+ struct mips16_constant_pool *pool;
+};
-int
-mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
+/* Rewrite *X so that constant pool references refer to the constant's
+ label instead. DATA points to a mips16_rewrite_pool_refs_info
+ structure. */
+
+static int
+mips16_rewrite_pool_refs (rtx *x, void *data)
{
- if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
- return SIGN_EXTEND;
+ struct mips16_rewrite_pool_refs_info *info = data;
- return UNKNOWN;
+ if (force_to_mem_operand (*x, Pmode))
+ {
+ rtx mem = force_const_mem (GET_MODE (*x), *x);
+ validate_change (info->insn, x, mem, false);
+ }
+
+ if (MEM_P (*x))
+ {
+ mips16_rewrite_pool_constant (info->pool, &XEXP (*x, 0));
+ return -1;
+ }
+
+ if (TARGET_MIPS16_TEXT_LOADS)
+ mips16_rewrite_pool_constant (info->pool, x);
+
+ return GET_CODE (*x) == CONST ? -1 : 0;
+}
+
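The -1/0 return values above follow the for_each_rtx convention as used here: 0 keeps walking into sub-expressions, while -1 skips the children of the node just handled (a MEM whose address has already been rewritten, or a CONST). A tiny standalone model of that contract on a toy expression tree (hypothetical toy_* types, not GCC's rtl):

#include <stdio.h>
#include <string.h>

/* A toy expression node: a kind tag and up to two children.  */
struct toy_node {
  const char *kind;
  struct toy_node *child[2];
};

/* Callback contract modeled on for_each_rtx: return 0 to continue into
   the children, -1 to skip them.  */
typedef int (*toy_fn) (struct toy_node *);

static void
toy_walk (struct toy_node *node, toy_fn fn)
{
  int i;

  if (node == NULL)
    return;
  if (fn (node) == -1)
    return;			/* -1: do not descend further.  */
  for (i = 0; i < 2; i++)
    toy_walk (node->child[i], fn);
}

/* Print each node, but treat "mem" like the MEM case above: handle it
   and then skip its address sub-tree.  */
static int
toy_visit (struct toy_node *node)
{
  printf ("%s\n", node->kind);
  return strcmp (node->kind, "mem") == 0 ? -1 : 0;
}

int
main (void)
{
  struct toy_node addr = { "symbol_ref", { NULL, NULL } };
  struct toy_node mem = { "mem", { &addr, NULL } };
  struct toy_node set = { "set", { &mem, NULL } };

  toy_walk (&set, toy_visit);	/* Prints "set" and "mem" only.  */
  return 0;
}
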
+/* Build MIPS16 constant pools. */
+
+static void
+mips16_lay_out_constants (void)
+{
+ struct mips16_constant_pool pool;
+ struct mips16_rewrite_pool_refs_info info;
+ rtx insn, barrier;
+
+ if (!TARGET_MIPS16_PCREL_LOADS)
+ return;
+
+ barrier = 0;
+ memset (&pool, 0, sizeof (pool));
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ {
+ /* Rewrite constant pool references in INSN. */
+ if (INSN_P (insn))
+ {
+ info.insn = insn;
+ info.pool = &pool;
+ for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &info);
+ }
+
+ pool.insn_address += mips16_insn_length (insn);
+
+ if (pool.first != NULL)
+ {
+ /* If there are no natural barriers between the first user of
+ the pool and the highest acceptable address, we'll need to
+ create a new instruction to jump around the constant pool.
+ In the worst case, this instruction will be 4 bytes long.
+
+ If it's too late to do this transformation after INSN,
+ do it immediately before INSN. */
+ if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
+ {
+ rtx label, jump;
+
+ label = gen_label_rtx ();
+
+ jump = emit_jump_insn_before (gen_jump (label), insn);
+ JUMP_LABEL (jump) = label;
+ LABEL_NUSES (label) = 1;
+ barrier = emit_barrier_after (jump);
+
+ emit_label_after (label, barrier);
+ pool.insn_address += 4;
+ }
+
+ /* See whether the constant pool is now out of range of the first
+ user. If so, output the constants after the previous barrier.
+ Note that any instructions between BARRIER and INSN (inclusive)
+ will use negative offsets to refer to the pool. */
+ if (pool.insn_address > pool.highest_address)
+ {
+ dump_constants (pool.first, barrier);
+ pool.first = NULL;
+ barrier = 0;
+ }
+ else if (BARRIER_P (insn))
+ barrier = insn;
+ }
+ }
+ dump_constants (pool.first, get_last_insn ());
}
-/* MIPS implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
+/* A temporary variable used by for_each_rtx callbacks, etc. */
+static rtx mips_sim_insn;
+
+/* A structure representing the state of the processor pipeline.
+ Used by the mips_sim_* family of functions. */
+struct mips_sim {
+ /* The maximum number of instructions that can be issued in a cycle.
+ (Caches mips_issue_rate.) */
+ unsigned int issue_rate;
+
+ /* The current simulation time. */
+ unsigned int time;
+
+ /* How many more instructions can be issued in the current cycle. */
+ unsigned int insns_left;
+
+ /* LAST_SET[X].INSN is the last instruction to set register X.
+ LAST_SET[X].TIME is the time at which that instruction was issued.
+ INSN is null if no instruction has yet set register X. */
+ struct {
+ rtx insn;
+ unsigned int time;
+ } last_set[FIRST_PSEUDO_REGISTER];
+
+ /* The pipeline's current DFA state. */
+ state_t dfa_state;
+};
+
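LAST_SET acts as a per-register scoreboard: an instruction that reads register X cannot issue before last_set[X].time plus the latency of the writing instruction, which is exactly what mips_sim_wait_reg enforces below. A minimal standalone sketch of that rule with made-up latencies (not the DFA-based simulator itself):

#include <stdio.h>

#define TOY_NUM_REGS 4

/* When was each register last written, and with what latency?  */
struct toy_scoreboard {
  unsigned int write_time[TOY_NUM_REGS];
  unsigned int latency[TOY_NUM_REGS];
};

/* Earliest cycle at which a reader of REG can issue.  */
static unsigned int
toy_ready_time (const struct toy_scoreboard *sb, unsigned int reg)
{
  return sb->write_time[reg] + sb->latency[reg];
}

int
main (void)
{
  struct toy_scoreboard sb = { { 0 }, { 0 } };
  unsigned int now = 0;

  /* Cycle 0: a three-cycle multiply writes register 2.  */
  sb.write_time[2] = now;
  sb.latency[2] = 3;

  /* The next instruction reads register 2: stall until cycle 3.  */
  now++;
  while (now < toy_ready_time (&sb, 2))
    now++;

  printf ("dependent instruction issues at cycle %u\n", now);  /* 3 */
  return 0;
}
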
+/* Reset STATE to the initial simulation state. */
static void
-mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
+mips_sim_reset (struct mips_sim *state)
{
- switch (size)
+ state->time = 0;
+ state->insns_left = state->issue_rate;
+ memset (&state->last_set, 0, sizeof (state->last_set));
+ state_reset (state->dfa_state);
+}
+
+/* Initialize STATE before its first use. DFA_STATE points to an
+ allocated but uninitialized DFA state. */
+
+static void
+mips_sim_init (struct mips_sim *state, state_t dfa_state)
+{
+ state->issue_rate = mips_issue_rate ();
+ state->dfa_state = dfa_state;
+ mips_sim_reset (state);
+}
+
+/* Advance STATE by one clock cycle. */
+
+static void
+mips_sim_next_cycle (struct mips_sim *state)
+{
+ state->time++;
+ state->insns_left = state->issue_rate;
+ state_transition (state->dfa_state, 0);
+}
+
+/* Advance simulation state STATE until instruction INSN can read
+ register REG. */
+
+static void
+mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
+{
+ unsigned int i;
+
+ for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
+ if (state->last_set[REGNO (reg) + i].insn != 0)
+ {
+ unsigned int t;
+
+ t = state->last_set[REGNO (reg) + i].time;
+ t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
+ while (state->time < t)
+ mips_sim_next_cycle (state);
+ }
+}
+
+/* A for_each_rtx callback. If *X is a register, advance simulation state
+ DATA until mips_sim_insn can read the register's value. */
+
+static int
+mips_sim_wait_regs_2 (rtx *x, void *data)
+{
+ if (REG_P (*x))
+ mips_sim_wait_reg (data, mips_sim_insn, *x);
+ return 0;
+}
+
+/* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
+
+static void
+mips_sim_wait_regs_1 (rtx *x, void *data)
+{
+ for_each_rtx (x, mips_sim_wait_regs_2, data);
+}
+
+/* Advance simulation state STATE until all of INSN's register
+ dependencies are satisfied. */
+
+static void
+mips_sim_wait_regs (struct mips_sim *state, rtx insn)
+{
+ mips_sim_insn = insn;
+ note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
+}
+
+/* Advance simulation state STATE until the units required by
+ instruction INSN are available. */
+
+static void
+mips_sim_wait_units (struct mips_sim *state, rtx insn)
+{
+ state_t tmp_state;
+
+ tmp_state = alloca (state_size ());
+ while (state->insns_left == 0
+ || (memcpy (tmp_state, state->dfa_state, state_size ()),
+ state_transition (tmp_state, insn) >= 0))
+ mips_sim_next_cycle (state);
+}
+
+/* Advance simulation state STATE until INSN is ready to issue. */
+
+static void
+mips_sim_wait_insn (struct mips_sim *state, rtx insn)
+{
+ mips_sim_wait_regs (state, insn);
+ mips_sim_wait_units (state, insn);
+}
+
+/* mips_sim_insn has just set X. Update the LAST_SET array
+ in simulation state DATA. */
+
+static void
+mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
+{
+ struct mips_sim *state;
+ unsigned int i;
+
+ state = data;
+ if (REG_P (x))
+ for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
+ {
+ state->last_set[REGNO (x) + i].insn = mips_sim_insn;
+ state->last_set[REGNO (x) + i].time = state->time;
+ }
+}
+
+/* Issue instruction INSN in scheduler state STATE. Assume that INSN
+ can issue immediately (i.e., that mips_sim_wait_insn has already
+ been called). */
+
+static void
+mips_sim_issue_insn (struct mips_sim *state, rtx insn)
+{
+ state_transition (state->dfa_state, insn);
+ state->insns_left--;
+
+ mips_sim_insn = insn;
+ note_stores (PATTERN (insn), mips_sim_record_set, state);
+}
+
+/* Simulate issuing a NOP in state STATE. */
+
+static void
+mips_sim_issue_nop (struct mips_sim *state)
+{
+ if (state->insns_left == 0)
+ mips_sim_next_cycle (state);
+ state->insns_left--;
+}
+
+/* Update simulation state STATE so that it's ready to accept the instruction
+ after INSN. INSN should be part of the main rtl chain, not a member of a
+ SEQUENCE. */
+
+static void
+mips_sim_finish_insn (struct mips_sim *state, rtx insn)
+{
+ /* If INSN is a jump with an implicit delay slot, simulate a nop. */
+ if (JUMP_P (insn))
+ mips_sim_issue_nop (state);
+
+ switch (GET_CODE (SEQ_BEGIN (insn)))
{
- case 4:
- fputs ("\t.dtprelword\t", file);
+ case CODE_LABEL:
+ case CALL_INSN:
+ /* We can't predict the processor state after a call or label. */
+ mips_sim_reset (state);
break;
- case 8:
- fputs ("\t.dtpreldword\t", file);
+ case JUMP_INSN:
+ /* The delay slots of branch likely instructions are only executed
+ when the branch is taken. Therefore, if the caller has simulated
+ the delay slot instruction, STATE does not really reflect the state
+ of the pipeline for the instruction after the delay slot. Also,
+ branch likely instructions tend to incur a penalty when not taken,
+ so there will probably be an extra delay between the branch and
+ the instruction after the delay slot. */
+ if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
+ mips_sim_reset (state);
break;
default:
- gcc_unreachable ();
+ break;
}
- output_addr_const (file, x);
- fputs ("+0x8000", file);
}
+
+/* The VR4130 pipeline issues aligned pairs of instructions together,
+ but it stalls the second instruction if it depends on the first.
+ In order to cut down the amount of logic required, this dependence
+ check is not based on a full instruction decode. Instead, any non-SPECIAL
+ instruction is assumed to modify the register specified by bits 20-16
+ (which is usually the "rt" field).
-/* Implement TARGET_DWARF_REGISTER_SPAN. */
+ In beq, beql, bne and bnel instructions, the rt field is actually an
+ input, so we can end up with a false dependence between the branch
+ and its delay slot. If this situation occurs in instruction INSN,
+ try to avoid it by swapping rs and rt. */
-static rtx
-mips_dwarf_register_span (rtx reg)
+static void
+vr4130_avoid_branch_rt_conflict (rtx insn)
{
- rtx high, low;
+ rtx first, second;
+
+ first = SEQ_BEGIN (insn);
+ second = SEQ_END (insn);
+ if (JUMP_P (first)
+ && NONJUMP_INSN_P (second)
+ && GET_CODE (PATTERN (first)) == SET
+ && GET_CODE (SET_DEST (PATTERN (first))) == PC
+ && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
+ {
+ /* Check for the right kind of condition. */
+ rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
+ if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
+ && REG_P (XEXP (cond, 0))
+ && REG_P (XEXP (cond, 1))
+ && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
+ && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
+ {
+ /* SECOND mentions the rt register but not the rs register. */
+ rtx tmp = XEXP (cond, 0);
+ XEXP (cond, 0) = XEXP (cond, 1);
+ XEXP (cond, 1) = tmp;
+ }
+ }
+}
+
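Concretely, the swap above is legal because eq/ne comparisons are symmetric: if the delay-slot instruction reads the register in the rt slot of a beq/bne but not the one in rs, exchanging the two operands removes the dependence the VR4130 would otherwise assume. A small illustrative model (toy structures standing in for the rtl, not part of the patch):

#include <stdbool.h>
#include <stdio.h>

/* A toy eq/ne branch condition: just the two operand register numbers.  */
struct toy_cond { int rs, rt; };

/* Swap rs and rt if the delay slot reads rt but not rs, mirroring
   vr4130_avoid_branch_rt_conflict.  */
static void
toy_avoid_rt_conflict (struct toy_cond *cond, bool slot_reads_rs,
		       bool slot_reads_rt)
{
  if (slot_reads_rt && !slot_reads_rs)
    {
      int tmp = cond->rs;
      cond->rs = cond->rt;
      cond->rt = tmp;
    }
}

int
main (void)
{
  /* beq $4, $5, target  followed by a delay slot that reads $5.  */
  struct toy_cond cond = { 4, 5 };

  toy_avoid_rt_conflict (&cond, false, true);
  printf ("beq $%d, $%d, target\n", cond.rs, cond.rt);	/* beq $5, $4 */
  return 0;
}
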
+/* Implement -mvr4130-align. Go through each basic block and simulate the
+ processor pipeline. If we find that a pair of instructions could execute
+ in parallel, and the first of those instructions is not 8-byte aligned,
+ insert a nop to make it aligned. */
+
+static void
+vr4130_align_insns (void)
+{
+ struct mips_sim state;
+ rtx insn, subinsn, last, last2, next;
+ bool aligned_p;
+
+ dfa_start ();
+
+ /* LAST is the last instruction before INSN to have a nonzero length.
+ LAST2 is the last such instruction before LAST. */
+ last = 0;
+ last2 = 0;
+
+ /* ALIGNED_P is true if INSN is known to be at an aligned address. */
+ aligned_p = true;
+
+ mips_sim_init (&state, alloca (state_size ()));
+ for (insn = get_insns (); insn != 0; insn = next)
+ {
+ unsigned int length;
+
+ next = NEXT_INSN (insn);
+
+ /* See the comment above vr4130_avoid_branch_rt_conflict for details.
+ This isn't really related to the alignment pass, but we do it on
+ the fly to avoid a separate instruction walk. */
+ vr4130_avoid_branch_rt_conflict (insn);
+
+ if (USEFUL_INSN_P (insn))
+ FOR_EACH_SUBINSN (subinsn, insn)
+ {
+ mips_sim_wait_insn (&state, subinsn);
+
+ /* If we want this instruction to issue in parallel with the
+ previous one, make sure that the previous instruction is
+ aligned. There are several reasons why this isn't worthwhile
+ when the second instruction is a call:
+
+ - Calls are less likely to be performance critical.
+ - There's a good chance that the delay slot can execute
+ in parallel with the call.
+ - The return address would then be unaligned.
+
+ In general, if we're going to insert a nop between instructions
+ X and Y, it's better to insert it immediately after X. That
+ way, if the nop makes Y aligned, it will also align any labels
+ between X and Y. */
+ if (state.insns_left != state.issue_rate
+ && !CALL_P (subinsn))
+ {
+ if (subinsn == SEQ_BEGIN (insn) && aligned_p)
+ {
+ /* SUBINSN is the first instruction in INSN and INSN is
+ aligned. We want to align the previous instruction
+ instead, so insert a nop between LAST2 and LAST.
+
+ Note that LAST could be either a single instruction
+ or a branch with a delay slot. In the latter case,
+ LAST, like INSN, is already aligned, but the delay
+ slot must have some extra delay that stops it from
+ issuing at the same time as the branch. We therefore
+ insert a nop before the branch in order to align its
+ delay slot. */
+ emit_insn_after (gen_nop (), last2);
+ aligned_p = false;
+ }
+ else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
+ {
+ /* SUBINSN is the delay slot of INSN, but INSN is
+ currently unaligned. Insert a nop between
+ LAST and INSN to align it. */
+ emit_insn_after (gen_nop (), last);
+ aligned_p = true;
+ }
+ }
+ mips_sim_issue_insn (&state, subinsn);
+ }
+ mips_sim_finish_insn (&state, insn);
+
+ /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
+ length = get_attr_length (insn);
+ if (length > 0)
+ {
+ /* If the instruction is an asm statement or multi-instruction
+ mips.md pattern, the length is only an estimate. Insert an
+ 8-byte alignment after it so that the following instructions
+ can be handled correctly. */
+ if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
+ && (recog_memoized (insn) < 0 || length >= 8))
+ {
+ next = emit_insn_after (gen_align (GEN_INT (3)), insn);
+ next = NEXT_INSN (next);
+ mips_sim_next_cycle (&state);
+ aligned_p = true;
+ }
+ else if (length & 4)
+ aligned_p = !aligned_p;
+ last2 = last;
+ last = insn;
+ }
+
+ /* See whether INSN is an aligned label. */
+ if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
+ aligned_p = true;
+ }
+ dfa_finish ();
+}
+
+/* Subroutine of mips_reorg. If there is a hazard between INSN
+ and a previous instruction, avoid it by inserting nops after
+ instruction AFTER.
+
+ *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
+ this point. If *DELAYED_REG is non-null, INSN must wait a cycle
+ before using the value of that register. *HILO_DELAY counts the
+ number of instructions since the last hilo hazard (that is,
+ the number of instructions since the last mflo or mfhi).
+
+ After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
+ for the next instruction.
+
+ LO_REG is an rtx for the LO register, used in dependence checking. */
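+
+/* A worked example of the bookkeeping below, using assumed instructions:
+ an mflo or mfhi resets *HILO_DELAY to 0, so an immediately following
+ instruction that writes LO (a mult or div, say) gets 2 - 0 = 2 nops
+ inserted before it; with one unrelated single-word instruction in
+ between, *HILO_DELAY has reached 1 and only one nop is needed.  */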
+
+static void
+mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
+ rtx *delayed_reg, rtx lo_reg)
+{
+ rtx pattern, set;
+ int nops, ninsns, hazard_set;
+
+ if (!INSN_P (insn))
+ return;
+
+ pattern = PATTERN (insn);
+
+ /* Do not put the whole function in .set noreorder if it contains
+ an asm statement. We don't know whether there will be hazards
+ between the asm statement and the gcc-generated code. */
+ if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
+ cfun->machine->all_noreorder_p = false;
+
+ /* Ignore zero-length instructions (barriers and the like). */
+ ninsns = get_attr_length (insn) / 4;
+ if (ninsns == 0)
+ return;
+
+ /* Work out how many nops are needed. Note that we only care about
+ registers that are explicitly mentioned in the instruction's pattern.
+ It doesn't matter that calls use the argument registers or that they
+ clobber hi and lo. */
+ if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
+ nops = 2 - *hilo_delay;
+ else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
+ nops = 1;
+ else
+ nops = 0;
+
+ /* Insert the nops between this instruction and the previous one.
+ Each new nop takes us further from the last hilo hazard. */
+ *hilo_delay += nops;
+ while (nops-- > 0)
+ emit_insn_after (gen_hazard_nop (), after);
+
+ /* Set up the state for the next instruction. */
+ *hilo_delay += ninsns;
+ *delayed_reg = 0;
+ if (INSN_CODE (insn) >= 0)
+ switch (get_attr_hazard (insn))
+ {
+ case HAZARD_NONE:
+ break;
+
+ case HAZARD_HILO:
+ *hilo_delay = 0;
+ break;
+
+ case HAZARD_DELAY:
+ hazard_set = (int) get_attr_hazard_set (insn);
+ if (hazard_set == 0)
+ set = single_set (insn);
+ else
+ {
+ gcc_assert (GET_CODE (PATTERN (insn)) == PARALLEL);
+ set = XVECEXP (PATTERN (insn), 0, hazard_set - 1);
+ }
+ gcc_assert (set && GET_CODE (set) == SET);
+ *delayed_reg = SET_DEST (set);
+ break;
+ }
+}
+
+
+/* Go through the instruction stream and insert nops where necessary.
+ See if the whole function can then be put into .set noreorder &
+ .set nomacro. */
+
+static void
+mips_avoid_hazards (void)
+{
+ rtx insn, last_insn, lo_reg, delayed_reg;
+ int hilo_delay, i;
+
+ /* Force all instructions to be split into their final form. */
+ split_all_insns_noflow ();
+
+ /* Recalculate instruction lengths without taking nops into account. */
+ cfun->machine->ignore_hazard_length_p = true;
+ shorten_branches (get_insns ());
+
+ cfun->machine->all_noreorder_p = true;
+
+ /* Profiled functions can't be all noreorder because the profiler
+ support uses assembler macros. */
+ if (current_function_profile)
+ cfun->machine->all_noreorder_p = false;
+
+ /* Code compiled with -mfix-vr4120 can't be all noreorder because
+ we rely on the assembler to work around some errata. */
+ if (TARGET_FIX_VR4120)
+ cfun->machine->all_noreorder_p = false;
+
+ /* The same is true for -mfix-vr4130 if we might generate mflo or
+ mfhi instructions. Note that we avoid using mflo and mfhi if
+ the VR4130 macc and dmacc instructions are available instead;
+ see the *mfhilo_{si,di}_macc patterns. */
+ if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
+ cfun->machine->all_noreorder_p = false;
+
+ last_insn = 0;
+ hilo_delay = 2;
+ delayed_reg = 0;
+ lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
+
+ for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
+ if (INSN_P (insn))
+ {
+ if (GET_CODE (PATTERN (insn)) == SEQUENCE)
+ for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
+ mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
+ &hilo_delay, &delayed_reg, lo_reg);
+ else
+ mips_avoid_hazard (last_insn, insn, &hilo_delay,
+ &delayed_reg, lo_reg);
+
+ last_insn = insn;
+ }
+}
+
+
+/* Implement TARGET_MACHINE_DEPENDENT_REORG. */
+
+static void
+mips_reorg (void)
+{
+ mips16_lay_out_constants ();
+ if (TARGET_EXPLICIT_RELOCS)
+ {
+ if (mips_flag_delayed_branch)
+ dbr_schedule (get_insns ());
+ mips_avoid_hazards ();
+ if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
+ vr4130_align_insns ();
+ }
+}
+
+/* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
+ in order to avoid duplicating too much logic from elsewhere. */
+
+static void
+mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
+ tree function)
+{
+ rtx this, temp1, temp2, insn, fnaddr;
+ bool use_sibcall_p;
+
+ /* Pretend to be a post-reload pass while generating rtl. */
+ reload_completed = 1;
+
+ /* Mark the end of the (empty) prologue. */
+ emit_note (NOTE_INSN_PROLOGUE_END);
+
+ /* Determine if we can use a sibcall to call FUNCTION directly. */
+ fnaddr = XEXP (DECL_RTL (function), 0);
+ use_sibcall_p = (mips_function_ok_for_sibcall (function, NULL)
+ && const_call_insn_operand (fnaddr, Pmode));
+
+ /* Determine if we need to load FNADDR from the GOT. */
+ if (!use_sibcall_p)
+ switch (mips_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA))
+ {
+ case SYMBOL_GOT_PAGE_OFST:
+ case SYMBOL_GOT_DISP:
+ /* Pick a global pointer. Use a call-clobbered register if
+ TARGET_CALL_SAVED_GP. */
+ cfun->machine->global_pointer =
+ TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
+ SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
+
+ /* Set up the global pointer for n32 or n64 abicalls. */
+ mips_emit_loadgp ();
+ break;
+
+ default:
+ break;
+ }
+
+ /* We need two temporary registers in some cases. */
+ temp1 = gen_rtx_REG (Pmode, 2);
+ temp2 = gen_rtx_REG (Pmode, 3);
+
+ /* Find out which register contains the "this" pointer. */
+ if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
+ this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
+ else
+ this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
+
+ /* Add DELTA to THIS. */
+ if (delta != 0)
+ {
+ rtx offset = GEN_INT (delta);
+ if (!SMALL_OPERAND (delta))
+ {
+ mips_emit_move (temp1, offset);
+ offset = temp1;
+ }
+ emit_insn (gen_add3_insn (this, this, offset));
+ }
+
+ /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
+ if (vcall_offset != 0)
+ {
+ rtx addr;
+
+ /* Set TEMP1 to *THIS. */
+ mips_emit_move (temp1, gen_rtx_MEM (Pmode, this));
+
+ /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
+ addr = mips_add_offset (temp2, temp1, vcall_offset);
+
+ /* Load the offset and add it to THIS. */
+ mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
+ emit_insn (gen_add3_insn (this, this, temp1));
+ }
+
+ /* Jump to the target function. Use a sibcall if direct jumps are
+ allowed, otherwise load the address into a register first. */
+ if (use_sibcall_p)
+ {
+ insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
+ SIBLING_CALL_P (insn) = 1;
+ }
+ else
+ {
+ /* This is messy. gas treats "la $25,foo" as part of a call
+ sequence and may allow a global "foo" to be lazily bound.
+ The general move patterns therefore reject this combination.
+
+ In this context, lazy binding would actually be OK
+ for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
+ TARGET_CALL_SAVED_GP; see mips_load_call_address.
+ We must therefore load the address via a temporary
+ register if mips_dangerous_for_la25_p.
+
+ If we jump to the temporary register rather than $25, the assembler
+ can use the move insn to fill the jump's delay slot. */
+ if (TARGET_USE_PIC_FN_ADDR_REG
+ && !mips_dangerous_for_la25_p (fnaddr))
+ temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
+ mips_load_call_address (temp1, fnaddr, true);
+
+ if (TARGET_USE_PIC_FN_ADDR_REG
+ && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
+ mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
+ emit_jump_insn (gen_indirect_jump (temp1));
+ }
+
+ /* Run just enough of rest_of_compilation. This sequence was
+ "borrowed" from alpha.c. */
+ insn = get_insns ();
+ insn_locators_alloc ();
+ split_all_insns_noflow ();
+ mips16_lay_out_constants ();
+ shorten_branches (insn);
+ final_start_function (insn, file, 1);
+ final (insn, file, 1);
+ final_end_function ();
+
+ /* Clean up the vars set above. Note that final_end_function resets
+ the global pointer for us. */
+ reload_completed = 0;
+}
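+
+/* As a small illustration of the thunk code above: with DELTA == 16,
+ VCALL_OFFSET == 0 and a FUNCTION that can be reached by a direct
+ sibcall, the generated rtl is just an add of 16 to the "this" register
+ followed by a sibcall to FUNCTION; the VCALL_OFFSET load and the
+ GOT/$25 handling only come into play for the virtual and PIC cases
+ described above.  The constants are arbitrary examples.  */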
+
+static GTY(()) int was_mips16_p = -1;
+
+/* Set up the target-dependent global state so that it matches the
+ current function's ISA mode. */
+
+static void
+mips_set_mips16_mode (int mips16_p)
+{
+ if (mips16_p == was_mips16_p)
+ return;
+
+ /* Restore base settings of various flags. */
+ target_flags = mips_base_target_flags;
+ flag_delayed_branch = mips_flag_delayed_branch;
+ flag_schedule_insns = mips_base_schedule_insns;
+ flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
+ flag_move_loop_invariants = mips_base_move_loop_invariants;
+ align_loops = mips_base_align_loops;
+ align_jumps = mips_base_align_jumps;
+ align_functions = mips_base_align_functions;
+
+ if (mips16_p)
+ {
+ /* Select mips16 instruction set. */
+ target_flags |= MASK_MIPS16;
+
+ /* Don't run the scheduler before reload, since it tends to
+ increase register pressure. */
+ flag_schedule_insns = 0;
+
+ /* Don't do hot/cold partitioning. The constant layout code expects
+ the whole function to be in a single section. */
+ flag_reorder_blocks_and_partition = 0;
+
+ /* Don't move loop invariants, because it tends to increase
+ register pressure. It also introduces an extra move in cases
+ where the constant is the first operand in a two-operand binary
+ instruction, or when it forms a register argument to a function
+ call. */
+ flag_move_loop_invariants = 0;
+
+ /* Silently disable -mexplicit-relocs since it doesn't apply
+ to mips16 code.  Even so, it would be overly pedantic to warn
+ about "-mips16 -mexplicit-relocs", especially given that
+ we use a %gprel() operator. */
+ target_flags &= ~MASK_EXPLICIT_RELOCS;
+
+ /* Experiments suggest we get the best overall results from using
+ the range of an unextended lw or sw. Code that makes heavy use
+ of byte or short accesses can do better with ranges of 0...31
+ and 0...63 respectively, but most code is sensitive to the range
+ of lw and sw instead. */
+ targetm.min_anchor_offset = 0;
+ targetm.max_anchor_offset = 127;
+
+ if (flag_pic || TARGET_ABICALLS)
+ sorry ("MIPS16 PIC");
+ }
+ else
+ {
+ /* Reset to select base non-mips16 ISA. */
+ target_flags &= ~MASK_MIPS16;
+
+ /* When using explicit relocs, we call dbr_schedule from within
+ mips_reorg. */
+ if (TARGET_EXPLICIT_RELOCS)
+ flag_delayed_branch = 0;
+
+ /* Provide default values for align_* for 64-bit targets. */
+ if (TARGET_64BIT)
+ {
+ if (align_loops == 0)
+ align_loops = 8;
+ if (align_jumps == 0)
+ align_jumps = 8;
+ if (align_functions == 0)
+ align_functions = 8;
+ }
+
+ targetm.min_anchor_offset = -32768;
+ targetm.max_anchor_offset = 32767;
+ }
+
+ /* (Re)initialize mips target internals for new ISA. */
+ mips_init_split_addresses ();
+ mips_init_relocs ();
+
+ if (was_mips16_p >= 0)
+ /* Reinitialize target-dependent state. */
+ target_reinit ();
+
+ was_mips16_p = TARGET_MIPS16;
+}
+
+/* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
+ function should use the MIPS16 ISA and switch modes accordingly. */
+
+static void
+mips_set_current_function (tree fndecl)
+{
+ mips_set_mips16_mode (mips_use_mips16_mode_p (fndecl));
+}
+
+/* Allocate a chunk of memory for per-function machine-dependent data. */
+static struct machine_function *
+mips_init_machine_status (void)
+{
+ return ((struct machine_function *)
+ ggc_alloc_cleared (sizeof (struct machine_function)));
+}
+
+/* Return the processor associated with the given ISA level, or null
+ if the ISA isn't valid. */
+
+static const struct mips_cpu_info *
+mips_cpu_info_from_isa (int isa)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
+ if (mips_cpu_info_table[i].isa == isa)
+ return mips_cpu_info_table + i;
+
+ return 0;
+}
+
+/* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
+ with a final "000" replaced by "k". Ignore case.
+
+ Note: this function is shared between GCC and GAS. */
+
+static bool
+mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
+{
+ while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
+ given++, canonical++;
+
+ return ((*given == 0 && *canonical == 0)
+ || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
+}
+
+
+/* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
+ CPU name. We've traditionally allowed a lot of variation here.
+
+ Note: this function is shared between GCC and GAS. */
+
+static bool
+mips_matching_cpu_name_p (const char *canonical, const char *given)
+{
+ /* First see if the name matches exactly, or with a final "000"
+ turned into "k". */
+ if (mips_strict_matching_cpu_name_p (canonical, given))
+ return true;
+
+ /* If not, try comparing based on numerical designation alone.
+ See if GIVEN is an unadorned number, or 'r' followed by a number. */
+ if (TOLOWER (*given) == 'r')
+ given++;
+ if (!ISDIGIT (*given))
+ return false;
+
+ /* Skip over some well-known prefixes in the canonical name,
+ hoping to find a number there too. */
+ if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
+ canonical += 2;
+ else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
+ canonical += 2;
+ else if (TOLOWER (canonical[0]) == 'r')
+ canonical += 1;
+
+ return mips_strict_matching_cpu_name_p (canonical, given);
+}
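+
+/* Some examples of the matching rules above, assuming the processor
+ table contains entries named "vr4130" and "r10000": "vr4130", "r4130"
+ and "4130" all match the former, while "r10000", "10000" and "r10k"
+ all match the latter (the final "000" matching "k").  */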
+
+
+/* Return the mips_cpu_info entry for the processor or ISA given
+ by CPU_STRING. Return null if the string isn't recognized.
+
+ A similar function exists in GAS. */
+
+static const struct mips_cpu_info *
+mips_parse_cpu (const char *cpu_string)
+{
+ unsigned int i;
+ const char *s;
+
+ /* In the past, we allowed upper-case CPU names, but it doesn't
+ work well with the multilib machinery. */
+ for (s = cpu_string; *s != 0; s++)
+ if (ISUPPER (*s))
+ {
+ warning (0, "the cpu name must be lower case");
+ break;
+ }
+
+ /* 'from-abi' selects the most compatible architecture for the given
+ ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
+ EABIs, we have to decide whether we're using the 32-bit or 64-bit
+ version. Look first at the -mgp options, if given, otherwise base
+ the choice on MASK_64BIT in TARGET_DEFAULT. */
+ if (strcasecmp (cpu_string, "from-abi") == 0)
+ return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
+ : ABI_NEEDS_64BIT_REGS ? 3
+ : (TARGET_64BIT ? 3 : 1));
+
+ /* 'default' has traditionally been a no-op. Probably not very useful. */
+ if (strcasecmp (cpu_string, "default") == 0)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
+ if (mips_matching_cpu_name_p (mips_cpu_info_table[i].name, cpu_string))
+ return mips_cpu_info_table + i;
+
+ return 0;
+}
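+
+/* For example, "-march=from-abi" with an ABI that needs 32-bit registers
+ resolves to the MIPS I entry (ISA level 1), while an ABI that needs
+ 64-bit registers resolves to MIPS III (ISA level 3), matching the ISA
+ numbers passed to mips_cpu_info_from_isa above.  */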
+
+
+/* Set up globals to generate code for the ISA or processor
+ described by INFO. */
+
+static void
+mips_set_architecture (const struct mips_cpu_info *info)
+{
+ if (info != 0)
+ {
+ mips_arch_info = info;
+ mips_arch = info->cpu;
+ mips_isa = info->isa;
+ }
+}
+
+
+/* Likewise for tuning. */
+
+static void
+mips_set_tune (const struct mips_cpu_info *info)
+{
+ if (info != 0)
+ {
+ mips_tune_info = info;
+ mips_tune = info->cpu;
+ }
+}
+
+/* Implement TARGET_HANDLE_OPTION. */
+
+static bool
+mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
+{
+ switch (code)
+ {
+ case OPT_mabi_:
+ if (strcmp (arg, "32") == 0)
+ mips_abi = ABI_32;
+ else if (strcmp (arg, "o64") == 0)
+ mips_abi = ABI_O64;
+ else if (strcmp (arg, "n32") == 0)
+ mips_abi = ABI_N32;
+ else if (strcmp (arg, "64") == 0)
+ mips_abi = ABI_64;
+ else if (strcmp (arg, "eabi") == 0)
+ mips_abi = ABI_EABI;
+ else
+ return false;
+ return true;
+
+ case OPT_march_:
+ case OPT_mtune_:
+ return mips_parse_cpu (arg) != 0;
+
+ case OPT_mips:
+ mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
+ return mips_isa_info != 0;
+
+ case OPT_mno_flush_func:
+ mips_cache_flush_func = NULL;
+ return true;
+
+ case OPT_mcode_readable_:
+ if (strcmp (arg, "yes") == 0)
+ mips_code_readable = CODE_READABLE_YES;
+ else if (strcmp (arg, "pcrel") == 0)
+ mips_code_readable = CODE_READABLE_PCREL;
+ else if (strcmp (arg, "no") == 0)
+ mips_code_readable = CODE_READABLE_NO;
+ else
+ return false;
+ return true;
+
+ default:
+ return true;
+ }
+}
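+
+/* For illustration, "-mabi=n32" sets mips_abi to ABI_N32 above, while a
+ bare "-mips32r2" reaches the OPT_mips case with ARG == "32r2", so the
+ string handed to mips_parse_cpu is "mips32r2".  These particular
+ option values are examples only.  */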
+
+/* Implement OVERRIDE_OPTIONS: select the architecture, tuning and register
+ sizes, set up the threshold for data to go into the small data area
+ instead of the normal data area, and detect any conflicts in the
+ switches.  */
+
+void
+override_options (void)
+{
+ int i, start, regno;
enum machine_mode mode;
- /* By default, GCC maps increasing register numbers to increasing
- memory locations, but paired FPRs are always little-endian,
- regardless of the prevailing endianness. */
- mode = GET_MODE (reg);
- if (FP_REG_P (REGNO (reg))
- && TARGET_BIG_ENDIAN
- && MAX_FPRS_PER_FMT > 1
- && GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
+#ifdef SUBTARGET_OVERRIDE_OPTIONS
+ SUBTARGET_OVERRIDE_OPTIONS;
+#endif
+
+ mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
+
+ /* The following code determines the architecture and register size.
+ Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
+ The GAS and GCC code should be kept in sync as much as possible. */
+
+ if (mips_arch_string != 0)
+ mips_set_architecture (mips_parse_cpu (mips_arch_string));
+
+ if (mips_isa_info != 0)
{
- gcc_assert (GET_MODE_SIZE (mode) == UNITS_PER_HWFPVALUE);
- high = mips_subword (reg, true);
- low = mips_subword (reg, false);
- return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, high, low));
+ if (mips_arch_info == 0)
+ mips_set_architecture (mips_isa_info);
+ else if (mips_arch_info->isa != mips_isa_info->isa)
+ error ("-%s conflicts with the other architecture options, "
+ "which specify a %s processor",
+ mips_isa_info->name,
+ mips_cpu_info_from_isa (mips_arch_info->isa)->name);
}
- return NULL_RTX;
+ if (mips_arch_info == 0)
+ {
+#ifdef MIPS_CPU_STRING_DEFAULT
+ mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
+#else
+ mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
+#endif
+ }
+
+ if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
+ error ("-march=%s is not compatible with the selected ABI",
+ mips_arch_info->name);
+
+ /* Optimize for mips_arch, unless -mtune selects a different processor. */
+ if (mips_tune_string != 0)
+ mips_set_tune (mips_parse_cpu (mips_tune_string));
+
+ if (mips_tune_info == 0)
+ mips_set_tune (mips_arch_info);
+
+ /* Set cost structure for the processor. */
+ if (optimize_size)
+ mips_cost = &mips_rtx_cost_optimize_size;
+ else
+ mips_cost = &mips_rtx_cost_data[mips_tune];
+
+ /* If the user hasn't specified a branch cost, use the processor's
+ default. */
+ if (mips_branch_cost == 0)
+ mips_branch_cost = mips_cost->branch_cost;
+
+ if ((target_flags_explicit & MASK_64BIT) != 0)
+ {
+ /* The user specified the size of the integer registers. Make sure
+ it agrees with the ABI and ISA. */
+ if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
+ error ("-mgp64 used with a 32-bit processor");
+ else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
+ error ("-mgp32 used with a 64-bit ABI");
+ else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
+ error ("-mgp64 used with a 32-bit ABI");
+ }
+ else
+ {
+ /* Infer the integer register size from the ABI and processor.
+ Restrict ourselves to 32-bit registers if that's all the
+ processor has, or if the ABI cannot handle 64-bit registers. */
+ if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
+ target_flags &= ~MASK_64BIT;
+ else
+ target_flags |= MASK_64BIT;
+ }
+
+ if ((target_flags_explicit & MASK_FLOAT64) != 0)
+ {
+ /* Really, -mfp32 and -mfp64 are ornamental options. There's
+ only one right answer here. */
+ if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
+ error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
+ else if (!TARGET_64BIT && TARGET_FLOAT64
+ && !(ISA_HAS_MXHC1 && mips_abi == ABI_32))
+ error ("-mgp32 and -mfp64 can only be combined if the target"
+ " supports the mfhc1 and mthc1 instructions");
+ else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
+ error ("unsupported combination: %s", "-mfp64 -msingle-float");
+ }
+ else
+ {
+ /* -msingle-float selects 32-bit float registers. Otherwise the
+ float registers should be the same size as the integer ones. */
+ if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
+ target_flags |= MASK_FLOAT64;
+ else
+ target_flags &= ~MASK_FLOAT64;
+ }
+
+ /* End of code shared with GAS. */
+
+ if ((target_flags_explicit & MASK_LONG64) == 0)
+ {
+ if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
+ target_flags |= MASK_LONG64;
+ else
+ target_flags &= ~MASK_LONG64;
+ }
+
+ if (!TARGET_OLDABI)
+ flag_pcc_struct_return = 0;
+
+ if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
+ {
+ /* If neither -mbranch-likely nor -mno-branch-likely was given
+ on the command line, set MASK_BRANCHLIKELY based on the target
+ architecture and tuning flags. Annulled delay slots are a
+ size win, so we only consider the processor-specific tuning
+ for !optimize_size. */
+ if (ISA_HAS_BRANCHLIKELY
+ && (optimize_size
+ || (mips_tune_info->tune_flags & PTF_AVOID_BRANCHLIKELY) == 0))
+ target_flags |= MASK_BRANCHLIKELY;
+ else
+ target_flags &= ~MASK_BRANCHLIKELY;
+ }
+ else if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
+ warning (0, "the %qs architecture does not support branch-likely"
+ " instructions", mips_arch_info->name);
+
+ /* The effect of -mabicalls isn't defined for the EABI. */
+ if (mips_abi == ABI_EABI && TARGET_ABICALLS)
+ {
+ error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
+ target_flags &= ~MASK_ABICALLS;
+ }
+
+ /* MIPS16 cannot generate PIC yet. */
+ if (TARGET_MIPS16 && (flag_pic || TARGET_ABICALLS))
+ {
+ sorry ("MIPS16 PIC");
+ target_flags &= ~MASK_ABICALLS;
+ flag_pic = flag_pie = flag_shlib = 0;
+ }
+
+ if (TARGET_ABICALLS)
+ /* We need to set flag_pic for executables as well as DSOs
+ because we may reference symbols that are not defined in
+ the final executable. (MIPS does not use things like
+ copy relocs, for example.)
+
+ Also, there is a body of code that uses __PIC__ to distinguish
+ between -mabicalls and -mno-abicalls code. */
+ flag_pic = 1;
+
+ /* -mvr4130-align is a "speed over size" optimization: it usually produces
+ faster code, but at the expense of more nops. Enable it at -O3 and
+ above. */
+ if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
+ target_flags |= MASK_VR4130_ALIGN;
+
+ /* Prefer a call to memcpy over inline code when optimizing for size,
+ though see MOVE_RATIO in mips.h. */
+ if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
+ target_flags |= MASK_MEMCPY;
+
+ /* If we have a nonzero small-data limit, check that the -mgpopt
+ setting is consistent with the other target flags. */
+ if (mips_section_threshold > 0)
+ {
+ if (!TARGET_GPOPT)
+ {
+ if (!TARGET_MIPS16 && !TARGET_EXPLICIT_RELOCS)
+ error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
+
+ TARGET_LOCAL_SDATA = false;
+ TARGET_EXTERN_SDATA = false;
+ }
+ else
+ {
+ if (TARGET_VXWORKS_RTP)
+ warning (0, "cannot use small-data accesses for %qs", "-mrtp");
+
+ if (TARGET_ABICALLS)
+ warning (0, "cannot use small-data accesses for %qs",
+ "-mabicalls");
+ }
+ }
+
+#ifdef MIPS_TFMODE_FORMAT
+ REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
+#endif
+
+ /* Make sure that the user didn't turn off paired single support when
+ MIPS-3D support is requested. */
+ if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
+ && !TARGET_PAIRED_SINGLE_FLOAT)
+ error ("-mips3d requires -mpaired-single");
+
+ /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
+ if (TARGET_MIPS3D)
+ target_flags |= MASK_PAIRED_SINGLE_FLOAT;
+
+ /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
+ and TARGET_HARD_FLOAT_ABI are both true. */
+ if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI))
+ error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
+
+ /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
+ enabled. */
+ if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
+ error ("-mips3d/-mpaired-single must be used with -mips64");
+
+ /* If TARGET_DSPR2, enable MASK_DSP. */
+ if (TARGET_DSPR2)
+ target_flags |= MASK_DSP;
+
+ mips_print_operand_punct['?'] = 1;
+ mips_print_operand_punct['#'] = 1;
+ mips_print_operand_punct['/'] = 1;
+ mips_print_operand_punct['&'] = 1;
+ mips_print_operand_punct['!'] = 1;
+ mips_print_operand_punct['*'] = 1;
+ mips_print_operand_punct['@'] = 1;
+ mips_print_operand_punct['.'] = 1;
+ mips_print_operand_punct['('] = 1;
+ mips_print_operand_punct[')'] = 1;
+ mips_print_operand_punct['['] = 1;
+ mips_print_operand_punct[']'] = 1;
+ mips_print_operand_punct['<'] = 1;
+ mips_print_operand_punct['>'] = 1;
+ mips_print_operand_punct['{'] = 1;
+ mips_print_operand_punct['}'] = 1;
+ mips_print_operand_punct['^'] = 1;
+ mips_print_operand_punct['$'] = 1;
+ mips_print_operand_punct['+'] = 1;
+ mips_print_operand_punct['~'] = 1;
+ mips_print_operand_punct['|'] = 1;
+ mips_print_operand_punct['-'] = 1;
+
+ /* Set up array to map GCC register number to debug register number.
+ Ignore the special purpose register numbers. */
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ mips_dbx_regno[i] = INVALID_REGNUM;
+ if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
+ mips_dwarf_regno[i] = i;
+ else
+ mips_dwarf_regno[i] = INVALID_REGNUM;
+ }
+
+ start = GP_DBX_FIRST - GP_REG_FIRST;
+ for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
+ mips_dbx_regno[i] = i + start;
+
+ start = FP_DBX_FIRST - FP_REG_FIRST;
+ for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
+ mips_dbx_regno[i] = i + start;
+
+ /* HI and LO debug registers use big-endian ordering. */
+ mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
+ mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
+ mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
+ mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
+ for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
+ {
+ mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
+ mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
+ }
+
+ /* Set up array giving whether a given register can hold a given mode. */
+
+ for (mode = VOIDmode;
+ mode != MAX_MACHINE_MODE;
+ mode = (enum machine_mode) ((int)mode + 1))
+ {
+ register int size = GET_MODE_SIZE (mode);
+ register enum mode_class class = GET_MODE_CLASS (mode);
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ {
+ register int temp;
+
+ if (mode == CCV2mode)
+ temp = (ISA_HAS_8CC
+ && ST_REG_P (regno)
+ && (regno - ST_REG_FIRST) % 2 == 0);
+
+ else if (mode == CCV4mode)
+ temp = (ISA_HAS_8CC
+ && ST_REG_P (regno)
+ && (regno - ST_REG_FIRST) % 4 == 0);
+
+ else if (mode == CCmode)
+ {
+ if (! ISA_HAS_8CC)
+ temp = (regno == FPSW_REGNUM);
+ else
+ temp = (ST_REG_P (regno) || GP_REG_P (regno)
+ || FP_REG_P (regno));
+ }
+
+ else if (GP_REG_P (regno))
+ temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
+
+ else if (FP_REG_P (regno))
+ temp = ((((regno % MAX_FPRS_PER_FMT) == 0)
+ || (MIN_FPRS_PER_FMT == 1
+ && size <= UNITS_PER_FPREG))
+ && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
+ || class == MODE_VECTOR_FLOAT)
+ && size <= UNITS_PER_FPVALUE)
+ /* Allow integer modes that fit into a single
+ register. We need to put integers into FPRs
+ when using instructions like cvt and trunc.
+ We can't allow sizes smaller than a word,
+ because the FPU has no appropriate load/store
+ instructions for those. */
+ || (class == MODE_INT
+ && size >= MIN_UNITS_PER_WORD
+ && size <= UNITS_PER_FPREG)
+ /* Allow TFmode for CCmode reloads. */
+ || (ISA_HAS_8CC && mode == TFmode)));
+
+ else if (ACC_REG_P (regno))
+ temp = ((INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode))
+ && size <= UNITS_PER_WORD * 2
+ && (size <= UNITS_PER_WORD
+ || regno == MD_REG_FIRST
+ || (DSP_ACC_REG_P (regno)
+ && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)));
+
+ else if (ALL_COP_REG_P (regno))
+ temp = (class == MODE_INT && size <= UNITS_PER_WORD);
+ else
+ temp = 0;
+
+ mips_hard_regno_mode_ok[(int)mode][regno] = temp;
+ }
+ }
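+
+ /* To make the table above concrete: with 32-bit FPRs and
+ -mdouble-float, a DFmode value is only allowed in even-numbered
+ float registers, since MAX_FPRS_PER_FMT is then 2; with 64-bit
+ FPRs (-mfp64) any float register can hold it.  This is just an
+ illustration of the conditions above.  */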
+
+ /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
+ initialized yet, so we can't use that here. */
+ gpr_mode = TARGET_64BIT ? DImode : SImode;
+
+ /* Function to allocate machine-dependent function status. */
+ init_machine_status = &mips_init_machine_status;
+
+ /* Default to working around R4000 errata only if the processor
+ was selected explicitly. */
+ if ((target_flags_explicit & MASK_FIX_R4000) == 0
+ && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
+ target_flags |= MASK_FIX_R4000;
+
+ /* Default to working around R4400 errata only if the processor
+ was selected explicitly. */
+ if ((target_flags_explicit & MASK_FIX_R4400) == 0
+ && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
+ target_flags |= MASK_FIX_R4400;
+
+ /* Save base state of options. */
+ mips_base_mips16 = TARGET_MIPS16;
+ mips_base_target_flags = target_flags;
+ mips_flag_delayed_branch = flag_delayed_branch;
+ mips_base_schedule_insns = flag_schedule_insns;
+ mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
+ mips_base_move_loop_invariants = flag_move_loop_invariants;
+ mips_base_align_loops = align_loops;
+ mips_base_align_jumps = align_jumps;
+ mips_base_align_functions = align_functions;
+
+ /* Now select the mips16 or 32-bit instruction set, as requested. */
+ mips_set_mips16_mode (mips_base_mips16);
+}
+
+/* Swap the register information for registers I and I + 1, which
+ currently have the wrong endianness. Note that the registers'
+ fixedness and call-clobberedness might have been set on the
+ command line. */
+
+static void
+mips_swap_registers (unsigned int i)
+{
+ int tmpi;
+ const char *tmps;
+
+#define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
+#define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
+
+ SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
+ SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
+ SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
+ SWAP_STRING (reg_names[i], reg_names[i + 1]);
+
+#undef SWAP_STRING
+#undef SWAP_INT
+}
+
+/* Implement CONDITIONAL_REGISTER_USAGE. */
+
+void
+mips_conditional_register_usage (void)
+{
+ if (!ISA_HAS_DSP)
+ {
+ int regno;
+
+ for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
+ fixed_regs[regno] = call_used_regs[regno] = 1;
+ }
+ if (!TARGET_HARD_FLOAT)
+ {
+ int regno;
+
+ for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
+ fixed_regs[regno] = call_used_regs[regno] = 1;
+ for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
+ fixed_regs[regno] = call_used_regs[regno] = 1;
+ }
+ else if (! ISA_HAS_8CC)
+ {
+ int regno;
+
+ /* We only have a single condition code register. We
+ implement this by hiding all the condition code registers,
+ and generating RTL that refers directly to ST_REG_FIRST. */
+ for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
+ fixed_regs[regno] = call_used_regs[regno] = 1;
+ }
+ /* In mips16 mode, we permit the $t temporary registers to be used
+ for reload. We prohibit the unused $s registers, since they
+ are call-saved, and saving them via a mips16 register would
+ probably waste more time than just reloading the value. */
+ if (TARGET_MIPS16)
+ {
+ fixed_regs[18] = call_used_regs[18] = 1;
+ fixed_regs[19] = call_used_regs[19] = 1;
+ fixed_regs[20] = call_used_regs[20] = 1;
+ fixed_regs[21] = call_used_regs[21] = 1;
+ fixed_regs[22] = call_used_regs[22] = 1;
+ fixed_regs[23] = call_used_regs[23] = 1;
+ fixed_regs[26] = call_used_regs[26] = 1;
+ fixed_regs[27] = call_used_regs[27] = 1;
+ fixed_regs[30] = call_used_regs[30] = 1;
+ }
+ /* fp20-23 are now caller saved. */
+ if (mips_abi == ABI_64)
+ {
+ int regno;
+ for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
+ call_really_used_regs[regno] = call_used_regs[regno] = 1;
+ }
+ /* Odd registers from fp21 to fp31 are now caller saved. */
+ if (mips_abi == ABI_N32)
+ {
+ int regno;
+ for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
+ call_really_used_regs[regno] = call_used_regs[regno] = 1;
+ }
+ /* Make sure that double-register accumulator values are correctly
+ ordered for the current endianness. */
+ if (TARGET_LITTLE_ENDIAN)
+ {
+ int regno;
+ mips_swap_registers (MD_REG_FIRST);
+ for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
+ mips_swap_registers (regno);
+ }
+}
+
+/* On the mips16, we want to allocate $24 (T_REG) before other
+ registers for those instructions that can use it.  This helps
+ avoid shuffling registers around in order to set up for an xor,
+ encouraging the compiler to use a cmp instead. */
+
+void
+mips_order_regs_for_local_alloc (void)
+{
+ register int i;
+
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ reg_alloc_order[i] = i;
+
+ if (TARGET_MIPS16)
+ {
+ /* It really doesn't matter where we put register 0, since it is
+ a fixed register anyhow. */
+ reg_alloc_order[0] = 24;
+ reg_alloc_order[24] = 0;
+ }
}
+/* Initialize the GCC target structure. */
+#undef TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
+#undef TARGET_ASM_ALIGNED_SI_OP
+#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
+#undef TARGET_ASM_ALIGNED_DI_OP
+#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
+
+#undef TARGET_ASM_FUNCTION_PROLOGUE
+#define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
+#undef TARGET_ASM_FUNCTION_EPILOGUE
+#define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
+#undef TARGET_ASM_SELECT_RTX_SECTION
+#define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
+#undef TARGET_ASM_FUNCTION_RODATA_SECTION
+#define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
+
+#undef TARGET_SCHED_INIT
+#define TARGET_SCHED_INIT mips_sched_init
+#undef TARGET_SCHED_REORDER
+#define TARGET_SCHED_REORDER mips_sched_reorder
+#undef TARGET_SCHED_REORDER2
+#define TARGET_SCHED_REORDER2 mips_sched_reorder
+#undef TARGET_SCHED_VARIABLE_ISSUE
+#define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
+#undef TARGET_SCHED_ADJUST_COST
+#define TARGET_SCHED_ADJUST_COST mips_adjust_cost
+#undef TARGET_SCHED_ISSUE_RATE
+#define TARGET_SCHED_ISSUE_RATE mips_issue_rate
+#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
+#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
+ mips_multipass_dfa_lookahead
+
+#undef TARGET_DEFAULT_TARGET_FLAGS
+#define TARGET_DEFAULT_TARGET_FLAGS \
+ (TARGET_DEFAULT \
+ | TARGET_CPU_DEFAULT \
+ | TARGET_ENDIAN_DEFAULT \
+ | TARGET_FP_EXCEPTIONS_DEFAULT \
+ | MASK_CHECK_ZERO_DIV \
+ | MASK_FUSED_MADD)
+#undef TARGET_HANDLE_OPTION
+#define TARGET_HANDLE_OPTION mips_handle_option
+
+#undef TARGET_FUNCTION_OK_FOR_SIBCALL
+#define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
+
+#undef TARGET_INSERT_ATTRIBUTES
+#define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
+#undef TARGET_MERGE_DECL_ATTRIBUTES
+#define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
+#undef TARGET_SET_CURRENT_FUNCTION
+#define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
+
+#undef TARGET_VALID_POINTER_MODE
+#define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
+#undef TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS mips_rtx_costs
+#undef TARGET_ADDRESS_COST
+#define TARGET_ADDRESS_COST mips_address_cost
+
+#undef TARGET_IN_SMALL_DATA_P
+#define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
+
+#undef TARGET_MACHINE_DEPENDENT_REORG
+#define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
+
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START mips_file_start
+#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
+#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
+
+#undef TARGET_INIT_LIBFUNCS
+#define TARGET_INIT_LIBFUNCS mips_init_libfuncs
+
+#undef TARGET_BUILD_BUILTIN_VA_LIST
+#define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
+#undef TARGET_GIMPLIFY_VA_ARG_EXPR
+#define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
+
+#undef TARGET_PROMOTE_FUNCTION_ARGS
+#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
+#undef TARGET_PROMOTE_FUNCTION_RETURN
+#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
+#undef TARGET_PROMOTE_PROTOTYPES
+#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
+
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY mips_return_in_memory
+#undef TARGET_RETURN_IN_MSB
+#define TARGET_RETURN_IN_MSB mips_return_in_msb
+
+#undef TARGET_ASM_OUTPUT_MI_THUNK
+#define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
+#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
+
+#undef TARGET_SETUP_INCOMING_VARARGS
+#define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
+#undef TARGET_STRICT_ARGUMENT_NAMING
+#define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
+#undef TARGET_MUST_PASS_IN_STACK
+#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
+#undef TARGET_PASS_BY_REFERENCE
+#define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
+#undef TARGET_CALLEE_COPIES
+#define TARGET_CALLEE_COPIES mips_callee_copies
+#undef TARGET_ARG_PARTIAL_BYTES
+#define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
+
+#undef TARGET_MODE_REP_EXTENDED
+#define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
+
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
+
+#undef TARGET_SCALAR_MODE_SUPPORTED_P
+#define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
+
+#undef TARGET_INIT_BUILTINS
+#define TARGET_INIT_BUILTINS mips_init_builtins
+#undef TARGET_EXPAND_BUILTIN
+#define TARGET_EXPAND_BUILTIN mips_expand_builtin
+
+#undef TARGET_HAVE_TLS
+#define TARGET_HAVE_TLS HAVE_AS_TLS
+
+#undef TARGET_CANNOT_FORCE_CONST_MEM
+#define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
+
+#undef TARGET_ENCODE_SECTION_INFO
+#define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
+
+#undef TARGET_ATTRIBUTE_TABLE
+#define TARGET_ATTRIBUTE_TABLE mips_attribute_table
+/* All our function attributes are related to how out-of-line copies should
+ be compiled or called. They don't in themselves prevent inlining. */
+#undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
+#define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
+
+#undef TARGET_EXTRA_LIVE_ON_ENTRY
+#define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
+
+#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
+#define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
+#undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
+#define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
+
+#undef TARGET_COMP_TYPE_ATTRIBUTES
+#define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
+
+#ifdef HAVE_AS_DTPRELWORD
+#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
+#define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
+#endif
+#undef TARGET_DWARF_REGISTER_SPAN
+#define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
#include "gt-mips.h"
diff --git a/gcc/config/mips/mips.h b/gcc/config/mips/mips.h
index 952093a..061bac8 100644
--- a/gcc/config/mips/mips.h
+++ b/gcc/config/mips/mips.h
@@ -132,32 +132,6 @@ enum mips_code_readable_setting {
CODE_READABLE_YES
};
-#ifndef USED_FOR_TARGET
-extern char mips_print_operand_punct[256]; /* print_operand punctuation chars */
-extern const char *current_function_file; /* filename current function is in */
-extern int num_source_filenames; /* current .file # */
-extern int mips_section_threshold; /* # bytes of data/sdata cutoff */
-extern int sym_lineno; /* sgi next label # for each stmt */
-extern int set_noreorder; /* # of nested .set noreorder's */
-extern int set_nomacro; /* # of nested .set nomacro's */
-extern int set_noat; /* # of nested .set noat's */
-extern int set_volatile; /* # of nested .set volatile's */
-extern int mips_branch_likely; /* emit 'l' after br (branch likely) */
-extern int mips_dbx_regno[];
-extern int mips_dwarf_regno[];
-extern bool mips_split_p[];
-extern GTY(()) rtx cmp_operands[2];
-extern enum processor_type mips_arch; /* which cpu to codegen for */
-extern enum processor_type mips_tune; /* which cpu to schedule for */
-extern int mips_isa; /* architectural level */
-extern int mips_abi; /* which ABI to use */
-extern const struct mips_cpu_info mips_cpu_info_table[];
-extern const struct mips_cpu_info *mips_arch_info;
-extern const struct mips_cpu_info *mips_tune_info;
-extern const struct mips_rtx_cost_data *mips_cost;
-extern enum mips_code_readable_setting mips_code_readable;
-#endif
-
/* Macros to silence warnings about numbers being signed in traditional
C and unsigned in ISO C when compiled on 32-bit hosts. */
@@ -1585,13 +1559,6 @@ extern enum mips_code_readable_setting mips_code_readable;
#define HARD_REGNO_NREGS(REGNO, MODE) mips_hard_regno_nregs (REGNO, MODE)
-/* To make the code simpler, HARD_REGNO_MODE_OK just references an
- array built in override_options. Because machmodes.h is not yet
- included before this file is processed, the MODE bound can't be
- expressed here. */
-
-extern char mips_hard_regno_mode_ok[][FIRST_PSEUDO_REGISTER];
-
#define HARD_REGNO_MODE_OK(REGNO, MODE) \
mips_hard_regno_mode_ok[ (int)(MODE) ][ (REGNO) ]
@@ -1804,8 +1771,6 @@ enum reg_class
choose a class which is "minimal", meaning that no smaller class
also contains the register. */
-extern const enum reg_class mips_regno_to_class[];
-
#define REGNO_REG_CLASS(REGNO) mips_regno_to_class[ (REGNO) ]
/* A macro whose definition is the name of the class to which a
@@ -3094,3 +3059,30 @@ while (0)
"\tnop\n" \
"\tsync%-%]%>%)"
+#ifndef USED_FOR_TARGET
+extern const enum reg_class mips_regno_to_class[];
+extern char mips_hard_regno_mode_ok[][FIRST_PSEUDO_REGISTER];
+extern char mips_print_operand_punct[256]; /* print_operand punctuation chars */
+extern const char *current_function_file; /* filename current function is in */
+extern int num_source_filenames; /* current .file # */
+extern int mips_section_threshold; /* # bytes of data/sdata cutoff */
+extern int sym_lineno; /* sgi next label # for each stmt */
+extern int set_noreorder; /* # of nested .set noreorder's */
+extern int set_nomacro; /* # of nested .set nomacro's */
+extern int set_noat; /* # of nested .set noat's */
+extern int set_volatile; /* # of nested .set volatile's */
+extern int mips_branch_likely; /* emit 'l' after br (branch likely) */
+extern int mips_dbx_regno[];
+extern int mips_dwarf_regno[];
+extern bool mips_split_p[];
+extern GTY(()) rtx cmp_operands[2];
+extern enum processor_type mips_arch; /* which cpu to codegen for */
+extern enum processor_type mips_tune; /* which cpu to schedule for */
+extern int mips_isa; /* architectural level */
+extern int mips_abi; /* which ABI to use */
+extern const struct mips_cpu_info mips_cpu_info_table[];
+extern const struct mips_cpu_info *mips_arch_info;
+extern const struct mips_cpu_info *mips_tune_info;
+extern const struct mips_rtx_cost_data *mips_cost;
+extern enum mips_code_readable_setting mips_code_readable;
+#endif