aboutsummaryrefslogtreecommitdiff
path: root/gcc/config
diff options
context:
space:
mode:
authorMartin Liska <mliska@suse.cz>2019-11-12 11:08:40 +0100
committerMartin Liska <marxin@gcc.gnu.org>2019-11-12 10:08:40 +0000
commit028d409252058d88805341a3f6dc0ff1553f5bdc (patch)
tree9b1290038426396fe9c71c5e3d0b890845146767 /gcc/config
parent7e1792c953ec544e4bc0182b50026b41720fce2e (diff)
downloadgcc-028d409252058d88805341a3f6dc0ff1553f5bdc.zip
gcc-028d409252058d88805341a3f6dc0ff1553f5bdc.tar.gz
gcc-028d409252058d88805341a3f6dc0ff1553f5bdc.tar.bz2
Apply mechanical replacement (generated patch).
2019-11-12 Martin Liska <mliska@suse.cz> * asan.c (asan_sanitize_stack_p): Replace old parameter syntax with the new one, include opts.h if needed. Use SET_OPTION_IF_UNSET macro. (asan_sanitize_allocas_p): Likewise. (asan_emit_stack_protection): Likewise. (asan_protect_global): Likewise. (instrument_derefs): Likewise. (instrument_builtin_call): Likewise. (asan_expand_mark_ifn): Likewise. * auto-profile.c (auto_profile): Likewise. * bb-reorder.c (copy_bb_p): Likewise. (duplicate_computed_gotos): Likewise. * builtins.c (inline_expand_builtin_string_cmp): Likewise. * cfgcleanup.c (try_crossjump_to_edge): Likewise. (try_crossjump_bb): Likewise. * cfgexpand.c (defer_stack_allocation): Likewise. (stack_protect_classify_type): Likewise. (pass_expand::execute): Likewise. * cfgloopanal.c (expected_loop_iterations_unbounded): Likewise. (estimate_reg_pressure_cost): Likewise. * cgraph.c (cgraph_edge::maybe_hot_p): Likewise. * combine.c (combine_instructions): Likewise. (record_value_for_reg): Likewise. * common/config/aarch64/aarch64-common.c (aarch64_option_validate_param): Likewise. (aarch64_option_default_params): Likewise. * common/config/ia64/ia64-common.c (ia64_option_default_params): Likewise. * common/config/powerpcspe/powerpcspe-common.c (rs6000_option_default_params): Likewise. * common/config/rs6000/rs6000-common.c (rs6000_option_default_params): Likewise. * common/config/sh/sh-common.c (sh_option_default_params): Likewise. * config/aarch64/aarch64.c (aarch64_output_probe_stack_range): Likewise. (aarch64_allocate_and_probe_stack_space): Likewise. (aarch64_expand_epilogue): Likewise. (aarch64_override_options_internal): Likewise. * config/alpha/alpha.c (alpha_option_override): Likewise. * config/arm/arm.c (arm_option_override): Likewise. (arm_valid_target_attribute_p): Likewise. * config/i386/i386-options.c (ix86_option_override_internal): Likewise. * config/i386/i386.c (get_probe_interval): Likewise. (ix86_adjust_stack_and_probe_stack_clash): Likewise. 
(ix86_max_noce_ifcvt_seq_cost): Likewise. * config/ia64/ia64.c (ia64_adjust_cost): Likewise. * config/rs6000/rs6000-logue.c (get_stack_clash_protection_probe_interval): Likewise. (get_stack_clash_protection_guard_size): Likewise. * config/rs6000/rs6000.c (rs6000_option_override_internal): Likewise. * config/s390/s390.c (allocate_stack_space): Likewise. (s390_emit_prologue): Likewise. (s390_option_override_internal): Likewise. * config/sparc/sparc.c (sparc_option_override): Likewise. * config/visium/visium.c (visium_option_override): Likewise. * coverage.c (get_coverage_counts): Likewise. (coverage_compute_profile_id): Likewise. (coverage_begin_function): Likewise. (coverage_end_function): Likewise. * cse.c (cse_find_path): Likewise. (cse_extended_basic_block): Likewise. (cse_main): Likewise. * cselib.c (cselib_invalidate_mem): Likewise. * dse.c (dse_step1): Likewise. * emit-rtl.c (set_new_first_and_last_insn): Likewise. (get_max_insn_count): Likewise. (make_debug_insn_raw): Likewise. (init_emit): Likewise. * explow.c (compute_stack_clash_protection_loop_data): Likewise. * final.c (compute_alignments): Likewise. * fold-const.c (fold_range_test): Likewise. (fold_truth_andor): Likewise. (tree_single_nonnegative_warnv_p): Likewise. (integer_valued_real_single_p): Likewise. * gcse.c (want_to_gcse_p): Likewise. (prune_insertions_deletions): Likewise. (hoist_code): Likewise. (gcse_or_cprop_is_too_expensive): Likewise. * ggc-common.c: Likewise. * ggc-page.c (ggc_collect): Likewise. * gimple-loop-interchange.cc (MAX_NUM_STMT): Likewise. (MAX_DATAREFS): Likewise. (OUTER_STRIDE_RATIO): Likewise. * gimple-loop-jam.c (tree_loop_unroll_and_jam): Likewise. * gimple-loop-versioning.cc (loop_versioning::max_insns_for_loop): Likewise. * gimple-ssa-split-paths.c (is_feasible_trace): Likewise. * gimple-ssa-store-merging.c (imm_store_chain_info::try_coalesce_bswap): Likewise. (imm_store_chain_info::coalesce_immediate_stores): Likewise. 
(imm_store_chain_info::output_merged_store): Likewise. (pass_store_merging::process_store): Likewise. * gimple-ssa-strength-reduction.c (find_basis_for_base_expr): Likewise. * graphite-isl-ast-to-gimple.c (class translate_isl_ast_to_gimple): Likewise. (scop_to_isl_ast): Likewise. * graphite-optimize-isl.c (get_schedule_for_node_st): Likewise. (optimize_isl): Likewise. * graphite-scop-detection.c (build_scops): Likewise. * haifa-sched.c (set_modulo_params): Likewise. (rank_for_schedule): Likewise. (model_add_to_worklist): Likewise. (model_promote_insn): Likewise. (model_choose_insn): Likewise. (queue_to_ready): Likewise. (autopref_multipass_dfa_lookahead_guard): Likewise. (schedule_block): Likewise. (sched_init): Likewise. * hsa-gen.c (init_prologue): Likewise. * ifcvt.c (bb_ok_for_noce_convert_multiple_sets): Likewise. (cond_move_process_if_block): Likewise. * ipa-cp.c (ipcp_lattice::add_value): Likewise. (merge_agg_lats_step): Likewise. (devirtualization_time_bonus): Likewise. (hint_time_bonus): Likewise. (incorporate_penalties): Likewise. (good_cloning_opportunity_p): Likewise. (ipcp_propagate_stage): Likewise. * ipa-fnsummary.c (decompose_param_expr): Likewise. (set_switch_stmt_execution_predicate): Likewise. (analyze_function_body): Likewise. (compute_fn_summary): Likewise. * ipa-inline-analysis.c (estimate_growth): Likewise. * ipa-inline.c (caller_growth_limits): Likewise. (inline_insns_single): Likewise. (inline_insns_auto): Likewise. (can_inline_edge_by_limits_p): Likewise. (want_early_inline_function_p): Likewise. (big_speedup_p): Likewise. (want_inline_small_function_p): Likewise. (want_inline_self_recursive_call_p): Likewise. (edge_badness): Likewise. (recursive_inlining): Likewise. (compute_max_insns): Likewise. (early_inliner): Likewise. * ipa-polymorphic-call.c (csftc_abort_walking_p): Likewise. * ipa-profile.c (ipa_profile): Likewise. * ipa-prop.c (determine_known_aggregate_parts): Likewise. (ipa_analyze_node): Likewise. 
(ipcp_transform_function): Likewise. * ipa-split.c (consider_split): Likewise. * ipa-sra.c (allocate_access): Likewise. (process_scan_results): Likewise. (ipa_sra_summarize_function): Likewise. (pull_accesses_from_callee): Likewise. * ira-build.c (loop_compare_func): Likewise. (mark_loops_for_removal): Likewise. * ira-conflicts.c (build_conflict_bit_table): Likewise. * loop-doloop.c (doloop_optimize): Likewise. * loop-invariant.c (gain_for_invariant): Likewise. (move_loop_invariants): Likewise. * loop-unroll.c (decide_unroll_constant_iterations): Likewise. (decide_unroll_runtime_iterations): Likewise. (decide_unroll_stupid): Likewise. (expand_var_during_unrolling): Likewise. * lra-assigns.c (spill_for): Likewise. * lra-constraints.c (EBB_PROBABILITY_CUTOFF): Likewise. * modulo-sched.c (sms_schedule): Likewise. (DFA_HISTORY): Likewise. * opts.c (default_options_optimization): Likewise. (finish_options): Likewise. (common_handle_option): Likewise. * postreload-gcse.c (eliminate_partially_redundant_load): Likewise. (if): Likewise. * predict.c (get_hot_bb_threshold): Likewise. (maybe_hot_count_p): Likewise. (probably_never_executed): Likewise. (predictable_edge_p): Likewise. (predict_loops): Likewise. (expr_expected_value_1): Likewise. (tree_predict_by_opcode): Likewise. (handle_missing_profiles): Likewise. * reload.c (find_equiv_reg): Likewise. * reorg.c (redundant_insn): Likewise. * resource.c (mark_target_live_regs): Likewise. (incr_ticks_for_insn): Likewise. * sanopt.c (pass_sanopt::execute): Likewise. * sched-deps.c (sched_analyze_1): Likewise. (sched_analyze_2): Likewise. (sched_analyze_insn): Likewise. (deps_analyze_insn): Likewise. * sched-ebb.c (schedule_ebbs): Likewise. * sched-rgn.c (find_single_block_region): Likewise. (too_large): Likewise. (haifa_find_rgns): Likewise. (extend_rgns): Likewise. (new_ready): Likewise. (schedule_region): Likewise. (sched_rgn_init): Likewise. * sel-sched-ir.c (make_region_from_loop): Likewise. 
* sel-sched-ir.h (MAX_WS): Likewise. * sel-sched.c (process_pipelined_exprs): Likewise. (sel_setup_region_sched_flags): Likewise. * shrink-wrap.c (try_shrink_wrapping): Likewise. * targhooks.c (default_max_noce_ifcvt_seq_cost): Likewise. * toplev.c (print_version): Likewise. (process_options): Likewise. * tracer.c (tail_duplicate): Likewise. * trans-mem.c (tm_log_add): Likewise. * tree-chrec.c (chrec_fold_plus_1): Likewise. * tree-data-ref.c (split_constant_offset): Likewise. (compute_all_dependences): Likewise. * tree-if-conv.c (MAX_PHI_ARG_NUM): Likewise. * tree-inline.c (remap_gimple_stmt): Likewise. * tree-loop-distribution.c (MAX_DATAREFS_NUM): Likewise. * tree-parloops.c (MIN_PER_THREAD): Likewise. (create_parallel_loop): Likewise. * tree-predcom.c (determine_unroll_factor): Likewise. * tree-scalar-evolution.c (instantiate_scev_r): Likewise. * tree-sra.c (analyze_all_variable_accesses): Likewise. * tree-ssa-ccp.c (fold_builtin_alloca_with_align): Likewise. * tree-ssa-dse.c (setup_live_bytes_from_ref): Likewise. (dse_optimize_redundant_stores): Likewise. (dse_classify_store): Likewise. * tree-ssa-ifcombine.c (ifcombine_ifandif): Likewise. * tree-ssa-loop-ch.c (ch_base::copy_headers): Likewise. * tree-ssa-loop-im.c (LIM_EXPENSIVE): Likewise. * tree-ssa-loop-ivcanon.c (try_unroll_loop_completely): Likewise. (try_peel_loop): Likewise. (tree_unroll_loops_completely): Likewise. * tree-ssa-loop-ivopts.c (avg_loop_niter): Likewise. (CONSIDER_ALL_CANDIDATES_BOUND): Likewise. (MAX_CONSIDERED_GROUPS): Likewise. (ALWAYS_PRUNE_CAND_SET_BOUND): Likewise. * tree-ssa-loop-manip.c (can_unroll_loop_p): Likewise. * tree-ssa-loop-niter.c (MAX_ITERATIONS_TO_TRACK): Likewise. * tree-ssa-loop-prefetch.c (PREFETCH_BLOCK): Likewise. (L1_CACHE_SIZE_BYTES): Likewise. (L2_CACHE_SIZE_BYTES): Likewise. (should_issue_prefetch_p): Likewise. (schedule_prefetches): Likewise. (determine_unroll_factor): Likewise. (volume_of_references): Likewise. (add_subscript_strides): Likewise. 
(self_reuse_distance): Likewise. (mem_ref_count_reasonable_p): Likewise. (insn_to_prefetch_ratio_too_small_p): Likewise. (loop_prefetch_arrays): Likewise. (tree_ssa_prefetch_arrays): Likewise. * tree-ssa-loop-unswitch.c (tree_unswitch_single_loop): Likewise. * tree-ssa-math-opts.c (gimple_expand_builtin_pow): Likewise. (convert_mult_to_fma): Likewise. (math_opts_dom_walker::after_dom_children): Likewise. * tree-ssa-phiopt.c (cond_if_else_store_replacement): Likewise. (hoist_adjacent_loads): Likewise. (gate_hoist_loads): Likewise. * tree-ssa-pre.c (translate_vuse_through_block): Likewise. (compute_partial_antic_aux): Likewise. * tree-ssa-reassoc.c (get_reassociation_width): Likewise. * tree-ssa-sccvn.c (vn_reference_lookup_pieces): Likewise. (vn_reference_lookup): Likewise. (do_rpo_vn): Likewise. * tree-ssa-scopedtables.c (avail_exprs_stack::lookup_avail_expr): Likewise. * tree-ssa-sink.c (select_best_block): Likewise. * tree-ssa-strlen.c (new_stridx): Likewise. (new_addr_stridx): Likewise. (get_range_strlen_dynamic): Likewise. (class ssa_name_limit_t): Likewise. * tree-ssa-structalias.c (push_fields_onto_fieldstack): Likewise. (create_variable_info_for_1): Likewise. (init_alias_vars): Likewise. * tree-ssa-tail-merge.c (find_clusters_1): Likewise. (tail_merge_optimize): Likewise. * tree-ssa-threadbackward.c (thread_jumps::profitable_jump_thread_path): Likewise. (thread_jumps::fsm_find_control_statement_thread_paths): Likewise. (thread_jumps::find_jump_threads_backwards): Likewise. * tree-ssa-threadedge.c (record_temporary_equivalences_from_stmts_at_dest): Likewise. * tree-ssa-uninit.c (compute_control_dep_chain): Likewise. * tree-switch-conversion.c (switch_conversion::check_range): Likewise. (jump_table_cluster::can_be_handled): Likewise. * tree-switch-conversion.h (jump_table_cluster::case_values_threshold): Likewise. (SWITCH_CONVERSION_BRANCH_RATIO): Likewise. (param_switch_conversion_branch_ratio): Likewise. 
* tree-vect-data-refs.c (vect_mark_for_runtime_alias_test): Likewise. (vect_enhance_data_refs_alignment): Likewise. (vect_prune_runtime_alias_test_list): Likewise. * tree-vect-loop.c (vect_analyze_loop_costing): Likewise. (vect_get_datarefs_in_loop): Likewise. (vect_analyze_loop): Likewise. * tree-vect-slp.c (vect_slp_bb): Likewise. * tree-vectorizer.h: Likewise. * tree-vrp.c (find_switch_asserts): Likewise. (vrp_prop::check_mem_ref): Likewise. * tree.c (wide_int_to_tree_1): Likewise. (cache_integer_cst): Likewise. * var-tracking.c (EXPR_USE_DEPTH): Likewise. (reverse_op): Likewise. (vt_find_locations): Likewise. 2019-11-12 Martin Liska <mliska@suse.cz> * gimple-parser.c (c_parser_parse_gimple_body): Replace old parameter syntax with the new one, include opts.h if needed. Use SET_OPTION_IF_UNSET macro. 2019-11-12 Martin Liska <mliska@suse.cz> * name-lookup.c (namespace_hints::namespace_hints): Replace old parameter syntax with the new one, include opts.h if needed. Use SET_OPTION_IF_UNSET macro. * typeck.c (comptypes): Likewise. 2019-11-12 Martin Liska <mliska@suse.cz> * lto-partition.c (lto_balanced_map): Replace old parameter syntax with the new one, include opts.h if needed. Use SET_OPTION_IF_UNSET macro. * lto.c (do_whole_program_analysis): Likewise. From-SVN: r278085
Diffstat (limited to 'gcc/config')
-rw-r--r--gcc/config/aarch64/aarch64.c79
-rw-r--r--gcc/config/alpha/alpha.c16
-rw-r--r--gcc/config/arm/arm.c43
-rw-r--r--gcc/config/i386/i386-options.c32
-rw-r--r--gcc/config/i386/i386.c26
-rw-r--r--gcc/config/ia64/ia64.c2
-rw-r--r--gcc/config/rs6000/rs6000-logue.c4
-rw-r--r--gcc/config/rs6000/rs6000.c44
-rw-r--r--gcc/config/s390/s390.c79
-rw-r--r--gcc/config/sparc/sparc.c83
-rw-r--r--gcc/config/visium/visium.c6
11 files changed, 173 insertions, 241 deletions
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index 1dfff33..c478386 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -5589,7 +5589,7 @@ aarch64_output_probe_stack_range (rtx reg1, rtx reg2)
ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
HOST_WIDE_INT stack_clash_probe_interval
- = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+ = 1 << param_stack_clash_protection_guard_size;
/* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
xops[0] = reg1;
@@ -6842,7 +6842,7 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
bool final_adjustment_p)
{
HOST_WIDE_INT guard_size
- = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+ = 1 << param_stack_clash_protection_guard_size;
HOST_WIDE_INT guard_used_by_caller = STACK_CLASH_CALLER_GUARD;
HOST_WIDE_INT min_probe_threshold
= (final_adjustment_p
@@ -7364,7 +7364,7 @@ aarch64_expand_epilogue (bool for_sibcall)
for each allocation. For stack clash we are in a usable state if
the adjustment is less than GUARD_SIZE - GUARD_USED_BY_CALLER. */
HOST_WIDE_INT guard_size
- = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+ = 1 << param_stack_clash_protection_guard_size;
HOST_WIDE_INT guard_used_by_caller = STACK_CLASH_CALLER_GUARD;
/* We can re-use the registers when:
@@ -13306,73 +13306,62 @@ aarch64_override_options_internal (struct gcc_options *opts)
/* We don't mind passing in global_options_set here as we don't use
the *options_set structs anyway. */
- maybe_set_param_value (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH,
- queue_depth,
- opts->x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (opts, &global_options_set,
+ param_sched_autopref_queue_depth, queue_depth);
/* Set up parameters to be used in prefetching algorithm. Do not
override the defaults unless we are tuning for a core we have
researched values for. */
if (aarch64_tune_params.prefetch->num_slots > 0)
- maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
- aarch64_tune_params.prefetch->num_slots,
- opts->x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (opts, &global_options_set,
+ param_simultaneous_prefetches,
+ aarch64_tune_params.prefetch->num_slots);
if (aarch64_tune_params.prefetch->l1_cache_size >= 0)
- maybe_set_param_value (PARAM_L1_CACHE_SIZE,
- aarch64_tune_params.prefetch->l1_cache_size,
- opts->x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (opts, &global_options_set,
+ param_l1_cache_size,
+ aarch64_tune_params.prefetch->l1_cache_size);
if (aarch64_tune_params.prefetch->l1_cache_line_size >= 0)
- maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
- aarch64_tune_params.prefetch->l1_cache_line_size,
- opts->x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (opts, &global_options_set,
+ param_l1_cache_line_size,
+ aarch64_tune_params.prefetch->l1_cache_line_size);
if (aarch64_tune_params.prefetch->l2_cache_size >= 0)
- maybe_set_param_value (PARAM_L2_CACHE_SIZE,
- aarch64_tune_params.prefetch->l2_cache_size,
- opts->x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (opts, &global_options_set,
+ param_l2_cache_size,
+ aarch64_tune_params.prefetch->l2_cache_size);
if (!aarch64_tune_params.prefetch->prefetch_dynamic_strides)
- maybe_set_param_value (PARAM_PREFETCH_DYNAMIC_STRIDES,
- 0,
- opts->x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (opts, &global_options_set,
+ param_prefetch_dynamic_strides, 0);
if (aarch64_tune_params.prefetch->minimum_stride >= 0)
- maybe_set_param_value (PARAM_PREFETCH_MINIMUM_STRIDE,
- aarch64_tune_params.prefetch->minimum_stride,
- opts->x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (opts, &global_options_set,
+ param_prefetch_minimum_stride,
+ aarch64_tune_params.prefetch->minimum_stride);
/* Use the alternative scheduling-pressure algorithm by default. */
- maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, SCHED_PRESSURE_MODEL,
- opts->x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (opts, &global_options_set,
+ param_sched_pressure_algorithm,
+ SCHED_PRESSURE_MODEL);
/* If the user hasn't changed it via configure then set the default to 64 KB
for the backend. */
- maybe_set_param_value (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE,
- DEFAULT_STK_CLASH_GUARD_SIZE == 0
- ? 16 : DEFAULT_STK_CLASH_GUARD_SIZE,
- opts->x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (opts, &global_options_set,
+ param_stack_clash_protection_guard_size,
+ (DEFAULT_STK_CLASH_GUARD_SIZE == 0
+ ? 16 : DEFAULT_STK_CLASH_GUARD_SIZE));
/* Validate the guard size. */
- int guard_size = PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+ int guard_size = param_stack_clash_protection_guard_size;
/* Enforce that interval is the same size as size so the mid-end does the
right thing. */
- maybe_set_param_value (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL,
- guard_size,
- opts->x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (opts, &global_options_set,
+ param_stack_clash_protection_probe_interval,
+ guard_size);
/* The maybe_set calls won't update the value if the user has explicitly set
one. Which means we need to validate that probing interval and guard size
are equal. */
int probe_interval
- = PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
+ = param_stack_clash_protection_probe_interval;
if (guard_size != probe_interval)
error ("stack clash guard size %<%d%> must be equal to probing interval "
"%<%d%>", guard_size, probe_interval);
diff --git a/gcc/config/alpha/alpha.c b/gcc/config/alpha/alpha.c
index a7d5454..8f389ea 100644
--- a/gcc/config/alpha/alpha.c
+++ b/gcc/config/alpha/alpha.c
@@ -68,6 +68,7 @@ along with GCC; see the file COPYING3. If not see
#include "builtins.h"
#include "rtl-iter.h"
#include "flags.h"
+#include "opts.h"
/* This file should be included last. */
#include "target-def.h"
@@ -484,17 +485,14 @@ alpha_option_override (void)
}
if (line_size)
- maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, line_size,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_l1_cache_line_size, line_size);
if (l1_size)
- maybe_set_param_value (PARAM_L1_CACHE_SIZE, l1_size,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_l1_cache_size, l1_size);
if (l2_size)
- maybe_set_param_value (PARAM_L2_CACHE_SIZE, l2_size,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_l2_cache_size, l2_size);
/* Do some sanity checks on the above options. */
diff --git a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
index 7c9cdbd..ca7fd90 100644
--- a/gcc/config/arm/arm.c
+++ b/gcc/config/arm/arm.c
@@ -3524,9 +3524,8 @@ arm_option_override (void)
but measurable, size reduction for PIC code. Therefore, we decrease
the bar for unrestricted expression hoisting to the cost of PIC address
calculation, which is 2 instructions. */
- maybe_set_param_value (PARAM_GCSE_UNRESTRICTED_COST, 2,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_gcse_unrestricted_cost, 2);
/* ARM EABI defaults to strict volatile bitfields. */
if (TARGET_AAPCS_BASED && flag_strict_volatile_bitfields < 0
@@ -3546,47 +3545,43 @@ arm_option_override (void)
override the defaults unless we are tuning for a core we have
researched values for. */
if (current_tune->prefetch.num_slots > 0)
- maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
- current_tune->prefetch.num_slots,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_simultaneous_prefetches,
+ current_tune->prefetch.num_slots);
if (current_tune->prefetch.l1_cache_line_size >= 0)
- maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
- current_tune->prefetch.l1_cache_line_size,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_l1_cache_line_size,
+ current_tune->prefetch.l1_cache_line_size);
if (current_tune->prefetch.l1_cache_size >= 0)
- maybe_set_param_value (PARAM_L1_CACHE_SIZE,
- current_tune->prefetch.l1_cache_size,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_l1_cache_size,
+ current_tune->prefetch.l1_cache_size);
/* Look through ready list and all of queue for instructions
relevant for L2 auto-prefetcher. */
- int param_sched_autopref_queue_depth;
+ int sched_autopref_queue_depth;
switch (current_tune->sched_autopref)
{
case tune_params::SCHED_AUTOPREF_OFF:
- param_sched_autopref_queue_depth = -1;
+ sched_autopref_queue_depth = -1;
break;
case tune_params::SCHED_AUTOPREF_RANK:
- param_sched_autopref_queue_depth = 0;
+ sched_autopref_queue_depth = 0;
break;
case tune_params::SCHED_AUTOPREF_FULL:
- param_sched_autopref_queue_depth = max_insn_queue_index + 1;
+ sched_autopref_queue_depth = max_insn_queue_index + 1;
break;
default:
gcc_unreachable ();
}
- maybe_set_param_value (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH,
- param_sched_autopref_queue_depth,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_sched_autopref_queue_depth,
+ sched_autopref_queue_depth);
/* Currently, for slow flash data, we just disable literal pools. We also
disable it for pure-code. */
@@ -31748,8 +31743,6 @@ arm_valid_target_attribute_p (tree fndecl, tree ARG_UNUSED (name),
DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
- finalize_options_struct (&func_options);
-
return ret;
}
diff --git a/gcc/config/i386/i386-options.c b/gcc/config/i386/i386-options.c
index dfc8ae2..72cd6dc 100644
--- a/gcc/config/i386/i386-options.c
+++ b/gcc/config/i386/i386-options.c
@@ -2618,22 +2618,14 @@ ix86_option_override_internal (bool main_args_p,
if (!TARGET_SCHEDULE)
opts->x_flag_schedule_insns_after_reload = opts->x_flag_schedule_insns = 0;
- maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
- ix86_tune_cost->simultaneous_prefetches,
- opts->x_param_values,
- opts_set->x_param_values);
- maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
- ix86_tune_cost->prefetch_block,
- opts->x_param_values,
- opts_set->x_param_values);
- maybe_set_param_value (PARAM_L1_CACHE_SIZE,
- ix86_tune_cost->l1_cache_size,
- opts->x_param_values,
- opts_set->x_param_values);
- maybe_set_param_value (PARAM_L2_CACHE_SIZE,
- ix86_tune_cost->l2_cache_size,
- opts->x_param_values,
- opts_set->x_param_values);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_simultaneous_prefetches,
+ ix86_tune_cost->simultaneous_prefetches);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_l1_cache_line_size,
+ ix86_tune_cost->prefetch_block);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_l1_cache_size,
+ ix86_tune_cost->l1_cache_size);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_l2_cache_size,
+ ix86_tune_cost->l2_cache_size);
/* Enable sw prefetching at -O3 for CPUS that prefetching is helpful. */
if (opts->x_flag_prefetch_loop_arrays < 0
@@ -2868,13 +2860,9 @@ ix86_option_override_internal (bool main_args_p,
= (cf_protection_level) (opts->x_flag_cf_protection | CF_SET);
if (ix86_tune_features [X86_TUNE_AVOID_256FMA_CHAINS])
- maybe_set_param_value (PARAM_AVOID_FMA_MAX_BITS, 256,
- opts->x_param_values,
- opts_set->x_param_values);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_avoid_fma_max_bits, 256);
else if (ix86_tune_features [X86_TUNE_AVOID_128FMA_CHAINS])
- maybe_set_param_value (PARAM_AVOID_FMA_MAX_BITS, 128,
- opts->x_param_values,
- opts_set->x_param_values);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_avoid_fma_max_bits, 128);
/* PR86952: jump table usage with retpolines is slow.
The PR provides some numbers about the slowness. */
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 03a7082..f775697 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -5773,7 +5773,7 @@ get_probe_interval (void)
{
if (flag_stack_clash_protection)
return (HOST_WIDE_INT_1U
- << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
+ << param_stack_clash_protection_probe_interval);
else
return (HOST_WIDE_INT_1U << STACK_CHECK_PROBE_INTERVAL_EXP);
}
@@ -6942,7 +6942,7 @@ ix86_adjust_stack_and_probe_stack_clash (HOST_WIDE_INT size,
/* If we allocate less than the size of the guard statically,
then no probing is necessary, but we do need to allocate
the stack. */
- if (size < (1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE)))
+ if (size < (1 << param_stack_clash_protection_guard_size))
{
pro_epilogue_adjust_stack (stack_pointer_rtx, stack_pointer_rtx,
GEN_INT (-size), -1,
@@ -21468,18 +21468,18 @@ static unsigned int
ix86_max_noce_ifcvt_seq_cost (edge e)
{
bool predictable_p = predictable_edge_p (e);
-
- enum compiler_param param
- = (predictable_p
- ? PARAM_MAX_RTL_IF_CONVERSION_PREDICTABLE_COST
- : PARAM_MAX_RTL_IF_CONVERSION_UNPREDICTABLE_COST);
-
- /* If we have a parameter set, use that, otherwise take a guess using
- BRANCH_COST. */
- if (global_options_set.x_param_values[param])
- return PARAM_VALUE (param);
+ if (predictable_p)
+ {
+ if (global_options_set.x_param_max_rtl_if_conversion_predictable_cost)
+ return param_max_rtl_if_conversion_predictable_cost;
+ }
else
- return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (2);
+ {
+ if (global_options_set.x_param_max_rtl_if_conversion_unpredictable_cost)
+ return param_max_rtl_if_conversion_unpredictable_cost;
+ }
+
+ return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (2);
}
/* Return true if SEQ is a good candidate as a replacement for the
diff --git a/gcc/config/ia64/ia64.c b/gcc/config/ia64/ia64.c
index 7697e90..44f7f2e 100644
--- a/gcc/config/ia64/ia64.c
+++ b/gcc/config/ia64/ia64.c
@@ -7307,7 +7307,7 @@ ia64_adjust_cost (rtx_insn *insn, int dep_type1, rtx_insn *dep_insn,
if (dw == MIN_DEP_WEAK)
/* Store and load are likely to alias, use higher cost to avoid stall. */
- return PARAM_VALUE (PARAM_SCHED_MEM_TRUE_DEP_COST);
+ return param_sched_mem_true_dep_cost;
else if (dw > MIN_DEP_WEAK)
{
/* Store and load are less likely to alias. */
diff --git a/gcc/config/rs6000/rs6000-logue.c b/gcc/config/rs6000/rs6000-logue.c
index 04aae80..f0fd206 100644
--- a/gcc/config/rs6000/rs6000-logue.c
+++ b/gcc/config/rs6000/rs6000-logue.c
@@ -1515,14 +1515,14 @@ static HOST_WIDE_INT
get_stack_clash_protection_probe_interval (void)
{
return (HOST_WIDE_INT_1U
- << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
+ << param_stack_clash_protection_probe_interval);
}
static HOST_WIDE_INT
get_stack_clash_protection_guard_size (void)
{
return (HOST_WIDE_INT_1U
- << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
+ << param_stack_clash_protection_guard_size);
}
/* Allocate ORIG_SIZE bytes on the stack and probe the newly
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index 32101b7..4c830fc 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -80,6 +80,7 @@
#include "tree-vrp.h"
#include "tree-ssanames.h"
#include "rs6000-internal.h"
+#include "opts.h"
/* This file should be included last. */
#include "target-def.h"
@@ -4514,34 +4515,29 @@ rs6000_option_override_internal (bool global_init_p)
if (global_init_p)
{
- maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
- rs6000_cost->simultaneous_prefetches,
- global_options.x_param_values,
- global_options_set.x_param_values);
- maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
- global_options.x_param_values,
- global_options_set.x_param_values);
- maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
- rs6000_cost->cache_line_size,
- global_options.x_param_values,
- global_options_set.x_param_values);
- maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_simultaneous_prefetches,
+ rs6000_cost->simultaneous_prefetches);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_l1_cache_size,
+ rs6000_cost->l1_cache_size);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_l1_cache_line_size,
+ rs6000_cost->cache_line_size);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_l2_cache_size,
+ rs6000_cost->l2_cache_size);
/* Increase loop peeling limits based on performance analysis. */
- maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
- global_options.x_param_values,
- global_options_set.x_param_values);
- maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_max_peeled_insns, 400);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_max_completely_peeled_insns, 400);
/* Use the 'model' -fsched-pressure algorithm by default. */
- maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
- SCHED_PRESSURE_MODEL,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_sched_pressure_algorithm,
+ SCHED_PRESSURE_MODEL);
/* Explicit -funroll-loops turns -munroll-only-small-loops off. */
if (((global_options_set.x_flag_unroll_loops && flag_unroll_loops)
diff --git a/gcc/config/s390/s390.c b/gcc/config/s390/s390.c
index ff0b43c..b3a7522 100644
--- a/gcc/config/s390/s390.c
+++ b/gcc/config/s390/s390.c
@@ -10968,9 +10968,9 @@ allocate_stack_space (rtx size, HOST_WIDE_INT last_probe_offset,
{
bool temp_reg_clobbered_p = false;
HOST_WIDE_INT probe_interval
- = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
+ = 1 << param_stack_clash_protection_probe_interval;
HOST_WIDE_INT guard_size
- = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE);
+ = 1 << param_stack_clash_protection_guard_size;
if (flag_stack_clash_protection)
{
@@ -11086,7 +11086,7 @@ s390_emit_prologue (void)
only exception is when TARGET_BACKCHAIN is active, in which case
we know *sp (offset 0) was written. */
HOST_WIDE_INT probe_interval
- = 1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL);
+ = 1 << param_stack_clash_protection_probe_interval;
HOST_WIDE_INT last_probe_offset
= (TARGET_BACKCHAIN
? (TARGET_PACKED_STACK ? STACK_POINTER_OFFSET - UNITS_PER_LONG : 0)
@@ -15264,10 +15264,8 @@ s390_option_override_internal (struct gcc_options *opts,
displacements. Trim that value down to 4k if that happens. This
might result in too many probes being generated only on the
oldest supported machine level z900. */
- if (!DISP_IN_RANGE ((1 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL))))
- set_param_value ("stack-clash-protection-probe-interval", 12,
- opts->x_param_values,
- opts_set->x_param_values);
+ if (!DISP_IN_RANGE ((1 << param_stack_clash_protection_probe_interval)))
+ param_stack_clash_protection_probe_interval = 12;
#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
if (!TARGET_LONG_DOUBLE_128_P (opts_set->x_target_flags))
@@ -15276,62 +15274,37 @@ s390_option_override_internal (struct gcc_options *opts,
if (opts->x_s390_tune >= PROCESSOR_2097_Z10)
{
- maybe_set_param_value (PARAM_MAX_UNROLLED_INSNS, 100,
- opts->x_param_values,
- opts_set->x_param_values);
- maybe_set_param_value (PARAM_MAX_UNROLL_TIMES, 32,
- opts->x_param_values,
- opts_set->x_param_values);
- maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 2000,
- opts->x_param_values,
- opts_set->x_param_values);
- maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 64,
- opts->x_param_values,
- opts_set->x_param_values);
- }
-
- maybe_set_param_value (PARAM_MAX_PENDING_LIST_LENGTH, 256,
- opts->x_param_values,
- opts_set->x_param_values);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_max_unrolled_insns,
+ 100);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_max_unroll_times, 32);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_max_completely_peeled_insns,
+ 2000);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_max_completely_peel_times,
+ 64);
+ }
+
+ SET_OPTION_IF_UNSET (opts, opts_set, param_max_pending_list_length,
+ 256);
/* values for loop prefetching */
- maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE, 256,
- opts->x_param_values,
- opts_set->x_param_values);
- maybe_set_param_value (PARAM_L1_CACHE_SIZE, 128,
- opts->x_param_values,
- opts_set->x_param_values);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_l1_cache_line_size, 256);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_l1_cache_size, 128);
/* s390 has more than 2 levels and the size is much larger. Since
we are always running virtualized assume that we only get a small
part of the caches above l1. */
- maybe_set_param_value (PARAM_L2_CACHE_SIZE, 1500,
- opts->x_param_values,
- opts_set->x_param_values);
- maybe_set_param_value (PARAM_PREFETCH_MIN_INSN_TO_MEM_RATIO, 2,
- opts->x_param_values,
- opts_set->x_param_values);
- maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6,
- opts->x_param_values,
- opts_set->x_param_values);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_l2_cache_size, 1500);
+ SET_OPTION_IF_UNSET (opts, opts_set,
+ param_prefetch_min_insn_to_mem_ratio, 2);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_simultaneous_prefetches, 6);
/* Use the alternative scheduling-pressure algorithm by default. */
- maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, 2,
- opts->x_param_values,
- opts_set->x_param_values);
-
- maybe_set_param_value (PARAM_MIN_VECT_LOOP_BOUND, 2,
- opts->x_param_values,
- opts_set->x_param_values);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_sched_pressure_algorithm, 2);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_min_vect_loop_bound, 2);
/* Use aggressive inlining parameters. */
if (opts->x_s390_tune >= PROCESSOR_2964_Z13)
{
- maybe_set_param_value (PARAM_INLINE_MIN_SPEEDUP, 2,
- opts->x_param_values,
- opts_set->x_param_values);
-
- maybe_set_param_value (PARAM_MAX_INLINE_INSNS_AUTO, 80,
- opts->x_param_values,
- opts_set->x_param_values);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_inline_min_speedup, 2);
+ SET_OPTION_IF_UNSET (opts, opts_set, param_max_inline_insns_auto, 80);
}
/* Set the default alignment. */
diff --git a/gcc/config/sparc/sparc.c b/gcc/config/sparc/sparc.c
index 75b3d4e..5038551 100644
--- a/gcc/config/sparc/sparc.c
+++ b/gcc/config/sparc/sparc.c
@@ -61,6 +61,7 @@ along with GCC; see the file COPYING3. If not see
#include "context.h"
#include "builtins.h"
#include "tree-vector-builder.h"
+#include "opts.h"
/* This file should be included last. */
#include "target-def.h"
@@ -2010,7 +2011,7 @@ sparc_option_override (void)
gcc_unreachable ();
};
- /* PARAM_SIMULTANEOUS_PREFETCHES is the number of prefetches that
+ /* param_simultaneous_prefetches is the number of prefetches that
can run at the same time. More important, it is the threshold
defining when additional prefetches will be dropped by the
hardware.
@@ -2033,21 +2034,20 @@ sparc_option_override (void)
single-threaded program. Experimental results show that setting
this parameter to 32 works well when the number of threads is not
high. */
- maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
- ((sparc_cpu == PROCESSOR_ULTRASPARC
- || sparc_cpu == PROCESSOR_NIAGARA
- || sparc_cpu == PROCESSOR_NIAGARA2
- || sparc_cpu == PROCESSOR_NIAGARA3
- || sparc_cpu == PROCESSOR_NIAGARA4)
- ? 2
- : (sparc_cpu == PROCESSOR_ULTRASPARC3
- ? 8 : ((sparc_cpu == PROCESSOR_NIAGARA7
- || sparc_cpu == PROCESSOR_M8)
- ? 32 : 3))),
- global_options.x_param_values,
- global_options_set.x_param_values);
-
- /* PARAM_L1_CACHE_LINE_SIZE is the size of the L1 cache line, in
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_simultaneous_prefetches,
+ ((sparc_cpu == PROCESSOR_ULTRASPARC
+ || sparc_cpu == PROCESSOR_NIAGARA
+ || sparc_cpu == PROCESSOR_NIAGARA2
+ || sparc_cpu == PROCESSOR_NIAGARA3
+ || sparc_cpu == PROCESSOR_NIAGARA4)
+ ? 2
+ : (sparc_cpu == PROCESSOR_ULTRASPARC3
+ ? 8 : ((sparc_cpu == PROCESSOR_NIAGARA7
+ || sparc_cpu == PROCESSOR_M8)
+ ? 32 : 3))));
+
+ /* param_l1_cache_line_size is the size of the L1 cache line, in
bytes.
The Oracle SPARC Architecture (previously the UltraSPARC
@@ -2064,38 +2064,33 @@ sparc_option_override (void)
L2 and L3, but only 32B are brought into the L1D$. (Assuming it
is a read_n prefetch, which is the only type which allocates to
the L1.) */
- maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
- (sparc_cpu == PROCESSOR_M8
- ? 64 : 32),
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_l1_cache_line_size,
+ (sparc_cpu == PROCESSOR_M8 ? 64 : 32));
- /* PARAM_L1_CACHE_SIZE is the size of the L1D$ (most SPARC chips use
+ /* param_l1_cache_size is the size of the L1D$ (most SPARC chips use
Hardvard level-1 caches) in kilobytes. Both UltraSPARC and
Niagara processors feature a L1D$ of 16KB. */
- maybe_set_param_value (PARAM_L1_CACHE_SIZE,
- ((sparc_cpu == PROCESSOR_ULTRASPARC
- || sparc_cpu == PROCESSOR_ULTRASPARC3
- || sparc_cpu == PROCESSOR_NIAGARA
- || sparc_cpu == PROCESSOR_NIAGARA2
- || sparc_cpu == PROCESSOR_NIAGARA3
- || sparc_cpu == PROCESSOR_NIAGARA4
- || sparc_cpu == PROCESSOR_NIAGARA7
- || sparc_cpu == PROCESSOR_M8)
- ? 16 : 64),
- global_options.x_param_values,
- global_options_set.x_param_values);
-
-
- /* PARAM_L2_CACHE_SIZE is the size fo the L2 in kilobytes. Note
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_l1_cache_size,
+ ((sparc_cpu == PROCESSOR_ULTRASPARC
+ || sparc_cpu == PROCESSOR_ULTRASPARC3
+ || sparc_cpu == PROCESSOR_NIAGARA
+ || sparc_cpu == PROCESSOR_NIAGARA2
+ || sparc_cpu == PROCESSOR_NIAGARA3
+ || sparc_cpu == PROCESSOR_NIAGARA4
+ || sparc_cpu == PROCESSOR_NIAGARA7
+ || sparc_cpu == PROCESSOR_M8)
+ ? 16 : 64));
+
+ /* param_l2_cache_size is the size of the L2 in kilobytes. Note
that 512 is the default in params.def. */
- maybe_set_param_value (PARAM_L2_CACHE_SIZE,
- ((sparc_cpu == PROCESSOR_NIAGARA4
- || sparc_cpu == PROCESSOR_M8)
- ? 128 : (sparc_cpu == PROCESSOR_NIAGARA7
- ? 256 : 512)),
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_l2_cache_size,
+ ((sparc_cpu == PROCESSOR_NIAGARA4
+ || sparc_cpu == PROCESSOR_M8)
+ ? 128 : (sparc_cpu == PROCESSOR_NIAGARA7
+ ? 256 : 512)));
/* Disable save slot sharing for call-clobbered registers by default.
diff --git a/gcc/config/visium/visium.c b/gcc/config/visium/visium.c
index 8477008..b1ace70 100644
--- a/gcc/config/visium/visium.c
+++ b/gcc/config/visium/visium.c
@@ -57,6 +57,7 @@
#include "tree-pass.h"
#include "context.h"
#include "builtins.h"
+#include "opts.h"
/* This file should be included last. */
#include "target-def.h"
@@ -457,9 +458,8 @@ visium_option_override (void)
/* Allow the size of compilation units to double because of inlining.
In practice the global size of the object code is hardly affected
because the additional instructions will take up the padding. */
- maybe_set_param_value (PARAM_INLINE_UNIT_GROWTH, 100,
- global_options.x_param_values,
- global_options_set.x_param_values);
+ SET_OPTION_IF_UNSET (&global_options, &global_options_set,
+ param_inline_unit_growth, 100);
}
/* Likewise for loops. */