author | Thomas Schwinge <tschwinge@baylibre.com> | 2024-03-16 23:03:21 +0100
committer | Thomas Schwinge <tschwinge@baylibre.com> | 2024-03-16 23:03:21 +0100
commit | 155817d33e48138e2ed42616d957bf42a598bfdf (patch)
tree | fe217a4c6694dca31ce3dce344ac38d42ce19598 /gcc
parent | 75dc00584e0e7a73ddcbef358abae24762b27bcd (diff)
parent | 829b0c4cbabbe5056b79a8a1ec4cd9e9d928a5fb (diff)
Merge commit '6a6d3817afa02bbcd2388c8e005da6faf88932f1^' into HEAD
Diffstat (limited to 'gcc')
436 files changed, 16133 insertions, 8646 deletions
diff --git a/gcc/ABOUT-GCC-NLS b/gcc/ABOUT-GCC-NLS
index e90a671..9424de4 100644
--- a/gcc/ABOUT-GCC-NLS
+++ b/gcc/ABOUT-GCC-NLS
@@ -23,6 +23,22 @@
 For example, GCC source code should not contain calls like `error
 ("unterminated comment")' instead, as it is the `error' function's
 responsibility to translate the message before the user sees it.
+In general, use no markup for strings that are the immediate format string
+argument of a diagnostic function. Use G_("str") for strings that will be
+used as the format string for a diagnostic but are e.g. assigned to a
+variable first. Use N_("str") for strings that are not diagnostic format
+strings, but will still be translated later. Use _("str") for strings that
+will not be translated elsewhere. It's important not to use _("str") in
+the initializer of a statically allocated variable; use one of the others
+instead and make sure that uses of that variable translate the string,
+whether directly with _(msg) or by passing it to a diagnostic or other
+function that performs the translation.
+
+Avoid using %s to compose a diagnostic message from multiple translatable
+strings; instead, write out the full diagnostic message for each variant.
+Only use %s for message components that do not need translation, such as
+keywords.
+
 By convention, any function parameter in the GCC sources whose name ends
 in `msgid' is expected to be a message requiring translation. If the
 parameter name ends with `gmsgid', it is assumed to be a GCC
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 0f1bd1d..9b4542e 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,856 @@
+2023-10-21  Pan Li  <pan2.li@intel.com>
+
+	PR target/111857
+	* config/riscv/riscv-opts.h (TARGET_VECTOR_VLS): Remove.
+	* config/riscv/riscv-protos.h (vls_mode_valid_p): New func decl.
+	* config/riscv/riscv-v.cc (autovectorize_vector_modes): Replace
+	macro reference to func.
+	(vls_mode_valid_p): New func impl for vls mode valid or not.
+	* config/riscv/riscv-vector-switch.def (VLS_ENTRY): Replace
+	macro reference to func.
+	* config/riscv/vector-iterators.md: Ditto.
+
+2023-10-20  Roger Sayle  <roger@nextmovesoftware.com>
+	    Uros Bizjak  <ubizjak@gmail.com>
+
+	PR middle-end/101955
+	PR tree-optimization/106245
+	* config/i386/i386.md (*extv<mode>_1_0): New define_insn_and_split.
+
+2023-10-20  David Edelsohn  <dje.gcc@gmail.com>
+
+	* gimple-harden-control-flow.cc: Include memmodel.h.
+
+2023-10-20  David Edelsohn  <dje.gcc@gmail.com>
+
+	* gimple-harden-control-flow.cc: Include tm_p.h.
+
+2023-10-20  Andre Vieira  <andre.simoesdiasvieira@arm.com>
+
+	PR tree-optimization/111882
+	* tree-if-conv.cc (get_bitfield_rep): Return NULL_TREE for bitfields
+	with non-constant offsets.
+
+2023-10-20  Tamar Christina  <tamar.christina@arm.com>
+
+	PR tree-optimization/111866
+	* tree-vect-loop-manip.cc (vect_do_peeling): Pass null as vinfo to
+	vect_set_loop_condition during prolog peeling.
+
+2023-10-20  Richard Biener  <rguenther@suse.de>
+
+	PR tree-optimization/111445
+	* tree-scalar-evolution.cc (simple_iv_with_niters):
+	Add missing check for a sign-conversion.
+
+2023-10-20  Richard Biener  <rguenther@suse.de>
+
+	PR tree-optimization/110243
+	PR tree-optimization/111336
+	* tree-ssa-loop-ivopts.cc (strip_offset_1): Rewrite
+	operations with undefined behavior on overflow to
+	unsigned arithmetic.
+
+2023-10-20  Richard Biener  <rguenther@suse.de>
+
+	PR tree-optimization/111891
+	* tree-vect-stmts.cc (vectorizable_simd_clone_call): Fix
+	assert.
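As a brief aside before the ChangeLog continues: the following standalone sketch illustrates the markup conventions added to ABOUT-GCC-NLS above. The `_`, `G_` and `N_` macros and the `error_toy`/`gettext_stub` helpers below are simplified stand-ins, not GCC's actual intl.h/diagnostic-core.h definitions; the messages and the `column` variable are invented for illustration.

```c
/* Sketch of the ABOUT-GCC-NLS markup rules; only the no-markup/G_/N_/_
   usage pattern is the point, all names here are placeholders.  */
#include <stdio.h>
#include <stdarg.h>

#define _(msgid)  gettext_stub (msgid)   /* translate right now                */
#define G_(msgid) (msgid)                /* mark only: diagnostic format string */
#define N_(msgid) (msgid)                /* mark only: translated at point of use */

static const char *
gettext_stub (const char *msgid)
{
  return msgid;   /* a real build would call gettext () here */
}

/* Stand-in for a diagnostic function: like GCC's error (), it translates
   the format string it receives, so callers pass untranslated text.  */
static void
error_toy (const char *gmsgid, ...)
{
  va_list ap;
  va_start (ap, gmsgid);
  vfprintf (stderr, _(gmsgid), ap);
  va_end (ap);
  fputc ('\n', stderr);
}

int
main (void)
{
  /* 1. Immediate format string of a diagnostic call: no markup needed.  */
  error_toy ("unterminated comment");

  /* 2. Format string chosen first and stored in a variable, then passed
     to a diagnostic: mark it with G_() so it is still extracted for
     translation; the diagnostic function translates it later.  */
  int column = 7;
  const char *gmsgid = column > 0 ? G_("stray character at column %d")
                                  : G_("stray character");
  error_toy (gmsgid, column);

  /* 3. Statically allocated initializer: never use _() here; use N_()
     and translate with _() where the string is actually printed.  */
  static const char *const hint = N_("consider quoting the argument");
  fprintf (stderr, "%s\n", _(hint));
  return 0;
}
```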
+ +2023-10-20 Andrew Stubbs <ams@codesourcery.com> + + * config.gcc: Allow --with-arch=gfx1030. + * config/gcn/gcn-hsa.h (NO_XNACK): gfx1030 does not support xnack. + (ASM_SPEC): gfx1030 needs -mattr=+wavefrontsize64 set. + * config/gcn/gcn-opts.h (enum processor_type): Add PROCESSOR_GFX1030. + (TARGET_GFX1030): New. + (TARGET_RDNA2): New. + * config/gcn/gcn-valu.md (@dpp_move<mode>): Disable for RDNA2. + (addc<mode>3<exec_vcc>): Add RDNA2 syntax variant. + (subc<mode>3<exec_vcc>): Likewise. + (<convop><mode><vndi>2_exec): Add RDNA2 alternatives. + (vec_cmp<mode>di): Likewise. + (vec_cmp<u><mode>di): Likewise. + (vec_cmp<mode>di_exec): Likewise. + (vec_cmp<u><mode>di_exec): Likewise. + (vec_cmp<mode>di_dup): Likewise. + (vec_cmp<mode>di_dup_exec): Likewise. + (reduc_<reduc_op>_scal_<mode>): Disable for RDNA2. + (*<reduc_op>_dpp_shr_<mode>): Likewise. + (*plus_carry_dpp_shr_<mode>): Likewise. + (*plus_carry_in_dpp_shr_<mode>): Likewise. + * config/gcn/gcn.cc (gcn_option_override): Recognise gfx1030. + (gcn_global_address_p): RDNA2 only allows smaller offsets. + (gcn_addr_space_legitimate_address_p): Likewise. + (gcn_omp_device_kind_arch_isa): Recognise gfx1030. + (gcn_expand_epilogue): Use VGPRs instead of SGPRs. + (output_file_start): Configure gfx1030. + * config/gcn/gcn.h (TARGET_CPU_CPP_BUILTINS): Add __RDNA2__; + (ASSEMBLER_DIALECT): New. + * config/gcn/gcn.md (rdna): New define_attr. + (enabled): Use "rdna" attribute. + (gcn_return): Remove s_dcache_wb. + (addcsi3_scalar): Add RDNA2 syntax variant. + (addcsi3_scalar_zero): Likewise. + (addptrdi3): Likewise. + (mulsi3): v_mul_lo_i32 should be v_mul_lo_u32 on all ISA. + (*memory_barrier): Add RDNA2 syntax variant. + (atomic_load<mode>): Add RDNA2 cache control variants, and disable + scalar atomics for RDNA2. + (atomic_store<mode>): Likewise. + (atomic_exchange<mode>): Likewise. + * config/gcn/gcn.opt (gpu_type): Add gfx1030. + * config/gcn/mkoffload.cc (EF_AMDGPU_MACH_AMDGCN_GFX1030): New. + (main): Recognise -march=gfx1030. + * config/gcn/t-omp-device: Add gfx1030 isa. + +2023-10-20 Richard Biener <rguenther@suse.de> + + PR tree-optimization/111000 + * stor-layout.h (element_precision): Move .. + * tree.h (element_precision): .. here. + * tree-ssa-loop-im.cc (movement_possibility_1): Restrict + motion of shifts and rotates. + +2023-10-20 Alexandre Oliva <oliva@adacore.com> + + * tree-core.h (ECF_XTHROW): New macro. + * tree.cc (set_call_expr): Add expected_throw attribute when + ECF_XTHROW is set. + (build_common_builtin_node): Add ECF_XTHROW to + __cxa_end_cleanup and _Unwind_Resume or _Unwind_SjLj_Resume. + * calls.cc (flags_from_decl_or_type): Check for expected_throw + attribute to set ECF_XTHROW. + * gimple.cc (gimple_build_call_from_tree): Propagate + ECF_XTHROW from decl flags to gimple call... + (gimple_call_flags): ... and back. + * gimple.h (GF_CALL_XTHROW): New gf_mask flag. + (gimple_call_set_expected_throw): New. + (gimple_call_expected_throw_p): New. + * Makefile.in (OBJS): Add gimple-harden-control-flow.o. + * builtins.def (BUILT_IN___HARDCFR_CHECK): New. + * common.opt (fharden-control-flow-redundancy): New. + (-fhardcfr-check-returning-calls): New. + (-fhardcfr-check-exceptions): New. + (-fhardcfr-check-noreturn-calls=*): New. + (Enum hardcfr_check_noreturn_calls): New. + (fhardcfr-skip-leaf): New. + * doc/invoke.texi: Document them. + (hardcfr-max-blocks, hardcfr-max-inline-blocks): New params. + * flag-types.h (enum hardcfr_noret): New. + * gimple-harden-control-flow.cc: New. 
+ * params.opt (-param=hardcfr-max-blocks=): New. + (-param=hradcfr-max-inline-blocks=): New. + * passes.def (pass_harden_control_flow_redundancy): Add. + * tree-pass.h (make_pass_harden_control_flow_redundancy): + Declare. + * doc/extend.texi: Document expected_throw attribute. + +2023-10-20 Alex Coplan <alex.coplan@arm.com> + + * rtl-ssa/changes.cc (function_info::change_insns): Ensure we call + ::remove_insn on deleted insns. + +2023-10-20 Richard Biener <rguenther@suse.de> + + * doc/generic.texi ({L,R}ROTATE_EXPR): Document. + +2023-10-20 Oleg Endo <olegendo@gcc.gnu.org> + + PR target/101177 + * config/sh/sh.md (unnamed split pattern): Fix comparison of + find_regno_note result. + +2023-10-20 Richard Biener <rguenther@suse.de> + + * tree-vect-loop.cc (update_epilogue_loop_vinfo): Rewrite + both STMT_VINFO_GATHER_SCATTER_P and VMAT_GATHER_SCATTER + stmt refs. + +2023-10-20 Richard Biener <rguenther@suse.de> + + * tree-vect-slp.cc (off_map, off_op0_map, off_arg2_map, + off_arg3_arg2_map): New. + (vect_get_operand_map): Get flag whether the stmt was + recognized as gather or scatter and use the above + accordingly. + (vect_get_and_check_slp_defs): Adjust. + (vect_build_slp_tree_2): Likewise. + +2023-10-20 Juzhe-Zhong <juzhe.zhong@rivai.ai> + + * config/riscv/riscv-vsetvl.cc (pre_vsetvl::fuse_local_vsetvl_info): Rename variables. + (pre_vsetvl::pre_global_vsetvl_info): Ditto. + (pre_vsetvl::emit_vsetvl): Ditto. + +2023-10-20 Tamar Christina <tamar.christina@arm.com> + Andre Vieira <andre.simoesdiasvieira@arm.com> + + * tree-if-conv.cc (if_convertible_loop_p_1): Move check from here ... + (get_loop_body_if_conv_order): ... to here. + (if_convertible_loop_p): Remove single_exit check. + (tree_if_conversion): Move single_exit check to if-conversion part and + support multiple exits. + +2023-10-20 Tamar Christina <tamar.christina@arm.com> + Andre Vieira <andre.simoesdiasvieira@arm.com> + + * tree-vect-patterns.cc (vect_init_pattern_stmt): Copy STMT_VINFO_TYPE + from original statement. + (vect_recog_bitfield_ref_pattern): Support bitfields in gcond. + +2023-10-20 Juzhe-Zhong <juzhe.zhong@rivai.ai> + + PR target/111848 + * config/riscv/riscv-selftests.cc (run_const_vector_selftests): Adapt selftest. + * config/riscv/riscv-v.cc (expand_const_vector): Change it into vec_duplicate splitter. + +2023-10-20 Lehua Ding <lehua.ding@rivai.ai> + + PR target/111037 + PR target/111234 + PR target/111725 + * config/riscv/riscv-vsetvl.cc (bitmap_union_of_preds_with_entry): New. + (debug): Removed. + (compute_reaching_defintion): New. + (enum vsetvl_type): Moved. + (vlmax_avl_p): Moved. + (enum emit_type): Moved. + (vlmul_to_str): Moved. + (vlmax_avl_insn_p): Removed. + (policy_to_str): Moved. + (loop_basic_block_p): Removed. + (valid_sew_p): Removed. + (vsetvl_insn_p): Moved. + (vsetvl_vtype_change_only_p): Removed. + (after_or_same_p): Removed. + (before_p): Removed. + (anticipatable_occurrence_p): Removed. + (available_occurrence_p): Removed. + (insn_should_be_added_p): Removed. + (get_all_sets): Moved. + (get_same_bb_set): Moved. + (gen_vsetvl_pat): Removed. + (calculate_vlmul): Moved. + (get_max_int_sew): New. + (emit_vsetvl_insn): Removed. + (get_max_float_sew): New. + (eliminate_insn): Removed. + (insert_vsetvl): Removed. + (count_regno_occurrences): Moved. + (get_vl_vtype_info): Removed. + (enum def_type): Moved. + (validate_change_or_fail): Moved. + (change_insn): Removed. + (get_all_real_uses): Moved. + (get_forward_read_vl_insn): Removed. + (get_backward_fault_first_load_insn): Removed. 
+ (change_vsetvl_insn): Removed. + (avl_source_has_vsetvl_p): Removed. + (source_equal_p): Moved. + (calculate_sew): Removed. + (same_equiv_note_p): Moved. + (get_expr_id): New. + (incompatible_avl_p): Removed. + (get_regno): New. + (different_sew_p): Removed. + (get_bb_index): New. + (different_lmul_p): Removed. + (has_no_uses): Moved. + (different_ratio_p): Removed. + (different_tail_policy_p): Removed. + (different_mask_policy_p): Removed. + (possible_zero_avl_p): Removed. + (enum demand_flags): New. + (second_ratio_invalid_for_first_sew_p): Removed. + (second_ratio_invalid_for_first_lmul_p): Removed. + (enum class): New. + (float_insn_valid_sew_p): Removed. + (second_sew_less_than_first_sew_p): Removed. + (first_sew_less_than_second_sew_p): Removed. + (class vsetvl_info): New. + (compare_lmul): Removed. + (second_lmul_less_than_first_lmul_p): Removed. + (second_ratio_less_than_first_ratio_p): Removed. + (DEF_INCOMPATIBLE_COND): Removed. + (greatest_sew): Removed. + (first_sew): Removed. + (second_sew): Removed. + (first_vlmul): Removed. + (second_vlmul): Removed. + (first_ratio): Removed. + (second_ratio): Removed. + (vlmul_for_first_sew_second_ratio): Removed. + (vlmul_for_greatest_sew_second_ratio): Removed. + (ratio_for_second_sew_first_vlmul): Removed. + (class vsetvl_block_info): New. + (DEF_SEW_LMUL_FUSE_RULE): New. + (always_unavailable): Removed. + (avl_unavailable_p): Removed. + (class demand_system): New. + (sew_unavailable_p): Removed. + (lmul_unavailable_p): Removed. + (ge_sew_unavailable_p): Removed. + (ge_sew_lmul_unavailable_p): Removed. + (ge_sew_ratio_unavailable_p): Removed. + (DEF_UNAVAILABLE_COND): Removed. + (same_sew_lmul_demand_p): Removed. + (propagate_avl_across_demands_p): Removed. + (reg_available_p): Removed. + (support_relaxed_compatible_p): Removed. + (demands_can_be_fused_p): Removed. + (earliest_pred_can_be_fused_p): Removed. + (vsetvl_dominated_by_p): Removed. + (avl_info::avl_info): Removed. + (avl_info::single_source_equal_p): Removed. + (avl_info::multiple_source_equal_p): Removed. + (DEF_SEW_LMUL_RULE): New. + (avl_info::operator=): Removed. + (avl_info::operator==): Removed. + (DEF_POLICY_RULE): New. + (avl_info::operator!=): Removed. + (avl_info::has_non_zero_avl): Removed. + (vl_vtype_info::vl_vtype_info): Removed. + (vl_vtype_info::operator==): Removed. + (DEF_AVL_RULE): New. + (vl_vtype_info::operator!=): Removed. + (vl_vtype_info::same_avl_p): Removed. + (vl_vtype_info::same_vtype_p): Removed. + (vl_vtype_info::same_vlmax_p): Removed. + (vector_insn_info::operator>=): Removed. + (vector_insn_info::operator==): Removed. + (class pre_vsetvl): New. + (vector_insn_info::parse_insn): Removed. + (vector_insn_info::compatible_p): Removed. + (vector_insn_info::skip_avl_compatible_p): Removed. + (vector_insn_info::compatible_avl_p): Removed. + (vector_insn_info::compatible_vtype_p): Removed. + (vector_insn_info::available_p): Removed. + (vector_insn_info::fuse_avl): Removed. + (vector_insn_info::fuse_sew_lmul): Removed. + (vector_insn_info::fuse_tail_policy): Removed. + (vector_insn_info::fuse_mask_policy): Removed. + (vector_insn_info::local_merge): Removed. + (vector_insn_info::global_merge): Removed. + (vector_insn_info::get_avl_or_vl_reg): Removed. + (vector_insn_info::update_fault_first_load_avl): Removed. + (vector_insn_info::dump): Removed. + (vector_infos_manager::vector_infos_manager): Removed. + (vector_infos_manager::create_expr): Removed. + (vector_infos_manager::get_expr_id): Removed. + (vector_infos_manager::all_same_ratio_p): Removed. 
+ (vector_infos_manager::all_avail_in_compatible_p): Removed. + (vector_infos_manager::all_same_avl_p): Removed. + (vector_infos_manager::expr_set_num): Removed. + (vector_infos_manager::release): Removed. + (vector_infos_manager::create_bitmap_vectors): Removed. + (vector_infos_manager::free_bitmap_vectors): Removed. + (vector_infos_manager::dump): Removed. + (class pass_vsetvl): Adjust. + (pass_vsetvl::get_vector_info): Removed. + (pass_vsetvl::get_block_info): Removed. + (pass_vsetvl::update_vector_info): Removed. + (pass_vsetvl::update_block_info): Removed. + (pre_vsetvl::compute_avl_def_data): New. + (pass_vsetvl::simple_vsetvl): Removed. + (pass_vsetvl::compute_local_backward_infos): Removed. + (pass_vsetvl::need_vsetvl): Removed. + (pass_vsetvl::transfer_before): Removed. + (pass_vsetvl::transfer_after): Removed. + (pre_vsetvl::compute_vsetvl_def_data): New. + (pass_vsetvl::emit_local_forward_vsetvls): Removed. + (pass_vsetvl::prune_expressions): Removed. + (pass_vsetvl::compute_local_properties): Removed. + (pre_vsetvl::compute_lcm_local_properties): New. + (pass_vsetvl::earliest_fusion): Removed. + (pre_vsetvl::fuse_local_vsetvl_info): New. + (pass_vsetvl::vsetvl_fusion): Removed. + (pass_vsetvl::can_refine_vsetvl_p): Removed. + (pre_vsetvl::earliest_fuse_vsetvl_info): New. + (pass_vsetvl::refine_vsetvls): Removed. + (pass_vsetvl::cleanup_vsetvls): Removed. + (pass_vsetvl::commit_vsetvls): Removed. + (pass_vsetvl::pre_vsetvl): Removed. + (pass_vsetvl::get_vsetvl_at_end): Removed. + (local_avl_compatible_p): Removed. + (pass_vsetvl::local_eliminate_vsetvl_insn): Removed. + (pre_vsetvl::pre_global_vsetvl_info): New. + (get_first_vsetvl_before_rvv_insns): Removed. + (pass_vsetvl::global_eliminate_vsetvl_insn): Removed. + (pre_vsetvl::emit_vsetvl): New. + (pass_vsetvl::ssa_post_optimization): Removed. + (pre_vsetvl::cleaup): New. + (pre_vsetvl::remove_avl_operand): New. + (pass_vsetvl::df_post_optimization): Removed. + (pre_vsetvl::remove_unused_dest_operand): New. + (pass_vsetvl::init): Removed. + (pass_vsetvl::done): Removed. + (pass_vsetvl::compute_probabilities): Removed. + (pass_vsetvl::lazy_vsetvl): Adjust. + (pass_vsetvl::execute): Adjust. + * config/riscv/riscv-vsetvl.def (DEF_INCOMPATIBLE_COND): Removed. + (DEF_SEW_LMUL_RULE): New. + (DEF_SEW_LMUL_FUSE_RULE): Removed. + (DEF_POLICY_RULE): New. + (DEF_UNAVAILABLE_COND): Removed + (DEF_AVL_RULE): New demand type. + (sew_lmul): New demand type. + (ratio_only): New demand type. + (sew_only): New demand type. + (ge_sew): New demand type. + (ratio_and_ge_sew): New demand type. + (tail_mask_policy): New demand type. + (tail_policy_only): New demand type. + (mask_policy_only): New demand type. + (ignore_policy): New demand type. + (avl): New demand type. + (non_zero_avl): New demand type. + (ignore_avl): New demand type. + * config/riscv/t-riscv: Removed riscv-vsetvl.h + * config/riscv/riscv-vsetvl.h: Removed. + +2023-10-20 Alexandre Oliva <oliva@adacore.com> + + * tree-eh.cc (make_eh_edges): Return the new edge. + * tree-eh.h (make_eh_edges): Likewise. + +2023-10-19 Marek Polacek <polacek@redhat.com> + + * doc/contrib.texi: Add entry for Patrick Palka. + +2023-10-19 Andre Vieira <andre.simoesdiasvieira@arm.com> + + * omp-simd-clone.cc (simd_clone_adjust_argument_types): Make function + compatible with mask parameters in clone. + * tree-vect-stmts.cc (vect_build_all_ones_mask): Allow vector boolean + typed masks. + (vectorizable_simd_clone_call): Enable the use of masked clones in + fully masked loops. 
+ +2023-10-19 Andre Vieira <andre.simoesdiasvieira@arm.com> + + PR tree-optimization/110485 + * tree-vect-stmts.cc (vectorizable_simd_clone_call): Disable partial + vectors usage if a notinbranch simdclone has been selected. + +2023-10-19 Andre Vieira <andre.simoesdiasvieira@arm.com> + + * tree-vect-data-refs.cc (vect_get_smallest_scalar_type): Special case + simd clone calls and only use types that are mapped to vectors. + (simd_clone_call_p): New helper function. + +2023-10-19 Andre Vieira <andre.simoesdiasvieira@arm.com> + + * tree-parloops.cc (try_transform_to_exit_first_loop_alt): Accept + poly NIT and ALT_BOUND. + +2023-10-19 Andre Vieira <andre.simoesdiasvieira@arm.com> + + * tree-parloops.cc (create_loop_fn): Copy specific target and + optimization options to clone. + +2023-10-19 Andre Vieira <andre.simoesdiasvieira@arm.com> + + * omp-simd-clone.cc (simd_clone_subparts): Remove. + (simd_clone_init_simd_arrays): Replace simd_clone_supbarts with + TYPE_VECTOR_SUBPARTS. + (ipa_simd_modify_function_body): Likewise. + * tree-vect-stmts.cc (vectorizable_simd_clone_call): Likewise. + (simd_clone_subparts): Remove. + +2023-10-19 Jason Merrill <jason@redhat.com> + + * ABOUT-GCC-NLS: Add usage guidance. + +2023-10-19 Jason Merrill <jason@redhat.com> + + * diagnostic-core.h (permerror): Rename new overloads... + (permerror_opt): To this. + * diagnostic.cc: Likewise. + +2023-10-19 Tamar Christina <tamar.christina@arm.com> + + PR tree-optimization/111860 + * tree-vect-loop-manip.cc (slpeel_tree_duplicate_loop_to_edge_cfg): + Remove PHI nodes that dominate loop. + +2023-10-19 Richard Biener <rguenther@suse.de> + + PR tree-optimization/111131 + * tree-vect-loop.cc (update_epilogue_loop_vinfo): Make + sure to update all gather/scatter stmt DRs, not only those + that eventually got VMAT_GATHER_SCATTER set. + * tree-vect-slp.cc (_slp_oprnd_info::first_gs_info): Add. + (vect_get_and_check_slp_defs): Handle gathers/scatters, + adding the offset as SLP operand and comparing base and scale. + (vect_build_slp_tree_1): Handle gathers. + (vect_build_slp_tree_2): Likewise. + +2023-10-19 Richard Biener <rguenther@suse.de> + + * tree-vect-stmts.cc (vect_build_gather_load_calls): Rename + to ... + (vect_build_one_gather_load_call): ... this. Refactor, + inline widening/narrowing support ... + (vectorizable_load): ... here, do gather vectorization + with builtin decls along other gather vectorization. + +2023-10-19 Alex Coplan <alex.coplan@arm.com> + + * config/aarch64/aarch64.md (load_pair_dw_tftf): Rename to ... + (load_pair_dw_<TX:mode><TX2:mode>): ... this. + (store_pair_dw_tftf): Rename to ... + (store_pair_dw_<TX:mode><TX2:mode>): ... this. + * config/aarch64/iterators.md (TX2): New. + +2023-10-19 Alex Coplan <alex.coplan@arm.com> + + * rtl-ssa/changes.cc (function_info::finalize_new_accesses): Add new + parameter to give final insn position, infer use of mem if it isn't + specified explicitly. + (function_info::change_insns): Pass down final insn position to + finalize_new_accesses. + * rtl-ssa/functions.h: Add parameter to finalize_new_accesses. + +2023-10-19 Alex Coplan <alex.coplan@arm.com> + + * rtl-ssa/accesses.cc (function_info::reparent_use): New. + * rtl-ssa/functions.h (function_info): Declare new member + function reparent_use. + +2023-10-19 Alex Coplan <alex.coplan@arm.com> + + * rtl-ssa/access-utils.h (drop_memory_access): New. 
+ +2023-10-19 Alex Coplan <alex.coplan@arm.com> + + * rtl-ssa/insns.cc (function_info::add_insn_after): Ensure we + update the prev pointer on the following nondebug insn in the + case that !insn->is_debug_insn () && next->is_debug_insn (). + +2023-10-19 Haochen Jiang <haochen.jiang@intel.com> + + * config/i386/i386.h: Correct the ISA enabled for Arrow Lake. + Also make Clearwater Forest depends on Sierra Forest. + * config/i386/i386-options.cc: Revise the order of the macro + definition to avoid confusion. + * doc/extend.texi: Revise documentation. + * doc/invoke.texi: Correct documentation. + +2023-10-19 Andrew Stubbs <ams@codesourcery.com> + + * config.gcc (amdgcn): Switch default to --with-arch=gfx900. + Implement support for --with-multilib-list. + * config/gcn/t-gcn-hsa: Likewise. + * doc/install.texi: Likewise. + * doc/invoke.texi: Mark Fiji deprecated. + +2023-10-19 Jiahao Xu <xujiahao@loongson.cn> + + * config/loongarch/loongarch.cc (loongarch_vector_costs): Inherit from + vector_costs. Add a constructor. + (loongarch_vector_costs::add_stmt_cost): Use adjust_cost_for_freq to + adjust the cost for inner loops. + (loongarch_vector_costs::count_operations): New function. + (loongarch_vector_costs::determine_suggested_unroll_factor): Ditto. + (loongarch_vector_costs::finish_cost): Ditto. + (loongarch_builtin_vectorization_cost): Adjust. + * config/loongarch/loongarch.opt (loongarch-vect-unroll-limit): New parameter. + (loongarcg-vect-issue-info): Ditto. + (mmemvec-cost): Delete. + * config/loongarch/genopts/loongarch.opt.in + (loongarch-vect-unroll-limit): Ditto. + (loongarcg-vect-issue-info): Ditto. + (mmemvec-cost): Delete. + * doc/invoke.texi (loongarcg-vect-unroll-limit): Document new option. + +2023-10-19 Jiahao Xu <xujiahao@loongson.cn> + + * config/loongarch/lasx.md + (vec_widen_<su>mult_even_v8si): New patterns. + (vec_widen_<su>add_hi_<mode>): Ditto. + (vec_widen_<su>add_lo_<mode>): Ditto. + (vec_widen_<su>sub_hi_<mode>): Ditto. + (vec_widen_<su>sub_lo_<mode>): Ditto. + (vec_widen_<su>mult_hi_<mode>): Ditto. + (vec_widen_<su>mult_lo_<mode>): Ditto. + * config/loongarch/loongarch.md (u_bool): New iterator. + * config/loongarch/loongarch-protos.h + (loongarch_expand_vec_widen_hilo): New prototype. + * config/loongarch/loongarch.cc + (loongarch_expand_vec_interleave): New function. + (loongarch_expand_vec_widen_hilo): New function. + +2023-10-19 Jiahao Xu <xujiahao@loongson.cn> + + * config/loongarch/lasx.md + (avg<mode>3_ceil): New patterns. + (uavg<mode>3_ceil): Ditto. + (avg<mode>3_floor): Ditto. + (uavg<mode>3_floor): Ditto. + (usadv32qi): Ditto. + (ssadv32qi): Ditto. + * config/loongarch/lsx.md + (avg<mode>3_ceil): New patterns. + (uavg<mode>3_ceil): Ditto. + (avg<mode>3_floor): Ditto. + (uavg<mode>3_floor): Ditto. + (usadv16qi): Ditto. + (ssadv16qi): Ditto. + +2023-10-18 Andrew Pinski <pinskia@gmail.com> + + PR middle-end/111863 + * expr.cc (do_store_flag): Don't over write arg0 + when stripping off `& POW2`. + +2023-10-18 Prathamesh Kulkarni <prathamesh.kulkarni@linaro.org> + + PR tree-optimization/111648 + * fold-const.cc (valid_mask_for_fold_vec_perm_cst_p): If a1 + chooses base element from arg, ensure that it's a natural stepped + sequence. + (build_vec_cst_rand): New param natural_stepped and use it to + construct a naturally stepped sequence. + (test_nunits_min_2): Add new unit tests Case 6 and Case 7. + +2023-10-18 Dimitar Dimitrov <dimitar@dinux.eu> + + * config/pru/pru.cc (pru_insn_cost): New function. + (TARGET_INSN_COST): Define for PRU. 
+ +2023-10-18 Andrew Carlotti <andrew.carlotti@arm.com> + + * config/aarch64/aarch64.cc (aarch64_test_fractional_cost): + Test <= instead of testing < twice. + +2023-10-18 Jakub Jelinek <jakub@redhat.com> + + PR bootstrap/111852 + * cse.cc (cse_insn): Add workaround for GCC 4.8-4.9, instead of + using rtx_def type for memory_extend_buf, use unsigned char + arrayy with size of rtx_def and its alignment. + +2023-10-18 Jason Merrill <jason@redhat.com> + + * doc/invoke.texi: Move -fpermissive to Warning Options. + * diagnostic.cc (update_effective_level_from_pragmas): Remove + redundant system header check. + (diagnostic_report_diagnostic): Move down syshdr/-w check. + (diagnostic_impl): Handle DK_PERMERROR with an option number. + (permerror): Add new overloads. + * diagnostic-core.h (permerror): Declare them. + +2023-10-18 Tobias Burnus <tobias@codesourcery.com> + + * gimplify.cc (gimplify_bind_expr): Remove "omp allocate" attribute + to avoid that auxillary statement list reaches LTO. + +2023-10-18 Jakub Jelinek <jakub@redhat.com> + + PR tree-optimization/111845 + * tree-ssa-math-opts.cc (match_uaddc_usubc): Remember temporary + statements for the 4 operand addition or subtraction of 3 operands + from 1 operand cases and remove them when successful. Look for + nested additions even from rhs[2], not just rhs[1]. + +2023-10-18 Tobias Burnus <tobias@codesourcery.com> + + PR target/111093 + * config/nvptx/nvptx.cc (nvptx_option_override): Issue fatal error + instead of an assert ICE when no -march= has been specified. + +2023-10-18 Iain Sandoe <iain@sandoe.co.uk> + + * config.in: Regenerate. + * config/darwin.cc (darwin_file_start): Add assembler directives + for the target OS version, where these are supported by the + assembler. + (darwin_override_options): Check for building >= macOS 10.14. + * configure: Regenerate. + * configure.ac: Check for assembler support of .build_version + directives. + +2023-10-18 Tamar Christina <tamar.christina@arm.com> + + PR tree-optimization/109154 + * tree-if-conv.cc (INCLUDE_ALGORITHM): Remove. + (typedef struct ifcvt_arg_entry): New. + (cmp_arg_entry): New. + (gen_phi_arg_condition, gen_phi_nest_statement, + predicate_scalar_phi): Use them. + +2023-10-18 Tamar Christina <tamar.christina@arm.com> + + PR tree-optimization/109154 + * config/aarch64/aarch64-simd.md (*aarch64_simd_mov<VDMOV:mode>): + Rewrite to new syntax. + (*aarch64_simd_mov<VQMOV:mode): Rewrite to new syntax and merge in + splits. + +2023-10-18 Tamar Christina <tamar.christina@arm.com> + + PR tree-optimization/109154 + * tree-if-conv.cc (if_convertible_stmt_p): Allow any const IFN. + +2023-10-18 Tamar Christina <tamar.christina@arm.com> + + PR tree-optimization/109154 + * match.pd: Add new cond_op rule. + +2023-10-18 Xi Ruoyao <xry111@xry111.site> + + * config/loongarch/loongarch.md (movfcc): Use fcmp.caf.s for + zeroing a fcc. + +2023-10-18 Richard Biener <rguenther@suse.de> + + * tree-vect-stmts.cc (vectorizable_simd_clone_call): + Relax check to again allow passing integer mode masks + as traditional vectors. + +2023-10-18 Tamar Christina <tamar.christina@arm.com> + + * tree-loop-distribution.cc (copy_loop_before): Request no LCSSA. + * tree-vect-loop-manip.cc (adjust_phi_and_debug_stmts): Add additional + asserts. + (slpeel_tree_duplicate_loop_to_edge_cfg): Keep LCSSA during peeling. + (find_guard_arg): Look value up through explicit edge and original defs. + (vect_do_peeling): Use it. + (slpeel_update_phi_nodes_for_guard2): Take explicit exit edge. 
+ (slpeel_update_phi_nodes_for_lcssa, slpeel_update_phi_nodes_for_loops): + Remove. + * tree-vect-loop.cc (vect_create_epilog_for_reduction): Initialize phi. + * tree-vectorizer.h (slpeel_tree_duplicate_loop_to_edge_cfg): Add + optional param to turn off LCSSA mode. + +2023-10-18 Tamar Christina <tamar.christina@arm.com> + + * tree-if-conv.cc (tree_if_conversion): Record exits in aux. + * tree-vect-loop-manip.cc (slpeel_tree_duplicate_loop_to_edge_cfg): Use + it. + * tree-vect-loop.cc (vect_get_loop_niters): Determine main exit. + (vec_init_loop_exit_info): Extend analysis when multiple exits. + (vect_analyze_loop_form): Record conds and determine main cond. + (vect_create_loop_vinfo): Extend bookkeeping of conds. + (vect_analyze_loop): Release conds. + * tree-vectorizer.h (LOOP_VINFO_LOOP_CONDS, + LOOP_VINFO_LOOP_IV_COND): New. + (struct vect_loop_form_info): Add conds, alt_loop_conds; + (struct loop_vec_info): Add conds, loop_iv_cond. + +2023-10-18 Tamar Christina <tamar.christina@arm.com> + + * tree-loop-distribution.cc (copy_loop_before): Pass exit explicitly. + (loop_distribution::distribute_loop): Bail out of not single exit. + * tree-scalar-evolution.cc (get_loop_exit_condition): New. + * tree-scalar-evolution.h (get_loop_exit_condition): New. + * tree-vect-data-refs.cc (vect_enhance_data_refs_alignment): Pass exit + explicitly. + * tree-vect-loop-manip.cc (vect_set_loop_condition_partial_vectors, + vect_set_loop_condition_partial_vectors_avx512, + vect_set_loop_condition_normal, vect_set_loop_condition): Explicitly + take exit. + (slpeel_tree_duplicate_loop_to_edge_cfg): Explicitly take exit and + return new peeled corresponding peeled exit. + (slpeel_can_duplicate_loop_p): Explicitly take exit. + (find_loop_location): Handle not knowing an explicit exit. + (vect_update_ivs_after_vectorizer, vect_gen_vector_loop_niters_mult_vf, + find_guard_arg, slpeel_update_phi_nodes_for_loops, + slpeel_update_phi_nodes_for_guard2): Use new exits. + (vect_do_peeling): Update bookkeeping to keep track of exits. + * tree-vect-loop.cc (vect_get_loop_niters): Explicitly take exit to + analyze. + (vec_init_loop_exit_info): New. + (_loop_vec_info::_loop_vec_info): Initialize vec_loop_iv, + vec_epilogue_loop_iv, scalar_loop_iv. + (vect_analyze_loop_form): Initialize exits. + (vect_create_loop_vinfo): Set main exit. + (vect_create_epilog_for_reduction, vectorizable_live_operation, + vect_transform_loop): Use it. + (scale_profile_for_vect_loop): Explicitly take exit to scale. + * tree-vectorizer.cc (set_uid_loop_bbs): Initialize loop exit. + * tree-vectorizer.h (LOOP_VINFO_IV_EXIT, LOOP_VINFO_EPILOGUE_IV_EXIT, + LOOP_VINFO_SCALAR_IV_EXIT): New. + (struct loop_vec_info): Add vec_loop_iv, vec_epilogue_loop_iv, + scalar_loop_iv. + (vect_set_loop_condition, slpeel_can_duplicate_loop_p, + slpeel_tree_duplicate_loop_to_edge_cfg): Take explicit exits. + (vec_init_loop_exit_info): New. + (struct vect_loop_form_info): Add loop_exit. + +2023-10-18 Tamar Christina <tamar.christina@arm.com> + + * tree-vect-stmts.cc (vectorizable_comparison): Refactor, splitting body + to ... + (vectorizable_comparison_1): ...This. + +2023-10-18 Juzhe-Zhong <juzhe.zhong@rivai.ai> + + * config/riscv/riscv-v.cc (shuffle_consecutive_patterns): New function. + (expand_vec_perm_const_1): Add consecutive pattern recognition. + +2023-10-18 Haochen Jiang <haochen.jiang@intel.com> + + * common/config/i386/cpuinfo.h (get_intel_cpu): Add Panther + Lake. + * common/config/i386/i386-common.cc (processor_name): + Ditto. 
+ (processor_alias_table): Ditto. + * common/config/i386/i386-cpuinfo.h (enum processor_types): + Add INTEL_PANTHERLAKE. + * config.gcc: Add -march=pantherlake. + * config/i386/driver-i386.cc (host_detect_local_cpu): Refactor + the if clause. Handle pantherlake. + * config/i386/i386-c.cc (ix86_target_macros_internal): + Handle pantherlake. + * config/i386/i386-options.cc (processor_cost_table): Ditto. + (m_PANTHERLAKE): New. + (m_CORE_HYBRID): Add pantherlake. + * config/i386/i386.h (enum processor_type): Ditto. + * doc/extend.texi: Ditto. + * doc/invoke.texi: Ditto. + +2023-10-18 Haochen Jiang <haochen.jiang@intel.com> + + * config/i386/i386-options.cc (m_CORE_HYBRID): New. + * config/i386/x86-tune.def: Replace hybrid client tune to + m_CORE_HYBRID. + +2023-10-18 Haochen Jiang <haochen.jiang@intel.com> + + * common/config/i386/cpuinfo.h + (get_intel_cpu): Handle Clearwater Forest. + * common/config/i386/i386-common.cc (processor_name): + Add Clearwater Forest. + (processor_alias_table): Ditto. + * common/config/i386/i386-cpuinfo.h (enum processor_types): + Add INTEL_CLEARWATERFOREST. + * config.gcc: Add -march=clearwaterforest. + * config/i386/driver-i386.cc (host_detect_local_cpu): Handle + clearwaterforest. + * config/i386/i386-c.cc (ix86_target_macros_internal): Ditto. + * config/i386/i386-options.cc (processor_cost_table): Ditto. + (m_CLEARWATERFOREST): New. + (m_CORE_ATOM): Add clearwaterforest. + * config/i386/i386.h (enum processor_type): Ditto. + * doc/extend.texi: Ditto. + * doc/invoke.texi: Ditto. + +2023-10-18 liuhongt <hongtao.liu@intel.com> + + * config/i386/mmx.md (fma<mode>4): New expander. + (fms<mode>4): Ditto. + (fnma<mode>4): Ditto. + (fnms<mode>4): Ditto. + (vec_fmaddsubv4hf4): Ditto. + (vec_fmsubaddv4hf4): Ditto. + +2023-10-18 Juzhe-Zhong <juzhe.zhong@rivai.ai> + + PR target/111832 + * config/riscv/riscv-vector-costs.cc (get_biggest_mode): New function. + 2023-10-17 Richard Sandiford <richard.sandiford@arm.com> * config/aarch64/aarch64.cc (aarch64_layout_frame): Don't make diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP index 304d626..244ba03 100644 --- a/gcc/DATESTAMP +++ b/gcc/DATESTAMP @@ -1 +1 @@ -20231018 +20231022 diff --git a/gcc/Makefile.in b/gcc/Makefile.in index 747f749..a25a1e3 100644 --- a/gcc/Makefile.in +++ b/gcc/Makefile.in @@ -1461,6 +1461,7 @@ OBJS = \ gimple-iterator.o \ gimple-fold.o \ gimple-harden-conditionals.o \ + gimple-harden-control-flow.o \ gimple-laddress.o \ gimple-loop-interchange.o \ gimple-loop-jam.o \ diff --git a/gcc/ada/ChangeLog b/gcc/ada/ChangeLog index 6cf1a63..5b5a9ec 100644 --- a/gcc/ada/ChangeLog +++ b/gcc/ada/ChangeLog @@ -1,3 +1,84 @@ +2023-10-20 Alexandre Oliva <oliva@adacore.com> + + * gcc-interface/trans.cc (gigi): Mark __gnat_reraise_zcx with + ECF_XTHROW. + (build_raise_check): Likewise for all rcheck subprograms. + +2023-10-19 Yannick Moy <moy@adacore.com> + + * aspects.ads: Add aspect Side_Effects. + * contracts.adb (Add_Pre_Post_Condition) + (Inherit_Subprogram_Contract): Add support for new contract. + * contracts.ads: Update comments. + * einfo-utils.adb (Get_Pragma): Add support. + * einfo-utils.ads (Prag): Update comment. + * errout.ads: Add explain codes. + * par-prag.adb (Prag): Add support. + * sem_ch13.adb (Analyze_Aspect_Specifications) + (Check_Aspect_At_Freeze_Point): Add support. + * sem_ch6.adb (Analyze_Subprogram_Body_Helper) + (Analyze_Subprogram_Declaration): Call new analysis procedure to + check SPARK legality rules. 
+ (Analyze_SPARK_Subprogram_Specification): New procedure to check + SPARK legality rules. Use an explain code for the error. + (Analyze_Subprogram_Specification): Move checks to new subprogram. + This code was effectively dead, as the kind for parameters was set + to E_Void at this point to detect early references. + * sem_ch6.ads (Analyze_Subprogram_Specification): Add new + procedure. + * sem_prag.adb (Analyze_Depends_In_Decl_Part) + (Analyze_Global_In_Decl_Part): Adapt legality check to apply only + to functions without side-effects. + (Analyze_If_Present): Extract functionality in new procedure + Analyze_If_Present_Internal. + (Analyze_If_Present_Internal): New procedure to analyze given + pragma kind. + (Analyze_Pragmas_If_Present): New procedure to analyze given + pragma kind associated with a declaration. + (Analyze_Pragma): Adapt support for Always_Terminates and + Exceptional_Cases. Add support for Side_Effects. Make sure to call + Analyze_If_Present to ensure pragma Side_Effects is analyzed prior + to analyzing pragmas Global and Depends. Use explain codes for the + errors. + * sem_prag.ads (Analyze_Pragmas_If_Present): Add new procedure. + * sem_util.adb (Is_Function_With_Side_Effects): New query function + to determine if a function is a function with side-effects. + * sem_util.ads (Is_Function_With_Side_Effects): Same. + * snames.ads-tmpl: Declare new names for pragma and aspect. + * doc/gnat_rm/implementation_defined_aspects.rst: Document new aspect. + * doc/gnat_rm/implementation_defined_pragmas.rst: Document new pragma. + * gnat_rm.texi: Regenerate. + +2023-10-19 Sheri Bernstein <bernstein@adacore.com> + + * libgnat/s-imagef.adb (Set_Image_Fixed): Refactor loop. + +2023-10-19 Sheri Bernstein <bernstein@adacore.com> + + * libgnat/s-imguti.adb (Set_Decimal_Digits): Add pragma to exempt + Unassigned_OUT_Parameters. + (Set_Floating_Invalid_Value): Likewise + +2023-10-19 Patrick Bernardi <bernardi@adacore.com> + + * bindusg.adb (Display): Make it clear -Q adds to the number of + secondary stacks generated by the binder. + * doc/gnat_ugn/building_executable_programs_with_gnat.rst: + Document the -Q gnatbind switch and fix references to old + runtimes. + * gnat-style.texi: Regenerate. + * gnat_rm.texi: Regenerate. + * gnat_ugn.texi: Regenerate. + +2023-10-19 Ronan Desplanques <desplanques@adacore.com> + + * sem_ch3.adb (Constrain_Array): Replace manual list length + computation by call to List_Length. + +2023-10-19 Piotr Trojanek <trojanek@adacore.com> + + * exp_aggr.adb (Expand_Container_Aggregate): Simplify with "No". + 2023-10-10 Eric Botcazou <ebotcazou@adacore.com> * gcc-interface/decl.cc (inline_status_for_subprog): Minor tweak. 
diff --git a/gcc/ada/aspects.ads b/gcc/ada/aspects.ads index f718227..77d1d16 100644 --- a/gcc/ada/aspects.ads +++ b/gcc/ada/aspects.ads @@ -145,6 +145,7 @@ package Aspects is Aspect_Relaxed_Initialization, -- GNAT Aspect_Scalar_Storage_Order, -- GNAT Aspect_Secondary_Stack_Size, -- GNAT + Aspect_Side_Effects, -- GNAT Aspect_Simple_Storage_Pool, -- GNAT Aspect_Size, Aspect_Small, @@ -295,6 +296,7 @@ package Aspects is Aspect_Scalar_Storage_Order => True, Aspect_Secondary_Stack_Size => True, Aspect_Shared => True, + Aspect_Side_Effects => True, Aspect_Simple_Storage_Pool => True, Aspect_Simple_Storage_Pool_Type => True, Aspect_Subprogram_Variant => True, @@ -445,6 +447,7 @@ package Aspects is Aspect_Relaxed_Initialization => Optional_Expression, Aspect_Scalar_Storage_Order => Expression, Aspect_Secondary_Stack_Size => Expression, + Aspect_Side_Effects => Optional_Expression, Aspect_Simple_Storage_Pool => Name, Aspect_Size => Expression, Aspect_Small => Expression, @@ -556,6 +559,7 @@ package Aspects is Aspect_Relaxed_Initialization => False, Aspect_Scalar_Storage_Order => True, Aspect_Secondary_Stack_Size => True, + Aspect_Side_Effects => False, Aspect_Simple_Storage_Pool => True, Aspect_Size => True, Aspect_Small => True, @@ -741,6 +745,7 @@ package Aspects is Aspect_Secondary_Stack_Size => Name_Secondary_Stack_Size, Aspect_Shared => Name_Shared, Aspect_Shared_Passive => Name_Shared_Passive, + Aspect_Side_Effects => Name_Side_Effects, Aspect_Simple_Storage_Pool => Name_Simple_Storage_Pool, Aspect_Simple_Storage_Pool_Type => Name_Simple_Storage_Pool_Type, Aspect_Size => Name_Size, @@ -1023,6 +1028,7 @@ package Aspects is Aspect_Refined_Post => Never_Delay, Aspect_Refined_State => Never_Delay, Aspect_Relaxed_Initialization => Never_Delay, + Aspect_Side_Effects => Never_Delay, Aspect_SPARK_Mode => Never_Delay, Aspect_Stable_Properties => Always_Delay, Aspect_Static => Never_Delay, diff --git a/gcc/ada/bindusg.adb b/gcc/ada/bindusg.adb index fca425b..89a6cae 100644 --- a/gcc/ada/bindusg.adb +++ b/gcc/ada/bindusg.adb @@ -234,7 +234,7 @@ package body Bindusg is -- Line for Q switch Write_Line - (" -Qnnn Generate nnn default-sized secondary stacks"); + (" -Qnnn Generate nnn additional default-sized secondary stacks"); -- Line for -r switch diff --git a/gcc/ada/contracts.adb b/gcc/ada/contracts.adb index 4aaa276..b6e756f 100644 --- a/gcc/ada/contracts.adb +++ b/gcc/ada/contracts.adb @@ -235,6 +235,7 @@ package body Contracts is -- Interrupt_Handler -- Postcondition -- Precondition + -- Side_Effects -- Subprogram_Variant -- Test_Case -- Volatile_Function @@ -253,6 +254,7 @@ package body Contracts is elsif Prag_Nam in Name_Depends | Name_Extensions_Visible | Name_Global + | Name_Side_Effects then Add_Classification; @@ -3786,6 +3788,7 @@ package body Contracts is and then Present (Contract (From_Subp)) then Inherit_Pragma (Pragma_Extensions_Visible); + Inherit_Pragma (Pragma_Side_Effects); end if; end Inherit_Subprogram_Contract; diff --git a/gcc/ada/contracts.ads b/gcc/ada/contracts.ads index c3dc5d6..aa0cf66 100644 --- a/gcc/ada/contracts.ads +++ b/gcc/ada/contracts.ads @@ -60,6 +60,7 @@ package Contracts is -- Refined_Global -- Refined_Post -- Refined_States + -- Side_Effects -- Subprogram_Variant -- Test_Case -- Volatile_Function @@ -227,6 +228,7 @@ package Contracts is -- Inherit relevant contract items from source subprogram From_Subp. Subp -- denotes the destination subprogram. The inherited items are: -- Extensions_Visible + -- Side_Effects -- ??? 
it would be nice if this routine handles Pre'Class and Post'Class procedure Instantiate_Subprogram_Contract (Templ : Node_Id; L : List_Id); diff --git a/gcc/ada/doc/gnat_rm/implementation_defined_aspects.rst b/gcc/ada/doc/gnat_rm/implementation_defined_aspects.rst index b37a158..39e7d52 100644 --- a/gcc/ada/doc/gnat_rm/implementation_defined_aspects.rst +++ b/gcc/ada/doc/gnat_rm/implementation_defined_aspects.rst @@ -525,6 +525,12 @@ Aspect Shared This boolean aspect is equivalent to :ref:`pragma Shared<Pragma-Shared>` and is thus a synonym for aspect ``Atomic``. +Aspect Side_Effects +=================== +.. index:: Side_Effects + +This aspect is equivalent to :ref:`pragma Side_Effects<Pragma-Side_Effects>`. + Aspect Simple_Storage_Pool ========================== .. index:: Simple_Storage_Pool diff --git a/gcc/ada/doc/gnat_rm/implementation_defined_pragmas.rst b/gcc/ada/doc/gnat_rm/implementation_defined_pragmas.rst index 35a3fe5..b950d7c 100644 --- a/gcc/ada/doc/gnat_rm/implementation_defined_pragmas.rst +++ b/gcc/ada/doc/gnat_rm/implementation_defined_pragmas.rst @@ -5816,6 +5816,20 @@ Syntax: This pragma is provided for compatibility with other Ada implementations. It is recognized but ignored by all current versions of GNAT. +.. _Pragma-Side_Effects: + +Pragma Side_Effects +=================== + +Syntax: + +.. code-block:: ada + + pragma Side_Effects [ (static_boolean_EXPRESSION) ]; + +For the semantics of this pragma, see the entry for aspect +``Side_Effects`` in the SPARK Reference Manual, section 6.1.11. + .. _Pragma-Simple_Storage_Pool_Type: Pragma Simple_Storage_Pool_Type diff --git a/gcc/ada/doc/gnat_ugn/building_executable_programs_with_gnat.rst b/gcc/ada/doc/gnat_ugn/building_executable_programs_with_gnat.rst index 6e80163..a708ef4 100644 --- a/gcc/ada/doc/gnat_ugn/building_executable_programs_with_gnat.rst +++ b/gcc/ada/doc/gnat_ugn/building_executable_programs_with_gnat.rst @@ -6524,12 +6524,12 @@ be presented in subsequent sections. determines the initial size of the secondary stack for each task and the smallest amount the secondary stack can grow by. - For Ravenscar, ZFP, and Cert run-times the size of the secondary stack is - fixed. This switch can be used to change the default size of these stacks. - The default secondary stack size can be overridden on a per-task basis if - individual tasks have different secondary stack requirements. This is - achieved through the Secondary_Stack_Size aspect that takes the size of the - secondary stack in bytes. + For Light, Light-Tasking, and Embedded run-times the size of the secondary + stack is fixed. This switch can be used to change the default size of these + stacks. The default secondary stack size can be overridden on a per-task + basis if individual tasks have different secondary stack requirements. This + is achieved through the Secondary_Stack_Size aspect, which takes the size of + the secondary stack in bytes. .. index:: -e (gnatbind) @@ -6739,6 +6739,23 @@ be presented in subsequent sections. Generate binder file suitable for CodePeer. +.. index:: -Q (gnatbind) + +:switch:`-Q{nnn}` + Generate ``nnn`` additional default-sized secondary stacks. + + Tasks declared at the library level that use default-size secondary stacks + have their secondary stacks allocated from a pool of stacks generated by + gnatbind. This allows the default secondary stack size to be quickly changed + by rebinding the application. 
+ + While the binder sizes this pool to match the number of such tasks defined in + the application, the pool size may need to be increased with the :switch:`-Q` + switch to accommodate foreign threads registered with the Light run-time. For + more information, please see the *The Primary and Secondary Stack* chapter in + the *GNAT User’s Guide Supplement for Cross Platforms*. + + .. index:: -R (gnatbind) :switch:`-R` diff --git a/gcc/ada/einfo-utils.adb b/gcc/ada/einfo-utils.adb index 9bee1f4..88f4d4b 100644 --- a/gcc/ada/einfo-utils.adb +++ b/gcc/ada/einfo-utils.adb @@ -1012,6 +1012,7 @@ package body Einfo.Utils is Id = Pragma_Refined_Depends or else Id = Pragma_Refined_Global or else Id = Pragma_Refined_State or else + Id = Pragma_Side_Effects or else Id = Pragma_Volatile_Function; -- Contract / subprogram variant / test case pragmas diff --git a/gcc/ada/einfo-utils.ads b/gcc/ada/einfo-utils.ads index 21a8891..742ca22 100644 --- a/gcc/ada/einfo-utils.ads +++ b/gcc/ada/einfo-utils.ads @@ -461,6 +461,7 @@ package Einfo.Utils is -- Refined_Global -- Refined_Post -- Refined_State + -- Side_Effects -- Subprogram_Variant -- Test_Case -- Volatile_Function diff --git a/gcc/ada/errout.ads b/gcc/ada/errout.ads index 2065d73..dc412a2 100644 --- a/gcc/ada/errout.ads +++ b/gcc/ada/errout.ads @@ -616,13 +616,17 @@ package Errout is -- selected error/warning messages. The subset of those codes used in -- the GNAT frontend are defined here. - GEC_None : constant := 0000; - GEC_Volatile_At_Library_Level : constant := 0001; - GEC_Type_Early_Call_Region : constant := 0003; - GEC_Volatile_Non_Interfering_Context : constant := 0004; - GEC_Required_Part_Of : constant := 0009; - GEC_Ownership_Moved_Object : constant := 0010; - GEC_SPARK_Mode_On_Not_Library_Level : constant := 0011; + GEC_None : constant := 0000; + GEC_Volatile_At_Library_Level : constant := 0001; + GEC_Type_Early_Call_Region : constant := 0003; + GEC_Volatile_Non_Interfering_Context : constant := 0004; + GEC_Required_Part_Of : constant := 0009; + GEC_Ownership_Moved_Object : constant := 0010; + GEC_SPARK_Mode_On_Not_Library_Level : constant := 0011; + GEC_Output_In_Function_Global_Or_Depends : constant := 0014; + GEC_Out_Parameter_In_Function : constant := 0015; + GEC_Always_Terminates_On_Function : constant := 0016; + GEC_Exceptional_Cases_On_Function : constant := 0017; ------------------------ -- List Pragmas Table -- diff --git a/gcc/ada/exp_aggr.adb b/gcc/ada/exp_aggr.adb index e5f3632..340c8c6 100644 --- a/gcc/ada/exp_aggr.adb +++ b/gcc/ada/exp_aggr.adb @@ -7288,7 +7288,7 @@ package body Exp_Aggr is -- Iterated component association. Discard -- positional insertion procedure. - if not Present (Iterator_Specification (Comp)) then + if No (Iterator_Specification (Comp)) then Add_Named_Subp := Assign_Indexed_Subp; Add_Unnamed_Subp := Empty; end if; diff --git a/gcc/ada/gcc-interface/trans.cc b/gcc/ada/gcc-interface/trans.cc index e99fbb4..89f0a07 100644 --- a/gcc/ada/gcc-interface/trans.cc +++ b/gcc/ada/gcc-interface/trans.cc @@ -519,6 +519,7 @@ gigi (Node_Id gnat_root, ftype, NULL_TREE, is_default, true, true, true, false, false, NULL, Empty); + set_call_expr_flags (reraise_zcx_decl, ECF_NORETURN | ECF_XTHROW); /* Dummy objects to materialize "others" and "all others" in the exception tables. 
These are exported by a-exexpr-gcc.adb, so see this unit for @@ -721,6 +722,7 @@ build_raise_check (int check, enum exception_info_kind kind) = create_subprog_decl (get_identifier (Name_Buffer), NULL_TREE, ftype, NULL_TREE, is_default, true, true, true, false, false, NULL, Empty); + set_call_expr_flags (result, ECF_NORETURN | ECF_XTHROW); return result; } diff --git a/gcc/ada/gnat-style.texi b/gcc/ada/gnat-style.texi index 5555bcd..33bb188 100644 --- a/gcc/ada/gnat-style.texi +++ b/gcc/ada/gnat-style.texi @@ -3,7 +3,7 @@ @setfilename gnat-style.info @documentencoding UTF-8 @ifinfo -@*Generated by Sphinx 5.2.3.@* +@*Generated by Sphinx 7.2.6.@* @end ifinfo @settitle GNAT Coding Style A Guide for GNAT Developers @defindex ge @@ -19,7 +19,7 @@ @copying @quotation -GNAT Coding Style: A Guide for GNAT Developers , May 09, 2023 +GNAT Coding Style: A Guide for GNAT Developers , Oct 16, 2023 AdaCore diff --git a/gcc/ada/gnat_rm.texi b/gcc/ada/gnat_rm.texi index b7e0983..ebc97a2 100644 --- a/gcc/ada/gnat_rm.texi +++ b/gcc/ada/gnat_rm.texi @@ -3,7 +3,7 @@ @setfilename gnat_rm.info @documentencoding UTF-8 @ifinfo -@*Generated by Sphinx 5.2.3.@* +@*Generated by Sphinx 7.2.6.@* @end ifinfo @settitle GNAT Reference Manual @defindex ge @@ -19,7 +19,7 @@ @copying @quotation -GNAT Reference Manual , Jul 17, 2023 +GNAT Reference Manual , Oct 16, 2023 AdaCore @@ -254,6 +254,7 @@ Implementation Defined Pragmas * Pragma Shared:: * Pragma Short_Circuit_And_Or:: * Pragma Short_Descriptors:: +* Pragma Side_Effects:: * Pragma Simple_Storage_Pool_Type:: * Pragma Source_File_Name:: * Pragma Source_File_Name_Project:: @@ -345,6 +346,7 @@ Implementation Defined Aspects * Aspect Secondary_Stack_Size:: * Aspect Scalar_Storage_Order:: * Aspect Shared:: +* Aspect Side_Effects:: * Aspect Simple_Storage_Pool:: * Aspect Simple_Storage_Pool_Type:: * Aspect SPARK_Mode:: @@ -1365,6 +1367,7 @@ consideration, the use of these pragmas should be minimized. * Pragma Shared:: * Pragma Short_Circuit_And_Or:: * Pragma Short_Descriptors:: +* Pragma Side_Effects:: * Pragma Simple_Storage_Pool_Type:: * Pragma Source_File_Name:: * Pragma Source_File_Name_Project:: @@ -7361,7 +7364,7 @@ short-circuited logical operators. If this configuration pragma occurs locally within the file being compiled, it applies only to the file being compiled. There is no requirement that all units in a partition use this option. -@node Pragma Short_Descriptors,Pragma Simple_Storage_Pool_Type,Pragma Short_Circuit_And_Or,Implementation Defined Pragmas +@node Pragma Short_Descriptors,Pragma Side_Effects,Pragma Short_Circuit_And_Or,Implementation Defined Pragmas @anchor{gnat_rm/implementation_defined_pragmas pragma-short-descriptors}@anchor{e4} @section Pragma Short_Descriptors @@ -7375,8 +7378,22 @@ pragma Short_Descriptors; This pragma is provided for compatibility with other Ada implementations. It is recognized but ignored by all current versions of GNAT. 
-@node Pragma Simple_Storage_Pool_Type,Pragma Source_File_Name,Pragma Short_Descriptors,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas id40}@anchor{e5}@anchor{gnat_rm/implementation_defined_pragmas pragma-simple-storage-pool-type}@anchor{e6} +@node Pragma Side_Effects,Pragma Simple_Storage_Pool_Type,Pragma Short_Descriptors,Implementation Defined Pragmas +@anchor{gnat_rm/implementation_defined_pragmas id40}@anchor{e5}@anchor{gnat_rm/implementation_defined_pragmas pragma-side-effects}@anchor{e6} +@section Pragma Side_Effects + + +Syntax: + +@example +pragma Side_Effects [ (static_boolean_EXPRESSION) ]; +@end example + +For the semantics of this pragma, see the entry for aspect +@code{Side_Effects} in the SPARK Reference Manual, section 6.1.11. + +@node Pragma Simple_Storage_Pool_Type,Pragma Source_File_Name,Pragma Side_Effects,Implementation Defined Pragmas +@anchor{gnat_rm/implementation_defined_pragmas id41}@anchor{e7}@anchor{gnat_rm/implementation_defined_pragmas pragma-simple-storage-pool-type}@anchor{e8} @section Pragma Simple_Storage_Pool_Type @@ -7430,7 +7447,7 @@ storage-management discipline). An object of a simple storage pool type can be associated with an access type by specifying the attribute -@ref{e7,,Simple_Storage_Pool}. For example: +@ref{e9,,Simple_Storage_Pool}. For example: @example My_Pool : My_Simple_Storage_Pool_Type; @@ -7440,11 +7457,11 @@ type Acc is access My_Data_Type; for Acc'Simple_Storage_Pool use My_Pool; @end example -See attribute @ref{e7,,Simple_Storage_Pool} +See attribute @ref{e9,,Simple_Storage_Pool} for further details. @node Pragma Source_File_Name,Pragma Source_File_Name_Project,Pragma Simple_Storage_Pool_Type,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas id41}@anchor{e8}@anchor{gnat_rm/implementation_defined_pragmas pragma-source-file-name}@anchor{e9} +@anchor{gnat_rm/implementation_defined_pragmas id42}@anchor{ea}@anchor{gnat_rm/implementation_defined_pragmas pragma-source-file-name}@anchor{eb} @section Pragma Source_File_Name @@ -7536,20 +7553,20 @@ aware of these pragmas, and so other tools that use the project file would not be aware of the intended naming conventions. If you are using project files, file naming is controlled by Source_File_Name_Project pragmas, which are usually supplied automatically by the project manager. A pragma -Source_File_Name cannot appear after a @ref{ea,,Pragma Source_File_Name_Project}. +Source_File_Name cannot appear after a @ref{ec,,Pragma Source_File_Name_Project}. For more details on the use of the @code{Source_File_Name} pragma, see the sections on @cite{Using Other File Names} and @cite{Alternative File Naming Schemes} in the @cite{GNAT User’s Guide}. @node Pragma Source_File_Name_Project,Pragma Source_Reference,Pragma Source_File_Name,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas id42}@anchor{eb}@anchor{gnat_rm/implementation_defined_pragmas pragma-source-file-name-project}@anchor{ea} +@anchor{gnat_rm/implementation_defined_pragmas id43}@anchor{ed}@anchor{gnat_rm/implementation_defined_pragmas pragma-source-file-name-project}@anchor{ec} @section Pragma Source_File_Name_Project This pragma has the same syntax and semantics as pragma Source_File_Name. It is only allowed as a stand-alone configuration pragma. 
-It cannot appear after a @ref{e9,,Pragma Source_File_Name}, and +It cannot appear after a @ref{eb,,Pragma Source_File_Name}, and most importantly, once pragma Source_File_Name_Project appears, no further Source_File_Name pragmas are allowed. @@ -7561,7 +7578,7 @@ Source_File_Name or Source_File_Name_Project pragmas (which would not be known to the project manager). @node Pragma Source_Reference,Pragma SPARK_Mode,Pragma Source_File_Name_Project,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas pragma-source-reference}@anchor{ec} +@anchor{gnat_rm/implementation_defined_pragmas pragma-source-reference}@anchor{ee} @section Pragma Source_Reference @@ -7585,7 +7602,7 @@ string expression other than a string literal. This is because its value is needed for error messages issued by all phases of the compiler. @node Pragma SPARK_Mode,Pragma Static_Elaboration_Desired,Pragma Source_Reference,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas id43}@anchor{ed}@anchor{gnat_rm/implementation_defined_pragmas pragma-spark-mode}@anchor{ee} +@anchor{gnat_rm/implementation_defined_pragmas id44}@anchor{ef}@anchor{gnat_rm/implementation_defined_pragmas pragma-spark-mode}@anchor{f0} @section Pragma SPARK_Mode @@ -7667,7 +7684,7 @@ SPARK_Mode (@code{Off}), then that pragma will need to be repeated in the package body. @node Pragma Static_Elaboration_Desired,Pragma Stream_Convert,Pragma SPARK_Mode,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas pragma-static-elaboration-desired}@anchor{ef} +@anchor{gnat_rm/implementation_defined_pragmas pragma-static-elaboration-desired}@anchor{f1} @section Pragma Static_Elaboration_Desired @@ -7691,7 +7708,7 @@ construction of larger aggregates with static components that include an others choice.) @node Pragma Stream_Convert,Pragma Style_Checks,Pragma Static_Elaboration_Desired,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas pragma-stream-convert}@anchor{f0} +@anchor{gnat_rm/implementation_defined_pragmas pragma-stream-convert}@anchor{f2} @section Pragma Stream_Convert @@ -7768,7 +7785,7 @@ the pragma is silently ignored, and the default implementation of the stream attributes is used instead. @node Pragma Style_Checks,Pragma Subtitle,Pragma Stream_Convert,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas pragma-style-checks}@anchor{f1} +@anchor{gnat_rm/implementation_defined_pragmas pragma-style-checks}@anchor{f3} @section Pragma Style_Checks @@ -7841,7 +7858,7 @@ Rf2 : Integer := ARG; -- OK, no error @end example @node Pragma Subtitle,Pragma Suppress,Pragma Style_Checks,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas pragma-subtitle}@anchor{f2} +@anchor{gnat_rm/implementation_defined_pragmas pragma-subtitle}@anchor{f4} @section Pragma Subtitle @@ -7855,7 +7872,7 @@ This pragma is recognized for compatibility with other Ada compilers but is ignored by GNAT. @node Pragma Suppress,Pragma Suppress_All,Pragma Subtitle,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas pragma-suppress}@anchor{f3} +@anchor{gnat_rm/implementation_defined_pragmas pragma-suppress}@anchor{f5} @section Pragma Suppress @@ -7928,7 +7945,7 @@ Of course, run-time checks are omitted whenever the compiler can prove that they will not fail, whether or not checks are suppressed. 
@node Pragma Suppress_All,Pragma Suppress_Debug_Info,Pragma Suppress,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas pragma-suppress-all}@anchor{f4} +@anchor{gnat_rm/implementation_defined_pragmas pragma-suppress-all}@anchor{f6} @section Pragma Suppress_All @@ -7947,7 +7964,7 @@ The use of the standard Ada pragma @code{Suppress (All_Checks)} as a normal configuration pragma is the preferred usage in GNAT. @node Pragma Suppress_Debug_Info,Pragma Suppress_Exception_Locations,Pragma Suppress_All,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas id44}@anchor{f5}@anchor{gnat_rm/implementation_defined_pragmas pragma-suppress-debug-info}@anchor{f6} +@anchor{gnat_rm/implementation_defined_pragmas id45}@anchor{f7}@anchor{gnat_rm/implementation_defined_pragmas pragma-suppress-debug-info}@anchor{f8} @section Pragma Suppress_Debug_Info @@ -7962,7 +7979,7 @@ for the specified entity. It is intended primarily for use in debugging the debugger, and navigating around debugger problems. @node Pragma Suppress_Exception_Locations,Pragma Suppress_Initialization,Pragma Suppress_Debug_Info,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas pragma-suppress-exception-locations}@anchor{f7} +@anchor{gnat_rm/implementation_defined_pragmas pragma-suppress-exception-locations}@anchor{f9} @section Pragma Suppress_Exception_Locations @@ -7985,7 +8002,7 @@ a partition, so it is fine to have some units within a partition compiled with this pragma and others compiled in normal mode without it. @node Pragma Suppress_Initialization,Pragma Task_Name,Pragma Suppress_Exception_Locations,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas id45}@anchor{f8}@anchor{gnat_rm/implementation_defined_pragmas pragma-suppress-initialization}@anchor{f9} +@anchor{gnat_rm/implementation_defined_pragmas id46}@anchor{fa}@anchor{gnat_rm/implementation_defined_pragmas pragma-suppress-initialization}@anchor{fb} @section Pragma Suppress_Initialization @@ -8030,7 +8047,7 @@ is suppressed, just as though its subtype had been given in a pragma Suppress_Initialization, as described above. @node Pragma Task_Name,Pragma Task_Storage,Pragma Suppress_Initialization,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas pragma-task-name}@anchor{fa} +@anchor{gnat_rm/implementation_defined_pragmas pragma-task-name}@anchor{fc} @section Pragma Task_Name @@ -8086,7 +8103,7 @@ end; @end example @node Pragma Task_Storage,Pragma Test_Case,Pragma Task_Name,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas pragma-task-storage}@anchor{fb} +@anchor{gnat_rm/implementation_defined_pragmas pragma-task-storage}@anchor{fd} @section Pragma Task_Storage @@ -8106,7 +8123,7 @@ created, depending on the target. This pragma can appear anywhere a type. @node Pragma Test_Case,Pragma Thread_Local_Storage,Pragma Task_Storage,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas id46}@anchor{fc}@anchor{gnat_rm/implementation_defined_pragmas pragma-test-case}@anchor{fd} +@anchor{gnat_rm/implementation_defined_pragmas id47}@anchor{fe}@anchor{gnat_rm/implementation_defined_pragmas pragma-test-case}@anchor{ff} @section Pragma Test_Case @@ -8162,7 +8179,7 @@ postcondition. Mode @code{Robustness} indicates that the precondition and postcondition of the subprogram should be ignored for this test case. 
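[Illustrative sketch, not part of the commit diff] Pragma Test_Case as documented above, assuming a hypothetical function @code{Sqrt} declared in a library package spec:

@example
function Sqrt (X : Float) return Float;
pragma Test_Case
  (Name     => "sqrt of zero",
   Mode     => Nominal,
   Requires => X = 0.0,
   Ensures  => Sqrt'Result = 0.0);
@end example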
@node Pragma Thread_Local_Storage,Pragma Time_Slice,Pragma Test_Case,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas id47}@anchor{fe}@anchor{gnat_rm/implementation_defined_pragmas pragma-thread-local-storage}@anchor{ff} +@anchor{gnat_rm/implementation_defined_pragmas id48}@anchor{100}@anchor{gnat_rm/implementation_defined_pragmas pragma-thread-local-storage}@anchor{101} @section Pragma Thread_Local_Storage @@ -8200,7 +8217,7 @@ If this pragma is used on a system where @code{TLS} is not supported, then an error message will be generated and the program will be rejected. @node Pragma Time_Slice,Pragma Title,Pragma Thread_Local_Storage,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas pragma-time-slice}@anchor{100} +@anchor{gnat_rm/implementation_defined_pragmas pragma-time-slice}@anchor{102} @section Pragma Time_Slice @@ -8216,7 +8233,7 @@ It is ignored if it is used in a system that does not allow this control, or if it appears in other than the main program unit. @node Pragma Title,Pragma Type_Invariant,Pragma Time_Slice,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas pragma-title}@anchor{101} +@anchor{gnat_rm/implementation_defined_pragmas pragma-title}@anchor{103} @section Pragma Title @@ -8241,7 +8258,7 @@ notation is used, and named and positional notation can be mixed following the normal rules for procedure calls in Ada. @node Pragma Type_Invariant,Pragma Type_Invariant_Class,Pragma Title,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas pragma-type-invariant}@anchor{102} +@anchor{gnat_rm/implementation_defined_pragmas pragma-type-invariant}@anchor{104} @section Pragma Type_Invariant @@ -8262,7 +8279,7 @@ controlled by the assertion identifier @code{Type_Invariant} rather than @code{Invariant}. @node Pragma Type_Invariant_Class,Pragma Unchecked_Union,Pragma Type_Invariant,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas id48}@anchor{103}@anchor{gnat_rm/implementation_defined_pragmas pragma-type-invariant-class}@anchor{104} +@anchor{gnat_rm/implementation_defined_pragmas id49}@anchor{105}@anchor{gnat_rm/implementation_defined_pragmas pragma-type-invariant-class}@anchor{106} @section Pragma Type_Invariant_Class @@ -8289,7 +8306,7 @@ policy that controls this pragma is @code{Type_Invariant'Class}, not @code{Type_Invariant_Class}. @node Pragma Unchecked_Union,Pragma Unevaluated_Use_Of_Old,Pragma Type_Invariant_Class,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas pragma-unchecked-union}@anchor{105} +@anchor{gnat_rm/implementation_defined_pragmas pragma-unchecked-union}@anchor{107} @section Pragma Unchecked_Union @@ -8309,7 +8326,7 @@ version in all language modes (Ada 83, Ada 95, and Ada 2005). For full details, consult the Ada 2012 Reference Manual, section B.3.3. @node Pragma Unevaluated_Use_Of_Old,Pragma Unimplemented_Unit,Pragma Unchecked_Union,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas pragma-unevaluated-use-of-old}@anchor{106} +@anchor{gnat_rm/implementation_defined_pragmas pragma-unevaluated-use-of-old}@anchor{108} @section Pragma Unevaluated_Use_Of_Old @@ -8364,7 +8381,7 @@ uses up to the end of the corresponding statement sequence or sequence of package declarations. 
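[Illustrative sketch, not part of the commit diff] Pragma Unchecked_Union, documented earlier in this hunk, applied to a hypothetical type @code{Number}:

@example
type Number (Is_Float : Boolean := False) is record
   case Is_Float is
      when True  => F : Float;
      when False => I : Integer;
   end case;
end record;
pragma Unchecked_Union (Number);   --  maps to a C union with no stored discriminant
@end example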
@node Pragma Unimplemented_Unit,Pragma Universal_Aliasing,Pragma Unevaluated_Use_Of_Old,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas pragma-unimplemented-unit}@anchor{107} +@anchor{gnat_rm/implementation_defined_pragmas pragma-unimplemented-unit}@anchor{109} @section Pragma Unimplemented_Unit @@ -8384,7 +8401,7 @@ The abort only happens if code is being generated. Thus you can use specs of unimplemented packages in syntax or semantic checking mode. @node Pragma Universal_Aliasing,Pragma Unmodified,Pragma Unimplemented_Unit,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas id49}@anchor{108}@anchor{gnat_rm/implementation_defined_pragmas pragma-universal-aliasing}@anchor{109} +@anchor{gnat_rm/implementation_defined_pragmas id50}@anchor{10a}@anchor{gnat_rm/implementation_defined_pragmas pragma-universal-aliasing}@anchor{10b} @section Pragma Universal_Aliasing @@ -8403,7 +8420,7 @@ situations in which it must be suppressed, see the section on @code{Optimization and Strict Aliasing} in the @cite{GNAT User’s Guide}. @node Pragma Unmodified,Pragma Unreferenced,Pragma Universal_Aliasing,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas id50}@anchor{10a}@anchor{gnat_rm/implementation_defined_pragmas pragma-unmodified}@anchor{10b} +@anchor{gnat_rm/implementation_defined_pragmas id51}@anchor{10c}@anchor{gnat_rm/implementation_defined_pragmas pragma-unmodified}@anchor{10d} @section Pragma Unmodified @@ -8437,7 +8454,7 @@ Thus it is never necessary to use @code{pragma Unmodified} for such variables, though it is harmless to do so. @node Pragma Unreferenced,Pragma Unreferenced_Objects,Pragma Unmodified,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas id51}@anchor{10c}@anchor{gnat_rm/implementation_defined_pragmas pragma-unreferenced}@anchor{10d} +@anchor{gnat_rm/implementation_defined_pragmas id52}@anchor{10e}@anchor{gnat_rm/implementation_defined_pragmas pragma-unreferenced}@anchor{10f} @section Pragma Unreferenced @@ -8499,7 +8516,7 @@ Thus it is never necessary to use @code{pragma Unreferenced} for such variables, though it is harmless to do so. @node Pragma Unreferenced_Objects,Pragma Unreserve_All_Interrupts,Pragma Unreferenced,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas id52}@anchor{10e}@anchor{gnat_rm/implementation_defined_pragmas pragma-unreferenced-objects}@anchor{10f} +@anchor{gnat_rm/implementation_defined_pragmas id53}@anchor{110}@anchor{gnat_rm/implementation_defined_pragmas pragma-unreferenced-objects}@anchor{111} @section Pragma Unreferenced_Objects @@ -8524,7 +8541,7 @@ compiler will automatically suppress unwanted warnings about these variables not being referenced. @node Pragma Unreserve_All_Interrupts,Pragma Unsuppress,Pragma Unreferenced_Objects,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas pragma-unreserve-all-interrupts}@anchor{110} +@anchor{gnat_rm/implementation_defined_pragmas pragma-unreserve-all-interrupts}@anchor{112} @section Pragma Unreserve_All_Interrupts @@ -8560,7 +8577,7 @@ handled, see pragma @code{Interrupt_State}, which subsumes the functionality of the @code{Unreserve_All_Interrupts} pragma. 
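[Illustrative sketch, not part of the commit diff] Pragma Unreferenced, documented earlier in this hunk; @code{Event_Kind}, @code{Context_Type} and @code{Dispatch} are hypothetical names:

@example
procedure On_Event (Kind : Event_Kind; Context : Context_Type) is
   pragma Unreferenced (Context);   --  formal is intentionally unused, no warning
begin
   Dispatch (Kind);
end On_Event;
@end example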
@node Pragma Unsuppress,Pragma Unused,Pragma Unreserve_All_Interrupts,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas pragma-unsuppress}@anchor{111} +@anchor{gnat_rm/implementation_defined_pragmas pragma-unsuppress}@anchor{113} @section Pragma Unsuppress @@ -8596,7 +8613,7 @@ number of implementation-defined check names. See the description of pragma @code{Suppress} for full details. @node Pragma Unused,Pragma Use_VADS_Size,Pragma Unsuppress,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas id53}@anchor{112}@anchor{gnat_rm/implementation_defined_pragmas pragma-unused}@anchor{113} +@anchor{gnat_rm/implementation_defined_pragmas id54}@anchor{114}@anchor{gnat_rm/implementation_defined_pragmas pragma-unused}@anchor{115} @section Pragma Unused @@ -8630,7 +8647,7 @@ Thus it is never necessary to use @code{pragma Unused} for such variables, though it is harmless to do so. @node Pragma Use_VADS_Size,Pragma Validity_Checks,Pragma Unused,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas pragma-use-vads-size}@anchor{114} +@anchor{gnat_rm/implementation_defined_pragmas pragma-use-vads-size}@anchor{116} @section Pragma Use_VADS_Size @@ -8654,7 +8671,7 @@ as implemented in the VADS compiler. See description of the VADS_Size attribute for further details. @node Pragma Validity_Checks,Pragma Volatile,Pragma Use_VADS_Size,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas pragma-validity-checks}@anchor{115} +@anchor{gnat_rm/implementation_defined_pragmas pragma-validity-checks}@anchor{117} @section Pragma Validity_Checks @@ -8710,7 +8727,7 @@ A := C; -- C will be validity checked @end example @node Pragma Volatile,Pragma Volatile_Full_Access,Pragma Validity_Checks,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas id54}@anchor{116}@anchor{gnat_rm/implementation_defined_pragmas pragma-volatile}@anchor{117} +@anchor{gnat_rm/implementation_defined_pragmas id55}@anchor{118}@anchor{gnat_rm/implementation_defined_pragmas pragma-volatile}@anchor{119} @section Pragma Volatile @@ -8728,7 +8745,7 @@ implementation of pragma Volatile is upwards compatible with the implementation in DEC Ada 83. @node Pragma Volatile_Full_Access,Pragma Volatile_Function,Pragma Volatile,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas id55}@anchor{118}@anchor{gnat_rm/implementation_defined_pragmas pragma-volatile-full-access}@anchor{119} +@anchor{gnat_rm/implementation_defined_pragmas id56}@anchor{11a}@anchor{gnat_rm/implementation_defined_pragmas pragma-volatile-full-access}@anchor{11b} @section Pragma Volatile_Full_Access @@ -8754,7 +8771,7 @@ is not to the whole object; the compiler is allowed (and generally will) access only part of the object in this case. @node Pragma Volatile_Function,Pragma Warning_As_Error,Pragma Volatile_Full_Access,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas id56}@anchor{11a}@anchor{gnat_rm/implementation_defined_pragmas pragma-volatile-function}@anchor{11b} +@anchor{gnat_rm/implementation_defined_pragmas id57}@anchor{11c}@anchor{gnat_rm/implementation_defined_pragmas pragma-volatile-function}@anchor{11d} @section Pragma Volatile_Function @@ -8768,7 +8785,7 @@ For the semantics of this pragma, see the entry for aspect @code{Volatile_Functi in the SPARK 2014 Reference Manual, section 7.1.2. 
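[Illustrative sketch, not part of the commit diff] Pragma Unsuppress, documented earlier in this hunk; @code{Total} is a hypothetical variable:

@example
pragma Suppress (Overflow_Check);        --  checks off for the enclosing scope

procedure Accumulate (X : Integer) is
   pragma Unsuppress (Overflow_Check);   --  re-enable the check locally
begin
   Total := Total + X;
end Accumulate;
@end example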
@node Pragma Warning_As_Error,Pragma Warnings,Pragma Volatile_Function,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas pragma-warning-as-error}@anchor{11c} +@anchor{gnat_rm/implementation_defined_pragmas pragma-warning-as-error}@anchor{11e} @section Pragma Warning_As_Error @@ -8808,7 +8825,7 @@ you can use multiple pragma Warning_As_Error. The above use of patterns to match the message applies only to warning messages generated by the front end. This pragma can also be applied to -warnings provided by the back end and mentioned in @ref{11d,,Pragma Warnings}. +warnings provided by the back end and mentioned in @ref{11f,,Pragma Warnings}. By using a single full `-Wxxx' switch in the pragma, such warnings can also be treated as errors. @@ -8858,7 +8875,7 @@ the tag is changed from “warning:” to “error:” and the string “[warning-as-error]” is appended to the end of the message. @node Pragma Warnings,Pragma Weak_External,Pragma Warning_As_Error,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas id57}@anchor{11e}@anchor{gnat_rm/implementation_defined_pragmas pragma-warnings}@anchor{11d} +@anchor{gnat_rm/implementation_defined_pragmas id58}@anchor{120}@anchor{gnat_rm/implementation_defined_pragmas pragma-warnings}@anchor{11f} @section Pragma Warnings @@ -9014,7 +9031,7 @@ selectively for each tool, and as a consequence to detect useless pragma Warnings with switch @code{-gnatw.w}. @node Pragma Weak_External,Pragma Wide_Character_Encoding,Pragma Warnings,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas pragma-weak-external}@anchor{11f} +@anchor{gnat_rm/implementation_defined_pragmas pragma-weak-external}@anchor{121} @section Pragma Weak_External @@ -9065,7 +9082,7 @@ end External_Module; @end example @node Pragma Wide_Character_Encoding,,Pragma Weak_External,Implementation Defined Pragmas -@anchor{gnat_rm/implementation_defined_pragmas pragma-wide-character-encoding}@anchor{120} +@anchor{gnat_rm/implementation_defined_pragmas pragma-wide-character-encoding}@anchor{122} @section Pragma Wide_Character_Encoding @@ -9096,7 +9113,7 @@ encoding within that file, and does not affect withed units, specs, or subunits. @node Implementation Defined Aspects,Implementation Defined Attributes,Implementation Defined Pragmas,Top -@anchor{gnat_rm/implementation_defined_aspects doc}@anchor{121}@anchor{gnat_rm/implementation_defined_aspects id1}@anchor{122}@anchor{gnat_rm/implementation_defined_aspects implementation-defined-aspects}@anchor{123} +@anchor{gnat_rm/implementation_defined_aspects doc}@anchor{123}@anchor{gnat_rm/implementation_defined_aspects id1}@anchor{124}@anchor{gnat_rm/implementation_defined_aspects implementation-defined-aspects}@anchor{125} @chapter Implementation Defined Aspects @@ -9198,6 +9215,7 @@ or attribute definition clause. * Aspect Secondary_Stack_Size:: * Aspect Scalar_Storage_Order:: * Aspect Shared:: +* Aspect Side_Effects:: * Aspect Simple_Storage_Pool:: * Aspect Simple_Storage_Pool_Type:: * Aspect SPARK_Mode:: @@ -9217,7 +9235,7 @@ or attribute definition clause. @end menu @node Aspect Abstract_State,Aspect Annotate,,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-abstract-state}@anchor{124} +@anchor{gnat_rm/implementation_defined_aspects aspect-abstract-state}@anchor{126} @section Aspect Abstract_State @@ -9226,7 +9244,7 @@ or attribute definition clause. This aspect is equivalent to @ref{1e,,pragma Abstract_State}. 
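[Illustrative sketch, not part of the commit diff] The Abstract_State aspect shown above, on a hypothetical @code{Event_Log} package with a SPARK Global contract:

@example
package Event_Log with
  Abstract_State => State
is
   procedure Append (Msg : String) with
     Global => (In_Out => State);
end Event_Log;
@end example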
@node Aspect Annotate,Aspect Async_Readers,Aspect Abstract_State,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-annotate}@anchor{125} +@anchor{gnat_rm/implementation_defined_aspects aspect-annotate}@anchor{127} @section Aspect Annotate @@ -9253,7 +9271,7 @@ Equivalent to @code{pragma Annotate (ID, ID @{, ARG@}, Entity => Name);} @end table @node Aspect Async_Readers,Aspect Async_Writers,Aspect Annotate,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-async-readers}@anchor{126} +@anchor{gnat_rm/implementation_defined_aspects aspect-async-readers}@anchor{128} @section Aspect Async_Readers @@ -9262,7 +9280,7 @@ Equivalent to @code{pragma Annotate (ID, ID @{, ARG@}, Entity => Name);} This boolean aspect is equivalent to @ref{30,,pragma Async_Readers}. @node Aspect Async_Writers,Aspect Constant_After_Elaboration,Aspect Async_Readers,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-async-writers}@anchor{127} +@anchor{gnat_rm/implementation_defined_aspects aspect-async-writers}@anchor{129} @section Aspect Async_Writers @@ -9271,7 +9289,7 @@ This boolean aspect is equivalent to @ref{30,,pragma Async_Readers}. This boolean aspect is equivalent to @ref{32,,pragma Async_Writers}. @node Aspect Constant_After_Elaboration,Aspect Contract_Cases,Aspect Async_Writers,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-constant-after-elaboration}@anchor{128} +@anchor{gnat_rm/implementation_defined_aspects aspect-constant-after-elaboration}@anchor{12a} @section Aspect Constant_After_Elaboration @@ -9280,7 +9298,7 @@ This boolean aspect is equivalent to @ref{32,,pragma Async_Writers}. This aspect is equivalent to @ref{42,,pragma Constant_After_Elaboration}. @node Aspect Contract_Cases,Aspect Depends,Aspect Constant_After_Elaboration,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-contract-cases}@anchor{129} +@anchor{gnat_rm/implementation_defined_aspects aspect-contract-cases}@anchor{12b} @section Aspect Contract_Cases @@ -9291,7 +9309,7 @@ of clauses being enclosed in parentheses so that syntactically it is an aggregate. @node Aspect Depends,Aspect Default_Initial_Condition,Aspect Contract_Cases,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-depends}@anchor{12a} +@anchor{gnat_rm/implementation_defined_aspects aspect-depends}@anchor{12c} @section Aspect Depends @@ -9300,7 +9318,7 @@ aggregate. This aspect is equivalent to @ref{54,,pragma Depends}. @node Aspect Default_Initial_Condition,Aspect Dimension,Aspect Depends,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-default-initial-condition}@anchor{12b} +@anchor{gnat_rm/implementation_defined_aspects aspect-default-initial-condition}@anchor{12d} @section Aspect Default_Initial_Condition @@ -9309,7 +9327,7 @@ This aspect is equivalent to @ref{54,,pragma Depends}. This aspect is equivalent to @ref{50,,pragma Default_Initial_Condition}. @node Aspect Dimension,Aspect Dimension_System,Aspect Default_Initial_Condition,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-dimension}@anchor{12c} +@anchor{gnat_rm/implementation_defined_aspects aspect-dimension}@anchor{12e} @section Aspect Dimension @@ -9345,7 +9363,7 @@ Note that when the dimensioned type is an integer type, then any dimension value must be an integer literal. 
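[Illustrative sketch, not part of the commit diff] The Contract_Cases aspect documented earlier in this hunk, for a hypothetical @code{Sign} function:

@example
function Sign (X : Integer) return Integer with
  Contract_Cases =>
    (X < 0 => Sign'Result = -1,
     X = 0 => Sign'Result = 0,
     X > 0 => Sign'Result = 1);
@end example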
@node Aspect Dimension_System,Aspect Disable_Controlled,Aspect Dimension,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-dimension-system}@anchor{12d} +@anchor{gnat_rm/implementation_defined_aspects aspect-dimension-system}@anchor{12f} @section Aspect Dimension_System @@ -9405,7 +9423,7 @@ See section ‘Performing Dimensionality Analysis in GNAT’ in the GNAT Users Guide for detailed examples of use of the dimension system. @node Aspect Disable_Controlled,Aspect Effective_Reads,Aspect Dimension_System,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-disable-controlled}@anchor{12e} +@anchor{gnat_rm/implementation_defined_aspects aspect-disable-controlled}@anchor{130} @section Aspect Disable_Controlled @@ -9418,7 +9436,7 @@ where for example you might want a record to be controlled or not depending on whether some run-time check is enabled or suppressed. @node Aspect Effective_Reads,Aspect Effective_Writes,Aspect Disable_Controlled,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-effective-reads}@anchor{12f} +@anchor{gnat_rm/implementation_defined_aspects aspect-effective-reads}@anchor{131} @section Aspect Effective_Reads @@ -9427,7 +9445,7 @@ whether some run-time check is enabled or suppressed. This aspect is equivalent to @ref{59,,pragma Effective_Reads}. @node Aspect Effective_Writes,Aspect Extensions_Visible,Aspect Effective_Reads,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-effective-writes}@anchor{130} +@anchor{gnat_rm/implementation_defined_aspects aspect-effective-writes}@anchor{132} @section Aspect Effective_Writes @@ -9436,7 +9454,7 @@ This aspect is equivalent to @ref{59,,pragma Effective_Reads}. This aspect is equivalent to @ref{5b,,pragma Effective_Writes}. @node Aspect Extensions_Visible,Aspect Favor_Top_Level,Aspect Effective_Writes,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-extensions-visible}@anchor{131} +@anchor{gnat_rm/implementation_defined_aspects aspect-extensions-visible}@anchor{133} @section Aspect Extensions_Visible @@ -9445,7 +9463,7 @@ This aspect is equivalent to @ref{5b,,pragma Effective_Writes}. This aspect is equivalent to @ref{69,,pragma Extensions_Visible}. @node Aspect Favor_Top_Level,Aspect Ghost,Aspect Extensions_Visible,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-favor-top-level}@anchor{132} +@anchor{gnat_rm/implementation_defined_aspects aspect-favor-top-level}@anchor{134} @section Aspect Favor_Top_Level @@ -9454,7 +9472,7 @@ This aspect is equivalent to @ref{69,,pragma Extensions_Visible}. This boolean aspect is equivalent to @ref{6e,,pragma Favor_Top_Level}. @node Aspect Ghost,Aspect Ghost_Predicate,Aspect Favor_Top_Level,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-ghost}@anchor{133} +@anchor{gnat_rm/implementation_defined_aspects aspect-ghost}@anchor{135} @section Aspect Ghost @@ -9463,7 +9481,7 @@ This boolean aspect is equivalent to @ref{6e,,pragma Favor_Top_Level}. This aspect is equivalent to @ref{72,,pragma Ghost}. 
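[Illustrative sketch, not part of the commit diff] The Ghost aspect described above; both declarations exist only for verification purposes:

@example
Calls_Made : Natural := 0 with Ghost;   --  ghost variable, for proof only
procedure Reset_Counters with Ghost;    --  ghost subprogram
@end example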
@node Aspect Ghost_Predicate,Aspect Global,Aspect Ghost,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-ghost-predicate}@anchor{134} +@anchor{gnat_rm/implementation_defined_aspects aspect-ghost-predicate}@anchor{136} @section Aspect Ghost_Predicate @@ -9476,7 +9494,7 @@ For the detailed semantics of this aspect, see the entry for subtype predicates in the SPARK Reference Manual, section 3.2.4. @node Aspect Global,Aspect Initial_Condition,Aspect Ghost_Predicate,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-global}@anchor{135} +@anchor{gnat_rm/implementation_defined_aspects aspect-global}@anchor{137} @section Aspect Global @@ -9485,7 +9503,7 @@ in the SPARK Reference Manual, section 3.2.4. This aspect is equivalent to @ref{74,,pragma Global}. @node Aspect Initial_Condition,Aspect Initializes,Aspect Global,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-initial-condition}@anchor{136} +@anchor{gnat_rm/implementation_defined_aspects aspect-initial-condition}@anchor{138} @section Aspect Initial_Condition @@ -9494,7 +9512,7 @@ This aspect is equivalent to @ref{74,,pragma Global}. This aspect is equivalent to @ref{81,,pragma Initial_Condition}. @node Aspect Initializes,Aspect Inline_Always,Aspect Initial_Condition,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-initializes}@anchor{137} +@anchor{gnat_rm/implementation_defined_aspects aspect-initializes}@anchor{139} @section Aspect Initializes @@ -9503,7 +9521,7 @@ This aspect is equivalent to @ref{81,,pragma Initial_Condition}. This aspect is equivalent to @ref{84,,pragma Initializes}. @node Aspect Inline_Always,Aspect Invariant,Aspect Initializes,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-inline-always}@anchor{138} +@anchor{gnat_rm/implementation_defined_aspects aspect-inline-always}@anchor{13a} @section Aspect Inline_Always @@ -9512,7 +9530,7 @@ This aspect is equivalent to @ref{84,,pragma Initializes}. This boolean aspect is equivalent to @ref{86,,pragma Inline_Always}. @node Aspect Invariant,Aspect Invariant’Class,Aspect Inline_Always,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-invariant}@anchor{139} +@anchor{gnat_rm/implementation_defined_aspects aspect-invariant}@anchor{13b} @section Aspect Invariant @@ -9523,18 +9541,18 @@ synonym for the language defined aspect @code{Type_Invariant} except that it is separately controllable using pragma @code{Assertion_Policy}. @node Aspect Invariant’Class,Aspect Iterable,Aspect Invariant,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-invariant-class}@anchor{13a} +@anchor{gnat_rm/implementation_defined_aspects aspect-invariant-class}@anchor{13c} @section Aspect Invariant’Class @geindex Invariant'Class -This aspect is equivalent to @ref{104,,pragma Type_Invariant_Class}. It is a +This aspect is equivalent to @ref{106,,pragma Type_Invariant_Class}. It is a synonym for the language defined aspect @code{Type_Invariant'Class} except that it is separately controllable using pragma @code{Assertion_Policy}. 
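[Illustrative sketch, not part of the commit diff] The GNAT Invariant aspect described above, on a hypothetical @code{Stacks} package:

@example
package Stacks is
   type Stack is private with
     Invariant => Is_Consistent (Stack);   --  current instance named by the type
   function Is_Consistent (S : Stack) return Boolean;
private
   type Stack is record
      Top : Natural := 0;
   end record;
   function Is_Consistent (S : Stack) return Boolean is (S.Top <= 100);
end Stacks;
@end example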
@node Aspect Iterable,Aspect Linker_Section,Aspect Invariant’Class,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-iterable}@anchor{13b} +@anchor{gnat_rm/implementation_defined_aspects aspect-iterable}@anchor{13d} @section Aspect Iterable @@ -9618,7 +9636,7 @@ function Get_Element (Cont : Container; Position : Cursor) return Element_Type; This aspect is used in the GNAT-defined formal container packages. @node Aspect Linker_Section,Aspect Lock_Free,Aspect Iterable,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-linker-section}@anchor{13c} +@anchor{gnat_rm/implementation_defined_aspects aspect-linker-section}@anchor{13e} @section Aspect Linker_Section @@ -9627,7 +9645,7 @@ This aspect is used in the GNAT-defined formal container packages. This aspect is equivalent to @ref{95,,pragma Linker_Section}. @node Aspect Lock_Free,Aspect Max_Queue_Length,Aspect Linker_Section,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-lock-free}@anchor{13d} +@anchor{gnat_rm/implementation_defined_aspects aspect-lock-free}@anchor{13f} @section Aspect Lock_Free @@ -9636,7 +9654,7 @@ This aspect is equivalent to @ref{95,,pragma Linker_Section}. This boolean aspect is equivalent to @ref{97,,pragma Lock_Free}. @node Aspect Max_Queue_Length,Aspect No_Caching,Aspect Lock_Free,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-max-queue-length}@anchor{13e} +@anchor{gnat_rm/implementation_defined_aspects aspect-max-queue-length}@anchor{140} @section Aspect Max_Queue_Length @@ -9645,7 +9663,7 @@ This boolean aspect is equivalent to @ref{97,,pragma Lock_Free}. This aspect is equivalent to @ref{9f,,pragma Max_Queue_Length}. @node Aspect No_Caching,Aspect No_Elaboration_Code_All,Aspect Max_Queue_Length,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-no-caching}@anchor{13f} +@anchor{gnat_rm/implementation_defined_aspects aspect-no-caching}@anchor{141} @section Aspect No_Caching @@ -9654,7 +9672,7 @@ This aspect is equivalent to @ref{9f,,pragma Max_Queue_Length}. This boolean aspect is equivalent to @ref{a2,,pragma No_Caching}. @node Aspect No_Elaboration_Code_All,Aspect No_Inline,Aspect No_Caching,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-no-elaboration-code-all}@anchor{140} +@anchor{gnat_rm/implementation_defined_aspects aspect-no-elaboration-code-all}@anchor{142} @section Aspect No_Elaboration_Code_All @@ -9664,7 +9682,7 @@ This aspect is equivalent to @ref{a5,,pragma No_Elaboration_Code_All} for a program unit. @node Aspect No_Inline,Aspect No_Tagged_Streams,Aspect No_Elaboration_Code_All,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-no-inline}@anchor{141} +@anchor{gnat_rm/implementation_defined_aspects aspect-no-inline}@anchor{143} @section Aspect No_Inline @@ -9673,7 +9691,7 @@ for a program unit. This boolean aspect is equivalent to @ref{a8,,pragma No_Inline}. @node Aspect No_Tagged_Streams,Aspect No_Task_Parts,Aspect No_Inline,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-no-tagged-streams}@anchor{142} +@anchor{gnat_rm/implementation_defined_aspects aspect-no-tagged-streams}@anchor{144} @section Aspect No_Tagged_Streams @@ -9684,7 +9702,7 @@ argument specifying a root tagged type (thus this aspect can only be applied to such a type). 
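[Illustrative sketch, not part of the commit diff] The Linker_Section aspect documented earlier in this hunk, on a hypothetical library-level object (assumes a with clause for @code{Interfaces}; the section name is arbitrary):

@example
Control_Reg : Interfaces.Unsigned_32 with
  Volatile,
  Linker_Section => ".io_regs";
@end example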
@node Aspect No_Task_Parts,Aspect Object_Size,Aspect No_Tagged_Streams,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-no-task-parts}@anchor{143} +@anchor{gnat_rm/implementation_defined_aspects aspect-no-task-parts}@anchor{145} @section Aspect No_Task_Parts @@ -9700,16 +9718,16 @@ away certain tasking-related code that would otherwise be needed for T’Class, because descendants of T might contain tasks. @node Aspect Object_Size,Aspect Obsolescent,Aspect No_Task_Parts,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-object-size}@anchor{144} +@anchor{gnat_rm/implementation_defined_aspects aspect-object-size}@anchor{146} @section Aspect Object_Size @geindex Object_Size -This aspect is equivalent to @ref{145,,attribute Object_Size}. +This aspect is equivalent to @ref{147,,attribute Object_Size}. @node Aspect Obsolescent,Aspect Part_Of,Aspect Object_Size,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-obsolescent}@anchor{146} +@anchor{gnat_rm/implementation_defined_aspects aspect-obsolescent}@anchor{148} @section Aspect Obsolescent @@ -9720,7 +9738,7 @@ evaluation of this aspect happens at the point of occurrence, it is not delayed until the freeze point. @node Aspect Part_Of,Aspect Persistent_BSS,Aspect Obsolescent,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-part-of}@anchor{147} +@anchor{gnat_rm/implementation_defined_aspects aspect-part-of}@anchor{149} @section Aspect Part_Of @@ -9729,7 +9747,7 @@ delayed until the freeze point. This aspect is equivalent to @ref{b5,,pragma Part_Of}. @node Aspect Persistent_BSS,Aspect Predicate,Aspect Part_Of,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-persistent-bss}@anchor{148} +@anchor{gnat_rm/implementation_defined_aspects aspect-persistent-bss}@anchor{14a} @section Aspect Persistent_BSS @@ -9738,7 +9756,7 @@ This aspect is equivalent to @ref{b5,,pragma Part_Of}. This boolean aspect is equivalent to @ref{b9,,pragma Persistent_BSS}. @node Aspect Predicate,Aspect Pure_Function,Aspect Persistent_BSS,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-predicate}@anchor{149} +@anchor{gnat_rm/implementation_defined_aspects aspect-predicate}@anchor{14b} @section Aspect Predicate @@ -9752,7 +9770,7 @@ expression. It is also separately controllable using pragma @code{Assertion_Policy}. @node Aspect Pure_Function,Aspect Refined_Depends,Aspect Predicate,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-pure-function}@anchor{14a} +@anchor{gnat_rm/implementation_defined_aspects aspect-pure-function}@anchor{14c} @section Aspect Pure_Function @@ -9761,7 +9779,7 @@ expression. It is also separately controllable using pragma This boolean aspect is equivalent to @ref{cc,,pragma Pure_Function}. @node Aspect Refined_Depends,Aspect Refined_Global,Aspect Pure_Function,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-refined-depends}@anchor{14b} +@anchor{gnat_rm/implementation_defined_aspects aspect-refined-depends}@anchor{14d} @section Aspect Refined_Depends @@ -9770,7 +9788,7 @@ This boolean aspect is equivalent to @ref{cc,,pragma Pure_Function}. This aspect is equivalent to @ref{d0,,pragma Refined_Depends}. 
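[Illustrative sketch, not part of the commit diff] The GNAT Predicate aspect documented earlier in this hunk:

@example
subtype Even is Integer with
  Predicate => Even mod 2 = 0;   --  treated as a dynamic predicate here
@end example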
@node Aspect Refined_Global,Aspect Refined_Post,Aspect Refined_Depends,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-refined-global}@anchor{14c} +@anchor{gnat_rm/implementation_defined_aspects aspect-refined-global}@anchor{14e} @section Aspect Refined_Global @@ -9779,7 +9797,7 @@ This aspect is equivalent to @ref{d0,,pragma Refined_Depends}. This aspect is equivalent to @ref{d2,,pragma Refined_Global}. @node Aspect Refined_Post,Aspect Refined_State,Aspect Refined_Global,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-refined-post}@anchor{14d} +@anchor{gnat_rm/implementation_defined_aspects aspect-refined-post}@anchor{14f} @section Aspect Refined_Post @@ -9788,7 +9806,7 @@ This aspect is equivalent to @ref{d2,,pragma Refined_Global}. This aspect is equivalent to @ref{d4,,pragma Refined_Post}. @node Aspect Refined_State,Aspect Relaxed_Initialization,Aspect Refined_Post,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-refined-state}@anchor{14e} +@anchor{gnat_rm/implementation_defined_aspects aspect-refined-state}@anchor{150} @section Aspect Refined_State @@ -9797,7 +9815,7 @@ This aspect is equivalent to @ref{d4,,pragma Refined_Post}. This aspect is equivalent to @ref{d6,,pragma Refined_State}. @node Aspect Relaxed_Initialization,Aspect Remote_Access_Type,Aspect Refined_State,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-relaxed-initialization}@anchor{14f} +@anchor{gnat_rm/implementation_defined_aspects aspect-relaxed-initialization}@anchor{151} @section Aspect Relaxed_Initialization @@ -9807,7 +9825,7 @@ For the syntax and semantics of this aspect, see the SPARK 2014 Reference Manual, section 6.10. @node Aspect Remote_Access_Type,Aspect Secondary_Stack_Size,Aspect Relaxed_Initialization,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-remote-access-type}@anchor{150} +@anchor{gnat_rm/implementation_defined_aspects aspect-remote-access-type}@anchor{152} @section Aspect Remote_Access_Type @@ -9816,7 +9834,7 @@ Manual, section 6.10. This aspect is equivalent to @ref{d9,,pragma Remote_Access_Type}. @node Aspect Secondary_Stack_Size,Aspect Scalar_Storage_Order,Aspect Remote_Access_Type,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-secondary-stack-size}@anchor{151} +@anchor{gnat_rm/implementation_defined_aspects aspect-secondary-stack-size}@anchor{153} @section Aspect Secondary_Stack_Size @@ -9825,16 +9843,16 @@ This aspect is equivalent to @ref{d9,,pragma Remote_Access_Type}. This aspect is equivalent to @ref{df,,pragma Secondary_Stack_Size}. @node Aspect Scalar_Storage_Order,Aspect Shared,Aspect Secondary_Stack_Size,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-scalar-storage-order}@anchor{152} +@anchor{gnat_rm/implementation_defined_aspects aspect-scalar-storage-order}@anchor{154} @section Aspect Scalar_Storage_Order @geindex Scalar_Storage_Order -This aspect is equivalent to a @ref{153,,attribute Scalar_Storage_Order}. +This aspect is equivalent to a @ref{155,,attribute Scalar_Storage_Order}. 
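[Illustrative sketch, not part of the commit diff] The Scalar_Storage_Order aspect described above, on a hypothetical record type (assumes with clauses for @code{System} and @code{Interfaces}); GNAT requires @code{Bit_Order} to agree with the specified storage order:

@example
type Wire_Header is record
   Length : Interfaces.Unsigned_16;
   Kind   : Interfaces.Unsigned_16;
end record with
  Bit_Order            => System.High_Order_First,
  Scalar_Storage_Order => System.High_Order_First;
@end example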
-@node Aspect Shared,Aspect Simple_Storage_Pool,Aspect Scalar_Storage_Order,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-shared}@anchor{154} +@node Aspect Shared,Aspect Side_Effects,Aspect Scalar_Storage_Order,Implementation Defined Aspects +@anchor{gnat_rm/implementation_defined_aspects aspect-shared}@anchor{156} @section Aspect Shared @@ -9843,151 +9861,160 @@ This aspect is equivalent to a @ref{153,,attribute Scalar_Storage_Order}. This boolean aspect is equivalent to @ref{e2,,pragma Shared} and is thus a synonym for aspect @code{Atomic}. -@node Aspect Simple_Storage_Pool,Aspect Simple_Storage_Pool_Type,Aspect Shared,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-simple-storage-pool}@anchor{155} +@node Aspect Side_Effects,Aspect Simple_Storage_Pool,Aspect Shared,Implementation Defined Aspects +@anchor{gnat_rm/implementation_defined_aspects aspect-side-effects}@anchor{157} +@section Aspect Side_Effects + + +@geindex Side_Effects + +This aspect is equivalent to @ref{e6,,pragma Side_Effects}. + +@node Aspect Simple_Storage_Pool,Aspect Simple_Storage_Pool_Type,Aspect Side_Effects,Implementation Defined Aspects +@anchor{gnat_rm/implementation_defined_aspects aspect-simple-storage-pool}@anchor{158} @section Aspect Simple_Storage_Pool @geindex Simple_Storage_Pool -This aspect is equivalent to @ref{e7,,attribute Simple_Storage_Pool}. +This aspect is equivalent to @ref{e9,,attribute Simple_Storage_Pool}. @node Aspect Simple_Storage_Pool_Type,Aspect SPARK_Mode,Aspect Simple_Storage_Pool,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-simple-storage-pool-type}@anchor{156} +@anchor{gnat_rm/implementation_defined_aspects aspect-simple-storage-pool-type}@anchor{159} @section Aspect Simple_Storage_Pool_Type @geindex Simple_Storage_Pool_Type -This boolean aspect is equivalent to @ref{e6,,pragma Simple_Storage_Pool_Type}. +This boolean aspect is equivalent to @ref{e8,,pragma Simple_Storage_Pool_Type}. @node Aspect SPARK_Mode,Aspect Suppress_Debug_Info,Aspect Simple_Storage_Pool_Type,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-spark-mode}@anchor{157} +@anchor{gnat_rm/implementation_defined_aspects aspect-spark-mode}@anchor{15a} @section Aspect SPARK_Mode @geindex SPARK_Mode -This aspect is equivalent to @ref{ee,,pragma SPARK_Mode} and +This aspect is equivalent to @ref{f0,,pragma SPARK_Mode} and may be specified for either or both of the specification and body of a subprogram or package. @node Aspect Suppress_Debug_Info,Aspect Suppress_Initialization,Aspect SPARK_Mode,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-suppress-debug-info}@anchor{158} +@anchor{gnat_rm/implementation_defined_aspects aspect-suppress-debug-info}@anchor{15b} @section Aspect Suppress_Debug_Info @geindex Suppress_Debug_Info -This boolean aspect is equivalent to @ref{f6,,pragma Suppress_Debug_Info}. +This boolean aspect is equivalent to @ref{f8,,pragma Suppress_Debug_Info}. @node Aspect Suppress_Initialization,Aspect Test_Case,Aspect Suppress_Debug_Info,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-suppress-initialization}@anchor{159} +@anchor{gnat_rm/implementation_defined_aspects aspect-suppress-initialization}@anchor{15c} @section Aspect Suppress_Initialization @geindex Suppress_Initialization -This boolean aspect is equivalent to @ref{f9,,pragma Suppress_Initialization}. 
+This boolean aspect is equivalent to @ref{fb,,pragma Suppress_Initialization}. @node Aspect Test_Case,Aspect Thread_Local_Storage,Aspect Suppress_Initialization,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-test-case}@anchor{15a} +@anchor{gnat_rm/implementation_defined_aspects aspect-test-case}@anchor{15d} @section Aspect Test_Case @geindex Test_Case -This aspect is equivalent to @ref{fd,,pragma Test_Case}. +This aspect is equivalent to @ref{ff,,pragma Test_Case}. @node Aspect Thread_Local_Storage,Aspect Universal_Aliasing,Aspect Test_Case,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-thread-local-storage}@anchor{15b} +@anchor{gnat_rm/implementation_defined_aspects aspect-thread-local-storage}@anchor{15e} @section Aspect Thread_Local_Storage @geindex Thread_Local_Storage -This boolean aspect is equivalent to @ref{ff,,pragma Thread_Local_Storage}. +This boolean aspect is equivalent to @ref{101,,pragma Thread_Local_Storage}. @node Aspect Universal_Aliasing,Aspect Unmodified,Aspect Thread_Local_Storage,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-universal-aliasing}@anchor{15c} +@anchor{gnat_rm/implementation_defined_aspects aspect-universal-aliasing}@anchor{15f} @section Aspect Universal_Aliasing @geindex Universal_Aliasing -This boolean aspect is equivalent to @ref{109,,pragma Universal_Aliasing}. +This boolean aspect is equivalent to @ref{10b,,pragma Universal_Aliasing}. @node Aspect Unmodified,Aspect Unreferenced,Aspect Universal_Aliasing,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-unmodified}@anchor{15d} +@anchor{gnat_rm/implementation_defined_aspects aspect-unmodified}@anchor{160} @section Aspect Unmodified @geindex Unmodified -This boolean aspect is equivalent to @ref{10b,,pragma Unmodified}. +This boolean aspect is equivalent to @ref{10d,,pragma Unmodified}. @node Aspect Unreferenced,Aspect Unreferenced_Objects,Aspect Unmodified,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-unreferenced}@anchor{15e} +@anchor{gnat_rm/implementation_defined_aspects aspect-unreferenced}@anchor{161} @section Aspect Unreferenced @geindex Unreferenced -This boolean aspect is equivalent to @ref{10d,,pragma Unreferenced}. +This boolean aspect is equivalent to @ref{10f,,pragma Unreferenced}. When using the @code{-gnat2022} switch, this aspect is also supported on formal parameters, which is in particular the only form possible for expression functions. @node Aspect Unreferenced_Objects,Aspect Value_Size,Aspect Unreferenced,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-unreferenced-objects}@anchor{15f} +@anchor{gnat_rm/implementation_defined_aspects aspect-unreferenced-objects}@anchor{162} @section Aspect Unreferenced_Objects @geindex Unreferenced_Objects -This boolean aspect is equivalent to @ref{10f,,pragma Unreferenced_Objects}. +This boolean aspect is equivalent to @ref{111,,pragma Unreferenced_Objects}. @node Aspect Value_Size,Aspect Volatile_Full_Access,Aspect Unreferenced_Objects,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-value-size}@anchor{160} +@anchor{gnat_rm/implementation_defined_aspects aspect-value-size}@anchor{163} @section Aspect Value_Size @geindex Value_Size -This aspect is equivalent to @ref{161,,attribute Value_Size}. +This aspect is equivalent to @ref{164,,attribute Value_Size}. 
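[Illustrative sketch, not part of the commit diff] The Value_Size aspect described above, on a hypothetical type:

@example
type Priority_Level is range 0 .. 7 with
  Value_Size => 3;   --  minimum number of bits needed for the range
@end example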
@node Aspect Volatile_Full_Access,Aspect Volatile_Function,Aspect Value_Size,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-volatile-full-access}@anchor{162} +@anchor{gnat_rm/implementation_defined_aspects aspect-volatile-full-access}@anchor{165} @section Aspect Volatile_Full_Access @geindex Volatile_Full_Access -This boolean aspect is equivalent to @ref{119,,pragma Volatile_Full_Access}. +This boolean aspect is equivalent to @ref{11b,,pragma Volatile_Full_Access}. @node Aspect Volatile_Function,Aspect Warnings,Aspect Volatile_Full_Access,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-volatile-function}@anchor{163} +@anchor{gnat_rm/implementation_defined_aspects aspect-volatile-function}@anchor{166} @section Aspect Volatile_Function @geindex Volatile_Function -This boolean aspect is equivalent to @ref{11b,,pragma Volatile_Function}. +This boolean aspect is equivalent to @ref{11d,,pragma Volatile_Function}. @node Aspect Warnings,,Aspect Volatile_Function,Implementation Defined Aspects -@anchor{gnat_rm/implementation_defined_aspects aspect-warnings}@anchor{164} +@anchor{gnat_rm/implementation_defined_aspects aspect-warnings}@anchor{167} @section Aspect Warnings @geindex Warnings -This aspect is equivalent to the two argument form of @ref{11d,,pragma Warnings}, +This aspect is equivalent to the two argument form of @ref{11f,,pragma Warnings}, where the first argument is @code{ON} or @code{OFF} and the second argument is the entity. @node Implementation Defined Attributes,Standard and Implementation Defined Restrictions,Implementation Defined Aspects,Top -@anchor{gnat_rm/implementation_defined_attributes doc}@anchor{165}@anchor{gnat_rm/implementation_defined_attributes id1}@anchor{166}@anchor{gnat_rm/implementation_defined_attributes implementation-defined-attributes}@anchor{8} +@anchor{gnat_rm/implementation_defined_attributes doc}@anchor{168}@anchor{gnat_rm/implementation_defined_attributes id1}@anchor{169}@anchor{gnat_rm/implementation_defined_attributes implementation-defined-attributes}@anchor{8} @chapter Implementation Defined Attributes @@ -10093,7 +10120,7 @@ consideration, you should minimize the use of these attributes. @end menu @node Attribute Abort_Signal,Attribute Address_Size,,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-abort-signal}@anchor{167} +@anchor{gnat_rm/implementation_defined_attributes attribute-abort-signal}@anchor{16a} @section Attribute Abort_Signal @@ -10107,7 +10134,7 @@ completely outside the normal semantics of Ada, for a user program to intercept the abort exception). @node Attribute Address_Size,Attribute Asm_Input,Attribute Abort_Signal,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-address-size}@anchor{168} +@anchor{gnat_rm/implementation_defined_attributes attribute-address-size}@anchor{16b} @section Attribute Address_Size @@ -10123,7 +10150,7 @@ reference to System.Address’Size is nonstatic because Address is a private type. @node Attribute Asm_Input,Attribute Asm_Output,Attribute Address_Size,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-asm-input}@anchor{169} +@anchor{gnat_rm/implementation_defined_attributes attribute-asm-input}@anchor{16c} @section Attribute Asm_Input @@ -10137,10 +10164,10 @@ to be a static expression, and is the constraint for the parameter, value to be used as the input argument. 
The possible values for the constant are the same as those used in the RTL, and are dependent on the configuration file used to built the GCC back end. -@ref{16a,,Machine Code Insertions} +@ref{16d,,Machine Code Insertions} @node Attribute Asm_Output,Attribute Atomic_Always_Lock_Free,Attribute Asm_Input,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-asm-output}@anchor{16b} +@anchor{gnat_rm/implementation_defined_attributes attribute-asm-output}@anchor{16e} @section Attribute Asm_Output @@ -10156,10 +10183,10 @@ result. The possible values for constraint are the same as those used in the RTL, and are dependent on the configuration file used to build the GCC back end. If there are no output operands, then this argument may either be omitted, or explicitly given as @code{No_Output_Operands}. -@ref{16a,,Machine Code Insertions} +@ref{16d,,Machine Code Insertions} @node Attribute Atomic_Always_Lock_Free,Attribute Bit,Attribute Asm_Output,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-atomic-always-lock-free}@anchor{16c} +@anchor{gnat_rm/implementation_defined_attributes attribute-atomic-always-lock-free}@anchor{16f} @section Attribute Atomic_Always_Lock_Free @@ -10171,7 +10198,7 @@ and False otherwise. The result indicate whether atomic operations are supported by the target for the given type. @node Attribute Bit,Attribute Bit_Position,Attribute Atomic_Always_Lock_Free,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-bit}@anchor{16d} +@anchor{gnat_rm/implementation_defined_attributes attribute-bit}@anchor{170} @section Attribute Bit @@ -10202,7 +10229,7 @@ This attribute is designed to be compatible with the DEC Ada 83 definition and implementation of the @code{Bit} attribute. @node Attribute Bit_Position,Attribute Code_Address,Attribute Bit,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-bit-position}@anchor{16e} +@anchor{gnat_rm/implementation_defined_attributes attribute-bit-position}@anchor{171} @section Attribute Bit_Position @@ -10217,7 +10244,7 @@ type `universal_integer'. The value depends only on the field the containing record @code{R}. @node Attribute Code_Address,Attribute Compiler_Version,Attribute Bit_Position,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-code-address}@anchor{16f} +@anchor{gnat_rm/implementation_defined_attributes attribute-code-address}@anchor{172} @section Attribute Code_Address @@ -10260,7 +10287,7 @@ the same value as is returned by the corresponding @code{'Address} attribute. @node Attribute Compiler_Version,Attribute Constrained,Attribute Code_Address,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-compiler-version}@anchor{170} +@anchor{gnat_rm/implementation_defined_attributes attribute-compiler-version}@anchor{173} @section Attribute Compiler_Version @@ -10271,7 +10298,7 @@ prefix) yields a static string identifying the version of the compiler being used to compile the unit containing the attribute reference. 
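[Illustrative sketch, not part of the commit diff] The Compiler_Version attribute described above:

@example
GNAT_Version : constant String := Standard'Compiler_Version;
--  static string identifying the compiler used for this unit
@end example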
@node Attribute Constrained,Attribute Default_Bit_Order,Attribute Compiler_Version,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-constrained}@anchor{171} +@anchor{gnat_rm/implementation_defined_attributes attribute-constrained}@anchor{174} @section Attribute Constrained @@ -10286,7 +10313,7 @@ record type without discriminants is always @code{True}. This usage is compatible with older Ada compilers, including notably DEC Ada. @node Attribute Default_Bit_Order,Attribute Default_Scalar_Storage_Order,Attribute Constrained,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-default-bit-order}@anchor{172} +@anchor{gnat_rm/implementation_defined_attributes attribute-default-bit-order}@anchor{175} @section Attribute Default_Bit_Order @@ -10303,7 +10330,7 @@ as a @code{Pos} value (0 for @code{High_Order_First}, 1 for @code{Default_Bit_Order} in package @code{System}. @node Attribute Default_Scalar_Storage_Order,Attribute Deref,Attribute Default_Bit_Order,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-default-scalar-storage-order}@anchor{173} +@anchor{gnat_rm/implementation_defined_attributes attribute-default-scalar-storage-order}@anchor{176} @section Attribute Default_Scalar_Storage_Order @@ -10320,7 +10347,7 @@ equal to @code{Default_Bit_Order} if unspecified) as a @code{System.Bit_Order} value. This is a static attribute. @node Attribute Deref,Attribute Descriptor_Size,Attribute Default_Scalar_Storage_Order,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-deref}@anchor{174} +@anchor{gnat_rm/implementation_defined_attributes attribute-deref}@anchor{177} @section Attribute Deref @@ -10333,7 +10360,7 @@ a named access-to-@cite{typ} type, except that it yields a variable, so it can b used on the left side of an assignment. @node Attribute Descriptor_Size,Attribute Elaborated,Attribute Deref,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-descriptor-size}@anchor{175} +@anchor{gnat_rm/implementation_defined_attributes attribute-descriptor-size}@anchor{178} @section Attribute Descriptor_Size @@ -10362,7 +10389,7 @@ since @code{Positive} has an alignment of 4, the size of the descriptor is which yields a size of 32 bits, i.e. including 16 bits of padding. @node Attribute Elaborated,Attribute Elab_Body,Attribute Descriptor_Size,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-elaborated}@anchor{176} +@anchor{gnat_rm/implementation_defined_attributes attribute-elaborated}@anchor{179} @section Attribute Elaborated @@ -10377,7 +10404,7 @@ units has been completed. An exception is for units which need no elaboration, the value is always False for such units. @node Attribute Elab_Body,Attribute Elab_Spec,Attribute Elaborated,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-elab-body}@anchor{177} +@anchor{gnat_rm/implementation_defined_attributes attribute-elab-body}@anchor{17a} @section Attribute Elab_Body @@ -10393,7 +10420,7 @@ e.g., if it is necessary to do selective re-elaboration to fix some error. 
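[Illustrative sketch, not part of the commit diff] The Default_Bit_Order attribute documented earlier in this hunk:

@example
Is_Big_Endian : constant Boolean :=
  Standard'Default_Bit_Order = 0;   --  0 = High_Order_First, 1 = Low_Order_First
@end example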
@node Attribute Elab_Spec,Attribute Elab_Subp_Body,Attribute Elab_Body,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-elab-spec}@anchor{178} +@anchor{gnat_rm/implementation_defined_attributes attribute-elab-spec}@anchor{17b} @section Attribute Elab_Spec @@ -10409,7 +10436,7 @@ Ada code, e.g., if it is necessary to do selective re-elaboration to fix some error. @node Attribute Elab_Subp_Body,Attribute Emax,Attribute Elab_Spec,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-elab-subp-body}@anchor{179} +@anchor{gnat_rm/implementation_defined_attributes attribute-elab-subp-body}@anchor{17c} @section Attribute Elab_Subp_Body @@ -10423,7 +10450,7 @@ elaboration procedure by the binder in CodePeer mode only and is unrecognized otherwise. @node Attribute Emax,Attribute Enabled,Attribute Elab_Subp_Body,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-emax}@anchor{17a} +@anchor{gnat_rm/implementation_defined_attributes attribute-emax}@anchor{17d} @section Attribute Emax @@ -10436,7 +10463,7 @@ the Ada 83 reference manual for an exact description of the semantics of this attribute. @node Attribute Enabled,Attribute Enum_Rep,Attribute Emax,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-enabled}@anchor{17b} +@anchor{gnat_rm/implementation_defined_attributes attribute-enabled}@anchor{17e} @section Attribute Enabled @@ -10460,7 +10487,7 @@ a @code{pragma Suppress} or @code{pragma Unsuppress} before instantiating the package or subprogram, controlling whether the check will be present. @node Attribute Enum_Rep,Attribute Enum_Val,Attribute Enabled,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-enum-rep}@anchor{17c} +@anchor{gnat_rm/implementation_defined_attributes attribute-enum-rep}@anchor{17f} @section Attribute Enum_Rep @@ -10500,7 +10527,7 @@ integer calculation is done at run time, then the call to @code{Enum_Rep} may raise @code{Constraint_Error}. @node Attribute Enum_Val,Attribute Epsilon,Attribute Enum_Rep,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-enum-val}@anchor{17d} +@anchor{gnat_rm/implementation_defined_attributes attribute-enum-val}@anchor{180} @section Attribute Enum_Val @@ -10526,7 +10553,7 @@ absence of an enumeration representation clause. This is a static attribute (i.e., the result is static if the argument is static). @node Attribute Epsilon,Attribute Fast_Math,Attribute Enum_Val,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-epsilon}@anchor{17e} +@anchor{gnat_rm/implementation_defined_attributes attribute-epsilon}@anchor{181} @section Attribute Epsilon @@ -10539,7 +10566,7 @@ the Ada 83 reference manual for an exact description of the semantics of this attribute. @node Attribute Fast_Math,Attribute Finalization_Size,Attribute Epsilon,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-fast-math}@anchor{17f} +@anchor{gnat_rm/implementation_defined_attributes attribute-fast-math}@anchor{182} @section Attribute Fast_Math @@ -10550,7 +10577,7 @@ prefix) yields a static Boolean value that is True if pragma @code{Fast_Math} is active, and False otherwise. 
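[Illustrative sketch, not part of the commit diff] The Enum_Rep and Enum_Val attributes documented earlier in this hunk, for a hypothetical @code{Status} type with an enumeration representation clause:

@example
type Status is (Idle, Busy, Fault);
for Status use (Idle => 1, Busy => 4, Fault => 8);

Code : constant Natural := Status'Enum_Rep (Busy);   --  yields 4
Back : constant Status  := Status'Enum_Val (8);      --  yields Fault
@end example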
@node Attribute Finalization_Size,Attribute Fixed_Value,Attribute Fast_Math,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-finalization-size}@anchor{180} +@anchor{gnat_rm/implementation_defined_attributes attribute-finalization-size}@anchor{183} @section Attribute Finalization_Size @@ -10568,7 +10595,7 @@ class-wide type whose tag denotes a type with no controlled parts. Note that only heap-allocated objects contain finalization data. @node Attribute Fixed_Value,Attribute From_Any,Attribute Finalization_Size,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-fixed-value}@anchor{181} +@anchor{gnat_rm/implementation_defined_attributes attribute-fixed-value}@anchor{184} @section Attribute Fixed_Value @@ -10595,7 +10622,7 @@ This attribute is primarily intended for use in implementation of the input-output functions for fixed-point values. @node Attribute From_Any,Attribute Has_Access_Values,Attribute Fixed_Value,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-from-any}@anchor{182} +@anchor{gnat_rm/implementation_defined_attributes attribute-from-any}@anchor{185} @section Attribute From_Any @@ -10605,7 +10632,7 @@ This internal attribute is used for the generation of remote subprogram stubs in the context of the Distributed Systems Annex. @node Attribute Has_Access_Values,Attribute Has_Discriminants,Attribute From_Any,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-has-access-values}@anchor{183} +@anchor{gnat_rm/implementation_defined_attributes attribute-has-access-values}@anchor{186} @section Attribute Has_Access_Values @@ -10623,7 +10650,7 @@ definitions. If the attribute is applied to a generic private type, it indicates whether or not the corresponding actual type has access values. @node Attribute Has_Discriminants,Attribute Has_Tagged_Values,Attribute Has_Access_Values,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-has-discriminants}@anchor{184} +@anchor{gnat_rm/implementation_defined_attributes attribute-has-discriminants}@anchor{187} @section Attribute Has_Discriminants @@ -10639,7 +10666,7 @@ definitions. If the attribute is applied to a generic private type, it indicates whether or not the corresponding actual type has discriminants. @node Attribute Has_Tagged_Values,Attribute Img,Attribute Has_Discriminants,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-has-tagged-values}@anchor{185} +@anchor{gnat_rm/implementation_defined_attributes attribute-has-tagged-values}@anchor{188} @section Attribute Has_Tagged_Values @@ -10656,7 +10683,7 @@ definitions. If the attribute is applied to a generic private type, it indicates whether or not the corresponding actual type has access values. @node Attribute Img,Attribute Initialized,Attribute Has_Tagged_Values,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-img}@anchor{186} +@anchor{gnat_rm/implementation_defined_attributes attribute-img}@anchor{189} @section Attribute Img @@ -10686,7 +10713,7 @@ that returns the appropriate string when called. This means that in an instantiation as a function parameter. 
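A short sketch of the @code{Img} attribute described above (the procedure name @code{Img_Demo} is hypothetical); unlike @code{Image}, the prefix here is an object rather than a type:

@example
with Ada.Text_IO; use Ada.Text_IO;
procedure Img_Demo is
   X : constant Integer := 42;
   F : constant Float   := 1.5;
begin
   Put_Line (X'Img);   --  prints " 42", same as Integer'Image (X)
   Put_Line (F'Img);   --  prints the Float image of 1.5
end Img_Demo;
@end example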
@node Attribute Initialized,Attribute Integer_Value,Attribute Img,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-initialized}@anchor{187} +@anchor{gnat_rm/implementation_defined_attributes attribute-initialized}@anchor{18a} @section Attribute Initialized @@ -10696,7 +10723,7 @@ For the syntax and semantics of this attribute, see the SPARK 2014 Reference Manual, section 6.10. @node Attribute Integer_Value,Attribute Invalid_Value,Attribute Initialized,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-integer-value}@anchor{188} +@anchor{gnat_rm/implementation_defined_attributes attribute-integer-value}@anchor{18b} @section Attribute Integer_Value @@ -10724,7 +10751,7 @@ This attribute is primarily intended for use in implementation of the standard input-output functions for fixed-point values. @node Attribute Invalid_Value,Attribute Iterable,Attribute Integer_Value,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-invalid-value}@anchor{189} +@anchor{gnat_rm/implementation_defined_attributes attribute-invalid-value}@anchor{18c} @section Attribute Invalid_Value @@ -10738,7 +10765,7 @@ including the ability to modify the value with the binder -Sxx flag and relevant environment variables at run time. @node Attribute Iterable,Attribute Large,Attribute Invalid_Value,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-iterable}@anchor{18a} +@anchor{gnat_rm/implementation_defined_attributes attribute-iterable}@anchor{18d} @section Attribute Iterable @@ -10747,7 +10774,7 @@ relevant environment variables at run time. Equivalent to Aspect Iterable. @node Attribute Large,Attribute Library_Level,Attribute Iterable,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-large}@anchor{18b} +@anchor{gnat_rm/implementation_defined_attributes attribute-large}@anchor{18e} @section Attribute Large @@ -10760,7 +10787,7 @@ the Ada 83 reference manual for an exact description of the semantics of this attribute. @node Attribute Library_Level,Attribute Loop_Entry,Attribute Large,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-library-level}@anchor{18c} +@anchor{gnat_rm/implementation_defined_attributes attribute-library-level}@anchor{18f} @section Attribute Library_Level @@ -10786,7 +10813,7 @@ end Gen; @end example @node Attribute Loop_Entry,Attribute Machine_Size,Attribute Library_Level,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-loop-entry}@anchor{18d} +@anchor{gnat_rm/implementation_defined_attributes attribute-loop-entry}@anchor{190} @section Attribute Loop_Entry @@ -10819,7 +10846,7 @@ entry. This copy is not performed if the loop is not entered, or if the corresponding pragmas are ignored or disabled. @node Attribute Machine_Size,Attribute Mantissa,Attribute Loop_Entry,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-machine-size}@anchor{18e} +@anchor{gnat_rm/implementation_defined_attributes attribute-machine-size}@anchor{191} @section Attribute Machine_Size @@ -10829,7 +10856,7 @@ This attribute is identical to the @code{Object_Size} attribute. It is provided for compatibility with the DEC Ada 83 attribute of this name. 
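A minimal sketch of the @code{Loop_Entry} attribute described above, used inside a @code{Loop_Invariant} pragma (the procedure name @code{Drain} is hypothetical):

@example
procedure Drain (X : in out Natural) is
begin
   while X > 0 loop
      --  X'Loop_Entry denotes the value X had when the loop was first entered
      pragma Loop_Invariant (X <= X'Loop_Entry);
      X := X - 1;
   end loop;
end Drain;
@end example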
@node Attribute Mantissa,Attribute Maximum_Alignment,Attribute Machine_Size,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-mantissa}@anchor{18f} +@anchor{gnat_rm/implementation_defined_attributes attribute-mantissa}@anchor{192} @section Attribute Mantissa @@ -10842,7 +10869,7 @@ the Ada 83 reference manual for an exact description of the semantics of this attribute. @node Attribute Maximum_Alignment,Attribute Max_Integer_Size,Attribute Mantissa,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-maximum-alignment}@anchor{190}@anchor{gnat_rm/implementation_defined_attributes id2}@anchor{191} +@anchor{gnat_rm/implementation_defined_attributes attribute-maximum-alignment}@anchor{193}@anchor{gnat_rm/implementation_defined_attributes id2}@anchor{194} @section Attribute Maximum_Alignment @@ -10858,7 +10885,7 @@ for an object, guaranteeing that it is properly aligned in all cases. @node Attribute Max_Integer_Size,Attribute Mechanism_Code,Attribute Maximum_Alignment,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-max-integer-size}@anchor{192} +@anchor{gnat_rm/implementation_defined_attributes attribute-max-integer-size}@anchor{195} @section Attribute Max_Integer_Size @@ -10869,7 +10896,7 @@ prefix) provides the size of the largest supported integer type for the target. The result is a static constant. @node Attribute Mechanism_Code,Attribute Null_Parameter,Attribute Max_Integer_Size,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-mechanism-code}@anchor{193} +@anchor{gnat_rm/implementation_defined_attributes attribute-mechanism-code}@anchor{196} @section Attribute Mechanism_Code @@ -10900,7 +10927,7 @@ by reference @end table @node Attribute Null_Parameter,Attribute Object_Size,Attribute Mechanism_Code,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-null-parameter}@anchor{194} +@anchor{gnat_rm/implementation_defined_attributes attribute-null-parameter}@anchor{197} @section Attribute Null_Parameter @@ -10925,7 +10952,7 @@ There is no way of indicating this without the @code{Null_Parameter} attribute. @node Attribute Object_Size,Attribute Old,Attribute Null_Parameter,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-object-size}@anchor{145}@anchor{gnat_rm/implementation_defined_attributes id3}@anchor{195} +@anchor{gnat_rm/implementation_defined_attributes attribute-object-size}@anchor{147}@anchor{gnat_rm/implementation_defined_attributes id3}@anchor{198} @section Attribute Object_Size @@ -10995,7 +11022,7 @@ Similar additional checks are performed in other contexts requiring statically matching subtypes. @node Attribute Old,Attribute Passed_By_Reference,Attribute Object_Size,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-old}@anchor{196} +@anchor{gnat_rm/implementation_defined_attributes attribute-old}@anchor{199} @section Attribute Old @@ -11010,7 +11037,7 @@ definition are allowed under control of implementation defined pragma @code{Unevaluated_Use_Of_Old}. 
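A minimal sketch of the @code{Old} attribute described above, in its usual role inside a postcondition (the package name @code{Counters} and procedure @code{Bump} are hypothetical; spec and body are shown together):

@example
package Counters is
   procedure Bump (X : in out Integer)
     with Post => X = X'Old + 1;   --  X'Old is the value of X on entry to Bump
end Counters;

package body Counters is
   procedure Bump (X : in out Integer) is
   begin
      X := X + 1;
   end Bump;
end Counters;
@end example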
@node Attribute Passed_By_Reference,Attribute Pool_Address,Attribute Old,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-passed-by-reference}@anchor{197} +@anchor{gnat_rm/implementation_defined_attributes attribute-passed-by-reference}@anchor{19a} @section Attribute Passed_By_Reference @@ -11026,7 +11053,7 @@ passed by copy in calls. For scalar types, the result is always @code{False} and is static. For non-scalar types, the result is nonstatic. @node Attribute Pool_Address,Attribute Range_Length,Attribute Passed_By_Reference,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-pool-address}@anchor{198} +@anchor{gnat_rm/implementation_defined_attributes attribute-pool-address}@anchor{19b} @section Attribute Pool_Address @@ -11048,7 +11075,7 @@ For an object created by @code{new}, @code{Ptr.all'Pool_Address} is what is passed to @code{Allocate} and returned from @code{Deallocate}. @node Attribute Range_Length,Attribute Restriction_Set,Attribute Pool_Address,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-range-length}@anchor{199} +@anchor{gnat_rm/implementation_defined_attributes attribute-range-length}@anchor{19c} @section Attribute Range_Length @@ -11061,7 +11088,7 @@ applied to the index subtype of a one dimensional array always gives the same result as @code{Length} applied to the array itself. @node Attribute Restriction_Set,Attribute Result,Attribute Range_Length,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-restriction-set}@anchor{19a} +@anchor{gnat_rm/implementation_defined_attributes attribute-restriction-set}@anchor{19d} @section Attribute Restriction_Set @@ -11131,7 +11158,7 @@ Restrictions pragma, they are not analyzed semantically, so they do not have a type. @node Attribute Result,Attribute Safe_Emax,Attribute Restriction_Set,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-result}@anchor{19b} +@anchor{gnat_rm/implementation_defined_attributes attribute-result}@anchor{19e} @section Attribute Result @@ -11144,7 +11171,7 @@ For a further discussion of the use of this attribute and examples of its use, see the description of pragma Postcondition. @node Attribute Safe_Emax,Attribute Safe_Large,Attribute Result,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-safe-emax}@anchor{19c} +@anchor{gnat_rm/implementation_defined_attributes attribute-safe-emax}@anchor{19f} @section Attribute Safe_Emax @@ -11157,7 +11184,7 @@ the Ada 83 reference manual for an exact description of the semantics of this attribute. @node Attribute Safe_Large,Attribute Safe_Small,Attribute Safe_Emax,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-safe-large}@anchor{19d} +@anchor{gnat_rm/implementation_defined_attributes attribute-safe-large}@anchor{1a0} @section Attribute Safe_Large @@ -11170,7 +11197,7 @@ the Ada 83 reference manual for an exact description of the semantics of this attribute. 
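A small sketch of the @code{Range_Length} attribute described above (the names @code{Range_Demo}, @code{Mid} and @code{Vec} are hypothetical):

@example
procedure Range_Demo is
   subtype Mid is Integer range 10 .. 20;
   type Vec is array (Mid) of Float;
   V : constant Vec := (others => 0.0);
begin
   --  For a discrete subtype, 'Range_Length counts the values in its range,
   --  so it matches 'Length of an array indexed by that subtype.
   pragma Assert (Mid'Range_Length = 11);
   pragma Assert (Mid'Range_Length = V'Length);
   null;
end Range_Demo;
@end example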
@node Attribute Safe_Small,Attribute Scalar_Storage_Order,Attribute Safe_Large,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-safe-small}@anchor{19e} +@anchor{gnat_rm/implementation_defined_attributes attribute-safe-small}@anchor{1a1} @section Attribute Safe_Small @@ -11183,7 +11210,7 @@ the Ada 83 reference manual for an exact description of the semantics of this attribute. @node Attribute Scalar_Storage_Order,Attribute Simple_Storage_Pool,Attribute Safe_Small,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-scalar-storage-order}@anchor{153}@anchor{gnat_rm/implementation_defined_attributes id4}@anchor{19f} +@anchor{gnat_rm/implementation_defined_attributes attribute-scalar-storage-order}@anchor{155}@anchor{gnat_rm/implementation_defined_attributes id4}@anchor{1a2} @section Attribute Scalar_Storage_Order @@ -11346,7 +11373,7 @@ Note that debuggers may be unable to display the correct value of scalar components of a type for which the opposite storage order is specified. @node Attribute Simple_Storage_Pool,Attribute Small,Attribute Scalar_Storage_Order,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-simple-storage-pool}@anchor{e7}@anchor{gnat_rm/implementation_defined_attributes id5}@anchor{1a0} +@anchor{gnat_rm/implementation_defined_attributes attribute-simple-storage-pool}@anchor{e9}@anchor{gnat_rm/implementation_defined_attributes id5}@anchor{1a3} @section Attribute Simple_Storage_Pool @@ -11409,7 +11436,7 @@ as defined in section 13.11.2 of the Ada Reference Manual, except that the term `simple storage pool' is substituted for `storage pool'. @node Attribute Small,Attribute Small_Denominator,Attribute Simple_Storage_Pool,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-small}@anchor{1a1} +@anchor{gnat_rm/implementation_defined_attributes attribute-small}@anchor{1a4} @section Attribute Small @@ -11425,7 +11452,7 @@ the Ada 83 reference manual for an exact description of the semantics of this attribute when applied to floating-point types. @node Attribute Small_Denominator,Attribute Small_Numerator,Attribute Small,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-small-denominator}@anchor{1a2} +@anchor{gnat_rm/implementation_defined_attributes attribute-small-denominator}@anchor{1a5} @section Attribute Small_Denominator @@ -11438,7 +11465,7 @@ denominator in the representation of @code{typ'Small} as a rational number with coprime factors (i.e. as an irreducible fraction). @node Attribute Small_Numerator,Attribute Storage_Unit,Attribute Small_Denominator,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-small-numerator}@anchor{1a3} +@anchor{gnat_rm/implementation_defined_attributes attribute-small-numerator}@anchor{1a6} @section Attribute Small_Numerator @@ -11451,7 +11478,7 @@ numerator in the representation of @code{typ'Small} as a rational number with coprime factors (i.e. as an irreducible fraction). @node Attribute Storage_Unit,Attribute Stub_Type,Attribute Small_Numerator,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-storage-unit}@anchor{1a4} +@anchor{gnat_rm/implementation_defined_attributes attribute-storage-unit}@anchor{1a7} @section Attribute Storage_Unit @@ -11461,7 +11488,7 @@ with coprime factors (i.e. as an irreducible fraction). 
prefix) provides the same value as @code{System.Storage_Unit}. @node Attribute Stub_Type,Attribute System_Allocator_Alignment,Attribute Storage_Unit,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-stub-type}@anchor{1a5} +@anchor{gnat_rm/implementation_defined_attributes attribute-stub-type}@anchor{1a8} @section Attribute Stub_Type @@ -11485,7 +11512,7 @@ unit @code{System.Partition_Interface}. Use of this attribute will create an implicit dependency on this unit. @node Attribute System_Allocator_Alignment,Attribute Target_Name,Attribute Stub_Type,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-system-allocator-alignment}@anchor{1a6} +@anchor{gnat_rm/implementation_defined_attributes attribute-system-allocator-alignment}@anchor{1a9} @section Attribute System_Allocator_Alignment @@ -11502,7 +11529,7 @@ with alignment too large or to enable a realignment circuitry if the alignment request is larger than this value. @node Attribute Target_Name,Attribute To_Address,Attribute System_Allocator_Alignment,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-target-name}@anchor{1a7} +@anchor{gnat_rm/implementation_defined_attributes attribute-target-name}@anchor{1aa} @section Attribute Target_Name @@ -11515,7 +11542,7 @@ standard gcc target name without the terminating slash (for example, GNAT 5.0 on windows yields “i586-pc-mingw32msv”). @node Attribute To_Address,Attribute To_Any,Attribute Target_Name,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-to-address}@anchor{1a8} +@anchor{gnat_rm/implementation_defined_attributes attribute-to-address}@anchor{1ab} @section Attribute To_Address @@ -11538,7 +11565,7 @@ modular manner (e.g., -1 means the same as 16#FFFF_FFFF# on a 32 bits machine). @node Attribute To_Any,Attribute Type_Class,Attribute To_Address,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-to-any}@anchor{1a9} +@anchor{gnat_rm/implementation_defined_attributes attribute-to-any}@anchor{1ac} @section Attribute To_Any @@ -11548,7 +11575,7 @@ This internal attribute is used for the generation of remote subprogram stubs in the context of the Distributed Systems Annex. @node Attribute Type_Class,Attribute Type_Key,Attribute To_Any,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-type-class}@anchor{1aa} +@anchor{gnat_rm/implementation_defined_attributes attribute-type-class}@anchor{1ad} @section Attribute Type_Class @@ -11578,7 +11605,7 @@ applies to all concurrent types. This attribute is designed to be compatible with the DEC Ada 83 attribute of the same name. @node Attribute Type_Key,Attribute TypeCode,Attribute Type_Class,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-type-key}@anchor{1ab} +@anchor{gnat_rm/implementation_defined_attributes attribute-type-key}@anchor{1ae} @section Attribute Type_Key @@ -11590,7 +11617,7 @@ about the type or subtype. This provides improved compatibility with other implementations that support this attribute. 
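A brief sketch of the @code{Target_Name} and @code{To_Address} attributes described above (the procedure name @code{Target_Demo} and the address value are arbitrary choices for illustration):

@example
with Ada.Text_IO; use Ada.Text_IO;
with System;
procedure Target_Demo is
   Addr : constant System.Address := System'To_Address (16#4000_0000#);
   pragma Unreferenced (Addr);
begin
   Put_Line ("Compiling for: " & Standard'Target_Name);
end Target_Demo;
@end example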
@node Attribute TypeCode,Attribute Unconstrained_Array,Attribute Type_Key,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-typecode}@anchor{1ac} +@anchor{gnat_rm/implementation_defined_attributes attribute-typecode}@anchor{1af} @section Attribute TypeCode @@ -11600,7 +11627,7 @@ This internal attribute is used for the generation of remote subprogram stubs in the context of the Distributed Systems Annex. @node Attribute Unconstrained_Array,Attribute Universal_Literal_String,Attribute TypeCode,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-unconstrained-array}@anchor{1ad} +@anchor{gnat_rm/implementation_defined_attributes attribute-unconstrained-array}@anchor{1b0} @section Attribute Unconstrained_Array @@ -11614,7 +11641,7 @@ still static, and yields the result of applying this test to the generic actual. @node Attribute Universal_Literal_String,Attribute Unrestricted_Access,Attribute Unconstrained_Array,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-universal-literal-string}@anchor{1ae} +@anchor{gnat_rm/implementation_defined_attributes attribute-universal-literal-string}@anchor{1b1} @section Attribute Universal_Literal_String @@ -11642,7 +11669,7 @@ end; @end example @node Attribute Unrestricted_Access,Attribute Update,Attribute Universal_Literal_String,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-unrestricted-access}@anchor{1af} +@anchor{gnat_rm/implementation_defined_attributes attribute-unrestricted-access}@anchor{1b2} @section Attribute Unrestricted_Access @@ -11829,7 +11856,7 @@ In general this is a risky approach. It may appear to “work” but such uses o of GNAT to another, so are best avoided if possible. @node Attribute Update,Attribute Valid_Value,Attribute Unrestricted_Access,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-update}@anchor{1b0} +@anchor{gnat_rm/implementation_defined_attributes attribute-update}@anchor{1b3} @section Attribute Update @@ -11910,7 +11937,7 @@ A := A'Update ((1, 2) => 20, (3, 4) => 30); which changes element (1,2) to 20 and (3,4) to 30. @node Attribute Valid_Value,Attribute Valid_Scalars,Attribute Update,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-valid-value}@anchor{1b1} +@anchor{gnat_rm/implementation_defined_attributes attribute-valid-value}@anchor{1b4} @section Attribute Valid_Value @@ -11922,7 +11949,7 @@ a String, and returns Boolean. @code{T'Valid_Value (S)} returns True if and only if @code{T'Value (S)} would not raise Constraint_Error. @node Attribute Valid_Scalars,Attribute VADS_Size,Attribute Valid_Value,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-valid-scalars}@anchor{1b2} +@anchor{gnat_rm/implementation_defined_attributes attribute-valid-scalars}@anchor{1b5} @section Attribute Valid_Scalars @@ -11956,7 +11983,7 @@ write a function with a single use of the attribute, and then call that function from multiple places. 
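A minimal sketch of the @code{Valid_Value} attribute described above (the procedure name @code{Valid_Value_Demo} and the type @code{Color} are hypothetical):

@example
with Ada.Text_IO; use Ada.Text_IO;
procedure Valid_Value_Demo is
   type Color is (Red, Green, Blue);
begin
   --  True exactly when Color'Value (S) would not raise Constraint_Error
   Put_Line (Boolean'Image (Color'Valid_Value ("Green")));   --  TRUE
   Put_Line (Boolean'Image (Color'Valid_Value ("Purple")));  --  FALSE
end Valid_Value_Demo;
@end example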
@node Attribute VADS_Size,Attribute Value_Size,Attribute Valid_Scalars,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-vads-size}@anchor{1b3} +@anchor{gnat_rm/implementation_defined_attributes attribute-vads-size}@anchor{1b6} @section Attribute VADS_Size @@ -11976,7 +12003,7 @@ gives the result that would be obtained by applying the attribute to the corresponding type. @node Attribute Value_Size,Attribute Wchar_T_Size,Attribute VADS_Size,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-value-size}@anchor{161}@anchor{gnat_rm/implementation_defined_attributes id6}@anchor{1b4} +@anchor{gnat_rm/implementation_defined_attributes attribute-value-size}@anchor{164}@anchor{gnat_rm/implementation_defined_attributes id6}@anchor{1b7} @section Attribute Value_Size @@ -11990,7 +12017,7 @@ a value of the given subtype. It is the same as @code{type'Size}, but, unlike @code{Size}, may be set for non-first subtypes. @node Attribute Wchar_T_Size,Attribute Word_Size,Attribute Value_Size,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-wchar-t-size}@anchor{1b5} +@anchor{gnat_rm/implementation_defined_attributes attribute-wchar-t-size}@anchor{1b8} @section Attribute Wchar_T_Size @@ -12002,7 +12029,7 @@ primarily for constructing the definition of this type in package @code{Interfaces.C}. The result is a static constant. @node Attribute Word_Size,,Attribute Wchar_T_Size,Implementation Defined Attributes -@anchor{gnat_rm/implementation_defined_attributes attribute-word-size}@anchor{1b6} +@anchor{gnat_rm/implementation_defined_attributes attribute-word-size}@anchor{1b9} @section Attribute Word_Size @@ -12013,7 +12040,7 @@ prefix) provides the value @code{System.Word_Size}. The result is a static constant. @node Standard and Implementation Defined Restrictions,Implementation Advice,Implementation Defined Attributes,Top -@anchor{gnat_rm/standard_and_implementation_defined_restrictions doc}@anchor{1b7}@anchor{gnat_rm/standard_and_implementation_defined_restrictions id1}@anchor{1b8}@anchor{gnat_rm/standard_and_implementation_defined_restrictions standard-and-implementation-defined-restrictions}@anchor{9} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions doc}@anchor{1ba}@anchor{gnat_rm/standard_and_implementation_defined_restrictions id1}@anchor{1bb}@anchor{gnat_rm/standard_and_implementation_defined_restrictions standard-and-implementation-defined-restrictions}@anchor{9} @chapter Standard and Implementation Defined Restrictions @@ -12042,7 +12069,7 @@ language defined or GNAT-specific, are listed in the following. @end menu @node Partition-Wide Restrictions,Program Unit Level Restrictions,,Standard and Implementation Defined Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions id2}@anchor{1b9}@anchor{gnat_rm/standard_and_implementation_defined_restrictions partition-wide-restrictions}@anchor{1ba} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions id2}@anchor{1bc}@anchor{gnat_rm/standard_and_implementation_defined_restrictions partition-wide-restrictions}@anchor{1bd} @section Partition-Wide Restrictions @@ -12135,7 +12162,7 @@ then all compilation units in the partition must obey the restriction). 
@end menu @node Immediate_Reclamation,Max_Asynchronous_Select_Nesting,,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions immediate-reclamation}@anchor{1bb} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions immediate-reclamation}@anchor{1be} @subsection Immediate_Reclamation @@ -12147,7 +12174,7 @@ deallocation, any storage reserved at run time for an object is immediately reclaimed when the object no longer exists. @node Max_Asynchronous_Select_Nesting,Max_Entry_Queue_Length,Immediate_Reclamation,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions max-asynchronous-select-nesting}@anchor{1bc} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions max-asynchronous-select-nesting}@anchor{1bf} @subsection Max_Asynchronous_Select_Nesting @@ -12159,7 +12186,7 @@ detected at compile time. Violations of this restriction with values other than zero cause Storage_Error to be raised. @node Max_Entry_Queue_Length,Max_Protected_Entries,Max_Asynchronous_Select_Nesting,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions max-entry-queue-length}@anchor{1bd} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions max-entry-queue-length}@anchor{1c0} @subsection Max_Entry_Queue_Length @@ -12180,7 +12207,7 @@ compatibility purposes (and a warning will be generated for its use if warnings on obsolescent features are activated). @node Max_Protected_Entries,Max_Select_Alternatives,Max_Entry_Queue_Length,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions max-protected-entries}@anchor{1be} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions max-protected-entries}@anchor{1c1} @subsection Max_Protected_Entries @@ -12191,7 +12218,7 @@ bounds of every entry family of a protected unit shall be static, or shall be defined by a discriminant of a subtype whose corresponding bound is static. @node Max_Select_Alternatives,Max_Storage_At_Blocking,Max_Protected_Entries,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions max-select-alternatives}@anchor{1bf} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions max-select-alternatives}@anchor{1c2} @subsection Max_Select_Alternatives @@ -12200,7 +12227,7 @@ defined by a discriminant of a subtype whose corresponding bound is static. [RM D.7] Specifies the maximum number of alternatives in a selective accept. @node Max_Storage_At_Blocking,Max_Task_Entries,Max_Select_Alternatives,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions max-storage-at-blocking}@anchor{1c0} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions max-storage-at-blocking}@anchor{1c3} @subsection Max_Storage_At_Blocking @@ -12211,7 +12238,7 @@ Storage_Size that can be retained by a blocked task. A violation of this restriction causes Storage_Error to be raised. @node Max_Task_Entries,Max_Tasks,Max_Storage_At_Blocking,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions max-task-entries}@anchor{1c1} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions max-task-entries}@anchor{1c4} @subsection Max_Task_Entries @@ -12224,7 +12251,7 @@ defined by a discriminant of a subtype whose corresponding bound is static. 
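Several of the restrictions above take a numeric argument. A minimal sketch of how they might be requested in a configuration pragmas file (the particular values are arbitrary):

@example
pragma Restrictions (Max_Asynchronous_Select_Nesting => 0);
pragma Restrictions (Max_Protected_Entries => 4);
pragma Restrictions (Max_Task_Entries => 0);
@end example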
@node Max_Tasks,No_Abort_Statements,Max_Task_Entries,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions max-tasks}@anchor{1c2} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions max-tasks}@anchor{1c5} @subsection Max_Tasks @@ -12237,7 +12264,7 @@ time. Violations of this restriction with values other than zero cause Storage_Error to be raised. @node No_Abort_Statements,No_Access_Parameter_Allocators,Max_Tasks,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-abort-statements}@anchor{1c3} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-abort-statements}@anchor{1c6} @subsection No_Abort_Statements @@ -12247,7 +12274,7 @@ Storage_Error to be raised. no calls to Task_Identification.Abort_Task. @node No_Access_Parameter_Allocators,No_Access_Subprograms,No_Abort_Statements,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-access-parameter-allocators}@anchor{1c4} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-access-parameter-allocators}@anchor{1c7} @subsection No_Access_Parameter_Allocators @@ -12258,7 +12285,7 @@ occurrences of an allocator as the actual parameter to an access parameter. @node No_Access_Subprograms,No_Allocators,No_Access_Parameter_Allocators,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-access-subprograms}@anchor{1c5} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-access-subprograms}@anchor{1c8} @subsection No_Access_Subprograms @@ -12268,7 +12295,7 @@ parameter. declarations of access-to-subprogram types. @node No_Allocators,No_Anonymous_Allocators,No_Access_Subprograms,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-allocators}@anchor{1c6} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-allocators}@anchor{1c9} @subsection No_Allocators @@ -12278,7 +12305,7 @@ declarations of access-to-subprogram types. occurrences of an allocator. @node No_Anonymous_Allocators,No_Asynchronous_Control,No_Allocators,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-anonymous-allocators}@anchor{1c7} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-anonymous-allocators}@anchor{1ca} @subsection No_Anonymous_Allocators @@ -12288,7 +12315,7 @@ occurrences of an allocator. occurrences of an allocator of anonymous access type. @node No_Asynchronous_Control,No_Calendar,No_Anonymous_Allocators,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-asynchronous-control}@anchor{1c8} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-asynchronous-control}@anchor{1cb} @subsection No_Asynchronous_Control @@ -12298,7 +12325,7 @@ occurrences of an allocator of anonymous access type. dependences on the predefined package Asynchronous_Task_Control. @node No_Calendar,No_Coextensions,No_Asynchronous_Control,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-calendar}@anchor{1c9} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-calendar}@anchor{1cc} @subsection No_Calendar @@ -12308,7 +12335,7 @@ dependences on the predefined package Asynchronous_Task_Control. dependences on package Calendar. 
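A minimal sketch of how parameterless partition-wide restrictions such as those above are typically requested, e.g. in a @code{gnat.adc} configuration pragmas file (the grouping shown is arbitrary):

@example
pragma Restrictions (No_Abort_Statements);
pragma Restrictions (No_Allocators, No_Calendar);
@end example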
@node No_Coextensions,No_Default_Initialization,No_Calendar,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-coextensions}@anchor{1ca} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-coextensions}@anchor{1cd} @subsection No_Coextensions @@ -12318,7 +12345,7 @@ dependences on package Calendar. coextensions. See 3.10.2. @node No_Default_Initialization,No_Delay,No_Coextensions,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-default-initialization}@anchor{1cb} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-default-initialization}@anchor{1ce} @subsection No_Default_Initialization @@ -12335,7 +12362,7 @@ is to prohibit all cases of variables declared without a specific initializer (including the case of OUT scalar parameters). @node No_Delay,No_Dependence,No_Default_Initialization,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-delay}@anchor{1cc} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-delay}@anchor{1cf} @subsection No_Delay @@ -12345,7 +12372,7 @@ initializer (including the case of OUT scalar parameters). delay statements and no semantic dependences on package Calendar. @node No_Dependence,No_Direct_Boolean_Operators,No_Delay,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-dependence}@anchor{1cd} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-dependence}@anchor{1d0} @subsection No_Dependence @@ -12388,7 +12415,7 @@ to support specific constructs of the language. Here are some examples: @end itemize @node No_Direct_Boolean_Operators,No_Dispatch,No_Dependence,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-direct-boolean-operators}@anchor{1ce} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-direct-boolean-operators}@anchor{1d1} @subsection No_Direct_Boolean_Operators @@ -12401,7 +12428,7 @@ protocol requires the use of short-circuit (and then, or else) forms for all composite boolean operations. @node No_Dispatch,No_Dispatching_Calls,No_Direct_Boolean_Operators,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-dispatch}@anchor{1cf} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-dispatch}@anchor{1d2} @subsection No_Dispatch @@ -12411,7 +12438,7 @@ composite boolean operations. occurrences of @code{T'Class}, for any (tagged) subtype @code{T}. @node No_Dispatching_Calls,No_Dynamic_Attachment,No_Dispatch,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-dispatching-calls}@anchor{1d0} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-dispatching-calls}@anchor{1d3} @subsection No_Dispatching_Calls @@ -12472,7 +12499,7 @@ end Example; @end example @node No_Dynamic_Attachment,No_Dynamic_Priorities,No_Dispatching_Calls,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-dynamic-attachment}@anchor{1d1} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-dynamic-attachment}@anchor{1d4} @subsection No_Dynamic_Attachment @@ -12491,7 +12518,7 @@ compatibility purposes (and a warning will be generated for its use if warnings on obsolescent features are activated). 
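The @code{No_Dependence} restriction described above names a library unit rather than a language feature. A minimal sketch (the units chosen are arbitrary examples):

@example
pragma Restrictions (No_Dependence => Ada.Calendar);
pragma Restrictions (No_Dependence => Ada.Text_IO);
@end example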
@node No_Dynamic_Priorities,No_Entry_Calls_In_Elaboration_Code,No_Dynamic_Attachment,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-dynamic-priorities}@anchor{1d2} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-dynamic-priorities}@anchor{1d5} @subsection No_Dynamic_Priorities @@ -12500,7 +12527,7 @@ warnings on obsolescent features are activated). [RM D.7] There are no semantic dependencies on the package Dynamic_Priorities. @node No_Entry_Calls_In_Elaboration_Code,No_Enumeration_Maps,No_Dynamic_Priorities,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-entry-calls-in-elaboration-code}@anchor{1d3} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-entry-calls-in-elaboration-code}@anchor{1d6} @subsection No_Entry_Calls_In_Elaboration_Code @@ -12512,7 +12539,7 @@ restriction, the compiler can assume that no code past an accept statement in a task can be executed at elaboration time. @node No_Enumeration_Maps,No_Exception_Handlers,No_Entry_Calls_In_Elaboration_Code,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-enumeration-maps}@anchor{1d4} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-enumeration-maps}@anchor{1d7} @subsection No_Enumeration_Maps @@ -12523,7 +12550,7 @@ enumeration maps are used (that is Image and Value attributes applied to enumeration types). @node No_Exception_Handlers,No_Exception_Propagation,No_Enumeration_Maps,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-exception-handlers}@anchor{1d5} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-exception-handlers}@anchor{1d8} @subsection No_Exception_Handlers @@ -12548,7 +12575,7 @@ statement generated by the compiler). The Line parameter when nonzero represents the line number in the source program where the raise occurs. @node No_Exception_Propagation,No_Exception_Registration,No_Exception_Handlers,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-exception-propagation}@anchor{1d6} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-exception-propagation}@anchor{1d9} @subsection No_Exception_Propagation @@ -12565,7 +12592,7 @@ the package GNAT.Current_Exception is not permitted, and reraise statements (raise with no operand) are not permitted. @node No_Exception_Registration,No_Exceptions,No_Exception_Propagation,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-exception-registration}@anchor{1d7} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-exception-registration}@anchor{1da} @subsection No_Exception_Registration @@ -12579,7 +12606,7 @@ code is simplified by omitting the otherwise-required global registration of exceptions when they are declared. @node No_Exceptions,No_Finalization,No_Exception_Registration,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-exceptions}@anchor{1d8} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-exceptions}@anchor{1db} @subsection No_Exceptions @@ -12590,7 +12617,7 @@ raise statements and no exception handlers and also suppresses the generation of language-defined run-time checks. 
@node No_Finalization,No_Fixed_Point,No_Exceptions,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-finalization}@anchor{1d9} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-finalization}@anchor{1dc} @subsection No_Finalization @@ -12631,7 +12658,7 @@ object or a nested component, either declared on the stack or on the heap. The deallocation of a controlled object no longer finalizes its contents. @node No_Fixed_Point,No_Floating_Point,No_Finalization,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-fixed-point}@anchor{1da} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-fixed-point}@anchor{1dd} @subsection No_Fixed_Point @@ -12641,7 +12668,7 @@ deallocation of a controlled object no longer finalizes its contents. occurrences of fixed point types and operations. @node No_Floating_Point,No_Implicit_Conditionals,No_Fixed_Point,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-floating-point}@anchor{1db} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-floating-point}@anchor{1de} @subsection No_Floating_Point @@ -12651,7 +12678,7 @@ occurrences of fixed point types and operations. occurrences of floating point types and operations. @node No_Implicit_Conditionals,No_Implicit_Dynamic_Code,No_Floating_Point,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implicit-conditionals}@anchor{1dc} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implicit-conditionals}@anchor{1df} @subsection No_Implicit_Conditionals @@ -12667,7 +12694,7 @@ normal manner. Constructs generating implicit conditionals include comparisons of composite objects and the Max/Min attributes. @node No_Implicit_Dynamic_Code,No_Implicit_Heap_Allocations,No_Implicit_Conditionals,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implicit-dynamic-code}@anchor{1dd} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implicit-dynamic-code}@anchor{1e0} @subsection No_Implicit_Dynamic_Code @@ -12697,7 +12724,7 @@ foreign-language convention; primitive operations of nested tagged types. @node No_Implicit_Heap_Allocations,No_Implicit_Protected_Object_Allocations,No_Implicit_Dynamic_Code,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implicit-heap-allocations}@anchor{1de} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implicit-heap-allocations}@anchor{1e1} @subsection No_Implicit_Heap_Allocations @@ -12706,7 +12733,7 @@ types. [RM D.7] No constructs are allowed to cause implicit heap allocation. @node No_Implicit_Protected_Object_Allocations,No_Implicit_Task_Allocations,No_Implicit_Heap_Allocations,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implicit-protected-object-allocations}@anchor{1df} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implicit-protected-object-allocations}@anchor{1e2} @subsection No_Implicit_Protected_Object_Allocations @@ -12716,7 +12743,7 @@ types. protected object. 
@node No_Implicit_Task_Allocations,No_Initialize_Scalars,No_Implicit_Protected_Object_Allocations,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implicit-task-allocations}@anchor{1e0} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implicit-task-allocations}@anchor{1e3} @subsection No_Implicit_Task_Allocations @@ -12725,7 +12752,7 @@ protected object. [GNAT] No constructs are allowed to cause implicit heap allocation of a task. @node No_Initialize_Scalars,No_IO,No_Implicit_Task_Allocations,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-initialize-scalars}@anchor{1e1} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-initialize-scalars}@anchor{1e4} @subsection No_Initialize_Scalars @@ -12737,7 +12764,7 @@ code, and in particular eliminates dummy null initialization routines that are otherwise generated for some record and array types. @node No_IO,No_Local_Allocators,No_Initialize_Scalars,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-io}@anchor{1e2} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-io}@anchor{1e5} @subsection No_IO @@ -12748,7 +12775,7 @@ dependences on any of the library units Sequential_IO, Direct_IO, Text_IO, Wide_Text_IO, Wide_Wide_Text_IO, or Stream_IO. @node No_Local_Allocators,No_Local_Protected_Objects,No_IO,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-local-allocators}@anchor{1e3} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-local-allocators}@anchor{1e6} @subsection No_Local_Allocators @@ -12759,7 +12786,7 @@ occurrences of an allocator in subprograms, generic subprograms, tasks, and entry bodies. @node No_Local_Protected_Objects,No_Local_Tagged_Types,No_Local_Allocators,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-local-protected-objects}@anchor{1e4} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-local-protected-objects}@anchor{1e7} @subsection No_Local_Protected_Objects @@ -12769,7 +12796,7 @@ and entry bodies. only declared at the library level. @node No_Local_Tagged_Types,No_Local_Timing_Events,No_Local_Protected_Objects,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-local-tagged-types}@anchor{1e5} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-local-tagged-types}@anchor{1e8} @subsection No_Local_Tagged_Types @@ -12779,7 +12806,7 @@ only declared at the library level. declared at the library level. @node No_Local_Timing_Events,No_Long_Long_Integers,No_Local_Tagged_Types,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-local-timing-events}@anchor{1e6} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-local-timing-events}@anchor{1e9} @subsection No_Local_Timing_Events @@ -12789,7 +12816,7 @@ declared at the library level. declared at the library level. 
@node No_Long_Long_Integers,No_Multiple_Elaboration,No_Local_Timing_Events,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-long-long-integers}@anchor{1e7} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-long-long-integers}@anchor{1ea} @subsection No_Long_Long_Integers @@ -12801,7 +12828,7 @@ implicit base type is Long_Long_Integer, and modular types whose size exceeds Long_Integer’Size. @node No_Multiple_Elaboration,No_Nested_Finalization,No_Long_Long_Integers,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-multiple-elaboration}@anchor{1e8} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-multiple-elaboration}@anchor{1eb} @subsection No_Multiple_Elaboration @@ -12817,7 +12844,7 @@ possible, including non-Ada main programs and Stand Alone libraries, are not permitted and will be diagnosed by the binder. @node No_Nested_Finalization,No_Protected_Type_Allocators,No_Multiple_Elaboration,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-nested-finalization}@anchor{1e9} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-nested-finalization}@anchor{1ec} @subsection No_Nested_Finalization @@ -12826,7 +12853,7 @@ permitted and will be diagnosed by the binder. [RM D.7] All objects requiring finalization are declared at the library level. @node No_Protected_Type_Allocators,No_Protected_Types,No_Nested_Finalization,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-protected-type-allocators}@anchor{1ea} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-protected-type-allocators}@anchor{1ed} @subsection No_Protected_Type_Allocators @@ -12836,7 +12863,7 @@ permitted and will be diagnosed by the binder. expressions that attempt to allocate protected objects. @node No_Protected_Types,No_Recursion,No_Protected_Type_Allocators,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-protected-types}@anchor{1eb} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-protected-types}@anchor{1ee} @subsection No_Protected_Types @@ -12846,7 +12873,7 @@ expressions that attempt to allocate protected objects. declarations of protected types or protected objects. @node No_Recursion,No_Reentrancy,No_Protected_Types,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-recursion}@anchor{1ec} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-recursion}@anchor{1ef} @subsection No_Recursion @@ -12856,7 +12883,7 @@ declarations of protected types or protected objects. part of its execution. @node No_Reentrancy,No_Relative_Delay,No_Recursion,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-reentrancy}@anchor{1ed} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-reentrancy}@anchor{1f0} @subsection No_Reentrancy @@ -12866,7 +12893,7 @@ part of its execution. two tasks at the same time. 
@node No_Relative_Delay,No_Requeue_Statements,No_Reentrancy,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-relative-delay}@anchor{1ee} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-relative-delay}@anchor{1f1} @subsection No_Relative_Delay @@ -12877,7 +12904,7 @@ relative statements and prevents expressions such as @code{delay 1.23;} from appearing in source code. @node No_Requeue_Statements,No_Secondary_Stack,No_Relative_Delay,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-requeue-statements}@anchor{1ef} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-requeue-statements}@anchor{1f2} @subsection No_Requeue_Statements @@ -12895,7 +12922,7 @@ compatibility purposes (and a warning will be generated for its use if warnings on obsolescent features are activated). @node No_Secondary_Stack,No_Select_Statements,No_Requeue_Statements,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-secondary-stack}@anchor{1f0} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-secondary-stack}@anchor{1f3} @subsection No_Secondary_Stack @@ -12908,7 +12935,7 @@ stack is used to implement functions returning unconstrained objects secondary stacks for tasks (excluding the environment task) at run time. @node No_Select_Statements,No_Specific_Termination_Handlers,No_Secondary_Stack,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-select-statements}@anchor{1f1} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-select-statements}@anchor{1f4} @subsection No_Select_Statements @@ -12918,7 +12945,7 @@ secondary stacks for tasks (excluding the environment task) at run time. kind are permitted, that is the keyword @code{select} may not appear. @node No_Specific_Termination_Handlers,No_Specification_of_Aspect,No_Select_Statements,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-specific-termination-handlers}@anchor{1f2} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-specific-termination-handlers}@anchor{1f5} @subsection No_Specific_Termination_Handlers @@ -12928,7 +12955,7 @@ kind are permitted, that is the keyword @code{select} may not appear. or to Ada.Task_Termination.Specific_Handler. @node No_Specification_of_Aspect,No_Standard_Allocators_After_Elaboration,No_Specific_Termination_Handlers,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-specification-of-aspect}@anchor{1f3} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-specification-of-aspect}@anchor{1f6} @subsection No_Specification_of_Aspect @@ -12939,7 +12966,7 @@ specification, attribute definition clause, or pragma is given for a given aspect. @node No_Standard_Allocators_After_Elaboration,No_Standard_Storage_Pools,No_Specification_of_Aspect,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-standard-allocators-after-elaboration}@anchor{1f4} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-standard-allocators-after-elaboration}@anchor{1f7} @subsection No_Standard_Allocators_After_Elaboration @@ -12951,7 +12978,7 @@ library items of the partition has completed. Otherwise, Storage_Error is raised.
@node No_Standard_Storage_Pools,No_Stream_Optimizations,No_Standard_Allocators_After_Elaboration,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-standard-storage-pools}@anchor{1f5} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-standard-storage-pools}@anchor{1f8} @subsection No_Standard_Storage_Pools @@ -12963,7 +12990,7 @@ have an explicit Storage_Pool attribute defined specifying a user-defined storage pool. @node No_Stream_Optimizations,No_Streams,No_Standard_Storage_Pools,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-stream-optimizations}@anchor{1f6} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-stream-optimizations}@anchor{1f9} @subsection No_Stream_Optimizations @@ -12976,7 +13003,7 @@ due to their superior performance. When this restriction is in effect, the compiler performs all IO operations on a per-character basis. @node No_Streams,No_Tagged_Type_Registration,No_Stream_Optimizations,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-streams}@anchor{1f7} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-streams}@anchor{1fa} @subsection No_Streams @@ -12997,7 +13024,7 @@ unit declaring a tagged type should be compiled with the restriction, though this is not required. @node No_Tagged_Type_Registration,No_Task_Allocators,No_Streams,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-tagged-type-registration}@anchor{1f8} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-tagged-type-registration}@anchor{1fb} @subsection No_Tagged_Type_Registration @@ -13012,7 +13039,7 @@ are declared. This restriction may be necessary in order to also apply the No_Elaboration_Code restriction. @node No_Task_Allocators,No_Task_At_Interrupt_Priority,No_Tagged_Type_Registration,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-task-allocators}@anchor{1f9} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-task-allocators}@anchor{1fc} @subsection No_Task_Allocators @@ -13022,7 +13049,7 @@ the No_Elaboration_Code restriction. or types containing task subcomponents. @node No_Task_At_Interrupt_Priority,No_Task_Attributes_Package,No_Task_Allocators,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-task-at-interrupt-priority}@anchor{1fa} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-task-at-interrupt-priority}@anchor{1fd} @subsection No_Task_At_Interrupt_Priority @@ -13034,7 +13061,7 @@ a consequence, the tasks are always created with a priority below that an interrupt priority. @node No_Task_Attributes_Package,No_Task_Hierarchy,No_Task_At_Interrupt_Priority,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-task-attributes-package}@anchor{1fb} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-task-attributes-package}@anchor{1fe} @subsection No_Task_Attributes_Package @@ -13051,7 +13078,7 @@ compatibility purposes (and a warning will be generated for its use if warnings on obsolescent features are activated). 
@node No_Task_Hierarchy,No_Task_Termination,No_Task_Attributes_Package,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-task-hierarchy}@anchor{1fc} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-task-hierarchy}@anchor{1ff} @subsection No_Task_Hierarchy @@ -13061,7 +13088,7 @@ warnings on obsolescent features are activated). directly on the environment task of the partition. @node No_Task_Termination,No_Tasking,No_Task_Hierarchy,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-task-termination}@anchor{1fd} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-task-termination}@anchor{200} @subsection No_Task_Termination @@ -13070,7 +13097,7 @@ directly on the environment task of the partition. [RM D.7] Tasks that terminate are erroneous. @node No_Tasking,No_Terminate_Alternatives,No_Task_Termination,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-tasking}@anchor{1fe} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-tasking}@anchor{201} @subsection No_Tasking @@ -13083,7 +13110,7 @@ and cause an error message to be output either by the compiler or binder. @node No_Terminate_Alternatives,No_Unchecked_Access,No_Tasking,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-terminate-alternatives}@anchor{1ff} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-terminate-alternatives}@anchor{202} @subsection No_Terminate_Alternatives @@ -13092,7 +13119,7 @@ binder. [RM D.7] There are no selective accepts with terminate alternatives. @node No_Unchecked_Access,No_Unchecked_Conversion,No_Terminate_Alternatives,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-unchecked-access}@anchor{200} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-unchecked-access}@anchor{203} @subsection No_Unchecked_Access @@ -13102,7 +13129,7 @@ binder. occurrences of the Unchecked_Access attribute. @node No_Unchecked_Conversion,No_Unchecked_Deallocation,No_Unchecked_Access,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-unchecked-conversion}@anchor{201} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-unchecked-conversion}@anchor{204} @subsection No_Unchecked_Conversion @@ -13112,7 +13139,7 @@ occurrences of the Unchecked_Access attribute. dependences on the predefined generic function Unchecked_Conversion. @node No_Unchecked_Deallocation,No_Use_Of_Attribute,No_Unchecked_Conversion,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-unchecked-deallocation}@anchor{202} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-unchecked-deallocation}@anchor{205} @subsection No_Unchecked_Deallocation @@ -13122,7 +13149,7 @@ dependences on the predefined generic function Unchecked_Conversion. dependences on the predefined generic procedure Unchecked_Deallocation. @node No_Use_Of_Attribute,No_Use_Of_Entity,No_Unchecked_Deallocation,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-use-of-attribute}@anchor{203} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-use-of-attribute}@anchor{206} @subsection No_Use_Of_Attribute @@ -13132,7 +13159,7 @@ dependences on the predefined generic procedure Unchecked_Deallocation. 
earlier versions of Ada. @node No_Use_Of_Entity,No_Use_Of_Pragma,No_Use_Of_Attribute,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-use-of-entity}@anchor{204} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-use-of-entity}@anchor{207} @subsection No_Use_Of_Entity @@ -13152,7 +13179,7 @@ No_Use_Of_Entity => Ada.Text_IO.Put_Line @end example @node No_Use_Of_Pragma,Pure_Barriers,No_Use_Of_Entity,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-use-of-pragma}@anchor{205} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-use-of-pragma}@anchor{208} @subsection No_Use_Of_Pragma @@ -13162,7 +13189,7 @@ No_Use_Of_Entity => Ada.Text_IO.Put_Line earlier versions of Ada. @node Pure_Barriers,Simple_Barriers,No_Use_Of_Pragma,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions pure-barriers}@anchor{206} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions pure-barriers}@anchor{209} @subsection Pure_Barriers @@ -13213,7 +13240,7 @@ but still ensures absence of side effects, exceptions, and recursion during the evaluation of the barriers. @node Simple_Barriers,Static_Priorities,Pure_Barriers,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions simple-barriers}@anchor{207} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions simple-barriers}@anchor{20a} @subsection Simple_Barriers @@ -13232,7 +13259,7 @@ compatibility purposes (and a warning will be generated for its use if warnings on obsolescent features are activated). @node Static_Priorities,Static_Storage_Size,Simple_Barriers,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions static-priorities}@anchor{208} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions static-priorities}@anchor{20b} @subsection Static_Priorities @@ -13243,7 +13270,7 @@ are static, and that there are no dependences on the package @code{Ada.Dynamic_Priorities}. @node Static_Storage_Size,,Static_Priorities,Partition-Wide Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions static-storage-size}@anchor{209} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions static-storage-size}@anchor{20c} @subsection Static_Storage_Size @@ -13253,7 +13280,7 @@ are static, and that there are no dependences on the package in a Storage_Size pragma or attribute definition clause is static. @node Program Unit Level Restrictions,,Partition-Wide Restrictions,Standard and Implementation Defined Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions id3}@anchor{20a}@anchor{gnat_rm/standard_and_implementation_defined_restrictions program-unit-level-restrictions}@anchor{20b} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions id3}@anchor{20d}@anchor{gnat_rm/standard_and_implementation_defined_restrictions program-unit-level-restrictions}@anchor{20e} @section Program Unit Level Restrictions @@ -13284,7 +13311,7 @@ other compilation units in the partition. @end menu @node No_Elaboration_Code,No_Dynamic_Accessibility_Checks,,Program Unit Level Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-elaboration-code}@anchor{20c} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-elaboration-code}@anchor{20f} @subsection No_Elaboration_Code @@ -13340,7 +13367,7 @@ associated with the unit. 
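To make the Pure_Barriers and Simple_Barriers restrictions above concrete, here is a minimal sketch of a protected object whose entry barrier is a single Boolean component, which is the form both restrictions are designed to permit; the names @code{Events}, @code{Event}, @code{Wait} and @code{Signal} are illustrative.

@example
package Events is
   protected Event is
      entry Wait;                 --  blocks callers until Signal is called
      procedure Signal;
   private
      Ready : Boolean := False;   --  the component used as the barrier
   end Event;
end Events;

package body Events is
   protected body Event is
      entry Wait when Ready is    --  barrier is just a Boolean component
      begin
         Ready := False;          --  consume the event
      end Wait;

      procedure Signal is
      begin
         Ready := True;           --  open the barrier
      end Signal;
   end Event;
end Events;
@end example

Evaluating such a barrier involves no function calls, exceptions, or side effects, so it trivially satisfies both restrictions.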
This counter is typically used to check for access before elaboration and to control multiple elaboration attempts. @node No_Dynamic_Accessibility_Checks,No_Dynamic_Sized_Objects,No_Elaboration_Code,Program Unit Level Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-dynamic-accessibility-checks}@anchor{20d} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-dynamic-accessibility-checks}@anchor{210} @subsection No_Dynamic_Accessibility_Checks @@ -13389,7 +13416,7 @@ In all other cases, the level of T is as defined by the existing rules of Ada. @end itemize @node No_Dynamic_Sized_Objects,No_Entry_Queue,No_Dynamic_Accessibility_Checks,Program Unit Level Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-dynamic-sized-objects}@anchor{20e} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-dynamic-sized-objects}@anchor{211} @subsection No_Dynamic_Sized_Objects @@ -13407,7 +13434,7 @@ access discriminants. It is often a good idea to combine this restriction with No_Secondary_Stack. @node No_Entry_Queue,No_Implementation_Aspect_Specifications,No_Dynamic_Sized_Objects,Program Unit Level Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-entry-queue}@anchor{20f} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-entry-queue}@anchor{212} @subsection No_Entry_Queue @@ -13420,7 +13447,7 @@ checked at compile time. A program execution is erroneous if an attempt is made to queue a second task on such an entry. @node No_Implementation_Aspect_Specifications,No_Implementation_Attributes,No_Entry_Queue,Program Unit Level Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implementation-aspect-specifications}@anchor{210} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implementation-aspect-specifications}@anchor{213} @subsection No_Implementation_Aspect_Specifications @@ -13431,7 +13458,7 @@ GNAT-defined aspects are present. With this restriction, the only aspects that can be used are those defined in the Ada Reference Manual. @node No_Implementation_Attributes,No_Implementation_Identifiers,No_Implementation_Aspect_Specifications,Program Unit Level Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implementation-attributes}@anchor{211} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implementation-attributes}@anchor{214} @subsection No_Implementation_Attributes @@ -13443,7 +13470,7 @@ attributes that can be used are those defined in the Ada Reference Manual. @node No_Implementation_Identifiers,No_Implementation_Pragmas,No_Implementation_Attributes,Program Unit Level Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implementation-identifiers}@anchor{212} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implementation-identifiers}@anchor{215} @subsection No_Implementation_Identifiers @@ -13454,7 +13481,7 @@ implementation-defined identifiers (marked with pragma Implementation_Defined) occur within language-defined packages. 
@node No_Implementation_Pragmas,No_Implementation_Restrictions,No_Implementation_Identifiers,Program Unit Level Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implementation-pragmas}@anchor{213} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implementation-pragmas}@anchor{216} @subsection No_Implementation_Pragmas @@ -13465,7 +13492,7 @@ GNAT-defined pragmas are present. With this restriction, the only pragmas that can be used are those defined in the Ada Reference Manual. @node No_Implementation_Restrictions,No_Implementation_Units,No_Implementation_Pragmas,Program Unit Level Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implementation-restrictions}@anchor{214} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implementation-restrictions}@anchor{217} @subsection No_Implementation_Restrictions @@ -13477,7 +13504,7 @@ are present. With this restriction, the only other restriction identifiers that can be used are those defined in the Ada Reference Manual. @node No_Implementation_Units,No_Implicit_Aliasing,No_Implementation_Restrictions,Program Unit Level Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implementation-units}@anchor{215} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implementation-units}@anchor{218} @subsection No_Implementation_Units @@ -13488,7 +13515,7 @@ mention in the context clause of any implementation-defined descendants of packages Ada, Interfaces, or System. @node No_Implicit_Aliasing,No_Implicit_Loops,No_Implementation_Units,Program Unit Level Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implicit-aliasing}@anchor{216} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implicit-aliasing}@anchor{219} @subsection No_Implicit_Aliasing @@ -13503,7 +13530,7 @@ to be aliased, and in such cases, it can always be replaced by the standard attribute Unchecked_Access which is preferable. @node No_Implicit_Loops,No_Obsolescent_Features,No_Implicit_Aliasing,Program Unit Level Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implicit-loops}@anchor{217} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-implicit-loops}@anchor{21a} @subsection No_Implicit_Loops @@ -13520,7 +13547,7 @@ arrays larger than about 5000 scalar components. Note that if this restriction is set in the spec of a package, it will not apply to its body. @node No_Obsolescent_Features,No_Wide_Characters,No_Implicit_Loops,Program Unit Level Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-obsolescent-features}@anchor{218} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-obsolescent-features}@anchor{21b} @subsection No_Obsolescent_Features @@ -13530,7 +13557,7 @@ is set in the spec of a package, it will not apply to its body. features are used, as defined in Annex J of the Ada Reference Manual. @node No_Wide_Characters,Static_Dispatch_Tables,No_Obsolescent_Features,Program Unit Level Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-wide-characters}@anchor{219} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions no-wide-characters}@anchor{21c} @subsection No_Wide_Characters @@ -13544,7 +13571,7 @@ appear in the program (that is literals representing characters not in type @code{Character}). 
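As a sketch of the kind of construct affected by the No_Implicit_Loops restriction described above: initializing a large array with an @code{others} aggregate is a typical case where the compiler may otherwise expand the construct into an implicit loop. Whether a loop is actually generated depends on the type, target, and optimization level, so this example is only indicative; the names are illustrative.

@example
procedure Init_Demo is
   type Buffer is array (1 .. 10_000) of Integer;

   --  A large aggregate like this may be expanded into an implicit
   --  initialization loop; under No_Implicit_Loops the compiler must
   --  either avoid generating such a loop or reject the construct.
   B : Buffer := (others => 0);
begin
   B (1) := B (1) + 1;
end Init_Demo;
@end example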
@node Static_Dispatch_Tables,SPARK_05,No_Wide_Characters,Program Unit Level Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions static-dispatch-tables}@anchor{21a} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions static-dispatch-tables}@anchor{21d} @subsection Static_Dispatch_Tables @@ -13554,7 +13581,7 @@ type @code{Character}). associated with dispatch tables can be placed in read-only memory. @node SPARK_05,,Static_Dispatch_Tables,Program Unit Level Restrictions -@anchor{gnat_rm/standard_and_implementation_defined_restrictions spark-05}@anchor{21b} +@anchor{gnat_rm/standard_and_implementation_defined_restrictions spark-05}@anchor{21e} @subsection SPARK_05 @@ -13577,7 +13604,7 @@ gnatprove -P project.gpr --mode=check_all @end example @node Implementation Advice,Implementation Defined Characteristics,Standard and Implementation Defined Restrictions,Top -@anchor{gnat_rm/implementation_advice doc}@anchor{21c}@anchor{gnat_rm/implementation_advice id1}@anchor{21d}@anchor{gnat_rm/implementation_advice implementation-advice}@anchor{a} +@anchor{gnat_rm/implementation_advice doc}@anchor{21f}@anchor{gnat_rm/implementation_advice id1}@anchor{220}@anchor{gnat_rm/implementation_advice implementation-advice}@anchor{a} @chapter Implementation Advice @@ -13675,7 +13702,7 @@ case the text describes what GNAT does and why. @end menu @node RM 1 1 3 20 Error Detection,RM 1 1 3 31 Child Units,,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-1-1-3-20-error-detection}@anchor{21e} +@anchor{gnat_rm/implementation_advice rm-1-1-3-20-error-detection}@anchor{221} @section RM 1.1.3(20): Error Detection @@ -13692,7 +13719,7 @@ or diagnosed at compile time. @geindex Child Units @node RM 1 1 3 31 Child Units,RM 1 1 5 12 Bounded Errors,RM 1 1 3 20 Error Detection,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-1-1-3-31-child-units}@anchor{21f} +@anchor{gnat_rm/implementation_advice rm-1-1-3-31-child-units}@anchor{222} @section RM 1.1.3(31): Child Units @@ -13708,7 +13735,7 @@ Followed. @geindex Bounded errors @node RM 1 1 5 12 Bounded Errors,RM 2 8 16 Pragmas,RM 1 1 3 31 Child Units,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-1-1-5-12-bounded-errors}@anchor{220} +@anchor{gnat_rm/implementation_advice rm-1-1-5-12-bounded-errors}@anchor{223} @section RM 1.1.5(12): Bounded Errors @@ -13725,7 +13752,7 @@ runtime. @geindex Pragmas @node RM 2 8 16 Pragmas,RM 2 8 17-19 Pragmas,RM 1 1 5 12 Bounded Errors,Implementation Advice -@anchor{gnat_rm/implementation_advice id2}@anchor{221}@anchor{gnat_rm/implementation_advice rm-2-8-16-pragmas}@anchor{222} +@anchor{gnat_rm/implementation_advice id2}@anchor{224}@anchor{gnat_rm/implementation_advice rm-2-8-16-pragmas}@anchor{225} @section RM 2.8(16): Pragmas @@ -13838,7 +13865,7 @@ that this advice not be followed. For details see @ref{7,,Implementation Defined Pragmas}. @node RM 2 8 17-19 Pragmas,RM 3 5 2 5 Alternative Character Sets,RM 2 8 16 Pragmas,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-2-8-17-19-pragmas}@anchor{223} +@anchor{gnat_rm/implementation_advice rm-2-8-17-19-pragmas}@anchor{226} @section RM 2.8(17-19): Pragmas @@ -13859,14 +13886,14 @@ replacing @code{library_items}.” @end itemize @end quotation -See @ref{222,,RM 2.8(16); Pragmas}. +See @ref{225,,RM 2.8(16); Pragmas}. 
@geindex Character Sets @geindex Alternative Character Sets @node RM 3 5 2 5 Alternative Character Sets,RM 3 5 4 28 Integer Types,RM 2 8 17-19 Pragmas,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-3-5-2-5-alternative-character-sets}@anchor{224} +@anchor{gnat_rm/implementation_advice rm-3-5-2-5-alternative-character-sets}@anchor{227} @section RM 3.5.2(5): Alternative Character Sets @@ -13894,7 +13921,7 @@ there is no such restriction. @geindex Integer types @node RM 3 5 4 28 Integer Types,RM 3 5 4 29 Integer Types,RM 3 5 2 5 Alternative Character Sets,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-3-5-4-28-integer-types}@anchor{225} +@anchor{gnat_rm/implementation_advice rm-3-5-4-28-integer-types}@anchor{228} @section RM 3.5.4(28): Integer Types @@ -13913,7 +13940,7 @@ are supported for convenient interface to C, and so that all hardware types of the machine are easily available. @node RM 3 5 4 29 Integer Types,RM 3 5 5 8 Enumeration Values,RM 3 5 4 28 Integer Types,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-3-5-4-29-integer-types}@anchor{226} +@anchor{gnat_rm/implementation_advice rm-3-5-4-29-integer-types}@anchor{229} @section RM 3.5.4(29): Integer Types @@ -13929,7 +13956,7 @@ Followed. @geindex Enumeration values @node RM 3 5 5 8 Enumeration Values,RM 3 5 7 17 Float Types,RM 3 5 4 29 Integer Types,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-3-5-5-8-enumeration-values}@anchor{227} +@anchor{gnat_rm/implementation_advice rm-3-5-5-8-enumeration-values}@anchor{22a} @section RM 3.5.5(8): Enumeration Values @@ -13949,7 +13976,7 @@ Followed. @geindex Float types @node RM 3 5 7 17 Float Types,RM 3 6 2 11 Multidimensional Arrays,RM 3 5 5 8 Enumeration Values,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-3-5-7-17-float-types}@anchor{228} +@anchor{gnat_rm/implementation_advice rm-3-5-7-17-float-types}@anchor{22b} @section RM 3.5.7(17): Float Types @@ -13979,7 +14006,7 @@ is a software rather than a hardware format. @geindex multidimensional @node RM 3 6 2 11 Multidimensional Arrays,RM 9 6 30-31 Duration’Small,RM 3 5 7 17 Float Types,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-3-6-2-11-multidimensional-arrays}@anchor{229} +@anchor{gnat_rm/implementation_advice rm-3-6-2-11-multidimensional-arrays}@anchor{22c} @section RM 3.6.2(11): Multidimensional Arrays @@ -13997,7 +14024,7 @@ Followed. @geindex Duration'Small @node RM 9 6 30-31 Duration’Small,RM 10 2 1 12 Consistent Representation,RM 3 6 2 11 Multidimensional Arrays,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-9-6-30-31-duration-small}@anchor{22a} +@anchor{gnat_rm/implementation_advice rm-9-6-30-31-duration-small}@anchor{22d} @section RM 9.6(30-31): Duration’Small @@ -14018,7 +14045,7 @@ it need not be the same time base as used for @code{Calendar.Clock}.” Followed. @node RM 10 2 1 12 Consistent Representation,RM 11 4 1 19 Exception Information,RM 9 6 30-31 Duration’Small,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-10-2-1-12-consistent-representation}@anchor{22b} +@anchor{gnat_rm/implementation_advice rm-10-2-1-12-consistent-representation}@anchor{22e} @section RM 10.2.1(12): Consistent Representation @@ -14040,7 +14067,7 @@ advice without severely impacting efficiency of execution. 
@geindex Exception information @node RM 11 4 1 19 Exception Information,RM 11 5 28 Suppression of Checks,RM 10 2 1 12 Consistent Representation,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-11-4-1-19-exception-information}@anchor{22c} +@anchor{gnat_rm/implementation_advice rm-11-4-1-19-exception-information}@anchor{22f} @section RM 11.4.1(19): Exception Information @@ -14071,7 +14098,7 @@ Pragma @code{Discard_Names}. @geindex suppression of @node RM 11 5 28 Suppression of Checks,RM 13 1 21-24 Representation Clauses,RM 11 4 1 19 Exception Information,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-11-5-28-suppression-of-checks}@anchor{22d} +@anchor{gnat_rm/implementation_advice rm-11-5-28-suppression-of-checks}@anchor{230} @section RM 11.5(28): Suppression of Checks @@ -14086,7 +14113,7 @@ Followed. @geindex Representation clauses @node RM 13 1 21-24 Representation Clauses,RM 13 2 6-8 Packed Types,RM 11 5 28 Suppression of Checks,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-13-1-21-24-representation-clauses}@anchor{22e} +@anchor{gnat_rm/implementation_advice rm-13-1-21-24-representation-clauses}@anchor{231} @section RM 13.1 (21-24): Representation Clauses @@ -14135,7 +14162,7 @@ Followed. @geindex Packed types @node RM 13 2 6-8 Packed Types,RM 13 3 14-19 Address Clauses,RM 13 1 21-24 Representation Clauses,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-13-2-6-8-packed-types}@anchor{22f} +@anchor{gnat_rm/implementation_advice rm-13-2-6-8-packed-types}@anchor{232} @section RM 13.2(6-8): Packed Types @@ -14166,7 +14193,7 @@ subcomponent of the packed type. @geindex Address clauses @node RM 13 3 14-19 Address Clauses,RM 13 3 29-35 Alignment Clauses,RM 13 2 6-8 Packed Types,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-13-3-14-19-address-clauses}@anchor{230} +@anchor{gnat_rm/implementation_advice rm-13-3-14-19-address-clauses}@anchor{233} @section RM 13.3(14-19): Address Clauses @@ -14219,7 +14246,7 @@ Followed. @geindex Alignment clauses @node RM 13 3 29-35 Alignment Clauses,RM 13 3 42-43 Size Clauses,RM 13 3 14-19 Address Clauses,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-13-3-29-35-alignment-clauses}@anchor{231} +@anchor{gnat_rm/implementation_advice rm-13-3-29-35-alignment-clauses}@anchor{234} @section RM 13.3(29-35): Alignment Clauses @@ -14276,7 +14303,7 @@ Followed. @geindex Size clauses @node RM 13 3 42-43 Size Clauses,RM 13 3 50-56 Size Clauses,RM 13 3 29-35 Alignment Clauses,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-13-3-42-43-size-clauses}@anchor{232} +@anchor{gnat_rm/implementation_advice rm-13-3-42-43-size-clauses}@anchor{235} @section RM 13.3(42-43): Size Clauses @@ -14294,7 +14321,7 @@ object’s @code{Alignment} (if the @code{Alignment} is nonzero).” Followed. @node RM 13 3 50-56 Size Clauses,RM 13 3 71-73 Component Size Clauses,RM 13 3 42-43 Size Clauses,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-13-3-50-56-size-clauses}@anchor{233} +@anchor{gnat_rm/implementation_advice rm-13-3-50-56-size-clauses}@anchor{236} @section RM 13.3(50-56): Size Clauses @@ -14345,7 +14372,7 @@ Followed. 
@geindex Component_Size clauses @node RM 13 3 71-73 Component Size Clauses,RM 13 4 9-10 Enumeration Representation Clauses,RM 13 3 50-56 Size Clauses,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-13-3-71-73-component-size-clauses}@anchor{234} +@anchor{gnat_rm/implementation_advice rm-13-3-71-73-component-size-clauses}@anchor{237} @section RM 13.3(71-73): Component Size Clauses @@ -14379,7 +14406,7 @@ Followed. @geindex enumeration @node RM 13 4 9-10 Enumeration Representation Clauses,RM 13 5 1 17-22 Record Representation Clauses,RM 13 3 71-73 Component Size Clauses,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-13-4-9-10-enumeration-representation-clauses}@anchor{235} +@anchor{gnat_rm/implementation_advice rm-13-4-9-10-enumeration-representation-clauses}@anchor{238} @section RM 13.4(9-10): Enumeration Representation Clauses @@ -14401,7 +14428,7 @@ Followed. @geindex records @node RM 13 5 1 17-22 Record Representation Clauses,RM 13 5 2 5 Storage Place Attributes,RM 13 4 9-10 Enumeration Representation Clauses,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-13-5-1-17-22-record-representation-clauses}@anchor{236} +@anchor{gnat_rm/implementation_advice rm-13-5-1-17-22-record-representation-clauses}@anchor{239} @section RM 13.5.1(17-22): Record Representation Clauses @@ -14461,7 +14488,7 @@ and all mentioned features are implemented. @geindex Storage place attributes @node RM 13 5 2 5 Storage Place Attributes,RM 13 5 3 7-8 Bit Ordering,RM 13 5 1 17-22 Record Representation Clauses,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-13-5-2-5-storage-place-attributes}@anchor{237} +@anchor{gnat_rm/implementation_advice rm-13-5-2-5-storage-place-attributes}@anchor{23a} @section RM 13.5.2(5): Storage Place Attributes @@ -14481,7 +14508,7 @@ Followed. There are no such components in GNAT. @geindex Bit ordering @node RM 13 5 3 7-8 Bit Ordering,RM 13 7 37 Address as Private,RM 13 5 2 5 Storage Place Attributes,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-13-5-3-7-8-bit-ordering}@anchor{238} +@anchor{gnat_rm/implementation_advice rm-13-5-3-7-8-bit-ordering}@anchor{23b} @section RM 13.5.3(7-8): Bit Ordering @@ -14501,7 +14528,7 @@ Thus non-default bit ordering is not supported. @geindex as private type @node RM 13 7 37 Address as Private,RM 13 7 1 16 Address Operations,RM 13 5 3 7-8 Bit Ordering,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-13-7-37-address-as-private}@anchor{239} +@anchor{gnat_rm/implementation_advice rm-13-7-37-address-as-private}@anchor{23c} @section RM 13.7(37): Address as Private @@ -14519,7 +14546,7 @@ Followed. @geindex operations of @node RM 13 7 1 16 Address Operations,RM 13 9 14-17 Unchecked Conversion,RM 13 7 37 Address as Private,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-13-7-1-16-address-operations}@anchor{23a} +@anchor{gnat_rm/implementation_advice rm-13-7-1-16-address-operations}@anchor{23d} @section RM 13.7.1(16): Address Operations @@ -14537,7 +14564,7 @@ operation raises @code{Program_Error}, since all operations make sense. @geindex Unchecked conversion @node RM 13 9 14-17 Unchecked Conversion,RM 13 11 23-25 Implicit Heap Usage,RM 13 7 1 16 Address Operations,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-13-9-14-17-unchecked-conversion}@anchor{23b} +@anchor{gnat_rm/implementation_advice rm-13-9-14-17-unchecked-conversion}@anchor{23e} @section RM 13.9(14-17): Unchecked Conversion @@ -14581,7 +14608,7 @@ Followed. 
@geindex implicit @node RM 13 11 23-25 Implicit Heap Usage,RM 13 11 2 17 Unchecked Deallocation,RM 13 9 14-17 Unchecked Conversion,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-13-11-23-25-implicit-heap-usage}@anchor{23c} +@anchor{gnat_rm/implementation_advice rm-13-11-23-25-implicit-heap-usage}@anchor{23f} @section RM 13.11(23-25): Implicit Heap Usage @@ -14632,7 +14659,7 @@ Followed. @geindex Unchecked deallocation @node RM 13 11 2 17 Unchecked Deallocation,RM 13 13 2 1 6 Stream Oriented Attributes,RM 13 11 23-25 Implicit Heap Usage,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-13-11-2-17-unchecked-deallocation}@anchor{23d} +@anchor{gnat_rm/implementation_advice rm-13-11-2-17-unchecked-deallocation}@anchor{240} @section RM 13.11.2(17): Unchecked Deallocation @@ -14647,7 +14674,7 @@ Followed. @geindex Stream oriented attributes @node RM 13 13 2 1 6 Stream Oriented Attributes,RM A 1 52 Names of Predefined Numeric Types,RM 13 11 2 17 Unchecked Deallocation,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-13-13-2-1-6-stream-oriented-attributes}@anchor{23e} +@anchor{gnat_rm/implementation_advice rm-13-13-2-1-6-stream-oriented-attributes}@anchor{241} @section RM 13.13.2(1.6): Stream Oriented Attributes @@ -14678,7 +14705,7 @@ scalar types. This XDR alternative can be enabled via the binder switch -xdr. @geindex Stream oriented attributes @node RM A 1 52 Names of Predefined Numeric Types,RM A 3 2 49 Ada Characters Handling,RM 13 13 2 1 6 Stream Oriented Attributes,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-a-1-52-names-of-predefined-numeric-types}@anchor{23f} +@anchor{gnat_rm/implementation_advice rm-a-1-52-names-of-predefined-numeric-types}@anchor{242} @section RM A.1(52): Names of Predefined Numeric Types @@ -14696,7 +14723,7 @@ Followed. @geindex Ada.Characters.Handling @node RM A 3 2 49 Ada Characters Handling,RM A 4 4 106 Bounded-Length String Handling,RM A 1 52 Names of Predefined Numeric Types,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-a-3-2-49-ada-characters-handling}@anchor{240} +@anchor{gnat_rm/implementation_advice rm-a-3-2-49-ada-characters-handling}@anchor{243} @section RM A.3.2(49): @code{Ada.Characters.Handling} @@ -14713,7 +14740,7 @@ Followed. GNAT provides no such localized definitions. @geindex Bounded-length strings @node RM A 4 4 106 Bounded-Length String Handling,RM A 5 2 46-47 Random Number Generation,RM A 3 2 49 Ada Characters Handling,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-a-4-4-106-bounded-length-string-handling}@anchor{241} +@anchor{gnat_rm/implementation_advice rm-a-4-4-106-bounded-length-string-handling}@anchor{244} @section RM A.4.4(106): Bounded-Length String Handling @@ -14728,7 +14755,7 @@ Followed. No implicit pointers or dynamic allocation are used. @geindex Random number generation @node RM A 5 2 46-47 Random Number Generation,RM A 10 7 23 Get_Immediate,RM A 4 4 106 Bounded-Length String Handling,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-a-5-2-46-47-random-number-generation}@anchor{242} +@anchor{gnat_rm/implementation_advice rm-a-5-2-46-47-random-number-generation}@anchor{245} @section RM A.5.2(46-47): Random Number Generation @@ -14757,7 +14784,7 @@ condition here to hold true. 
@geindex Get_Immediate @node RM A 10 7 23 Get_Immediate,RM A 18 Containers,RM A 5 2 46-47 Random Number Generation,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-a-10-7-23-get-immediate}@anchor{243} +@anchor{gnat_rm/implementation_advice rm-a-10-7-23-get-immediate}@anchor{246} @section RM A.10.7(23): @code{Get_Immediate} @@ -14781,7 +14808,7 @@ this functionality. @geindex Containers @node RM A 18 Containers,RM B 1 39-41 Pragma Export,RM A 10 7 23 Get_Immediate,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-a-18-containers}@anchor{244} +@anchor{gnat_rm/implementation_advice rm-a-18-containers}@anchor{247} @section RM A.18: @code{Containers} @@ -14802,7 +14829,7 @@ follow the implementation advice. @geindex Export @node RM B 1 39-41 Pragma Export,RM B 2 12-13 Package Interfaces,RM A 18 Containers,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-b-1-39-41-pragma-export}@anchor{245} +@anchor{gnat_rm/implementation_advice rm-b-1-39-41-pragma-export}@anchor{248} @section RM B.1(39-41): Pragma @code{Export} @@ -14850,7 +14877,7 @@ Followed. @geindex Interfaces @node RM B 2 12-13 Package Interfaces,RM B 3 63-71 Interfacing with C,RM B 1 39-41 Pragma Export,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-b-2-12-13-package-interfaces}@anchor{246} +@anchor{gnat_rm/implementation_advice rm-b-2-12-13-package-interfaces}@anchor{249} @section RM B.2(12-13): Package @code{Interfaces} @@ -14880,7 +14907,7 @@ Followed. GNAT provides all the packages described in this section. @geindex interfacing with @node RM B 3 63-71 Interfacing with C,RM B 4 95-98 Interfacing with COBOL,RM B 2 12-13 Package Interfaces,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-b-3-63-71-interfacing-with-c}@anchor{247} +@anchor{gnat_rm/implementation_advice rm-b-3-63-71-interfacing-with-c}@anchor{24a} @section RM B.3(63-71): Interfacing with C @@ -14968,7 +14995,7 @@ Followed. @geindex interfacing with @node RM B 4 95-98 Interfacing with COBOL,RM B 5 22-26 Interfacing with Fortran,RM B 3 63-71 Interfacing with C,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-b-4-95-98-interfacing-with-cobol}@anchor{248} +@anchor{gnat_rm/implementation_advice rm-b-4-95-98-interfacing-with-cobol}@anchor{24b} @section RM B.4(95-98): Interfacing with COBOL @@ -15009,7 +15036,7 @@ Followed. @geindex interfacing with @node RM B 5 22-26 Interfacing with Fortran,RM C 1 3-5 Access to Machine Operations,RM B 4 95-98 Interfacing with COBOL,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-b-5-22-26-interfacing-with-fortran}@anchor{249} +@anchor{gnat_rm/implementation_advice rm-b-5-22-26-interfacing-with-fortran}@anchor{24c} @section RM B.5(22-26): Interfacing with Fortran @@ -15060,7 +15087,7 @@ Followed. @geindex Machine operations @node RM C 1 3-5 Access to Machine Operations,RM C 1 10-16 Access to Machine Operations,RM B 5 22-26 Interfacing with Fortran,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-c-1-3-5-access-to-machine-operations}@anchor{24a} +@anchor{gnat_rm/implementation_advice rm-c-1-3-5-access-to-machine-operations}@anchor{24d} @section RM C.1(3-5): Access to Machine Operations @@ -15095,7 +15122,7 @@ object that is specified as exported.” Followed. 
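As a concrete illustration of the foreign-language interfacing support discussed in the RM B.1 and B.3 sections above, here is a minimal, self-contained sketch that imports the C library function @code{puts}; the procedure name @code{Call_Puts} is illustrative, and a real binding would normally live in a dedicated package.

@example
with Interfaces.C;         use Interfaces.C;
with Interfaces.C.Strings; use Interfaces.C.Strings;

procedure Call_Puts is
   --  int puts (const char *s);
   function puts (S : chars_ptr) return int
     with Import, Convention => C, External_Name => "puts";

   Msg : chars_ptr := New_String ("hello from Ada");
begin
   if puts (Msg) < 0 then
      null;   --  ignore output errors in this sketch
   end if;
   Free (Msg);
end Call_Puts;
@end example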
@node RM C 1 10-16 Access to Machine Operations,RM C 3 28 Interrupt Support,RM C 1 3-5 Access to Machine Operations,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-c-1-10-16-access-to-machine-operations}@anchor{24b} +@anchor{gnat_rm/implementation_advice rm-c-1-10-16-access-to-machine-operations}@anchor{24e} @section RM C.1(10-16): Access to Machine Operations @@ -15156,7 +15183,7 @@ Followed on any target supporting such operations. @geindex Interrupt support @node RM C 3 28 Interrupt Support,RM C 3 1 20-21 Protected Procedure Handlers,RM C 1 10-16 Access to Machine Operations,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-c-3-28-interrupt-support}@anchor{24c} +@anchor{gnat_rm/implementation_advice rm-c-3-28-interrupt-support}@anchor{24f} @section RM C.3(28): Interrupt Support @@ -15174,7 +15201,7 @@ of interrupt blocking. @geindex Protected procedure handlers @node RM C 3 1 20-21 Protected Procedure Handlers,RM C 3 2 25 Package Interrupts,RM C 3 28 Interrupt Support,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-c-3-1-20-21-protected-procedure-handlers}@anchor{24d} +@anchor{gnat_rm/implementation_advice rm-c-3-1-20-21-protected-procedure-handlers}@anchor{250} @section RM C.3.1(20-21): Protected Procedure Handlers @@ -15200,7 +15227,7 @@ Followed. Compile time warnings are given when possible. @geindex Interrupts @node RM C 3 2 25 Package Interrupts,RM C 4 14 Pre-elaboration Requirements,RM C 3 1 20-21 Protected Procedure Handlers,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-c-3-2-25-package-interrupts}@anchor{24e} +@anchor{gnat_rm/implementation_advice rm-c-3-2-25-package-interrupts}@anchor{251} @section RM C.3.2(25): Package @code{Interrupts} @@ -15218,7 +15245,7 @@ Followed. @geindex Pre-elaboration requirements @node RM C 4 14 Pre-elaboration Requirements,RM C 5 8 Pragma Discard_Names,RM C 3 2 25 Package Interrupts,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-c-4-14-pre-elaboration-requirements}@anchor{24f} +@anchor{gnat_rm/implementation_advice rm-c-4-14-pre-elaboration-requirements}@anchor{252} @section RM C.4(14): Pre-elaboration Requirements @@ -15234,7 +15261,7 @@ Followed. Executable code is generated in some cases, e.g., loops to initialize large arrays. @node RM C 5 8 Pragma Discard_Names,RM C 7 2 30 The Package Task_Attributes,RM C 4 14 Pre-elaboration Requirements,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-c-5-8-pragma-discard-names}@anchor{250} +@anchor{gnat_rm/implementation_advice rm-c-5-8-pragma-discard-names}@anchor{253} @section RM C.5(8): Pragma @code{Discard_Names} @@ -15252,7 +15279,7 @@ Followed. @geindex Task_Attributes @node RM C 7 2 30 The Package Task_Attributes,RM D 3 17 Locking Policies,RM C 5 8 Pragma Discard_Names,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-c-7-2-30-the-package-task-attributes}@anchor{251} +@anchor{gnat_rm/implementation_advice rm-c-7-2-30-the-package-task-attributes}@anchor{254} @section RM C.7.2(30): The Package Task_Attributes @@ -15273,7 +15300,7 @@ Not followed. This implementation is not targeted to such a domain. 
@geindex Locking Policies @node RM D 3 17 Locking Policies,RM D 4 16 Entry Queuing Policies,RM C 7 2 30 The Package Task_Attributes,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-d-3-17-locking-policies}@anchor{252} +@anchor{gnat_rm/implementation_advice rm-d-3-17-locking-policies}@anchor{255} @section RM D.3(17): Locking Policies @@ -15290,7 +15317,7 @@ whose names (@code{Inheritance_Locking} and @geindex Entry queuing policies @node RM D 4 16 Entry Queuing Policies,RM D 6 9-10 Preemptive Abort,RM D 3 17 Locking Policies,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-d-4-16-entry-queuing-policies}@anchor{253} +@anchor{gnat_rm/implementation_advice rm-d-4-16-entry-queuing-policies}@anchor{256} @section RM D.4(16): Entry Queuing Policies @@ -15305,7 +15332,7 @@ Followed. No such implementation-defined queuing policies exist. @geindex Preemptive abort @node RM D 6 9-10 Preemptive Abort,RM D 7 21 Tasking Restrictions,RM D 4 16 Entry Queuing Policies,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-d-6-9-10-preemptive-abort}@anchor{254} +@anchor{gnat_rm/implementation_advice rm-d-6-9-10-preemptive-abort}@anchor{257} @section RM D.6(9-10): Preemptive Abort @@ -15331,7 +15358,7 @@ Followed. @geindex Tasking restrictions @node RM D 7 21 Tasking Restrictions,RM D 8 47-49 Monotonic Time,RM D 6 9-10 Preemptive Abort,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-d-7-21-tasking-restrictions}@anchor{255} +@anchor{gnat_rm/implementation_advice rm-d-7-21-tasking-restrictions}@anchor{258} @section RM D.7(21): Tasking Restrictions @@ -15350,7 +15377,7 @@ pragma @code{Profile (Restricted)} for more details. @geindex monotonic @node RM D 8 47-49 Monotonic Time,RM E 5 28-29 Partition Communication Subsystem,RM D 7 21 Tasking Restrictions,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-d-8-47-49-monotonic-time}@anchor{256} +@anchor{gnat_rm/implementation_advice rm-d-8-47-49-monotonic-time}@anchor{259} @section RM D.8(47-49): Monotonic Time @@ -15385,7 +15412,7 @@ Followed. @geindex PCS @node RM E 5 28-29 Partition Communication Subsystem,RM F 7 COBOL Support,RM D 8 47-49 Monotonic Time,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-e-5-28-29-partition-communication-subsystem}@anchor{257} +@anchor{gnat_rm/implementation_advice rm-e-5-28-29-partition-communication-subsystem}@anchor{25a} @section RM E.5(28-29): Partition Communication Subsystem @@ -15413,7 +15440,7 @@ GNAT. @geindex COBOL support @node RM F 7 COBOL Support,RM F 1 2 Decimal Radix Support,RM E 5 28-29 Partition Communication Subsystem,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-f-7-cobol-support}@anchor{258} +@anchor{gnat_rm/implementation_advice rm-f-7-cobol-support}@anchor{25b} @section RM F(7): COBOL Support @@ -15433,7 +15460,7 @@ Followed. @geindex Decimal radix support @node RM F 1 2 Decimal Radix Support,RM G Numerics,RM F 7 COBOL Support,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-f-1-2-decimal-radix-support}@anchor{259} +@anchor{gnat_rm/implementation_advice rm-f-1-2-decimal-radix-support}@anchor{25c} @section RM F.1(2): Decimal Radix Support @@ -15449,7 +15476,7 @@ representations. @geindex Numerics @node RM G Numerics,RM G 1 1 56-58 Complex Types,RM F 1 2 Decimal Radix Support,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-g-numerics}@anchor{25a} +@anchor{gnat_rm/implementation_advice rm-g-numerics}@anchor{25d} @section RM G: Numerics @@ -15469,7 +15496,7 @@ Followed. 
@geindex Complex types @node RM G 1 1 56-58 Complex Types,RM G 1 2 49 Complex Elementary Functions,RM G Numerics,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-g-1-1-56-58-complex-types}@anchor{25b} +@anchor{gnat_rm/implementation_advice rm-g-1-1-56-58-complex-types}@anchor{25e} @section RM G.1.1(56-58): Complex Types @@ -15531,7 +15558,7 @@ Followed. @geindex Complex elementary functions @node RM G 1 2 49 Complex Elementary Functions,RM G 2 4 19 Accuracy Requirements,RM G 1 1 56-58 Complex Types,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-g-1-2-49-complex-elementary-functions}@anchor{25c} +@anchor{gnat_rm/implementation_advice rm-g-1-2-49-complex-elementary-functions}@anchor{25f} @section RM G.1.2(49): Complex Elementary Functions @@ -15553,7 +15580,7 @@ Followed. @geindex Accuracy requirements @node RM G 2 4 19 Accuracy Requirements,RM G 2 6 15 Complex Arithmetic Accuracy,RM G 1 2 49 Complex Elementary Functions,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-g-2-4-19-accuracy-requirements}@anchor{25d} +@anchor{gnat_rm/implementation_advice rm-g-2-4-19-accuracy-requirements}@anchor{260} @section RM G.2.4(19): Accuracy Requirements @@ -15577,7 +15604,7 @@ Followed. @geindex complex arithmetic @node RM G 2 6 15 Complex Arithmetic Accuracy,RM H 6 15/2 Pragma Partition_Elaboration_Policy,RM G 2 4 19 Accuracy Requirements,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-g-2-6-15-complex-arithmetic-accuracy}@anchor{25e} +@anchor{gnat_rm/implementation_advice rm-g-2-6-15-complex-arithmetic-accuracy}@anchor{261} @section RM G.2.6(15): Complex Arithmetic Accuracy @@ -15595,7 +15622,7 @@ Followed. @geindex Sequential elaboration policy @node RM H 6 15/2 Pragma Partition_Elaboration_Policy,,RM G 2 6 15 Complex Arithmetic Accuracy,Implementation Advice -@anchor{gnat_rm/implementation_advice rm-h-6-15-2-pragma-partition-elaboration-policy}@anchor{25f} +@anchor{gnat_rm/implementation_advice rm-h-6-15-2-pragma-partition-elaboration-policy}@anchor{262} @section RM H.6(15/2): Pragma Partition_Elaboration_Policy @@ -15610,7 +15637,7 @@ immediately terminated.” Not followed. @node Implementation Defined Characteristics,Intrinsic Subprograms,Implementation Advice,Top -@anchor{gnat_rm/implementation_defined_characteristics doc}@anchor{260}@anchor{gnat_rm/implementation_defined_characteristics id1}@anchor{261}@anchor{gnat_rm/implementation_defined_characteristics implementation-defined-characteristics}@anchor{b} +@anchor{gnat_rm/implementation_defined_characteristics doc}@anchor{263}@anchor{gnat_rm/implementation_defined_characteristics id1}@anchor{264}@anchor{gnat_rm/implementation_defined_characteristics implementation-defined-characteristics}@anchor{b} @chapter Implementation Defined Characteristics @@ -16459,7 +16486,7 @@ See separate section on data representations. such aspects and the legality rules for such aspects. See 13.1.1(38).” @end itemize -See @ref{123,,Implementation Defined Aspects}. +See @ref{125,,Implementation Defined Aspects}. @itemize * @@ -16905,7 +16932,7 @@ When the @code{Pattern} parameter is not the null string, it is interpreted according to the syntax of regular expressions as defined in the @code{GNAT.Regexp} package. -See @ref{262,,GNAT.Regexp (g-regexp.ads)}. +See @ref{265,,GNAT.Regexp (g-regexp.ads)}. @itemize * @@ -18003,7 +18030,7 @@ Information on those subjects is not yet available. Execution is erroneous in that case. 
@node Intrinsic Subprograms,Representation Clauses and Pragmas,Implementation Defined Characteristics,Top -@anchor{gnat_rm/intrinsic_subprograms doc}@anchor{263}@anchor{gnat_rm/intrinsic_subprograms id1}@anchor{264}@anchor{gnat_rm/intrinsic_subprograms intrinsic-subprograms}@anchor{c} +@anchor{gnat_rm/intrinsic_subprograms doc}@anchor{266}@anchor{gnat_rm/intrinsic_subprograms id1}@anchor{267}@anchor{gnat_rm/intrinsic_subprograms intrinsic-subprograms}@anchor{c} @chapter Intrinsic Subprograms @@ -18041,7 +18068,7 @@ Ada standard does not require Ada compilers to implement this feature. @end menu @node Intrinsic Operators,Compilation_ISO_Date,,Intrinsic Subprograms -@anchor{gnat_rm/intrinsic_subprograms id2}@anchor{265}@anchor{gnat_rm/intrinsic_subprograms intrinsic-operators}@anchor{266} +@anchor{gnat_rm/intrinsic_subprograms id2}@anchor{268}@anchor{gnat_rm/intrinsic_subprograms intrinsic-operators}@anchor{269} @section Intrinsic Operators @@ -18072,7 +18099,7 @@ It is also possible to specify such operators for private types, if the full views are appropriate arithmetic types. @node Compilation_ISO_Date,Compilation_Date,Intrinsic Operators,Intrinsic Subprograms -@anchor{gnat_rm/intrinsic_subprograms compilation-iso-date}@anchor{267}@anchor{gnat_rm/intrinsic_subprograms id3}@anchor{268} +@anchor{gnat_rm/intrinsic_subprograms compilation-iso-date}@anchor{26a}@anchor{gnat_rm/intrinsic_subprograms id3}@anchor{26b} @section Compilation_ISO_Date @@ -18086,7 +18113,7 @@ application program should simply call the function the current compilation (in local time format YYYY-MM-DD). @node Compilation_Date,Compilation_Time,Compilation_ISO_Date,Intrinsic Subprograms -@anchor{gnat_rm/intrinsic_subprograms compilation-date}@anchor{269}@anchor{gnat_rm/intrinsic_subprograms id4}@anchor{26a} +@anchor{gnat_rm/intrinsic_subprograms compilation-date}@anchor{26c}@anchor{gnat_rm/intrinsic_subprograms id4}@anchor{26d} @section Compilation_Date @@ -18096,7 +18123,7 @@ Same as Compilation_ISO_Date, except the string is in the form MMM DD YYYY. @node Compilation_Time,Enclosing_Entity,Compilation_Date,Intrinsic Subprograms -@anchor{gnat_rm/intrinsic_subprograms compilation-time}@anchor{26b}@anchor{gnat_rm/intrinsic_subprograms id5}@anchor{26c} +@anchor{gnat_rm/intrinsic_subprograms compilation-time}@anchor{26e}@anchor{gnat_rm/intrinsic_subprograms id5}@anchor{26f} @section Compilation_Time @@ -18110,7 +18137,7 @@ application program should simply call the function the current compilation (in local time format HH:MM:SS). @node Enclosing_Entity,Exception_Information,Compilation_Time,Intrinsic Subprograms -@anchor{gnat_rm/intrinsic_subprograms enclosing-entity}@anchor{26d}@anchor{gnat_rm/intrinsic_subprograms id6}@anchor{26e} +@anchor{gnat_rm/intrinsic_subprograms enclosing-entity}@anchor{270}@anchor{gnat_rm/intrinsic_subprograms id6}@anchor{271} @section Enclosing_Entity @@ -18124,7 +18151,7 @@ application program should simply call the function the current subprogram, package, task, entry, or protected subprogram. 
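The compilation-date and enclosing-entity intrinsics above are normally reached through the GNAT-provided package @code{GNAT.Source_Info}; the following minimal sketch simply prints their values (the procedure name is illustrative).

@example
with Ada.Text_IO;      use Ada.Text_IO;
with GNAT.Source_Info; use GNAT.Source_Info;

procedure Show_Build_Info is
begin
   Put_Line ("Compiled on " & Compilation_ISO_Date    --  YYYY-MM-DD
             & " at "       & Compilation_Time);      --  HH:MM:SS
   Put_Line ("Printed from " & Enclosing_Entity);     --  "Show_Build_Info"
end Show_Build_Info;
@end example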
@node Exception_Information,Exception_Message,Enclosing_Entity,Intrinsic Subprograms -@anchor{gnat_rm/intrinsic_subprograms exception-information}@anchor{26f}@anchor{gnat_rm/intrinsic_subprograms id7}@anchor{270} +@anchor{gnat_rm/intrinsic_subprograms exception-information}@anchor{272}@anchor{gnat_rm/intrinsic_subprograms id7}@anchor{273} @section Exception_Information @@ -18138,7 +18165,7 @@ so an application program should simply call the function the exception information associated with the current exception. @node Exception_Message,Exception_Name,Exception_Information,Intrinsic Subprograms -@anchor{gnat_rm/intrinsic_subprograms exception-message}@anchor{271}@anchor{gnat_rm/intrinsic_subprograms id8}@anchor{272} +@anchor{gnat_rm/intrinsic_subprograms exception-message}@anchor{274}@anchor{gnat_rm/intrinsic_subprograms id8}@anchor{275} @section Exception_Message @@ -18152,7 +18179,7 @@ so an application program should simply call the function the message associated with the current exception. @node Exception_Name,File,Exception_Message,Intrinsic Subprograms -@anchor{gnat_rm/intrinsic_subprograms exception-name}@anchor{273}@anchor{gnat_rm/intrinsic_subprograms id9}@anchor{274} +@anchor{gnat_rm/intrinsic_subprograms exception-name}@anchor{276}@anchor{gnat_rm/intrinsic_subprograms id9}@anchor{277} @section Exception_Name @@ -18166,7 +18193,7 @@ so an application program should simply call the function the name of the current exception. @node File,Line,Exception_Name,Intrinsic Subprograms -@anchor{gnat_rm/intrinsic_subprograms file}@anchor{275}@anchor{gnat_rm/intrinsic_subprograms id10}@anchor{276} +@anchor{gnat_rm/intrinsic_subprograms file}@anchor{278}@anchor{gnat_rm/intrinsic_subprograms id10}@anchor{279} @section File @@ -18180,7 +18207,7 @@ application program should simply call the function file. @node Line,Shifts and Rotates,File,Intrinsic Subprograms -@anchor{gnat_rm/intrinsic_subprograms id11}@anchor{277}@anchor{gnat_rm/intrinsic_subprograms line}@anchor{278} +@anchor{gnat_rm/intrinsic_subprograms id11}@anchor{27a}@anchor{gnat_rm/intrinsic_subprograms line}@anchor{27b} @section Line @@ -18194,7 +18221,7 @@ application program should simply call the function source line. @node Shifts and Rotates,Source_Location,Line,Intrinsic Subprograms -@anchor{gnat_rm/intrinsic_subprograms id12}@anchor{279}@anchor{gnat_rm/intrinsic_subprograms shifts-and-rotates}@anchor{27a} +@anchor{gnat_rm/intrinsic_subprograms id12}@anchor{27c}@anchor{gnat_rm/intrinsic_subprograms shifts-and-rotates}@anchor{27d} @section Shifts and Rotates @@ -18237,7 +18264,7 @@ corresponding operator for modular type. In particular, shifting a negative number may change its sign bit to positive. @node Source_Location,,Shifts and Rotates,Intrinsic Subprograms -@anchor{gnat_rm/intrinsic_subprograms id13}@anchor{27b}@anchor{gnat_rm/intrinsic_subprograms source-location}@anchor{27c} +@anchor{gnat_rm/intrinsic_subprograms id13}@anchor{27e}@anchor{gnat_rm/intrinsic_subprograms source-location}@anchor{27f} @section Source_Location @@ -18251,7 +18278,7 @@ application program should simply call the function source file location. 
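As described in the Shifts and Rotates section above, the shift and rotate operations can be declared for a user-defined integer type by importing them with convention Intrinsic; the following sketch does so for an illustrative 32-bit modular type.

@example
package Word_Ops is
   type Word is mod 2 ** 32;

   --  Imported with convention Intrinsic, these bind directly to the
   --  shift and rotate operations for the type, as for the predefined
   --  operations of the types in package Interfaces.
   function Shift_Left  (Value : Word; Amount : Natural) return Word
     with Import, Convention => Intrinsic;
   function Shift_Right (Value : Word; Amount : Natural) return Word
     with Import, Convention => Intrinsic;
   function Rotate_Left (Value : Word; Amount : Natural) return Word
     with Import, Convention => Intrinsic;
end Word_Ops;
@end example

The same declarations are accepted for a signed integer type; as noted above, the operations then behave as if the type were modular, so a shift can change the sign of the operand.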
@node Representation Clauses and Pragmas,Standard Library Routines,Intrinsic Subprograms,Top -@anchor{gnat_rm/representation_clauses_and_pragmas doc}@anchor{27d}@anchor{gnat_rm/representation_clauses_and_pragmas id1}@anchor{27e}@anchor{gnat_rm/representation_clauses_and_pragmas representation-clauses-and-pragmas}@anchor{d} +@anchor{gnat_rm/representation_clauses_and_pragmas doc}@anchor{280}@anchor{gnat_rm/representation_clauses_and_pragmas id1}@anchor{281}@anchor{gnat_rm/representation_clauses_and_pragmas representation-clauses-and-pragmas}@anchor{d} @chapter Representation Clauses and Pragmas @@ -18297,7 +18324,7 @@ and this section describes the additional capabilities provided. @end menu @node Alignment Clauses,Size Clauses,,Representation Clauses and Pragmas -@anchor{gnat_rm/representation_clauses_and_pragmas alignment-clauses}@anchor{27f}@anchor{gnat_rm/representation_clauses_and_pragmas id2}@anchor{280} +@anchor{gnat_rm/representation_clauses_and_pragmas alignment-clauses}@anchor{282}@anchor{gnat_rm/representation_clauses_and_pragmas id2}@anchor{283} @section Alignment Clauses @@ -18319,7 +18346,7 @@ For elementary types, the alignment is the minimum of the actual size of objects of the type divided by @code{Storage_Unit}, and the maximum alignment supported by the target. (This maximum alignment is given by the GNAT-specific attribute -@code{Standard'Maximum_Alignment}; see @ref{190,,Attribute Maximum_Alignment}.) +@code{Standard'Maximum_Alignment}; see @ref{193,,Attribute Maximum_Alignment}.) @geindex Maximum_Alignment attribute @@ -18428,7 +18455,7 @@ assumption is non-portable, and other compilers may choose different alignments for the subtype @code{RS}. @node Size Clauses,Storage_Size Clauses,Alignment Clauses,Representation Clauses and Pragmas -@anchor{gnat_rm/representation_clauses_and_pragmas id3}@anchor{281}@anchor{gnat_rm/representation_clauses_and_pragmas size-clauses}@anchor{282} +@anchor{gnat_rm/representation_clauses_and_pragmas id3}@anchor{284}@anchor{gnat_rm/representation_clauses_and_pragmas size-clauses}@anchor{285} @section Size Clauses @@ -18505,7 +18532,7 @@ if it is known that a Size value can be accommodated in an object of type Integer. @node Storage_Size Clauses,Size of Variant Record Objects,Size Clauses,Representation Clauses and Pragmas -@anchor{gnat_rm/representation_clauses_and_pragmas id4}@anchor{283}@anchor{gnat_rm/representation_clauses_and_pragmas storage-size-clauses}@anchor{284} +@anchor{gnat_rm/representation_clauses_and_pragmas id4}@anchor{286}@anchor{gnat_rm/representation_clauses_and_pragmas storage-size-clauses}@anchor{287} @section Storage_Size Clauses @@ -18578,7 +18605,7 @@ Of course in practice, there will not be any explicit allocators in the case of such an access declaration. @node Size of Variant Record Objects,Biased Representation,Storage_Size Clauses,Representation Clauses and Pragmas -@anchor{gnat_rm/representation_clauses_and_pragmas id5}@anchor{285}@anchor{gnat_rm/representation_clauses_and_pragmas size-of-variant-record-objects}@anchor{286} +@anchor{gnat_rm/representation_clauses_and_pragmas id5}@anchor{288}@anchor{gnat_rm/representation_clauses_and_pragmas size-of-variant-record-objects}@anchor{289} @section Size of Variant Record Objects @@ -18688,7 +18715,7 @@ the maximum size, regardless of the current variant value, the variant value. 
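Pulling together the three kinds of clauses described above, here is a minimal sketch; the type names are illustrative, and the alignment of 16 assumes the target supports it (it must not exceed @code{Standard'Maximum_Alignment}).

@example
package Rep_Demo is
   --  Alignment clause: force 16-byte alignment of the buffer type.
   type Aligned_Buffer is array (1 .. 64) of Character;
   for Aligned_Buffer'Alignment use 16;

   --  Size clause: confirm the value size of a small integer type.
   type Byte is range 0 .. 255;
   for Byte'Size use 8;

   --  Storage_Size clause: give this access type its own 4 KB pool.
   type Buffer_Access is access Aligned_Buffer;
   for Buffer_Access'Storage_Size use 4 * 1024;
end Rep_Demo;
@end example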
@node Biased Representation,Value_Size and Object_Size Clauses,Size of Variant Record Objects,Representation Clauses and Pragmas -@anchor{gnat_rm/representation_clauses_and_pragmas biased-representation}@anchor{287}@anchor{gnat_rm/representation_clauses_and_pragmas id6}@anchor{288} +@anchor{gnat_rm/representation_clauses_and_pragmas biased-representation}@anchor{28a}@anchor{gnat_rm/representation_clauses_and_pragmas id6}@anchor{28b} @section Biased Representation @@ -18726,7 +18753,7 @@ biased representation can be used for all discrete types except for enumeration types for which a representation clause is given. @node Value_Size and Object_Size Clauses,Component_Size Clauses,Biased Representation,Representation Clauses and Pragmas -@anchor{gnat_rm/representation_clauses_and_pragmas id7}@anchor{289}@anchor{gnat_rm/representation_clauses_and_pragmas value-size-and-object-size-clauses}@anchor{28a} +@anchor{gnat_rm/representation_clauses_and_pragmas id7}@anchor{28c}@anchor{gnat_rm/representation_clauses_and_pragmas value-size-and-object-size-clauses}@anchor{28d} @section Value_Size and Object_Size Clauses @@ -19042,7 +19069,7 @@ definition clause forces biased representation. This warning can be turned off using @code{-gnatw.B}. @node Component_Size Clauses,Bit_Order Clauses,Value_Size and Object_Size Clauses,Representation Clauses and Pragmas -@anchor{gnat_rm/representation_clauses_and_pragmas component-size-clauses}@anchor{28b}@anchor{gnat_rm/representation_clauses_and_pragmas id8}@anchor{28c} +@anchor{gnat_rm/representation_clauses_and_pragmas component-size-clauses}@anchor{28e}@anchor{gnat_rm/representation_clauses_and_pragmas id8}@anchor{28f} @section Component_Size Clauses @@ -19090,7 +19117,7 @@ and a pragma Pack for the same array type. if such duplicate clauses are given, the pragma Pack will be ignored. @node Bit_Order Clauses,Effect of Bit_Order on Byte Ordering,Component_Size Clauses,Representation Clauses and Pragmas -@anchor{gnat_rm/representation_clauses_and_pragmas bit-order-clauses}@anchor{28d}@anchor{gnat_rm/representation_clauses_and_pragmas id9}@anchor{28e} +@anchor{gnat_rm/representation_clauses_and_pragmas bit-order-clauses}@anchor{290}@anchor{gnat_rm/representation_clauses_and_pragmas id9}@anchor{291} @section Bit_Order Clauses @@ -19196,7 +19223,7 @@ if desired. The following section contains additional details regarding the issue of byte ordering. @node Effect of Bit_Order on Byte Ordering,Pragma Pack for Arrays,Bit_Order Clauses,Representation Clauses and Pragmas -@anchor{gnat_rm/representation_clauses_and_pragmas effect-of-bit-order-on-byte-ordering}@anchor{28f}@anchor{gnat_rm/representation_clauses_and_pragmas id10}@anchor{290} +@anchor{gnat_rm/representation_clauses_and_pragmas effect-of-bit-order-on-byte-ordering}@anchor{292}@anchor{gnat_rm/representation_clauses_and_pragmas id10}@anchor{293} @section Effect of Bit_Order on Byte Ordering @@ -19453,7 +19480,7 @@ to set the boolean constant @code{Master_Byte_First} in an appropriate manner. 
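The following sketch restates the distinction drawn above in code form: @code{Value_Size} gives the bits needed to represent values of the subtype, @code{Object_Size} the size used for stand-alone objects, and @code{Component_Size} the size of each component of an array type. The type names are illustrative, and the values given simply confirm the defaults GNAT would choose for these declarations.

@example
package Size_Demo is
   type My_Int is range 0 .. 1000;
   for My_Int'Value_Size  use 10;   --  values fit in 10 bits
   for My_Int'Object_Size use 16;   --  stand-alone objects use 16 bits

   type Flag_Array is array (0 .. 15) of Boolean;
   for Flag_Array'Component_Size use 1;   --  one bit per component
end Size_Demo;
@end example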
@node Pragma Pack for Arrays,Pragma Pack for Records,Effect of Bit_Order on Byte Ordering,Representation Clauses and Pragmas -@anchor{gnat_rm/representation_clauses_and_pragmas id11}@anchor{291}@anchor{gnat_rm/representation_clauses_and_pragmas pragma-pack-for-arrays}@anchor{292} +@anchor{gnat_rm/representation_clauses_and_pragmas id11}@anchor{294}@anchor{gnat_rm/representation_clauses_and_pragmas pragma-pack-for-arrays}@anchor{295} @section Pragma Pack for Arrays @@ -19573,7 +19600,7 @@ Here 31-bit packing is achieved as required, and no warning is generated, since in this case the programmer intention is clear. @node Pragma Pack for Records,Record Representation Clauses,Pragma Pack for Arrays,Representation Clauses and Pragmas -@anchor{gnat_rm/representation_clauses_and_pragmas id12}@anchor{293}@anchor{gnat_rm/representation_clauses_and_pragmas pragma-pack-for-records}@anchor{294} +@anchor{gnat_rm/representation_clauses_and_pragmas id12}@anchor{296}@anchor{gnat_rm/representation_clauses_and_pragmas pragma-pack-for-records}@anchor{297} @section Pragma Pack for Records @@ -19657,7 +19684,7 @@ array that is longer than 64 bits, so it is itself non-packable on boundary, and takes an integral number of bytes, i.e., 72 bits. @node Record Representation Clauses,Handling of Records with Holes,Pragma Pack for Records,Representation Clauses and Pragmas -@anchor{gnat_rm/representation_clauses_and_pragmas id13}@anchor{295}@anchor{gnat_rm/representation_clauses_and_pragmas record-representation-clauses}@anchor{296} +@anchor{gnat_rm/representation_clauses_and_pragmas id13}@anchor{298}@anchor{gnat_rm/representation_clauses_and_pragmas record-representation-clauses}@anchor{299} @section Record Representation Clauses @@ -19736,7 +19763,7 @@ end record; @end example @node Handling of Records with Holes,Enumeration Clauses,Record Representation Clauses,Representation Clauses and Pragmas -@anchor{gnat_rm/representation_clauses_and_pragmas handling-of-records-with-holes}@anchor{297}@anchor{gnat_rm/representation_clauses_and_pragmas id14}@anchor{298} +@anchor{gnat_rm/representation_clauses_and_pragmas handling-of-records-with-holes}@anchor{29a}@anchor{gnat_rm/representation_clauses_and_pragmas id14}@anchor{29b} @section Handling of Records with Holes @@ -19812,7 +19839,7 @@ for Hrec'Size use 64; @end example @node Enumeration Clauses,Address Clauses,Handling of Records with Holes,Representation Clauses and Pragmas -@anchor{gnat_rm/representation_clauses_and_pragmas enumeration-clauses}@anchor{299}@anchor{gnat_rm/representation_clauses_and_pragmas id15}@anchor{29a} +@anchor{gnat_rm/representation_clauses_and_pragmas enumeration-clauses}@anchor{29c}@anchor{gnat_rm/representation_clauses_and_pragmas id15}@anchor{29d} @section Enumeration Clauses @@ -19855,7 +19882,7 @@ the overhead of converting representation values to the corresponding positional values, (i.e., the value delivered by the @code{Pos} attribute). @node Address Clauses,Use of Address Clauses for Memory-Mapped I/O,Enumeration Clauses,Representation Clauses and Pragmas -@anchor{gnat_rm/representation_clauses_and_pragmas address-clauses}@anchor{29b}@anchor{gnat_rm/representation_clauses_and_pragmas id16}@anchor{29c} +@anchor{gnat_rm/representation_clauses_and_pragmas address-clauses}@anchor{29e}@anchor{gnat_rm/representation_clauses_and_pragmas id16}@anchor{29f} @section Address Clauses @@ -20195,7 +20222,7 @@ then the program compiles without the warning and when run will generate the output @code{X was not clobbered}. 
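As an illustration of the enumeration clauses discussed above, the following sketch gives an enumeration type the non-contiguous internal codes of a hypothetical external protocol; the type and literal values are invented for the example.

@example
package Protocol is
   type Command is (Reset, Start, Stop, Query);

   --  Internal codes must be given in increasing order; the holes
   --  between them are what makes conversion back to positional
   --  (Pos) values potentially costly, as noted above.
   for Command use
     (Reset => 16#01#,
      Start => 16#10#,
      Stop  => 16#11#,
      Query => 16#80#);
   for Command'Size use 8;
end Protocol;
@end example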
@node Use of Address Clauses for Memory-Mapped I/O,Effect of Convention on Representation,Address Clauses,Representation Clauses and Pragmas -@anchor{gnat_rm/representation_clauses_and_pragmas id17}@anchor{29d}@anchor{gnat_rm/representation_clauses_and_pragmas use-of-address-clauses-for-memory-mapped-i-o}@anchor{29e} +@anchor{gnat_rm/representation_clauses_and_pragmas id17}@anchor{2a0}@anchor{gnat_rm/representation_clauses_and_pragmas use-of-address-clauses-for-memory-mapped-i-o}@anchor{2a1} @section Use of Address Clauses for Memory-Mapped I/O @@ -20253,7 +20280,7 @@ provides the pragma @code{Volatile_Full_Access} which can be used in lieu of pragma @code{Atomic} and will give the additional guarantee. @node Effect of Convention on Representation,Conventions and Anonymous Access Types,Use of Address Clauses for Memory-Mapped I/O,Representation Clauses and Pragmas -@anchor{gnat_rm/representation_clauses_and_pragmas effect-of-convention-on-representation}@anchor{29f}@anchor{gnat_rm/representation_clauses_and_pragmas id18}@anchor{2a0} +@anchor{gnat_rm/representation_clauses_and_pragmas effect-of-convention-on-representation}@anchor{2a2}@anchor{gnat_rm/representation_clauses_and_pragmas id18}@anchor{2a3} @section Effect of Convention on Representation @@ -20331,7 +20358,7 @@ when one of these values is read, any nonzero value is treated as True. @end itemize @node Conventions and Anonymous Access Types,Determining the Representations chosen by GNAT,Effect of Convention on Representation,Representation Clauses and Pragmas -@anchor{gnat_rm/representation_clauses_and_pragmas conventions-and-anonymous-access-types}@anchor{2a1}@anchor{gnat_rm/representation_clauses_and_pragmas id19}@anchor{2a2} +@anchor{gnat_rm/representation_clauses_and_pragmas conventions-and-anonymous-access-types}@anchor{2a4}@anchor{gnat_rm/representation_clauses_and_pragmas id19}@anchor{2a5} @section Conventions and Anonymous Access Types @@ -20407,7 +20434,7 @@ package ConvComp is @end example @node Determining the Representations chosen by GNAT,,Conventions and Anonymous Access Types,Representation Clauses and Pragmas -@anchor{gnat_rm/representation_clauses_and_pragmas determining-the-representations-chosen-by-gnat}@anchor{2a3}@anchor{gnat_rm/representation_clauses_and_pragmas id20}@anchor{2a4} +@anchor{gnat_rm/representation_clauses_and_pragmas determining-the-representations-chosen-by-gnat}@anchor{2a6}@anchor{gnat_rm/representation_clauses_and_pragmas id20}@anchor{2a7} @section Determining the Representations chosen by GNAT @@ -20559,7 +20586,7 @@ generated by the compiler into the original source to fix and guarantee the actual representation to be used. @node Standard Library Routines,The Implementation of Standard I/O,Representation Clauses and Pragmas,Top -@anchor{gnat_rm/standard_library_routines doc}@anchor{2a5}@anchor{gnat_rm/standard_library_routines id1}@anchor{2a6}@anchor{gnat_rm/standard_library_routines standard-library-routines}@anchor{e} +@anchor{gnat_rm/standard_library_routines doc}@anchor{2a8}@anchor{gnat_rm/standard_library_routines id1}@anchor{2a9}@anchor{gnat_rm/standard_library_routines standard-library-routines}@anchor{e} @chapter Standard Library Routines @@ -21383,7 +21410,7 @@ For packages in Interfaces and System, all the RM defined packages are available in GNAT, see the Ada 2012 RM for full details. 
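Tying together the memory-mapped I/O advice above, here is a minimal sketch of a device register declared with an address clause and @code{Volatile_Full_Access}; the register type and the address @code{16#8000_0000#} are placeholders that a real driver would take from the hardware documentation.

@example
with System.Storage_Elements; use System.Storage_Elements;

package Device is
   --  A 32-bit control register; Volatile_Full_Access guarantees that
   --  every read or write accesses the full 32-bit register at once.
   type Register is mod 2 ** 32
     with Volatile_Full_Access;

   Control : Register
     with Import,                                  --  no initialization
          Address => To_Address (16#8000_0000#);   --  placeholder address
end Device;
@end example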
@node The Implementation of Standard I/O,The GNAT Library,Standard Library Routines,Top -@anchor{gnat_rm/the_implementation_of_standard_i_o doc}@anchor{2a7}@anchor{gnat_rm/the_implementation_of_standard_i_o id1}@anchor{2a8}@anchor{gnat_rm/the_implementation_of_standard_i_o the-implementation-of-standard-i-o}@anchor{f} +@anchor{gnat_rm/the_implementation_of_standard_i_o doc}@anchor{2aa}@anchor{gnat_rm/the_implementation_of_standard_i_o id1}@anchor{2ab}@anchor{gnat_rm/the_implementation_of_standard_i_o the-implementation-of-standard-i-o}@anchor{f} @chapter The Implementation of Standard I/O @@ -21435,7 +21462,7 @@ these additional facilities are also described in this chapter. @end menu @node Standard I/O Packages,FORM Strings,,The Implementation of Standard I/O -@anchor{gnat_rm/the_implementation_of_standard_i_o id2}@anchor{2a9}@anchor{gnat_rm/the_implementation_of_standard_i_o standard-i-o-packages}@anchor{2aa} +@anchor{gnat_rm/the_implementation_of_standard_i_o id2}@anchor{2ac}@anchor{gnat_rm/the_implementation_of_standard_i_o standard-i-o-packages}@anchor{2ad} @section Standard I/O Packages @@ -21506,7 +21533,7 @@ flush the common I/O streams and in particular Standard_Output before elaborating the Ada code. @node FORM Strings,Direct_IO,Standard I/O Packages,The Implementation of Standard I/O -@anchor{gnat_rm/the_implementation_of_standard_i_o form-strings}@anchor{2ab}@anchor{gnat_rm/the_implementation_of_standard_i_o id3}@anchor{2ac} +@anchor{gnat_rm/the_implementation_of_standard_i_o form-strings}@anchor{2ae}@anchor{gnat_rm/the_implementation_of_standard_i_o id3}@anchor{2af} @section FORM Strings @@ -21532,7 +21559,7 @@ unrecognized keyword appears in a form string, it is silently ignored and not considered invalid. @node Direct_IO,Sequential_IO,FORM Strings,The Implementation of Standard I/O -@anchor{gnat_rm/the_implementation_of_standard_i_o direct-io}@anchor{2ad}@anchor{gnat_rm/the_implementation_of_standard_i_o id4}@anchor{2ae} +@anchor{gnat_rm/the_implementation_of_standard_i_o direct-io}@anchor{2b0}@anchor{gnat_rm/the_implementation_of_standard_i_o id4}@anchor{2b1} @section Direct_IO @@ -21552,7 +21579,7 @@ There is no limit on the size of Direct_IO files, they are expanded as necessary to accommodate whatever records are written to the file. @node Sequential_IO,Text_IO,Direct_IO,The Implementation of Standard I/O -@anchor{gnat_rm/the_implementation_of_standard_i_o id5}@anchor{2af}@anchor{gnat_rm/the_implementation_of_standard_i_o sequential-io}@anchor{2b0} +@anchor{gnat_rm/the_implementation_of_standard_i_o id5}@anchor{2b2}@anchor{gnat_rm/the_implementation_of_standard_i_o sequential-io}@anchor{2b3} @section Sequential_IO @@ -21599,7 +21626,7 @@ using Stream_IO, and this is the preferred mechanism. In particular, the above program fragment rewritten to use Stream_IO will work correctly. @node Text_IO,Wide_Text_IO,Sequential_IO,The Implementation of Standard I/O -@anchor{gnat_rm/the_implementation_of_standard_i_o id6}@anchor{2b1}@anchor{gnat_rm/the_implementation_of_standard_i_o text-io}@anchor{2b2} +@anchor{gnat_rm/the_implementation_of_standard_i_o id6}@anchor{2b4}@anchor{gnat_rm/the_implementation_of_standard_i_o text-io}@anchor{2b5} @section Text_IO @@ -21682,7 +21709,7 @@ the file. 
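Since FORM strings are keyword/value lists and unrecognized keywords are silently ignored, a hedged usage sketch might look like the following; the @code{shared=yes} keyword spelling and the file name are assumptions, not confirmed by the hunks above:

@example
with Ada.Text_IO; use Ada.Text_IO;

procedure Form_Demo is
   F : File_Type;
begin
   --  "shared=yes" is an assumed GNAT form keyword; per the text above,
   --  an unrecognized keyword would simply be ignored.
   Create (F, Out_File, "log.txt", Form => "shared=yes");
   Put_Line (F, "hello");
   Close (F);
end Form_Demo;
@end example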
@end menu @node Stream Pointer Positioning,Reading and Writing Non-Regular Files,,Text_IO -@anchor{gnat_rm/the_implementation_of_standard_i_o id7}@anchor{2b3}@anchor{gnat_rm/the_implementation_of_standard_i_o stream-pointer-positioning}@anchor{2b4} +@anchor{gnat_rm/the_implementation_of_standard_i_o id7}@anchor{2b6}@anchor{gnat_rm/the_implementation_of_standard_i_o stream-pointer-positioning}@anchor{2b7} @subsection Stream Pointer Positioning @@ -21718,7 +21745,7 @@ between two Ada files, then the difference may be observable in some situations. @node Reading and Writing Non-Regular Files,Get_Immediate,Stream Pointer Positioning,Text_IO -@anchor{gnat_rm/the_implementation_of_standard_i_o id8}@anchor{2b5}@anchor{gnat_rm/the_implementation_of_standard_i_o reading-and-writing-non-regular-files}@anchor{2b6} +@anchor{gnat_rm/the_implementation_of_standard_i_o id8}@anchor{2b8}@anchor{gnat_rm/the_implementation_of_standard_i_o reading-and-writing-non-regular-files}@anchor{2b9} @subsection Reading and Writing Non-Regular Files @@ -21769,7 +21796,7 @@ to read data past that end of file indication, until another end of file indication is entered. @node Get_Immediate,Treating Text_IO Files as Streams,Reading and Writing Non-Regular Files,Text_IO -@anchor{gnat_rm/the_implementation_of_standard_i_o get-immediate}@anchor{2b7}@anchor{gnat_rm/the_implementation_of_standard_i_o id9}@anchor{2b8} +@anchor{gnat_rm/the_implementation_of_standard_i_o get-immediate}@anchor{2ba}@anchor{gnat_rm/the_implementation_of_standard_i_o id9}@anchor{2bb} @subsection Get_Immediate @@ -21787,7 +21814,7 @@ possible), it is undefined whether the FF character will be treated as a page mark. @node Treating Text_IO Files as Streams,Text_IO Extensions,Get_Immediate,Text_IO -@anchor{gnat_rm/the_implementation_of_standard_i_o id10}@anchor{2b9}@anchor{gnat_rm/the_implementation_of_standard_i_o treating-text-io-files-as-streams}@anchor{2ba} +@anchor{gnat_rm/the_implementation_of_standard_i_o id10}@anchor{2bc}@anchor{gnat_rm/the_implementation_of_standard_i_o treating-text-io-files-as-streams}@anchor{2bd} @subsection Treating Text_IO Files as Streams @@ -21803,7 +21830,7 @@ skipped and the effect is similar to that described above for @code{Get_Immediate}. @node Text_IO Extensions,Text_IO Facilities for Unbounded Strings,Treating Text_IO Files as Streams,Text_IO -@anchor{gnat_rm/the_implementation_of_standard_i_o id11}@anchor{2bb}@anchor{gnat_rm/the_implementation_of_standard_i_o text-io-extensions}@anchor{2bc} +@anchor{gnat_rm/the_implementation_of_standard_i_o id11}@anchor{2be}@anchor{gnat_rm/the_implementation_of_standard_i_o text-io-extensions}@anchor{2bf} @subsection Text_IO Extensions @@ -21831,7 +21858,7 @@ the string is to be read. @end itemize @node Text_IO Facilities for Unbounded Strings,,Text_IO Extensions,Text_IO -@anchor{gnat_rm/the_implementation_of_standard_i_o id12}@anchor{2bd}@anchor{gnat_rm/the_implementation_of_standard_i_o text-io-facilities-for-unbounded-strings}@anchor{2be} +@anchor{gnat_rm/the_implementation_of_standard_i_o id12}@anchor{2c0}@anchor{gnat_rm/the_implementation_of_standard_i_o text-io-facilities-for-unbounded-strings}@anchor{2c1} @subsection Text_IO Facilities for Unbounded Strings @@ -21879,7 +21906,7 @@ files @code{a-szuzti.ads} and @code{a-szuzti.adb} provides similar extended @code{Wide_Wide_Text_IO} functionality for unbounded wide wide strings. 
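The `Treating Text_IO Files as Streams' subsection above is based on the standard @code{Ada.Text_IO.Text_Streams} child package; a minimal sketch follows (the file name is invented):

@example
with Ada.Text_IO;              use Ada.Text_IO;
with Ada.Text_IO.Text_Streams; use Ada.Text_IO.Text_Streams;

procedure Stream_Demo is
   F : File_Type;
   S : Stream_Access;
begin
   Create (F, Out_File, "demo.dat");
   S := Stream (F);
   --  Write raw characters, bypassing Text_IO line/page formatting.
   String'Write (S, "raw bytes");
   Close (F);
end Stream_Demo;
@end example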
@node Wide_Text_IO,Wide_Wide_Text_IO,Text_IO,The Implementation of Standard I/O -@anchor{gnat_rm/the_implementation_of_standard_i_o id13}@anchor{2bf}@anchor{gnat_rm/the_implementation_of_standard_i_o wide-text-io}@anchor{2c0} +@anchor{gnat_rm/the_implementation_of_standard_i_o id13}@anchor{2c2}@anchor{gnat_rm/the_implementation_of_standard_i_o wide-text-io}@anchor{2c3} @section Wide_Text_IO @@ -22126,12 +22153,12 @@ input also causes Constraint_Error to be raised. @end menu @node Stream Pointer Positioning<2>,Reading and Writing Non-Regular Files<2>,,Wide_Text_IO -@anchor{gnat_rm/the_implementation_of_standard_i_o id14}@anchor{2c1}@anchor{gnat_rm/the_implementation_of_standard_i_o stream-pointer-positioning-1}@anchor{2c2} +@anchor{gnat_rm/the_implementation_of_standard_i_o id14}@anchor{2c4}@anchor{gnat_rm/the_implementation_of_standard_i_o stream-pointer-positioning-1}@anchor{2c5} @subsection Stream Pointer Positioning @code{Ada.Wide_Text_IO} is similar to @code{Ada.Text_IO} in its handling -of stream pointer positioning (@ref{2b2,,Text_IO}). There is one additional +of stream pointer positioning (@ref{2b5,,Text_IO}). There is one additional case: If @code{Ada.Wide_Text_IO.Look_Ahead} reads a character outside the @@ -22150,7 +22177,7 @@ to a normal program using @code{Wide_Text_IO}. However, this discrepancy can be observed if the wide text file shares a stream with another file. @node Reading and Writing Non-Regular Files<2>,,Stream Pointer Positioning<2>,Wide_Text_IO -@anchor{gnat_rm/the_implementation_of_standard_i_o id15}@anchor{2c3}@anchor{gnat_rm/the_implementation_of_standard_i_o reading-and-writing-non-regular-files-1}@anchor{2c4} +@anchor{gnat_rm/the_implementation_of_standard_i_o id15}@anchor{2c6}@anchor{gnat_rm/the_implementation_of_standard_i_o reading-and-writing-non-regular-files-1}@anchor{2c7} @subsection Reading and Writing Non-Regular Files @@ -22161,7 +22188,7 @@ treated as data characters), and @code{End_Of_Page} always returns it is possible to read beyond an end of file. @node Wide_Wide_Text_IO,Stream_IO,Wide_Text_IO,The Implementation of Standard I/O -@anchor{gnat_rm/the_implementation_of_standard_i_o id16}@anchor{2c5}@anchor{gnat_rm/the_implementation_of_standard_i_o wide-wide-text-io}@anchor{2c6} +@anchor{gnat_rm/the_implementation_of_standard_i_o id16}@anchor{2c8}@anchor{gnat_rm/the_implementation_of_standard_i_o wide-wide-text-io}@anchor{2c9} @section Wide_Wide_Text_IO @@ -22330,12 +22357,12 @@ input also causes Constraint_Error to be raised. @end menu @node Stream Pointer Positioning<3>,Reading and Writing Non-Regular Files<3>,,Wide_Wide_Text_IO -@anchor{gnat_rm/the_implementation_of_standard_i_o id17}@anchor{2c7}@anchor{gnat_rm/the_implementation_of_standard_i_o stream-pointer-positioning-2}@anchor{2c8} +@anchor{gnat_rm/the_implementation_of_standard_i_o id17}@anchor{2ca}@anchor{gnat_rm/the_implementation_of_standard_i_o stream-pointer-positioning-2}@anchor{2cb} @subsection Stream Pointer Positioning @code{Ada.Wide_Wide_Text_IO} is similar to @code{Ada.Text_IO} in its handling -of stream pointer positioning (@ref{2b2,,Text_IO}). There is one additional +of stream pointer positioning (@ref{2b5,,Text_IO}). There is one additional case: If @code{Ada.Wide_Wide_Text_IO.Look_Ahead} reads a character outside the @@ -22354,7 +22381,7 @@ to a normal program using @code{Wide_Wide_Text_IO}. However, this discrepancy can be observed if the wide text file shares a stream with another file. 
@node Reading and Writing Non-Regular Files<3>,,Stream Pointer Positioning<3>,Wide_Wide_Text_IO -@anchor{gnat_rm/the_implementation_of_standard_i_o id18}@anchor{2c9}@anchor{gnat_rm/the_implementation_of_standard_i_o reading-and-writing-non-regular-files-2}@anchor{2ca} +@anchor{gnat_rm/the_implementation_of_standard_i_o id18}@anchor{2cc}@anchor{gnat_rm/the_implementation_of_standard_i_o reading-and-writing-non-regular-files-2}@anchor{2cd} @subsection Reading and Writing Non-Regular Files @@ -22365,7 +22392,7 @@ treated as data characters), and @code{End_Of_Page} always returns it is possible to read beyond an end of file. @node Stream_IO,Text Translation,Wide_Wide_Text_IO,The Implementation of Standard I/O -@anchor{gnat_rm/the_implementation_of_standard_i_o id19}@anchor{2cb}@anchor{gnat_rm/the_implementation_of_standard_i_o stream-io}@anchor{2cc} +@anchor{gnat_rm/the_implementation_of_standard_i_o id19}@anchor{2ce}@anchor{gnat_rm/the_implementation_of_standard_i_o stream-io}@anchor{2cf} @section Stream_IO @@ -22387,7 +22414,7 @@ manner described for stream attributes. @end itemize @node Text Translation,Shared Files,Stream_IO,The Implementation of Standard I/O -@anchor{gnat_rm/the_implementation_of_standard_i_o id20}@anchor{2cd}@anchor{gnat_rm/the_implementation_of_standard_i_o text-translation}@anchor{2ce} +@anchor{gnat_rm/the_implementation_of_standard_i_o id20}@anchor{2d0}@anchor{gnat_rm/the_implementation_of_standard_i_o text-translation}@anchor{2d1} @section Text Translation @@ -22421,7 +22448,7 @@ mode. (corresponds to_O_U16TEXT). @end itemize @node Shared Files,Filenames encoding,Text Translation,The Implementation of Standard I/O -@anchor{gnat_rm/the_implementation_of_standard_i_o id21}@anchor{2cf}@anchor{gnat_rm/the_implementation_of_standard_i_o shared-files}@anchor{2d0} +@anchor{gnat_rm/the_implementation_of_standard_i_o id21}@anchor{2d2}@anchor{gnat_rm/the_implementation_of_standard_i_o shared-files}@anchor{2d3} @section Shared Files @@ -22484,7 +22511,7 @@ heterogeneous input-output. Although this approach will work in GNAT if for this purpose (using the stream attributes) @node Filenames encoding,File content encoding,Shared Files,The Implementation of Standard I/O -@anchor{gnat_rm/the_implementation_of_standard_i_o filenames-encoding}@anchor{2d1}@anchor{gnat_rm/the_implementation_of_standard_i_o id22}@anchor{2d2} +@anchor{gnat_rm/the_implementation_of_standard_i_o filenames-encoding}@anchor{2d4}@anchor{gnat_rm/the_implementation_of_standard_i_o id22}@anchor{2d5} @section Filenames encoding @@ -22524,7 +22551,7 @@ platform. On the other Operating Systems the run-time is supporting UTF-8 natively. @node File content encoding,Open Modes,Filenames encoding,The Implementation of Standard I/O -@anchor{gnat_rm/the_implementation_of_standard_i_o file-content-encoding}@anchor{2d3}@anchor{gnat_rm/the_implementation_of_standard_i_o id23}@anchor{2d4} +@anchor{gnat_rm/the_implementation_of_standard_i_o file-content-encoding}@anchor{2d6}@anchor{gnat_rm/the_implementation_of_standard_i_o id23}@anchor{2d7} @section File content encoding @@ -22557,7 +22584,7 @@ Unicode 8-bit encoding This encoding is only supported on the Windows platform. 
@node Open Modes,Operations on C Streams,File content encoding,The Implementation of Standard I/O -@anchor{gnat_rm/the_implementation_of_standard_i_o id24}@anchor{2d5}@anchor{gnat_rm/the_implementation_of_standard_i_o open-modes}@anchor{2d6} +@anchor{gnat_rm/the_implementation_of_standard_i_o id24}@anchor{2d8}@anchor{gnat_rm/the_implementation_of_standard_i_o open-modes}@anchor{2d9} @section Open Modes @@ -22660,7 +22687,7 @@ subsequently requires switching from reading to writing or vice-versa, then the file is reopened in @code{r+} mode to permit the required operation. @node Operations on C Streams,Interfacing to C Streams,Open Modes,The Implementation of Standard I/O -@anchor{gnat_rm/the_implementation_of_standard_i_o id25}@anchor{2d7}@anchor{gnat_rm/the_implementation_of_standard_i_o operations-on-c-streams}@anchor{2d8} +@anchor{gnat_rm/the_implementation_of_standard_i_o id25}@anchor{2da}@anchor{gnat_rm/the_implementation_of_standard_i_o operations-on-c-streams}@anchor{2db} @section Operations on C Streams @@ -22820,7 +22847,7 @@ end Interfaces.C_Streams; @end example @node Interfacing to C Streams,,Operations on C Streams,The Implementation of Standard I/O -@anchor{gnat_rm/the_implementation_of_standard_i_o id26}@anchor{2d9}@anchor{gnat_rm/the_implementation_of_standard_i_o interfacing-to-c-streams}@anchor{2da} +@anchor{gnat_rm/the_implementation_of_standard_i_o id26}@anchor{2dc}@anchor{gnat_rm/the_implementation_of_standard_i_o interfacing-to-c-streams}@anchor{2dd} @section Interfacing to C Streams @@ -22913,7 +22940,7 @@ imported from a C program, allowing an Ada file to operate on an existing C file. @node The GNAT Library,Interfacing to Other Languages,The Implementation of Standard I/O,Top -@anchor{gnat_rm/the_gnat_library doc}@anchor{2db}@anchor{gnat_rm/the_gnat_library id1}@anchor{2dc}@anchor{gnat_rm/the_gnat_library the-gnat-library}@anchor{10} +@anchor{gnat_rm/the_gnat_library doc}@anchor{2de}@anchor{gnat_rm/the_gnat_library id1}@anchor{2df}@anchor{gnat_rm/the_gnat_library the-gnat-library}@anchor{10} @chapter The GNAT Library @@ -23099,7 +23126,7 @@ of GNAT, and will generate a warning message. @end menu @node Ada Characters Latin_9 a-chlat9 ads,Ada Characters Wide_Latin_1 a-cwila1 ads,,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-characters-latin-9-a-chlat9-ads}@anchor{2dd}@anchor{gnat_rm/the_gnat_library id2}@anchor{2de} +@anchor{gnat_rm/the_gnat_library ada-characters-latin-9-a-chlat9-ads}@anchor{2e0}@anchor{gnat_rm/the_gnat_library id2}@anchor{2e1} @section @code{Ada.Characters.Latin_9} (@code{a-chlat9.ads}) @@ -23116,7 +23143,7 @@ is specifically authorized by the Ada Reference Manual (RM A.3.3(27)). @node Ada Characters Wide_Latin_1 a-cwila1 ads,Ada Characters Wide_Latin_9 a-cwila9 ads,Ada Characters Latin_9 a-chlat9 ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-characters-wide-latin-1-a-cwila1-ads}@anchor{2df}@anchor{gnat_rm/the_gnat_library id3}@anchor{2e0} +@anchor{gnat_rm/the_gnat_library ada-characters-wide-latin-1-a-cwila1-ads}@anchor{2e2}@anchor{gnat_rm/the_gnat_library id3}@anchor{2e3} @section @code{Ada.Characters.Wide_Latin_1} (@code{a-cwila1.ads}) @@ -23133,7 +23160,7 @@ is specifically authorized by the Ada Reference Manual (RM A.3.3(27)). 
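For the `Interfacing to C Streams' section above, here is a hedged sketch of extracting the underlying C stream from an Ada file; the @code{C_Stream} function name is an assumption about @code{Ada.Text_IO.C_Streams}, and the file name is invented:

@example
with Ada.Text_IO;           use Ada.Text_IO;
with Ada.Text_IO.C_Streams;
with Interfaces.C_Streams;

procedure Share_With_C is
   F  : File_Type;
   CS : Interfaces.C_Streams.FILEs;
begin
   Open (F, In_File, "input.txt");
   --  Assumed subprogram: C_Stream, yielding the FILE pointer behind F.
   CS := Ada.Text_IO.C_Streams.C_Stream (F);
   --  CS could now be passed to C code that expects a FILE pointer.
   Close (F);
end Share_With_C;
@end example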
@node Ada Characters Wide_Latin_9 a-cwila9 ads,Ada Characters Wide_Wide_Latin_1 a-chzla1 ads,Ada Characters Wide_Latin_1 a-cwila1 ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-characters-wide-latin-9-a-cwila9-ads}@anchor{2e1}@anchor{gnat_rm/the_gnat_library id4}@anchor{2e2} +@anchor{gnat_rm/the_gnat_library ada-characters-wide-latin-9-a-cwila9-ads}@anchor{2e4}@anchor{gnat_rm/the_gnat_library id4}@anchor{2e5} @section @code{Ada.Characters.Wide_Latin_9} (@code{a-cwila9.ads}) @@ -23150,7 +23177,7 @@ is specifically authorized by the Ada Reference Manual (RM A.3.3(27)). @node Ada Characters Wide_Wide_Latin_1 a-chzla1 ads,Ada Characters Wide_Wide_Latin_9 a-chzla9 ads,Ada Characters Wide_Latin_9 a-cwila9 ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-characters-wide-wide-latin-1-a-chzla1-ads}@anchor{2e3}@anchor{gnat_rm/the_gnat_library id5}@anchor{2e4} +@anchor{gnat_rm/the_gnat_library ada-characters-wide-wide-latin-1-a-chzla1-ads}@anchor{2e6}@anchor{gnat_rm/the_gnat_library id5}@anchor{2e7} @section @code{Ada.Characters.Wide_Wide_Latin_1} (@code{a-chzla1.ads}) @@ -23167,7 +23194,7 @@ is specifically authorized by the Ada Reference Manual (RM A.3.3(27)). @node Ada Characters Wide_Wide_Latin_9 a-chzla9 ads,Ada Containers Bounded_Holders a-coboho ads,Ada Characters Wide_Wide_Latin_1 a-chzla1 ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-characters-wide-wide-latin-9-a-chzla9-ads}@anchor{2e5}@anchor{gnat_rm/the_gnat_library id6}@anchor{2e6} +@anchor{gnat_rm/the_gnat_library ada-characters-wide-wide-latin-9-a-chzla9-ads}@anchor{2e8}@anchor{gnat_rm/the_gnat_library id6}@anchor{2e9} @section @code{Ada.Characters.Wide_Wide_Latin_9} (@code{a-chzla9.ads}) @@ -23184,7 +23211,7 @@ is specifically authorized by the Ada Reference Manual (RM A.3.3(27)). @node Ada Containers Bounded_Holders a-coboho ads,Ada Command_Line Environment a-colien ads,Ada Characters Wide_Wide_Latin_9 a-chzla9 ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-containers-bounded-holders-a-coboho-ads}@anchor{2e7}@anchor{gnat_rm/the_gnat_library id7}@anchor{2e8} +@anchor{gnat_rm/the_gnat_library ada-containers-bounded-holders-a-coboho-ads}@anchor{2ea}@anchor{gnat_rm/the_gnat_library id7}@anchor{2eb} @section @code{Ada.Containers.Bounded_Holders} (@code{a-coboho.ads}) @@ -23196,7 +23223,7 @@ This child of @code{Ada.Containers} defines a modified version of Indefinite_Holders that avoids heap allocation. @node Ada Command_Line Environment a-colien ads,Ada Command_Line Remove a-colire ads,Ada Containers Bounded_Holders a-coboho ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-command-line-environment-a-colien-ads}@anchor{2e9}@anchor{gnat_rm/the_gnat_library id8}@anchor{2ea} +@anchor{gnat_rm/the_gnat_library ada-command-line-environment-a-colien-ads}@anchor{2ec}@anchor{gnat_rm/the_gnat_library id8}@anchor{2ed} @section @code{Ada.Command_Line.Environment} (@code{a-colien.ads}) @@ -23209,7 +23236,7 @@ provides a mechanism for obtaining environment values on systems where this concept makes sense. 
@node Ada Command_Line Remove a-colire ads,Ada Command_Line Response_File a-clrefi ads,Ada Command_Line Environment a-colien ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-command-line-remove-a-colire-ads}@anchor{2eb}@anchor{gnat_rm/the_gnat_library id9}@anchor{2ec} +@anchor{gnat_rm/the_gnat_library ada-command-line-remove-a-colire-ads}@anchor{2ee}@anchor{gnat_rm/the_gnat_library id9}@anchor{2ef} @section @code{Ada.Command_Line.Remove} (@code{a-colire.ads}) @@ -23227,7 +23254,7 @@ to further calls to the subprograms in @code{Ada.Command_Line}. These calls will not see the removed argument. @node Ada Command_Line Response_File a-clrefi ads,Ada Direct_IO C_Streams a-diocst ads,Ada Command_Line Remove a-colire ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-command-line-response-file-a-clrefi-ads}@anchor{2ed}@anchor{gnat_rm/the_gnat_library id10}@anchor{2ee} +@anchor{gnat_rm/the_gnat_library ada-command-line-response-file-a-clrefi-ads}@anchor{2f0}@anchor{gnat_rm/the_gnat_library id10}@anchor{2f1} @section @code{Ada.Command_Line.Response_File} (@code{a-clrefi.ads}) @@ -23247,7 +23274,7 @@ Using a response file allow passing a set of arguments to an executable longer than the maximum allowed by the system on the command line. @node Ada Direct_IO C_Streams a-diocst ads,Ada Exceptions Is_Null_Occurrence a-einuoc ads,Ada Command_Line Response_File a-clrefi ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-direct-io-c-streams-a-diocst-ads}@anchor{2ef}@anchor{gnat_rm/the_gnat_library id11}@anchor{2f0} +@anchor{gnat_rm/the_gnat_library ada-direct-io-c-streams-a-diocst-ads}@anchor{2f2}@anchor{gnat_rm/the_gnat_library id11}@anchor{2f3} @section @code{Ada.Direct_IO.C_Streams} (@code{a-diocst.ads}) @@ -23262,7 +23289,7 @@ extracted from a file opened on the Ada side, and an Ada file can be constructed from a stream opened on the C side. @node Ada Exceptions Is_Null_Occurrence a-einuoc ads,Ada Exceptions Last_Chance_Handler a-elchha ads,Ada Direct_IO C_Streams a-diocst ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-exceptions-is-null-occurrence-a-einuoc-ads}@anchor{2f1}@anchor{gnat_rm/the_gnat_library id12}@anchor{2f2} +@anchor{gnat_rm/the_gnat_library ada-exceptions-is-null-occurrence-a-einuoc-ads}@anchor{2f4}@anchor{gnat_rm/the_gnat_library id12}@anchor{2f5} @section @code{Ada.Exceptions.Is_Null_Occurrence} (@code{a-einuoc.ads}) @@ -23276,7 +23303,7 @@ exception occurrence (@code{Null_Occurrence}) without raising an exception. @node Ada Exceptions Last_Chance_Handler a-elchha ads,Ada Exceptions Traceback a-exctra ads,Ada Exceptions Is_Null_Occurrence a-einuoc ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-exceptions-last-chance-handler-a-elchha-ads}@anchor{2f3}@anchor{gnat_rm/the_gnat_library id13}@anchor{2f4} +@anchor{gnat_rm/the_gnat_library ada-exceptions-last-chance-handler-a-elchha-ads}@anchor{2f6}@anchor{gnat_rm/the_gnat_library id13}@anchor{2f7} @section @code{Ada.Exceptions.Last_Chance_Handler} (@code{a-elchha.ads}) @@ -23290,7 +23317,7 @@ exceptions (hence the name last chance), and perform clean ups before terminating the program. Note that this subprogram never returns. 
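A small sketch of the @code{Ada.Exceptions.Is_Null_Occurrence} test described above, assuming it is the library-level function declared in @code{a-einuoc.ads}:

@example
with Ada.Exceptions;                    use Ada.Exceptions;
with Ada.Exceptions.Is_Null_Occurrence;
with Ada.Text_IO;                       use Ada.Text_IO;

procedure Null_Occurrence_Demo is
   Saved : Exception_Occurrence;
begin
   --  Copy the distinguished Null_Occurrence value and test for it
   --  without raising anything.
   Save_Occurrence (Saved, Null_Occurrence);
   if Ada.Exceptions.Is_Null_Occurrence (Saved) then
      Put_Line ("no exception captured");
   end if;
end Null_Occurrence_Demo;
@end example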
@node Ada Exceptions Traceback a-exctra ads,Ada Sequential_IO C_Streams a-siocst ads,Ada Exceptions Last_Chance_Handler a-elchha ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-exceptions-traceback-a-exctra-ads}@anchor{2f5}@anchor{gnat_rm/the_gnat_library id14}@anchor{2f6} +@anchor{gnat_rm/the_gnat_library ada-exceptions-traceback-a-exctra-ads}@anchor{2f8}@anchor{gnat_rm/the_gnat_library id14}@anchor{2f9} @section @code{Ada.Exceptions.Traceback} (@code{a-exctra.ads}) @@ -23303,7 +23330,7 @@ give a traceback array of addresses based on an exception occurrence. @node Ada Sequential_IO C_Streams a-siocst ads,Ada Streams Stream_IO C_Streams a-ssicst ads,Ada Exceptions Traceback a-exctra ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-sequential-io-c-streams-a-siocst-ads}@anchor{2f7}@anchor{gnat_rm/the_gnat_library id15}@anchor{2f8} +@anchor{gnat_rm/the_gnat_library ada-sequential-io-c-streams-a-siocst-ads}@anchor{2fa}@anchor{gnat_rm/the_gnat_library id15}@anchor{2fb} @section @code{Ada.Sequential_IO.C_Streams} (@code{a-siocst.ads}) @@ -23318,7 +23345,7 @@ extracted from a file opened on the Ada side, and an Ada file can be constructed from a stream opened on the C side. @node Ada Streams Stream_IO C_Streams a-ssicst ads,Ada Strings Unbounded Text_IO a-suteio ads,Ada Sequential_IO C_Streams a-siocst ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-streams-stream-io-c-streams-a-ssicst-ads}@anchor{2f9}@anchor{gnat_rm/the_gnat_library id16}@anchor{2fa} +@anchor{gnat_rm/the_gnat_library ada-streams-stream-io-c-streams-a-ssicst-ads}@anchor{2fc}@anchor{gnat_rm/the_gnat_library id16}@anchor{2fd} @section @code{Ada.Streams.Stream_IO.C_Streams} (@code{a-ssicst.ads}) @@ -23333,7 +23360,7 @@ extracted from a file opened on the Ada side, and an Ada file can be constructed from a stream opened on the C side. @node Ada Strings Unbounded Text_IO a-suteio ads,Ada Strings Wide_Unbounded Wide_Text_IO a-swuwti ads,Ada Streams Stream_IO C_Streams a-ssicst ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-strings-unbounded-text-io-a-suteio-ads}@anchor{2fb}@anchor{gnat_rm/the_gnat_library id17}@anchor{2fc} +@anchor{gnat_rm/the_gnat_library ada-strings-unbounded-text-io-a-suteio-ads}@anchor{2fe}@anchor{gnat_rm/the_gnat_library id17}@anchor{2ff} @section @code{Ada.Strings.Unbounded.Text_IO} (@code{a-suteio.ads}) @@ -23350,7 +23377,7 @@ strings, avoiding the necessity for an intermediate operation with ordinary strings. @node Ada Strings Wide_Unbounded Wide_Text_IO a-swuwti ads,Ada Strings Wide_Wide_Unbounded Wide_Wide_Text_IO a-szuzti ads,Ada Strings Unbounded Text_IO a-suteio ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-strings-wide-unbounded-wide-text-io-a-swuwti-ads}@anchor{2fd}@anchor{gnat_rm/the_gnat_library id18}@anchor{2fe} +@anchor{gnat_rm/the_gnat_library ada-strings-wide-unbounded-wide-text-io-a-swuwti-ads}@anchor{300}@anchor{gnat_rm/the_gnat_library id18}@anchor{301} @section @code{Ada.Strings.Wide_Unbounded.Wide_Text_IO} (@code{a-swuwti.ads}) @@ -23367,7 +23394,7 @@ wide strings, avoiding the necessity for an intermediate operation with ordinary wide strings. 
@node Ada Strings Wide_Wide_Unbounded Wide_Wide_Text_IO a-szuzti ads,Ada Task_Initialization a-tasini ads,Ada Strings Wide_Unbounded Wide_Text_IO a-swuwti ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-strings-wide-wide-unbounded-wide-wide-text-io-a-szuzti-ads}@anchor{2ff}@anchor{gnat_rm/the_gnat_library id19}@anchor{300} +@anchor{gnat_rm/the_gnat_library ada-strings-wide-wide-unbounded-wide-wide-text-io-a-szuzti-ads}@anchor{302}@anchor{gnat_rm/the_gnat_library id19}@anchor{303} @section @code{Ada.Strings.Wide_Wide_Unbounded.Wide_Wide_Text_IO} (@code{a-szuzti.ads}) @@ -23384,7 +23411,7 @@ wide wide strings, avoiding the necessity for an intermediate operation with ordinary wide wide strings. @node Ada Task_Initialization a-tasini ads,Ada Text_IO C_Streams a-tiocst ads,Ada Strings Wide_Wide_Unbounded Wide_Wide_Text_IO a-szuzti ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-task-initialization-a-tasini-ads}@anchor{301}@anchor{gnat_rm/the_gnat_library id20}@anchor{302} +@anchor{gnat_rm/the_gnat_library ada-task-initialization-a-tasini-ads}@anchor{304}@anchor{gnat_rm/the_gnat_library id20}@anchor{305} @section @code{Ada.Task_Initialization} (@code{a-tasini.ads}) @@ -23396,7 +23423,7 @@ parameterless procedures. Note that such a handler is only invoked for those tasks activated after the handler is set. @node Ada Text_IO C_Streams a-tiocst ads,Ada Text_IO Reset_Standard_Files a-tirsfi ads,Ada Task_Initialization a-tasini ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-text-io-c-streams-a-tiocst-ads}@anchor{303}@anchor{gnat_rm/the_gnat_library id21}@anchor{304} +@anchor{gnat_rm/the_gnat_library ada-text-io-c-streams-a-tiocst-ads}@anchor{306}@anchor{gnat_rm/the_gnat_library id21}@anchor{307} @section @code{Ada.Text_IO.C_Streams} (@code{a-tiocst.ads}) @@ -23411,7 +23438,7 @@ extracted from a file opened on the Ada side, and an Ada file can be constructed from a stream opened on the C side. @node Ada Text_IO Reset_Standard_Files a-tirsfi ads,Ada Wide_Characters Unicode a-wichun ads,Ada Text_IO C_Streams a-tiocst ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-text-io-reset-standard-files-a-tirsfi-ads}@anchor{305}@anchor{gnat_rm/the_gnat_library id22}@anchor{306} +@anchor{gnat_rm/the_gnat_library ada-text-io-reset-standard-files-a-tirsfi-ads}@anchor{308}@anchor{gnat_rm/the_gnat_library id22}@anchor{309} @section @code{Ada.Text_IO.Reset_Standard_Files} (@code{a-tirsfi.ads}) @@ -23426,7 +23453,7 @@ execution (for example a standard input file may be redefined to be interactive). @node Ada Wide_Characters Unicode a-wichun ads,Ada Wide_Text_IO C_Streams a-wtcstr ads,Ada Text_IO Reset_Standard_Files a-tirsfi ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-wide-characters-unicode-a-wichun-ads}@anchor{307}@anchor{gnat_rm/the_gnat_library id23}@anchor{308} +@anchor{gnat_rm/the_gnat_library ada-wide-characters-unicode-a-wichun-ads}@anchor{30a}@anchor{gnat_rm/the_gnat_library id23}@anchor{30b} @section @code{Ada.Wide_Characters.Unicode} (@code{a-wichun.ads}) @@ -23439,7 +23466,7 @@ This package provides subprograms that allow categorization of Wide_Character values according to Unicode categories. 
@node Ada Wide_Text_IO C_Streams a-wtcstr ads,Ada Wide_Text_IO Reset_Standard_Files a-wrstfi ads,Ada Wide_Characters Unicode a-wichun ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-wide-text-io-c-streams-a-wtcstr-ads}@anchor{309}@anchor{gnat_rm/the_gnat_library id24}@anchor{30a} +@anchor{gnat_rm/the_gnat_library ada-wide-text-io-c-streams-a-wtcstr-ads}@anchor{30c}@anchor{gnat_rm/the_gnat_library id24}@anchor{30d} @section @code{Ada.Wide_Text_IO.C_Streams} (@code{a-wtcstr.ads}) @@ -23454,7 +23481,7 @@ extracted from a file opened on the Ada side, and an Ada file can be constructed from a stream opened on the C side. @node Ada Wide_Text_IO Reset_Standard_Files a-wrstfi ads,Ada Wide_Wide_Characters Unicode a-zchuni ads,Ada Wide_Text_IO C_Streams a-wtcstr ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-wide-text-io-reset-standard-files-a-wrstfi-ads}@anchor{30b}@anchor{gnat_rm/the_gnat_library id25}@anchor{30c} +@anchor{gnat_rm/the_gnat_library ada-wide-text-io-reset-standard-files-a-wrstfi-ads}@anchor{30e}@anchor{gnat_rm/the_gnat_library id25}@anchor{30f} @section @code{Ada.Wide_Text_IO.Reset_Standard_Files} (@code{a-wrstfi.ads}) @@ -23469,7 +23496,7 @@ execution (for example a standard input file may be redefined to be interactive). @node Ada Wide_Wide_Characters Unicode a-zchuni ads,Ada Wide_Wide_Text_IO C_Streams a-ztcstr ads,Ada Wide_Text_IO Reset_Standard_Files a-wrstfi ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-wide-wide-characters-unicode-a-zchuni-ads}@anchor{30d}@anchor{gnat_rm/the_gnat_library id26}@anchor{30e} +@anchor{gnat_rm/the_gnat_library ada-wide-wide-characters-unicode-a-zchuni-ads}@anchor{310}@anchor{gnat_rm/the_gnat_library id26}@anchor{311} @section @code{Ada.Wide_Wide_Characters.Unicode} (@code{a-zchuni.ads}) @@ -23482,7 +23509,7 @@ This package provides subprograms that allow categorization of Wide_Wide_Character values according to Unicode categories. @node Ada Wide_Wide_Text_IO C_Streams a-ztcstr ads,Ada Wide_Wide_Text_IO Reset_Standard_Files a-zrstfi ads,Ada Wide_Wide_Characters Unicode a-zchuni ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-wide-wide-text-io-c-streams-a-ztcstr-ads}@anchor{30f}@anchor{gnat_rm/the_gnat_library id27}@anchor{310} +@anchor{gnat_rm/the_gnat_library ada-wide-wide-text-io-c-streams-a-ztcstr-ads}@anchor{312}@anchor{gnat_rm/the_gnat_library id27}@anchor{313} @section @code{Ada.Wide_Wide_Text_IO.C_Streams} (@code{a-ztcstr.ads}) @@ -23497,7 +23524,7 @@ extracted from a file opened on the Ada side, and an Ada file can be constructed from a stream opened on the C side. @node Ada Wide_Wide_Text_IO Reset_Standard_Files a-zrstfi ads,GNAT Altivec g-altive ads,Ada Wide_Wide_Text_IO C_Streams a-ztcstr ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library ada-wide-wide-text-io-reset-standard-files-a-zrstfi-ads}@anchor{311}@anchor{gnat_rm/the_gnat_library id28}@anchor{312} +@anchor{gnat_rm/the_gnat_library ada-wide-wide-text-io-reset-standard-files-a-zrstfi-ads}@anchor{314}@anchor{gnat_rm/the_gnat_library id28}@anchor{315} @section @code{Ada.Wide_Wide_Text_IO.Reset_Standard_Files} (@code{a-zrstfi.ads}) @@ -23512,7 +23539,7 @@ change during execution (for example a standard input file may be redefined to be interactive). 
@node GNAT Altivec g-altive ads,GNAT Altivec Conversions g-altcon ads,Ada Wide_Wide_Text_IO Reset_Standard_Files a-zrstfi ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-altivec-g-altive-ads}@anchor{313}@anchor{gnat_rm/the_gnat_library id29}@anchor{314} +@anchor{gnat_rm/the_gnat_library gnat-altivec-g-altive-ads}@anchor{316}@anchor{gnat_rm/the_gnat_library id29}@anchor{317} @section @code{GNAT.Altivec} (@code{g-altive.ads}) @@ -23525,7 +23552,7 @@ definitions of constants and types common to all the versions of the binding. @node GNAT Altivec Conversions g-altcon ads,GNAT Altivec Vector_Operations g-alveop ads,GNAT Altivec g-altive ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-altivec-conversions-g-altcon-ads}@anchor{315}@anchor{gnat_rm/the_gnat_library id30}@anchor{316} +@anchor{gnat_rm/the_gnat_library gnat-altivec-conversions-g-altcon-ads}@anchor{318}@anchor{gnat_rm/the_gnat_library id30}@anchor{319} @section @code{GNAT.Altivec.Conversions} (@code{g-altcon.ads}) @@ -23536,7 +23563,7 @@ binding. This package provides the Vector/View conversion routines. @node GNAT Altivec Vector_Operations g-alveop ads,GNAT Altivec Vector_Types g-alvety ads,GNAT Altivec Conversions g-altcon ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-altivec-vector-operations-g-alveop-ads}@anchor{317}@anchor{gnat_rm/the_gnat_library id31}@anchor{318} +@anchor{gnat_rm/the_gnat_library gnat-altivec-vector-operations-g-alveop-ads}@anchor{31a}@anchor{gnat_rm/the_gnat_library id31}@anchor{31b} @section @code{GNAT.Altivec.Vector_Operations} (@code{g-alveop.ads}) @@ -23550,7 +23577,7 @@ library. The hard binding is provided as a separate package. This unit is common to both bindings. @node GNAT Altivec Vector_Types g-alvety ads,GNAT Altivec Vector_Views g-alvevi ads,GNAT Altivec Vector_Operations g-alveop ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-altivec-vector-types-g-alvety-ads}@anchor{319}@anchor{gnat_rm/the_gnat_library id32}@anchor{31a} +@anchor{gnat_rm/the_gnat_library gnat-altivec-vector-types-g-alvety-ads}@anchor{31c}@anchor{gnat_rm/the_gnat_library id32}@anchor{31d} @section @code{GNAT.Altivec.Vector_Types} (@code{g-alvety.ads}) @@ -23562,7 +23589,7 @@ This package exposes the various vector types part of the Ada binding to AltiVec facilities. @node GNAT Altivec Vector_Views g-alvevi ads,GNAT Array_Split g-arrspl ads,GNAT Altivec Vector_Types g-alvety ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-altivec-vector-views-g-alvevi-ads}@anchor{31b}@anchor{gnat_rm/the_gnat_library id33}@anchor{31c} +@anchor{gnat_rm/the_gnat_library gnat-altivec-vector-views-g-alvevi-ads}@anchor{31e}@anchor{gnat_rm/the_gnat_library id33}@anchor{31f} @section @code{GNAT.Altivec.Vector_Views} (@code{g-alvevi.ads}) @@ -23577,7 +23604,7 @@ vector elements and provides a simple way to initialize vector objects. @node GNAT Array_Split g-arrspl ads,GNAT AWK g-awk ads,GNAT Altivec Vector_Views g-alvevi ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-array-split-g-arrspl-ads}@anchor{31d}@anchor{gnat_rm/the_gnat_library id34}@anchor{31e} +@anchor{gnat_rm/the_gnat_library gnat-array-split-g-arrspl-ads}@anchor{320}@anchor{gnat_rm/the_gnat_library id34}@anchor{321} @section @code{GNAT.Array_Split} (@code{g-arrspl.ads}) @@ -23590,7 +23617,7 @@ an array wherever the separators appear, and provide direct access to the resulting slices. 
@node GNAT AWK g-awk ads,GNAT Binary_Search g-binsea ads,GNAT Array_Split g-arrspl ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-awk-g-awk-ads}@anchor{31f}@anchor{gnat_rm/the_gnat_library id35}@anchor{320} +@anchor{gnat_rm/the_gnat_library gnat-awk-g-awk-ads}@anchor{322}@anchor{gnat_rm/the_gnat_library id35}@anchor{323} @section @code{GNAT.AWK} (@code{g-awk.ads}) @@ -23605,7 +23632,7 @@ or more files containing formatted data. The file is viewed as a database where each record is a line and a field is a data element in this line. @node GNAT Binary_Search g-binsea ads,GNAT Bind_Environment g-binenv ads,GNAT AWK g-awk ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-binary-search-g-binsea-ads}@anchor{321}@anchor{gnat_rm/the_gnat_library id36}@anchor{322} +@anchor{gnat_rm/the_gnat_library gnat-binary-search-g-binsea-ads}@anchor{324}@anchor{gnat_rm/the_gnat_library id36}@anchor{325} @section @code{GNAT.Binary_Search} (@code{g-binsea.ads}) @@ -23617,7 +23644,7 @@ Allow binary search of a sorted array (or of an array-like container; the generic does not reference the array directly). @node GNAT Bind_Environment g-binenv ads,GNAT Branch_Prediction g-brapre ads,GNAT Binary_Search g-binsea ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-bind-environment-g-binenv-ads}@anchor{323}@anchor{gnat_rm/the_gnat_library id37}@anchor{324} +@anchor{gnat_rm/the_gnat_library gnat-bind-environment-g-binenv-ads}@anchor{326}@anchor{gnat_rm/the_gnat_library id37}@anchor{327} @section @code{GNAT.Bind_Environment} (@code{g-binenv.ads}) @@ -23630,7 +23657,7 @@ These associations can be specified using the @code{-V} binder command line switch. @node GNAT Branch_Prediction g-brapre ads,GNAT Bounded_Buffers g-boubuf ads,GNAT Bind_Environment g-binenv ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-branch-prediction-g-brapre-ads}@anchor{325}@anchor{gnat_rm/the_gnat_library id38}@anchor{326} +@anchor{gnat_rm/the_gnat_library gnat-branch-prediction-g-brapre-ads}@anchor{328}@anchor{gnat_rm/the_gnat_library id38}@anchor{329} @section @code{GNAT.Branch_Prediction} (@code{g-brapre.ads}) @@ -23641,7 +23668,7 @@ line switch. Provides routines giving hints to the branch predictor of the code generator. @node GNAT Bounded_Buffers g-boubuf ads,GNAT Bounded_Mailboxes g-boumai ads,GNAT Branch_Prediction g-brapre ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-bounded-buffers-g-boubuf-ads}@anchor{327}@anchor{gnat_rm/the_gnat_library id39}@anchor{328} +@anchor{gnat_rm/the_gnat_library gnat-bounded-buffers-g-boubuf-ads}@anchor{32a}@anchor{gnat_rm/the_gnat_library id39}@anchor{32b} @section @code{GNAT.Bounded_Buffers} (@code{g-boubuf.ads}) @@ -23656,7 +23683,7 @@ useful directly or as parts of the implementations of other abstractions, such as mailboxes. @node GNAT Bounded_Mailboxes g-boumai ads,GNAT Bubble_Sort g-bubsor ads,GNAT Bounded_Buffers g-boubuf ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-bounded-mailboxes-g-boumai-ads}@anchor{329}@anchor{gnat_rm/the_gnat_library id40}@anchor{32a} +@anchor{gnat_rm/the_gnat_library gnat-bounded-mailboxes-g-boumai-ads}@anchor{32c}@anchor{gnat_rm/the_gnat_library id40}@anchor{32d} @section @code{GNAT.Bounded_Mailboxes} (@code{g-boumai.ads}) @@ -23669,7 +23696,7 @@ such as mailboxes. Provides a thread-safe asynchronous intertask mailbox communication facility. 
@node GNAT Bubble_Sort g-bubsor ads,GNAT Bubble_Sort_A g-busora ads,GNAT Bounded_Mailboxes g-boumai ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-bubble-sort-g-bubsor-ads}@anchor{32b}@anchor{gnat_rm/the_gnat_library id41}@anchor{32c} +@anchor{gnat_rm/the_gnat_library gnat-bubble-sort-g-bubsor-ads}@anchor{32e}@anchor{gnat_rm/the_gnat_library id41}@anchor{32f} @section @code{GNAT.Bubble_Sort} (@code{g-bubsor.ads}) @@ -23684,7 +23711,7 @@ data items. Exchange and comparison procedures are provided by passing access-to-procedure values. @node GNAT Bubble_Sort_A g-busora ads,GNAT Bubble_Sort_G g-busorg ads,GNAT Bubble_Sort g-bubsor ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-bubble-sort-a-g-busora-ads}@anchor{32d}@anchor{gnat_rm/the_gnat_library id42}@anchor{32e} +@anchor{gnat_rm/the_gnat_library gnat-bubble-sort-a-g-busora-ads}@anchor{330}@anchor{gnat_rm/the_gnat_library id42}@anchor{331} @section @code{GNAT.Bubble_Sort_A} (@code{g-busora.ads}) @@ -23700,7 +23727,7 @@ access-to-procedure values. This is an older version, retained for compatibility. Usually @code{GNAT.Bubble_Sort} will be preferable. @node GNAT Bubble_Sort_G g-busorg ads,GNAT Byte_Order_Mark g-byorma ads,GNAT Bubble_Sort_A g-busora ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-bubble-sort-g-g-busorg-ads}@anchor{32f}@anchor{gnat_rm/the_gnat_library id43}@anchor{330} +@anchor{gnat_rm/the_gnat_library gnat-bubble-sort-g-g-busorg-ads}@anchor{332}@anchor{gnat_rm/the_gnat_library id43}@anchor{333} @section @code{GNAT.Bubble_Sort_G} (@code{g-busorg.ads}) @@ -23716,7 +23743,7 @@ if the procedures can be inlined, at the expense of duplicating code for multiple instantiations. @node GNAT Byte_Order_Mark g-byorma ads,GNAT Byte_Swapping g-bytswa ads,GNAT Bubble_Sort_G g-busorg ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-byte-order-mark-g-byorma-ads}@anchor{331}@anchor{gnat_rm/the_gnat_library id44}@anchor{332} +@anchor{gnat_rm/the_gnat_library gnat-byte-order-mark-g-byorma-ads}@anchor{334}@anchor{gnat_rm/the_gnat_library id44}@anchor{335} @section @code{GNAT.Byte_Order_Mark} (@code{g-byorma.ads}) @@ -23732,7 +23759,7 @@ the encoding of the string. The routine includes detection of special XML sequences for various UCS input formats. @node GNAT Byte_Swapping g-bytswa ads,GNAT Calendar g-calend ads,GNAT Byte_Order_Mark g-byorma ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-byte-swapping-g-bytswa-ads}@anchor{333}@anchor{gnat_rm/the_gnat_library id45}@anchor{334} +@anchor{gnat_rm/the_gnat_library gnat-byte-swapping-g-bytswa-ads}@anchor{336}@anchor{gnat_rm/the_gnat_library id45}@anchor{337} @section @code{GNAT.Byte_Swapping} (@code{g-bytswa.ads}) @@ -23746,7 +23773,7 @@ General routines for swapping the bytes in 2-, 4-, and 8-byte quantities. Machine-specific implementations are available in some cases. @node GNAT Calendar g-calend ads,GNAT Calendar Time_IO g-catiio ads,GNAT Byte_Swapping g-bytswa ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-calendar-g-calend-ads}@anchor{335}@anchor{gnat_rm/the_gnat_library id46}@anchor{336} +@anchor{gnat_rm/the_gnat_library gnat-calendar-g-calend-ads}@anchor{338}@anchor{gnat_rm/the_gnat_library id46}@anchor{339} @section @code{GNAT.Calendar} (@code{g-calend.ads}) @@ -23760,7 +23787,7 @@ Also provides conversion of @code{Ada.Calendar.Time} values to and from the C @code{timeval} format. 
@node GNAT Calendar Time_IO g-catiio ads,GNAT CRC32 g-crc32 ads,GNAT Calendar g-calend ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-calendar-time-io-g-catiio-ads}@anchor{337}@anchor{gnat_rm/the_gnat_library id47}@anchor{338} +@anchor{gnat_rm/the_gnat_library gnat-calendar-time-io-g-catiio-ads}@anchor{33a}@anchor{gnat_rm/the_gnat_library id47}@anchor{33b} @section @code{GNAT.Calendar.Time_IO} (@code{g-catiio.ads}) @@ -23771,7 +23798,7 @@ C @code{timeval} format. @geindex GNAT.Calendar.Time_IO (g-catiio.ads) @node GNAT CRC32 g-crc32 ads,GNAT Case_Util g-casuti ads,GNAT Calendar Time_IO g-catiio ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-crc32-g-crc32-ads}@anchor{339}@anchor{gnat_rm/the_gnat_library id48}@anchor{33a} +@anchor{gnat_rm/the_gnat_library gnat-crc32-g-crc32-ads}@anchor{33c}@anchor{gnat_rm/the_gnat_library id48}@anchor{33d} @section @code{GNAT.CRC32} (@code{g-crc32.ads}) @@ -23788,7 +23815,7 @@ of this algorithm see Aug. 1988. Sarwate, D.V. @node GNAT Case_Util g-casuti ads,GNAT CGI g-cgi ads,GNAT CRC32 g-crc32 ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-case-util-g-casuti-ads}@anchor{33b}@anchor{gnat_rm/the_gnat_library id49}@anchor{33c} +@anchor{gnat_rm/the_gnat_library gnat-case-util-g-casuti-ads}@anchor{33e}@anchor{gnat_rm/the_gnat_library id49}@anchor{33f} @section @code{GNAT.Case_Util} (@code{g-casuti.ads}) @@ -23803,7 +23830,7 @@ without the overhead of the full casing tables in @code{Ada.Characters.Handling}. @node GNAT CGI g-cgi ads,GNAT CGI Cookie g-cgicoo ads,GNAT Case_Util g-casuti ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-cgi-g-cgi-ads}@anchor{33d}@anchor{gnat_rm/the_gnat_library id50}@anchor{33e} +@anchor{gnat_rm/the_gnat_library gnat-cgi-g-cgi-ads}@anchor{340}@anchor{gnat_rm/the_gnat_library id50}@anchor{341} @section @code{GNAT.CGI} (@code{g-cgi.ads}) @@ -23818,7 +23845,7 @@ builds a table whose index is the key and provides some services to deal with this table. @node GNAT CGI Cookie g-cgicoo ads,GNAT CGI Debug g-cgideb ads,GNAT CGI g-cgi ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-cgi-cookie-g-cgicoo-ads}@anchor{33f}@anchor{gnat_rm/the_gnat_library id51}@anchor{340} +@anchor{gnat_rm/the_gnat_library gnat-cgi-cookie-g-cgicoo-ads}@anchor{342}@anchor{gnat_rm/the_gnat_library id51}@anchor{343} @section @code{GNAT.CGI.Cookie} (@code{g-cgicoo.ads}) @@ -23833,7 +23860,7 @@ Common Gateway Interface (CGI). It exports services to deal with Web cookies (piece of information kept in the Web client software). @node GNAT CGI Debug g-cgideb ads,GNAT Command_Line g-comlin ads,GNAT CGI Cookie g-cgicoo ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-cgi-debug-g-cgideb-ads}@anchor{341}@anchor{gnat_rm/the_gnat_library id52}@anchor{342} +@anchor{gnat_rm/the_gnat_library gnat-cgi-debug-g-cgideb-ads}@anchor{344}@anchor{gnat_rm/the_gnat_library id52}@anchor{345} @section @code{GNAT.CGI.Debug} (@code{g-cgideb.ads}) @@ -23845,7 +23872,7 @@ This is a package to help debugging CGI (Common Gateway Interface) programs written in Ada. 
@node GNAT Command_Line g-comlin ads,GNAT Compiler_Version g-comver ads,GNAT CGI Debug g-cgideb ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-command-line-g-comlin-ads}@anchor{343}@anchor{gnat_rm/the_gnat_library id53}@anchor{344} +@anchor{gnat_rm/the_gnat_library gnat-command-line-g-comlin-ads}@anchor{346}@anchor{gnat_rm/the_gnat_library id53}@anchor{347} @section @code{GNAT.Command_Line} (@code{g-comlin.ads}) @@ -23858,7 +23885,7 @@ including the ability to scan for named switches with optional parameters and expand file names using wildcard notations. @node GNAT Compiler_Version g-comver ads,GNAT Ctrl_C g-ctrl_c ads,GNAT Command_Line g-comlin ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-compiler-version-g-comver-ads}@anchor{345}@anchor{gnat_rm/the_gnat_library id54}@anchor{346} +@anchor{gnat_rm/the_gnat_library gnat-compiler-version-g-comver-ads}@anchor{348}@anchor{gnat_rm/the_gnat_library id54}@anchor{349} @section @code{GNAT.Compiler_Version} (@code{g-comver.ads}) @@ -23876,7 +23903,7 @@ of the compiler if a consistent tool set is used to compile all units of a partition). @node GNAT Ctrl_C g-ctrl_c ads,GNAT Current_Exception g-curexc ads,GNAT Compiler_Version g-comver ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-ctrl-c-g-ctrl-c-ads}@anchor{347}@anchor{gnat_rm/the_gnat_library id55}@anchor{348} +@anchor{gnat_rm/the_gnat_library gnat-ctrl-c-g-ctrl-c-ads}@anchor{34a}@anchor{gnat_rm/the_gnat_library id55}@anchor{34b} @section @code{GNAT.Ctrl_C} (@code{g-ctrl_c.ads}) @@ -23887,7 +23914,7 @@ of a partition). Provides a simple interface to handle Ctrl-C keyboard events. @node GNAT Current_Exception g-curexc ads,GNAT Debug_Pools g-debpoo ads,GNAT Ctrl_C g-ctrl_c ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-current-exception-g-curexc-ads}@anchor{349}@anchor{gnat_rm/the_gnat_library id56}@anchor{34a} +@anchor{gnat_rm/the_gnat_library gnat-current-exception-g-curexc-ads}@anchor{34c}@anchor{gnat_rm/the_gnat_library id56}@anchor{34d} @section @code{GNAT.Current_Exception} (@code{g-curexc.ads}) @@ -23904,7 +23931,7 @@ This is particularly useful in simulating typical facilities for obtaining information about exceptions provided by Ada 83 compilers. @node GNAT Debug_Pools g-debpoo ads,GNAT Debug_Utilities g-debuti ads,GNAT Current_Exception g-curexc ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-debug-pools-g-debpoo-ads}@anchor{34b}@anchor{gnat_rm/the_gnat_library id57}@anchor{34c} +@anchor{gnat_rm/the_gnat_library gnat-debug-pools-g-debpoo-ads}@anchor{34e}@anchor{gnat_rm/the_gnat_library id57}@anchor{34f} @section @code{GNAT.Debug_Pools} (@code{g-debpoo.ads}) @@ -23921,7 +23948,7 @@ problems. See @code{The GNAT Debug_Pool Facility} section in the @cite{GNAT User’s Guide}. @node GNAT Debug_Utilities g-debuti ads,GNAT Decode_String g-decstr ads,GNAT Debug_Pools g-debpoo ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-debug-utilities-g-debuti-ads}@anchor{34d}@anchor{gnat_rm/the_gnat_library id58}@anchor{34e} +@anchor{gnat_rm/the_gnat_library gnat-debug-utilities-g-debuti-ads}@anchor{350}@anchor{gnat_rm/the_gnat_library id58}@anchor{351} @section @code{GNAT.Debug_Utilities} (@code{g-debuti.ads}) @@ -23934,7 +23961,7 @@ to and from string images of address values. Supports both C and Ada formats for hexadecimal literals. 
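The @code{GNAT.Command_Line} entry above mentions scanning for named switches with optional parameters; a hedged sketch of the usual @code{Getopt} loop follows, where the switch letters and switch-string syntax are assumptions:

@example
with Ada.Text_IO;       use Ada.Text_IO;
with GNAT.Command_Line; use GNAT.Command_Line;

procedure Parse_Args is
begin
   loop
      --  "v" is a plain flag, "o:" takes a parameter (assumed syntax).
      case Getopt ("v o:") is
         when 'v'       => Put_Line ("verbose");
         when 'o'       => Put_Line ("output: " & Parameter);
         when ASCII.NUL => exit;   --  no more switches
         when others    => null;
      end case;
   end loop;
exception
   when Invalid_Switch => Put_Line ("unrecognized switch");
end Parse_Args;
@end example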
@node GNAT Decode_String g-decstr ads,GNAT Decode_UTF8_String g-deutst ads,GNAT Debug_Utilities g-debuti ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-decode-string-g-decstr-ads}@anchor{34f}@anchor{gnat_rm/the_gnat_library id59}@anchor{350} +@anchor{gnat_rm/the_gnat_library gnat-decode-string-g-decstr-ads}@anchor{352}@anchor{gnat_rm/the_gnat_library id59}@anchor{353} @section @code{GNAT.Decode_String} (@code{g-decstr.ads}) @@ -23958,7 +23985,7 @@ Useful in conjunction with Unicode character coding. Note there is a preinstantiation for UTF-8. See next entry. @node GNAT Decode_UTF8_String g-deutst ads,GNAT Directory_Operations g-dirope ads,GNAT Decode_String g-decstr ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-decode-utf8-string-g-deutst-ads}@anchor{351}@anchor{gnat_rm/the_gnat_library id60}@anchor{352} +@anchor{gnat_rm/the_gnat_library gnat-decode-utf8-string-g-deutst-ads}@anchor{354}@anchor{gnat_rm/the_gnat_library id60}@anchor{355} @section @code{GNAT.Decode_UTF8_String} (@code{g-deutst.ads}) @@ -23979,7 +24006,7 @@ preinstantiation for UTF-8. See next entry. A preinstantiation of GNAT.Decode_Strings for UTF-8 encoding. @node GNAT Directory_Operations g-dirope ads,GNAT Directory_Operations Iteration g-diopit ads,GNAT Decode_UTF8_String g-deutst ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-directory-operations-g-dirope-ads}@anchor{353}@anchor{gnat_rm/the_gnat_library id61}@anchor{354} +@anchor{gnat_rm/the_gnat_library gnat-directory-operations-g-dirope-ads}@anchor{356}@anchor{gnat_rm/the_gnat_library id61}@anchor{357} @section @code{GNAT.Directory_Operations} (@code{g-dirope.ads}) @@ -23992,7 +24019,7 @@ the current directory, making new directories, and scanning the files in a directory. @node GNAT Directory_Operations Iteration g-diopit ads,GNAT Dynamic_HTables g-dynhta ads,GNAT Directory_Operations g-dirope ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-directory-operations-iteration-g-diopit-ads}@anchor{355}@anchor{gnat_rm/the_gnat_library id62}@anchor{356} +@anchor{gnat_rm/the_gnat_library gnat-directory-operations-iteration-g-diopit-ads}@anchor{358}@anchor{gnat_rm/the_gnat_library id62}@anchor{359} @section @code{GNAT.Directory_Operations.Iteration} (@code{g-diopit.ads}) @@ -24004,7 +24031,7 @@ A child unit of GNAT.Directory_Operations providing additional operations for iterating through directories. @node GNAT Dynamic_HTables g-dynhta ads,GNAT Dynamic_Tables g-dyntab ads,GNAT Directory_Operations Iteration g-diopit ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-dynamic-htables-g-dynhta-ads}@anchor{357}@anchor{gnat_rm/the_gnat_library id63}@anchor{358} +@anchor{gnat_rm/the_gnat_library gnat-dynamic-htables-g-dynhta-ads}@anchor{35a}@anchor{gnat_rm/the_gnat_library id63}@anchor{35b} @section @code{GNAT.Dynamic_HTables} (@code{g-dynhta.ads}) @@ -24022,7 +24049,7 @@ dynamic instances of the hash table, while an instantiation of @code{GNAT.HTable} creates a single instance of the hash table. 
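A short sketch of the directory manipulation described for @code{GNAT.Directory_Operations}; the subprogram names @code{Get_Current_Dir}, @code{Make_Dir} and @code{Change_Dir} are assumed, and the directory name is invented:

@example
with Ada.Text_IO;               use Ada.Text_IO;
with GNAT.Directory_Operations; use GNAT.Directory_Operations;

procedure Dir_Demo is
begin
   Put_Line ("starting in: " & Get_Current_Dir);
   Make_Dir ("scratch");
   Change_Dir ("scratch");
   Put_Line ("now in: " & Get_Current_Dir);
end Dir_Demo;
@end example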
@node GNAT Dynamic_Tables g-dyntab ads,GNAT Encode_String g-encstr ads,GNAT Dynamic_HTables g-dynhta ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-dynamic-tables-g-dyntab-ads}@anchor{359}@anchor{gnat_rm/the_gnat_library id64}@anchor{35a} +@anchor{gnat_rm/the_gnat_library gnat-dynamic-tables-g-dyntab-ads}@anchor{35c}@anchor{gnat_rm/the_gnat_library id64}@anchor{35d} @section @code{GNAT.Dynamic_Tables} (@code{g-dyntab.ads}) @@ -24042,7 +24069,7 @@ dynamic instances of the table, while an instantiation of @code{GNAT.Table} creates a single instance of the table type. @node GNAT Encode_String g-encstr ads,GNAT Encode_UTF8_String g-enutst ads,GNAT Dynamic_Tables g-dyntab ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-encode-string-g-encstr-ads}@anchor{35b}@anchor{gnat_rm/the_gnat_library id65}@anchor{35c} +@anchor{gnat_rm/the_gnat_library gnat-encode-string-g-encstr-ads}@anchor{35e}@anchor{gnat_rm/the_gnat_library id65}@anchor{35f} @section @code{GNAT.Encode_String} (@code{g-encstr.ads}) @@ -24064,7 +24091,7 @@ encoding method. Useful in conjunction with Unicode character coding. Note there is a preinstantiation for UTF-8. See next entry. @node GNAT Encode_UTF8_String g-enutst ads,GNAT Exception_Actions g-excact ads,GNAT Encode_String g-encstr ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-encode-utf8-string-g-enutst-ads}@anchor{35d}@anchor{gnat_rm/the_gnat_library id66}@anchor{35e} +@anchor{gnat_rm/the_gnat_library gnat-encode-utf8-string-g-enutst-ads}@anchor{360}@anchor{gnat_rm/the_gnat_library id66}@anchor{361} @section @code{GNAT.Encode_UTF8_String} (@code{g-enutst.ads}) @@ -24085,7 +24112,7 @@ Note there is a preinstantiation for UTF-8. See next entry. A preinstantiation of GNAT.Encode_Strings for UTF-8 encoding. @node GNAT Exception_Actions g-excact ads,GNAT Exception_Traces g-exctra ads,GNAT Encode_UTF8_String g-enutst ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-exception-actions-g-excact-ads}@anchor{35f}@anchor{gnat_rm/the_gnat_library id67}@anchor{360} +@anchor{gnat_rm/the_gnat_library gnat-exception-actions-g-excact-ads}@anchor{362}@anchor{gnat_rm/the_gnat_library id67}@anchor{363} @section @code{GNAT.Exception_Actions} (@code{g-excact.ads}) @@ -24098,7 +24125,7 @@ for specific exceptions, or when any exception is raised. This can be used for instance to force a core dump to ease debugging. @node GNAT Exception_Traces g-exctra ads,GNAT Exceptions g-except ads,GNAT Exception_Actions g-excact ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-exception-traces-g-exctra-ads}@anchor{361}@anchor{gnat_rm/the_gnat_library id68}@anchor{362} +@anchor{gnat_rm/the_gnat_library gnat-exception-traces-g-exctra-ads}@anchor{364}@anchor{gnat_rm/the_gnat_library id68}@anchor{365} @section @code{GNAT.Exception_Traces} (@code{g-exctra.ads}) @@ -24112,7 +24139,7 @@ Provides an interface allowing to control automatic output upon exception occurrences. @node GNAT Exceptions g-except ads,GNAT Expect g-expect ads,GNAT Exception_Traces g-exctra ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-exceptions-g-except-ads}@anchor{363}@anchor{gnat_rm/the_gnat_library id69}@anchor{364} +@anchor{gnat_rm/the_gnat_library gnat-exceptions-g-except-ads}@anchor{366}@anchor{gnat_rm/the_gnat_library id69}@anchor{367} @section @code{GNAT.Exceptions} (@code{g-except.ads}) @@ -24133,7 +24160,7 @@ predefined exceptions, and for example allows raising @code{Constraint_Error} with a message from a pure subprogram. 
@node GNAT Expect g-expect ads,GNAT Expect TTY g-exptty ads,GNAT Exceptions g-except ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-expect-g-expect-ads}@anchor{365}@anchor{gnat_rm/the_gnat_library id70}@anchor{366} +@anchor{gnat_rm/the_gnat_library gnat-expect-g-expect-ads}@anchor{368}@anchor{gnat_rm/the_gnat_library id70}@anchor{369} @section @code{GNAT.Expect} (@code{g-expect.ads}) @@ -24149,7 +24176,7 @@ It is not implemented for cross ports, and in particular is not implemented for VxWorks or LynxOS. @node GNAT Expect TTY g-exptty ads,GNAT Float_Control g-flocon ads,GNAT Expect g-expect ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-expect-tty-g-exptty-ads}@anchor{367}@anchor{gnat_rm/the_gnat_library id71}@anchor{368} +@anchor{gnat_rm/the_gnat_library gnat-expect-tty-g-exptty-ads}@anchor{36a}@anchor{gnat_rm/the_gnat_library id71}@anchor{36b} @section @code{GNAT.Expect.TTY} (@code{g-exptty.ads}) @@ -24161,7 +24188,7 @@ ports. It is not implemented for cross ports, and in particular is not implemented for VxWorks or LynxOS. @node GNAT Float_Control g-flocon ads,GNAT Formatted_String g-forstr ads,GNAT Expect TTY g-exptty ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-float-control-g-flocon-ads}@anchor{369}@anchor{gnat_rm/the_gnat_library id72}@anchor{36a} +@anchor{gnat_rm/the_gnat_library gnat-float-control-g-flocon-ads}@anchor{36c}@anchor{gnat_rm/the_gnat_library id72}@anchor{36d} @section @code{GNAT.Float_Control} (@code{g-flocon.ads}) @@ -24175,7 +24202,7 @@ library calls may cause this mode to be modified, and the Reset procedure in this package can be used to reestablish the required mode. @node GNAT Formatted_String g-forstr ads,GNAT Generic_Fast_Math_Functions g-gfmafu ads,GNAT Float_Control g-flocon ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-formatted-string-g-forstr-ads}@anchor{36b}@anchor{gnat_rm/the_gnat_library id73}@anchor{36c} +@anchor{gnat_rm/the_gnat_library gnat-formatted-string-g-forstr-ads}@anchor{36e}@anchor{gnat_rm/the_gnat_library id73}@anchor{36f} @section @code{GNAT.Formatted_String} (@code{g-forstr.ads}) @@ -24190,7 +24217,7 @@ derived from Integer, Float or enumerations as values for the formatted string. @node GNAT Generic_Fast_Math_Functions g-gfmafu ads,GNAT Heap_Sort g-heasor ads,GNAT Formatted_String g-forstr ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-generic-fast-math-functions-g-gfmafu-ads}@anchor{36d}@anchor{gnat_rm/the_gnat_library id74}@anchor{36e} +@anchor{gnat_rm/the_gnat_library gnat-generic-fast-math-functions-g-gfmafu-ads}@anchor{370}@anchor{gnat_rm/the_gnat_library id74}@anchor{371} @section @code{GNAT.Generic_Fast_Math_Functions} (@code{g-gfmafu.ads}) @@ -24208,7 +24235,7 @@ have a vector implementation that can be automatically used by the compiler when auto-vectorization is enabled. @node GNAT Heap_Sort g-heasor ads,GNAT Heap_Sort_A g-hesora ads,GNAT Generic_Fast_Math_Functions g-gfmafu ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-heap-sort-g-heasor-ads}@anchor{36f}@anchor{gnat_rm/the_gnat_library id75}@anchor{370} +@anchor{gnat_rm/the_gnat_library gnat-heap-sort-g-heasor-ads}@anchor{372}@anchor{gnat_rm/the_gnat_library id75}@anchor{373} @section @code{GNAT.Heap_Sort} (@code{g-heasor.ads}) @@ -24222,7 +24249,7 @@ access-to-procedure values. The algorithm used is a modified heap sort that performs approximately N*log(N) comparisons in the worst case. 
@node GNAT Heap_Sort_A g-hesora ads,GNAT Heap_Sort_G g-hesorg ads,GNAT Heap_Sort g-heasor ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-heap-sort-a-g-hesora-ads}@anchor{371}@anchor{gnat_rm/the_gnat_library id76}@anchor{372} +@anchor{gnat_rm/the_gnat_library gnat-heap-sort-a-g-hesora-ads}@anchor{374}@anchor{gnat_rm/the_gnat_library id76}@anchor{375} @section @code{GNAT.Heap_Sort_A} (@code{g-hesora.ads}) @@ -24238,7 +24265,7 @@ This differs from @code{GNAT.Heap_Sort} in having a less convenient interface, but may be slightly more efficient. @node GNAT Heap_Sort_G g-hesorg ads,GNAT HTable g-htable ads,GNAT Heap_Sort_A g-hesora ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-heap-sort-g-g-hesorg-ads}@anchor{373}@anchor{gnat_rm/the_gnat_library id77}@anchor{374} +@anchor{gnat_rm/the_gnat_library gnat-heap-sort-g-g-hesorg-ads}@anchor{376}@anchor{gnat_rm/the_gnat_library id77}@anchor{377} @section @code{GNAT.Heap_Sort_G} (@code{g-hesorg.ads}) @@ -24252,7 +24279,7 @@ if the procedures can be inlined, at the expense of duplicating code for multiple instantiations. @node GNAT HTable g-htable ads,GNAT IO g-io ads,GNAT Heap_Sort_G g-hesorg ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-htable-g-htable-ads}@anchor{375}@anchor{gnat_rm/the_gnat_library id78}@anchor{376} +@anchor{gnat_rm/the_gnat_library gnat-htable-g-htable-ads}@anchor{378}@anchor{gnat_rm/the_gnat_library id78}@anchor{379} @section @code{GNAT.HTable} (@code{g-htable.ads}) @@ -24265,7 +24292,7 @@ data. Provides two approaches, one a simple static approach, and the other allowing arbitrary dynamic hash tables. @node GNAT IO g-io ads,GNAT IO_Aux g-io_aux ads,GNAT HTable g-htable ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-io-g-io-ads}@anchor{377}@anchor{gnat_rm/the_gnat_library id79}@anchor{378} +@anchor{gnat_rm/the_gnat_library gnat-io-g-io-ads}@anchor{37a}@anchor{gnat_rm/the_gnat_library id79}@anchor{37b} @section @code{GNAT.IO} (@code{g-io.ads}) @@ -24281,7 +24308,7 @@ Standard_Input, and writing characters, strings and integers to either Standard_Output or Standard_Error. @node GNAT IO_Aux g-io_aux ads,GNAT Lock_Files g-locfil ads,GNAT IO g-io ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-io-aux-g-io-aux-ads}@anchor{379}@anchor{gnat_rm/the_gnat_library id80}@anchor{37a} +@anchor{gnat_rm/the_gnat_library gnat-io-aux-g-io-aux-ads}@anchor{37c}@anchor{gnat_rm/the_gnat_library id80}@anchor{37d} @section @code{GNAT.IO_Aux} (@code{g-io_aux.ads}) @@ -24295,7 +24322,7 @@ Provides some auxiliary functions for use with Text_IO, including a test for whether a file exists, and functions for reading a line of text. @node GNAT Lock_Files g-locfil ads,GNAT MBBS_Discrete_Random g-mbdira ads,GNAT IO_Aux g-io_aux ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-lock-files-g-locfil-ads}@anchor{37b}@anchor{gnat_rm/the_gnat_library id81}@anchor{37c} +@anchor{gnat_rm/the_gnat_library gnat-lock-files-g-locfil-ads}@anchor{37e}@anchor{gnat_rm/the_gnat_library id81}@anchor{37f} @section @code{GNAT.Lock_Files} (@code{g-locfil.ads}) @@ -24309,7 +24336,7 @@ Provides a general interface for using files as locks. Can be used for providing program level synchronization. 
@node GNAT MBBS_Discrete_Random g-mbdira ads,GNAT MBBS_Float_Random g-mbflra ads,GNAT Lock_Files g-locfil ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-mbbs-discrete-random-g-mbdira-ads}@anchor{37d}@anchor{gnat_rm/the_gnat_library id82}@anchor{37e} +@anchor{gnat_rm/the_gnat_library gnat-mbbs-discrete-random-g-mbdira-ads}@anchor{380}@anchor{gnat_rm/the_gnat_library id82}@anchor{381} @section @code{GNAT.MBBS_Discrete_Random} (@code{g-mbdira.ads}) @@ -24321,7 +24348,7 @@ The original implementation of @code{Ada.Numerics.Discrete_Random}. Uses a modified version of the Blum-Blum-Shub generator. @node GNAT MBBS_Float_Random g-mbflra ads,GNAT MD5 g-md5 ads,GNAT MBBS_Discrete_Random g-mbdira ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-mbbs-float-random-g-mbflra-ads}@anchor{37f}@anchor{gnat_rm/the_gnat_library id83}@anchor{380} +@anchor{gnat_rm/the_gnat_library gnat-mbbs-float-random-g-mbflra-ads}@anchor{382}@anchor{gnat_rm/the_gnat_library id83}@anchor{383} @section @code{GNAT.MBBS_Float_Random} (@code{g-mbflra.ads}) @@ -24333,7 +24360,7 @@ The original implementation of @code{Ada.Numerics.Float_Random}. Uses a modified version of the Blum-Blum-Shub generator. @node GNAT MD5 g-md5 ads,GNAT Memory_Dump g-memdum ads,GNAT MBBS_Float_Random g-mbflra ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-md5-g-md5-ads}@anchor{381}@anchor{gnat_rm/the_gnat_library id84}@anchor{382} +@anchor{gnat_rm/the_gnat_library gnat-md5-g-md5-ads}@anchor{384}@anchor{gnat_rm/the_gnat_library id84}@anchor{385} @section @code{GNAT.MD5} (@code{g-md5.ads}) @@ -24346,7 +24373,7 @@ the HMAC-MD5 message authentication function as described in RFC 2104 and FIPS PUB 198. @node GNAT Memory_Dump g-memdum ads,GNAT Most_Recent_Exception g-moreex ads,GNAT MD5 g-md5 ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-memory-dump-g-memdum-ads}@anchor{383}@anchor{gnat_rm/the_gnat_library id85}@anchor{384} +@anchor{gnat_rm/the_gnat_library gnat-memory-dump-g-memdum-ads}@anchor{386}@anchor{gnat_rm/the_gnat_library id85}@anchor{387} @section @code{GNAT.Memory_Dump} (@code{g-memdum.ads}) @@ -24359,7 +24386,7 @@ standard output or standard error files. Uses GNAT.IO for actual output. @node GNAT Most_Recent_Exception g-moreex ads,GNAT OS_Lib g-os_lib ads,GNAT Memory_Dump g-memdum ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-most-recent-exception-g-moreex-ads}@anchor{385}@anchor{gnat_rm/the_gnat_library id86}@anchor{386} +@anchor{gnat_rm/the_gnat_library gnat-most-recent-exception-g-moreex-ads}@anchor{388}@anchor{gnat_rm/the_gnat_library id86}@anchor{389} @section @code{GNAT.Most_Recent_Exception} (@code{g-moreex.ads}) @@ -24373,7 +24400,7 @@ various logging purposes, including duplicating functionality of some Ada 83 implementation dependent extensions. @node GNAT OS_Lib g-os_lib ads,GNAT Perfect_Hash_Generators g-pehage ads,GNAT Most_Recent_Exception g-moreex ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-os-lib-g-os-lib-ads}@anchor{387}@anchor{gnat_rm/the_gnat_library id87}@anchor{388} +@anchor{gnat_rm/the_gnat_library gnat-os-lib-g-os-lib-ads}@anchor{38a}@anchor{gnat_rm/the_gnat_library id87}@anchor{38b} @section @code{GNAT.OS_Lib} (@code{g-os_lib.ads}) @@ -24389,7 +24416,7 @@ including a portable spawn procedure, and access to environment variables and error return codes. 
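As a rough illustration of the portable spawn facility of @code{GNAT.OS_Lib} described above, the following sketch runs an external program and then frees its argument strings. The program path and arguments are placeholders, and the @code{Spawn}, @code{Argument_List} and @code{Free} names are recalled from the package specification rather than quoted from this manual:

@example
with GNAT.OS_Lib; use GNAT.OS_Lib;
with Ada.Text_IO;

procedure Run_Command is
   --  "/bin/ls" and "-l" are placeholder values for illustration only
   Args    : Argument_List := (1 => new String'("-l"));
   Success : Boolean;
begin
   Spawn ("/bin/ls", Args, Success);
   Ada.Text_IO.Put_Line ("Spawn succeeded: " & Boolean'Image (Success));

   for J in Args'Range loop
      Free (Args (J));
   end loop;
end Run_Command;
@end example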
@node GNAT Perfect_Hash_Generators g-pehage ads,GNAT Random_Numbers g-rannum ads,GNAT OS_Lib g-os_lib ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-perfect-hash-generators-g-pehage-ads}@anchor{389}@anchor{gnat_rm/the_gnat_library id88}@anchor{38a} +@anchor{gnat_rm/the_gnat_library gnat-perfect-hash-generators-g-pehage-ads}@anchor{38c}@anchor{gnat_rm/the_gnat_library id88}@anchor{38d} @section @code{GNAT.Perfect_Hash_Generators} (@code{g-pehage.ads}) @@ -24407,7 +24434,7 @@ hashcode are in the same order. These hashing functions are very convenient for use with realtime applications. @node GNAT Random_Numbers g-rannum ads,GNAT Regexp g-regexp ads,GNAT Perfect_Hash_Generators g-pehage ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-random-numbers-g-rannum-ads}@anchor{38b}@anchor{gnat_rm/the_gnat_library id89}@anchor{38c} +@anchor{gnat_rm/the_gnat_library gnat-random-numbers-g-rannum-ads}@anchor{38e}@anchor{gnat_rm/the_gnat_library id89}@anchor{38f} @section @code{GNAT.Random_Numbers} (@code{g-rannum.ads}) @@ -24419,7 +24446,7 @@ Provides random number capabilities which extend those available in the standard Ada library and are more convenient to use. @node GNAT Regexp g-regexp ads,GNAT Registry g-regist ads,GNAT Random_Numbers g-rannum ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-regexp-g-regexp-ads}@anchor{262}@anchor{gnat_rm/the_gnat_library id90}@anchor{38d} +@anchor{gnat_rm/the_gnat_library gnat-regexp-g-regexp-ads}@anchor{265}@anchor{gnat_rm/the_gnat_library id90}@anchor{390} @section @code{GNAT.Regexp} (@code{g-regexp.ads}) @@ -24435,7 +24462,7 @@ simplest of the three pattern matching packages provided, and is particularly suitable for ‘file globbing’ applications. @node GNAT Registry g-regist ads,GNAT Regpat g-regpat ads,GNAT Regexp g-regexp ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-registry-g-regist-ads}@anchor{38e}@anchor{gnat_rm/the_gnat_library id91}@anchor{38f} +@anchor{gnat_rm/the_gnat_library gnat-registry-g-regist-ads}@anchor{391}@anchor{gnat_rm/the_gnat_library id91}@anchor{392} @section @code{GNAT.Registry} (@code{g-regist.ads}) @@ -24449,7 +24476,7 @@ registry API, but at a lower level of abstraction, refer to the Win32.Winreg package provided with the Win32Ada binding @node GNAT Regpat g-regpat ads,GNAT Rewrite_Data g-rewdat ads,GNAT Registry g-regist ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-regpat-g-regpat-ads}@anchor{390}@anchor{gnat_rm/the_gnat_library id92}@anchor{391} +@anchor{gnat_rm/the_gnat_library gnat-regpat-g-regpat-ads}@anchor{393}@anchor{gnat_rm/the_gnat_library id92}@anchor{394} @section @code{GNAT.Regpat} (@code{g-regpat.ads}) @@ -24464,7 +24491,7 @@ from the original V7 style regular expression library written in C by Henry Spencer (and binary compatible with this C library). @node GNAT Rewrite_Data g-rewdat ads,GNAT Secondary_Stack_Info g-sestin ads,GNAT Regpat g-regpat ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-rewrite-data-g-rewdat-ads}@anchor{392}@anchor{gnat_rm/the_gnat_library id93}@anchor{393} +@anchor{gnat_rm/the_gnat_library gnat-rewrite-data-g-rewdat-ads}@anchor{395}@anchor{gnat_rm/the_gnat_library id93}@anchor{396} @section @code{GNAT.Rewrite_Data} (@code{g-rewdat.ads}) @@ -24478,7 +24505,7 @@ full content to be processed is not loaded into memory all at once. This makes this interface usable for large files or socket streams. 
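The glob-style matching of @code{GNAT.Regexp}, described a few entries above, might be used along the following lines. This is a hedged sketch assuming @code{Compile} and @code{Match} subprograms with a @code{Glob} parameter; the pattern and file name are arbitrary examples:

@example
with GNAT.Regexp; use GNAT.Regexp;
with Ada.Text_IO;

procedure Glob_Demo is
   --  A glob-style pattern; the pattern and file name are placeholders
   R : constant Regexp := Compile ("*.ads", Glob => True);
begin
   if Match ("g-regexp.ads", R) then
      Ada.Text_IO.Put_Line ("name matches *.ads");
   end if;
end Glob_Demo;
@end example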
@node GNAT Secondary_Stack_Info g-sestin ads,GNAT Semaphores g-semaph ads,GNAT Rewrite_Data g-rewdat ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-secondary-stack-info-g-sestin-ads}@anchor{394}@anchor{gnat_rm/the_gnat_library id94}@anchor{395} +@anchor{gnat_rm/the_gnat_library gnat-secondary-stack-info-g-sestin-ads}@anchor{397}@anchor{gnat_rm/the_gnat_library id94}@anchor{398} @section @code{GNAT.Secondary_Stack_Info} (@code{g-sestin.ads}) @@ -24490,7 +24517,7 @@ Provides the capability to query the high water mark of the current task’s secondary stack. @node GNAT Semaphores g-semaph ads,GNAT Serial_Communications g-sercom ads,GNAT Secondary_Stack_Info g-sestin ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-semaphores-g-semaph-ads}@anchor{396}@anchor{gnat_rm/the_gnat_library id95}@anchor{397} +@anchor{gnat_rm/the_gnat_library gnat-semaphores-g-semaph-ads}@anchor{399}@anchor{gnat_rm/the_gnat_library id95}@anchor{39a} @section @code{GNAT.Semaphores} (@code{g-semaph.ads}) @@ -24501,7 +24528,7 @@ secondary stack. Provides classic counting and binary semaphores using protected types. @node GNAT Serial_Communications g-sercom ads,GNAT SHA1 g-sha1 ads,GNAT Semaphores g-semaph ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-serial-communications-g-sercom-ads}@anchor{398}@anchor{gnat_rm/the_gnat_library id96}@anchor{399} +@anchor{gnat_rm/the_gnat_library gnat-serial-communications-g-sercom-ads}@anchor{39b}@anchor{gnat_rm/the_gnat_library id96}@anchor{39c} @section @code{GNAT.Serial_Communications} (@code{g-sercom.ads}) @@ -24513,7 +24540,7 @@ Provides a simple interface to send and receive data over a serial port. This is only supported on GNU/Linux and Windows. @node GNAT SHA1 g-sha1 ads,GNAT SHA224 g-sha224 ads,GNAT Serial_Communications g-sercom ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-sha1-g-sha1-ads}@anchor{39a}@anchor{gnat_rm/the_gnat_library id97}@anchor{39b} +@anchor{gnat_rm/the_gnat_library gnat-sha1-g-sha1-ads}@anchor{39d}@anchor{gnat_rm/the_gnat_library id97}@anchor{39e} @section @code{GNAT.SHA1} (@code{g-sha1.ads}) @@ -24526,7 +24553,7 @@ and RFC 3174, and the HMAC-SHA1 message authentication function as described in RFC 2104 and FIPS PUB 198. @node GNAT SHA224 g-sha224 ads,GNAT SHA256 g-sha256 ads,GNAT SHA1 g-sha1 ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-sha224-g-sha224-ads}@anchor{39c}@anchor{gnat_rm/the_gnat_library id98}@anchor{39d} +@anchor{gnat_rm/the_gnat_library gnat-sha224-g-sha224-ads}@anchor{39f}@anchor{gnat_rm/the_gnat_library id98}@anchor{3a0} @section @code{GNAT.SHA224} (@code{g-sha224.ads}) @@ -24539,7 +24566,7 @@ and the HMAC-SHA224 message authentication function as described in RFC 2104 and FIPS PUB 198. @node GNAT SHA256 g-sha256 ads,GNAT SHA384 g-sha384 ads,GNAT SHA224 g-sha224 ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-sha256-g-sha256-ads}@anchor{39e}@anchor{gnat_rm/the_gnat_library id99}@anchor{39f} +@anchor{gnat_rm/the_gnat_library gnat-sha256-g-sha256-ads}@anchor{3a1}@anchor{gnat_rm/the_gnat_library id99}@anchor{3a2} @section @code{GNAT.SHA256} (@code{g-sha256.ads}) @@ -24552,7 +24579,7 @@ and the HMAC-SHA256 message authentication function as described in RFC 2104 and FIPS PUB 198. 
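A minimal sketch of the hashing interface described above, assuming the convenience @code{Digest} function that takes a @code{String} and returns the message digest as a fixed-length hexadecimal string (the input text is arbitrary):

@example
with GNAT.SHA256;
with Ada.Text_IO;

procedure Hash_Demo is
begin
   --  Digest is assumed to return the hash as a hexadecimal string
   Ada.Text_IO.Put_Line (GNAT.SHA256.Digest ("The quick brown fox"));
end Hash_Demo;
@end example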
@node GNAT SHA384 g-sha384 ads,GNAT SHA512 g-sha512 ads,GNAT SHA256 g-sha256 ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-sha384-g-sha384-ads}@anchor{3a0}@anchor{gnat_rm/the_gnat_library id100}@anchor{3a1} +@anchor{gnat_rm/the_gnat_library gnat-sha384-g-sha384-ads}@anchor{3a3}@anchor{gnat_rm/the_gnat_library id100}@anchor{3a4} @section @code{GNAT.SHA384} (@code{g-sha384.ads}) @@ -24565,7 +24592,7 @@ and the HMAC-SHA384 message authentication function as described in RFC 2104 and FIPS PUB 198. @node GNAT SHA512 g-sha512 ads,GNAT Signals g-signal ads,GNAT SHA384 g-sha384 ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-sha512-g-sha512-ads}@anchor{3a2}@anchor{gnat_rm/the_gnat_library id101}@anchor{3a3} +@anchor{gnat_rm/the_gnat_library gnat-sha512-g-sha512-ads}@anchor{3a5}@anchor{gnat_rm/the_gnat_library id101}@anchor{3a6} @section @code{GNAT.SHA512} (@code{g-sha512.ads}) @@ -24578,7 +24605,7 @@ and the HMAC-SHA512 message authentication function as described in RFC 2104 and FIPS PUB 198. @node GNAT Signals g-signal ads,GNAT Sockets g-socket ads,GNAT SHA512 g-sha512 ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-signals-g-signal-ads}@anchor{3a4}@anchor{gnat_rm/the_gnat_library id102}@anchor{3a5} +@anchor{gnat_rm/the_gnat_library gnat-signals-g-signal-ads}@anchor{3a7}@anchor{gnat_rm/the_gnat_library id102}@anchor{3a8} @section @code{GNAT.Signals} (@code{g-signal.ads}) @@ -24590,7 +24617,7 @@ Provides the ability to manipulate the blocked status of signals on supported targets. @node GNAT Sockets g-socket ads,GNAT Source_Info g-souinf ads,GNAT Signals g-signal ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-sockets-g-socket-ads}@anchor{3a6}@anchor{gnat_rm/the_gnat_library id103}@anchor{3a7} +@anchor{gnat_rm/the_gnat_library gnat-sockets-g-socket-ads}@anchor{3a9}@anchor{gnat_rm/the_gnat_library id103}@anchor{3aa} @section @code{GNAT.Sockets} (@code{g-socket.ads}) @@ -24605,7 +24632,7 @@ on all native GNAT ports and on VxWorks cross ports. It is not implemented for the LynxOS cross port. @node GNAT Source_Info g-souinf ads,GNAT Spelling_Checker g-speche ads,GNAT Sockets g-socket ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-source-info-g-souinf-ads}@anchor{3a8}@anchor{gnat_rm/the_gnat_library id104}@anchor{3a9} +@anchor{gnat_rm/the_gnat_library gnat-source-info-g-souinf-ads}@anchor{3ab}@anchor{gnat_rm/the_gnat_library id104}@anchor{3ac} @section @code{GNAT.Source_Info} (@code{g-souinf.ads}) @@ -24619,7 +24646,7 @@ subprograms yielding the date and time of the current compilation (like the C macros @code{__DATE__} and @code{__TIME__}) @node GNAT Spelling_Checker g-speche ads,GNAT Spelling_Checker_Generic g-spchge ads,GNAT Source_Info g-souinf ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-spelling-checker-g-speche-ads}@anchor{3aa}@anchor{gnat_rm/the_gnat_library id105}@anchor{3ab} +@anchor{gnat_rm/the_gnat_library gnat-spelling-checker-g-speche-ads}@anchor{3ad}@anchor{gnat_rm/the_gnat_library id105}@anchor{3ae} @section @code{GNAT.Spelling_Checker} (@code{g-speche.ads}) @@ -24631,7 +24658,7 @@ Provides a function for determining whether one string is a plausible near misspelling of another string. 
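A minimal sketch of @code{GNAT.Spelling_Checker}, assuming the @code{Is_Bad_Spelling_Of} function that takes the string found and the string expected; the sample words are arbitrary:

@example
with GNAT.Spelling_Checker;
with Ada.Text_IO;

procedure Spelling_Demo is
begin
   if GNAT.Spelling_Checker.Is_Bad_Spelling_Of ("lenght", "length") then
      Ada.Text_IO.Put_Line ("Did you mean ""length""?");
   end if;
end Spelling_Demo;
@end example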
@node GNAT Spelling_Checker_Generic g-spchge ads,GNAT Spitbol Patterns g-spipat ads,GNAT Spelling_Checker g-speche ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-spelling-checker-generic-g-spchge-ads}@anchor{3ac}@anchor{gnat_rm/the_gnat_library id106}@anchor{3ad} +@anchor{gnat_rm/the_gnat_library gnat-spelling-checker-generic-g-spchge-ads}@anchor{3af}@anchor{gnat_rm/the_gnat_library id106}@anchor{3b0} @section @code{GNAT.Spelling_Checker_Generic} (@code{g-spchge.ads}) @@ -24644,7 +24671,7 @@ determining whether one string is a plausible near misspelling of another string. @node GNAT Spitbol Patterns g-spipat ads,GNAT Spitbol g-spitbo ads,GNAT Spelling_Checker_Generic g-spchge ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-spitbol-patterns-g-spipat-ads}@anchor{3ae}@anchor{gnat_rm/the_gnat_library id107}@anchor{3af} +@anchor{gnat_rm/the_gnat_library gnat-spitbol-patterns-g-spipat-ads}@anchor{3b1}@anchor{gnat_rm/the_gnat_library id107}@anchor{3b2} @section @code{GNAT.Spitbol.Patterns} (@code{g-spipat.ads}) @@ -24660,7 +24687,7 @@ the SNOBOL4 dynamic pattern construction and matching capabilities, using the efficient algorithm developed by Robert Dewar for the SPITBOL system. @node GNAT Spitbol g-spitbo ads,GNAT Spitbol Table_Boolean g-sptabo ads,GNAT Spitbol Patterns g-spipat ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-spitbol-g-spitbo-ads}@anchor{3b0}@anchor{gnat_rm/the_gnat_library id108}@anchor{3b1} +@anchor{gnat_rm/the_gnat_library gnat-spitbol-g-spitbo-ads}@anchor{3b3}@anchor{gnat_rm/the_gnat_library id108}@anchor{3b4} @section @code{GNAT.Spitbol} (@code{g-spitbo.ads}) @@ -24675,7 +24702,7 @@ useful for constructing arbitrary mappings from strings in the style of the SNOBOL4 TABLE function. @node GNAT Spitbol Table_Boolean g-sptabo ads,GNAT Spitbol Table_Integer g-sptain ads,GNAT Spitbol g-spitbo ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-spitbol-table-boolean-g-sptabo-ads}@anchor{3b2}@anchor{gnat_rm/the_gnat_library id109}@anchor{3b3} +@anchor{gnat_rm/the_gnat_library gnat-spitbol-table-boolean-g-sptabo-ads}@anchor{3b5}@anchor{gnat_rm/the_gnat_library id109}@anchor{3b6} @section @code{GNAT.Spitbol.Table_Boolean} (@code{g-sptabo.ads}) @@ -24690,7 +24717,7 @@ for type @code{Standard.Boolean}, giving an implementation of sets of string values. @node GNAT Spitbol Table_Integer g-sptain ads,GNAT Spitbol Table_VString g-sptavs ads,GNAT Spitbol Table_Boolean g-sptabo ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-spitbol-table-integer-g-sptain-ads}@anchor{3b4}@anchor{gnat_rm/the_gnat_library id110}@anchor{3b5} +@anchor{gnat_rm/the_gnat_library gnat-spitbol-table-integer-g-sptain-ads}@anchor{3b7}@anchor{gnat_rm/the_gnat_library id110}@anchor{3b8} @section @code{GNAT.Spitbol.Table_Integer} (@code{g-sptain.ads}) @@ -24707,7 +24734,7 @@ for type @code{Standard.Integer}, giving an implementation of maps from string to integer values. @node GNAT Spitbol Table_VString g-sptavs ads,GNAT SSE g-sse ads,GNAT Spitbol Table_Integer g-sptain ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-spitbol-table-vstring-g-sptavs-ads}@anchor{3b6}@anchor{gnat_rm/the_gnat_library id111}@anchor{3b7} +@anchor{gnat_rm/the_gnat_library gnat-spitbol-table-vstring-g-sptavs-ads}@anchor{3b9}@anchor{gnat_rm/the_gnat_library id111}@anchor{3ba} @section @code{GNAT.Spitbol.Table_VString} (@code{g-sptavs.ads}) @@ -24724,7 +24751,7 @@ a variable length string type, giving an implementation of general maps from strings to strings. 
@node GNAT SSE g-sse ads,GNAT SSE Vector_Types g-ssvety ads,GNAT Spitbol Table_VString g-sptavs ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-sse-g-sse-ads}@anchor{3b8}@anchor{gnat_rm/the_gnat_library id112}@anchor{3b9} +@anchor{gnat_rm/the_gnat_library gnat-sse-g-sse-ads}@anchor{3bb}@anchor{gnat_rm/the_gnat_library id112}@anchor{3bc} @section @code{GNAT.SSE} (@code{g-sse.ads}) @@ -24736,7 +24763,7 @@ targets. It exposes vector component types together with a general introduction to the binding contents and use. @node GNAT SSE Vector_Types g-ssvety ads,GNAT String_Hash g-strhas ads,GNAT SSE g-sse ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-sse-vector-types-g-ssvety-ads}@anchor{3ba}@anchor{gnat_rm/the_gnat_library id113}@anchor{3bb} +@anchor{gnat_rm/the_gnat_library gnat-sse-vector-types-g-ssvety-ads}@anchor{3bd}@anchor{gnat_rm/the_gnat_library id113}@anchor{3be} @section @code{GNAT.SSE.Vector_Types} (@code{g-ssvety.ads}) @@ -24745,7 +24772,7 @@ introduction to the binding contents and use. SSE vector types for use with SSE related intrinsics. @node GNAT String_Hash g-strhas ads,GNAT Strings g-string ads,GNAT SSE Vector_Types g-ssvety ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-string-hash-g-strhas-ads}@anchor{3bc}@anchor{gnat_rm/the_gnat_library id114}@anchor{3bd} +@anchor{gnat_rm/the_gnat_library gnat-string-hash-g-strhas-ads}@anchor{3bf}@anchor{gnat_rm/the_gnat_library id114}@anchor{3c0} @section @code{GNAT.String_Hash} (@code{g-strhas.ads}) @@ -24757,7 +24784,7 @@ Provides a generic hash function working on arrays of scalars. Both the scalar type and the hash result type are parameters. @node GNAT Strings g-string ads,GNAT String_Split g-strspl ads,GNAT String_Hash g-strhas ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-strings-g-string-ads}@anchor{3be}@anchor{gnat_rm/the_gnat_library id115}@anchor{3bf} +@anchor{gnat_rm/the_gnat_library gnat-strings-g-string-ads}@anchor{3c1}@anchor{gnat_rm/the_gnat_library id115}@anchor{3c2} @section @code{GNAT.Strings} (@code{g-string.ads}) @@ -24767,7 +24794,7 @@ Common String access types and related subprograms. Basically it defines a string access and an array of string access types. @node GNAT String_Split g-strspl ads,GNAT Table g-table ads,GNAT Strings g-string ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-string-split-g-strspl-ads}@anchor{3c0}@anchor{gnat_rm/the_gnat_library id116}@anchor{3c1} +@anchor{gnat_rm/the_gnat_library gnat-string-split-g-strspl-ads}@anchor{3c3}@anchor{gnat_rm/the_gnat_library id116}@anchor{3c4} @section @code{GNAT.String_Split} (@code{g-strspl.ads}) @@ -24781,7 +24808,7 @@ to the resulting slices. This package is instantiated from @code{GNAT.Array_Split}. @node GNAT Table g-table ads,GNAT Task_Lock g-tasloc ads,GNAT String_Split g-strspl ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-table-g-table-ads}@anchor{3c2}@anchor{gnat_rm/the_gnat_library id117}@anchor{3c3} +@anchor{gnat_rm/the_gnat_library gnat-table-g-table-ads}@anchor{3c5}@anchor{gnat_rm/the_gnat_library id117}@anchor{3c6} @section @code{GNAT.Table} (@code{g-table.ads}) @@ -24801,7 +24828,7 @@ while an instantiation of @code{GNAT.Dynamic_Tables} creates a type that can be used to define dynamic instances of the table. 
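As a hedged sketch of @code{GNAT.String_Split}, described a few entries above, assuming the @code{Create}, @code{Slice_Count} and @code{Slice} operations inherited from the @code{GNAT.Array_Split} instantiation (the input string and separator are arbitrary):

@example
with GNAT.String_Split; use GNAT.String_Split;
with Ada.Text_IO;

procedure Split_Demo is
   Subs : Slice_Set;
begin
   Create (Subs, "one,two,three", ",");
   for J in 1 .. Slice_Count (Subs) loop
      Ada.Text_IO.Put_Line (Slice (Subs, J));
   end loop;
end Split_Demo;
@end example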
@node GNAT Task_Lock g-tasloc ads,GNAT Time_Stamp g-timsta ads,GNAT Table g-table ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-task-lock-g-tasloc-ads}@anchor{3c4}@anchor{gnat_rm/the_gnat_library id118}@anchor{3c5} +@anchor{gnat_rm/the_gnat_library gnat-task-lock-g-tasloc-ads}@anchor{3c7}@anchor{gnat_rm/the_gnat_library id118}@anchor{3c8} @section @code{GNAT.Task_Lock} (@code{g-tasloc.ads}) @@ -24818,7 +24845,7 @@ single global task lock. Appropriate for use in situations where contention between tasks is very rarely expected. @node GNAT Time_Stamp g-timsta ads,GNAT Threads g-thread ads,GNAT Task_Lock g-tasloc ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-time-stamp-g-timsta-ads}@anchor{3c6}@anchor{gnat_rm/the_gnat_library id119}@anchor{3c7} +@anchor{gnat_rm/the_gnat_library gnat-time-stamp-g-timsta-ads}@anchor{3c9}@anchor{gnat_rm/the_gnat_library id119}@anchor{3ca} @section @code{GNAT.Time_Stamp} (@code{g-timsta.ads}) @@ -24833,7 +24860,7 @@ represents the current date and time in ISO 8601 format. This is a very simple routine with minimal code and there are no dependencies on any other unit. @node GNAT Threads g-thread ads,GNAT Traceback g-traceb ads,GNAT Time_Stamp g-timsta ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-threads-g-thread-ads}@anchor{3c8}@anchor{gnat_rm/the_gnat_library id120}@anchor{3c9} +@anchor{gnat_rm/the_gnat_library gnat-threads-g-thread-ads}@anchor{3cb}@anchor{gnat_rm/the_gnat_library id120}@anchor{3cc} @section @code{GNAT.Threads} (@code{g-thread.ads}) @@ -24850,7 +24877,7 @@ further details if your program has threads that are created by a non-Ada environment which then accesses Ada code. @node GNAT Traceback g-traceb ads,GNAT Traceback Symbolic g-trasym ads,GNAT Threads g-thread ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-traceback-g-traceb-ads}@anchor{3ca}@anchor{gnat_rm/the_gnat_library id121}@anchor{3cb} +@anchor{gnat_rm/the_gnat_library gnat-traceback-g-traceb-ads}@anchor{3cd}@anchor{gnat_rm/the_gnat_library id121}@anchor{3ce} @section @code{GNAT.Traceback} (@code{g-traceb.ads}) @@ -24862,7 +24889,7 @@ Provides a facility for obtaining non-symbolic traceback information, useful in various debugging situations. @node GNAT Traceback Symbolic g-trasym ads,GNAT UTF_32 g-utf_32 ads,GNAT Traceback g-traceb ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-traceback-symbolic-g-trasym-ads}@anchor{3cc}@anchor{gnat_rm/the_gnat_library id122}@anchor{3cd} +@anchor{gnat_rm/the_gnat_library gnat-traceback-symbolic-g-trasym-ads}@anchor{3cf}@anchor{gnat_rm/the_gnat_library id122}@anchor{3d0} @section @code{GNAT.Traceback.Symbolic} (@code{g-trasym.ads}) @@ -24871,7 +24898,7 @@ in various debugging situations. @geindex Trace back facilities @node GNAT UTF_32 g-utf_32 ads,GNAT UTF_32_Spelling_Checker g-u3spch ads,GNAT Traceback Symbolic g-trasym ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-utf-32-g-utf-32-ads}@anchor{3ce}@anchor{gnat_rm/the_gnat_library id123}@anchor{3cf} +@anchor{gnat_rm/the_gnat_library gnat-utf-32-g-utf-32-ads}@anchor{3d1}@anchor{gnat_rm/the_gnat_library id123}@anchor{3d2} @section @code{GNAT.UTF_32} (@code{g-utf_32.ads}) @@ -24890,7 +24917,7 @@ lower case to upper case fold routine corresponding to the Ada 2005 rules for identifier equivalence. 
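The simple locking interface of @code{GNAT.Task_Lock}, described at the start of this group, might be used as in the following sketch; the critical-section body is a placeholder, and the explicit unlock in the handler keeps the global lock released if an exception escapes:

@example
with GNAT.Task_Lock;

procedure Critical_Update is
begin
   GNAT.Task_Lock.Lock;
   begin
      --  Update shared, non-task-safe state here
      null;
   exception
      when others =>
         GNAT.Task_Lock.Unlock;
         raise;
   end;
   GNAT.Task_Lock.Unlock;
end Critical_Update;
@end example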
@node GNAT UTF_32_Spelling_Checker g-u3spch ads,GNAT Wide_Spelling_Checker g-wispch ads,GNAT UTF_32 g-utf_32 ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-utf-32-spelling-checker-g-u3spch-ads}@anchor{3d0}@anchor{gnat_rm/the_gnat_library id124}@anchor{3d1} +@anchor{gnat_rm/the_gnat_library gnat-utf-32-spelling-checker-g-u3spch-ads}@anchor{3d3}@anchor{gnat_rm/the_gnat_library id124}@anchor{3d4} @section @code{GNAT.UTF_32_Spelling_Checker} (@code{g-u3spch.ads}) @@ -24903,7 +24930,7 @@ near misspelling of another wide wide string, where the strings are represented using the UTF_32_String type defined in System.Wch_Cnv. @node GNAT Wide_Spelling_Checker g-wispch ads,GNAT Wide_String_Split g-wistsp ads,GNAT UTF_32_Spelling_Checker g-u3spch ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-wide-spelling-checker-g-wispch-ads}@anchor{3d2}@anchor{gnat_rm/the_gnat_library id125}@anchor{3d3} +@anchor{gnat_rm/the_gnat_library gnat-wide-spelling-checker-g-wispch-ads}@anchor{3d5}@anchor{gnat_rm/the_gnat_library id125}@anchor{3d6} @section @code{GNAT.Wide_Spelling_Checker} (@code{g-wispch.ads}) @@ -24915,7 +24942,7 @@ Provides a function for determining whether one wide string is a plausible near misspelling of another wide string. @node GNAT Wide_String_Split g-wistsp ads,GNAT Wide_Wide_Spelling_Checker g-zspche ads,GNAT Wide_Spelling_Checker g-wispch ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-wide-string-split-g-wistsp-ads}@anchor{3d4}@anchor{gnat_rm/the_gnat_library id126}@anchor{3d5} +@anchor{gnat_rm/the_gnat_library gnat-wide-string-split-g-wistsp-ads}@anchor{3d7}@anchor{gnat_rm/the_gnat_library id126}@anchor{3d8} @section @code{GNAT.Wide_String_Split} (@code{g-wistsp.ads}) @@ -24929,7 +24956,7 @@ to the resulting slices. This package is instantiated from @code{GNAT.Array_Split}. @node GNAT Wide_Wide_Spelling_Checker g-zspche ads,GNAT Wide_Wide_String_Split g-zistsp ads,GNAT Wide_String_Split g-wistsp ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-wide-wide-spelling-checker-g-zspche-ads}@anchor{3d6}@anchor{gnat_rm/the_gnat_library id127}@anchor{3d7} +@anchor{gnat_rm/the_gnat_library gnat-wide-wide-spelling-checker-g-zspche-ads}@anchor{3d9}@anchor{gnat_rm/the_gnat_library id127}@anchor{3da} @section @code{GNAT.Wide_Wide_Spelling_Checker} (@code{g-zspche.ads}) @@ -24941,7 +24968,7 @@ Provides a function for determining whether one wide wide string is a plausible near misspelling of another wide wide string. @node GNAT Wide_Wide_String_Split g-zistsp ads,Interfaces C Extensions i-cexten ads,GNAT Wide_Wide_Spelling_Checker g-zspche ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library gnat-wide-wide-string-split-g-zistsp-ads}@anchor{3d8}@anchor{gnat_rm/the_gnat_library id128}@anchor{3d9} +@anchor{gnat_rm/the_gnat_library gnat-wide-wide-string-split-g-zistsp-ads}@anchor{3db}@anchor{gnat_rm/the_gnat_library id128}@anchor{3dc} @section @code{GNAT.Wide_Wide_String_Split} (@code{g-zistsp.ads}) @@ -24955,7 +24982,7 @@ to the resulting slices. This package is instantiated from @code{GNAT.Array_Split}. 
@node Interfaces C Extensions i-cexten ads,Interfaces C Streams i-cstrea ads,GNAT Wide_Wide_String_Split g-zistsp ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library id129}@anchor{3da}@anchor{gnat_rm/the_gnat_library interfaces-c-extensions-i-cexten-ads}@anchor{3db} +@anchor{gnat_rm/the_gnat_library id129}@anchor{3dd}@anchor{gnat_rm/the_gnat_library interfaces-c-extensions-i-cexten-ads}@anchor{3de} @section @code{Interfaces.C.Extensions} (@code{i-cexten.ads}) @@ -24966,7 +24993,7 @@ for use with either manually or automatically generated bindings to C libraries. @node Interfaces C Streams i-cstrea ads,Interfaces Packed_Decimal i-pacdec ads,Interfaces C Extensions i-cexten ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library id130}@anchor{3dc}@anchor{gnat_rm/the_gnat_library interfaces-c-streams-i-cstrea-ads}@anchor{3dd} +@anchor{gnat_rm/the_gnat_library id130}@anchor{3df}@anchor{gnat_rm/the_gnat_library interfaces-c-streams-i-cstrea-ads}@anchor{3e0} @section @code{Interfaces.C.Streams} (@code{i-cstrea.ads}) @@ -24979,7 +25006,7 @@ This package is a binding for the most commonly used operations on C streams. @node Interfaces Packed_Decimal i-pacdec ads,Interfaces VxWorks i-vxwork ads,Interfaces C Streams i-cstrea ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library id131}@anchor{3de}@anchor{gnat_rm/the_gnat_library interfaces-packed-decimal-i-pacdec-ads}@anchor{3df} +@anchor{gnat_rm/the_gnat_library id131}@anchor{3e1}@anchor{gnat_rm/the_gnat_library interfaces-packed-decimal-i-pacdec-ads}@anchor{3e2} @section @code{Interfaces.Packed_Decimal} (@code{i-pacdec.ads}) @@ -24994,7 +25021,7 @@ from a packed decimal format compatible with that used on IBM mainframes. @node Interfaces VxWorks i-vxwork ads,Interfaces VxWorks Int_Connection i-vxinco ads,Interfaces Packed_Decimal i-pacdec ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library id132}@anchor{3e0}@anchor{gnat_rm/the_gnat_library interfaces-vxworks-i-vxwork-ads}@anchor{3e1} +@anchor{gnat_rm/the_gnat_library id132}@anchor{3e3}@anchor{gnat_rm/the_gnat_library interfaces-vxworks-i-vxwork-ads}@anchor{3e4} @section @code{Interfaces.VxWorks} (@code{i-vxwork.ads}) @@ -25010,7 +25037,7 @@ In particular, it interfaces with the VxWorks hardware interrupt facilities. @node Interfaces VxWorks Int_Connection i-vxinco ads,Interfaces VxWorks IO i-vxwoio ads,Interfaces VxWorks i-vxwork ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library id133}@anchor{3e2}@anchor{gnat_rm/the_gnat_library interfaces-vxworks-int-connection-i-vxinco-ads}@anchor{3e3} +@anchor{gnat_rm/the_gnat_library id133}@anchor{3e5}@anchor{gnat_rm/the_gnat_library interfaces-vxworks-int-connection-i-vxinco-ads}@anchor{3e6} @section @code{Interfaces.VxWorks.Int_Connection} (@code{i-vxinco.ads}) @@ -25026,7 +25053,7 @@ intConnect() with a custom routine for installing interrupt handlers. @node Interfaces VxWorks IO i-vxwoio ads,System Address_Image s-addima ads,Interfaces VxWorks Int_Connection i-vxinco ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library id134}@anchor{3e4}@anchor{gnat_rm/the_gnat_library interfaces-vxworks-io-i-vxwoio-ads}@anchor{3e5} +@anchor{gnat_rm/the_gnat_library id134}@anchor{3e7}@anchor{gnat_rm/the_gnat_library interfaces-vxworks-io-i-vxwoio-ads}@anchor{3e8} @section @code{Interfaces.VxWorks.IO} (@code{i-vxwoio.ads}) @@ -25049,7 +25076,7 @@ function codes. A particular use of this package is to enable the use of Get_Immediate under VxWorks. 
@node System Address_Image s-addima ads,System Assertions s-assert ads,Interfaces VxWorks IO i-vxwoio ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library id135}@anchor{3e6}@anchor{gnat_rm/the_gnat_library system-address-image-s-addima-ads}@anchor{3e7} +@anchor{gnat_rm/the_gnat_library id135}@anchor{3e9}@anchor{gnat_rm/the_gnat_library system-address-image-s-addima-ads}@anchor{3ea} @section @code{System.Address_Image} (@code{s-addima.ads}) @@ -25065,7 +25092,7 @@ function that gives an (implementation dependent) string which identifies an address. @node System Assertions s-assert ads,System Atomic_Counters s-atocou ads,System Address_Image s-addima ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library id136}@anchor{3e8}@anchor{gnat_rm/the_gnat_library system-assertions-s-assert-ads}@anchor{3e9} +@anchor{gnat_rm/the_gnat_library id136}@anchor{3eb}@anchor{gnat_rm/the_gnat_library system-assertions-s-assert-ads}@anchor{3ec} @section @code{System.Assertions} (@code{s-assert.ads}) @@ -25081,7 +25108,7 @@ by an run-time assertion failure, as well as the routine that is used internally to raise this assertion. @node System Atomic_Counters s-atocou ads,System Memory s-memory ads,System Assertions s-assert ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library id137}@anchor{3ea}@anchor{gnat_rm/the_gnat_library system-atomic-counters-s-atocou-ads}@anchor{3eb} +@anchor{gnat_rm/the_gnat_library id137}@anchor{3ed}@anchor{gnat_rm/the_gnat_library system-atomic-counters-s-atocou-ads}@anchor{3ee} @section @code{System.Atomic_Counters} (@code{s-atocou.ads}) @@ -25095,7 +25122,7 @@ on most targets, including all Alpha, AARCH64, ARM, ia64, PowerPC, SPARC V9, x86, and x86_64 platforms. @node System Memory s-memory ads,System Multiprocessors s-multip ads,System Atomic_Counters s-atocou ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library id138}@anchor{3ec}@anchor{gnat_rm/the_gnat_library system-memory-s-memory-ads}@anchor{3ed} +@anchor{gnat_rm/the_gnat_library id138}@anchor{3ef}@anchor{gnat_rm/the_gnat_library system-memory-s-memory-ads}@anchor{3f0} @section @code{System.Memory} (@code{s-memory.ads}) @@ -25113,7 +25140,7 @@ calls to this unit may be made for low level allocation uses (for example see the body of @code{GNAT.Tables}). @node System Multiprocessors s-multip ads,System Multiprocessors Dispatching_Domains s-mudido ads,System Memory s-memory ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library id139}@anchor{3ee}@anchor{gnat_rm/the_gnat_library system-multiprocessors-s-multip-ads}@anchor{3ef} +@anchor{gnat_rm/the_gnat_library id139}@anchor{3f1}@anchor{gnat_rm/the_gnat_library system-multiprocessors-s-multip-ads}@anchor{3f2} @section @code{System.Multiprocessors} (@code{s-multip.ads}) @@ -25126,7 +25153,7 @@ in GNAT we also make it available in Ada 95 and Ada 2005 (where it is technically an implementation-defined addition). 
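A minimal sketch using @code{System.Multiprocessors} as described above, via the standard @code{Number_Of_CPUs} function defined for this package by the Ada 2012 Reference Manual:

@example
with System.Multiprocessors;
with Ada.Text_IO;

procedure Show_CPU_Count is
   use System.Multiprocessors;
begin
   Ada.Text_IO.Put_Line ("Available CPUs:" & CPU'Image (Number_Of_CPUs));
end Show_CPU_Count;
@end example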
@node System Multiprocessors Dispatching_Domains s-mudido ads,System Partition_Interface s-parint ads,System Multiprocessors s-multip ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library id140}@anchor{3f0}@anchor{gnat_rm/the_gnat_library system-multiprocessors-dispatching-domains-s-mudido-ads}@anchor{3f1} +@anchor{gnat_rm/the_gnat_library id140}@anchor{3f3}@anchor{gnat_rm/the_gnat_library system-multiprocessors-dispatching-domains-s-mudido-ads}@anchor{3f4} @section @code{System.Multiprocessors.Dispatching_Domains} (@code{s-mudido.ads}) @@ -25139,7 +25166,7 @@ in GNAT we also make it available in Ada 95 and Ada 2005 (where it is technically an implementation-defined addition). @node System Partition_Interface s-parint ads,System Pool_Global s-pooglo ads,System Multiprocessors Dispatching_Domains s-mudido ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library id141}@anchor{3f2}@anchor{gnat_rm/the_gnat_library system-partition-interface-s-parint-ads}@anchor{3f3} +@anchor{gnat_rm/the_gnat_library id141}@anchor{3f5}@anchor{gnat_rm/the_gnat_library system-partition-interface-s-parint-ads}@anchor{3f6} @section @code{System.Partition_Interface} (@code{s-parint.ads}) @@ -25152,7 +25179,7 @@ is used primarily in a distribution context when using Annex E with @code{GLADE}. @node System Pool_Global s-pooglo ads,System Pool_Local s-pooloc ads,System Partition_Interface s-parint ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library id142}@anchor{3f4}@anchor{gnat_rm/the_gnat_library system-pool-global-s-pooglo-ads}@anchor{3f5} +@anchor{gnat_rm/the_gnat_library id142}@anchor{3f7}@anchor{gnat_rm/the_gnat_library system-pool-global-s-pooglo-ads}@anchor{3f8} @section @code{System.Pool_Global} (@code{s-pooglo.ads}) @@ -25169,7 +25196,7 @@ declared. It uses malloc/free to allocate/free and does not attempt to do any automatic reclamation. @node System Pool_Local s-pooloc ads,System Restrictions s-restri ads,System Pool_Global s-pooglo ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library id143}@anchor{3f6}@anchor{gnat_rm/the_gnat_library system-pool-local-s-pooloc-ads}@anchor{3f7} +@anchor{gnat_rm/the_gnat_library id143}@anchor{3f9}@anchor{gnat_rm/the_gnat_library system-pool-local-s-pooloc-ads}@anchor{3fa} @section @code{System.Pool_Local} (@code{s-pooloc.ads}) @@ -25186,7 +25213,7 @@ a list of allocated blocks, so that all storage allocated for the pool can be freed automatically when the pool is finalized. @node System Restrictions s-restri ads,System Rident s-rident ads,System Pool_Local s-pooloc ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library id144}@anchor{3f8}@anchor{gnat_rm/the_gnat_library system-restrictions-s-restri-ads}@anchor{3f9} +@anchor{gnat_rm/the_gnat_library id144}@anchor{3fb}@anchor{gnat_rm/the_gnat_library system-restrictions-s-restri-ads}@anchor{3fc} @section @code{System.Restrictions} (@code{s-restri.ads}) @@ -25202,7 +25229,7 @@ compiler determined information on which restrictions are violated by one or more packages in the partition. @node System Rident s-rident ads,System Strings Stream_Ops s-ststop ads,System Restrictions s-restri ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library id145}@anchor{3fa}@anchor{gnat_rm/the_gnat_library system-rident-s-rident-ads}@anchor{3fb} +@anchor{gnat_rm/the_gnat_library id145}@anchor{3fd}@anchor{gnat_rm/the_gnat_library system-rident-s-rident-ads}@anchor{3fe} @section @code{System.Rident} (@code{s-rident.ads}) @@ -25218,7 +25245,7 @@ since the necessary instantiation is included in package System.Restrictions. 
@node System Strings Stream_Ops s-ststop ads,System Unsigned_Types s-unstyp ads,System Rident s-rident ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library id146}@anchor{3fc}@anchor{gnat_rm/the_gnat_library system-strings-stream-ops-s-ststop-ads}@anchor{3fd} +@anchor{gnat_rm/the_gnat_library id146}@anchor{3ff}@anchor{gnat_rm/the_gnat_library system-strings-stream-ops-s-ststop-ads}@anchor{400} @section @code{System.Strings.Stream_Ops} (@code{s-ststop.ads}) @@ -25234,7 +25261,7 @@ stream attributes are applied to string types, but the subprograms in this package can be used directly by application programs. @node System Unsigned_Types s-unstyp ads,System Wch_Cnv s-wchcnv ads,System Strings Stream_Ops s-ststop ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library id147}@anchor{3fe}@anchor{gnat_rm/the_gnat_library system-unsigned-types-s-unstyp-ads}@anchor{3ff} +@anchor{gnat_rm/the_gnat_library id147}@anchor{401}@anchor{gnat_rm/the_gnat_library system-unsigned-types-s-unstyp-ads}@anchor{402} @section @code{System.Unsigned_Types} (@code{s-unstyp.ads}) @@ -25247,7 +25274,7 @@ also contains some related definitions for other specialized types used by the compiler in connection with packed array types. @node System Wch_Cnv s-wchcnv ads,System Wch_Con s-wchcon ads,System Unsigned_Types s-unstyp ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library id148}@anchor{400}@anchor{gnat_rm/the_gnat_library system-wch-cnv-s-wchcnv-ads}@anchor{401} +@anchor{gnat_rm/the_gnat_library id148}@anchor{403}@anchor{gnat_rm/the_gnat_library system-wch-cnv-s-wchcnv-ads}@anchor{404} @section @code{System.Wch_Cnv} (@code{s-wchcnv.ads}) @@ -25268,7 +25295,7 @@ encoding method. It uses definitions in package @code{System.Wch_Con}. @node System Wch_Con s-wchcon ads,,System Wch_Cnv s-wchcnv ads,The GNAT Library -@anchor{gnat_rm/the_gnat_library id149}@anchor{402}@anchor{gnat_rm/the_gnat_library system-wch-con-s-wchcon-ads}@anchor{403} +@anchor{gnat_rm/the_gnat_library id149}@anchor{405}@anchor{gnat_rm/the_gnat_library system-wch-con-s-wchcon-ads}@anchor{406} @section @code{System.Wch_Con} (@code{s-wchcon.ads}) @@ -25280,7 +25307,7 @@ in ordinary strings. These definitions are used by the package @code{System.Wch_Cnv}. @node Interfacing to Other Languages,Specialized Needs Annexes,The GNAT Library,Top -@anchor{gnat_rm/interfacing_to_other_languages doc}@anchor{404}@anchor{gnat_rm/interfacing_to_other_languages id1}@anchor{405}@anchor{gnat_rm/interfacing_to_other_languages interfacing-to-other-languages}@anchor{11} +@anchor{gnat_rm/interfacing_to_other_languages doc}@anchor{407}@anchor{gnat_rm/interfacing_to_other_languages id1}@anchor{408}@anchor{gnat_rm/interfacing_to_other_languages interfacing-to-other-languages}@anchor{11} @chapter Interfacing to Other Languages @@ -25298,7 +25325,7 @@ provided. @end menu @node Interfacing to C,Interfacing to C++,,Interfacing to Other Languages -@anchor{gnat_rm/interfacing_to_other_languages id2}@anchor{406}@anchor{gnat_rm/interfacing_to_other_languages interfacing-to-c}@anchor{407} +@anchor{gnat_rm/interfacing_to_other_languages id2}@anchor{409}@anchor{gnat_rm/interfacing_to_other_languages interfacing-to-c}@anchor{40a} @section Interfacing to C @@ -25438,7 +25465,7 @@ of the length corresponding to the @code{type'Size} value in Ada. 
@end itemize @node Interfacing to C++,Interfacing to COBOL,Interfacing to C,Interfacing to Other Languages -@anchor{gnat_rm/interfacing_to_other_languages id3}@anchor{47}@anchor{gnat_rm/interfacing_to_other_languages id4}@anchor{408} +@anchor{gnat_rm/interfacing_to_other_languages id3}@anchor{47}@anchor{gnat_rm/interfacing_to_other_languages id4}@anchor{40b} @section Interfacing to C++ @@ -25495,7 +25522,7 @@ The @code{External_Name} is the name of the C++ RTTI symbol. You can then cover a specific C++ exception in an exception handler. @node Interfacing to COBOL,Interfacing to Fortran,Interfacing to C++,Interfacing to Other Languages -@anchor{gnat_rm/interfacing_to_other_languages id5}@anchor{409}@anchor{gnat_rm/interfacing_to_other_languages interfacing-to-cobol}@anchor{40a} +@anchor{gnat_rm/interfacing_to_other_languages id5}@anchor{40c}@anchor{gnat_rm/interfacing_to_other_languages interfacing-to-cobol}@anchor{40d} @section Interfacing to COBOL @@ -25503,7 +25530,7 @@ Interfacing to COBOL is achieved as described in section B.4 of the Ada Reference Manual. @node Interfacing to Fortran,Interfacing to non-GNAT Ada code,Interfacing to COBOL,Interfacing to Other Languages -@anchor{gnat_rm/interfacing_to_other_languages id6}@anchor{40b}@anchor{gnat_rm/interfacing_to_other_languages interfacing-to-fortran}@anchor{40c} +@anchor{gnat_rm/interfacing_to_other_languages id6}@anchor{40e}@anchor{gnat_rm/interfacing_to_other_languages interfacing-to-fortran}@anchor{40f} @section Interfacing to Fortran @@ -25513,7 +25540,7 @@ multi-dimensional array causes the array to be stored in column-major order as required for convenient interface to Fortran. @node Interfacing to non-GNAT Ada code,,Interfacing to Fortran,Interfacing to Other Languages -@anchor{gnat_rm/interfacing_to_other_languages id7}@anchor{40d}@anchor{gnat_rm/interfacing_to_other_languages interfacing-to-non-gnat-ada-code}@anchor{40e} +@anchor{gnat_rm/interfacing_to_other_languages id7}@anchor{410}@anchor{gnat_rm/interfacing_to_other_languages interfacing-to-non-gnat-ada-code}@anchor{411} @section Interfacing to non-GNAT Ada code @@ -25537,7 +25564,7 @@ values or simple record types without variants, or simple array types with fixed bounds. @node Specialized Needs Annexes,Implementation of Specific Ada Features,Interfacing to Other Languages,Top -@anchor{gnat_rm/specialized_needs_annexes doc}@anchor{40f}@anchor{gnat_rm/specialized_needs_annexes id1}@anchor{410}@anchor{gnat_rm/specialized_needs_annexes specialized-needs-annexes}@anchor{12} +@anchor{gnat_rm/specialized_needs_annexes doc}@anchor{412}@anchor{gnat_rm/specialized_needs_annexes id1}@anchor{413}@anchor{gnat_rm/specialized_needs_annexes specialized-needs-annexes}@anchor{12} @chapter Specialized Needs Annexes @@ -25578,7 +25605,7 @@ in Ada 2005) is fully implemented. @end table @node Implementation of Specific Ada Features,Implementation of Ada 2012 Features,Specialized Needs Annexes,Top -@anchor{gnat_rm/implementation_of_specific_ada_features doc}@anchor{411}@anchor{gnat_rm/implementation_of_specific_ada_features id1}@anchor{412}@anchor{gnat_rm/implementation_of_specific_ada_features implementation-of-specific-ada-features}@anchor{13} +@anchor{gnat_rm/implementation_of_specific_ada_features doc}@anchor{414}@anchor{gnat_rm/implementation_of_specific_ada_features id1}@anchor{415}@anchor{gnat_rm/implementation_of_specific_ada_features implementation-of-specific-ada-features}@anchor{13} @chapter Implementation of Specific Ada Features @@ -25597,7 +25624,7 @@ facilities. 
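As a hedged illustration of the C interfacing conventions summarized earlier in this chapter, the following sketch binds the C library function @code{cos} using the standard @code{Import}, @code{Convention} and @code{External_Name} aspects; depending on the platform, linking with the C math library may also be needed:

@example
with Interfaces.C; use Interfaces.C;
with Ada.Text_IO;

procedure Call_C_Cos is
   --  Binding to the C library function:  double cos (double)
   function Cos (X : double) return double
     with Import => True, Convention => C, External_Name => "cos";
begin
   Ada.Text_IO.Put_Line (double'Image (Cos (0.0)));
end Call_C_Cos;
@end example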
@end menu @node Machine Code Insertions,GNAT Implementation of Tasking,,Implementation of Specific Ada Features -@anchor{gnat_rm/implementation_of_specific_ada_features id2}@anchor{413}@anchor{gnat_rm/implementation_of_specific_ada_features machine-code-insertions}@anchor{16a} +@anchor{gnat_rm/implementation_of_specific_ada_features id2}@anchor{416}@anchor{gnat_rm/implementation_of_specific_ada_features machine-code-insertions}@anchor{16d} @section Machine Code Insertions @@ -25765,7 +25792,7 @@ according to normal visibility rules. In particular if there is no qualification is required. @node GNAT Implementation of Tasking,GNAT Implementation of Shared Passive Packages,Machine Code Insertions,Implementation of Specific Ada Features -@anchor{gnat_rm/implementation_of_specific_ada_features gnat-implementation-of-tasking}@anchor{414}@anchor{gnat_rm/implementation_of_specific_ada_features id3}@anchor{415} +@anchor{gnat_rm/implementation_of_specific_ada_features gnat-implementation-of-tasking}@anchor{417}@anchor{gnat_rm/implementation_of_specific_ada_features id3}@anchor{418} @section GNAT Implementation of Tasking @@ -25781,7 +25808,7 @@ to compliance with the Real-Time Systems Annex. @end menu @node Mapping Ada Tasks onto the Underlying Kernel Threads,Ensuring Compliance with the Real-Time Annex,,GNAT Implementation of Tasking -@anchor{gnat_rm/implementation_of_specific_ada_features id4}@anchor{416}@anchor{gnat_rm/implementation_of_specific_ada_features mapping-ada-tasks-onto-the-underlying-kernel-threads}@anchor{417} +@anchor{gnat_rm/implementation_of_specific_ada_features id4}@anchor{419}@anchor{gnat_rm/implementation_of_specific_ada_features mapping-ada-tasks-onto-the-underlying-kernel-threads}@anchor{41a} @subsection Mapping Ada Tasks onto the Underlying Kernel Threads @@ -25850,7 +25877,7 @@ support this functionality when the parent contains more than one task. @geindex Forking a new process @node Ensuring Compliance with the Real-Time Annex,Support for Locking Policies,Mapping Ada Tasks onto the Underlying Kernel Threads,GNAT Implementation of Tasking -@anchor{gnat_rm/implementation_of_specific_ada_features ensuring-compliance-with-the-real-time-annex}@anchor{418}@anchor{gnat_rm/implementation_of_specific_ada_features id5}@anchor{419} +@anchor{gnat_rm/implementation_of_specific_ada_features ensuring-compliance-with-the-real-time-annex}@anchor{41b}@anchor{gnat_rm/implementation_of_specific_ada_features id5}@anchor{41c} @subsection Ensuring Compliance with the Real-Time Annex @@ -25901,7 +25928,7 @@ placed at the end. @c Support_for_Locking_Policies @node Support for Locking Policies,,Ensuring Compliance with the Real-Time Annex,GNAT Implementation of Tasking -@anchor{gnat_rm/implementation_of_specific_ada_features support-for-locking-policies}@anchor{41a} +@anchor{gnat_rm/implementation_of_specific_ada_features support-for-locking-policies}@anchor{41d} @subsection Support for Locking Policies @@ -25935,7 +25962,7 @@ then ceiling locking is used. Otherwise, the @code{Ceiling_Locking} policy is ignored. 
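A minimal sketch showing how a partition might request @code{Ceiling_Locking} and give a protected object a ceiling priority, under the conditions described above; the names and the counter itself are purely illustrative:

@example
pragma Locking_Policy (Ceiling_Locking);

with System;

procedure Ceiling_Demo is

   protected Counter is
      pragma Priority (System.Priority'Last);  --  ceiling of the lock
      procedure Increment;
      function Value return Natural;
   private
      Count : Natural := 0;
   end Counter;

   protected body Counter is
      procedure Increment is
      begin
         Count := Count + 1;
      end Increment;

      function Value return Natural is
      begin
         return Count;
      end Value;
   end Counter;

begin
   Counter.Increment;
end Ceiling_Demo;
@end example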
@node GNAT Implementation of Shared Passive Packages,Code Generation for Array Aggregates,GNAT Implementation of Tasking,Implementation of Specific Ada Features -@anchor{gnat_rm/implementation_of_specific_ada_features gnat-implementation-of-shared-passive-packages}@anchor{41b}@anchor{gnat_rm/implementation_of_specific_ada_features id6}@anchor{41c} +@anchor{gnat_rm/implementation_of_specific_ada_features gnat-implementation-of-shared-passive-packages}@anchor{41e}@anchor{gnat_rm/implementation_of_specific_ada_features id6}@anchor{41f} @section GNAT Implementation of Shared Passive Packages @@ -26033,7 +26060,7 @@ This is used to provide the required locking semantics for proper protected object synchronization. @node Code Generation for Array Aggregates,The Size of Discriminated Records with Default Discriminants,GNAT Implementation of Shared Passive Packages,Implementation of Specific Ada Features -@anchor{gnat_rm/implementation_of_specific_ada_features code-generation-for-array-aggregates}@anchor{41d}@anchor{gnat_rm/implementation_of_specific_ada_features id7}@anchor{41e} +@anchor{gnat_rm/implementation_of_specific_ada_features code-generation-for-array-aggregates}@anchor{420}@anchor{gnat_rm/implementation_of_specific_ada_features id7}@anchor{421} @section Code Generation for Array Aggregates @@ -26064,7 +26091,7 @@ component values and static subtypes also lead to simpler code. @end menu @node Static constant aggregates with static bounds,Constant aggregates with unconstrained nominal types,,Code Generation for Array Aggregates -@anchor{gnat_rm/implementation_of_specific_ada_features id8}@anchor{41f}@anchor{gnat_rm/implementation_of_specific_ada_features static-constant-aggregates-with-static-bounds}@anchor{420} +@anchor{gnat_rm/implementation_of_specific_ada_features id8}@anchor{422}@anchor{gnat_rm/implementation_of_specific_ada_features static-constant-aggregates-with-static-bounds}@anchor{423} @subsection Static constant aggregates with static bounds @@ -26111,7 +26138,7 @@ Zero2: constant two_dim := (others => (others => 0)); @end example @node Constant aggregates with unconstrained nominal types,Aggregates with static bounds,Static constant aggregates with static bounds,Code Generation for Array Aggregates -@anchor{gnat_rm/implementation_of_specific_ada_features constant-aggregates-with-unconstrained-nominal-types}@anchor{421}@anchor{gnat_rm/implementation_of_specific_ada_features id9}@anchor{422} +@anchor{gnat_rm/implementation_of_specific_ada_features constant-aggregates-with-unconstrained-nominal-types}@anchor{424}@anchor{gnat_rm/implementation_of_specific_ada_features id9}@anchor{425} @subsection Constant aggregates with unconstrained nominal types @@ -26126,7 +26153,7 @@ Cr_Unc : constant One_Unc := (12,24,36); @end example @node Aggregates with static bounds,Aggregates with nonstatic bounds,Constant aggregates with unconstrained nominal types,Code Generation for Array Aggregates -@anchor{gnat_rm/implementation_of_specific_ada_features aggregates-with-static-bounds}@anchor{423}@anchor{gnat_rm/implementation_of_specific_ada_features id10}@anchor{424} +@anchor{gnat_rm/implementation_of_specific_ada_features aggregates-with-static-bounds}@anchor{426}@anchor{gnat_rm/implementation_of_specific_ada_features id10}@anchor{427} @subsection Aggregates with static bounds @@ -26154,7 +26181,7 @@ end loop; @end example @node Aggregates with nonstatic bounds,Aggregates in assignment statements,Aggregates with static bounds,Code Generation for Array Aggregates 
-@anchor{gnat_rm/implementation_of_specific_ada_features aggregates-with-nonstatic-bounds}@anchor{425}@anchor{gnat_rm/implementation_of_specific_ada_features id11}@anchor{426} +@anchor{gnat_rm/implementation_of_specific_ada_features aggregates-with-nonstatic-bounds}@anchor{428}@anchor{gnat_rm/implementation_of_specific_ada_features id11}@anchor{429} @subsection Aggregates with nonstatic bounds @@ -26165,7 +26192,7 @@ have to be applied to sub-arrays individually, if they do not have statically compatible subtypes. @node Aggregates in assignment statements,,Aggregates with nonstatic bounds,Code Generation for Array Aggregates -@anchor{gnat_rm/implementation_of_specific_ada_features aggregates-in-assignment-statements}@anchor{427}@anchor{gnat_rm/implementation_of_specific_ada_features id12}@anchor{428} +@anchor{gnat_rm/implementation_of_specific_ada_features aggregates-in-assignment-statements}@anchor{42a}@anchor{gnat_rm/implementation_of_specific_ada_features id12}@anchor{42b} @subsection Aggregates in assignment statements @@ -26207,7 +26234,7 @@ a temporary (created either by the front-end or the code generator) and then that temporary will be copied onto the target. @node The Size of Discriminated Records with Default Discriminants,Image Values For Nonscalar Types,Code Generation for Array Aggregates,Implementation of Specific Ada Features -@anchor{gnat_rm/implementation_of_specific_ada_features id13}@anchor{429}@anchor{gnat_rm/implementation_of_specific_ada_features the-size-of-discriminated-records-with-default-discriminants}@anchor{42a} +@anchor{gnat_rm/implementation_of_specific_ada_features id13}@anchor{42c}@anchor{gnat_rm/implementation_of_specific_ada_features the-size-of-discriminated-records-with-default-discriminants}@anchor{42d} @section The Size of Discriminated Records with Default Discriminants @@ -26287,7 +26314,7 @@ say) must be consistent, so it is imperative that the object, once created, remain invariant. @node Image Values For Nonscalar Types,Strict Conformance to the Ada Reference Manual,The Size of Discriminated Records with Default Discriminants,Implementation of Specific Ada Features -@anchor{gnat_rm/implementation_of_specific_ada_features id14}@anchor{42b}@anchor{gnat_rm/implementation_of_specific_ada_features image-values-for-nonscalar-types}@anchor{42c} +@anchor{gnat_rm/implementation_of_specific_ada_features id14}@anchor{42e}@anchor{gnat_rm/implementation_of_specific_ada_features image-values-for-nonscalar-types}@anchor{42f} @section Image Values For Nonscalar Types @@ -26307,7 +26334,7 @@ control of image text is required for some type T, then T’Put_Image should be explicitly specified. @node Strict Conformance to the Ada Reference Manual,,Image Values For Nonscalar Types,Implementation of Specific Ada Features -@anchor{gnat_rm/implementation_of_specific_ada_features id15}@anchor{42d}@anchor{gnat_rm/implementation_of_specific_ada_features strict-conformance-to-the-ada-reference-manual}@anchor{42e} +@anchor{gnat_rm/implementation_of_specific_ada_features id15}@anchor{430}@anchor{gnat_rm/implementation_of_specific_ada_features strict-conformance-to-the-ada-reference-manual}@anchor{431} @section Strict Conformance to the Ada Reference Manual @@ -26334,7 +26361,7 @@ behavior (although at the cost of a significant performance penalty), so infinite and NaN values are properly generated. 
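As a hedged sketch of the sizing behavior discussed in the section on discriminated records with default discriminants above (the type, bounds, and values are arbitrary):

@example
procedure Size_Demo is
   subtype Line_Length is Natural range 0 .. 80;

   type Line (Len : Line_Length := 0) is record
      Data : String (1 .. Len);
   end record;

   --  Unconstrained (mutable) object: the maximum size is reserved,
   --  i.e. room for 80 characters, so the discriminant can change later.
   L : Line;

   --  Constrained object: exactly 10 characters are reserved and the
   --  discriminant can never change.
   C : Line (Len => 10);

begin
   L := (Len => 5, Data => "Hello");   --  legal: L is mutable
   C.Data := (others => '*');          --  C.Len remains fixed at 10
end Size_Demo;
@end example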
@node Implementation of Ada 2012 Features,GNAT language extensions,Implementation of Specific Ada Features,Top -@anchor{gnat_rm/implementation_of_ada_2012_features doc}@anchor{42f}@anchor{gnat_rm/implementation_of_ada_2012_features id1}@anchor{430}@anchor{gnat_rm/implementation_of_ada_2012_features implementation-of-ada-2012-features}@anchor{14} +@anchor{gnat_rm/implementation_of_ada_2012_features doc}@anchor{432}@anchor{gnat_rm/implementation_of_ada_2012_features id1}@anchor{433}@anchor{gnat_rm/implementation_of_ada_2012_features implementation-of-ada-2012-features}@anchor{14} @chapter Implementation of Ada 2012 Features @@ -28500,7 +28527,7 @@ RM References: H.04 (8/1) @end itemize @node GNAT language extensions,Security Hardening Features,Implementation of Ada 2012 Features,Top -@anchor{gnat_rm/gnat_language_extensions doc}@anchor{431}@anchor{gnat_rm/gnat_language_extensions gnat-language-extensions}@anchor{432}@anchor{gnat_rm/gnat_language_extensions id1}@anchor{433} +@anchor{gnat_rm/gnat_language_extensions doc}@anchor{434}@anchor{gnat_rm/gnat_language_extensions gnat-language-extensions}@anchor{435}@anchor{gnat_rm/gnat_language_extensions id1}@anchor{436} @chapter GNAT language extensions @@ -28531,7 +28558,7 @@ prototyping phase. @end menu @node How to activate the extended GNAT Ada superset,Curated Extensions,,GNAT language extensions -@anchor{gnat_rm/gnat_language_extensions how-to-activate-the-extended-gnat-ada-superset}@anchor{434} +@anchor{gnat_rm/gnat_language_extensions how-to-activate-the-extended-gnat-ada-superset}@anchor{437} @section How to activate the extended GNAT Ada superset @@ -28570,7 +28597,7 @@ for serious projects, and is only means as a playground/technology preview. @end cartouche @node Curated Extensions,Experimental Language Extensions,How to activate the extended GNAT Ada superset,GNAT language extensions -@anchor{gnat_rm/gnat_language_extensions curated-extensions}@anchor{435}@anchor{gnat_rm/gnat_language_extensions curated-language-extensions}@anchor{66} +@anchor{gnat_rm/gnat_language_extensions curated-extensions}@anchor{438}@anchor{gnat_rm/gnat_language_extensions curated-language-extensions}@anchor{66} @section Curated Extensions @@ -28588,7 +28615,7 @@ for serious projects, and is only means as a playground/technology preview. 
@end menu @node Local Declarations Without Block,Conditional when constructs,,Curated Extensions -@anchor{gnat_rm/gnat_language_extensions local-declarations-without-block}@anchor{436} +@anchor{gnat_rm/gnat_language_extensions local-declarations-without-block}@anchor{439} @subsection Local Declarations Without Block @@ -28611,7 +28638,7 @@ end if; @end example @node Conditional when constructs,Case pattern matching,Local Declarations Without Block,Curated Extensions -@anchor{gnat_rm/gnat_language_extensions conditional-when-constructs}@anchor{437} +@anchor{gnat_rm/gnat_language_extensions conditional-when-constructs}@anchor{43a} @subsection Conditional when constructs @@ -28683,7 +28710,7 @@ Link to the original RFC: @indicateurl{https://github.com/AdaCore/ada-spark-rfcs/blob/master/prototyped/rfc-conditional-when-constructs.rst} @node Case pattern matching,Fixed lower bounds for array types and subtypes,Conditional when constructs,Curated Extensions -@anchor{gnat_rm/gnat_language_extensions case-pattern-matching}@anchor{438} +@anchor{gnat_rm/gnat_language_extensions case-pattern-matching}@anchor{43b} @subsection Case pattern matching @@ -28815,7 +28842,7 @@ Link to the original RFC: @indicateurl{https://github.com/AdaCore/ada-spark-rfcs/blob/master/prototyped/rfc-pattern-matching.rst} @node Fixed lower bounds for array types and subtypes,Prefixed-view notation for calls to primitive subprograms of untagged types,Case pattern matching,Curated Extensions -@anchor{gnat_rm/gnat_language_extensions fixed-lower-bounds-for-array-types-and-subtypes}@anchor{439} +@anchor{gnat_rm/gnat_language_extensions fixed-lower-bounds-for-array-types-and-subtypes}@anchor{43c} @subsection Fixed lower bounds for array types and subtypes @@ -28869,7 +28896,7 @@ Link to the original RFC: @indicateurl{https://github.com/AdaCore/ada-spark-rfcs/blob/master/prototyped/rfc-fixed-lower-bound.rst} @node Prefixed-view notation for calls to primitive subprograms of untagged types,Expression defaults for generic formal functions,Fixed lower bounds for array types and subtypes,Curated Extensions -@anchor{gnat_rm/gnat_language_extensions prefixed-view-notation-for-calls-to-primitive-subprograms-of-untagged-types}@anchor{43a} +@anchor{gnat_rm/gnat_language_extensions prefixed-view-notation-for-calls-to-primitive-subprograms-of-untagged-types}@anchor{43d} @subsection Prefixed-view notation for calls to primitive subprograms of untagged types @@ -28922,7 +28949,7 @@ Link to the original RFC: @indicateurl{https://github.com/AdaCore/ada-spark-rfcs/blob/master/prototyped/rfc-prefixed-untagged.rst} @node Expression defaults for generic formal functions,String interpolation,Prefixed-view notation for calls to primitive subprograms of untagged types,Curated Extensions -@anchor{gnat_rm/gnat_language_extensions expression-defaults-for-generic-formal-functions}@anchor{43b} +@anchor{gnat_rm/gnat_language_extensions expression-defaults-for-generic-formal-functions}@anchor{43e} @subsection Expression defaults for generic formal functions @@ -28951,7 +28978,7 @@ Link to the original RFC: @indicateurl{https://github.com/AdaCore/ada-spark-rfcs/blob/master/prototyped/rfc-expression-functions-as-default-for-generic-formal-function-parameters.rst} @node String interpolation,Constrained attribute for generic objects,Expression defaults for generic formal functions,Curated Extensions -@anchor{gnat_rm/gnat_language_extensions string-interpolation}@anchor{43c} +@anchor{gnat_rm/gnat_language_extensions string-interpolation}@anchor{43f} @subsection 
String interpolation @@ -29117,7 +29144,7 @@ Here is a link to the original RFC : @indicateurl{https://github.com/AdaCore/ada-spark-rfcs/blob/master/prototyped/rfc-string-interpolation.rst} @node Constrained attribute for generic objects,Static aspect on intrinsic functions,String interpolation,Curated Extensions -@anchor{gnat_rm/gnat_language_extensions constrained-attribute-for-generic-objects}@anchor{43d} +@anchor{gnat_rm/gnat_language_extensions constrained-attribute-for-generic-objects}@anchor{440} @subsection Constrained attribute for generic objects @@ -29125,7 +29152,7 @@ The @code{Constrained} attribute is permitted for objects of generic types. The result indicates whether the corresponding actual is constrained. @node Static aspect on intrinsic functions,,Constrained attribute for generic objects,Curated Extensions -@anchor{gnat_rm/gnat_language_extensions static-aspect-on-intrinsic-functions}@anchor{43e} +@anchor{gnat_rm/gnat_language_extensions static-aspect-on-intrinsic-functions}@anchor{441} @subsection @code{Static} aspect on intrinsic functions @@ -29134,7 +29161,7 @@ and the compiler will evaluate some of these intrinsics statically, in particular the @code{Shift_Left} and @code{Shift_Right} intrinsics. @node Experimental Language Extensions,,Curated Extensions,GNAT language extensions -@anchor{gnat_rm/gnat_language_extensions experimental-language-extensions}@anchor{67}@anchor{gnat_rm/gnat_language_extensions id2}@anchor{43f} +@anchor{gnat_rm/gnat_language_extensions experimental-language-extensions}@anchor{67}@anchor{gnat_rm/gnat_language_extensions id2}@anchor{442} @section Experimental Language Extensions @@ -29145,7 +29172,7 @@ particular the @code{Shift_Left} and @code{Shift_Right} intrinsics. @end menu @node Pragma Storage_Model,Simpler accessibility model,,Experimental Language Extensions -@anchor{gnat_rm/gnat_language_extensions pragma-storage-model}@anchor{440} +@anchor{gnat_rm/gnat_language_extensions pragma-storage-model}@anchor{443} @subsection Pragma Storage_Model @@ -29160,7 +29187,7 @@ Here is a link to the full RFC: @indicateurl{https://github.com/AdaCore/ada-spark-rfcs/blob/master/prototyped/rfc-storage-model.rst} @node Simpler accessibility model,,Pragma Storage_Model,Experimental Language Extensions -@anchor{gnat_rm/gnat_language_extensions simpler-accessibility-model}@anchor{441} +@anchor{gnat_rm/gnat_language_extensions simpler-accessibility-model}@anchor{444} @subsection Simpler accessibility model @@ -29173,7 +29200,7 @@ Here is a link to the full RFC: @indicateurl{https://github.com/AdaCore/ada-spark-rfcs/blob/master/prototyped/rfc-simpler-accessibility.md} @node Security Hardening Features,Obsolescent Features,GNAT language extensions,Top -@anchor{gnat_rm/security_hardening_features doc}@anchor{442}@anchor{gnat_rm/security_hardening_features id1}@anchor{443}@anchor{gnat_rm/security_hardening_features security-hardening-features}@anchor{15} +@anchor{gnat_rm/security_hardening_features doc}@anchor{445}@anchor{gnat_rm/security_hardening_features id1}@anchor{446}@anchor{gnat_rm/security_hardening_features security-hardening-features}@anchor{15} @chapter Security Hardening Features @@ -29195,7 +29222,7 @@ change. 
@end menu @node Register Scrubbing,Stack Scrubbing,,Security Hardening Features -@anchor{gnat_rm/security_hardening_features register-scrubbing}@anchor{444} +@anchor{gnat_rm/security_hardening_features register-scrubbing}@anchor{447} @section Register Scrubbing @@ -29231,7 +29258,7 @@ programming languages, see @cite{Using the GNU Compiler Collection (GCC)}. @c Stack Scrubbing: @node Stack Scrubbing,Hardened Conditionals,Register Scrubbing,Security Hardening Features -@anchor{gnat_rm/security_hardening_features stack-scrubbing}@anchor{445} +@anchor{gnat_rm/security_hardening_features stack-scrubbing}@anchor{448} @section Stack Scrubbing @@ -29375,7 +29402,7 @@ Bar_Callable_Ptr. @c Hardened Conditionals: @node Hardened Conditionals,Hardened Booleans,Stack Scrubbing,Security Hardening Features -@anchor{gnat_rm/security_hardening_features hardened-conditionals}@anchor{446} +@anchor{gnat_rm/security_hardening_features hardened-conditionals}@anchor{449} @section Hardened Conditionals @@ -29465,7 +29492,7 @@ be used with other programming languages supported by GCC. @c Hardened Booleans: @node Hardened Booleans,Control Flow Redundancy,Hardened Conditionals,Security Hardening Features -@anchor{gnat_rm/security_hardening_features hardened-booleans}@anchor{447} +@anchor{gnat_rm/security_hardening_features hardened-booleans}@anchor{44a} @section Hardened Booleans @@ -29526,7 +29553,7 @@ and more details on that attribute, see @cite{Using the GNU Compiler Collection @c Control Flow Redundancy: @node Control Flow Redundancy,,Hardened Booleans,Security Hardening Features -@anchor{gnat_rm/security_hardening_features control-flow-redundancy}@anchor{448} +@anchor{gnat_rm/security_hardening_features control-flow-redundancy}@anchor{44b} @section Control Flow Redundancy @@ -29694,7 +29721,7 @@ see @cite{Using the GNU Compiler Collection (GCC)}. These options can be used with other programming languages supported by GCC. @node Obsolescent Features,Compatibility and Porting Guide,Security Hardening Features,Top -@anchor{gnat_rm/obsolescent_features doc}@anchor{449}@anchor{gnat_rm/obsolescent_features id1}@anchor{44a}@anchor{gnat_rm/obsolescent_features obsolescent-features}@anchor{16} +@anchor{gnat_rm/obsolescent_features doc}@anchor{44c}@anchor{gnat_rm/obsolescent_features id1}@anchor{44d}@anchor{gnat_rm/obsolescent_features obsolescent-features}@anchor{16} @chapter Obsolescent Features @@ -29713,7 +29740,7 @@ compatibility purposes. @end menu @node pragma No_Run_Time,pragma Ravenscar,,Obsolescent Features -@anchor{gnat_rm/obsolescent_features id2}@anchor{44b}@anchor{gnat_rm/obsolescent_features pragma-no-run-time}@anchor{44c} +@anchor{gnat_rm/obsolescent_features id2}@anchor{44e}@anchor{gnat_rm/obsolescent_features pragma-no-run-time}@anchor{44f} @section pragma No_Run_Time @@ -29726,7 +29753,7 @@ preferred usage is to use an appropriately configured run-time that includes just those features that are to be made accessible. @node pragma Ravenscar,pragma Restricted_Run_Time,pragma No_Run_Time,Obsolescent Features -@anchor{gnat_rm/obsolescent_features id3}@anchor{44d}@anchor{gnat_rm/obsolescent_features pragma-ravenscar}@anchor{44e} +@anchor{gnat_rm/obsolescent_features id3}@anchor{450}@anchor{gnat_rm/obsolescent_features pragma-ravenscar}@anchor{451} @section pragma Ravenscar @@ -29735,7 +29762,7 @@ The pragma @code{Ravenscar} has exactly the same effect as pragma is part of the new Ada 2005 standard. 
@node pragma Restricted_Run_Time,pragma Task_Info,pragma Ravenscar,Obsolescent Features -@anchor{gnat_rm/obsolescent_features id4}@anchor{44f}@anchor{gnat_rm/obsolescent_features pragma-restricted-run-time}@anchor{450} +@anchor{gnat_rm/obsolescent_features id4}@anchor{452}@anchor{gnat_rm/obsolescent_features pragma-restricted-run-time}@anchor{453} @section pragma Restricted_Run_Time @@ -29745,7 +29772,7 @@ preferred since the Ada 2005 pragma @code{Profile} is intended for this kind of implementation dependent addition. @node pragma Task_Info,package System Task_Info s-tasinf ads,pragma Restricted_Run_Time,Obsolescent Features -@anchor{gnat_rm/obsolescent_features id5}@anchor{451}@anchor{gnat_rm/obsolescent_features pragma-task-info}@anchor{452} +@anchor{gnat_rm/obsolescent_features id5}@anchor{454}@anchor{gnat_rm/obsolescent_features pragma-task-info}@anchor{455} @section pragma Task_Info @@ -29771,7 +29798,7 @@ in the spec of package System.Task_Info in the runtime library. @node package System Task_Info s-tasinf ads,,pragma Task_Info,Obsolescent Features -@anchor{gnat_rm/obsolescent_features package-system-task-info}@anchor{453}@anchor{gnat_rm/obsolescent_features package-system-task-info-s-tasinf-ads}@anchor{454} +@anchor{gnat_rm/obsolescent_features package-system-task-info}@anchor{456}@anchor{gnat_rm/obsolescent_features package-system-task-info-s-tasinf-ads}@anchor{457} @section package System.Task_Info (@code{s-tasinf.ads}) @@ -29781,7 +29808,7 @@ to support the @code{Task_Info} pragma. The predefined Ada package standard replacement for GNAT’s @code{Task_Info} functionality. @node Compatibility and Porting Guide,GNU Free Documentation License,Obsolescent Features,Top -@anchor{gnat_rm/compatibility_and_porting_guide doc}@anchor{455}@anchor{gnat_rm/compatibility_and_porting_guide compatibility-and-porting-guide}@anchor{17}@anchor{gnat_rm/compatibility_and_porting_guide id1}@anchor{456} +@anchor{gnat_rm/compatibility_and_porting_guide doc}@anchor{458}@anchor{gnat_rm/compatibility_and_porting_guide compatibility-and-porting-guide}@anchor{17}@anchor{gnat_rm/compatibility_and_porting_guide id1}@anchor{459} @chapter Compatibility and Porting Guide @@ -29803,7 +29830,7 @@ applications developed in other Ada environments. @end menu @node Writing Portable Fixed-Point Declarations,Compatibility with Ada 83,,Compatibility and Porting Guide -@anchor{gnat_rm/compatibility_and_porting_guide id2}@anchor{457}@anchor{gnat_rm/compatibility_and_porting_guide writing-portable-fixed-point-declarations}@anchor{458} +@anchor{gnat_rm/compatibility_and_porting_guide id2}@anchor{45a}@anchor{gnat_rm/compatibility_and_porting_guide writing-portable-fixed-point-declarations}@anchor{45b} @section Writing Portable Fixed-Point Declarations @@ -29925,7 +29952,7 @@ If you follow this scheme you will be guaranteed that your fixed-point types will be portable. @node Compatibility with Ada 83,Compatibility between Ada 95 and Ada 2005,Writing Portable Fixed-Point Declarations,Compatibility and Porting Guide -@anchor{gnat_rm/compatibility_and_porting_guide compatibility-with-ada-83}@anchor{459}@anchor{gnat_rm/compatibility_and_porting_guide id3}@anchor{45a} +@anchor{gnat_rm/compatibility_and_porting_guide compatibility-with-ada-83}@anchor{45c}@anchor{gnat_rm/compatibility_and_porting_guide id3}@anchor{45d} @section Compatibility with Ada 83 @@ -29953,7 +29980,7 @@ following subsections treat the most likely issues to be encountered. 
@end menu @node Legal Ada 83 programs that are illegal in Ada 95,More deterministic semantics,,Compatibility with Ada 83 -@anchor{gnat_rm/compatibility_and_porting_guide id4}@anchor{45b}@anchor{gnat_rm/compatibility_and_porting_guide legal-ada-83-programs-that-are-illegal-in-ada-95}@anchor{45c} +@anchor{gnat_rm/compatibility_and_porting_guide id4}@anchor{45e}@anchor{gnat_rm/compatibility_and_porting_guide legal-ada-83-programs-that-are-illegal-in-ada-95}@anchor{45f} @subsection Legal Ada 83 programs that are illegal in Ada 95 @@ -30053,7 +30080,7 @@ the fix is usually simply to add the @code{(<>)} to the generic declaration. @end itemize @node More deterministic semantics,Changed semantics,Legal Ada 83 programs that are illegal in Ada 95,Compatibility with Ada 83 -@anchor{gnat_rm/compatibility_and_porting_guide id5}@anchor{45d}@anchor{gnat_rm/compatibility_and_porting_guide more-deterministic-semantics}@anchor{45e} +@anchor{gnat_rm/compatibility_and_porting_guide id5}@anchor{460}@anchor{gnat_rm/compatibility_and_porting_guide more-deterministic-semantics}@anchor{461} @subsection More deterministic semantics @@ -30081,7 +30108,7 @@ which open select branches are executed. @end itemize @node Changed semantics,Other language compatibility issues,More deterministic semantics,Compatibility with Ada 83 -@anchor{gnat_rm/compatibility_and_porting_guide changed-semantics}@anchor{45f}@anchor{gnat_rm/compatibility_and_porting_guide id6}@anchor{460} +@anchor{gnat_rm/compatibility_and_porting_guide changed-semantics}@anchor{462}@anchor{gnat_rm/compatibility_and_porting_guide id6}@anchor{463} @subsection Changed semantics @@ -30123,7 +30150,7 @@ covers only the restricted range. @end itemize @node Other language compatibility issues,,Changed semantics,Compatibility with Ada 83 -@anchor{gnat_rm/compatibility_and_porting_guide id7}@anchor{461}@anchor{gnat_rm/compatibility_and_porting_guide other-language-compatibility-issues}@anchor{462} +@anchor{gnat_rm/compatibility_and_porting_guide id7}@anchor{464}@anchor{gnat_rm/compatibility_and_porting_guide other-language-compatibility-issues}@anchor{465} @subsection Other language compatibility issues @@ -30156,7 +30183,7 @@ include @code{pragma Interface} and the floating point type attributes @end itemize @node Compatibility between Ada 95 and Ada 2005,Implementation-dependent characteristics,Compatibility with Ada 83,Compatibility and Porting Guide -@anchor{gnat_rm/compatibility_and_porting_guide compatibility-between-ada-95-and-ada-2005}@anchor{463}@anchor{gnat_rm/compatibility_and_porting_guide id8}@anchor{464} +@anchor{gnat_rm/compatibility_and_porting_guide compatibility-between-ada-95-and-ada-2005}@anchor{466}@anchor{gnat_rm/compatibility_and_porting_guide id8}@anchor{467} @section Compatibility between Ada 95 and Ada 2005 @@ -30228,7 +30255,7 @@ can declare a function returning a value from an anonymous access type. @end itemize @node Implementation-dependent characteristics,Compatibility with Other Ada Systems,Compatibility between Ada 95 and Ada 2005,Compatibility and Porting Guide -@anchor{gnat_rm/compatibility_and_porting_guide id9}@anchor{465}@anchor{gnat_rm/compatibility_and_porting_guide implementation-dependent-characteristics}@anchor{466} +@anchor{gnat_rm/compatibility_and_porting_guide id9}@anchor{468}@anchor{gnat_rm/compatibility_and_porting_guide implementation-dependent-characteristics}@anchor{469} @section Implementation-dependent characteristics @@ -30251,7 +30278,7 @@ transition from certain Ada 83 compilers. 
@end menu @node Implementation-defined pragmas,Implementation-defined attributes,,Implementation-dependent characteristics -@anchor{gnat_rm/compatibility_and_porting_guide id10}@anchor{467}@anchor{gnat_rm/compatibility_and_porting_guide implementation-defined-pragmas}@anchor{468} +@anchor{gnat_rm/compatibility_and_porting_guide id10}@anchor{46a}@anchor{gnat_rm/compatibility_and_porting_guide implementation-defined-pragmas}@anchor{46b} @subsection Implementation-defined pragmas @@ -30273,7 +30300,7 @@ avoiding compiler rejection of units that contain such pragmas; they are not relevant in a GNAT context and hence are not otherwise implemented. @node Implementation-defined attributes,Libraries,Implementation-defined pragmas,Implementation-dependent characteristics -@anchor{gnat_rm/compatibility_and_porting_guide id11}@anchor{469}@anchor{gnat_rm/compatibility_and_porting_guide implementation-defined-attributes}@anchor{46a} +@anchor{gnat_rm/compatibility_and_porting_guide id11}@anchor{46c}@anchor{gnat_rm/compatibility_and_porting_guide implementation-defined-attributes}@anchor{46d} @subsection Implementation-defined attributes @@ -30287,7 +30314,7 @@ Ada 83, GNAT supplies the attributes @code{Bit}, @code{Machine_Size} and @code{Type_Class}. @node Libraries,Elaboration order,Implementation-defined attributes,Implementation-dependent characteristics -@anchor{gnat_rm/compatibility_and_porting_guide id12}@anchor{46b}@anchor{gnat_rm/compatibility_and_porting_guide libraries}@anchor{46c} +@anchor{gnat_rm/compatibility_and_porting_guide id12}@anchor{46e}@anchor{gnat_rm/compatibility_and_porting_guide libraries}@anchor{46f} @subsection Libraries @@ -30316,7 +30343,7 @@ be preferable to retrofit the application using modular types. @end itemize @node Elaboration order,Target-specific aspects,Libraries,Implementation-dependent characteristics -@anchor{gnat_rm/compatibility_and_porting_guide elaboration-order}@anchor{46d}@anchor{gnat_rm/compatibility_and_porting_guide id13}@anchor{46e} +@anchor{gnat_rm/compatibility_and_porting_guide elaboration-order}@anchor{470}@anchor{gnat_rm/compatibility_and_porting_guide id13}@anchor{471} @subsection Elaboration order @@ -30352,7 +30379,7 @@ pragmas either globally (as an effect of the `-gnatE' switch) or locally @end itemize @node Target-specific aspects,,Elaboration order,Implementation-dependent characteristics -@anchor{gnat_rm/compatibility_and_porting_guide id14}@anchor{46f}@anchor{gnat_rm/compatibility_and_porting_guide target-specific-aspects}@anchor{470} +@anchor{gnat_rm/compatibility_and_porting_guide id14}@anchor{472}@anchor{gnat_rm/compatibility_and_porting_guide target-specific-aspects}@anchor{473} @subsection Target-specific aspects @@ -30365,10 +30392,10 @@ on the robustness of the original design. Moreover, Ada 95 (and thus Ada 2005 and Ada 2012) are sometimes incompatible with typical Ada 83 compiler practices regarding implicit packing, the meaning of the Size attribute, and the size of access values. -GNAT’s approach to these issues is described in @ref{471,,Representation Clauses}. +GNAT’s approach to these issues is described in @ref{474,,Representation Clauses}. 
@node Compatibility with Other Ada Systems,Representation Clauses,Implementation-dependent characteristics,Compatibility and Porting Guide -@anchor{gnat_rm/compatibility_and_porting_guide compatibility-with-other-ada-systems}@anchor{472}@anchor{gnat_rm/compatibility_and_porting_guide id15}@anchor{473} +@anchor{gnat_rm/compatibility_and_porting_guide compatibility-with-other-ada-systems}@anchor{475}@anchor{gnat_rm/compatibility_and_porting_guide id15}@anchor{476} @section Compatibility with Other Ada Systems @@ -30411,7 +30438,7 @@ far beyond this minimal set, as described in the next section. @end itemize @node Representation Clauses,Compatibility with HP Ada 83,Compatibility with Other Ada Systems,Compatibility and Porting Guide -@anchor{gnat_rm/compatibility_and_porting_guide id16}@anchor{474}@anchor{gnat_rm/compatibility_and_porting_guide representation-clauses}@anchor{471} +@anchor{gnat_rm/compatibility_and_porting_guide id16}@anchor{477}@anchor{gnat_rm/compatibility_and_porting_guide representation-clauses}@anchor{474} @section Representation Clauses @@ -30504,7 +30531,7 @@ with thin pointers. @end itemize @node Compatibility with HP Ada 83,,Representation Clauses,Compatibility and Porting Guide -@anchor{gnat_rm/compatibility_and_porting_guide compatibility-with-hp-ada-83}@anchor{475}@anchor{gnat_rm/compatibility_and_porting_guide id17}@anchor{476} +@anchor{gnat_rm/compatibility_and_porting_guide compatibility-with-hp-ada-83}@anchor{478}@anchor{gnat_rm/compatibility_and_porting_guide id17}@anchor{479} @section Compatibility with HP Ada 83 @@ -30534,7 +30561,7 @@ extension of package System. @end itemize @node GNU Free Documentation License,Index,Compatibility and Porting Guide,Top -@anchor{share/gnu_free_documentation_license doc}@anchor{477}@anchor{share/gnu_free_documentation_license gnu-fdl}@anchor{1}@anchor{share/gnu_free_documentation_license gnu-free-documentation-license}@anchor{478} +@anchor{share/gnu_free_documentation_license doc}@anchor{47a}@anchor{share/gnu_free_documentation_license gnu-fdl}@anchor{1}@anchor{share/gnu_free_documentation_license gnu-free-documentation-license}@anchor{47b} @chapter GNU Free Documentation License diff --git a/gcc/ada/gnat_ugn.texi b/gcc/ada/gnat_ugn.texi index 1562bee..897153b 100644 --- a/gcc/ada/gnat_ugn.texi +++ b/gcc/ada/gnat_ugn.texi @@ -3,7 +3,7 @@ @setfilename gnat_ugn.info @documentencoding UTF-8 @ifinfo -@*Generated by Sphinx 5.2.3.@* +@*Generated by Sphinx 7.2.6.@* @end ifinfo @settitle GNAT User's Guide for Native Platforms @defindex ge @@ -19,7 +19,7 @@ @copying @quotation -GNAT User's Guide for Native Platforms , Sep 26, 2023 +GNAT User's Guide for Native Platforms , Oct 16, 2023 AdaCore @@ -15781,12 +15781,12 @@ a chain of blocks in the heap. In this case, the default secondary stack size determines the initial size of the secondary stack for each task and the smallest amount the secondary stack can grow by. -For Ravenscar, ZFP, and Cert run-times the size of the secondary stack is -fixed. This switch can be used to change the default size of these stacks. -The default secondary stack size can be overridden on a per-task basis if -individual tasks have different secondary stack requirements. This is -achieved through the Secondary_Stack_Size aspect that takes the size of the -secondary stack in bytes. +For Light, Light-Tasking, and Embedded run-times the size of the secondary +stack is fixed. This switch can be used to change the default size of these +stacks. 
The default secondary stack size can be overridden on a per-task +basis if individual tasks have different secondary stack requirements. This +is achieved through the Secondary_Stack_Size aspect, which takes the size of +the secondary stack in bytes. @end table @geindex -e (gnatbind) @@ -16047,6 +16047,27 @@ Pessimistic (worst-case) elaboration order. @item @code{-P} Generate binder file suitable for CodePeer. +@end table + +@geindex -Q (gnatbind) + + +@table @asis + +@item @code{-Q`nnn'} + +Generate @code{nnn} additional default-sized secondary stacks. + +Tasks declared at the library level that use default-size secondary stacks +have their secondary stacks allocated from a pool of stacks generated by +gnatbind. This allows the default secondary stack size to be quickly changed +by rebinding the application. + +While the binder sizes this pool to match the number of such tasks defined in +the application, the pool size may need to be increased with the @code{-Q} +switch to accommodate foreign threads registered with the Light run-time. For +more information, please see the `The Primary and Secondary Stack' chapter in +the `GNAT User’s Guide Supplement for Cross Platforms'. @geindex -R (gnatbind) @@ -22827,13 +22848,13 @@ Alternatively, you can specify @code{rts-sjlj/adainclude} in the file Selecting another run-time library temporarily can be achieved by using the @code{--RTS} switch, e.g., @code{--RTS=sjlj} -@anchor{gnat_ugn/platform_specific_information choosing-the-scheduling-policy}@anchor{1bb} + @geindex SCHED_FIFO scheduling policy @geindex SCHED_RR scheduling policy @geindex SCHED_OTHER scheduling policy - +@anchor{gnat_ugn/platform_specific_information choosing-the-scheduling-policy}@anchor{1bb} @menu * Choosing the Scheduling Policy:: diff --git a/gcc/ada/libgnat/s-imagef.adb b/gcc/ada/libgnat/s-imagef.adb index 3f6bfa2..6194a31 100644 --- a/gcc/ada/libgnat/s-imagef.adb +++ b/gcc/ada/libgnat/s-imagef.adb @@ -307,6 +307,9 @@ package body System.Image_F is YY : Int := Y; -- First two operands of the scaled divide + J : Natural; + -- Loop index + begin -- Set the first character like Image @@ -317,59 +320,61 @@ package body System.Image_F is Ndigs := 0; end if; - for J in 1 .. N loop - exit when XX = 0; + -- First round of scaled divide + if XX /= 0 then Scaled_Divide (XX, YY, Z, Q, R => XX, Round => False); + if Q /= 0 then + Set_Image_Integer (Q, Digs, Ndigs); + end if; - if J = 1 then - if Q /= 0 then - Set_Image_Integer (Q, Digs, Ndigs); - end if; - - Scale := Scale + D; + Scale := Scale + D; - -- Prepare for next round, if any + -- Prepare for next round, if any - YY := 10**Maxdigs; + YY := 10**Maxdigs; + end if; - else - pragma Assert (-10**Maxdigs < Q and then Q < 10**Maxdigs); + J := 2; + while J <= N and then XX /= 0 loop + Scaled_Divide (XX, YY, Z, Q, R => XX, Round => False); - Len := 0; - Set_Image_Integer (abs Q, Buf, Len); + pragma Assert (-10**Maxdigs < Q and then Q < 10**Maxdigs); - pragma Assert (1 <= Len and then Len <= Maxdigs); + Len := 0; + Set_Image_Integer (abs Q, Buf, Len); - -- If no character but the space has been written, write the - -- minus if need be, since Set_Image_Integer did not do it. + pragma Assert (1 <= Len and then Len <= Maxdigs); - if Ndigs <= 1 then - if Q /= 0 then - if Ndigs = 0 then - Digs (1) := '-'; - end if; + -- If no character but the space has been written, write the + -- minus if need be, since Set_Image_Integer did not do it. - Digs (2 .. Len + 1) := Buf (1 .. 
Len); - Ndigs := Len + 1; + if Ndigs <= 1 then + if Q /= 0 then + if Ndigs = 0 then + Digs (1) := '-'; end if; - -- Or else pad the output with zeroes up to Maxdigs + Digs (2 .. Len + 1) := Buf (1 .. Len); + Ndigs := Len + 1; + end if; - else - for K in 1 .. Maxdigs - Len loop - Digs (Ndigs + K) := '0'; - end loop; + -- Or else pad the output with zeroes up to Maxdigs - for K in 1 .. Len loop - Digs (Ndigs + Maxdigs - Len + K) := Buf (K); - end loop; + else + for K in 1 .. Maxdigs - Len loop + Digs (Ndigs + K) := '0'; + end loop; - Ndigs := Ndigs + Maxdigs; - end if; + for K in 1 .. Len loop + Digs (Ndigs + Maxdigs - Len + K) := Buf (K); + end loop; - Scale := Scale + Maxdigs; + Ndigs := Ndigs + Maxdigs; end if; + + Scale := Scale + Maxdigs; + J := J + 1; end loop; -- If no digit was output, this is zero diff --git a/gcc/ada/libgnat/s-imguti.adb b/gcc/ada/libgnat/s-imguti.adb index 4b9e27a..cb08110 100644 --- a/gcc/ada/libgnat/s-imguti.adb +++ b/gcc/ada/libgnat/s-imguti.adb @@ -37,6 +37,8 @@ package body System.Img_Util is -- Set_Decimal_Digits -- ------------------------ + pragma Annotate (Gnatcheck, Exempt_On, "Unassigned_OUT_Parameters", + "the OUT parameter is assigned by component"); procedure Set_Decimal_Digits (Digs : in out String; NDigs : Natural; @@ -47,6 +49,8 @@ package body System.Img_Util is Aft : Natural; Exp : Natural) is + pragma Annotate (Gnatcheck, Exempt_Off, "Unassigned_OUT_Parameters"); + pragma Assert (NDigs >= 1); pragma Assert (Digs'First = 1); pragma Assert (Digs'First < Digs'Last); @@ -413,6 +417,8 @@ package body System.Img_Util is -- Set_Floating_Invalid_Value -- -------------------------------- + pragma Annotate (Gnatcheck, Exempt_On, "Unassigned_OUT_Parameters", + "the OUT parameter is assigned by component"); procedure Set_Floating_Invalid_Value (V : Floating_Invalid_Value; S : out String; @@ -421,6 +427,8 @@ package body System.Img_Util is Aft : Natural; Exp : Natural) is + pragma Annotate (Gnatcheck, Exempt_Off, "Unassigned_OUT_Parameters"); + procedure Set (C : Character); -- Sets character C in output buffer diff --git a/gcc/ada/par-prag.adb b/gcc/ada/par-prag.adb index b139862..855e778 100644 --- a/gcc/ada/par-prag.adb +++ b/gcc/ada/par-prag.adb @@ -1488,6 +1488,7 @@ begin | Pragma_Rename_Pragma | Pragma_Restricted_Run_Time | Pragma_Reviewable + | Pragma_Side_Effects | Pragma_SPARK_Mode | Pragma_Secondary_Stack_Size | Pragma_Share_Generic diff --git a/gcc/ada/sem_ch13.adb b/gcc/ada/sem_ch13.adb index f891359..ae06313 100644 --- a/gcc/ada/sem_ch13.adb +++ b/gcc/ada/sem_ch13.adb @@ -1438,6 +1438,7 @@ package body Sem_Ch13 is -- Refined_Global -- Refined_Post -- Refined_State + -- Side_Effects -- SPARK_Mode -- Secondary_Stack_Size -- Subprogram_Variant @@ -3934,6 +3935,21 @@ package body Sem_Ch13 is goto Continue; + -- Aspect Side_Effects is never delayed because it is + -- equivalent to a source pragma which appears after + -- the related subprogram. 
+ + when Aspect_Side_Effects => + Aitem := Make_Aitem_Pragma + (Pragma_Argument_Associations => New_List ( + Make_Pragma_Argument_Association (Loc, + Expression => Relocate_Node (Expr))), + Pragma_Name => Name_Side_Effects); + + Decorate (Aspect, Aitem); + Insert_Pragma (Aitem); + goto Continue; + -- SPARK_Mode when Aspect_SPARK_Mode => @@ -11374,6 +11390,7 @@ package body Sem_Ch13 is | Aspect_Postcondition | Aspect_Pre | Aspect_Precondition + | Aspect_Side_Effects | Aspect_Refined_Depends | Aspect_Refined_Global | Aspect_Refined_Post diff --git a/gcc/ada/sem_ch3.adb b/gcc/ada/sem_ch3.adb index c79d323..e92b46f 100644 --- a/gcc/ada/sem_ch3.adb +++ b/gcc/ada/sem_ch3.adb @@ -13809,7 +13809,7 @@ package body Sem_Ch3 is Suffix : Character) is C : constant Node_Id := Constraint (SI); - Number_Of_Constraints : Nat := 0; + Number_Of_Constraints : constant Nat := List_Length (Constraints (C)); Index : Node_Id; S, T : Entity_Id; Constraint_OK : Boolean := True; @@ -13835,12 +13835,6 @@ package body Sem_Ch3 is Constraint_OK := False; else - S := First (Constraints (C)); - while Present (S) loop - Number_Of_Constraints := Number_Of_Constraints + 1; - Next (S); - end loop; - -- In either case, the index constraint must provide a discrete -- range for each index of the array type and the type of each -- discrete range must be the same as that of the corresponding diff --git a/gcc/ada/sem_ch6.adb b/gcc/ada/sem_ch6.adb index a0dad86..29d51a9 100644 --- a/gcc/ada/sem_ch6.adb +++ b/gcc/ada/sem_ch6.adb @@ -2219,6 +2219,50 @@ package body Sem_Ch6 is end if; end Analyze_Return_Type; + -------------------------------------------- + -- Analyze_SPARK_Subprogram_Specification -- + -------------------------------------------- + + procedure Analyze_SPARK_Subprogram_Specification (N : Node_Id) is + Spec_Id : constant Entity_Id := Defining_Entity (N); + Formal : Entity_Id; + + begin + if not Comes_From_Source (Spec_Id) then + return; + end if; + + -- The following checks are relevant only when SPARK_Mode is On as + -- these are not standard Ada legality rules. + + if No (SPARK_Pragma (Spec_Id)) + or else Get_SPARK_Mode_From_Annotation (SPARK_Pragma (Spec_Id)) /= On + then + return; + end if; + + Formal := First_Formal (Spec_Id); + while Present (Formal) loop + if Ekind (Spec_Id) in E_Function | E_Generic_Function + and then not Is_Function_With_Side_Effects (Spec_Id) + then + -- A function cannot have a parameter of mode IN OUT or OUT + -- (SPARK RM 6.1). + + if Ekind (Formal) in E_In_Out_Parameter + | E_Out_Parameter + then + Error_Msg_Code := GEC_Out_Parameter_In_Function; + Error_Msg_N + ("function cannot have parameter of mode `OUT` or " + & "`IN OUT` in SPARK '[[]']", Formal); + end if; + end if; + + Next_Formal (Formal); + end loop; + end Analyze_SPARK_Subprogram_Specification; + ----------------------------- -- Analyze_Subprogram_Body -- ----------------------------- @@ -4577,6 +4621,29 @@ package body Sem_Ch6 is Analyze_Pragmas_In_Declarations (Body_Id); Analyze_Entry_Or_Subprogram_Body_Contract (Body_Id); + -- Apply SPARK legality checks + + Analyze_SPARK_Subprogram_Specification (Specification (N)); + + -- A function with side-effects shall not be an expression function + -- (SPARK RM 6.1.11(6)). 
+ + if Present (Spec_Id) + and then (Is_Expression_Function (Spec_Id) + or else Is_Expression_Function (Body_Id)) + and then Is_Function_With_Side_Effects (Spec_Id) + then + if From_Aspect_Specification + (Get_Pragma (Spec_Id, Pragma_Side_Effects)) + then + Error_Msg_N ("aspect Side_Effects not allowed" + & " on an expression function", N); + else + Error_Msg_N ("pragma Side_Effects not allowed" + & " on an expression function", N); + end if; + end if; + Set_Actual_Subtypes (N, Current_Scope); -- Add a declaration for the Protection object, renaming declarations @@ -5187,6 +5254,14 @@ package body Sem_Ch6 is Analyze_Aspect_Specifications (N, Designator); end if; + -- The legality of a function specification in SPARK depends on whether + -- the function is a function with or without side-effects. Analyze the + -- pragma in advance if present, before specific SPARK legality checks. + + Analyze_Pragmas_If_Present (N, Pragma_SPARK_Mode); + Analyze_Pragmas_If_Present (N, Pragma_Side_Effects); + Analyze_SPARK_Subprogram_Specification (Specification (N)); + if Scop /= Standard_Standard and then not Is_Child_Unit (Designator) then Set_Categorization_From_Scope (Designator, Scop); @@ -13071,34 +13146,6 @@ package body Sem_Ch6 is Null_Exclusion_Static_Checks (Param_Spec); end if; - -- The following checks are relevant only when SPARK_Mode is on as - -- these are not standard Ada legality rules. - - if SPARK_Mode = On then - if Ekind (Scope (Formal)) in E_Function | E_Generic_Function then - - -- A function cannot have a parameter of mode IN OUT or OUT - -- (SPARK RM 6.1). - - if Ekind (Formal) in E_In_Out_Parameter | E_Out_Parameter then - Error_Msg_N - ("function cannot have parameter of mode `OUT` or " - & "`IN OUT`", Formal); - end if; - - -- A procedure cannot have an effectively volatile formal - -- parameter of mode IN because it behaves as a constant - -- (SPARK RM 7.1.3(4)). - - elsif Ekind (Scope (Formal)) = E_Procedure - and then Ekind (Formal) = E_In_Parameter - and then Is_Effectively_Volatile (Formal) - then - Error_Msg_N - ("formal parameter of mode `IN` cannot be volatile", Formal); - end if; - end if; - -- Deal with aspects on formal parameters. Only Unreferenced is -- supported for the time being. diff --git a/gcc/ada/sem_ch6.ads b/gcc/ada/sem_ch6.ads index f5ff960..dd2509d 100644 --- a/gcc/ada/sem_ch6.ads +++ b/gcc/ada/sem_ch6.ads @@ -51,6 +51,10 @@ package Sem_Ch6 is -- and body declarations. Returns the defining entity for the -- specification N. + procedure Analyze_SPARK_Subprogram_Specification (N : Node_Id); + -- Check SPARK legality rules that require that the specification has been + -- analyzed already. + function Can_Override_Operator (Subp : Entity_Id) return Boolean; -- Returns true if Subp can override a predefined operator diff --git a/gcc/ada/sem_prag.adb b/gcc/ada/sem_prag.adb index 6de87fb..b765575 100644 --- a/gcc/ada/sem_prag.adb +++ b/gcc/ada/sem_prag.adb @@ -186,6 +186,14 @@ package body Sem_Prag is -- to Uppercase or Lowercase, then a new string literal with appropriate -- casing is constructed. + procedure Analyze_If_Present_Internal + (N : Node_Id; + Id : Pragma_Id; + Included : Boolean); + -- Inspect the remainder of the list containing pragma N and look for a + -- pragma that matches Id. If found, analyze the pragma. If Included is + -- True, N is included in the search. + procedure Analyze_Part_Of (Indic : Node_Id; Item_Id : Entity_Id; @@ -1097,10 +1105,14 @@ package body Sem_Prag is -- and attribute 'Result are still valid items. 
if Ekind (Spec_Id) in E_Function | E_Generic_Function + and then not Is_Function_With_Side_Effects (Spec_Id) and then not Is_Input then + Error_Msg_Code := + GEC_Output_In_Function_Global_Or_Depends; SPARK_Msg_N - ("output item is not applicable to function", Item); + ("output item is not applicable to function '[[]']", + Item); end if; -- The item denotes a concurrent type. Note that single @@ -3080,9 +3092,12 @@ package body Sem_Prag is procedure Check_Mode_Restriction_In_Function (Mode : Node_Id) is begin - if Ekind (Spec_Id) in E_Function | E_Generic_Function then + if Ekind (Spec_Id) in E_Function | E_Generic_Function + and then not Is_Function_With_Side_Effects (Spec_Id) + then + Error_Msg_Code := GEC_Output_In_Function_Global_Or_Depends; SPARK_Msg_N - ("global mode & is not applicable to functions", Mode); + ("global mode & is not applicable to function '[[]']", Mode); end if; end Check_Mode_Restriction_In_Function; @@ -3263,6 +3278,46 @@ package body Sem_Prag is Set_Is_Analyzed_Pragma (N); end Analyze_Global_In_Decl_Part; + --------------------------------- + -- Analyze_If_Present_Internal -- + --------------------------------- + + procedure Analyze_If_Present_Internal + (N : Node_Id; + Id : Pragma_Id; + Included : Boolean) + is + Stmt : Node_Id; + + begin + pragma Assert (Is_List_Member (N)); + + -- Inspect the declarations or statements following pragma N looking + -- for another pragma whose Id matches the caller's request. If it is + -- available, analyze it. + + if Included then + Stmt := N; + else + Stmt := Next (N); + end if; + + while Present (Stmt) loop + if Nkind (Stmt) = N_Pragma and then Get_Pragma_Id (Stmt) = Id then + Analyze_Pragma (Stmt); + exit; + + -- The first source declaration or statement immediately following + -- N ends the region where a pragma may appear. + + elsif Comes_From_Source (Stmt) then + exit; + end if; + + Next (Stmt); + end loop; + end Analyze_If_Present_Internal; + -------------------------------------------- -- Analyze_Initial_Condition_In_Decl_Part -- -------------------------------------------- @@ -4558,9 +4613,9 @@ package body Sem_Prag is procedure Check_Static_Boolean_Expression (Expr : Node_Id); -- Subsidiary to the analysis of pragmas Async_Readers, Async_Writers, -- Constant_After_Elaboration, Effective_Reads, Effective_Writes, - -- Extensions_Visible and Volatile_Function. Ensure that expression Expr - -- is an OK static boolean expression. Emit an error if this is not the - -- case. + -- Extensions_Visible, Side_Effects and Volatile_Function. Ensure + -- that expression Expr is an OK static boolean expression. Emit an + -- error if this is not the case. procedure Check_Static_Constraint (Constr : Node_Id); -- Constr is a constraint from an N_Subtype_Indication node from a @@ -5017,30 +5072,8 @@ package body Sem_Prag is ------------------------ procedure Analyze_If_Present (Id : Pragma_Id) is - Stmt : Node_Id; - begin - pragma Assert (Is_List_Member (N)); - - -- Inspect the declarations or statements following pragma N looking - -- for another pragma whose Id matches the caller's request. If it is - -- available, analyze it. - - Stmt := Next (N); - while Present (Stmt) loop - if Nkind (Stmt) = N_Pragma and then Get_Pragma_Id (Stmt) = Id then - Analyze_Pragma (Stmt); - exit; - - -- The first source declaration or statement immediately following - -- N ends the region where a pragma may appear. 
- - elsif Comes_From_Source (Stmt) then - exit; - end if; - - Next (Stmt); - end loop; + Analyze_If_Present_Internal (N, Id, Included => False); end Analyze_If_Present; -------------------------------- @@ -13325,7 +13358,7 @@ package body Sem_Prag is Check_No_Identifiers; Check_At_Most_N_Arguments (1); - -- Ensure the proper placement of the pragma. Exceptional_Cases + -- Ensure the proper placement of the pragma. Always_Terminates -- must be associated with a subprogram declaration or a body that -- acts as a spec. @@ -13386,17 +13419,29 @@ package body Sem_Prag is Spec_Id := Unique_Defining_Entity (Subp_Decl); - -- Pragma Always_Terminates is not allowed on functions + -- In order to call Is_Function_With_Side_Effects, analyze pragma + -- Side_Effects if present. - if Ekind (Spec_Id) = E_Function then - Error_Msg_N (Fix_Error - ("pragma % cannot apply to function"), N); - return; + Analyze_If_Present (Pragma_Side_Effects); - elsif Ekind (Spec_Id) = E_Generic_Function then - Error_Msg_N (Fix_Error - ("pragma % cannot apply to generic function"), N); - return; + -- Pragma Always_Terminates is not allowed on functions without + -- side-effects. + + if Ekind (Spec_Id) in E_Function | E_Generic_Function + and then not Is_Function_With_Side_Effects (Spec_Id) + then + Error_Msg_Code := GEC_Always_Terminates_On_Function; + + if Ekind (Spec_Id) = E_Function then + Error_Msg_N (Fix_Error + ("pragma % cannot apply to function '[[]']"), N); + return; + + elsif Ekind (Spec_Id) = E_Generic_Function then + Error_Msg_N (Fix_Error + ("pragma % cannot apply to generic function '[[]']"), N); + return; + end if; end if; -- Pragma Always_Terminates applied to packages doesn't allow any @@ -16248,6 +16293,7 @@ package body Sem_Prag is Analyze_If_Present (Pragma_SPARK_Mode); Analyze_If_Present (Pragma_Volatile_Function); + Analyze_If_Present (Pragma_Side_Effects); Analyze_If_Present (Pragma_Global); Analyze_Depends_In_Decl_Part (N); end if; @@ -16937,6 +16983,31 @@ package body Sem_Prag is Spec_Id := Unique_Defining_Entity (Subp_Decl); + -- In order to call Is_Function_With_Side_Effects, analyze pragma + -- Side_Effects if present. + + Analyze_If_Present (Pragma_Side_Effects); + + -- Pragma Exceptional_Cases is not allowed on functions without + -- side-effects. + + if Ekind (Spec_Id) in E_Function | E_Generic_Function + and then not Is_Function_With_Side_Effects (Spec_Id) + then + Error_Msg_Sloc := GEC_Exceptional_Cases_On_Function; + + if Ekind (Spec_Id) = E_Function then + Error_Msg_N (Fix_Error + ("pragma % cannot apply to function '[[]']"), N); + return; + + elsif Ekind (Spec_Id) = E_Generic_Function then + Error_Msg_N (Fix_Error + ("pragma % cannot apply to generic function '[[]']"), N); + return; + end if; + end if; + -- A pragma that applies to a Ghost entity becomes Ghost for the -- purposes of legality checks and removal of ignored Ghost code. @@ -17990,6 +18061,7 @@ package body Sem_Prag is Analyze_If_Present (Pragma_SPARK_Mode); Analyze_If_Present (Pragma_Volatile_Function); + Analyze_If_Present (Pragma_Side_Effects); Analyze_Global_In_Decl_Part (N); Analyze_If_Present (Pragma_Depends); end if; @@ -22998,6 +23070,16 @@ package body Sem_Prag is E := Entity (E_Id); + Analyze_If_Present (Pragma_Side_Effects); + + -- A function with side-effects shall not have a Pure_Function + -- aspect or pragma (SPARK RM 6.1.11(5)). 
+ + if Is_Function_With_Side_Effects (E) then + Error_Pragma + ("pragma % incompatible with ""Side_Effects"""); + end if; + -- A pragma that applies to a Ghost entity becomes Ghost for the -- purposes of legality checks and removal of ignored Ghost code. @@ -23171,6 +23253,7 @@ package body Sem_Prag is Analyze_If_Present (Pragma_SPARK_Mode); Analyze_If_Present (Pragma_Volatile_Function); + Analyze_If_Present (Pragma_Side_Effects); Analyze_If_Present (Pragma_Refined_Global); Analyze_Refined_Depends_In_Decl_Part (N); end if; @@ -23239,6 +23322,7 @@ package body Sem_Prag is Analyze_If_Present (Pragma_SPARK_Mode); Analyze_If_Present (Pragma_Volatile_Function); + Analyze_If_Present (Pragma_Side_Effects); Analyze_Refined_Global_In_Decl_Part (N); Analyze_If_Present (Pragma_Refined_Depends); end if; @@ -23774,6 +23858,129 @@ package body Sem_Prag is Check_Arg_Count (0); Check_Valid_Configuration_Pragma; + ------------------ + -- Side_Effects -- + ------------------ + + -- pragma Side_Effects [ (boolean_EXPRESSION) ]; + + -- Characteristics: + + -- * Analysis - The annotation is fully analyzed immediately upon + -- elaboration as its expression must be static. + + -- * Expansion - None. + + -- * Template - The annotation utilizes the generic template of the + -- related subprogram [body] when it is: + + -- aspect on subprogram declaration + -- aspect on stand-alone subprogram body + -- pragma on stand-alone subprogram body + + -- The annotation must prepare its own template when it is: + + -- pragma on subprogram declaration + + -- * Globals - Capture of global references must occur after full + -- analysis. + + -- * Instance - The annotation is instantiated automatically when + -- the related generic subprogram [body] is instantiated except for + -- the "pragma on subprogram declaration" case. In that scenario + -- the annotation must instantiate itself. + + when Pragma_Side_Effects => Side_Effects : declare + Subp_Decl : Node_Id; + Spec_Id : Entity_Id; + Over_Id : Entity_Id; + + begin + GNAT_Pragma; + Check_No_Identifiers; + Check_At_Most_N_Arguments (1); + + Subp_Decl := + Find_Related_Declaration_Or_Body (N, Do_Checks => True); + + -- Abstract subprogram declaration + + if Nkind (Subp_Decl) = N_Abstract_Subprogram_Declaration then + null; + + -- Generic subprogram declaration + + elsif Nkind (Subp_Decl) = N_Generic_Subprogram_Declaration then + null; + + -- Body acts as spec + + elsif Nkind (Subp_Decl) = N_Subprogram_Body + and then No (Corresponding_Spec (Subp_Decl)) + then + null; + + -- Body stub acts as spec + + elsif Nkind (Subp_Decl) = N_Subprogram_Body_Stub + and then No (Corresponding_Spec_Of_Stub (Subp_Decl)) + then + null; + + -- Subprogram declaration + + elsif Nkind (Subp_Decl) = N_Subprogram_Declaration then + null; + + -- Otherwise the pragma is associated with an illegal construct + + else + Error_Pragma ("pragma % must apply to a subprogram"); + end if; + + if Nkind (Specification (Subp_Decl)) /= N_Function_Specification + then + Error_Pragma ("pragma % must apply to a function"); + end if; + + Spec_Id := Unique_Defining_Entity (Subp_Decl); + + -- Chain the pragma on the contract for completeness + + Add_Contract_Item (N, Spec_Id); + + -- A function with side-effects cannot override a function without + -- side-effects (SPARK RM 7.1.2(16)). Overriding checks are + -- usually performed in New_Overloaded_Entity, however at + -- that point the pragma has not been processed yet. 
+ + Over_Id := Overridden_Operation (Spec_Id); + + if Present (Over_Id) + and then not Is_Function_With_Side_Effects (Over_Id) + then + Error_Msg_N + ("incompatible declaration of side-effects for function", + Spec_Id); + + Error_Msg_Sloc := Sloc (Over_Id); + Error_Msg_N + ("\& declared # with Side_Effects value False", + Spec_Id); + + Error_Msg_Sloc := Sloc (Spec_Id); + Error_Msg_N + ("\overridden # with Side_Effects value True", + Spec_Id); + end if; + + -- Analyze the Boolean expression (if any) + + if Present (Arg1) then + Check_Static_Boolean_Expression (Get_Pragma_Arg (Arg1)); + end if; + end Side_Effects; + ------------------------------ -- Simple_Storage_Pool_Type -- ------------------------------ @@ -26781,6 +26988,25 @@ package body Sem_Prag is when Pragma_Exit => null; end Analyze_Pragma; + -------------------------------- + -- Analyze_Pragmas_If_Present -- + -------------------------------- + + procedure Analyze_Pragmas_If_Present (Decl : Node_Id; Id : Pragma_Id) is + Prag : Node_Id; + begin + if Nkind (Parent (Decl)) = N_Compilation_Unit then + Prag := First (Pragmas_After (Aux_Decls_Node (Parent (Decl)))); + else + pragma Assert (Is_List_Member (Decl)); + Prag := Next (Decl); + end if; + + if Present (Prag) then + Analyze_If_Present_Internal (Prag, Id, Included => True); + end if; + end Analyze_Pragmas_If_Present; + --------------------------------------------- -- Analyze_Pre_Post_Condition_In_Decl_Part -- --------------------------------------------- @@ -32407,6 +32633,7 @@ package body Sem_Prag is Pragma_Restriction_Warnings => 0, Pragma_Restrictions => 0, Pragma_Reviewable => -1, + Pragma_Side_Effects => 0, Pragma_Secondary_Stack_Size => -1, Pragma_Share_Generic => 0, Pragma_Shared => 0, diff --git a/gcc/ada/sem_prag.ads b/gcc/ada/sem_prag.ads index e8e9856..dcfec50 100644 --- a/gcc/ada/sem_prag.ads +++ b/gcc/ada/sem_prag.ads @@ -212,30 +212,31 @@ package Sem_Prag is -- of subprogram bodies. 
Pragma_Significant_To_Subprograms : constant array (Pragma_Id) of Boolean := - (Pragma_Always_Terminates => True, - Pragma_Contract_Cases => True, - Pragma_Depends => True, - Pragma_Exceptional_Cases => True, - Pragma_Ghost => True, - Pragma_Global => True, - Pragma_Inline => True, - Pragma_Inline_Always => True, - Pragma_Post => True, - Pragma_Post_Class => True, - Pragma_Postcondition => True, - Pragma_Pre => True, - Pragma_Pre_Class => True, - Pragma_Precondition => True, - Pragma_Pure => True, - Pragma_Pure_Function => True, - Pragma_Refined_Depends => True, - Pragma_Refined_Global => True, - Pragma_Refined_Post => True, - Pragma_Refined_State => True, - Pragma_Subprogram_Variant => True, - Pragma_Volatile => True, - Pragma_Volatile_Function => True, - others => False); + (Pragma_Always_Terminates => True, + Pragma_Contract_Cases => True, + Pragma_Depends => True, + Pragma_Exceptional_Cases => True, + Pragma_Ghost => True, + Pragma_Global => True, + Pragma_Inline => True, + Pragma_Inline_Always => True, + Pragma_Post => True, + Pragma_Post_Class => True, + Pragma_Postcondition => True, + Pragma_Pre => True, + Pragma_Pre_Class => True, + Pragma_Precondition => True, + Pragma_Pure => True, + Pragma_Pure_Function => True, + Pragma_Refined_Depends => True, + Pragma_Refined_Global => True, + Pragma_Refined_Post => True, + Pragma_Refined_State => True, + Pragma_Side_Effects => True, + Pragma_Subprogram_Variant => True, + Pragma_Volatile => True, + Pragma_Volatile_Function => True, + others => False); ----------------- -- Subprograms -- @@ -244,6 +245,10 @@ package Sem_Prag is procedure Analyze_Pragma (N : Node_Id); -- Analyze procedure for pragma reference node N + procedure Analyze_Pragmas_If_Present (Decl : Node_Id; Id : Pragma_Id); + -- Inspect the list of pragmas after declaration Decl and look for a pragma + -- that matches Id. If found, analyze the pragma. + procedure Analyze_Always_Terminates_In_Decl_Part (N : Node_Id; Freeze_Id : Entity_Id := Empty); @@ -475,6 +480,7 @@ package Sem_Prag is -- Refined_Global -- Refined_Post -- Refined_State + -- Side_Effects -- Subprogram_Variant -- Test_Case -- Volatile_Function diff --git a/gcc/ada/sem_util.adb b/gcc/ada/sem_util.adb index 26ddb52..5440c6a 100644 --- a/gcc/ada/sem_util.adb +++ b/gcc/ada/sem_util.adb @@ -17510,6 +17510,36 @@ package body Sem_Util is end if; end Is_Fully_Initialized_Variant; + ----------------------------------- + -- Is_Function_With_Side_Effects -- + ----------------------------------- + + function Is_Function_With_Side_Effects (Subp : Entity_Id) return Boolean is + Arg : Node_Id; + Expr : Node_Id; + Prag : constant Node_Id := Get_Pragma (Subp, Pragma_Side_Effects); + + begin + -- Extract the value from the Boolean expression (if any) + + if Present (Prag) then + Arg := First (Pragma_Argument_Associations (Prag)); + + if Present (Arg) then + Expr := Get_Pragma_Arg (Arg); + + return Is_True (Expr_Value (Expr)); + + -- Otherwise the aspect or pragma defaults to True + + else + return True; + end if; + end if; + + return False; + end Is_Function_With_Side_Effects; + ------------------------------------ -- Is_Generic_Declaration_Or_Body -- ------------------------------------ diff --git a/gcc/ada/sem_util.ads b/gcc/ada/sem_util.ads index dda71e4..caf6a66 100644 --- a/gcc/ada/sem_util.ads +++ b/gcc/ada/sem_util.ads @@ -1479,6 +1479,10 @@ package Sem_Util is function Is_Container_Aggregate (Exp : Node_Id) return Boolean; -- Is the given expression a container aggregate? 
+ function Is_Function_With_Side_Effects (Subp : Entity_Id) return Boolean; + -- Return True if Subp is a function with side-effects, ie. it has a + -- (direct or inherited) pragma Side_Effects with static value True. + function Is_Newly_Constructed (Exp : Node_Id; Context_Requires_NC : Boolean) return Boolean; -- Indicates whether a given expression is "newly constructed" (RM 4.4). diff --git a/gcc/ada/snames.ads-tmpl b/gcc/ada/snames.ads-tmpl index 5044abb..b00b0f0 100644 --- a/gcc/ada/snames.ads-tmpl +++ b/gcc/ada/snames.ads-tmpl @@ -672,6 +672,7 @@ package Snames is Name_Share_Generic : constant Name_Id := N + $; -- GNAT Name_Shared : constant Name_Id := N + $; -- Ada 83 Name_Shared_Passive : constant Name_Id := N + $; + Name_Side_Effects : constant Name_Id := N + $; -- GNAT Name_Simple_Storage_Pool_Type : constant Name_Id := N + $; -- GNAT -- Note: Storage_Size is not in this list because its name matches the name @@ -1938,6 +1939,7 @@ package Snames is Pragma_Share_Generic, Pragma_Shared, Pragma_Shared_Passive, + Pragma_Side_Effects, Pragma_Simple_Storage_Pool_Type, Pragma_Source_Reference, Pragma_Static_Elaboration_Desired, diff --git a/gcc/builtins.def b/gcc/builtins.def index 5953266..e989cd8 100644 --- a/gcc/builtins.def +++ b/gcc/builtins.def @@ -1074,6 +1074,8 @@ DEF_BUILTIN_STUB (BUILT_IN_ADJUST_TRAMPOLINE, "__builtin_adjust_trampoline") DEF_BUILTIN_STUB (BUILT_IN_INIT_DESCRIPTOR, "__builtin_init_descriptor") DEF_BUILTIN_STUB (BUILT_IN_ADJUST_DESCRIPTOR, "__builtin_adjust_descriptor") DEF_BUILTIN_STUB (BUILT_IN_NONLOCAL_GOTO, "__builtin_nonlocal_goto") +DEF_BUILTIN_STUB (BUILT_IN_NESTED_PTR_CREATED, "__builtin_nested_func_ptr_created") +DEF_BUILTIN_STUB (BUILT_IN_NESTED_PTR_DELETED, "__builtin_nested_func_ptr_deleted") /* Implementing __builtin_setjmp. */ DEF_BUILTIN_STUB (BUILT_IN_SETJMP_SETUP, "__builtin_setjmp_setup") @@ -1179,6 +1181,9 @@ DEF_GCC_BUILTIN (BUILT_IN_FILE, "FILE", BT_FN_CONST_STRING, ATTR_NOTHROW_LEAF_LI DEF_GCC_BUILTIN (BUILT_IN_FUNCTION, "FUNCTION", BT_FN_CONST_STRING, ATTR_NOTHROW_LEAF_LIST) DEF_GCC_BUILTIN (BUILT_IN_LINE, "LINE", BT_FN_INT, ATTR_NOTHROW_LEAF_LIST) +/* Control Flow Redundancy hardening out-of-line checker. */ +DEF_BUILTIN_STUB (BUILT_IN___HARDCFR_CHECK, "__builtin___hardcfr_check") + /* Synchronization Primitives. */ #include "sync-builtins.def" diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog index b4b1b90..25f583c 100644 --- a/gcc/c-family/ChangeLog +++ b/gcc/c-family/ChangeLog @@ -1,3 +1,25 @@ +2023-10-20 Marek Polacek <polacek@redhat.com> + + PR c/111884 + * c-common.cc (c_common_get_alias_set): Return -1 for char8_t only + in C++. + +2023-10-20 Patrick Palka <ppalka@redhat.com> + + * c-warn.cc (check_address_or_pointer_of_packed_member): Handle + type-dependent callee of CALL_EXPR. + +2023-10-20 Alexandre Oliva <oliva@adacore.com> + + * c-attribs.cc (handle_expected_throw_attribute): New. + (c_common_attribute_table): Add expected_throw. + +2023-10-19 Lewis Hyatt <lhyatt@gmail.com> + + PR c++/89038 + * c-pragma.cc (handle_pragma_diagnostic_impl): Handle + -Wunknown-pragmas during early processing. 
+ 2023-10-15 Jakub Jelinek <jakub@redhat.com> PR tree-optimization/111800 diff --git a/gcc/c-family/c-attribs.cc b/gcc/c-family/c-attribs.cc index dca7548..abf44d5 100644 --- a/gcc/c-family/c-attribs.cc +++ b/gcc/c-family/c-attribs.cc @@ -136,6 +136,7 @@ static tree handle_vector_mask_attribute (tree *, tree, tree, int, static tree handle_nonnull_attribute (tree *, tree, tree, int, bool *); static tree handle_nonstring_attribute (tree *, tree, tree, int, bool *); static tree handle_nothrow_attribute (tree *, tree, tree, int, bool *); +static tree handle_expected_throw_attribute (tree *, tree, tree, int, bool *); static tree handle_cleanup_attribute (tree *, tree, tree, int, bool *); static tree handle_warn_unused_result_attribute (tree *, tree, tree, int, bool *); @@ -437,6 +438,8 @@ const struct attribute_spec c_common_attribute_table[] = handle_nonstring_attribute, NULL }, { "nothrow", 0, 0, true, false, false, false, handle_nothrow_attribute, NULL }, + { "expected_throw", 0, 0, true, false, false, false, + handle_expected_throw_attribute, NULL }, { "may_alias", 0, 0, false, true, false, false, NULL, NULL }, { "cleanup", 1, 1, true, false, false, false, handle_cleanup_attribute, NULL }, @@ -5459,6 +5462,25 @@ handle_nothrow_attribute (tree *node, tree name, tree ARG_UNUSED (args), return NULL_TREE; } +/* Handle a "nothrow" attribute; arguments as in + struct attribute_spec.handler. */ + +static tree +handle_expected_throw_attribute (tree *node, tree name, tree ARG_UNUSED (args), + int ARG_UNUSED (flags), bool *no_add_attrs) +{ + if (TREE_CODE (*node) == FUNCTION_DECL) + /* No flag to set here. */; + /* ??? TODO: Support types. */ + else + { + warning (OPT_Wattributes, "%qE attribute ignored", name); + *no_add_attrs = true; + } + + return NULL_TREE; +} + /* Handle a "cleanup" attribute; arguments as in struct attribute_spec.handler. */ diff --git a/gcc/c-family/c-common.cc b/gcc/c-family/c-common.cc index f044db5..0efdc67 100644 --- a/gcc/c-family/c-common.cc +++ b/gcc/c-family/c-common.cc @@ -3828,12 +3828,13 @@ c_common_get_alias_set (tree t) if (!TYPE_P (t)) return -1; - /* Unlike char, char8_t doesn't alias. */ - if (flag_char8_t && t == char8_type_node) + /* Unlike char, char8_t doesn't alias in C++. (In C, char8_t is not + a distinct type.) */ + if (flag_char8_t && t == char8_type_node && c_dialect_cxx ()) return -1; /* The C standard guarantees that any object may be accessed via an - lvalue that has narrow character type (except char8_t). */ + lvalue that has narrow character type. */ if (t == char_type_node || t == signed_char_type_node || t == unsigned_char_type_node) diff --git a/gcc/c-family/c-pragma.cc b/gcc/c-family/c-pragma.cc index 293311d..98dfb0f 100644 --- a/gcc/c-family/c-pragma.cc +++ b/gcc/c-family/c-pragma.cc @@ -963,7 +963,8 @@ handle_pragma_diagnostic_impl () /* option_string + 1 to skip the initial '-' */ unsigned int option_index = find_opt (data.option_str + 1, lang_mask); - if (early && !c_option_is_from_cpp_diagnostics (option_index)) + if (early && !(c_option_is_from_cpp_diagnostics (option_index) + || option_index == OPT_Wunknown_pragmas)) return; if (option_index == OPT_SPECIAL_unknown) diff --git a/gcc/c-family/c-warn.cc b/gcc/c-family/c-warn.cc index 3e2d02a..9ab83a9 100644 --- a/gcc/c-family/c-warn.cc +++ b/gcc/c-family/c-warn.cc @@ -3036,7 +3036,7 @@ check_address_or_pointer_of_packed_member (tree type, tree rhs) rhs = TREE_TYPE (rhs); /* Pointer type. */ /* We could be called while processing a template and RHS could be a functor. 
In that case it's a class, not a pointer. */ - if (!POINTER_TYPE_P (rhs)) + if (!rhs || !POINTER_TYPE_P (rhs)) return NULL_TREE; rhs = TREE_TYPE (rhs); /* Function type. */ rhstype = TREE_TYPE (rhs); diff --git a/gcc/c/ChangeLog b/gcc/c/ChangeLog index 92e90e7..2fb67af 100644 --- a/gcc/c/ChangeLog +++ b/gcc/c/ChangeLog @@ -1,3 +1,42 @@ +2023-10-20 Florian Weimer <fweimer@redhat.com> + + PR c/109827 + PR other/44209 + * c-typeck.cc (build_conditional_expr): Use OPT_Wint_conversion + for pointer/integer mismatch warnings. + +2023-10-20 Florian Weimer <fweimer@redhat.com> + + PR c/109826 + PR other/44209 + * c-typeck.cc (build_conditional_expr): Use + OPT_Wincompatible_pointer_types for pointer mismatches. + Emit location information for the operand. + +2023-10-19 Andrew Pinski <pinskia@gmail.com> + + PR c/100532 + * c-typeck.cc (convert_argument): After erroring out + about an incomplete type return error_mark_node. + +2023-10-19 Andrew Pinski <pinskia@gmail.com> + + PR c/104822 + * c-typeck.cc (convert_for_assignment): Check for null pointer + before warning about an incompatible scalar storage order. + +2023-10-18 Andrew Pinski <pinskia@gmail.com> + + PR c/101364 + * c-decl.cc (diagnose_arglist_conflict): Test for + error mark before calling of c_type_promotes_to. + +2023-10-18 Andrew Pinski <pinskia@gmail.com> + + PR c/101285 + * c-typeck.cc (c_safe_arg_type_equiv_p): Return true for error + operands early. + 2023-10-17 Martin Uecker <uecker@tugraz.at> PR c/111708 diff --git a/gcc/c/c-decl.cc b/gcc/c/c-decl.cc index 0de3847..7a145be 100644 --- a/gcc/c/c-decl.cc +++ b/gcc/c/c-decl.cc @@ -1899,7 +1899,8 @@ diagnose_arglist_conflict (tree newdecl, tree olddecl, break; } - if (c_type_promotes_to (type) != type) + if (!error_operand_p (type) + && c_type_promotes_to (type) != type) { inform (input_location, "an argument type that has a default " "promotion cannot match an empty parameter name list " diff --git a/gcc/c/c-typeck.cc b/gcc/c/c-typeck.cc index e55e887..112d28f 100644 --- a/gcc/c/c-typeck.cc +++ b/gcc/c/c-typeck.cc @@ -3367,7 +3367,7 @@ convert_argument (location_t ploc, tree function, tree fundecl, { error_at (ploc, "type of formal parameter %d is incomplete", parmnum + 1); - return val; + return error_mark_node; } /* Optionally warn about conversions that differ from the default @@ -5564,14 +5564,14 @@ build_conditional_expr (location_t colon_loc, tree ifexp, bool ifexp_bcp, else { int qual = ENCODE_QUAL_ADDR_SPACE (as_common); - if (bltin1 && bltin2) - warning_at (colon_loc, OPT_Wincompatible_pointer_types, - "pointer type mismatch between %qT and %qT " - "of %qD and %qD in conditional expression", - type1, type2, bltin1, bltin2); - else - pedwarn (colon_loc, 0, - "pointer type mismatch in conditional expression"); + if (emit_diagnostic (bltin1 && bltin2 ? 
DK_WARNING : DK_PEDWARN, + colon_loc, OPT_Wincompatible_pointer_types, + "pointer type mismatch " + "in conditional expression")) + { + inform (op1_loc, "first expression has type %qT", type1); + inform (op2_loc, "second expression has type %qT", type2); + } result_type = build_pointer_type (build_qualified_type (void_type_node, qual)); } @@ -5580,7 +5580,7 @@ build_conditional_expr (location_t colon_loc, tree ifexp, bool ifexp_bcp, && (code2 == INTEGER_TYPE || code2 == BITINT_TYPE)) { if (!null_pointer_constant_p (orig_op2)) - pedwarn (colon_loc, 0, + pedwarn (colon_loc, OPT_Wint_conversion, "pointer/integer type mismatch in conditional expression"); else { @@ -5592,7 +5592,7 @@ build_conditional_expr (location_t colon_loc, tree ifexp, bool ifexp_bcp, && (code1 == INTEGER_TYPE || code1 == BITINT_TYPE)) { if (!null_pointer_constant_p (orig_op1)) - pedwarn (colon_loc, 0, + pedwarn (colon_loc, OPT_Wint_conversion, "pointer/integer type mismatch in conditional expression"); else { @@ -5960,6 +5960,9 @@ handle_warn_cast_qual (location_t loc, tree type, tree otype) static bool c_safe_arg_type_equiv_p (tree t1, tree t2) { + if (error_operand_p (t1) || error_operand_p (t2)) + return true; + t1 = TYPE_MAIN_VARIANT (t1); t2 = TYPE_MAIN_VARIANT (t2); @@ -7446,6 +7449,7 @@ convert_for_assignment (location_t location, location_t expr_loc, tree type, /* See if the pointers point to incompatible scalar storage orders. */ if (warn_scalar_storage_order + && !null_pointer_constant_p (rhs) && (AGGREGATE_TYPE_P (ttl) && TYPE_REVERSE_STORAGE_ORDER (ttl)) != (AGGREGATE_TYPE_P (ttr) && TYPE_REVERSE_STORAGE_ORDER (ttr))) { diff --git a/gcc/calls.cc b/gcc/calls.cc index e9e6951..9edb583 100644 --- a/gcc/calls.cc +++ b/gcc/calls.cc @@ -848,6 +848,9 @@ flags_from_decl_or_type (const_tree exp) flags |= ECF_TM_PURE; } + if (lookup_attribute ("expected_throw", DECL_ATTRIBUTES (exp))) + flags |= ECF_XTHROW; + flags = special_function_p (exp, flags); } else if (TYPE_P (exp)) diff --git a/gcc/common.opt b/gcc/common.opt index b103b8d..1cf3bdd 100644 --- a/gcc/common.opt +++ b/gcc/common.opt @@ -1831,6 +1831,41 @@ fharden-conditional-branches Common Var(flag_harden_conditional_branches) Optimization Harden conditional branches by checking reversed conditions. +fharden-control-flow-redundancy +Common Var(flag_harden_control_flow_redundancy) Optimization +Harden control flow by recording and checking execution paths. + +fhardcfr-skip-leaf +Common Var(flag_harden_control_flow_redundancy_skip_leaf) Optimization +Disable CFR in leaf functions. + +fhardcfr-check-returning-calls +Common Var(flag_harden_control_flow_redundancy_check_returning_calls) Init(-1) Optimization +Check CFR execution paths also before calls followed by returns of their results. + +fhardcfr-check-exceptions +Common Var(flag_harden_control_flow_redundancy_check_exceptions) Init(-1) Optimization +Check CFR execution paths also when exiting a function through an exception. + +fhardcfr-check-noreturn-calls= +Common Joined RejectNegative Enum(hardcfr_check_noreturn_calls) Var(flag_harden_control_flow_redundancy_check_noreturn) Init(HCFRNR_UNSPECIFIED) Optimization +-fhardcfr-check-noreturn-calls=[always|no-xthrow|nothrow|never] Check CFR execution paths also before calling noreturn functions. 
+ +Enum +Name(hardcfr_check_noreturn_calls) Type(enum hardcfr_noret) UnknownError(unknown hardcfr noreturn checking level %qs) + +EnumValue +Enum(hardcfr_check_noreturn_calls) String(never) Value(HCFRNR_NEVER) + +EnumValue +Enum(hardcfr_check_noreturn_calls) String(nothrow) Value(HCFRNR_NOTHROW) + +EnumValue +Enum(hardcfr_check_noreturn_calls) String(no-xthrow) Value(HCFRNR_NO_XTHROW) + +EnumValue +Enum(hardcfr_check_noreturn_calls) String(always) Value(HCFRNR_ALWAYS) + ; Nonzero means ignore `#ident' directives. 0 means handle them. ; Generate position-independent code for executables if possible ; On SVR4 targets, it also controls whether or not to emit a @@ -2892,10 +2927,25 @@ Common Var(flag_tracer) Optimization Perform superblock formation via tail duplication. ftrampolines -Common Var(flag_trampolines) Init(0) +Common Var(flag_trampolines) Init(HEAP_TRAMPOLINES_INIT) For targets that normally need trampolines for nested functions, always generate them instead of using descriptors. +ftrampoline-impl= +Common Joined RejectNegative Enum(trampoline_impl) Var(flag_trampoline_impl) Init(HEAP_TRAMPOLINES_INIT ? TRAMPOLINE_IMPL_HEAP : TRAMPOLINE_IMPL_STACK) +Whether trampolines are generated in executable memory rather than +executable stack. + +Enum +Name(trampoline_impl) Type(enum trampoline_impl) UnknownError(unknown trampoline implementation %qs) + +EnumValue +Enum(trampoline_impl) String(stack) Value(TRAMPOLINE_IMPL_STACK) + +EnumValue +Enum(trampoline_impl) String(heap) Value(TRAMPOLINE_IMPL_HEAP) + + ; Zero means that floating-point math operations cannot generate a ; (user-visible) trap. This is the case, for example, in nonstop ; IEEE 754 arithmetic. diff --git a/gcc/common/config/riscv/riscv-common.cc b/gcc/common/config/riscv/riscv-common.cc index d721628..526dbb7 100644 --- a/gcc/common/config/riscv/riscv-common.cc +++ b/gcc/common/config/riscv/riscv-common.cc @@ -285,7 +285,7 @@ static const struct riscv_ext_version riscv_ext_version_table[] = {"zvfhmin", ISA_SPEC_CLASS_NONE, 1, 0}, {"zvfh", ISA_SPEC_CLASS_NONE, 1, 0}, - {"zfa", ISA_SPEC_CLASS_NONE, 0, 1}, + {"zfa", ISA_SPEC_CLASS_NONE, 1, 0}, {"zmmul", ISA_SPEC_CLASS_NONE, 1, 0}, @@ -1495,6 +1495,10 @@ riscv_subset_list::parse (const char *arch, location_t loc) error_at (loc, "%<-march=%s%>: z*inx conflicts with floating-point " "extensions", arch); + /* 'H' hypervisor extension requires base ISA with 32 registers. */ + if (subset_list->lookup ("e") && subset_list->lookup ("h")) + error_at (loc, "%<-march=%s%>: h extension requires i extension", arch); + return subset_list; fail: diff --git a/gcc/config.gcc b/gcc/config.gcc index 37311fc..606d3a8 100644 --- a/gcc/config.gcc +++ b/gcc/config.gcc @@ -1171,6 +1171,17 @@ case ${target} in ;; esac +# Figure out if we need to enable heap trampolines by default +case ${target} in +*-*-darwin2*) + # Currently, we do this for macOS 11 and above. 
+ tm_defines="$tm_defines HEAP_TRAMPOLINES_INIT=1" + ;; +*) + tm_defines="$tm_defines HEAP_TRAMPOLINES_INIT=0" + ;; +esac + case ${target} in aarch64*-*-elf | aarch64*-*-fuchsia* | aarch64*-*-rtems*) tm_file="${tm_file} elfos.h newlib-stdint.h" @@ -4529,7 +4540,7 @@ case "${target}" in for which in arch tune; do eval "val=\$with_$which" case ${val} in - "" | fiji | gfx900 | gfx906 | gfx908 | gfx90a) + "" | fiji | gfx900 | gfx906 | gfx908 | gfx90a | gfx1030) # OK ;; *) @@ -4538,7 +4549,19 @@ case "${target}" in ;; esac done - [ "x$with_arch" = x ] && with_arch=fiji + [ "x$with_arch" = x ] && with_arch=gfx900 + + case "x${with_multilib_list}" in + x | xno) + TM_MULTILIB_CONFIG= + ;; + xdefault | xyes) + TM_MULTILIB_CONFIG=`echo "gfx900,gfx906,gfx908,gfx90a" | sed "s/${with_arch},\?//;s/,$//"` + ;; + *) + TM_MULTILIB_CONFIG="${with_multilib_list}" + ;; + esac ;; hppa*-*-*) diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc index 62b1ae0..a28b66a 100644 --- a/gcc/config/aarch64/aarch64.cc +++ b/gcc/config/aarch64/aarch64.cc @@ -28079,18 +28079,18 @@ aarch64_test_fractional_cost () ASSERT_EQ (cf (2, 3) * 5, cf (10, 3)); ASSERT_EQ (14 * cf (11, 21), cf (22, 3)); - ASSERT_TRUE (cf (4, 15) < cf (5, 15)); - ASSERT_FALSE (cf (5, 15) < cf (5, 15)); - ASSERT_FALSE (cf (6, 15) < cf (5, 15)); - ASSERT_TRUE (cf (1, 3) < cf (2, 5)); - ASSERT_TRUE (cf (1, 12) < cf (1, 6)); - ASSERT_FALSE (cf (5, 3) < cf (5, 3)); - ASSERT_TRUE (cf (239, 240) < 1); - ASSERT_FALSE (cf (240, 240) < 1); - ASSERT_FALSE (cf (241, 240) < 1); - ASSERT_FALSE (2 < cf (207, 104)); - ASSERT_FALSE (2 < cf (208, 104)); - ASSERT_TRUE (2 < cf (209, 104)); + ASSERT_TRUE (cf (4, 15) <= cf (5, 15)); + ASSERT_TRUE (cf (5, 15) <= cf (5, 15)); + ASSERT_FALSE (cf (6, 15) <= cf (5, 15)); + ASSERT_TRUE (cf (1, 3) <= cf (2, 5)); + ASSERT_TRUE (cf (1, 12) <= cf (1, 6)); + ASSERT_TRUE (cf (5, 3) <= cf (5, 3)); + ASSERT_TRUE (cf (239, 240) <= 1); + ASSERT_TRUE (cf (240, 240) <= 1); + ASSERT_FALSE (cf (241, 240) <= 1); + ASSERT_FALSE (2 <= cf (207, 104)); + ASSERT_TRUE (2 <= cf (208, 104)); + ASSERT_TRUE (2 <= cf (209, 104)); ASSERT_TRUE (cf (4, 15) < cf (5, 15)); ASSERT_FALSE (cf (5, 15) < cf (5, 15)); diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md index 32c7adc..e6af09c 100644 --- a/gcc/config/aarch64/aarch64.md +++ b/gcc/config/aarch64/aarch64.md @@ -1757,16 +1757,16 @@ } ) -(define_insn "load_pair_dw_tftf" - [(set (match_operand:TF 0 "register_operand" "=w") - (match_operand:TF 1 "aarch64_mem_pair_operand" "Ump")) - (set (match_operand:TF 2 "register_operand" "=w") - (match_operand:TF 3 "memory_operand" "m"))] +(define_insn "load_pair_dw_<TX:mode><TX2:mode>" + [(set (match_operand:TX 0 "register_operand" "=w") + (match_operand:TX 1 "aarch64_mem_pair_operand" "Ump")) + (set (match_operand:TX2 2 "register_operand" "=w") + (match_operand:TX2 3 "memory_operand" "m"))] "TARGET_SIMD && rtx_equal_p (XEXP (operands[3], 0), plus_constant (Pmode, XEXP (operands[1], 0), - GET_MODE_SIZE (TFmode)))" + GET_MODE_SIZE (<TX:MODE>mode)))" "ldp\\t%q0, %q2, %z1" [(set_attr "type" "neon_ldp_q") (set_attr "fp" "yes")] @@ -1805,11 +1805,11 @@ } ) -(define_insn "store_pair_dw_tftf" - [(set (match_operand:TF 0 "aarch64_mem_pair_operand" "=Ump") - (match_operand:TF 1 "register_operand" "w")) - (set (match_operand:TF 2 "memory_operand" "=m") - (match_operand:TF 3 "register_operand" "w"))] +(define_insn "store_pair_dw_<TX:mode><TX2:mode>" + [(set (match_operand:TX 0 "aarch64_mem_pair_operand" "=Ump") + (match_operand:TX 1 
"register_operand" "w")) + (set (match_operand:TX2 2 "memory_operand" "=m") + (match_operand:TX2 3 "register_operand" "w"))] "TARGET_SIMD && rtx_equal_p (XEXP (operands[2], 0), plus_constant (Pmode, diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md index 2451d8c..f9e2210 100644 --- a/gcc/config/aarch64/iterators.md +++ b/gcc/config/aarch64/iterators.md @@ -319,6 +319,9 @@ (define_mode_iterator TX [TI TF TD]) +;; Duplicate of the above +(define_mode_iterator TX2 [TI TF TD]) + (define_mode_iterator VTX [TI TF TD V16QI V8HI V4SI V2DI V8HF V4SF V2DF V8BF]) ;; Advanced SIMD opaque structure modes. diff --git a/gcc/config/gcn/gcn-hsa.h b/gcc/config/gcn/gcn-hsa.h index 0b5610b..aa1294c 100644 --- a/gcc/config/gcn/gcn-hsa.h +++ b/gcc/config/gcn/gcn-hsa.h @@ -75,7 +75,7 @@ extern unsigned int gcn_local_sym_hash (const char *name); supported for gcn. */ #define GOMP_SELF_SPECS "" -#define NO_XNACK "!march=*:;march=fiji:;" +#define NO_XNACK "!march=*:;march=fiji:;march=gfx1030:;" #define NO_SRAM_ECC "!march=*:;march=fiji:;march=gfx900:;march=gfx906:;" /* In HSACOv4 no attribute setting means the binary supports "any" hardware @@ -92,6 +92,7 @@ extern unsigned int gcn_local_sym_hash (const char *name); "%{!march=*|march=fiji:--amdhsa-code-object-version=3} " \ "%{" NO_XNACK XNACKOPT "}" \ "%{" NO_SRAM_ECC SRAMOPT "} " \ + "%{march=gfx1030:-mattr=+wavefrontsize64} " \ "-filetype=obj" #define LINK_SPEC "--pie --export-dynamic" #define LIB_SPEC "-lc" diff --git a/gcc/config/gcn/gcn-opts.h b/gcc/config/gcn/gcn-opts.h index f780a7c..b4f494d 100644 --- a/gcc/config/gcn/gcn-opts.h +++ b/gcc/config/gcn/gcn-opts.h @@ -24,7 +24,8 @@ enum processor_type PROCESSOR_VEGA10, // gfx900 PROCESSOR_VEGA20, // gfx906 PROCESSOR_GFX908, - PROCESSOR_GFX90a + PROCESSOR_GFX90a, + PROCESSOR_GFX1030 }; #define TARGET_FIJI (gcn_arch == PROCESSOR_FIJI) @@ -32,12 +33,14 @@ enum processor_type #define TARGET_VEGA20 (gcn_arch == PROCESSOR_VEGA20) #define TARGET_GFX908 (gcn_arch == PROCESSOR_GFX908) #define TARGET_GFX90a (gcn_arch == PROCESSOR_GFX90a) +#define TARGET_GFX1030 (gcn_arch == PROCESSOR_GFX1030) /* Set in gcn_option_override. 
*/ extern enum gcn_isa { ISA_UNKNOWN, ISA_GCN3, ISA_GCN5, + ISA_RDNA2, ISA_CDNA1, ISA_CDNA2 } gcn_isa; @@ -50,6 +53,8 @@ extern enum gcn_isa { #define TARGET_CDNA1_PLUS (gcn_isa >= ISA_CDNA1) #define TARGET_CDNA2 (gcn_isa == ISA_CDNA2) #define TARGET_CDNA2_PLUS (gcn_isa >= ISA_CDNA2) +#define TARGET_RDNA2 (gcn_isa == ISA_RDNA2) + #define TARGET_M0_LDS_LIMIT (TARGET_GCN3) #define TARGET_PACKED_WORK_ITEMS (TARGET_CDNA2_PLUS) diff --git a/gcc/config/gcn/gcn-valu.md b/gcc/config/gcn/gcn-valu.md index 32b170e..c128c81 100644 --- a/gcc/config/gcn/gcn-valu.md +++ b/gcc/config/gcn/gcn-valu.md @@ -1412,7 +1412,7 @@ [(match_operand:V_noHI 1 "register_operand" " v") (match_operand:SI 2 "const_int_operand" " n")] UNSPEC_MOV_DPP_SHR))] - "" + "!TARGET_RDNA2" { return gcn_expand_dpp_shr_insn (<MODE>mode, "v_mov_b32", UNSPEC_MOV_DPP_SHR, INTVAL (operands[2])); @@ -1548,7 +1548,7 @@ (match_dup 1)) (match_dup 1))))] "" - "v_addc%^_u32\t%0, %4, %2, %1, %3" + "{v_addc%^_u32|v_add_co_ci_u32}\t%0, %4, %2, %1, %3" [(set_attr "type" "vop2,vop3b") (set_attr "length" "4,8")]) @@ -1613,10 +1613,10 @@ (match_dup 1))))] "" "@ - v_subb%^_u32\t%0, %4, %1, %2, %3 - v_subb%^_u32\t%0, %4, %1, %2, %3 - v_subbrev%^_u32\t%0, %4, %2, %1, %3 - v_subbrev%^_u32\t%0, %4, %2, %1, %3" + {v_subb%^_u32|v_sub_co_ci_u32}\t%0, %4, %1, %2, %3 + {v_subb%^_u32|v_sub_co_ci_u32}\t%0, %4, %1, %2, %3 + {v_subbrev%^_u32|v_subrev_co_ci_u32}\t%0, %4, %2, %1, %3 + {v_subbrev%^_u32|v_subrev_co_ci_u32}\t%0, %4, %2, %1, %3" [(set_attr "type" "vop2,vop3b,vop2,vop3b") (set_attr "length" "4,8,4,8")]) @@ -3667,11 +3667,11 @@ ;; {{{ Vector comparison/merge (define_insn "vec_cmp<mode>di" - [(set (match_operand:DI 0 "register_operand" "=cV,cV, e, e,Sg,Sg") + [(set (match_operand:DI 0 "register_operand" "=cV,cV, e, e,Sg,Sg, e, e") (match_operator:DI 1 "gcn_fp_compare_operator" - [(match_operand:V_noQI 2 "gcn_alu_operand" "vSv, B,vSv, B, v,vA") - (match_operand:V_noQI 3 "gcn_vop3_operand" " v, v, v, v,vA, v")])) - (clobber (match_scratch:DI 4 "= X, X, cV,cV, X, X"))] + [(match_operand:V_noQI 2 "gcn_alu_operand" "vSv, B,vSv, B, v,vA,vSv, B") + (match_operand:V_noQI 3 "gcn_vop3_operand" " v, v, v, v,vA, v, v, v")])) + (clobber (match_scratch:DI 4 "= X, X, cV,cV, X, X, X, X"))] "" "@ v_cmp%E1\tvcc, %2, %3 @@ -3679,9 +3679,12 @@ v_cmpx%E1\tvcc, %2, %3 v_cmpx%E1\tvcc, %2, %3 v_cmp%E1\t%0, %2, %3 - v_cmp%E1\t%0, %2, %3" - [(set_attr "type" "vopc,vopc,vopc,vopc,vop3a,vop3a") - (set_attr "length" "4,8,4,8,8,8")]) + v_cmp%E1\t%0, %2, %3 + v_cmpx%E1\t%2, %3 + v_cmpx%E1\t%2, %3" + [(set_attr "type" "vopc,vopc,vopc,vopc,vop3a,vop3a,vopc,vopc") + (set_attr "length" "4,8,4,8,8,8,4,8") + (set_attr "rdna" "*,*,no,no,*,*,yes,yes")]) (define_expand "vec_cmpu<mode>di" [(match_operand:DI 0 "register_operand") @@ -3716,13 +3719,13 @@ }) (define_insn "vec_cmp<mode>di_exec" - [(set (match_operand:DI 0 "register_operand" "=cV,cV, e, e,Sg,Sg") + [(set (match_operand:DI 0 "register_operand" "=cV,cV, e, e,Sg,Sg, e, e") (and:DI (match_operator 1 "gcn_fp_compare_operator" - [(match_operand:V_noQI 2 "gcn_alu_operand" "vSv, B,vSv, B, v,vA") - (match_operand:V_noQI 3 "gcn_vop3_operand" " v, v, v, v,vA, v")]) - (match_operand:DI 4 "gcn_exec_reg_operand" " e, e, e, e, e, e"))) - (clobber (match_scratch:DI 5 "= X, X, cV,cV, X, X"))] + [(match_operand:V_noQI 2 "gcn_alu_operand" "vSv, B,vSv, B, v,vA,vSv, B") + (match_operand:V_noQI 3 "gcn_vop3_operand" " v, v, v, v,vA, v, v, v")]) + (match_operand:DI 4 "gcn_exec_reg_operand" " e, e, e, e, e, e, e, e"))) + (clobber (match_scratch:DI 5 "= X, X, 
cV,cV, X, X, X, X"))] "" "@ v_cmp%E1\tvcc, %2, %3 @@ -3730,9 +3733,12 @@ v_cmpx%E1\tvcc, %2, %3 v_cmpx%E1\tvcc, %2, %3 v_cmp%E1\t%0, %2, %3 - v_cmp%E1\t%0, %2, %3" - [(set_attr "type" "vopc,vopc,vopc,vopc,vop3a,vop3a") - (set_attr "length" "4,8,4,8,8,8")]) + v_cmp%E1\t%0, %2, %3 + v_cmpx%E1\t%2, %3 + v_cmpx%E1\t%2, %3" + [(set_attr "type" "vopc,vopc,vopc,vopc,vop3a,vop3a,vopc,vopc") + (set_attr "length" "4,8,4,8,8,8,4,8") + (set_attr "rdna" "*,*,no,no,*,*,yes,yes")]) (define_expand "vec_cmpu<mode>di_exec" [(match_operand:DI 0 "register_operand") @@ -3772,42 +3778,48 @@ }) (define_insn "vec_cmp<mode>di_dup" - [(set (match_operand:DI 0 "register_operand" "=cV,cV, e,e,Sg") + [(set (match_operand:DI 0 "register_operand" "=cV,cV, e,e,Sg, e,e") (match_operator:DI 1 "gcn_fp_compare_operator" [(vec_duplicate:V_noQI (match_operand:<SCALAR_MODE> 2 "gcn_alu_operand" - " Sv, B,Sv,B, A")) - (match_operand:V_noQI 3 "gcn_vop3_operand" " v, v, v,v, v")])) - (clobber (match_scratch:DI 4 "= X,X,cV,cV, X"))] + " Sv, B,Sv,B, A,Sv,B")) + (match_operand:V_noQI 3 "gcn_vop3_operand" " v, v, v,v, v, v,v")])) + (clobber (match_scratch:DI 4 "= X,X,cV,cV, X, X,X"))] "" "@ v_cmp%E1\tvcc, %2, %3 v_cmp%E1\tvcc, %2, %3 v_cmpx%E1\tvcc, %2, %3 v_cmpx%E1\tvcc, %2, %3 - v_cmp%E1\t%0, %2, %3" - [(set_attr "type" "vopc,vopc,vopc,vopc,vop3a") - (set_attr "length" "4,8,4,8,8")]) + v_cmp%E1\t%0, %2, %3 + v_cmpx%E1\t%2, %3 + v_cmpx%E1\t%2, %3" + [(set_attr "type" "vopc,vopc,vopc,vopc,vop3a,vopc,vopc") + (set_attr "length" "4,8,4,8,8,4,8") + (set_attr "rdna" "*,*,no,no,*,yes,yes")]) (define_insn "vec_cmp<mode>di_dup_exec" - [(set (match_operand:DI 0 "register_operand" "=cV,cV, e,e,Sg") + [(set (match_operand:DI 0 "register_operand" "=cV,cV, e,e,Sg, e,e") (and:DI (match_operator 1 "gcn_fp_compare_operator" [(vec_duplicate:V_noQI (match_operand:<SCALAR_MODE> 2 "gcn_alu_operand" - " Sv, B,Sv,B, A")) - (match_operand:V_noQI 3 "gcn_vop3_operand" " v, v, v,v, v")]) - (match_operand:DI 4 "gcn_exec_reg_operand" " e, e, e,e, e"))) - (clobber (match_scratch:DI 5 "= X,X,cV,cV, X"))] + " Sv, B,Sv,B, A,Sv,B")) + (match_operand:V_noQI 3 "gcn_vop3_operand" " v, v, v,v, v, v,v")]) + (match_operand:DI 4 "gcn_exec_reg_operand" " e, e, e,e, e, e,e"))) + (clobber (match_scratch:DI 5 "= X,X,cV,cV, X, X,X"))] "" "@ v_cmp%E1\tvcc, %2, %3 v_cmp%E1\tvcc, %2, %3 v_cmpx%E1\tvcc, %2, %3 v_cmpx%E1\tvcc, %2, %3 - v_cmp%E1\t%0, %2, %3" - [(set_attr "type" "vopc,vopc,vopc,vopc,vop3a") - (set_attr "length" "4,8,4,8,8")]) + v_cmp%E1\t%0, %2, %3 + v_cmpx%E1\t%2, %3 + v_cmpx%E1\t%2, %3" + [(set_attr "type" "vopc,vopc,vopc,vopc,vop3a,vopc,vopc") + (set_attr "length" "4,8,4,8,8,4,8") + (set_attr "rdna" "*,*,no,no,*,yes,yes")]) (define_expand "vcond_mask_<mode>di" [(parallel @@ -4176,7 +4188,7 @@ (unspec:<SCALAR_MODE> [(match_operand:V_ALL 1 "register_operand")] REDUC_UNSPEC))] - "" + "!TARGET_RDNA2" { rtx tmp = gcn_expand_reduc_scalar (<MODE>mode, operands[1], <reduc_unspec>); @@ -4229,7 +4241,8 @@ REDUC_UNSPEC))] ; GCN3 requires a carry out, GCN5 not "!(TARGET_GCN3 && SCALAR_INT_MODE_P (<SCALAR_MODE>mode) - && <reduc_unspec> == UNSPEC_PLUS_DPP_SHR)" + && <reduc_unspec> == UNSPEC_PLUS_DPP_SHR) + && !TARGET_RDNA2" { return gcn_expand_dpp_shr_insn (<MODE>mode, "<reduc_insn>", <reduc_unspec>, INTVAL (operands[3])); @@ -4274,7 +4287,7 @@ (match_operand:SI 3 "const_int_operand" "n")] UNSPEC_PLUS_CARRY_DPP_SHR)) (clobber (reg:DI VCC_REG))] - "" + "!TARGET_RDNA2" { return gcn_expand_dpp_shr_insn (<VnSI>mode, "v_add%^_u32", UNSPEC_PLUS_CARRY_DPP_SHR, @@ -4292,7 +4305,7 @@ 
(match_operand:DI 4 "register_operand" "cV")] UNSPEC_PLUS_CARRY_IN_DPP_SHR)) (clobber (reg:DI VCC_REG))] - "" + "!TARGET_RDNA2" { return gcn_expand_dpp_shr_insn (<MODE>mode, "v_addc%^_u32", UNSPEC_PLUS_CARRY_IN_DPP_SHR, diff --git a/gcc/config/gcn/gcn.cc b/gcc/config/gcn/gcn.cc index ef3b647..6f85f55 100644 --- a/gcc/config/gcn/gcn.cc +++ b/gcc/config/gcn/gcn.cc @@ -136,6 +136,7 @@ gcn_option_override (void) : gcn_arch == PROCESSOR_VEGA20 ? ISA_GCN5 : gcn_arch == PROCESSOR_GFX908 ? ISA_CDNA1 : gcn_arch == PROCESSOR_GFX90a ? ISA_CDNA2 + : gcn_arch == PROCESSOR_GFX1030 ? ISA_RDNA2 : ISA_UNKNOWN); gcc_assert (gcn_isa != ISA_UNKNOWN); @@ -1616,6 +1617,7 @@ gcn_global_address_p (rtx addr) { rtx base = XEXP (addr, 0); rtx offset = XEXP (addr, 1); + int offsetbits = (TARGET_RDNA2 ? 11 : 12); bool immediate_p = (CONST_INT_P (offset) && INTVAL (offset) >= -(1 << 12) && INTVAL (offset) < (1 << 12)); @@ -1748,10 +1750,11 @@ gcn_addr_space_legitimate_address_p (machine_mode mode, rtx x, bool strict, rtx base = XEXP (x, 0); rtx offset = XEXP (x, 1); + int offsetbits = (TARGET_RDNA2 ? 11 : 12); bool immediate_p = (GET_CODE (offset) == CONST_INT - /* Signed 13-bit immediate. */ - && INTVAL (offset) >= -(1 << 12) - && INTVAL (offset) < (1 << 12) + /* Signed 12/13-bit immediate. */ + && INTVAL (offset) >= -(1 << offsetbits) + && INTVAL (offset) < (1 << offsetbits) /* The low bits of the offset are ignored, even when they're meant to realign the pointer. */ && !(INTVAL (offset) & 0x3)); @@ -3029,6 +3032,8 @@ gcn_omp_device_kind_arch_isa (enum omp_device_kind_arch_isa trait, return gcn_arch == PROCESSOR_GFX908; if (strcmp (name, "gfx90a") == 0) return gcn_arch == PROCESSOR_GFX90a; + if (strcmp (name, "gfx1030") == 0) + return gcn_arch == PROCESSOR_GFX1030; return 0; default: gcc_unreachable (); @@ -3610,9 +3615,11 @@ gcn_expand_epilogue (void) set_mem_addr_space (retptr_mem, ADDR_SPACE_SCALAR_FLAT); emit_move_insn (kernarg_reg, retptr_mem); - rtx retval_mem = gen_rtx_MEM (SImode, kernarg_reg); - rtx scalar_retval = gen_rtx_REG (SImode, FIRST_PARM_REG); - set_mem_addr_space (retval_mem, ADDR_SPACE_SCALAR_FLAT); + rtx retval_addr = gen_rtx_REG (DImode, FIRST_VPARM_REG); + emit_move_insn (retval_addr, kernarg_reg); + rtx retval_mem = gen_rtx_MEM (SImode, retval_addr); + rtx scalar_retval = gen_rtx_REG (SImode, FIRST_VPARM_REG + 2); + set_mem_addr_space (retval_mem, ADDR_SPACE_FLAT); emit_move_insn (scalar_retval, gen_rtx_REG (SImode, RETURN_VALUE_REG)); emit_move_insn (retval_mem, scalar_retval); } @@ -6454,6 +6461,11 @@ output_file_start (void) case PROCESSOR_GFX90a: cpu = "gfx90a"; break; + case PROCESSOR_GFX1030: + cpu = "gfx1030"; + xnack = ""; + sram_ecc = ""; + break; default: gcc_unreachable (); } diff --git a/gcc/config/gcn/gcn.h b/gcc/config/gcn/gcn.h index 4ff9a5d..6372f49 100644 --- a/gcc/config/gcn/gcn.h +++ b/gcc/config/gcn/gcn.h @@ -28,6 +28,8 @@ builtin_define ("__CDNA1__"); \ else if (TARGET_CDNA2) \ builtin_define ("__CDNA2__"); \ + else if (TARGET_RDNA2) \ + builtin_define ("__RDNA2__"); \ if (TARGET_FIJI) \ { \ builtin_define ("__fiji__"); \ @@ -43,6 +45,8 @@ builtin_define ("__gfx90a__"); \ } while (0) +#define ASSEMBLER_DIALECT (TARGET_RDNA2 ? 1 : 0) + /* Support for a compile-time default architecture and tuning. The rules are: --with-arch is ignored if -march is specified. 
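The gcn-opts.h and gcn.h hunks above wire gfx1030 up as an RDNA2 ISA: gcn.h now predefines __RDNA2__ alongside the existing __CDNA1__/__CDNA2__ macros, and its ASSEMBLER_DIALECT definition is what makes the {old|new} mnemonic pairs in the gcn-valu.md and gcn.md templates emit the second, RDNA2 spelling on gfx1030. A small, hypothetical sketch of how offload code could key off the new macro (the macro names are real; GCN_ISA_NAME and its placeholder strings are invented for illustration):

    /* Hypothetical sketch: the architecture macros come from
       TARGET_CPU_CPP_BUILTINS in gcn.h; __RDNA2__ is the one added above
       for gfx1030.  */
    #if defined (__RDNA2__)
    #  define GCN_ISA_NAME "rdna2"
    #elif defined (__CDNA2__)
    #  define GCN_ISA_NAME "cdna2"
    #elif defined (__CDNA1__)
    #  define GCN_ISA_NAME "cdna1"
    #else
    #  define GCN_ISA_NAME "gcn3/gcn5"
    #endif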
diff --git a/gcc/config/gcn/gcn.md b/gcc/config/gcn/gcn.md index 30fe9e3..a3d8bee 100644 --- a/gcc/config/gcn/gcn.md +++ b/gcc/config/gcn/gcn.md @@ -285,9 +285,16 @@ ; Disable alternatives that only apply to specific ISA variants. (define_attr "gcn_version" "gcn3,gcn5" (const_string "gcn3")) +(define_attr "rdna" "any,no,yes" (const_string "any")) (define_attr "enabled" "" - (cond [(eq_attr "gcn_version" "gcn3") (const_int 1) + (cond [(and (eq_attr "rdna" "no") + (ne (symbol_ref "TARGET_RDNA2") (const_int 0))) + (const_int 0) + (and (eq_attr "rdna" "yes") + (eq (symbol_ref "TARGET_RDNA2") (const_int 0))) + (const_int 0) + (eq_attr "gcn_version" "gcn3") (const_int 1) (and (eq_attr "gcn_version" "gcn5") (ne (symbol_ref "TARGET_GCN5_PLUS") (const_int 0))) (const_int 1)] @@ -812,7 +819,7 @@ if (cfun && cfun->machine && cfun->machine->normal_function) return "s_setpc_b64\ts[18:19]"; else - return "s_waitcnt\tlgkmcnt(0)\;s_dcache_wb\;s_endpgm"; + return "s_waitcnt\tlgkmcnt(0)\;s_endpgm"; } [(set_attr "type" "sop1") (set_attr "length" "12")]) @@ -1179,7 +1186,7 @@ "" "@ s_addc_u32\t%0, %1, %2 - v_addc%^_u32\t%0, vcc, %2, %1, vcc" + {v_addc%^_u32|v_add_co_ci_u32}\t%0, vcc, %2, %1, vcc" [(set_attr "type" "sop2,vop2") (set_attr "length" "8,4")]) @@ -1195,7 +1202,7 @@ "" "@ s_addc_u32\t%0, %1, 0 - v_addc%^_u32\t%0, vcc, 0, %1, vcc" + {v_addc%^_u32|v_add_co_ci_u32}\t%0, vcc, 0, %1, vcc" [(set_attr "type" "sop2,vop2") (set_attr "length" "4")]) @@ -1225,7 +1232,8 @@ gen_rtx_REG (DImode, CC_SAVE_REG) }; output_asm_insn ("v_add%^_u32\t%L0, %3, %L2, %L1", new_operands); - output_asm_insn ("v_addc%^_u32\t%H0, %3, %H2, %H1, %3", new_operands); + output_asm_insn ("{v_addc%^_u32|v_add_co_ci_u32}\t%H0, %3, %H2, %H1, %3", + new_operands); } else { @@ -1363,7 +1371,7 @@ s_mul_i32\t%0, %1, %2 s_mulk_i32\t%0, %2 s_mul_i32\t%0, %1, %2 - v_mul_lo_i32\t%0, %1, %2" + v_mul_lo_u32\t%0, %1, %2" [(set_attr "type" "sop2,sopk,sop2,vop3a") (set_attr "length" "4,4,8,4")]) @@ -1885,7 +1893,7 @@ [(set (match_operand:BLK 0) (unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))] "" - "buffer_wbinvl1_vol" + "{buffer_wbinvl1_vol|buffer_gl0_inv}" [(set_attr "type" "mubuf") (set_attr "length" "4")]) @@ -2004,6 +2012,7 @@ (use (match_operand:SIDI 2 "immediate_operand" " i, i, i"))] "" { + /* FIXME: RDNA cache instructions may be too conservative? */ switch (INTVAL (operands[2])) { case MEMMODEL_RELAXED: @@ -2026,11 +2035,17 @@ return "s_load%o0\t%0, %A1 glc\;s_waitcnt\tlgkmcnt(0)\;" "s_dcache_wb_vol"; case 1: - return "flat_load%o0\t%0, %A1%O1 glc\;s_waitcnt\t0\;" - "buffer_wbinvl1_vol"; + return (TARGET_RDNA2 + ? "flat_load%o0\t%0, %A1%O1 glc\;s_waitcnt\t0\;" + "buffer_gl0_inv" + : "flat_load%o0\t%0, %A1%O1 glc\;s_waitcnt\t0\;" + "buffer_wbinvl1_vol"); case 2: - return "global_load%o0\t%0, %A1%O1 glc\;s_waitcnt\tvmcnt(0)\;" - "buffer_wbinvl1_vol"; + return (TARGET_RDNA2 + ? "global_load%o0\t%0, %A1%O1 glc\;s_waitcnt\tvmcnt(0)\;" + "buffer_gl0_inv" + : "global_load%o0\t%0, %A1%O1 glc\;s_waitcnt\tvmcnt(0)\;" + "buffer_wbinvl1_vol"); } break; case MEMMODEL_ACQ_REL: @@ -2042,11 +2057,17 @@ return "s_dcache_wb_vol\;s_load%o0\t%0, %A1 glc\;" "s_waitcnt\tlgkmcnt(0)\;s_dcache_inv_vol"; case 1: - return "buffer_wbinvl1_vol\;flat_load%o0\t%0, %A1%O1 glc\;" - "s_waitcnt\t0\;buffer_wbinvl1_vol"; + return (TARGET_RDNA2 + ? 
"buffer_gl0_inv\;flat_load%o0\t%0, %A1%O1 glc\;" + "s_waitcnt\t0\;buffer_gl0_inv" + : "buffer_wbinvl1_vol\;flat_load%o0\t%0, %A1%O1 glc\;" + "s_waitcnt\t0\;buffer_wbinvl1_vol"); case 2: - return "buffer_wbinvl1_vol\;global_load%o0\t%0, %A1%O1 glc\;" - "s_waitcnt\tvmcnt(0)\;buffer_wbinvl1_vol"; + return (TARGET_RDNA2 + ? "buffer_gl0_inv\;global_load%o0\t%0, %A1%O1 glc\;" + "s_waitcnt\tvmcnt(0)\;buffer_gl0_inv" + : "buffer_wbinvl1_vol\;global_load%o0\t%0, %A1%O1 glc\;" + "s_waitcnt\tvmcnt(0)\;buffer_wbinvl1_vol"); } break; } @@ -2054,7 +2075,8 @@ } [(set_attr "type" "smem,flat,flat") (set_attr "length" "20") - (set_attr "gcn_version" "gcn5,*,gcn5")]) + (set_attr "gcn_version" "gcn5,*,gcn5") + (set_attr "rdna" "no,*,*")]) (define_insn "atomic_store<mode>" [(set (match_operand:SIDI 0 "memory_operand" "=RS,RF,RM") @@ -2084,9 +2106,13 @@ case 0: return "s_dcache_wb_vol\;s_store%o1\t%1, %A0 glc"; case 1: - return "buffer_wbinvl1_vol\;flat_store%o1\t%A0, %1%O0 glc"; + return (TARGET_RDNA2 + ? "buffer_gl0_inv\;flat_store%o1\t%A0, %1%O0 glc" + : "buffer_wbinvl1_vol\;flat_store%o1\t%A0, %1%O0 glc"); case 2: - return "buffer_wbinvl1_vol\;global_store%o1\t%A0, %1%O0 glc"; + return (TARGET_RDNA2 + ? "buffer_gl0_inv\;global_store%o1\t%A0, %1%O0 glc" + : "buffer_wbinvl1_vol\;global_store%o1\t%A0, %1%O0 glc"); } break; case MEMMODEL_ACQ_REL: @@ -2098,11 +2124,17 @@ return "s_dcache_wb_vol\;s_store%o1\t%1, %A0 glc\;" "s_waitcnt\tlgkmcnt(0)\;s_dcache_inv_vol"; case 1: - return "buffer_wbinvl1_vol\;flat_store%o1\t%A0, %1%O0 glc\;" - "s_waitcnt\t0\;buffer_wbinvl1_vol"; + return (TARGET_RDNA2 + ? "buffer_gl0_inv\;flat_store%o1\t%A0, %1%O0 glc\;" + "s_waitcnt\t0\;buffer_gl0_inv" + : "buffer_wbinvl1_vol\;flat_store%o1\t%A0, %1%O0 glc\;" + "s_waitcnt\t0\;buffer_wbinvl1_vol"); case 2: - return "buffer_wbinvl1_vol\;global_store%o1\t%A0, %1%O0 glc\;" - "s_waitcnt\tvmcnt(0)\;buffer_wbinvl1_vol"; + return (TARGET_RDNA2 + ? "buffer_gl0_inv\;global_store%o1\t%A0, %1%O0 glc\;" + "s_waitcnt\tvmcnt(0)\;buffer_gl0_inv" + : "buffer_wbinvl1_vol\;global_store%o1\t%A0, %1%O0 glc\;" + "s_waitcnt\tvmcnt(0)\;buffer_wbinvl1_vol"); } break; } @@ -2110,7 +2142,8 @@ } [(set_attr "type" "smem,flat,flat") (set_attr "length" "20") - (set_attr "gcn_version" "gcn5,*,gcn5")]) + (set_attr "gcn_version" "gcn5,*,gcn5") + (set_attr "rdna" "no,*,*")]) (define_insn "atomic_exchange<mode>" [(set (match_operand:SIDI 0 "register_operand" "=Sm, v, v") @@ -2145,11 +2178,17 @@ return "s_atomic_swap<X>\t%0, %1, %2 glc\;s_waitcnt\tlgkmcnt(0)\;" "s_dcache_wb_vol\;s_dcache_inv_vol"; case 1: - return "flat_atomic_swap<X>\t%0, %1, %2 glc\;s_waitcnt\t0\;" - "buffer_wbinvl1_vol"; + return (TARGET_RDNA2 + ? "flat_atomic_swap<X>\t%0, %1, %2 glc\;s_waitcnt\t0\;" + "buffer_gl0_inv" + : "flat_atomic_swap<X>\t%0, %1, %2 glc\;s_waitcnt\t0\;" + "buffer_wbinvl1_vol"); case 2: - return "global_atomic_swap<X>\t%0, %A1, %2%O1 glc\;" - "s_waitcnt\tvmcnt(0)\;buffer_wbinvl1_vol"; + return (TARGET_RDNA2 + ? "global_atomic_swap<X>\t%0, %A1, %2%O1 glc\;" + "s_waitcnt\tvmcnt(0)\;buffer_gl0_inv" + : "global_atomic_swap<X>\t%0, %A1, %2%O1 glc\;" + "s_waitcnt\tvmcnt(0)\;buffer_wbinvl1_vol"); } break; case MEMMODEL_RELEASE: @@ -2160,12 +2199,19 @@ return "s_dcache_wb_vol\;s_atomic_swap<X>\t%0, %1, %2 glc\;" "s_waitcnt\tlgkmcnt(0)"; case 1: - return "buffer_wbinvl1_vol\;flat_atomic_swap<X>\t%0, %1, %2 glc\;" - "s_waitcnt\t0"; + return (TARGET_RDNA2 + ? 
"buffer_gl0_inv\;flat_atomic_swap<X>\t%0, %1, %2 glc\;" + "s_waitcnt\t0" + : "buffer_wbinvl1_vol\;flat_atomic_swap<X>\t%0, %1, %2 glc\;" + "s_waitcnt\t0"); case 2: - return "buffer_wbinvl1_vol\;" - "global_atomic_swap<X>\t%0, %A1, %2%O1 glc\;" - "s_waitcnt\tvmcnt(0)"; + return (TARGET_RDNA2 + ? "buffer_gl0_inv\;" + "global_atomic_swap<X>\t%0, %A1, %2%O1 glc\;" + "s_waitcnt\tvmcnt(0)" + : "buffer_wbinvl1_vol\;" + "global_atomic_swap<X>\t%0, %A1, %2%O1 glc\;" + "s_waitcnt\tvmcnt(0)"); } break; case MEMMODEL_ACQ_REL: @@ -2177,12 +2223,19 @@ return "s_dcache_wb_vol\;s_atomic_swap<X>\t%0, %1, %2 glc\;" "s_waitcnt\tlgkmcnt(0)\;s_dcache_inv_vol"; case 1: - return "buffer_wbinvl1_vol\;flat_atomic_swap<X>\t%0, %1, %2 glc\;" - "s_waitcnt\t0\;buffer_wbinvl1_vol"; + return (TARGET_RDNA2 + ? "buffer_gl0_inv\;flat_atomic_swap<X>\t%0, %1, %2 glc\;" + "s_waitcnt\t0\;buffer_gl0_inv" + : "buffer_wbinvl1_vol\;flat_atomic_swap<X>\t%0, %1, %2 glc\;" + "s_waitcnt\t0\;buffer_wbinvl1_vol"); case 2: - return "buffer_wbinvl1_vol\;" - "global_atomic_swap<X>\t%0, %A1, %2%O1 glc\;" - "s_waitcnt\tvmcnt(0)\;buffer_wbinvl1_vol"; + return (TARGET_RDNA2 + ? "buffer_gl0_inv\;" + "global_atomic_swap<X>\t%0, %A1, %2%O1 glc\;" + "s_waitcnt\tvmcnt(0)\;buffer_gl0_inv" + : "buffer_wbinvl1_vol\;" + "global_atomic_swap<X>\t%0, %A1, %2%O1 glc\;" + "s_waitcnt\tvmcnt(0)\;buffer_wbinvl1_vol"); } break; } @@ -2190,7 +2243,8 @@ } [(set_attr "type" "smem,flat,flat") (set_attr "length" "20") - (set_attr "gcn_version" "gcn5,*,gcn5")]) + (set_attr "gcn_version" "gcn5,*,gcn5") + (set_attr "rdna" "no,*,*")]) ;; }}} ;; {{{ OpenACC / OpenMP diff --git a/gcc/config/gcn/gcn.opt b/gcc/config/gcn/gcn.opt index 36c2b53..7a852c5 100644 --- a/gcc/config/gcn/gcn.opt +++ b/gcc/config/gcn/gcn.opt @@ -40,6 +40,9 @@ Enum(gpu_type) String(gfx908) Value(PROCESSOR_GFX908) EnumValue Enum(gpu_type) String(gfx90a) Value(PROCESSOR_GFX90a) +EnumValue +Enum(gpu_type) String(gfx1030) Value(PROCESSOR_GFX1030) + march= Target RejectNegative Joined ToLower Enum(gpu_type) Var(gcn_arch) Init(PROCESSOR_FIJI) Specify the name of the target GPU. diff --git a/gcc/config/gcn/mkoffload.cc b/gcc/config/gcn/mkoffload.cc index 8b608bf..f6d56b7 100644 --- a/gcc/config/gcn/mkoffload.cc +++ b/gcc/config/gcn/mkoffload.cc @@ -57,6 +57,8 @@ #define EF_AMDGPU_MACH_AMDGCN_GFX908 0x30 #undef EF_AMDGPU_MACH_AMDGCN_GFX90a #define EF_AMDGPU_MACH_AMDGCN_GFX90a 0x3f +#undef EF_AMDGPU_MACH_AMDGCN_GFX1030 +#define EF_AMDGPU_MACH_AMDGCN_GFX1030 0x36 #define EF_AMDGPU_FEATURE_XNACK_V4 0x300 /* Mask. 
*/ #define EF_AMDGPU_FEATURE_XNACK_UNSUPPORTED_V4 0x000 @@ -942,6 +944,8 @@ main (int argc, char **argv) elf_arch = EF_AMDGPU_MACH_AMDGCN_GFX908; else if (strcmp (argv[i], "-march=gfx90a") == 0) elf_arch = EF_AMDGPU_MACH_AMDGCN_GFX90a; + else if (strcmp (argv[i], "-march=gfx1030") == 0) + elf_arch = EF_AMDGPU_MACH_AMDGCN_GFX1030; #define STR "-mstack-size=" else if (startswith (argv[i], STR)) gcn_stack_size = atoi (argv[i] + strlen (STR)); diff --git a/gcc/config/gcn/t-gcn-hsa b/gcc/config/gcn/t-gcn-hsa index ea27122..18db707 100644 --- a/gcc/config/gcn/t-gcn-hsa +++ b/gcc/config/gcn/t-gcn-hsa @@ -42,8 +42,12 @@ ALL_HOST_OBJS += gcn-run.o gcn-run$(exeext): gcn-run.o +$(LINKER) $(ALL_LINKERFLAGS) $(LDFLAGS) -o $@ $< -ldl -MULTILIB_OPTIONS = march=gfx900/march=gfx906/march=gfx908/march=gfx90a -MULTILIB_DIRNAMES = gfx900 gfx906 gfx908 gfx90a +empty := +space := $(empty) $(empty) +comma := , +multilib_list := $(subst $(comma),$(space),$(TM_MULTILIB_CONFIG)) +MULTILIB_OPTIONS = $(subst $(space),/,$(addprefix march=,$(multilib_list))) +MULTILIB_DIRNAMES = $(multilib_list) gcn-tree.o: $(srcdir)/config/gcn/gcn-tree.cc $(COMPILE) $< diff --git a/gcc/config/gcn/t-omp-device b/gcc/config/gcn/t-omp-device index 538624f..b1cd998 100644 --- a/gcc/config/gcn/t-omp-device +++ b/gcc/config/gcn/t-omp-device @@ -1,4 +1,4 @@ omp-device-properties-gcn: $(srcdir)/config/gcn/gcn.cc echo kind: gpu > $@ echo arch: amdgcn gcn >> $@ - echo isa: fiji gfx803 gfx900 gfx906 gfx908 gfx90a >> $@ + echo isa: fiji gfx803 gfx900 gfx906 gfx908 gfx90a gfx1030 >> $@ diff --git a/gcc/config/i386/darwin.h b/gcc/config/i386/darwin.h index 588bd66..036eefb 100644 --- a/gcc/config/i386/darwin.h +++ b/gcc/config/i386/darwin.h @@ -308,3 +308,9 @@ along with GCC; see the file COPYING3. If not see #define CLEAR_INSN_CACHE(beg, end) \ extern void sys_icache_invalidate(void *start, size_t len); \ sys_icache_invalidate ((beg), (size_t)((end)-(beg))) + +/* Disable custom function descriptors for Darwin when we have off-stack + trampolines. */ +#undef X86_CUSTOM_FUNCTION_TEST +#define X86_CUSTOM_FUNCTION_TEST \ + (flag_trampolines && flag_trampoline_impl == TRAMPOLINE_IMPL_HEAP) ? 0 : 1 diff --git a/gcc/config/i386/i386-options.cc b/gcc/config/i386/i386-options.cc index 3a03de5..072bbc6 100644 --- a/gcc/config/i386/i386-options.cc +++ b/gcc/config/i386/i386-options.cc @@ -128,24 +128,24 @@ along with GCC; see the file COPYING3. 
If not see #define m_ROCKETLAKE (HOST_WIDE_INT_1U<<PROCESSOR_ROCKETLAKE) #define m_GRANITERAPIDS (HOST_WIDE_INT_1U<<PROCESSOR_GRANITERAPIDS) #define m_GRANITERAPIDS_D (HOST_WIDE_INT_1U<<PROCESSOR_GRANITERAPIDS_D) +#define m_ARROWLAKE (HOST_WIDE_INT_1U<<PROCESSOR_ARROWLAKE) +#define m_ARROWLAKE_S (HOST_WIDE_INT_1U<<PROCESSOR_ARROWLAKE_S) +#define m_PANTHERLAKE (HOST_WIDE_INT_1U<<PROCESSOR_PANTHERLAKE) #define m_CORE_AVX512 (m_SKYLAKE_AVX512 | m_CANNONLAKE \ | m_ICELAKE_CLIENT | m_ICELAKE_SERVER | m_CASCADELAKE \ | m_TIGERLAKE | m_COOPERLAKE | m_SAPPHIRERAPIDS \ | m_ROCKETLAKE | m_GRANITERAPIDS | m_GRANITERAPIDS_D) #define m_CORE_AVX2 (m_HASWELL | m_SKYLAKE | m_CORE_AVX512) #define m_CORE_ALL (m_CORE2 | m_NEHALEM | m_SANDYBRIDGE | m_CORE_AVX2) +#define m_CORE_HYBRID (m_ALDERLAKE | m_ARROWLAKE | m_ARROWLAKE_S \ + | m_PANTHERLAKE) #define m_GOLDMONT (HOST_WIDE_INT_1U<<PROCESSOR_GOLDMONT) #define m_GOLDMONT_PLUS (HOST_WIDE_INT_1U<<PROCESSOR_GOLDMONT_PLUS) #define m_TREMONT (HOST_WIDE_INT_1U<<PROCESSOR_TREMONT) #define m_SIERRAFOREST (HOST_WIDE_INT_1U<<PROCESSOR_SIERRAFOREST) #define m_GRANDRIDGE (HOST_WIDE_INT_1U<<PROCESSOR_GRANDRIDGE) -#define m_ARROWLAKE (HOST_WIDE_INT_1U<<PROCESSOR_ARROWLAKE) -#define m_ARROWLAKE_S (HOST_WIDE_INT_1U<<PROCESSOR_ARROWLAKE_S) #define m_CLEARWATERFOREST (HOST_WIDE_INT_1U<<PROCESSOR_CLEARWATERFOREST) -#define m_PANTHERLAKE (HOST_WIDE_INT_1U<<PROCESSOR_PANTHERLAKE) #define m_CORE_ATOM (m_SIERRAFOREST | m_GRANDRIDGE | m_CLEARWATERFOREST) -#define m_CORE_HYBRID (m_ALDERLAKE | m_ARROWLAKE | m_ARROWLAKE_S \ - | m_PANTHERLAKE) #define m_INTEL (HOST_WIDE_INT_1U<<PROCESSOR_INTEL) /* Gather Data Sampling / CVE-2022-40982 / INTEL-SA-00828. Software mitigation. */ diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc index 641e768..f4772e0 100644 --- a/gcc/config/i386/i386.cc +++ b/gcc/config/i386/i386.cc @@ -26287,7 +26287,7 @@ ix86_libgcc_floating_mode_supported_p #define TARGET_HARD_REGNO_SCRATCH_OK ix86_hard_regno_scratch_ok #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS -#define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1 +#define TARGET_CUSTOM_FUNCTION_DESCRIPTORS X86_CUSTOM_FUNCTION_TEST #undef TARGET_ADDR_SPACE_ZERO_ADDRESS_VALID #define TARGET_ADDR_SPACE_ZERO_ADDRESS_VALID ix86_addr_space_zero_address_valid diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h index abfe167..7e66fa0 100644 --- a/gcc/config/i386/i386.h +++ b/gcc/config/i386/i386.h @@ -760,6 +760,12 @@ extern const char *host_detect_local_cpu (int argc, const char **argv); /* Minimum allocation boundary for the code of a function. */ #define FUNCTION_BOUNDARY 8 +/* We will and with this value to test if a custom function descriptor needs + a static chain. The function boundary must the adjusted so that the bit + this represents is no longer part of the address. 0 Disables the custom + function descriptors. */ +#define X86_CUSTOM_FUNCTION_TEST 1 + /* C++ stores the virtual bit in the lowest bit of function pointers. 
*/ #define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_pfn @@ -2401,11 +2407,13 @@ constexpr wide_int_bitmask PTA_GRANITERAPIDS = PTA_SAPPHIRERAPIDS | PTA_AMX_FP16 constexpr wide_int_bitmask PTA_GRANITERAPIDS_D = PTA_GRANITERAPIDS | PTA_AMX_COMPLEX; constexpr wide_int_bitmask PTA_GRANDRIDGE = PTA_SIERRAFOREST | PTA_RAOINT; -constexpr wide_int_bitmask PTA_ARROWLAKE = PTA_SIERRAFOREST; +constexpr wide_int_bitmask PTA_ARROWLAKE = PTA_ALDERLAKE | PTA_AVXIFMA + | PTA_AVXVNNIINT8 | PTA_AVXNECONVERT | PTA_CMPCCXADD | PTA_UINTR; constexpr wide_int_bitmask PTA_ARROWLAKE_S = PTA_ARROWLAKE | PTA_AVXVNNIINT16 | PTA_SHA512 | PTA_SM3 | PTA_SM4; -constexpr wide_int_bitmask PTA_CLEARWATERFOREST = PTA_ARROWLAKE_S | PTA_PREFETCHI - | PTA_USER_MSR; +constexpr wide_int_bitmask PTA_CLEARWATERFOREST = PTA_SIERRAFOREST + | PTA_AVXVNNIINT16 | PTA_SHA512 | PTA_SM3 | PTA_SM4 | PTA_USER_MSR + | PTA_PREFETCHI; constexpr wide_int_bitmask PTA_PANTHERLAKE = PTA_ARROWLAKE_S | PTA_PREFETCHI; constexpr wide_int_bitmask PTA_KNM = PTA_KNL | PTA_AVX5124VNNIW | PTA_AVX5124FMAPS | PTA_AVX512VPOPCNTDQ; diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md index f90cf1c..abaf2f3 100644 --- a/gcc/config/i386/i386.md +++ b/gcc/config/i386/i386.md @@ -3414,6 +3414,21 @@ [(set_attr "type" "imovx") (set_attr "mode" "SI")]) +;; Split sign-extension of single least significant bit as and x,$1;neg x +(define_insn_and_split "*extv<mode>_1_0" + [(set (match_operand:SWI48 0 "register_operand" "=r") + (sign_extract:SWI48 (match_operand:SWI48 1 "register_operand" "0") + (const_int 1) + (const_int 0))) + (clobber (reg:CC FLAGS_REG))] + "" + "#" + "" + [(parallel [(set (match_dup 0) (and:SWI48 (match_dup 1) (const_int 1))) + (clobber (reg:CC FLAGS_REG))]) + (parallel [(set (match_dup 0) (neg:SWI48 (match_dup 0))) + (clobber (reg:CC FLAGS_REG))])]) + (define_expand "extzv<mode>" [(set (match_operand:SWI248 0 "register_operand") (zero_extract:SWI248 (match_operand:SWI248 1 "register_operand") diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in index 9f98f2d..4a2d743 100644 --- a/gcc/config/loongarch/genopts/loongarch.opt.in +++ b/gcc/config/loongarch/genopts/loongarch.opt.in @@ -146,10 +146,6 @@ mbranch-cost= Target RejectNegative Joined UInteger Var(loongarch_branch_cost) -mbranch-cost=COST Set the cost of branches to roughly COST instructions. -mmemvec-cost= -Target RejectNegative Joined UInteger Var(loongarch_vector_access_cost) IntegerRange(1, 5) -mmemvec-cost=COST Set the cost of vector memory access instructions. - mcheck-zero-division Target Mask(CHECK_ZERO_DIV) Trap on integer divide by zero. @@ -213,3 +209,14 @@ mrelax Target Var(loongarch_mrelax) Init(HAVE_AS_MRELAX_OPTION) Take advantage of linker relaxations to reduce the number of instructions required to materialize symbol addresses. + +-param=loongarch-vect-unroll-limit= +Target Joined UInteger Var(loongarch_vect_unroll_limit) Init(6) IntegerRange(1, 64) Param +Used to limit unroll factor which indicates how much the autovectorizer may +unroll a loop. The default value is 6. + +-param=loongarch-vect-issue-info= +Target Undocumented Joined UInteger Var(loongarch_vect_issue_info) Init(4) IntegerRange(1, 64) Param +Indicate how many non memory access vector instructions can be issued per +cycle, it's used in unroll factor determination for autovectorizer. The +default value is 4. 
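Stepping back to the i386.md hunk shown above: the new *extv<mode>_1_0 splitter turns a sign-extraction of the least significant bit into an AND with 1 followed by a negation. A self-contained sketch of the underlying two's-complement identity (illustrative C, not code from the patch):

    #include <assert.h>

    /* Sign-extending bit 0 of X yields 0 when the bit is clear and -1 (all
       bits set) when it is set, which is exactly -(X & 1), the value the
       "and $1; neg" sequence produced by the split computes.  */
    static long long
    sign_extend_bit0 (long long x)
    {
      return -(x & 1);
    }

    int
    main (void)
    {
      assert (sign_extend_bit0 (0) == 0);
      assert (sign_extend_bit0 (1) == -1);
      assert (sign_extend_bit0 (6) == 0);
      assert (sign_extend_bit0 (7) == -1);
      return 0;
    }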
diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md index 2bc5d47..442fda2 100644 --- a/gcc/config/loongarch/lasx.md +++ b/gcc/config/loongarch/lasx.md @@ -5048,23 +5048,71 @@ [(set_attr "type" "simd_store") (set_attr "mode" "DI")]) -(define_insn "vec_widen_<su>mult_even_v8si" - [(set (match_operand:V4DI 0 "register_operand" "=f") - (mult:V4DI - (any_extend:V4DI - (vec_select:V4SI - (match_operand:V8SI 1 "register_operand" "%f") - (parallel [(const_int 0) (const_int 2) - (const_int 4) (const_int 6)]))) - (any_extend:V4DI - (vec_select:V4SI - (match_operand:V8SI 2 "register_operand" "f") - (parallel [(const_int 0) (const_int 2) - (const_int 4) (const_int 6)])))))] - "ISA_HAS_LASX" - "xvmulwev.d.w<u>\t%u0,%u1,%u2" - [(set_attr "type" "simd_int_arith") - (set_attr "mode" "V4DI")]) +(define_expand "vec_widen_<su>add_hi_<mode>" + [(match_operand:<VDMODE256> 0 "register_operand") + (any_extend:<VDMODE256> (match_operand:ILASX_HB 1 "register_operand")) + (any_extend:<VDMODE256> (match_operand:ILASX_HB 2 "register_operand"))] + "ISA_HAS_LASX" +{ + loongarch_expand_vec_widen_hilo (operands[0], operands[1], operands[2], + <u_bool>, true, "add"); + DONE; +}) + +(define_expand "vec_widen_<su>add_lo_<mode>" + [(match_operand:<VDMODE256> 0 "register_operand") + (any_extend:<VDMODE256> (match_operand:ILASX_HB 1 "register_operand")) + (any_extend:<VDMODE256> (match_operand:ILASX_HB 2 "register_operand"))] + "ISA_HAS_LASX" +{ + loongarch_expand_vec_widen_hilo (operands[0], operands[1], operands[2], + <u_bool>, false, "add"); + DONE; +}) + +(define_expand "vec_widen_<su>sub_hi_<mode>" + [(match_operand:<VDMODE256> 0 "register_operand") + (any_extend:<VDMODE256> (match_operand:ILASX_HB 1 "register_operand")) + (any_extend:<VDMODE256> (match_operand:ILASX_HB 2 "register_operand"))] + "ISA_HAS_LASX" +{ + loongarch_expand_vec_widen_hilo (operands[0], operands[1], operands[2], + <u_bool>, true, "sub"); + DONE; +}) + +(define_expand "vec_widen_<su>sub_lo_<mode>" + [(match_operand:<VDMODE256> 0 "register_operand") + (any_extend:<VDMODE256> (match_operand:ILASX_HB 1 "register_operand")) + (any_extend:<VDMODE256> (match_operand:ILASX_HB 2 "register_operand"))] + "ISA_HAS_LASX" +{ + loongarch_expand_vec_widen_hilo (operands[0], operands[1], operands[2], + <u_bool>, false, "sub"); + DONE; +}) + +(define_expand "vec_widen_<su>mult_hi_<mode>" + [(match_operand:<VDMODE256> 0 "register_operand") + (any_extend:<VDMODE256> (match_operand:ILASX_HB 1 "register_operand")) + (any_extend:<VDMODE256> (match_operand:ILASX_HB 2 "register_operand"))] + "ISA_HAS_LASX" +{ + loongarch_expand_vec_widen_hilo (operands[0], operands[1], operands[2], + <u_bool>, true, "mult"); + DONE; +}) + +(define_expand "vec_widen_<su>mult_lo_<mode>" + [(match_operand:<VDMODE256> 0 "register_operand") + (any_extend:<VDMODE256> (match_operand:ILASX_HB 1 "register_operand")) + (any_extend:<VDMODE256> (match_operand:ILASX_HB 2 "register_operand"))] + "ISA_HAS_LASX" +{ + loongarch_expand_vec_widen_hilo (operands[0], operands[1], operands[2], + <u_bool>, false, "mult"); + DONE; +}) ;; Vector reduction operation (define_expand "reduc_plus_scal_v4di" @@ -5171,3 +5219,81 @@ const0_rtx)); DONE; }) + +(define_expand "avg<mode>3_ceil" + [(match_operand:ILASX_WHB 0 "register_operand") + (match_operand:ILASX_WHB 1 "register_operand") + (match_operand:ILASX_WHB 2 "register_operand")] + "ISA_HAS_LASX" +{ + emit_insn (gen_lasx_xvavgr_s_<lasxfmt> (operands[0], + operands[1], operands[2])); + DONE; +}) + +(define_expand "uavg<mode>3_ceil" + 
[(match_operand:ILASX_WHB 0 "register_operand") + (match_operand:ILASX_WHB 1 "register_operand") + (match_operand:ILASX_WHB 2 "register_operand")] + "ISA_HAS_LASX" +{ + emit_insn (gen_lasx_xvavgr_u_<lasxfmt_u> (operands[0], + operands[1], operands[2])); + DONE; +}) + +(define_expand "avg<mode>3_floor" + [(match_operand:ILASX_WHB 0 "register_operand") + (match_operand:ILASX_WHB 1 "register_operand") + (match_operand:ILASX_WHB 2 "register_operand")] + "ISA_HAS_LASX" +{ + emit_insn (gen_lasx_xvavg_s_<lasxfmt> (operands[0], + operands[1], operands[2])); + DONE; +}) + +(define_expand "uavg<mode>3_floor" + [(match_operand:ILASX_WHB 0 "register_operand") + (match_operand:ILASX_WHB 1 "register_operand") + (match_operand:ILASX_WHB 2 "register_operand")] + "ISA_HAS_LASX" +{ + emit_insn (gen_lasx_xvavg_u_<lasxfmt_u> (operands[0], + operands[1], operands[2])); + DONE; +}) + +(define_expand "usadv32qi" + [(match_operand:V8SI 0 "register_operand") + (match_operand:V32QI 1 "register_operand") + (match_operand:V32QI 2 "register_operand") + (match_operand:V8SI 3 "register_operand")] + "ISA_HAS_LASX" +{ + rtx t1 = gen_reg_rtx (V32QImode); + rtx t2 = gen_reg_rtx (V16HImode); + rtx t3 = gen_reg_rtx (V8SImode); + emit_insn (gen_lasx_xvabsd_u_bu (t1, operands[1], operands[2])); + emit_insn (gen_lasx_xvhaddw_h_b (t2, t1, t1)); + emit_insn (gen_lasx_xvhaddw_w_h (t3, t2, t2)); + emit_insn (gen_addv8si3 (operands[0], t3, operands[3])); + DONE; +}) + +(define_expand "ssadv32qi" + [(match_operand:V8SI 0 "register_operand") + (match_operand:V32QI 1 "register_operand") + (match_operand:V32QI 2 "register_operand") + (match_operand:V8SI 3 "register_operand")] + "ISA_HAS_LASX" +{ + rtx t1 = gen_reg_rtx (V32QImode); + rtx t2 = gen_reg_rtx (V16HImode); + rtx t3 = gen_reg_rtx (V8SImode); + emit_insn (gen_lasx_xvabsd_s_b (t1, operands[1], operands[2])); + emit_insn (gen_lasx_xvhaddw_h_b (t2, t1, t1)); + emit_insn (gen_lasx_xvhaddw_w_h (t3, t2, t2)); + emit_insn (gen_addv8si3 (operands[0], t3, operands[3])); + DONE; +}) diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h index 251011c..72ae991 100644 --- a/gcc/config/loongarch/loongarch-protos.h +++ b/gcc/config/loongarch/loongarch-protos.h @@ -205,6 +205,7 @@ extern void loongarch_register_frame_header_opt (void); extern void loongarch_expand_vec_cond_expr (machine_mode, machine_mode, rtx *); extern void loongarch_expand_vec_cond_mask_expr (machine_mode, machine_mode, rtx *); +extern void loongarch_expand_vec_widen_hilo (rtx, rtx, rtx, bool, bool, const char *); /* Routines implemented in loongarch-c.c. */ void loongarch_cpu_cpp_builtins (cpp_reader *); diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc index 8fa7439..73f0c16 100644 --- a/gcc/config/loongarch/loongarch.cc +++ b/gcc/config/loongarch/loongarch.cc @@ -65,6 +65,8 @@ along with GCC; see the file COPYING3. If not see #include "rtl-iter.h" #include "opts.h" #include "function-abi.h" +#include "cfgloop.h" +#include "tree-vectorizer.h" /* This file should be included last. */ #include "target-def.h" @@ -3845,8 +3847,6 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, } } -/* Vectorizer cost model implementation. */ - /* Implement targetm.vectorize.builtin_vectorization_cost. 
*/ static int @@ -3865,36 +3865,182 @@ loongarch_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost, case vector_load: case vec_to_scalar: case scalar_to_vec: - case cond_branch_not_taken: - case vec_promote_demote: case scalar_store: case vector_store: return 1; + case vec_promote_demote: case vec_perm: return LASX_SUPPORTED_MODE_P (mode) && !LSX_SUPPORTED_MODE_P (mode) ? 2 : 1; case unaligned_load: - case vector_gather_load: - return 2; - case unaligned_store: - case vector_scatter_store: - return 10; + return 2; case cond_branch_taken: - return 3; + return 4; + + case cond_branch_not_taken: + return 2; case vec_construct: elements = TYPE_VECTOR_SUBPARTS (vectype); - return elements / 2 + 1; + if (ISA_HAS_LASX) + return elements + 1; + else + return elements; default: gcc_unreachable (); } } +class loongarch_vector_costs : public vector_costs +{ +public: + using vector_costs::vector_costs; + + unsigned int add_stmt_cost (int count, vect_cost_for_stmt kind, + stmt_vec_info stmt_info, slp_tree, tree vectype, + int misalign, + vect_cost_model_location where) override; + void finish_cost (const vector_costs *) override; + +protected: + void count_operations (vect_cost_for_stmt, stmt_vec_info, + vect_cost_model_location, unsigned int); + unsigned int determine_suggested_unroll_factor (loop_vec_info); + /* The number of vectorized stmts in loop. */ + unsigned m_stmts = 0; + /* The number of load and store operations in loop. */ + unsigned m_loads = 0; + unsigned m_stores = 0; + /* Reduction factor for suggesting unroll factor. */ + unsigned m_reduc_factor = 0; + /* True if the loop contains an average operation. */ + bool m_has_avg =false; +}; + +/* Implement TARGET_VECTORIZE_CREATE_COSTS. */ +static vector_costs * +loongarch_vectorize_create_costs (vec_info *vinfo, bool costing_for_scalar) +{ + return new loongarch_vector_costs (vinfo, costing_for_scalar); +} + +void +loongarch_vector_costs::count_operations (vect_cost_for_stmt kind, + stmt_vec_info stmt_info, + vect_cost_model_location where, + unsigned int count) +{ + if (!m_costing_for_scalar + && is_a<loop_vec_info> (m_vinfo) + && where == vect_body) + { + m_stmts += count; + + if (kind == scalar_load + || kind == vector_load + || kind == unaligned_load) + m_loads += count; + else if (kind == scalar_store + || kind == vector_store + || kind == unaligned_store) + m_stores += count; + else if ((kind == scalar_stmt + || kind == vector_stmt + || kind == vec_to_scalar) + && stmt_info && vect_is_reduction (stmt_info)) + { + tree lhs = gimple_get_lhs (stmt_info->stmt); + unsigned int base = FLOAT_TYPE_P (TREE_TYPE (lhs)) ? 2 : 1; + m_reduc_factor = MAX (base * count, m_reduc_factor); + } + } +} + +unsigned int +loongarch_vector_costs::determine_suggested_unroll_factor (loop_vec_info loop_vinfo) +{ + class loop *loop = LOOP_VINFO_LOOP (loop_vinfo); + + if (m_has_avg) + return 1; + + /* Don't unroll if it's specified explicitly not to be unrolled. */ + if (loop->unroll == 1 + || (OPTION_SET_P (flag_unroll_loops) && !flag_unroll_loops) + || (OPTION_SET_P (flag_unroll_all_loops) && !flag_unroll_all_loops)) + return 1; + + unsigned int nstmts_nonldst = m_stmts - m_loads - m_stores; + /* Don't unroll if no vector instructions excepting for memory access. */ + if (nstmts_nonldst == 0) + return 1; + + /* Use this simple hardware resource model that how many non vld/vst + vector instructions can be issued per cycle. */ + unsigned int issue_info = loongarch_vect_issue_info; + unsigned int reduc_factor = m_reduc_factor > 1 ? 
m_reduc_factor : 1; + unsigned int uf = CEIL (reduc_factor * issue_info, nstmts_nonldst); + uf = MIN ((unsigned int) loongarch_vect_unroll_limit, uf); + + return 1 << ceil_log2 (uf); +} + +unsigned +loongarch_vector_costs::add_stmt_cost (int count, vect_cost_for_stmt kind, + stmt_vec_info stmt_info, slp_tree, + tree vectype, int misalign, + vect_cost_model_location where) +{ + unsigned retval = 0; + + if (flag_vect_cost_model) + { + int stmt_cost = loongarch_builtin_vectorization_cost (kind, vectype, + misalign); + retval = adjust_cost_for_freq (stmt_info, where, count * stmt_cost); + m_costs[where] += retval; + + count_operations (kind, stmt_info, where, count); + } + + if (stmt_info) + { + /* Detect the use of an averaging operation. */ + gimple *stmt = stmt_info->stmt; + if (is_gimple_call (stmt) + && gimple_call_internal_p (stmt)) + { + switch (gimple_call_internal_fn (stmt)) + { + case IFN_AVG_FLOOR: + case IFN_AVG_CEIL: + m_has_avg = true; + default: + break; + } + } + } + + return retval; +} + +void +loongarch_vector_costs::finish_cost (const vector_costs *scalar_costs) +{ + loop_vec_info loop_vinfo = dyn_cast<loop_vec_info> (m_vinfo); + if (loop_vinfo) + { + m_suggested_unroll_factor = determine_suggested_unroll_factor (loop_vinfo); + } + + vector_costs::finish_cost (scalar_costs); +} + /* Implement TARGET_ADDRESS_COST. */ static int @@ -7265,9 +7411,6 @@ loongarch_option_override_internal (struct gcc_options *opts, if (TARGET_DIRECT_EXTERN_ACCESS && flag_shlib) error ("%qs cannot be used for compiling a shared library", "-mdirect-extern-access"); - if (loongarch_vector_access_cost == 0) - loongarch_vector_access_cost = 5; - switch (la_target.cmodel) { @@ -8032,6 +8175,143 @@ loongarch_expand_vec_perm_even_odd (struct expand_vec_perm_d *d) return loongarch_expand_vec_perm_even_odd_1 (d, odd); } +static void +loongarch_expand_vec_interleave (rtx target, rtx op0, rtx op1, bool high_p) +{ + struct expand_vec_perm_d d; + unsigned i, nelt, base; + bool ok; + + d.target = target; + d.op0 = op0; + d.op1 = op1; + d.vmode = GET_MODE (target); + d.nelt = nelt = GET_MODE_NUNITS (d.vmode); + d.one_vector_p = false; + d.testing_p = false; + + base = high_p ? nelt / 2 : 0; + for (i = 0; i < nelt / 2; ++i) + { + d.perm[i * 2] = i + base; + d.perm[i * 2 + 1] = i + base + nelt; + } + + ok = loongarch_expand_vec_perm_interleave (&d); + gcc_assert (ok); +} + +/* The loongarch lasx instructions xvmulwev and xvmulwod return the even or odd + parts of the double sized result elements in the corresponding elements of + the target register. That's NOT what the vec_widen_umult_lo/hi patterns are + expected to do. We emulate the widening lo/hi multiplies with the even/odd + versions followed by a vector merge. 
*/ + +void +loongarch_expand_vec_widen_hilo (rtx dest, rtx op1, rtx op2, + bool uns_p, bool high_p, const char *optab) +{ + machine_mode wmode = GET_MODE (dest); + machine_mode mode = GET_MODE (op1); + rtx t1, t2, t3; + + t1 = gen_reg_rtx (wmode); + t2 = gen_reg_rtx (wmode); + t3 = gen_reg_rtx (wmode); + switch (mode) + { + case V16HImode: + if (!strcmp (optab, "add")) + { + if (!uns_p) + { + emit_insn (gen_lasx_xvaddwev_w_h (t1, op1, op2)); + emit_insn (gen_lasx_xvaddwod_w_h (t2, op1, op2)); + } + else + { + emit_insn (gen_lasx_xvaddwev_w_hu (t1, op1, op2)); + emit_insn (gen_lasx_xvaddwod_w_hu (t2, op1, op2)); + } + } + else if (!strcmp (optab, "mult")) + { + if (!uns_p) + { + emit_insn (gen_lasx_xvmulwev_w_h (t1, op1, op2)); + emit_insn (gen_lasx_xvmulwod_w_h (t2, op1, op2)); + } + else + { + emit_insn (gen_lasx_xvmulwev_w_hu (t1, op1, op2)); + emit_insn (gen_lasx_xvmulwod_w_hu (t2, op1, op2)); + } + } + else if (!strcmp (optab, "sub")) + { + if (!uns_p) + { + emit_insn (gen_lasx_xvsubwev_w_h (t1, op1, op2)); + emit_insn (gen_lasx_xvsubwod_w_h (t2, op1, op2)); + } + else + { + emit_insn (gen_lasx_xvsubwev_w_hu (t1, op1, op2)); + emit_insn (gen_lasx_xvsubwod_w_hu (t2, op1, op2)); + } + } + break; + + case V32QImode: + if (!strcmp (optab, "add")) + { + if (!uns_p) + { + emit_insn (gen_lasx_xvaddwev_h_b (t1, op1, op2)); + emit_insn (gen_lasx_xvaddwod_h_b (t2, op1, op2)); + } + else + { + emit_insn (gen_lasx_xvaddwev_h_bu (t1, op1, op2)); + emit_insn (gen_lasx_xvaddwod_h_bu (t2, op1, op2)); + } + } + else if (!strcmp (optab, "mult")) + { + if (!uns_p) + { + emit_insn (gen_lasx_xvmulwev_h_b (t1, op1, op2)); + emit_insn (gen_lasx_xvmulwod_h_b (t2, op1, op2)); + } + else + { + emit_insn (gen_lasx_xvmulwev_h_bu (t1, op1, op2)); + emit_insn (gen_lasx_xvmulwod_h_bu (t2, op1, op2)); + } + } + else if (!strcmp (optab, "sub")) + { + if (!uns_p) + { + emit_insn (gen_lasx_xvsubwev_h_b (t1, op1, op2)); + emit_insn (gen_lasx_xvsubwod_h_b (t2, op1, op2)); + } + else + { + emit_insn (gen_lasx_xvsubwev_h_bu (t1, op1, op2)); + emit_insn (gen_lasx_xvsubwod_h_bu (t2, op1, op2)); + } + } + break; + + default: + gcc_unreachable (); + } + + loongarch_expand_vec_interleave (t3, t1, t2, high_p); + emit_move_insn (dest, gen_lowpart (wmode, t3)); +} + /* Expand a variable vector permutation for LASX. */ void @@ -11144,6 +11424,8 @@ loongarch_builtin_support_vector_misalignment (machine_mode mode, #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \ loongarch_builtin_vectorization_cost +#undef TARGET_VECTORIZE_CREATE_COSTS +#define TARGET_VECTORIZE_CREATE_COSTS loongarch_vectorize_create_costs #undef TARGET_IN_SMALL_DATA_P diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md index 9f5a753..365b412 100644 --- a/gcc/config/loongarch/loongarch.md +++ b/gcc/config/loongarch/loongarch.md @@ -509,6 +509,8 @@ ;; <su> is like <u>, but the signed form expands to "s" rather than "". (define_code_attr su [(sign_extend "s") (zero_extend "u")]) +(define_code_attr u_bool [(sign_extend "false") (zero_extend "true")]) + ;; <optab> expands to the name of the optab for a particular code. 
(define_code_attr optab [(ashift "ashl") (ashiftrt "ashr") diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt index e1b085a..6215abc 100644 --- a/gcc/config/loongarch/loongarch.opt +++ b/gcc/config/loongarch/loongarch.opt @@ -153,10 +153,6 @@ mbranch-cost= Target RejectNegative Joined UInteger Var(loongarch_branch_cost) -mbranch-cost=COST Set the cost of branches to roughly COST instructions. -mmemvec-cost= -Target RejectNegative Joined UInteger Var(loongarch_vector_access_cost) IntegerRange(1, 5) -mmemvec-cost=COST Set the cost of vector memory access instructions. - mcheck-zero-division Target Mask(CHECK_ZERO_DIV) Trap on integer divide by zero. @@ -220,3 +216,14 @@ mrelax Target Var(loongarch_mrelax) Init(HAVE_AS_MRELAX_OPTION) Take advantage of linker relaxations to reduce the number of instructions required to materialize symbol addresses. + +-param=loongarch-vect-unroll-limit= +Target Joined UInteger Var(loongarch_vect_unroll_limit) Init(6) IntegerRange(1, 64) Param +Used to limit unroll factor which indicates how much the autovectorizer may +unroll a loop. The default value is 6. + +-param=loongarch-vect-issue-info= +Target Undocumented Joined UInteger Var(loongarch_vect_issue_info) Init(4) IntegerRange(1, 64) Param +Indicate how many non memory access vector instructions can be issued per +cycle, it's used in unroll factor determination for autovectorizer. The +default value is 4. diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md index 075f6ba..b4e92ae9 100644 --- a/gcc/config/loongarch/lsx.md +++ b/gcc/config/loongarch/lsx.md @@ -3581,6 +3581,84 @@ DONE; }) +(define_expand "avg<mode>3_ceil" + [(match_operand:ILSX_WHB 0 "register_operand") + (match_operand:ILSX_WHB 1 "register_operand") + (match_operand:ILSX_WHB 2 "register_operand")] + "ISA_HAS_LSX" +{ + emit_insn (gen_lsx_vavgr_s_<lsxfmt> (operands[0], + operands[1], operands[2])); + DONE; +}) + +(define_expand "uavg<mode>3_ceil" + [(match_operand:ILSX_WHB 0 "register_operand") + (match_operand:ILSX_WHB 1 "register_operand") + (match_operand:ILSX_WHB 2 "register_operand")] + "ISA_HAS_LSX" +{ + emit_insn (gen_lsx_vavgr_u_<lsxfmt_u> (operands[0], + operands[1], operands[2])); + DONE; +}) + +(define_expand "avg<mode>3_floor" + [(match_operand:ILSX_WHB 0 "register_operand") + (match_operand:ILSX_WHB 1 "register_operand") + (match_operand:ILSX_WHB 2 "register_operand")] + "ISA_HAS_LSX" +{ + emit_insn (gen_lsx_vavg_s_<lsxfmt> (operands[0], + operands[1], operands[2])); + DONE; +}) + +(define_expand "uavg<mode>3_floor" + [(match_operand:ILSX_WHB 0 "register_operand") + (match_operand:ILSX_WHB 1 "register_operand") + (match_operand:ILSX_WHB 2 "register_operand")] + "ISA_HAS_LSX" +{ + emit_insn (gen_lsx_vavg_u_<lsxfmt_u> (operands[0], + operands[1], operands[2])); + DONE; +}) + +(define_expand "usadv16qi" + [(match_operand:V4SI 0 "register_operand") + (match_operand:V16QI 1 "register_operand") + (match_operand:V16QI 2 "register_operand") + (match_operand:V4SI 3 "register_operand")] + "ISA_HAS_LSX" +{ + rtx t1 = gen_reg_rtx (V16QImode); + rtx t2 = gen_reg_rtx (V8HImode); + rtx t3 = gen_reg_rtx (V4SImode); + emit_insn (gen_lsx_vabsd_u_bu (t1, operands[1], operands[2])); + emit_insn (gen_lsx_vhaddw_h_b (t2, t1, t1)); + emit_insn (gen_lsx_vhaddw_w_h (t3, t2, t2)); + emit_insn (gen_addv4si3 (operands[0], t3, operands[3])); + DONE; +}) + +(define_expand "ssadv16qi" + [(match_operand:V4SI 0 "register_operand") + (match_operand:V16QI 1 "register_operand") + (match_operand:V16QI 2 
"register_operand") + (match_operand:V4SI 3 "register_operand")] + "ISA_HAS_LSX" +{ + rtx t1 = gen_reg_rtx (V16QImode); + rtx t2 = gen_reg_rtx (V8HImode); + rtx t3 = gen_reg_rtx (V4SImode); + emit_insn (gen_lsx_vabsd_s_b (t1, operands[1], operands[2])); + emit_insn (gen_lsx_vhaddw_h_b (t2, t1, t1)); + emit_insn (gen_lsx_vhaddw_w_h (t3, t2, t2)); + emit_insn (gen_addv4si3 (operands[0], t3, operands[3])); + DONE; +}) + (define_insn "lsx_v<optab>wev_d_w<u>" [(set (match_operand:V2DI 0 "register_operand" "=f") (addsubmul:V2DI diff --git a/gcc/config/pru/pru.cc b/gcc/config/pru/pru.cc index 6e8112b..fd1924e 100644 --- a/gcc/config/pru/pru.cc +++ b/gcc/config/pru/pru.cc @@ -783,6 +783,39 @@ pru_rtx_costs (rtx x, machine_mode mode, } } } + +/* Insn costs on PRU are straightforward because: + - Insns emit 0, 1 or more instructions. + - All instructions are 32-bit length. + - All instructions execute in 1 cycle (sans memory access delays). + The "length" attribute maps nicely to the insn cost. */ + +static int +pru_insn_cost (rtx_insn *insn, bool speed) +{ + /* Use generic cost calculation for unrecognized insns. */ + if (recog_memoized (insn) < 0) + return pattern_cost (insn, speed); + + unsigned int len = get_attr_length (insn); + + gcc_assert ((len % 4) == 0); + + int cost = COSTS_N_INSNS (len / 4); + /* Some insns have zero length (e.g. blockage, pruloop_end). + In such cases give the minimum cost, because a return of + 0 would incorrectly indicate that the insn cost is unknown. */ + if (cost == 0) + cost = 1; + + /* Writes are usually posted, so they take 1 cycle. Reads + from DMEM usually take 3 cycles. + See TI document SPRACE8A, Device-Specific PRU Read Latency Values. */ + if (speed && get_attr_type (insn) == TYPE_LD) + cost += COSTS_N_INSNS (2); + + return cost; +} static GTY(()) rtx eqdf_libfunc; static GTY(()) rtx nedf_libfunc; @@ -3175,6 +3208,9 @@ pru_unwind_word_mode (void) #undef TARGET_RTX_COSTS #define TARGET_RTX_COSTS pru_rtx_costs +#undef TARGET_INSN_COST +#define TARGET_INSN_COST pru_insn_cost + #undef TARGET_PRINT_OPERAND #define TARGET_PRINT_OPERAND pru_print_operand diff --git a/gcc/config/riscv/riscv-opts.h b/gcc/config/riscv/riscv-opts.h index 31ee42d..e557f70 100644 --- a/gcc/config/riscv/riscv-opts.h +++ b/gcc/config/riscv/riscv-opts.h @@ -118,11 +118,6 @@ enum riscv_entity ? 0 \ : 32 << (__builtin_popcount (opts->x_riscv_zvl_flags) - 1)) -/* We only enable VLS modes for VLA vectorization since fixed length VLMAX mode - is the highest priority choice and should not conflict with VLS modes. */ -#define TARGET_VECTOR_VLS \ - (TARGET_VECTOR && riscv_autovec_preference == RVV_SCALABLE) - /* TODO: Enable RVV movmisalign by default for now. 
*/ #define TARGET_VECTOR_MISALIGN_SUPPORTED 1 diff --git a/gcc/config/riscv/riscv-protos.h b/gcc/config/riscv/riscv-protos.h index 6190faa..f7a9a02 100644 --- a/gcc/config/riscv/riscv-protos.h +++ b/gcc/config/riscv/riscv-protos.h @@ -552,6 +552,7 @@ unsigned int autovectorize_vector_modes (vec<machine_mode> *, bool); bool cmp_lmul_le_one (machine_mode); bool cmp_lmul_gt_one (machine_mode); bool gather_scatter_valid_offset_mode_p (machine_mode); +bool vls_mode_valid_p (machine_mode); } /* We classify builtin types into two classes: diff --git a/gcc/config/riscv/riscv-selftests.cc b/gcc/config/riscv/riscv-selftests.cc index cdc863e..0ac17fb 100644 --- a/gcc/config/riscv/riscv-selftests.cc +++ b/gcc/config/riscv/riscv-selftests.cc @@ -267,15 +267,14 @@ run_const_vector_selftests (void) rtx dup = gen_const_vec_duplicate (mode, GEN_INT (val)); emit_move_insn (dest, dup); rtx_insn *insn = get_last_insn (); - rtx src = XEXP (SET_SRC (PATTERN (insn)), 1); + rtx src = SET_SRC (PATTERN (insn)); /* 1. Should be vmv.v.i for in rang of -16 ~ 15. 2. Should be vmv.v.x for exceed -16 ~ 15. */ if (IN_RANGE (val, -16, 15)) - ASSERT_TRUE (rtx_equal_p (src, dup)); - else ASSERT_TRUE ( - rtx_equal_p (src, - gen_rtx_VEC_DUPLICATE (mode, XEXP (src, 0)))); + rtx_equal_p (XEXP (SET_SRC (PATTERN (insn)), 1), dup)); + else + ASSERT_TRUE (GET_CODE (src) == VEC_DUPLICATE); end_sequence (); } } @@ -294,10 +293,9 @@ run_const_vector_selftests (void) rtx dup = gen_const_vec_duplicate (mode, ele); emit_move_insn (dest, dup); rtx_insn *insn = get_last_insn (); - rtx src = XEXP (SET_SRC (PATTERN (insn)), 1); + rtx src = SET_SRC (PATTERN (insn)); /* Should always be vfmv.v.f. */ - ASSERT_TRUE ( - rtx_equal_p (src, gen_rtx_VEC_DUPLICATE (mode, XEXP (src, 0)))); + ASSERT_TRUE (GET_CODE (src) == VEC_DUPLICATE); end_sequence (); } } diff --git a/gcc/config/riscv/riscv-v.cc b/gcc/config/riscv/riscv-v.cc index 895c11d..383af55 100644 --- a/gcc/config/riscv/riscv-v.cc +++ b/gcc/config/riscv/riscv-v.cc @@ -1001,8 +1001,31 @@ expand_const_vector (rtx target, rtx src) } else { - rtx ops[] = {tmp, elt}; - emit_vlmax_insn (code_for_pred_broadcast (mode), UNARY_OP, ops); + /* Emit vec_duplicate<mode> split pattern before RA so that + we could have a better optimization opportunity in LICM + which will hoist vmv.v.x outside the loop and in fwprop && combine + which will transform 'vv' into 'vx' instruction. + + The reason we don't emit vec_duplicate<mode> split pattern during + RA since the split stage after RA is a too late stage to generate + RVV instruction which need an additional register (We can't + allocate a new register after RA) for VL operand of vsetvl + instruction (vsetvl a5, zero). */ + if (lra_in_progress) + { + rtx ops[] = {tmp, elt}; + emit_vlmax_insn (code_for_pred_broadcast (mode), UNARY_OP, ops); + } + else + { + struct expand_operand ops[2]; + enum insn_code icode = optab_handler (vec_duplicate_optab, mode); + gcc_assert (icode != CODE_FOR_nothing); + create_output_operand (&ops[0], tmp, mode); + create_input_operand (&ops[1], elt, GET_MODE_INNER (mode)); + expand_insn (icode, 2, ops); + tmp = ops[0].value; + } } if (tmp != target) @@ -2423,20 +2446,19 @@ autovectorize_vector_modes (vector_modes *modes, bool) modes->safe_push (mode); } } - if (TARGET_VECTOR_VLS) - { - /* Push all VLSmodes according to TARGET_MIN_VLEN. 
*/ - unsigned int i = 0; - unsigned int base_size = TARGET_MIN_VLEN * lmul / 8; - unsigned int size = base_size; - machine_mode mode; - while (size > 0 && get_vector_mode (QImode, size).exists (&mode)) - { + /* Push all VLSmodes according to TARGET_MIN_VLEN. */ + unsigned int i = 0; + unsigned int base_size = TARGET_MIN_VLEN * lmul / 8; + unsigned int size = base_size; + machine_mode mode; + while (size > 0 && get_vector_mode (QImode, size).exists (&mode)) + { + if (vls_mode_valid_p (mode)) modes->safe_push (mode); - i++; - size = base_size / (1U << i); - } - } + + i++; + size = base_size / (1U << i); + } /* Enable LOOP_VINFO comparison in COST model. */ return VECT_COMPARE_COSTS; } @@ -3880,6 +3902,95 @@ cmp_lmul_gt_one (machine_mode mode) return false; } +/* Return true if the VLS mode is legal. There are 2 cases here. + + 1. Enable VLS modes for VLA vectorization since fixed length VLMAX mode + is the highest priority choice and should not conflict with VLS modes. + 2. Enable VLS modes for some cases in fixed-vlmax, aka the bitsize of the + VLS mode are smaller than the minimal vla. + + Take vlen = 2048 as example for case 2. + + Note: Below table based on vlen = 2048. + +----------------------------------------------------+----------------------+ + | VLS mode | VLA mode | + +----------------------------------------------------+----------------------+ + | Name | Precision | Inner Precision | Enabled | Min mode | Min bits | + +------------+-----------+-----------------+---------+-----------+----------+ + | V1BI | 1 | 1 | Yes | RVVMF64BI | 32 | + | V2BI | 2 | 1 | Yes | RVVMF64BI | 32 | + | V4BI | 4 | 1 | Yes | RVVMF64BI | 32 | + | V8BI | 8 | 1 | Yes | RVVMF64BI | 32 | + | V16BI | 16 | 1 | Yes | RVVMF64BI | 32 | + | V32BI | 32 | 1 | NO | RVVMF64BI | 32 | + | V64BI | 64 | 1 | NO | RVVMF64BI | 32 | + | ... | ... | ... | ... | RVVMF64BI | 32 | + | V4096BI | 4096 | 1 | NO | RVVMF64BI | 32 | + +------------+-----------+-----------------+---------+-----------+----------+ + | V1QI | 8 | 8 | Yes | RVVMF8QI | 256 | + | V2QI | 16 | 8 | Yes | RVVMF8QI | 256 | + | V4QI | 32 | 8 | Yes | RVVMF8QI | 256 | + | V8QI | 64 | 8 | Yes | RVVMF8QI | 256 | + | V16QI | 128 | 8 | Yes | RVVMF8QI | 256 | + | V32QI | 256 | 8 | NO | RVVMF8QI | 256 | + | V64QI | 512 | 8 | NO | RVVMF8QI | 256 | + | ... | ... | .. | ... | RVVMF8QI | 256 | + | V4096QI | 32768 | 8 | NO | RVVMF8QI | 256 | + +------------+-----------+-----------------+---------+-----------+----------+ + | V1HI | 16 | 16 | Yes | RVVMF4HI | 512 | + | V2HI | 32 | 16 | Yes | RVVMF4HI | 512 | + | V4HI | 64 | 16 | Yes | RVVMF4HI | 512 | + | V8HI | 128 | 16 | Yes | RVVMF4HI | 512 | + | V16HI | 256 | 16 | Yes | RVVMF4HI | 512 | + | V32HI | 512 | 16 | NO | RVVMF4HI | 512 | + | V64HI | 1024 | 16 | NO | RVVMF4HI | 512 | + | ... | ... | .. | ... | RVVMF4HI | 512 | + | V2048HI | 32768 | 16 | NO | RVVMF4HI | 512 | + +------------+-----------+-----------------+---------+-----------+----------+ + | V1SI/SF | 32 | 32 | Yes | RVVMF2SI | 1024 | + | V2SI/SF | 64 | 32 | Yes | RVVMF2SI | 1024 | + | V4SI/SF | 128 | 32 | Yes | RVVMF2SI | 1024 | + | V8SI/SF | 256 | 32 | Yes | RVVMF2SI | 1024 | + | V16SI/SF | 512 | 32 | Yes | RVVMF2SI | 1024 | + | V32SI/SF | 1024 | 32 | NO | RVVMF2SI | 1024 | + | V64SI/SF | 2048 | 32 | NO | RVVMF2SI | 1024 | + | ... | ... | .. | ... 
| RVVMF2SI | 1024 | + | V1024SI/SF | 32768 | 32 | NO | RVVMF2SI | 1024 | + +------------+-----------+-----------------+---------+-----------+----------+ + | V1DI/DF | 64 | 64 | Yes | RVVM1DI | 2048 | + | V2DI/DF | 128 | 64 | Yes | RVVM1DI | 2048 | + | V4DI/DF | 256 | 64 | Yes | RVVM1DI | 2048 | + | V8DI/DF | 512 | 64 | Yes | RVVM1DI | 2048 | + | V16DI/DF | 1024 | 64 | Yes | RVVM1DI | 2048 | + | V32DI/DF | 2048 | 64 | NO | RVVM1DI | 2048 | + | V64DI/DF | 4096 | 64 | NO | RVVM1DI | 2048 | + | ... | ... | .. | ... | RVVM1DI | 2048 | + | V512DI/DF | 32768 | 64 | NO | RVVM1DI | 2048 | + +------------+-----------+-----------------+---------+-----------+----------+ + + Then we can have the condition for VLS mode in fixed-vlmax, aka: + PRECISION (VLSmode) < VLEN / (64 / PRECISION(VLS_inner_mode)). */ +bool +vls_mode_valid_p (machine_mode vls_mode) +{ + if (!TARGET_VECTOR) + return false; + + if (riscv_autovec_preference == RVV_SCALABLE) + return true; + + if (riscv_autovec_preference == RVV_FIXED_VLMAX) + { + machine_mode inner_mode = GET_MODE_INNER (vls_mode); + int precision = GET_MODE_PRECISION (inner_mode).to_constant (); + int min_vlmax_bitsize = TARGET_MIN_VLEN / (64 / precision); + + return GET_MODE_PRECISION (vls_mode).to_constant () < min_vlmax_bitsize; + } + + return false; +} + /* Return true if the gather/scatter offset mode is valid. */ bool gather_scatter_valid_offset_mode_p (machine_mode mode) diff --git a/gcc/config/riscv/riscv-vector-switch.def b/gcc/config/riscv/riscv-vector-switch.def index 174e5a1..5c9f9bc 100644 --- a/gcc/config/riscv/riscv-vector-switch.def +++ b/gcc/config/riscv/riscv-vector-switch.def @@ -292,100 +292,100 @@ TUPLE_ENTRY (RVVM1x2DF, TARGET_VECTOR_ELEN_FP_64, RVVM1DF, 2, LMUL_1, 16) /* This following VLS modes should satisfy the constraint: GET_MODE_BITSIZE (MODE) <= TARGET_MIN_VLEN * 8. 
*/ -VLS_ENTRY (V1BI, TARGET_VECTOR_VLS) -VLS_ENTRY (V2BI, TARGET_VECTOR_VLS) -VLS_ENTRY (V4BI, TARGET_VECTOR_VLS) -VLS_ENTRY (V8BI, TARGET_VECTOR_VLS) -VLS_ENTRY (V16BI, TARGET_VECTOR_VLS) -VLS_ENTRY (V32BI, TARGET_VECTOR_VLS) -VLS_ENTRY (V64BI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64) -VLS_ENTRY (V128BI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128) -VLS_ENTRY (V256BI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256) -VLS_ENTRY (V512BI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512) -VLS_ENTRY (V1024BI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024) -VLS_ENTRY (V2048BI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048) -VLS_ENTRY (V4096BI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096) - -VLS_ENTRY (V1QI, TARGET_VECTOR_VLS) -VLS_ENTRY (V2QI, TARGET_VECTOR_VLS) -VLS_ENTRY (V4QI, TARGET_VECTOR_VLS) -VLS_ENTRY (V8QI, TARGET_VECTOR_VLS) -VLS_ENTRY (V16QI, TARGET_VECTOR_VLS) -VLS_ENTRY (V32QI, TARGET_VECTOR_VLS) -VLS_ENTRY (V64QI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64) -VLS_ENTRY (V128QI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128) -VLS_ENTRY (V256QI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256) -VLS_ENTRY (V512QI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512) -VLS_ENTRY (V1024QI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024) -VLS_ENTRY (V2048QI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048) -VLS_ENTRY (V4096QI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096) -VLS_ENTRY (V1HI, TARGET_VECTOR_VLS) -VLS_ENTRY (V2HI, TARGET_VECTOR_VLS) -VLS_ENTRY (V4HI, TARGET_VECTOR_VLS) -VLS_ENTRY (V8HI, TARGET_VECTOR_VLS) -VLS_ENTRY (V16HI, TARGET_VECTOR_VLS) -VLS_ENTRY (V32HI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64) -VLS_ENTRY (V64HI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128) -VLS_ENTRY (V128HI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256) -VLS_ENTRY (V256HI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512) -VLS_ENTRY (V512HI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024) -VLS_ENTRY (V1024HI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048) -VLS_ENTRY (V2048HI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096) -VLS_ENTRY (V1SI, TARGET_VECTOR_VLS) -VLS_ENTRY (V2SI, TARGET_VECTOR_VLS) -VLS_ENTRY (V4SI, TARGET_VECTOR_VLS) -VLS_ENTRY (V8SI, TARGET_VECTOR_VLS) -VLS_ENTRY (V16SI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64) -VLS_ENTRY (V32SI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128) -VLS_ENTRY (V64SI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256) -VLS_ENTRY (V128SI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512) -VLS_ENTRY (V256SI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024) -VLS_ENTRY (V512SI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048) -VLS_ENTRY (V1024SI, TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096) -VLS_ENTRY (V1DI, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64) -VLS_ENTRY (V2DI, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64) -VLS_ENTRY (V4DI, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64) -VLS_ENTRY (V8DI, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 64) -VLS_ENTRY (V16DI, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128) -VLS_ENTRY (V32DI, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 256) -VLS_ENTRY (V64DI, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 512) -VLS_ENTRY (V128DI, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 1024) -VLS_ENTRY (V256DI, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 2048) -VLS_ENTRY (V512DI, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 4096) - -VLS_ENTRY (V1HF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16) -VLS_ENTRY (V2HF, TARGET_VECTOR_VLS && 
TARGET_VECTOR_ELEN_FP_16) -VLS_ENTRY (V4HF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16) -VLS_ENTRY (V8HF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16) -VLS_ENTRY (V16HF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16) -VLS_ENTRY (V32HF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 64) -VLS_ENTRY (V64HF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 128) -VLS_ENTRY (V128HF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 256) -VLS_ENTRY (V256HF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 512) -VLS_ENTRY (V512HF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 1024) -VLS_ENTRY (V1024HF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 2048) -VLS_ENTRY (V2048HF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 4096) -VLS_ENTRY (V1SF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32) -VLS_ENTRY (V2SF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32) -VLS_ENTRY (V4SF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32) -VLS_ENTRY (V8SF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32) -VLS_ENTRY (V16SF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64) -VLS_ENTRY (V32SF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128) -VLS_ENTRY (V64SF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256) -VLS_ENTRY (V128SF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512) -VLS_ENTRY (V256SF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024) -VLS_ENTRY (V512SF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048) -VLS_ENTRY (V1024SF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096) -VLS_ENTRY (V1DF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64) -VLS_ENTRY (V2DF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64) -VLS_ENTRY (V4DF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64) -VLS_ENTRY (V8DF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64) -VLS_ENTRY (V16DF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128) -VLS_ENTRY (V32DF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256) -VLS_ENTRY (V64DF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512) -VLS_ENTRY (V128DF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024) -VLS_ENTRY (V256DF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048) -VLS_ENTRY (V512DF, TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096) +VLS_ENTRY (V1BI, riscv_vector::vls_mode_valid_p (V1BImode)) +VLS_ENTRY (V2BI, riscv_vector::vls_mode_valid_p (V2BImode)) +VLS_ENTRY (V4BI, riscv_vector::vls_mode_valid_p (V4BImode)) +VLS_ENTRY (V8BI, riscv_vector::vls_mode_valid_p (V8BImode)) +VLS_ENTRY (V16BI, riscv_vector::vls_mode_valid_p (V16BImode)) +VLS_ENTRY (V32BI, riscv_vector::vls_mode_valid_p (V32BImode)) +VLS_ENTRY (V64BI, riscv_vector::vls_mode_valid_p (V64BImode) && TARGET_MIN_VLEN >= 64) +VLS_ENTRY (V128BI, riscv_vector::vls_mode_valid_p (V128BImode) && TARGET_MIN_VLEN >= 128) +VLS_ENTRY (V256BI, riscv_vector::vls_mode_valid_p (V256BImode) && TARGET_MIN_VLEN >= 256) +VLS_ENTRY (V512BI, riscv_vector::vls_mode_valid_p (V512BImode) && TARGET_MIN_VLEN >= 512) +VLS_ENTRY (V1024BI, riscv_vector::vls_mode_valid_p (V1024BImode) && TARGET_MIN_VLEN >= 1024) +VLS_ENTRY (V2048BI, riscv_vector::vls_mode_valid_p (V2048BImode) && TARGET_MIN_VLEN >= 2048) 
+VLS_ENTRY (V4096BI, riscv_vector::vls_mode_valid_p (V4096BImode) && TARGET_MIN_VLEN >= 4096) + +VLS_ENTRY (V1QI, riscv_vector::vls_mode_valid_p (V1QImode)) +VLS_ENTRY (V2QI, riscv_vector::vls_mode_valid_p (V2QImode)) +VLS_ENTRY (V4QI, riscv_vector::vls_mode_valid_p (V4QImode)) +VLS_ENTRY (V8QI, riscv_vector::vls_mode_valid_p (V8QImode)) +VLS_ENTRY (V16QI, riscv_vector::vls_mode_valid_p (V16QImode)) +VLS_ENTRY (V32QI, riscv_vector::vls_mode_valid_p (V32QImode)) +VLS_ENTRY (V64QI, riscv_vector::vls_mode_valid_p (V64QImode) && TARGET_MIN_VLEN >= 64) +VLS_ENTRY (V128QI, riscv_vector::vls_mode_valid_p (V128QImode) && TARGET_MIN_VLEN >= 128) +VLS_ENTRY (V256QI, riscv_vector::vls_mode_valid_p (V256QImode) && TARGET_MIN_VLEN >= 256) +VLS_ENTRY (V512QI, riscv_vector::vls_mode_valid_p (V512QImode) && TARGET_MIN_VLEN >= 512) +VLS_ENTRY (V1024QI, riscv_vector::vls_mode_valid_p (V1024QImode) && TARGET_MIN_VLEN >= 1024) +VLS_ENTRY (V2048QI, riscv_vector::vls_mode_valid_p (V2048QImode) && TARGET_MIN_VLEN >= 2048) +VLS_ENTRY (V4096QI, riscv_vector::vls_mode_valid_p (V4096QImode) && TARGET_MIN_VLEN >= 4096) +VLS_ENTRY (V1HI, riscv_vector::vls_mode_valid_p (V1HImode)) +VLS_ENTRY (V2HI, riscv_vector::vls_mode_valid_p (V2HImode)) +VLS_ENTRY (V4HI, riscv_vector::vls_mode_valid_p (V4HImode)) +VLS_ENTRY (V8HI, riscv_vector::vls_mode_valid_p (V8HImode)) +VLS_ENTRY (V16HI, riscv_vector::vls_mode_valid_p (V16HImode)) +VLS_ENTRY (V32HI, riscv_vector::vls_mode_valid_p (V32HImode) && TARGET_MIN_VLEN >= 64) +VLS_ENTRY (V64HI, riscv_vector::vls_mode_valid_p (V64HImode) && TARGET_MIN_VLEN >= 128) +VLS_ENTRY (V128HI, riscv_vector::vls_mode_valid_p (V128HImode) && TARGET_MIN_VLEN >= 256) +VLS_ENTRY (V256HI, riscv_vector::vls_mode_valid_p (V256HImode) && TARGET_MIN_VLEN >= 512) +VLS_ENTRY (V512HI, riscv_vector::vls_mode_valid_p (V512HImode) && TARGET_MIN_VLEN >= 1024) +VLS_ENTRY (V1024HI, riscv_vector::vls_mode_valid_p (V1024HImode) && TARGET_MIN_VLEN >= 2048) +VLS_ENTRY (V2048HI, riscv_vector::vls_mode_valid_p (V2048HImode) && TARGET_MIN_VLEN >= 4096) +VLS_ENTRY (V1SI, riscv_vector::vls_mode_valid_p (V1SImode)) +VLS_ENTRY (V2SI, riscv_vector::vls_mode_valid_p (V2SImode)) +VLS_ENTRY (V4SI, riscv_vector::vls_mode_valid_p (V4SImode)) +VLS_ENTRY (V8SI, riscv_vector::vls_mode_valid_p (V8SImode)) +VLS_ENTRY (V16SI, riscv_vector::vls_mode_valid_p (V16SImode) && TARGET_MIN_VLEN >= 64) +VLS_ENTRY (V32SI, riscv_vector::vls_mode_valid_p (V32SImode) && TARGET_MIN_VLEN >= 128) +VLS_ENTRY (V64SI, riscv_vector::vls_mode_valid_p (V64SImode) && TARGET_MIN_VLEN >= 256) +VLS_ENTRY (V128SI, riscv_vector::vls_mode_valid_p (V128SImode) && TARGET_MIN_VLEN >= 512) +VLS_ENTRY (V256SI, riscv_vector::vls_mode_valid_p (V256SImode) && TARGET_MIN_VLEN >= 1024) +VLS_ENTRY (V512SI, riscv_vector::vls_mode_valid_p (V512SImode) && TARGET_MIN_VLEN >= 2048) +VLS_ENTRY (V1024SI, riscv_vector::vls_mode_valid_p (V1024SImode) && TARGET_MIN_VLEN >= 4096) +VLS_ENTRY (V1DI, riscv_vector::vls_mode_valid_p (V1DImode) && TARGET_VECTOR_ELEN_64) +VLS_ENTRY (V2DI, riscv_vector::vls_mode_valid_p (V2DImode) && TARGET_VECTOR_ELEN_64) +VLS_ENTRY (V4DI, riscv_vector::vls_mode_valid_p (V4DImode) && TARGET_VECTOR_ELEN_64) +VLS_ENTRY (V8DI, riscv_vector::vls_mode_valid_p (V8DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 64) +VLS_ENTRY (V16DI, riscv_vector::vls_mode_valid_p (V16DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128) +VLS_ENTRY (V32DI, riscv_vector::vls_mode_valid_p (V32DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 256) +VLS_ENTRY (V64DI, 
riscv_vector::vls_mode_valid_p (V64DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 512) +VLS_ENTRY (V128DI, riscv_vector::vls_mode_valid_p (V128DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 1024) +VLS_ENTRY (V256DI, riscv_vector::vls_mode_valid_p (V256DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 2048) +VLS_ENTRY (V512DI, riscv_vector::vls_mode_valid_p (V512DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 4096) + +VLS_ENTRY (V1HF, riscv_vector::vls_mode_valid_p (V1HFmode) && TARGET_VECTOR_ELEN_FP_16) +VLS_ENTRY (V2HF, riscv_vector::vls_mode_valid_p (V2HFmode) && TARGET_VECTOR_ELEN_FP_16) +VLS_ENTRY (V4HF, riscv_vector::vls_mode_valid_p (V4HFmode) && TARGET_VECTOR_ELEN_FP_16) +VLS_ENTRY (V8HF, riscv_vector::vls_mode_valid_p (V8HFmode) && TARGET_VECTOR_ELEN_FP_16) +VLS_ENTRY (V16HF, riscv_vector::vls_mode_valid_p (V16HFmode) && TARGET_VECTOR_ELEN_FP_16) +VLS_ENTRY (V32HF, riscv_vector::vls_mode_valid_p (V32HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 64) +VLS_ENTRY (V64HF, riscv_vector::vls_mode_valid_p (V64HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 128) +VLS_ENTRY (V128HF, riscv_vector::vls_mode_valid_p (V128HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 256) +VLS_ENTRY (V256HF, riscv_vector::vls_mode_valid_p (V256HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 512) +VLS_ENTRY (V512HF, riscv_vector::vls_mode_valid_p (V512HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 1024) +VLS_ENTRY (V1024HF, riscv_vector::vls_mode_valid_p (V1024HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 2048) +VLS_ENTRY (V2048HF, riscv_vector::vls_mode_valid_p (V2048HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 4096) +VLS_ENTRY (V1SF, riscv_vector::vls_mode_valid_p (V1SFmode) && TARGET_VECTOR_ELEN_FP_32) +VLS_ENTRY (V2SF, riscv_vector::vls_mode_valid_p (V2SFmode) && TARGET_VECTOR_ELEN_FP_32) +VLS_ENTRY (V4SF, riscv_vector::vls_mode_valid_p (V4SFmode) && TARGET_VECTOR_ELEN_FP_32) +VLS_ENTRY (V8SF, riscv_vector::vls_mode_valid_p (V8SFmode) && TARGET_VECTOR_ELEN_FP_32) +VLS_ENTRY (V16SF, riscv_vector::vls_mode_valid_p (V16SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64) +VLS_ENTRY (V32SF, riscv_vector::vls_mode_valid_p (V32SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128) +VLS_ENTRY (V64SF, riscv_vector::vls_mode_valid_p (V64SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256) +VLS_ENTRY (V128SF, riscv_vector::vls_mode_valid_p (V128SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512) +VLS_ENTRY (V256SF, riscv_vector::vls_mode_valid_p (V256SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024) +VLS_ENTRY (V512SF, riscv_vector::vls_mode_valid_p (V512SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048) +VLS_ENTRY (V1024SF, riscv_vector::vls_mode_valid_p (V1024SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096) +VLS_ENTRY (V1DF, riscv_vector::vls_mode_valid_p (V1DFmode) && TARGET_VECTOR_ELEN_FP_64) +VLS_ENTRY (V2DF, riscv_vector::vls_mode_valid_p (V2DFmode) && TARGET_VECTOR_ELEN_FP_64) +VLS_ENTRY (V4DF, riscv_vector::vls_mode_valid_p (V4DFmode) && TARGET_VECTOR_ELEN_FP_64) +VLS_ENTRY (V8DF, riscv_vector::vls_mode_valid_p (V8DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64) +VLS_ENTRY (V16DF, riscv_vector::vls_mode_valid_p (V16DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128) +VLS_ENTRY (V32DF, riscv_vector::vls_mode_valid_p (V32DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256) +VLS_ENTRY (V64DF, 
riscv_vector::vls_mode_valid_p (V64DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512) +VLS_ENTRY (V128DF, riscv_vector::vls_mode_valid_p (V128DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024) +VLS_ENTRY (V256DF, riscv_vector::vls_mode_valid_p (V256DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048) +VLS_ENTRY (V512DF, riscv_vector::vls_mode_valid_p (V512DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096) #undef VLS_ENTRY #undef TUPLE_ENTRY diff --git a/gcc/config/riscv/riscv-vsetvl.cc b/gcc/config/riscv/riscv-vsetvl.cc index 4b06d93..5f26a9e 100644 --- a/gcc/config/riscv/riscv-vsetvl.cc +++ b/gcc/config/riscv/riscv-vsetvl.cc @@ -18,60 +18,47 @@ You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ -/* This pass is to Set VL/VTYPE global status for RVV instructions - that depend on VL and VTYPE registers by Lazy code motion (LCM). - - Strategy: - - - Backward demanded info fusion within block. - - - Lazy code motion (LCM) based demanded info backward propagation. - - - RTL_SSA framework for def-use, PHI analysis. - - - Lazy code motion (LCM) for global VL/VTYPE optimization. - - Assumption: - - - Each avl operand is either an immediate (must be in range 0 ~ 31) or reg. - - This pass consists of 5 phases: - - - Phase 1 - compute VL/VTYPE demanded information within each block - by backward data-flow analysis. - - - Phase 2 - Emit vsetvl instructions within each basic block according to - demand, compute and save ANTLOC && AVLOC of each block. - - - Phase 3 - LCM Earliest-edge baseed VSETVL demand fusion. - - - Phase 4 - Lazy code motion including: compute local properties, - pre_edge_lcm and vsetvl insertion && delete edges for LCM results. - - - Phase 5 - Cleanup AVL operand of RVV instruction since it will not be - used any more and VL operand of VSETVL instruction if it is not used by - any non-debug instructions. - - - Phase 6 - DF based post VSETVL optimizations. - - Implementation: - - - The subroutine of optimize == 0 is simple_vsetvl. - This function simplily vsetvl insertion for each RVV - instruction. No optimization. - - - The subroutine of optimize > 0 is lazy_vsetvl. - This function optimize vsetvl insertion process by - lazy code motion (LCM) layering on RTL_SSA. - - - get_avl (), get_insn (), get_avl_source (): - - 1. get_insn () is the current instruction, find_access (get_insn - ())->def is the same as get_avl_source () if get_insn () demand VL. - 2. If get_avl () is non-VLMAX REG, get_avl () == get_avl_source - ()->regno (). - 3. get_avl_source ()->regno () is the REGNO that we backward propagate. - */ +/* The values of the vl and vtype registers will affect the behavior of RVV + insns. That is, when we need to execute an RVV instruction, we need to set + the correct vl and vtype values by executing the vsetvl instruction before. + Executing the fewest number of vsetvl instructions while keeping the behavior + the same is the problem this pass is trying to solve. This vsetvl pass is + divided into 5 phases: + + - Phase 1 (fuse local vsetvl infos): traverses each Basic Block, parses + each instruction in it that affects vl and vtype state and generates an + array of vsetvl_info objects. Then traverse the vsetvl_info array from + front to back and perform fusion according to the fusion rules. The fused + vsetvl infos are stored in the vsetvl_block_info object's `infos` field. 
+ + - Phase 2 (earliest fuse global vsetvl infos): The header_info and + footer_info of vsetvl_block_info are used as expressions, and the + earliest of each expression is computed. Based on the earliest + information, try to lift up the corresponding vsetvl info to the src + basic block of the edge (mainly to reduce the total number of vsetvl + instructions; this uplift will cause some execution paths to execute + vsetvl instructions that shouldn't be there). + + - Phase 3 (pre global vsetvl info): The header_info and footer_info of + vsetvl_block_info are used as expressions, and the LCM algorithm is used + to compute the header_info that needs to be deleted and the one that + needs to be inserted in some edges. + + - Phase 4 (emit vsetvl insns): Based on the fusion result of Phase 1 and + the deletion and insertion information of Phase 3, the mandatory vsetvl + instruction insertion, modification and deletion are performed. + + - Phase 5 (cleanup): Clean up the avl operand in the RVV operator + instruction and clean up the unused dest operand of the vsetvl insn. + + After Phase 1, a virtual CFG of vsetvl_info is generated. The virtual + basic block is represented by vsetvl_block_info, and the virtual vsetvl + statements inside are represented by vsetvl_info. The later phases 2 and 3 + are constantly modifying and adjusting this virtual CFG. Phase 4 performs + insertion, modification and deletion of vsetvl instructions based on the + optimized virtual CFG. Phases 1, 2 and 3 do not involve modifications to + the RTL. +*/ #define IN_TARGET_CODE 1 #define INCLUDE_ALGORITHM @@ -98,61 +85,180 @@ along with GCC; see the file COPYING3. If not see #include "predict.h" #include "profile-count.h" #include "gcse.h" -#include "riscv-vsetvl.h" using namespace rtl_ssa; using namespace riscv_vector; -static CONSTEXPR const unsigned ALL_SEW[] = {8, 16, 32, 64}; -static CONSTEXPR const vlmul_type ALL_LMUL[] - = {LMUL_1, LMUL_2, LMUL_4, LMUL_8, LMUL_F8, LMUL_F4, LMUL_F2}; +/* Set the bitmap DST to the union of SRC of predecessors of + basic block B. + It's a bit different from bitmap_union_of_preds in cfganal.cc. This function + takes into account the case where a pred is the ENTRY basic block. The main reason + for this difference is to make it easier to insert some special value into + the ENTRY basic block, for example a vsetvl_info with a status of UNKNOWN. */ +static void +bitmap_union_of_preds_with_entry (sbitmap dst, sbitmap *src, basic_block b) +{ + unsigned int set_size = dst->size; + edge e; + unsigned ix; + + for (ix = 0; ix < EDGE_COUNT (b->preds); ix++) + { + e = EDGE_PRED (b, ix); + bitmap_copy (dst, src[e->src->index]); + break; + } -DEBUG_FUNCTION void -debug (const vector_insn_info *info) + if (ix == EDGE_COUNT (b->preds)) + bitmap_clear (dst); + else + for (ix++; ix < EDGE_COUNT (b->preds); ix++) + { + unsigned int i; + SBITMAP_ELT_TYPE *p, *r; + + e = EDGE_PRED (b, ix); + p = src[e->src->index]->elms; + r = dst->elms; + for (i = 0; i < set_size; i++) + *r++ |= *p++; + } +} + +/* Compute the reaching definition in and out based on the GEN and KILL + information in each basic block. + This function references the compute_available implementation in lcm.cc. */ +static void +compute_reaching_defintion (sbitmap *gen, sbitmap *kill, sbitmap *in, + sbitmap *out) { - info->dump (stderr); + edge e; + basic_block *worklist, *qin, *qout, *qend, bb; + unsigned int qlen; + edge_iterator ei; + + /* Allocate a worklist array/queue. Entries are only added to the + list if they were not already on the list. 
So the size is + bounded by the number of basic blocks. */ + qin = qout = worklist + = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS); + + /* Put every block on the worklist; this is necessary because of the + optimistic initialization of AVOUT above. Use reverse postorder + to make the forward dataflow problem require less iterations. */ + int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS); + int n = pre_and_rev_post_order_compute_fn (cfun, NULL, rpo, false); + for (int i = 0; i < n; ++i) + { + bb = BASIC_BLOCK_FOR_FN (cfun, rpo[i]); + *qin++ = bb; + bb->aux = bb; + } + free (rpo); + + qin = worklist; + qend = &worklist[n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS]; + qlen = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; + + /* Mark blocks which are successors of the entry block so that we + can easily identify them below. */ + FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs) + e->dest->aux = ENTRY_BLOCK_PTR_FOR_FN (cfun); + + /* Iterate until the worklist is empty. */ + while (qlen) + { + /* Take the first entry off the worklist. */ + bb = *qout++; + qlen--; + + if (qout >= qend) + qout = worklist; + + /* Do not clear the aux field for blocks which are successors of the + ENTRY block. That way we never add then to the worklist again. */ + if (bb->aux != ENTRY_BLOCK_PTR_FOR_FN (cfun)) + bb->aux = NULL; + + bitmap_union_of_preds_with_entry (in[bb->index], out, bb); + + if (bitmap_ior_and_compl (out[bb->index], gen[bb->index], in[bb->index], + kill[bb->index])) + /* If the out state of this block changed, then we need + to add the successors of this block to the worklist + if they are not already on the worklist. */ + FOR_EACH_EDGE (e, ei, bb->succs) + if (!e->dest->aux && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) + { + *qin++ = e->dest; + e->dest->aux = e; + qlen++; + + if (qin >= qend) + qin = worklist; + } + } + + clear_aux_for_edges (); + clear_aux_for_blocks (); + free (worklist); } -DEBUG_FUNCTION void -debug (const vector_infos_manager *info) +/* Classification of vsetvl instruction. */ +enum vsetvl_type { - info->dump (stderr); -} + VSETVL_NORMAL, + VSETVL_VTYPE_CHANGE_ONLY, + VSETVL_DISCARD_RESULT, + NUM_VSETVL_TYPE +}; -static bool -vlmax_avl_p (rtx x) +enum emit_type { - return x && rtx_equal_p (x, RVV_VLMAX); + /* emit_insn directly. */ + EMIT_DIRECT, + EMIT_BEFORE, + EMIT_AFTER, +}; + +/* dump helper functions */ +static const char * +vlmul_to_str (vlmul_type vlmul) +{ + switch (vlmul) + { + case LMUL_1: + return "m1"; + case LMUL_2: + return "m2"; + case LMUL_4: + return "m4"; + case LMUL_8: + return "m8"; + case LMUL_RESERVED: + return "INVALID LMUL"; + case LMUL_F8: + return "mf8"; + case LMUL_F4: + return "mf4"; + case LMUL_F2: + return "mf2"; + + default: + gcc_unreachable (); + } } -static bool -vlmax_avl_insn_p (rtx_insn *rinsn) +static const char * +policy_to_str (bool agnostic_p) { - return (INSN_CODE (rinsn) == CODE_FOR_vlmax_avlsi - || INSN_CODE (rinsn) == CODE_FOR_vlmax_avldi); + return agnostic_p ? 
"agnostic" : "undisturbed"; } -/* Return true if the block is a loop itself: - local_dem - __________ - ____|____ | - | | | - |________| | - |_________| - reaching_out -*/ static bool -loop_basic_block_p (const basic_block cfg_bb) +vlmax_avl_p (rtx x) { - if (JUMP_P (BB_END (cfg_bb)) && any_condjump_p (BB_END (cfg_bb))) - { - edge e; - edge_iterator ei; - FOR_EACH_EDGE (e, ei, cfg_bb->succs) - if (e->dest->index == cfg_bb->index) - return true; - } - return false; + return x && rtx_equal_p (x, RVV_VLMAX); } /* Return true if it is an RVV instruction depends on VTYPE global @@ -171,13 +277,6 @@ has_vl_op (rtx_insn *rinsn) return recog_memoized (rinsn) >= 0 && get_attr_has_vl_op (rinsn); } -/* Is this a SEW value that can be encoded into the VTYPE format. */ -static bool -valid_sew_p (size_t sew) -{ - return exact_log2 (sew) && sew >= 8 && sew <= 64; -} - /* Return true if the instruction ignores VLMUL field of VTYPE. */ static bool ignore_vlmul_insn_p (rtx_insn *rinsn) @@ -223,7 +322,7 @@ vector_config_insn_p (rtx_insn *rinsn) static bool vsetvl_insn_p (rtx_insn *rinsn) { - if (!vector_config_insn_p (rinsn)) + if (!rinsn || !vector_config_insn_p (rinsn)) return false; return (INSN_CODE (rinsn) == CODE_FOR_vsetvldi || INSN_CODE (rinsn) == CODE_FOR_vsetvlsi); @@ -239,34 +338,13 @@ vsetvl_discard_result_insn_p (rtx_insn *rinsn) || INSN_CODE (rinsn) == CODE_FOR_vsetvl_discard_resultsi); } -/* Return true if it is vsetvl zero, zero. */ -static bool -vsetvl_vtype_change_only_p (rtx_insn *rinsn) -{ - if (!vector_config_insn_p (rinsn)) - return false; - return (INSN_CODE (rinsn) == CODE_FOR_vsetvl_vtype_change_only); -} - -static bool -after_or_same_p (const insn_info *insn1, const insn_info *insn2) -{ - return insn1->compare_with (insn2) >= 0; -} - static bool real_insn_and_same_bb_p (const insn_info *insn, const bb_info *bb) { return insn != nullptr && insn->is_real () && insn->bb () == bb; } -static bool -before_p (const insn_info *insn1, const insn_info *insn2) -{ - return insn1->compare_with (insn2) < 0; -} - -/* Helper function to get VL operand. */ +/* Helper function to get VL operand for VLMAX insn. */ static rtx get_vl (rtx_insn *rinsn) { @@ -278,224 +356,6 @@ get_vl (rtx_insn *rinsn) return SET_DEST (XVECEXP (PATTERN (rinsn), 0, 0)); } -/* An "anticipatable occurrence" is one that is the first occurrence in the - basic block, the operands are not modified in the basic block prior - to the occurrence and the output is not used between the start of - the block and the occurrence. - - For VSETVL instruction, we have these following formats: - 1. vsetvl zero, rs1. - 2. vsetvl zero, imm. - 3. vsetvl rd, rs1. - - So base on these circumstances, a DEM is considered as a local anticipatable - occurrence should satisfy these following conditions: - - 1). rs1 (avl) are not modified in the basic block prior to the VSETVL. - 2). rd (vl) are not modified in the basic block prior to the VSETVL. - 3). rd (vl) is not used between the start of the block and the occurrence. - - Note: We don't need to check VL/VTYPE here since DEM is UNKNOWN if VL/VTYPE - is modified prior to the occurrence. This case is already considered as - a non-local anticipatable occurrence. -*/ -static bool -anticipatable_occurrence_p (const bb_info *bb, const vector_insn_info dem) -{ - insn_info *insn = dem.get_insn (); - /* The only possible operand we care of VSETVL is AVL. */ - if (dem.has_avl_reg ()) - { - /* rs1 (avl) are not modified in the basic block prior to the VSETVL. 
*/ - rtx avl = dem.get_avl_or_vl_reg (); - if (dem.dirty_p ()) - { - gcc_assert (!vsetvl_insn_p (insn->rtl ())); - - /* Earliest VSETVL will be inserted at the end of the block. */ - for (const insn_info *i : bb->real_nondebug_insns ()) - { - /* rs1 (avl) are not modified in the basic block prior to the - VSETVL. */ - if (find_access (i->defs (), REGNO (avl))) - return false; - if (vlmax_avl_p (dem.get_avl ())) - { - /* rd (avl) is not used between the start of the block and - the occurrence. Note: Only for Dirty and VLMAX-avl. */ - if (find_access (i->uses (), REGNO (avl))) - return false; - } - } - - return true; - } - else if (!vlmax_avl_p (avl)) - { - set_info *set = dem.get_avl_source (); - /* If it's undefined, it's not anticipatable conservatively. */ - if (!set) - return false; - if (real_insn_and_same_bb_p (set->insn (), bb) - && before_p (set->insn (), insn)) - return false; - for (insn_info *i = insn->prev_nondebug_insn (); - real_insn_and_same_bb_p (i, bb); i = i->prev_nondebug_insn ()) - { - /* rs1 (avl) are not modified in the basic block prior to the - VSETVL. */ - if (find_access (i->defs (), REGNO (avl))) - return false; - } - } - } - - /* rd (vl) is not used between the start of the block and the occurrence. */ - if (vsetvl_insn_p (insn->rtl ())) - { - rtx dest = get_vl (insn->rtl ()); - for (insn_info *i = insn->prev_nondebug_insn (); - real_insn_and_same_bb_p (i, bb); i = i->prev_nondebug_insn ()) - { - /* rd (vl) is not used between the start of the block and the - * occurrence. */ - if (find_access (i->uses (), REGNO (dest))) - return false; - /* rd (vl) are not modified in the basic block prior to the VSETVL. */ - if (find_access (i->defs (), REGNO (dest))) - return false; - } - } - - return true; -} - -/* An "available occurrence" is one that is the last occurrence in the - basic block and the operands are not modified by following statements in - the basic block [including this insn]. - - For VSETVL instruction, we have these following formats: - 1. vsetvl zero, rs1. - 2. vsetvl zero, imm. - 3. vsetvl rd, rs1. - - So base on these circumstances, a DEM is considered as a local available - occurrence should satisfy these following conditions: - - 1). rs1 (avl) are not modified by following statements in - the basic block. - 2). rd (vl) are not modified by following statements in - the basic block. - - Note: We don't need to check VL/VTYPE here since DEM is UNKNOWN if VL/VTYPE - is modified prior to the occurrence. This case is already considered as - a non-local available occurrence. -*/ -static bool -available_occurrence_p (const bb_info *bb, const vector_insn_info dem) -{ - insn_info *insn = dem.get_insn (); - /* The only possible operand we care of VSETVL is AVL. */ - if (dem.has_avl_reg ()) - { - if (!vlmax_avl_p (dem.get_avl ())) - { - rtx dest = NULL_RTX; - insn_info *i = insn; - if (vsetvl_insn_p (insn->rtl ())) - { - dest = get_vl (insn->rtl ()); - /* For user vsetvl a2, a2 instruction, we consider it as - available even though it modifies "a2". */ - i = i->next_nondebug_insn (); - } - for (; real_insn_and_same_bb_p (i, bb); i = i->next_nondebug_insn ()) - { - if (read_vl_insn_p (i->rtl ())) - continue; - /* rs1 (avl) are not modified by following statements in - the basic block. */ - if (find_access (i->defs (), REGNO (dem.get_avl ()))) - return false; - /* rd (vl) are not modified by following statements in - the basic block. 
*/ - if (dest && find_access (i->defs (), REGNO (dest))) - return false; - } - } - } - return true; -} - -static bool -insn_should_be_added_p (const insn_info *insn, unsigned int types) -{ - if (insn->is_real () && (types & REAL_SET)) - return true; - if (insn->is_phi () && (types & PHI_SET)) - return true; - if (insn->is_bb_head () && (types & BB_HEAD_SET)) - return true; - if (insn->is_bb_end () && (types & BB_END_SET)) - return true; - return false; -} - -/* Recursively find all define instructions. The kind of instruction is - specified by the DEF_TYPE. */ -static hash_set<set_info *> -get_all_sets (phi_info *phi, unsigned int types) -{ - hash_set<set_info *> insns; - auto_vec<phi_info *> work_list; - hash_set<phi_info *> visited_list; - if (!phi) - return hash_set<set_info *> (); - work_list.safe_push (phi); - - while (!work_list.is_empty ()) - { - phi_info *phi = work_list.pop (); - visited_list.add (phi); - for (use_info *use : phi->inputs ()) - { - def_info *def = use->def (); - set_info *set = safe_dyn_cast<set_info *> (def); - if (!set) - return hash_set<set_info *> (); - - gcc_assert (!set->insn ()->is_debug_insn ()); - - if (insn_should_be_added_p (set->insn (), types)) - insns.add (set); - if (set->insn ()->is_phi ()) - { - phi_info *new_phi = as_a<phi_info *> (set); - if (!visited_list.contains (new_phi)) - work_list.safe_push (new_phi); - } - } - } - return insns; -} - -static hash_set<set_info *> -get_all_sets (set_info *set, bool /* get_real_inst */ real_p, - bool /*get_phi*/ phi_p, bool /* get_function_parameter*/ param_p) -{ - if (real_p && phi_p && param_p) - return get_all_sets (safe_dyn_cast<phi_info *> (set), - REAL_SET | PHI_SET | BB_HEAD_SET | BB_END_SET); - - else if (real_p && param_p) - return get_all_sets (safe_dyn_cast<phi_info *> (set), - REAL_SET | BB_HEAD_SET | BB_END_SET); - - else if (real_p) - return get_all_sets (safe_dyn_cast<phi_info *> (set), REAL_SET); - return hash_set<set_info *> (); -} - /* Helper function to get AVL operand. */ static rtx get_avl (rtx_insn *rinsn) @@ -511,15 +371,6 @@ get_avl (rtx_insn *rinsn) return recog_data.operand[get_attr_vl_op_idx (rinsn)]; } -static set_info * -get_same_bb_set (hash_set<set_info *> &sets, const basic_block cfg_bb) -{ - for (set_info *set : sets) - if (set->bb ()->cfg_bb () == cfg_bb) - return set; - return nullptr; -} - /* Helper function to get SEW operand. We always have SEW value for all RVV instructions that have VTYPE OP. */ static uint8_t @@ -589,365 +440,174 @@ has_vector_insn (function *fn) return false; } -/* Emit vsetvl instruction. 
*/ -static rtx -gen_vsetvl_pat (enum vsetvl_type insn_type, const vl_vtype_info &info, rtx vl) -{ - rtx avl = info.get_avl (); - /* if optimization == 0 and the instruction is vmv.x.s/vfmv.f.s, - set the value of avl to (const_int 0) so that VSETVL PASS will - insert vsetvl correctly.*/ - if (info.has_avl_no_reg ()) - avl = GEN_INT (0); - rtx sew = gen_int_mode (info.get_sew (), Pmode); - rtx vlmul = gen_int_mode (info.get_vlmul (), Pmode); - rtx ta = gen_int_mode (info.get_ta (), Pmode); - rtx ma = gen_int_mode (info.get_ma (), Pmode); - - if (insn_type == VSETVL_NORMAL) - { - gcc_assert (vl != NULL_RTX); - return gen_vsetvl (Pmode, vl, avl, sew, vlmul, ta, ma); - } - else if (insn_type == VSETVL_VTYPE_CHANGE_ONLY) - return gen_vsetvl_vtype_change_only (sew, vlmul, ta, ma); - else - return gen_vsetvl_discard_result (Pmode, avl, sew, vlmul, ta, ma); -} - -static rtx -gen_vsetvl_pat (rtx_insn *rinsn, const vector_insn_info &info, - rtx vl = NULL_RTX) +static vlmul_type +calculate_vlmul (unsigned int sew, unsigned int ratio) { - rtx new_pat; - vl_vtype_info new_info = info; - if (info.get_insn () && info.get_insn ()->rtl () - && fault_first_load_p (info.get_insn ()->rtl ())) - new_info.set_avl_info ( - avl_info (get_avl (info.get_insn ()->rtl ()), nullptr)); - if (vl) - new_pat = gen_vsetvl_pat (VSETVL_NORMAL, new_info, vl); - else - { - if (vsetvl_insn_p (rinsn)) - new_pat = gen_vsetvl_pat (VSETVL_NORMAL, new_info, get_vl (rinsn)); - else if (INSN_CODE (rinsn) == CODE_FOR_vsetvl_vtype_change_only) - new_pat = gen_vsetvl_pat (VSETVL_VTYPE_CHANGE_ONLY, new_info, NULL_RTX); - else - new_pat = gen_vsetvl_pat (VSETVL_DISCARD_RESULT, new_info, NULL_RTX); - } - return new_pat; + const vlmul_type ALL_LMUL[] + = {LMUL_1, LMUL_2, LMUL_4, LMUL_8, LMUL_F8, LMUL_F4, LMUL_F2}; + for (const vlmul_type vlmul : ALL_LMUL) + if (calculate_ratio (sew, vlmul) == ratio) + return vlmul; + return LMUL_RESERVED; } -static void -emit_vsetvl_insn (enum vsetvl_type insn_type, enum emit_type emit_type, - const vl_vtype_info &info, rtx vl, rtx_insn *rinsn) +/* Get the currently supported maximum sew used in the int rvv instructions. */ +static uint8_t +get_max_int_sew () { - rtx pat = gen_vsetvl_pat (insn_type, info, vl); - if (dump_file) - { - fprintf (dump_file, "\nInsert vsetvl insn PATTERN:\n"); - print_rtl_single (dump_file, pat); - fprintf (dump_file, "\nfor insn:\n"); - print_rtl_single (dump_file, rinsn); - } - - if (emit_type == EMIT_DIRECT) - emit_insn (pat); - else if (emit_type == EMIT_BEFORE) - emit_insn_before (pat, rinsn); - else - emit_insn_after (pat, rinsn); + if (TARGET_VECTOR_ELEN_64) + return 64; + else if (TARGET_VECTOR_ELEN_32) + return 32; + gcc_unreachable (); } -static void -eliminate_insn (rtx_insn *rinsn) -{ - if (dump_file) - { - fprintf (dump_file, "\nEliminate insn %d:\n", INSN_UID (rinsn)); - print_rtl_single (dump_file, rinsn); - } - if (in_sequence_p ()) - remove_insn (rinsn); - else - delete_insn (rinsn); +/* Get the currently supported maximum sew used in the float rvv instructions. + */ +static uint8_t +get_max_float_sew () +{ + if (TARGET_VECTOR_ELEN_FP_64) + return 64; + else if (TARGET_VECTOR_ELEN_FP_32) + return 32; + else if (TARGET_VECTOR_ELEN_FP_16) + return 16; + gcc_unreachable (); } -static vsetvl_type -insert_vsetvl (enum emit_type emit_type, rtx_insn *rinsn, - const vector_insn_info &info, const vector_insn_info &prev_info) +/* Count the number of REGNO in RINSN. 
*/ +static int +count_regno_occurrences (rtx_insn *rinsn, unsigned int regno) { - /* Use X0, X0 form if the AVL is the same and the SEW+LMUL gives the same - VLMAX. */ - if (prev_info.valid_or_dirty_p () && !prev_info.unknown_p () - && info.compatible_avl_p (prev_info) && info.same_vlmax_p (prev_info)) - { - emit_vsetvl_insn (VSETVL_VTYPE_CHANGE_ONLY, emit_type, info, NULL_RTX, - rinsn); - return VSETVL_VTYPE_CHANGE_ONLY; - } - - if (info.has_avl_imm ()) - { - emit_vsetvl_insn (VSETVL_DISCARD_RESULT, emit_type, info, NULL_RTX, - rinsn); - return VSETVL_DISCARD_RESULT; - } - - if (info.has_avl_no_reg ()) - { - /* We can only use x0, x0 if there's no chance of the vtype change causing - the previous vl to become invalid. */ - if (prev_info.valid_or_dirty_p () && !prev_info.unknown_p () - && info.same_vlmax_p (prev_info)) - { - emit_vsetvl_insn (VSETVL_VTYPE_CHANGE_ONLY, emit_type, info, NULL_RTX, - rinsn); - return VSETVL_VTYPE_CHANGE_ONLY; - } - /* Otherwise use an AVL of 0 to avoid depending on previous vl. */ - vl_vtype_info new_info = info; - new_info.set_avl_info (avl_info (const0_rtx, nullptr)); - emit_vsetvl_insn (VSETVL_DISCARD_RESULT, emit_type, new_info, NULL_RTX, - rinsn); - return VSETVL_DISCARD_RESULT; - } - - /* Use X0 as the DestReg unless AVLReg is X0. We also need to change the - opcode if the AVLReg is X0 as they have different register classes for - the AVL operand. */ - if (vlmax_avl_p (info.get_avl ())) - { - gcc_assert (has_vtype_op (rinsn) || vsetvl_insn_p (rinsn)); - /* For user vsetvli a5, zero, we should use get_vl to get the VL - operand "a5". */ - rtx vl_op = info.get_avl_or_vl_reg (); - gcc_assert (!vlmax_avl_p (vl_op)); - emit_vsetvl_insn (VSETVL_NORMAL, emit_type, info, vl_op, rinsn); - return VSETVL_NORMAL; - } - - emit_vsetvl_insn (VSETVL_DISCARD_RESULT, emit_type, info, NULL_RTX, rinsn); - - if (dump_file) - { - fprintf (dump_file, "Update VL/VTYPE info, previous info="); - prev_info.dump (dump_file); - } - return VSETVL_DISCARD_RESULT; + int count = 0; + extract_insn (rinsn); + for (int i = 0; i < recog_data.n_operands; i++) + if (refers_to_regno_p (regno, recog_data.operand[i])) + count++; + return count; } -/* Get VL/VTYPE information for INSN. */ -static vl_vtype_info -get_vl_vtype_info (const insn_info *insn) +enum def_type { - set_info *set = nullptr; - rtx avl = ::get_avl (insn->rtl ()); - if (avl && REG_P (avl)) - { - if (vlmax_avl_p (avl) && has_vl_op (insn->rtl ())) - set - = find_access (insn->uses (), REGNO (get_vl (insn->rtl ())))->def (); - else if (!vlmax_avl_p (avl)) - set = find_access (insn->uses (), REGNO (avl))->def (); - else - set = nullptr; - } - - uint8_t sew = get_sew (insn->rtl ()); - enum vlmul_type vlmul = get_vlmul (insn->rtl ()); - uint8_t ratio = get_attr_ratio (insn->rtl ()); - /* when get_attr_ratio is invalid, this kind of instructions - doesn't care about ratio. However, we still need this value - in demand info backward analysis. */ - if (ratio == INVALID_ATTRIBUTE) - ratio = calculate_ratio (sew, vlmul); - bool ta = tail_agnostic_p (insn->rtl ()); - bool ma = mask_agnostic_p (insn->rtl ()); - - /* If merge operand is undef value, we prefer agnostic. 
*/ - int merge_op_idx = get_attr_merge_op_idx (insn->rtl ()); - if (merge_op_idx != INVALID_ATTRIBUTE - && satisfies_constraint_vu (recog_data.operand[merge_op_idx])) - { - ta = true; - ma = true; - } - - vl_vtype_info info (avl_info (avl, set), sew, vlmul, ratio, ta, ma); - return info; -} + REAL_SET = 1 << 0, + PHI_SET = 1 << 1, + BB_HEAD_SET = 1 << 2, + BB_END_SET = 1 << 3, + /* ??? TODO: In RTL_SSA framework, we have REAL_SET, + PHI_SET, BB_HEAD_SET, BB_END_SET and + CLOBBER_DEF def_info types. Currently, + we conservatively do not optimize clobber + def since we don't see the case that we + need to optimize it. */ + CLOBBER_DEF = 1 << 4 +}; -/* Change insn and Assert the change always happens. */ -static void -validate_change_or_fail (rtx object, rtx *loc, rtx new_rtx, bool in_group) +static bool +insn_should_be_added_p (const insn_info *insn, unsigned int types) { - bool change_p = validate_change (object, loc, new_rtx, in_group); - gcc_assert (change_p); + if (insn->is_real () && (types & REAL_SET)) + return true; + if (insn->is_phi () && (types & PHI_SET)) + return true; + if (insn->is_bb_head () && (types & BB_HEAD_SET)) + return true; + if (insn->is_bb_end () && (types & BB_END_SET)) + return true; + return false; } -static void -change_insn (rtx_insn *rinsn, rtx new_pat) +static const hash_set<use_info *> +get_all_real_uses (insn_info *insn, unsigned regno) { - /* We don't apply change on RTL_SSA here since it's possible a - new INSN we add in the PASS before which doesn't have RTL_SSA - info yet.*/ - if (dump_file) - { - fprintf (dump_file, "\nChange PATTERN of insn %d from:\n", - INSN_UID (rinsn)); - print_rtl_single (dump_file, PATTERN (rinsn)); - } + gcc_assert (insn->is_real ()); - validate_change_or_fail (rinsn, &PATTERN (rinsn), new_pat, false); + hash_set<use_info *> uses; + auto_vec<phi_info *> work_list; + hash_set<phi_info *> visited_list; - if (dump_file) + for (def_info *def : insn->defs ()) { - fprintf (dump_file, "\nto:\n"); - print_rtl_single (dump_file, PATTERN (rinsn)); + if (!def->is_reg () || def->regno () != regno) + continue; + set_info *set = safe_dyn_cast<set_info *> (def); + if (!set) + continue; + for (use_info *use : set->nondebug_insn_uses ()) + if (use->insn ()->is_real ()) + uses.add (use); + for (use_info *use : set->phi_uses ()) + work_list.safe_push (use->phi ()); } -} -static const insn_info * -get_forward_read_vl_insn (const insn_info *insn) -{ - const bb_info *bb = insn->bb (); - for (const insn_info *i = insn->next_nondebug_insn (); - real_insn_and_same_bb_p (i, bb); i = i->next_nondebug_insn ()) + while (!work_list.is_empty ()) { - if (find_access (i->defs (), VL_REGNUM)) - return nullptr; - if (read_vl_insn_p (i->rtl ())) - return i; - } - return nullptr; -} + phi_info *phi = work_list.pop (); + visited_list.add (phi); -static const insn_info * -get_backward_fault_first_load_insn (const insn_info *insn) -{ - const bb_info *bb = insn->bb (); - for (const insn_info *i = insn->prev_nondebug_insn (); - real_insn_and_same_bb_p (i, bb); i = i->prev_nondebug_insn ()) - { - if (fault_first_load_p (i->rtl ())) - return i; - if (find_access (i->defs (), VL_REGNUM)) - return nullptr; + for (use_info *use : phi->nondebug_insn_uses ()) + if (use->insn ()->is_real ()) + uses.add (use); + for (use_info *use : phi->phi_uses ()) + if (!visited_list.contains (use->phi ())) + work_list.safe_push (use->phi ()); } - return nullptr; + return uses; } -static bool -change_insn (function_info *ssa, insn_change change, insn_info *insn, - rtx new_pat) +/* Recursively 
find all define instructions. The kind of instruction is + specified by the DEF_TYPE. */ +static hash_set<set_info *> +get_all_sets (phi_info *phi, unsigned int types) { - rtx_insn *rinsn = insn->rtl (); - auto attempt = ssa->new_change_attempt (); - if (!restrict_movement (change)) - return false; + hash_set<set_info *> insns; + auto_vec<phi_info *> work_list; + hash_set<phi_info *> visited_list; + if (!phi) + return hash_set<set_info *> (); + work_list.safe_push (phi); - if (dump_file) + while (!work_list.is_empty ()) { - fprintf (dump_file, "\nChange PATTERN of insn %d from:\n", - INSN_UID (rinsn)); - print_rtl_single (dump_file, PATTERN (rinsn)); - } - - insn_change_watermark watermark; - validate_change_or_fail (rinsn, &PATTERN (rinsn), new_pat, true); - - /* These routines report failures themselves. */ - if (!recog (attempt, change) || !change_is_worthwhile (change, false)) - return false; + phi_info *phi = work_list.pop (); + visited_list.add (phi); + for (use_info *use : phi->inputs ()) + { + def_info *def = use->def (); + set_info *set = safe_dyn_cast<set_info *> (def); + if (!set) + return hash_set<set_info *> (); - /* Fix bug: - (insn 12 34 13 2 (set (reg:RVVM4DI 120 v24 [orig:134 _1 ] [134]) - (if_then_else:RVVM4DI (unspec:RVVMF8BI [ - (const_vector:RVVMF8BI repeat [ - (const_int 1 [0x1]) - ]) - (const_int 0 [0]) - (const_int 2 [0x2]) repeated x2 - (const_int 0 [0]) - (reg:SI 66 vl) - (reg:SI 67 vtype) - ] UNSPEC_VPREDICATE) - (plus:RVVM4DI (reg/v:RVVM4DI 104 v8 [orig:137 op1 ] [137]) - (sign_extend:RVVM4DI (vec_duplicate:RVVM4SI (reg:SI 15 a5 - [140])))) (unspec:RVVM4DI [ (const_int 0 [0]) ] UNSPEC_VUNDEF))) - "rvv.c":8:12 2784 {pred_single_widen_addsvnx8di_scalar} (expr_list:REG_EQUIV - (mem/c:RVVM4DI (reg:DI 10 a0 [142]) [1 <retval>+0 S[64, 64] A128]) - (expr_list:REG_EQUAL (if_then_else:RVVM4DI (unspec:RVVMF8BI [ - (const_vector:RVVMF8BI repeat [ - (const_int 1 [0x1]) - ]) - (reg/v:DI 13 a3 [orig:139 vl ] [139]) - (const_int 2 [0x2]) repeated x2 - (const_int 0 [0]) - (reg:SI 66 vl) - (reg:SI 67 vtype) - ] UNSPEC_VPREDICATE) - (plus:RVVM4DI (reg/v:RVVM4DI 104 v8 [orig:137 op1 ] [137]) - (const_vector:RVVM4DI repeat [ - (const_int 2730 [0xaaa]) - ])) - (unspec:RVVM4DI [ - (const_int 0 [0]) - ] UNSPEC_VUNDEF)) - (nil)))) - Here we want to remove use "a3". However, the REG_EQUAL/REG_EQUIV note use - "a3" which made us fail in change_insn. We reference to the - 'aarch64-cc-fusion.cc' and add this method. 
*/ - remove_reg_equal_equiv_notes (rinsn); - confirm_change_group (); - ssa->change_insn (change); + gcc_assert (!set->insn ()->is_debug_insn ()); - if (dump_file) - { - fprintf (dump_file, "\nto:\n"); - print_rtl_single (dump_file, PATTERN (rinsn)); + if (insn_should_be_added_p (set->insn (), types)) + insns.add (set); + if (set->insn ()->is_phi ()) + { + phi_info *new_phi = as_a<phi_info *> (set); + if (!visited_list.contains (new_phi)) + work_list.safe_push (new_phi); + } + } } - return true; + return insns; } -static void -change_vsetvl_insn (const insn_info *insn, const vector_insn_info &info, - rtx vl = NULL_RTX) +static hash_set<set_info *> +get_all_sets (set_info *set, bool /* get_real_inst */ real_p, + bool /*get_phi*/ phi_p, bool /* get_function_parameter*/ param_p) { - rtx_insn *rinsn; - if (vector_config_insn_p (insn->rtl ())) - { - rinsn = insn->rtl (); - gcc_assert (vsetvl_insn_p (rinsn) && "Can't handle X0, rs1 vsetvli yet"); - } - else - { - gcc_assert (has_vtype_op (insn->rtl ())); - rinsn = PREV_INSN (insn->rtl ()); - gcc_assert (vector_config_insn_p (rinsn)); - } - rtx new_pat = gen_vsetvl_pat (rinsn, info, vl); - change_insn (rinsn, new_pat); -} + if (real_p && phi_p && param_p) + return get_all_sets (safe_dyn_cast<phi_info *> (set), + REAL_SET | PHI_SET | BB_HEAD_SET | BB_END_SET); -static bool -avl_source_has_vsetvl_p (set_info *avl_source) -{ - if (!avl_source) - return false; - if (!avl_source->insn ()) - return false; - if (avl_source->insn ()->is_real ()) - return vsetvl_insn_p (avl_source->insn ()->rtl ()); - hash_set<set_info *> sets = get_all_sets (avl_source, true, false, true); - for (const auto set : sets) - { - if (set->insn ()->is_real () && vsetvl_insn_p (set->insn ()->rtl ())) - return true; - } - return false; + else if (real_p && param_p) + return get_all_sets (safe_dyn_cast<phi_info *> (set), + REAL_SET | BB_HEAD_SET | BB_END_SET); + + else if (real_p) + return get_all_sets (safe_dyn_cast<phi_info *> (set), REAL_SET); + return hash_set<set_info *> (); } static bool @@ -959,93 +619,14 @@ source_equal_p (insn_info *insn1, insn_info *insn2) rtx_insn *rinsn2 = insn2->rtl (); if (!rinsn1 || !rinsn2) return false; + rtx note1 = find_reg_equal_equiv_note (rinsn1); rtx note2 = find_reg_equal_equiv_note (rinsn2); - rtx single_set1 = single_set (rinsn1); - rtx single_set2 = single_set (rinsn2); - if (read_vl_insn_p (rinsn1) && read_vl_insn_p (rinsn2)) - { - const insn_info *load1 = get_backward_fault_first_load_insn (insn1); - const insn_info *load2 = get_backward_fault_first_load_insn (insn2); - return load1 && load2 && load1 == load2; - } - if (note1 && note2 && rtx_equal_p (note1, note2)) return true; - - /* Since vsetvl instruction is not single SET. - We handle this case specially here. */ - if (vsetvl_insn_p (insn1->rtl ()) && vsetvl_insn_p (insn2->rtl ())) - { - /* For example: - vsetvl1 a6,a5,e32m1 - RVV 1 (use a6 as AVL) - vsetvl2 a5,a5,e8mf4 - RVV 2 (use a5 as AVL) - We consider AVL of RVV 1 and RVV 2 are same so that we can - gain more optimization opportunities. - - Note: insn1_info.compatible_avl_p (insn2_info) - will make sure there is no instruction between vsetvl1 and vsetvl2 - modify a5 since their def will be different if there is instruction - modify a5 and compatible_avl_p will return false. */ - vector_insn_info insn1_info, insn2_info; - insn1_info.parse_insn (insn1); - insn2_info.parse_insn (insn2); - - /* To avoid dead loop, we don't optimize a vsetvli def has vsetvli - instructions which will complicate the situation. 
*/ - if (avl_source_has_vsetvl_p (insn1_info.get_avl_source ()) - || avl_source_has_vsetvl_p (insn2_info.get_avl_source ())) - return false; - - if (insn1_info.same_vlmax_p (insn2_info) - && insn1_info.compatible_avl_p (insn2_info)) - return true; - } - - /* We only handle AVL is set by instructions with no side effects. */ - if (!single_set1 || !single_set2) - return false; - if (!rtx_equal_p (SET_SRC (single_set1), SET_SRC (single_set2))) - return false; - /* RTL_SSA uses include REG_NOTE. Consider this following case: - - insn1 RTL: - (insn 41 39 42 4 (set (reg:DI 26 s10 [orig:159 loop_len_46 ] [159]) - (umin:DI (reg:DI 15 a5 [orig:201 _149 ] [201]) - (reg:DI 14 a4 [276]))) 408 {*umindi3} - (expr_list:REG_EQUAL (umin:DI (reg:DI 15 a5 [orig:201 _149 ] [201]) - (const_int 2 [0x2])) - (nil))) - The RTL_SSA uses of this instruction has 2 uses: - 1. (reg:DI 15 a5 [orig:201 _149 ] [201]) - twice. - 2. (reg:DI 14 a4 [276]) - once. - - insn2 RTL: - (insn 38 353 351 4 (set (reg:DI 27 s11 [orig:160 loop_len_47 ] [160]) - (umin:DI (reg:DI 15 a5 [orig:199 _146 ] [199]) - (reg:DI 14 a4 [276]))) 408 {*umindi3} - (expr_list:REG_EQUAL (umin:DI (reg:DI 28 t3 [orig:200 ivtmp_147 ] [200]) - (const_int 2 [0x2])) - (nil))) - The RTL_SSA uses of this instruction has 3 uses: - 1. (reg:DI 15 a5 [orig:199 _146 ] [199]) - once - 2. (reg:DI 14 a4 [276]) - once - 3. (reg:DI 28 t3 [orig:200 ivtmp_147 ] [200]) - once - - Return false when insn1->uses ().size () != insn2->uses ().size () - */ - if (insn1->uses ().size () != insn2->uses ().size ()) - return false; - for (size_t i = 0; i < insn1->uses ().size (); i++) - if (insn1->uses ()[i] != insn2->uses ()[i]) - return false; - return true; + return false; } -/* Helper function to get single same real RTL source. - return NULL if it is not a single real RTL source. */ static insn_info * extract_single_source (set_info *set) { @@ -1066,2068 +647,1933 @@ extract_single_source (set_info *set) NULL so that VSETVL PASS will insert vsetvl directly. 
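With the special cases stripped out above, source_equal_p now keys purely on the
REG_EQUAL/REG_EQUIV notes of the two defining instructions.  A minimal standalone
restatement of that check, in the style of this file (illustrative name, not part
of the patch):

   static bool
   equiv_notes_agree_p (rtx_insn *a, rtx_insn *b)
   {
     /* Both insns must carry a REG_EQUAL/REG_EQUIV note and the two notes
        must be structurally identical.  */
     rtx note_a = find_reg_equal_equiv_note (a);
     rtx note_b = find_reg_equal_equiv_note (b);
     return note_a && note_b && rtx_equal_p (note_a, note_b);
   }

extract_single_source then only reports a single source when every other
reaching definition of the AVL passes this comparison against the first one.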
*/ if (set->insn ()->is_artificial ()) return nullptr; - if (!source_equal_p (set->insn (), first_insn)) + if (set != *sets.begin () && !source_equal_p (set->insn (), first_insn)) return nullptr; } return first_insn; } -static unsigned -calculate_sew (vlmul_type vlmul, unsigned int ratio) -{ - for (const unsigned sew : ALL_SEW) - if (calculate_ratio (sew, vlmul) == ratio) - return sew; - return 0; -} - -static vlmul_type -calculate_vlmul (unsigned int sew, unsigned int ratio) -{ - for (const vlmul_type vlmul : ALL_LMUL) - if (calculate_ratio (sew, vlmul) == ratio) - return vlmul; - return LMUL_RESERVED; -} - -static bool -incompatible_avl_p (const vector_insn_info &info1, - const vector_insn_info &info2) -{ - return !info1.compatible_avl_p (info2) && !info2.compatible_avl_p (info1); -} - -static bool -different_sew_p (const vector_insn_info &info1, const vector_insn_info &info2) -{ - return info1.get_sew () != info2.get_sew (); -} - -static bool -different_lmul_p (const vector_insn_info &info1, const vector_insn_info &info2) -{ - return info1.get_vlmul () != info2.get_vlmul (); -} - static bool -different_ratio_p (const vector_insn_info &info1, const vector_insn_info &info2) +same_equiv_note_p (set_info *set1, set_info *set2) { - return info1.get_ratio () != info2.get_ratio (); + insn_info *insn1 = extract_single_source (set1); + insn_info *insn2 = extract_single_source (set2); + if (!insn1 || !insn2) + return false; + return source_equal_p (insn1, insn2); } -static bool -different_tail_policy_p (const vector_insn_info &info1, - const vector_insn_info &info2) +static unsigned +get_expr_id (unsigned bb_index, unsigned regno, unsigned num_bbs) { - return info1.get_ta () != info2.get_ta (); + return regno * num_bbs + bb_index; } - -static bool -different_mask_policy_p (const vector_insn_info &info1, - const vector_insn_info &info2) +static unsigned +get_regno (unsigned expr_id, unsigned num_bb) { - return info1.get_ma () != info2.get_ma (); + return expr_id / num_bb; } - -static bool -possible_zero_avl_p (const vector_insn_info &info1, - const vector_insn_info &info2) +static unsigned +get_bb_index (unsigned expr_id, unsigned num_bb) { - return !info1.has_non_zero_avl () || !info2.has_non_zero_avl (); + return expr_id % num_bb; } +/* Return true if the SET result is not used by any instructions. 
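The get_expr_id / get_regno / get_bb_index helpers above pack a (register,
basic block) pair into a single bitmap bit index for the AVL reaching-def
bitmaps used later in the pass.  A self-contained sanity check of that
arithmetic (the block count of 10 is made up for illustration):

   #include <cassert>

   /* Mirror of the packing above: expr_id = regno * num_bbs + bb_index.  */
   static unsigned
   expr_id_sketch (unsigned bb_index, unsigned regno, unsigned num_bbs)
   {
     return regno * num_bbs + bb_index;
   }

   static void
   check_expr_id_roundtrip ()
   {
     unsigned id = expr_id_sketch (3, 5, 10);
     assert (id == 53);        /* register 5 in bb 3 -> bit 53  */
     assert (id / 10 == 5);    /* get_regno recovers the register  */
     assert (id % 10 == 3);    /* get_bb_index recovers the block  */
   }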
*/ static bool -second_ratio_invalid_for_first_sew_p (const vector_insn_info &info1, - const vector_insn_info &info2) +has_no_uses (basic_block cfg_bb, rtx_insn *rinsn, int regno) { - return calculate_vlmul (info1.get_sew (), info2.get_ratio ()) - == LMUL_RESERVED; -} + if (bitmap_bit_p (df_get_live_out (cfg_bb), regno)) + return false; -static bool -second_ratio_invalid_for_first_lmul_p (const vector_insn_info &info1, - const vector_insn_info &info2) -{ - return calculate_sew (info1.get_vlmul (), info2.get_ratio ()) == 0; -} + rtx_insn *iter; + for (iter = NEXT_INSN (rinsn); iter && iter != NEXT_INSN (BB_END (cfg_bb)); + iter = NEXT_INSN (iter)) + if (df_find_use (iter, regno_reg_rtx[regno])) + return false; -static bool -float_insn_valid_sew_p (const vector_insn_info &info, unsigned int sew) -{ - if (info.get_insn () && info.get_insn ()->is_real () - && get_attr_type (info.get_insn ()->rtl ()) == TYPE_VFMOVFV) - { - if (sew == 16) - return TARGET_VECTOR_ELEN_FP_16; - else if (sew == 32) - return TARGET_VECTOR_ELEN_FP_32; - else if (sew == 64) - return TARGET_VECTOR_ELEN_FP_64; - } return true; } -static bool -second_sew_less_than_first_sew_p (const vector_insn_info &info1, - const vector_insn_info &info2) -{ - return info2.get_sew () < info1.get_sew () - || !float_insn_valid_sew_p (info1, info2.get_sew ()); -} - -static bool -first_sew_less_than_second_sew_p (const vector_insn_info &info1, - const vector_insn_info &info2) +/* Change insn and Assert the change always happens. */ +static void +validate_change_or_fail (rtx object, rtx *loc, rtx new_rtx, bool in_group) { - return info1.get_sew () < info2.get_sew () - || !float_insn_valid_sew_p (info2, info1.get_sew ()); + bool change_p = validate_change (object, loc, new_rtx, in_group); + gcc_assert (change_p); } -/* return 0 if LMUL1 == LMUL2. - return -1 if LMUL1 < LMUL2. - return 1 if LMUL1 > LMUL2. */ -static int -compare_lmul (vlmul_type vlmul1, vlmul_type vlmul2) -{ - if (vlmul1 == vlmul2) - return 0; - - switch (vlmul1) - { - case LMUL_1: - if (vlmul2 == LMUL_2 || vlmul2 == LMUL_4 || vlmul2 == LMUL_8) - return 1; - else - return -1; - case LMUL_2: - if (vlmul2 == LMUL_4 || vlmul2 == LMUL_8) - return 1; - else - return -1; - case LMUL_4: - if (vlmul2 == LMUL_8) - return 1; - else - return -1; - case LMUL_8: - return -1; - case LMUL_F2: - if (vlmul2 == LMUL_1 || vlmul2 == LMUL_2 || vlmul2 == LMUL_4 - || vlmul2 == LMUL_8) - return 1; - else - return -1; - case LMUL_F4: - if (vlmul2 == LMUL_F2 || vlmul2 == LMUL_1 || vlmul2 == LMUL_2 - || vlmul2 == LMUL_4 || vlmul2 == LMUL_8) - return 1; - else - return -1; - case LMUL_F8: - return 0; - default: - gcc_unreachable (); - } -} +/* This flags indicates the minimum demand of the vl and vtype values by the + RVV instruction. For example, DEMAND_RATIO_P indicates that this RVV + instruction only needs the SEW/LMUL ratio to remain the same, and does not + require SEW and LMUL to be fixed. + Therefore, if the former RVV instruction needs DEMAND_RATIO_P and the latter + instruction needs DEMAND_SEW_LMUL_P and its SEW/LMUL is the same as that of + the former instruction, then we can make the minimu demand of the former + instruction strict to DEMAND_SEW_LMUL_P, and its required SEW and LMUL are + the SEW and LMUL of the latter instruction, and the vsetvl instruction + generated according to the new demand can also be used for the latter + instruction, so there is no need to insert a separate vsetvl instruction for + the latter instruction. 
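A worked example of how the bits defined just below combine, based on the way
parse_insn further down classifies instructions: a plain RVV arithmetic insn
whose ratio attribute is valid, whose tail policy is undisturbed (tu) and which
takes a register AVL collects

   unsigned dflags = DEMAND_RATIO_P | DEMAND_TAIL_POLICY_P | DEMAND_AVL_P;

and normalize_demand () records that as sew_lmul_demand_type::ratio_only,
policy_demand_type::tail_policy_only and avl_demand_type::avl.  A later
instruction that needs the full SEW/LMUL pair with the same ratio can then be
fused with it instead of getting its own vsetvl.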
*/ +enum demand_flags : unsigned +{ + DEMAND_EMPTY_P = 0, + DEMAND_SEW_P = 1 << 0, + DEMAND_LMUL_P = 1 << 1, + DEMAND_RATIO_P = 1 << 2, + DEMAND_GE_SEW_P = 1 << 3, + DEMAND_TAIL_POLICY_P = 1 << 4, + DEMAND_MASK_POLICY_P = 1 << 5, + DEMAND_AVL_P = 1 << 6, + DEMAND_NON_ZERO_AVL_P = 1 << 7, +}; -static bool -second_lmul_less_than_first_lmul_p (const vector_insn_info &info1, - const vector_insn_info &info2) -{ - return compare_lmul (info2.get_vlmul (), info1.get_vlmul ()) == -1; -} +/* We split the demand information into three parts. They are sew and lmul + related (sew_lmul_demand_type), tail and mask policy related + (policy_demand_type) and avl related (avl_demand_type). Then we define three + interfaces avaiable_with, compatible_p and merge. avaiable_with is + used to determine whether the two vsetvl infos prev_info and next_info are + available or not. If prev_info is available for next_info, it means that the + RVV insn corresponding to next_info on the path from prev_info to next_info + can be used without inserting a separate vsetvl instruction. compatible_p + is used to determine whether prev_info is compatible with next_info, and if + so, merge can be used to merge the stricter demand information from + next_info into prev_info so that prev_info becomes available to next_info. + */ -static bool -second_ratio_less_than_first_ratio_p (const vector_insn_info &info1, - const vector_insn_info &info2) +enum class sew_lmul_demand_type : unsigned { - return info2.get_ratio () < info1.get_ratio (); -} - -static CONSTEXPR const demands_cond incompatible_conds[] = { -#define DEF_INCOMPATIBLE_COND(AVL1, SEW1, LMUL1, RATIO1, NONZERO_AVL1, \ - GE_SEW1, TAIL_POLICTY1, MASK_POLICY1, AVL2, \ - SEW2, LMUL2, RATIO2, NONZERO_AVL2, GE_SEW2, \ - TAIL_POLICTY2, MASK_POLICY2, COND) \ - {{{AVL1, SEW1, LMUL1, RATIO1, NONZERO_AVL1, GE_SEW1, TAIL_POLICTY1, \ - MASK_POLICY1}, \ - {AVL2, SEW2, LMUL2, RATIO2, NONZERO_AVL2, GE_SEW2, TAIL_POLICTY2, \ - MASK_POLICY2}}, \ - COND}, -#include "riscv-vsetvl.def" + sew_lmul = demand_flags::DEMAND_SEW_P | demand_flags::DEMAND_LMUL_P, + ratio_only = demand_flags::DEMAND_RATIO_P, + sew_only = demand_flags::DEMAND_SEW_P, + ge_sew = demand_flags::DEMAND_GE_SEW_P, + ratio_and_ge_sew + = demand_flags::DEMAND_RATIO_P | demand_flags::DEMAND_GE_SEW_P, }; -static unsigned -greatest_sew (const vector_insn_info &info1, const vector_insn_info &info2) +enum class policy_demand_type : unsigned { - return std::max (info1.get_sew (), info2.get_sew ()); -} - -static unsigned -first_sew (const vector_insn_info &info1, const vector_insn_info &) -{ - return info1.get_sew (); -} + tail_mask_policy + = demand_flags::DEMAND_TAIL_POLICY_P | demand_flags::DEMAND_MASK_POLICY_P, + tail_policy_only = demand_flags::DEMAND_TAIL_POLICY_P, + mask_policy_only = demand_flags::DEMAND_MASK_POLICY_P, + ignore_policy = demand_flags::DEMAND_EMPTY_P, +}; -static unsigned -second_sew (const vector_insn_info &, const vector_insn_info &info2) +enum class avl_demand_type : unsigned { - return info2.get_sew (); -} + avl = demand_flags::DEMAND_AVL_P, + non_zero_avl = demand_flags::DEMAND_NON_ZERO_AVL_P, + ignore_avl = demand_flags::DEMAND_EMPTY_P, +}; -static vlmul_type -first_vlmul (const vector_insn_info &info1, const vector_insn_info &) +class vsetvl_info { - return info1.get_vlmul (); -} +private: + insn_info *m_insn; + bb_info *m_bb; + rtx m_avl; + rtx m_vl; + set_info *m_avl_def; + uint8_t m_sew; + uint8_t m_max_sew; + vlmul_type m_vlmul; + uint8_t m_ratio; + bool m_ta; + bool m_ma; + + sew_lmul_demand_type 
m_sew_lmul_demand; + policy_demand_type m_policy_demand; + avl_demand_type m_avl_demand; + + enum class state_type + { + UNINITIALIZED, + VALID, + UNKNOWN, + EMPTY, + }; + state_type m_state; + + bool m_delete; + bool m_change_vtype_only; + insn_info *m_read_vl_insn; + bool m_vl_used_by_non_rvv_insn; -static vlmul_type -second_vlmul (const vector_insn_info &, const vector_insn_info &info2) -{ - return info2.get_vlmul (); -} +public: + vsetvl_info () + : m_insn (nullptr), m_bb (nullptr), m_avl (NULL_RTX), m_vl (NULL_RTX), + m_avl_def (nullptr), m_sew (0), m_max_sew (0), m_vlmul (LMUL_RESERVED), + m_ratio (0), m_ta (false), m_ma (false), + m_sew_lmul_demand (sew_lmul_demand_type::sew_lmul), + m_policy_demand (policy_demand_type::tail_mask_policy), + m_avl_demand (avl_demand_type::avl), m_state (state_type::UNINITIALIZED), + m_delete (false), m_change_vtype_only (false), m_read_vl_insn (nullptr), + m_vl_used_by_non_rvv_insn (false) + {} + + vsetvl_info (insn_info *insn) : vsetvl_info () { parse_insn (insn); } + + vsetvl_info (rtx_insn *insn) : vsetvl_info () { parse_insn (insn); } + + void set_avl (rtx avl) { m_avl = avl; } + void set_vl (rtx vl) { m_vl = vl; } + void set_avl_def (set_info *avl_def) { m_avl_def = avl_def; } + void set_sew (uint8_t sew) { m_sew = sew; } + void set_vlmul (vlmul_type vlmul) { m_vlmul = vlmul; } + void set_ratio (uint8_t ratio) { m_ratio = ratio; } + void set_ta (bool ta) { m_ta = ta; } + void set_ma (bool ma) { m_ma = ma; } + void set_delete () { m_delete = true; } + void set_bb (bb_info *bb) { m_bb = bb; } + void set_max_sew (uint8_t max_sew) { m_max_sew = max_sew; } + void set_change_vtype_only () { m_change_vtype_only = true; } + void set_read_vl_insn (insn_info *insn) { m_read_vl_insn = insn; } + + rtx get_avl () const { return m_avl; } + rtx get_vl () const { return m_vl; } + set_info *get_avl_def () const { return m_avl_def; } + uint8_t get_sew () const { return m_sew; } + vlmul_type get_vlmul () const { return m_vlmul; } + uint8_t get_ratio () const { return m_ratio; } + bool get_ta () const { return m_ta; } + bool get_ma () const { return m_ma; } + insn_info *get_insn () const { return m_insn; } + bool delete_p () const { return m_delete; } + bb_info *get_bb () const { return m_bb; } + uint8_t get_max_sew () const { return m_max_sew; } + insn_info *get_read_vl_insn () const { return m_read_vl_insn; } + bool vl_use_by_non_rvv_insn_p () const { return m_vl_used_by_non_rvv_insn; } + + bool has_imm_avl () const { return m_avl && CONST_INT_P (m_avl); } + bool has_vlmax_avl () const { return vlmax_avl_p (m_avl); } + bool has_nonvlmax_reg_avl () const + { + return m_avl && REG_P (m_avl) && !has_vlmax_avl (); + } + bool has_non_zero_avl () const + { + if (has_imm_avl ()) + return INTVAL (m_avl) > 0; + return has_vlmax_avl (); + } + bool has_vl () const + { + /* The VL operand can only be either a NULL_RTX or a register. */ + gcc_assert (!m_vl || REG_P (m_vl)); + return m_vl != NULL_RTX; + } + bool has_same_ratio (const vsetvl_info &other) const + { + return get_ratio () == other.get_ratio (); + } + + /* The block of INSN isn't always same as the block of the VSETVL_INFO, + meaning we may have 'get_insn ()->bb () != get_bb ()'. + + E.g. BB 2 (Empty) ---> BB 3 (VALID, has rvv insn 1) + + BB 2 has empty VSETVL_INFO, wheras BB 3 has VSETVL_INFO that satisfies + get_insn ()->bb () == get_bb (). 
In earliest fusion, we may fuse bb 3 and + bb 2 so that the 'get_bb ()' of BB2 VSETVL_INFO will be BB2 wheras the + 'get_insn ()' of BB2 VSETVL INFO will be the rvv insn 1 (which is located + at BB3). */ + bool insn_inside_bb_p () const { return get_insn ()->bb () == get_bb (); } + void update_avl (const vsetvl_info &other) + { + m_avl = other.get_avl (); + m_vl = other.get_vl (); + m_avl_def = other.get_avl_def (); + } + + bool uninit_p () const { return m_state == state_type::UNINITIALIZED; } + bool valid_p () const { return m_state == state_type::VALID; } + bool unknown_p () const { return m_state == state_type::UNKNOWN; } + bool empty_p () const { return m_state == state_type::EMPTY; } + bool change_vtype_only_p () const { return m_change_vtype_only; } + + void set_valid () { m_state = state_type::VALID; } + void set_unknown () { m_state = state_type::UNKNOWN; } + void set_empty () { m_state = state_type::EMPTY; } + + void set_sew_lmul_demand (sew_lmul_demand_type demand) + { + m_sew_lmul_demand = demand; + } + void set_policy_demand (policy_demand_type demand) + { + m_policy_demand = demand; + } + void set_avl_demand (avl_demand_type demand) { m_avl_demand = demand; } + + sew_lmul_demand_type get_sew_lmul_demand () const + { + return m_sew_lmul_demand; + } + policy_demand_type get_policy_demand () const { return m_policy_demand; } + avl_demand_type get_avl_demand () const { return m_avl_demand; } + + void normalize_demand (unsigned demand_flags) + { + switch (demand_flags + & (DEMAND_SEW_P | DEMAND_LMUL_P | DEMAND_RATIO_P | DEMAND_GE_SEW_P)) + { + case (unsigned) sew_lmul_demand_type::sew_lmul: + m_sew_lmul_demand = sew_lmul_demand_type::sew_lmul; + break; + case (unsigned) sew_lmul_demand_type::ratio_only: + m_sew_lmul_demand = sew_lmul_demand_type::ratio_only; + break; + case (unsigned) sew_lmul_demand_type::sew_only: + m_sew_lmul_demand = sew_lmul_demand_type::sew_only; + break; + case (unsigned) sew_lmul_demand_type::ge_sew: + m_sew_lmul_demand = sew_lmul_demand_type::ge_sew; + break; + case (unsigned) sew_lmul_demand_type::ratio_and_ge_sew: + m_sew_lmul_demand = sew_lmul_demand_type::ratio_and_ge_sew; + break; + default: + gcc_unreachable (); + } + + switch (demand_flags & (DEMAND_TAIL_POLICY_P | DEMAND_MASK_POLICY_P)) + { + case (unsigned) policy_demand_type::tail_mask_policy: + m_policy_demand = policy_demand_type::tail_mask_policy; + break; + case (unsigned) policy_demand_type::tail_policy_only: + m_policy_demand = policy_demand_type::tail_policy_only; + break; + case (unsigned) policy_demand_type::mask_policy_only: + m_policy_demand = policy_demand_type::mask_policy_only; + break; + case (unsigned) policy_demand_type::ignore_policy: + m_policy_demand = policy_demand_type::ignore_policy; + break; + default: + gcc_unreachable (); + } + + switch (demand_flags & (DEMAND_AVL_P | DEMAND_NON_ZERO_AVL_P)) + { + case (unsigned) avl_demand_type::avl: + m_avl_demand = avl_demand_type::avl; + break; + case (unsigned) avl_demand_type::non_zero_avl: + m_avl_demand = avl_demand_type::non_zero_avl; + break; + case (unsigned) avl_demand_type::ignore_avl: + m_avl_demand = avl_demand_type::ignore_avl; + break; + default: + gcc_unreachable (); + } + } + + void parse_insn (rtx_insn *rinsn) + { + if (!NONDEBUG_INSN_P (rinsn)) + return; + if (optimize == 0 && !has_vtype_op (rinsn)) + return; + gcc_assert (!vsetvl_discard_result_insn_p (rinsn)); + set_valid (); + extract_insn_cached (rinsn); + m_avl = ::get_avl (rinsn); + if (has_vlmax_avl () || vsetvl_insn_p (rinsn)) + m_vl = ::get_vl (rinsn); + 
m_sew = ::get_sew (rinsn); + m_vlmul = ::get_vlmul (rinsn); + m_ta = tail_agnostic_p (rinsn); + m_ma = mask_agnostic_p (rinsn); + } + + void parse_insn (insn_info *insn) + { + m_insn = insn; + m_bb = insn->bb (); + /* Return if it is debug insn for the consistency with optimize == 0. */ + if (insn->is_debug_insn ()) + return; -static unsigned -first_ratio (const vector_insn_info &info1, const vector_insn_info &) -{ - return info1.get_ratio (); -} + /* We set it as unknown since we don't what will happen in CALL or ASM. */ + if (insn->is_call () || insn->is_asm ()) + { + set_unknown (); + return; + } + + /* If this is something that updates VL/VTYPE that we don't know about, set + the state to unknown. */ + if (!vector_config_insn_p (insn->rtl ()) && !has_vtype_op (insn->rtl ()) + && (find_access (insn->defs (), VL_REGNUM) + || find_access (insn->defs (), VTYPE_REGNUM))) + { + set_unknown (); + return; + } + + if (!vector_config_insn_p (insn->rtl ()) && !has_vtype_op (insn->rtl ())) + /* uninitialized */ + return; -static unsigned -second_ratio (const vector_insn_info &, const vector_insn_info &info2) -{ - return info2.get_ratio (); -} + set_valid (); + + m_avl = ::get_avl (insn->rtl ()); + if (m_avl) + { + if (vsetvl_insn_p (insn->rtl ()) || has_vlmax_avl ()) + m_vl = ::get_vl (insn->rtl ()); + + if (has_nonvlmax_reg_avl ()) + m_avl_def = find_access (insn->uses (), REGNO (m_avl))->def (); + } + + m_sew = ::get_sew (insn->rtl ()); + m_vlmul = ::get_vlmul (insn->rtl ()); + m_ratio = get_attr_ratio (insn->rtl ()); + /* when get_attr_ratio is invalid, this kind of instructions + doesn't care about ratio. However, we still need this value + in demand info backward analysis. */ + if (m_ratio == INVALID_ATTRIBUTE) + m_ratio = calculate_ratio (m_sew, m_vlmul); + m_ta = tail_agnostic_p (insn->rtl ()); + m_ma = mask_agnostic_p (insn->rtl ()); + + /* If merge operand is undef value, we prefer agnostic. */ + int merge_op_idx = get_attr_merge_op_idx (insn->rtl ()); + if (merge_op_idx != INVALID_ATTRIBUTE + && satisfies_constraint_vu (recog_data.operand[merge_op_idx])) + { + m_ta = true; + m_ma = true; + } + + /* Determine the demand info of the RVV insn. */ + m_max_sew = get_max_int_sew (); + unsigned demand_flags = 0; + if (vector_config_insn_p (insn->rtl ())) + { + demand_flags |= demand_flags::DEMAND_AVL_P; + demand_flags |= demand_flags::DEMAND_RATIO_P; + } + else + { + if (has_vl_op (insn->rtl ())) + { + if (scalar_move_insn_p (insn->rtl ())) + { + /* If the avl for vmv.s.x comes from the vsetvl instruction, we + don't know if the avl is non-zero, so it is set to + DEMAND_AVL_P for now. it may be corrected to + DEMAND_NON_ZERO_AVL_P later when more information is + available. + */ + if (has_non_zero_avl ()) + demand_flags |= demand_flags::DEMAND_NON_ZERO_AVL_P; + else + demand_flags |= demand_flags::DEMAND_AVL_P; + } + else + demand_flags |= demand_flags::DEMAND_AVL_P; + } -static vlmul_type -vlmul_for_first_sew_second_ratio (const vector_insn_info &info1, - const vector_insn_info &info2) -{ - return calculate_vlmul (info1.get_sew (), info2.get_ratio ()); -} + if (get_attr_ratio (insn->rtl ()) != INVALID_ATTRIBUTE) + demand_flags |= demand_flags::DEMAND_RATIO_P; + else + { + if (scalar_move_insn_p (insn->rtl ()) && m_ta) + { + demand_flags |= demand_flags::DEMAND_GE_SEW_P; + m_max_sew = get_attr_type (insn->rtl ()) == TYPE_VFMOVFV + ? 
get_max_float_sew () + : get_max_int_sew (); + } + else + demand_flags |= demand_flags::DEMAND_SEW_P; + + if (!ignore_vlmul_insn_p (insn->rtl ())) + demand_flags |= demand_flags::DEMAND_LMUL_P; + } -static vlmul_type -vlmul_for_greatest_sew_second_ratio (const vector_insn_info &info1, - const vector_insn_info &info2) -{ - return calculate_vlmul (MAX (info1.get_sew (), info2.get_sew ()), - info2.get_ratio ()); -} + if (!m_ta) + demand_flags |= demand_flags::DEMAND_TAIL_POLICY_P; + if (!m_ma) + demand_flags |= demand_flags::DEMAND_MASK_POLICY_P; + } + + normalize_demand (demand_flags); + + /* Optimize AVL from the vsetvl instruction. */ + insn_info *def_insn = extract_single_source (get_avl_def ()); + if (def_insn && vsetvl_insn_p (def_insn->rtl ())) + { + vsetvl_info def_info = vsetvl_info (def_insn); + if ((scalar_move_insn_p (insn->rtl ()) + || def_info.get_ratio () == get_ratio ()) + && (def_info.has_vlmax_avl () || def_info.has_imm_avl ())) + { + update_avl (def_info); + if (scalar_move_insn_p (insn->rtl ()) && has_non_zero_avl ()) + m_avl_demand = avl_demand_type::non_zero_avl; + } + } + + /* Determine if dest operand(vl) has been used by non-RVV instructions. */ + if (has_vl ()) + { + const hash_set<use_info *> vl_uses + = get_all_real_uses (get_insn (), REGNO (get_vl ())); + for (use_info *use : vl_uses) + { + gcc_assert (use->insn ()->is_real ()); + rtx_insn *rinsn = use->insn ()->rtl (); + if (!has_vl_op (rinsn) + || count_regno_occurrences (rinsn, REGNO (get_vl ())) != 1) + { + m_vl_used_by_non_rvv_insn = true; + break; + } + rtx avl = ::get_avl (rinsn); + if (!avl || REGNO (get_vl ()) != REGNO (avl)) + { + m_vl_used_by_non_rvv_insn = true; + break; + } + } + } -static unsigned -ratio_for_second_sew_first_vlmul (const vector_insn_info &info1, - const vector_insn_info &info2) -{ - return calculate_ratio (info2.get_sew (), info1.get_vlmul ()); -} + /* Collect the read vl insn for the fault-only-first rvv loads. */ + if (fault_first_load_p (insn->rtl ())) + { + for (insn_info *i = insn->next_nondebug_insn (); + i->bb () == insn->bb (); i = i->next_nondebug_insn ()) + { + if (find_access (i->defs (), VL_REGNUM)) + break; + if (i->rtl () && read_vl_insn_p (i->rtl ())) + { + m_read_vl_insn = i; + break; + } + } + } + } + + /* Returns the corresponding vsetvl rtx pat. 
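The three generators chosen below correspond, roughly, to the three assembly
forms of the configuration instruction (operands here are illustrative and
assume the usual RVV mnemonics rather than anything spelled out in this patch):

   gen_vsetvl                     vsetvli  a2, a3, e32, m2, ta, ma      (new vl written to a register)
   gen_vsetvl_discard_result      vsetvli  zero, a3, e32, m2, ta, ma    (vl/vtype updated, rd discarded)
   gen_vsetvl_vtype_change_only   vsetvli  zero, zero, e32, m2, ta, ma  (vtype changed, current vl kept)

An immediate AVL is normally emitted as vsetivli instead, and the zero, zero
form is only correct when the current vl stays legal under the new vtype, which
is the situation the change_vtype_only flag is meant to mark.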
*/ + rtx get_vsetvl_pat (bool ignore_vl = false) const + { + rtx avl = get_avl (); + /* if optimization == 0 and the instruction is vmv.x.s/vfmv.f.s, + set the value of avl to (const_int 0) so that VSETVL PASS will + insert vsetvl correctly.*/ + if (!get_avl ()) + avl = GEN_INT (0); + rtx sew = gen_int_mode (get_sew (), Pmode); + rtx vlmul = gen_int_mode (get_vlmul (), Pmode); + rtx ta = gen_int_mode (get_ta (), Pmode); + rtx ma = gen_int_mode (get_ma (), Pmode); + + if (change_vtype_only_p ()) + return gen_vsetvl_vtype_change_only (sew, vlmul, ta, ma); + else if (has_vl () && !ignore_vl) + return gen_vsetvl (Pmode, get_vl (), avl, sew, vlmul, ta, ma); + else + return gen_vsetvl_discard_result (Pmode, avl, sew, vlmul, ta, ma); + } + + bool operator== (const vsetvl_info &other) const + { + gcc_assert (!uninit_p () && !other.uninit_p () + && "Uninitialization should not happen"); + + if (empty_p ()) + return other.empty_p (); + if (unknown_p ()) + return other.unknown_p (); + + return get_insn () == other.get_insn () && get_bb () == other.get_bb () + && get_avl () == other.get_avl () && get_vl () == other.get_vl () + && get_avl_def () == other.get_avl_def () + && get_sew () == other.get_sew () + && get_vlmul () == other.get_vlmul () && get_ta () == other.get_ta () + && get_ma () == other.get_ma () + && get_avl_demand () == other.get_avl_demand () + && get_sew_lmul_demand () == other.get_sew_lmul_demand () + && get_policy_demand () == other.get_policy_demand (); + } + + void dump (FILE *file, const char *indent = "") const + { + if (uninit_p ()) + { + fprintf (file, "UNINITIALIZED.\n"); + return; + } + else if (unknown_p ()) + { + fprintf (file, "UNKNOWN.\n"); + return; + } + else if (empty_p ()) + { + fprintf (file, "EMPTY.\n"); + return; + } + else if (valid_p ()) + fprintf (file, "VALID (insn %u, bb %u)%s\n", get_insn ()->uid (), + get_bb ()->index (), delete_p () ? 
" (deleted)" : ""); + else + gcc_unreachable (); -static CONSTEXPR const demands_fuse_rule fuse_rules[] = { -#define DEF_SEW_LMUL_FUSE_RULE(DEMAND_SEW1, DEMAND_LMUL1, DEMAND_RATIO1, \ - DEMAND_GE_SEW1, DEMAND_SEW2, DEMAND_LMUL2, \ - DEMAND_RATIO2, DEMAND_GE_SEW2, NEW_DEMAND_SEW, \ - NEW_DEMAND_LMUL, NEW_DEMAND_RATIO, \ - NEW_DEMAND_GE_SEW, NEW_SEW, NEW_VLMUL, \ - NEW_RATIO) \ - {{{DEMAND_ANY, DEMAND_SEW1, DEMAND_LMUL1, DEMAND_RATIO1, DEMAND_ANY, \ - DEMAND_GE_SEW1, DEMAND_ANY, DEMAND_ANY}, \ - {DEMAND_ANY, DEMAND_SEW2, DEMAND_LMUL2, DEMAND_RATIO2, DEMAND_ANY, \ - DEMAND_GE_SEW2, DEMAND_ANY, DEMAND_ANY}}, \ - NEW_DEMAND_SEW, \ - NEW_DEMAND_LMUL, \ - NEW_DEMAND_RATIO, \ - NEW_DEMAND_GE_SEW, \ - NEW_SEW, \ - NEW_VLMUL, \ - NEW_RATIO}, -#include "riscv-vsetvl.def" + fprintf (file, "%sDemand fields:", indent); + if (m_sew_lmul_demand == sew_lmul_demand_type::sew_lmul) + fprintf (file, " demand_sew_lmul"); + else if (m_sew_lmul_demand == sew_lmul_demand_type::ratio_only) + fprintf (file, " demand_ratio_only"); + else if (m_sew_lmul_demand == sew_lmul_demand_type::sew_only) + fprintf (file, " demand_sew_only"); + else if (m_sew_lmul_demand == sew_lmul_demand_type::ge_sew) + fprintf (file, " demand_ge_sew"); + else if (m_sew_lmul_demand == sew_lmul_demand_type::ratio_and_ge_sew) + fprintf (file, " demand_ratio_and_ge_sew"); + + if (m_policy_demand == policy_demand_type::tail_mask_policy) + fprintf (file, " demand_tail_mask_policy"); + else if (m_policy_demand == policy_demand_type::tail_policy_only) + fprintf (file, " demand_tail_policy_only"); + else if (m_policy_demand == policy_demand_type::mask_policy_only) + fprintf (file, " demand_mask_policy_only"); + + if (m_avl_demand == avl_demand_type::avl) + fprintf (file, " demand_avl"); + else if (m_avl_demand == avl_demand_type::non_zero_avl) + fprintf (file, " demand_non_zero_avl"); + fprintf (file, "\n"); + + fprintf (file, "%sSEW=%d, ", indent, get_sew ()); + fprintf (file, "VLMUL=%s, ", vlmul_to_str (get_vlmul ())); + fprintf (file, "RATIO=%d, ", get_ratio ()); + fprintf (file, "MAX_SEW=%d\n", get_max_sew ()); + + fprintf (file, "%sTAIL_POLICY=%s, ", indent, policy_to_str (get_ta ())); + fprintf (file, "MASK_POLICY=%s\n", policy_to_str (get_ma ())); + + fprintf (file, "%sAVL=", indent); + print_rtl_single (file, get_avl ()); + fprintf (file, "%sVL=", indent); + print_rtl_single (file, get_vl ()); + if (change_vtype_only_p ()) + fprintf (file, "%schange vtype only\n", indent); + if (get_read_vl_insn ()) + fprintf (file, "%sread_vl_insn: insn %u\n", indent, + get_read_vl_insn ()->uid ()); + if (vl_use_by_non_rvv_insn_p ()) + fprintf (file, "%suse_by_non_rvv_insn=true\n", indent); + } }; -static bool -always_unavailable (const vector_insn_info &, const vector_insn_info &) -{ - return true; -} - -static bool -avl_unavailable_p (const vector_insn_info &info1, const vector_insn_info &info2) -{ - return !info2.compatible_avl_p (info1.get_avl_info ()); -} - -static bool -sew_unavailable_p (const vector_insn_info &info1, const vector_insn_info &info2) -{ - if (!info2.demand_p (DEMAND_LMUL) && !info2.demand_p (DEMAND_RATIO)) - { - if (info2.demand_p (DEMAND_GE_SEW)) - return info1.get_sew () < info2.get_sew (); - return info1.get_sew () != info2.get_sew (); - } - return true; -} - -static bool -lmul_unavailable_p (const vector_insn_info &info1, - const vector_insn_info &info2) -{ - if (info1.get_vlmul () == info2.get_vlmul () && !info2.demand_p (DEMAND_SEW) - && !info2.demand_p (DEMAND_RATIO)) - return false; - return true; -} - -static bool 
-ge_sew_unavailable_p (const vector_insn_info &info1, - const vector_insn_info &info2) -{ - if (!info2.demand_p (DEMAND_LMUL) && !info2.demand_p (DEMAND_RATIO) - && info2.demand_p (DEMAND_GE_SEW)) - return info1.get_sew () < info2.get_sew (); - return true; -} - -static bool -ge_sew_lmul_unavailable_p (const vector_insn_info &info1, - const vector_insn_info &info2) +class vsetvl_block_info { - if (!info2.demand_p (DEMAND_RATIO) && info2.demand_p (DEMAND_GE_SEW)) - return info1.get_sew () < info2.get_sew (); - return true; -} - -static bool -ge_sew_ratio_unavailable_p (const vector_insn_info &info1, - const vector_insn_info &info2) -{ - if (!info2.demand_p (DEMAND_LMUL)) - { - if (info2.demand_p (DEMAND_GE_SEW)) - return info1.get_sew () < info2.get_sew (); - /* Demand GE_SEW should be available for non-demand SEW. */ - else if (!info2.demand_p (DEMAND_SEW)) - return false; - } - return true; -} - -static CONSTEXPR const demands_cond unavailable_conds[] = { -#define DEF_UNAVAILABLE_COND(AVL1, SEW1, LMUL1, RATIO1, NONZERO_AVL1, GE_SEW1, \ - TAIL_POLICTY1, MASK_POLICY1, AVL2, SEW2, LMUL2, \ - RATIO2, NONZERO_AVL2, GE_SEW2, TAIL_POLICTY2, \ - MASK_POLICY2, COND) \ - {{{AVL1, SEW1, LMUL1, RATIO1, NONZERO_AVL1, GE_SEW1, TAIL_POLICTY1, \ - MASK_POLICY1}, \ - {AVL2, SEW2, LMUL2, RATIO2, NONZERO_AVL2, GE_SEW2, TAIL_POLICTY2, \ - MASK_POLICY2}}, \ - COND}, -#include "riscv-vsetvl.def" +public: + /* The static execute probability of the demand info. */ + profile_probability probability; + + auto_vec<vsetvl_info> local_infos; + vsetvl_info global_info; + bb_info *bb; + + bool full_available; + + vsetvl_block_info () : bb (nullptr), full_available (false) + { + local_infos.safe_grow_cleared (0); + global_info.set_empty (); + } + vsetvl_block_info (const vsetvl_block_info &other) + : probability (other.probability), local_infos (other.local_infos.copy ()), + global_info (other.global_info), bb (other.bb) + {} + + vsetvl_info &get_entry_info () + { + gcc_assert (!empty_p ()); + return local_infos.is_empty () ? global_info : local_infos[0]; + } + vsetvl_info &get_exit_info () + { + gcc_assert (!empty_p ()); + return local_infos.is_empty () ? global_info + : local_infos[local_infos.length () - 1]; + } + const vsetvl_info &get_entry_info () const + { + gcc_assert (!empty_p ()); + return local_infos.is_empty () ? global_info : local_infos[0]; + } + const vsetvl_info &get_exit_info () const + { + gcc_assert (!empty_p ()); + return local_infos.is_empty () ? global_info + : local_infos[local_infos.length () - 1]; + } + + bool empty_p () const { return local_infos.is_empty () && !has_info (); } + bool has_info () const { return !global_info.empty_p (); } + void set_info (const vsetvl_info &info) + { + gcc_assert (local_infos.is_empty ()); + global_info = info; + global_info.set_bb (bb); + } + void set_empty_info () { global_info.set_empty (); } }; -static bool -same_sew_lmul_demand_p (const bool *dems1, const bool *dems2) +/* Demand system is the RVV-based VSETVL info analysis tools wrapper. + It defines compatible rules for SEW/LMUL, POLICY and AVL. + Also, it provides 3 iterfaces avaiable_p, compatible_p and + merge for the VSETVL PASS analysis and optimization. + + - avaiable_p: Determine whether the next info can get the + avaiable VSETVL status from previous info. + e.g. bb 2 (demand SEW = 32, LMUL = M2) -> bb 3 (demand RATIO = 16). + Since bb 2 demand info (SEW/LMUL = 32/2 = 16) satisfies the bb 3 + demand, the VSETVL instruction in bb 3 can be elided. 
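The arithmetic behind that bb 2 / bb 3 example, with LMUL expressed in eighths
so fractional settings fit in an integer (a standalone sketch, not the
calculate_ratio helper used elsewhere in this file):

   /* mf8 = 1, mf4 = 2, mf2 = 4, m1 = 8, m2 = 16, m4 = 32, m8 = 64.  */
   static unsigned
   ratio_sketch (unsigned sew, unsigned lmul_in_eighths)
   {
     return sew * 8 / lmul_in_eighths;
   }

   /* e32,m2 -> 32 * 8 / 16 = 16 and e8,mf2 -> 8 * 8 / 4 = 16, so either
      configuration satisfies a RATIO = 16 demand.  */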
+ avaiable_p (previous, next) is true in such situation. + - compatible_p: Determine whether prev_info is compatible with next_info + so that we can have a new merged info that is avaiable to both of them. + - merge: Merge the stricter demand information from + next_info into prev_info so that prev_info becomes available to + next_info. */ +class demand_system { - return dems1[DEMAND_SEW] == dems2[DEMAND_SEW] - && dems1[DEMAND_LMUL] == dems2[DEMAND_LMUL] - && dems1[DEMAND_RATIO] == dems2[DEMAND_RATIO] && !dems1[DEMAND_GE_SEW] - && !dems2[DEMAND_GE_SEW]; -} +private: + sbitmap *m_avl_def_in; + sbitmap *m_avl_def_out; -static bool -propagate_avl_across_demands_p (const vector_insn_info &info1, - const vector_insn_info &info2) -{ - if (info2.demand_p (DEMAND_AVL)) - { - if (info2.demand_p (DEMAND_NONZERO_AVL)) - return info1.demand_p (DEMAND_AVL) - && !info1.demand_p (DEMAND_NONZERO_AVL) && info1.has_avl_reg (); - } - else - return info1.demand_p (DEMAND_AVL) && info1.has_avl_reg (); - return false; -} + /* predictors. */ -static bool -reg_available_p (const insn_info *insn, const vector_insn_info &info) -{ - if (info.has_avl_reg () && !info.get_avl_source ()) + inline bool always_true (const vsetvl_info &prev ATTRIBUTE_UNUSED, + const vsetvl_info &next ATTRIBUTE_UNUSED) + { + return true; + } + inline bool always_false (const vsetvl_info &prev ATTRIBUTE_UNUSED, + const vsetvl_info &next ATTRIBUTE_UNUSED) + { return false; - insn_info *def_insn = info.get_avl_source ()->insn (); - if (def_insn->bb () == insn->bb ()) - return before_p (def_insn, insn); - else - return dominated_by_p (CDI_DOMINATORS, insn->bb ()->cfg_bb (), - def_insn->bb ()->cfg_bb ()); -} - -/* Return true if the instruction support relaxed compatible check. */ -static bool -support_relaxed_compatible_p (const vector_insn_info &info1, - const vector_insn_info &info2) -{ - if (fault_first_load_p (info1.get_insn ()->rtl ()) - && info2.demand_p (DEMAND_AVL) && info2.has_avl_reg () - && info2.get_avl_source () && info2.get_avl_source ()->insn ()->is_phi ()) - { - hash_set<set_info *> sets - = get_all_sets (info2.get_avl_source (), true, false, false); - for (set_info *set : sets) - { - if (read_vl_insn_p (set->insn ()->rtl ())) - { - const insn_info *insn - = get_backward_fault_first_load_insn (set->insn ()); - if (insn == info1.get_insn ()) - return info2.compatible_vtype_p (info1); - } - } - } - return false; -} - -/* Count the number of REGNO in RINSN. */ -static int -count_regno_occurrences (rtx_insn *rinsn, unsigned int regno) -{ - int count = 0; - extract_insn (rinsn); - for (int i = 0; i < recog_data.n_operands; i++) - if (refers_to_regno_p (regno, recog_data.operand[i])) - count++; - return count; -} - -/* Return TRUE if the demands can be fused. 
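For instance, sew_ge_p above combined with the max-SEW bound captures the
ge_sew demand that parse_insn sets up for tail-agnostic scalar moves: assuming
a 64-bit ELEN, a ta vmv.s.x writing a 32-bit element only asks for SEW >= 32,
so an earlier configuration with SEW = 64 can serve it:

   prev: SEW = 64            (earlier vsetvl, e64)
   next: SEW = 32, ta        (vmv.s.x, ge_sew demand, max_sew = 64)
   sew_ge_p (prev, next)                    -> true (64 > 32 and next is ta)
   prev_sew_le_next_max_sew_p (prev, next)  -> true (64 <= 64)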
*/ -static bool -demands_can_be_fused_p (const vector_insn_info &be_fused, - const vector_insn_info &to_fuse) -{ - return be_fused.compatible_p (to_fuse) && !be_fused.available_p (to_fuse); -} + } + + /* predictors for sew and lmul */ + + inline bool lmul_eq_p (const vsetvl_info &prev, const vsetvl_info &next) + { + return prev.get_vlmul () == next.get_vlmul (); + } + inline bool sew_eq_p (const vsetvl_info &prev, const vsetvl_info &next) + { + return prev.get_sew () == next.get_sew (); + } + inline bool sew_lmul_eq_p (const vsetvl_info &prev, const vsetvl_info &next) + { + return lmul_eq_p (prev, next) && sew_eq_p (prev, next); + } + inline bool sew_ge_p (const vsetvl_info &prev, const vsetvl_info &next) + { + return prev.get_sew () == next.get_sew () + || (next.get_ta () && prev.get_sew () > next.get_sew ()); + } + inline bool sew_le_p (const vsetvl_info &prev, const vsetvl_info &next) + { + return prev.get_sew () == next.get_sew () + || (prev.get_ta () && prev.get_sew () < next.get_sew ()); + } + inline bool prev_sew_le_next_max_sew_p (const vsetvl_info &prev, + const vsetvl_info &next) + { + return prev.get_sew () <= next.get_max_sew (); + } + inline bool next_sew_le_prev_max_sew_p (const vsetvl_info &prev, + const vsetvl_info &next) + { + return next.get_sew () <= prev.get_max_sew (); + } + inline bool max_sew_overlap_p (const vsetvl_info &prev, + const vsetvl_info &next) + { + return !(prev.get_sew () > next.get_max_sew () + || next.get_sew () > prev.get_max_sew ()); + } + inline bool ratio_eq_p (const vsetvl_info &prev, const vsetvl_info &next) + { + return prev.has_same_ratio (next); + } + inline bool prev_ratio_valid_for_next_sew_p (const vsetvl_info &prev, + const vsetvl_info &next) + { + return prev.get_ratio () >= (next.get_sew () / 8); + } + inline bool next_ratio_valid_for_prev_sew_p (const vsetvl_info &prev, + const vsetvl_info &next) + { + return next.get_ratio () >= (prev.get_sew () / 8); + } + + inline bool sew_ge_and_ratio_eq_p (const vsetvl_info &prev, + const vsetvl_info &next) + { + return sew_ge_p (prev, next) && ratio_eq_p (prev, next); + } + inline bool sew_ge_and_prev_sew_le_next_max_sew_p (const vsetvl_info &prev, + const vsetvl_info &next) + { + return sew_ge_p (prev, next) && prev_sew_le_next_max_sew_p (prev, next); + } + inline bool + sew_ge_and_prev_sew_le_next_max_sew_and_next_ratio_valid_for_prev_sew_p ( + const vsetvl_info &prev, const vsetvl_info &next) + { + return sew_ge_p (prev, next) && prev_sew_le_next_max_sew_p (prev, next) + && next_ratio_valid_for_prev_sew_p (prev, next); + } + inline bool sew_le_and_next_sew_le_prev_max_sew_p (const vsetvl_info &prev, + const vsetvl_info &next) + { + return sew_le_p (prev, next) && next_sew_le_prev_max_sew_p (prev, next); + } + inline bool + max_sew_overlap_and_next_ratio_valid_for_prev_sew_p (const vsetvl_info &prev, + const vsetvl_info &next) + { + return next_ratio_valid_for_prev_sew_p (prev, next) + && max_sew_overlap_p (prev, next); + } + inline bool + sew_le_and_next_sew_le_prev_max_sew_and_ratio_eq_p (const vsetvl_info &prev, + const vsetvl_info &next) + { + return sew_le_p (prev, next) && ratio_eq_p (prev, next) + && next_sew_le_prev_max_sew_p (prev, next); + } + inline bool + max_sew_overlap_and_prev_ratio_valid_for_next_sew_p (const vsetvl_info &prev, + const vsetvl_info &next) + { + return prev_ratio_valid_for_next_sew_p (prev, next) + && max_sew_overlap_p (prev, next); + } + inline bool + sew_le_and_next_sew_le_prev_max_sew_and_prev_ratio_valid_for_next_sew_p ( + const vsetvl_info &prev, const 
vsetvl_info &next) + { + return sew_le_p (prev, next) && prev_ratio_valid_for_next_sew_p (prev, next) + && next_sew_le_prev_max_sew_p (prev, next); + } + inline bool max_sew_overlap_and_ratio_eq_p (const vsetvl_info &prev, + const vsetvl_info &next) + { + return ratio_eq_p (prev, next) && max_sew_overlap_p (prev, next); + } + + /* predictors for tail and mask policy */ + + inline bool tail_policy_eq_p (const vsetvl_info &prev, + const vsetvl_info &next) + { + return prev.get_ta () == next.get_ta (); + } + inline bool mask_policy_eq_p (const vsetvl_info &prev, + const vsetvl_info &next) + { + return prev.get_ma () == next.get_ma (); + } + inline bool tail_mask_policy_eq_p (const vsetvl_info &prev, + const vsetvl_info &next) + { + return tail_policy_eq_p (prev, next) && mask_policy_eq_p (prev, next); + } + + /* predictors for avl */ + + inline bool modify_or_use_vl_p (insn_info *i, const vsetvl_info &info) + { + return info.has_vl () + && (find_access (i->uses (), REGNO (info.get_vl ())) + || find_access (i->defs (), REGNO (info.get_vl ()))); + } + inline bool modify_avl_p (insn_info *i, const vsetvl_info &info) + { + return info.has_nonvlmax_reg_avl () + && find_access (i->defs (), REGNO (info.get_avl ())); + } + + inline bool modify_reg_between_p (insn_info *prev_insn, insn_info *curr_insn, + unsigned regno) + { + gcc_assert (prev_insn->compare_with (curr_insn) < 0); + for (insn_info *i = curr_insn->prev_nondebug_insn (); i != prev_insn; + i = i->prev_nondebug_insn ()) + { + // no def of regno + if (find_access (i->defs (), regno)) + return true; + } + return false; + } -/* Return true if we can fuse VSETVL demand info into predecessor of earliest - * edge. */ -static bool -earliest_pred_can_be_fused_p (const bb_info *earliest_pred, - const vector_insn_info &earliest_info, - const vector_insn_info &expr, rtx *vlmax_vl) -{ - /* Backward VLMAX VL: - bb 3: - vsetivli zero, 1 ... -> vsetvli t1, zero - vmv.s.x - bb 5: - vsetvli t1, zero ... -> to be elided. - vlse16.v - - We should forward "t1". */ - if (!earliest_info.has_avl_reg () && expr.has_avl_reg ()) - { - rtx avl_or_vl_reg = expr.get_avl_or_vl_reg (); - gcc_assert (avl_or_vl_reg); - const insn_info *last_insn = earliest_info.get_insn (); - /* To fuse demand on earlest edge, we make sure AVL/VL - didn't change from the consume insn to the predecessor - of the edge. */ - for (insn_info *i = earliest_pred->end_insn ()->prev_nondebug_insn (); - real_insn_and_same_bb_p (i, earliest_pred) - && after_or_same_p (i, last_insn); - i = i->prev_nondebug_insn ()) - { - if (find_access (i->defs (), REGNO (avl_or_vl_reg))) - return false; - if (find_access (i->uses (), REGNO (avl_or_vl_reg))) - return false; - } - if (vlmax_vl && vlmax_avl_p (expr.get_avl ())) - *vlmax_vl = avl_or_vl_reg; - } + inline bool reg_avl_equal_p (const vsetvl_info &prev, const vsetvl_info &next) + { + if (!prev.has_nonvlmax_reg_avl () || !next.has_nonvlmax_reg_avl ()) + return false; - return true; -} + if (same_equiv_note_p (prev.get_avl_def (), next.get_avl_def ())) + return true; -/* Return true if the current VSETVL 1 is dominated by preceding VSETVL 2. + if (REGNO (prev.get_avl ()) != REGNO (next.get_avl ())) + return false; - VSETVL 2 dominates VSETVL 1 should satisfy this following check: + insn_info *prev_insn = prev.get_insn (); + if (prev.get_bb () != prev_insn->bb ()) + prev_insn = prev.get_bb ()->end_insn (); - - VSETVL 2 should have the RATIO (SEW/LMUL) with VSETVL 1. - - VSETVL 2 is user vsetvl (vsetvl VL, AVL) - - VSETVL 2 "VL" result is the "AVL" of VSETL1. 
*/ -static bool -vsetvl_dominated_by_p (const basic_block cfg_bb, - const vector_insn_info &vsetvl1, - const vector_insn_info &vsetvl2, bool fuse_p) -{ - if (!vsetvl1.valid_or_dirty_p () || !vsetvl2.valid_or_dirty_p ()) - return false; - if (!has_vl_op (vsetvl1.get_insn ()->rtl ()) - || !vsetvl_insn_p (vsetvl2.get_insn ()->rtl ())) - return false; + insn_info *next_insn = next.get_insn (); + if (next.get_bb () != next_insn->bb ()) + next_insn = next.get_bb ()->end_insn (); - hash_set<set_info *> sets - = get_all_sets (vsetvl1.get_avl_source (), true, false, false); - set_info *set = get_same_bb_set (sets, cfg_bb); + return avl_vl_unmodified_between_p (prev_insn, next_insn, next, false); + } - if (!vsetvl1.has_avl_reg () || vlmax_avl_p (vsetvl1.get_avl ()) - || !vsetvl2.same_vlmax_p (vsetvl1) || !set - || set->insn () != vsetvl2.get_insn ()) - return false; + inline bool avl_equal_p (const vsetvl_info &prev, const vsetvl_info &next) + { + gcc_assert (prev.valid_p () && next.valid_p ()); - if (fuse_p && vsetvl2.same_vtype_p (vsetvl1)) - return false; - else if (!fuse_p && !vsetvl2.same_vtype_p (vsetvl1)) - return false; - return true; -} + if (prev.get_ratio () != next.get_ratio ()) + return false; -avl_info::avl_info (const avl_info &other) -{ - m_value = other.get_value (); - m_source = other.get_source (); -} + if (next.has_vl () && next.vl_use_by_non_rvv_insn_p ()) + return false; -avl_info::avl_info (rtx value_in, set_info *source_in) - : m_value (value_in), m_source (source_in) -{} + if (vector_config_insn_p (prev.get_insn ()->rtl ()) && next.get_avl_def () + && next.get_avl_def ()->insn () == prev.get_insn ()) + return true; -bool -avl_info::single_source_equal_p (const avl_info &other) const -{ - set_info *set1 = m_source; - set_info *set2 = other.get_source (); - insn_info *insn1 = extract_single_source (set1); - insn_info *insn2 = extract_single_source (set2); - if (!insn1 || !insn2) - return false; - return source_equal_p (insn1, insn2); -} + if (prev.get_read_vl_insn ()) + { + if (!next.has_nonvlmax_reg_avl () || !next.get_avl_def ()) + return false; + insn_info *avl_def_insn = extract_single_source (next.get_avl_def ()); + return avl_def_insn == prev.get_read_vl_insn (); + } + + if (prev == next && prev.has_nonvlmax_reg_avl ()) + { + insn_info *insn = prev.get_insn (); + bb_info *bb = insn->bb (); + for (insn_info *i = insn; real_insn_and_same_bb_p (i, bb); + i = i->next_nondebug_insn ()) + if (find_access (i->defs (), REGNO (prev.get_avl ()))) + return false; + } -bool -avl_info::multiple_source_equal_p (const avl_info &other) const -{ - /* When the def info is same in RTL_SSA namespace, it's safe - to consider they are avl compatible. */ - if (m_source == other.get_source ()) - return true; + if (prev.has_vlmax_avl () && next.has_vlmax_avl ()) + return true; + else if (prev.has_imm_avl () && next.has_imm_avl ()) + return INTVAL (prev.get_avl ()) == INTVAL (next.get_avl ()); + else if (prev.has_vl () && next.has_nonvlmax_reg_avl () + && REGNO (prev.get_vl ()) == REGNO (next.get_avl ())) + { + insn_info *prev_insn = prev.insn_inside_bb_p () + ? prev.get_insn () + : prev.get_bb ()->end_insn (); + + insn_info *next_insn = next.insn_inside_bb_p () + ? next.get_insn () + : next.get_bb ()->end_insn (); + return avl_vl_unmodified_between_p (prev_insn, next_insn, next, false); + } + else if (prev.has_nonvlmax_reg_avl () && next.has_nonvlmax_reg_avl ()) + return reg_avl_equal_p (prev, next); - /* We only consider handle PHI node. 
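As a concrete instance of the AVL shapes avl_equal_p above accepts (a summary
of its branches, not additional rules):

   prev: vsetvli a2, a3, e32, m2, ta, ma    (vl result lives in a2)
   next: RVV insn using AVL = a2 with the same ratio 16

counts as an equal AVL provided a2 is not redefined between the two
instructions (the avl_vl_unmodified_between_p walk), alongside the simpler
cases of two VLMAX AVLs, two identical immediate AVLs, an AVL defined by the
vl read back from a preceding fault-only-first load, and two register AVLs
whose single sources carry the same REG_EQUAL/REG_EQUIV note
(reg_avl_equal_p).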
*/ - if (!m_source->insn ()->is_phi () || !other.get_source ()->insn ()->is_phi ()) return false; + } + inline bool avl_equal_or_prev_avl_non_zero_p (const vsetvl_info &prev, + const vsetvl_info &next) + { + return avl_equal_p (prev, next) || prev.has_non_zero_avl (); + } + + inline bool can_use_next_avl_p (const vsetvl_info &prev, + const vsetvl_info &next) + { + if (!next.has_nonvlmax_reg_avl () && !next.has_vl ()) + return true; - phi_info *phi1 = as_a<phi_info *> (m_source); - phi_info *phi2 = as_a<phi_info *> (other.get_source ()); + insn_info *prev_insn = prev.get_insn (); + if (prev.get_bb () != prev_insn->bb ()) + prev_insn = prev.get_bb ()->end_insn (); + + insn_info *next_insn = next.get_insn (); + if (next.get_bb () != next_insn->bb ()) + next_insn = next.get_bb ()->end_insn (); + + return avl_vl_unmodified_between_p (prev_insn, next_insn, next); + } + + inline bool avl_equal_or_next_avl_non_zero_and_can_use_next_avl_p ( + const vsetvl_info &prev, const vsetvl_info &next) + { + return avl_equal_p (prev, next) + || (next.has_non_zero_avl () && can_use_next_avl_p (prev, next)); + } + + /* modifiers */ + + inline void nop (const vsetvl_info &prev ATTRIBUTE_UNUSED, + const vsetvl_info &next ATTRIBUTE_UNUSED) + {} + + /* modifiers for sew and lmul */ + + inline void use_min_of_max_sew (vsetvl_info &prev, const vsetvl_info &next) + { + prev.set_max_sew (MIN (prev.get_max_sew (), next.get_max_sew ())); + } + inline void use_next_sew (vsetvl_info &prev, const vsetvl_info &next) + { + prev.set_sew (next.get_sew ()); + use_min_of_max_sew (prev, next); + } + inline void use_max_sew (vsetvl_info &prev, const vsetvl_info &next) + { + auto max_sew = std::max (prev.get_sew (), next.get_sew ()); + prev.set_sew (max_sew); + use_min_of_max_sew (prev, next); + } + inline void use_next_sew_lmul (vsetvl_info &prev, const vsetvl_info &next) + { + use_next_sew (prev, next); + prev.set_vlmul (next.get_vlmul ()); + prev.set_ratio (next.get_ratio ()); + } + inline void use_next_sew_with_prev_ratio (vsetvl_info &prev, + const vsetvl_info &next) + { + use_next_sew (prev, next); + prev.set_vlmul (calculate_vlmul (next.get_sew (), prev.get_ratio ())); + } + inline void modify_lmul_with_next_ratio (vsetvl_info &prev, + const vsetvl_info &next) + { + prev.set_vlmul (calculate_vlmul (prev.get_sew (), next.get_ratio ())); + prev.set_ratio (next.get_ratio ()); + } + + inline void use_max_sew_and_lmul_with_next_ratio (vsetvl_info &prev, + const vsetvl_info &next) + { + prev.set_vlmul (calculate_vlmul (prev.get_sew (), next.get_ratio ())); + use_max_sew (prev, next); + prev.set_ratio (next.get_ratio ()); + } + + inline void use_max_sew_and_lmul_with_prev_ratio (vsetvl_info &prev, + const vsetvl_info &next) + { + auto max_sew = std::max (prev.get_sew (), next.get_sew ()); + prev.set_vlmul (calculate_vlmul (max_sew, prev.get_ratio ())); + prev.set_sew (max_sew); + } + + /* modifiers for tail and mask policy */ + + inline void use_tail_policy (vsetvl_info &prev, const vsetvl_info &next) + { + if (!next.get_ta ()) + prev.set_ta (next.get_ta ()); + } + inline void use_mask_policy (vsetvl_info &prev, const vsetvl_info &next) + { + if (!next.get_ma ()) + prev.set_ma (next.get_ma ()); + } + inline void use_tail_mask_policy (vsetvl_info &prev, const vsetvl_info &next) + { + use_tail_policy (prev, next); + use_mask_policy (prev, next); + } + + /* modifiers for avl */ + + inline void use_next_avl (vsetvl_info &prev, const vsetvl_info &next) + { + gcc_assert (can_use_next_avl_p (prev, next)); + prev.update_avl (next); + } + + 
inline void use_next_avl_when_not_equal (vsetvl_info &prev, + const vsetvl_info &next) + { + if (avl_equal_p (prev, next)) + return; + gcc_assert (next.has_non_zero_avl ()); + use_next_avl (prev, next); + } - if (phi1->is_degenerate () && phi2->is_degenerate ()) - { - /* Degenerate PHI means the PHI node only have one input. */ +public: + demand_system () : m_avl_def_in (nullptr), m_avl_def_out (nullptr) {} + + void set_avl_in_out_data (sbitmap *m_avl_def_in, sbitmap *m_avl_def_out) + { + m_avl_def_in = m_avl_def_in; + m_avl_def_out = m_avl_def_out; + } + + /* Can we move vsetvl info between prev_insn and next_insn safe? */ + bool avl_vl_unmodified_between_p (insn_info *prev_insn, insn_info *next_insn, + const vsetvl_info &info, + bool ignore_vl = false) + { + gcc_assert ((ignore_vl && info.has_nonvlmax_reg_avl ()) + || (info.has_nonvlmax_reg_avl () || info.has_vl ())); + + gcc_assert (!prev_insn->is_debug_insn () && !next_insn->is_debug_insn ()); + if (prev_insn->bb () == next_insn->bb () + && prev_insn->compare_with (next_insn) < 0) + { + for (insn_info *i = next_insn->prev_nondebug_insn (); i != prev_insn; + i = i->prev_nondebug_insn ()) + { + // no def amd use of vl + if (!ignore_vl && modify_or_use_vl_p (i, info)) + return false; - /* If both PHI nodes have the same single input in use list. - We consider they are AVL compatible. */ - if (phi1->input_value (0) == phi2->input_value (0)) + // no def of avl + if (modify_avl_p (i, info)) + return false; + } return true; - } - /* TODO: We can support more optimization cases in the future. */ - return false; -} - -avl_info & -avl_info::operator= (const avl_info &other) -{ - m_value = other.get_value (); - m_source = other.get_source (); - return *this; -} - -bool -avl_info::operator== (const avl_info &other) const -{ - if (!m_value) - return !other.get_value (); - if (!other.get_value ()) - return false; + } + else + { + if (!ignore_vl && info.has_vl ()) + { + bitmap live_out = df_get_live_out (prev_insn->bb ()->cfg_bb ()); + if (bitmap_bit_p (live_out, REGNO (info.get_vl ()))) + return false; + } - if (GET_CODE (m_value) != GET_CODE (other.get_value ())) - return false; + if (info.has_nonvlmax_reg_avl () && m_avl_def_in && m_avl_def_out) + { + bool has_avl_out = false; + unsigned regno = REGNO (info.get_avl ()); + unsigned expr_id; + sbitmap_iterator sbi; + EXECUTE_IF_SET_IN_BITMAP (m_avl_def_out[prev_insn->bb ()->index ()], + 0, expr_id, sbi) + { + if (get_regno (expr_id, last_basic_block_for_fn (cfun)) + != regno) + continue; + has_avl_out = true; + if (!bitmap_bit_p (m_avl_def_in[next_insn->bb ()->index ()], + expr_id)) + return false; + } + if (!has_avl_out) + return false; + } - /* Handle CONST_INT AVL. */ - if (CONST_INT_P (m_value)) - return INTVAL (m_value) == INTVAL (other.get_value ()); + for (insn_info *i = next_insn; i != next_insn->bb ()->head_insn (); + i = i->prev_nondebug_insn ()) + { + // no def amd use of vl + if (!ignore_vl && modify_or_use_vl_p (i, info)) + return false; - /* Handle VLMAX AVL. */ - if (vlmax_avl_p (m_value)) - return vlmax_avl_p (other.get_value ()); - if (vlmax_avl_p (other.get_value ())) - return false; + // no def of avl + if (modify_avl_p (i, info)) + return false; + } - /* If any source is undef value, we think they are not equal. 
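When the two instructions live in different blocks, avl_vl_unmodified_between_p
above cannot simply walk one insn chain.  Instead it requires, roughly: the vl
register (if any) must not be live out of the earlier block; every reaching
definition of the AVL register recorded in m_avl_def_out for the earlier block
(bits indexed by the get_expr_id packing of register and block) must also
appear in m_avl_def_in of the later block; and the tail of the earlier block
after the first instruction plus the head of the later block before the second
instruction are still scanned explicitly for definitions of the AVL or
uses/definitions of vl.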
*/ - if (!m_source || !other.get_source ()) - return false; + for (insn_info *i = prev_insn->bb ()->end_insn (); i != prev_insn; + i = i->prev_nondebug_insn ()) + { + // no def amd use of vl + if (!ignore_vl && modify_or_use_vl_p (i, info)) + return false; - /* If both sources are single source (defined by a single real RTL) - and their definitions are same. */ - if (single_source_equal_p (other)) + // no def of avl + if (modify_avl_p (i, info)) + return false; + } + } return true; + } + + bool sew_lmul_compatible_p (const vsetvl_info &prev, const vsetvl_info &next) + { + gcc_assert (prev.valid_p () && next.valid_p ()); + sew_lmul_demand_type prev_flags = prev.get_sew_lmul_demand (); + sew_lmul_demand_type next_flags = next.get_sew_lmul_demand (); +#define DEF_SEW_LMUL_RULE(PREV_FLAGS, NEXT_FLAGS, NEW_FLAGS, COMPATIBLE_P, \ + AVAILABLE_P, FUSE) \ + if (prev_flags == sew_lmul_demand_type::PREV_FLAGS \ + && next_flags == sew_lmul_demand_type::NEXT_FLAGS) \ + return COMPATIBLE_P (prev, next); - return multiple_source_equal_p (other); -} - -bool -avl_info::operator!= (const avl_info &other) const -{ - return !(*this == other); -} - -bool -avl_info::has_non_zero_avl () const -{ - if (has_avl_imm ()) - return INTVAL (get_value ()) > 0; - if (has_avl_reg ()) - return vlmax_avl_p (get_value ()); - return false; -} - -/* Initialize VL/VTYPE information. */ -vl_vtype_info::vl_vtype_info (avl_info avl_in, uint8_t sew_in, - enum vlmul_type vlmul_in, uint8_t ratio_in, - bool ta_in, bool ma_in) - : m_avl (avl_in), m_sew (sew_in), m_vlmul (vlmul_in), m_ratio (ratio_in), - m_ta (ta_in), m_ma (ma_in) -{ - gcc_assert (valid_sew_p (m_sew) && "Unexpected SEW"); -} - -bool -vl_vtype_info::operator== (const vl_vtype_info &other) const -{ - return same_avl_p (other) && m_sew == other.get_sew () - && m_vlmul == other.get_vlmul () && m_ta == other.get_ta () - && m_ma == other.get_ma () && m_ratio == other.get_ratio (); -} - -bool -vl_vtype_info::operator!= (const vl_vtype_info &other) const -{ - return !(*this == other); -} +#include "riscv-vsetvl.def" -bool -vl_vtype_info::same_avl_p (const vl_vtype_info &other) const -{ - /* We need to compare both RTL and SET. If both AVL are CONST_INT. - For example, const_int 3 and const_int 4, we need to compare - RTL. If both AVL are REG and their REGNO are same, we need to - compare SET. */ - return get_avl () == other.get_avl () - && get_avl_source () == other.get_avl_source (); -} + gcc_unreachable (); + } -bool -vl_vtype_info::same_vtype_p (const vl_vtype_info &other) const -{ - return get_sew () == other.get_sew () && get_vlmul () == other.get_vlmul () - && get_ta () == other.get_ta () && get_ma () == other.get_ma (); -} + bool sew_lmul_available_p (const vsetvl_info &prev, const vsetvl_info &next) + { + gcc_assert (prev.valid_p () && next.valid_p ()); + sew_lmul_demand_type prev_flags = prev.get_sew_lmul_demand (); + sew_lmul_demand_type next_flags = next.get_sew_lmul_demand (); +#define DEF_SEW_LMUL_RULE(PREV_FLAGS, NEXT_FLAGS, NEW_FLAGS, COMPATIBLE_P, \ + AVAILABLE_P, FUSE) \ + if (prev_flags == sew_lmul_demand_type::PREV_FLAGS \ + && next_flags == sew_lmul_demand_type::NEXT_FLAGS) \ + return AVAILABLE_P (prev, next); -bool -vl_vtype_info::same_vlmax_p (const vl_vtype_info &other) const -{ - return get_ratio () == other.get_ratio (); -} +#include "riscv-vsetvl.def" -/* Compare the compatibility between Dem1 and Dem2. - If Dem1 > Dem2, Dem1 has bigger compatibility then Dem2 - meaning Dem1 is easier be compatible with others than Dem2 - or Dem2 is stricter than Dem1. 
- For example, Dem1 (demand SEW + LMUL) > Dem2 (demand RATIO). */ -bool -vector_insn_info::operator>= (const vector_insn_info &other) const -{ - if (support_relaxed_compatible_p (*this, other)) - { - unsigned array_size = sizeof (unavailable_conds) / sizeof (demands_cond); - /* Bypass AVL unavailable cases. */ - for (unsigned i = 2; i < array_size; i++) - if (unavailable_conds[i].pair.match_cond_p (this->get_demands (), - other.get_demands ()) - && unavailable_conds[i].incompatible_p (*this, other)) - return false; - return true; + gcc_unreachable (); + } + + void merge_sew_lmul (vsetvl_info &prev, const vsetvl_info &next) + { + gcc_assert (prev.valid_p () && next.valid_p ()); + sew_lmul_demand_type prev_flags = prev.get_sew_lmul_demand (); + sew_lmul_demand_type next_flags = next.get_sew_lmul_demand (); +#define DEF_SEW_LMUL_RULE(PREV_FLAGS, NEXT_FLAGS, NEW_FLAGS, COMPATIBLE_P, \ + AVAILABLE_P, FUSE) \ + if (prev_flags == sew_lmul_demand_type::PREV_FLAGS \ + && next_flags == sew_lmul_demand_type::NEXT_FLAGS) \ + { \ + gcc_assert (COMPATIBLE_P (prev, next)); \ + FUSE (prev, next); \ + prev.set_sew_lmul_demand (sew_lmul_demand_type::NEW_FLAGS); \ + return; \ } - if (!other.compatible_p (static_cast<const vl_vtype_info &> (*this))) - return false; - if (!this->compatible_p (static_cast<const vl_vtype_info &> (other))) - return true; - - if (*this == other) - return true; - - for (const auto &cond : unavailable_conds) - if (cond.pair.match_cond_p (this->get_demands (), other.get_demands ()) - && cond.incompatible_p (*this, other)) - return false; - - return true; -} - -bool -vector_insn_info::operator== (const vector_insn_info &other) const -{ - gcc_assert (!uninit_p () && !other.uninit_p () - && "Uninitialization should not happen"); - - /* Empty is only equal to another Empty. */ - if (empty_p ()) - return other.empty_p (); - if (other.empty_p ()) - return empty_p (); - - /* Unknown is only equal to another Unknown. */ - if (unknown_p ()) - return other.unknown_p (); - if (other.unknown_p ()) - return unknown_p (); - - for (size_t i = 0; i < NUM_DEMAND; i++) - if (m_demands[i] != other.demand_p ((enum demand_type) i)) - return false; +#include "riscv-vsetvl.def" - /* We should consider different INSN demands as different - expression. Otherwise, we will be doing incorrect vsetvl - elimination. */ - if (m_insn != other.get_insn ()) - return false; + gcc_unreachable (); + } - if (!same_avl_p (other)) - return false; + bool policy_compatible_p (const vsetvl_info &prev, const vsetvl_info &next) + { + gcc_assert (prev.valid_p () && next.valid_p ()); + policy_demand_type prev_flags = prev.get_policy_demand (); + policy_demand_type next_flags = next.get_policy_demand (); +#define DEF_POLICY_RULE(PREV_FLAGS, NEXT_FLAGS, NEW_FLAGS, COMPATIBLE_P, \ + AVAILABLE_P, FUSE) \ + if (prev_flags == policy_demand_type::PREV_FLAGS \ + && next_flags == policy_demand_type::NEXT_FLAGS) \ + return COMPATIBLE_P (prev, next); - /* If the full VTYPE is valid, check that it is the same. 
*/ - return same_vtype_p (other); -} +#include "riscv-vsetvl.def" -void -vector_insn_info::parse_insn (rtx_insn *rinsn) -{ - *this = vector_insn_info (); - if (!NONDEBUG_INSN_P (rinsn)) - return; - if (optimize == 0 && !has_vtype_op (rinsn)) - return; - gcc_assert (!vsetvl_discard_result_insn_p (rinsn)); - m_state = VALID; - extract_insn_cached (rinsn); - rtx avl = ::get_avl (rinsn); - m_avl = avl_info (avl, nullptr); - m_sew = ::get_sew (rinsn); - m_vlmul = ::get_vlmul (rinsn); - m_ta = tail_agnostic_p (rinsn); - m_ma = mask_agnostic_p (rinsn); -} + gcc_unreachable (); + } -void -vector_insn_info::parse_insn (insn_info *insn) -{ - *this = vector_insn_info (); + bool policy_available_p (const vsetvl_info &prev, const vsetvl_info &next) + { + gcc_assert (prev.valid_p () && next.valid_p ()); + policy_demand_type prev_flags = prev.get_policy_demand (); + policy_demand_type next_flags = next.get_policy_demand (); +#define DEF_POLICY_RULE(PREV_FLAGS, NEXT_FLAGS, NEW_FLAGS, COMPATIBLE_P, \ + AVAILABLE_P, FUSE) \ + if (prev_flags == policy_demand_type::PREV_FLAGS \ + && next_flags == policy_demand_type::NEXT_FLAGS) \ + return AVAILABLE_P (prev, next); - /* Return if it is debug insn for the consistency with optimize == 0. */ - if (insn->is_debug_insn ()) - return; +#include "riscv-vsetvl.def" - /* We set it as unknown since we don't what will happen in CALL or ASM. */ - if (insn->is_call () || insn->is_asm ()) - { - set_unknown (); - return; + gcc_unreachable (); + } + + void merge_policy (vsetvl_info &prev, const vsetvl_info &next) + { + gcc_assert (prev.valid_p () && next.valid_p ()); + policy_demand_type prev_flags = prev.get_policy_demand (); + policy_demand_type next_flags = next.get_policy_demand (); +#define DEF_POLICY_RULE(PREV_FLAGS, NEXT_FLAGS, NEW_FLAGS, COMPATIBLE_P, \ + AVAILABLE_P, FUSE) \ + if (prev_flags == policy_demand_type::PREV_FLAGS \ + && next_flags == policy_demand_type::NEXT_FLAGS) \ + { \ + gcc_assert (COMPATIBLE_P (prev, next)); \ + FUSE (prev, next); \ + prev.set_policy_demand (policy_demand_type::NEW_FLAGS); \ + return; \ } - /* If this is something that updates VL/VTYPE that we don't know about, set - the state to unknown. */ - if (!vector_config_insn_p (insn->rtl ()) && !has_vtype_op (insn->rtl ()) - && (find_access (insn->defs (), VL_REGNUM) - || find_access (insn->defs (), VTYPE_REGNUM))) - { - set_unknown (); - return; - } +#include "riscv-vsetvl.def" - if (!vector_config_insn_p (insn->rtl ()) && !has_vtype_op (insn->rtl ())) - return; + gcc_unreachable (); + } - /* Warning: This function has to work on both the lowered (i.e. post - emit_local_forward_vsetvls) and pre-lowering forms. The main implication - of this is that it can't use the value of a SEW, VL, or Policy operand as - they might be stale after lowering. 
*/ - vl_vtype_info::operator= (get_vl_vtype_info (insn)); - m_insn = insn; - m_state = VALID; - if (vector_config_insn_p (insn->rtl ())) - { - m_demands[DEMAND_AVL] = true; - m_demands[DEMAND_RATIO] = true; - return; - } + bool avl_compatible_p (const vsetvl_info &prev, const vsetvl_info &next) + { + gcc_assert (prev.valid_p () && next.valid_p ()); + avl_demand_type prev_flags = prev.get_avl_demand (); + avl_demand_type next_flags = next.get_avl_demand (); +#define DEF_AVL_RULE(PREV_FLAGS, NEXT_FLAGS, NEW_FLAGS, COMPATIBLE_P, \ + AVAILABLE_P, FUSE) \ + if (prev_flags == avl_demand_type::PREV_FLAGS \ + && next_flags == avl_demand_type::NEXT_FLAGS) \ + return COMPATIBLE_P (prev, next); - if (has_vl_op (insn->rtl ())) - m_demands[DEMAND_AVL] = true; +#include "riscv-vsetvl.def" - if (get_attr_ratio (insn->rtl ()) != INVALID_ATTRIBUTE) - m_demands[DEMAND_RATIO] = true; - else - { - /* TODO: By default, if it doesn't demand RATIO, we set it - demand SEW && LMUL both. Some instructions may demand SEW - only and ignore LMUL, will fix it later. */ - m_demands[DEMAND_SEW] = true; - if (!ignore_vlmul_insn_p (insn->rtl ())) - m_demands[DEMAND_LMUL] = true; - } + gcc_unreachable (); + } - if (get_attr_ta (insn->rtl ()) != INVALID_ATTRIBUTE) - m_demands[DEMAND_TAIL_POLICY] = true; - if (get_attr_ma (insn->rtl ()) != INVALID_ATTRIBUTE) - m_demands[DEMAND_MASK_POLICY] = true; + bool avl_available_p (const vsetvl_info &prev, const vsetvl_info &next) + { + gcc_assert (prev.valid_p () && next.valid_p ()); + avl_demand_type prev_flags = prev.get_avl_demand (); + avl_demand_type next_flags = next.get_avl_demand (); +#define DEF_AVL_RULE(PREV_FLAGS, NEXT_FLAGS, NEW_FLAGS, COMPATIBLE_P, \ + AVAILABLE_P, FUSE) \ + if (prev_flags == avl_demand_type::PREV_FLAGS \ + && next_flags == avl_demand_type::NEXT_FLAGS) \ + return AVAILABLE_P (prev, next); - if (vector_config_insn_p (insn->rtl ())) - return; +#include "riscv-vsetvl.def" - if (scalar_move_insn_p (insn->rtl ())) - { - if (m_avl.has_non_zero_avl ()) - m_demands[DEMAND_NONZERO_AVL] = true; - if (m_ta) - m_demands[DEMAND_GE_SEW] = true; + gcc_unreachable (); + } + + void merge_avl (vsetvl_info &prev, const vsetvl_info &next) + { + gcc_assert (prev.valid_p () && next.valid_p ()); + avl_demand_type prev_flags = prev.get_avl_demand (); + avl_demand_type next_flags = next.get_avl_demand (); +#define DEF_AVL_RULE(PREV_FLAGS, NEXT_FLAGS, NEW_FLAGS, COMPATIBLE_P, \ + AVAILABLE_P, FUSE) \ + if (prev_flags == avl_demand_type::PREV_FLAGS \ + && next_flags == avl_demand_type::NEXT_FLAGS) \ + { \ + gcc_assert (COMPATIBLE_P (prev, next)); \ + FUSE (prev, next); \ + prev.set_avl_demand (avl_demand_type::NEW_FLAGS); \ + return; \ } - if (!m_avl.has_avl_reg () || vlmax_avl_p (get_avl ()) || !m_avl.get_source ()) - return; - if (!m_avl.get_source ()->insn ()->is_real () - && !m_avl.get_source ()->insn ()->is_phi ()) - return; - - insn_info *def_insn = extract_single_source (m_avl.get_source ()); - if (!def_insn || !vsetvl_insn_p (def_insn->rtl ())) - return; - - vector_insn_info new_info; - new_info.parse_insn (def_insn); - if (!same_vlmax_p (new_info) && !scalar_move_insn_p (insn->rtl ())) - return; - - if (new_info.has_avl ()) - { - if (new_info.has_avl_imm ()) - set_avl_info (avl_info (new_info.get_avl (), nullptr)); - else - { - if (vlmax_avl_p (new_info.get_avl ())) - set_avl_info (avl_info (new_info.get_avl (), get_avl_source ())); - else - { - /* Conservatively propagate non-VLMAX AVL of user vsetvl: - 1. The user vsetvl should be same block with the rvv insn. - 2. 
The user vsetvl is the only def insn of rvv insn. - 3. The AVL is not modified between def-use chain. - 4. The VL is only used by insn within EBB. - */ - bool modified_p = false; - for (insn_info *i = def_insn->next_nondebug_insn (); - real_insn_and_same_bb_p (i, get_insn ()->bb ()); - i = i->next_nondebug_insn ()) - { - /* Consider this following sequence: - - insn 1: vsetvli a5,a3,e8,mf4,ta,mu - insn 2: vsetvli zero,a5,e32,m1,ta,ma - ... - vle32.v v1,0(a1) - vsetvli a2,zero,e32,m1,ta,ma - vadd.vv v1,v1,v1 - vsetvli zero,a5,e32,m1,ta,ma - vse32.v v1,0(a0) - ... - insn 3: sub a3,a3,a5 - ... - - We can local AVL propagate "a3" from insn 1 to insn 2 - if no insns between insn 1 and insn 2 modify "a3 even - though insn 3 modifies "a3". - Otherwise, we can't perform local AVL propagation. - - Early break if we reach the insn 2. */ - if (!before_p (i, insn)) - break; - if (find_access (i->defs (), REGNO (new_info.get_avl ()))) - { - modified_p = true; - break; - } - } +#include "riscv-vsetvl.def" - bool has_live_out_use = false; - for (use_info *use : m_avl.get_source ()->all_uses ()) - { - if (use->is_live_out_use ()) - { - has_live_out_use = true; - break; - } - } - if (!modified_p && !has_live_out_use - && def_insn == m_avl.get_source ()->insn () - && m_insn->bb () == def_insn->bb ()) - set_avl_info (new_info.get_avl_info ()); - } - } - } + gcc_unreachable (); + } + + bool compatible_p (const vsetvl_info &prev, const vsetvl_info &next) + { + bool compatible_p = sew_lmul_compatible_p (prev, next) + && policy_compatible_p (prev, next) + && avl_compatible_p (prev, next); + return compatible_p; + } + + bool available_p (const vsetvl_info &prev, const vsetvl_info &next) + { + bool available_p = sew_lmul_available_p (prev, next) + && policy_available_p (prev, next) + && avl_available_p (prev, next); + gcc_assert (!available_p || compatible_p (prev, next)); + return available_p; + } + + void merge (vsetvl_info &prev, const vsetvl_info &next) + { + gcc_assert (compatible_p (prev, next)); + merge_sew_lmul (prev, next); + merge_policy (prev, next); + merge_avl (prev, next); + gcc_assert (available_p (prev, next)); + } +}; - if (scalar_move_insn_p (insn->rtl ()) && m_avl.has_non_zero_avl ()) - m_demands[DEMAND_NONZERO_AVL] = true; -} -bool -vector_insn_info::compatible_p (const vector_insn_info &other) const +class pre_vsetvl { - gcc_assert (valid_or_dirty_p () && other.valid_or_dirty_p () - && "Can't compare invalid demanded infos"); - - for (const auto &cond : incompatible_conds) - if (cond.dual_incompatible_p (*this, other)) - return false; - return true; -} +private: + demand_system m_dem; + auto_vec<vsetvl_block_info> m_vector_block_infos; + + /* data for avl reaching defintion. */ + sbitmap m_avl_regs; + sbitmap *m_avl_def_in; + sbitmap *m_avl_def_out; + sbitmap *m_reg_def_loc; + + /* data for vsetvl info reaching defintion. 
*/ + vsetvl_info m_unknow_info; + auto_vec<vsetvl_info *> m_vsetvl_def_exprs; + sbitmap *m_vsetvl_def_in; + sbitmap *m_vsetvl_def_out; + + /* data for lcm */ + auto_vec<vsetvl_info *> m_exprs; + sbitmap *m_avloc; + sbitmap *m_avin; + sbitmap *m_avout; + sbitmap *m_kill; + sbitmap *m_antloc; + sbitmap *m_transp; + sbitmap *m_insert; + sbitmap *m_del; + struct edge_list *m_edges; + + auto_vec<vsetvl_info> m_delete_list; + + vsetvl_block_info &get_block_info (const bb_info *bb) + { + return m_vector_block_infos[bb->index ()]; + } + const vsetvl_block_info &get_block_info (const basic_block bb) const + { + return m_vector_block_infos[bb->index]; + } + + vsetvl_block_info &get_block_info (const basic_block bb) + { + return m_vector_block_infos[bb->index]; + } + + void add_expr (auto_vec<vsetvl_info *> &m_exprs, vsetvl_info &info) + { + for (vsetvl_info *item : m_exprs) + { + if (*item == info) + return; + } + m_exprs.safe_push (&info); + } + + unsigned get_expr_index (auto_vec<vsetvl_info *> &m_exprs, + const vsetvl_info &info) + { + for (size_t i = 0; i < m_exprs.length (); i += 1) + { + if (*m_exprs[i] == info) + return i; + } + gcc_unreachable (); + } + + bool anticpatable_exp_p (const vsetvl_info &header_info) + { + if (!header_info.has_nonvlmax_reg_avl () && !header_info.has_vl ()) + return true; -bool -vector_insn_info::skip_avl_compatible_p (const vector_insn_info &other) const -{ - gcc_assert (valid_or_dirty_p () && other.valid_or_dirty_p () - && "Can't compare invalid demanded infos"); - unsigned array_size = sizeof (incompatible_conds) / sizeof (demands_cond); - /* Bypass AVL incompatible cases. */ - for (unsigned i = 1; i < array_size; i++) - if (incompatible_conds[i].dual_incompatible_p (*this, other)) - return false; - return true; -} + bb_info *bb = header_info.get_bb (); + insn_info *prev_insn = bb->head_insn (); + insn_info *next_insn = header_info.insn_inside_bb_p () + ? header_info.get_insn () + : header_info.get_bb ()->end_insn (); + + return m_dem.avl_vl_unmodified_between_p (prev_insn, next_insn, + header_info); + } + + bool available_exp_p (const vsetvl_info &prev_info, + const vsetvl_info &next_info) + { + return m_dem.available_p (prev_info, next_info); + } + + void compute_probabilities () + { + edge e; + edge_iterator ei; + + for (const bb_info *bb : crtl->ssa->bbs ()) + { + basic_block cfg_bb = bb->cfg_bb (); + auto &curr_prob = get_block_info (cfg_bb).probability; + + /* GCC assume entry block (bb 0) are always so + executed so set its probability as "always". */ + if (ENTRY_BLOCK_PTR_FOR_FN (cfun) == cfg_bb) + curr_prob = profile_probability::always (); + /* Exit block (bb 1) is the block we don't need to process. */ + if (EXIT_BLOCK_PTR_FOR_FN (cfun) == cfg_bb) + continue; -bool -vector_insn_info::compatible_avl_p (const vl_vtype_info &other) const -{ - gcc_assert (valid_or_dirty_p () && "Can't compare invalid vl_vtype_info"); - gcc_assert (!unknown_p () && "Can't compare AVL in unknown state"); - if (!demand_p (DEMAND_AVL)) - return true; - if (demand_p (DEMAND_NONZERO_AVL) && other.has_non_zero_avl ()) + gcc_assert (curr_prob.initialized_p ()); + FOR_EACH_EDGE (e, ei, cfg_bb->succs) + { + auto &new_prob = get_block_info (e->dest).probability; + /* Normally, the edge probability should be initialized. + However, some special testing code which is written in + GIMPLE IR style force the edge probility uninitialized, + we conservatively set it as never so that it will not + affect PRE (Phase 3 && Phse 4). 
*/ + if (!e->probability.initialized_p ()) + new_prob = profile_probability::never (); + else if (!new_prob.initialized_p ()) + new_prob = curr_prob * e->probability; + else if (new_prob == profile_probability::always ()) + continue; + else + new_prob += curr_prob * e->probability; + } + } + } + + void insert_vsetvl_insn (enum emit_type emit_type, const vsetvl_info &info) + { + rtx pat = info.get_vsetvl_pat (); + rtx_insn *rinsn = info.get_insn ()->rtl (); + + if (emit_type == EMIT_DIRECT) + { + emit_insn (pat); + if (dump_file) + { + fprintf (dump_file, " Insert vsetvl insn %d:\n", + INSN_UID (get_last_insn ())); + print_rtl_single (dump_file, get_last_insn ()); + } + } + else if (emit_type == EMIT_BEFORE) + { + emit_insn_before (pat, rinsn); + if (dump_file) + { + fprintf (dump_file, " Insert vsetvl insn before insn %d:\n", + INSN_UID (rinsn)); + print_rtl_single (dump_file, PREV_INSN (rinsn)); + } + } + else + { + emit_insn_after (pat, rinsn); + if (dump_file) + { + fprintf (dump_file, " Insert vsetvl insn after insn %d:\n", + INSN_UID (rinsn)); + print_rtl_single (dump_file, NEXT_INSN (rinsn)); + } + } + } + + void change_vsetvl_insn (const vsetvl_info &info) + { + rtx_insn *rinsn = info.get_insn ()->rtl (); + rtx new_pat = info.get_vsetvl_pat (); + + if (dump_file) + { + fprintf (dump_file, " Change insn %d from:\n", INSN_UID (rinsn)); + print_rtl_single (dump_file, rinsn); + } + + validate_change_or_fail (rinsn, &PATTERN (rinsn), new_pat, false); + + if (dump_file) + { + fprintf (dump_file, "\n to:\n"); + print_rtl_single (dump_file, rinsn); + } + } + + void remove_vsetvl_insn (const vsetvl_info &info) + { + rtx_insn *rinsn = info.get_insn ()->rtl (); + if (dump_file) + { + fprintf (dump_file, " Eliminate insn %d:\n", INSN_UID (rinsn)); + print_rtl_single (dump_file, rinsn); + } + if (in_sequence_p ()) + remove_insn (rinsn); + else + delete_insn (rinsn); + } + + bool successors_probability_equal_p (const basic_block cfg_bb) const + { + edge e; + edge_iterator ei; + profile_probability prob = profile_probability::uninitialized (); + FOR_EACH_EDGE (e, ei, cfg_bb->succs) + { + if (prob == profile_probability::uninitialized ()) + prob = m_vector_block_infos[e->dest->index].probability; + else if (prob == m_vector_block_infos[e->dest->index].probability) + continue; + else + /* We pick the highest probability among those incompatible VSETVL + infos. When all incompatible VSTEVL infos have same probability, we + don't pick any of them. 
*/ + return false; + } return true; - return get_avl_info () == other.get_avl_info (); -} + } + + bool preds_has_same_avl_p (const vsetvl_info &curr_info) + { + gcc_assert ( + !bitmap_empty_p (m_vsetvl_def_in[curr_info.get_bb ()->index ()])); + + unsigned expr_index; + sbitmap_iterator sbi; + EXECUTE_IF_SET_IN_BITMAP (m_vsetvl_def_in[curr_info.get_bb ()->index ()], 0, + expr_index, sbi) + { + const vsetvl_info &prev_info = *m_vsetvl_def_exprs[expr_index]; + if (!prev_info.valid_p () + || !m_dem.avl_available_p (prev_info, curr_info)) + return false; + } -bool -vector_insn_info::compatible_avl_p (const avl_info &other) const -{ - gcc_assert (valid_or_dirty_p () && "Can't compare invalid vl_vtype_info"); - gcc_assert (!unknown_p () && "Can't compare AVL in unknown state"); - gcc_assert (demand_p (DEMAND_AVL) && "Can't compare AVL undemand state"); - if (!demand_p (DEMAND_AVL)) - return true; - if (demand_p (DEMAND_NONZERO_AVL) && other.has_non_zero_avl ()) return true; - return get_avl_info () == other; -} - -bool -vector_insn_info::compatible_vtype_p (const vl_vtype_info &other) const -{ - gcc_assert (valid_or_dirty_p () && "Can't compare invalid vl_vtype_info"); - gcc_assert (!unknown_p () && "Can't compare VTYPE in unknown state"); - if (demand_p (DEMAND_SEW)) - { - if (!demand_p (DEMAND_GE_SEW) && m_sew != other.get_sew ()) - return false; - if (demand_p (DEMAND_GE_SEW) && m_sew > other.get_sew ()) - return false; - } - if (demand_p (DEMAND_LMUL) && m_vlmul != other.get_vlmul ()) - return false; - if (demand_p (DEMAND_RATIO) && m_ratio != other.get_ratio ()) - return false; - if (demand_p (DEMAND_TAIL_POLICY) && m_ta != other.get_ta ()) - return false; - if (demand_p (DEMAND_MASK_POLICY) && m_ma != other.get_ma ()) - return false; - return true; -} - -/* Determine whether the vector instructions requirements represented by - Require are compatible with the previous vsetvli instruction represented - by this. INSN is the instruction whose requirements we're considering. */ -bool -vector_insn_info::compatible_p (const vl_vtype_info &curr_info) const -{ - gcc_assert (!uninit_p () && "Can't handle uninitialized info"); - if (empty_p ()) - return false; - - /* Nothing is compatible with Unknown. */ - if (unknown_p ()) - return false; - - /* If the instruction doesn't need an AVLReg and the SEW matches, consider - it compatible. 
*/ - if (!demand_p (DEMAND_AVL)) - if (m_sew == curr_info.get_sew ()) - return true; - - return compatible_avl_p (curr_info) && compatible_vtype_p (curr_info); -} - -bool -vector_insn_info::available_p (const vector_insn_info &other) const -{ - return *this >= other; -} - -void -vector_insn_info::fuse_avl (const vector_insn_info &info1, - const vector_insn_info &info2) -{ - set_insn (info1.get_insn ()); - if (info1.demand_p (DEMAND_AVL)) - { - if (info1.demand_p (DEMAND_NONZERO_AVL)) - { - if (info2.demand_p (DEMAND_AVL) - && !info2.demand_p (DEMAND_NONZERO_AVL)) - { - set_avl_info (info2.get_avl_info ()); - set_demand (DEMAND_AVL, true); - set_demand (DEMAND_NONZERO_AVL, false); - return; - } - } - set_avl_info (info1.get_avl_info ()); - set_demand (DEMAND_NONZERO_AVL, info1.demand_p (DEMAND_NONZERO_AVL)); - } - else - { - set_avl_info (info2.get_avl_info ()); - set_demand (DEMAND_NONZERO_AVL, info2.demand_p (DEMAND_NONZERO_AVL)); - } - set_demand (DEMAND_AVL, - info1.demand_p (DEMAND_AVL) || info2.demand_p (DEMAND_AVL)); -} - -void -vector_insn_info::fuse_sew_lmul (const vector_insn_info &info1, - const vector_insn_info &info2) -{ - /* We need to fuse sew && lmul according to demand info: - - 1. GE_SEW. - 2. SEW. - 3. LMUL. - 4. RATIO. */ - if (same_sew_lmul_demand_p (info1.get_demands (), info2.get_demands ())) - { - set_demand (DEMAND_SEW, info2.demand_p (DEMAND_SEW)); - set_demand (DEMAND_LMUL, info2.demand_p (DEMAND_LMUL)); - set_demand (DEMAND_RATIO, info2.demand_p (DEMAND_RATIO)); - set_demand (DEMAND_GE_SEW, info2.demand_p (DEMAND_GE_SEW)); - set_sew (info2.get_sew ()); - set_vlmul (info2.get_vlmul ()); - set_ratio (info2.get_ratio ()); - return; - } - for (const auto &rule : fuse_rules) - { - if (rule.pair.match_cond_p (info1.get_demands (), info2.get_demands ())) - { - set_demand (DEMAND_SEW, rule.demand_sew_p); - set_demand (DEMAND_LMUL, rule.demand_lmul_p); - set_demand (DEMAND_RATIO, rule.demand_ratio_p); - set_demand (DEMAND_GE_SEW, rule.demand_ge_sew_p); - set_sew (rule.new_sew (info1, info2)); - set_vlmul (rule.new_vlmul (info1, info2)); - set_ratio (rule.new_ratio (info1, info2)); - return; - } - if (rule.pair.match_cond_p (info2.get_demands (), info1.get_demands ())) - { - set_demand (DEMAND_SEW, rule.demand_sew_p); - set_demand (DEMAND_LMUL, rule.demand_lmul_p); - set_demand (DEMAND_RATIO, rule.demand_ratio_p); - set_demand (DEMAND_GE_SEW, rule.demand_ge_sew_p); - set_sew (rule.new_sew (info2, info1)); - set_vlmul (rule.new_vlmul (info2, info1)); - set_ratio (rule.new_ratio (info2, info1)); - return; - } - } - gcc_unreachable (); -} + } -void -vector_insn_info::fuse_tail_policy (const vector_insn_info &info1, - const vector_insn_info &info2) -{ - if (info1.demand_p (DEMAND_TAIL_POLICY)) - { - set_ta (info1.get_ta ()); - demand (DEMAND_TAIL_POLICY); - } - else if (info2.demand_p (DEMAND_TAIL_POLICY)) - { - set_ta (info2.get_ta ()); - demand (DEMAND_TAIL_POLICY); - } - else - set_ta (get_default_ta ()); -} +public: + pre_vsetvl () + : m_avl_def_in (nullptr), m_avl_def_out (nullptr), + m_vsetvl_def_in (nullptr), m_vsetvl_def_out (nullptr), m_avloc (nullptr), + m_avin (nullptr), m_avout (nullptr), m_kill (nullptr), m_antloc (nullptr), + m_transp (nullptr), m_insert (nullptr), m_del (nullptr), m_edges (nullptr) + { + /* Initialization of RTL_SSA. 
*/ + calculate_dominance_info (CDI_DOMINATORS); + df_analyze (); + crtl->ssa = new function_info (cfun); + m_vector_block_infos.safe_grow_cleared (last_basic_block_for_fn (cfun)); + compute_probabilities (); + m_unknow_info.set_unknown (); + } + + void finish () + { + free_dominance_info (CDI_DOMINATORS); + if (crtl->ssa->perform_pending_updates ()) + cleanup_cfg (0); + delete crtl->ssa; + crtl->ssa = nullptr; + + if (m_avl_regs) + sbitmap_free (m_avl_regs); + if (m_reg_def_loc) + sbitmap_vector_free (m_reg_def_loc); + + if (m_avl_def_in) + sbitmap_vector_free (m_avl_def_in); + if (m_avl_def_out) + sbitmap_vector_free (m_avl_def_out); + + if (m_vsetvl_def_in) + sbitmap_vector_free (m_vsetvl_def_in); + if (m_vsetvl_def_out) + sbitmap_vector_free (m_vsetvl_def_out); + + if (m_avloc) + sbitmap_vector_free (m_avloc); + if (m_kill) + sbitmap_vector_free (m_kill); + if (m_antloc) + sbitmap_vector_free (m_antloc); + if (m_transp) + sbitmap_vector_free (m_transp); + if (m_insert) + sbitmap_vector_free (m_insert); + if (m_del) + sbitmap_vector_free (m_del); + if (m_avin) + sbitmap_vector_free (m_avin); + if (m_avout) + sbitmap_vector_free (m_avout); + + if (m_edges) + free_edge_list (m_edges); + } + + void compute_avl_def_data (); + void compute_vsetvl_def_data (); + void compute_lcm_local_properties (); + + void fuse_local_vsetvl_info (); + bool earliest_fuse_vsetvl_info (); + void pre_global_vsetvl_info (); + void emit_vsetvl (); + void cleaup (); + void remove_avl_operand (); + void remove_unused_dest_operand (); + + void dump (FILE *file, const char *title) const + { + fprintf (file, "\nVSETVL infos after %s\n\n", title); + for (const bb_info *bb : crtl->ssa->bbs ()) + { + const auto &block_info = m_vector_block_infos[bb->index ()]; + fprintf (file, " bb %d:\n", bb->index ()); + fprintf (file, " probability: "); + block_info.probability.dump (file); + fprintf (file, "\n"); + if (!block_info.empty_p ()) + { + fprintf (file, " Header vsetvl info:"); + block_info.get_entry_info ().dump (file, " "); + fprintf (file, " Footer vsetvl info:"); + block_info.get_exit_info ().dump (file, " "); + for (const auto &info : block_info.local_infos) + { + fprintf (file, + " insn %d vsetvl info:", info.get_insn ()->uid ()); + info.dump (file, " "); + } + } + } + } +}; void -vector_insn_info::fuse_mask_policy (const vector_insn_info &info1, - const vector_insn_info &info2) -{ - if (info1.demand_p (DEMAND_MASK_POLICY)) - { - set_ma (info1.get_ma ()); - demand (DEMAND_MASK_POLICY); - } - else if (info2.demand_p (DEMAND_MASK_POLICY)) - { - set_ma (info2.get_ma ()); - demand (DEMAND_MASK_POLICY); - } - else - set_ma (get_default_ma ()); -} - -vector_insn_info -vector_insn_info::local_merge (const vector_insn_info &merge_info) const -{ - if (!vsetvl_insn_p (get_insn ()->rtl ()) && *this != merge_info) - gcc_assert (this->compatible_p (merge_info) - && "Can't merge incompatible demanded infos"); - - vector_insn_info new_info; - new_info.set_valid (); - /* For local backward data flow, we always update INSN && AVL as the - latest INSN and AVL so that we can keep track status of each INSN. 
*/ - new_info.fuse_avl (merge_info, *this); - new_info.fuse_sew_lmul (*this, merge_info); - new_info.fuse_tail_policy (*this, merge_info); - new_info.fuse_mask_policy (*this, merge_info); - return new_info; -} - -vector_insn_info -vector_insn_info::global_merge (const vector_insn_info &merge_info, - unsigned int bb_index) const -{ - if (!vsetvl_insn_p (get_insn ()->rtl ()) && *this != merge_info) - gcc_assert (this->compatible_p (merge_info) - && "Can't merge incompatible demanded infos"); - - vector_insn_info new_info; - new_info.set_valid (); - - /* For global data flow, we should keep original INSN and AVL if they - valid since we should keep the life information of each block. - - For example: - bb 0 -> bb 1. - We should keep INSN && AVL of bb 1 since we will eventually emit - vsetvl instruction according to INSN and AVL of bb 1. */ - new_info.fuse_avl (*this, merge_info); - /* Recompute the AVL source whose block index is equal to BB_INDEX. */ - if (new_info.get_avl_source () - && new_info.get_avl_source ()->insn ()->is_phi () - && new_info.get_avl_source ()->bb ()->index () != bb_index) - { - hash_set<set_info *> sets - = get_all_sets (new_info.get_avl_source (), true, true, true); - new_info.set_avl_source (nullptr); - bool can_find_set_p = false; - set_info *first_set = nullptr; - for (set_info *set : sets) - { - if (!first_set) - first_set = set; - if (set->bb ()->index () == bb_index) - { - gcc_assert (!can_find_set_p); - new_info.set_avl_source (set); - can_find_set_p = true; - } - } - if (!can_find_set_p && sets.elements () == 1 - && first_set->insn ()->is_real ()) - new_info.set_avl_source (first_set); - } - - /* Make sure VLMAX AVL always has a set_info the get VL. */ - if (vlmax_avl_p (new_info.get_avl ())) - { - if (this->get_avl_source ()) - new_info.set_avl_source (this->get_avl_source ()); - else - { - gcc_assert (merge_info.get_avl_source ()); - new_info.set_avl_source (merge_info.get_avl_source ()); - } - } - - new_info.fuse_sew_lmul (*this, merge_info); - new_info.fuse_tail_policy (*this, merge_info); - new_info.fuse_mask_policy (*this, merge_info); - return new_info; -} - -/* Wrapper helps to return the AVL or VL operand for the - vector_insn_info. Return AVL if the AVL is not VLMAX. - Otherwise, return the VL operand. */ -rtx -vector_insn_info::get_avl_or_vl_reg (void) const -{ - gcc_assert (has_avl_reg ()); - if (!vlmax_avl_p (get_avl ())) - return get_avl (); - - rtx_insn *rinsn = get_insn ()->rtl (); - if (has_vl_op (rinsn) || vsetvl_insn_p (rinsn)) - { - rtx vl = ::get_vl (rinsn); - /* For VLMAX, we should make sure we get the - REG to emit 'vsetvl VL,zero' since the 'VL' - should be the REG according to RVV ISA. */ - if (REG_P (vl)) - return vl; - } - - /* We always has avl_source if it is VLMAX AVL. */ - gcc_assert (get_avl_source ()); - return get_avl_reg_rtx (); -} - -bool -vector_insn_info::update_fault_first_load_avl (insn_info *insn) -{ - // Update AVL to vl-output of the fault first load. 
- const insn_info *read_vl = get_forward_read_vl_insn (insn); - if (read_vl) - { - rtx vl = SET_DEST (PATTERN (read_vl->rtl ())); - def_info *def = find_access (read_vl->defs (), REGNO (vl)); - set_info *set = safe_dyn_cast<set_info *> (def); - set_avl_info (avl_info (vl, set)); - set_insn (insn); - return true; - } - return false; -} - -static const char * -vlmul_to_str (vlmul_type vlmul) +pre_vsetvl::compute_avl_def_data () { - switch (vlmul) - { - case LMUL_1: - return "m1"; - case LMUL_2: - return "m2"; - case LMUL_4: - return "m4"; - case LMUL_8: - return "m8"; - case LMUL_RESERVED: - return "INVALID LMUL"; - case LMUL_F8: - return "mf8"; - case LMUL_F4: - return "mf4"; - case LMUL_F2: - return "mf2"; - - default: - gcc_unreachable (); - } -} + if (bitmap_empty_p (m_avl_regs)) + return; -static const char * -policy_to_str (bool agnostic_p) -{ - return agnostic_p ? "agnostic" : "undisturbed"; -} + unsigned num_regs = GP_REG_LAST + 1; + unsigned num_bbs = last_basic_block_for_fn (cfun); -void -vector_insn_info::dump (FILE *file) const -{ - fprintf (file, "["); - if (uninit_p ()) - fprintf (file, "UNINITIALIZED,"); - else if (valid_p ()) - fprintf (file, "VALID,"); - else if (unknown_p ()) - fprintf (file, "UNKNOWN,"); - else if (empty_p ()) - fprintf (file, "EMPTY,"); - else - fprintf (file, "DIRTY,"); - - fprintf (file, "Demand field={%d(VL),", demand_p (DEMAND_AVL)); - fprintf (file, "%d(DEMAND_NONZERO_AVL),", demand_p (DEMAND_NONZERO_AVL)); - fprintf (file, "%d(SEW),", demand_p (DEMAND_SEW)); - fprintf (file, "%d(DEMAND_GE_SEW),", demand_p (DEMAND_GE_SEW)); - fprintf (file, "%d(LMUL),", demand_p (DEMAND_LMUL)); - fprintf (file, "%d(RATIO),", demand_p (DEMAND_RATIO)); - fprintf (file, "%d(TAIL_POLICY),", demand_p (DEMAND_TAIL_POLICY)); - fprintf (file, "%d(MASK_POLICY)}\n", demand_p (DEMAND_MASK_POLICY)); - - fprintf (file, "AVL="); - print_rtl_single (file, get_avl ()); - fprintf (file, "SEW=%d,", get_sew ()); - fprintf (file, "VLMUL=%s,", vlmul_to_str (get_vlmul ())); - fprintf (file, "RATIO=%d,", get_ratio ()); - fprintf (file, "TAIL_POLICY=%s,", policy_to_str (get_ta ())); - fprintf (file, "MASK_POLICY=%s", policy_to_str (get_ma ())); - fprintf (file, "]\n"); - - if (valid_p ()) + sbitmap *avl_def_loc_temp = sbitmap_vector_alloc (num_bbs, num_regs); + for (const bb_info *bb : crtl->ssa->bbs ()) { - if (get_insn ()) - { - fprintf (file, "The real INSN="); - print_rtl_single (file, get_insn ()->rtl ()); - } - } -} + bitmap_and (avl_def_loc_temp[bb->index ()], m_avl_regs, + m_reg_def_loc[bb->index ()]); -vector_infos_manager::vector_infos_manager () -{ - vector_edge_list = nullptr; - vector_kill = nullptr; - vector_del = nullptr; - vector_insert = nullptr; - vector_antic = nullptr; - vector_transp = nullptr; - vector_comp = nullptr; - vector_avin = nullptr; - vector_avout = nullptr; - vector_antin = nullptr; - vector_antout = nullptr; - vector_earliest = nullptr; - vector_insn_infos.safe_grow_cleared (get_max_uid ()); - vector_block_infos.safe_grow_cleared (last_basic_block_for_fn (cfun)); - if (!optimize) - { - basic_block cfg_bb; - rtx_insn *rinsn; - FOR_ALL_BB_FN (cfg_bb, cfun) + vsetvl_block_info &block_info = get_block_info (bb); + if (block_info.has_info ()) { - vector_block_infos[cfg_bb->index].local_dem = vector_insn_info (); - vector_block_infos[cfg_bb->index].reaching_out = vector_insn_info (); - FOR_BB_INSNS (cfg_bb, rinsn) - vector_insn_infos[INSN_UID (rinsn)].parse_insn (rinsn); + vsetvl_info &footer_info = block_info.get_exit_info (); + gcc_assert (footer_info.valid_p 
()); + if (footer_info.has_vl ()) + bitmap_set_bit (avl_def_loc_temp[bb->index ()], + REGNO (footer_info.get_vl ())); } } - else - { - for (const bb_info *bb : crtl->ssa->bbs ()) - { - vector_block_infos[bb->index ()].local_dem = vector_insn_info (); - vector_block_infos[bb->index ()].reaching_out = vector_insn_info (); - for (insn_info *insn : bb->real_insns ()) - vector_insn_infos[insn->uid ()].parse_insn (insn); - vector_block_infos[bb->index ()].probability = profile_probability (); - } - } -} - -void -vector_infos_manager::create_expr (vector_insn_info &info) -{ - for (size_t i = 0; i < vector_exprs.length (); i++) - if (*vector_exprs[i] == info) - return; - vector_exprs.safe_push (&info); -} - -size_t -vector_infos_manager::get_expr_id (const vector_insn_info &info) const -{ - for (size_t i = 0; i < vector_exprs.length (); i++) - if (*vector_exprs[i] == info) - return i; - gcc_unreachable (); -} - -auto_vec<size_t> -vector_infos_manager::get_all_available_exprs ( - const vector_insn_info &info) const -{ - auto_vec<size_t> available_list; - for (size_t i = 0; i < vector_exprs.length (); i++) - if (info.available_p (*vector_exprs[i])) - available_list.safe_push (i); - return available_list; -} - -bool -vector_infos_manager::all_same_ratio_p (sbitmap bitdata) const -{ - if (bitmap_empty_p (bitdata)) - return false; - - int ratio = -1; - unsigned int bb_index; - sbitmap_iterator sbi; - - EXECUTE_IF_SET_IN_BITMAP (bitdata, 0, bb_index, sbi) - { - if (ratio == -1) - ratio = vector_exprs[bb_index]->get_ratio (); - else if (vector_exprs[bb_index]->get_ratio () != ratio) - return false; - } - return true; -} -/* Return TRUE if the incoming vector configuration state - to CFG_BB is compatible with the vector configuration - state in CFG_BB, FALSE otherwise. 
*/ -bool -vector_infos_manager::all_avail_in_compatible_p (const basic_block cfg_bb) const -{ - const auto &info = vector_block_infos[cfg_bb->index].local_dem; - sbitmap avin = vector_avin[cfg_bb->index]; - unsigned int bb_index; - sbitmap_iterator sbi; - EXECUTE_IF_SET_IN_BITMAP (avin, 0, bb_index, sbi) - { - const auto &avin_info - = static_cast<const vl_vtype_info &> (*vector_exprs[bb_index]); - if (!info.compatible_p (avin_info)) - return false; - } - return true; -} + if (m_avl_def_in) + sbitmap_vector_free (m_avl_def_in); + if (m_avl_def_out) + sbitmap_vector_free (m_avl_def_out); -bool -vector_infos_manager::all_same_avl_p (const basic_block cfg_bb, - sbitmap bitdata) const -{ - if (bitmap_empty_p (bitdata)) - return false; + unsigned num_exprs = num_bbs * num_regs; + sbitmap *avl_def_loc = sbitmap_vector_alloc (num_bbs, num_exprs); + sbitmap *m_kill = sbitmap_vector_alloc (num_bbs, num_exprs); + m_avl_def_in = sbitmap_vector_alloc (num_bbs, num_exprs); + m_avl_def_out = sbitmap_vector_alloc (num_bbs, num_exprs); - const auto &block_info = vector_block_infos[cfg_bb->index]; - if (!block_info.local_dem.demand_p (DEMAND_AVL)) - return true; + bitmap_vector_clear (avl_def_loc, num_bbs); + bitmap_vector_clear (m_kill, num_bbs); + bitmap_vector_clear (m_avl_def_out, num_bbs); - avl_info avl = block_info.local_dem.get_avl_info (); - unsigned int bb_index; + unsigned regno; sbitmap_iterator sbi; + for (const bb_info *bb : crtl->ssa->bbs ()) + EXECUTE_IF_SET_IN_BITMAP (avl_def_loc_temp[bb->index ()], 0, regno, sbi) + { + bitmap_set_bit (avl_def_loc[bb->index ()], + get_expr_id (bb->index (), regno, num_bbs)); + bitmap_set_range (m_kill[bb->index ()], regno * num_bbs, num_bbs); + } - EXECUTE_IF_SET_IN_BITMAP (bitdata, 0, bb_index, sbi) - { - if (vector_exprs[bb_index]->get_avl_info () != avl) - return false; - } - return true; -} - -bool -vector_infos_manager::earliest_fusion_worthwhile_p ( - const basic_block cfg_bb) const -{ - edge e; - edge_iterator ei; - profile_probability prob = profile_probability::uninitialized (); - FOR_EACH_EDGE (e, ei, cfg_bb->succs) - { - if (prob == profile_probability::uninitialized ()) - prob = vector_block_infos[e->dest->index].probability; - else if (prob == vector_block_infos[e->dest->index].probability) - continue; - else - /* We pick the highest probability among those incompatible VSETVL - infos. When all incompatible VSTEVL infos have same probability, we - don't pick any of them. 
*/ - return true; - } - return false; -} - -bool -vector_infos_manager::vsetvl_dominated_by_all_preds_p ( - const basic_block cfg_bb, const vector_insn_info &info) const -{ - edge e; - edge_iterator ei; - FOR_EACH_EDGE (e, ei, cfg_bb->preds) - { - const auto &reaching_out = vector_block_infos[e->src->index].reaching_out; - if (e->src->index == cfg_bb->index && reaching_out.compatible_p (info)) - continue; - if (!vsetvl_dominated_by_p (e->src, info, reaching_out, false)) - return false; - } - return true; -} - -size_t -vector_infos_manager::expr_set_num (sbitmap bitdata) const -{ - size_t count = 0; - for (size_t i = 0; i < vector_exprs.length (); i++) - if (bitmap_bit_p (bitdata, i)) - count++; - return count; -} - -void -vector_infos_manager::release (void) -{ - if (!vector_insn_infos.is_empty ()) - vector_insn_infos.release (); - if (!vector_block_infos.is_empty ()) - vector_block_infos.release (); - if (!vector_exprs.is_empty ()) - vector_exprs.release (); - - gcc_assert (to_refine_vsetvls.is_empty ()); - gcc_assert (to_delete_vsetvls.is_empty ()); - if (optimize > 0) - free_bitmap_vectors (); -} - -void -vector_infos_manager::create_bitmap_vectors (void) -{ - /* Create the bitmap vectors. */ - vector_antic = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), - vector_exprs.length ()); - vector_transp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), - vector_exprs.length ()); - vector_comp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), - vector_exprs.length ()); - vector_avin = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), - vector_exprs.length ()); - vector_avout = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), - vector_exprs.length ()); - vector_kill = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), - vector_exprs.length ()); - vector_antin = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), - vector_exprs.length ()); - vector_antout = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), - vector_exprs.length ()); - - bitmap_vector_ones (vector_transp, last_basic_block_for_fn (cfun)); - bitmap_vector_clear (vector_antic, last_basic_block_for_fn (cfun)); - bitmap_vector_clear (vector_comp, last_basic_block_for_fn (cfun)); - vector_edge_list = create_edge_list (); - vector_earliest = sbitmap_vector_alloc (NUM_EDGES (vector_edge_list), - vector_exprs.length ()); -} - -void -vector_infos_manager::free_bitmap_vectors (void) -{ - /* Finished. Free up all the things we've allocated. 
*/ - free_edge_list (vector_edge_list); - if (vector_del) - sbitmap_vector_free (vector_del); - if (vector_insert) - sbitmap_vector_free (vector_insert); - if (vector_kill) - sbitmap_vector_free (vector_kill); - if (vector_antic) - sbitmap_vector_free (vector_antic); - if (vector_transp) - sbitmap_vector_free (vector_transp); - if (vector_comp) - sbitmap_vector_free (vector_comp); - if (vector_avin) - sbitmap_vector_free (vector_avin); - if (vector_avout) - sbitmap_vector_free (vector_avout); - if (vector_antin) - sbitmap_vector_free (vector_antin); - if (vector_antout) - sbitmap_vector_free (vector_antout); - if (vector_earliest) - sbitmap_vector_free (vector_earliest); - - vector_edge_list = nullptr; - vector_kill = nullptr; - vector_del = nullptr; - vector_insert = nullptr; - vector_antic = nullptr; - vector_transp = nullptr; - vector_comp = nullptr; - vector_avin = nullptr; - vector_avout = nullptr; - vector_antin = nullptr; - vector_antout = nullptr; - vector_earliest = nullptr; -} - -void -vector_infos_manager::dump (FILE *file) const -{ - basic_block cfg_bb; - rtx_insn *rinsn; - - fprintf (file, "\n"); - FOR_ALL_BB_FN (cfg_bb, cfun) - { - fprintf (file, "Local vector info of <bb %d>:\n", cfg_bb->index); - fprintf (file, "<HEADER>="); - vector_block_infos[cfg_bb->index].local_dem.dump (file); - FOR_BB_INSNS (cfg_bb, rinsn) - { - if (!NONDEBUG_INSN_P (rinsn) || !has_vtype_op (rinsn)) - continue; - fprintf (file, "<insn %d>=", INSN_UID (rinsn)); - const auto &info = vector_insn_infos[INSN_UID (rinsn)]; - info.dump (file); - } - fprintf (file, "<FOOTER>="); - vector_block_infos[cfg_bb->index].reaching_out.dump (file); - fprintf (file, "<Probability>="); - vector_block_infos[cfg_bb->index].probability.dump (file); - fprintf (file, "\n\n"); - } - - fprintf (file, "\n"); - FOR_ALL_BB_FN (cfg_bb, cfun) - { - fprintf (file, "Local properties of <bb %d>:\n", cfg_bb->index); - - fprintf (file, "<ANTLOC>="); - if (vector_antic == nullptr) - fprintf (file, "(nil)\n"); - else - dump_bitmap_file (file, vector_antic[cfg_bb->index]); - - fprintf (file, "<AVLOC>="); - if (vector_comp == nullptr) - fprintf (file, "(nil)\n"); - else - dump_bitmap_file (file, vector_comp[cfg_bb->index]); - - fprintf (file, "<TRANSP>="); - if (vector_transp == nullptr) - fprintf (file, "(nil)\n"); - else - dump_bitmap_file (file, vector_transp[cfg_bb->index]); - - fprintf (file, "<KILL>="); - if (vector_kill == nullptr) - fprintf (file, "(nil)\n"); - else - dump_bitmap_file (file, vector_kill[cfg_bb->index]); - - fprintf (file, "<ANTIN>="); - if (vector_antin == nullptr) - fprintf (file, "(nil)\n"); - else - dump_bitmap_file (file, vector_antin[cfg_bb->index]); - - fprintf (file, "<ANTOUT>="); - if (vector_antout == nullptr) - fprintf (file, "(nil)\n"); - else - dump_bitmap_file (file, vector_antout[cfg_bb->index]); - } + basic_block entry = ENTRY_BLOCK_PTR_FOR_FN (cfun); + EXECUTE_IF_SET_IN_BITMAP (m_avl_regs, 0, regno, sbi) + bitmap_set_bit (m_avl_def_out[entry->index], + get_expr_id (entry->index, regno, num_bbs)); - fprintf (file, "\n"); - FOR_ALL_BB_FN (cfg_bb, cfun) - { - fprintf (file, "Global LCM (Lazy code motion) result of <bb %d>:\n", - cfg_bb->index); - - fprintf (file, "<AVIN>="); - if (vector_avin == nullptr) - fprintf (file, "(nil)\n"); - else - dump_bitmap_file (file, vector_avin[cfg_bb->index]); - - fprintf (file, "<AVOUT>="); - if (vector_avout == nullptr) - fprintf (file, "(nil)\n"); - else - dump_bitmap_file (file, vector_avout[cfg_bb->index]); - - fprintf (file, "<DELETE>="); - if (vector_del == 
nullptr) - fprintf (file, "(nil)\n"); - else - dump_bitmap_file (file, vector_del[cfg_bb->index]); - } + compute_reaching_defintion (avl_def_loc, m_kill, m_avl_def_in, m_avl_def_out); - for (size_t i = 0; i < vector_exprs.length (); i++) + if (dump_file && (dump_flags & TDF_DETAILS)) { - for (int ed = 0; ed < NUM_EDGES (vector_edge_list); ed++) + fprintf (dump_file, + " Compute avl reaching defition data (num_bbs %d, num_regs " + "%d):\n\n", + num_bbs, num_regs); + fprintf (dump_file, " avl_regs: "); + dump_bitmap_file (dump_file, m_avl_regs); + fprintf (dump_file, "\n bitmap data:\n"); + for (const bb_info *bb : crtl->ssa->bbs ()) { - edge eg = INDEX_EDGE (vector_edge_list, ed); - if (vector_insert) + unsigned int i = bb->index (); + fprintf (dump_file, " BB %u:\n", i); + fprintf (dump_file, " avl_def_loc:"); + unsigned expr_id; + sbitmap_iterator sbi; + EXECUTE_IF_SET_IN_BITMAP (avl_def_loc[i], 0, expr_id, sbi) { - if (bitmap_bit_p (vector_insert[ed], i)) - { - fprintf (file, - "\nGlobal LCM (Lazy code motion) INSERT info:\n"); - fprintf (file, - "INSERT edge %d from <bb %d> to <bb %d> for VSETVL " - "expr[%ld]\n", - ed, eg->src->index, eg->dest->index, i); - } + fprintf (dump_file, " (r%u,bb%u)", get_regno (expr_id, num_bbs), + get_bb_index (expr_id, num_bbs)); } - else + fprintf (dump_file, "\n kill:"); + EXECUTE_IF_SET_IN_BITMAP (m_kill[i], 0, expr_id, sbi) { - if (bitmap_bit_p (vector_earliest[ed], i)) - { - fprintf (file, - "\nGlobal LCM (Lazy code motion) EARLIEST info:\n"); - fprintf ( - file, - "EARLIEST edge %d from <bb %d> to <bb %d> for VSETVL " - "expr[%ld]\n", - ed, eg->src->index, eg->dest->index, i); - } + fprintf (dump_file, " (r%u,bb%u)", get_regno (expr_id, num_bbs), + get_bb_index (expr_id, num_bbs)); } + fprintf (dump_file, "\n avl_def_in:"); + EXECUTE_IF_SET_IN_BITMAP (m_avl_def_in[i], 0, expr_id, sbi) + { + fprintf (dump_file, " (r%u,bb%u)", get_regno (expr_id, num_bbs), + get_bb_index (expr_id, num_bbs)); + } + fprintf (dump_file, "\n avl_def_out:"); + EXECUTE_IF_SET_IN_BITMAP (m_avl_def_out[i], 0, expr_id, sbi) + { + fprintf (dump_file, " (r%u,bb%u)", get_regno (expr_id, num_bbs), + get_bb_index (expr_id, num_bbs)); + } + fprintf (dump_file, "\n"); } } -} - -const pass_data pass_data_vsetvl = { - RTL_PASS, /* type */ - "vsetvl", /* name */ - OPTGROUP_NONE, /* optinfo_flags */ - TV_NONE, /* tv_id */ - 0, /* properties_required */ - 0, /* properties_provided */ - 0, /* properties_destroyed */ - 0, /* todo_flags_start */ - 0, /* todo_flags_finish */ -}; - -class pass_vsetvl : public rtl_opt_pass -{ -private: - vector_infos_manager *m_vector_manager; - - const vector_insn_info &get_vector_info (const rtx_insn *) const; - const vector_insn_info &get_vector_info (const insn_info *) const; - const vector_block_info &get_block_info (const basic_block) const; - const vector_block_info &get_block_info (const bb_info *) const; - vector_block_info &get_block_info (const basic_block); - vector_block_info &get_block_info (const bb_info *); - void update_vector_info (const insn_info *, const vector_insn_info &); - void update_block_info (int, profile_probability, const vector_insn_info &); - - void simple_vsetvl (void) const; - void lazy_vsetvl (void); - - /* Phase 1. */ - void compute_local_backward_infos (const bb_info *); - - /* Phase 2. 
*/ - bool need_vsetvl (const vector_insn_info &, const vector_insn_info &) const; - void transfer_before (vector_insn_info &, insn_info *) const; - void transfer_after (vector_insn_info &, insn_info *) const; - void emit_local_forward_vsetvls (const bb_info *); - - /* Phase 3. */ - bool earliest_fusion (void); - void vsetvl_fusion (void); - - /* Phase 4. */ - void prune_expressions (void); - void compute_local_properties (void); - bool can_refine_vsetvl_p (const basic_block, const vector_insn_info &) const; - void refine_vsetvls (void) const; - void cleanup_vsetvls (void); - bool commit_vsetvls (void); - void pre_vsetvl (void); - - /* Phase 5. */ - rtx_insn *get_vsetvl_at_end (const bb_info *, vector_insn_info *) const; - void local_eliminate_vsetvl_insn (const bb_info *) const; - bool global_eliminate_vsetvl_insn (const bb_info *) const; - void ssa_post_optimization (void) const; - - /* Phase 6. */ - void df_post_optimization (void) const; - - void init (void); - void done (void); - void compute_probabilities (void); - -public: - pass_vsetvl (gcc::context *ctxt) : rtl_opt_pass (pass_data_vsetvl, ctxt) {} - - /* opt_pass methods: */ - virtual bool gate (function *) final override { return TARGET_VECTOR; } - virtual unsigned int execute (function *) final override; -}; // class pass_vsetvl - -const vector_insn_info & -pass_vsetvl::get_vector_info (const rtx_insn *i) const -{ - return m_vector_manager->vector_insn_infos[INSN_UID (i)]; -} - -const vector_insn_info & -pass_vsetvl::get_vector_info (const insn_info *i) const -{ - return m_vector_manager->vector_insn_infos[i->uid ()]; -} - -const vector_block_info & -pass_vsetvl::get_block_info (const basic_block bb) const -{ - return m_vector_manager->vector_block_infos[bb->index]; -} -const vector_block_info & -pass_vsetvl::get_block_info (const bb_info *bb) const -{ - return m_vector_manager->vector_block_infos[bb->index ()]; -} + sbitmap_vector_free (avl_def_loc); + sbitmap_vector_free (m_kill); + sbitmap_vector_free (avl_def_loc_temp); -vector_block_info & -pass_vsetvl::get_block_info (const basic_block bb) -{ - return m_vector_manager->vector_block_infos[bb->index]; -} - -vector_block_info & -pass_vsetvl::get_block_info (const bb_info *bb) -{ - return m_vector_manager->vector_block_infos[bb->index ()]; -} - -void -pass_vsetvl::update_vector_info (const insn_info *i, - const vector_insn_info &new_info) -{ - m_vector_manager->vector_insn_infos[i->uid ()] = new_info; -} - -void -pass_vsetvl::update_block_info (int index, profile_probability prob, - const vector_insn_info &new_info) -{ - m_vector_manager->vector_block_infos[index].probability = prob; - if (m_vector_manager->vector_block_infos[index].local_dem - == m_vector_manager->vector_block_infos[index].reaching_out) - m_vector_manager->vector_block_infos[index].local_dem = new_info; - m_vector_manager->vector_block_infos[index].reaching_out = new_info; + m_dem.set_avl_in_out_data (m_avl_def_in, m_avl_def_out); } -/* Simple m_vsetvl_insert vsetvl for optimize == 0. 
*/ void -pass_vsetvl::simple_vsetvl (void) const +pre_vsetvl::compute_vsetvl_def_data () { - if (dump_file) - fprintf (dump_file, - "\nEntering Simple VSETVL PASS and Handling %d basic blocks for " - "function:%s\n", - n_basic_blocks_for_fn (cfun), function_name (cfun)); - - basic_block cfg_bb; - rtx_insn *rinsn; - FOR_ALL_BB_FN (cfg_bb, cfun) + m_vsetvl_def_exprs.truncate (0); + add_expr (m_vsetvl_def_exprs, m_unknow_info); + for (const bb_info *bb : crtl->ssa->bbs ()) { - FOR_BB_INSNS (cfg_bb, rinsn) - { - if (!NONDEBUG_INSN_P (rinsn)) - continue; - if (has_vtype_op (rinsn)) - { - const auto info = get_vector_info (rinsn); - emit_vsetvl_insn (VSETVL_DISCARD_RESULT, EMIT_BEFORE, info, - NULL_RTX, rinsn); - } - } + vsetvl_block_info &block_info = get_block_info (bb); + if (block_info.empty_p ()) + continue; + vsetvl_info &footer_info = block_info.get_exit_info (); + gcc_assert (footer_info.valid_p () || footer_info.unknown_p ()); + add_expr (m_vsetvl_def_exprs, footer_info); } -} -/* Compute demanded information by backward data-flow analysis. */ -void -pass_vsetvl::compute_local_backward_infos (const bb_info *bb) -{ - vector_insn_info change; - change.set_empty (); + if (m_vsetvl_def_in) + sbitmap_vector_free (m_vsetvl_def_in); + if (m_vsetvl_def_out) + sbitmap_vector_free (m_vsetvl_def_out); - auto &block_info = m_vector_manager->vector_block_infos[bb->index ()]; - block_info.reaching_out = change; + sbitmap *def_loc = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), + m_vsetvl_def_exprs.length ()); + sbitmap *m_kill = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), + m_vsetvl_def_exprs.length ()); - for (insn_info *insn : bb->reverse_real_nondebug_insns ()) + m_vsetvl_def_in = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), + m_vsetvl_def_exprs.length ()); + m_vsetvl_def_out = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), + m_vsetvl_def_exprs.length ()); + + bitmap_vector_clear (def_loc, last_basic_block_for_fn (cfun)); + bitmap_vector_clear (m_kill, last_basic_block_for_fn (cfun)); + bitmap_vector_clear (m_vsetvl_def_out, last_basic_block_for_fn (cfun)); + + for (const bb_info *bb : crtl->ssa->bbs ()) { - auto &info = get_vector_info (insn); - - if (info.uninit_p ()) - /* If it is uninitialized, propagate it directly. */ - update_vector_info (insn, change); - else if (info.unknown_p ()) - change = info; - else + vsetvl_block_info &block_info = get_block_info (bb); + if (block_info.empty_p ()) { - gcc_assert (info.valid_p () && "Unexpected Invalid demanded info"); - if (change.valid_p ()) + for (unsigned i = 0; i < m_vsetvl_def_exprs.length (); i += 1) { - if (!(propagate_avl_across_demands_p (change, info) - && !reg_available_p (insn, change)) - && change.compatible_p (info)) - { - update_vector_info (insn, change.local_merge (info)); - /* Fix PR109399, we should update user vsetvl instruction - if there is a change in demand fusion. 
*/ - if (vsetvl_insn_p (insn->rtl ())) - change_vsetvl_insn (insn, info); - } + const vsetvl_info &info = *m_vsetvl_def_exprs[i]; + if (!info.has_nonvlmax_reg_avl ()) + continue; + unsigned int regno; + sbitmap_iterator sbi; + EXECUTE_IF_SET_IN_BITMAP (m_reg_def_loc[bb->index ()], 0, regno, + sbi) + if (regno == REGNO (info.get_avl ())) + { + bitmap_set_bit (m_kill[bb->index ()], i); + bitmap_set_bit (def_loc[bb->index ()], + get_expr_index (m_vsetvl_def_exprs, + m_unknow_info)); + } } - change = info; + continue; } - } - - block_info.local_dem = change; - if (block_info.local_dem.empty_p ()) - block_info.reaching_out = block_info.local_dem; -} - -/* Return true if a dem_info is required to transition from curr_info to - require before INSN. */ -bool -pass_vsetvl::need_vsetvl (const vector_insn_info &require, - const vector_insn_info &curr_info) const -{ - if (!curr_info.valid_p () || curr_info.unknown_p () || curr_info.uninit_p ()) - return true; - - if (require.compatible_p (static_cast<const vl_vtype_info &> (curr_info))) - return false; - - return true; -} -/* Given an incoming state reaching INSN, modifies that state so that it is - minimally compatible with INSN. The resulting state is guaranteed to be - semantically legal for INSN, but may not be the state requested by INSN. */ -void -pass_vsetvl::transfer_before (vector_insn_info &info, insn_info *insn) const -{ - if (!has_vtype_op (insn->rtl ())) - return; - - const vector_insn_info require = get_vector_info (insn); - if (info.valid_p () && !need_vsetvl (require, info)) - return; - info = require; -} - -/* Given a state with which we evaluated insn (see transfer_before above for why - this might be different that the state insn requested), modify the state to - reflect the changes insn might make. */ -void -pass_vsetvl::transfer_after (vector_insn_info &info, insn_info *insn) const -{ - if (vector_config_insn_p (insn->rtl ())) - { - info = get_vector_info (insn); - return; + vsetvl_info &footer_info = block_info.get_exit_info (); + bitmap_ones (m_kill[bb->index ()]); + bitmap_set_bit (def_loc[bb->index ()], + get_expr_index (m_vsetvl_def_exprs, footer_info)); } - if (fault_first_load_p (insn->rtl ()) - && info.update_fault_first_load_avl (insn)) - return; - - /* If this is something that updates VL/VTYPE that we don't know about, set - the state to unknown. */ - if (insn->is_call () || insn->is_asm () - || find_access (insn->defs (), VL_REGNUM) - || find_access (insn->defs (), VTYPE_REGNUM)) - info = vector_insn_info::get_unknown (); -} + /* Set the def_out of the ENTRY basic block to m_unknow_info expr. */ + basic_block entry = ENTRY_BLOCK_PTR_FOR_FN (cfun); + bitmap_set_bit (m_vsetvl_def_out[entry->index], + get_expr_index (m_vsetvl_def_exprs, m_unknow_info)); -/* Emit vsetvl within each block by forward data-flow analysis. 
*/ -void -pass_vsetvl::emit_local_forward_vsetvls (const bb_info *bb) -{ - auto &block_info = m_vector_manager->vector_block_infos[bb->index ()]; - if (block_info.local_dem.empty_p ()) - return; + compute_reaching_defintion (def_loc, m_kill, m_vsetvl_def_in, + m_vsetvl_def_out); - vector_insn_info curr_info; - for (insn_info *insn : bb->real_nondebug_insns ()) + if (dump_file && (dump_flags & TDF_DETAILS)) { - const vector_insn_info prev_info = curr_info; - enum vsetvl_type type = NUM_VSETVL_TYPE; - transfer_before (curr_info, insn); - - if (has_vtype_op (insn->rtl ())) + fprintf (dump_file, + "\n Compute vsetvl info reaching defition data:\n\n"); + fprintf (dump_file, " Expression List (%d):\n", + m_vsetvl_def_exprs.length ()); + for (unsigned i = 0; i < m_vsetvl_def_exprs.length (); i++) { - if (static_cast<const vl_vtype_info &> (prev_info) - != static_cast<const vl_vtype_info &> (curr_info)) - { - const auto require = get_vector_info (insn); - if (!require.compatible_p ( - static_cast<const vl_vtype_info &> (prev_info))) - type = insert_vsetvl (EMIT_BEFORE, insn->rtl (), require, - prev_info); - } + const auto &info = *m_vsetvl_def_exprs[i]; + fprintf (dump_file, " Expr[%u]: ", i); + info.dump (dump_file, " "); } - - /* Fix the issue of following sequence: - vsetivli zero, 5 - .... - vsetvli zero, zero - vmv.x.s (demand AVL = 8). - .... - incorrect: vsetvli zero, zero ===> Since the curr_info is AVL = 8. - correct: vsetivli zero, 8 - vadd (demand AVL = 8). */ - if (type == VSETVL_VTYPE_CHANGE_ONLY) + fprintf (dump_file, "\n bitmap data:\n"); + for (const bb_info *bb : crtl->ssa->bbs ()) { - /* Update the curr_info to be real correct AVL. */ - curr_info.set_avl_info (prev_info.get_avl_info ()); + unsigned int i = bb->index (); + fprintf (dump_file, " BB %u:\n", i); + fprintf (dump_file, " def_loc: "); + dump_bitmap_file (dump_file, def_loc[i]); + fprintf (dump_file, " kill: "); + dump_bitmap_file (dump_file, m_kill[i]); + fprintf (dump_file, " vsetvl_def_in: "); + dump_bitmap_file (dump_file, m_vsetvl_def_in[i]); + fprintf (dump_file, " vsetvl_def_out: "); + dump_bitmap_file (dump_file, m_vsetvl_def_out[i]); } - transfer_after (curr_info, insn); } - block_info.reaching_out = curr_info; -} - -/* Assemble the candidates expressions for LCM. 
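The def_loc/kill vectors built above feed compute_reaching_defintion, which is the textbook forward reaching-definitions problem over the set of block-exit vsetvl expressions: a non-empty block generates its exit info and kills everything else, while an otherwise empty block kills any expression whose AVL register it redefines and generates the "unknown" expression instead. As a minimal, self-contained illustration of the propagation being requested here (all names and types below are placeholders for the sketch, not GCC's sbitmap/RTL-SSA interface):

#include <bitset>
#include <cstddef>
#include <vector>

// Illustrative stand-ins only: N_EXPRS, block_data and the plain
// fixed-point loop model the GEN/KILL propagation that the pass asks
// compute_reaching_defintion to solve.
constexpr std::size_t N_EXPRS = 8;
using expr_set = std::bitset<N_EXPRS>;

struct block_data
{
  std::vector<int> preds;   // predecessor block indices
  expr_set gen, kill;       // expressions generated / clobbered here
  expr_set in, out;         // fixed-point solution
};

void
solve_reaching_defs (std::vector<block_data> &bbs)
{
  bool changed = true;
  while (changed)
    {
      changed = false;
      for (block_data &bb : bbs)
	{
	  expr_set in;
	  for (int p : bb.preds)
	    in |= bbs[p].out;			   // IN = union of pred OUTs
	  expr_set out = bb.gen | (in & ~bb.kill); // OUT = GEN | (IN & ~KILL)
	  if (in != bb.in || out != bb.out)
	    {
	      bb.in = in;
	      bb.out = out;
	      changed = true;
	    }
	}
    }
}

The fixed point of IN/OUT is what m_vsetvl_def_in and m_vsetvl_def_out hold afterwards.
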
*/ -void -pass_vsetvl::prune_expressions (void) -{ for (const bb_info *bb : crtl->ssa->bbs ()) { - if (m_vector_manager->vector_block_infos[bb->index ()] - .local_dem.valid_or_dirty_p ()) - m_vector_manager->create_expr ( - m_vector_manager->vector_block_infos[bb->index ()].local_dem); - if (m_vector_manager->vector_block_infos[bb->index ()] - .reaching_out.valid_or_dirty_p ()) - m_vector_manager->create_expr ( - m_vector_manager->vector_block_infos[bb->index ()].reaching_out); - } + vsetvl_block_info &block_info = get_block_info (bb); + if (block_info.empty_p ()) + continue; + vsetvl_info &curr_info = block_info.get_entry_info (); + if (!curr_info.valid_p ()) + continue; - if (dump_file) - { - fprintf (dump_file, "\nThe total VSETVL expression num = %d\n", - m_vector_manager->vector_exprs.length ()); - fprintf (dump_file, "Expression List:\n"); - for (size_t i = 0; i < m_vector_manager->vector_exprs.length (); i++) + unsigned int expr_index; + sbitmap_iterator sbi; + gcc_assert ( + !bitmap_empty_p (m_vsetvl_def_in[curr_info.get_bb ()->index ()])); + bool full_available = true; + EXECUTE_IF_SET_IN_BITMAP (m_vsetvl_def_in[bb->index ()], 0, expr_index, + sbi) { - fprintf (dump_file, "Expr[%ld]:\n", i); - m_vector_manager->vector_exprs[i]->dump (dump_file); - fprintf (dump_file, "\n"); + vsetvl_info &prev_info = *m_vsetvl_def_exprs[expr_index]; + if (!prev_info.valid_p () + || !m_dem.available_p (prev_info, curr_info)) + { + full_available = false; + break; + } } + block_info.full_available = full_available; } + + sbitmap_vector_free (def_loc); + sbitmap_vector_free (m_kill); } /* Compute the local properties of each recorded expression. @@ -3146,8 +2592,46 @@ pass_vsetvl::prune_expressions (void) least once and expression would contain the same value if the computation was moved to the beginning of the block. */ void -pass_vsetvl::compute_local_properties (void) +pre_vsetvl::compute_lcm_local_properties () { + m_exprs.truncate (0); + for (const bb_info *bb : crtl->ssa->bbs ()) + { + vsetvl_block_info &block_info = get_block_info (bb); + if (block_info.empty_p ()) + continue; + vsetvl_info &header_info = block_info.get_entry_info (); + vsetvl_info &footer_info = block_info.get_exit_info (); + gcc_assert (footer_info.valid_p () || footer_info.unknown_p ()); + add_expr (m_exprs, header_info); + add_expr (m_exprs, footer_info); + } + + int num_exprs = m_exprs.length (); + if (m_avloc) + sbitmap_vector_free (m_avloc); + if (m_kill) + sbitmap_vector_free (m_kill); + if (m_antloc) + sbitmap_vector_free (m_antloc); + if (m_transp) + sbitmap_vector_free (m_transp); + if (m_avin) + sbitmap_vector_free (m_avin); + if (m_avout) + sbitmap_vector_free (m_avout); + + m_avloc = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), num_exprs); + m_kill = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), num_exprs); + m_antloc = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), num_exprs); + m_transp = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), num_exprs); + m_avin = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), num_exprs); + m_avout = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), num_exprs); + + bitmap_vector_clear (m_avloc, last_basic_block_for_fn (cfun)); + bitmap_vector_clear (m_antloc, last_basic_block_for_fn (cfun)); + bitmap_vector_clear (m_transp, last_basic_block_for_fn (cfun)); + /* - If T is locally available at the end of a block, then T' must be available at the end of the same block. 
Since some optimization has occurred earlier, T' might not be locally available, however, it must @@ -3165,1223 +2649,881 @@ pass_vsetvl::compute_local_properties (void) basic block, the operands are not modified in the basic block prior to the occurrence and the output is not used between the start of the block and the occurrence. */ - - basic_block cfg_bb; for (const bb_info *bb : crtl->ssa->bbs ()) { - unsigned int curr_bb_idx = bb->index (); - if (curr_bb_idx == ENTRY_BLOCK || curr_bb_idx == EXIT_BLOCK) - continue; - const auto local_dem - = m_vector_manager->vector_block_infos[curr_bb_idx].local_dem; - const auto reaching_out - = m_vector_manager->vector_block_infos[curr_bb_idx].reaching_out; + unsigned bb_index = bb->index (); + vsetvl_block_info &block_info = get_block_info (bb); - /* Compute transparent. */ - for (size_t i = 0; i < m_vector_manager->vector_exprs.length (); i++) + /* Compute m_transp */ + if (block_info.empty_p ()) { - const auto *expr = m_vector_manager->vector_exprs[i]; - if (local_dem.valid_or_dirty_p () || local_dem.unknown_p ()) - bitmap_clear_bit (m_vector_manager->vector_transp[curr_bb_idx], i); - else if (expr->has_avl_reg ()) + bitmap_ones (m_transp[bb_index]); + for (int i = 0; i < num_exprs; i += 1) { - rtx reg = expr->get_avl_or_vl_reg (); + const vsetvl_info &info = *m_exprs[i]; + if (!info.has_nonvlmax_reg_avl () && !info.has_vl ()) + continue; + + unsigned int regno; + sbitmap_iterator sbi; + EXECUTE_IF_SET_IN_BITMAP (m_reg_def_loc[bb->index ()], 0, regno, + sbi) + { + if (regno == REGNO (info.get_avl ())) + bitmap_clear_bit (m_transp[bb->index ()], i); + } + for (const insn_info *insn : bb->real_nondebug_insns ()) { - if (find_access (insn->defs (), REGNO (reg))) - { - bitmap_clear_bit ( - m_vector_manager->vector_transp[curr_bb_idx], i); - break; - } - else if (vlmax_avl_p (expr->get_avl ()) - && find_access (insn->uses (), REGNO (reg))) + if ((info.has_nonvlmax_reg_avl () + && find_access (insn->defs (), REGNO (info.get_avl ()))) + || (info.has_vl () + && find_access (insn->uses (), + REGNO (info.get_vl ())))) { - bitmap_clear_bit ( - m_vector_manager->vector_transp[curr_bb_idx], i); + bitmap_clear_bit (m_transp[bb_index], i); break; } } } - } - /* Compute anticipatable occurrences. */ - if (local_dem.valid_or_dirty_p ()) - if (anticipatable_occurrence_p (bb, local_dem)) - bitmap_set_bit (m_vector_manager->vector_antic[curr_bb_idx], - m_vector_manager->get_expr_id (local_dem)); - - /* Compute available occurrences. 
*/ - if (reaching_out.valid_or_dirty_p ()) - { - auto_vec<size_t> available_list - = m_vector_manager->get_all_available_exprs (reaching_out); - for (size_t i = 0; i < available_list.length (); i++) - { - const vector_insn_info *expr - = m_vector_manager->vector_exprs[available_list[i]]; - if (available_occurrence_p (bb, *expr)) - bitmap_set_bit (m_vector_manager->vector_comp[curr_bb_idx], - available_list[i]); - } + continue; } - if (loop_basic_block_p (bb->cfg_bb ()) && local_dem.valid_or_dirty_p () - && reaching_out.valid_or_dirty_p () - && !local_dem.compatible_p (reaching_out)) - bitmap_clear_bit (m_vector_manager->vector_antic[curr_bb_idx], - m_vector_manager->get_expr_id (local_dem)); - } + vsetvl_info &header_info = block_info.get_entry_info (); + vsetvl_info &footer_info = block_info.get_exit_info (); - /* Compute kill for each basic block using: + if (header_info.valid_p () + && (anticpatable_exp_p (header_info) || block_info.full_available)) + bitmap_set_bit (m_antloc[bb_index], + get_expr_index (m_exprs, header_info)); - ~(TRANSP | COMP) - */ + if (footer_info.valid_p ()) + for (int i = 0; i < num_exprs; i += 1) + { + const vsetvl_info &info = *m_exprs[i]; + if (!info.valid_p ()) + continue; + if (available_exp_p (footer_info, info)) + bitmap_set_bit (m_avloc[bb_index], i); + } + } - FOR_EACH_BB_FN (cfg_bb, cfun) + for (const bb_info *bb : crtl->ssa->bbs ()) { - bitmap_ior (m_vector_manager->vector_kill[cfg_bb->index], - m_vector_manager->vector_transp[cfg_bb->index], - m_vector_manager->vector_comp[cfg_bb->index]); - bitmap_not (m_vector_manager->vector_kill[cfg_bb->index], - m_vector_manager->vector_kill[cfg_bb->index]); + unsigned bb_index = bb->index (); + bitmap_ior (m_kill[bb_index], m_transp[bb_index], m_avloc[bb_index]); + bitmap_not (m_kill[bb_index], m_kill[bb_index]); } - FOR_EACH_BB_FN (cfg_bb, cfun) + for (const bb_info *bb : crtl->ssa->bbs ()) { + unsigned bb_index = bb->index (); edge e; edge_iterator ei; - - /* If the current block is the destination of an abnormal edge, we - kill all trapping (for PRE) and memory (for hoist) expressions - because we won't be able to properly place the instruction on - the edge. So make them neither anticipatable nor transparent. - This is fairly conservative. - - ??? For hoisting it may be necessary to check for set-and-jump - instructions here, not just for abnormal edges. The general problem - is that when an expression cannot not be placed right at the end of - a basic block we should account for any side-effects of a subsequent - jump instructions that could clobber the expression. It would - be best to implement this check along the lines of - should_hoist_expr_to_dom where the target block is already known - and, hence, there's no need to conservatively prune expressions on - "intermediate" set-and-jump instructions. */ - FOR_EACH_EDGE (e, ei, cfg_bb->preds) + FOR_EACH_EDGE (e, ei, bb->cfg_bb ()->preds) if (e->flags & EDGE_COMPLEX) { - bitmap_clear (m_vector_manager->vector_antic[cfg_bb->index]); - bitmap_clear (m_vector_manager->vector_transp[cfg_bb->index]); + bitmap_clear (m_antloc[bb_index]); + bitmap_clear (m_transp[bb_index]); } } } -/* Fuse demand info for earliest edge. 
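With the local properties in place, the loop above derives each block's kill set purely from them, KILL = ~(TRANSP | AVLOC), and then conservatively clears ANTLOC and TRANSP for blocks entered by complex edges, where nothing can be placed safely. A tiny standalone check of that identity (std::bitset stands in for sbitmap; this is not GCC API):

#include <bitset>
#include <cassert>

int
main ()
{
  // Stand-ins for one basic block's LCM properties over 4 expressions.
  std::bitset<4> transp ("1010");  // expression survives the block unchanged
  std::bitset<4> avloc  ("0100");  // expression is computed and reaches the exit

  // An expression is killed in the block iff it is neither transparent
  // nor locally available at the exit: KILL = ~(TRANSP | AVLOC).
  std::bitset<4> kill = ~(transp | avloc);

  assert (kill == std::bitset<4> ("0001"));
  return 0;
}

In words: a block kills an expression exactly when it neither leaves it untouched nor recomputes it before the exit.
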
*/ -bool -pass_vsetvl::earliest_fusion (void) +void +pre_vsetvl::fuse_local_vsetvl_info () { - bool changed_p = false; - for (int ed = 0; ed < NUM_EDGES (m_vector_manager->vector_edge_list); ed++) + m_reg_def_loc + = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), GP_REG_LAST + 1); + bitmap_vector_clear (m_reg_def_loc, last_basic_block_for_fn (cfun)); + bitmap_ones (m_reg_def_loc[ENTRY_BLOCK_PTR_FOR_FN (cfun)->index]); + + for (bb_info *bb : crtl->ssa->bbs ()) { - for (size_t i = 0; i < m_vector_manager->vector_exprs.length (); i++) + auto &block_info = get_block_info (bb); + block_info.bb = bb; + if (dump_file && (dump_flags & TDF_DETAILS)) { - auto &expr = *m_vector_manager->vector_exprs[i]; - if (expr.empty_p ()) - continue; - edge eg = INDEX_EDGE (m_vector_manager->vector_edge_list, ed); - /* If it is the edge that we never reach, skip its possible PRE - fusion conservatively. */ - if (eg->probability == profile_probability::never ()) - break; - if (eg->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) - || eg->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)) - break; - if (bitmap_bit_p (m_vector_manager->vector_earliest[ed], i)) - { - auto &src_block_info = get_block_info (eg->src); - auto &dest_block_info = get_block_info (eg->dest); - if (src_block_info.reaching_out.unknown_p ()) - break; + fprintf (dump_file, " Try fuse basic block %d\n", bb->index ()); + } + auto_vec<vsetvl_info> infos; + for (insn_info *insn : bb->real_nondebug_insns ()) + { + vsetvl_info curr_info = vsetvl_info (insn); + if (curr_info.valid_p () || curr_info.unknown_p ()) + infos.safe_push (curr_info); + + /* Collecting GP registers modified by the current bb. */ + if (insn->is_real ()) + for (def_info *def : insn->defs ()) + if (def->is_reg () && GP_REG_P (def->regno ())) + bitmap_set_bit (m_reg_def_loc[bb->index ()], def->regno ()); + } - gcc_assert (!(eg->flags & EDGE_ABNORMAL)); - vector_insn_info new_info = vector_insn_info (); - profile_probability prob = src_block_info.probability; - /* We don't fuse user vsetvl into EMPTY or - DIRTY (EMPTY but polluted) block for these - following reasons: - - - The user vsetvl instruction is configured as - no side effects that the previous passes - (GSCE, Loop-invariant, ..., etc) - should be able to do a good job on optimization - of user explicit vsetvls so we don't need to - PRE optimization (The user vsetvls should be - on the optimal local already before this pass) - again for user vsetvls in VSETVL PASS here - (Phase 3 && Phase 4). - - - Allowing user vsetvls be optimized in PRE - optimization here (Phase 3 && Phase 4) will - complicate the codes so much so we prefer user - vsetvls be optimized in post-optimization - (Phase 5 && Phase 6). 
*/ - if (vsetvl_insn_p (expr.get_insn ()->rtl ())) + vsetvl_info prev_info = vsetvl_info (); + prev_info.set_empty (); + for (auto &curr_info : infos) + { + if (prev_info.empty_p ()) + prev_info = curr_info; + else if ((curr_info.unknown_p () && prev_info.valid_p ()) + || (curr_info.valid_p () && prev_info.unknown_p ())) + { + block_info.local_infos.safe_push (prev_info); + prev_info = curr_info; + } + else if (curr_info.valid_p () && prev_info.valid_p ()) + { + if (m_dem.available_p (prev_info, curr_info)) { - if (src_block_info.reaching_out.empty_p ()) - continue; - else if (src_block_info.reaching_out.dirty_p () - && !src_block_info.reaching_out.compatible_p (expr)) + if (dump_file && (dump_flags & TDF_DETAILS)) { - new_info.set_empty (); - /* Update probability as uninitialized status so that - we won't try to fuse any demand info into such EMPTY - block any more. */ - prob = profile_probability::uninitialized (); - update_block_info (eg->src->index, prob, new_info); - continue; + fprintf (dump_file, + " Ignore curr info since prev info " + "available with it:\n"); + fprintf (dump_file, " prev_info: "); + prev_info.dump (dump_file, " "); + fprintf (dump_file, " curr_info: "); + curr_info.dump (dump_file, " "); + fprintf (dump_file, "\n"); } - } + if (!curr_info.vl_use_by_non_rvv_insn_p () + && vsetvl_insn_p (curr_info.get_insn ()->rtl ())) + m_delete_list.safe_push (curr_info); - if (src_block_info.reaching_out.empty_p ()) - { - if (src_block_info.probability - == profile_probability::uninitialized ()) - continue; - new_info = expr.global_merge (expr, eg->src->index); - new_info.set_dirty (); - prob = dest_block_info.probability; - update_block_info (eg->src->index, prob, new_info); - changed_p = true; + if (curr_info.get_read_vl_insn ()) + prev_info.set_read_vl_insn (curr_info.get_read_vl_insn ()); } - else if (src_block_info.reaching_out.dirty_p ()) + else if (m_dem.compatible_p (prev_info, curr_info)) { - /* DIRTY -> DIRTY or VALID -> DIRTY. 
*/ - if (demands_can_be_fused_p (src_block_info.reaching_out, - expr)) - { - new_info = src_block_info.reaching_out.global_merge ( - expr, eg->src->index); - new_info.set_dirty (); - prob += dest_block_info.probability; - } - else if (!src_block_info.reaching_out.compatible_p (expr) - && !m_vector_manager->earliest_fusion_worthwhile_p ( - eg->src)) + if (dump_file && (dump_flags & TDF_DETAILS)) { - new_info.set_empty (); - prob = profile_probability::uninitialized (); + fprintf (dump_file, " Fuse curr info since prev info " + "compatible with it:\n"); + fprintf (dump_file, " prev_info: "); + prev_info.dump (dump_file, " "); + fprintf (dump_file, " curr_info: "); + curr_info.dump (dump_file, " "); } - else if (!src_block_info.reaching_out.compatible_p (expr) - && dest_block_info.probability - > src_block_info.probability) + m_dem.merge (prev_info, curr_info); + if (curr_info.get_read_vl_insn ()) + prev_info.set_read_vl_insn (curr_info.get_read_vl_insn ()); + if (dump_file && (dump_flags & TDF_DETAILS)) { - new_info = expr; - new_info.set_dirty (); - prob = dest_block_info.probability; + fprintf (dump_file, " prev_info after fused: "); + prev_info.dump (dump_file, " "); + fprintf (dump_file, "\n"); } - else - continue; - update_block_info (eg->src->index, prob, new_info); - changed_p = true; } else { - rtx vl = NULL_RTX; - if (vsetvl_insn_p ( - src_block_info.reaching_out.get_insn ()->rtl ()) - && vsetvl_dominated_by_p (eg->src, expr, - src_block_info.reaching_out, - true)) - ; - else if (!demands_can_be_fused_p (src_block_info.reaching_out, - expr)) - continue; - else if (!earliest_pred_can_be_fused_p ( - crtl->ssa->bb (eg->src), - src_block_info.reaching_out, expr, &vl)) - continue; - - vector_insn_info new_info - = src_block_info.reaching_out.global_merge (expr, - eg->src->index); - - prob = std::max (dest_block_info.probability, - src_block_info.probability); - change_vsetvl_insn (new_info.get_insn (), new_info, vl); - update_block_info (eg->src->index, prob, new_info); - changed_p = true; + if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, + " Cannot fuse uncompatible infos:\n"); + fprintf (dump_file, " prev_info: "); + prev_info.dump (dump_file, " "); + fprintf (dump_file, " curr_info: "); + curr_info.dump (dump_file, " "); + } + block_info.local_infos.safe_push (prev_info); + prev_info = curr_info; } } } + + if (prev_info.valid_p () || prev_info.unknown_p ()) + block_info.local_infos.safe_push (prev_info); } - return changed_p; -} -/* Fuse VSETVL demand info according LCM computed location. */ -void -pass_vsetvl::vsetvl_fusion (void) -{ - /* Fuse VSETVL demand info until VSETVL CFG fixed. */ - bool changed_p = true; - int fusion_no = 0; - while (changed_p) + m_avl_regs = sbitmap_alloc (GP_REG_LAST + 1); + bitmap_clear (m_avl_regs); + for (const bb_info *bb : crtl->ssa->bbs ()) { - changed_p = false; - fusion_no++; - prune_expressions (); - m_vector_manager->create_bitmap_vectors (); - compute_local_properties (); - /* Compute global availability. */ - compute_available (m_vector_manager->vector_comp, - m_vector_manager->vector_kill, - m_vector_manager->vector_avout, - m_vector_manager->vector_avin); - /* Compute global anticipatability. */ - compute_antinout_edge (m_vector_manager->vector_antic, - m_vector_manager->vector_transp, - m_vector_manager->vector_antin, - m_vector_manager->vector_antout); - /* Compute earliestness. 
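Stepping back, fuse_local_vsetvl_info reduces each block to a short list of fused vsetvl requirements by walking the instructions in order and keeping one pending info: if the pending info already makes the current one available, the current one is simply dropped (and a now-redundant explicit vsetvl is queued for deletion); if the two are merely compatible, they are merged through m_dem.merge; otherwise the pending info is flushed into local_infos and the current one starts a new group. A compressed standalone sketch of that control flow; Info, available_p, compatible_p and merge below are crude stand-ins for the pass's demand system, not its real predicates:

#include <algorithm>
#include <optional>
#include <vector>

// Placeholder for a per-instruction vsetvl requirement.
struct Info
{
  int sew, lmul;
};

// Simplified stand-ins for the demand system's queries.
static bool available_p (const Info &prev, const Info &next)
{ return prev.sew == next.sew && prev.lmul == next.lmul; }
static bool compatible_p (const Info &prev, const Info &next)
{ return prev.sew * next.lmul == next.sew * prev.lmul; }  // same SEW/LMUL ratio
static Info merge (const Info &prev, const Info &next)
{ return { std::max (prev.sew, next.sew), std::max (prev.lmul, next.lmul) }; }

// Fuse a block's requirements left to right, emitting one Info per group.
static std::vector<Info>
fuse_block (const std::vector<Info> &infos)
{
  std::vector<Info> out;
  std::optional<Info> prev;
  for (const Info &curr : infos)
    {
      if (!prev)
	prev = curr;
      else if (available_p (*prev, curr))
	;				// curr is already satisfied: drop it
      else if (compatible_p (*prev, curr))
	prev = merge (*prev, curr);	// strengthen prev to cover both
      else
	{
	  out.push_back (*prev);	// flush the finished group
	  prev = curr;
	}
    }
  if (prev)
    out.push_back (*prev);
  return out;
}

The real predicates additionally track AVL, ratio and tail/mask policy demands, and handle the valid/unknown/empty states seen in the hunk above.
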
*/ - compute_earliest (m_vector_manager->vector_edge_list, - m_vector_manager->vector_exprs.length (), - m_vector_manager->vector_antin, - m_vector_manager->vector_antout, - m_vector_manager->vector_avout, - m_vector_manager->vector_kill, - m_vector_manager->vector_earliest); - changed_p |= earliest_fusion (); - if (dump_file && (dump_flags & TDF_DETAILS)) + vsetvl_block_info &block_info = get_block_info (bb); + if (block_info.empty_p ()) + continue; + + vsetvl_info &header_info = block_info.get_entry_info (); + if (header_info.valid_p () && header_info.has_nonvlmax_reg_avl ()) { - fprintf (dump_file, "\nEARLIEST fusion %d\n", fusion_no); - m_vector_manager->dump (dump_file); + gcc_assert (GP_REG_P (REGNO (header_info.get_avl ()))); + bitmap_set_bit (m_avl_regs, REGNO (header_info.get_avl ())); } - m_vector_manager->free_bitmap_vectors (); - if (!m_vector_manager->vector_exprs.is_empty ()) - m_vector_manager->vector_exprs.release (); } } -/* Return true if VSETVL in the block can be refined as vsetvl zero,zero. */ + bool -pass_vsetvl::can_refine_vsetvl_p (const basic_block cfg_bb, - const vector_insn_info &info) const +pre_vsetvl::earliest_fuse_vsetvl_info () { - if (!m_vector_manager->all_same_ratio_p ( - m_vector_manager->vector_avin[cfg_bb->index])) - return false; + compute_avl_def_data (); + compute_vsetvl_def_data (); + compute_lcm_local_properties (); - if (!m_vector_manager->all_same_avl_p ( - cfg_bb, m_vector_manager->vector_avin[cfg_bb->index])) - return false; + unsigned num_exprs = m_exprs.length (); + struct edge_list *m_edges = create_edge_list (); + unsigned num_edges = NUM_EDGES (m_edges); + sbitmap *antin + = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), num_exprs); + sbitmap *antout + = sbitmap_vector_alloc (last_basic_block_for_fn (cfun), num_exprs); - size_t expr_id - = bitmap_first_set_bit (m_vector_manager->vector_avin[cfg_bb->index]); - if (!m_vector_manager->vector_exprs[expr_id]->same_vlmax_p (info)) - return false; - if (!m_vector_manager->vector_exprs[expr_id]->compatible_avl_p (info)) - return false; + sbitmap *earliest = sbitmap_vector_alloc (num_edges, num_exprs); - edge e; - edge_iterator ei; - bool all_valid_p = true; - FOR_EACH_EDGE (e, ei, cfg_bb->preds) + compute_available (m_avloc, m_kill, m_avout, m_avin); + compute_antinout_edge (m_antloc, m_transp, antin, antout); + compute_earliest (m_edges, num_exprs, antin, antout, m_avout, m_kill, + earliest); + + if (dump_file && (dump_flags & TDF_DETAILS)) { - if (bitmap_empty_p (m_vector_manager->vector_avout[e->src->index])) + fprintf (dump_file, "\n Compute LCM earliest insert data:\n\n"); + fprintf (dump_file, " Expression List (%u):\n", num_exprs); + for (unsigned i = 0; i < num_exprs; i++) { - all_valid_p = false; - break; + const auto &info = *m_exprs[i]; + fprintf (dump_file, " Expr[%u]: ", i); + info.dump (dump_file, " "); } - } - - if (!all_valid_p) - return false; - return true; -} - -/* Optimize athe case like this: + fprintf (dump_file, "\n bitmap data:\n"); + for (const bb_info *bb : crtl->ssa->bbs ()) + { + unsigned int i = bb->index (); + fprintf (dump_file, " BB %u:\n", i); + fprintf (dump_file, " avloc: "); + dump_bitmap_file (dump_file, m_avloc[i]); + fprintf (dump_file, " kill: "); + dump_bitmap_file (dump_file, m_kill[i]); + fprintf (dump_file, " antloc: "); + dump_bitmap_file (dump_file, m_antloc[i]); + fprintf (dump_file, " transp: "); + dump_bitmap_file (dump_file, m_transp[i]); + + fprintf (dump_file, " avin: "); + dump_bitmap_file (dump_file, m_avin[i]); + fprintf (dump_file, " 
avout: "); + dump_bitmap_file (dump_file, m_avout[i]); + fprintf (dump_file, " antin: "); + dump_bitmap_file (dump_file, antin[i]); + fprintf (dump_file, " antout: "); + dump_bitmap_file (dump_file, antout[i]); + } + fprintf (dump_file, "\n"); + fprintf (dump_file, " earliest:\n"); + for (unsigned ed = 0; ed < num_edges; ed++) + { + edge eg = INDEX_EDGE (m_edges, ed); - bb 0: - vsetvl 0 a5,zero,e8,mf8 - insn 0 (demand SEW + LMUL) - bb 1: - vsetvl 1 a5,zero,e16,mf4 - insn 1 (demand SEW + LMUL) + if (bitmap_empty_p (earliest[ed])) + continue; + fprintf (dump_file, " Edge(bb %u -> bb %u): ", eg->src->index, + eg->dest->index); + dump_bitmap_file (dump_file, earliest[ed]); + } + fprintf (dump_file, "\n"); + } - In this case, we should be able to refine - vsetvl 1 into vsetvl zero, zero according AVIN. */ -void -pass_vsetvl::refine_vsetvls (void) const -{ - basic_block cfg_bb; - FOR_EACH_BB_FN (cfg_bb, cfun) + if (dump_file && (dump_flags & TDF_DETAILS)) { - auto info = get_block_info (cfg_bb).local_dem; - insn_info *insn = info.get_insn (); - if (!info.valid_p ()) - continue; + fprintf (dump_file, " Fused global info result:\n"); + } - rtx_insn *rinsn = insn->rtl (); - if (!can_refine_vsetvl_p (cfg_bb, info)) + bool changed = false; + for (unsigned ed = 0; ed < num_edges; ed++) + { + sbitmap e = earliest[ed]; + if (bitmap_empty_p (e)) continue; - /* We can't refine user vsetvl into vsetvl zero,zero since the dest - will be used by the following instructions. */ - if (vector_config_insn_p (rinsn)) + unsigned int expr_index; + sbitmap_iterator sbi; + EXECUTE_IF_SET_IN_BITMAP (e, 0, expr_index, sbi) { - m_vector_manager->to_refine_vsetvls.add (rinsn); - continue; - } + vsetvl_info &curr_info = *m_exprs[expr_index]; + if (!curr_info.valid_p ()) + continue; - /* If all incoming edges to a block have a vector state that is compatbile - with the block. In such a case we need not emit a vsetvl in the current - block. 
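The lift-up loop that follows walks the LCM "earliest" sets just computed. In gcc/lcm.cc terms the per-edge property is, roughly, EARLIEST (p -> s) = ANTIN (s) & ~AVOUT (p) & (KILL (p) | ~ANTOUT (p)): the expression is wanted on entry to the successor, the predecessor does not already provide it, and it cannot be pushed any further up through the predecessor (which either kills it or does not anticipate it on exit). A standalone restatement with bitset stand-ins (illustrative only, not the lcm.cc interface):

#include <bitset>
#include <cstddef>

constexpr std::size_t N = 16;
using expr_set = std::bitset<N>;

// EARLIEST (p -> s) ~= ANTIN (s) & ~AVOUT (p) & (KILL (p) | ~ANTOUT (p)).
// For an edge leaving the entry block the last two terms drop out and
// only ANTIN (s) remains.
static expr_set
earliest_on_edge (const expr_set &antin_succ, const expr_set &avout_pred,
		  const expr_set &antout_pred, const expr_set &kill_pred,
		  bool pred_is_entry)
{
  if (pred_is_entry)
    return antin_succ;
  return antin_succ & ~avout_pred & (kill_pred | ~antout_pred);
}
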
*/ + edge eg = INDEX_EDGE (m_edges, ed); + if (eg->probability == profile_probability::never ()) + continue; + if (eg->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) + || eg->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)) + continue; - gcc_assert (has_vtype_op (insn->rtl ())); - rinsn = PREV_INSN (insn->rtl ()); - gcc_assert (vector_config_insn_p (PREV_INSN (insn->rtl ()))); - if (m_vector_manager->all_avail_in_compatible_p (cfg_bb)) - { - size_t id = m_vector_manager->get_expr_id (info); - if (bitmap_bit_p (m_vector_manager->vector_del[cfg_bb->index], id)) + vsetvl_block_info &src_block_info = get_block_info (eg->src); + vsetvl_block_info &dest_block_info = get_block_info (eg->dest); + + if (src_block_info.probability + == profile_probability::uninitialized ()) continue; - eliminate_insn (rinsn); - } - else - { - rtx new_pat - = gen_vsetvl_pat (VSETVL_VTYPE_CHANGE_ONLY, info, NULL_RTX); - change_insn (rinsn, new_pat); - } - } -} -void -pass_vsetvl::cleanup_vsetvls () -{ - basic_block cfg_bb; - FOR_EACH_BB_FN (cfg_bb, cfun) - { - auto &info = get_block_info (cfg_bb).reaching_out; - gcc_assert (m_vector_manager->expr_set_num ( - m_vector_manager->vector_del[cfg_bb->index]) - <= 1); - for (size_t i = 0; i < m_vector_manager->vector_exprs.length (); i++) - { - if (bitmap_bit_p (m_vector_manager->vector_del[cfg_bb->index], i)) + if (src_block_info.empty_p ()) { - if (info.dirty_p ()) - info.set_unknown (); - else + vsetvl_info new_curr_info = curr_info; + new_curr_info.set_bb (crtl->ssa->bb (eg->dest)); + bool has_compatible_p = false; + unsigned int def_expr_index; + sbitmap_iterator sbi2; + EXECUTE_IF_SET_IN_BITMAP ( + m_vsetvl_def_in[new_curr_info.get_bb ()->index ()], 0, + def_expr_index, sbi2) { - const auto dem = get_block_info (cfg_bb).local_dem; - gcc_assert (dem == *m_vector_manager->vector_exprs[i]); - insn_info *insn = dem.get_insn (); - gcc_assert (insn && insn->rtl ()); - rtx_insn *rinsn; - /* We can't eliminate user vsetvl since the dest will be used - * by the following instructions. 
*/ - if (vector_config_insn_p (insn->rtl ())) + vsetvl_info &prev_info = *m_vsetvl_def_exprs[def_expr_index]; + if (!prev_info.valid_p ()) + continue; + if (m_dem.compatible_p (prev_info, new_curr_info)) { - m_vector_manager->to_delete_vsetvls.add (insn->rtl ()); - continue; + has_compatible_p = true; + break; } - - gcc_assert (has_vtype_op (insn->rtl ())); - rinsn = PREV_INSN (insn->rtl ()); - gcc_assert (vector_config_insn_p (PREV_INSN (insn->rtl ()))); - eliminate_insn (rinsn); } - } - } - } -} - -bool -pass_vsetvl::commit_vsetvls (void) -{ - bool need_commit = false; - - for (int ed = 0; ed < NUM_EDGES (m_vector_manager->vector_edge_list); ed++) - { - for (size_t i = 0; i < m_vector_manager->vector_exprs.length (); i++) - { - edge eg = INDEX_EDGE (m_vector_manager->vector_edge_list, ed); - if (bitmap_bit_p (m_vector_manager->vector_insert[ed], i)) - { - const vector_insn_info *require - = m_vector_manager->vector_exprs[i]; - gcc_assert (require->valid_or_dirty_p ()); - rtl_profile_for_edge (eg); - start_sequence (); - - insn_info *insn = require->get_insn (); - vector_insn_info prev_info = vector_insn_info (); - sbitmap bitdata = m_vector_manager->vector_avout[eg->src->index]; - if (m_vector_manager->all_same_ratio_p (bitdata) - && m_vector_manager->all_same_avl_p (eg->dest, bitdata)) + if (!has_compatible_p) { - size_t first = bitmap_first_set_bit (bitdata); - prev_info = *m_vector_manager->vector_exprs[first]; + if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, + " Forbidden lift up vsetvl info into bb %u " + "since there is no vsetvl info that reaching in " + "is compatible with it:", + eg->src->index); + curr_info.dump (dump_file, " "); + } + continue; } - insert_vsetvl (EMIT_DIRECT, insn->rtl (), *require, prev_info); - rtx_insn *rinsn = get_insns (); - end_sequence (); - default_rtl_profile (); - - /* We should not get an abnormal edge here. */ - gcc_assert (!(eg->flags & EDGE_ABNORMAL)); - need_commit = true; - insert_insn_on_edge (rinsn, eg); - - if (dump_file) + if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, - "\nInsert vsetvl insn %d at edge %d from <bb %d> to " - "<bb %d>:\n", - INSN_UID (rinsn), ed, eg->src->index, - eg->dest->index); - print_rtl_single (dump_file, rinsn); + " Set empty bb %u to info:", eg->src->index); + curr_info.dump (dump_file, " "); } + src_block_info.set_info (curr_info); + src_block_info.probability = dest_block_info.probability; + changed = true; } - } - } - - for (const bb_info *bb : crtl->ssa->bbs ()) - { - basic_block cfg_bb = bb->cfg_bb (); - const auto reaching_out = get_block_info (cfg_bb).reaching_out; - if (!reaching_out.dirty_p ()) - continue; - - rtx new_pat; - if (!reaching_out.demand_p (DEMAND_AVL)) - { - vl_vtype_info new_info = reaching_out; - new_info.set_avl_info (avl_info (const0_rtx, nullptr)); - new_pat = gen_vsetvl_pat (VSETVL_DISCARD_RESULT, new_info, NULL_RTX); - } - else if (can_refine_vsetvl_p (cfg_bb, reaching_out)) - new_pat - = gen_vsetvl_pat (VSETVL_VTYPE_CHANGE_ONLY, reaching_out, NULL_RTX); - else if (vlmax_avl_p (reaching_out.get_avl ())) - { - rtx vl = reaching_out.get_avl_or_vl_reg (); - new_pat = gen_vsetvl_pat (VSETVL_NORMAL, reaching_out, vl); - } - else - new_pat - = gen_vsetvl_pat (VSETVL_DISCARD_RESULT, reaching_out, NULL_RTX); - - edge eg; - edge_iterator eg_iterator; - FOR_EACH_EDGE (eg, eg_iterator, cfg_bb->succs) - { - /* We should not get an abnormal edge here. 
*/ - gcc_assert (!(eg->flags & EDGE_ABNORMAL)); - /* We failed to optimize this case in Phase 3 (earliest fusion): - - bb 2: vsetvl a5, a3 ... - goto bb 4 - bb 3: vsetvl a5, a2 ... - goto bb 4 - bb 4: vsetvli zero, a5 ---> Redundant, should be elided. - - Since "a5" value can come from either bb 2 or bb 3, we can't make - it optimized in Phase 3 which will make phase 3 so complicated. - Now, we do post optimization here to elide the redundant VSETVL - insn in bb4. */ - if (m_vector_manager->vsetvl_dominated_by_all_preds_p (cfg_bb, - reaching_out)) - continue; - - start_sequence (); - emit_insn (copy_rtx (new_pat)); - rtx_insn *rinsn = get_insns (); - end_sequence (); + else if (src_block_info.has_info ()) + { + vsetvl_info &prev_info = src_block_info.get_exit_info (); + gcc_assert (prev_info.valid_p ()); - insert_insn_on_edge (rinsn, eg); - need_commit = true; - if (dump_file) + if (m_dem.compatible_p (prev_info, curr_info)) + { + if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, " Fuse curr info since prev info " + "compatible with it:\n"); + fprintf (dump_file, " prev_info: "); + prev_info.dump (dump_file, " "); + fprintf (dump_file, " curr_info: "); + curr_info.dump (dump_file, " "); + } + m_dem.merge (prev_info, curr_info); + if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, " prev_info after fused: "); + prev_info.dump (dump_file, " "); + fprintf (dump_file, "\n"); + } + changed = true; + if (src_block_info.has_info ()) + src_block_info.probability += dest_block_info.probability; + } + else if (src_block_info.has_info () + && !m_dem.compatible_p (prev_info, curr_info)) + { + /* Cancel lift up if probabilities are equal. */ + if (successors_probability_equal_p (eg->src)) + { + if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, + " Change empty bb %u to from:", + eg->src->index); + prev_info.dump (dump_file, " "); + fprintf (dump_file, + " to (higher probability):"); + curr_info.dump (dump_file, " "); + } + src_block_info.set_empty_info (); + src_block_info.probability + = profile_probability::uninitialized (); + changed = true; + } + /* Choose the one with higher probability. 
*/ + else if (dest_block_info.probability + > src_block_info.probability) + { + if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, + " Change empty bb %u to from:", + eg->src->index); + prev_info.dump (dump_file, " "); + fprintf (dump_file, + " to (higher probability):"); + curr_info.dump (dump_file, " "); + } + src_block_info.set_info (curr_info); + src_block_info.probability = dest_block_info.probability; + changed = true; + } + } + } + else { - fprintf (dump_file, - "\nInsert vsetvl insn %d from <bb %d> to <bb %d>:\n", - INSN_UID (rinsn), cfg_bb->index, eg->dest->index); - print_rtl_single (dump_file, rinsn); + vsetvl_info &prev_info = src_block_info.get_exit_info (); + if (!prev_info.valid_p () + || m_dem.available_p (prev_info, curr_info)) + continue; + + if (m_dem.compatible_p (prev_info, curr_info)) + { + if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, " Fuse curr info since prev info " + "compatible with it:\n"); + fprintf (dump_file, " prev_info: "); + prev_info.dump (dump_file, " "); + fprintf (dump_file, " curr_info: "); + curr_info.dump (dump_file, " "); + } + m_dem.merge (prev_info, curr_info); + if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, " prev_info after fused: "); + prev_info.dump (dump_file, " "); + fprintf (dump_file, "\n"); + } + changed = true; + } } } } - return need_commit; -} - -void -pass_vsetvl::pre_vsetvl (void) -{ - /* Compute entity list. */ - prune_expressions (); - - m_vector_manager->create_bitmap_vectors (); - compute_local_properties (); - m_vector_manager->vector_edge_list = pre_edge_lcm_avs ( - m_vector_manager->vector_exprs.length (), m_vector_manager->vector_transp, - m_vector_manager->vector_comp, m_vector_manager->vector_antic, - m_vector_manager->vector_kill, m_vector_manager->vector_avin, - m_vector_manager->vector_avout, &m_vector_manager->vector_insert, - &m_vector_manager->vector_del); - - /* We should dump the information before CFG is changed. Otherwise it will - produce ICE (internal compiler error). */ if (dump_file && (dump_flags & TDF_DETAILS)) - m_vector_manager->dump (dump_file); - - refine_vsetvls (); - cleanup_vsetvls (); - bool need_commit = commit_vsetvls (); - if (need_commit) - commit_edge_insertions (); -} - -/* Some instruction can not be accessed in RTL_SSA when we don't re-init - the new RTL_SSA framework but it is definetely at the END of the block. - - Here we optimize the VSETVL is hoisted by LCM: - - Before LCM: - bb 1: - vsetvli a5,a2,e32,m1,ta,mu - bb 2: - vsetvli zero,a5,e32,m1,ta,mu - ... - - After LCM: - bb 1: - vsetvli a5,a2,e32,m1,ta,mu - LCM INSERTED: vsetvli zero,a5,e32,m1,ta,mu --> eliminate - bb 2: - ... - */ -rtx_insn * -pass_vsetvl::get_vsetvl_at_end (const bb_info *bb, vector_insn_info *dem) const -{ - rtx_insn *end_vsetvl = BB_END (bb->cfg_bb ()); - if (end_vsetvl && NONDEBUG_INSN_P (end_vsetvl)) { - if (JUMP_P (end_vsetvl)) - end_vsetvl = PREV_INSN (end_vsetvl); - - if (NONDEBUG_INSN_P (end_vsetvl) - && vsetvl_discard_result_insn_p (end_vsetvl)) - { - /* Only handle single succ. here, multiple succ. is much - more complicated. */ - if (single_succ_p (bb->cfg_bb ())) - { - edge e = single_succ_edge (bb->cfg_bb ()); - *dem = get_block_info (e->dest).local_dem; - return end_vsetvl; - } - } + fprintf (dump_file, "\n"); } - return nullptr; -} -/* This predicator should only used within same basic block. 
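After pre_edge_lcm_avs the pass only has to interpret two outputs: m_del, indexed by basic block, names expressions whose vsetvl at the block head is redundant (the loop above checks that at most one bit is set per block and marks that entry info for deletion), and m_insert, indexed by edge, names expressions that must be materialised on that edge. A generic sketch of that consumption shape, with printf standing in for the pass's set_delete and edge-insertion actions (the containers below are placeholders, not GCC's):

#include <bitset>
#include <cstddef>
#include <cstdio>
#include <vector>

constexpr std::size_t N = 16;
using expr_set = std::bitset<N>;

// DEL is indexed by basic block, INSERT by edge; each set bit names an
// expression to drop at the block head or to emit on the edge.
void
apply_lcm_result (const std::vector<expr_set> &del,
		  const std::vector<expr_set> &insert)
{
  for (std::size_t bb = 0; bb < del.size (); ++bb)
    for (std::size_t e = 0; e < N; ++e)
      if (del[bb][e])
	std::printf ("drop expr %zu at head of bb %zu\n", e, bb);
  for (std::size_t ed = 0; ed < insert.size (); ++ed)
    for (std::size_t e = 0; e < N; ++e)
      if (insert[ed][e])
	std::printf ("emit expr %zu on edge %zu\n", e, ed);
}
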
*/ -static bool -local_avl_compatible_p (rtx avl1, rtx avl2) -{ - if (!REG_P (avl1) || !REG_P (avl2)) - return false; + sbitmap_vector_free (antin); + sbitmap_vector_free (antout); + sbitmap_vector_free (earliest); + free_edge_list (m_edges); - return REGNO (avl1) == REGNO (avl2); + return changed; } -/* Local user vsetvl optimizaiton: - - Case 1: - vsetvl a5,a4,e8,mf8 - ... - vsetvl zero,a5,e8,mf8 --> Eliminate directly. - - Case 2: - vsetvl a5,a4,e8,mf8 --> vsetvl a5,a4,e32,mf2 - ... - vsetvl zero,a5,e32,mf2 --> Eliminate directly. */ void -pass_vsetvl::local_eliminate_vsetvl_insn (const bb_info *bb) const +pre_vsetvl::pre_global_vsetvl_info () { - rtx_insn *prev_vsetvl = nullptr; - rtx_insn *curr_vsetvl = nullptr; - rtx vl_placeholder = RVV_VLMAX; - rtx prev_avl = vl_placeholder; - rtx curr_avl = vl_placeholder; - vector_insn_info prev_dem; - - /* Instruction inserted by LCM is not appeared in RTL-SSA yet, try to - found those instruciton. */ - if (rtx_insn *end_vsetvl = get_vsetvl_at_end (bb, &prev_dem)) - { - prev_avl = get_avl (end_vsetvl); - prev_vsetvl = end_vsetvl; - } + compute_avl_def_data (); + compute_vsetvl_def_data (); + compute_lcm_local_properties (); - bool skip_one = false; - /* Backward propgate vsetvl info, drop the later one (prev_vsetvl) if it's - compatible with current vsetvl (curr_avl), and merge the vtype and avl - info. into current vsetvl. */ - for (insn_info *insn : bb->reverse_real_nondebug_insns ()) - { - rtx_insn *rinsn = insn->rtl (); - const auto &curr_dem = get_vector_info (insn); - bool need_invalidate = false; + unsigned num_exprs = m_exprs.length (); + m_edges = pre_edge_lcm_avs (num_exprs, m_transp, m_avloc, m_antloc, m_kill, + m_avin, m_avout, &m_insert, &m_del); + unsigned num_edges = NUM_EDGES (m_edges); - /* Skip if this insn already handled in last iteration. */ - if (skip_one) + if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, "\n Compute LCM insert and delete data:\n\n"); + fprintf (dump_file, " Expression List (%u):\n", num_exprs); + for (unsigned i = 0; i < num_exprs; i++) { - skip_one = false; - continue; + const auto &info = *m_exprs[i]; + fprintf (dump_file, " Expr[%u]: ", i); + info.dump (dump_file, " "); } - - if (vsetvl_insn_p (rinsn)) + fprintf (dump_file, "\n bitmap data:\n"); + for (const bb_info *bb : crtl->ssa->bbs ()) { - curr_vsetvl = rinsn; - /* vsetvl are using vl rather than avl since it will try to merge - with other vsetvl_discard_result. - - v--- avl - vsetvl a5,a4,e8,mf8 # vsetvl - ... ^--- vl - vsetvl zero,a5,e8,mf8 # vsetvl_discard_result - ^--- avl - */ - curr_avl = get_vl (rinsn); - /* vsetvl is a cut point of local backward vsetvl elimination. 
*/ - need_invalidate = true; + unsigned i = bb->index (); + fprintf (dump_file, " BB %u:\n", i); + fprintf (dump_file, " avloc: "); + dump_bitmap_file (dump_file, m_avloc[i]); + fprintf (dump_file, " kill: "); + dump_bitmap_file (dump_file, m_kill[i]); + fprintf (dump_file, " antloc: "); + dump_bitmap_file (dump_file, m_antloc[i]); + fprintf (dump_file, " transp: "); + dump_bitmap_file (dump_file, m_transp[i]); + + fprintf (dump_file, " avin: "); + dump_bitmap_file (dump_file, m_avin[i]); + fprintf (dump_file, " avout: "); + dump_bitmap_file (dump_file, m_avout[i]); + fprintf (dump_file, " del: "); + dump_bitmap_file (dump_file, m_del[i]); } - else if (has_vtype_op (rinsn) && NONDEBUG_INSN_P (PREV_INSN (rinsn)) - && (vsetvl_discard_result_insn_p (PREV_INSN (rinsn)) - || vsetvl_insn_p (PREV_INSN (rinsn)))) + fprintf (dump_file, "\n"); + fprintf (dump_file, " insert:\n"); + for (unsigned ed = 0; ed < num_edges; ed++) { - curr_vsetvl = PREV_INSN (rinsn); - - if (vsetvl_insn_p (PREV_INSN (rinsn))) - { - /* Need invalidate and skip if it's vsetvl. */ - need_invalidate = true; - /* vsetvl_discard_result_insn_p won't appeared in RTL-SSA, - * so only need to skip for vsetvl. */ - skip_one = true; - } + edge eg = INDEX_EDGE (m_edges, ed); - curr_avl = curr_dem.get_avl (); - - /* Some instrucion like pred_extract_first<mode> don't reqruie avl, so - the avl is null, use vl_placeholder for unify the handling - logic. */ - if (!curr_avl) - curr_avl = vl_placeholder; - } - else if (insn->is_call () || insn->is_asm () - || find_access (insn->defs (), VL_REGNUM) - || find_access (insn->defs (), VTYPE_REGNUM) - || (REG_P (prev_avl) - && find_access (insn->defs (), REGNO (prev_avl)))) - { - /* Invalidate if this insn can't propagate vl, vtype or avl. */ - need_invalidate = true; - prev_dem = vector_insn_info (); + if (bitmap_empty_p (m_insert[ed])) + continue; + fprintf (dump_file, " Edge(bb %u -> bb %u): ", eg->src->index, + eg->dest->index); + dump_bitmap_file (dump_file, m_insert[ed]); } - else - /* Not interested instruction. */ + } + + /* Remove vsetvl infos as LCM suggest */ + for (const bb_info *bb : crtl->ssa->bbs ()) + { + sbitmap d = m_del[bb->index ()]; + if (bitmap_count_bits (d) == 0) continue; + gcc_assert (bitmap_count_bits (d) == 1); + unsigned expr_index = bitmap_first_set_bit (d); + vsetvl_info &info = *m_exprs[expr_index]; + gcc_assert (info.valid_p ()); + gcc_assert (info.get_bb () == bb); + const vsetvl_block_info &block_info = get_block_info (info.get_bb ()); + gcc_assert (block_info.get_entry_info () == info); + info.set_delete (); + } - /* Local AVL compatibility checking is simpler than global, we only - need to check the REGNO is same. */ - if (prev_dem.valid_or_dirty_p () - && prev_dem.skip_avl_compatible_p (curr_dem) - && local_avl_compatible_p (prev_avl, curr_avl)) + for (const bb_info *bb : crtl->ssa->bbs ()) + { + vsetvl_block_info &block_info = get_block_info (bb); + if (block_info.empty_p ()) + continue; + vsetvl_info &curr_info = block_info.get_entry_info (); + if (curr_info.delete_p ()) { - /* curr_dem and prev_dem is compatible! */ - /* Update avl info since we need to make sure they are fully - compatible before merge. */ - prev_dem.set_avl_info (curr_dem.get_avl_info ()); - /* Merge both and update into curr_vsetvl. */ - prev_dem = curr_dem.local_merge (prev_dem); - change_vsetvl_insn (curr_dem.get_insn (), prev_dem); - /* Then we can drop prev_vsetvl. 
*/ - eliminate_insn (prev_vsetvl); + if (block_info.local_infos.is_empty ()) + continue; + curr_info = block_info.local_infos[0]; } + if (curr_info.valid_p () && !curr_info.vl_use_by_non_rvv_insn_p () + && preds_has_same_avl_p (curr_info)) + curr_info.set_change_vtype_only (); - if (need_invalidate) + vsetvl_info prev_info = vsetvl_info (); + prev_info.set_empty (); + for (auto &curr_info : block_info.local_infos) { - prev_vsetvl = nullptr; - curr_vsetvl = nullptr; - prev_avl = vl_placeholder; - curr_avl = vl_placeholder; - prev_dem = vector_insn_info (); + if (prev_info.valid_p () && curr_info.valid_p () + && m_dem.avl_available_p (prev_info, curr_info)) + curr_info.set_change_vtype_only (); + prev_info = curr_info; } - else - { - prev_vsetvl = curr_vsetvl; - prev_avl = curr_avl; - prev_dem = curr_dem; - } - } -} - -/* Return the first vsetvl instruction in CFG_BB or NULL if - none exists or if a user RVV instruction is enountered - prior to any vsetvl. */ -static rtx_insn * -get_first_vsetvl_before_rvv_insns (basic_block cfg_bb, - enum vsetvl_type insn_type) -{ - gcc_assert (insn_type == VSETVL_DISCARD_RESULT - || insn_type == VSETVL_VTYPE_CHANGE_ONLY); - rtx_insn *rinsn; - FOR_BB_INSNS (cfg_bb, rinsn) - { - if (!NONDEBUG_INSN_P (rinsn)) - continue; - /* If we don't find any inserted vsetvli before user RVV instructions, - we don't need to optimize the vsetvls in this block. */ - if (has_vtype_op (rinsn) || vsetvl_insn_p (rinsn)) - return nullptr; - - if (insn_type == VSETVL_DISCARD_RESULT - && vsetvl_discard_result_insn_p (rinsn)) - return rinsn; - if (insn_type == VSETVL_VTYPE_CHANGE_ONLY - && vsetvl_vtype_change_only_p (rinsn)) - return rinsn; } - return nullptr; } -/* Global user vsetvl optimizaiton: - - Case 1: - bb 1: - vsetvl a5,a4,e8,mf8 - ... - bb 2: - ... - vsetvl zero,a5,e8,mf8 --> Eliminate directly. - - Case 2: - bb 1: - vsetvl a5,a4,e8,mf8 --> vsetvl a5,a4,e32,mf2 - ... - bb 2: - ... - vsetvl zero,a5,e32,mf2 --> Eliminate directly. - - Case 3: - bb 1: - vsetvl a5,a4,e8,mf8 --> vsetvl a5,a4,e32,mf2 - ... - bb 2: - ... - vsetvl a5,a4,e8,mf8 --> vsetvl a5,a4,e32,mf2 - goto bb 3 - bb 3: - ... - vsetvl zero,a5,e32,mf2 --> Eliminate directly. -*/ -bool -pass_vsetvl::global_eliminate_vsetvl_insn (const bb_info *bb) const +void +pre_vsetvl::emit_vsetvl () { - rtx_insn *vsetvl_rinsn = NULL; - vector_insn_info dem = vector_insn_info (); - const auto &block_info = get_block_info (bb); - basic_block cfg_bb = bb->cfg_bb (); + bool need_commit = false; - if (block_info.local_dem.valid_or_dirty_p ()) + for (const bb_info *bb : crtl->ssa->bbs ()) { - /* Optimize the local vsetvl. 
*/ - dem = block_info.local_dem; - vsetvl_rinsn - = get_first_vsetvl_before_rvv_insns (cfg_bb, VSETVL_DISCARD_RESULT); + for (const auto &curr_info : get_block_info (bb).local_infos) + { + insn_info *insn = curr_info.get_insn (); + if (curr_info.delete_p ()) + { + if (vsetvl_insn_p (insn->rtl ())) + remove_vsetvl_insn (curr_info); + continue; + } + else if (curr_info.valid_p ()) + { + if (vsetvl_insn_p (insn->rtl ())) + { + const vsetvl_info temp = vsetvl_info (insn); + if (!(curr_info == temp)) + { + if (dump_file) + { + fprintf (dump_file, "\n Change vsetvl info from: "); + temp.dump (dump_file, " "); + fprintf (dump_file, " to: "); + curr_info.dump (dump_file, " "); + } + change_vsetvl_insn (curr_info); + } + } + else + { + if (dump_file) + { + fprintf (dump_file, + "\n Insert vsetvl info before insn %d: ", + insn->uid ()); + curr_info.dump (dump_file, " "); + } + insert_vsetvl_insn (EMIT_BEFORE, curr_info); + } + } + } } - if (!vsetvl_rinsn) - /* Optimize the global vsetvl inserted by LCM. */ - vsetvl_rinsn = get_vsetvl_at_end (bb, &dem); - /* No need to optimize if block doesn't have vsetvl instructions. */ - if (!dem.valid_or_dirty_p () || !vsetvl_rinsn || !dem.get_avl_source () - || !dem.has_avl_reg ()) - return false; - - /* Condition 1: Check it has preds. */ - if (EDGE_COUNT (cfg_bb->preds) == 0) - return false; - - /* If all preds has VL/VTYPE status setted by user vsetvls, and these - user vsetvls are all skip_avl_compatible_p with the vsetvl in this - block, we can eliminate this vsetvl instruction. */ - sbitmap avin = m_vector_manager->vector_avin[cfg_bb->index]; - - unsigned int bb_index; - sbitmap_iterator sbi; - rtx avl = dem.get_avl (); - hash_set<set_info *> sets - = get_all_sets (dem.get_avl_source (), true, false, false); - /* Condition 2: All VL/VTYPE available in are all compatible. */ - EXECUTE_IF_SET_IN_BITMAP (avin, 0, bb_index, sbi) + for (const vsetvl_info &item : m_delete_list) { - const auto &expr = m_vector_manager->vector_exprs[bb_index]; - const auto &insn = expr->get_insn (); - def_info *def = find_access (insn->defs (), REGNO (avl)); - set_info *set = safe_dyn_cast<set_info *> (def); - if (!vsetvl_insn_p (insn->rtl ()) || insn->bb () == bb - || !sets.contains (set)) - return false; + gcc_assert (vsetvl_insn_p (item.get_insn ()->rtl ())); + remove_vsetvl_insn (item); } - /* Condition 3: We don't do the global optimization for the block - has a pred is entry block or exit block. */ - /* Condition 4: All preds have available VL/VTYPE out. */ - edge e; - edge_iterator ei; - FOR_EACH_EDGE (e, ei, cfg_bb->preds) + /* m_insert vsetvl as LCM suggest. */ + for (int ed = 0; ed < NUM_EDGES (m_edges); ed++) { - sbitmap avout = m_vector_manager->vector_avout[e->src->index]; - if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) - || e->src == EXIT_BLOCK_PTR_FOR_FN (cfun) - || (unsigned int) e->src->index - >= m_vector_manager->vector_block_infos.length () - || bitmap_empty_p (avout)) - return false; - - EXECUTE_IF_SET_IN_BITMAP (avout, 0, bb_index, sbi) + edge eg = INDEX_EDGE (m_edges, ed); + sbitmap i = m_insert[ed]; + if (bitmap_count_bits (i) < 1) + continue; + + if (bitmap_count_bits (i) > 1) + /* For code with infinite loop (e.g. pr61634.c), The data flow is + completely wrong. 
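Each insertion chosen by LCM is emitted the deferred way shown above: the vsetvl is generated into a detached sequence, queued on its edge, and the actual CFG surgery (splitting critical edges and so on) happens once, in the single commit step at the end of emit_vsetvl. A toy standalone model of that queue-then-commit idiom; nothing below is GCC API, and printf merely stands in for the real splitting and emission:

#include <cstdio>
#include <map>
#include <string>
#include <utility>
#include <vector>

// Toy model of deferred edge insertion: instructions are only queued
// per edge while the CFG is still being inspected, and all of them are
// materialised in one final commit step.
struct edge_inserter
{
  // edge identified as (src bb, dest bb) -> queued instruction text
  std::map<std::pair<int, int>, std::vector<std::string>> pending;

  void insert_on_edge (int src, int dest, std::string insn)
  { pending[{src, dest}].push_back (std::move (insn)); }

  void commit ()
  {
    for (const auto &[edge, insns] : pending)
      for (const std::string &insn : insns)
	std::printf ("split edge %d->%d and emit: %s\n",
		     edge.first, edge.second, insn.c_str ());
    pending.clear ();
  }
};
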
*/ + continue; + + gcc_assert (bitmap_count_bits (i) == 1); + unsigned expr_index = bitmap_first_set_bit (i); + const vsetvl_info &info = *m_exprs[expr_index]; + gcc_assert (info.valid_p ()); + if (dump_file) { - const auto &expr = m_vector_manager->vector_exprs[bb_index]; - const auto &insn = expr->get_insn (); - def_info *def = find_access (insn->defs (), REGNO (avl)); - set_info *set = safe_dyn_cast<set_info *> (def); - if (!vsetvl_insn_p (insn->rtl ()) || insn->bb () == bb - || !sets.contains (set) || !expr->skip_avl_compatible_p (dem)) - return false; + fprintf (dump_file, + "\n Insert vsetvl info at edge(bb %u -> bb %u): ", + eg->src->index, eg->dest->index); + info.dump (dump_file, " "); } + rtl_profile_for_edge (eg); + start_sequence (); + + insert_vsetvl_insn (EMIT_DIRECT, info); + rtx_insn *rinsn = get_insns (); + end_sequence (); + default_rtl_profile (); + + /* We should not get an abnormal edge here. */ + gcc_assert (!(eg->flags & EDGE_ABNORMAL)); + need_commit = true; + insert_insn_on_edge (rinsn, eg); } - /* Step1: Reshape the VL/VTYPE status to make sure everything compatible. */ - auto_vec<basic_block> pred_cfg_bbs - = get_dominated_by (CDI_POST_DOMINATORS, cfg_bb); - FOR_EACH_EDGE (e, ei, cfg_bb->preds) + /* Insert vsetvl info that was not deleted after lift up. */ + for (const bb_info *bb : crtl->ssa->bbs ()) { - sbitmap avout = m_vector_manager->vector_avout[e->src->index]; - EXECUTE_IF_SET_IN_BITMAP (avout, 0, bb_index, sbi) + const vsetvl_block_info &block_info = get_block_info (bb); + if (!block_info.has_info ()) + continue; + + const vsetvl_info &footer_info = block_info.get_exit_info (); + + if (footer_info.delete_p ()) + continue; + + edge eg; + edge_iterator eg_iterator; + FOR_EACH_EDGE (eg, eg_iterator, bb->cfg_bb ()->succs) { - vector_insn_info prev_dem = *m_vector_manager->vector_exprs[bb_index]; - vector_insn_info curr_dem = dem; - insn_info *insn = prev_dem.get_insn (); - if (!pred_cfg_bbs.contains (insn->bb ()->cfg_bb ())) - continue; - /* Update avl info since we need to make sure they are fully - compatible before merge. */ - curr_dem.set_avl_info (prev_dem.get_avl_info ()); - /* Merge both and update into curr_vsetvl. */ - prev_dem = curr_dem.local_merge (prev_dem); - change_vsetvl_insn (insn, prev_dem); + gcc_assert (!(eg->flags & EDGE_ABNORMAL)); + if (dump_file) + { + fprintf ( + dump_file, + "\n Insert missed vsetvl info at edge(bb %u -> bb %u): ", + eg->src->index, eg->dest->index); + footer_info.dump (dump_file, " "); + } + start_sequence (); + insert_vsetvl_insn (EMIT_DIRECT, footer_info); + rtx_insn *rinsn = get_insns (); + end_sequence (); + default_rtl_profile (); + insert_insn_on_edge (rinsn, eg); + need_commit = true; } } - /* Step2: eliminate the vsetvl instruction. */ - eliminate_insn (vsetvl_rinsn); - return true; + if (need_commit) + commit_edge_insertions (); } -/* This function does the following post optimization base on RTL_SSA: - - 1. Local user vsetvl optimizations. - 2. Global user vsetvl optimizations. - 3. AVL dependencies removal: - Before VSETVL PASS, RVV instructions pattern is depending on AVL operand - implicitly. Since we will emit VSETVL instruction and make RVV - instructions depending on VL/VTYPE global status registers, we remove the - such AVL operand in the RVV instructions pattern here in order to remove - AVL dependencies when AVL operand is a register operand. - - Before the VSETVL PASS: - li a5,32 - ... - vadd.vv (..., a5) - After the VSETVL PASS: - li a5,32 - vsetvli zero, a5, ... - ... 
- vadd.vv (..., const_int 0). */ void -pass_vsetvl::ssa_post_optimization (void) const +pre_vsetvl::cleaup () { - for (const bb_info *bb : crtl->ssa->bbs ()) - { - local_eliminate_vsetvl_insn (bb); - bool changed_p = true; - while (changed_p) - { - changed_p = false; - changed_p |= global_eliminate_vsetvl_insn (bb); - } - for (insn_info *insn : bb->real_nondebug_insns ()) - { - rtx_insn *rinsn = insn->rtl (); - if (vlmax_avl_insn_p (rinsn)) - { - eliminate_insn (rinsn); - continue; - } + remove_avl_operand (); + remove_unused_dest_operand (); +} - /* Erase the AVL operand from the instruction. */ - if (!has_vl_op (rinsn) || !REG_P (get_vl (rinsn))) - continue; +void +pre_vsetvl::remove_avl_operand () +{ + basic_block cfg_bb; + rtx_insn *rinsn; + FOR_ALL_BB_FN (cfg_bb, cfun) + FOR_BB_INSNS (cfg_bb, rinsn) + if (NONDEBUG_INSN_P (rinsn) && has_vl_op (rinsn) + && REG_P (get_vl (rinsn))) + { rtx avl = get_vl (rinsn); if (count_regno_occurrences (rinsn, REGNO (avl)) == 1) { - /* Get the list of uses for the new instruction. */ - auto attempt = crtl->ssa->new_change_attempt (); - insn_change change (insn); - /* Remove the use of the substituted value. */ - access_array_builder uses_builder (attempt); - uses_builder.reserve (insn->num_uses () - 1); - for (use_info *use : insn->uses ()) - if (use != find_access (insn->uses (), REGNO (avl))) - uses_builder.quick_push (use); - use_array new_uses = use_array (uses_builder.finish ()); - change.new_uses = new_uses; - change.move_range = insn->ebb ()->insn_range (); - rtx pat; + rtx new_pat; if (fault_first_load_p (rinsn)) - pat = simplify_replace_rtx (PATTERN (rinsn), avl, const0_rtx); + new_pat + = simplify_replace_rtx (PATTERN (rinsn), avl, const0_rtx); else { rtx set = single_set (rinsn); rtx src = simplify_replace_rtx (SET_SRC (set), avl, const0_rtx); - pat = gen_rtx_SET (SET_DEST (set), src); + new_pat = gen_rtx_SET (SET_DEST (set), src); } - bool ok = change_insn (crtl->ssa, change, insn, pat); - gcc_assert (ok); + if (dump_file) + { + fprintf (dump_file, " Cleanup insn %u's avl operand:\n", + INSN_UID (rinsn)); + print_rtl_single (dump_file, rinsn); + } + validate_change_or_fail (rinsn, &PATTERN (rinsn), new_pat, false); } } - } -} - -/* Return true if the SET result is not used by any instructions. */ -static bool -has_no_uses (basic_block cfg_bb, rtx_insn *rinsn, int regno) -{ - /* Handle the following case that can not be detected in RTL_SSA. */ - /* E.g. - li a5, 100 - vsetvli a6, a5... - ... - vadd (use a6) - - The use of "a6" is removed from "vadd" but the information is - not updated in RTL_SSA framework. We don't want to re-new - a new RTL_SSA which is expensive, instead, we use data-flow - analysis to check whether "a6" has no uses. */ - if (bitmap_bit_p (df_get_live_out (cfg_bb), regno)) - return false; - - rtx_insn *iter; - for (iter = NEXT_INSN (rinsn); iter && iter != NEXT_INSN (BB_END (cfg_bb)); - iter = NEXT_INSN (iter)) - if (df_find_use (iter, regno_reg_rtx[regno])) - return false; - - return true; } -/* This function does the following post optimization base on dataflow - analysis: - - 1. Change vsetvl rd, rs1 --> vsevl zero, rs1, if rd is not used by any - nondebug instructions. Even though this PASS runs after RA and it doesn't - help for reduce register pressure, it can help instructions scheduling since - we remove the dependencies. - - 2. Remove redundant user vsetvls base on outcome of Phase 4 (LCM) && Phase 5 - (AVL dependencies removal). 
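remove_avl_operand above performs the AVL-dependency removal described here: once VL/VTYPE are provided globally by the emitted vsetvls, an instruction whose pattern still mentions its AVL register exactly once gets that operand rewritten to const0_rtx via simplify_replace_rtx, so the register dependence on the AVL disappears. A toy standalone model of that kind of operand substitution over a little expression tree (the types and strings below are invented for the sketch; the real code works on RTL patterns):

#include <memory>
#include <string>
#include <vector>

// Toy expression tree standing in for an insn pattern; replace_operand
// models the substitution: every occurrence of the AVL operand is
// rewritten to a constant so the pattern no longer reads that register.
struct expr
{
  std::string op;                           // e.g. "reg:a5", "const:0", "vadd"
  std::vector<std::shared_ptr<expr>> operands;
};

std::shared_ptr<expr>
replace_operand (const std::shared_ptr<expr> &e, const std::string &from,
		 const std::string &to)
{
  if (e->op == from)
    return std::make_shared<expr> (expr {to, {}});
  auto copy = std::make_shared<expr> (expr {e->op, {}});
  for (const auto &sub : e->operands)
    copy->operands.push_back (replace_operand (sub, from, to));
  return copy;
}
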
*/ void -pass_vsetvl::df_post_optimization (void) const +pre_vsetvl::remove_unused_dest_operand () { df_analyze (); - hash_set<rtx_insn *> to_delete; basic_block cfg_bb; rtx_insn *rinsn; FOR_ALL_BB_FN (cfg_bb, cfun) - { - FOR_BB_INSNS (cfg_bb, rinsn) + FOR_BB_INSNS (cfg_bb, rinsn) + if (NONDEBUG_INSN_P (rinsn) && vsetvl_insn_p (rinsn)) { - if (NONDEBUG_INSN_P (rinsn) && vsetvl_insn_p (rinsn)) - { - rtx vl = get_vl (rinsn); - vector_insn_info info; - info.parse_insn (rinsn); - bool to_delete_p = m_vector_manager->to_delete_p (rinsn); - bool to_refine_p = m_vector_manager->to_refine_p (rinsn); - if (has_no_uses (cfg_bb, rinsn, REGNO (vl))) - { - if (to_delete_p) - to_delete.add (rinsn); - else if (to_refine_p) - { - rtx new_pat = gen_vsetvl_pat (VSETVL_VTYPE_CHANGE_ONLY, - info, NULL_RTX); - validate_change_or_fail (rinsn, &PATTERN (rinsn), new_pat, - false); - } - else if (!vlmax_avl_p (info.get_avl ())) - { - rtx new_pat = gen_vsetvl_pat (VSETVL_DISCARD_RESULT, info, - NULL_RTX); - validate_change_or_fail (rinsn, &PATTERN (rinsn), new_pat, - false); - } - } - } + rtx vl = get_vl (rinsn); + vsetvl_info info = vsetvl_info (rinsn); + if (has_no_uses (cfg_bb, rinsn, REGNO (vl))) + if (!info.has_vlmax_avl ()) + { + rtx new_pat = info.get_vsetvl_pat (true); + if (dump_file) + { + fprintf (dump_file, + " Remove vsetvl insn %u's dest(vl) operand since " + "it unused:\n", + INSN_UID (rinsn)); + print_rtl_single (dump_file, rinsn); + } + validate_change_or_fail (rinsn, &PATTERN (rinsn), new_pat, + false); + } } - } - for (rtx_insn *rinsn : to_delete) - eliminate_insn (rinsn); } -void -pass_vsetvl::init (void) -{ - if (optimize > 0) - { - /* Initialization of RTL_SSA. */ - calculate_dominance_info (CDI_DOMINATORS); - calculate_dominance_info (CDI_POST_DOMINATORS); - df_analyze (); - crtl->ssa = new function_info (cfun); - } +const pass_data pass_data_vsetvl = { + RTL_PASS, /* type */ + "vsetvl", /* name */ + OPTGROUP_NONE, /* optinfo_flags */ + TV_NONE, /* tv_id */ + 0, /* properties_required */ + 0, /* properties_provided */ + 0, /* properties_destroyed */ + 0, /* todo_flags_start */ + 0, /* todo_flags_finish */ +}; - m_vector_manager = new vector_infos_manager (); - compute_probabilities (); +class pass_vsetvl : public rtl_opt_pass +{ +private: + void simple_vsetvl (); + void lazy_vsetvl (); - if (dump_file && (dump_flags & TDF_DETAILS)) - { - fprintf (dump_file, "\nPrologue: Initialize vector infos\n"); - m_vector_manager->dump (dump_file); - } -} +public: + pass_vsetvl (gcc::context *ctxt) : rtl_opt_pass (pass_data_vsetvl, ctxt) {} -void -pass_vsetvl::done (void) -{ - if (optimize > 0) - { - /* Finalization of RTL_SSA. */ - free_dominance_info (CDI_DOMINATORS); - free_dominance_info (CDI_POST_DOMINATORS); - if (crtl->ssa->perform_pending_updates ()) - cleanup_cfg (0); - delete crtl->ssa; - crtl->ssa = nullptr; - } - m_vector_manager->release (); - delete m_vector_manager; - m_vector_manager = nullptr; -} + /* opt_pass methods: */ + virtual bool gate (function *) final override { return TARGET_VECTOR; } + virtual unsigned int execute (function *) final override; +}; // class pass_vsetvl -/* Compute probability for each block. */ void -pass_vsetvl::compute_probabilities (void) +pass_vsetvl::simple_vsetvl () { - /* Don't compute it in -O0 since we don't need it. 
*/ - if (!optimize) - return; - edge e; - edge_iterator ei; + if (dump_file) + fprintf (dump_file, "\nEntering Simple VSETVL PASS\n"); - for (const bb_info *bb : crtl->ssa->bbs ()) + basic_block cfg_bb; + rtx_insn *rinsn; + FOR_ALL_BB_FN (cfg_bb, cfun) { - basic_block cfg_bb = bb->cfg_bb (); - auto &curr_prob = get_block_info (cfg_bb).probability; - - /* GCC assume entry block (bb 0) are always so - executed so set its probability as "always". */ - if (ENTRY_BLOCK_PTR_FOR_FN (cfun) == cfg_bb) - curr_prob = profile_probability::always (); - /* Exit block (bb 1) is the block we don't need to process. */ - if (EXIT_BLOCK_PTR_FOR_FN (cfun) == cfg_bb) - continue; - - gcc_assert (curr_prob.initialized_p ()); - FOR_EACH_EDGE (e, ei, cfg_bb->succs) + FOR_BB_INSNS (cfg_bb, rinsn) { - auto &new_prob = get_block_info (e->dest).probability; - /* Normally, the edge probability should be initialized. - However, some special testing code which is written in - GIMPLE IR style force the edge probility uninitialized, - we conservatively set it as never so that it will not - affect PRE (Phase 3 && Phse 4). */ - if (!e->probability.initialized_p ()) - new_prob = profile_probability::never (); - else if (!new_prob.initialized_p ()) - new_prob = curr_prob * e->probability; - else if (new_prob == profile_probability::always ()) + if (!NONDEBUG_INSN_P (rinsn)) continue; - else - new_prob += curr_prob * e->probability; + if (has_vtype_op (rinsn)) + { + const auto &info = vsetvl_info (rinsn); + rtx pat = info.get_vsetvl_pat (); + emit_insn_before (pat, rinsn); + if (dump_file) + { + fprintf (dump_file, " Insert vsetvl insn before insn %d:\n", + INSN_UID (rinsn)); + print_rtl_single (dump_file, PREV_INSN (rinsn)); + } + } } } } /* Lazy vsetvl insertion for optimize > 0. */ void -pass_vsetvl::lazy_vsetvl (void) +pass_vsetvl::lazy_vsetvl () { if (dump_file) - fprintf (dump_file, - "\nEntering Lazy VSETVL PASS and Handling %d basic blocks for " - "function:%s\n", - n_basic_blocks_for_fn (cfun), function_name (cfun)); + fprintf (dump_file, "\nEntering Lazy VSETVL PASS\n\n"); + + pre_vsetvl pre = pre_vsetvl (); - /* Phase 1 - Compute the local dems within each block. - The data-flow analysis within each block is backward analysis. */ if (dump_file) - fprintf (dump_file, "\nPhase 1: Compute local backward vector infos\n"); - for (const bb_info *bb : crtl->ssa->bbs ()) - compute_local_backward_infos (bb); + fprintf (dump_file, "\nPhase 1: Fuse local vsetvl infos.\n\n"); + pre.fuse_local_vsetvl_info (); if (dump_file && (dump_flags & TDF_DETAILS)) - m_vector_manager->dump (dump_file); + pre.dump (dump_file, "phase 1"); - /* Phase 2 - Emit vsetvl instructions within each basic block according to - demand, compute and save ANTLOC && AVLOC of each block. */ + /* Phase 2: Fuse header and footer vsetvl infos between basic blocks. */ if (dump_file) - fprintf (dump_file, - "\nPhase 2: Emit vsetvl instruction within each block\n"); - for (const bb_info *bb : crtl->ssa->bbs ()) - emit_local_forward_vsetvls (bb); + fprintf (dump_file, "\nPhase 2: Lift up vsetvl info.\n\n"); + bool changed; + int fused_count = 0; + do + { + if (dump_file) + fprintf (dump_file, " Try lift up %d.\n\n", fused_count); + changed = pre.earliest_fuse_vsetvl_info (); + fused_count += 1; + } while (changed); + if (dump_file && (dump_flags & TDF_DETAILS)) - m_vector_manager->dump (dump_file); + pre.dump (dump_file, "phase 2"); - /* Phase 3 - Propagate demanded info across blocks. */ + /* Phase 3: Reducing redundant vsetvl infos using LCM. 
*/
   if (dump_file)
-    fprintf (dump_file, "\nPhase 3: Demands propagation across blocks\n");
-  vsetvl_fusion ();
+    fprintf (dump_file, "\nPhase 3: Reduce global vsetvl infos.\n\n");
+  pre.pre_global_vsetvl_info ();
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    pre.dump (dump_file, "phase 3");
-  /* Phase 4 - Lazy code motion. */
+  /* Phase 4: Insert, modify and remove vsetvl insns. */
   if (dump_file)
-    fprintf (dump_file, "\nPhase 4: PRE vsetvl by Lazy code motion (LCM)\n");
-  pre_vsetvl ();
+    fprintf (dump_file,
+            "\nPhase 4: Insert, modify and remove vsetvl insns.\n\n");
+  pre.emit_vsetvl ();
-  /* Phase 5 - Post optimization base on RTL_SSA. */
+  /* Phase 5: Cleanup */
   if (dump_file)
-    fprintf (dump_file, "\nPhase 5: Post optimization base on RTL_SSA\n");
-  ssa_post_optimization ();
+    fprintf (dump_file, "\nPhase 5: Cleanup\n\n");
+  pre.cleaup ();
-  /* Phase 6 - Post optimization base on data-flow analysis. */
-  if (dump_file)
-    fprintf (dump_file,
-            "\nPhase 6: Post optimization base on data-flow analysis\n");
-  df_post_optimization ();
+  pre.finish ();
 }
 /* Main entry point for this pass. */
@@ -4400,14 +3542,11 @@ pass_vsetvl::execute (function *)
   if (!has_vector_insn (cfun))
     return 0;
-  init ();
-
   if (!optimize)
     simple_vsetvl ();
   else
     lazy_vsetvl ();
-  done ();
   return 0;
 }
diff --git a/gcc/config/riscv/riscv-vsetvl.def b/gcc/config/riscv/riscv-vsetvl.def
index 709cc4e..401d2c6 100644
--- a/gcc/config/riscv/riscv-vsetvl.def
+++ b/gcc/config/riscv/riscv-vsetvl.def
@@ -18,496 +18,163 @@ You should have received a copy of the GNU General Public License
 along with GCC; see the file COPYING3. If not see
 <http://www.gnu.org/licenses/>. */
-#ifndef DEF_INCOMPATIBLE_COND
-#define DEF_INCOMPATIBLE_COND(AVL1, SEW1, LMUL1, RATIO1, NONZERO_AVL1, \
-                             GE_SEW1, TAIL_POLICTY1, MASK_POLICY1, AVL2, \
-                             SEW2, LMUL2, RATIO2, NONZERO_AVL2, GE_SEW2, \
-                             TAIL_POLICTY2, MASK_POLICY2, COND)
+/* DEF_XXX_RULE (prev_demand, next_demand, fused_demand, compatible_p,
+                available_p, fuse)
+   prev_demand: the prev vector insn's sew_lmul_type
+   next_demand: the next vector insn's sew_lmul_type
+   fused_demand: if they are compatible, change prev_info's demand to the
+                fused_demand after fusing prev_info and next_info
+   compatible_p: check if prev_demand and next_demand are compatible
+   available_p: check if prev_demand is available for next_demand
+   fuse: if they are compatible, how to modify prev_info */
+
+#ifndef DEF_SEW_LMUL_RULE
+#define DEF_SEW_LMUL_RULE(prev_demand, next_demand, fused_demand, \
+                         compatible_p, available_p, fuse)
 #endif
-#ifndef DEF_SEW_LMUL_FUSE_RULE
-#define DEF_SEW_LMUL_FUSE_RULE(DEMAND_SEW1, DEMAND_LMUL1, DEMAND_RATIO1, \
-                              DEMAND_GE_SEW1, DEMAND_SEW2, DEMAND_LMUL2, \
-                              DEMAND_RATIO2, DEMAND_GE_SEW2, NEW_DEMAND_SEW, \
-                              NEW_DEMAND_LMUL, NEW_DEMAND_RATIO, \
-                              NEW_DEMAND_GE_SEW, NEW_SEW, NEW_VLMUL, \
-                              NEW_RATIO)
+#ifndef DEF_POLICY_RULE
+#define DEF_POLICY_RULE(prev_demand, next_demand, fused_demand, compatible_p, \
+                       available_p, fuse)
 #endif
-#ifndef DEF_UNAVAILABLE_COND
-#define DEF_UNAVAILABLE_COND(AVL1, SEW1, LMUL1, RATIO1, NONZERO_AVL1, GE_SEW1, \
-                            TAIL_POLICTY1, MASK_POLICY1, AVL2, SEW2, LMUL2, \
-                            RATIO2, NONZERO_AVL2, GE_SEW2, TAIL_POLICTY2, \
-                            MASK_POLICY2, COND)
+#ifndef DEF_AVL_RULE
+#define DEF_AVL_RULE(prev_demand, next_demand, fused_demand, compatible_p, \
+                    available_p, fuse)
 #endif
-/* Case 1: Demand compatible AVL.
*/ -DEF_INCOMPATIBLE_COND (/*AVL*/ DEMAND_TRUE, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_FALSE, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_TRUE, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_FALSE, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ incompatible_avl_p) - -/* Case 2: Demand same SEW. */ -DEF_INCOMPATIBLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_TRUE, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_FALSE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_TRUE, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_FALSE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ different_sew_p) - -/* Case 3: Demand same LMUL. */ -DEF_INCOMPATIBLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_TRUE, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_TRUE, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ different_lmul_p) - -/* Case 4: Demand same RATIO. */ -DEF_INCOMPATIBLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_TRUE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_TRUE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ different_ratio_p) - -/* Case 5: Demand same TAIL_POLICY. */ -DEF_INCOMPATIBLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_TRUE, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_TRUE, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ different_tail_policy_p) - -/* Case 6: Demand same MASK_POLICY. */ -DEF_INCOMPATIBLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_TRUE, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_TRUE, - /*COND*/ different_mask_policy_p) - -/* Case 7: Demand non zero AVL. */ -DEF_INCOMPATIBLE_COND (/*AVL*/ DEMAND_TRUE, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_TRUE, /*GE_SEW*/ DEMAND_ANY, - DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_TRUE, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_FALSE, /*GE_SEW*/ DEMAND_ANY, - DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ possible_zero_avl_p) - -/* Case 8: First SEW/LMUL/GE_SEW <-> Second RATIO/SEW. 
*/ -DEF_INCOMPATIBLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_TRUE, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_TRUE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ second_ratio_invalid_for_first_sew_p) -DEF_INCOMPATIBLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_TRUE, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_TRUE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ second_ratio_invalid_for_first_lmul_p) -DEF_INCOMPATIBLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_TRUE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_TRUE, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_FALSE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ second_sew_less_than_first_sew_p) - -/* Case 9: First (GE_SEW + LMUL) <-> Second RATIO. */ -DEF_INCOMPATIBLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_TRUE, - /*LMUL*/ DEMAND_TRUE, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_TRUE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_TRUE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ second_ratio_less_than_first_ratio_p) -/* Case 11: First (SEW + LMUL) <-> Second RATIO. */ -DEF_INCOMPATIBLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_TRUE, - /*LMUL*/ DEMAND_TRUE, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_FALSE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_TRUE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ different_ratio_p) -/* Case 13: First (GE_SEW/SEW + RATIO) <-> Second LMUL. */ -DEF_INCOMPATIBLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_TRUE, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_TRUE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_TRUE, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ different_lmul_p) -/* Case 14: First (LMUL + RATIO) <-> Second SEW. */ -DEF_INCOMPATIBLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_TRUE, /*RATIO*/ DEMAND_TRUE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_TRUE, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_FALSE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ different_sew_p) -/* Case 15: First (LMUL + RATIO) <-> Second GE_SEW. 
*/ -DEF_INCOMPATIBLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_TRUE, /*RATIO*/ DEMAND_TRUE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_TRUE, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_TRUE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ first_sew_less_than_second_sew_p) - -/* Case 16: First SEW + Second LMUL <-> First RATIO. */ -DEF_INCOMPATIBLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_TRUE, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_TRUE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_FALSE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_TRUE, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ different_lmul_p) -/* Case 17: First SEW + Second LMUL <-> Second RATIO. */ -DEF_INCOMPATIBLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_TRUE, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_FALSE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_TRUE, /*RATIO*/ DEMAND_TRUE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ different_sew_p) - -/* Case 18: First SEW + Second RATIO <-> First LMUL. */ -DEF_INCOMPATIBLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_TRUE, - /*LMUL*/ DEMAND_TRUE, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_FALSE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_TRUE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ different_ratio_p) - -/* Case 19: First GE_SEW + Second LMUL <-> First RATIO. */ -DEF_INCOMPATIBLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_TRUE, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_TRUE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_FALSE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_TRUE, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ second_lmul_less_than_first_lmul_p) -/* Case 20: First GE_SEW + Second LMUL <-> Second RATIO. */ -DEF_INCOMPATIBLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_TRUE, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_FALSE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_TRUE, /*RATIO*/ DEMAND_TRUE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ second_sew_less_than_first_sew_p) - -/* Case 21: First GE_SEW + Second RATIO <-> First LMUL. 
*/ -DEF_INCOMPATIBLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_TRUE, - /*LMUL*/ DEMAND_TRUE, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_FALSE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_TRUE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ second_ratio_less_than_first_ratio_p) - -/* Case 22: First GE_SEW + Second SEW + First LMUL + Second ratio. */ -DEF_INCOMPATIBLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_TRUE, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_TRUE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_TRUE, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_TRUE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_FALSE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ different_lmul_p) - -/* Case 23: First GE_SEW + Second SEW + Second LMUL + First ratio. */ -DEF_INCOMPATIBLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_TRUE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_TRUE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_TRUE, - /*LMUL*/ DEMAND_TRUE, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_FALSE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ different_ratio_p) - -/* Merge rules. */ -DEF_SEW_LMUL_FUSE_RULE (/*SEW*/ DEMAND_TRUE, /*LMUL*/ DEMAND_FALSE, - /*RATIO*/ DEMAND_FALSE, /*GE_SEW*/ DEMAND_TRUE, - /*SEW*/ DEMAND_TRUE, /*LMUL*/ DEMAND_FALSE, - /*RATIO*/ DEMAND_FALSE, /*GE_SEW*/ DEMAND_TRUE, - /*NEW_DEMAND_SEW*/ true, - /*NEW_DEMAND_LMUL*/ false, - /*NEW_DEMAND_RATIO*/ false, - /*NEW_DEMAND_GE_SEW*/ true, greatest_sew, first_vlmul, - first_ratio) - -DEF_SEW_LMUL_FUSE_RULE (/*SEW*/ DEMAND_TRUE, /*LMUL*/ DEMAND_ANY, - /*RATIO*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_FALSE, - /*SEW*/ DEMAND_ANY, /*LMUL*/ DEMAND_ANY, - /*RATIO*/ DEMAND_TRUE, /*GE_SEW*/ DEMAND_ANY, - /*NEW_DEMAND_SEW*/ true, - /*NEW_DEMAND_LMUL*/ true, - /*NEW_DEMAND_RATIO*/ false, - /*NEW_DEMAND_GE_SEW*/ false, first_sew, - vlmul_for_first_sew_second_ratio, second_ratio) -DEF_SEW_LMUL_FUSE_RULE (/*SEW*/ DEMAND_ANY, /*LMUL*/ DEMAND_TRUE, - /*RATIO*/ DEMAND_FALSE, /*GE_SEW*/ DEMAND_ANY, - /*SEW*/ DEMAND_TRUE, /*LMUL*/ DEMAND_ANY, - /*RATIO*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_FALSE, - /*NEW_DEMAND_SEW*/ true, - /*NEW_DEMAND_LMUL*/ true, - /*NEW_DEMAND_RATIO*/ false, - /*NEW_DEMAND_GE_SEW*/ false, second_sew, first_vlmul, - ratio_for_second_sew_first_vlmul) -DEF_SEW_LMUL_FUSE_RULE (/*SEW*/ DEMAND_TRUE, /*LMUL*/ DEMAND_FALSE, - /*RATIO*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_TRUE, - /*SEW*/ DEMAND_FALSE, /*LMUL*/ DEMAND_FALSE, - /*RATIO*/ DEMAND_TRUE, /*GE_SEW*/ DEMAND_FALSE, - /*NEW_DEMAND_SEW*/ true, - /*NEW_DEMAND_LMUL*/ false, - /*NEW_DEMAND_RATIO*/ true, - /*NEW_DEMAND_GE_SEW*/ true, first_sew, - vlmul_for_first_sew_second_ratio, second_ratio) -DEF_SEW_LMUL_FUSE_RULE (/*SEW*/ DEMAND_TRUE, /*LMUL*/ DEMAND_FALSE, - /*RATIO*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_TRUE, - /*SEW*/ DEMAND_TRUE, /*LMUL*/ DEMAND_FALSE, - /*RATIO*/ DEMAND_TRUE, /*GE_SEW*/ DEMAND_TRUE, - /*NEW_DEMAND_SEW*/ true, - /*NEW_DEMAND_LMUL*/ false, - /*NEW_DEMAND_RATIO*/ true, - /*NEW_DEMAND_GE_SEW*/ true, greatest_sew, - vlmul_for_greatest_sew_second_ratio, second_ratio) -DEF_SEW_LMUL_FUSE_RULE (/*SEW*/ DEMAND_TRUE, /*LMUL*/ DEMAND_FALSE, 
- /*RATIO*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_TRUE, - /*SEW*/ DEMAND_FALSE, /*LMUL*/ DEMAND_TRUE, - /*RATIO*/ DEMAND_FALSE, /*GE_SEW*/ DEMAND_FALSE, - /*NEW_DEMAND_SEW*/ true, - /*NEW_DEMAND_LMUL*/ true, - /*NEW_DEMAND_RATIO*/ false, - /*NEW_DEMAND_GE_SEW*/ true, first_sew, second_vlmul, - second_ratio) -DEF_SEW_LMUL_FUSE_RULE (/*SEW*/ DEMAND_TRUE, /*LMUL*/ DEMAND_FALSE, - /*RATIO*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_TRUE, - /*SEW*/ DEMAND_TRUE, /*LMUL*/ DEMAND_TRUE, - /*RATIO*/ DEMAND_FALSE, /*GE_SEW*/ DEMAND_FALSE, - /*NEW_DEMAND_SEW*/ true, - /*NEW_DEMAND_LMUL*/ true, - /*NEW_DEMAND_RATIO*/ false, - /*NEW_DEMAND_GE_SEW*/ false, second_sew, second_vlmul, - second_ratio) -DEF_SEW_LMUL_FUSE_RULE (/*SEW*/ DEMAND_TRUE, /*LMUL*/ DEMAND_FALSE, - /*RATIO*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_TRUE, - /*SEW*/ DEMAND_TRUE, /*LMUL*/ DEMAND_TRUE, - /*RATIO*/ DEMAND_FALSE, /*GE_SEW*/ DEMAND_TRUE, - /*NEW_DEMAND_SEW*/ true, - /*NEW_DEMAND_LMUL*/ true, - /*NEW_DEMAND_RATIO*/ false, - /*NEW_DEMAND_GE_SEW*/ false, greatest_sew, second_vlmul, - second_ratio) - -DEF_SEW_LMUL_FUSE_RULE (/*SEW*/ DEMAND_TRUE, /*LMUL*/ DEMAND_FALSE, - /*RATIO*/ DEMAND_FALSE, /*GE_SEW*/ DEMAND_TRUE, - /*SEW*/ DEMAND_TRUE, /*LMUL*/ DEMAND_FALSE, - /*RATIO*/ DEMAND_FALSE, /*GE_SEW*/ DEMAND_FALSE, - /*NEW_DEMAND_SEW*/ true, - /*NEW_DEMAND_LMUL*/ false, - /*NEW_DEMAND_RATIO*/ false, - /*NEW_DEMAND_GE_SEW*/ false, second_sew, second_vlmul, - second_ratio) -DEF_SEW_LMUL_FUSE_RULE (/*SEW*/ DEMAND_TRUE, /*LMUL*/ DEMAND_TRUE, - /*RATIO*/ DEMAND_FALSE, /*GE_SEW*/ DEMAND_TRUE, - /*SEW*/ DEMAND_TRUE, /*LMUL*/ DEMAND_FALSE, - /*RATIO*/ DEMAND_FALSE, /*GE_SEW*/ DEMAND_FALSE, - /*NEW_DEMAND_SEW*/ true, - /*NEW_DEMAND_LMUL*/ true, - /*NEW_DEMAND_RATIO*/ false, - /*NEW_DEMAND_GE_SEW*/ false, second_sew, first_vlmul, - second_ratio) -DEF_SEW_LMUL_FUSE_RULE (/*SEW*/ DEMAND_TRUE, /*LMUL*/ DEMAND_FALSE, - /*RATIO*/ DEMAND_TRUE, /*GE_SEW*/ DEMAND_TRUE, - /*SEW*/ DEMAND_TRUE, /*LMUL*/ DEMAND_FALSE, - /*RATIO*/ DEMAND_FALSE, /*GE_SEW*/ DEMAND_FALSE, - /*NEW_DEMAND_SEW*/ true, - /*NEW_DEMAND_LMUL*/ false, - /*NEW_DEMAND_RATIO*/ true, - /*NEW_DEMAND_GE_SEW*/ false, second_sew, first_vlmul, - first_ratio) - -/* Define the unavailable cases for LCM. */ - -/* Case 1: Dem1 (Not demand AVL) is unavailable to Dem2 (Demand AVL). */ -DEF_UNAVAILABLE_COND (/*AVL*/ DEMAND_FALSE, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_TRUE, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ always_unavailable) -/* Case 2: Dem1 (Demand AVL) is unavailable to Dem2 (Demand normal AVL). */ -DEF_UNAVAILABLE_COND (/*AVL*/ DEMAND_TRUE, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_TRUE, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ avl_unavailable_p) - -/* Case 3: Dem1 (Not demand TAIL) is unavailable to Dem2 (Demand TAIL). 
*/ -DEF_UNAVAILABLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_FALSE, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_TRUE, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ always_unavailable) - -/* Case 4: Dem1 (Not demand MASK) is unavailable to Dem2 (Demand MASK). */ -DEF_UNAVAILABLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_FALSE, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_TRUE, - /*COND*/ always_unavailable) - -/* Case 5: Dem1 (Demand RATIO) is unavailable to Dem2 (Demand SEW/GE_SEW/LMUL). - */ -DEF_UNAVAILABLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_FALSE, - /*LMUL*/ DEMAND_FALSE, /*RATIO*/ DEMAND_TRUE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_FALSE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_TRUE, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ always_unavailable) -DEF_UNAVAILABLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_FALSE, - /*LMUL*/ DEMAND_FALSE, /*RATIO*/ DEMAND_TRUE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_FALSE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_TRUE, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ always_unavailable) - -/* Case 6: Dem1 (Demand SEW). */ -DEF_UNAVAILABLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_TRUE, - /*LMUL*/ DEMAND_FALSE, /*RATIO*/ DEMAND_FALSE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_FALSE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ sew_unavailable_p) - -/* Case 7: Dem1 (Demand LMUL). */ -DEF_UNAVAILABLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_FALSE, - /*LMUL*/ DEMAND_TRUE, /*RATIO*/ DEMAND_FALSE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_FALSE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ lmul_unavailable_p) - -/* Case 8: Dem1 (Demand GE_SEW). */ -DEF_UNAVAILABLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_TRUE, - /*LMUL*/ DEMAND_FALSE, /*RATIO*/ DEMAND_FALSE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_TRUE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ ge_sew_unavailable_p) - -/* Case 9: Dem1 (Demand GE_SEW + LMUL). 
*/ -DEF_UNAVAILABLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_TRUE, - /*LMUL*/ DEMAND_TRUE, /*RATIO*/ DEMAND_FALSE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_TRUE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ ge_sew_lmul_unavailable_p) - -/* Case 10: Dem1 (Demand GE_SEW + RATIO). */ -DEF_UNAVAILABLE_COND (/*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_TRUE, - /*LMUL*/ DEMAND_FALSE, /*RATIO*/ DEMAND_TRUE, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_TRUE, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*AVL*/ DEMAND_ANY, /*SEW*/ DEMAND_ANY, - /*LMUL*/ DEMAND_ANY, /*RATIO*/ DEMAND_ANY, - /*NONZERO_AVL*/ DEMAND_ANY, /*GE_SEW*/ DEMAND_ANY, - /*TAIL_POLICTY*/ DEMAND_ANY, /*MASK_POLICY*/ DEMAND_ANY, - /*COND*/ ge_sew_ratio_unavailable_p) - -#undef DEF_INCOMPATIBLE_COND -#undef DEF_SEW_LMUL_FUSE_RULE -#undef DEF_UNAVAILABLE_COND +/* Define SEW and LMUL rules. */ +DEF_SEW_LMUL_RULE (sew_lmul, sew_lmul, sew_lmul, sew_lmul_eq_p, sew_lmul_eq_p, + nop) +DEF_SEW_LMUL_RULE (sew_lmul, ratio_only, sew_lmul, ratio_eq_p, ratio_eq_p, nop) +DEF_SEW_LMUL_RULE (sew_lmul, sew_only, sew_lmul, sew_eq_p, sew_eq_p, nop) +DEF_SEW_LMUL_RULE (sew_lmul, ge_sew, sew_lmul, + sew_ge_and_prev_sew_le_next_max_sew_p, + sew_ge_and_prev_sew_le_next_max_sew_p, nop) +DEF_SEW_LMUL_RULE ( + sew_lmul, ratio_and_ge_sew, sew_lmul, + sew_ge_and_prev_sew_le_next_max_sew_and_next_ratio_valid_for_prev_sew_p, + sew_ge_and_prev_sew_le_next_max_sew_and_next_ratio_valid_for_prev_sew_p, nop) + +DEF_SEW_LMUL_RULE (ratio_only, sew_lmul, sew_lmul, ratio_eq_p, always_false, + use_next_sew_lmul) +/* use_next_sew_lmul for testcase no change. 
*/ +DEF_SEW_LMUL_RULE (ratio_only, ratio_only, ratio_only, ratio_eq_p, ratio_eq_p, + use_next_sew_lmul) +DEF_SEW_LMUL_RULE (ratio_only, sew_only, sew_lmul, + prev_ratio_valid_for_next_sew_p, always_false, + use_next_sew_with_prev_ratio) +DEF_SEW_LMUL_RULE (ratio_only, ge_sew, ratio_and_ge_sew, + prev_ratio_valid_for_next_sew_p, always_false, + use_next_sew_with_prev_ratio) +DEF_SEW_LMUL_RULE (ratio_only, ratio_and_ge_sew, ratio_and_ge_sew, ratio_eq_p, + always_false, use_next_sew_lmul) + +DEF_SEW_LMUL_RULE (sew_only, sew_lmul, sew_lmul, sew_eq_p, always_false, + use_next_sew_lmul) +DEF_SEW_LMUL_RULE (sew_only, ratio_only, sew_lmul, + next_ratio_valid_for_prev_sew_p, always_false, + modify_lmul_with_next_ratio) +DEF_SEW_LMUL_RULE (sew_only, sew_only, sew_only, sew_eq_p, sew_eq_p, nop) +DEF_SEW_LMUL_RULE (sew_only, ge_sew, sew_only, + sew_ge_and_prev_sew_le_next_max_sew_p, sew_ge_p, nop) +DEF_SEW_LMUL_RULE ( + sew_only, ratio_and_ge_sew, sew_lmul, + sew_ge_and_prev_sew_le_next_max_sew_and_next_ratio_valid_for_prev_sew_p, + always_false, modify_lmul_with_next_ratio) + +DEF_SEW_LMUL_RULE (ge_sew, sew_lmul, sew_lmul, + sew_le_and_next_sew_le_prev_max_sew_p, always_false, + use_next_sew_lmul) +DEF_SEW_LMUL_RULE (ge_sew, ratio_only, ratio_and_ge_sew, + next_ratio_valid_for_prev_sew_p, always_false, + modify_lmul_with_next_ratio) +DEF_SEW_LMUL_RULE (ge_sew, sew_only, sew_only, + sew_le_and_next_sew_le_prev_max_sew_p, always_false, + use_next_sew) +DEF_SEW_LMUL_RULE (ge_sew, ge_sew, ge_sew, max_sew_overlap_p, sew_ge_p, + use_max_sew) +DEF_SEW_LMUL_RULE (ge_sew, ratio_and_ge_sew, ratio_and_ge_sew, + max_sew_overlap_and_next_ratio_valid_for_prev_sew_p, + always_false, use_max_sew_and_lmul_with_next_ratio) + +DEF_SEW_LMUL_RULE (ratio_and_ge_sew, sew_lmul, sew_lmul, + sew_le_and_next_sew_le_prev_max_sew_and_ratio_eq_p, + always_false, use_next_sew_lmul) +DEF_SEW_LMUL_RULE (ratio_and_ge_sew, ratio_only, ratio_and_ge_sew, ratio_eq_p, + ratio_eq_p, use_max_sew_and_lmul_with_prev_ratio) +DEF_SEW_LMUL_RULE ( + ratio_and_ge_sew, sew_only, sew_only, + sew_le_and_next_sew_le_prev_max_sew_and_prev_ratio_valid_for_next_sew_p, + always_false, use_next_sew_with_prev_ratio) +DEF_SEW_LMUL_RULE (ratio_and_ge_sew, ge_sew, ratio_and_ge_sew, + max_sew_overlap_and_prev_ratio_valid_for_next_sew_p, + sew_ge_p, use_max_sew_and_lmul_with_prev_ratio) +DEF_SEW_LMUL_RULE (ratio_and_ge_sew, ratio_and_ge_sew, ratio_and_ge_sew, + max_sew_overlap_and_ratio_eq_p, sew_ge_and_ratio_eq_p, + use_max_sew_and_lmul_with_prev_ratio) + +/* Define TAIL and MASK compatible and merge rules. 
*/ + +DEF_POLICY_RULE (tail_mask_policy, tail_mask_policy, tail_mask_policy, + tail_mask_policy_eq_p, tail_mask_policy_eq_p, + use_tail_mask_policy) +DEF_POLICY_RULE (tail_mask_policy, tail_policy_only, tail_mask_policy, + tail_policy_eq_p, tail_policy_eq_p, use_tail_policy) +DEF_POLICY_RULE (tail_mask_policy, mask_policy_only, tail_mask_policy, + mask_policy_eq_p, mask_policy_eq_p, use_mask_policy) +DEF_POLICY_RULE (tail_mask_policy, ignore_policy, tail_mask_policy, always_true, + always_true, nop) + +DEF_POLICY_RULE (tail_policy_only, tail_mask_policy, tail_mask_policy, + tail_policy_eq_p, always_false, use_mask_policy) +DEF_POLICY_RULE (tail_policy_only, tail_policy_only, tail_policy_only, + tail_policy_eq_p, tail_policy_eq_p, use_tail_policy) +DEF_POLICY_RULE (tail_policy_only, mask_policy_only, tail_mask_policy, + always_true, always_false, use_mask_policy) +DEF_POLICY_RULE (tail_policy_only, ignore_policy, tail_policy_only, always_true, + always_true, nop) + +DEF_POLICY_RULE (mask_policy_only, tail_mask_policy, tail_mask_policy, + mask_policy_eq_p, always_false, use_tail_policy) +DEF_POLICY_RULE (mask_policy_only, tail_policy_only, tail_mask_policy, + always_true, always_false, use_tail_policy) +DEF_POLICY_RULE (mask_policy_only, mask_policy_only, mask_policy_only, + mask_policy_eq_p, mask_policy_eq_p, use_mask_policy) +DEF_POLICY_RULE (mask_policy_only, ignore_policy, mask_policy_only, always_true, + always_true, nop) + +DEF_POLICY_RULE (ignore_policy, tail_mask_policy, tail_mask_policy, always_true, + always_false, use_tail_mask_policy) +DEF_POLICY_RULE (ignore_policy, tail_policy_only, tail_policy_only, always_true, + always_false, use_tail_policy) +DEF_POLICY_RULE (ignore_policy, mask_policy_only, mask_policy_only, always_true, + always_false, use_mask_policy) +DEF_POLICY_RULE (ignore_policy, ignore_policy, ignore_policy, always_true, + always_true, nop) + +/* Define AVL compatible and merge rules. */ + +DEF_AVL_RULE (avl, avl, avl, avl_equal_p, avl_equal_p, nop) +DEF_AVL_RULE (avl, non_zero_avl, avl, avl_equal_or_prev_avl_non_zero_p, + avl_equal_or_prev_avl_non_zero_p, nop) +DEF_AVL_RULE (avl, ignore_avl, avl, always_true, always_true, nop) + +DEF_AVL_RULE (non_zero_avl, avl, avl, + avl_equal_or_next_avl_non_zero_and_can_use_next_avl_p, + always_false, use_next_avl_when_not_equal) + +DEF_AVL_RULE (non_zero_avl, non_zero_avl, non_zero_avl, always_true, + always_true, nop) +DEF_AVL_RULE (non_zero_avl, ignore_avl, non_zero_avl, always_true, always_true, + nop) + +DEF_AVL_RULE (ignore_avl, avl, avl, can_use_next_avl_p, always_false, + use_next_avl) +DEF_AVL_RULE (ignore_avl, non_zero_avl, non_zero_avl, can_use_next_avl_p, + always_false, use_next_avl) +DEF_AVL_RULE (ignore_avl, ignore_avl, ignore_avl, always_true, always_true, nop) + +#undef DEF_SEW_LMUL_RULE +#undef DEF_POLICY_RULE +#undef DEF_AVL_RULE diff --git a/gcc/config/riscv/riscv-vsetvl.h b/gcc/config/riscv/riscv-vsetvl.h deleted file mode 100644 index 53549ab..0000000 --- a/gcc/config/riscv/riscv-vsetvl.h +++ /dev/null @@ -1,488 +0,0 @@ -/* VSETVL pass header for RISC-V 'V' Extension for GNU compiler. - Copyright (C) 2022-2023 Free Software Foundation, Inc. - Contributed by Juzhe Zhong (juzhe.zhong@rivai.ai), RiVAI Technologies Ltd. - -This file is part of GCC. - -GCC is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; either version 3, or(at your option) -any later version. 
- -GCC is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with GCC; see the file COPYING3. If not see -<http://www.gnu.org/licenses/>. */ - -#ifndef GCC_RISCV_VSETVL_H -#define GCC_RISCV_VSETVL_H - -namespace riscv_vector { - -/* Classification of vsetvl instruction. */ -enum vsetvl_type -{ - VSETVL_NORMAL, - VSETVL_VTYPE_CHANGE_ONLY, - VSETVL_DISCARD_RESULT, - NUM_VSETVL_TYPE -}; - -enum emit_type -{ - /* emit_insn directly. */ - EMIT_DIRECT, - EMIT_BEFORE, - EMIT_AFTER, -}; - -enum demand_type -{ - DEMAND_AVL, - DEMAND_SEW, - DEMAND_LMUL, - DEMAND_RATIO, - DEMAND_NONZERO_AVL, - DEMAND_GE_SEW, - DEMAND_TAIL_POLICY, - DEMAND_MASK_POLICY, - NUM_DEMAND -}; - -enum demand_status -{ - DEMAND_FALSE, - DEMAND_TRUE, - DEMAND_ANY, -}; - -enum fusion_type -{ - INVALID_FUSION, - VALID_AVL_FUSION, - KILLED_AVL_FUSION -}; - -enum def_type -{ - REAL_SET = 1 << 0, - PHI_SET = 1 << 1, - BB_HEAD_SET = 1 << 2, - BB_END_SET = 1 << 3, - /* ??? TODO: In RTL_SSA framework, we have REAL_SET, - PHI_SET, BB_HEAD_SET, BB_END_SET and - CLOBBER_DEF def_info types. Currently, - we conservatively do not optimize clobber - def since we don't see the case that we - need to optimize it. */ - CLOBBER_DEF = 1 << 4 -}; - -/* AVL info for RVV instruction. Most RVV instructions have AVL operand in - implicit dependency. The AVL comparison between 2 RVV instructions is - very important since it affects our decision whether we should insert - a vsetvl instruction in this situation. AVL operand of all RVV instructions - can only be either a const_int value with < 32 or a reg value which can be - define by either a real RTL instruction or a PHI instruction. So we need a - standalone method to define AVL comparison and we can not simpily use - operator "==" to compare 2 RTX value since it's to strict which will make - use miss a lot of optimization opportunities. This method handle these - following cases: - - - Background: - Insert-vsetvl PASS is working after RA. - - - Terminology: - - pr: Pseudo-register. - - hr: Hardware-register. - - - Case 1: - - Before RA: - li pr138,13 - insn1 (implicit depend on pr138). - li pr138,14 - insn2 (implicit depend on pr139). - - After RA: - li hr5,13 - insn1 (implicit depend on hr5). - li hr5,14 - insn2 (implicit depend on hr5). - - Correct IR after vsetvl PASS: - li hr5,13 - vsetvl1 zero,hr5.... - insn1 (implicit depend on hr5). - li hr5,14 - vsetvl2 zero,hr5.... - insn2 (implicit depend on hr5). - - In this case, both insn1 and insn2 are using hr5 as the same AVL. - If we use "rtx_equal_p" or "REGNO (AVL1) == REGNO (AVL)", we will end - up with missing the vsetvl2 instruction which creates wrong result. - - Note: Using "==" operator to compare 2 AVL RTX strictly can fix this - issue. However, it is a too strict comparison method since not all member - variables in RTX data structure are not neccessary to be the same. It will - make us miss a lot of optimization opportunities. - - - Case 2: - - After RA: - bb 0: - li hr5,13 - bb 1: - li hr5,14 - bb2: - insn1 (implicit depend on hr5). - insn2 (implicit depend on hr5). - - In this case, we may end up with different AVL RTX and produce redundant - vsetvl instruction. - - VALUE is the implicit dependency in each RVV instruction. - SOURCE is the source definition information of AVL operand. 
*/ -class avl_info -{ -private: - rtx m_value; - rtl_ssa::set_info *m_source; - -public: - avl_info () : m_value (NULL_RTX), m_source (nullptr) {} - avl_info (const avl_info &); - avl_info (rtx, rtl_ssa::set_info *); - rtx get_value () const { return m_value; } - rtl_ssa::set_info *get_source () const { return m_source; } - void set_source (rtl_ssa::set_info *set) { m_source = set; } - bool single_source_equal_p (const avl_info &) const; - bool multiple_source_equal_p (const avl_info &) const; - avl_info &operator= (const avl_info &); - bool operator== (const avl_info &) const; - bool operator!= (const avl_info &) const; - - bool has_avl_imm () const - { - return get_value () && CONST_INT_P (get_value ()); - } - bool has_avl_reg () const { return get_value () && REG_P (get_value ()); } - bool has_avl_no_reg () const { return !get_value (); } - bool has_non_zero_avl () const; - bool has_avl () const { return get_value (); } -}; - -/* Basic structure to save VL/VTYPE information. */ -struct vl_vtype_info -{ -protected: - /* AVL can be either register or const_int. */ - avl_info m_avl; - /* Fields from VTYPE. The VTYPE checking depend on the flag - dem_* before. */ - uint8_t m_sew; - riscv_vector::vlmul_type m_vlmul; - uint8_t m_ratio; - bool m_ta; - bool m_ma; - -public: - void set_sew (uint8_t sew) { m_sew = sew; } - void set_vlmul (riscv_vector::vlmul_type vlmul) { m_vlmul = vlmul; } - void set_ratio (uint8_t ratio) { m_ratio = ratio; } - void set_ta (bool ta) { m_ta = ta; } - void set_ma (bool ma) { m_ma = ma; } - - vl_vtype_info () - : m_avl (avl_info ()), m_sew (0), m_vlmul (riscv_vector::LMUL_RESERVED), - m_ratio (0), m_ta (0), m_ma (0) - {} - vl_vtype_info (const vl_vtype_info &) = default; - vl_vtype_info &operator= (const vl_vtype_info &) = default; - vl_vtype_info (avl_info, uint8_t, riscv_vector::vlmul_type, uint8_t, bool, - bool); - - bool operator== (const vl_vtype_info &) const; - bool operator!= (const vl_vtype_info &) const; - - bool has_avl_imm () const { return m_avl.has_avl_imm (); } - bool has_avl_reg () const { return m_avl.has_avl_reg (); } - bool has_avl_no_reg () const { return m_avl.has_avl_no_reg (); } - bool has_non_zero_avl () const { return m_avl.has_non_zero_avl (); }; - bool has_avl () const { return m_avl.has_avl (); } - - rtx get_avl () const { return m_avl.get_value (); } - const avl_info &get_avl_info () const { return m_avl; } - rtl_ssa::set_info *get_avl_source () const { return m_avl.get_source (); } - void set_avl_source (rtl_ssa::set_info *set) { m_avl.set_source (set); } - void set_avl_info (const avl_info &avl) { m_avl = avl; } - uint8_t get_sew () const { return m_sew; } - riscv_vector::vlmul_type get_vlmul () const { return m_vlmul; } - uint8_t get_ratio () const { return m_ratio; } - bool get_ta () const { return m_ta; } - bool get_ma () const { return m_ma; } - - bool same_avl_p (const vl_vtype_info &) const; - bool same_vtype_p (const vl_vtype_info &) const; - bool same_vlmax_p (const vl_vtype_info &) const; -}; - -class vector_insn_info : public vl_vtype_info -{ -private: - enum state_type - { - UNINITIALIZED, - VALID, - UNKNOWN, - EMPTY, - - /* The block is polluted as containing VSETVL instruction during dem - backward propagation to gain better LCM optimization even though - such VSETVL instruction is not really emit yet during this time. */ - DIRTY, - }; - - enum state_type m_state; - - bool m_demands[NUM_DEMAND]; - - /* TODO: Assume INSN1 = INSN holding of definition of AVL. - INSN2 = INSN that is inserted a vsetvl insn before. 
- We may need to add a new member to save INSN of holding AVL. - m_insn is holding the INSN that is inserted a vsetvl insn before in - Phase 2. Ideally, most of the time INSN1 == INSN2. However, considering - such case: - - vmv.x.s (INSN2) - vle8.v (INSN1) - - If these 2 instructions are compatible, we should only issue a vsetvl INSN - (with AVL included) before vmv.x.s, but vmv.x.s is not the INSN holding the - definition of AVL. */ - rtl_ssa::insn_info *m_insn; - - friend class vector_infos_manager; - -public: - vector_insn_info () - : vl_vtype_info (), m_state (UNINITIALIZED), m_demands{false}, - m_insn (nullptr) - {} - - /* Parse the instruction to get VL/VTYPE information and demanding - * information. */ - /* This is only called by simple_vsetvl subroutine when optimize == 0. - Since RTL_SSA can not be enabled when optimize == 0, we don't initialize - the m_insn. */ - void parse_insn (rtx_insn *); - /* This is only called by lazy_vsetvl subroutine when optimize > 0. - We use RTL_SSA framework to initialize the insn_info. */ - void parse_insn (rtl_ssa::insn_info *); - - bool operator>= (const vector_insn_info &) const; - bool operator== (const vector_insn_info &) const; - - bool uninit_p () const { return m_state == UNINITIALIZED; } - bool valid_p () const { return m_state == VALID; } - bool unknown_p () const { return m_state == UNKNOWN; } - bool empty_p () const { return m_state == EMPTY; } - bool dirty_p () const { return m_state == DIRTY; } - bool valid_or_dirty_p () const - { - return m_state == VALID || m_state == DIRTY; - } - bool available_p (const vector_insn_info &) const; - - static vector_insn_info get_unknown () - { - vector_insn_info info; - info.set_unknown (); - return info; - } - - void set_valid () { m_state = VALID; } - void set_unknown () { m_state = UNKNOWN; } - void set_empty () { m_state = EMPTY; } - void set_dirty () { m_state = DIRTY; } - void set_insn (rtl_ssa::insn_info *insn) { m_insn = insn; } - - bool demand_p (enum demand_type type) const { return m_demands[type]; } - void demand (enum demand_type type) { m_demands[type] = true; } - void set_demand (enum demand_type type, bool value) - { - m_demands[type] = value; - } - void fuse_avl (const vector_insn_info &, const vector_insn_info &); - void fuse_sew_lmul (const vector_insn_info &, const vector_insn_info &); - void fuse_tail_policy (const vector_insn_info &, const vector_insn_info &); - void fuse_mask_policy (const vector_insn_info &, const vector_insn_info &); - - bool compatible_p (const vector_insn_info &) const; - bool skip_avl_compatible_p (const vector_insn_info &) const; - bool compatible_avl_p (const vl_vtype_info &) const; - bool compatible_avl_p (const avl_info &) const; - bool compatible_vtype_p (const vl_vtype_info &) const; - bool compatible_p (const vl_vtype_info &) const; - vector_insn_info local_merge (const vector_insn_info &) const; - vector_insn_info global_merge (const vector_insn_info &, unsigned int) const; - - rtl_ssa::insn_info *get_insn () const { return m_insn; } - const bool *get_demands (void) const { return m_demands; } - rtx get_avl_or_vl_reg (void) const; - rtx get_avl_reg_rtx (void) const - { - return gen_rtx_REG (Pmode, get_avl_source ()->regno ()); - } - bool update_fault_first_load_avl (rtl_ssa::insn_info *); - - void dump (FILE *) const; -}; - -struct vector_block_info -{ - /* The local_dem vector insn_info of the block. */ - vector_insn_info local_dem; - - /* The reaching_out vector insn_info of the block. 
*/ - vector_insn_info reaching_out; - - /* The static execute probability of the demand info. */ - profile_probability probability; - - vector_block_info () = default; -}; - -class vector_infos_manager -{ -public: - auto_vec<vector_insn_info> vector_insn_infos; - auto_vec<vector_block_info> vector_block_infos; - auto_vec<vector_insn_info *> vector_exprs; - hash_set<rtx_insn *> to_refine_vsetvls; - hash_set<rtx_insn *> to_delete_vsetvls; - - struct edge_list *vector_edge_list; - sbitmap *vector_kill; - sbitmap *vector_del; - sbitmap *vector_insert; - sbitmap *vector_antic; - sbitmap *vector_transp; - sbitmap *vector_comp; - sbitmap *vector_avin; - sbitmap *vector_avout; - sbitmap *vector_antin; - sbitmap *vector_antout; - sbitmap *vector_earliest; - - vector_infos_manager (); - - /* Create a new expr in expr list if it is not exist. */ - void create_expr (vector_insn_info &); - - /* Get the expr id of the pair of expr. */ - size_t get_expr_id (const vector_insn_info &) const; - - /* Return the number of expr that is set in the bitmap. */ - size_t expr_set_num (sbitmap) const; - - /* Get all relaxer expression id for corresponding vector info. */ - auto_vec<size_t> get_all_available_exprs (const vector_insn_info &) const; - - /* Return true if all expression set in bitmap are same AVL. */ - bool all_same_avl_p (const basic_block, sbitmap) const; - - /* Return true if all expression set in bitmap are same ratio. */ - bool all_same_ratio_p (sbitmap) const; - - bool all_avail_in_compatible_p (const basic_block) const; - bool earliest_fusion_worthwhile_p (const basic_block) const; - bool vsetvl_dominated_by_all_preds_p (const basic_block, - const vector_insn_info &) const; - - bool to_delete_p (rtx_insn *rinsn) - { - if (to_delete_vsetvls.contains (rinsn)) - { - to_delete_vsetvls.remove (rinsn); - if (to_refine_vsetvls.contains (rinsn)) - to_refine_vsetvls.remove (rinsn); - return true; - } - return false; - } - bool to_refine_p (rtx_insn *rinsn) - { - if (to_refine_vsetvls.contains (rinsn)) - { - to_refine_vsetvls.remove (rinsn); - return true; - } - return false; - } - - void release (void); - void create_bitmap_vectors (void); - void free_bitmap_vectors (void); - - void dump (FILE *) const; -}; - -struct demands_pair -{ - demand_status first[NUM_DEMAND]; - demand_status second[NUM_DEMAND]; - bool match_cond_p (const bool *dems1, const bool *dems2) const - { - for (unsigned i = 0; i < NUM_DEMAND; i++) - { - if (first[i] != DEMAND_ANY && first[i] != dems1[i]) - return false; - if (second[i] != DEMAND_ANY && second[i] != dems2[i]) - return false; - } - return true; - } -}; - -struct demands_cond -{ - demands_pair pair; - using CONDITION_TYPE - = bool (*) (const vector_insn_info &, const vector_insn_info &); - CONDITION_TYPE incompatible_p; - bool dual_incompatible_p (const vector_insn_info &info1, - const vector_insn_info &info2) const - { - return ((pair.match_cond_p (info1.get_demands (), info2.get_demands ()) - && incompatible_p (info1, info2)) - || (pair.match_cond_p (info2.get_demands (), info1.get_demands ()) - && incompatible_p (info2, info1))); - } -}; - -struct demands_fuse_rule -{ - demands_pair pair; - bool demand_sew_p; - bool demand_lmul_p; - bool demand_ratio_p; - bool demand_ge_sew_p; - - using NEW_SEW - = unsigned (*) (const vector_insn_info &, const vector_insn_info &); - using NEW_VLMUL - = vlmul_type (*) (const vector_insn_info &, const vector_insn_info &); - using NEW_RATIO - = unsigned (*) (const vector_insn_info &, const vector_insn_info &); - NEW_SEW new_sew; - NEW_VLMUL 
new_vlmul; - NEW_RATIO new_ratio; -}; - -} // namespace riscv_vector -#endif diff --git a/gcc/config/riscv/t-riscv b/gcc/config/riscv/t-riscv index f137e1f..dd17056 100644 --- a/gcc/config/riscv/t-riscv +++ b/gcc/config/riscv/t-riscv @@ -64,7 +64,7 @@ riscv-vsetvl.o: $(srcdir)/config/riscv/riscv-vsetvl.cc \ $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) $(REGS_H) \ $(TARGET_H) tree-pass.h df.h rtl-ssa.h cfgcleanup.h insn-config.h \ insn-attr.h insn-opinit.h tm-constrs.h cfgrtl.h cfganal.h lcm.h \ - predict.h profile-count.h $(srcdir)/config/riscv/riscv-vsetvl.h \ + predict.h profile-count.h \ $(srcdir)/config/riscv/riscv-vsetvl.def $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \ $(srcdir)/config/riscv/riscv-vsetvl.cc diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md index 0850475..e80a20d 100644 --- a/gcc/config/riscv/vector-iterators.md +++ b/gcc/config/riscv/vector-iterators.md @@ -1,3 +1,4 @@ + ;; Iterators for RISC-V 'V' Extension for GNU compiler. ;; Copyright (C) 2022-2023 Free Software Foundation, Inc. ;; Contributed by Juzhe Zhong (juzhe.zhong@rivai.ai), RiVAI Technologies Ltd. @@ -146,85 +147,85 @@ (RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64") ;; VLS modes. - (V1QI "TARGET_VECTOR_VLS") - (V2QI "TARGET_VECTOR_VLS") - (V4QI "TARGET_VECTOR_VLS") - (V8QI "TARGET_VECTOR_VLS") - (V16QI "TARGET_VECTOR_VLS") - (V32QI "TARGET_VECTOR_VLS") - (V64QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V128QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V256QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V512QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V1024QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V2048QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V4096QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1HI "TARGET_VECTOR_VLS") - (V2HI "TARGET_VECTOR_VLS") - (V4HI "TARGET_VECTOR_VLS") - (V8HI "TARGET_VECTOR_VLS") - (V16HI "TARGET_VECTOR_VLS") - (V32HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V64HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V128HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V256HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V512HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V1024HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V2048HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1SI "TARGET_VECTOR_VLS") - (V2SI "TARGET_VECTOR_VLS") - (V4SI "TARGET_VECTOR_VLS") - (V8SI "TARGET_VECTOR_VLS") - (V16SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V32SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V64SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V128SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V256SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V512SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V1024SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V2DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V4DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V8DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 64") - (V16DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128") - (V32DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 256") - (V64DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 512") - (V128DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 1024") - (V256DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && 
TARGET_MIN_VLEN >= 2048") - (V512DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 4096") - (V1HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V2HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V4HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V8HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V16HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V32HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 64") - (V64HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 128") - (V128HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 256") - (V256HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 512") - (V512HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 1024") - (V1024HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 2048") - (V2048HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 4096") - (V1SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V2SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V4SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V8SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V16SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") - (V32SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") - (V64SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") - (V128SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") - (V256SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") - (V512SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") - (V1024SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") - (V1DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V2DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V4DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V8DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64") - (V16DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128") - (V32DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256") - (V64DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512") - (V128DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024") - (V256DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048") - (V512DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096") + (V1QI "riscv_vector::vls_mode_valid_p (V1QImode)") + (V2QI "riscv_vector::vls_mode_valid_p (V2QImode)") + (V4QI "riscv_vector::vls_mode_valid_p (V4QImode)") + (V8QI "riscv_vector::vls_mode_valid_p (V8QImode)") + (V16QI "riscv_vector::vls_mode_valid_p (V16QImode)") + (V32QI "riscv_vector::vls_mode_valid_p (V32QImode)") + (V64QI "riscv_vector::vls_mode_valid_p (V64QImode) && TARGET_MIN_VLEN >= 64") + (V128QI "riscv_vector::vls_mode_valid_p (V128QImode) && TARGET_MIN_VLEN >= 128") + (V256QI "riscv_vector::vls_mode_valid_p (V256QImode) && TARGET_MIN_VLEN >= 256") + (V512QI "riscv_vector::vls_mode_valid_p (V512QImode) && TARGET_MIN_VLEN >= 512") + (V1024QI "riscv_vector::vls_mode_valid_p (V1024QImode) && TARGET_MIN_VLEN >= 1024") + (V2048QI "riscv_vector::vls_mode_valid_p (V2048QImode) && TARGET_MIN_VLEN >= 2048") + (V4096QI "riscv_vector::vls_mode_valid_p (V4096QImode) && TARGET_MIN_VLEN >= 4096") + (V1HI "riscv_vector::vls_mode_valid_p (V1HImode)") + (V2HI 
"riscv_vector::vls_mode_valid_p (V2HImode)") + (V4HI "riscv_vector::vls_mode_valid_p (V4HImode)") + (V8HI "riscv_vector::vls_mode_valid_p (V8HImode)") + (V16HI "riscv_vector::vls_mode_valid_p (V16HImode)") + (V32HI "riscv_vector::vls_mode_valid_p (V32HImode) && TARGET_MIN_VLEN >= 64") + (V64HI "riscv_vector::vls_mode_valid_p (V64HImode) && TARGET_MIN_VLEN >= 128") + (V128HI "riscv_vector::vls_mode_valid_p (V128HImode) && TARGET_MIN_VLEN >= 256") + (V256HI "riscv_vector::vls_mode_valid_p (V256HImode) && TARGET_MIN_VLEN >= 512") + (V512HI "riscv_vector::vls_mode_valid_p (V512HImode) && TARGET_MIN_VLEN >= 1024") + (V1024HI "riscv_vector::vls_mode_valid_p (V1024HImode) && TARGET_MIN_VLEN >= 2048") + (V2048HI "riscv_vector::vls_mode_valid_p (V2048HImode) && TARGET_MIN_VLEN >= 4096") + (V1SI "riscv_vector::vls_mode_valid_p (V1SImode)") + (V2SI "riscv_vector::vls_mode_valid_p (V2SImode)") + (V4SI "riscv_vector::vls_mode_valid_p (V4SImode)") + (V8SI "riscv_vector::vls_mode_valid_p (V8SImode)") + (V16SI "riscv_vector::vls_mode_valid_p (V16SImode) && TARGET_MIN_VLEN >= 64") + (V32SI "riscv_vector::vls_mode_valid_p (V32SImode) && TARGET_MIN_VLEN >= 128") + (V64SI "riscv_vector::vls_mode_valid_p (V64SImode) && TARGET_MIN_VLEN >= 256") + (V128SI "riscv_vector::vls_mode_valid_p (V128SImode) && TARGET_MIN_VLEN >= 512") + (V256SI "riscv_vector::vls_mode_valid_p (V256SImode) && TARGET_MIN_VLEN >= 1024") + (V512SI "riscv_vector::vls_mode_valid_p (V512SImode) && TARGET_MIN_VLEN >= 2048") + (V1024SI "riscv_vector::vls_mode_valid_p (V1024SImode) && TARGET_MIN_VLEN >= 4096") + (V1DI "riscv_vector::vls_mode_valid_p (V1DImode) && TARGET_VECTOR_ELEN_64") + (V2DI "riscv_vector::vls_mode_valid_p (V2DImode) && TARGET_VECTOR_ELEN_64") + (V4DI "riscv_vector::vls_mode_valid_p (V4DImode) && TARGET_VECTOR_ELEN_64") + (V8DI "riscv_vector::vls_mode_valid_p (V8DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 64") + (V16DI "riscv_vector::vls_mode_valid_p (V16DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128") + (V32DI "riscv_vector::vls_mode_valid_p (V32DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 256") + (V64DI "riscv_vector::vls_mode_valid_p (V64DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 512") + (V128DI "riscv_vector::vls_mode_valid_p (V128DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 1024") + (V256DI "riscv_vector::vls_mode_valid_p (V256DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 2048") + (V512DI "riscv_vector::vls_mode_valid_p (V512DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 4096") + (V1HF "riscv_vector::vls_mode_valid_p (V1HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V2HF "riscv_vector::vls_mode_valid_p (V2HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V4HF "riscv_vector::vls_mode_valid_p (V4HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V8HF "riscv_vector::vls_mode_valid_p (V8HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V16HF "riscv_vector::vls_mode_valid_p (V16HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V32HF "riscv_vector::vls_mode_valid_p (V32HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 64") + (V64HF "riscv_vector::vls_mode_valid_p (V64HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 128") + (V128HF "riscv_vector::vls_mode_valid_p (V128HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 256") + (V256HF "riscv_vector::vls_mode_valid_p (V256HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 512") + (V512HF "riscv_vector::vls_mode_valid_p (V512HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 1024") + (V1024HF 
"riscv_vector::vls_mode_valid_p (V1024HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 2048") + (V2048HF "riscv_vector::vls_mode_valid_p (V2048HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 4096") + (V1SF "riscv_vector::vls_mode_valid_p (V1SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V2SF "riscv_vector::vls_mode_valid_p (V2SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V4SF "riscv_vector::vls_mode_valid_p (V4SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V8SF "riscv_vector::vls_mode_valid_p (V8SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V16SF "riscv_vector::vls_mode_valid_p (V16SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") + (V32SF "riscv_vector::vls_mode_valid_p (V32SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") + (V64SF "riscv_vector::vls_mode_valid_p (V64SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") + (V128SF "riscv_vector::vls_mode_valid_p (V128SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") + (V256SF "riscv_vector::vls_mode_valid_p (V256SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") + (V512SF "riscv_vector::vls_mode_valid_p (V512SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") + (V1024SF "riscv_vector::vls_mode_valid_p (V1024SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") + (V1DF "riscv_vector::vls_mode_valid_p (V1DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V2DF "riscv_vector::vls_mode_valid_p (V2DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V4DF "riscv_vector::vls_mode_valid_p (V4DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V8DF "riscv_vector::vls_mode_valid_p (V8DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64") + (V16DF "riscv_vector::vls_mode_valid_p (V16DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128") + (V32DF "riscv_vector::vls_mode_valid_p (V32DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256") + (V64DF "riscv_vector::vls_mode_valid_p (V64DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512") + (V128DF "riscv_vector::vls_mode_valid_p (V128DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024") + (V256DF "riscv_vector::vls_mode_valid_p (V256DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048") + (V512DF "riscv_vector::vls_mode_valid_p (V512DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096") ]) (define_mode_iterator VEEWEXT2 [ @@ -315,84 +316,84 @@ (RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64") (RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64") - (V1QI "TARGET_VECTOR_VLS") - (V2QI "TARGET_VECTOR_VLS") - (V4QI "TARGET_VECTOR_VLS") - (V8QI "TARGET_VECTOR_VLS") - (V16QI "TARGET_VECTOR_VLS") - (V32QI "TARGET_VECTOR_VLS") - (V64QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V128QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V256QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V512QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V1024QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V2048QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V1HI "TARGET_VECTOR_VLS") - (V2HI "TARGET_VECTOR_VLS") - (V4HI "TARGET_VECTOR_VLS") - (V8HI "TARGET_VECTOR_VLS") - (V16HI "TARGET_VECTOR_VLS") - (V32HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V64HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V128HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V256HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V512HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V1024HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V2048HI 
"TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1SI "TARGET_VECTOR_VLS") - (V2SI "TARGET_VECTOR_VLS") - (V4SI "TARGET_VECTOR_VLS") - (V8SI "TARGET_VECTOR_VLS") - (V16SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V32SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V64SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V128SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V256SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V512SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V1024SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V2DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V4DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V8DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 64") - (V16DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128") - (V32DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 256") - (V64DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 512") - (V128DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 1024") - (V256DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 2048") - (V512DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 4096") - (V1HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V2HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V4HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V8HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V16HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V32HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 64") - (V64HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 128") - (V128HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 256") - (V256HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 512") - (V512HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 1024") - (V1024HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 2048") - (V2048HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 4096") - (V1SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V2SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V4SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V8SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V16SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") - (V32SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") - (V64SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") - (V128SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") - (V256SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") - (V512SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") - (V1024SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") - (V1DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V2DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V4DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V8DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64") - (V16DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128") - (V32DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256") - (V64DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512") - (V128DF "TARGET_VECTOR_VLS && 
TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024") - (V256DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048") - (V512DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096") + (V1QI "riscv_vector::vls_mode_valid_p (V1QImode)") + (V2QI "riscv_vector::vls_mode_valid_p (V2QImode)") + (V4QI "riscv_vector::vls_mode_valid_p (V4QImode)") + (V8QI "riscv_vector::vls_mode_valid_p (V8QImode)") + (V16QI "riscv_vector::vls_mode_valid_p (V16QImode)") + (V32QI "riscv_vector::vls_mode_valid_p (V32QImode)") + (V64QI "riscv_vector::vls_mode_valid_p (V64QImode) && TARGET_MIN_VLEN >= 64") + (V128QI "riscv_vector::vls_mode_valid_p (V128QImode) && TARGET_MIN_VLEN >= 128") + (V256QI "riscv_vector::vls_mode_valid_p (V256QImode) && TARGET_MIN_VLEN >= 256") + (V512QI "riscv_vector::vls_mode_valid_p (V512QImode) && TARGET_MIN_VLEN >= 512") + (V1024QI "riscv_vector::vls_mode_valid_p (V1024QImode) && TARGET_MIN_VLEN >= 1024") + (V2048QI "riscv_vector::vls_mode_valid_p (V2048QImode) && TARGET_MIN_VLEN >= 2048") + (V1HI "riscv_vector::vls_mode_valid_p (V1HImode)") + (V2HI "riscv_vector::vls_mode_valid_p (V2HImode)") + (V4HI "riscv_vector::vls_mode_valid_p (V4HImode)") + (V8HI "riscv_vector::vls_mode_valid_p (V8HImode)") + (V16HI "riscv_vector::vls_mode_valid_p (V16HImode)") + (V32HI "riscv_vector::vls_mode_valid_p (V32HImode) && TARGET_MIN_VLEN >= 64") + (V64HI "riscv_vector::vls_mode_valid_p (V64HImode) && TARGET_MIN_VLEN >= 128") + (V128HI "riscv_vector::vls_mode_valid_p (V128HImode) && TARGET_MIN_VLEN >= 256") + (V256HI "riscv_vector::vls_mode_valid_p (V256HImode) && TARGET_MIN_VLEN >= 512") + (V512HI "riscv_vector::vls_mode_valid_p (V512HImode) && TARGET_MIN_VLEN >= 1024") + (V1024HI "riscv_vector::vls_mode_valid_p (V1024HImode) && TARGET_MIN_VLEN >= 2048") + (V2048HI "riscv_vector::vls_mode_valid_p (V2048HImode) && TARGET_MIN_VLEN >= 4096") + (V1SI "riscv_vector::vls_mode_valid_p (V1SImode)") + (V2SI "riscv_vector::vls_mode_valid_p (V2SImode)") + (V4SI "riscv_vector::vls_mode_valid_p (V4SImode)") + (V8SI "riscv_vector::vls_mode_valid_p (V8SImode)") + (V16SI "riscv_vector::vls_mode_valid_p (V16SImode) && TARGET_MIN_VLEN >= 64") + (V32SI "riscv_vector::vls_mode_valid_p (V32SImode) && TARGET_MIN_VLEN >= 128") + (V64SI "riscv_vector::vls_mode_valid_p (V64SImode) && TARGET_MIN_VLEN >= 256") + (V128SI "riscv_vector::vls_mode_valid_p (V128SImode) && TARGET_MIN_VLEN >= 512") + (V256SI "riscv_vector::vls_mode_valid_p (V256SImode) && TARGET_MIN_VLEN >= 1024") + (V512SI "riscv_vector::vls_mode_valid_p (V512SImode) && TARGET_MIN_VLEN >= 2048") + (V1024SI "riscv_vector::vls_mode_valid_p (V1024SImode) && TARGET_MIN_VLEN >= 4096") + (V1DI "riscv_vector::vls_mode_valid_p (V1DImode) && TARGET_VECTOR_ELEN_64") + (V2DI "riscv_vector::vls_mode_valid_p (V2DImode) && TARGET_VECTOR_ELEN_64") + (V4DI "riscv_vector::vls_mode_valid_p (V4DImode) && TARGET_VECTOR_ELEN_64") + (V8DI "riscv_vector::vls_mode_valid_p (V8DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 64") + (V16DI "riscv_vector::vls_mode_valid_p (V16DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128") + (V32DI "riscv_vector::vls_mode_valid_p (V32DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 256") + (V64DI "riscv_vector::vls_mode_valid_p (V64DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 512") + (V128DI "riscv_vector::vls_mode_valid_p (V128DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 1024") + (V256DI "riscv_vector::vls_mode_valid_p (V256DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN 
>= 2048") + (V512DI "riscv_vector::vls_mode_valid_p (V512DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 4096") + (V1HF "riscv_vector::vls_mode_valid_p (V1HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V2HF "riscv_vector::vls_mode_valid_p (V2HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V4HF "riscv_vector::vls_mode_valid_p (V4HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V8HF "riscv_vector::vls_mode_valid_p (V8HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V16HF "riscv_vector::vls_mode_valid_p (V16HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V32HF "riscv_vector::vls_mode_valid_p (V32HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 64") + (V64HF "riscv_vector::vls_mode_valid_p (V64HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 128") + (V128HF "riscv_vector::vls_mode_valid_p (V128HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 256") + (V256HF "riscv_vector::vls_mode_valid_p (V256HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 512") + (V512HF "riscv_vector::vls_mode_valid_p (V512HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 1024") + (V1024HF "riscv_vector::vls_mode_valid_p (V1024HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 2048") + (V2048HF "riscv_vector::vls_mode_valid_p (V2048HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 4096") + (V1SF "riscv_vector::vls_mode_valid_p (V1SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V2SF "riscv_vector::vls_mode_valid_p (V2SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V4SF "riscv_vector::vls_mode_valid_p (V4SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V8SF "riscv_vector::vls_mode_valid_p (V8SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V16SF "riscv_vector::vls_mode_valid_p (V16SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") + (V32SF "riscv_vector::vls_mode_valid_p (V32SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") + (V64SF "riscv_vector::vls_mode_valid_p (V64SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") + (V128SF "riscv_vector::vls_mode_valid_p (V128SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") + (V256SF "riscv_vector::vls_mode_valid_p (V256SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") + (V512SF "riscv_vector::vls_mode_valid_p (V512SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") + (V1024SF "riscv_vector::vls_mode_valid_p (V1024SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") + (V1DF "riscv_vector::vls_mode_valid_p (V1DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V2DF "riscv_vector::vls_mode_valid_p (V2DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V4DF "riscv_vector::vls_mode_valid_p (V4DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V8DF "riscv_vector::vls_mode_valid_p (V8DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64") + (V16DF "riscv_vector::vls_mode_valid_p (V16DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128") + (V32DF "riscv_vector::vls_mode_valid_p (V32DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256") + (V64DF "riscv_vector::vls_mode_valid_p (V64DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512") + (V128DF "riscv_vector::vls_mode_valid_p (V128DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024") + (V256DF "riscv_vector::vls_mode_valid_p (V256DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048") + (V512DF "riscv_vector::vls_mode_valid_p (V512DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096") ]) (define_mode_iterator VI [ @@ -416,52 +417,52 @@ (RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64") (RVVM2DI 
"TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64") - (V1QI "TARGET_VECTOR_VLS") - (V2QI "TARGET_VECTOR_VLS") - (V4QI "TARGET_VECTOR_VLS") - (V8QI "TARGET_VECTOR_VLS") - (V16QI "TARGET_VECTOR_VLS") - (V32QI "TARGET_VECTOR_VLS") - (V64QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V128QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V256QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V512QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V1024QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V2048QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V4096QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1HI "TARGET_VECTOR_VLS") - (V2HI "TARGET_VECTOR_VLS") - (V4HI "TARGET_VECTOR_VLS") - (V8HI "TARGET_VECTOR_VLS") - (V16HI "TARGET_VECTOR_VLS") - (V32HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V64HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V128HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V256HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V512HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V1024HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V2048HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1SI "TARGET_VECTOR_VLS") - (V2SI "TARGET_VECTOR_VLS") - (V4SI "TARGET_VECTOR_VLS") - (V8SI "TARGET_VECTOR_VLS") - (V16SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V32SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V64SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V128SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V256SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V512SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V1024SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V2DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V4DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V8DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 64") - (V16DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128") - (V32DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 256") - (V64DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 512") - (V128DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 1024") - (V256DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 2048") - (V512DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 4096") + (V1QI "riscv_vector::vls_mode_valid_p (V1QImode)") + (V2QI "riscv_vector::vls_mode_valid_p (V2QImode)") + (V4QI "riscv_vector::vls_mode_valid_p (V4QImode)") + (V8QI "riscv_vector::vls_mode_valid_p (V8QImode)") + (V16QI "riscv_vector::vls_mode_valid_p (V16QImode)") + (V32QI "riscv_vector::vls_mode_valid_p (V32QImode)") + (V64QI "riscv_vector::vls_mode_valid_p (V64QImode) && TARGET_MIN_VLEN >= 64") + (V128QI "riscv_vector::vls_mode_valid_p (V128QImode) && TARGET_MIN_VLEN >= 128") + (V256QI "riscv_vector::vls_mode_valid_p (V256QImode) && TARGET_MIN_VLEN >= 256") + (V512QI "riscv_vector::vls_mode_valid_p (V512QImode) && TARGET_MIN_VLEN >= 512") + (V1024QI "riscv_vector::vls_mode_valid_p (V1024QImode) && TARGET_MIN_VLEN >= 1024") + (V2048QI "riscv_vector::vls_mode_valid_p (V2048QImode) && TARGET_MIN_VLEN >= 2048") + (V4096QI "riscv_vector::vls_mode_valid_p (V4096QImode) && TARGET_MIN_VLEN >= 4096") + (V1HI "riscv_vector::vls_mode_valid_p (V1HImode)") + (V2HI "riscv_vector::vls_mode_valid_p (V2HImode)") + (V4HI "riscv_vector::vls_mode_valid_p (V4HImode)") + (V8HI "riscv_vector::vls_mode_valid_p (V8HImode)") 
+ (V16HI "riscv_vector::vls_mode_valid_p (V16HImode)") + (V32HI "riscv_vector::vls_mode_valid_p (V32HImode) && TARGET_MIN_VLEN >= 64") + (V64HI "riscv_vector::vls_mode_valid_p (V64HImode) && TARGET_MIN_VLEN >= 128") + (V128HI "riscv_vector::vls_mode_valid_p (V128HImode) && TARGET_MIN_VLEN >= 256") + (V256HI "riscv_vector::vls_mode_valid_p (V256HImode) && TARGET_MIN_VLEN >= 512") + (V512HI "riscv_vector::vls_mode_valid_p (V512HImode) && TARGET_MIN_VLEN >= 1024") + (V1024HI "riscv_vector::vls_mode_valid_p (V1024HImode) && TARGET_MIN_VLEN >= 2048") + (V2048HI "riscv_vector::vls_mode_valid_p (V2048HImode) && TARGET_MIN_VLEN >= 4096") + (V1SI "riscv_vector::vls_mode_valid_p (V1SImode)") + (V2SI "riscv_vector::vls_mode_valid_p (V2SImode)") + (V4SI "riscv_vector::vls_mode_valid_p (V4SImode)") + (V8SI "riscv_vector::vls_mode_valid_p (V8SImode)") + (V16SI "riscv_vector::vls_mode_valid_p (V16SImode) && TARGET_MIN_VLEN >= 64") + (V32SI "riscv_vector::vls_mode_valid_p (V32SImode) && TARGET_MIN_VLEN >= 128") + (V64SI "riscv_vector::vls_mode_valid_p (V64SImode) && TARGET_MIN_VLEN >= 256") + (V128SI "riscv_vector::vls_mode_valid_p (V128SImode) && TARGET_MIN_VLEN >= 512") + (V256SI "riscv_vector::vls_mode_valid_p (V256SImode) && TARGET_MIN_VLEN >= 1024") + (V512SI "riscv_vector::vls_mode_valid_p (V512SImode) && TARGET_MIN_VLEN >= 2048") + (V1024SI "riscv_vector::vls_mode_valid_p (V1024SImode) && TARGET_MIN_VLEN >= 4096") + (V1DI "riscv_vector::vls_mode_valid_p (V1DImode) && TARGET_VECTOR_ELEN_64") + (V2DI "riscv_vector::vls_mode_valid_p (V2DImode) && TARGET_VECTOR_ELEN_64") + (V4DI "riscv_vector::vls_mode_valid_p (V4DImode) && TARGET_VECTOR_ELEN_64") + (V8DI "riscv_vector::vls_mode_valid_p (V8DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 64") + (V16DI "riscv_vector::vls_mode_valid_p (V16DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128") + (V32DI "riscv_vector::vls_mode_valid_p (V32DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 256") + (V64DI "riscv_vector::vls_mode_valid_p (V64DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 512") + (V128DI "riscv_vector::vls_mode_valid_p (V128DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 1024") + (V256DI "riscv_vector::vls_mode_valid_p (V256DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 2048") + (V512DI "riscv_vector::vls_mode_valid_p (V512DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 4096") ]) (define_mode_iterator V_VLSF [ @@ -473,39 +474,39 @@ (RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64") (RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64") - (V1HF "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V2HF "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V4HF "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V8HF "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V16HF "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V32HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 64") - (V64HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 128") - (V128HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 256") - (V256HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 512") - (V512HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 1024") - (V1024HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 2048") - (V2048HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 4096") - (V1SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V2SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V4SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V8SF "TARGET_VECTOR_VLS && 
TARGET_VECTOR_ELEN_FP_32") - (V16SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") - (V32SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") - (V64SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") - (V128SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") - (V256SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") - (V512SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") - (V1024SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") - (V1DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V2DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V4DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V8DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64") - (V16DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128") - (V32DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256") - (V64DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512") - (V128DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024") - (V256DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048") - (V512DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096") + (V1HF "riscv_vector::vls_mode_valid_p (V1HFmode) && TARGET_ZVFH") + (V2HF "riscv_vector::vls_mode_valid_p (V2HFmode) && TARGET_ZVFH") + (V4HF "riscv_vector::vls_mode_valid_p (V4HFmode) && TARGET_ZVFH") + (V8HF "riscv_vector::vls_mode_valid_p (V8HFmode) && TARGET_ZVFH") + (V16HF "riscv_vector::vls_mode_valid_p (V16HFmode) && TARGET_ZVFH") + (V32HF "riscv_vector::vls_mode_valid_p (V32HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 64") + (V64HF "riscv_vector::vls_mode_valid_p (V64HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 128") + (V128HF "riscv_vector::vls_mode_valid_p (V128HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 256") + (V256HF "riscv_vector::vls_mode_valid_p (V256HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 512") + (V512HF "riscv_vector::vls_mode_valid_p (V512HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 1024") + (V1024HF "riscv_vector::vls_mode_valid_p (V1024HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 2048") + (V2048HF "riscv_vector::vls_mode_valid_p (V2048HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 4096") + (V1SF "riscv_vector::vls_mode_valid_p (V1SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V2SF "riscv_vector::vls_mode_valid_p (V2SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V4SF "riscv_vector::vls_mode_valid_p (V4SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V8SF "riscv_vector::vls_mode_valid_p (V8SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V16SF "riscv_vector::vls_mode_valid_p (V16SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") + (V32SF "riscv_vector::vls_mode_valid_p (V32SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") + (V64SF "riscv_vector::vls_mode_valid_p (V64SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") + (V128SF "riscv_vector::vls_mode_valid_p (V128SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") + (V256SF "riscv_vector::vls_mode_valid_p (V256SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") + (V512SF "riscv_vector::vls_mode_valid_p (V512SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") + (V1024SF "riscv_vector::vls_mode_valid_p (V1024SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") + (V1DF "riscv_vector::vls_mode_valid_p 
(V1DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V2DF "riscv_vector::vls_mode_valid_p (V2DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V4DF "riscv_vector::vls_mode_valid_p (V4DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V8DF "riscv_vector::vls_mode_valid_p (V8DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64") + (V16DF "riscv_vector::vls_mode_valid_p (V16DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128") + (V32DF "riscv_vector::vls_mode_valid_p (V32DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256") + (V64DF "riscv_vector::vls_mode_valid_p (V64DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512") + (V128DF "riscv_vector::vls_mode_valid_p (V128DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024") + (V256DF "riscv_vector::vls_mode_valid_p (V256DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048") + (V512DF "riscv_vector::vls_mode_valid_p (V512DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096") ]) (define_mode_iterator VF_ZVFHMIN [ @@ -531,39 +532,39 @@ (RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64") (RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64") - (V1HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V2HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V4HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V8HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V16HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V32HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 64") - (V64HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 128") - (V128HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 256") - (V256HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 512") - (V512HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 1024") - (V1024HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 2048") - (V2048HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 4096") - (V1SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V2SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V4SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V8SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V16SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") - (V32SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") - (V64SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") - (V128SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") - (V256SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") - (V512SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") - (V1024SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") - (V1DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V2DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V4DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V8DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64") - (V16DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128") - (V32DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256") - (V64DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512") - (V128DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024") - (V256DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN 
>= 2048") - (V512DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096") + (V1HF "riscv_vector::vls_mode_valid_p (V1HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V2HF "riscv_vector::vls_mode_valid_p (V2HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V4HF "riscv_vector::vls_mode_valid_p (V4HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V8HF "riscv_vector::vls_mode_valid_p (V8HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V16HF "riscv_vector::vls_mode_valid_p (V16HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V32HF "riscv_vector::vls_mode_valid_p (V32HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 64") + (V64HF "riscv_vector::vls_mode_valid_p (V64HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 128") + (V128HF "riscv_vector::vls_mode_valid_p (V128HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 256") + (V256HF "riscv_vector::vls_mode_valid_p (V256HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 512") + (V512HF "riscv_vector::vls_mode_valid_p (V512HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 1024") + (V1024HF "riscv_vector::vls_mode_valid_p (V1024HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 2048") + (V2048HF "riscv_vector::vls_mode_valid_p (V2048HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 4096") + (V1SF "riscv_vector::vls_mode_valid_p (V1SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V2SF "riscv_vector::vls_mode_valid_p (V2SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V4SF "riscv_vector::vls_mode_valid_p (V4SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V8SF "riscv_vector::vls_mode_valid_p (V8SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V16SF "riscv_vector::vls_mode_valid_p (V16SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") + (V32SF "riscv_vector::vls_mode_valid_p (V32SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") + (V64SF "riscv_vector::vls_mode_valid_p (V64SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") + (V128SF "riscv_vector::vls_mode_valid_p (V128SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") + (V256SF "riscv_vector::vls_mode_valid_p (V256SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") + (V512SF "riscv_vector::vls_mode_valid_p (V512SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") + (V1024SF "riscv_vector::vls_mode_valid_p (V1024SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") + (V1DF "riscv_vector::vls_mode_valid_p (V1DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V2DF "riscv_vector::vls_mode_valid_p (V2DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V4DF "riscv_vector::vls_mode_valid_p (V4DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V8DF "riscv_vector::vls_mode_valid_p (V8DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64") + (V16DF "riscv_vector::vls_mode_valid_p (V16DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128") + (V32DF "riscv_vector::vls_mode_valid_p (V32DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256") + (V64DF "riscv_vector::vls_mode_valid_p (V64DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512") + (V128DF "riscv_vector::vls_mode_valid_p (V128DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024") + (V256DF "riscv_vector::vls_mode_valid_p (V256DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048") + (V512DF "riscv_vector::vls_mode_valid_p (V512DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096") ]) ;; This iterator is the same as above but with TARGET_VECTOR_ELEN_FP_16 @@ -594,52 +595,52 @@ (RVVM8DI "TARGET_FULL_V") (RVVM4DI "TARGET_FULL_V") (RVVM2DI 
"TARGET_FULL_V") (RVVM1DI "TARGET_FULL_V") - (V1QI "TARGET_VECTOR_VLS") - (V2QI "TARGET_VECTOR_VLS") - (V4QI "TARGET_VECTOR_VLS") - (V8QI "TARGET_VECTOR_VLS") - (V16QI "TARGET_VECTOR_VLS") - (V32QI "TARGET_VECTOR_VLS") - (V64QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V128QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V256QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V512QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V1024QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V2048QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V4096QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1HI "TARGET_VECTOR_VLS") - (V2HI "TARGET_VECTOR_VLS") - (V4HI "TARGET_VECTOR_VLS") - (V8HI "TARGET_VECTOR_VLS") - (V16HI "TARGET_VECTOR_VLS") - (V32HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V64HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V128HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V256HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V512HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V1024HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V2048HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1SI "TARGET_VECTOR_VLS") - (V2SI "TARGET_VECTOR_VLS") - (V4SI "TARGET_VECTOR_VLS") - (V8SI "TARGET_VECTOR_VLS") - (V16SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V32SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V64SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V128SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V256SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V512SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V1024SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1DI "TARGET_VECTOR_VLS && TARGET_FULL_V") - (V2DI "TARGET_VECTOR_VLS && TARGET_FULL_V") - (V4DI "TARGET_VECTOR_VLS && TARGET_FULL_V") - (V8DI "TARGET_VECTOR_VLS && TARGET_FULL_V && TARGET_MIN_VLEN >= 64") - (V16DI "TARGET_VECTOR_VLS && TARGET_FULL_V && TARGET_MIN_VLEN >= 128") - (V32DI "TARGET_VECTOR_VLS && TARGET_FULL_V && TARGET_MIN_VLEN >= 256") - (V64DI "TARGET_VECTOR_VLS && TARGET_FULL_V && TARGET_MIN_VLEN >= 512") - (V128DI "TARGET_VECTOR_VLS && TARGET_FULL_V && TARGET_MIN_VLEN >= 1024") - (V256DI "TARGET_VECTOR_VLS && TARGET_FULL_V && TARGET_MIN_VLEN >= 2048") - (V512DI "TARGET_VECTOR_VLS && TARGET_FULL_V && TARGET_MIN_VLEN >= 4096") + (V1QI "riscv_vector::vls_mode_valid_p (V1QImode)") + (V2QI "riscv_vector::vls_mode_valid_p (V2QImode)") + (V4QI "riscv_vector::vls_mode_valid_p (V4QImode)") + (V8QI "riscv_vector::vls_mode_valid_p (V8QImode)") + (V16QI "riscv_vector::vls_mode_valid_p (V16QImode)") + (V32QI "riscv_vector::vls_mode_valid_p (V32QImode)") + (V64QI "riscv_vector::vls_mode_valid_p (V64QImode) && TARGET_MIN_VLEN >= 64") + (V128QI "riscv_vector::vls_mode_valid_p (V128QImode) && TARGET_MIN_VLEN >= 128") + (V256QI "riscv_vector::vls_mode_valid_p (V256QImode) && TARGET_MIN_VLEN >= 256") + (V512QI "riscv_vector::vls_mode_valid_p (V512QImode) && TARGET_MIN_VLEN >= 512") + (V1024QI "riscv_vector::vls_mode_valid_p (V1024QImode) && TARGET_MIN_VLEN >= 1024") + (V2048QI "riscv_vector::vls_mode_valid_p (V2048QImode) && TARGET_MIN_VLEN >= 2048") + (V4096QI "riscv_vector::vls_mode_valid_p (V4096QImode) && TARGET_MIN_VLEN >= 4096") + (V1HI "riscv_vector::vls_mode_valid_p (V1HImode)") + (V2HI "riscv_vector::vls_mode_valid_p (V2HImode)") + (V4HI "riscv_vector::vls_mode_valid_p (V4HImode)") + (V8HI "riscv_vector::vls_mode_valid_p (V8HImode)") + (V16HI "riscv_vector::vls_mode_valid_p (V16HImode)") + (V32HI "riscv_vector::vls_mode_valid_p 
(V32HImode) && TARGET_MIN_VLEN >= 64") + (V64HI "riscv_vector::vls_mode_valid_p (V64HImode) && TARGET_MIN_VLEN >= 128") + (V128HI "riscv_vector::vls_mode_valid_p (V128HImode) && TARGET_MIN_VLEN >= 256") + (V256HI "riscv_vector::vls_mode_valid_p (V256HImode) && TARGET_MIN_VLEN >= 512") + (V512HI "riscv_vector::vls_mode_valid_p (V512HImode) && TARGET_MIN_VLEN >= 1024") + (V1024HI "riscv_vector::vls_mode_valid_p (V1024HImode) && TARGET_MIN_VLEN >= 2048") + (V2048HI "riscv_vector::vls_mode_valid_p (V2048HImode) && TARGET_MIN_VLEN >= 4096") + (V1SI "riscv_vector::vls_mode_valid_p (V1SImode)") + (V2SI "riscv_vector::vls_mode_valid_p (V2SImode)") + (V4SI "riscv_vector::vls_mode_valid_p (V4SImode)") + (V8SI "riscv_vector::vls_mode_valid_p (V8SImode)") + (V16SI "riscv_vector::vls_mode_valid_p (V16SImode) && TARGET_MIN_VLEN >= 64") + (V32SI "riscv_vector::vls_mode_valid_p (V32SImode) && TARGET_MIN_VLEN >= 128") + (V64SI "riscv_vector::vls_mode_valid_p (V64SImode) && TARGET_MIN_VLEN >= 256") + (V128SI "riscv_vector::vls_mode_valid_p (V128SImode) && TARGET_MIN_VLEN >= 512") + (V256SI "riscv_vector::vls_mode_valid_p (V256SImode) && TARGET_MIN_VLEN >= 1024") + (V512SI "riscv_vector::vls_mode_valid_p (V512SImode) && TARGET_MIN_VLEN >= 2048") + (V1024SI "riscv_vector::vls_mode_valid_p (V1024SImode) && TARGET_MIN_VLEN >= 4096") + (V1DI "riscv_vector::vls_mode_valid_p (V1DImode) && TARGET_FULL_V") + (V2DI "riscv_vector::vls_mode_valid_p (V2DImode) && TARGET_FULL_V") + (V4DI "riscv_vector::vls_mode_valid_p (V4DImode) && TARGET_FULL_V") + (V8DI "riscv_vector::vls_mode_valid_p (V8DImode) && TARGET_FULL_V && TARGET_MIN_VLEN >= 64") + (V16DI "riscv_vector::vls_mode_valid_p (V16DImode) && TARGET_FULL_V && TARGET_MIN_VLEN >= 128") + (V32DI "riscv_vector::vls_mode_valid_p (V32DImode) && TARGET_FULL_V && TARGET_MIN_VLEN >= 256") + (V64DI "riscv_vector::vls_mode_valid_p (V64DImode) && TARGET_FULL_V && TARGET_MIN_VLEN >= 512") + (V128DI "riscv_vector::vls_mode_valid_p (V128DImode) && TARGET_FULL_V && TARGET_MIN_VLEN >= 1024") + (V256DI "riscv_vector::vls_mode_valid_p (V256DImode) && TARGET_FULL_V && TARGET_MIN_VLEN >= 2048") + (V512DI "riscv_vector::vls_mode_valid_p (V512DImode) && TARGET_FULL_V && TARGET_MIN_VLEN >= 4096") ]) (define_mode_iterator VI_QH [ @@ -655,42 +656,42 @@ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32") - (V1QI "TARGET_VECTOR_VLS") - (V2QI "TARGET_VECTOR_VLS") - (V4QI "TARGET_VECTOR_VLS") - (V8QI "TARGET_VECTOR_VLS") - (V16QI "TARGET_VECTOR_VLS") - (V32QI "TARGET_VECTOR_VLS") - (V64QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V128QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V256QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V512QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V1024QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V2048QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V4096QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1HI "TARGET_VECTOR_VLS") - (V2HI "TARGET_VECTOR_VLS") - (V4HI "TARGET_VECTOR_VLS") - (V8HI "TARGET_VECTOR_VLS") - (V16HI "TARGET_VECTOR_VLS") - (V32HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V64HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V128HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V256HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V512HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V1024HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V2048HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1SI "TARGET_VECTOR_VLS") - (V2SI "TARGET_VECTOR_VLS") - (V4SI 
"TARGET_VECTOR_VLS") - (V8SI "TARGET_VECTOR_VLS") - (V16SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V32SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V64SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V128SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V256SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V512SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V1024SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") + (V1QI "riscv_vector::vls_mode_valid_p (V1QImode)") + (V2QI "riscv_vector::vls_mode_valid_p (V2QImode)") + (V4QI "riscv_vector::vls_mode_valid_p (V4QImode)") + (V8QI "riscv_vector::vls_mode_valid_p (V8QImode)") + (V16QI "riscv_vector::vls_mode_valid_p (V16QImode)") + (V32QI "riscv_vector::vls_mode_valid_p (V32QImode)") + (V64QI "riscv_vector::vls_mode_valid_p (V64QImode) && TARGET_MIN_VLEN >= 64") + (V128QI "riscv_vector::vls_mode_valid_p (V128QImode) && TARGET_MIN_VLEN >= 128") + (V256QI "riscv_vector::vls_mode_valid_p (V256QImode) && TARGET_MIN_VLEN >= 256") + (V512QI "riscv_vector::vls_mode_valid_p (V512QImode) && TARGET_MIN_VLEN >= 512") + (V1024QI "riscv_vector::vls_mode_valid_p (V1024QImode) && TARGET_MIN_VLEN >= 1024") + (V2048QI "riscv_vector::vls_mode_valid_p (V2048QImode) && TARGET_MIN_VLEN >= 2048") + (V4096QI "riscv_vector::vls_mode_valid_p (V4096QImode) && TARGET_MIN_VLEN >= 4096") + (V1HI "riscv_vector::vls_mode_valid_p (V1HImode)") + (V2HI "riscv_vector::vls_mode_valid_p (V2HImode)") + (V4HI "riscv_vector::vls_mode_valid_p (V4HImode)") + (V8HI "riscv_vector::vls_mode_valid_p (V8HImode)") + (V16HI "riscv_vector::vls_mode_valid_p (V16HImode)") + (V32HI "riscv_vector::vls_mode_valid_p (V32HImode) && TARGET_MIN_VLEN >= 64") + (V64HI "riscv_vector::vls_mode_valid_p (V64HImode) && TARGET_MIN_VLEN >= 128") + (V128HI "riscv_vector::vls_mode_valid_p (V128HImode) && TARGET_MIN_VLEN >= 256") + (V256HI "riscv_vector::vls_mode_valid_p (V256HImode) && TARGET_MIN_VLEN >= 512") + (V512HI "riscv_vector::vls_mode_valid_p (V512HImode) && TARGET_MIN_VLEN >= 1024") + (V1024HI "riscv_vector::vls_mode_valid_p (V1024HImode) && TARGET_MIN_VLEN >= 2048") + (V2048HI "riscv_vector::vls_mode_valid_p (V2048HImode) && TARGET_MIN_VLEN >= 4096") + (V1SI "riscv_vector::vls_mode_valid_p (V1SImode)") + (V2SI "riscv_vector::vls_mode_valid_p (V2SImode)") + (V4SI "riscv_vector::vls_mode_valid_p (V4SImode)") + (V8SI "riscv_vector::vls_mode_valid_p (V8SImode)") + (V16SI "riscv_vector::vls_mode_valid_p (V16SImode) && TARGET_MIN_VLEN >= 64") + (V32SI "riscv_vector::vls_mode_valid_p (V32SImode) && TARGET_MIN_VLEN >= 128") + (V64SI "riscv_vector::vls_mode_valid_p (V64SImode) && TARGET_MIN_VLEN >= 256") + (V128SI "riscv_vector::vls_mode_valid_p (V128SImode) && TARGET_MIN_VLEN >= 512") + (V256SI "riscv_vector::vls_mode_valid_p (V256SImode) && TARGET_MIN_VLEN >= 1024") + (V512SI "riscv_vector::vls_mode_valid_p (V512SImode) && TARGET_MIN_VLEN >= 2048") + (V1024SI "riscv_vector::vls_mode_valid_p (V1024SImode) && TARGET_MIN_VLEN >= 4096") ]) (define_mode_iterator VI_QHS_NO_M8 [ @@ -700,39 +701,39 @@ RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32") - (V1QI "TARGET_VECTOR_VLS") - (V2QI "TARGET_VECTOR_VLS") - (V4QI "TARGET_VECTOR_VLS") - (V8QI "TARGET_VECTOR_VLS") - (V16QI "TARGET_VECTOR_VLS") - (V32QI "TARGET_VECTOR_VLS") - (V64QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V128QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V256QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V512QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V1024QI "TARGET_VECTOR_VLS && 
TARGET_MIN_VLEN >= 1024") - (V2048QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V1HI "TARGET_VECTOR_VLS") - (V2HI "TARGET_VECTOR_VLS") - (V4HI "TARGET_VECTOR_VLS") - (V8HI "TARGET_VECTOR_VLS") - (V16HI "TARGET_VECTOR_VLS") - (V32HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V64HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V128HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V256HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V512HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V1024HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V1SI "TARGET_VECTOR_VLS") - (V2SI "TARGET_VECTOR_VLS") - (V4SI "TARGET_VECTOR_VLS") - (V8SI "TARGET_VECTOR_VLS") - (V16SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V32SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V64SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V128SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V256SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V512SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") + (V1QI "riscv_vector::vls_mode_valid_p (V1QImode)") + (V2QI "riscv_vector::vls_mode_valid_p (V2QImode)") + (V4QI "riscv_vector::vls_mode_valid_p (V4QImode)") + (V8QI "riscv_vector::vls_mode_valid_p (V8QImode)") + (V16QI "riscv_vector::vls_mode_valid_p (V16QImode)") + (V32QI "riscv_vector::vls_mode_valid_p (V32QImode)") + (V64QI "riscv_vector::vls_mode_valid_p (V64QImode) && TARGET_MIN_VLEN >= 64") + (V128QI "riscv_vector::vls_mode_valid_p (V128QImode) && TARGET_MIN_VLEN >= 128") + (V256QI "riscv_vector::vls_mode_valid_p (V256QImode) && TARGET_MIN_VLEN >= 256") + (V512QI "riscv_vector::vls_mode_valid_p (V512QImode) && TARGET_MIN_VLEN >= 512") + (V1024QI "riscv_vector::vls_mode_valid_p (V1024QImode) && TARGET_MIN_VLEN >= 1024") + (V2048QI "riscv_vector::vls_mode_valid_p (V2048QImode) && TARGET_MIN_VLEN >= 2048") + (V1HI "riscv_vector::vls_mode_valid_p (V1HImode)") + (V2HI "riscv_vector::vls_mode_valid_p (V2HImode)") + (V4HI "riscv_vector::vls_mode_valid_p (V4HImode)") + (V8HI "riscv_vector::vls_mode_valid_p (V8HImode)") + (V16HI "riscv_vector::vls_mode_valid_p (V16HImode)") + (V32HI "riscv_vector::vls_mode_valid_p (V32HImode) && TARGET_MIN_VLEN >= 64") + (V64HI "riscv_vector::vls_mode_valid_p (V64HImode) && TARGET_MIN_VLEN >= 128") + (V128HI "riscv_vector::vls_mode_valid_p (V128HImode) && TARGET_MIN_VLEN >= 256") + (V256HI "riscv_vector::vls_mode_valid_p (V256HImode) && TARGET_MIN_VLEN >= 512") + (V512HI "riscv_vector::vls_mode_valid_p (V512HImode) && TARGET_MIN_VLEN >= 1024") + (V1024HI "riscv_vector::vls_mode_valid_p (V1024HImode) && TARGET_MIN_VLEN >= 2048") + (V1SI "riscv_vector::vls_mode_valid_p (V1SImode)") + (V2SI "riscv_vector::vls_mode_valid_p (V2SImode)") + (V4SI "riscv_vector::vls_mode_valid_p (V4SImode)") + (V8SI "riscv_vector::vls_mode_valid_p (V8SImode)") + (V16SI "riscv_vector::vls_mode_valid_p (V16SImode) && TARGET_MIN_VLEN >= 64") + (V32SI "riscv_vector::vls_mode_valid_p (V32SImode) && TARGET_MIN_VLEN >= 128") + (V64SI "riscv_vector::vls_mode_valid_p (V64SImode) && TARGET_MIN_VLEN >= 256") + (V128SI "riscv_vector::vls_mode_valid_p (V128SImode) && TARGET_MIN_VLEN >= 512") + (V256SI "riscv_vector::vls_mode_valid_p (V256SImode) && TARGET_MIN_VLEN >= 1024") + (V512SI "riscv_vector::vls_mode_valid_p (V512SImode) && TARGET_MIN_VLEN >= 2048") ]) (define_mode_iterator VF_HS [ @@ -743,29 +744,29 @@ (RVVM8SF "TARGET_VECTOR_ELEN_FP_32") (RVVM4SF "TARGET_VECTOR_ELEN_FP_32") (RVVM2SF "TARGET_VECTOR_ELEN_FP_32") (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF 
"TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32") - (V1HF "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V2HF "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V4HF "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V8HF "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V16HF "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V32HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 64") - (V64HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 128") - (V128HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 256") - (V256HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 512") - (V512HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 1024") - (V1024HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 2048") - (V2048HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 4096") - (V1SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V2SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V4SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V8SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V16SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") - (V32SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") - (V64SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") - (V128SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") - (V256SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") - (V512SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") - (V1024SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") + (V1HF "riscv_vector::vls_mode_valid_p (V1HFmode) && TARGET_ZVFH") + (V2HF "riscv_vector::vls_mode_valid_p (V2HFmode) && TARGET_ZVFH") + (V4HF "riscv_vector::vls_mode_valid_p (V4HFmode) && TARGET_ZVFH") + (V8HF "riscv_vector::vls_mode_valid_p (V8HFmode) && TARGET_ZVFH") + (V16HF "riscv_vector::vls_mode_valid_p (V16HFmode) && TARGET_ZVFH") + (V32HF "riscv_vector::vls_mode_valid_p (V32HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 64") + (V64HF "riscv_vector::vls_mode_valid_p (V64HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 128") + (V128HF "riscv_vector::vls_mode_valid_p (V128HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 256") + (V256HF "riscv_vector::vls_mode_valid_p (V256HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 512") + (V512HF "riscv_vector::vls_mode_valid_p (V512HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 1024") + (V1024HF "riscv_vector::vls_mode_valid_p (V1024HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 2048") + (V2048HF "riscv_vector::vls_mode_valid_p (V2048HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 4096") + (V1SF "riscv_vector::vls_mode_valid_p (V1SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V2SF "riscv_vector::vls_mode_valid_p (V2SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V4SF "riscv_vector::vls_mode_valid_p (V4SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V8SF "riscv_vector::vls_mode_valid_p (V8SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V16SF "riscv_vector::vls_mode_valid_p (V16SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") + (V32SF "riscv_vector::vls_mode_valid_p (V32SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") + (V64SF "riscv_vector::vls_mode_valid_p (V64SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") + (V128SF "riscv_vector::vls_mode_valid_p (V128SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") + (V256SF "riscv_vector::vls_mode_valid_p (V256SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") + (V512SF "riscv_vector::vls_mode_valid_p 
(V512SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") + (V1024SF "riscv_vector::vls_mode_valid_p (V1024SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") ]) (define_mode_iterator VF_HS_NO_M8 [ @@ -779,27 +780,27 @@ (RVVM1SF "TARGET_VECTOR_ELEN_FP_32") (RVVMF2SF "TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32") - (V1HF "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V2HF "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V4HF "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V8HF "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V16HF "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V32HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 64") - (V64HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 128") - (V128HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 256") - (V256HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 512") - (V512HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 1024") - (V1024HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 2048") - (V1SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V2SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V4SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V8SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V16SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") - (V32SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") - (V64SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") - (V128SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") - (V256SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") - (V512SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") + (V1HF "riscv_vector::vls_mode_valid_p (V1HFmode) && TARGET_ZVFH") + (V2HF "riscv_vector::vls_mode_valid_p (V2HFmode) && TARGET_ZVFH") + (V4HF "riscv_vector::vls_mode_valid_p (V4HFmode) && TARGET_ZVFH") + (V8HF "riscv_vector::vls_mode_valid_p (V8HFmode) && TARGET_ZVFH") + (V16HF "riscv_vector::vls_mode_valid_p (V16HFmode) && TARGET_ZVFH") + (V32HF "riscv_vector::vls_mode_valid_p (V32HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 64") + (V64HF "riscv_vector::vls_mode_valid_p (V64HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 128") + (V128HF "riscv_vector::vls_mode_valid_p (V128HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 256") + (V256HF "riscv_vector::vls_mode_valid_p (V256HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 512") + (V512HF "riscv_vector::vls_mode_valid_p (V512HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 1024") + (V1024HF "riscv_vector::vls_mode_valid_p (V1024HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 2048") + (V1SF "riscv_vector::vls_mode_valid_p (V1SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V2SF "riscv_vector::vls_mode_valid_p (V2SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V4SF "riscv_vector::vls_mode_valid_p (V4SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V8SF "riscv_vector::vls_mode_valid_p (V8SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V16SF "riscv_vector::vls_mode_valid_p (V16SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") + (V32SF "riscv_vector::vls_mode_valid_p (V32SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") + (V64SF "riscv_vector::vls_mode_valid_p (V64SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") + (V128SF "riscv_vector::vls_mode_valid_p (V128SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") + (V256SF "riscv_vector::vls_mode_valid_p (V256SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") + (V512SF 
"riscv_vector::vls_mode_valid_p (V512SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") ]) (define_mode_iterator VF_HS_M8 [ @@ -814,42 +815,42 @@ RVVM8SI RVVM4SI RVVM2SI RVVM1SI (RVVMF2SI "TARGET_MIN_VLEN > 32") - (V1QI "TARGET_VECTOR_VLS") - (V2QI "TARGET_VECTOR_VLS") - (V4QI "TARGET_VECTOR_VLS") - (V8QI "TARGET_VECTOR_VLS") - (V16QI "TARGET_VECTOR_VLS") - (V32QI "TARGET_VECTOR_VLS") - (V64QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V128QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V256QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V512QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V1024QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V2048QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V4096QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1HI "TARGET_VECTOR_VLS") - (V2HI "TARGET_VECTOR_VLS") - (V4HI "TARGET_VECTOR_VLS") - (V8HI "TARGET_VECTOR_VLS") - (V16HI "TARGET_VECTOR_VLS") - (V32HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V64HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V128HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V256HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V512HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V1024HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V2048HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1SI "TARGET_VECTOR_VLS") - (V2SI "TARGET_VECTOR_VLS") - (V4SI "TARGET_VECTOR_VLS") - (V8SI "TARGET_VECTOR_VLS") - (V16SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V32SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V64SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V128SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V256SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V512SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V1024SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") + (V1QI "riscv_vector::vls_mode_valid_p (V1QImode)") + (V2QI "riscv_vector::vls_mode_valid_p (V2QImode)") + (V4QI "riscv_vector::vls_mode_valid_p (V4QImode)") + (V8QI "riscv_vector::vls_mode_valid_p (V8QImode)") + (V16QI "riscv_vector::vls_mode_valid_p (V16QImode)") + (V32QI "riscv_vector::vls_mode_valid_p (V32QImode)") + (V64QI "riscv_vector::vls_mode_valid_p (V64QImode) && TARGET_MIN_VLEN >= 64") + (V128QI "riscv_vector::vls_mode_valid_p (V128QImode) && TARGET_MIN_VLEN >= 128") + (V256QI "riscv_vector::vls_mode_valid_p (V256QImode) && TARGET_MIN_VLEN >= 256") + (V512QI "riscv_vector::vls_mode_valid_p (V512QImode) && TARGET_MIN_VLEN >= 512") + (V1024QI "riscv_vector::vls_mode_valid_p (V1024QImode) && TARGET_MIN_VLEN >= 1024") + (V2048QI "riscv_vector::vls_mode_valid_p (V2048QImode) && TARGET_MIN_VLEN >= 2048") + (V4096QI "riscv_vector::vls_mode_valid_p (V4096QImode) && TARGET_MIN_VLEN >= 4096") + (V1HI "riscv_vector::vls_mode_valid_p (V1HImode)") + (V2HI "riscv_vector::vls_mode_valid_p (V2HImode)") + (V4HI "riscv_vector::vls_mode_valid_p (V4HImode)") + (V8HI "riscv_vector::vls_mode_valid_p (V8HImode)") + (V16HI "riscv_vector::vls_mode_valid_p (V16HImode)") + (V32HI "riscv_vector::vls_mode_valid_p (V32HImode) && TARGET_MIN_VLEN >= 64") + (V64HI "riscv_vector::vls_mode_valid_p (V64HImode) && TARGET_MIN_VLEN >= 128") + (V128HI "riscv_vector::vls_mode_valid_p (V128HImode) && TARGET_MIN_VLEN >= 256") + (V256HI "riscv_vector::vls_mode_valid_p (V256HImode) && TARGET_MIN_VLEN >= 512") + (V512HI "riscv_vector::vls_mode_valid_p (V512HImode) && TARGET_MIN_VLEN >= 1024") + (V1024HI "riscv_vector::vls_mode_valid_p (V1024HImode) && TARGET_MIN_VLEN >= 2048") + (V2048HI 
"riscv_vector::vls_mode_valid_p (V2048HImode) && TARGET_MIN_VLEN >= 4096") + (V1SI "riscv_vector::vls_mode_valid_p (V1SImode)") + (V2SI "riscv_vector::vls_mode_valid_p (V2SImode)") + (V4SI "riscv_vector::vls_mode_valid_p (V4SImode)") + (V8SI "riscv_vector::vls_mode_valid_p (V8SImode)") + (V16SI "riscv_vector::vls_mode_valid_p (V16SImode) && TARGET_MIN_VLEN >= 64") + (V32SI "riscv_vector::vls_mode_valid_p (V32SImode) && TARGET_MIN_VLEN >= 128") + (V64SI "riscv_vector::vls_mode_valid_p (V64SImode) && TARGET_MIN_VLEN >= 256") + (V128SI "riscv_vector::vls_mode_valid_p (V128SImode) && TARGET_MIN_VLEN >= 512") + (V256SI "riscv_vector::vls_mode_valid_p (V256SImode) && TARGET_MIN_VLEN >= 1024") + (V512SI "riscv_vector::vls_mode_valid_p (V512SImode) && TARGET_MIN_VLEN >= 2048") + (V1024SI "riscv_vector::vls_mode_valid_p (V1024SImode) && TARGET_MIN_VLEN >= 4096") ]) (define_mode_iterator VI_D [ @@ -861,16 +862,16 @@ (RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64") (RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64") - (V1DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V2DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V4DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V8DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 64") - (V16DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128") - (V32DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 256") - (V64DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 512") - (V128DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 1024") - (V256DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 2048") - (V512DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 4096") + (V1DI "riscv_vector::vls_mode_valid_p (V1DImode) && TARGET_VECTOR_ELEN_64") + (V2DI "riscv_vector::vls_mode_valid_p (V2DImode) && TARGET_VECTOR_ELEN_64") + (V4DI "riscv_vector::vls_mode_valid_p (V4DImode) && TARGET_VECTOR_ELEN_64") + (V8DI "riscv_vector::vls_mode_valid_p (V8DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 64") + (V16DI "riscv_vector::vls_mode_valid_p (V16DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128") + (V32DI "riscv_vector::vls_mode_valid_p (V32DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 256") + (V64DI "riscv_vector::vls_mode_valid_p (V64DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 512") + (V128DI "riscv_vector::vls_mode_valid_p (V128DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 1024") + (V256DI "riscv_vector::vls_mode_valid_p (V256DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 2048") + (V512DI "riscv_vector::vls_mode_valid_p (V512DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 4096") ]) (define_mode_iterator VFULLI_D [ @@ -1013,19 +1014,19 @@ (define_mode_iterator VB_VLS [ (RVVMF64BI "TARGET_MIN_VLEN > 32") RVVMF32BI RVVMF16BI RVVMF8BI RVVMF4BI RVVMF2BI RVVM1BI - (V1BI "TARGET_VECTOR_VLS") - (V2BI "TARGET_VECTOR_VLS") - (V4BI "TARGET_VECTOR_VLS") - (V8BI "TARGET_VECTOR_VLS") - (V16BI "TARGET_VECTOR_VLS") - (V32BI "TARGET_VECTOR_VLS") - (V64BI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V128BI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V256BI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V512BI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V1024BI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V2048BI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V4096BI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") + (V1BI 
"riscv_vector::vls_mode_valid_p (V1BImode)") + (V2BI "riscv_vector::vls_mode_valid_p (V2BImode)") + (V4BI "riscv_vector::vls_mode_valid_p (V4BImode)") + (V8BI "riscv_vector::vls_mode_valid_p (V8BImode)") + (V16BI "riscv_vector::vls_mode_valid_p (V16BImode)") + (V32BI "riscv_vector::vls_mode_valid_p (V32BImode)") + (V64BI "riscv_vector::vls_mode_valid_p (V64BImode) && TARGET_MIN_VLEN >= 64") + (V128BI "riscv_vector::vls_mode_valid_p (V128BImode) && TARGET_MIN_VLEN >= 128") + (V256BI "riscv_vector::vls_mode_valid_p (V256BImode) && TARGET_MIN_VLEN >= 256") + (V512BI "riscv_vector::vls_mode_valid_p (V512BImode) && TARGET_MIN_VLEN >= 512") + (V1024BI "riscv_vector::vls_mode_valid_p (V1024BImode) && TARGET_MIN_VLEN >= 1024") + (V2048BI "riscv_vector::vls_mode_valid_p (V2048BImode) && TARGET_MIN_VLEN >= 2048") + (V4096BI "riscv_vector::vls_mode_valid_p (V4096BImode) && TARGET_MIN_VLEN >= 4096") ]) (define_mode_iterator VWEXTI [ @@ -1036,39 +1037,39 @@ (RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64") (RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64") - (V1HI "TARGET_VECTOR_VLS") - (V2HI "TARGET_VECTOR_VLS") - (V4HI "TARGET_VECTOR_VLS") - (V8HI "TARGET_VECTOR_VLS") - (V16HI "TARGET_VECTOR_VLS") - (V32HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V64HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V128HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V256HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V512HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V1024HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V2048HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1SI "TARGET_VECTOR_VLS") - (V2SI "TARGET_VECTOR_VLS") - (V4SI "TARGET_VECTOR_VLS") - (V8SI "TARGET_VECTOR_VLS") - (V16SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V32SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V64SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V128SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V256SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V512SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V1024SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V2DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V4DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V8DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 64") - (V16DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128") - (V32DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 256") - (V64DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 512") - (V128DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 1024") - (V256DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 2048") - (V512DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 4096") + (V1HI "riscv_vector::vls_mode_valid_p (V1HImode)") + (V2HI "riscv_vector::vls_mode_valid_p (V2HImode)") + (V4HI "riscv_vector::vls_mode_valid_p (V4HImode)") + (V8HI "riscv_vector::vls_mode_valid_p (V8HImode)") + (V16HI "riscv_vector::vls_mode_valid_p (V16HImode)") + (V32HI "riscv_vector::vls_mode_valid_p (V32HImode) && TARGET_MIN_VLEN >= 64") + (V64HI "riscv_vector::vls_mode_valid_p (V64HImode) && TARGET_MIN_VLEN >= 128") + (V128HI "riscv_vector::vls_mode_valid_p (V128HImode) && TARGET_MIN_VLEN >= 256") + (V256HI "riscv_vector::vls_mode_valid_p (V256HImode) && TARGET_MIN_VLEN >= 512") + (V512HI "riscv_vector::vls_mode_valid_p (V512HImode) && 
TARGET_MIN_VLEN >= 1024") + (V1024HI "riscv_vector::vls_mode_valid_p (V1024HImode) && TARGET_MIN_VLEN >= 2048") + (V2048HI "riscv_vector::vls_mode_valid_p (V2048HImode) && TARGET_MIN_VLEN >= 4096") + (V1SI "riscv_vector::vls_mode_valid_p (V1SImode)") + (V2SI "riscv_vector::vls_mode_valid_p (V2SImode)") + (V4SI "riscv_vector::vls_mode_valid_p (V4SImode)") + (V8SI "riscv_vector::vls_mode_valid_p (V8SImode)") + (V16SI "riscv_vector::vls_mode_valid_p (V16SImode) && TARGET_MIN_VLEN >= 64") + (V32SI "riscv_vector::vls_mode_valid_p (V32SImode) && TARGET_MIN_VLEN >= 128") + (V64SI "riscv_vector::vls_mode_valid_p (V64SImode) && TARGET_MIN_VLEN >= 256") + (V128SI "riscv_vector::vls_mode_valid_p (V128SImode) && TARGET_MIN_VLEN >= 512") + (V256SI "riscv_vector::vls_mode_valid_p (V256SImode) && TARGET_MIN_VLEN >= 1024") + (V512SI "riscv_vector::vls_mode_valid_p (V512SImode) && TARGET_MIN_VLEN >= 2048") + (V1024SI "riscv_vector::vls_mode_valid_p (V1024SImode) && TARGET_MIN_VLEN >= 4096") + (V1DI "riscv_vector::vls_mode_valid_p (V1DImode) && TARGET_VECTOR_ELEN_64") + (V2DI "riscv_vector::vls_mode_valid_p (V2DImode) && TARGET_VECTOR_ELEN_64") + (V4DI "riscv_vector::vls_mode_valid_p (V4DImode) && TARGET_VECTOR_ELEN_64") + (V8DI "riscv_vector::vls_mode_valid_p (V8DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 64") + (V16DI "riscv_vector::vls_mode_valid_p (V16DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128") + (V32DI "riscv_vector::vls_mode_valid_p (V32DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 256") + (V64DI "riscv_vector::vls_mode_valid_p (V64DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 512") + (V128DI "riscv_vector::vls_mode_valid_p (V128DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 1024") + (V256DI "riscv_vector::vls_mode_valid_p (V256DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 2048") + (V512DI "riscv_vector::vls_mode_valid_p (V512DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 4096") ]) ;; Same iterator split reason as VF_ZVFHMIN and VF. 
@@ -1082,27 +1083,27 @@ (RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64") (RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64") - (V1SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32") - (V2SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32") - (V4SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32") - (V8SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32") - (V16SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") - (V32SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") - (V64SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") - (V128SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") - (V256SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") - (V512SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") - (V1024SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") - (V1DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V2DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V4DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V8DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64") - (V16DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128") - (V32DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256") - (V64DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512") - (V128DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024") - (V256DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048") - (V512DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096") + (V1SF "riscv_vector::vls_mode_valid_p (V1SFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32") + (V2SF "riscv_vector::vls_mode_valid_p (V2SFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32") + (V4SF "riscv_vector::vls_mode_valid_p (V4SFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32") + (V8SF "riscv_vector::vls_mode_valid_p (V8SFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32") + (V16SF "riscv_vector::vls_mode_valid_p (V16SFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") + (V32SF "riscv_vector::vls_mode_valid_p (V32SFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") + (V64SF "riscv_vector::vls_mode_valid_p (V64SFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") + (V128SF "riscv_vector::vls_mode_valid_p (V128SFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") + (V256SF "riscv_vector::vls_mode_valid_p (V256SFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") + (V512SF "riscv_vector::vls_mode_valid_p (V512SFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") + (V1024SF "riscv_vector::vls_mode_valid_p (V1024SFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") + (V1DF "riscv_vector::vls_mode_valid_p (V1DFmode) && TARGET_VECTOR_ELEN_FP_64") 
+ (V2DF "riscv_vector::vls_mode_valid_p (V2DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V4DF "riscv_vector::vls_mode_valid_p (V4DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V8DF "riscv_vector::vls_mode_valid_p (V8DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64") + (V16DF "riscv_vector::vls_mode_valid_p (V16DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128") + (V32DF "riscv_vector::vls_mode_valid_p (V32DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256") + (V64DF "riscv_vector::vls_mode_valid_p (V64DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512") + (V128DF "riscv_vector::vls_mode_valid_p (V128DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024") + (V256DF "riscv_vector::vls_mode_valid_p (V256DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048") + (V512DF "riscv_vector::vls_mode_valid_p (V512DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096") ]) (define_mode_iterator VWEXTF [ @@ -1115,27 +1116,27 @@ (RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64") (RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64") - (V1SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32") - (V2SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32") - (V4SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32") - (V8SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32") - (V16SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") - (V32SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") - (V64SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") - (V128SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") - (V256SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") - (V512SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") - (V1024SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") - (V1DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V2DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V4DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V8DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64") - (V16DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128") - (V32DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256") - (V64DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512") - (V128DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024") - (V256DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048") - (V512DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096") + (V1SF "riscv_vector::vls_mode_valid_p (V1SFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32") + (V2SF "riscv_vector::vls_mode_valid_p (V2SFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32") + (V4SF "riscv_vector::vls_mode_valid_p (V4SFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32") + (V8SF "riscv_vector::vls_mode_valid_p (V8SFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32") + (V16SF "riscv_vector::vls_mode_valid_p (V16SFmode) && TARGET_VECTOR_ELEN_FP_16 && 
TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") + (V32SF "riscv_vector::vls_mode_valid_p (V32SFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") + (V64SF "riscv_vector::vls_mode_valid_p (V64SFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") + (V128SF "riscv_vector::vls_mode_valid_p (V128SFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") + (V256SF "riscv_vector::vls_mode_valid_p (V256SFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") + (V512SF "riscv_vector::vls_mode_valid_p (V512SFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") + (V1024SF "riscv_vector::vls_mode_valid_p (V1024SFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") + (V1DF "riscv_vector::vls_mode_valid_p (V1DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V2DF "riscv_vector::vls_mode_valid_p (V2DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V4DF "riscv_vector::vls_mode_valid_p (V4DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V8DF "riscv_vector::vls_mode_valid_p (V8DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64") + (V16DF "riscv_vector::vls_mode_valid_p (V16DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128") + (V32DF "riscv_vector::vls_mode_valid_p (V32DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256") + (V64DF "riscv_vector::vls_mode_valid_p (V64DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512") + (V128DF "riscv_vector::vls_mode_valid_p (V128DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024") + (V256DF "riscv_vector::vls_mode_valid_p (V256DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048") + (V512DF "riscv_vector::vls_mode_valid_p (V512DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096") ]) (define_mode_iterator VWCONVERTI [ @@ -1147,27 +1148,27 @@ (RVVM2DI "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32") (RVVM1DI "TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32") - (V1SI "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V2SI "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V4SI "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V8SI "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V16SI "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 64") - (V32SI "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 128") - (V64SI "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 256") - (V128SI "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 512") - (V256SI "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 1024") - (V512SI "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 2048") - (V1024SI "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 4096") - (V1DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32") - (V2DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32") - (V4DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32") - (V8DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") - (V16DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") - (V32DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") - (V64DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") - (V128DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 
1024") - (V256DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") - (V512DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") + (V1SI "riscv_vector::vls_mode_valid_p (V1SImode) && TARGET_ZVFH") + (V2SI "riscv_vector::vls_mode_valid_p (V2SImode) && TARGET_ZVFH") + (V4SI "riscv_vector::vls_mode_valid_p (V4SImode) && TARGET_ZVFH") + (V8SI "riscv_vector::vls_mode_valid_p (V8SImode) && TARGET_ZVFH") + (V16SI "riscv_vector::vls_mode_valid_p (V16SImode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 64") + (V32SI "riscv_vector::vls_mode_valid_p (V32SImode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 128") + (V64SI "riscv_vector::vls_mode_valid_p (V64SImode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 256") + (V128SI "riscv_vector::vls_mode_valid_p (V128SImode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 512") + (V256SI "riscv_vector::vls_mode_valid_p (V256SImode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 1024") + (V512SI "riscv_vector::vls_mode_valid_p (V512SImode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 2048") + (V1024SI "riscv_vector::vls_mode_valid_p (V1024SImode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 4096") + (V1DI "riscv_vector::vls_mode_valid_p (V1DImode) && TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32") + (V2DI "riscv_vector::vls_mode_valid_p (V2DImode) && TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32") + (V4DI "riscv_vector::vls_mode_valid_p (V4DImode) && TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32") + (V8DI "riscv_vector::vls_mode_valid_p (V8DImode) && TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") + (V16DI "riscv_vector::vls_mode_valid_p (V16DImode) && TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") + (V32DI "riscv_vector::vls_mode_valid_p (V32DImode) && TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") + (V64DI "riscv_vector::vls_mode_valid_p (V64DImode) && TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") + (V128DI "riscv_vector::vls_mode_valid_p (V128DImode) && TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") + (V256DI "riscv_vector::vls_mode_valid_p (V256DImode) && TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") + (V512DI "riscv_vector::vls_mode_valid_p (V512DImode) && TARGET_VECTOR_ELEN_64 && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") ]) (define_mode_iterator VWWCONVERTI [ @@ -1176,16 +1177,16 @@ (RVVM2DI "TARGET_VECTOR_ELEN_64 && TARGET_ZVFH") (RVVM1DI "TARGET_VECTOR_ELEN_64 && TARGET_ZVFH") - (V1DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH") - (V2DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH") - (V4DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH") - (V8DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH && TARGET_MIN_VLEN >= 64") - (V16DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH && TARGET_MIN_VLEN >= 128") - (V32DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH && TARGET_MIN_VLEN >= 256") - (V64DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH && TARGET_MIN_VLEN >= 512") - (V128DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH && TARGET_MIN_VLEN >= 1024") - (V256DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH && TARGET_MIN_VLEN >= 2048") - (V512DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH && TARGET_MIN_VLEN >= 4096") + (V1DI "riscv_vector::vls_mode_valid_p (V1DImode) && 
TARGET_VECTOR_ELEN_64 && TARGET_ZVFH") + (V2DI "riscv_vector::vls_mode_valid_p (V2DImode) && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH") + (V4DI "riscv_vector::vls_mode_valid_p (V4DImode) && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH") + (V8DI "riscv_vector::vls_mode_valid_p (V8DImode) && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH && TARGET_MIN_VLEN >= 64") + (V16DI "riscv_vector::vls_mode_valid_p (V16DImode) && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH && TARGET_MIN_VLEN >= 128") + (V32DI "riscv_vector::vls_mode_valid_p (V32DImode) && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH && TARGET_MIN_VLEN >= 256") + (V64DI "riscv_vector::vls_mode_valid_p (V64DImode) && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH && TARGET_MIN_VLEN >= 512") + (V128DI "riscv_vector::vls_mode_valid_p (V128DImode) && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH && TARGET_MIN_VLEN >= 1024") + (V256DI "riscv_vector::vls_mode_valid_p (V256DImode) && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH && TARGET_MIN_VLEN >= 2048") + (V512DI "riscv_vector::vls_mode_valid_p (V512DImode) && TARGET_VECTOR_ELEN_64 && TARGET_ZVFH && TARGET_MIN_VLEN >= 4096") ]) (define_mode_iterator VQEXTI [ @@ -1194,59 +1195,59 @@ (RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64") (RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64") - (V1SI "TARGET_VECTOR_VLS") - (V2SI "TARGET_VECTOR_VLS") - (V4SI "TARGET_VECTOR_VLS") - (V8SI "TARGET_VECTOR_VLS") - (V16SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V32SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V64SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V128SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V256SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V512SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V1024SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V2DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V4DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V8DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 64") - (V16DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128") - (V32DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 256") - (V64DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 512") - (V128DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 1024") - (V256DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 2048") - (V512DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 4096") + (V1SI "riscv_vector::vls_mode_valid_p (V1SImode)") + (V2SI "riscv_vector::vls_mode_valid_p (V2SImode)") + (V4SI "riscv_vector::vls_mode_valid_p (V4SImode)") + (V8SI "riscv_vector::vls_mode_valid_p (V8SImode)") + (V16SI "riscv_vector::vls_mode_valid_p (V16SImode) && TARGET_MIN_VLEN >= 64") + (V32SI "riscv_vector::vls_mode_valid_p (V32SImode) && TARGET_MIN_VLEN >= 128") + (V64SI "riscv_vector::vls_mode_valid_p (V64SImode) && TARGET_MIN_VLEN >= 256") + (V128SI "riscv_vector::vls_mode_valid_p (V128SImode) && TARGET_MIN_VLEN >= 512") + (V256SI "riscv_vector::vls_mode_valid_p (V256SImode) && TARGET_MIN_VLEN >= 1024") + (V512SI "riscv_vector::vls_mode_valid_p (V512SImode) && TARGET_MIN_VLEN >= 2048") + (V1024SI "riscv_vector::vls_mode_valid_p (V1024SImode) && TARGET_MIN_VLEN >= 4096") + (V1DI "riscv_vector::vls_mode_valid_p (V1DImode) && TARGET_VECTOR_ELEN_64") + (V2DI "riscv_vector::vls_mode_valid_p (V2DImode) && TARGET_VECTOR_ELEN_64") + (V4DI "riscv_vector::vls_mode_valid_p (V4DImode) && TARGET_VECTOR_ELEN_64") + (V8DI 
"riscv_vector::vls_mode_valid_p (V8DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 64") + (V16DI "riscv_vector::vls_mode_valid_p (V16DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128") + (V32DI "riscv_vector::vls_mode_valid_p (V32DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 256") + (V64DI "riscv_vector::vls_mode_valid_p (V64DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 512") + (V128DI "riscv_vector::vls_mode_valid_p (V128DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 1024") + (V256DI "riscv_vector::vls_mode_valid_p (V256DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 2048") + (V512DI "riscv_vector::vls_mode_valid_p (V512DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 4096") ]) (define_mode_iterator VQEXTF [ (RVVM8DF "TARGET_VECTOR_ELEN_FP_64") (RVVM4DF "TARGET_VECTOR_ELEN_FP_64") (RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64") - (V1DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V2DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V4DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V8DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64") - (V16DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128") - (V32DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256") - (V64DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512") - (V128DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024") - (V256DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048") - (V512DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096") + (V1DF "riscv_vector::vls_mode_valid_p (V1DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V2DF "riscv_vector::vls_mode_valid_p (V2DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V4DF "riscv_vector::vls_mode_valid_p (V4DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V8DF "riscv_vector::vls_mode_valid_p (V8DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64") + (V16DF "riscv_vector::vls_mode_valid_p (V16DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128") + (V32DF "riscv_vector::vls_mode_valid_p (V32DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256") + (V64DF "riscv_vector::vls_mode_valid_p (V64DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512") + (V128DF "riscv_vector::vls_mode_valid_p (V128DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024") + (V256DF "riscv_vector::vls_mode_valid_p (V256DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048") + (V512DF "riscv_vector::vls_mode_valid_p (V512DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096") ]) (define_mode_iterator VOEXTI [ (RVVM8DI "TARGET_VECTOR_ELEN_64") (RVVM4DI "TARGET_VECTOR_ELEN_64") (RVVM2DI "TARGET_VECTOR_ELEN_64") (RVVM1DI "TARGET_VECTOR_ELEN_64") - (V1DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V2DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V4DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V8DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 64") - (V16DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128") - (V32DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 256") - (V64DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 512") - (V128DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 1024") - (V256DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 2048") - (V512DI "TARGET_VECTOR_VLS && 
TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 4096") + (V1DI "riscv_vector::vls_mode_valid_p (V1DImode) && TARGET_VECTOR_ELEN_64") + (V2DI "riscv_vector::vls_mode_valid_p (V2DImode) && TARGET_VECTOR_ELEN_64") + (V4DI "riscv_vector::vls_mode_valid_p (V4DImode) && TARGET_VECTOR_ELEN_64") + (V8DI "riscv_vector::vls_mode_valid_p (V8DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 64") + (V16DI "riscv_vector::vls_mode_valid_p (V16DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128") + (V32DI "riscv_vector::vls_mode_valid_p (V32DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 256") + (V64DI "riscv_vector::vls_mode_valid_p (V64DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 512") + (V128DI "riscv_vector::vls_mode_valid_p (V128DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 1024") + (V256DI "riscv_vector::vls_mode_valid_p (V256DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 2048") + (V512DI "riscv_vector::vls_mode_valid_p (V512DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 4096") ]) (define_mode_iterator VT [ @@ -3216,28 +3217,28 @@ (RVVM2DF "TARGET_VECTOR_ELEN_FP_64") (RVVM1DF "TARGET_VECTOR_ELEN_FP_64") - (V1SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V2SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V4SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V8SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V16SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") - (V32SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") - (V64SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") - (V128SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") - (V256SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") - (V512SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") - (V1024SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") - - (V1DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V2DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V4DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V8DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64") - (V16DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128") - (V32DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256") - (V64DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512") - (V128DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024") - (V256DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048") - (V512DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096") + (V1SF "riscv_vector::vls_mode_valid_p (V1SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V2SF "riscv_vector::vls_mode_valid_p (V2SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V4SF "riscv_vector::vls_mode_valid_p (V4SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V8SF "riscv_vector::vls_mode_valid_p (V8SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V16SF "riscv_vector::vls_mode_valid_p (V16SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") + (V32SF "riscv_vector::vls_mode_valid_p (V32SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") + (V64SF "riscv_vector::vls_mode_valid_p (V64SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") + (V128SF "riscv_vector::vls_mode_valid_p (V128SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") + (V256SF "riscv_vector::vls_mode_valid_p 
(V256SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") + (V512SF "riscv_vector::vls_mode_valid_p (V512SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") + (V1024SF "riscv_vector::vls_mode_valid_p (V1024SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") + + (V1DF "riscv_vector::vls_mode_valid_p (V1DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V2DF "riscv_vector::vls_mode_valid_p (V2DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V4DF "riscv_vector::vls_mode_valid_p (V4DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V8DF "riscv_vector::vls_mode_valid_p (V8DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64") + (V16DF "riscv_vector::vls_mode_valid_p (V16DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128") + (V32DF "riscv_vector::vls_mode_valid_p (V32DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256") + (V64DF "riscv_vector::vls_mode_valid_p (V64DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512") + (V128DF "riscv_vector::vls_mode_valid_p (V128DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024") + (V256DF "riscv_vector::vls_mode_valid_p (V256DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048") + (V512DF "riscv_vector::vls_mode_valid_p (V512DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096") ]) (define_mode_attr VDEMOTE [ @@ -3748,281 +3749,281 @@ ;; VLS modes. (define_mode_iterator VLS [ - (V1QI "TARGET_VECTOR_VLS") - (V2QI "TARGET_VECTOR_VLS") - (V4QI "TARGET_VECTOR_VLS") - (V8QI "TARGET_VECTOR_VLS") - (V16QI "TARGET_VECTOR_VLS") - (V32QI "TARGET_VECTOR_VLS") - (V64QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V128QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V256QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V512QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V1024QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V2048QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V4096QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1HI "TARGET_VECTOR_VLS") - (V2HI "TARGET_VECTOR_VLS") - (V4HI "TARGET_VECTOR_VLS") - (V8HI "TARGET_VECTOR_VLS") - (V16HI "TARGET_VECTOR_VLS") - (V32HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V64HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V128HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V256HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V512HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V1024HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V2048HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1SI "TARGET_VECTOR_VLS") - (V2SI "TARGET_VECTOR_VLS") - (V4SI "TARGET_VECTOR_VLS") - (V8SI "TARGET_VECTOR_VLS") - (V16SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V32SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V64SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V128SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V256SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V512SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V1024SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V2DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V4DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V8DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 64") - (V16DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128") - (V32DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 256") - (V64DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 512") - (V128DI 
"TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 1024") - (V256DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 2048") - (V512DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 4096") - (V1HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V2HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V4HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V8HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V16HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V32HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 64") - (V64HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 128") - (V128HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 256") - (V256HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 512") - (V512HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 1024") - (V1024HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 2048") - (V2048HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 4096") - (V1SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V2SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V4SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V8SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V16SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") - (V32SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") - (V64SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") - (V128SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") - (V256SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") - (V512SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") - (V1024SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") - (V1DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V2DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V4DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V8DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64") - (V16DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128") - (V32DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256") - (V64DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512") - (V128DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024") - (V256DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048") - (V512DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096")]) + (V1QI "riscv_vector::vls_mode_valid_p (V1QImode)") + (V2QI "riscv_vector::vls_mode_valid_p (V2QImode)") + (V4QI "riscv_vector::vls_mode_valid_p (V4QImode)") + (V8QI "riscv_vector::vls_mode_valid_p (V8QImode)") + (V16QI "riscv_vector::vls_mode_valid_p (V16QImode)") + (V32QI "riscv_vector::vls_mode_valid_p (V32QImode)") + (V64QI "riscv_vector::vls_mode_valid_p (V64QImode) && TARGET_MIN_VLEN >= 64") + (V128QI "riscv_vector::vls_mode_valid_p (V128QImode) && TARGET_MIN_VLEN >= 128") + (V256QI "riscv_vector::vls_mode_valid_p (V256QImode) && TARGET_MIN_VLEN >= 256") + (V512QI "riscv_vector::vls_mode_valid_p (V512QImode) && TARGET_MIN_VLEN >= 512") + (V1024QI "riscv_vector::vls_mode_valid_p (V1024QImode) && TARGET_MIN_VLEN >= 1024") + (V2048QI "riscv_vector::vls_mode_valid_p (V2048QImode) && TARGET_MIN_VLEN >= 2048") + (V4096QI 
"riscv_vector::vls_mode_valid_p (V4096QImode) && TARGET_MIN_VLEN >= 4096") + (V1HI "riscv_vector::vls_mode_valid_p (V1HImode)") + (V2HI "riscv_vector::vls_mode_valid_p (V2HImode)") + (V4HI "riscv_vector::vls_mode_valid_p (V4HImode)") + (V8HI "riscv_vector::vls_mode_valid_p (V8HImode)") + (V16HI "riscv_vector::vls_mode_valid_p (V16HImode)") + (V32HI "riscv_vector::vls_mode_valid_p (V32HImode) && TARGET_MIN_VLEN >= 64") + (V64HI "riscv_vector::vls_mode_valid_p (V64HImode) && TARGET_MIN_VLEN >= 128") + (V128HI "riscv_vector::vls_mode_valid_p (V128HImode) && TARGET_MIN_VLEN >= 256") + (V256HI "riscv_vector::vls_mode_valid_p (V256HImode) && TARGET_MIN_VLEN >= 512") + (V512HI "riscv_vector::vls_mode_valid_p (V512HImode) && TARGET_MIN_VLEN >= 1024") + (V1024HI "riscv_vector::vls_mode_valid_p (V1024HImode) && TARGET_MIN_VLEN >= 2048") + (V2048HI "riscv_vector::vls_mode_valid_p (V2048HImode) && TARGET_MIN_VLEN >= 4096") + (V1SI "riscv_vector::vls_mode_valid_p (V1SImode)") + (V2SI "riscv_vector::vls_mode_valid_p (V2SImode)") + (V4SI "riscv_vector::vls_mode_valid_p (V4SImode)") + (V8SI "riscv_vector::vls_mode_valid_p (V8SImode)") + (V16SI "riscv_vector::vls_mode_valid_p (V16SImode) && TARGET_MIN_VLEN >= 64") + (V32SI "riscv_vector::vls_mode_valid_p (V32SImode) && TARGET_MIN_VLEN >= 128") + (V64SI "riscv_vector::vls_mode_valid_p (V64SImode) && TARGET_MIN_VLEN >= 256") + (V128SI "riscv_vector::vls_mode_valid_p (V128SImode) && TARGET_MIN_VLEN >= 512") + (V256SI "riscv_vector::vls_mode_valid_p (V256SImode) && TARGET_MIN_VLEN >= 1024") + (V512SI "riscv_vector::vls_mode_valid_p (V512SImode) && TARGET_MIN_VLEN >= 2048") + (V1024SI "riscv_vector::vls_mode_valid_p (V1024SImode) && TARGET_MIN_VLEN >= 4096") + (V1DI "riscv_vector::vls_mode_valid_p (V1DImode) && TARGET_VECTOR_ELEN_64") + (V2DI "riscv_vector::vls_mode_valid_p (V2DImode) && TARGET_VECTOR_ELEN_64") + (V4DI "riscv_vector::vls_mode_valid_p (V4DImode) && TARGET_VECTOR_ELEN_64") + (V8DI "riscv_vector::vls_mode_valid_p (V8DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 64") + (V16DI "riscv_vector::vls_mode_valid_p (V16DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128") + (V32DI "riscv_vector::vls_mode_valid_p (V32DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 256") + (V64DI "riscv_vector::vls_mode_valid_p (V64DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 512") + (V128DI "riscv_vector::vls_mode_valid_p (V128DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 1024") + (V256DI "riscv_vector::vls_mode_valid_p (V256DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 2048") + (V512DI "riscv_vector::vls_mode_valid_p (V512DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 4096") + (V1HF "riscv_vector::vls_mode_valid_p (V1HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V2HF "riscv_vector::vls_mode_valid_p (V2HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V4HF "riscv_vector::vls_mode_valid_p (V4HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V8HF "riscv_vector::vls_mode_valid_p (V8HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V16HF "riscv_vector::vls_mode_valid_p (V16HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V32HF "riscv_vector::vls_mode_valid_p (V32HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 64") + (V64HF "riscv_vector::vls_mode_valid_p (V64HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 128") + (V128HF "riscv_vector::vls_mode_valid_p (V128HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 256") + (V256HF "riscv_vector::vls_mode_valid_p (V256HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 512") + (V512HF 
"riscv_vector::vls_mode_valid_p (V512HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 1024") + (V1024HF "riscv_vector::vls_mode_valid_p (V1024HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 2048") + (V2048HF "riscv_vector::vls_mode_valid_p (V2048HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 4096") + (V1SF "riscv_vector::vls_mode_valid_p (V1SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V2SF "riscv_vector::vls_mode_valid_p (V2SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V4SF "riscv_vector::vls_mode_valid_p (V4SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V8SF "riscv_vector::vls_mode_valid_p (V8SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V16SF "riscv_vector::vls_mode_valid_p (V16SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") + (V32SF "riscv_vector::vls_mode_valid_p (V32SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") + (V64SF "riscv_vector::vls_mode_valid_p (V64SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") + (V128SF "riscv_vector::vls_mode_valid_p (V128SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") + (V256SF "riscv_vector::vls_mode_valid_p (V256SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") + (V512SF "riscv_vector::vls_mode_valid_p (V512SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") + (V1024SF "riscv_vector::vls_mode_valid_p (V1024SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") + (V1DF "riscv_vector::vls_mode_valid_p (V1DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V2DF "riscv_vector::vls_mode_valid_p (V2DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V4DF "riscv_vector::vls_mode_valid_p (V4DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V8DF "riscv_vector::vls_mode_valid_p (V8DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64") + (V16DF "riscv_vector::vls_mode_valid_p (V16DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128") + (V32DF "riscv_vector::vls_mode_valid_p (V32DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256") + (V64DF "riscv_vector::vls_mode_valid_p (V64DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512") + (V128DF "riscv_vector::vls_mode_valid_p (V128DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024") + (V256DF "riscv_vector::vls_mode_valid_p (V256DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048") + (V512DF "riscv_vector::vls_mode_valid_p (V512DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096")]) (define_mode_iterator VLSB [ - (V1BI "TARGET_VECTOR_VLS") - (V2BI "TARGET_VECTOR_VLS") - (V4BI "TARGET_VECTOR_VLS") - (V8BI "TARGET_VECTOR_VLS") - (V16BI "TARGET_VECTOR_VLS") - (V32BI "TARGET_VECTOR_VLS") - (V64BI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V128BI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V256BI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V512BI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V1024BI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V2048BI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V4096BI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096")]) + (V1BI "riscv_vector::vls_mode_valid_p (V1BImode)") + (V2BI "riscv_vector::vls_mode_valid_p (V2BImode)") + (V4BI "riscv_vector::vls_mode_valid_p (V4BImode)") + (V8BI "riscv_vector::vls_mode_valid_p (V8BImode)") + (V16BI "riscv_vector::vls_mode_valid_p (V16BImode)") + (V32BI "riscv_vector::vls_mode_valid_p (V32BImode)") + (V64BI "riscv_vector::vls_mode_valid_p (V64BImode) && TARGET_MIN_VLEN >= 64") + (V128BI "riscv_vector::vls_mode_valid_p (V128BImode) && TARGET_MIN_VLEN >= 128") + (V256BI 
"riscv_vector::vls_mode_valid_p (V256BImode) && TARGET_MIN_VLEN >= 256") + (V512BI "riscv_vector::vls_mode_valid_p (V512BImode) && TARGET_MIN_VLEN >= 512") + (V1024BI "riscv_vector::vls_mode_valid_p (V1024BImode) && TARGET_MIN_VLEN >= 1024") + (V2048BI "riscv_vector::vls_mode_valid_p (V2048BImode) && TARGET_MIN_VLEN >= 2048") + (V4096BI "riscv_vector::vls_mode_valid_p (V4096BImode) && TARGET_MIN_VLEN >= 4096")]) ;; VLS modes that has NUNITS < 32. (define_mode_iterator VLS_AVL_IMM [ - (V1QI "TARGET_VECTOR_VLS") - (V2QI "TARGET_VECTOR_VLS") - (V4QI "TARGET_VECTOR_VLS") - (V8QI "TARGET_VECTOR_VLS") - (V16QI "TARGET_VECTOR_VLS") - (V1HI "TARGET_VECTOR_VLS") - (V2HI "TARGET_VECTOR_VLS") - (V4HI "TARGET_VECTOR_VLS") - (V8HI "TARGET_VECTOR_VLS") - (V16HI "TARGET_VECTOR_VLS") - (V1SI "TARGET_VECTOR_VLS") - (V2SI "TARGET_VECTOR_VLS") - (V4SI "TARGET_VECTOR_VLS") - (V8SI "TARGET_VECTOR_VLS") - (V16SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V1DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V2DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V4DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V8DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 64") - (V16DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128") - (V1HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V2HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V4HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V8HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V16HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16") - (V1SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V2SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V4SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V8SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V16SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") - (V1DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V2DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V4DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V8DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64") - (V16DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128") - - (V1BI "TARGET_VECTOR_VLS") - (V2BI "TARGET_VECTOR_VLS") - (V4BI "TARGET_VECTOR_VLS") - (V8BI "TARGET_VECTOR_VLS") - (V16BI "TARGET_VECTOR_VLS")]) + (V1QI "riscv_vector::vls_mode_valid_p (V1QImode)") + (V2QI "riscv_vector::vls_mode_valid_p (V2QImode)") + (V4QI "riscv_vector::vls_mode_valid_p (V4QImode)") + (V8QI "riscv_vector::vls_mode_valid_p (V8QImode)") + (V16QI "riscv_vector::vls_mode_valid_p (V16QImode)") + (V1HI "riscv_vector::vls_mode_valid_p (V1HImode)") + (V2HI "riscv_vector::vls_mode_valid_p (V2HImode)") + (V4HI "riscv_vector::vls_mode_valid_p (V4HImode)") + (V8HI "riscv_vector::vls_mode_valid_p (V8HImode)") + (V16HI "riscv_vector::vls_mode_valid_p (V16HImode)") + (V1SI "riscv_vector::vls_mode_valid_p (V1SImode)") + (V2SI "riscv_vector::vls_mode_valid_p (V2SImode)") + (V4SI "riscv_vector::vls_mode_valid_p (V4SImode)") + (V8SI "riscv_vector::vls_mode_valid_p (V8SImode)") + (V16SI "riscv_vector::vls_mode_valid_p (V16SImode) && TARGET_MIN_VLEN >= 64") + (V1DI "riscv_vector::vls_mode_valid_p (V1DImode) && TARGET_VECTOR_ELEN_64") + (V2DI "riscv_vector::vls_mode_valid_p (V2DImode) && TARGET_VECTOR_ELEN_64") + (V4DI "riscv_vector::vls_mode_valid_p (V4DImode) && TARGET_VECTOR_ELEN_64") + (V8DI "riscv_vector::vls_mode_valid_p (V8DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 64") + (V16DI 
"riscv_vector::vls_mode_valid_p (V16DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128") + (V1HF "riscv_vector::vls_mode_valid_p (V1HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V2HF "riscv_vector::vls_mode_valid_p (V2HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V4HF "riscv_vector::vls_mode_valid_p (V4HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V8HF "riscv_vector::vls_mode_valid_p (V8HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V16HF "riscv_vector::vls_mode_valid_p (V16HFmode) && TARGET_VECTOR_ELEN_FP_16") + (V1SF "riscv_vector::vls_mode_valid_p (V1SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V2SF "riscv_vector::vls_mode_valid_p (V2SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V4SF "riscv_vector::vls_mode_valid_p (V4SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V8SF "riscv_vector::vls_mode_valid_p (V8SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V16SF "riscv_vector::vls_mode_valid_p (V16SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") + (V1DF "riscv_vector::vls_mode_valid_p (V1DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V2DF "riscv_vector::vls_mode_valid_p (V2DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V4DF "riscv_vector::vls_mode_valid_p (V4DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V8DF "riscv_vector::vls_mode_valid_p (V8DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64") + (V16DF "riscv_vector::vls_mode_valid_p (V16DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128") + + (V1BI "riscv_vector::vls_mode_valid_p (V1BImode)") + (V2BI "riscv_vector::vls_mode_valid_p (V2BImode)") + (V4BI "riscv_vector::vls_mode_valid_p (V4BImode)") + (V8BI "riscv_vector::vls_mode_valid_p (V8BImode)") + (V16BI "riscv_vector::vls_mode_valid_p (V16BImode)")]) ;; VLS modes that has NUNITS >= 32. (define_mode_iterator VLS_AVL_REG [ - (V32QI "TARGET_VECTOR_VLS") - (V64QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V128QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V256QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V512QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V1024QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V2048QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V4096QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V32HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V64HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V128HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V256HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V512HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V1024HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V2048HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V32SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V64SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V128SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V256SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V512SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V1024SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V32DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 256") - (V64DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 512") - (V128DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 1024") - (V256DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 2048") - (V512DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 4096") - (V32HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 64") - (V64HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 128") - (V128HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && 
TARGET_MIN_VLEN >= 256") - (V256HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 512") - (V512HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 1024") - (V1024HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 2048") - (V2048HF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 4096") - (V32SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") - (V64SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") - (V128SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") - (V256SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") - (V512SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") - (V1024SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") - (V32DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256") - (V64DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512") - (V128DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024") - (V256DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048") - (V512DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096") - - (V32BI "TARGET_VECTOR_VLS") - (V64BI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V128BI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V256BI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V512BI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V1024BI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V2048BI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V4096BI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096")]) + (V32QI "riscv_vector::vls_mode_valid_p (V32QImode)") + (V64QI "riscv_vector::vls_mode_valid_p (V64QImode) && TARGET_MIN_VLEN >= 64") + (V128QI "riscv_vector::vls_mode_valid_p (V128QImode) && TARGET_MIN_VLEN >= 128") + (V256QI "riscv_vector::vls_mode_valid_p (V256QImode) && TARGET_MIN_VLEN >= 256") + (V512QI "riscv_vector::vls_mode_valid_p (V512QImode) && TARGET_MIN_VLEN >= 512") + (V1024QI "riscv_vector::vls_mode_valid_p (V1024QImode) && TARGET_MIN_VLEN >= 1024") + (V2048QI "riscv_vector::vls_mode_valid_p (V2048QImode) && TARGET_MIN_VLEN >= 2048") + (V4096QI "riscv_vector::vls_mode_valid_p (V4096QImode) && TARGET_MIN_VLEN >= 4096") + (V32HI "riscv_vector::vls_mode_valid_p (V32HImode) && TARGET_MIN_VLEN >= 64") + (V64HI "riscv_vector::vls_mode_valid_p (V64HImode) && TARGET_MIN_VLEN >= 128") + (V128HI "riscv_vector::vls_mode_valid_p (V128HImode) && TARGET_MIN_VLEN >= 256") + (V256HI "riscv_vector::vls_mode_valid_p (V256HImode) && TARGET_MIN_VLEN >= 512") + (V512HI "riscv_vector::vls_mode_valid_p (V512HImode) && TARGET_MIN_VLEN >= 1024") + (V1024HI "riscv_vector::vls_mode_valid_p (V1024HImode) && TARGET_MIN_VLEN >= 2048") + (V2048HI "riscv_vector::vls_mode_valid_p (V2048HImode) && TARGET_MIN_VLEN >= 4096") + (V32SI "riscv_vector::vls_mode_valid_p (V32SImode) && TARGET_MIN_VLEN >= 128") + (V64SI "riscv_vector::vls_mode_valid_p (V64SImode) && TARGET_MIN_VLEN >= 256") + (V128SI "riscv_vector::vls_mode_valid_p (V128SImode) && TARGET_MIN_VLEN >= 512") + (V256SI "riscv_vector::vls_mode_valid_p (V256SImode) && TARGET_MIN_VLEN >= 1024") + (V512SI "riscv_vector::vls_mode_valid_p (V512SImode) && TARGET_MIN_VLEN >= 2048") + (V1024SI "riscv_vector::vls_mode_valid_p (V1024SImode) && TARGET_MIN_VLEN >= 4096") + (V32DI "riscv_vector::vls_mode_valid_p (V32DImode) && TARGET_VECTOR_ELEN_64 && 
TARGET_MIN_VLEN >= 256") + (V64DI "riscv_vector::vls_mode_valid_p (V64DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 512") + (V128DI "riscv_vector::vls_mode_valid_p (V128DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 1024") + (V256DI "riscv_vector::vls_mode_valid_p (V256DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 2048") + (V512DI "riscv_vector::vls_mode_valid_p (V512DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 4096") + (V32HF "riscv_vector::vls_mode_valid_p (V32HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 64") + (V64HF "riscv_vector::vls_mode_valid_p (V64HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 128") + (V128HF "riscv_vector::vls_mode_valid_p (V128HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 256") + (V256HF "riscv_vector::vls_mode_valid_p (V256HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 512") + (V512HF "riscv_vector::vls_mode_valid_p (V512HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 1024") + (V1024HF "riscv_vector::vls_mode_valid_p (V1024HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 2048") + (V2048HF "riscv_vector::vls_mode_valid_p (V2048HFmode) && TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN >= 4096") + (V32SF "riscv_vector::vls_mode_valid_p (V32SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") + (V64SF "riscv_vector::vls_mode_valid_p (V64SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") + (V128SF "riscv_vector::vls_mode_valid_p (V128SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") + (V256SF "riscv_vector::vls_mode_valid_p (V256SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") + (V512SF "riscv_vector::vls_mode_valid_p (V512SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") + (V1024SF "riscv_vector::vls_mode_valid_p (V1024SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") + (V32DF "riscv_vector::vls_mode_valid_p (V32DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256") + (V64DF "riscv_vector::vls_mode_valid_p (V64DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512") + (V128DF "riscv_vector::vls_mode_valid_p (V128DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024") + (V256DF "riscv_vector::vls_mode_valid_p (V256DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048") + (V512DF "riscv_vector::vls_mode_valid_p (V512DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096") + + (V32BI "riscv_vector::vls_mode_valid_p (V32BImode)") + (V64BI "riscv_vector::vls_mode_valid_p (V64BImode) && TARGET_MIN_VLEN >= 64") + (V128BI "riscv_vector::vls_mode_valid_p (V128BImode) && TARGET_MIN_VLEN >= 128") + (V256BI "riscv_vector::vls_mode_valid_p (V256BImode) && TARGET_MIN_VLEN >= 256") + (V512BI "riscv_vector::vls_mode_valid_p (V512BImode) && TARGET_MIN_VLEN >= 512") + (V1024BI "riscv_vector::vls_mode_valid_p (V1024BImode) && TARGET_MIN_VLEN >= 1024") + (V2048BI "riscv_vector::vls_mode_valid_p (V2048BImode) && TARGET_MIN_VLEN >= 2048") + (V4096BI "riscv_vector::vls_mode_valid_p (V4096BImode) && TARGET_MIN_VLEN >= 4096")]) (define_mode_iterator VLSI [ - (V1QI "TARGET_VECTOR_VLS") - (V2QI "TARGET_VECTOR_VLS") - (V4QI "TARGET_VECTOR_VLS") - (V8QI "TARGET_VECTOR_VLS") - (V16QI "TARGET_VECTOR_VLS") - (V32QI "TARGET_VECTOR_VLS") - (V64QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V128QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V256QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V512QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V1024QI 
"TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V2048QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V4096QI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1HI "TARGET_VECTOR_VLS") - (V2HI "TARGET_VECTOR_VLS") - (V4HI "TARGET_VECTOR_VLS") - (V8HI "TARGET_VECTOR_VLS") - (V16HI "TARGET_VECTOR_VLS") - (V32HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V64HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V128HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V256HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V512HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V1024HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V2048HI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1SI "TARGET_VECTOR_VLS") - (V2SI "TARGET_VECTOR_VLS") - (V4SI "TARGET_VECTOR_VLS") - (V8SI "TARGET_VECTOR_VLS") - (V16SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 64") - (V32SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 128") - (V64SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 256") - (V128SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 512") - (V256SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 1024") - (V512SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 2048") - (V1024SI "TARGET_VECTOR_VLS && TARGET_MIN_VLEN >= 4096") - (V1DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V2DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V4DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64") - (V8DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 64") - (V16DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128") - (V32DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 256") - (V64DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 512") - (V128DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 1024") - (V256DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 2048") - (V512DI "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 4096")]) + (V1QI "riscv_vector::vls_mode_valid_p (V1QImode)") + (V2QI "riscv_vector::vls_mode_valid_p (V2QImode)") + (V4QI "riscv_vector::vls_mode_valid_p (V4QImode)") + (V8QI "riscv_vector::vls_mode_valid_p (V8QImode)") + (V16QI "riscv_vector::vls_mode_valid_p (V16QImode)") + (V32QI "riscv_vector::vls_mode_valid_p (V32QImode)") + (V64QI "riscv_vector::vls_mode_valid_p (V64QImode) && TARGET_MIN_VLEN >= 64") + (V128QI "riscv_vector::vls_mode_valid_p (V128QImode) && TARGET_MIN_VLEN >= 128") + (V256QI "riscv_vector::vls_mode_valid_p (V256QImode) && TARGET_MIN_VLEN >= 256") + (V512QI "riscv_vector::vls_mode_valid_p (V512QImode) && TARGET_MIN_VLEN >= 512") + (V1024QI "riscv_vector::vls_mode_valid_p (V1024QImode) && TARGET_MIN_VLEN >= 1024") + (V2048QI "riscv_vector::vls_mode_valid_p (V2048QImode) && TARGET_MIN_VLEN >= 2048") + (V4096QI "riscv_vector::vls_mode_valid_p (V4096QImode) && TARGET_MIN_VLEN >= 4096") + (V1HI "riscv_vector::vls_mode_valid_p (V1HImode)") + (V2HI "riscv_vector::vls_mode_valid_p (V2HImode)") + (V4HI "riscv_vector::vls_mode_valid_p (V4HImode)") + (V8HI "riscv_vector::vls_mode_valid_p (V8HImode)") + (V16HI "riscv_vector::vls_mode_valid_p (V16HImode)") + (V32HI "riscv_vector::vls_mode_valid_p (V32HImode) && TARGET_MIN_VLEN >= 64") + (V64HI "riscv_vector::vls_mode_valid_p (V64HImode) && TARGET_MIN_VLEN >= 128") + (V128HI "riscv_vector::vls_mode_valid_p (V128HImode) && TARGET_MIN_VLEN >= 256") + (V256HI "riscv_vector::vls_mode_valid_p (V256HImode) && TARGET_MIN_VLEN >= 512") + (V512HI "riscv_vector::vls_mode_valid_p (V512HImode) && TARGET_MIN_VLEN >= 1024") + 
(V1024HI "riscv_vector::vls_mode_valid_p (V1024HImode) && TARGET_MIN_VLEN >= 2048") + (V2048HI "riscv_vector::vls_mode_valid_p (V2048HImode) && TARGET_MIN_VLEN >= 4096") + (V1SI "riscv_vector::vls_mode_valid_p (V1SImode)") + (V2SI "riscv_vector::vls_mode_valid_p (V2SImode)") + (V4SI "riscv_vector::vls_mode_valid_p (V4SImode)") + (V8SI "riscv_vector::vls_mode_valid_p (V8SImode)") + (V16SI "riscv_vector::vls_mode_valid_p (V16SImode) && TARGET_MIN_VLEN >= 64") + (V32SI "riscv_vector::vls_mode_valid_p (V32SImode) && TARGET_MIN_VLEN >= 128") + (V64SI "riscv_vector::vls_mode_valid_p (V64SImode) && TARGET_MIN_VLEN >= 256") + (V128SI "riscv_vector::vls_mode_valid_p (V128SImode) && TARGET_MIN_VLEN >= 512") + (V256SI "riscv_vector::vls_mode_valid_p (V256SImode) && TARGET_MIN_VLEN >= 1024") + (V512SI "riscv_vector::vls_mode_valid_p (V512SImode) && TARGET_MIN_VLEN >= 2048") + (V1024SI "riscv_vector::vls_mode_valid_p (V1024SImode) && TARGET_MIN_VLEN >= 4096") + (V1DI "riscv_vector::vls_mode_valid_p (V1DImode) && TARGET_VECTOR_ELEN_64") + (V2DI "riscv_vector::vls_mode_valid_p (V2DImode) && TARGET_VECTOR_ELEN_64") + (V4DI "riscv_vector::vls_mode_valid_p (V4DImode) && TARGET_VECTOR_ELEN_64") + (V8DI "riscv_vector::vls_mode_valid_p (V8DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 64") + (V16DI "riscv_vector::vls_mode_valid_p (V16DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 128") + (V32DI "riscv_vector::vls_mode_valid_p (V32DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 256") + (V64DI "riscv_vector::vls_mode_valid_p (V64DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 512") + (V128DI "riscv_vector::vls_mode_valid_p (V128DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 1024") + (V256DI "riscv_vector::vls_mode_valid_p (V256DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 2048") + (V512DI "riscv_vector::vls_mode_valid_p (V512DImode) && TARGET_VECTOR_ELEN_64 && TARGET_MIN_VLEN >= 4096")]) (define_mode_iterator VLSF [ - (V1HF "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V2HF "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V4HF "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V8HF "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V16HF "TARGET_VECTOR_VLS && TARGET_ZVFH") - (V32HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 64") - (V64HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 128") - (V128HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 256") - (V256HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 512") - (V512HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 1024") - (V1024HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 2048") - (V2048HF "TARGET_VECTOR_VLS && TARGET_ZVFH && TARGET_MIN_VLEN >= 4096") - (V1SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V2SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V4SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V8SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32") - (V16SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") - (V32SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") - (V64SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") - (V128SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") - (V256SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") - (V512SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") - (V1024SF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") - (V1DF "TARGET_VECTOR_VLS && 
TARGET_VECTOR_ELEN_FP_64") - (V2DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V4DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64") - (V8DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64") - (V16DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128") - (V32DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256") - (V64DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512") - (V128DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024") - (V256DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048") - (V512DF "TARGET_VECTOR_VLS && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096") + (V1HF "riscv_vector::vls_mode_valid_p (V1HFmode) && TARGET_ZVFH") + (V2HF "riscv_vector::vls_mode_valid_p (V2HFmode) && TARGET_ZVFH") + (V4HF "riscv_vector::vls_mode_valid_p (V4HFmode) && TARGET_ZVFH") + (V8HF "riscv_vector::vls_mode_valid_p (V8HFmode) && TARGET_ZVFH") + (V16HF "riscv_vector::vls_mode_valid_p (V16HFmode) && TARGET_ZVFH") + (V32HF "riscv_vector::vls_mode_valid_p (V32HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 64") + (V64HF "riscv_vector::vls_mode_valid_p (V64HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 128") + (V128HF "riscv_vector::vls_mode_valid_p (V128HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 256") + (V256HF "riscv_vector::vls_mode_valid_p (V256HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 512") + (V512HF "riscv_vector::vls_mode_valid_p (V512HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 1024") + (V1024HF "riscv_vector::vls_mode_valid_p (V1024HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 2048") + (V2048HF "riscv_vector::vls_mode_valid_p (V2048HFmode) && TARGET_ZVFH && TARGET_MIN_VLEN >= 4096") + (V1SF "riscv_vector::vls_mode_valid_p (V1SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V2SF "riscv_vector::vls_mode_valid_p (V2SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V4SF "riscv_vector::vls_mode_valid_p (V4SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V8SF "riscv_vector::vls_mode_valid_p (V8SFmode) && TARGET_VECTOR_ELEN_FP_32") + (V16SF "riscv_vector::vls_mode_valid_p (V16SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 64") + (V32SF "riscv_vector::vls_mode_valid_p (V32SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 128") + (V64SF "riscv_vector::vls_mode_valid_p (V64SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 256") + (V128SF "riscv_vector::vls_mode_valid_p (V128SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 512") + (V256SF "riscv_vector::vls_mode_valid_p (V256SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 1024") + (V512SF "riscv_vector::vls_mode_valid_p (V512SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 2048") + (V1024SF "riscv_vector::vls_mode_valid_p (V1024SFmode) && TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN >= 4096") + (V1DF "riscv_vector::vls_mode_valid_p (V1DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V2DF "riscv_vector::vls_mode_valid_p (V2DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V4DF "riscv_vector::vls_mode_valid_p (V4DFmode) && TARGET_VECTOR_ELEN_FP_64") + (V8DF "riscv_vector::vls_mode_valid_p (V8DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 64") + (V16DF "riscv_vector::vls_mode_valid_p (V16DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 128") + (V32DF "riscv_vector::vls_mode_valid_p (V32DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 256") + (V64DF "riscv_vector::vls_mode_valid_p (V64DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 512") + (V128DF 
"riscv_vector::vls_mode_valid_p (V128DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 1024") + (V256DF "riscv_vector::vls_mode_valid_p (V256DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 2048") + (V512DF "riscv_vector::vls_mode_valid_p (V512DFmode) && TARGET_VECTOR_ELEN_FP_64 && TARGET_MIN_VLEN >= 4096") ]) diff --git a/gcc/config/sh/sh.md b/gcc/config/sh/sh.md index 484a34f..193f40d 100644 --- a/gcc/config/sh/sh.md +++ b/gcc/config/sh/sh.md @@ -842,7 +842,7 @@ if (SUBREG_P (reg)) reg = SUBREG_REG (reg); gcc_assert (REG_P (reg)); - if (find_regno_note (curr_insn, REG_DEAD, REGNO (reg)) != NULL_RTX) + if (find_regno_note (curr_insn, REG_DEAD, REGNO (reg)) == NULL_RTX) FAIL; /* FIXME: Maybe also search the predecessor basic blocks to catch diff --git a/gcc/coretypes.h b/gcc/coretypes.h index f86dc16..db7813b 100644 --- a/gcc/coretypes.h +++ b/gcc/coretypes.h @@ -204,6 +204,12 @@ enum tls_model { TLS_MODEL_LOCAL_EXEC }; +/* Types of trampoline implementation. */ +enum trampoline_impl { + TRAMPOLINE_IMPL_STACK, + TRAMPOLINE_IMPL_HEAP +}; + /* Types of ABI for an offload compiler. */ enum offload_abi { OFFLOAD_ABI_UNSET, diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog index 702402f..95755e6 100644 --- a/gcc/cp/ChangeLog +++ b/gcc/cp/ChangeLog @@ -1,3 +1,243 @@ +2023-10-20 Jason Merrill <jason@redhat.com> + + * call.cc (implicit_conversion_1): Rename... + (implicit_conversion): ...to this. Remove the old wrapper. + +2023-10-20 Jason Merrill <jason@redhat.com> + + * call.cc (tourney): Only skip champ_compared_to_predecessor. + +2023-10-20 Nathan Sidwell <nathan@acm.org> + + PR c++/105322 + * module.cc (trees_out::core_vals): Stream CONSTRUCTOR operands + after the type. + (trees_in::core_vals): Likewise. + +2023-10-20 Patrick Palka <ppalka@redhat.com> + + * cp-lang.cc (objcp_tsubst_copy_and_build): Rename to ... + (objcp_tsubst_expr): ... this. + * cp-objcp-common.h (objcp_tsubst_copy_and_build): Rename to ... + (objcp_tsubst_expr): ... this. + * cp-tree.h (tsubst_copy_and_build): Remove declaration. + * init.cc (maybe_instantiate_nsdmi_init): Use tsubst_expr + instead of tsubst_copy_and_build. + * pt.cc (expand_integer_pack): Likewise. + (instantiate_non_dependent_expr_internal): Likewise. + (instantiate_class_template): Use tsubst_stmt instead of + tsubst_expr for STATIC_ASSERT. + (tsubst_function_decl): Adjust tsubst_copy_and_build uses. + (tsubst_arg_types): Likewise. + (tsubst_exception_specification): Likewise. + (tsubst_tree_list): Likewise. + (tsubst): Likewise. + (tsubst_name): Likewise. + (tsubst_omp_clause_decl): Use tsubst_stmt instead of tsubst_expr. + (tsubst_omp_clauses): Likewise. + (tsubst_copy_asm_operands): Adjust tsubst_copy_and_build use. + (tsubst_omp_for_iterator): Use tsubst_stmt instead of tsubst_expr. + (tsubst_expr): Rename to ... + (tsubst_stmt): ... this. + <case CO_YIELD_EXPR, CO_AWAIT_EXPR>: Move to tsubst_expr. + (tsubst_omp_udr): Use tsubst_stmt instead of tsubst_expr. + (tsubst_non_call_postfix_expression): Adjust tsubst_copy_and_build + use. + (tsubst_lambda_expr): Likewise. Use tsubst_stmt instead of + tsubst_expr for the body of a lambda. + (tsubst_copy_and_build_call_args): Rename to ... + (tsubst_call_args): ... this. Adjust tsubst_copy_and_build use. + (tsubst_copy_and_build): Rename to tsubst_expr. Adjust + tsubst_copy_and_build and tsubst_copy_and_build_call_args use. + <case TRANSACTION_EXPR>: Use tsubst_stmt instead of tsubst_expr. + (maybe_instantiate_noexcept): Adjust tsubst_copy_and_build use. 
+ (instantiate_body): Use tsubst_stmt instead of tsubst_expr for + substituting the function body. + (tsubst_initializer_list): Adjust tsubst_copy_and_build use. + +2023-10-20 Patrick Palka <ppalka@redhat.com> + + * cp-tree.h (enum tsubst_flags): Add tf_no_name_lookup. + * pt.cc (tsubst_pack_expansion): Use tsubst for substituting + BASES_TYPE. + (tsubst_decl) <case USING_DECL>: Use tsubst_name instead of + tsubst_copy. + (tsubst) <case TEMPLATE_TYPE_PARM>: Use tsubst_copy_and_build + instead of tsubst_copy for substituting + CLASS_PLACEHOLDER_TEMPLATE. + <case TYPENAME_TYPE>: Use tsubst_name instead of tsubst_copy for + substituting TYPENAME_TYPE_FULLNAME. + (tsubst_name): Define. + (tsubst_qualified_id): Use tsubst_name instead of tsubst_copy + for substituting the component name of a SCOPE_REF. + (tsubst_copy): Remove. + (tsubst_copy_and_build): Clear tf_no_name_lookup at the start, + and remember if it was set. Call maybe_dependent_member_ref if + tf_no_name_lookup was not set. + <case IDENTIFIER_NODE>: Don't do name lookup if tf_no_name_lookup + was set. + <case TEMPLATE_ID_EXPR>: If tf_no_name_lookup was set, use + tsubst_name instead of tsubst_copy_and_build to substitute the + template and don't finish the template-id. + <case BIT_NOT_EXPR>: Handle identifier and type operand (if + tf_no_name_lookup was set). + <case SCOPE_REF>: Avoid trying to resolve a SCOPE_REF if + tf_no_name_lookup was set by calling build_qualified_name directly + instead of tsubst_qualified_id. + <case SIZEOF_EXPR>: Handling of sizeof... copied from tsubst_copy. + <case CALL_EXPR>: Use tsubst_name instead of tsubst_copy to + substitute a TEMPLATE_ID_EXPR callee naming an unresolved template. + <case COMPONENT_REF>: Likewise to substitute the member. + <case FUNCTION_DECL>: Copied from tsubst_copy and merged with ... + <case VAR_DECL, PARM_DECL>: ... these. Initial handling copied + from tsubst_copy. Optimize local variable substitution by + trying retrieve_local_specialization before checking + uses_template_parms. + <case CONST_DECL>: Copied from tsubst_copy. + <case FIELD_DECL>: Likewise. + <case NAMESPACE_DECL>: Likewise. + <case OVERLOAD>: Likewise. + <case TEMPLATE_DECL>: Likewise. + <case TEMPLATE_PARM_INDEX>: Likewise. + <case TYPE_DECL>: Likewise. + <case CLEANUP_POINT_EXPR>: Likewise. + <case OFFSET_REF>: Likewise. + <case EXPR_PACK_EXPANSION>: Likewise. + <case NONTYPE_ARGUMENT_PACK>: Likewise. + <case *_CST>: Likewise. + <case *_*_FOLD_EXPR>: Likewise. + <case DEBUG_BEGIN_STMT>: Likewise. + <case CO_AWAIT_EXPR>: Likewise. + <case TRAIT_EXPR>: Use tsubst and tsubst_copy_and_build instead + of tsubst_copy. + <default>: Copied from tsubst_copy. + (tsubst_initializer_list): Use tsubst and tsubst_copy_and_build + instead of tsubst_copy. + +2023-10-20 Patrick Palka <ppalka@redhat.com> + + PR c++/106086 + * parser.cc (cp_parser_postfix_expression): Consolidate three + calls to finish_call_expr, one to build_new_method_call and + one to build_min_nt_call_vec into one call to finish_call_expr. + Don't call maybe_generic_this_capture here. + * pt.cc (tsubst_copy_and_build) <case CALL_EXPR>: Remove + COMPONENT_REF callee handling. + (type_dependent_expression_p): Use t_d_object_e_p instead of + t_d_e_p for COMPONENT_REF and OFFSET_REF. + * semantics.cc (finish_call_expr): In the type-dependent case, + call maybe_generic_this_capture here instead. + +2023-10-20 Patrick Palka <ppalka@redhat.com> + + * call.cc (build_new_method_call): Remove calls to + build_non_dependent_expr and/or make_args_non_dependent. 
+ * coroutines.cc (finish_co_return_stmt): Likewise. + * cp-tree.h (build_non_dependent_expr): Remove. + (make_args_non_dependent): Remove. + * decl2.cc (grok_array_decl): Remove calls to + build_non_dependent_expr and/or make_args_non_dependent. + (build_offset_ref_call_from_tree): Likewise. + * init.cc (build_new): Likewise. + * pt.cc (make_args_non_dependent): Remove. + (test_build_non_dependent_expr): Remove. + (cp_pt_cc_tests): Adjust. + * semantics.cc (finish_expr_stmt): Remove calls to + build_non_dependent_expr and/or make_args_non_dependent. + (finish_for_expr): Likewise. + (finish_call_expr): Likewise. + (finish_omp_atomic): Likewise. + * typeck.cc (finish_class_member_access_expr): Likewise. + (build_x_indirect_ref): Likewise. + (build_x_binary_op): Likewise. + (build_x_array_ref): Likewise. + (build_x_vec_perm_expr): Likewise. + (build_x_shufflevector): Likewise. + (build_x_unary_op): Likewise. + (cp_build_addressof): Likewise. + (build_x_conditional_expr): Likewise. + (build_x_compound_expr): Likewise. + (build_static_cast): Likewise. + (build_x_modify_expr): Likewise. + (check_return_expr): Likewise. + * typeck2.cc (build_x_arrow): Likewise. + +2023-10-20 Patrick Palka <ppalka@redhat.com> + + * class.cc (instantiate_type): Remove NON_DEPENDENT_EXPR + handling. + * constexpr.cc (cxx_eval_constant_expression): Likewise. + (potential_constant_expression_1): Likewise. + * coroutines.cc (coro_validate_builtin_call): Don't + expect ALIGNOF_EXPR to be wrapped in NON_DEPENDENT_EXPR. + * cp-objcp-common.cc (cp_common_init_ts): Remove + NON_DEPENDENT_EXPR handling. + * cp-tree.def (NON_DEPENDENT_EXPR): Remove. + * cp-tree.h (build_non_dependent_expr): Temporarily redefine as + the identity function. + * cvt.cc (maybe_warn_nodiscard): Handle type-dependent and + variable callee of CALL_EXPR. + * cxx-pretty-print.cc (cxx_pretty_printer::expression): Remove + NON_DEPENDENT_EXPR handling. + * error.cc (dump_decl): Likewise. + (dump_expr): Likewise. + * expr.cc (mark_use): Likewise. + (mark_exp_read): Likewise. + * pt.cc (build_non_dependent_expr): Remove. + * tree.cc (lvalue_kind): Remove NON_DEPENDENT_EXPR handling. + (cp_stabilize_reference): Likewise. + * typeck.cc (warn_for_null_address): Likewise. + (cp_build_binary_op): Handle type-dependent SIZEOF_EXPR operands. + (cp_build_unary_op) <case TRUTH_NOT_EXPR>: Don't fold inside a + template. + +2023-10-20 Alexandre Oliva <oliva@adacore.com> + + * decl.cc (push_throw_library_fn): Mark with ECF_XTHROW. + * except.cc (build_throw): Likewise __cxa_throw, + _ITM_cxa_throw, __cxa_rethrow. + +2023-10-20 Nathaniel Shead <nathanieloshead@gmail.com> + + PR c++/101631 + PR c++/102286 + * call.cc (build_over_call): Fold more indirect refs for trivial + assignment op. + * class.cc (type_has_non_deleted_trivial_default_ctor): Create. + * constexpr.cc (cxx_eval_call_expression): Start lifetime of + union member before entering constructor. + (cxx_eval_component_reference): Check against first member of + value-initialised union. + (cxx_eval_store_expression): Activate member for + value-initialised union. Check for accessing inactive union + member indirectly. + * cp-tree.h (type_has_non_deleted_trivial_default_ctor): + Forward declare. + +2023-10-20 Nathaniel Shead <nathanieloshead@gmail.com> + + * constexpr.cc (is_std_source_location_current): New. + (cxx_eval_constant_expression): Only ignore cast from void* for + specific cases and improve other diagnostics. 
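(Editor's aside, not part of the merged patch: a minimal C++20 sketch of the constexpr union behavior that the cp/constexpr.cc changes listed above enforce. The union U and function set_f are invented names for illustration only; the observable behavior matches the "change of the active member of a union ... before C++20" and "accessing ... member instead of initialized ... member" diagnostics added further down. Compile with -std=c++20.)

// Value-initializing a union activates its first member; an assignment
// whose left operand is a member-access expression may change the active
// member, but reading a member that is not active is rejected in a
// constant expression (and any change of active member is rejected
// before C++20).
union U { int i; float f; };

constexpr float
set_f ()
{
  U u {};        // value-initialized: the first member 'i' is active
  u.f = 1.5f;    // member access on the LHS: 'f' becomes the active member
  return u.f;    // OK: 'f' is now the active member
}

static_assert (set_f () == 1.5f);
// Returning u.i instead of u.f above would not be a constant expression,
// which is the case the new diagnostics report.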
+ +2023-10-19 Marek Polacek <polacek@redhat.com> + + * cp-gimplify.cc (cp_fold_r): Don't call maybe_constant_value. + +2023-10-19 Jason Merrill <jason@redhat.com> + + * typeck2.cc (check_narrowing): Adjust. + +2023-10-19 Jason Merrill <jason@redhat.com> + + * parser.cc (cp_parser_primary_expression): Use G_. + (cp_parser_using_enum): Likewise. + * decl.cc (identify_goto): Likewise. + +2023-10-18 Jason Merrill <jason@redhat.com> + + * typeck2.cc (check_narrowing): Use permerror. + 2023-10-17 Marek Polacek <polacek@redhat.com> PR c++/111840 diff --git a/gcc/cp/call.cc b/gcc/cp/call.cc index e8dafbd..2eb54b5 100644 --- a/gcc/cp/call.cc +++ b/gcc/cp/call.cc @@ -2032,12 +2032,14 @@ reference_binding (tree rto, tree rfrom, tree expr, bool c_cast_p, int flags, return conv; } -/* Most of the implementation of implicit_conversion, with the same - parameters. */ +/* Returns the implicit conversion sequence (see [over.ics]) from type + FROM to type TO. The optional expression EXPR may affect the + conversion. FLAGS are the usual overloading flags. If C_CAST_P is + true, this conversion is coming from a C-style cast. */ static conversion * -implicit_conversion_1 (tree to, tree from, tree expr, bool c_cast_p, - int flags, tsubst_flags_t complain) +implicit_conversion (tree to, tree from, tree expr, bool c_cast_p, + int flags, tsubst_flags_t complain) { conversion *conv; @@ -2167,26 +2169,6 @@ implicit_conversion_1 (tree to, tree from, tree expr, bool c_cast_p, return NULL; } -/* Returns the implicit conversion sequence (see [over.ics]) from type - FROM to type TO. The optional expression EXPR may affect the - conversion. FLAGS are the usual overloading flags. If C_CAST_P is - true, this conversion is coming from a C-style cast. */ - -static conversion * -implicit_conversion (tree to, tree from, tree expr, bool c_cast_p, - int flags, tsubst_flags_t complain) -{ - conversion *conv = implicit_conversion_1 (to, from, expr, c_cast_p, - flags, complain); - if (!conv || conv->bad_p) - return conv; - if (conv_is_prvalue (conv) - && CLASS_TYPE_P (conv->type) - && CLASSTYPE_PURE_VIRTUALS (conv->type)) - conv->bad_p = true; - return conv; -} - /* Like implicit_conversion, but return NULL if the conversion is bad. This is not static so that check_non_deducible_conversion can call it within @@ -10330,10 +10312,7 @@ build_over_call (struct z_candidate *cand, int flags, tsubst_flags_t complain) && DECL_OVERLOADED_OPERATOR_IS (fn, NOP_EXPR) && trivial_fn_p (fn)) { - /* Don't use cp_build_fold_indirect_ref, op= returns an lvalue even if - the object argument isn't one. */ - tree to = cp_build_indirect_ref (input_location, argarray[0], - RO_ARROW, complain); + tree to = cp_build_fold_indirect_ref (argarray[0]); tree type = TREE_TYPE (to); tree as_base = CLASSTYPE_AS_BASE (type); tree arg = argarray[1]; @@ -10341,7 +10320,11 @@ build_over_call (struct z_candidate *cand, int flags, tsubst_flags_t complain) if (is_really_empty_class (type, /*ignore_vptr*/true)) { - /* Avoid copying empty classes. */ + /* Avoid copying empty classes, but ensure op= returns an lvalue even + if the object argument isn't one. This isn't needed in other cases + since MODIFY_EXPR is always considered an lvalue. 
*/ + to = cp_build_addr_expr (to, tf_none); + to = cp_build_indirect_ref (input_location, to, RO_ARROW, complain); val = build2 (COMPOUND_EXPR, type, arg, to); suppress_warning (val, OPT_Wunused); } @@ -11430,12 +11413,7 @@ build_new_method_call (tree instance, tree fns, vec<tree, va_gc> **args, } if (processing_template_decl) - { - orig_args = args == NULL ? NULL : make_tree_vector_copy (*args); - instance = build_non_dependent_expr (instance); - if (args != NULL) - make_args_non_dependent (*args); - } + orig_args = args == NULL ? NULL : make_tree_vector_copy (*args); /* Process the argument list. */ if (args != NULL && *args != NULL) @@ -13231,10 +13209,11 @@ tourney (struct z_candidate *candidates, tsubst_flags_t complain) been compared to. */ for (challenger = candidates; - challenger != champ - && challenger != champ_compared_to_predecessor; + challenger != champ; challenger = challenger->next) { + if (challenger == champ_compared_to_predecessor) + continue; fate = joust (champ, challenger, 0, complain); if (fate != 1) return NULL; diff --git a/gcc/cp/class.cc b/gcc/cp/class.cc index b71333a..0d8b780 100644 --- a/gcc/cp/class.cc +++ b/gcc/cp/class.cc @@ -5688,6 +5688,14 @@ type_has_virtual_destructor (tree type) return (dtor && DECL_VIRTUAL_P (dtor)); } +/* True iff class TYPE has a non-deleted trivial default + constructor. */ + +bool type_has_non_deleted_trivial_default_ctor (tree type) +{ + return TYPE_HAS_TRIVIAL_DFLT (type) && locate_ctor (type); +} + /* Returns true iff T, a class, has a move-assignment or move-constructor. Does not lazily declare either. If USER_P is false, any move function will do. If it is true, the @@ -8843,15 +8851,6 @@ instantiate_type (tree lhstype, tree rhs, tsubst_flags_t complain) rhs = BASELINK_FUNCTIONS (rhs); } - /* If we are in a template, and have a NON_DEPENDENT_EXPR, we cannot - deduce any type information. */ - if (TREE_CODE (rhs) == NON_DEPENDENT_EXPR) - { - if (complain & tf_error) - error ("not enough type information"); - return error_mark_node; - } - /* There are only a few kinds of expressions that may have a type dependent on overload resolution. */ gcc_assert (TREE_CODE (rhs) == ADDR_EXPR diff --git a/gcc/cp/constexpr.cc b/gcc/cp/constexpr.cc index 7c8f2cc..c05760e 100644 --- a/gcc/cp/constexpr.cc +++ b/gcc/cp/constexpr.cc @@ -2309,6 +2309,36 @@ is_std_allocator_allocate (const constexpr_call *call) && is_std_allocator_allocate (call->fundef->decl)); } +/* Return true if FNDECL is std::source_location::current. */ + +static inline bool +is_std_source_location_current (tree fndecl) +{ + if (!decl_in_std_namespace_p (fndecl)) + return false; + + tree name = DECL_NAME (fndecl); + if (name == NULL_TREE || !id_equal (name, "current")) + return false; + + tree ctx = DECL_CONTEXT (fndecl); + if (ctx == NULL_TREE || !CLASS_TYPE_P (ctx) || !TYPE_MAIN_DECL (ctx)) + return false; + + name = DECL_NAME (TYPE_MAIN_DECL (ctx)); + return name && id_equal (name, "source_location"); +} + +/* Overload for the above taking constexpr_call*. */ + +static inline bool +is_std_source_location_current (const constexpr_call *call) +{ + return (call + && call->fundef + && is_std_source_location_current (call->fundef->decl)); +} + /* Return true if FNDECL is __dynamic_cast. */ static inline bool @@ -3160,40 +3190,34 @@ cxx_eval_call_expression (const constexpr_ctx *ctx, tree t, cxx_set_object_constness (ctx, new_obj, /*readonly_p=*/false, non_constant_p, overflow_p); + /* If this is a constructor, we are beginning the lifetime of the + object we are initializing. 
*/ + if (new_obj + && DECL_CONSTRUCTOR_P (fun) + && TREE_CODE (new_obj) == COMPONENT_REF + && TREE_CODE (TREE_TYPE (TREE_OPERAND (new_obj, 0))) == UNION_TYPE) + { + tree activate = build2 (INIT_EXPR, TREE_TYPE (new_obj), + new_obj, + build_constructor (TREE_TYPE (new_obj), + NULL)); + cxx_eval_constant_expression (ctx, activate, + lval, non_constant_p, overflow_p); + ggc_free (activate); + } + tree jump_target = NULL_TREE; cxx_eval_constant_expression (&call_ctx, body, vc_discard, non_constant_p, overflow_p, &jump_target); if (DECL_CONSTRUCTOR_P (fun)) - { - /* This can be null for a subobject constructor call, in - which case what we care about is the initialization - side-effects rather than the value. We could get at the - value by evaluating *this, but we don't bother; there's - no need to put such a call in the hash table. */ - result = lval ? ctx->object : ctx->ctor; - - /* If we've just evaluated a subobject constructor call for an - empty union member, it might not have produced a side effect - that actually activated the union member. So produce such a - side effect now to ensure the union appears initialized. */ - if (!result && new_obj - && TREE_CODE (new_obj) == COMPONENT_REF - && TREE_CODE (TREE_TYPE - (TREE_OPERAND (new_obj, 0))) == UNION_TYPE - && is_really_empty_class (TREE_TYPE (new_obj), - /*ignore_vptr*/false)) - { - tree activate = build2 (MODIFY_EXPR, TREE_TYPE (new_obj), - new_obj, - build_constructor (TREE_TYPE (new_obj), - NULL)); - cxx_eval_constant_expression (ctx, activate, lval, - non_constant_p, overflow_p); - ggc_free (activate); - } - } + /* This can be null for a subobject constructor call, in + which case what we care about is the initialization + side-effects rather than the value. We could get at the + value by evaluating *this, but we don't bother; there's + no need to put such a call in the hash table. */ + result = lval ? ctx->object : ctx->ctor; else if (VOID_TYPE_P (TREE_TYPE (res))) result = void_node; else @@ -4493,6 +4517,7 @@ cxx_eval_component_reference (const constexpr_ctx *ctx, tree t, *non_constant_p = true; return t; } + bool pmf = TYPE_PTRMEMFUNC_P (TREE_TYPE (whole)); FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (whole), i, field, value) { @@ -4511,21 +4536,36 @@ cxx_eval_component_reference (const constexpr_ctx *ctx, tree t, break; } } - if (TREE_CODE (TREE_TYPE (whole)) == UNION_TYPE - && CONSTRUCTOR_NELTS (whole) > 0) + if (TREE_CODE (TREE_TYPE (whole)) == UNION_TYPE) { - /* DR 1188 says we don't have to deal with this. */ - if (!ctx->quiet) + if (CONSTRUCTOR_NELTS (whole) > 0) { - constructor_elt *cep = CONSTRUCTOR_ELT (whole, 0); - if (cep->value == NULL_TREE) - error ("accessing uninitialized member %qD", part); - else - error ("accessing %qD member instead of initialized %qD member in " - "constant expression", part, cep->index); + /* DR 1188 says we don't have to deal with this. */ + if (!ctx->quiet) + { + constructor_elt *cep = CONSTRUCTOR_ELT (whole, 0); + if (cep->value == NULL_TREE) + error ("accessing uninitialized member %qD", part); + else + error ("accessing %qD member instead of initialized %qD member " + "in constant expression", part, cep->index); + } + *non_constant_p = true; + return t; + } + else if (!CONSTRUCTOR_NO_CLEARING (whole)) + { + /* Value-initialized union, check if looking at the first member. 
*/ + tree first = next_aggregate_field (TYPE_FIELDS (TREE_TYPE (whole))); + if (first != part) + { + if (!ctx->quiet) + error ("accessing %qD member instead of initialized %qD " + "member in constant expression", part, first); + *non_constant_p = true; + return t; + } } - *non_constant_p = true; - return t; } /* We only create a CONSTRUCTOR for a subobject when we modify it, so empty @@ -6142,6 +6182,14 @@ cxx_eval_store_expression (const constexpr_ctx *ctx, tree t, mutable_p) && const_object_being_modified == NULL_TREE) const_object_being_modified = probe; + + /* Track named member accesses for unions to validate modifications + that change active member. */ + if (!evaluated && TREE_CODE (probe) == COMPONENT_REF) + vec_safe_push (refs, probe); + else + vec_safe_push (refs, NULL_TREE); + vec_safe_push (refs, elt); vec_safe_push (refs, TREE_TYPE (probe)); probe = ob; @@ -6150,6 +6198,7 @@ cxx_eval_store_expression (const constexpr_ctx *ctx, tree t, case REALPART_EXPR: gcc_assert (probe == target); + vec_safe_push (refs, NULL_TREE); vec_safe_push (refs, probe); vec_safe_push (refs, TREE_TYPE (probe)); probe = TREE_OPERAND (probe, 0); @@ -6157,6 +6206,7 @@ cxx_eval_store_expression (const constexpr_ctx *ctx, tree t, case IMAGPART_EXPR: gcc_assert (probe == target); + vec_safe_push (refs, NULL_TREE); vec_safe_push (refs, probe); vec_safe_push (refs, TREE_TYPE (probe)); probe = TREE_OPERAND (probe, 0); @@ -6245,6 +6295,7 @@ cxx_eval_store_expression (const constexpr_ctx *ctx, tree t, enum tree_code code = TREE_CODE (type); tree reftype = refs->pop(); tree index = refs->pop(); + bool is_access_expr = refs->pop() != NULL_TREE; if (code == COMPLEX_TYPE) { @@ -6283,23 +6334,73 @@ cxx_eval_store_expression (const constexpr_ctx *ctx, tree t, break; } - type = reftype; + /* If a union is zero-initialized, its first non-static named data member + is zero-initialized (and therefore active). */ + if (code == UNION_TYPE + && !no_zero_init + && CONSTRUCTOR_NELTS (*valp) == 0) + if (tree first = next_aggregate_field (TYPE_FIELDS (type))) + CONSTRUCTOR_APPEND_ELT (CONSTRUCTOR_ELTS (*valp), first, NULL_TREE); - if (code == UNION_TYPE && CONSTRUCTOR_NELTS (*valp) - && CONSTRUCTOR_ELT (*valp, 0)->index != index) + /* Check for implicit change of active member for a union. */ + if (code == UNION_TYPE + && (CONSTRUCTOR_NELTS (*valp) == 0 + || CONSTRUCTOR_ELT (*valp, 0)->index != index) + /* An INIT_EXPR of the last member in an access chain is always OK, + but still check implicit change of members earlier on; see + cpp2a/constexpr-union6.C. */ + && !(TREE_CODE (t) == INIT_EXPR && refs->is_empty ())) { - if (cxx_dialect < cxx20) + bool has_active_member = CONSTRUCTOR_NELTS (*valp) != 0; + tree inner = strip_array_types (reftype); + + if (has_active_member && cxx_dialect < cxx20) { if (!ctx->quiet) error_at (cp_expr_loc_or_input_loc (t), "change of the active member of a union " - "from %qD to %qD", + "from %qD to %qD is not a constant expression " + "before C++20", CONSTRUCTOR_ELT (*valp, 0)->index, index); *non_constant_p = true; } - else if (TREE_CODE (t) == MODIFY_EXPR - && CONSTRUCTOR_NO_CLEARING (*valp)) + else if (!is_access_expr + || (TREE_CODE (t) == MODIFY_EXPR + && CLASS_TYPE_P (inner) + && !type_has_non_deleted_trivial_default_ctor (inner))) + { + /* Diagnose changing active union member after initialization + without a valid member access expression, as described in + [class.union.general] p5. 
*/ + if (!ctx->quiet) + { + auto_diagnostic_group d; + if (has_active_member) + error_at (cp_expr_loc_or_input_loc (t), + "accessing %qD member instead of initialized " + "%qD member in constant expression", + index, CONSTRUCTOR_ELT (*valp, 0)->index); + else + error_at (cp_expr_loc_or_input_loc (t), + "accessing uninitialized member %qD", + index); + if (is_access_expr) + inform (DECL_SOURCE_LOCATION (index), + "%qD does not implicitly begin its lifetime " + "because %qT does not have a non-deleted " + "trivial default constructor, use " + "%<std::construct_at%> instead", + index, inner); + else + inform (DECL_SOURCE_LOCATION (index), + "initializing %qD requires a member access " + "expression as the left operand of the assignment", + index); + } + *non_constant_p = true; + } + else if (has_active_member && CONSTRUCTOR_NO_CLEARING (*valp)) { /* Diagnose changing the active union member while the union is in the process of being initialized. */ @@ -6325,6 +6426,7 @@ cxx_eval_store_expression (const constexpr_ctx *ctx, tree t, activated_union_member_p = true; valp = &cep->value; + type = reftype; } /* For initialization of an empty base, the original target will be @@ -7866,33 +7968,70 @@ cxx_eval_constant_expression (const constexpr_ctx *ctx, tree t, if (TYPE_PTROB_P (type) && TYPE_PTR_P (TREE_TYPE (op)) && VOID_TYPE_P (TREE_TYPE (TREE_TYPE (op))) - /* Inside a call to std::construct_at or to - std::allocator<T>::{,de}allocate, we permit casting from void* + /* Inside a call to std::construct_at, + std::allocator<T>::{,de}allocate, or + std::source_location::current, we permit casting from void* because that is compiler-generated code. */ && !is_std_construct_at (ctx->call) - && !is_std_allocator_allocate (ctx->call)) + && !is_std_allocator_allocate (ctx->call) + && !is_std_source_location_current (ctx->call)) { /* Likewise, don't error when casting from void* when OP is &heap uninit and similar. */ tree sop = tree_strip_nop_conversions (op); - if (TREE_CODE (sop) == ADDR_EXPR - && VAR_P (TREE_OPERAND (sop, 0)) - && DECL_ARTIFICIAL (TREE_OPERAND (sop, 0))) + tree decl = NULL_TREE; + if (TREE_CODE (sop) == ADDR_EXPR) + decl = TREE_OPERAND (sop, 0); + if (decl + && VAR_P (decl) + && DECL_ARTIFICIAL (decl) + && (DECL_NAME (decl) == heap_identifier + || DECL_NAME (decl) == heap_uninit_identifier + || DECL_NAME (decl) == heap_vec_identifier + || DECL_NAME (decl) == heap_vec_uninit_identifier)) /* OK */; /* P2738 (C++26): a conversion from a prvalue P of type "pointer to cv void" to a pointer-to-object type T unless P points to an object whose type is similar to T. 
*/ - else if (cxx_dialect > cxx23 - && (sop = cxx_fold_indirect_ref (ctx, loc, - TREE_TYPE (type), sop))) + else if (cxx_dialect > cxx23) { - r = build1 (ADDR_EXPR, type, sop); - break; + r = cxx_fold_indirect_ref (ctx, loc, TREE_TYPE (type), sop); + if (r) + { + r = build1 (ADDR_EXPR, type, r); + break; + } + if (!ctx->quiet) + { + if (TREE_CODE (sop) == ADDR_EXPR) + { + auto_diagnostic_group d; + error_at (loc, "cast from %qT is not allowed in a " + "constant expression because " + "pointed-to type %qT is not similar to %qT", + TREE_TYPE (op), TREE_TYPE (TREE_TYPE (sop)), + TREE_TYPE (type)); + tree obj = build_fold_indirect_ref (sop); + inform (DECL_SOURCE_LOCATION (obj), + "pointed-to object declared here"); + } + else + { + gcc_assert (integer_zerop (sop)); + error_at (loc, "cast from %qT is not allowed in a " + "constant expression because " + "%qE does not point to an object", + TREE_TYPE (op), oldop); + } + } + *non_constant_p = true; + return t; } else { if (!ctx->quiet) - error_at (loc, "cast from %qT is not allowed", + error_at (loc, "cast from %qT is not allowed in a " + "constant expression before C++26", TREE_TYPE (op)); *non_constant_p = true; return t; @@ -8069,7 +8208,6 @@ cxx_eval_constant_expression (const constexpr_ctx *ctx, tree t, case MODOP_EXPR: /* GCC internal stuff. */ case VA_ARG_EXPR: - case NON_DEPENDENT_EXPR: case BASELINK: case OFFSET_REF: if (!ctx->quiet) @@ -9938,14 +10076,6 @@ potential_constant_expression_1 (tree t, bool want_rval, bool strict, bool now, case BIND_EXPR: return RECUR (BIND_EXPR_BODY (t), want_rval); - case NON_DEPENDENT_EXPR: - /* Treat NON_DEPENDENT_EXPR as non-constant: it's not handled by - constexpr evaluation or tsubst, so fold_non_dependent_expr can't - do anything useful with it. And we shouldn't see it in a context - where a constant expression is strictly required, hence the assert. */ - gcc_checking_assert (!(flags & tf_error)); - return false; - case CLEANUP_POINT_EXPR: case MUST_NOT_THROW_EXPR: case TRY_CATCH_EXPR: diff --git a/gcc/cp/coroutines.cc b/gcc/cp/coroutines.cc index 3493d3c..a5464be 100644 --- a/gcc/cp/coroutines.cc +++ b/gcc/cp/coroutines.cc @@ -1351,9 +1351,6 @@ finish_co_return_stmt (location_t kw, tree expr) to undo it so we can try to treat it as an rvalue below. */ expr = maybe_undo_parenthesized_ref (expr); - if (processing_template_decl) - expr = build_non_dependent_expr (expr); - if (error_operand_p (expr)) return error_mark_node; } @@ -1421,8 +1418,7 @@ coro_validate_builtin_call (tree call, tsubst_flags_t) location_t loc = EXPR_LOCATION (arg); /* We expect alignof expressions in templates. */ - if (TREE_CODE (arg) == NON_DEPENDENT_EXPR - && TREE_CODE (TREE_OPERAND (arg, 0)) == ALIGNOF_EXPR) + if (TREE_CODE (arg) == ALIGNOF_EXPR) ; else if (!TREE_CONSTANT (arg)) { diff --git a/gcc/cp/cp-gimplify.cc b/gcc/cp/cp-gimplify.cc index a282c39..33e9411 100644 --- a/gcc/cp/cp-gimplify.cc +++ b/gcc/cp/cp-gimplify.cc @@ -1152,13 +1152,12 @@ cp_fold_r (tree *stmt_p, int *walk_subtrees, void *data_) auto then_fn = cp_fold_r, else_fn = cp_fold_r; /* See if we can figure out if either of the branches is dead. If it is, we don't need to do everything that cp_fold_r does. 
*/ - tree cond = maybe_constant_value (TREE_OPERAND (stmt, 0)); - if (integer_zerop (cond)) + cp_walk_tree (&TREE_OPERAND (stmt, 0), cp_fold_r, data, nullptr); + if (integer_zerop (TREE_OPERAND (stmt, 0))) then_fn = cp_fold_immediate_r; - else if (TREE_CODE (cond) == INTEGER_CST) + else if (integer_nonzerop (TREE_OPERAND (stmt, 0))) else_fn = cp_fold_immediate_r; - cp_walk_tree (&TREE_OPERAND (stmt, 0), cp_fold_r, data, nullptr); if (TREE_OPERAND (stmt, 1)) cp_walk_tree (&TREE_OPERAND (stmt, 1), then_fn, data, nullptr); diff --git a/gcc/cp/cp-lang.cc b/gcc/cp/cp-lang.cc index 2f54146..f2ed83d 100644 --- a/gcc/cp/cp-lang.cc +++ b/gcc/cp/cp-lang.cc @@ -113,10 +113,8 @@ struct lang_hooks lang_hooks = LANG_HOOKS_INITIALIZER; /* The following function does something real, but only in Objective-C++. */ tree -objcp_tsubst_copy_and_build (tree /*t*/, - tree /*args*/, - tsubst_flags_t /*complain*/, - tree /*in_decl*/) +objcp_tsubst_expr (tree /*t*/, tree /*args*/, tsubst_flags_t /*complain*/, + tree /*in_decl*/) { return NULL_TREE; } diff --git a/gcc/cp/cp-objcp-common.cc b/gcc/cp/cp-objcp-common.cc index 93b027b..2093ae0 100644 --- a/gcc/cp/cp-objcp-common.cc +++ b/gcc/cp/cp-objcp-common.cc @@ -525,7 +525,6 @@ cp_common_init_ts (void) MARK_TS_EXP (MUST_NOT_THROW_EXPR); MARK_TS_EXP (NEW_EXPR); MARK_TS_EXP (NOEXCEPT_EXPR); - MARK_TS_EXP (NON_DEPENDENT_EXPR); MARK_TS_EXP (OFFSETOF_EXPR); MARK_TS_EXP (OFFSET_REF); MARK_TS_EXP (PSEUDO_DTOR_EXPR); diff --git a/gcc/cp/cp-objcp-common.h b/gcc/cp/cp-objcp-common.h index 80893aa..1408301 100644 --- a/gcc/cp/cp-objcp-common.h +++ b/gcc/cp/cp-objcp-common.h @@ -24,7 +24,7 @@ along with GCC; see the file COPYING3. If not see /* In cp/objcp-common.c, cp/cp-lang.cc and objcp/objcp-lang.cc. */ extern tree cp_get_debug_type (const_tree); -extern tree objcp_tsubst_copy_and_build (tree, tree, tsubst_flags_t, tree); +extern tree objcp_tsubst_expr (tree, tree, tsubst_flags_t, tree); extern int cp_decl_dwarf_attribute (const_tree, int); extern int cp_type_dwarf_attribute (const_tree, int); diff --git a/gcc/cp/cp-tree.def b/gcc/cp/cp-tree.def index 0e66ca7..d78005e 100644 --- a/gcc/cp/cp-tree.def +++ b/gcc/cp/cp-tree.def @@ -262,17 +262,6 @@ DEFTREECODE (TYPEID_EXPR, "typeid_expr", tcc_expression, 1) DEFTREECODE (NOEXCEPT_EXPR, "noexcept_expr", tcc_unary, 1) DEFTREECODE (SPACESHIP_EXPR, "spaceship_expr", tcc_expression, 2) -/* A placeholder for an expression that is not type-dependent, but - does occur in a template. When an expression that is not - type-dependent appears in a larger expression, we must compute the - type of that larger expression. That computation would normally - modify the original expression, which would change the mangling of - that expression if it appeared in a template argument list. In - that situation, we create a NON_DEPENDENT_EXPR to take the place of - the original expression. The expression is the only operand -- it - is only needed for diagnostics. */ -DEFTREECODE (NON_DEPENDENT_EXPR, "non_dependent_expr", tcc_expression, 1) - /* CTOR_INITIALIZER is a placeholder in template code for a call to setup_vtbl_pointer (and appears in all functions, not just ctors). */ DEFTREECODE (CTOR_INITIALIZER, "ctor_initializer", tcc_expression, 1) diff --git a/gcc/cp/cp-tree.h b/gcc/cp/cp-tree.h index 1d7df62..30fe716 100644 --- a/gcc/cp/cp-tree.h +++ b/gcc/cp/cp-tree.h @@ -5621,6 +5621,9 @@ enum tsubst_flags { tf_qualifying_scope = 1 << 14, /* Substituting the LHS of the :: operator. Affects TYPENAME_TYPE resolution from make_typename_type. 
*/ + tf_no_name_lookup = 1 << 15, /* Don't look up the terminal name of an + outermost id-expression, or resolve its + constituent template-ids or qualified-ids. */ /* Convenient substitution flags combinations. */ tf_warning_or_error = tf_warning | tf_error }; @@ -6817,6 +6820,7 @@ extern bool trivial_default_constructor_is_constexpr (tree); extern bool type_has_constexpr_default_constructor (tree); extern bool type_has_constexpr_destructor (tree); extern bool type_has_virtual_destructor (tree); +extern bool type_has_non_deleted_trivial_default_ctor (tree); extern bool classtype_has_move_assign_or_move_ctor_p (tree, bool user_declared); extern bool classtype_has_non_deleted_move_ctor (tree); extern tree classtype_has_depr_implicit_copy (tree); @@ -7457,7 +7461,6 @@ extern void instantiate_pending_templates (int); extern tree tsubst_default_argument (tree, int, tree, tree, tsubst_flags_t); extern tree tsubst (tree, tree, tsubst_flags_t, tree); -extern tree tsubst_copy_and_build (tree, tree, tsubst_flags_t, tree); extern tree tsubst_expr (tree, tree, tsubst_flags_t, tree); extern tree tsubst_pack_expansion (tree, tree, tsubst_flags_t, tree); extern tree tsubst_argument_pack (tree, tree, tsubst_flags_t, tree); @@ -7490,8 +7493,6 @@ extern bool any_value_dependent_elements_p (const_tree); extern bool dependent_omp_for_p (tree, tree, tree, tree); extern tree resolve_typename_type (tree, bool); extern tree template_for_substitution (tree); -extern tree build_non_dependent_expr (tree); -extern void make_args_non_dependent (vec<tree, va_gc> *); extern bool reregister_specialization (tree, tree, tree); extern tree instantiate_non_dependent_expr (tree, tsubst_flags_t = tf_error); extern tree instantiate_non_dependent_expr_internal (tree, tsubst_flags_t); diff --git a/gcc/cp/cvt.cc b/gcc/cp/cvt.cc index 96abfae..4dfb39f 100644 --- a/gcc/cp/cvt.cc +++ b/gcc/cp/cvt.cc @@ -1048,7 +1048,7 @@ maybe_warn_nodiscard (tree expr, impl_conv_void implicit) call = TARGET_EXPR_INITIAL (expr); location_t loc = cp_expr_loc_or_input_loc (call); tree callee = cp_get_callee (call); - if (!callee) + if (!callee || !TREE_TYPE (callee)) return; tree type = TREE_TYPE (callee); @@ -1056,6 +1056,8 @@ maybe_warn_nodiscard (tree expr, impl_conv_void implicit) type = TYPE_PTRMEMFUNC_FN_TYPE (type); if (INDIRECT_TYPE_P (type)) type = TREE_TYPE (type); + if (!FUNC_OR_METHOD_TYPE_P (type)) + return; tree rettype = TREE_TYPE (type); tree fn = cp_get_fndecl_from_callee (callee); diff --git a/gcc/cp/cxx-pretty-print.cc b/gcc/cp/cxx-pretty-print.cc index eb16e63..6a82358 100644 --- a/gcc/cp/cxx-pretty-print.cc +++ b/gcc/cp/cxx-pretty-print.cc @@ -1207,7 +1207,6 @@ cxx_pretty_printer::expression (tree t) assignment_expression (t); break; - case NON_DEPENDENT_EXPR: case MUST_NOT_THROW_EXPR: expression (TREE_OPERAND (t, 0)); break; diff --git a/gcc/cp/decl.cc b/gcc/cp/decl.cc index 255c402..16af59d 100644 --- a/gcc/cp/decl.cc +++ b/gcc/cp/decl.cc @@ -3607,8 +3607,8 @@ identify_goto (tree decl, location_t loc, const location_t *locus, { bool complained = emit_diagnostic (diag_kind, loc, 0, - decl ? N_("jump to label %qD") - : N_("jump to case label"), decl); + decl ? 
G_("jump to label %qD") + : G_("jump to case label"), decl); if (complained && locus) inform (*locus, " from here"); return complained; @@ -5281,7 +5281,8 @@ push_cp_library_fn (enum tree_code operator_code, tree type, tree push_throw_library_fn (tree name, tree type) { - tree fn = push_library_fn (name, type, NULL_TREE, ECF_NORETURN | ECF_COLD); + tree fn = push_library_fn (name, type, NULL_TREE, + ECF_NORETURN | ECF_XTHROW | ECF_COLD); return fn; } diff --git a/gcc/cp/decl2.cc b/gcc/cp/decl2.cc index 344e19e..0aa1e35 100644 --- a/gcc/cp/decl2.cc +++ b/gcc/cp/decl2.cc @@ -427,14 +427,8 @@ grok_array_decl (location_t loc, tree array_expr, tree index_exp, return build_min_nt_loc (loc, ARRAY_REF, array_expr, index_exp, NULL_TREE, NULL_TREE); } - array_expr = build_non_dependent_expr (array_expr); - if (index_exp) - index_exp = build_non_dependent_expr (index_exp); - else - { - orig_index_exp_list = make_tree_vector_copy (*index_exp_list); - make_args_non_dependent (*index_exp_list); - } + if (!index_exp) + orig_index_exp_list = make_tree_vector_copy (*index_exp_list); } type = TREE_TYPE (array_expr); @@ -5435,18 +5429,13 @@ build_offset_ref_call_from_tree (tree fn, vec<tree, va_gc> **args, orig_args = make_tree_vector_copy (*args); /* Transform the arguments and add the implicit "this" - parameter. That must be done before the FN is transformed - because we depend on the form of FN. */ - make_args_non_dependent (*args); - object = build_non_dependent_expr (object); + parameter. */ if (TREE_CODE (TREE_TYPE (fn)) == METHOD_TYPE) { if (TREE_CODE (fn) == DOTSTAR_EXPR) object = cp_build_addr_expr (object, complain); vec_safe_insert (*args, 0, object); } - /* Now that the arguments are done, transform FN. */ - fn = build_non_dependent_expr (fn); } /* A qualified name corresponding to a bound pointer-to-member is diff --git a/gcc/cp/error.cc b/gcc/cp/error.cc index 767478cf..0ed69bc 100644 --- a/gcc/cp/error.cc +++ b/gcc/cp/error.cc @@ -1510,10 +1510,6 @@ dump_decl (cxx_pretty_printer *pp, tree t, int flags) dump_decl (pp, BASELINK_FUNCTIONS (t), flags); break; - case NON_DEPENDENT_EXPR: - dump_expr (pp, t, flags); - break; - case TEMPLATE_TYPE_PARM: if (flags & TFF_DECL_SPECIFIERS) pp->declaration (t); @@ -2942,10 +2938,6 @@ dump_expr (cxx_pretty_printer *pp, tree t, int flags) pp_cxx_right_paren (pp); break; - case NON_DEPENDENT_EXPR: - dump_expr (pp, TREE_OPERAND (t, 0), flags); - break; - case ARGUMENT_PACK_SELECT: dump_template_argument (pp, ARGUMENT_PACK_SELECT_FROM_PACK (t), flags); break; diff --git a/gcc/cp/except.cc b/gcc/cp/except.cc index 6c0f081..e32efb3 100644 --- a/gcc/cp/except.cc +++ b/gcc/cp/except.cc @@ -657,12 +657,13 @@ build_throw (location_t loc, tree exp) tree args[3] = {ptr_type_node, ptr_type_node, cleanup_type}; throw_fn = declare_library_fn_1 ("__cxa_throw", - ECF_NORETURN | ECF_COLD, + ECF_NORETURN | ECF_XTHROW | ECF_COLD, void_type_node, 3, args); if (flag_tm && throw_fn != error_mark_node) { tree itm_fn = declare_library_fn_1 ("_ITM_cxa_throw", - ECF_NORETURN | ECF_COLD, + ECF_NORETURN | ECF_XTHROW + | ECF_COLD, void_type_node, 3, args); if (itm_fn != error_mark_node) { @@ -797,7 +798,8 @@ build_throw (location_t loc, tree exp) if (!rethrow_fn) { rethrow_fn = declare_library_fn_1 ("__cxa_rethrow", - ECF_NORETURN | ECF_COLD, + ECF_NORETURN | ECF_XTHROW + | ECF_COLD, void_type_node, 0, NULL); if (flag_tm && rethrow_fn != error_mark_node) apply_tm_attr (rethrow_fn, get_identifier ("transaction_pure")); diff --git a/gcc/cp/expr.cc b/gcc/cp/expr.cc index cdd29c1..8371a24 
100644 --- a/gcc/cp/expr.cc +++ b/gcc/cp/expr.cc @@ -147,7 +147,6 @@ mark_use (tree expr, bool rvalue_p, bool read_p, } break; case COMPONENT_REF: - case NON_DEPENDENT_EXPR: recurse_op[0] = true; break; case COMPOUND_EXPR: @@ -371,7 +370,6 @@ mark_exp_read (tree exp) case ADDR_EXPR: case INDIRECT_REF: case FLOAT_EXPR: - case NON_DEPENDENT_EXPR: case VIEW_CONVERT_EXPR: mark_exp_read (TREE_OPERAND (exp, 0)); break; diff --git a/gcc/cp/init.cc b/gcc/cp/init.cc index c583029..d48bb16 100644 --- a/gcc/cp/init.cc +++ b/gcc/cp/init.cc @@ -621,8 +621,7 @@ maybe_instantiate_nsdmi_init (tree member, tsubst_flags_t complain) start_lambda_scope (member); /* Do deferred instantiation of the NSDMI. */ - init = tsubst_copy_and_build (init, DECL_TI_ARGS (member), - complain, member); + init = tsubst_expr (init, DECL_TI_ARGS (member), complain, member); init = digest_nsdmi_init (member, init, complain); finish_lambda_scope (); @@ -3920,11 +3919,6 @@ build_new (location_t loc, vec<tree, va_gc> **placement, tree type, (**init)[i] = copy_node (e); } } - - make_args_non_dependent (*placement); - if (nelts) - nelts = build_non_dependent_expr (nelts); - make_args_non_dependent (*init); } if (nelts) diff --git a/gcc/cp/module.cc b/gcc/cp/module.cc index bbb1e20..539518d 100644 --- a/gcc/cp/module.cc +++ b/gcc/cp/module.cc @@ -6212,19 +6212,9 @@ trees_out::core_vals (tree t) break; case CONSTRUCTOR: - { - unsigned len = vec_safe_length (t->constructor.elts); - if (streaming_p ()) - WU (len); - if (len) - for (unsigned ix = 0; ix != len; ix++) - { - const constructor_elt &elt = (*t->constructor.elts)[ix]; - - WT (elt.index); - WT (elt.value); - } - } + // This must be streamed /after/ we've streamed the type, + // because it can directly refer to elements of the type. Eg, + // FIELD_DECLs of a RECORD_TYPE. break; case OMP_CLAUSE: @@ -6458,6 +6448,21 @@ trees_out::core_vals (tree t) WU (prec); } + if (TREE_CODE (t) == CONSTRUCTOR) + { + unsigned len = vec_safe_length (t->constructor.elts); + if (streaming_p ()) + WU (len); + if (len) + for (unsigned ix = 0; ix != len; ix++) + { + const constructor_elt &elt = (*t->constructor.elts)[ix]; + + WT (elt.index); + WT (elt.value); + } + } + #undef WT #undef WU } @@ -6717,18 +6722,7 @@ trees_in::core_vals (tree t) break; case CONSTRUCTOR: - if (unsigned len = u ()) - { - vec_alloc (t->constructor.elts, len); - for (unsigned ix = 0; ix != len; ix++) - { - constructor_elt elt; - - RT (elt.index); - RTU (elt.value); - t->constructor.elts->quick_push (elt); - } - } + // Streamed after the node's type. break; case OMP_CLAUSE: @@ -6901,6 +6895,20 @@ trees_in::core_vals (tree t) t->typed.type = type; } + if (TREE_CODE (t) == CONSTRUCTOR) + if (unsigned len = u ()) + { + vec_alloc (t->constructor.elts, len); + for (unsigned ix = 0; ix != len; ix++) + { + constructor_elt elt; + + RT (elt.index); + RTU (elt.value); + t->constructor.elts->quick_push (elt); + } + } + #undef RT #undef RM #undef RU diff --git a/gcc/cp/parser.cc b/gcc/cp/parser.cc index 57b62fb..5483121 100644 --- a/gcc/cp/parser.cc +++ b/gcc/cp/parser.cc @@ -6206,8 +6206,8 @@ cp_parser_primary_expression (cp_parser *parser, { const char *msg = (TREE_CODE (decl) == PARM_DECL - ? _("parameter %qD may not appear in this context") - : _("local variable %qD may not appear in this context")); + ? 
G_("parameter %qD may not appear in this context") + : G_("local variable %qD may not appear in this context")); error_at (id_expression.get_location (), msg, decl.get_value ()); return error_mark_node; @@ -8055,54 +8055,12 @@ cp_parser_postfix_expression (cp_parser *parser, bool address_p, bool cast_p, close_paren_loc); iloc_sentinel ils (combined_loc); - if (TREE_CODE (postfix_expression) == COMPONENT_REF) - { - tree instance = TREE_OPERAND (postfix_expression, 0); - tree fn = TREE_OPERAND (postfix_expression, 1); - - if (processing_template_decl - && (type_dependent_object_expression_p (instance) - || (!BASELINK_P (fn) - && TREE_CODE (fn) != FIELD_DECL) - || type_dependent_expression_p (fn) - || any_type_dependent_arguments_p (args))) - { - maybe_generic_this_capture (instance, fn); - postfix_expression - = build_min_nt_call_vec (postfix_expression, args); - } - else if (BASELINK_P (fn)) - { - postfix_expression - = (build_new_method_call - (instance, fn, &args, NULL_TREE, - (idk == CP_ID_KIND_QUALIFIED - ? LOOKUP_NORMAL|LOOKUP_NONVIRTUAL - : LOOKUP_NORMAL), - /*fn_p=*/NULL, - complain)); - } - else - postfix_expression - = finish_call_expr (postfix_expression, &args, - /*disallow_virtual=*/false, - /*koenig_p=*/false, - complain); - } - else if (TREE_CODE (postfix_expression) == OFFSET_REF - || TREE_CODE (postfix_expression) == MEMBER_REF - || TREE_CODE (postfix_expression) == DOTSTAR_EXPR) + if (TREE_CODE (postfix_expression) == OFFSET_REF + || TREE_CODE (postfix_expression) == MEMBER_REF + || TREE_CODE (postfix_expression) == DOTSTAR_EXPR) postfix_expression = (build_offset_ref_call_from_tree (postfix_expression, &args, complain)); - else if (idk == CP_ID_KIND_QUALIFIED) - /* A call to a static class member, or a namespace-scope - function. */ - postfix_expression - = finish_call_expr (postfix_expression, &args, - /*disallow_virtual=*/true, - koenig_p, - complain); else /* All other function calls. */ { @@ -8115,12 +8073,14 @@ cp_parser_postfix_expression (cp_parser *parser, bool address_p, bool cast_p, "not permitted in intervening code"); parser->omp_for_parse_state->fail = true; } + bool disallow_virtual = (idk == CP_ID_KIND_QUALIFIED); postfix_expression = finish_call_expr (postfix_expression, &args, - /*disallow_virtual=*/false, + disallow_virtual, koenig_p, complain); } + if (close_paren_loc != UNKNOWN_LOCATION) postfix_expression.set_location (combined_loc); @@ -22145,16 +22105,16 @@ cp_parser_using_enum (cp_parser *parser) shall have a reachable enum-specifier. 
*/ const char *msg = nullptr; if (cxx_dialect < cxx20) - msg = _("%<using enum%> " - "only available with %<-std=c++20%> or %<-std=gnu++20%>"); + msg = G_("%<using enum%> " + "only available with %<-std=c++20%> or %<-std=gnu++20%>"); else if (dependent_type_p (type)) - msg = _("%<using enum%> of dependent type %qT"); + msg = G_("%<using enum%> of dependent type %qT"); else if (TREE_CODE (type) != ENUMERAL_TYPE) - msg = _("%<using enum%> of non-enumeration type %q#T"); + msg = G_("%<using enum%> of non-enumeration type %q#T"); else if (!COMPLETE_TYPE_P (type)) - msg = _("%<using enum%> of incomplete type %qT"); + msg = G_("%<using enum%> of incomplete type %qT"); else if (OPAQUE_ENUM_P (type)) - msg = _("%<using enum%> of %qT before its enum-specifier"); + msg = G_("%<using enum%> of %qT before its enum-specifier"); if (msg) { location_t loc = make_location (start, start, end); diff --git a/gcc/cp/pt.cc b/gcc/cp/pt.cc index 7cbf903..210c6cb 100644 --- a/gcc/cp/pt.cc +++ b/gcc/cp/pt.cc @@ -204,9 +204,10 @@ static void copy_default_args_to_explicit_spec (tree); static bool invalid_nontype_parm_type_p (tree, tsubst_flags_t); static bool dependent_template_arg_p (tree); static bool dependent_type_p_r (tree); -static tree tsubst_copy (tree, tree, tsubst_flags_t, tree); +static tree tsubst_stmt (tree, tree, tsubst_flags_t, tree); static tree tsubst_decl (tree, tree, tsubst_flags_t, bool = true); static tree tsubst_scope (tree, tree, tsubst_flags_t, tree); +static tree tsubst_name (tree, tree, tsubst_flags_t, tree); static void perform_instantiation_time_access_checks (tree, tree); static tree listify (tree); static tree listify_autos (tree, tree); @@ -3763,7 +3764,7 @@ expand_integer_pack (tree call, tree args, tsubst_flags_t complain, tree in_decl) { tree ohi = CALL_EXPR_ARG (call, 0); - tree hi = tsubst_copy_and_build (ohi, args, complain, in_decl); + tree hi = tsubst_expr (ohi, args, complain, in_decl); if (instantiation_dependent_expression_p (hi)) { @@ -3771,7 +3772,7 @@ expand_integer_pack (tree call, tree args, tsubst_flags_t complain, { /* Work around maybe_convert_nontype_argument not doing this for dependent arguments. Don't use IMPLICIT_CONV_EXPR_NONTYPE_ARG - because that will make tsubst_copy_and_build ignore it. */ + because that will make tsubst_expr ignore it. */ tree type = tsubst (TREE_TYPE (ohi), args, complain, in_decl); if (!TREE_TYPE (hi) || !same_type_p (type, TREE_TYPE (hi))) hi = build1 (IMPLICIT_CONV_EXPR, type, hi); @@ -6423,10 +6424,7 @@ redeclare_class_template (tree type, tree parms, tree cons) tree instantiate_non_dependent_expr_internal (tree expr, tsubst_flags_t complain) { - return tsubst_copy_and_build (expr, - /*args=*/NULL_TREE, - complain, - /*in_decl=*/NULL_TREE); + return tsubst_expr (expr, /*args=*/NULL_TREE, complain, /*in_decl=*/NULL_TREE); } /* Instantiate the non-dependent expression EXPR. */ @@ -12367,7 +12365,7 @@ instantiate_class_template (tree type) { /* Build new TYPE_FIELDS. 
*/ if (TREE_CODE (t) == STATIC_ASSERT) - tsubst_expr (t, args, tf_warning_or_error, NULL_TREE); + tsubst_stmt (t, args, tf_warning_or_error, NULL_TREE); else if (TREE_CODE (t) != CONST_DECL) { tree r; @@ -13373,15 +13371,11 @@ tsubst_pack_expansion (tree t, tree args, tsubst_flags_t complain, if (TREE_CODE (parm_pack) == BASES) { gcc_assert (parm_pack == pattern); + tree type = tsubst (BASES_TYPE (parm_pack), args, complain, in_decl); if (BASES_DIRECT (parm_pack)) - return calculate_direct_bases (tsubst_expr (BASES_TYPE (parm_pack), - args, complain, - in_decl), - complain); + return calculate_direct_bases (type, complain); else - return calculate_bases (tsubst_expr (BASES_TYPE (parm_pack), - args, complain, in_decl), - complain); + return calculate_bases (type, complain); } else if (builtin_pack_call_p (parm_pack)) { @@ -14459,7 +14453,7 @@ tsubst_function_decl (tree t, tree args, tsubst_flags_t complain, if (DECL_HAS_DEPENDENT_EXPLICIT_SPEC_P (t)) { tree spec = lookup_explicit_specifier (t); - spec = tsubst_copy_and_build (spec, args, complain, in_decl); + spec = tsubst_expr (spec, args, complain, in_decl); spec = build_explicit_specifier (spec, complain); if (spec == error_mark_node) return error_mark_node; @@ -15171,7 +15165,7 @@ tsubst_decl (tree t, tree args, tsubst_flags_t complain, variadic_p = true; } else - name = tsubst_copy (name, args, complain, in_decl); + name = tsubst_name (name, args, complain, in_decl); int len; if (!variadic_p) @@ -15588,7 +15582,7 @@ tsubst_arg_types (tree arg_types, if (lambda_fn_in_template_p (in_decl) || (in_decl && TREE_CODE (in_decl) == FUNCTION_DECL && DECL_LOCAL_DECL_P (in_decl))) - default_arg = tsubst_copy_and_build (default_arg, args, complain, in_decl); + default_arg = tsubst_expr (default_arg, args, complain, in_decl); tree remaining_arg_types = tsubst_arg_types (TREE_CHAIN (arg_types), args, end, complain, in_decl); @@ -15769,7 +15763,7 @@ tsubst_exception_specification (tree fntype, args); expr = DEFERRED_NOEXCEPT_PATTERN (expr); } - new_specs = tsubst_copy_and_build (expr, args, complain, in_decl); + new_specs = tsubst_expr (expr, args, complain, in_decl); } new_specs = build_noexcept_spec (new_specs, complain); /* We've instantiated a template before a noexcept-specifier @@ -15866,7 +15860,7 @@ tsubst_tree_list (tree t, tree args, tsubst_flags_t complain, tree in_decl) else if (TYPE_P (purpose)) purpose = tsubst (purpose, args, complain, in_decl); else - purpose = tsubst_copy_and_build (purpose, args, complain, in_decl); + purpose = tsubst_expr (purpose, args, complain, in_decl); if (purpose == error_mark_node || purposevec == error_mark_node) return error_mark_node; @@ -15883,7 +15877,7 @@ tsubst_tree_list (tree t, tree args, tsubst_flags_t complain, tree in_decl) else if (TYPE_P (value)) value = tsubst (value, args, complain, in_decl); else - value = tsubst_copy_and_build (value, args, complain, in_decl); + value = tsubst_expr (value, args, complain, in_decl); if (value == error_mark_node || valuevec == error_mark_node) return error_mark_node; @@ -15895,7 +15889,7 @@ tsubst_tree_list (tree t, tree args, tsubst_flags_t complain, tree in_decl) else if (TYPE_P (chain)) chain = tsubst (chain, args, complain, in_decl); else - chain = tsubst_copy_and_build (chain, args, complain, in_decl); + chain = tsubst_expr (chain, args, complain, in_decl); if (chain == error_mark_node) return error_mark_node; @@ -16108,7 +16102,7 @@ tsubst (tree t, tree args, tsubst_flags_t complain, tree in_decl) if (template_placeholder_p (t)) { tree tmpl = 
CLASS_PLACEHOLDER_TEMPLATE (t); - tmpl = tsubst_copy (tmpl, args, complain, in_decl); + tmpl = tsubst_expr (tmpl, args, complain, in_decl); if (TREE_CODE (tmpl) == TEMPLATE_TEMPLATE_PARM) tmpl = TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL (tmpl); @@ -16592,7 +16586,7 @@ tsubst (tree t, tree args, tsubst_flags_t complain, tree in_decl) if (ctx == error_mark_node) return error_mark_node; - tree f = tsubst_copy (TYPENAME_TYPE_FULLNAME (t), args, + tree f = tsubst_name (TYPENAME_TYPE_FULLNAME (t), args, complain, in_decl); if (f == error_mark_node) return error_mark_node; @@ -16700,8 +16694,8 @@ tsubst (tree t, tree args, tsubst_flags_t complain, tree in_decl) ++cp_unevaluated_operand; ++c_inhibit_evaluation_warnings; - type = tsubst_copy_and_build (DECLTYPE_TYPE_EXPR (t), args, - complain|tf_decltype, in_decl); + type = tsubst_expr (DECLTYPE_TYPE_EXPR (t), args, + complain|tf_decltype, in_decl); --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; @@ -16736,7 +16730,7 @@ tsubst (tree t, tree args, tsubst_flags_t complain, tree in_decl) if (TYPE_P (type1)) type1 = tsubst (type1, args, complain, in_decl); else - type1 = tsubst_copy_and_build (type1, args, complain, in_decl); + type1 = tsubst_expr (type1, args, complain, in_decl); tree type2 = tsubst (TRAIT_TYPE_TYPE2 (t), args, complain, in_decl); type = finish_trait_type (TRAIT_TYPE_KIND (t), type1, type2, complain); return cp_build_qualified_type (type, @@ -16780,6 +16774,15 @@ tsubst_scope (tree t, tree args, tsubst_flags_t complain, tree in_decl) return tsubst (t, args, complain | tf_qualifying_scope, in_decl); } +/* Convenience wrapper over tsubst for substituting into an id-expression + without resolving its terminal name. */ + +static tree +tsubst_name (tree t, tree args, tsubst_flags_t complain, tree in_decl) +{ + return tsubst_expr (t, args, complain | tf_no_name_lookup, in_decl); +} + /* OLDFNS is a lookup set of member functions from some class template, and NEWFNS is a lookup set of member functions from NEWTYPE, a specialization of that class template. Return the subset of NEWFNS which are @@ -17045,7 +17048,7 @@ tsubst_qualified_id (tree qualified_id, tree args, if (args) { scope = tsubst_scope (scope, args, complain, in_decl); - expr = tsubst_copy (name, args, complain, in_decl); + expr = tsubst_name (name, args, complain, in_decl); } else expr = name; @@ -17277,707 +17280,6 @@ maybe_dependent_member_ref (tree t, tree args, tsubst_flags_t complain, TREE_CODE (t) == TEMPLATE_DECL); } -/* Like tsubst, but deals with expressions. This function just replaces - template parms; to finish processing the resultant expression, use - tsubst_copy_and_build or tsubst_expr. */ - -static tree -tsubst_copy (tree t, tree args, tsubst_flags_t complain, tree in_decl) -{ - enum tree_code code; - tree r; - - if (t == NULL_TREE || t == error_mark_node || args == NULL_TREE) - return t; - - if (TYPE_P (t)) - return tsubst (t, args, complain, in_decl); - - if (tree d = maybe_dependent_member_ref (t, args, complain, in_decl)) - return d; - - code = TREE_CODE (t); - - switch (code) - { - case PARM_DECL: - r = retrieve_local_specialization (t); - - if (r == NULL_TREE) - { - /* We get here for a use of 'this' in an NSDMI. */ - if (DECL_NAME (t) == this_identifier && current_class_ptr) - return current_class_ptr; - - /* This can happen for a parameter name used later in a function - declaration (such as in a late-specified return type). Just - make a dummy decl, since it's only used for its type. 
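The new tsubst_name wrapper above is simply tsubst_expr with tf_no_name_lookup added, so an id-expression is substituted without resolving its terminal name. A hedged sketch of the shape of its use, modeled on the SCOPE_REF handling elsewhere in this patch (the function and variable names here are invented):

    /* Sketch, not from the patch: substitute a qualified-id's scope and
       name, leaving the terminal name unresolved for the caller.  */
    static tree
    subst_qualified_parts (tree scope, tree name, tree args,
                           tsubst_flags_t complain, tree in_decl)
    {
      scope = tsubst_scope (scope, args, complain, in_decl);
      /* tf_no_name_lookup (via tsubst_name) keeps NAME unresolved.  */
      name = tsubst_name (name, args, complain, in_decl);
      return build_qualified_name (/*type=*/NULL_TREE, scope, name,
                                   /*template_p=*/false);
    }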
*/ - gcc_assert (cp_unevaluated_operand); - r = tsubst_decl (t, args, complain); - /* Give it the template pattern as its context; its true context - hasn't been instantiated yet and this is good enough for - mangling. */ - DECL_CONTEXT (r) = DECL_CONTEXT (t); - } - - if (TREE_CODE (r) == ARGUMENT_PACK_SELECT) - r = argument_pack_select_arg (r); - if (!mark_used (r, complain) && !(complain & tf_error)) - return error_mark_node; - return r; - - case CONST_DECL: - { - tree enum_type; - tree v; - - if (DECL_TEMPLATE_PARM_P (t)) - return tsubst_copy (DECL_INITIAL (t), args, complain, in_decl); - if (!uses_template_parms (DECL_CONTEXT (t))) - return t; - - /* Unfortunately, we cannot just call lookup_name here. - Consider: - - template <int I> int f() { - enum E { a = I }; - struct S { void g() { E e = a; } }; - }; - - When we instantiate f<7>::S::g(), say, lookup_name is not - clever enough to find f<7>::a. */ - enum_type - = tsubst_aggr_type (DECL_CONTEXT (t), args, complain, in_decl, - /*entering_scope=*/0); - - for (v = TYPE_VALUES (enum_type); - v != NULL_TREE; - v = TREE_CHAIN (v)) - if (TREE_PURPOSE (v) == DECL_NAME (t)) - return TREE_VALUE (v); - - /* We didn't find the name. That should never happen; if - name-lookup found it during preliminary parsing, we - should find it again here during instantiation. */ - gcc_unreachable (); - } - return t; - - case FIELD_DECL: - if (DECL_CONTEXT (t)) - { - tree ctx; - - ctx = tsubst_aggr_type (DECL_CONTEXT (t), args, complain, in_decl, - /*entering_scope=*/1); - if (ctx != DECL_CONTEXT (t)) - { - tree r = lookup_field (ctx, DECL_NAME (t), 0, false); - if (!r) - { - if (complain & tf_error) - error ("using invalid field %qD", t); - return error_mark_node; - } - return r; - } - } - - return t; - - case VAR_DECL: - case FUNCTION_DECL: - if (DECL_LANG_SPECIFIC (t) && DECL_TEMPLATE_INFO (t)) - r = tsubst (t, args, complain, in_decl); - else if (DECL_LOCAL_DECL_P (t)) - { - /* Local specialization will usually have been created when - we instantiated the DECL_EXPR_DECL. */ - r = retrieve_local_specialization (t); - if (!r) - { - /* We're in a generic lambda referencing a local extern - from an outer block-scope of a non-template. */ - gcc_checking_assert (LAMBDA_FUNCTION_P (current_function_decl)); - r = t; - } - } - else if (local_variable_p (t) - && uses_template_parms (DECL_CONTEXT (t))) - { - r = retrieve_local_specialization (t); - if (r == NULL_TREE) - { - /* First try name lookup to find the instantiation. */ - r = lookup_name (DECL_NAME (t)); - if (r) - { - if (!VAR_P (r)) - { - /* During error-recovery we may find a non-variable, - even an OVERLOAD: just bail out and avoid ICEs and - duplicate diagnostics (c++/62207). */ - gcc_assert (seen_error ()); - return error_mark_node; - } - if (!is_capture_proxy (r)) - { - /* Make sure the one we found is the one we want. */ - tree ctx = enclosing_instantiation_of (DECL_CONTEXT (t)); - if (ctx != DECL_CONTEXT (r)) - r = NULL_TREE; - } - } - - if (r) - /* OK */; - else - { - /* This can happen for a variable used in a - late-specified return type of a local lambda, or for a - local static or constant. Building a new VAR_DECL - should be OK in all those cases. */ - r = tsubst_decl (t, args, complain); - if (local_specializations) - /* Avoid infinite recursion (79640). */ - register_local_specialization (r, t); - if (decl_maybe_constant_var_p (r)) - { - /* We can't call cp_finish_decl, so handle the - initializer by hand. 
*/ - tree init = tsubst_init (DECL_INITIAL (t), r, args, - complain, in_decl); - if (!processing_template_decl) - init = maybe_constant_init (init); - if (processing_template_decl - ? potential_constant_expression (init) - : reduced_constant_expression_p (init)) - DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (r) - = TREE_CONSTANT (r) = true; - DECL_INITIAL (r) = init; - if (tree auto_node = type_uses_auto (TREE_TYPE (r))) - TREE_TYPE (r) - = do_auto_deduction (TREE_TYPE (r), init, auto_node, - complain, adc_variable_type); - } - gcc_assert (cp_unevaluated_operand - || processing_contract_condition - || TREE_STATIC (r) - || decl_constant_var_p (r) - || seen_error ()); - if (!processing_template_decl - && !TREE_STATIC (r)) - r = process_outer_var_ref (r, complain); - } - /* Remember this for subsequent uses. */ - if (local_specializations) - register_local_specialization (r, t); - } - if (TREE_CODE (r) == ARGUMENT_PACK_SELECT) - r = argument_pack_select_arg (r); - } - else - r = t; - if (!mark_used (r, complain)) - return error_mark_node; - return r; - - case NAMESPACE_DECL: - return t; - - case OVERLOAD: - return t; - - case BASELINK: - return tsubst_baselink (t, current_nonlambda_class_type (), - args, complain, in_decl); - - case TEMPLATE_DECL: - if (DECL_TEMPLATE_TEMPLATE_PARM_P (t)) - return tsubst (TREE_TYPE (DECL_TEMPLATE_RESULT (t)), - args, complain, in_decl); - else if (DECL_FUNCTION_TEMPLATE_P (t) && DECL_MEMBER_TEMPLATE_P (t)) - return tsubst (t, args, complain, in_decl); - else if (DECL_CLASS_SCOPE_P (t) - && uses_template_parms (DECL_CONTEXT (t))) - { - /* Template template argument like the following example need - special treatment: - - template <template <class> class TT> struct C {}; - template <class T> struct D { - template <class U> struct E {}; - C<E> c; // #1 - }; - D<int> d; // #2 - - We are processing the template argument `E' in #1 for - the template instantiation #2. Originally, `E' is a - TEMPLATE_DECL with `D<T>' as its DECL_CONTEXT. Now we - have to substitute this with one having context `D<int>'. */ - - tree context = tsubst_aggr_type (DECL_CONTEXT (t), args, complain, - in_decl, /*entering_scope=*/true); - return lookup_field (context, DECL_NAME(t), 0, false); - } - else - /* Ordinary template template argument. */ - return t; - - case NON_LVALUE_EXPR: - case VIEW_CONVERT_EXPR: - { - /* Handle location wrappers by substituting the wrapped node - first, *then* reusing the resulting type. Doing the type - first ensures that we handle template parameters and - parameter pack expansions. */ - if (location_wrapper_p (t)) - { - tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, - complain, in_decl); - return maybe_wrap_with_location (op0, EXPR_LOCATION (t)); - } - tree op = TREE_OPERAND (t, 0); - /* force_paren_expr can also create a VIEW_CONVERT_EXPR. */ - if (code == VIEW_CONVERT_EXPR && REF_PARENTHESIZED_P (t)) - { - op = tsubst_copy (op, args, complain, in_decl); - op = build1 (code, TREE_TYPE (op), op); - REF_PARENTHESIZED_P (op) = true; - return op; - } - /* We shouldn't see any other uses of these in templates - (tsubst_copy_and_build handles C++20 tparm object wrappers). 
*/ - gcc_unreachable (); - } - - case CAST_EXPR: - case REINTERPRET_CAST_EXPR: - case CONST_CAST_EXPR: - case STATIC_CAST_EXPR: - case DYNAMIC_CAST_EXPR: - case IMPLICIT_CONV_EXPR: - CASE_CONVERT: - { - tsubst_flags_t tcomplain = complain; - if (code == CAST_EXPR) - tcomplain |= tf_tst_ok; - tree type = tsubst (TREE_TYPE (t), args, tcomplain, in_decl); - tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); - return build1 (code, type, op0); - } - - case BIT_CAST_EXPR: - { - tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); - tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); - r = build_min (BIT_CAST_EXPR, type, op0); - SET_EXPR_LOCATION (r, EXPR_LOCATION (t)); - return r; - } - - case SIZEOF_EXPR: - if (PACK_EXPANSION_P (TREE_OPERAND (t, 0)) - || ARGUMENT_PACK_P (TREE_OPERAND (t, 0))) - { - tree expanded, op = TREE_OPERAND (t, 0); - int len = 0; - - if (SIZEOF_EXPR_TYPE_P (t)) - op = TREE_TYPE (op); - - ++cp_unevaluated_operand; - ++c_inhibit_evaluation_warnings; - /* We only want to compute the number of arguments. */ - if (PACK_EXPANSION_P (op)) - expanded = tsubst_pack_expansion (op, args, complain, in_decl); - else - expanded = tsubst_template_args (ARGUMENT_PACK_ARGS (op), - args, complain, in_decl); - --cp_unevaluated_operand; - --c_inhibit_evaluation_warnings; - - if (TREE_CODE (expanded) == TREE_VEC) - { - len = TREE_VEC_LENGTH (expanded); - /* Set TREE_USED for the benefit of -Wunused. */ - for (int i = 0; i < len; i++) - if (DECL_P (TREE_VEC_ELT (expanded, i))) - TREE_USED (TREE_VEC_ELT (expanded, i)) = true; - } - - if (expanded == error_mark_node) - return error_mark_node; - else if (PACK_EXPANSION_P (expanded) - || (TREE_CODE (expanded) == TREE_VEC - && pack_expansion_args_count (expanded))) - - { - if (PACK_EXPANSION_P (expanded)) - /* OK. */; - else if (TREE_VEC_LENGTH (expanded) == 1) - expanded = TREE_VEC_ELT (expanded, 0); - else - expanded = make_argument_pack (expanded); - - if (TYPE_P (expanded)) - return cxx_sizeof_or_alignof_type (input_location, - expanded, SIZEOF_EXPR, - false, - complain & tf_error); - else - return cxx_sizeof_or_alignof_expr (input_location, - expanded, SIZEOF_EXPR, - false, - complain & tf_error); - } - else - return build_int_cst (size_type_node, len); - } - if (SIZEOF_EXPR_TYPE_P (t)) - { - r = tsubst (TREE_TYPE (TREE_OPERAND (t, 0)), - args, complain, in_decl); - r = build1 (NOP_EXPR, r, error_mark_node); - r = build1 (SIZEOF_EXPR, - tsubst (TREE_TYPE (t), args, complain, in_decl), r); - SIZEOF_EXPR_TYPE_P (r) = 1; - return r; - } - /* Fall through */ - - case INDIRECT_REF: - case NEGATE_EXPR: - case TRUTH_NOT_EXPR: - case BIT_NOT_EXPR: - case ADDR_EXPR: - case UNARY_PLUS_EXPR: /* Unary + */ - case ALIGNOF_EXPR: - case AT_ENCODE_EXPR: - case ARROW_EXPR: - case THROW_EXPR: - case TYPEID_EXPR: - case REALPART_EXPR: - case IMAGPART_EXPR: - case PAREN_EXPR: - { - tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); - tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); - r = build1_loc (EXPR_LOCATION (t), code, type, op0); - if (code == ALIGNOF_EXPR) - ALIGNOF_EXPR_STD_P (r) = ALIGNOF_EXPR_STD_P (t); - /* For addresses of immediate functions ensure we have EXPR_LOCATION - set for possible later diagnostics. 
*/ - if (code == ADDR_EXPR - && EXPR_LOCATION (r) == UNKNOWN_LOCATION - && TREE_CODE (op0) == FUNCTION_DECL - && DECL_IMMEDIATE_FUNCTION_P (op0)) - SET_EXPR_LOCATION (r, input_location); - return r; - } - - case EXCESS_PRECISION_EXPR: - { - tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); - tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); - if (TREE_CODE (op0) == EXCESS_PRECISION_EXPR) - { - gcc_checking_assert (same_type_p (type, TREE_TYPE (op0))); - return op0; - } - return build1_loc (EXPR_LOCATION (t), code, type, op0); - } - - case COMPONENT_REF: - { - tree object; - tree name; - - object = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); - name = TREE_OPERAND (t, 1); - if (TREE_CODE (name) == BIT_NOT_EXPR) - { - name = tsubst_copy (TREE_OPERAND (name, 0), args, - complain, in_decl); - name = build1 (BIT_NOT_EXPR, NULL_TREE, name); - } - else if (TREE_CODE (name) == SCOPE_REF - && TREE_CODE (TREE_OPERAND (name, 1)) == BIT_NOT_EXPR) - { - tree base = tsubst_copy (TREE_OPERAND (name, 0), args, - complain, in_decl); - name = TREE_OPERAND (name, 1); - name = tsubst_copy (TREE_OPERAND (name, 0), args, - complain, in_decl); - name = build1 (BIT_NOT_EXPR, NULL_TREE, name); - name = build_qualified_name (/*type=*/NULL_TREE, - base, name, - /*template_p=*/false); - } - else if (BASELINK_P (name)) - name = tsubst_baselink (name, - non_reference (TREE_TYPE (object)), - args, complain, - in_decl); - else - name = tsubst_copy (name, args, complain, in_decl); - return build_nt (COMPONENT_REF, object, name, NULL_TREE); - } - - case PLUS_EXPR: - case MINUS_EXPR: - case MULT_EXPR: - case TRUNC_DIV_EXPR: - case CEIL_DIV_EXPR: - case FLOOR_DIV_EXPR: - case ROUND_DIV_EXPR: - case EXACT_DIV_EXPR: - case BIT_AND_EXPR: - case BIT_IOR_EXPR: - case BIT_XOR_EXPR: - case TRUNC_MOD_EXPR: - case FLOOR_MOD_EXPR: - case TRUTH_ANDIF_EXPR: - case TRUTH_ORIF_EXPR: - case TRUTH_AND_EXPR: - case TRUTH_OR_EXPR: - case RSHIFT_EXPR: - case LSHIFT_EXPR: - case EQ_EXPR: - case NE_EXPR: - case MAX_EXPR: - case MIN_EXPR: - case LE_EXPR: - case GE_EXPR: - case LT_EXPR: - case GT_EXPR: - case COMPOUND_EXPR: - case DOTSTAR_EXPR: - case MEMBER_REF: - case PREDECREMENT_EXPR: - case PREINCREMENT_EXPR: - case POSTDECREMENT_EXPR: - case POSTINCREMENT_EXPR: - { - tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); - tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); - return build_nt (code, op0, op1); - } - - case SCOPE_REF: - { - tree op0 = tsubst_scope (TREE_OPERAND (t, 0), args, complain, in_decl); - tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); - return build_qualified_name (/*type=*/NULL_TREE, op0, op1, - QUALIFIED_NAME_IS_TEMPLATE (t)); - } - - case ARRAY_REF: - { - tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); - tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); - return build_nt (ARRAY_REF, op0, op1, NULL_TREE, NULL_TREE); - } - - case CALL_EXPR: - { - int n = VL_EXP_OPERAND_LENGTH (t); - tree result = build_vl_exp (CALL_EXPR, n); - int i; - for (i = 0; i < n; i++) - TREE_OPERAND (t, i) = tsubst_copy (TREE_OPERAND (t, i), args, - complain, in_decl); - return result; - } - - case COND_EXPR: - case MODOP_EXPR: - case PSEUDO_DTOR_EXPR: - case VEC_PERM_EXPR: - { - tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); - tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); - tree op2 = tsubst_copy (TREE_OPERAND (t, 2), args, complain, in_decl); - r 
= build_nt (code, op0, op1, op2); - copy_warning (r, t); - return r; - } - - case NEW_EXPR: - { - tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); - tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); - tree op2 = tsubst_copy (TREE_OPERAND (t, 2), args, complain, in_decl); - r = build_nt (code, op0, op1, op2); - NEW_EXPR_USE_GLOBAL (r) = NEW_EXPR_USE_GLOBAL (t); - return r; - } - - case DELETE_EXPR: - { - tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); - tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); - r = build_nt (code, op0, op1); - DELETE_EXPR_USE_GLOBAL (r) = DELETE_EXPR_USE_GLOBAL (t); - DELETE_EXPR_USE_VEC (r) = DELETE_EXPR_USE_VEC (t); - return r; - } - - case TEMPLATE_ID_EXPR: - { - /* Substituted template arguments */ - tree tmpl = TREE_OPERAND (t, 0); - tree targs = TREE_OPERAND (t, 1); - - tmpl = tsubst_copy (tmpl, args, complain, in_decl); - if (targs) - targs = tsubst_template_args (targs, args, complain, in_decl); - - if (variable_template_p (tmpl)) - return lookup_template_variable (tmpl, targs, complain); - else - return lookup_template_function (tmpl, targs); - } - - case TREE_LIST: - { - tree purpose, value, chain; - - if (t == void_list_node) - return t; - - purpose = TREE_PURPOSE (t); - if (purpose) - purpose = tsubst_copy (purpose, args, complain, in_decl); - value = TREE_VALUE (t); - if (value) - value = tsubst_copy (value, args, complain, in_decl); - chain = TREE_CHAIN (t); - if (chain && chain != void_type_node) - chain = tsubst_copy (chain, args, complain, in_decl); - if (purpose == TREE_PURPOSE (t) - && value == TREE_VALUE (t) - && chain == TREE_CHAIN (t)) - return t; - return tree_cons (purpose, value, chain); - } - - case TEMPLATE_PARM_INDEX: - case TYPE_DECL: - return tsubst (t, args, complain, in_decl); - - case USING_DECL: - t = DECL_NAME (t); - /* Fall through. */ - case IDENTIFIER_NODE: - if (IDENTIFIER_CONV_OP_P (t)) - { - tree new_type = tsubst (TREE_TYPE (t), args, complain, in_decl); - return make_conv_op_name (new_type); - } - else - return t; - - case CONSTRUCTOR: - /* This is handled by tsubst_copy_and_build. */ - gcc_unreachable (); - - case VA_ARG_EXPR: - { - tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); - tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); - return build_x_va_arg (EXPR_LOCATION (t), op0, type); - } - - case CLEANUP_POINT_EXPR: - /* We shouldn't have built any of these during initial template - generation. Instead, they should be built during instantiation - in response to the saved STMT_IS_FULL_EXPR_P setting. */ - gcc_unreachable (); - - case OFFSET_REF: - { - tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); - tree op0 = tsubst_copy (TREE_OPERAND (t, 0), args, complain, in_decl); - tree op1 = tsubst_copy (TREE_OPERAND (t, 1), args, complain, in_decl); - r = build2 (code, type, op0, op1); - PTRMEM_OK_P (r) = PTRMEM_OK_P (t); - if (!mark_used (TREE_OPERAND (r, 1), complain) - && !(complain & tf_error)) - return error_mark_node; - return r; - } - - case EXPR_PACK_EXPANSION: - error ("invalid use of pack expansion expression"); - return error_mark_node; - - case NONTYPE_ARGUMENT_PACK: - error ("use %<...%> to expand argument pack"); - return error_mark_node; - - case VOID_CST: - gcc_checking_assert (t == void_node && VOID_TYPE_P (TREE_TYPE (t))); - return t; - - case INTEGER_CST: - case REAL_CST: - case COMPLEX_CST: - case VECTOR_CST: - { - /* Instantiate any typedefs in the type. 
*/ - tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); - r = fold_convert (type, t); - gcc_assert (TREE_CODE (r) == code); - return r; - } - - case STRING_CST: - { - tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); - r = t; - if (type != TREE_TYPE (t)) - { - r = copy_node (t); - TREE_TYPE (r) = type; - } - return r; - } - - case PTRMEM_CST: - /* These can sometimes show up in a partial instantiation, but never - involve template parms. */ - gcc_assert (!uses_template_parms (t)); - return t; - - case UNARY_LEFT_FOLD_EXPR: - return tsubst_unary_left_fold (t, args, complain, in_decl); - case UNARY_RIGHT_FOLD_EXPR: - return tsubst_unary_right_fold (t, args, complain, in_decl); - case BINARY_LEFT_FOLD_EXPR: - return tsubst_binary_left_fold (t, args, complain, in_decl); - case BINARY_RIGHT_FOLD_EXPR: - return tsubst_binary_right_fold (t, args, complain, in_decl); - case PREDICT_EXPR: - return t; - - case DEBUG_BEGIN_STMT: - /* ??? There's no point in copying it for now, but maybe some - day it will contain more information, such as a pointer back - to the containing function, inlined copy or so. */ - return t; - - case CO_AWAIT_EXPR: - return tsubst_expr (t, args, complain, in_decl); - - default: - /* We shouldn't get here, but keep going if !flag_checking. */ - if (flag_checking) - gcc_unreachable (); - return t; - } -} - /* Helper function for tsubst_omp_clauses, used for instantiation of OMP_CLAUSE_DECL of clauses. */ @@ -18008,11 +17310,11 @@ tsubst_omp_clause_decl (tree decl, tree args, tsubst_flags_t complain, DECL_CONTEXT (TREE_VEC_ELT (*tp, 0)) = current_function_decl; pushdecl (TREE_VEC_ELT (*tp, 0)); TREE_VEC_ELT (*tp, 1) - = tsubst_expr (TREE_VEC_ELT (it, 1), args, complain, in_decl); + = tsubst_stmt (TREE_VEC_ELT (it, 1), args, complain, in_decl); TREE_VEC_ELT (*tp, 2) - = tsubst_expr (TREE_VEC_ELT (it, 2), args, complain, in_decl); + = tsubst_stmt (TREE_VEC_ELT (it, 2), args, complain, in_decl); TREE_VEC_ELT (*tp, 3) - = tsubst_expr (TREE_VEC_ELT (it, 3), args, complain, in_decl); + = tsubst_stmt (TREE_VEC_ELT (it, 3), args, complain, in_decl); TREE_CHAIN (*tp) = NULL_TREE; tp = &TREE_CHAIN (*tp); } @@ -18034,8 +17336,8 @@ tsubst_omp_clause_decl (tree decl, tree args, tsubst_flags_t complain, if (TREE_CODE (decl) == TREE_LIST) { tree low_bound - = tsubst_expr (TREE_PURPOSE (decl), args, complain, in_decl); - tree length = tsubst_expr (TREE_VALUE (decl), args, complain, in_decl); + = tsubst_stmt (TREE_PURPOSE (decl), args, complain, in_decl); + tree length = tsubst_stmt (TREE_VALUE (decl), args, complain, in_decl); tree chain = tsubst_omp_clause_decl (TREE_CHAIN (decl), args, complain, in_decl, NULL); if (TREE_PURPOSE (decl) == low_bound @@ -18047,7 +17349,7 @@ tsubst_omp_clause_decl (tree decl, tree args, tsubst_flags_t complain, = OMP_CLAUSE_DOACROSS_SINK_NEGATIVE (decl); return ret; } - tree ret = tsubst_expr (decl, args, complain, in_decl); + tree ret = tsubst_stmt (decl, args, complain, in_decl); /* Undo convert_from_reference tsubst_expr could have called. 
*/ if (decl && REFERENCE_REF_P (ret) @@ -18078,7 +17380,7 @@ tsubst_omp_clauses (tree clauses, enum c_omp_region_type ort, if (OMP_CLAUSE_LASTPRIVATE_STMT (oc)) { OMP_CLAUSE_LASTPRIVATE_STMT (nc) = push_stmt_list (); - tsubst_expr (OMP_CLAUSE_LASTPRIVATE_STMT (oc), args, + tsubst_stmt (OMP_CLAUSE_LASTPRIVATE_STMT (oc), args, complain, in_decl); OMP_CLAUSE_LASTPRIVATE_STMT (nc) = pop_stmt_list (OMP_CLAUSE_LASTPRIVATE_STMT (nc)); @@ -18111,7 +17413,7 @@ tsubst_omp_clauses (tree clauses, enum c_omp_region_type ort, case OMP_CLAUSE_NUM_TEAMS: if (OMP_CLAUSE_NUM_TEAMS_LOWER_EXPR (oc)) OMP_CLAUSE_NUM_TEAMS_LOWER_EXPR (nc) - = tsubst_expr (OMP_CLAUSE_NUM_TEAMS_LOWER_EXPR (oc), args, + = tsubst_stmt (OMP_CLAUSE_NUM_TEAMS_LOWER_EXPR (oc), args, complain, in_decl); /* FALLTHRU */ case OMP_CLAUSE_TILE: @@ -18140,7 +17442,7 @@ tsubst_omp_clauses (tree clauses, enum c_omp_region_type ort, case OMP_CLAUSE_WAIT: case OMP_CLAUSE_DETACH: OMP_CLAUSE_OPERAND (nc, 0) - = tsubst_expr (OMP_CLAUSE_OPERAND (oc, 0), args, complain, in_decl); + = tsubst_stmt (OMP_CLAUSE_OPERAND (oc, 0), args, complain, in_decl); break; case OMP_CLAUSE_REDUCTION: case OMP_CLAUSE_IN_REDUCTION: @@ -18170,16 +17472,16 @@ tsubst_omp_clauses (tree clauses, enum c_omp_region_type ort, = tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain, in_decl, NULL); OMP_CLAUSE_OPERAND (nc, 1) - = tsubst_expr (OMP_CLAUSE_OPERAND (oc, 1), args, complain, in_decl); + = tsubst_stmt (OMP_CLAUSE_OPERAND (oc, 1), args, complain, in_decl); break; case OMP_CLAUSE_ALLOCATE: OMP_CLAUSE_DECL (nc) = tsubst_omp_clause_decl (OMP_CLAUSE_DECL (oc), args, complain, in_decl, NULL); OMP_CLAUSE_OPERAND (nc, 1) - = tsubst_expr (OMP_CLAUSE_OPERAND (oc, 1), args, complain, in_decl); + = tsubst_stmt (OMP_CLAUSE_OPERAND (oc, 1), args, complain, in_decl); OMP_CLAUSE_OPERAND (nc, 2) - = tsubst_expr (OMP_CLAUSE_OPERAND (oc, 2), args, complain, in_decl); + = tsubst_stmt (OMP_CLAUSE_OPERAND (oc, 2), args, complain, in_decl); break; case OMP_CLAUSE_LINEAR: OMP_CLAUSE_DECL (nc) @@ -18196,7 +17498,7 @@ tsubst_omp_clauses (tree clauses, enum c_omp_region_type ort, complain, in_decl, NULL); else OMP_CLAUSE_LINEAR_STEP (nc) - = tsubst_expr (OMP_CLAUSE_LINEAR_STEP (oc), args, + = tsubst_stmt (OMP_CLAUSE_LINEAR_STEP (oc), args, complain, in_decl); break; case OMP_CLAUSE_NOWAIT: @@ -18308,7 +17610,7 @@ tsubst_omp_clauses (tree clauses, enum c_omp_region_type ort, return new_clauses; } -/* Like tsubst_copy_and_build, but unshare TREE_LIST nodes. */ +/* Like tsubst_expr, but unshare TREE_LIST nodes. */ static tree tsubst_copy_asm_operands (tree t, tree args, tsubst_flags_t complain, @@ -18322,7 +17624,7 @@ tsubst_copy_asm_operands (tree t, tree args, tsubst_flags_t complain, return t; if (TREE_CODE (t) != TREE_LIST) - return tsubst_copy_and_build (t, args, complain, in_decl); + return tsubst_expr (t, args, complain, in_decl); if (t == void_list_node) return t; @@ -18366,7 +17668,7 @@ tsubst_omp_for_iterator (tree t, int i, tree declv, tree &orig_declv, tree args, tsubst_flags_t complain, tree in_decl) { #define RECUR(NODE) \ - tsubst_expr ((NODE), args, complain, in_decl) + tsubst_stmt ((NODE), args, complain, in_decl) tree decl, init, cond = NULL_TREE, incr = NULL_TREE; bool ret = false; @@ -18849,15 +18151,14 @@ dependent_operand_p (tree t) return r; } -/* Like tsubst_copy for expressions, etc. but also does semantic - processing. */ +/* A superset of tsubst_expr that also handles statement trees. 
*/ -tree -tsubst_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl) +static tree +tsubst_stmt (tree t, tree args, tsubst_flags_t complain, tree in_decl) { #define RETURN(EXP) do { r = (EXP); goto out; } while(0) #define RECUR(NODE) \ - tsubst_expr ((NODE), args, complain, in_decl) + tsubst_stmt ((NODE), args, complain, in_decl) tree stmt, tmp; tree r; @@ -18894,16 +18195,6 @@ tsubst_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl) finish_co_return_stmt (input_location, RECUR (TREE_OPERAND (t, 0))); break; - case CO_YIELD_EXPR: - stmt = finish_co_yield_expr (input_location, - RECUR (TREE_OPERAND (t, 0))); - RETURN (stmt); - - case CO_AWAIT_EXPR: - stmt = finish_co_await_expr (input_location, - RECUR (TREE_OPERAND (t, 0))); - RETURN (stmt); - case EXPR_STMT: tmp = RECUR (EXPR_STMT_EXPR (t)); if (EXPR_STMT_STMT_EXPR_RESULT (t)) @@ -19924,7 +19215,7 @@ tsubst_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl) default: gcc_assert (!STATEMENT_CODE_P (TREE_CODE (t))); - RETURN (tsubst_copy_and_build (t, args, complain, in_decl)); + RETURN (tsubst_expr (t, args, complain, in_decl)); } RETURN (NULL_TREE); @@ -19971,7 +19262,7 @@ tsubst_omp_udr (tree t, tree args, tsubst_flags_t complain, tree in_decl) DECL_CONTEXT (omp_in) = current_function_decl; keep_next_level (true); tree block = begin_omp_structured_block (); - tsubst_expr (stmts[2], args, complain, in_decl); + tsubst_stmt (stmts[2], args, complain, in_decl); block = finish_omp_structured_block (block); block = maybe_cleanup_point_expr_void (block); add_decl_expr (omp_out); @@ -19991,7 +19282,7 @@ tsubst_omp_udr (tree t, tree args, tsubst_flags_t complain, tree in_decl) DECL_CONTEXT (omp_orig) = current_function_decl; keep_next_level (true); tree block = begin_omp_structured_block (); - tsubst_expr (stmts[5], args, complain, in_decl); + tsubst_stmt (stmts[5], args, complain, in_decl); block = finish_omp_structured_block (block); block = maybe_cleanup_point_expr_void (block); cp_walk_tree (&block, cp_remove_omp_priv_cleanup_stmt, omp_priv, NULL); @@ -20015,7 +19306,7 @@ tsubst_non_call_postfix_expression (tree t, tree args, t = tsubst_qualified_id (t, args, complain, in_decl, /*done=*/false, /*address_p=*/false); else - t = tsubst_copy_and_build (t, args, complain, in_decl); + t = tsubst_expr (t, args, complain, in_decl); return t; } @@ -20088,7 +19379,7 @@ tsubst_lambda_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl) if (PACK_EXPANSION_P (init)) init = tsubst_pack_expansion (init, args, complain, in_decl); else - init = tsubst_copy_and_build (init, args, complain, in_decl); + init = tsubst_expr (init, args, complain, in_decl); if (init == error_mark_node) return error_mark_node; @@ -20268,7 +19559,7 @@ tsubst_lambda_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl) need another to confuse NRV (91217). */ saved = BIND_EXPR_BODY (saved); - tsubst_expr (saved, args, complain, r); + tsubst_stmt (saved, args, complain, r); finish_lambda_function (body); @@ -20353,12 +19644,11 @@ maybe_fold_fn_template_args (tree fn, tsubst_flags_t complain) return fold_targs_r (targs, complain); } -/* Helper function for tsubst_copy_and_build CALL_EXPR and ARRAY_REF - handling. */ +/* Helper function for tsubst_expr CALL_EXPR and ARRAY_REF handling. 
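The renames above consolidate the substitution entry points: tsubst_copy_and_build becomes tsubst_expr, the old statement-walking tsubst_expr becomes tsubst_stmt, and tsubst_stmt's default case hands anything that is not a statement to tsubst_expr. A rough sketch of how call sites split between the two (the identifiers below are placeholders):

    /* Sketch only: a caller substituting a function body goes through
       tsubst_stmt, while a lone expression such as a default argument or
       a noexcept operand goes through tsubst_expr.  */
    tree body = tsubst_stmt (DECL_SAVED_TREE (pattern), args,
                             tf_warning_or_error, in_decl);
    tree arg  = tsubst_expr (default_arg, args, tf_warning_or_error, in_decl);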
*/ static void -tsubst_copy_and_build_call_args (tree t, tree args, tsubst_flags_t complain, - tree in_decl, releasing_vec &call_args) +tsubst_call_args (tree t, tree args, tsubst_flags_t complain, + tree in_decl, releasing_vec &call_args) { unsigned int nargs = call_expr_nargs (t); for (unsigned int i = 0; i < nargs; ++i) @@ -20366,8 +19656,7 @@ tsubst_copy_and_build_call_args (tree t, tree args, tsubst_flags_t complain, tree arg = CALL_EXPR_ARG (t, i); if (!PACK_EXPANSION_P (arg)) - vec_safe_push (call_args, - tsubst_copy_and_build (arg, args, complain, in_decl)); + vec_safe_push (call_args, tsubst_expr (arg, args, complain, in_decl)); else { /* Expand the pack expansion and push each entry onto CALL_ARGS. */ @@ -20396,14 +19685,11 @@ tsubst_copy_and_build_call_args (tree t, tree args, tsubst_flags_t complain, analysis. */ tree -tsubst_copy_and_build (tree t, - tree args, - tsubst_flags_t complain, - tree in_decl) +tsubst_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl) { #define RETURN(EXP) do { retval = (EXP); goto out; } while(0) #define RECUR(NODE) \ - tsubst_copy_and_build (NODE, args, complain, in_decl) + tsubst_expr (NODE, args, complain, in_decl) tree retval, op1; location_t save_loc; @@ -20420,6 +19706,15 @@ tsubst_copy_and_build (tree t, tsubst_flags_t decltype_flag = (complain & tf_decltype); complain &= ~tf_decltype; + /* This flag only applies to id-expressions at the top level, and + controls resolution thereof. */ + tsubst_flags_t no_name_lookup_flag = (complain & tf_no_name_lookup); + complain &= ~tf_no_name_lookup; + + if (!no_name_lookup_flag) + if (tree d = maybe_dependent_member_ref (t, args, complain, in_decl)) + return d; + switch (TREE_CODE (t)) { case USING_DECL: @@ -20437,6 +19732,9 @@ tsubst_copy_and_build (tree t, t = make_conv_op_name (new_type); } + if (no_name_lookup_flag) + RETURN (t); + /* Look up the name. */ decl = lookup_name (t); @@ -20470,10 +19768,14 @@ tsubst_copy_and_build (tree t, case TEMPLATE_ID_EXPR: { tree object; - tree templ = tsubst_copy_and_build (TREE_OPERAND (t, 0), args, - complain, in_decl); + tree templ = TREE_OPERAND (t, 0); tree targs = TREE_OPERAND (t, 1); + if (no_name_lookup_flag) + templ = tsubst_name (templ, args, complain, in_decl); + else + templ = tsubst_expr (templ, args, complain, in_decl); + if (targs) targs = tsubst_template_args (targs, args, complain, in_decl); if (targs == error_mark_node) @@ -20505,6 +19807,9 @@ tsubst_copy_and_build (tree t, if (variable_template_p (templ)) { + if (no_name_lookup_flag) + RETURN (lookup_template_variable (templ, targs, complain)); + tree r = lookup_and_finish_template_variable (templ, targs, complain); r = convert_from_reference (r); @@ -20526,6 +19831,8 @@ tsubst_copy_and_build (tree t, if (object) RETURN (build3 (COMPONENT_REF, TREE_TYPE (tid), object, tid, NULL_TREE)); + else if (no_name_lookup_flag) + RETURN (tid); else if (identifier_p (templ)) { /* C++20 P0846: we can encounter an IDENTIFIER_NODE here when @@ -20659,10 +19966,22 @@ tsubst_copy_and_build (tree t, templated_operator_saved_lookups (t), complain|decltype_flag)); + case BIT_NOT_EXPR: + if (identifier_p (TREE_OPERAND (t, 0))) + { + gcc_checking_assert (no_name_lookup_flag); + RETURN (t); + } + else if (TYPE_P (TREE_OPERAND (t, 0))) + { + gcc_checking_assert (no_name_lookup_flag); + tree op0 = tsubst (TREE_OPERAND (t, 0), args, complain, in_decl); + RETURN (build_min_nt_loc (EXPR_LOCATION (t), BIT_NOT_EXPR, op0)); + } + /* Fall through. 
*/ case PREDECREMENT_EXPR: case PREINCREMENT_EXPR: case NEGATE_EXPR: - case BIT_NOT_EXPR: case ABS_EXPR: case TRUTH_NOT_EXPR: case UNARY_PLUS_EXPR: /* Unary + */ @@ -20779,8 +20098,16 @@ tsubst_copy_and_build (tree t, } case SCOPE_REF: - RETURN (tsubst_qualified_id (t, args, complain, in_decl, /*done=*/true, - /*address_p=*/false)); + if (no_name_lookup_flag) + { + tree op0 = tsubst_scope (TREE_OPERAND (t, 0), args, complain, in_decl); + tree op1 = tsubst_name (TREE_OPERAND (t, 1), args, complain, in_decl); + RETURN (build_qualified_name (/*type=*/NULL_TREE, op0, op1, + QUALIFIED_NAME_IS_TEMPLATE (t))); + } + else + RETURN (tsubst_qualified_id (t, args, complain, in_decl, /*done=*/true, + /*address_p=*/false)); case BASELINK: RETURN (tsubst_baselink (t, current_nonlambda_class_type (), @@ -20795,8 +20122,7 @@ tsubst_copy_and_build (tree t, { tree c = TREE_OPERAND (t, 1); releasing_vec index_exp_list; - tsubst_copy_and_build_call_args (c, args, complain, in_decl, - index_exp_list); + tsubst_call_args (c, args, complain, in_decl, index_exp_list); tree r; if (vec_safe_length (index_exp_list) == 1 @@ -20817,7 +20143,61 @@ tsubst_copy_and_build (tree t, case SIZEOF_EXPR: if (PACK_EXPANSION_P (TREE_OPERAND (t, 0)) || ARGUMENT_PACK_P (TREE_OPERAND (t, 0))) - RETURN (tsubst_copy (t, args, complain, in_decl)); + { + tree expanded, op = TREE_OPERAND (t, 0); + int len = 0; + + if (SIZEOF_EXPR_TYPE_P (t)) + op = TREE_TYPE (op); + + ++cp_unevaluated_operand; + ++c_inhibit_evaluation_warnings; + /* We only want to compute the number of arguments. */ + if (PACK_EXPANSION_P (op)) + expanded = tsubst_pack_expansion (op, args, complain, in_decl); + else + expanded = tsubst_template_args (ARGUMENT_PACK_ARGS (op), + args, complain, in_decl); + --cp_unevaluated_operand; + --c_inhibit_evaluation_warnings; + + if (TREE_CODE (expanded) == TREE_VEC) + { + len = TREE_VEC_LENGTH (expanded); + /* Set TREE_USED for the benefit of -Wunused. */ + for (int i = 0; i < len; i++) + if (DECL_P (TREE_VEC_ELT (expanded, i))) + TREE_USED (TREE_VEC_ELT (expanded, i)) = true; + } + + if (expanded == error_mark_node) + RETURN (error_mark_node); + else if (PACK_EXPANSION_P (expanded) + || (TREE_CODE (expanded) == TREE_VEC + && pack_expansion_args_count (expanded))) + + { + if (PACK_EXPANSION_P (expanded)) + /* OK. 
*/; + else if (TREE_VEC_LENGTH (expanded) == 1) + expanded = TREE_VEC_ELT (expanded, 0); + else + expanded = make_argument_pack (expanded); + + if (TYPE_P (expanded)) + RETURN (cxx_sizeof_or_alignof_type (input_location, + expanded, SIZEOF_EXPR, + false, + complain & tf_error)); + else + RETURN (cxx_sizeof_or_alignof_expr (input_location, + expanded, SIZEOF_EXPR, + false, + complain & tf_error)); + } + else + RETURN (build_int_cst (size_type_node, len)); + } /* Fall through */ case ALIGNOF_EXPR: @@ -20844,7 +20224,7 @@ tsubst_copy_and_build (tree t, if (TYPE_P (op1)) op1 = tsubst (op1, args, complain, in_decl); else - op1 = tsubst_copy_and_build (op1, args, complain, in_decl); + op1 = tsubst_expr (op1, args, complain, in_decl); --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; } @@ -20881,7 +20261,7 @@ tsubst_copy_and_build (tree t, op1 = TREE_OPERAND (t, 0); ++cp_unevaluated_operand; ++c_inhibit_evaluation_warnings; - op1 = tsubst_copy_and_build (op1, args, complain, in_decl); + op1 = tsubst_expr (op1, args, complain, in_decl); --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; RETURN (objc_build_encode_expr (op1)); @@ -20892,7 +20272,7 @@ tsubst_copy_and_build (tree t, ++cp_unevaluated_operand; ++c_inhibit_evaluation_warnings; ++cp_noexcept_operand; - op1 = tsubst_copy_and_build (op1, args, complain, in_decl); + op1 = tsubst_expr (op1, args, complain, in_decl); --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; --cp_noexcept_operand; @@ -21000,8 +20380,8 @@ tsubst_copy_and_build (tree t, case COMPOUND_EXPR: { - tree op0 = tsubst_copy_and_build (TREE_OPERAND (t, 0), args, - complain & ~tf_decltype, in_decl); + tree op0 = tsubst_expr (TREE_OPERAND (t, 0), args, + complain & ~tf_decltype, in_decl); RETURN (build_x_compound_expr (EXPR_LOCATION (t), op0, RECUR (TREE_OPERAND (t, 1)), @@ -21061,7 +20441,7 @@ tsubst_copy_and_build (tree t, || (TREE_CODE (function) == TEMPLATE_ID_EXPR && identifier_p (TREE_OPERAND (function, 0))))) { - /* Do nothing; calling tsubst_copy_and_build on an identifier + /* Do nothing; calling tsubst_expr on an identifier would incorrectly perform unqualified lookup again. Note that we can also have an IDENTIFIER_NODE if the earlier @@ -21072,10 +20452,7 @@ tsubst_copy_and_build (tree t, qualified_p = false; if (TREE_CODE (function) == TEMPLATE_ID_EXPR) - /* Use tsubst_copy to substitute through the template arguments - of the template-id without performing unqualified lookup of - the template name. */ - function = tsubst_copy (function, args, complain, in_decl); + function = tsubst_name (function, args, complain, in_decl); } else { @@ -21101,7 +20478,7 @@ tsubst_copy_and_build (tree t, augmenting the overload set via ADL, so during this initial substitution we disable mark_used by setting tf_conv (68942). */ subcomplain |= tf_conv; - function = tsubst_copy_and_build (function, args, subcomplain, in_decl); + function = tsubst_expr (function, args, subcomplain, in_decl); if (BASELINK_P (function)) qualified_p = true; @@ -21109,8 +20486,7 @@ tsubst_copy_and_build (tree t, nargs = call_expr_nargs (t); releasing_vec call_args; - tsubst_copy_and_build_call_args (t, args, complain, in_decl, - call_args); + tsubst_call_args (t, args, complain, in_decl, call_args); /* Stripped-down processing for a call in a thunk. Specifically, in the thunk template for a generic lambda. */ @@ -21202,8 +20578,7 @@ tsubst_copy_and_build (tree t, /* For backwards compatibility and good diagnostics, try the unqualified lookup again if we aren't in SFINAE context. 
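The pack-expansion branch of SIZEOF_EXPR above, carried over from the deleted tsubst_copy, substitutes just enough to count the pack's elements. A source-level illustration of code that exercises it (example only, not from this patch):

    /* sizeof...(Ts) is represented as a SIZEOF_EXPR over a parameter pack;
       once the pack is expanded during substitution, the branch above folds
       it to the element count via build_int_cst (3 in this example).  */
    template <class... Ts>
    constexpr unsigned long count = sizeof...(Ts);
    static_assert (count<int, char, double> == 3, "three pack elements");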
*/ - tree unq = tsubst_copy_and_build (function, args, - complain, in_decl); + tree unq = tsubst_expr (function, args, complain, in_decl); if (unq == error_mark_node) RETURN (error_mark_node); @@ -21364,31 +20739,6 @@ tsubst_copy_and_build (tree t, || TREE_CODE (function) == MEMBER_REF) ret = build_offset_ref_call_from_tree (function, &call_args, complain); - else if (TREE_CODE (function) == COMPONENT_REF) - { - tree instance = TREE_OPERAND (function, 0); - tree fn = TREE_OPERAND (function, 1); - - if (processing_template_decl - && (type_dependent_expression_p (instance) - || (!BASELINK_P (fn) - && TREE_CODE (fn) != FIELD_DECL) - || type_dependent_expression_p (fn) - || any_type_dependent_arguments_p (call_args))) - ret = build_min_nt_call_vec (function, call_args); - else if (!BASELINK_P (fn)) - ret = finish_call_expr (function, &call_args, - /*disallow_virtual=*/false, - /*koenig_p=*/false, - complain); - else - ret = (build_new_method_call - (instance, fn, - &call_args, NULL_TREE, - qualified_p ? LOOKUP_NONVIRTUAL : LOOKUP_NORMAL, - /*fn_p=*/NULL, - complain)); - } else if (concept_check_p (function)) { /* FUNCTION is a template-id referring to a concept definition. */ @@ -21498,7 +20848,7 @@ tsubst_copy_and_build (tree t, non_reference (TREE_TYPE (object)), args, complain, in_decl); else - member = tsubst_copy (member, args, complain, in_decl); + member = tsubst_name (member, args, complain, in_decl); if (member == error_mark_node) RETURN (error_mark_node); @@ -21707,29 +21057,326 @@ tsubst_copy_and_build (tree t, } } + case FUNCTION_DECL: + case PARM_DECL: case VAR_DECL: if (!args) RETURN (t); - /* Fall through */ + tree r; + if (VAR_OR_FUNCTION_DECL_P (t) + && DECL_LANG_SPECIFIC (t) && DECL_TEMPLATE_INFO (t)) + r = tsubst_decl (t, args, complain); + else if (VAR_OR_FUNCTION_DECL_P (t) && DECL_LOCAL_DECL_P (t)) + { + /* Local specialization will usually have been created when + we instantiated the DECL_EXPR_DECL. */ + r = retrieve_local_specialization (t); + if (!r) + { + /* We're in a generic lambda referencing a local extern + from an outer block-scope of a non-template. */ + gcc_checking_assert (LAMBDA_FUNCTION_P (current_function_decl)); + r = t; + } + } + else if (local_variable_p (t) + && ((r = retrieve_local_specialization (t)) + || TREE_CODE (t) == PARM_DECL + || uses_template_parms (DECL_CONTEXT (t)))) + { + if (r == NULL_TREE && TREE_CODE (t) == PARM_DECL) + { + /* We get here for a use of 'this' in an NSDMI. */ + if (DECL_NAME (t) == this_identifier && current_class_ptr) + RETURN (current_class_ptr); - case PARM_DECL: + /* This can happen for a parameter name used later in a function + declaration (such as in a late-specified return type). Just + make a dummy decl, since it's only used for its type. */ + gcc_assert (cp_unevaluated_operand); + r = tsubst_decl (t, args, complain); + /* Give it the template pattern as its context; its true context + hasn't been instantiated yet and this is good enough for + mangling. */ + DECL_CONTEXT (r) = DECL_CONTEXT (t); + } + else if (r == NULL_TREE) + { + /* First try name lookup to find the instantiation. */ + r = lookup_name (DECL_NAME (t)); + if (r) + { + if (!VAR_P (r)) + { + /* During error-recovery we may find a non-variable, + even an OVERLOAD: just bail out and avoid ICEs and + duplicate diagnostics (c++/62207). */ + gcc_assert (seen_error ()); + RETURN (error_mark_node); + } + if (!is_capture_proxy (r)) + { + /* Make sure the one we found is the one we want. 
*/ + tree ctx = enclosing_instantiation_of (DECL_CONTEXT (t)); + if (ctx != DECL_CONTEXT (r)) + r = NULL_TREE; + } + } + + if (r) + /* OK */; + else + { + /* This can happen for a variable used in a + late-specified return type of a local lambda, or for a + local static or constant. Building a new VAR_DECL + should be OK in all those cases. */ + r = tsubst_decl (t, args, complain); + if (local_specializations) + /* Avoid infinite recursion (79640). */ + register_local_specialization (r, t); + if (decl_maybe_constant_var_p (r)) + { + /* We can't call cp_finish_decl, so handle the + initializer by hand. */ + tree init = tsubst_init (DECL_INITIAL (t), r, args, + complain, in_decl); + if (!processing_template_decl) + init = maybe_constant_init (init); + if (processing_template_decl + ? potential_constant_expression (init) + : reduced_constant_expression_p (init)) + DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (r) + = TREE_CONSTANT (r) = true; + DECL_INITIAL (r) = init; + if (tree auto_node = type_uses_auto (TREE_TYPE (r))) + TREE_TYPE (r) + = do_auto_deduction (TREE_TYPE (r), init, auto_node, + complain, adc_variable_type); + } + gcc_assert (cp_unevaluated_operand + || processing_contract_condition + || TREE_STATIC (r) + || decl_constant_var_p (r) + || seen_error ()); + if (!processing_template_decl + && !TREE_STATIC (r)) + r = process_outer_var_ref (r, complain); + } + /* Remember this for subsequent uses. */ + if (local_specializations) + register_local_specialization (r, t); + } + if (TREE_CODE (r) == ARGUMENT_PACK_SELECT) + r = argument_pack_select_arg (r); + } + else + r = t; + if (!mark_used (r, complain)) + RETURN (error_mark_node); + + if (!no_name_lookup_flag + && (TREE_CODE (t) == PARM_DECL || TREE_CODE (t) == VAR_DECL)) + { + /* ??? We're doing a subset of finish_id_expression here. */ + if (tree wrap = maybe_get_tls_wrapper_call (r)) + /* Replace an evaluated use of the thread_local variable with + a call to its wrapper. */ + r = wrap; + else if (outer_automatic_var_p (r)) + r = process_outer_var_ref (r, complain); + + if (!TYPE_REF_P (TREE_TYPE (t))) + /* If the original type was a reference, we'll be wrapped in + the appropriate INDIRECT_REF. */ + r = convert_from_reference (r); + } + RETURN (r); + + case CONST_DECL: + { + tree enum_type; + tree v; + + if (DECL_TEMPLATE_PARM_P (t)) + RETURN (RECUR (DECL_INITIAL (t))); + if (!uses_template_parms (DECL_CONTEXT (t))) + RETURN (t); + + /* Unfortunately, we cannot just call lookup_name here. + Consider: + + template <int I> int f() { + enum E { a = I }; + struct S { void g() { E e = a; } }; + }; + + When we instantiate f<7>::S::g(), say, lookup_name is not + clever enough to find f<7>::a. */ + enum_type + = tsubst_aggr_type (DECL_CONTEXT (t), args, complain, in_decl, + /*entering_scope=*/0); + + for (v = TYPE_VALUES (enum_type); + v != NULL_TREE; + v = TREE_CHAIN (v)) + if (TREE_PURPOSE (v) == DECL_NAME (t)) + RETURN (TREE_VALUE (v)); + + /* We didn't find the name. That should never happen; if + name-lookup found it during preliminary parsing, we + should find it again here during instantiation. 
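In the VAR_DECL handling moved above from the deleted tsubst_copy, a local entity's instantiation is first looked for with retrieve_local_specialization, and any freshly built decl is cached with register_local_specialization so later uses of the same variable find the same tree. A minimal sketch of that caching shape (placeholder variables):

    /* Sketch only; T is the pattern decl, R its instantiation.  */
    tree r = retrieve_local_specialization (t);
    if (!r)
      {
        r = tsubst_decl (t, args, complain);
        if (local_specializations)
          /* Remember it so later references reuse the same decl.  */
          register_local_specialization (r, t);
      }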
*/ + gcc_unreachable (); + RETURN (t); + } + + case FIELD_DECL: + if (DECL_CONTEXT (t)) + { + tree ctx; + + ctx = tsubst_aggr_type (DECL_CONTEXT (t), args, complain, in_decl, + /*entering_scope=*/1); + if (ctx != DECL_CONTEXT (t)) + { + tree r = lookup_field (ctx, DECL_NAME (t), 0, false); + if (!r) + { + if (complain & tf_error) + error ("using invalid field %qD", t); + RETURN (error_mark_node); + } + RETURN (r); + } + } + RETURN (t); + + case NAMESPACE_DECL: + case OVERLOAD: + RETURN (t); + + case TEMPLATE_DECL: + if (DECL_TEMPLATE_TEMPLATE_PARM_P (t)) + RETURN (tsubst (TREE_TYPE (DECL_TEMPLATE_RESULT (t)), + args, complain, in_decl)); + else if (DECL_FUNCTION_TEMPLATE_P (t) && DECL_MEMBER_TEMPLATE_P (t)) + RETURN (tsubst (t, args, complain, in_decl)); + else if (DECL_CLASS_SCOPE_P (t) + && uses_template_parms (DECL_CONTEXT (t))) + { + /* Template template argument like the following example need + special treatment: + + template <template <class> class TT> struct C {}; + template <class T> struct D { + template <class U> struct E {}; + C<E> c; // #1 + }; + D<int> d; // #2 + + We are processing the template argument `E' in #1 for + the template instantiation #2. Originally, `E' is a + TEMPLATE_DECL with `D<T>' as its DECL_CONTEXT. Now we + have to substitute this with one having context `D<int>'. */ + + tree context = tsubst_aggr_type (DECL_CONTEXT (t), args, complain, + in_decl, /*entering_scope=*/true); + RETURN (lookup_field (context, DECL_NAME(t), 0, false)); + } + else + /* Ordinary template template argument. */ + RETURN (t); + + case TEMPLATE_PARM_INDEX: + case TYPE_DECL: + RETURN (tsubst (t, args, complain, in_decl)); + + case CLEANUP_POINT_EXPR: + /* We shouldn't have built any of these during initial template + generation. Instead, they should be built during instantiation + in response to the saved STMT_IS_FULL_EXPR_P setting. */ + gcc_unreachable (); + + case OFFSET_REF: + { + tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); + tree op0 = RECUR (TREE_OPERAND (t, 0)); + tree op1 = RECUR (TREE_OPERAND (t, 1)); + r = build2 (OFFSET_REF, type, op0, op1); + PTRMEM_OK_P (r) = PTRMEM_OK_P (t); + if (!mark_used (TREE_OPERAND (r, 1), complain) + && !(complain & tf_error)) + RETURN (error_mark_node); + RETURN (r); + } + + case EXPR_PACK_EXPANSION: + error ("invalid use of pack expansion expression"); + RETURN (error_mark_node); + + case NONTYPE_ARGUMENT_PACK: + error ("use %<...%> to expand argument pack"); + RETURN (error_mark_node); + + case VOID_CST: + gcc_checking_assert (t == void_node && VOID_TYPE_P (TREE_TYPE (t))); + RETURN (t); + + case INTEGER_CST: + case REAL_CST: + case COMPLEX_CST: + case VECTOR_CST: + { + /* Instantiate any typedefs in the type. */ + tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); + r = fold_convert (type, t); + gcc_assert (TREE_CODE (r) == TREE_CODE (t)); + RETURN (r); + } + + case STRING_CST: { - tree r = tsubst_copy (t, args, complain, in_decl); - /* ??? We're doing a subset of finish_id_expression here. */ - if (tree wrap = maybe_get_tls_wrapper_call (r)) - /* Replace an evaluated use of the thread_local variable with - a call to its wrapper. */ - r = wrap; - else if (outer_automatic_var_p (r)) - r = process_outer_var_ref (r, complain); - - if (!TYPE_REF_P (TREE_TYPE (t))) - /* If the original type was a reference, we'll be wrapped in - the appropriate INDIRECT_REF. 
*/ - r = convert_from_reference (r); + tree type = tsubst (TREE_TYPE (t), args, complain, in_decl); + r = t; + if (type != TREE_TYPE (t)) + { + r = copy_node (t); + TREE_TYPE (r) = type; + } RETURN (r); } + case PTRMEM_CST: + /* These can sometimes show up in a partial instantiation, but never + involve template parms. */ + gcc_assert (!uses_template_parms (t)); + RETURN (t); + + case UNARY_LEFT_FOLD_EXPR: + RETURN (tsubst_unary_left_fold (t, args, complain, in_decl)); + case UNARY_RIGHT_FOLD_EXPR: + RETURN (tsubst_unary_right_fold (t, args, complain, in_decl)); + case BINARY_LEFT_FOLD_EXPR: + RETURN (tsubst_binary_left_fold (t, args, complain, in_decl)); + case BINARY_RIGHT_FOLD_EXPR: + RETURN (tsubst_binary_right_fold (t, args, complain, in_decl)); + case PREDICT_EXPR: + RETURN (t); + + case DEBUG_BEGIN_STMT: + /* ??? There's no point in copying it for now, but maybe some + day it will contain more information, such as a pointer back + to the containing function, inlined copy or so. */ + RETURN (t); + + case CO_YIELD_EXPR: + RETURN (finish_co_yield_expr (input_location, + RECUR (TREE_OPERAND (t, 0)))); + + case CO_AWAIT_EXPR: + RETURN (finish_co_await_expr (input_location, + RECUR (TREE_OPERAND (t, 0)))); + case VA_ARG_EXPR: { tree op0 = RECUR (TREE_OPERAND (t, 0)); @@ -21740,8 +21387,7 @@ tsubst_copy_and_build (tree t, case OFFSETOF_EXPR: { tree object_ptr - = tsubst_copy_and_build (TREE_OPERAND (t, 1), args, - complain, in_decl); + = tsubst_expr (TREE_OPERAND (t, 1), args, complain, in_decl); RETURN (finish_offsetof (object_ptr, RECUR (TREE_OPERAND (t, 0)), EXPR_LOCATION (t))); @@ -21753,8 +21399,11 @@ tsubst_copy_and_build (tree t, case TRAIT_EXPR: { - tree type1 = tsubst_copy (TRAIT_EXPR_TYPE1 (t), args, - complain, in_decl); + tree type1 = TRAIT_EXPR_TYPE1 (t); + if (TYPE_P (type1)) + type1 = tsubst (type1, args, complain, in_decl); + else + type1 = tsubst_expr (type1, args, complain, in_decl); tree type2 = tsubst (TRAIT_EXPR_TYPE2 (t), args, complain, in_decl); RETURN (finish_trait_expr (TRAIT_EXPR_LOCATION (t), @@ -21767,7 +21416,7 @@ tsubst_copy_and_build (tree t, tree stmt_expr = begin_stmt_expr (); cur_stmt_expr = stmt_expr; - tsubst_expr (STMT_EXPR_STMT (t), args, complain, in_decl); + tsubst_stmt (STMT_EXPR_STMT (t), args, complain, in_decl); stmt_expr = finish_stmt_expr (stmt_expr, false); cur_stmt_expr = old_stmt_expr; @@ -21794,7 +21443,8 @@ tsubst_copy_and_build (tree t, } case TRANSACTION_EXPR: - RETURN (tsubst_expr (t, args, complain, in_decl)); + gcc_checking_assert (!TRANSACTION_EXPR_IS_STMT (t)); + RETURN (tsubst_stmt (t, args, complain, in_decl)); case PAREN_EXPR: if (REF_PARENTHESIZED_P (t)) @@ -21875,13 +21525,13 @@ tsubst_copy_and_build (tree t, default: /* Handle Objective-C++ constructs, if appropriate. */ - { - tree subst - = objcp_tsubst_copy_and_build (t, args, complain, in_decl); - if (subst) - RETURN (subst); - } - RETURN (tsubst_copy (t, args, complain, in_decl)); + if (tree subst = objcp_tsubst_expr (t, args, complain, in_decl)) + RETURN (subst); + + /* We shouldn't get here, but keep going if !flag_checking. */ + if (flag_checking) + gcc_unreachable (); + RETURN (t); } #undef RECUR @@ -26913,9 +26563,9 @@ maybe_instantiate_noexcept (tree fn, tsubst_flags_t complain) ++processing_template_decl; /* Do deferred instantiation of the noexcept-specifier. 
*/ - noex = tsubst_copy_and_build (DEFERRED_NOEXCEPT_PATTERN (noex), - DEFERRED_NOEXCEPT_ARGS (noex), - tf_warning_or_error, fn); + noex = tsubst_expr (DEFERRED_NOEXCEPT_PATTERN (noex), + DEFERRED_NOEXCEPT_ARGS (noex), + tf_warning_or_error, fn); /* Build up the noexcept-specification. */ spec = build_noexcept_spec (noex, tf_warning_or_error); @@ -27089,7 +26739,7 @@ instantiate_body (tree pattern, tree args, tree d, bool nested_p) tf_warning_or_error, d); else { - tsubst_expr (DECL_SAVED_TREE (code_pattern), args, + tsubst_stmt (DECL_SAVED_TREE (code_pattern), args, tf_warning_or_error, DECL_TI_TEMPLATE (d)); /* Set the current input_location to the end of the function @@ -27605,8 +27255,12 @@ tsubst_initializer_list (tree t, tree argvec) else { tree tmp; - decl = tsubst_copy (TREE_PURPOSE (t), argvec, - tf_warning_or_error, NULL_TREE); + if (TYPE_P (TREE_PURPOSE (t))) + decl = tsubst (TREE_PURPOSE (t), argvec, + tf_warning_or_error, NULL_TREE); + else + decl = tsubst_expr (TREE_PURPOSE (t), argvec, + tf_warning_or_error, NULL_TREE); decl = expand_member_init (decl); if (decl && !DECL_P (decl)) @@ -28585,7 +28239,7 @@ type_dependent_expression_p (tree expression) if (TREE_CODE (expression) == COMPONENT_REF || TREE_CODE (expression) == OFFSET_REF) { - if (type_dependent_expression_p (TREE_OPERAND (expression, 0))) + if (type_dependent_object_expression_p (TREE_OPERAND (expression, 0))) return true; expression = TREE_OPERAND (expression, 1); if (identifier_p (expression)) @@ -29294,116 +28948,6 @@ resolve_typename_type (tree type, bool only_current_p) return result; } -/* EXPR is an expression which is not type-dependent. Return a proxy - for EXPR that can be used to compute the types of larger - expressions containing EXPR. */ - -tree -build_non_dependent_expr (tree expr) -{ - tree orig_expr = expr; - tree inner_expr; - - /* When checking, try to get a constant value for all non-dependent - expressions in order to expose bugs in *_dependent_expression_p - and constexpr. This can affect code generation, see PR70704, so - only do this for -fchecking=2. */ - if (flag_checking > 1 - && cxx_dialect >= cxx11 - /* Don't do this during nsdmi parsing as it can lead to - unexpected recursive instantiations. */ - && !parsing_nsdmi () - /* Don't do this during concept processing either and for - the same reason. */ - && !processing_constraint_expression_p ()) - fold_non_dependent_expr (expr, tf_none); - - STRIP_ANY_LOCATION_WRAPPER (expr); - - /* Preserve OVERLOADs; the functions must be available to resolve - types. */ - inner_expr = expr; - if (TREE_CODE (inner_expr) == STMT_EXPR) - inner_expr = stmt_expr_value_expr (inner_expr); - if (TREE_CODE (inner_expr) == ADDR_EXPR) - inner_expr = TREE_OPERAND (inner_expr, 0); - if (TREE_CODE (inner_expr) == COMPONENT_REF) - inner_expr = TREE_OPERAND (inner_expr, 1); - if (is_overloaded_fn (inner_expr) - || TREE_CODE (inner_expr) == OFFSET_REF) - return orig_expr; - /* There is no need to return a proxy for a variable, parameter - or enumerator. */ - if (VAR_P (expr) || TREE_CODE (expr) == PARM_DECL - || TREE_CODE (expr) == CONST_DECL) - return orig_expr; - /* Preserve string constants; conversions from string constants to - "char *" are allowed, even though normally a "const char *" - cannot be used to initialize a "char *". */ - if (TREE_CODE (expr) == STRING_CST) - return orig_expr; - /* Preserve void and arithmetic constants, as an optimization -- there is no - reason to create a new node. 
*/ - if (TREE_CODE (expr) == VOID_CST - || TREE_CODE (expr) == INTEGER_CST - || TREE_CODE (expr) == REAL_CST) - return orig_expr; - /* Preserve THROW_EXPRs -- all throw-expressions have type "void". - There is at least one place where we want to know that a - particular expression is a throw-expression: when checking a ?: - expression, there are special rules if the second or third - argument is a throw-expression. */ - if (TREE_CODE (expr) == THROW_EXPR) - return orig_expr; - - /* Don't wrap an initializer list, we need to be able to look inside. */ - if (BRACE_ENCLOSED_INITIALIZER_P (expr)) - return orig_expr; - - /* Don't wrap a dummy object, we need to be able to test for it. */ - if (is_dummy_object (expr)) - return orig_expr; - - if (TREE_CODE (expr) == COND_EXPR) - return build3 (COND_EXPR, - TREE_TYPE (expr), - build_non_dependent_expr (TREE_OPERAND (expr, 0)), - (TREE_OPERAND (expr, 1) - ? build_non_dependent_expr (TREE_OPERAND (expr, 1)) - : build_non_dependent_expr (TREE_OPERAND (expr, 0))), - build_non_dependent_expr (TREE_OPERAND (expr, 2))); - if (TREE_CODE (expr) == COMPOUND_EXPR) - return build2 (COMPOUND_EXPR, - TREE_TYPE (expr), - TREE_OPERAND (expr, 0), - build_non_dependent_expr (TREE_OPERAND (expr, 1))); - - /* If the type is unknown, it can't really be non-dependent */ - gcc_assert (TREE_TYPE (expr) != unknown_type_node); - - /* Otherwise, build a NON_DEPENDENT_EXPR. */ - return build1_loc (EXPR_LOCATION (orig_expr), NON_DEPENDENT_EXPR, - TREE_TYPE (expr), expr); -} - -/* ARGS is a vector of expressions as arguments to a function call. - Replace the arguments with equivalent non-dependent expressions. - This modifies ARGS in place. */ - -void -make_args_non_dependent (vec<tree, va_gc> *args) -{ - unsigned int ix; - tree arg; - - FOR_EACH_VEC_SAFE_ELT (args, ix, arg) - { - tree newarg = build_non_dependent_expr (arg); - if (newarg != arg) - (*args)[ix] = newarg; - } -} - /* Returns a type which represents 'auto' or 'decltype(auto)'. We use a TEMPLATE_TYPE_PARM with a level one deeper than the actual template parms, by default. If set_canonical is true, we set TYPE_CANONICAL on it. */ @@ -31617,33 +31161,6 @@ print_template_statistics (void) namespace selftest { -/* Verify that build_non_dependent_expr () works, for various expressions, - and that location wrappers don't affect the results. */ - -static void -test_build_non_dependent_expr () -{ - location_t loc = BUILTINS_LOCATION; - - /* Verify constants, without and with location wrappers. */ - tree int_cst = build_int_cst (integer_type_node, 42); - ASSERT_EQ (int_cst, build_non_dependent_expr (int_cst)); - - tree wrapped_int_cst = maybe_wrap_with_location (int_cst, loc); - ASSERT_TRUE (location_wrapper_p (wrapped_int_cst)); - ASSERT_EQ (wrapped_int_cst, build_non_dependent_expr (wrapped_int_cst)); - - tree string_lit = build_string (4, "foo"); - TREE_TYPE (string_lit) = char_array_type_node; - string_lit = fix_string_type (string_lit); - ASSERT_EQ (string_lit, build_non_dependent_expr (string_lit)); - - tree wrapped_string_lit = maybe_wrap_with_location (string_lit, loc); - ASSERT_TRUE (location_wrapper_p (wrapped_string_lit)); - ASSERT_EQ (wrapped_string_lit, - build_non_dependent_expr (wrapped_string_lit)); -} - /* Verify that type_dependent_expression_p () works correctly, even in the presence of location wrapper nodes. 
*/ @@ -31684,7 +31201,6 @@ test_type_dependent_expression_p () void cp_pt_cc_tests () { - test_build_non_dependent_expr (); test_type_dependent_expression_p (); } diff --git a/gcc/cp/semantics.cc b/gcc/cp/semantics.cc index 2a0cf96..dc3c114 100644 --- a/gcc/cp/semantics.cc +++ b/gcc/cp/semantics.cc @@ -916,8 +916,7 @@ finish_expr_stmt (tree expr) expr = convert_to_void (expr, ICV_STATEMENT, tf_warning_or_error); } else if (!type_dependent_expression_p (expr)) - convert_to_void (build_non_dependent_expr (expr), ICV_STATEMENT, - tf_warning_or_error); + convert_to_void (expr, ICV_STATEMENT, tf_warning_or_error); if (check_for_bare_parameter_packs (expr)) expr = error_mark_node; @@ -1396,8 +1395,7 @@ finish_for_expr (tree expr, tree for_stmt) tf_warning_or_error); } else if (!type_dependent_expression_p (expr)) - convert_to_void (build_non_dependent_expr (expr), ICV_THIRD_IN_FOR, - tf_warning_or_error); + convert_to_void (expr, ICV_THIRD_IN_FOR, tf_warning_or_error); expr = maybe_cleanup_point_expr_void (expr); if (check_for_bare_parameter_packs (expr)) expr = error_mark_node; @@ -2795,18 +2793,19 @@ finish_call_expr (tree fn, vec<tree, va_gc> **args, bool disallow_virtual, (c++/89780, c++/107363). This also suppresses the -Wredundant-move warning. */ suppress_warning (result, OPT_Wpessimizing_move); - if (is_overloaded_fn (fn)) - fn = get_fns (fn); if (cfun) { bool abnormal = true; - for (lkp_iterator iter (fn); abnormal && iter; ++iter) + for (lkp_iterator iter (maybe_get_fns (fn)); iter; ++iter) { tree fndecl = STRIP_TEMPLATE (*iter); if (TREE_CODE (fndecl) != FUNCTION_DECL || !TREE_THIS_VOLATILE (fndecl)) - abnormal = false; + { + abnormal = false; + break; + } } /* FIXME: Stop warning about falling off end of non-void function. But this is wrong. Even if we only see @@ -2816,14 +2815,12 @@ finish_call_expr (tree fn, vec<tree, va_gc> **args, bool disallow_virtual, if (abnormal) current_function_returns_abnormally = 1; } + if (TREE_CODE (fn) == COMPONENT_REF) + maybe_generic_this_capture (TREE_OPERAND (fn, 0), + TREE_OPERAND (fn, 1)); return result; } orig_args = make_tree_vector_copy (*args); - if (!BASELINK_P (fn) - && TREE_CODE (fn) != PSEUDO_DTOR_EXPR - && TREE_TYPE (fn) != unknown_type_node) - fn = build_non_dependent_expr (fn); - make_args_non_dependent (*args); } if (TREE_CODE (fn) == COMPONENT_REF) @@ -11034,20 +11031,6 @@ finish_omp_atomic (location_t loc, enum tree_code code, enum tree_code opcode, || TREE_CODE (OMP_CLAUSE_HINT_EXPR (clauses)) != INTEGER_CST) dependent_p = true; } - if (!dependent_p) - { - lhs = build_non_dependent_expr (lhs); - if (rhs) - rhs = build_non_dependent_expr (rhs); - if (v) - v = build_non_dependent_expr (v); - if (lhs1) - lhs1 = build_non_dependent_expr (lhs1); - if (rhs1) - rhs1 = build_non_dependent_expr (rhs1); - if (r && r != void_list_node) - r = build_non_dependent_expr (r); - } } if (!dependent_p) { diff --git a/gcc/cp/tree.cc b/gcc/cp/tree.cc index eaf882f..a3d61d3 100644 --- a/gcc/cp/tree.cc +++ b/gcc/cp/tree.cc @@ -308,7 +308,6 @@ lvalue_kind (const_tree ref) its argument unmodified and we assign it to a const_tree. */ return lvalue_kind (BASELINK_FUNCTIONS (CONST_CAST_TREE (ref))); - case NON_DEPENDENT_EXPR: case PAREN_EXPR: return lvalue_kind (TREE_OPERAND (ref, 0)); @@ -412,10 +411,6 @@ cp_stabilize_reference (tree ref) STRIP_ANY_LOCATION_WRAPPER (ref); switch (TREE_CODE (ref)) { - case NON_DEPENDENT_EXPR: - /* We aren't actually evaluating this. 
*/ - return ref; - /* We need to treat specially anything stabilize_reference doesn't handle specifically. */ case VAR_DECL: diff --git a/gcc/cp/typeck.cc b/gcc/cp/typeck.cc index 8132bd7..f3dc80c 100644 --- a/gcc/cp/typeck.cc +++ b/gcc/cp/typeck.cc @@ -3385,7 +3385,6 @@ finish_class_member_access_expr (cp_expr object, tree name, bool template_p, return build_min_nt_loc (UNKNOWN_LOCATION, COMPONENT_REF, orig_object, orig_name, NULL_TREE); } - object = build_non_dependent_expr (object); } else if (c_dialect_objc () && identifier_p (name) @@ -3743,7 +3742,6 @@ build_x_indirect_ref (location_t loc, tree expr, ref_operator errorstring, = build_dependent_operator_type (lookups, INDIRECT_REF, false); return expr; } - expr = build_non_dependent_expr (expr); } rval = build_new_op (loc, INDIRECT_REF, LOOKUP_NORMAL, expr, @@ -4712,8 +4710,6 @@ build_x_binary_op (const op_location_t &loc, enum tree_code code, tree arg1, = build_dependent_operator_type (lookups, code, false); return expr; } - arg1 = build_non_dependent_expr (arg1); - arg2 = build_non_dependent_expr (arg2); } if (code == DOTSTAR_EXPR) @@ -4767,8 +4763,6 @@ build_x_array_ref (location_t loc, tree arg1, tree arg2, || type_dependent_expression_p (arg2)) return build_min_nt_loc (loc, ARRAY_REF, arg1, arg2, NULL_TREE, NULL_TREE); - arg1 = build_non_dependent_expr (arg1); - arg2 = build_non_dependent_expr (arg2); } expr = build_new_op (loc, ARRAY_REF, LOOKUP_NORMAL, arg1, arg2, @@ -4844,9 +4838,6 @@ warn_for_null_address (location_t location, tree op, tsubst_flags_t complain) || warning_suppressed_p (op, OPT_Waddress)) return; - if (TREE_CODE (op) == NON_DEPENDENT_EXPR) - op = TREE_OPERAND (op, 0); - tree cop = fold_for_warn (op); if (TREE_CODE (cop) == NON_LVALUE_EXPR) @@ -5405,7 +5396,9 @@ cp_build_binary_op (const op_location_t &location, type0 = TREE_TYPE (type0); if (!TYPE_P (type1)) type1 = TREE_TYPE (type1); - if (INDIRECT_TYPE_P (type0) && same_type_p (TREE_TYPE (type0), type1)) + if (type0 + && INDIRECT_TYPE_P (type0) + && same_type_p (TREE_TYPE (type0), type1)) { if (!(TREE_CODE (first_arg) == PARM_DECL && DECL_ARRAY_PARAMETER_P (first_arg) @@ -5422,7 +5415,9 @@ cp_build_binary_op (const op_location_t &location, "first %<sizeof%> operand was declared here"); } } - else if (TREE_CODE (type0) == ARRAY_TYPE + else if (!dependent_type_p (type0) + && !dependent_type_p (type1) + && TREE_CODE (type0) == ARRAY_TYPE && !char_type_p (TYPE_MAIN_VARIANT (TREE_TYPE (type0))) /* Set by finish_parenthesized_expr. */ && !warning_suppressed_p (op1, OPT_Wsizeof_array_div) @@ -6600,10 +6595,6 @@ build_x_vec_perm_expr (location_t loc, || type_dependent_expression_p (arg1) || type_dependent_expression_p (arg2)) return build_min_nt_loc (loc, VEC_PERM_EXPR, arg0, arg1, arg2); - arg0 = build_non_dependent_expr (arg0); - if (arg1) - arg1 = build_non_dependent_expr (arg1); - arg2 = build_non_dependent_expr (arg2); } tree exp = c_build_vec_perm_expr (loc, arg0, arg1, arg2, complain & tf_error); if (processing_template_decl && exp != error_mark_node) @@ -6631,9 +6622,6 @@ build_x_shufflevector (location_t loc, vec<tree, va_gc> *args, CALL_EXPR_IFN (exp) = IFN_SHUFFLEVECTOR; return exp; } - arg0 = build_non_dependent_expr (arg0); - arg1 = build_non_dependent_expr (arg1); - /* ??? Nothing needed for the index arguments? 
*/ } auto_vec<tree, 16> mask; for (unsigned i = 2; i < args->length (); ++i) @@ -6803,8 +6791,6 @@ build_x_unary_op (location_t loc, enum tree_code code, cp_expr xarg, TREE_TYPE (e) = build_dependent_operator_type (lookups, code, false); return e; } - - xarg = build_non_dependent_expr (xarg); } exp = NULL_TREE; @@ -6922,8 +6908,6 @@ cp_build_addressof (location_t loc, tree arg, tsubst_flags_t complain) { if (type_dependent_expression_p (arg)) return build_min_nt_loc (loc, ADDRESSOF_EXPR, arg, NULL_TREE); - - arg = build_non_dependent_expr (arg); } tree exp = cp_build_addr_expr_strict (arg, complain); @@ -7399,6 +7383,8 @@ cp_build_unary_op (enum tree_code code, tree xarg, bool noconvert, complain); if (arg != error_mark_node) { + if (processing_template_decl) + return build1_loc (location, TRUTH_NOT_EXPR, boolean_type_node, arg); val = invert_truthvalue_loc (location, arg); if (obvalue_p (val)) val = non_lvalue_loc (location, val); @@ -7856,10 +7842,6 @@ build_x_conditional_expr (location_t loc, tree ifexp, tree op1, tree op2, || (op1 && type_dependent_expression_p (op1)) || type_dependent_expression_p (op2)) return build_min_nt_loc (loc, COND_EXPR, ifexp, op1, op2); - ifexp = build_non_dependent_expr (ifexp); - if (op1) - op1 = build_non_dependent_expr (op1); - op2 = build_non_dependent_expr (op2); } expr = build_conditional_expr (loc, ifexp, op1, op2, complain); @@ -7980,8 +7962,6 @@ build_x_compound_expr (location_t loc, tree op1, tree op2, = build_dependent_operator_type (lookups, COMPOUND_EXPR, false); return result; } - op1 = build_non_dependent_expr (op1); - op2 = build_non_dependent_expr (op2); } result = build_new_op (loc, COMPOUND_EXPR, LOOKUP_NORMAL, op1, op2, @@ -8553,8 +8533,6 @@ build_static_cast (location_t loc, tree type, tree oexpr, protected_set_expr_location (result, loc); return result; } - else if (processing_template_decl) - expr = build_non_dependent_expr (expr); /* build_c_cast puts on a NOP_EXPR to make the result not an lvalue. Strip such NOP_EXPRs if VALUE is being used in non-lvalue context. */ @@ -9734,9 +9712,6 @@ build_x_modify_expr (location_t loc, tree lhs, enum tree_code modifycode, = build_dependent_operator_type (lookups, modifycode, true); return rval; } - - lhs = build_non_dependent_expr (lhs); - rhs = build_non_dependent_expr (rhs); } tree rval; @@ -11227,9 +11202,6 @@ check_return_expr (tree retval, bool *no_warning, bool *dangling) if (VOID_TYPE_P (functype)) return error_mark_node; - if (processing_template_decl) - retval = build_non_dependent_expr (retval); - /* Under C++11 [12.8/32 class.copy], a returned lvalue is sometimes treated as an rvalue for the purposes of overload resolution to favor move constructors over copy constructors. 
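The typeck.cc hunks above all drop the build_non_dependent_expr wrapping that used to be applied, inside a template, to operands that are not type-dependent. As a rough illustration (a hypothetical example, not taken from the patch), the addition below is non-dependent, so it is checked at template definition time; before this change each operand would first have been wrapped in a NON_DEPENDENT_EXPR proxy, whereas now the expressions are used directly:

struct S { int v; };
inline S operator+ (S a, int b) { return S{a.v + b}; }

template <typename T>
int f (T)
{
  S x{1};
  return (x + 1).v;   /* non-dependent: resolved here, no proxy node needed */
}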
diff --git a/gcc/cp/typeck2.cc b/gcc/cp/typeck2.cc index cd1ea04..309903a 100644 --- a/gcc/cp/typeck2.cc +++ b/gcc/cp/typeck2.cc @@ -1109,15 +1109,11 @@ check_narrowing (tree type, tree init, tsubst_flags_t complain, else if (complain & tf_error) { int savederrorcount = errorcount; - if (!flag_permissive) - global_dc->pedantic_errors = 1; - auto s = make_temp_override (global_dc->dc_warn_system_headers, true); - pedwarn (loc, OPT_Wnarrowing, - "narrowing conversion of %qE from %qH to %qI", - init, ftype, type); + permerror_opt (loc, OPT_Wnarrowing, + "narrowing conversion of %qE from %qH to %qI", + init, ftype, type); if (errorcount == savederrorcount) ok = true; - global_dc->pedantic_errors = flag_pedantic_errors; } } @@ -2218,7 +2214,6 @@ build_x_arrow (location_t loc, tree expr, tsubst_flags_t complain) TREE_TYPE (expr) = ttype; return expr; } - expr = build_non_dependent_expr (expr); } if (MAYBE_CLASS_TYPE_P (type)) diff --git a/gcc/diagnostic-core.h b/gcc/diagnostic-core.h index c9e27fd..04eba3d 100644 --- a/gcc/diagnostic-core.h +++ b/gcc/diagnostic-core.h @@ -105,6 +105,10 @@ extern bool pedwarn (rich_location *, int, const char *, ...) extern bool permerror (location_t, const char *, ...) ATTRIBUTE_GCC_DIAG(2,3); extern bool permerror (rich_location *, const char *, ...) ATTRIBUTE_GCC_DIAG(2,3); +extern bool permerror_opt (location_t, int, const char *, ...) + ATTRIBUTE_GCC_DIAG(3,4); +extern bool permerror_opt (rich_location *, int, const char *, + ...) ATTRIBUTE_GCC_DIAG(3,4); extern void sorry (const char *, ...) ATTRIBUTE_GCC_DIAG(1,2); extern void sorry_at (location_t, const char *, ...) ATTRIBUTE_GCC_DIAG(2,3); extern void inform (location_t, const char *, ...) ATTRIBUTE_GCC_DIAG(2,3); diff --git a/gcc/diagnostic.cc b/gcc/diagnostic.cc index 6e46371..0f39235 100644 --- a/gcc/diagnostic.cc +++ b/gcc/diagnostic.cc @@ -1241,14 +1241,6 @@ static diagnostic_t update_effective_level_from_pragmas (diagnostic_context *context, diagnostic_info *diagnostic) { - if (diagnostic->m_iinfo.m_allsyslocs && !context->dc_warn_system_headers) - { - /* Ignore the diagnostic if all the inlined locations are - in system headers and -Wno-system-headers is in effect. */ - diagnostic->kind = DK_IGNORED; - return DK_IGNORED; - } - if (context->n_classification_history <= 0) return DK_UNSPECIFIED; @@ -1489,24 +1481,16 @@ bool diagnostic_report_diagnostic (diagnostic_context *context, diagnostic_info *diagnostic) { - location_t location = diagnostic_location (diagnostic); diagnostic_t orig_diag_kind = diagnostic->kind; gcc_assert (context->m_output_format); /* Give preference to being able to inhibit warnings, before they get reclassified to something else. */ - bool report_warning_p = true; - if (diagnostic->kind == DK_WARNING || diagnostic->kind == DK_PEDWARN) - { - if (context->dc_inhibit_warnings) - return false; - /* Remember the result of the overall system header warning setting - but proceed to also check the inlining context. 
*/ - report_warning_p = diagnostic_report_warnings_p (context, location); - if (!report_warning_p && diagnostic->kind == DK_PEDWARN) - return false; - } + bool was_warning = (diagnostic->kind == DK_WARNING + || diagnostic->kind == DK_PEDWARN); + if (was_warning && context->dc_inhibit_warnings) + return false; if (diagnostic->kind == DK_PEDWARN) { @@ -1546,9 +1530,12 @@ diagnostic_report_diagnostic (diagnostic_context *context, if (!diagnostic_enabled (context, diagnostic)) return false; - if (!report_warning_p && diagnostic->m_iinfo.m_allsyslocs) - /* Bail if the warning is not to be reported because all locations - in the inlining stack (if there is one) are in system headers. */ + if ((was_warning || diagnostic->kind == DK_WARNING) + && ((!context->dc_warn_system_headers + && diagnostic->m_iinfo.m_allsyslocs) + || context->dc_inhibit_warnings)) + /* Bail if the warning is not to be reported because all locations in the + inlining stack (if there is one) are in system headers. */ return false; if (diagnostic->kind != DK_NOTE && diagnostic->kind != DK_ICE) @@ -1738,7 +1725,8 @@ diagnostic_impl (rich_location *richloc, const diagnostic_metadata *metadata, { diagnostic_set_info (&diagnostic, gmsgid, ap, richloc, permissive_error_kind (global_dc)); - diagnostic.option_index = permissive_error_option (global_dc); + diagnostic.option_index = (opt != -1 ? opt + : permissive_error_option (global_dc)); } else { @@ -2034,6 +2022,37 @@ permerror (rich_location *richloc, const char *gmsgid, ...) return ret; } +/* Similar to the above, but controlled by a flag other than -fpermissive. + As above, an error by default or a warning with -fpermissive, but this + diagnostic can also be downgraded by -Wno-error=opt. */ + +bool +permerror_opt (location_t location, int opt, const char *gmsgid, ...) +{ + auto_diagnostic_group d; + va_list ap; + va_start (ap, gmsgid); + rich_location richloc (line_table, location); + bool ret = diagnostic_impl (&richloc, NULL, opt, gmsgid, &ap, DK_PERMERROR); + va_end (ap); + return ret; +} + +/* Same as "permerror" above, but at RICHLOC. */ + +bool +permerror_opt (rich_location *richloc, int opt, const char *gmsgid, ...) +{ + gcc_assert (richloc); + + auto_diagnostic_group d; + va_list ap; + va_start (ap, gmsgid); + bool ret = diagnostic_impl (richloc, NULL, opt, gmsgid, &ap, DK_PERMERROR); + va_end (ap); + return ret; +} + /* A hard error: the code is definitely ill-formed, and an object file will not be produced. */ void diff --git a/gcc/doc/contrib.texi b/gcc/doc/contrib.texi index 031c4ec..0fe4a87 100644 --- a/gcc/doc/contrib.texi +++ b/gcc/doc/contrib.texi @@ -782,6 +782,9 @@ clean-ups and porting work, and maintaining the IRIX, Solaris 2, and Tru64 UNIX ports. @item +Patrick Palka for contributions to the C++ library and front end. + +@item Steven Pemberton for his contribution of @file{enquire} which allowed GCC to determine various properties of the floating point unit and generate @file{float.h} in older versions of GCC. diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi index a3db942..bf941e6 100644 --- a/gcc/doc/extend.texi +++ b/gcc/doc/extend.texi @@ -3055,6 +3055,17 @@ when using these attributes the problem is diagnosed earlier and with exact location of the call even in presence of inline functions or when not emitting debugging information. 
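Stepping back to the diagnostic changes above: the new permerror_opt entry points give front ends a permerror variant tied to a specific warning option, and the check_narrowing hunk uses it with OPT_Wnarrowing. A minimal, hypothetical user-level illustration of the resulting behaviour (an error by default, downgradable with -fpermissive or -Wno-error=narrowing):

int main ()
{
  double d = 3.14;
  int i { d };   /* diagnosed as a narrowing conversion of 'd' from
                    'double' to 'int'; an error by default, a warning
                    under -fpermissive or -Wno-error=narrowing */
  return i;
}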
+@cindex @code{expected_throw} function attribute +@item expected_throw +This attribute, attached to a function, tells the compiler the function +is more likely to raise or propagate an exception than to return, loop +forever, or terminate the program. + +This hint is mostly ignored by the compiler. The only effect is when +it's applied to @code{noreturn} functions and +@samp{-fharden-control-flow-redundancy} is enabled, and +@samp{-fhardcfr-check-noreturn-calls=not-always} is not overridden. + @cindex @code{externally_visible} function attribute @item externally_visible This attribute, attached to a global variable or function, nullifies @@ -22609,6 +22620,15 @@ Intel Core i7 graniterapids CPU. @item graniterapids-d Intel Core i7 graniterapids D CPU. +@item arrowlake +Intel Core i7 Arrow Lake CPU. + +@item arrowlake-s +Intel Core i7 Arrow Lake S CPU. + +@item pantherlake +Intel Core i7 Panther Lake CPU. + @item bonnell Intel Atom Bonnell CPU. @@ -22630,18 +22650,9 @@ Intel Atom Sierra Forest CPU. @item grandridge Intel Atom Grand Ridge CPU. -@item arrowlake -Intel Core i7 Arrow Lake CPU. - -@item arrowlake-s -Intel Core i7 Arrow Lake S CPU. - @item clearwaterforest Intel Atom Clearwater Forest CPU. -@item pantherlake -Intel Core i7 Panther Lake CPU. - @item knl Intel Knights Landing CPU. diff --git a/gcc/doc/generic.texi b/gcc/doc/generic.texi index 6534c35..3f4ba36 100644 --- a/gcc/doc/generic.texi +++ b/gcc/doc/generic.texi @@ -1314,6 +1314,8 @@ The type of the node specifies the alignment of the access. @tindex THROW_EXPR @tindex LSHIFT_EXPR @tindex RSHIFT_EXPR +@tindex LROTATE_EXPR +@tindex RROTATE_EXPR @tindex BIT_IOR_EXPR @tindex BIT_XOR_EXPR @tindex BIT_AND_EXPR @@ -1480,17 +1482,19 @@ a fixed-point value to a floating-point value. @item LSHIFT_EXPR @itemx RSHIFT_EXPR -These nodes represent left and right shifts, respectively. The first -operand is the value to shift; it will always be of integral type. The -second operand is an expression for the number of bits by which to -shift. Right shift should be treated as arithmetic, i.e., the -high-order bits should be zero-filled when the expression has unsigned -type and filled with the sign bit when the expression has signed type. +@itemx LROTATE_EXPR +@itemx RROTATE_EXPR +These nodes represent left and right shifts and rotates, respectively. +The first operand is the value to shift or rotate; it will always be of +integral type. The second operand is an expression for the number of bits +by which to shift or rotate. Right shift should be treated as arithmetic, +i.e., the high-order bits should be zero-filled when the expression has +unsigned type and filled with the sign bit when the expression has signed type. +All other operations are logical, operating on the bit representation. Note that the result is undefined if the second operand is larger than or equal to the first operand's type size. Unlike most nodes, these can have a vector as first operand and a scalar as second operand. - @item BIT_IOR_EXPR @itemx BIT_XOR_EXPR @itemx BIT_AND_EXPR diff --git a/gcc/doc/install.texi b/gcc/doc/install.texi index 31f2234..4035e80 100644 --- a/gcc/doc/install.texi +++ b/gcc/doc/install.texi @@ -1236,8 +1236,8 @@ sysv, aix. @itemx --without-multilib-list Specify what multilibs to build. @var{list} is a comma separated list of values, possibly consisting of a single value. Currently only implemented -for aarch64*-*-*, arm*-*-*, loongarch*-*-*, riscv*-*-*, sh*-*-* and -x86-64-*-linux*. 
The accepted values and meaning for each target is given +for aarch64*-*-*, amdgcn*-*-*, arm*-*-*, loongarch*-*-*, riscv*-*-*, sh*-*-* +and x86-64-*-linux*. The accepted values and meaning for each target is given below. @table @code @@ -1250,6 +1250,15 @@ default run-time library will be built. If @var{list} is default set of libraries is selected based on the value of @option{--target}. +@item amdgcn*-*-* +@var{list} is a comma separated list of ISA names (allowed values: @code{fiji}, +@code{gfx900}, @code{gfx906}, @code{gfx908}, @code{gfx90a}). It ought not +include the name of the default ISA, specified via @option{--with-arch}. If +@var{list} is empty, then there will be no multilibs and only the default +run-time library will be built. If @var{list} is @code{default} or +@option{--with-multilib-list=} is not specified, then the default set of +libraries is selected. + @item arm*-*-* @var{list} is a comma separated list of @code{aprofile} and @code{rmprofile} to build multilibs for A or R and M architecture @@ -3922,6 +3931,12 @@ To run the binaries, install the HSA Runtime from the @file{libexec/gcc/amdhsa-amdhsa/@var{version}/gcn-run} to launch them on the GPU. +To enable support for GCN3 Fiji devices (gfx803), GCC has to be configured with +@option{--with-arch=@code{fiji}} or +@option{--with-multilib-list=@code{fiji},...}. Note that support for Fiji +devices has been removed in ROCm 4.0 and support in LLVM is deprecated and will +be removed in the future. + @html <hr /> @end html diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi index a0da7f9..17aaa8c 100644 --- a/gcc/doc/invoke.texi +++ b/gcc/doc/invoke.texi @@ -231,7 +231,7 @@ in the following sections. -fnew-inheriting-ctors -fnew-ttp-matching -fno-nonansi-builtins -fnothrow-opt -fno-operator-names --fno-optional-diags -fpermissive +-fno-optional-diags -fno-pretty-templates -fno-rtti -fsized-deallocation -ftemplate-backtrace-limit=@var{n} @@ -323,7 +323,7 @@ Objective-C and Objective-C++ Dialects}. @item Warning Options @xref{Warning Options,,Options to Request or Suppress Warnings}. @gccoptlist{-fsyntax-only -fmax-errors=@var{n} -Wpedantic --pedantic-errors +-pedantic-errors -fpermissive -w -Wextra -Wall -Wabi=@var{n} -Waddress -Wno-address-of-packed-member -Waggregate-return -Walloc-size-larger-than=@var{byte-size} -Walloc-zero @@ -642,6 +642,9 @@ Objective-C and Objective-C++ Dialects}. -fsanitize-undefined-trap-on-error -fbounds-check -fcf-protection=@r{[}full@r{|}branch@r{|}return@r{|}none@r{|}check@r{]} -fharden-compares -fharden-conditional-branches +-fharden-control-flow-redundancy -fhardcfr-skip-leaf +-fhardcfr-check-exceptions -fhardcfr-check-returning-calls +-fhardcfr-check-noreturn-calls=@r{[}always@r{|}no-xthrow@r{|}nothrow@r{|}never@r{]} -fstack-protector -fstack-protector-all -fstack-protector-strong -fstack-protector-explicit -fstack-check -fstack-limit-register=@var{reg} -fstack-limit-symbol=@var{sym} @@ -715,7 +718,8 @@ Objective-C and Objective-C++ Dialects}. -fverbose-asm -fpack-struct[=@var{n}] -fleading-underscore -ftls-model=@var{model} -fstack-reuse=@var{reuse_level} --ftrampolines -ftrapv -fwrapv +-ftrampolines -ftrampoline-impl=@r{[}stack@r{|}heap@r{]} +-ftrapv -fwrapv -fvisibility=@r{[}default@r{|}internal@r{|}hidden@r{|}protected@r{]} -fstrict-volatile-bitfields -fsync-libcalls} @@ -3501,12 +3505,6 @@ Disable diagnostics that the standard says a compiler does not need to issue. Currently, the only such diagnostic issued by G++ is the one for a name having multiple meanings within a class. 
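For reference, the one optional diagnostic G++ currently issues, mentioned in the paragraph above, concerns a name whose meaning changes within a class. A minimal, hypothetical example of the construct (not part of the patch); @option{-fno-optional-diags} suppresses the diagnostic for it:

typedef int T;

struct S
{
  T member;           /* here 'T' refers to the global typedef */
  typedef double T;   /* redeclaration changes the meaning of 'T' inside S */
};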
-@opindex fpermissive
-@item -fpermissive
-Downgrade some diagnostics about nonconformant code from errors to
-warnings. Thus, using @option{-fpermissive} allows some
-nonconforming code to compile.
- -@opindex fno-pretty-templates @opindex fpretty-templates @item -fno-pretty-templates
@@ -6167,6 +6165,18 @@ errors by @option{-pedantic-errors}. For instance:
-Wwrite-strings @r{(C++11 or later)} }
+@opindex fpermissive
+@item -fpermissive
+Downgrade some required diagnostics about nonconformant code from
+errors to warnings. Thus, using @option{-fpermissive} allows some
+nonconforming code to compile. Some C++ diagnostics are controlled
+only by this flag, but it also downgrades some diagnostics that have
+their own flag:
+
+@gccoptlist{
+-Wnarrowing @r{(C++)}
+}
+
@opindex Wall @opindex Wno-all @item -Wall
@@ -15958,6 +15968,16 @@ A value of zero can be used to lift the bound.
A variable whose value is unknown at compilation time and defined outside a SCoP is a parameter of the SCoP.
+@item hardcfr-max-blocks
+Disable @option{-fharden-control-flow-redundancy} for functions with a
+larger number of blocks than the specified value. Zero removes any
+limit.
+
+@item hardcfr-max-inline-blocks
+Force @option{-fharden-control-flow-redundancy} to use out-of-line
+checking for functions with a larger number of basic blocks than the
+specified value.
+
@item loop-block-tile-size
Loop blocking or strip mining transforms, enabled with
@option{-floop-block} or @option{-floop-strip-mine}, strip mine each
@@ -17442,6 +17462,86 @@ condition, and to call @code{__builtin_trap} if the result is
unexpected.
Use with @samp{-fharden-compares} to cover all conditionals.
+@opindex fharden-control-flow-redundancy
+@item -fharden-control-flow-redundancy
+Emit extra code to set booleans when entering basic blocks, and to
+verify and trap, at function exits, when the booleans do not form an
+execution path that is compatible with the control flow graph.
+
+Verification takes place before returns, before mandatory tail calls
+(see below) and, optionally, before escaping exceptions with
+@option{-fhardcfr-check-exceptions}, before returning calls with
+@option{-fhardcfr-check-returning-calls}, and before noreturn calls with
+@option{-fhardcfr-check-noreturn-calls}. Tuning options
+@option{--param hardcfr-max-blocks} and @option{--param
+hardcfr-max-inline-blocks} are available.
+
+Tail call optimization takes place too late to affect control flow
+redundancy; however, calls annotated as mandatory tail calls by language
+front-ends, and any calls marked early enough as potential tail calls,
+would also have verification issued before the call. These
+possibilities are merely theoretical, as the conditions can only be
+met when using custom compiler plugins.
+
+@opindex fhardcfr-skip-leaf
+@item -fhardcfr-skip-leaf
+Disable @option{-fharden-control-flow-redundancy} in leaf functions.
+
+@opindex fhardcfr-check-exceptions
+@opindex fno-hardcfr-check-exceptions
+@item -fhardcfr-check-exceptions
+When @option{-fharden-control-flow-redundancy} is active, check the
+recorded execution path against the control flow graph at exception
+escape points, as if the function body were wrapped with a cleanup
+handler that performed the check and reraised. This option is enabled
+by default; use @option{-fno-hardcfr-check-exceptions} to disable it.
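To make the checkpoints described above concrete, here is a small, hypothetical function annotated with where the recorded execution path is verified when it is compiled with @option{-fharden-control-flow-redundancy} (leaving @option{-fhardcfr-check-exceptions} at its default):

extern int g (int);   /* may throw */

int f (int x)
{
  /* On entry to each basic block of f, the instrumentation marks the
     block as visited.  */
  if (x > 0)
    x = g (x);        /* if g throws, the recorded path is checked against
                         f's CFG before the exception escapes f */
  return x;           /* the recorded path is checked before this return */
}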
+
+@opindex fhardcfr-check-returning-calls
+@opindex fno-hardcfr-check-returning-calls
+@item -fhardcfr-check-returning-calls
+When @option{-fharden-control-flow-redundancy} is active, check the
+recorded execution path against the control flow graph before any
+function call immediately followed by a return of its result, if any, so
+as not to prevent tail-call optimization, whether or not it is
+ultimately optimized to a tail call.
+
+This option is enabled by default whenever sibling call optimizations
+are enabled (see @option{-foptimize-sibling-calls}), but it can be
+enabled (or disabled, using its negated form) explicitly, regardless of
+the optimizations.
+
+@opindex fhardcfr-check-noreturn-calls
+@item -fhardcfr-check-noreturn-calls=@r{[}always@r{|}no-xthrow@r{|}nothrow@r{|}never@r{]}
+When @option{-fharden-control-flow-redundancy} is active, check the
+recorded execution path against the control flow graph before
+@code{noreturn} calls, either all of them (@option{always}), those that
+aren't expected to return control to the caller through an exception
+(@option{no-xthrow}, the default), those that may not return control to
+the caller through an exception either (@option{nothrow}), or none of
+them (@option{never}).
+
+Checking before a @code{noreturn} function that may return control to
+the caller through an exception may cause checking to be performed more
+than once, if the exception is caught in the caller, whether by a
+handler or a cleanup. When @option{-fhardcfr-check-exceptions} is also
+enabled, the compiler will avoid associating a @code{noreturn} call with
+the implicitly-added cleanup handler, since it would be redundant with
+the check performed before the call, but other handlers or cleanups in
+the function, if activated, will modify the recorded execution path and
+check it again when another checkpoint is hit. The checkpoint may even
+be another @code{noreturn} call, so checking may end up performed
+multiple times.
+
+Various optimizers may cause calls to be marked as @code{noreturn}
+and/or @code{nothrow}, even in the absence of the corresponding
+attributes, which may affect the placement of checks before calls, as
+well as the addition of implicit cleanup handlers for them. This
+unpredictability, and the fact that raising and reraising exceptions
+frequently amounts to implicitly calling @code{noreturn} functions, have
+made @option{no-xthrow} the default setting for this option: it excludes
+from the @code{noreturn} treatment only internal functions used to
+(re)raise exceptions, which are not affected by these optimizations.
+
@opindex fstack-protector
@item -fstack-protector
Emit extra code to check for buffer overflows, such as stack smashing
@@ -18951,6 +19051,20 @@ For languages other than Ada, the @code{-ftrampolines} and
trampolines are always generated on platforms that need them
for nested functions.
+@opindex ftrampoline-impl
+@item -ftrampoline-impl=@r{[}stack@r{|}heap@r{]}
+By default, trampolines are generated on the stack. However, certain platforms
+(such as the Apple M1) do not permit an executable stack. Compiling with
+@option{-ftrampoline-impl=heap} generates calls to
+@code{__builtin_nested_func_ptr_created} and
+@code{__builtin_nested_func_ptr_deleted} in order to allocate and
+deallocate trampoline space on the executable heap. These functions are
+implemented in libgcc, and will only be provided on specific targets:
+x86_64 Darwin, x86_64 and aarch64 Linux.
@emph{PLEASE NOTE}: Heap
+trampolines are @emph{not} guaranteed to be correctly deallocated if you
+@code{setjmp}, instantiate nested functions, and then @code{longjmp} back
+to a state prior to having allocated those nested functions.
+
@opindex fvisibility
@item -fvisibility=@r{[}default@r{|}internal@r{|}hidden@r{|}protected@r{]}
Set the default ELF image symbol visibility to the specified option---all
@@ -21091,7 +21205,9 @@ are
@table @samp
@item fiji
-Compile for GCN3 Fiji devices (gfx803).
+Compile for GCN3 Fiji devices (gfx803). Support is deprecated; availability
+depends on how GCC has been configured, see @option{--with-arch} and
+@option{--with-multilib-list}.
@item gfx900
Compile for GCN5 Vega 10 devices (gfx900).
@@ -26219,6 +26335,13 @@ environments where no dynamic link is performed, like firmwares, OS kernels,
executables linked with @option{-static} or @option{-static-pie}.
@option{-mdirect-extern-access} is not compatible with @option{-fPIC} or
@option{-fpic}.
+
+@item loongarch-vect-unroll-limit
+The vectorizer will use available tuning information to determine whether it
+would be beneficial to unroll the main vectorized loop and by how much. This
+parameter sets the upper bound of how much the vectorizer will unroll the main
+loop. The default value is six.
+
@end table
@@ -32839,40 +32962,14 @@ PCONFIG, PKU, VAES, VPCLMULQDQ, SERIALIZE, HRESET, KL, WIDEKL, AVX-VNNI,
AVXIFMA, AVXVNNIINT8, AVXNECONVERT, CMPCCXADD, ENQCMD, UINTR and RAOINT
instruction set support.
-@item arrowlake
-Intel Arrow Lake CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3,
-SSSE3, SSE4.1, SSE4.2, POPCNT, AES, PREFETCHW, PCLMUL, RDRND, XSAVE, XSAVEC,
-XSAVES, XSAVEOPT, FSGSBASE, PTWRITE, RDPID, SGX, GFNI-SSE, CLWB, MOVDIRI,
-MOVDIR64B, CLDEMOTE, WAITPKG, ADCX, AVX, AVX2, BMI, BMI2, F16C, FMA, LZCNT,
-PCONFIG, PKU, VAES, VPCLMULQDQ, SERIALIZE, HRESET, KL, WIDEKL, AVX-VNNI,
-AVXIFMA, AVXVNNIINT8, AVXNECONVERT and CMPCCXADD instruction set support.
-
-@item arrowlake-s
-Intel Arrow Lake S CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3,
-SSSE3, SSE4.1, SSE4.2, POPCNT, AES, PREFETCHW, PCLMUL, RDRND, XSAVE, XSAVEC,
-XSAVES, XSAVEOPT, FSGSBASE, PTWRITE, RDPID, SGX, GFNI-SSE, CLWB, MOVDIRI,
-MOVDIR64B, CLDEMOTE, WAITPKG, ADCX, AVX, AVX2, BMI, BMI2, F16C, FMA, LZCNT,
-PCONFIG, PKU, VAES, VPCLMULQDQ, SERIALIZE, HRESET, KL, WIDEKL, AVX-VNNI,
-AVXIFMA, AVXVNNIINT8, AVXNECONVERT, CMPCCXADD, AVXVNNIINT16, SHA512, SM3
-and SM4 instruction set support.
-
@item clearwaterforest
Intel Clearwater Forest CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3,
SSSE3, SSE4.1, SSE4.2, POPCNT, AES, PREFETCHW, PCLMUL, RDRND, XSAVE, XSAVEC,
XSAVES, XSAVEOPT, FSGSBASE, PTWRITE, RDPID, SGX, GFNI-SSE, CLWB, MOVDIRI,
MOVDIR64B, CLDEMOTE, WAITPKG, ADCX, AVX, AVX2, BMI, BMI2, F16C, FMA, LZCNT,
PCONFIG, PKU, VAES, VPCLMULQDQ, SERIALIZE, HRESET, KL, WIDEKL, AVX-VNNI,
-AVXIFMA, AVXVNNIINT8, AVXNECONVERT, CMPCCXADD, AVXVNNIINT16, SHA512, SM3, SM4,
-USER_MSR and PREFETCHI instruction set support.
-
-@item pantherlake
-Intel Panther Lake CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3,
-SSSE3, SSE4.1, SSE4.2, POPCNT, AES, PREFETCHW, PCLMUL, RDRND, XSAVE, XSAVEC,
-XSAVES, XSAVEOPT, FSGSBASE, PTWRITE, RDPID, SGX, GFNI-SSE, CLWB, MOVDIRI,
-MOVDIR64B, CLDEMOTE, WAITPKG, ADCX, AVX, AVX2, BMI, BMI2, F16C, FMA, LZCNT,
-PCONFIG, PKU, VAES, VPCLMULQDQ, SERIALIZE, HRESET, KL, WIDEKL, AVX-VNNI,
-AVXIFMA, AVXVNNIINT8, AVXNECONVERT, CMPCCXADD, AVXVNNIINT16, SHA512, SM3, SM4
-and PREFETCHI instruction set support.
+ENQCMD, UINTR, AVXIFMA, AVXVNNIINT8, AVXNECONVERT, CMPCCXADD, AVXVNNIINT16, +SHA512, SM3, SM4, USER_MSR and PREFETCHI instruction set support. @item knl Intel Knight's Landing CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3, @@ -32991,6 +33088,33 @@ MOVDIRI, MOVDIR64B, ENQCMD, CLDEMOTE, PTWRITE, WAITPKG, SERIALIZE, TSXLDTRK, UINTR, AMX-BF16, AMX-TILE, AMX-INT8, AVX-VNNI, AVX512FP16, AVX512BF16, AMX-FP16, PREFETCHI and AMX-COMPLEX instruction set support. +@item arrowlake +Intel Arrow Lake CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3, +SSSE3, SSE4.1, SSE4.2, POPCNT, AES, PREFETCHW, PCLMUL, RDRND, XSAVE, XSAVEC, +XSAVES, XSAVEOPT, FSGSBASE, PTWRITE, RDPID, SGX, GFNI-SSE, CLWB, MOVDIRI, +MOVDIR64B, CLDEMOTE, WAITPKG, ADCX, AVX, AVX2, BMI, BMI2, F16C, FMA, LZCNT, +PCONFIG, PKU, VAES, VPCLMULQDQ, SERIALIZE, HRESET, KL, WIDEKL, AVX-VNNI, +UINTR, AVXIFMA, AVXVNNIINT8, AVXNECONVERT and CMPCCXADD instruction set +support. + +@item arrowlake-s +Intel Arrow Lake S CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3, +SSSE3, SSE4.1, SSE4.2, POPCNT, AES, PREFETCHW, PCLMUL, RDRND, XSAVE, XSAVEC, +XSAVES, XSAVEOPT, FSGSBASE, PTWRITE, RDPID, SGX, GFNI-SSE, CLWB, MOVDIRI, +MOVDIR64B, CLDEMOTE, WAITPKG, ADCX, AVX, AVX2, BMI, BMI2, F16C, FMA, LZCNT, +PCONFIG, PKU, VAES, VPCLMULQDQ, SERIALIZE, HRESET, KL, WIDEKL, AVX-VNNI, +UINTR, AVXIFMA, AVXVNNIINT8, AVXNECONVERT, CMPCCXADD, AVXVNNIINT16, SHA512, +SM3 and SM4 instruction set support. + +@item pantherlake +Intel Panther Lake CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3, +SSSE3, SSE4.1, SSE4.2, POPCNT, AES, PREFETCHW, PCLMUL, RDRND, XSAVE, XSAVEC, +XSAVES, XSAVEOPT, FSGSBASE, PTWRITE, RDPID, SGX, GFNI-SSE, CLWB, MOVDIRI, +MOVDIR64B, CLDEMOTE, WAITPKG, ADCX, AVX, AVX2, BMI, BMI2, F16C, FMA, LZCNT, +PCONFIG, PKU, VAES, VPCLMULQDQ, SERIALIZE, HRESET, KL, WIDEKL, AVX-VNNI, +UINTR, AVXIFMA, AVXVNNIINT8, AVXNECONVERT, CMPCCXADD, AVXVNNIINT16, SHA512, +SM3, SM4 and PREFETCHI instruction set support. + @item k6 AMD K6 CPU with MMX instruction set support. diff --git a/gcc/expr.cc b/gcc/expr.cc index 8aed3fc..763bd82 100644 --- a/gcc/expr.cc +++ b/gcc/expr.cc @@ -13206,14 +13206,15 @@ do_store_flag (sepops ops, rtx target, machine_mode mode) || integer_pow2p (arg1)) && (TYPE_PRECISION (ops->type) != 1 || TYPE_UNSIGNED (ops->type))) { - wide_int nz = tree_nonzero_bits (arg0); - gimple *srcstmt = get_def_for_expr (arg0, BIT_AND_EXPR); + tree narg0 = arg0; + wide_int nz = tree_nonzero_bits (narg0); + gimple *srcstmt = get_def_for_expr (narg0, BIT_AND_EXPR); /* If the defining statement was (x & POW2), then use that instead of the non-zero bits. */ if (srcstmt && integer_pow2p (gimple_assign_rhs2 (srcstmt))) { nz = wi::to_wide (gimple_assign_rhs2 (srcstmt)); - arg0 = gimple_assign_rhs1 (srcstmt); + narg0 = gimple_assign_rhs1 (srcstmt); } if (wi::popcount (nz) == 1 @@ -13227,7 +13228,7 @@ do_store_flag (sepops ops, rtx target, machine_mode mode) type = lang_hooks.types.type_for_mode (mode, unsignedp); return expand_single_bit_test (loc, tcode, - arg0, + narg0, bitnum, type, target, mode); } } diff --git a/gcc/flag-types.h b/gcc/flag-types.h index 7466c11..c1852cd 100644 --- a/gcc/flag-types.h +++ b/gcc/flag-types.h @@ -157,6 +157,16 @@ enum stack_reuse_level SR_ALL }; +/* Control Flow Redundancy hardening options for noreturn calls. */ +enum hardcfr_noret +{ + HCFRNR_NEVER, + HCFRNR_NOTHROW, + HCFRNR_NO_XTHROW, + HCFRNR_UNSPECIFIED, + HCFRNR_ALWAYS, +}; + /* The live patching level. 
*/ enum live_patching_level { diff --git a/gcc/fold-const.cc b/gcc/fold-const.cc index 44118e7..4076773 100644 --- a/gcc/fold-const.cc +++ b/gcc/fold-const.cc @@ -10692,9 +10692,8 @@ valid_mask_for_fold_vec_perm_cst_p (tree arg0, tree arg1, /* Ensure that the stepped sequence always selects from the same input pattern. */ - unsigned arg_npatterns - = ((q1 & 1) == 0) ? VECTOR_CST_NPATTERNS (arg0) - : VECTOR_CST_NPATTERNS (arg1); + tree arg = ((q1 & 1) == 0) ? arg0 : arg1; + unsigned arg_npatterns = VECTOR_CST_NPATTERNS (arg); if (!multiple_p (step, arg_npatterns)) { @@ -10702,6 +10701,31 @@ valid_mask_for_fold_vec_perm_cst_p (tree arg0, tree arg1, *reason = "step is not multiple of npatterns"; return false; } + + /* If a1 chooses base element from arg, ensure that it's a natural + stepped sequence, ie, (arg[2] - arg[1]) == (arg[1] - arg[0]) + to preserve arg's encoding. */ + + if (maybe_lt (r1, arg_npatterns)) + { + unsigned HOST_WIDE_INT index; + if (!r1.is_constant (&index)) + return false; + + tree arg_elem0 = vector_cst_elt (arg, index); + tree arg_elem1 = vector_cst_elt (arg, index + arg_npatterns); + tree arg_elem2 = vector_cst_elt (arg, index + arg_npatterns * 2); + + tree step1, step2; + if (!(step1 = const_binop (MINUS_EXPR, arg_elem1, arg_elem0)) + || !(step2 = const_binop (MINUS_EXPR, arg_elem2, arg_elem1)) + || !operand_equal_p (step1, step2, 0)) + { + if (reason) + *reason = "not a natural stepped sequence"; + return false; + } + } } return true; @@ -17165,7 +17189,8 @@ namespace test_fold_vec_perm_cst { static tree build_vec_cst_rand (machine_mode vmode, unsigned npatterns, unsigned nelts_per_pattern, - int step = 0, int threshold = 100) + int step = 0, bool natural_stepped = false, + int threshold = 100) { tree inner_type = lang_hooks.types.type_for_mode (GET_MODE_INNER (vmode), 1); tree vectype = build_vector_type_for_mode (inner_type, vmode); @@ -17180,17 +17205,28 @@ build_vec_cst_rand (machine_mode vmode, unsigned npatterns, // Fill a1 for each pattern for (unsigned i = 0; i < npatterns; i++) - builder.quick_push (build_int_cst (inner_type, rand () % threshold)); - + { + tree a1; + if (natural_stepped) + { + tree a0 = builder[i]; + wide_int a0_val = wi::to_wide (a0); + wide_int a1_val = a0_val + step; + a1 = wide_int_to_tree (inner_type, a1_val); + } + else + a1 = build_int_cst (inner_type, rand () % threshold); + builder.quick_push (a1); + } if (nelts_per_pattern == 2) return builder.build (); for (unsigned i = npatterns * 2; i < npatterns * nelts_per_pattern; i++) { tree prev_elem = builder[i - npatterns]; - int prev_elem_val = TREE_INT_CST_LOW (prev_elem); - int val = prev_elem_val + step; - builder.quick_push (build_int_cst (inner_type, val)); + wide_int prev_elem_val = wi::to_wide (prev_elem); + wide_int val = prev_elem_val + step; + builder.quick_push (wide_int_to_tree (inner_type, val)); } return builder.build (); @@ -17436,7 +17472,7 @@ test_nunits_min_2 (machine_mode vmode) and step (a2 - a1) = 1, step is not a multiple of npatterns in input vector. So return NULL_TREE. */ { - tree arg0 = build_vec_cst_rand (vmode, 2, 3, 1); + tree arg0 = build_vec_cst_rand (vmode, 2, 3, 1, true); tree arg1 = build_vec_cst_rand (vmode, 2, 3, 1); poly_uint64 len = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0)); @@ -17456,7 +17492,7 @@ test_nunits_min_2 (machine_mode vmode) Test that stepped sequence of the pattern selects from arg0. res = { arg1[0], arg0[0], arg0[1], ... 
} // (1, 3) */ { - tree arg0 = build_vec_cst_rand (vmode, 1, 3, 1); + tree arg0 = build_vec_cst_rand (vmode, 1, 3, 1, true); tree arg1 = build_vec_cst_rand (vmode, 1, 3, 1); poly_uint64 len = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0)); @@ -17470,6 +17506,62 @@ test_nunits_min_2 (machine_mode vmode) tree expected_res[] = { ARG1(0), ARG0(0), ARG0(1) }; validate_res (1, 3, res, expected_res); } + + /* Case 6: PR111648 - a1 chooses base element from input vector arg. + In this case ensure that arg has a natural stepped sequence + to preserve arg's encoding. + + As a concrete example, consider: + arg0: { -16, -9, -10, ... } // (1, 3) + arg1: { -12, -5, -6, ... } // (1, 3) + sel = { 0, len, len + 1, ... } // (1, 3) + + This will create res with following encoding: + res = { arg0[0], arg1[0], arg1[1], ... } // (1, 3) + = { -16, -12, -5, ... } + + The step in above encoding would be: (-5) - (-12) = 7 + And hence res[3] would be computed as -5 + 7 = 2. + instead of arg1[2], ie, -6. + Ensure that valid_mask_for_fold_vec_perm_cst returns false + for this case. */ + { + tree arg0 = build_vec_cst_rand (vmode, 1, 3, 1); + tree arg1 = build_vec_cst_rand (vmode, 1, 3, 1); + poly_uint64 len = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0)); + + vec_perm_builder builder (len, 1, 3); + poly_uint64 mask_elems[] = { 0, len, len+1 }; + builder_push_elems (builder, mask_elems); + + vec_perm_indices sel (builder, 2, len); + const char *reason; + /* FIXME: It may happen that build_vec_cst_rand may build a natural + stepped pattern, even if we didn't explicitly tell it to. So folding + may not always fail, but if it does, ensure that's because arg1 does + not have a natural stepped sequence (and not due to other reason) */ + tree res = fold_vec_perm_cst (TREE_TYPE (arg0), arg0, arg1, sel, &reason); + if (res == NULL_TREE) + ASSERT_TRUE (!strcmp (reason, "not a natural stepped sequence")); + } + + /* Case 7: Same as Case 6, except that arg1 contains natural stepped + sequence and thus folding should be valid for this case. */ + { + tree arg0 = build_vec_cst_rand (vmode, 1, 3, 1); + tree arg1 = build_vec_cst_rand (vmode, 1, 3, 1, true); + poly_uint64 len = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0)); + + vec_perm_builder builder (len, 1, 3); + poly_uint64 mask_elems[] = { 0, len, len+1 }; + builder_push_elems (builder, mask_elems); + + vec_perm_indices sel (builder, 2, len); + tree res = fold_vec_perm_cst (TREE_TYPE (arg0), arg0, arg1, sel); + + tree expected_res[] = { ARG0(0), ARG1(0), ARG1(1) }; + validate_res (1, 3, res, expected_res); + } } } diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog index e753eb9..680d6e8 100644 --- a/gcc/fortran/ChangeLog +++ b/gcc/fortran/ChangeLog @@ -1,3 +1,8 @@ +2023-10-18 Tobias Burnus <tobias@codesourcery.com> + + * intrinsic.texi (signal): Add 'intrinsic :: signal, sleep' to + the example to make it safer. + 2023-10-17 Harald Anlauf <anlauf@gmx.de> PR fortran/111837 @@ -580,6 +580,7 @@ or with constant text in a single argument. %l process LINK_SPEC as a spec. %L process LIB_SPEC as a spec. %M Output multilib_os_dir. + %P Output a RUNPATH_OPTION for each directory in startfile_prefixes. %G process LIBGCC_SPEC as a spec. %R Output the concatenation of target_system_root and target_sysroot_suffix. @@ -1183,6 +1184,10 @@ proper position among the other output files. 
*/ # define SYSROOT_HEADERS_SUFFIX_SPEC "" #endif +#ifndef RUNPATH_OPTION +# define RUNPATH_OPTION "-rpath" +#endif + static const char *asm_debug = ASM_DEBUG_SPEC; static const char *asm_debug_option = ASM_DEBUG_OPTION_SPEC; static const char *cpp_spec = CPP_SPEC; @@ -5929,6 +5934,7 @@ struct spec_path_info { size_t append_len; bool omit_relative; bool separate_options; + bool realpaths; }; static void * @@ -5938,6 +5944,16 @@ spec_path (char *path, void *data) size_t len = 0; char save = 0; + /* The path must exist; we want to resolve it to the realpath so that this + can be embedded as a runpath. */ + if (info->realpaths) + path = lrealpath (path); + + /* However, if we failed to resolve it - perhaps because there was a bogus + -B option on the command line, then punt on this entry. */ + if (!path) + return NULL; + if (info->omit_relative && !IS_ABSOLUTE_PATH (path)) return NULL; @@ -6169,6 +6185,22 @@ do_spec_1 (const char *spec, int inswitch, const char *soft_matched_part) info.omit_relative = false; #endif info.separate_options = false; + info.realpaths = false; + + for_each_path (&startfile_prefixes, true, 0, spec_path, &info); + } + break; + + case 'P': + { + struct spec_path_info info; + + info.option = RUNPATH_OPTION; + info.append_len = 0; + info.omit_relative = false; + info.separate_options = true; + /* We want to embed the actual paths that have the libraries. */ + info.realpaths = true; for_each_path (&startfile_prefixes, true, 0, spec_path, &info); } @@ -6495,6 +6527,7 @@ do_spec_1 (const char *spec, int inswitch, const char *soft_matched_part) info.append_len = strlen (info.append); info.omit_relative = false; info.separate_options = true; + info.realpaths = false; for_each_path (&include_prefixes, false, info.append_len, spec_path, &info); diff --git a/gcc/gimple-harden-control-flow.cc b/gcc/gimple-harden-control-flow.cc new file mode 100644 index 0000000..441df5a --- /dev/null +++ b/gcc/gimple-harden-control-flow.cc @@ -0,0 +1,1490 @@ +/* Control flow redundancy hardening. + Copyright (C) 2022 Free Software Foundation, Inc. + Contributed by Alexandre Oliva <oliva@adacore.com>. + +This file is part of GCC. + +GCC is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free +Software Foundation; either version 3, or (at your option) any later +version. + +GCC is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received a copy of the GNU General Public License +along with GCC; see the file COPYING3. If not see +<http://www.gnu.org/licenses/>. 
*/ + +#include "config.h" +#define INCLUDE_ALGORITHM /* find */ +#include "system.h" +#include "coretypes.h" +#include "backend.h" +#include "memmodel.h" +#include "tm_p.h" +#include "tree.h" +#include "fold-const.h" +#include "gimple.h" +#include "gimplify.h" +#include "tree-pass.h" +#include "ssa.h" +#include "gimple-iterator.h" +#include "gimple-pretty-print.h" +#include "tree-cfg.h" +#include "tree-cfgcleanup.h" +#include "tree-eh.h" +#include "except.h" +#include "sbitmap.h" +#include "basic-block.h" +#include "cfghooks.h" +#include "cfgloop.h" +#include "cgraph.h" +#include "alias.h" +#include "varasm.h" +#include "output.h" +#include "langhooks.h" +#include "diagnostic.h" +#include "intl.h" + +namespace { + +/* This pass introduces verification, at function exits, that booleans + set in each basic block during function execution reflect the + control flow graph: for each visited block, check that at least one + predecessor and at least one successor were also visited. This + sort of hardening may detect various kinds of attacks. */ + +/* Define a pass to harden code through control flow redundancy. */ + +const pass_data pass_data_harden_control_flow_redundancy = { + GIMPLE_PASS, + "hardcfr", + OPTGROUP_NONE, + TV_NONE, + PROP_cfg | PROP_ssa, // properties_required + 0, // properties_provided + 0, // properties_destroyed + TODO_cleanup_cfg, // properties_start + 0, // properties_finish +}; + +class pass_harden_control_flow_redundancy : public gimple_opt_pass +{ +public: + pass_harden_control_flow_redundancy (gcc::context *ctxt) + : gimple_opt_pass (pass_data_harden_control_flow_redundancy, ctxt) + {} + opt_pass *clone () { return new pass_harden_control_flow_redundancy (m_ctxt); } + virtual bool gate (function *fun) { + /* Return quickly if the pass is disabled, without checking any of + the conditions that might give rise to warnings that would only + be appropriate if hardening was requested. */ + if (!flag_harden_control_flow_redundancy) + return false; + + /* Functions that return more than once, like setjmp and vfork + (that also gets this flag set), will start recording a path + after the first return, and then may take another path when + they return again. The unterminated path may then be flagged + as an error. ??? We could save the visited array before the + call and restore it if it returns again. */ + if (fun->calls_setjmp) + { + warning_at (DECL_SOURCE_LOCATION (fun->decl), 0, + "%qD calls %<setjmp%> or similar," + " %<-fharden-control-flow-redundancy%> is not supported", + fun->decl); + return false; + } + + /* Some targets bypass the abnormal dispatcher block in nonlocal + gotos, and then we'd miss its visited bit. It might be doable + to make it work uniformly, but this feature is not used often + enough to make it worthwhile. */ + if (fun->has_nonlocal_label) + { + warning_at (DECL_SOURCE_LOCATION (fun->decl), 0, + "%qD receives nonlocal gotos," + " %<-fharden-control-flow-redundancy%> is not supported", + fun->decl); + return false; + } + + if (fun->cfg && param_hardcfr_max_blocks > 0 + && (n_basic_blocks_for_fn (fun) - NUM_FIXED_BLOCKS + > param_hardcfr_max_blocks)) + { + warning_at (DECL_SOURCE_LOCATION (fun->decl), 0, + "%qD has more than %u blocks, the requested" + " maximum for %<-fharden-control-flow-redundancy%>", + fun->decl, param_hardcfr_max_blocks); + return false; + } + + return true; + } + virtual unsigned int execute (function *); +}; + +} + +/* Return TRUE iff CFR checks should be inserted before returning + calls. 
*/ + +static bool +check_returning_calls_p () +{ + return + flag_harden_control_flow_redundancy_check_returning_calls > 0 + || (flag_harden_control_flow_redundancy_check_returning_calls < 0 + /* Gates pass_tail_calls. */ + && flag_optimize_sibling_calls + /* Gates pass_all_optimizations. */ + && optimize >= 1 && !optimize_debug); +} + +/* Scan BB from the end, updating *RETPTR if given as return stmts and + copies are found. Return a call or a stmt that cannot appear after + a tail call, or NULL if the top of the block is reached without + finding any. */ + +static gimple * +hardcfr_scan_block (basic_block bb, tree **retptr) +{ + gimple_stmt_iterator gsi; + for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi)) + { + gimple *stmt = gsi_stmt (gsi); + + /* Ignore labels, returns, nops, clobbers and debug stmts. */ + if (gimple_code (stmt) == GIMPLE_LABEL + || gimple_code (stmt) == GIMPLE_NOP + || gimple_code (stmt) == GIMPLE_PREDICT + || gimple_clobber_p (stmt) + || is_gimple_debug (stmt)) + continue; + + if (gimple_code (stmt) == GIMPLE_RETURN) + { + greturn *gret = as_a <greturn *> (stmt); + if (retptr) + { + gcc_checking_assert (!*retptr); + *retptr = gimple_return_retval_ptr (gret); + } + continue; + } + + /* Check for a call. */ + if (is_gimple_call (stmt)) + return stmt; + + /* Allow simple copies to the return value, updating the return + value to be found in earlier assignments. */ + if (retptr && *retptr && gimple_assign_single_p (stmt) + && **retptr == gimple_assign_lhs (stmt)) + { + *retptr = gimple_assign_rhs1_ptr (stmt); + continue; + } + + return stmt; + } + + /* Any other kind of stmt will prevent a tail call. */ + return NULL; +} + +/* Return TRUE iff CALL is to be preceded by a CFR checkpoint, i.e., + if it's a returning call (one whose result is ultimately returned + without intervening non-copy statements) and we're checking + returning calls, a __builtin_return call (noreturn with a path to + the exit block), a must-tail call, or a tail call. */ + +static bool +returning_call_p (gcall *call) +{ + if (!(gimple_call_noreturn_p (call) + || gimple_call_must_tail_p (call) + || gimple_call_tail_p (call) + || check_returning_calls_p ())) + return false; + + /* Quickly check that there's a path to exit compatible with a + returning call. Detect infinite loops by limiting the path + length to the basic block count, and by looking for duplicate + blocks before allocating more memory for the path, for amortized + O(n). */ + auto_vec<basic_block, 10> path; + for (basic_block bb = gimple_bb (call); + bb != EXIT_BLOCK_PTR_FOR_FN (cfun); + bb = single_succ (bb)) + if (!single_succ_p (bb) + || (single_succ_edge (bb)->flags & EDGE_EH) != 0 + || n_basic_blocks_for_fn (cfun) - path.length () <= NUM_FIXED_BLOCKS + || (path.length () == path.allocated () + && std::find (path.begin (), path.end (), bb) != path.end ())) + return false; + else + path.safe_push (bb); + + /* Check the stmts in the blocks and trace the return value. 
*/ + tree *retptr = NULL; + for (;;) + { + gcc_checking_assert (!path.is_empty ()); + basic_block bb = path.pop (); + gimple *stop = hardcfr_scan_block (bb, &retptr); + if (stop) + { + if (stop != call) + return false; + gcc_checking_assert (path.is_empty ()); + break; + } + + gphi *retphi = NULL; + if (retptr && *retptr && TREE_CODE (*retptr) == SSA_NAME + && !SSA_NAME_IS_DEFAULT_DEF (*retptr) + && SSA_NAME_DEF_STMT (*retptr) + && is_a <gphi *> (SSA_NAME_DEF_STMT (*retptr)) + && gimple_bb (SSA_NAME_DEF_STMT (*retptr)) == bb) + { + retphi = as_a <gphi *> (SSA_NAME_DEF_STMT (*retptr)); + gcc_checking_assert (gimple_phi_result (retphi) == *retptr); + } + else + continue; + + gcc_checking_assert (!path.is_empty ()); + edge e = single_succ_edge (path.last ()); + int i = EDGE_COUNT (bb->preds); + while (i--) + if (EDGE_PRED (bb, i) == e) + break; + gcc_checking_assert (i >= 0); + retptr = gimple_phi_arg_def_ptr (retphi, i); + } + + return (gimple_call_noreturn_p (call) + || gimple_call_must_tail_p (call) + || gimple_call_tail_p (call) + || (gimple_call_lhs (call) == (retptr ? *retptr : NULL) + && check_returning_calls_p ())); +} + +typedef auto_vec<edge, 10> chk_edges_t; + +/* Declare for mutual recursion. */ +static bool hardcfr_sibcall_search_preds (basic_block bb, + chk_edges_t &chk_edges, + int &count_chkcall, + auto_sbitmap &chkcall_blocks, + int &count_postchk, + auto_sbitmap &postchk_blocks, + tree *retptr); + +/* Search backwards from the end of BB for a mandatory or potential + sibcall. Schedule the block to be handled sort-of like noreturn if + so. Recurse to preds, with updated RETPTR, if the block only + contains stmts that may follow such a call, scheduling checking at + edges and marking blocks as post-check as needed. Return true iff, + at the end of the block, a check will have already been + performed. */ + +static bool +hardcfr_sibcall_search_block (basic_block bb, + chk_edges_t &chk_edges, + int &count_chkcall, + auto_sbitmap &chkcall_blocks, + int &count_postchk, + auto_sbitmap &postchk_blocks, + tree *retptr) +{ + /* Conditionals and internal exceptions rule out tail calls. */ + if (!single_succ_p (bb) + || (single_succ_edge (bb)->flags & EDGE_EH) != 0) + return false; + + gimple *stmt = hardcfr_scan_block (bb, &retptr); + if (!stmt) + return hardcfr_sibcall_search_preds (bb, chk_edges, + count_chkcall, chkcall_blocks, + count_postchk, postchk_blocks, + retptr); + + if (!is_a <gcall *> (stmt)) + return false; + + /* Avoid disrupting mandatory or early-marked tail calls, + inserting the check before them. This works for + must-tail calls, but tail calling as an optimization is + detected too late for us. + + Also check for noreturn calls here. Noreturn calls won't + normally have edges to exit, so they won't be found here, + but __builtin_return does, and we must check before + it, so handle it like a tail call. */ + gcall *call = as_a <gcall *> (stmt); + if (!(gimple_call_noreturn_p (call) + || gimple_call_must_tail_p (call) + || gimple_call_tail_p (call) + || (gimple_call_lhs (call) == (retptr ? *retptr : NULL) + && check_returning_calls_p ()))) + return false; + + gcc_checking_assert (returning_call_p (call)); + + /* We found a call that is to be preceded by checking. 
*/ + if (bitmap_set_bit (chkcall_blocks, bb->index)) + ++count_chkcall; + else + gcc_unreachable (); + return true; +} + + +/* Search preds of BB for a mandatory or potential sibcall or + returning call, and arrange for the blocks containing them to have + a check inserted before the call, like noreturn calls. If any + preds are found to perform checking, schedule checks at the edges + of those that don't, and mark BB as postcheck. */ + +static bool +hardcfr_sibcall_search_preds (basic_block bb, + chk_edges_t &chk_edges, + int &count_chkcall, + auto_sbitmap &chkcall_blocks, + int &count_postchk, + auto_sbitmap &postchk_blocks, + tree *retptr) +{ + /* For the exit block, we wish to force a check at every + predecessor, so pretend we've already found a pred that had + checking, so that we schedule checking at every one of its pred + edges. */ + bool first = bb->index >= NUM_FIXED_BLOCKS; + bool postchecked = true; + + gphi *retphi = NULL; + if (retptr && *retptr && TREE_CODE (*retptr) == SSA_NAME + && !SSA_NAME_IS_DEFAULT_DEF (*retptr) + && SSA_NAME_DEF_STMT (*retptr) + && is_a <gphi *> (SSA_NAME_DEF_STMT (*retptr)) + && gimple_bb (SSA_NAME_DEF_STMT (*retptr)) == bb) + { + retphi = as_a <gphi *> (SSA_NAME_DEF_STMT (*retptr)); + gcc_checking_assert (gimple_phi_result (retphi) == *retptr); + } + + for (int i = EDGE_COUNT (bb->preds); i--; first = false) + { + edge e = EDGE_PRED (bb, i); + + bool checked + = hardcfr_sibcall_search_block (e->src, chk_edges, + count_chkcall, chkcall_blocks, + count_postchk, postchk_blocks, + !retphi ? retptr + : gimple_phi_arg_def_ptr (retphi, i)); + + if (first) + { + postchecked = checked; + continue; + } + + /* When we first find a checked block, force a check at every + other incoming edge we've already visited, and those we + visit afterwards that don't have their own check, so that + when we reach BB, the check has already been performed. */ + if (!postchecked && checked) + { + for (int j = EDGE_COUNT (bb->preds); --j > i; ) + chk_edges.safe_push (EDGE_PRED (bb, j)); + postchecked = true; + } + if (postchecked && !checked) + chk_edges.safe_push (EDGE_PRED (bb, i)); + } + + if (postchecked && bb->index >= NUM_FIXED_BLOCKS) + { + if (bitmap_set_bit (postchk_blocks, bb->index)) + count_postchk++; + else + gcc_unreachable (); + } + + return postchecked; +} + + +class rt_bb_visited +{ + /* Use a sufficiently wide unsigned type to hold basic block numbers. */ + typedef size_t blknum; + + /* Record the original block count of the function. */ + blknum nblocks; + /* Record the number of bits per VWORD (short for VISITED WORD), an + efficient mode to set and test bits for blocks we visited, and to + encode the CFG in case out-of-line verification is used. */ + unsigned vword_bits; + + /* Hold the unsigned integral VWORD type. */ + tree vword_type; + /* Hold a pointer-to-VWORD type. */ + tree vword_ptr; + + /* Hold a growing sequence used to check, inline or out-of-line, + that VISITED encodes an expected execution path. */ + gimple_seq ckseq; + /* If nonNULL, hold a growing representation of the CFG for + out-of-line testing. */ + tree rtcfg; + + /* Hold the declaration of an array of VWORDs, used as an array of + NBLOCKS-2 bits. */ + tree visited; + + /* If performing inline checking, hold the declarations of boolean + variables used for inline checking.
CKBLK holds the result of + testing whether the VISITED bit corresponding to a predecessor or + successor is set, CKINV inverts that bit, CKPART gets cleared if + a block was not visited or if CKINV for any of its predecessors + or successors is set, and CKFAIL gets set if CKPART remains set + at the end of a block's predecessors or successors list. */ + tree ckfail, ckpart, ckinv, ckblk; + + /* Convert a block index N to a block vindex, the index used to + identify it in the VISITED array. Check that it's in range: + neither ENTRY nor EXIT, but maybe one-past-the-end, to compute + the visited array length. */ + blknum num2idx (blknum n) { + gcc_checking_assert (n >= NUM_FIXED_BLOCKS && n <= nblocks); + return (n - NUM_FIXED_BLOCKS); + } + /* Return the block vindex for BB, that must not be ENTRY or + EXIT. */ + blknum bb2idx (basic_block bb) { + gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) + && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)); + gcc_checking_assert (blknum (bb->index) < nblocks); + return num2idx (bb->index); + } + + /* Compute the type to be used for the VISITED array. */ + tree vtype () + { + blknum n = num2idx (nblocks); + return build_array_type_nelts (vword_type, + (n + vword_bits - 1) / vword_bits); + } + + /* Compute and return the index into VISITED for block BB. If BITP + is non-NULL, also compute and store the bit mask corresponding to + block BB in *BITP, so that (visited[index] & mask) tells whether + BB was visited. */ + tree vwordidx (basic_block bb, tree *bitp = NULL) + { + blknum idx = bb2idx (bb); + if (bitp) + { + unsigned bit = idx % vword_bits; + /* We don't need to adjust shifts to follow native bit + endianness here, all of our uses of the CFG and visited + bitmaps, whether at compile or runtime, are shifted bits on + full words. This adjustment here would require a + corresponding adjustment at runtime, which would be nothing + but undesirable overhead for us. */ + if (0 /* && BITS_BIG_ENDIAN */) + bit = vword_bits - bit - 1; + wide_int wbit = wi::set_bit_in_zero (bit, vword_bits); + *bitp = wide_int_to_tree (vword_type, wbit); + } + return build_int_cst (vword_ptr, idx / vword_bits); + } + + /* Return an expr to access the visited element that holds + information about BB. If BITP is non-NULL, set it to the mask to + tell which bit in that expr refers to BB. */ + tree vword (basic_block bb, tree *bitp = NULL) + { + return build2 (MEM_REF, vword_type, + build1 (ADDR_EXPR, vword_ptr, visited), + int_const_binop (MULT_EXPR, vwordidx (bb, bitp), + fold_convert (vword_ptr, + TYPE_SIZE_UNIT + (vword_type)))); + } + + /* Return an expr that evaluates to true iff BB was marked as + VISITED. Add any gimple stmts to SEQP. */ + tree vindex (basic_block bb, gimple_seq *seqp) + { + if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) + || bb == EXIT_BLOCK_PTR_FOR_FN (cfun)) + return boolean_true_node; + + tree bit, setme = vword (bb, &bit); + tree temp = create_tmp_var (vword_type, ".cfrtemp"); + + gassign *vload = gimple_build_assign (temp, setme); + gimple_seq_add_stmt (seqp, vload); + + gassign *vmask = gimple_build_assign (temp, BIT_AND_EXPR, temp, bit); + gimple_seq_add_stmt (seqp, vmask); + + return build2 (NE_EXPR, boolean_type_node, + temp, build_int_cst (vword_type, 0)); + } + + /* Set the bit corresponding to BB in VISITED. Add to SEQ any + required gimple stmts, and return SEQ, possibly modified.
*/ + gimple_seq vset (basic_block bb, gimple_seq seq = NULL) + { + tree bit, setme = vword (bb, &bit); + tree temp = create_tmp_var (vword_type, ".cfrtemp"); + + gassign *vload = gimple_build_assign (temp, setme); + gimple_seq_add_stmt (&seq, vload); + + gassign *vbitset = gimple_build_assign (temp, BIT_IOR_EXPR, temp, bit); + gimple_seq_add_stmt (&seq, vbitset); + + gassign *vstore = gimple_build_assign (unshare_expr (setme), temp); + gimple_seq_add_stmt (&seq, vstore); + + /* Prevent stores into visited from being deferred, forcing + subsequent bitsets to reload the word rather than reusing + values already in register. The purpose is threefold: make the + bitset get to memory in this block, so that control flow + attacks in functions called in this block don't easily bypass + the bitset; prevent the bitset word from being retained in a + register across blocks, which could, in an attack scenario, + make a later block set more than one bit; and prevent hoisting + or sinking loads or stores of bitset words out of loops or even + throughout functions, which could significantly weaken the + verification. This is equivalent to making the bitsetting + volatile within the function body, but without changing its + type; making the bitset volatile would make inline checking far + less optimizable for no reason. */ + vec<tree, va_gc> *inputs = NULL; + vec<tree, va_gc> *outputs = NULL; + vec_safe_push (outputs, + build_tree_list + (build_tree_list + (NULL_TREE, build_string (2, "=m")), + visited)); + vec_safe_push (inputs, + build_tree_list + (build_tree_list + (NULL_TREE, build_string (1, "m")), + visited)); + gasm *stabilize = gimple_build_asm_vec ("", inputs, outputs, + NULL, NULL); + gimple_seq_add_stmt (&seq, stabilize); + + return seq; + } + +public: + /* Prepare to add control flow redundancy testing to CFUN. */ + rt_bb_visited (int checkpoints) + : nblocks (n_basic_blocks_for_fn (cfun)), + vword_type (NULL), ckseq (NULL), rtcfg (NULL) + { + /* If we've already added a declaration for the builtin checker, + extract vword_type and vword_bits from its declaration. */ + if (tree checkfn = builtin_decl_explicit (BUILT_IN___HARDCFR_CHECK)) + { + tree check_arg_list = TYPE_ARG_TYPES (TREE_TYPE (checkfn)); + tree vword_const_ptr_type = TREE_VALUE (TREE_CHAIN (check_arg_list)); + vword_type = TYPE_MAIN_VARIANT (TREE_TYPE (vword_const_ptr_type)); + vword_bits = tree_to_shwi (TYPE_SIZE (vword_type)); + } + /* Otherwise, select vword_bits, vword_type et al, and use it to + declare the builtin checker. */ + else + { + /* This setting needs to be kept in sync with libgcc/hardcfr.c. + We aim for at least 28 bits, which enables us to refer to as + many as 28 << 28 blocks in a function's CFG. That's way over + 4G blocks. 
*/ + machine_mode VWORDmode; + if (BITS_PER_UNIT >= 28) + { + VWORDmode = QImode; + vword_bits = BITS_PER_UNIT; + } + else if (BITS_PER_UNIT >= 14) + { + VWORDmode = HImode; + vword_bits = 2 * BITS_PER_UNIT; + } + else + { + VWORDmode = SImode; + vword_bits = 4 * BITS_PER_UNIT; + } + + vword_type = lang_hooks.types.type_for_mode (VWORDmode, 1); + gcc_checking_assert (vword_bits == tree_to_shwi (TYPE_SIZE + (vword_type))); + + vword_type = build_variant_type_copy (vword_type); + TYPE_ALIAS_SET (vword_type) = new_alias_set (); + + tree vword_const = build_qualified_type (vword_type, TYPE_QUAL_CONST); + tree vword_const_ptr = build_pointer_type (vword_const); + tree type = build_function_type_list (void_type_node, sizetype, + vword_const_ptr, vword_const_ptr, + NULL_TREE); + tree decl = add_builtin_function_ext_scope + ("__builtin___hardcfr_check", + type, BUILT_IN___HARDCFR_CHECK, BUILT_IN_NORMAL, + "__hardcfr_check", NULL_TREE); + TREE_NOTHROW (decl) = true; + set_builtin_decl (BUILT_IN___HARDCFR_CHECK, decl, true); + } + + /* The checker uses a qualified pointer, so we can't reuse it, + so build a new one. */ + vword_ptr = build_pointer_type (vword_type); + + tree visited_type = vtype (); + visited = create_tmp_var (visited_type, ".cfrvisited"); + + if (nblocks - NUM_FIXED_BLOCKS > blknum (param_hardcfr_max_inline_blocks) + || checkpoints > 1) + { + /* Make sure vword_bits is wide enough for the representation + of nblocks in rtcfg. Compare with vword_bits << vword_bits, + but avoiding overflows, shifting nblocks right instead. If + vword_bits is wider than HOST_WIDE_INT, assume it fits, so + as to avoid undefined shifts. */ + gcc_assert (HOST_BITS_PER_WIDE_INT <= vword_bits + || (((unsigned HOST_WIDE_INT)(num2idx (nblocks)) + >> vword_bits) < vword_bits)); + + /* Build a terminator for the constructor list. */ + rtcfg = build_tree_list (NULL_TREE, NULL_TREE); + return; + } + + ckfail = create_tmp_var (boolean_type_node, ".cfrfail"); + ckpart = create_tmp_var (boolean_type_node, ".cfrpart"); + ckinv = create_tmp_var (boolean_type_node, ".cfrinv"); + ckblk = create_tmp_var (boolean_type_node, ".cfrblk"); + + gassign *ckfail_init = gimple_build_assign (ckfail, boolean_false_node); + gimple_seq_add_stmt (&ckseq, ckfail_init); + } + + /* Insert SEQ before a resx or a call in INSBB. */ + void insert_exit_check_in_block (gimple_seq seq, basic_block insbb) + { + gimple_stmt_iterator gsi = gsi_last_bb (insbb); + + while (!gsi_end_p (gsi)) + if (is_a <gresx *> (gsi_stmt (gsi)) + || is_a <gcall *> (gsi_stmt (gsi))) + break; + else + gsi_prev (&gsi); + + gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT); + } + + /* Insert SEQ on E. */ + void insert_exit_check_on_edge (gimple_seq seq, edge e) + { + gsi_insert_seq_on_edge_immediate (e, seq); + } + + /* Add checking code to CHK_EDGES and CHKCALL_BLOCKS, and + initialization code on the entry edge. Before this point, the + CFG has been undisturbed, and all the needed data has been + collected and safely stowed. */ + void check (chk_edges_t &chk_edges, + int count_chkcall, auto_sbitmap const &chkcall_blocks) + { + /* If we're using out-of-line checking, create and statically + initialize the CFG checking representation, generate the + checker call for the checking sequence, and insert it in all + exit edges, if there's more than one. If there's only one, we + use the same logic as the inline case to insert the check + sequence. */ + if (rtcfg) + { + /* Unreverse the list, and drop the tail node turned into head. 
*/ + rtcfg = TREE_CHAIN (nreverse (rtcfg)); + + /* Turn the indices stored in TREE_PURPOSE into separate + nodes. It was useful to keep them together to enable + combination of masks and for clear separation of + terminators while constructing it, but now we have to turn + it into a sequence of words. */ + for (tree node = rtcfg; node; node = TREE_CHAIN (node)) + { + tree wordidx = TREE_PURPOSE (node); + if (!wordidx) + continue; + + TREE_PURPOSE (node) = NULL_TREE; + TREE_CHAIN (node) = tree_cons (NULL_TREE, + fold_convert (vword_type, wordidx), + TREE_CHAIN (node)); + } + + /* Build the static initializer for the array with the CFG + representation for out-of-line checking. */ + tree init = build_constructor_from_list (NULL_TREE, rtcfg); + TREE_TYPE (init) = build_array_type_nelts (vword_type, + CONSTRUCTOR_NELTS (init)); + char buf[32]; + ASM_GENERATE_INTERNAL_LABEL (buf, "Lhardcfg", + current_function_funcdef_no); + rtcfg = build_decl (UNKNOWN_LOCATION, VAR_DECL, + get_identifier (buf), + TREE_TYPE (init)); + TREE_READONLY (rtcfg) = 1; + TREE_STATIC (rtcfg) = 1; + TREE_ADDRESSABLE (rtcfg) = 1; + TREE_USED (rtcfg) = 1; + DECL_ARTIFICIAL (rtcfg) = 1; + DECL_IGNORED_P (rtcfg) = 1; + DECL_INITIAL (rtcfg) = init; + make_decl_rtl (rtcfg); + varpool_node::finalize_decl (rtcfg); + + /* Add the checker call to ckseq. */ + gcall *call_chk = gimple_build_call (builtin_decl_explicit + (BUILT_IN___HARDCFR_CHECK), 3, + build_int_cst (sizetype, + num2idx (nblocks)), + build1 (ADDR_EXPR, vword_ptr, + visited), + build1 (ADDR_EXPR, vword_ptr, + rtcfg)); + gimple_seq_add_stmt (&ckseq, call_chk); + + gimple *clobber = gimple_build_assign (visited, + build_clobber + (TREE_TYPE (visited))); + gimple_seq_add_stmt (&ckseq, clobber); + + /* If we have multiple exit edges, insert (copies of) + ckseq in all of them. */ + for (int i = chk_edges.length (); i--; ) + { + gimple_seq seq = ckseq; + /* Copy the sequence, unless we're dealing with the + last edge (we're counting down to zero). */ + if (i || count_chkcall) + seq = gimple_seq_copy (seq); + + edge e = chk_edges[i]; + + if (dump_file) + { + if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)) + fprintf (dump_file, + "Inserting out-of-line check in" + " block %i's edge to exit.\n", + e->src->index); + else + fprintf (dump_file, + "Inserting out-of-line check in" + " block %i's edge to postcheck block %i.\n", + e->src->index, e->dest->index); + } + + insert_exit_check_on_edge (seq, e); + + gcc_checking_assert (!bitmap_bit_p (chkcall_blocks, e->src->index)); + } + + sbitmap_iterator it; + unsigned i; + EXECUTE_IF_SET_IN_BITMAP (chkcall_blocks, 0, i, it) + { + basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i); + + gimple_seq seq = ckseq; + gcc_checking_assert (count_chkcall > 0); + if (--count_chkcall) + seq = gimple_seq_copy (seq); + + if (dump_file) + fprintf (dump_file, + "Inserting out-of-line check before stmt in block %i.\n", + bb->index); + + insert_exit_check_in_block (seq, bb); + } + + gcc_checking_assert (count_chkcall == 0); + } + else + { + /* Inline checking requires a single exit edge. 
*/ + gimple *last = gimple_build_assign (visited, + build_clobber + (TREE_TYPE (visited))); + gimple_seq_add_stmt (&ckseq, last); + + if (!count_chkcall) + { + edge e = single_pred_edge (EXIT_BLOCK_PTR_FOR_FN (cfun)); + + if (dump_file) + { + if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)) + fprintf (dump_file, + "Inserting inline check in" + " block %i's edge to exit.\n", + e->src->index); + else + fprintf (dump_file, + "Inserting inline check in" + " block %i's edge to postcheck block %i.\n", + e->src->index, e->dest->index); + } + + insert_exit_check_on_edge (ckseq, e); + } + else + { + gcc_checking_assert (count_chkcall == 1); + + sbitmap_iterator it; + unsigned i; + EXECUTE_IF_SET_IN_BITMAP (chkcall_blocks, 0, i, it) + { + basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i); + + gimple_seq seq = ckseq; + gcc_checking_assert (count_chkcall > 0); + if (--count_chkcall) + seq = gimple_seq_copy (seq); + + if (dump_file) + fprintf (dump_file, + "Inserting inline check before stmt in block %i.\n", + bb->index); + + insert_exit_check_in_block (seq, bb); + } + + gcc_checking_assert (count_chkcall == 0); + } + + /* The inserted ckseq computes CKFAIL at LAST. Now we have to + conditionally trap on it. */ + basic_block insbb = gimple_bb (last); + + /* Create a block with the unconditional trap. */ + basic_block trp = create_empty_bb (insbb); + gimple_stmt_iterator gsit = gsi_after_labels (trp); + + gcall *trap = gimple_build_call (builtin_decl_explicit + (BUILT_IN_TRAP), 0); + gsi_insert_before (&gsit, trap, GSI_SAME_STMT); + + if (BB_PARTITION (insbb)) + BB_SET_PARTITION (trp, BB_COLD_PARTITION); + + if (current_loops) + add_bb_to_loop (trp, current_loops->tree_root); + + /* Insert a conditional branch to the trap block. If the + conditional wouldn't be the last stmt, split the block. */ + gimple_stmt_iterator gsi = gsi_for_stmt (last); + if (!gsi_one_before_end_p (gsi)) + split_block (gsi_bb (gsi), gsi_stmt (gsi)); + + gcond *cond = gimple_build_cond (NE_EXPR, ckfail, + fold_convert (TREE_TYPE (ckfail), + boolean_false_node), + NULL, NULL); + gsi_insert_after (&gsi, cond, GSI_SAME_STMT); + + /* Adjust the edges. */ + single_succ_edge (gsi_bb (gsi))->flags &= ~EDGE_FALLTHRU; + single_succ_edge (gsi_bb (gsi))->flags |= EDGE_FALSE_VALUE; + single_succ_edge (gsi_bb (gsi))->probability + = profile_probability::always (); + edge e = make_edge (gsi_bb (gsi), trp, EDGE_TRUE_VALUE); + e->probability = profile_probability::never (); + gcc_checking_assert (e->dest == trp); + gcc_checking_assert (!e->dest->count.initialized_p ()); + e->dest->count = e->count (); + + /* Set the trap's dominator after splitting. */ + if (dom_info_available_p (CDI_DOMINATORS)) + set_immediate_dominator (CDI_DOMINATORS, trp, gimple_bb (last)); + } + + /* Insert initializers for visited at the entry. Do this after + other insertions, to avoid messing with block numbers. */ + gimple_seq iseq = NULL; + + gcall *vinit = gimple_build_call (builtin_decl_explicit + (BUILT_IN_MEMSET), 3, + build1 (ADDR_EXPR, + build_pointer_type + (TREE_TYPE (visited)), + visited), + integer_zero_node, + TYPE_SIZE_UNIT (TREE_TYPE (visited))); + gimple_seq_add_stmt (&iseq, vinit); + + gsi_insert_seq_on_edge_immediate (single_succ_edge + (ENTRY_BLOCK_PTR_FOR_FN (cfun)), + iseq); + } + + /* Push onto RTCFG a (mask, index) pair to test for IBB when BB is + visited.
XSELF is to be the ENTRY or EXIT block (depending on + whether we're looking at preds or succs), to be remapped to BB + because we can't represent them, and there's no point in testing + them anyway. Return true if no further blocks need to be visited + in the list, because we've already encountered a + self-reference. */ + bool + push_rtcfg_pair (basic_block ibb, basic_block bb, + basic_block xself) + { + /* We don't have a bit to test for the entry and exit + blocks, but it is always visited, so we test for the + block itself, which gets us the right result and + enables the self-test optimization below. */ + if (ibb == xself) + ibb = bb; + + tree mask, idx = vwordidx (ibb, &mask); + /* Combine masks with the same idx, but not if we're going + to optimize for self-test. */ + if (ibb != bb && TREE_PURPOSE (rtcfg) + && tree_int_cst_equal (idx, TREE_PURPOSE (rtcfg))) + TREE_VALUE (rtcfg) = int_const_binop (BIT_IOR_EXPR, mask, + TREE_VALUE (rtcfg)); + else + rtcfg = tree_cons (idx, mask, rtcfg); + + /* For self-tests (i.e., tests that the block itself was + also visited), testing anything else is pointless, + because it's a tautology, so just drop other edges. */ + if (ibb == bb) + { + while (TREE_PURPOSE (TREE_CHAIN (rtcfg))) + TREE_CHAIN (rtcfg) = TREE_CHAIN (TREE_CHAIN (rtcfg)); + return true; + } + + return false; + } + + /* Add to CKSEQ stmts to clear CKPART if OBB is visited. */ + void + build_block_check (basic_block obb) + { + tree vobb = fold_convert (TREE_TYPE (ckblk), + vindex (obb, &ckseq)); + gassign *blkrunp = gimple_build_assign (ckblk, vobb); + gimple_seq_add_stmt (&ckseq, blkrunp); + + gassign *blknotrunp = gimple_build_assign (ckinv, + EQ_EXPR, + ckblk, + fold_convert + (TREE_TYPE (ckblk), + boolean_false_node)); + gimple_seq_add_stmt (&ckseq, blknotrunp); + + gassign *andblk = gimple_build_assign (ckpart, + BIT_AND_EXPR, + ckpart, ckinv); + gimple_seq_add_stmt (&ckseq, andblk); + } + + /* Add to BB code to set its bit in VISITED, and add to RTCFG or + CKSEQ the data or code needed to check BB's predecessors and + successors. If CHECKPOINT, assume the block is a checkpoint, + whether or not it has an edge to EXIT. If POSTCHECK, assume the + block post-dominates checkpoints and therefore no bitmap setting + or checks are to be performed in or for it. Do NOT change the + CFG. */ + void visit (basic_block bb, bool checkpoint, bool postcheck) + { + /* Set the bit in VISITED when entering the block. */ + gimple_stmt_iterator gsi = gsi_after_labels (bb); + if (!postcheck) + gsi_insert_seq_before (&gsi, vset (bb), GSI_SAME_STMT); + + if (rtcfg) + { + if (!postcheck) + { + /* Build a list of (index, mask) terminated by (NULL, 0). + Consolidate masks with the same index when they're + adjacent. First, predecessors. Count backwards, because + we're going to reverse the list. The order shouldn't + matter, but let's not make it surprising. */ + for (int i = EDGE_COUNT (bb->preds); i--; ) + if (push_rtcfg_pair (EDGE_PRED (bb, i)->src, bb, + ENTRY_BLOCK_PTR_FOR_FN (cfun))) + break; + } + rtcfg = tree_cons (NULL_TREE, build_int_cst (vword_type, 0), rtcfg); + + if (!postcheck) + { + /* Then, successors. 
*/ + if (!checkpoint + || !push_rtcfg_pair (EXIT_BLOCK_PTR_FOR_FN (cfun), + bb, EXIT_BLOCK_PTR_FOR_FN (cfun))) + for (int i = EDGE_COUNT (bb->succs); i--; ) + if (push_rtcfg_pair (EDGE_SUCC (bb, i)->dest, bb, + EXIT_BLOCK_PTR_FOR_FN (cfun))) + break; + } + rtcfg = tree_cons (NULL_TREE, build_int_cst (vword_type, 0), rtcfg); + } + else if (!postcheck) + { + /* Schedule test to fail if the block was reached but somehow none + of its predecessors were. */ + tree bit = fold_convert (TREE_TYPE (ckpart), vindex (bb, &ckseq)); + gassign *blkrunp = gimple_build_assign (ckpart, bit); + gimple_seq_add_stmt (&ckseq, blkrunp); + + for (int i = 0, e = EDGE_COUNT (bb->preds); i < e; i++) + build_block_check (EDGE_PRED (bb, i)->src); + gimple *orfailp = gimple_build_assign (ckfail, BIT_IOR_EXPR, + ckfail, ckpart); + gimple_seq_add_stmt (&ckseq, orfailp); + + /* Likewise for successors. */ + gassign *blkruns = gimple_build_assign (ckpart, unshare_expr (bit)); + gimple_seq_add_stmt (&ckseq, blkruns); + + if (checkpoint) + build_block_check (EXIT_BLOCK_PTR_FOR_FN (cfun)); + for (int i = 0, e = EDGE_COUNT (bb->succs); i < e; i++) + build_block_check (EDGE_SUCC (bb, i)->dest); + + gimple *orfails = gimple_build_assign (ckfail, BIT_IOR_EXPR, + ckfail, ckpart); + gimple_seq_add_stmt (&ckseq, orfails); + } + } +}; + +/* Avoid checking before noreturn calls that are known (expected, + really) to finish by throwing an exception, rather than by ending + the program or looping forever. Such functions have to be + annotated, with an attribute (expected_throw) or flag (ECF_XTHROW), + so that exception-raising functions, such as C++'s __cxa_throw, + __cxa_rethrow, and Ada's gnat_rcheck_*, gnat_reraise*, + ada.exception.raise_exception*, and the language-independent + unwinders could be detected here and handled differently from other + noreturn functions. */ +static bool +always_throwing_noreturn_call_p (gimple *stmt) +{ + if (!is_a <gcall *> (stmt)) + return is_a <gresx *> (stmt); + + gcall *call = as_a <gcall *> (stmt); + return (gimple_call_noreturn_p (call) + && gimple_call_expected_throw_p (call)); +} + +/* Control flow redundancy hardening: record the execution path, and + verify at exit that an expected path was taken.
*/ + +unsigned int +pass_harden_control_flow_redundancy::execute (function *fun) +{ + bool const check_at_escaping_exceptions + = (flag_exceptions + && flag_harden_control_flow_redundancy_check_exceptions); + bool const check_before_noreturn_calls + = flag_harden_control_flow_redundancy_check_noreturn > HCFRNR_NEVER; + bool const check_before_nothrow_noreturn_calls + = (check_before_noreturn_calls + && flag_harden_control_flow_redundancy_check_noreturn >= HCFRNR_NOTHROW); + bool const check_before_throwing_noreturn_calls + = (flag_exceptions + && check_before_noreturn_calls + && flag_harden_control_flow_redundancy_check_noreturn > HCFRNR_NOTHROW); + bool const check_before_always_throwing_noreturn_calls + = (flag_exceptions + && check_before_noreturn_calls + && flag_harden_control_flow_redundancy_check_noreturn >= HCFRNR_ALWAYS); + basic_block bb; + basic_block bb_eh_cleanup = NULL; + + if (flag_harden_control_flow_redundancy_skip_leaf) + { + bool found_calls_p = false; + + FOR_EACH_BB_FN (bb, fun) + { + for (gimple_stmt_iterator gsi = gsi_last_bb (bb); + !gsi_end_p (gsi); gsi_prev (&gsi)) + if (is_a <gcall *> (gsi_stmt (gsi))) + { + found_calls_p = true; + break; + } + if (found_calls_p) + break; + } + + if (!found_calls_p) + { + if (dump_file) + fprintf (dump_file, + "Disabling CFR for leaf function, as requested\n"); + + return 0; + } + } + + if (check_at_escaping_exceptions) + { + int lp_eh_cleanup = -1; + + /* Record the preexisting blocks, to avoid visiting newly-created + blocks. */ + auto_sbitmap to_visit (last_basic_block_for_fn (fun)); + bitmap_clear (to_visit); + + FOR_EACH_BB_FN (bb, fun) + bitmap_set_bit (to_visit, bb->index); + + /* Scan the blocks for stmts with escaping exceptions, that + wouldn't be denoted in the CFG, and associate them with an + empty cleanup handler around the whole function. Walk + backwards, so that even when we split the block, */ + sbitmap_iterator it; + unsigned i; + EXECUTE_IF_SET_IN_BITMAP (to_visit, 0, i, it) + { + bb = BASIC_BLOCK_FOR_FN (fun, i); + + for (gimple_stmt_iterator gsi = gsi_last_bb (bb); + !gsi_end_p (gsi); gsi_prev (&gsi)) + { + gimple *stmt = gsi_stmt (gsi); + if (!stmt_could_throw_p (fun, stmt)) + continue; + + /* If it must not throw, or if it already has a handler, + we need not worry about it. */ + if (lookup_stmt_eh_lp (stmt) != 0) + continue; + + /* Don't split blocks at, nor add EH edges to, tail + calls, we will add verification before the call + anyway. */ + if (is_a <gcall *> (stmt) + && (gimple_call_must_tail_p (as_a <gcall *> (stmt)) + || gimple_call_tail_p (as_a <gcall *> (stmt)) + || returning_call_p (as_a <gcall *> (stmt)))) + continue; + + if (!gsi_one_before_end_p (gsi)) + split_block (bb, stmt); + /* A resx or noreturn call needs not be associated with + the cleanup handler if we're going to add checking + before it. We only test cases that didn't require + block splitting because noreturn calls would always + be at the end of blocks, and we test for zero + successors because if there is an edge, it's not + noreturn, as any EH edges would have already been + caught by the lookup_stmt_eh_lp test above. */ + else if (check_before_noreturn_calls + && EDGE_COUNT (bb->succs) == 0 + && (is_a <gresx *> (stmt) + ? check_before_always_throwing_noreturn_calls + : (!is_a <gcall *> (stmt) + || !gimple_call_noreturn_p (stmt)) + ? (gcc_unreachable (), false) + : (!flag_exceptions + || gimple_call_nothrow_p (as_a <gcall *> (stmt))) + ? check_before_nothrow_noreturn_calls + : always_throwing_noreturn_call_p (stmt) + ? 
check_before_always_throwing_noreturn_calls + : check_before_throwing_noreturn_calls)) + { + if (dump_file) + { + fprintf (dump_file, + "Bypassing cleanup for noreturn stmt" + " in block %i:\n", + bb->index); + print_gimple_stmt (dump_file, stmt, 0); + } + continue; + } + + if (!bb_eh_cleanup) + { + bb_eh_cleanup = create_empty_bb (bb); + if (dom_info_available_p (CDI_DOMINATORS)) + set_immediate_dominator (CDI_DOMINATORS, bb_eh_cleanup, bb); + if (current_loops) + add_bb_to_loop (bb_eh_cleanup, current_loops->tree_root); + + /* Make the new block an EH cleanup for the call. */ + eh_region new_r = gen_eh_region_cleanup (NULL); + eh_landing_pad lp = gen_eh_landing_pad (new_r); + tree label = gimple_block_label (bb_eh_cleanup); + lp->post_landing_pad = label; + EH_LANDING_PAD_NR (label) = lp_eh_cleanup = lp->index; + + /* Just propagate the exception. + We will later insert the verifier call. */ + gimple_stmt_iterator ehgsi; + ehgsi = gsi_after_labels (bb_eh_cleanup); + gresx *resx = gimple_build_resx (new_r->index); + gsi_insert_before (&ehgsi, resx, GSI_SAME_STMT); + + if (dump_file) + fprintf (dump_file, + "Created cleanup block %i:\n", + bb_eh_cleanup->index); + } + else if (dom_info_available_p (CDI_DOMINATORS)) + { + basic_block immdom; + immdom = get_immediate_dominator (CDI_DOMINATORS, + bb_eh_cleanup); + if (!dominated_by_p (CDI_DOMINATORS, bb, immdom)) + { + immdom = nearest_common_dominator (CDI_DOMINATORS, + immdom, bb); + set_immediate_dominator (CDI_DOMINATORS, + bb_eh_cleanup, immdom); + } + } + + if (dump_file) + { + fprintf (dump_file, + "Associated cleanup block with stmt in block %i:\n", + bb->index); + print_gimple_stmt (dump_file, stmt, 0); + } + + add_stmt_to_eh_lp (stmt, lp_eh_cleanup); + /* Finally, wire the EH cleanup block into the CFG. */ + edge neeh = make_eh_edges (stmt); + neeh->probability = profile_probability::never (); + gcc_checking_assert (neeh->dest == bb_eh_cleanup); + if (neeh->dest->count.initialized_p ()) + neeh->dest->count += neeh->count (); + else + neeh->dest->count = neeh->count (); + } + } + + if (bb_eh_cleanup) + { + /* A cfg_cleanup after bb_eh_cleanup makes for a more compact + rtcfg, and it avoids bb numbering differences when we split + blocks because of trailing debug insns only. */ + cleanup_tree_cfg (); + gcc_checking_assert (EDGE_COUNT (bb_eh_cleanup->succs) == 0); + } + } + + /* These record blocks with calls that are to be preceded by + checkpoints, such as noreturn calls (if so chosen), must-tail + calls, potential early-marked tail calls, and returning calls (if + so chosen). */ + int count_chkcall = 0; + auto_sbitmap chkcall_blocks (last_basic_block_for_fn (fun)); + bitmap_clear (chkcall_blocks); + + /* We wish to add verification at blocks without successors, such as + noreturn calls (raising or not) and the reraise at the cleanup + block, but not other reraises: they will go through the cleanup + block. */ + if (check_before_noreturn_calls) + FOR_EACH_BB_FN (bb, fun) + { + gimple_stmt_iterator gsi = gsi_last_bb (bb); + if (gsi_end_p (gsi)) + continue; + gimple *stmt = gsi_stmt (gsi); + + if (EDGE_COUNT (bb->succs) == 0) + { + /* A stmt at the end of a block without any successors is + either a resx or a noreturn call without a local + handler. Check that it's one of the desired + checkpoints. */ + if (flag_exceptions && is_a <gresx *> (stmt) + ? (check_before_always_throwing_noreturn_calls + || bb == bb_eh_cleanup) + : (!is_a <gcall *> (stmt) + || !gimple_call_noreturn_p (stmt)) + ? (stmt_can_make_abnormal_goto (stmt) + /* ??? 
Check before indirect nonlocal goto, or + calls thereof? */ + ? false + /* Catch cases in which successors would be + expected. */ + : (gcc_unreachable (), false)) + : (!flag_exceptions + || gimple_call_nothrow_p (as_a <gcall *> (stmt))) + ? check_before_nothrow_noreturn_calls + : always_throwing_noreturn_call_p (stmt) + ? check_before_always_throwing_noreturn_calls + : check_before_throwing_noreturn_calls) + { + if (dump_file) + { + fprintf (dump_file, + "Scheduling check before stmt" + " in succ-less block %i:\n", + bb->index); + print_gimple_stmt (dump_file, stmt, 0); + } + + if (bitmap_set_bit (chkcall_blocks, bb->index)) + count_chkcall++; + else + gcc_unreachable (); + } + continue; + } + + /* If there are no exceptions, it would seem like any noreturn + call must have zero successor edges, but __builtin_return + gets successor edges. We don't want to handle it here, it + will be dealt with in sibcall_search_preds. Otherwise, + check for blocks without non-EH successors, but skip those + with resx stmts and edges (i.e., those other than that in + bb_eh_cleanup), since those will go through bb_eh_cleanup, + that will have been counted as noreturn above because it + has no successors. */ + gcc_checking_assert (bb != bb_eh_cleanup + || !check_at_escaping_exceptions); + if (flag_exceptions && is_a <gresx *> (stmt) + ? check_before_always_throwing_noreturn_calls + : (!is_a <gcall *> (stmt) + || !gimple_call_noreturn_p (stmt)) + ? false + : (!flag_exceptions + || gimple_call_nothrow_p (as_a <gcall *> (stmt))) + ? false /* rather than check_before_nothrow_noreturn_calls */ + : always_throwing_noreturn_call_p (stmt) + ? check_before_always_throwing_noreturn_calls + : check_before_throwing_noreturn_calls) + { + gcc_checking_assert (single_succ_p (bb) + && (single_succ_edge (bb)->flags & EDGE_EH)); + + if (dump_file) + { + fprintf (dump_file, + "Scheduling check before stmt" + " in EH-succ block %i:\n", + bb->index); + print_gimple_stmt (dump_file, stmt, 0); + } + + if (bitmap_set_bit (chkcall_blocks, bb->index)) + count_chkcall++; + else + gcc_unreachable (); + } + } + else if (bb_eh_cleanup) + { + if (bitmap_set_bit (chkcall_blocks, bb_eh_cleanup->index)) + count_chkcall++; + else + gcc_unreachable (); + } + + gcc_checking_assert (!bb_eh_cleanup + || bitmap_bit_p (chkcall_blocks, bb_eh_cleanup->index)); + + /* If we don't have edges to exit nor noreturn calls (including the + cleanup reraise), then we may skip instrumentation: that would + amount to a function that ends with an infinite loop. */ + if (!count_chkcall + && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (fun)->preds) == 0) + { + if (dump_file) + fprintf (dump_file, + "Disabling CFR, no exit paths to check\n"); + + return 0; + } + + /* Search for must-tail calls, early-marked potential tail calls, + and, if requested, returning calls. 
As we introduce early + checks, */ + int count_postchk = 0; + auto_sbitmap postchk_blocks (last_basic_block_for_fn (fun)); + bitmap_clear (postchk_blocks); + chk_edges_t chk_edges; + hardcfr_sibcall_search_preds (EXIT_BLOCK_PTR_FOR_FN (fun), chk_edges, + count_chkcall, chkcall_blocks, + count_postchk, postchk_blocks, + NULL); + + rt_bb_visited vstd (chk_edges.length () + count_chkcall); + + auto_sbitmap combined_blocks (last_basic_block_for_fn (fun)); + bitmap_copy (combined_blocks, chkcall_blocks); + int i; + edge *e; + FOR_EACH_VEC_ELT (chk_edges, i, e) + if (!bitmap_set_bit (combined_blocks, (*e)->src->index)) + /* There may be multiple chk_edges with the same src block; + guard against overlaps with chkcall_blocks only. */ + gcc_assert (!bitmap_bit_p (chkcall_blocks, (*e)->src->index)); + + /* Visit blocks in index order, because building rtcfg depends on + that. Blocks must be compact, which the cleanup_cfg requirement + ensures. This would also enable FOR_EACH_BB_FN to be used to + iterate in index order, but bb_eh_cleanup block splits and + insertions change that. */ + gcc_checking_assert (n_basic_blocks_for_fn (fun) + == last_basic_block_for_fn (fun)); + for (int i = NUM_FIXED_BLOCKS; i < n_basic_blocks_for_fn (fun); i++) + { + bb = BASIC_BLOCK_FOR_FN (fun, i); + gcc_checking_assert (bb->index == i); + vstd.visit (bb, bitmap_bit_p (combined_blocks, i), + bitmap_bit_p (postchk_blocks, i)); + } + + vstd.check (chk_edges, count_chkcall, chkcall_blocks); + + return + TODO_update_ssa + | TODO_cleanup_cfg + | TODO_verify_il; +} + +/* Instantiate a hardcfr pass. */ + +gimple_opt_pass * +make_pass_harden_control_flow_redundancy (gcc::context *ctxt) +{ + return new pass_harden_control_flow_redundancy (ctxt); +} diff --git a/gcc/gimple.cc b/gcc/gimple.cc index 46f2878..7924d90 100644 --- a/gcc/gimple.cc +++ b/gcc/gimple.cc @@ -399,6 +399,10 @@ gimple_build_call_from_tree (tree t, tree fnptrtype) gimple_call_set_from_thunk (call, CALL_FROM_THUNK_P (t)); gimple_call_set_va_arg_pack (call, CALL_EXPR_VA_ARG_PACK (t)); gimple_call_set_nothrow (call, TREE_NOTHROW (t)); + if (fndecl) + gimple_call_set_expected_throw (call, + flags_from_decl_or_type (fndecl) + & ECF_XTHROW); gimple_call_set_by_descriptor (call, CALL_EXPR_BY_DESCRIPTOR (t)); copy_warning (call, t); @@ -1550,6 +1554,8 @@ gimple_call_flags (const gimple *stmt) if (stmt->subcode & GF_CALL_NOTHROW) flags |= ECF_NOTHROW; + if (stmt->subcode & GF_CALL_XTHROW) + flags |= ECF_XTHROW; if (stmt->subcode & GF_CALL_BY_DESCRIPTOR) flags |= ECF_BY_DESCRIPTOR; diff --git a/gcc/gimple.h b/gcc/gimple.h index 2d0ac10..1b0cd4b 100644 --- a/gcc/gimple.h +++ b/gcc/gimple.h @@ -150,6 +150,7 @@ enum gf_mask { GF_CALL_BY_DESCRIPTOR = 1 << 10, GF_CALL_NOCF_CHECK = 1 << 11, GF_CALL_FROM_NEW_OR_DELETE = 1 << 12, + GF_CALL_XTHROW = 1 << 13, GF_OMP_PARALLEL_COMBINED = 1 << 0, GF_OMP_TASK_TASKLOOP = 1 << 0, GF_OMP_TASK_TASKWAIT = 1 << 1, @@ -3561,6 +3562,28 @@ gimple_call_nothrow_p (gcall *s) return (gimple_call_flags (s) & ECF_NOTHROW) != 0; } +/* If EXPECTED_THROW_P is true, GIMPLE_CALL S is a call that is known + to be more likely to throw than to run forever, terminate the + program or return by other means. */ + +static inline void +gimple_call_set_expected_throw (gcall *s, bool expected_throw_p) +{ + if (expected_throw_p) + s->subcode |= GF_CALL_XTHROW; + else + s->subcode &= ~GF_CALL_XTHROW; +} + +/* Return true if S is a call that is more likely to end by + propagating an exception than by other means.
*/ + +static inline bool +gimple_call_expected_throw_p (gcall *s) +{ + return (gimple_call_flags (s) & ECF_XTHROW) != 0; +} + /* If FOR_VAR is true, GIMPLE_CALL S is a call to builtin_alloca that is known to be emitted for VLA objects. Those are wrapped by stack_save/stack_restore calls and hence can't lead to unbounded diff --git a/gcc/objcp/ChangeLog b/gcc/objcp/ChangeLog index b17ed64..d4df075 100644 --- a/gcc/objcp/ChangeLog +++ b/gcc/objcp/ChangeLog @@ -1,3 +1,9 @@ +2023-10-20 Patrick Palka <ppalka@redhat.com> + + * objcp-lang.cc (objcp_tsubst_copy_and_build): Rename to ... + (objcp_tsubst_expr): ... this. Adjust tsubst_copy_and_build + uses. + 2022-11-15 Patrick Palka <ppalka@redhat.com> * objcp-lang.cc (objcp_tsubst_copy_and_build): Adjust calls to diff --git a/gcc/objcp/objcp-lang.cc b/gcc/objcp/objcp-lang.cc index 9887209..5b04cd6 100644 --- a/gcc/objcp/objcp-lang.cc +++ b/gcc/objcp/objcp-lang.cc @@ -50,11 +50,10 @@ struct lang_hooks lang_hooks = LANG_HOOKS_INITIALIZER; there should be very few (if any) routines below. */ tree -objcp_tsubst_copy_and_build (tree t, tree args, tsubst_flags_t complain, - tree in_decl) +objcp_tsubst_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl) { #define RECURSE(NODE) \ - tsubst_copy_and_build (NODE, args, complain, in_decl) + tsubst_expr (NODE, args, complain, in_decl) /* The following two can only occur in Objective-C++. */ diff --git a/gcc/omp-simd-clone.cc b/gcc/omp-simd-clone.cc index c1cb7cc..f611fdb 100644 --- a/gcc/omp-simd-clone.cc +++ b/gcc/omp-simd-clone.cc @@ -255,16 +255,6 @@ ok_for_auto_simd_clone (struct cgraph_node *node) return true; } - -/* Return the number of elements in vector type VECTYPE, which is associated - with a SIMD clone. At present these always have a constant length. */ - -static unsigned HOST_WIDE_INT -simd_clone_subparts (tree vectype) -{ - return TYPE_VECTOR_SUBPARTS (vectype).to_constant (); -} - /* Allocate a fresh `simd_clone' and return it. NARGS is the number of arguments to reserve space for. */ @@ -817,8 +807,14 @@ simd_clone_adjust_argument_types (struct cgraph_node *node) { ipa_adjusted_param adj; memset (&adj, 0, sizeof (adj)); - tree parm = args[i]; - tree parm_type = node->definition ? TREE_TYPE (parm) : parm; + tree parm = NULL_TREE; + tree parm_type = NULL_TREE; + if (i < args.length()) + { + parm = args[i]; + parm_type = node->definition ? 
TREE_TYPE (parm) : parm; + } + adj.base_index = i; adj.prev_clone_index = i; @@ -1028,7 +1024,7 @@ simd_clone_init_simd_arrays (struct cgraph_node *node, } continue; } - if (known_eq (simd_clone_subparts (TREE_TYPE (arg)), + if (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg)), node->simdclone->simdlen)) { tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array))); @@ -1040,7 +1036,7 @@ simd_clone_init_simd_arrays (struct cgraph_node *node, } else { - unsigned int simdlen = simd_clone_subparts (TREE_TYPE (arg)); + poly_uint64 simdlen = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg)); unsigned int times = vector_unroll_factor (node->simdclone->simdlen, simdlen); tree ptype = build_pointer_type (TREE_TYPE (TREE_TYPE (array))); @@ -1226,9 +1222,9 @@ ipa_simd_modify_function_body (struct cgraph_node *node, iter, NULL_TREE, NULL_TREE); adjustments->register_replacement (&(*adjustments->m_adj_params)[j], r); - if (multiple_p (node->simdclone->simdlen, simd_clone_subparts (vectype))) + if (multiple_p (node->simdclone->simdlen, TYPE_VECTOR_SUBPARTS (vectype))) j += vector_unroll_factor (node->simdclone->simdlen, - simd_clone_subparts (vectype)) - 1; + TYPE_VECTOR_SUBPARTS (vectype)) - 1; } adjustments->sort_replacements (); @@ -1557,7 +1553,7 @@ simd_clone_adjust (struct cgraph_node *node) mask = gimple_assign_lhs (g); g = gimple_build_assign (make_ssa_name (TREE_TYPE (mask)), BIT_AND_EXPR, mask, - build_int_cst (TREE_TYPE (mask), 1)); + build_one_cst (TREE_TYPE (mask))); gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING); mask = gimple_assign_lhs (g); } diff --git a/gcc/params.opt b/gcc/params.opt index fffa8b1..f1202ab 100644 --- a/gcc/params.opt +++ b/gcc/params.opt @@ -174,6 +174,14 @@ Maximum number of arrays per SCoP. Common Joined UInteger Var(param_graphite_max_nb_scop_params) Init(10) Param Optimization Maximum number of parameters in a SCoP. +-param=hardcfr-max-blocks= +Common Joined UInteger Var(param_hardcfr_max_blocks) Init(0) Param Optimization +Maximum number of blocks for -fharden-control-flow-redundancy. + +-param=hardcfr-max-inline-blocks= +Common Joined UInteger Var(param_hardcfr_max_inline_blocks) Init(16) Param Optimization +Maximum number of blocks for in-line -fharden-control-flow-redundancy. + -param=hash-table-verification-limit= Common Joined UInteger Var(param_hash_table_verification_limit) Init(10) Param The number of elements for which hash table verification is done for each searched element. diff --git a/gcc/passes.def b/gcc/passes.def index df7965d..1e1950b 100644 --- a/gcc/passes.def +++ b/gcc/passes.def @@ -193,6 +193,7 @@ along with GCC; see the file COPYING3. If not see NEXT_PASS (pass_omp_device_lower); NEXT_PASS (pass_omp_target_link); NEXT_PASS (pass_adjust_alignment); + NEXT_PASS (pass_harden_control_flow_redundancy); NEXT_PASS (pass_all_optimizations); PUSH_INSERT_PASSES_WITHIN (pass_all_optimizations) NEXT_PASS (pass_remove_cgraph_callee_edges); diff --git a/gcc/rtl-ssa/access-utils.h b/gcc/rtl-ssa/access-utils.h index fbaaaa2..84d386b 100644 --- a/gcc/rtl-ssa/access-utils.h +++ b/gcc/rtl-ssa/access-utils.h @@ -51,6 +51,19 @@ memory_access (T accesses) -> decltype (accesses[0]) return nullptr; } +// If ACCESSES has a memory access, drop it. Otherwise, return ACCESSES +// unchanged. 
+template<typename T> +inline T +drop_memory_access (T accesses) +{ + if (!memory_access (accesses)) + return accesses; + + access_array arr (accesses); + return T (arr.begin (), accesses.size () - 1); +} + // If sorted array ACCESSES includes a reference to REGNO, return the // access, otherwise return null. template<typename T> diff --git a/gcc/rtl-ssa/accesses.cc b/gcc/rtl-ssa/accesses.cc index f12b5f4..774ab9d 100644 --- a/gcc/rtl-ssa/accesses.cc +++ b/gcc/rtl-ssa/accesses.cc @@ -1239,6 +1239,14 @@ function_info::add_use (use_info *use) insert_use_before (use, neighbor->value ()); } +void +function_info::reparent_use (use_info *use, set_info *new_def) +{ + remove_use (use); + use->set_def (new_def); + add_use (use); +} + // If USE has a known definition, remove USE from that definition's list // of uses. Also remove if it from the associated splay tree, if any. void diff --git a/gcc/rtl-ssa/changes.cc b/gcc/rtl-ssa/changes.cc index c48ddd2..73ab3cc 100644 --- a/gcc/rtl-ssa/changes.cc +++ b/gcc/rtl-ssa/changes.cc @@ -370,8 +370,11 @@ update_insn_in_place (insn_change &change) // Finalize the new list of definitions and uses in CHANGE, removing // any uses and definitions that are no longer needed, and converting // pending clobbers into actual definitions. +// +// POS gives the final position of INSN, which hasn't yet been moved into +// place. void -function_info::finalize_new_accesses (insn_change &change) +function_info::finalize_new_accesses (insn_change &change, insn_info *pos) { insn_info *insn = change.insn (); @@ -462,13 +465,34 @@ function_info::finalize_new_accesses (insn_change &change) // Add (possibly temporary) uses to m_temp_uses for each resource. // If there are multiple references to the same resource, aggregate // information in the modes and flags. + use_info *mem_use = nullptr; for (rtx_obj_reference ref : properties.refs ()) if (ref.is_read ()) { unsigned int regno = ref.regno; machine_mode mode = ref.is_reg () ? ref.mode : BLKmode; use_info *use = find_access (unshared_uses, ref.regno); - gcc_assert (use); + if (!use) + { + // For now, we only support inferring uses of mem. + gcc_assert (regno == MEM_REGNO); + + if (mem_use) + { + mem_use->record_reference (ref, false); + continue; + } + + resource_info resource { mode, regno }; + auto def = find_def (resource, pos).prev_def (pos); + auto set = safe_dyn_cast <set_info *> (def); + gcc_assert (set); + mem_use = allocate<use_info> (insn, resource, set); + mem_use->record_reference (ref, true); + m_temp_uses.safe_push (mem_use); + continue; + } + if (use->m_has_been_superceded) { // This is the first reference to the resource. @@ -656,7 +680,8 @@ function_info::change_insns (array_slice<insn_change *> changes) // Finalize the new list of accesses for the change. Don't install // them yet, so that we still have access to the old lists below. - finalize_new_accesses (change); + finalize_new_accesses (change, + placeholder ? placeholder : insn); } placeholders[i] = placeholder; } @@ -681,7 +706,11 @@ function_info::change_insns (array_slice<insn_change *> changes) insn_change &change = *changes[i]; insn_info *insn = change.insn (); if (change.is_deletion ()) - remove_insn (insn); + { + if (rtx_insn *rtl = insn->rtl ()) + ::remove_insn (rtl); // Remove the underlying RTL insn. + remove_insn (insn); + } else if (insn_info *placeholder = placeholders[i]) { // Check if earlier movements turned a move into a no-op. 
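[Editorial note, not part of the commit.] The two rtl-ssa additions above, drop_memory_access (access-utils.h) and function_info::reparent_use (accesses.cc), are small helpers for passes that rewrite instructions via insn_change. The following is a minimal usage sketch rather than a definitive implementation: the two static functions and their surrounding pass context are hypothetical, the usual GCC include prelude is assumed, and validation steps (recog, change_insns) are omitted.

// Hypothetical client code, for illustration only; assumes the standard
// GCC prelude (config.h, system.h, coretypes.h, backend.h, rtl.h, ...)
// has been included ahead of this header.
#include "rtl-ssa.h"

using namespace rtl_ssa;

// For an insn that has been rewritten so it no longer reads memory:
// rtl-ssa keeps the memory access, if any, as the last element of an
// access array, so drop_memory_access simply trims that element.
static use_array
uses_without_memory (insn_info *insn)
{
  return drop_memory_access (insn->uses ());
}

// Re-point USE at NEW_DEF instead of its current definition; the new
// function_info::reparent_use keeps the definition's use list (and any
// associated splay tree) consistent while doing so.
static void
retarget_use (function_info *ssa, use_info *use, set_info *new_def)
{
  ssa->reparent_use (use, new_def);
}

Both helpers are building blocks for rewrites like the finalize_new_accesses change above, where an insn's memory reference can appear or disappear while its new access lists are being assembled.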
diff --git a/gcc/rtl-ssa/functions.h b/gcc/rtl-ssa/functions.h index 8b53b26..73690a0 100644 --- a/gcc/rtl-ssa/functions.h +++ b/gcc/rtl-ssa/functions.h @@ -159,6 +159,9 @@ public: // Like change_insns, but for a single change CHANGE. void change_insn (insn_change &change); + // Given a use USE, re-parent it to get its def from NEW_DEF. + void reparent_use (use_info *use, set_info *new_def); + // If the changes that have been made to instructions require updates // to the CFG, perform those updates now. Return true if something changed. // If it did: @@ -262,7 +265,7 @@ private: insn_info *add_placeholder_after (insn_info *); void possibly_queue_changes (insn_change &); - void finalize_new_accesses (insn_change &); + void finalize_new_accesses (insn_change &, insn_info *); void apply_changes_to_insn (insn_change &); void init_function_data (); diff --git a/gcc/rtl-ssa/insns.cc b/gcc/rtl-ssa/insns.cc index a0c2fec..f970375 100644 --- a/gcc/rtl-ssa/insns.cc +++ b/gcc/rtl-ssa/insns.cc @@ -291,9 +291,17 @@ function_info::add_insn_after (insn_info *insn, insn_info *after) first->set_last_debug_insn (insn); } else // !insn->is_debug_insn () && next->is_debug_insn () - // At present we don't (need to) support inserting a nondebug - // instruction between two existing debug instructions. - gcc_assert (!after->is_debug_insn ()); + { + // At present we don't (need to) support inserting a nondebug + // instruction between two existing debug instructions. + gcc_assert (!after->is_debug_insn ()); + + // Find the next nondebug insn and update its previous pointer + // to point to INSN. + auto next_nondebug = next->last_debug_insn ()->next_any_insn (); + gcc_checking_assert (!next_nondebug->is_debug_insn ()); + next_nondebug->set_prev_sametype_insn (insn); + } // If AFTER and NEXT are separated by at least two points, we can // use a unique point number for INSN. Otherwise INSN will have diff --git a/gcc/rust/ChangeLog b/gcc/rust/ChangeLog index 6c88612..186ff4c 100644 --- a/gcc/rust/ChangeLog +++ b/gcc/rust/ChangeLog @@ -1,3 +1,12 @@ +2023-10-20 Patrick Palka <ppalka@redhat.com> + + PR rust/111899 + * backend/rust-constexpr.cc (potential_constant_expression_1): + Remove NON_DEPENDENT_EXPR handling. + * backend/rust-tree.cc (mark_exp_read): Likewise. + (mark_use): Likewise. + (lvalue_kind): Likewise. + 2023-09-28 Richard Sandiford <richard.sandiford@arm.com> * backend/rust-constexpr.cc (rs_fold_indirect_ref): Remove unused diff --git a/gcc/rust/backend/rust-constexpr.cc b/gcc/rust/backend/rust-constexpr.cc index 55c2ccd..bfd7d95 100644 --- a/gcc/rust/backend/rust-constexpr.cc +++ b/gcc/rust/backend/rust-constexpr.cc @@ -6156,7 +6156,6 @@ potential_constant_expression_1 (tree t, bool want_rval, bool strict, bool now, case CLEANUP_POINT_EXPR: case EXPR_STMT: case PAREN_EXPR: - case NON_DEPENDENT_EXPR: /* For convenience. */ case LOOP_EXPR: case EXIT_EXPR: diff --git a/gcc/rust/backend/rust-tree.cc b/gcc/rust/backend/rust-tree.cc index a2c9c3f..cdb7909 100644 --- a/gcc/rust/backend/rust-tree.cc +++ b/gcc/rust/backend/rust-tree.cc @@ -72,7 +72,6 @@ mark_exp_read (tree exp) case ADDR_EXPR: case INDIRECT_REF: case FLOAT_EXPR: - case NON_DEPENDENT_EXPR: case VIEW_CONVERT_EXPR: mark_exp_read (TREE_OPERAND (exp, 0)); break; @@ -128,7 +127,6 @@ mark_use (tree expr, bool rvalue_p, bool read_p, switch (TREE_CODE (expr)) { case COMPONENT_REF: - case NON_DEPENDENT_EXPR: recurse_op[0] = true; break; case COMPOUND_EXPR: @@ -4575,7 +4573,6 @@ lvalue_kind (const_tree ref) lvalues. 
*/ return (DECL_NONSTATIC_MEMBER_FUNCTION_P (ref) ? clk_none : clk_ordinary); - case NON_DEPENDENT_EXPR: case PAREN_EXPR: return lvalue_kind (TREE_OPERAND (ref, 0)); diff --git a/gcc/stor-layout.h b/gcc/stor-layout.h index e776892..589ce33 100644 --- a/gcc/stor-layout.h +++ b/gcc/stor-layout.h @@ -36,7 +36,6 @@ extern void place_field (record_layout_info, tree); extern void compute_record_mode (tree); extern void finish_bitfield_layout (tree); extern void finish_record_layout (record_layout_info, int); -extern unsigned int element_precision (const_tree); extern void finalize_size_functions (void); extern void fixup_unsigned_type (tree); extern void initialize_sizetypes (void); diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog index 93d4abe..413c9b7 100644 --- a/gcc/testsuite/ChangeLog +++ b/gcc/testsuite/ChangeLog @@ -1,3 +1,501 @@ +2023-10-21 Florian Weimer <fweimer@redhat.com> + + * gcc.c-torture/compile/20000403-1.c: Compile with -std=gnu89. + * gcc.c-torture/compile/20000511-1.c: Likewise. + * gcc.c-torture/compile/20000804-1.c: Likewise. + * gcc.c-torture/compile/20020418-1.c: Likewise. + * gcc.c-torture/compile/20020927-1.c: Likewise. + * gcc.c-torture/compile/20030109-1.c: Likewise. + * gcc.c-torture/compile/20030224-1.c: Likewise. + * gcc.c-torture/compile/20030415-1.c: Likewise. + * gcc.c-torture/compile/20030612-1.c: Likewise. + * gcc.c-torture/compile/20030917-1.c: Likewise. + * gcc.c-torture/compile/20031113-1.c: Likewise. + * gcc.c-torture/compile/20031220-2.c: Likewise. + * gcc.c-torture/compile/20040309-1.c: Likewise. + * gcc.c-torture/compile/20040310-1.c: Likewise. + * gcc.c-torture/compile/20040317-3.c: Likewise. + * gcc.c-torture/compile/20040817-1.c: Likewise. + * gcc.c-torture/compile/20091215-1.c: Likewise. + * gcc.c-torture/compile/86.c: Likewise. + * gcc.c-torture/compile/900216-1.c: Likewise. + * gcc.c-torture/compile/900313-1.c: Likewise. + * gcc.c-torture/compile/900407-1.c: Likewise. + * gcc.c-torture/compile/900516-1.c: Likewise. + * gcc.c-torture/compile/920409-2.c: Likewise. + * gcc.c-torture/compile/920415-1.c: Likewise. + * gcc.c-torture/compile/920428-1.c: Likewise. + * gcc.c-torture/compile/920428-5.c: Likewise. + * gcc.c-torture/compile/920428-7.c: Likewise. + * gcc.c-torture/compile/920501-1.c: Likewise. + * gcc.c-torture/compile/920501-13.c: Likewise. + * gcc.c-torture/compile/920501-15.c: Likewise. + * gcc.c-torture/compile/920501-16.c: Likewise. + * gcc.c-torture/compile/920501-18.c: Likewise. + * gcc.c-torture/compile/920501-20.c: Likewise. + * gcc.c-torture/compile/920501-6.c: Likewise. + * gcc.c-torture/compile/920501-7.c: Likewise. + * gcc.c-torture/compile/920502-1.c: Likewise. + * gcc.c-torture/compile/920502-2.c: Likewise. + * gcc.c-torture/compile/920520-1.c: Likewise. + * gcc.c-torture/compile/920521-1.c: Likewise. + * gcc.c-torture/compile/920608-1.c: Likewise. + * gcc.c-torture/compile/920617-1.c: Likewise. + * gcc.c-torture/compile/920617-2.c: Likewise. + * gcc.c-torture/compile/920625-1.c: Likewise. + * gcc.c-torture/compile/920625-2.c: Likewise. + * gcc.c-torture/compile/920626-1.c: Likewise. + * gcc.c-torture/compile/920706-1.c: Likewise. + * gcc.c-torture/compile/920710-2.c: Likewise. + * gcc.c-torture/compile/920723-1.c: Likewise. + * gcc.c-torture/compile/920808-1.c: Likewise. + * gcc.c-torture/compile/920809-1.c: Likewise. + * gcc.c-torture/compile/920817-1.c: Likewise. + * gcc.c-torture/compile/920831-1.c: Likewise. + * gcc.c-torture/compile/920917-1.c: Likewise. + * gcc.c-torture/compile/920928-2.c: Likewise. 
+ * gcc.c-torture/compile/920928-5.c: Likewise. + * gcc.c-torture/compile/921012-1.c: Likewise. + * gcc.c-torture/compile/921021-1.c: Likewise. + * gcc.c-torture/compile/921024-1.c: Likewise. + * gcc.c-torture/compile/921103-1.c: Likewise. + * gcc.c-torture/compile/921109-1.c: Likewise. + * gcc.c-torture/compile/921111-1.c: Likewise. + * gcc.c-torture/compile/921116-2.c: Likewise. + * gcc.c-torture/compile/921118-1.c: Likewise. + * gcc.c-torture/compile/921202-1.c: Likewise. + * gcc.c-torture/compile/921202-2.c: Likewise. + * gcc.c-torture/compile/921203-1.c: Likewise. + * gcc.c-torture/compile/921203-2.c: Likewise. + * gcc.c-torture/compile/921206-1.c: Likewise. + * gcc.c-torture/compile/930109-1.c: Likewise. + * gcc.c-torture/compile/930111-1.c: Likewise. + * gcc.c-torture/compile/930117-1.c: Likewise. + * gcc.c-torture/compile/930118-1.c: Likewise. + * gcc.c-torture/compile/930120-1.c: Likewise. + * gcc.c-torture/compile/930217-1.c: Likewise. + * gcc.c-torture/compile/930325-1.c: Likewise. + * gcc.c-torture/compile/930411-1.c: Likewise. + * gcc.c-torture/compile/930427-2.c: Likewise. + * gcc.c-torture/compile/930503-2.c: Likewise. + * gcc.c-torture/compile/930506-2.c: Likewise. + * gcc.c-torture/compile/930513-2.c: Likewise. + * gcc.c-torture/compile/930530-1.c: Likewise. + * gcc.c-torture/compile/930602-1.c: Likewise. + * gcc.c-torture/compile/930618-1.c: Likewise. + * gcc.c-torture/compile/930623-1.c: Likewise. + * gcc.c-torture/compile/931003-1.c: Likewise. + * gcc.c-torture/compile/931013-1.c: Likewise. + * gcc.c-torture/compile/931013-2.c: Likewise. + * gcc.c-torture/compile/931102-2.c: Likewise. + * gcc.c-torture/compile/931203-1.c: Likewise. + * gcc.c-torture/compile/940718-1.c: Likewise. + * gcc.c-torture/compile/941014-1.c: Likewise. + * gcc.c-torture/compile/941014-2.c: Likewise. + * gcc.c-torture/compile/941014-3.c: Likewise. + * gcc.c-torture/compile/941014-4.c: Likewise. + * gcc.c-torture/compile/941111-1.c: Likewise. + * gcc.c-torture/compile/941113-1.c: Likewise. + * gcc.c-torture/compile/950124-1.c: Likewise. + * gcc.c-torture/compile/950329-1.c: Likewise. + * gcc.c-torture/compile/950612-1.c: Likewise. + * gcc.c-torture/compile/950618-1.c: Likewise. + * gcc.c-torture/compile/950719-1.c: Likewise. + * gcc.c-torture/compile/950910-1.c: Likewise. + * gcc.c-torture/compile/950922-1.c: Likewise. + * gcc.c-torture/compile/951106-1.c: Likewise. + * gcc.c-torture/compile/951222-1.c: Likewise. + * gcc.c-torture/compile/960106-1.c: Likewise. + * gcc.c-torture/compile/960319-1.c: Likewise. + * gcc.c-torture/compile/960829-1.c: Likewise. + * gcc.c-torture/compile/970206-1.c: Likewise. + * gcc.c-torture/compile/980825-1.c: Likewise. + * gcc.c-torture/compile/990829-1.c: Likewise. + * gcc.c-torture/compile/991213-2.c: Likewise. + +2023-10-21 Pan Li <pan2.li@intel.com> + + PR target/111857 + * gcc.target/riscv/rvv/autovec/vls-vlmax/perm-4.c: Adjust checker. + * gcc.target/riscv/rvv/autovec/vls/def.h: Add help define. + * gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-0.c: New test. + * gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-1.c: New test. + * gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-2.c: New test. + * gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-3.c: New test. + * gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-4.c: New test. + * gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-5.c: New test. + * gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-6.c: New test. 
+ +2023-10-20 Roger Sayle <roger@nextmovesoftware.com> + Uros Bizjak <ubizjak@gmail.com> + + PR middle-end/101955 + PR tree-optimization/106245 + * gcc.target/i386/pr106245-2.c: New test case. + * gcc.target/i386/pr106245-3.c: New 32-bit test case. + * gcc.target/i386/pr106245-4.c: New 64-bit test case. + * gcc.target/i386/pr106245-5.c: Likewise. + +2023-10-20 Jason Merrill <jason@redhat.com> + + * g++.dg/template/sfinae-dr657.C: Adjust. + +2023-10-20 Jason Merrill <jason@redhat.com> + + * g++.dg/cpp1y/constexpr-diag1.C: Add -fno-implicit-constexpr. + +2023-10-20 Jason Merrill <jason@redhat.com> + + * g++.dg/warn/Wsign-promo1.C: New test. + +2023-10-20 Nathan Sidwell <nathan@acm.org> + + * g++.dg/modules/decltype-1_a.C: New. + * g++.dg/modules/decltype-1_b.C: New. + * g++.dg/modules/lambda-5_a.C: New. + * g++.dg/modules/lambda-5_b.C: New. + +2023-10-20 Florian Weimer <fweimer@redhat.com> + + * gcc.dg/Wint-conversion-3.c: New. + +2023-10-20 Florian Weimer <fweimer@redhat.com> + + * gcc.dg/Wincompatible-pointer-types-2.c: New. + * gcc.dg/Wincompatible-pointer-types-3.c: New. + * gcc.dg/Wincompatible-pointer-types-4.c: New. + +2023-10-20 Marek Polacek <polacek@redhat.com> + + PR c/111884 + * c-c++-common/alias-1.c: New test. + +2023-10-20 Andre Vieira <andre.simoesdiasvieira@arm.com> + + * gcc.dg/vect/pr111882.c: New test. + +2023-10-20 Patrick Palka <ppalka@redhat.com> + + PR c++/106086 + * g++.dg/template/crash127.C: Expect additional error due to + being able to check the member access expression ahead of time. + Strengthen the test by not instantiating the class template. + * g++.dg/cpp1y/lambda-generic-this5.C: New test. + +2023-10-20 Patrick Palka <ppalka@redhat.com> + + * g++.dg/concepts/var-concept3.C: Adjust expected diagnostic + for attempting to call a variable concept. + +2023-10-20 Richard Biener <rguenther@suse.de> + + PR tree-optimization/111383 + PR tree-optimization/110243 + * gcc.dg/torture/pr111383.c: New testcase. + +2023-10-20 Richard Biener <rguenther@suse.de> + + PR tree-optimization/111445 + * gcc.dg/torture/pr111445.c: New testcase. + +2023-10-20 Richard Biener <rguenther@suse.de> + + PR tree-optimization/110243 + PR tree-optimization/111336 + * gcc.dg/torture/pr110243.c: New testcase. + * gcc.dg/torture/pr111336.c: Likewise. + +2023-10-20 Richard Biener <rguenther@suse.de> + + PR tree-optimization/111891 + * gfortran.dg/pr111891.f90: New testcase. + +2023-10-20 Richard Biener <rguenther@suse.de> + + PR tree-optimization/111000 + * gcc.dg/torture/pr111000.c: New testcase. + +2023-10-20 Alexandre Oliva <oliva@adacore.com> + + * c-c++-common/torture/harden-cfr.c: New. + * c-c++-common/harden-cfr-noret-never-O0.c: New. + * c-c++-common/torture/harden-cfr-noret-never.c: New. + * c-c++-common/torture/harden-cfr-noret-noexcept.c: New. + * c-c++-common/torture/harden-cfr-noret-nothrow.c: New. + * c-c++-common/torture/harden-cfr-noret.c: New. + * c-c++-common/torture/harden-cfr-notail.c: New. + * c-c++-common/torture/harden-cfr-returning.c: New. + * c-c++-common/torture/harden-cfr-tail.c: New. + * c-c++-common/torture/harden-cfr-abrt-always.c: New. + * c-c++-common/torture/harden-cfr-abrt-never.c: New. + * c-c++-common/torture/harden-cfr-abrt-no-xthrow.c: New. + * c-c++-common/torture/harden-cfr-abrt-nothrow.c: New. + * c-c++-common/torture/harden-cfr-abrt.c: New. + * c-c++-common/torture/harden-cfr-always.c: New. + * c-c++-common/torture/harden-cfr-never.c: New. + * c-c++-common/torture/harden-cfr-no-xthrow.c: New. + * c-c++-common/torture/harden-cfr-nothrow.c: New. 
+ * c-c++-common/torture/harden-cfr-bret-always.c: New. + * c-c++-common/torture/harden-cfr-bret-never.c: New. + * c-c++-common/torture/harden-cfr-bret-noopt.c: New. + * c-c++-common/torture/harden-cfr-bret-noret.c: New. + * c-c++-common/torture/harden-cfr-bret-no-xthrow.c: New. + * c-c++-common/torture/harden-cfr-bret-nothrow.c: New. + * c-c++-common/torture/harden-cfr-bret-retcl.c: New. + * c-c++-common/torture/harden-cfr-bret.c: New. + * g++.dg/harden-cfr-throw-always-O0.C: New. + * g++.dg/harden-cfr-throw-returning-O0.C: New. + * g++.dg/torture/harden-cfr-noret-always-no-nothrow.C: New. + * g++.dg/torture/harden-cfr-noret-never-no-nothrow.C: New. + * g++.dg/torture/harden-cfr-noret-no-nothrow.C: New. + * g++.dg/torture/harden-cfr-throw-always.C: New. + * g++.dg/torture/harden-cfr-throw-never.C: New. + * g++.dg/torture/harden-cfr-throw-no-xthrow.C: New. + * g++.dg/torture/harden-cfr-throw-no-xthrow-expected.C: New. + * g++.dg/torture/harden-cfr-throw-nothrow.C: New. + * g++.dg/torture/harden-cfr-throw-nocleanup.C: New. + * g++.dg/torture/harden-cfr-throw-returning.C: New. + * g++.dg/torture/harden-cfr-throw.C: New. + * gcc.dg/torture/harden-cfr-noret-no-nothrow.c: New. + * gcc.dg/torture/harden-cfr-tail-ub.c: New. + * gnat.dg/hardcfr.adb: New. + * c-c++-common/torture/harden-cfr-skip-leaf.c: New file. + * g++.dg/harden-cfr-throw-returning-enabled-O0.C: New file. + +2023-10-20 Tamar Christina <tamar.christina@arm.com> + Andre Vieira <andre.simoesdiasvieira@arm.com> + + * gcc.dg/vect/vect-bitfield-read-1-not.c: New test. + * gcc.dg/vect/vect-bitfield-read-2-not.c: New test. + * gcc.dg/vect/vect-bitfield-read-8.c: New test. + * gcc.dg/vect/vect-bitfield-read-9.c: New test. + +2023-10-20 Hu, Lin1 <lin1.hu@intel.com> + + * gcc.target/i386/pr89229-5b.c: Modify test. + * gcc.target/i386/pr89229-6b.c: Ditto. + * gcc.target/i386/pr89229-7b.c: Ditto. + +2023-10-20 Juzhe-Zhong <juzhe.zhong@rivai.ai> + + PR target/111848 + * gcc.dg/vect/costmodel/riscv/rvv/dynamic-lmul2-7.c: Adapt test. + * gcc.dg/vect/costmodel/riscv/rvv/pr111848.c: New test. + +2023-10-20 Lehua Ding <lehua.ding@rivai.ai> + + PR target/111037 + PR target/111234 + PR target/111725 + * gcc.target/riscv/rvv/base/scalar_move-1.c: Adjust. + * gcc.target/riscv/rvv/vsetvl/avl_single-23.c: Adjust. + * gcc.target/riscv/rvv/vsetvl/avl_single-46.c: Adjust. + * gcc.target/riscv/rvv/vsetvl/avl_single-84.c: Adjust. + * gcc.target/riscv/rvv/vsetvl/avl_single-89.c: Adjust. + * gcc.target/riscv/rvv/vsetvl/avl_single-95.c: Adjust. + * gcc.target/riscv/rvv/vsetvl/imm_bb_prop-1.c: Adjust. + * gcc.target/riscv/rvv/vsetvl/pr109743-2.c: Adjust. + * gcc.target/riscv/rvv/vsetvl/pr109773-1.c: Adjust. + * gcc.target/riscv/rvv/base/pr111037-1.c: Moved to... + * gcc.target/riscv/rvv/vsetvl/pr111037-1.c: ...here. + * gcc.target/riscv/rvv/base/pr111037-2.c: Moved to... + * gcc.target/riscv/rvv/vsetvl/pr111037-2.c: ...here. + * gcc.target/riscv/rvv/vsetvl/vlmax_back_prop-25.c: Adjust. + * gcc.target/riscv/rvv/vsetvl/vlmax_back_prop-26.c: Adjust. + * gcc.target/riscv/rvv/vsetvl/vlmax_conflict-12.c: Adjust. + * gcc.target/riscv/rvv/vsetvl/vlmax_conflict-3.c: Adjust. + * gcc.target/riscv/rvv/vsetvl/vsetvl-13.c: Adjust. + * gcc.target/riscv/rvv/vsetvl/vsetvl-18.c: Adjust. + * gcc.target/riscv/rvv/vsetvl/vsetvl-23.c: Adjust. + * gcc.target/riscv/rvv/vsetvl/avl_single-104.c: New test. + * gcc.target/riscv/rvv/vsetvl/avl_single-105.c: New test. + * gcc.target/riscv/rvv/vsetvl/avl_single-106.c: New test. + * gcc.target/riscv/rvv/vsetvl/avl_single-107.c: New test. 
+ * gcc.target/riscv/rvv/vsetvl/avl_single-108.c: New test. + * gcc.target/riscv/rvv/vsetvl/avl_single-109.c: New test. + * gcc.target/riscv/rvv/vsetvl/pr111037-3.c: New test. + * gcc.target/riscv/rvv/vsetvl/pr111037-4.c: New test. + +2023-10-20 Nathaniel Shead <nathanieloshead@gmail.com> + + PR c++/101631 + PR c++/102286 + * g++.dg/cpp1y/constexpr-89336-3.C: Fix union initialisation. + * g++.dg/cpp1y/constexpr-union6.C: New test. + * g++.dg/cpp1y/constexpr-union7.C: New test. + * g++.dg/cpp2a/constexpr-union2.C: New test. + * g++.dg/cpp2a/constexpr-union3.C: New test. + * g++.dg/cpp2a/constexpr-union4.C: New test. + * g++.dg/cpp2a/constexpr-union5.C: New test. + * g++.dg/cpp2a/constexpr-union6.C: New test. + +2023-10-20 Nathaniel Shead <nathanieloshead@gmail.com> + + * g++.dg/cpp0x/constexpr-cast4.C: New test. + +2023-10-19 Andre Vieira <andre.simoesdiasvieira@arm.com> + + * gcc.dg/gomp/pr110485.c: New test. + +2023-10-19 Andre Vieira <andre.simoesdiasvieira@arm.com> + + * gcc.dg/vect/vect-simd-clone-16f.c: Remove unnecessary differentation + between targets with different pointer sizes. + * gcc.dg/vect/vect-simd-clone-17f.c: Likewise. + * gcc.dg/vect/vect-simd-clone-18f.c: Likewise. + +2023-10-19 Andrew Pinski <pinskia@gmail.com> + + PR c/100532 + * gcc.dg/pr100532-1.c: New test. + +2023-10-19 Andrew Pinski <pinskia@gmail.com> + + PR c/104822 + * gcc.dg/sso-18.c: New test. + * gcc.dg/sso-19.c: New test. + +2023-10-19 Lewis Hyatt <lhyatt@gmail.com> + + PR c++/89038 + * c-c++-common/cpp/Wunknown-pragmas-1.c: New test. + +2023-10-19 Lewis Hyatt <lhyatt@gmail.com> + + PR preprocessor/82335 + * c-c++-common/cpp/diagnostic-pragma-3.c: New test. + +2023-10-19 Tamar Christina <tamar.christina@arm.com> + + PR tree-optimization/111860 + * gcc.dg/vect/pr111860.c: New test. + +2023-10-19 Richard Biener <rguenther@suse.de> + + PR tree-optimization/111131 + * gcc.dg/vect/vect-gather-1.c: Now expected to vectorize + everywhere. + * gcc.dg/vect/vect-gather-2.c: Expected to not SLP anywhere. + Massage the scale case to more reliably produce a different + one. Scan for the specific messages. + * gcc.dg/vect/vect-gather-3.c: Masked gather is also supported + for AVX2, but not emulated. + * gcc.dg/vect/vect-gather-4.c: Expected to not SLP anywhere. + Massage to more properly ensure this. + * gcc.dg/vect/tsvc/vect-tsvc-s353.c: Expect to vectorize + everywhere. + +2023-10-19 Alex Coplan <alex.coplan@arm.com> + + * gcc.target/aarch64/pr71727.c: Adjust scan-assembler-not to + make sure we don't have q-register stores with -mstrict-align. + +2023-10-19 Alex Coplan <alex.coplan@arm.com> + + * gcc.target/aarch64/sve/pcs/args_9.c: Adjust scan-assemblers to + allow for stp. + +2023-10-19 Alex Coplan <alex.coplan@arm.com> + + * gcc.target/aarch64/lr_free_1.c: Add + --param=aarch64-stp-policy=never to dg-options. + +2023-10-19 Haochen Jiang <haochen.jiang@intel.com> + + * gcc.target/i386/funcspec-56.inc: Group Clearwater Forest + with atom cores. + +2023-10-19 Jiahao Xu <xujiahao@loongson.cn> + + * gcc.target/loongarch/vect-widen-add.c: New test. + * gcc.target/loongarch/vect-widen-mul.c: New test. + * gcc.target/loongarch/vect-widen-sub.c: New test. + +2023-10-19 Jiahao Xu <xujiahao@loongson.cn> + + * gcc.target/loongarch/avg-ceil-lasx.c: New test. + * gcc.target/loongarch/avg-ceil-lsx.c: New test. + * gcc.target/loongarch/avg-floor-lasx.c: New test. + * gcc.target/loongarch/avg-floor-lsx.c: New test. + * gcc.target/loongarch/sad-lasx.c: New test. + * gcc.target/loongarch/sad-lsx.c: New test. 
+ +2023-10-18 Andrew Pinski <pinskia@gmail.com> + + PR middle-end/111863 + * gcc.c-torture/execute/pr111863-1.c: New test. + +2023-10-18 Andrew Pinski <pinskia@gmail.com> + + PR c/101364 + * gcc.dg/pr101364-1.c: New test. + +2023-10-18 Andrew Pinski <pinskia@gmail.com> + + PR c/101285 + * gcc.dg/pr101285-1.c: New test. + +2023-10-18 Jason Merrill <jason@redhat.com> + + * g++.dg/ext/integer-pack2.C: Add -fpermissive. + * g++.dg/diagnostic/sys-narrow.h: New test. + * g++.dg/diagnostic/sys-narrow1.C: New test. + * g++.dg/diagnostic/sys-narrow1a.C: New test. + * g++.dg/diagnostic/sys-narrow1b.C: New test. + * g++.dg/diagnostic/sys-narrow1c.C: New test. + * g++.dg/diagnostic/sys-narrow1d.C: New test. + * g++.dg/diagnostic/sys-narrow1e.C: New test. + * g++.dg/diagnostic/sys-narrow1f.C: New test. + * g++.dg/diagnostic/sys-narrow1g.C: New test. + * g++.dg/diagnostic/sys-narrow1h.C: New test. + * g++.dg/diagnostic/sys-narrow1i.C: New test. + +2023-10-18 Tobias Burnus <tobias@codesourcery.com> + + * gfortran.dg/gomp/allocate-13a.f90: New test. + +2023-10-18 Jakub Jelinek <jakub@redhat.com> + + PR tree-optimization/111845 + * gcc.dg/pr111845.c: New test. + * gcc.target/i386/pr111845.c: New test. + +2023-10-18 Tamar Christina <tamar.christina@arm.com> + + PR tree-optimization/109154 + * gcc.target/aarch64/sve/pre_cond_share_1.c: New test. + +2023-10-18 Juzhe-Zhong <juzhe.zhong@rivai.ai> + + * gcc.target/riscv/rvv/autovec/vls/def.h: Add new test. + * gcc.target/riscv/rvv/autovec/vls-vlmax/consecutive-1.c: New test. + * gcc.target/riscv/rvv/autovec/vls-vlmax/consecutive-2.c: New test. + * gcc.target/riscv/rvv/autovec/vls-vlmax/consecutive_run-1.c: New test. + * gcc.target/riscv/rvv/autovec/vls-vlmax/consecutive_run-2.c: New test. + * gcc.target/riscv/rvv/autovec/vls/consecutive-1.c: New test. + * gcc.target/riscv/rvv/autovec/vls/consecutive-2.c: New test. + * gcc.target/riscv/rvv/autovec/vls/consecutive-3.c: New test. + +2023-10-18 Haochen Jiang <haochen.jiang@intel.com> + + * g++.target/i386/mv16.C: Ditto. + * gcc.target/i386/funcspec-56.inc: Handle new march. + +2023-10-18 Haochen Jiang <haochen.jiang@intel.com> + + * g++.target/i386/mv16.C: Ditto. + * gcc.target/i386/funcspec-56.inc: Handle new march. + +2023-10-18 liuhongt <hongtao.liu@intel.com> + + * gcc.target/i386/part-vect-fmaddsubhf-1.c: New test. + * gcc.target/i386/part-vect-fmahf-1.c: New test. + +2023-10-18 Juzhe-Zhong <juzhe.zhong@rivai.ai> + + PR target/111832 + * gcc.target/riscv/rvv/rvv.exp: Enable more dynamic tests. + 2023-10-17 Richard Sandiford <richard.sandiford@arm.com> * gcc.target/aarch64/test_frame_2.c: Expect x30 to come before x19. 
diff --git a/gcc/testsuite/c-c++-common/alias-1.c b/gcc/testsuite/c-c++-common/alias-1.c new file mode 100644 index 0000000..d72fec4 --- /dev/null +++ b/gcc/testsuite/c-c++-common/alias-1.c @@ -0,0 +1,23 @@ +/* PR c/111884 */ +/* { dg-do compile } */ +/* { dg-options "-O2 -Wall" } */ +/* { dg-additional-options "-std=c++20" { target c++ } } */ +/* { dg-additional-options "-std=c2x" { target c } } */ + +int f(int i) +{ + int f = 1; + return i[(unsigned char *)&f]; +} + +int g(int i) +{ + int f = 1; + return i[(signed char *)&f]; +} + +int h(int i) +{ + int f = 1; + return i[(char *)&f]; +} diff --git a/gcc/testsuite/c-c++-common/cpp/Wunknown-pragmas-1.c b/gcc/testsuite/c-c++-common/cpp/Wunknown-pragmas-1.c new file mode 100644 index 0000000..fb58739 --- /dev/null +++ b/gcc/testsuite/c-c++-common/cpp/Wunknown-pragmas-1.c @@ -0,0 +1,13 @@ +/* PR c++/89038 */ +/* { dg-additional-options "-Wunknown-pragmas" } */ + +#pragma oops /* { dg-warning "-:-Wunknown-pragmas" } */ +#pragma GGC diagnostic push /* { dg-warning "-:-Wunknown-pragmas" } */ +#pragma GCC diagnostics push /* { dg-warning "-:-Wunknown-pragmas" } */ + +/* Test we can disable the warnings. */ +#pragma GCC diagnostic ignored "-Wunknown-pragmas" + +#pragma oops /* { dg-bogus "-:-Wunknown-pragmas" } */ +#pragma GGC diagnostic push /* { dg-bogus "-:-Wunknown-pragmas" } */ +#pragma GCC diagnostics push /* { dg-bogus "-:-Wunknown-pragmas" } */ diff --git a/gcc/testsuite/c-c++-common/cpp/diagnostic-pragma-3.c b/gcc/testsuite/c-c++-common/cpp/diagnostic-pragma-3.c new file mode 100644 index 0000000..459dcec --- /dev/null +++ b/gcc/testsuite/c-c++-common/cpp/diagnostic-pragma-3.c @@ -0,0 +1,37 @@ +/* This is like diagnostic-pragma-2.c, but handles the case where everything + is wrapped inside a macro, which previously caused additional issues tracked + in PR preprocessor/82335. */ + +/* { dg-do compile } */ +/* { dg-additional-options "-save-temps -Wattributes -Wtype-limits" } */ + +#define B _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wattributes\"") +#define E _Pragma("GCC diagnostic pop") + +#define X() B int __attribute((unknown_attr)) x; E +#define Y B int __attribute((unknown_attr)) y; E +#define WRAP(x) x + +void test1(void) +{ + WRAP(X()) + WRAP(Y) +} + +/* Additional test provided on the PR. */ +#define PRAGMA(...) _Pragma(#__VA_ARGS__) +#define PUSH_IGN(X) PRAGMA(GCC diagnostic push) PRAGMA(GCC diagnostic ignored X) +#define POP() PRAGMA(GCC diagnostic pop) +#define TEST(X, Y) \ + PUSH_IGN("-Wtype-limits") \ + int Y = (__typeof(X))-1 < 0; \ + POP() + +int test2() +{ + unsigned x; + TEST(x, i1); + WRAP(TEST(x, i2)) + return i1 + i2; +} diff --git a/gcc/testsuite/c-c++-common/harden-cfr-noret-never-O0.c b/gcc/testsuite/c-c++-common/harden-cfr-noret-never-O0.c new file mode 100644 index 0000000..a6992eb --- /dev/null +++ b/gcc/testsuite/c-c++-common/harden-cfr-noret-never-O0.c @@ -0,0 +1,12 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -O0 -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that we don't insert checking before noreturn calls. -O0 is tested + separately because h is not found to be noreturn without optimization. */ + +#include "torture/harden-cfr-noret.c" + +/* No out-of-line checks. */ +/* { dg-final { scan-tree-dump-times "hardcfr_check" 0 "hardcfr" } } */ +/* Only one inline check at the end of f and of h2. 
*/ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-always.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-always.c new file mode 100644 index 0000000..26c0f27 --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-always.c @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=always -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check the noreturn handling of a builtin call with always. */ + +#include "harden-cfr-abrt.c" + +/* Out-of-line checking, before both builtin_abort and return in f. */ +/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */ +/* Inline checking before builtin_abort in g. */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-never.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-never.c new file mode 100644 index 0000000..a9eca98 --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-never.c @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check the noreturn handling of a builtin call with never. */ + +#include "harden-cfr-abrt.c" + +/* No out-of-line checking. */ +/* { dg-final { scan-tree-dump-times "__hardcfr_check" 0 "hardcfr" } } */ +/* Inline checking only before return in f. */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-no-xthrow.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-no-xthrow.c new file mode 100644 index 0000000..eb7589f --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-no-xthrow.c @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=no-xthrow -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check the noreturn handling of a builtin call with no-xthrow. */ + +#include "harden-cfr-abrt.c" + +/* Out-of-line checking, before both builtin_abort and return in f. */ +/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */ +/* Inline checking before builtin_abort in g. */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-nothrow.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-nothrow.c new file mode 100644 index 0000000..24363bd --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-nothrow.c @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=nothrow -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check the noreturn handling of a builtin call with =nothrow. */ + +#include "harden-cfr-abrt.c" + +/* Out-of-line checking, before both builtin_abort and return in f. */ +/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */ +/* Inline checking before builtin_abort in g. 
*/ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt.c new file mode 100644 index 0000000..1ed7273 --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt.c @@ -0,0 +1,19 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check the noreturn handling of a builtin call. */ + +int f(int i) { + if (!i) + __builtin_abort (); + return i; +} + +int g() { + __builtin_abort (); +} + +/* Out-of-line checking, before both builtin_abort and return in f. */ +/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */ +/* Inline checking before builtin_return in g. */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-always.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-always.c new file mode 100644 index 0000000..6e0767a --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-always.c @@ -0,0 +1,13 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=always -fdump-tree-hardcfr --param hardcfr-max-blocks=9 --param hardcfr-max-inline-blocks=5 -ffat-lto-objects -w" } */ + +/* Check the instrumentation and the parameters with checking before + all noreturn calls. */ + +#include "harden-cfr.c" + +/* Inlined checking thus trap for f. */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */ +/* Out-of-line checking for g (param), and before both noreturn calls in main. */ +/* { dg-final { scan-tree-dump-times "__hardcfr_check" 3 "hardcfr" } } */ +/* No checking for h (too many blocks). */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-always.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-always.c new file mode 100644 index 0000000..779896c --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-always.c @@ -0,0 +1,13 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=always -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that, even enabling all checks before noreturn calls (leaving + returning calls enabled), we get checks before __builtin_return without + duplication (__builtin_return is both noreturn and a returning call). */ + +#include "harden-cfr-bret.c" + +/* Out-of-line checking, before both builtin_return and return in f. */ +/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */ +/* Inline checking before builtin_return in g. */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-never.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-never.c new file mode 100644 index 0000000..49ce17f --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-never.c @@ -0,0 +1,13 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that, even enabling checks before never noreturn calls (leaving + returning calls enabled), we get checks before __builtin_return without + duplication (__builtin_return is both noreturn and a returning call). */ + +#include "harden-cfr-bret.c" + +/* Out-of-line checking, before both builtin_return and return in f. 
*/ +/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */ +/* Inline checking before builtin_return in g. */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-no-xthrow.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-no-xthrow.c new file mode 100644 index 0000000..78e5bf4 --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-no-xthrow.c @@ -0,0 +1,14 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=no-xthrow -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that, even enabling checks before no-xthrow-throwing noreturn calls + (leaving returning calls enabled), we get checks before __builtin_return + without duplication (__builtin_return is both noreturn and a returning + call). */ + +#include "harden-cfr-bret.c" + +/* Out-of-line checking, before both builtin_return and return in f. */ +/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */ +/* Inline checking before builtin_return in g. */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noopt.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noopt.c new file mode 100644 index 0000000..1512614 --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noopt.c @@ -0,0 +1,12 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fno-hardcfr-check-returning-calls -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that, even disabling checks before both noreturn and returning + calls, we still get checks before __builtin_return. */ + +#include "harden-cfr-bret.c" + +/* Out-of-line checking, before both builtin_return and return in f. */ +/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */ +/* Inline checking before builtin_return in g. */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noret.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noret.c new file mode 100644 index 0000000..fd95bb7 --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noret.c @@ -0,0 +1,12 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-returning-calls -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that, even disabling checks before returning calls (leaving noreturn + calls enabled), we still get checks before __builtin_return. */ + +#include "harden-cfr-bret.c" + +/* Out-of-line checking, before both builtin_return and return in f. */ +/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */ +/* Inline checking before builtin_return in g. 
*/ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-nothrow.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-nothrow.c new file mode 100644 index 0000000..c5c3612 --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-nothrow.c @@ -0,0 +1,13 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=nothrow -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that, even enabling checks before nothrow noreturn calls (leaving + returning calls enabled), we get checks before __builtin_return without + duplication (__builtin_return is both noreturn and a returning call). */ + +#include "harden-cfr-bret.c" + +/* Out-of-line checking, before both builtin_return and return in f. */ +/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */ +/* Inline checking before builtin_return in g. */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-retcl.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-retcl.c new file mode 100644 index 0000000..137dfbb --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-retcl.c @@ -0,0 +1,12 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that, even disabling checks before noreturn calls (leaving returning + calls enabled), we still get checks before __builtin_return. */ + +#include "harden-cfr-bret.c" + +/* Out-of-line checking, before both builtin_return and return in f. */ +/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */ +/* Inline checking before builtin_return in g. */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret.c new file mode 100644 index 0000000..b459ff6 --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret.c @@ -0,0 +1,17 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fdump-tree-hardcfr -ffat-lto-objects" } */ + +int f(int i) { + if (i) + __builtin_return (&i); + return i; +} + +int g(int i) { + __builtin_return (&i); +} + +/* Out-of-line checking, before both builtin_return and return in f. */ +/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */ +/* Inline checking before builtin_return in g. */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-never.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-never.c new file mode 100644 index 0000000..7fe0bb4 --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-never.c @@ -0,0 +1,13 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fdump-tree-hardcfr --param hardcfr-max-blocks=9 --param hardcfr-max-inline-blocks=5 -ffat-lto-objects -w" } */ + +/* Check the instrumentation and the parameters without checking before + noreturn calls. */ + +#include "harden-cfr.c" + +/* Inlined checking thus trap for f. */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */ +/* Out-of-line checking for g (param). 
*/ +/* { dg-final { scan-tree-dump-times "__hardcfr_check" 1 "hardcfr" } } */ +/* No checking for h (too many blocks) or main (no edges to exit block). */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-no-xthrow.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-no-xthrow.c new file mode 100644 index 0000000..56ed9d5 --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-no-xthrow.c @@ -0,0 +1,13 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=no-xthrow -fdump-tree-hardcfr --param hardcfr-max-blocks=9 --param hardcfr-max-inline-blocks=5 -ffat-lto-objects -w" } */ + +/* Check the instrumentation and the parameters with checking before + all noreturn calls that aren't expected to throw. */ + +#include "harden-cfr.c" + +/* Inlined checking thus trap for f. */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */ +/* Out-of-line checking for g (param), and before both noreturn calls in main. */ +/* { dg-final { scan-tree-dump-times "__hardcfr_check" 3 "hardcfr" } } */ +/* No checking for h (too many blocks). */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-noret-never.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-noret-never.c new file mode 100644 index 0000000..8bd2d13 --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-noret-never.c @@ -0,0 +1,18 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that we don't insert checking before noreturn calls. -O0 is tested + separately because h is not found to be noreturn without optimization, which + affects codegen for h2, so h2 is omitted here at -O0. */ + +#if !__OPTIMIZE__ +# define OMIT_H2 +#endif + +#include "harden-cfr-noret.c" + + +/* No out-of-line checks. */ +/* { dg-final { scan-tree-dump-times "hardcfr_check" 0 "hardcfr" } } */ +/* Only one inline check at the end of f. */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-noret-noexcept.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-noret-noexcept.c new file mode 100644 index 0000000..a804a6c --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-noret-noexcept.c @@ -0,0 +1,16 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=nothrow -fno-exceptions -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that -fno-exceptions makes for implicit nothrow in noreturn + handling. */ + +#define ATTR_NOTHROW_OPT + +#include "harden-cfr-noret.c" + +/* One out-of-line check before the noreturn call in f, and another at the end + of f. */ +/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */ +/* One inline check in h, before the noreturn call, and another in h2, before + or after the call, depending on noreturn detection. 
*/ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-noret-nothrow.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-noret-nothrow.c new file mode 100644 index 0000000..f390cfd --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-noret-nothrow.c @@ -0,0 +1,13 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=nothrow -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that we insert checking before nothrow noreturn calls. */ + +#include "harden-cfr-noret.c" + +/* One out-of-line check before the noreturn call in f, and another at the end + of f. */ +/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */ +/* One inline check in h, before the noreturn call, and another in h2, before + or after the call, depending on noreturn detection. */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-noret.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-noret.c new file mode 100644 index 0000000..fdd8031 --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-noret.c @@ -0,0 +1,38 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=always -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that we insert checking before all noreturn calls. */ + +#ifndef ATTR_NOTHROW_OPT /* Overridden in harden-cfr-noret-noexcept. */ +#define ATTR_NOTHROW_OPT __attribute__ ((__nothrow__)) +#endif + +extern void __attribute__ ((__noreturn__)) ATTR_NOTHROW_OPT g (void); + +void f(int i) { + if (i) + /* Out-of-line checks here... */ + g (); + /* ... and here. */ +} + +void __attribute__ ((__noinline__, __noclone__)) +h(void) { + /* Inline check here. */ + g (); +} + +#ifndef OMIT_H2 /* from harden-cfr-noret-never. */ +void h2(void) { + /* Inline check either here, whether because of noreturn or tail call... */ + h (); + /* ... or here, if not optimizing. */ +} +#endif + +/* One out-of-line check before the noreturn call in f, and another at the end + of f. */ +/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */ +/* One inline check in h, before the noreturn call, and another in h2, before + or after the call, depending on noreturn detection. */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-notail.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-notail.c new file mode 100644 index 0000000..6d11487 --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-notail.c @@ -0,0 +1,8 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-exceptions -fno-hardcfr-check-returning-calls -fdump-tree-hardcfr -ffat-lto-objects" } */ + +#include "harden-cfr-tail.c" + +/* Inline checking after the calls, disabling tail calling. 
*/ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 5 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "Inserting inline check before stmt" 0 "hardcfr" } } */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-nothrow.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-nothrow.c new file mode 100644 index 0000000..da54fc0 --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-nothrow.c @@ -0,0 +1,13 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=nothrow -fdump-tree-hardcfr --param hardcfr-max-blocks=9 --param hardcfr-max-inline-blocks=5 -ffat-lto-objects -w" } */ + +/* Check the instrumentation and the parameters without checking before + nothrow noreturn calls. */ + +#include "harden-cfr.c" + +/* Inlined checking thus trap for f. */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */ +/* Out-of-line checking for g (param), and before both noreturn calls in main. */ +/* { dg-final { scan-tree-dump-times "__hardcfr_check" 3 "hardcfr" } } */ +/* No checking for h (too many blocks). */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-returning.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-returning.c new file mode 100644 index 0000000..550b02c --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-returning.c @@ -0,0 +1,35 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-returning-calls -fno-exceptions -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that we insert checks before returning calls and alternate paths, even + at -O0, because of the explicit command-line flag. */ + +void g (void); +void g2 (void); +void g3 (void); + +void f (int i) { + if (!i) + /* Out-of-line checks here... */ + g (); + else if (i > 0) + /* here... */ + g2 (); + /* else */ + /* and in the implicit else here. */ +} + +void f2 (int i) { + if (!i) + /* Out-of-line check here... */ + g (); + else if (i > 0) + /* here... */ + g2 (); + else + /* and here. */ + g3 (); +} + +/* { dg-final { scan-tree-dump-times "hardcfr_check" 6 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 0 "hardcfr" } } */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-skip-leaf.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-skip-leaf.c new file mode 100644 index 0000000..85ecaa0 --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-skip-leaf.c @@ -0,0 +1,10 @@ +/* { dg-do run } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-skip-leaf -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Test skipping instrumentation of leaf functions. */ + +#include "harden-cfr.c" + +/* { dg-final { scan-tree-dump-times "__builtin_trap" 0 "hardcfr" } } */ +/* Only main isn't leaf. */ +/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-tail.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-tail.c new file mode 100644 index 0000000..d5467ea --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-tail.c @@ -0,0 +1,52 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-returning-calls -fno-hardcfr-check-exceptions -fdump-tree-hardcfr -ffat-lto-objects -Wno-return-type" } */ + +/* Check that we insert CFR checking so as to not disrupt tail calls. 
+ Mandatory tail calls are not available in C, and optimizing calls as tail + calls only takes place after hardcfr, so we insert checking before calls + followed by copies and return stmts with the same return value, that might + (or might not) end up optimized to tail calls. */ + +extern int g (int i); + +int f1(int i) { + /* Inline check before the returning call. */ + return g (i); +} + +extern void g2 (int i); + +void f2(int i) { + /* Inline check before the returning call, that ignores the returned value, + matching the value-less return. */ + g2 (i); + return; +} + +void f3(int i) { + /* Inline check before the returning call. */ + g (i); +} + +void f4(int i) { + if (i) + /* Out-of-line check before the returning call. */ + return g2 (i); + /* Out-of-line check before implicit return. */ +} + +int f5(int i) { + /* Not regarded as a returning call, returning value other than callee's + returned value. */ + g (i); + /* Inline check after the non-returning call. */ + return i; +} + +/* Out-of-line checks in f4, before returning calls and before return. */ +/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */ +/* Inline checking in all other functions. */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 4 "hardcfr" } } */ +/* Check before tail-call in all but f5, but f4 is out-of-line. */ +/* { dg-final { scan-tree-dump-times "Inserting inline check before stmt" 3 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "Inserting out-of-line check before stmt" 1 "hardcfr" } } */ diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr.c b/gcc/testsuite/c-c++-common/torture/harden-cfr.c new file mode 100644 index 0000000..73824c6 --- /dev/null +++ b/gcc/testsuite/c-c++-common/torture/harden-cfr.c @@ -0,0 +1,84 @@ +/* { dg-do run } */ +/* { dg-options "-fharden-control-flow-redundancy -fdump-tree-hardcfr --param hardcfr-max-blocks=9 --param hardcfr-max-inline-blocks=5 -ffat-lto-objects" } */ + +/* Check the instrumentation and the parameters. */ + +int +f (int i, int j) +{ + if (i < j) + return 2 * i; + else + return 3 * j; +} + +int +g (unsigned i, int j) +{ + switch (i) + { + case 0: + return j * 2; + + case 1: + return j * 3; + + case 2: + return j * 5; + + default: + return j * 7; + } +} + +int +h (unsigned i, int j) /* { dg-warning "has more than 9 blocks, the requested maximum" } */ +{ + switch (i) + { + case 0: + return j * 2; + + case 1: + return j * 3; + + case 2: + return j * 5; + + case 3: + return j * 7; + + case 4: + return j * 11; + + case 5: + return j * 13; + + case 6: + return j * 17; + + case 7: + return j * 19; + + default: + return j * 23; + } +} + +int +main (int argc, char *argv[]) +{ + if (f (1, 2) != 2 || g (2, 5) != 25 || h (4, 3) != 33 + || argc < 0) + __builtin_abort (); + /* Call exit, instead of returning, to avoid an edge to the exit block and + thus implicitly disable hardening of main, when checking before noreturn + calls is disabled. */ + __builtin_exit (0); +} + +/* Inlined checking thus trap for f. */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */ +/* Out-of-line checking for g (param), and before both noreturn calls in main. */ +/* { dg-final { scan-tree-dump-times "__hardcfr_check" 3 "hardcfr" } } */ +/* No checking for h (too many blocks). 
*/ diff --git a/gcc/testsuite/g++.dg/concepts/var-concept3.C b/gcc/testsuite/g++.dg/concepts/var-concept3.C index 6fd96a5..b4483eb 100644 --- a/gcc/testsuite/g++.dg/concepts/var-concept3.C +++ b/gcc/testsuite/g++.dg/concepts/var-concept3.C @@ -12,7 +12,7 @@ template<typename T> template<typename U> - requires C1<U>() // { dg-error "cannot be used as a function" } + requires C1<U>() // { dg-error "cannot call a concept" } void f1(U) { } template<typename U> diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-cast4.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-cast4.C new file mode 100644 index 0000000..884b6a5 --- /dev/null +++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-cast4.C @@ -0,0 +1,11 @@ +// { dg-do compile { target c++11 } } + +constexpr int&& r = 1 + 2; // { dg-message "pointed-to object declared here" "" { target c++26 } } +constexpr void* vpr = &r; +constexpr int* pi = static_cast<int*>(vpr); // { dg-error "cast from .void\\*. is not allowed" "" { target c++23_down } } +constexpr float* pf = static_cast<float*>(vpr); // { dg-error "cast from .void\\*. is not allowed" "" { target c++23_down } } +// { dg-error "cast from .void\\*. is not allowed in a constant expression because pointed-to type .int. is not similar to .float." "" { target c++26 } .-1 } + +constexpr void* vnp = nullptr; +constexpr int* pi2 = static_cast<int*>(vnp); // { dg-error "cast from .void\\*. is not allowed" "" { target c++23_down } } +// { dg-error "cast from .void\\*. is not allowed in a constant expression because .vnp. does not point to an object" "" { target c++26 } .-1 } diff --git a/gcc/testsuite/g++.dg/cpp1y/constexpr-89336-3.C b/gcc/testsuite/g++.dg/cpp1y/constexpr-89336-3.C index 9d370dd..6a60966 100644 --- a/gcc/testsuite/g++.dg/cpp1y/constexpr-89336-3.C +++ b/gcc/testsuite/g++.dg/cpp1y/constexpr-89336-3.C @@ -18,7 +18,7 @@ constexpr int bar () { union U { int a[5]; long b; }; - union V { union U u; short v; }; + union V { short v; union U u; }; V w {}; w.v = 5; w.u.a[3] = w.u.a[1] = w.v; // { dg-error "change of the active member of a union from" "" { target c++17_down } } diff --git a/gcc/testsuite/g++.dg/cpp1y/constexpr-diag1.C b/gcc/testsuite/g++.dg/cpp1y/constexpr-diag1.C index 0e2909e..e12633c 100644 --- a/gcc/testsuite/g++.dg/cpp1y/constexpr-diag1.C +++ b/gcc/testsuite/g++.dg/cpp1y/constexpr-diag1.C @@ -1,6 +1,6 @@ // PR c++/111272 // { dg-do compile { target c++14 } } -// { dg-options "-Werror=invalid-constexpr" } +// { dg-options "-Werror=invalid-constexpr -fno-implicit-constexpr" } // { dg-prune-output "some warnings being treated as errors" } struct Jam diff --git a/gcc/testsuite/g++.dg/cpp1y/constexpr-union6.C b/gcc/testsuite/g++.dg/cpp1y/constexpr-union6.C new file mode 100644 index 0000000..ff7ebf1 --- /dev/null +++ b/gcc/testsuite/g++.dg/cpp1y/constexpr-union6.C @@ -0,0 +1,13 @@ +// { dg-do compile { target c++14 } } + +union U { + int a; + float b; +}; + +constexpr bool foo() { + U u {}; + u.b = 1.0f; // { dg-error "change of the active member" "" { target c++17_down } } + return u.b == 1.0f; +} +constexpr bool x = foo(); diff --git a/gcc/testsuite/g++.dg/cpp1y/constexpr-union7.C b/gcc/testsuite/g++.dg/cpp1y/constexpr-union7.C new file mode 100644 index 0000000..6fc41f9 --- /dev/null +++ b/gcc/testsuite/g++.dg/cpp1y/constexpr-union7.C @@ -0,0 +1,18 @@ +// { dg-do compile { target c++14 } } + +// this type is not value-initialisable +struct S { const int a; int b; }; + +union U1 { int k; S s; }; +constexpr int test1() { + U1 u {}; + return u.s.b; // { dg-error "accessing .U1::s. 
member instead of initialized .U1::k. member" } +} +constexpr int x = test1(); + +union U2 { int :0; static int s; void foo(); int k; }; +constexpr int test2() { + U2 u {}; // should skip zero-width bitfields, static members, and functions + return u.k; +} +static_assert(test2() == 0, ""); diff --git a/gcc/testsuite/g++.dg/cpp1y/lambda-generic-this5.C b/gcc/testsuite/g++.dg/cpp1y/lambda-generic-this5.C new file mode 100644 index 0000000..42f9170 --- /dev/null +++ b/gcc/testsuite/g++.dg/cpp1y/lambda-generic-this5.C @@ -0,0 +1,22 @@ +// PR c++/106086 +// { dg-do compile { target c++14 } } + +template<class T> +struct A { + void f(int) const; + static void g(int); +}; + +template<class T> +struct B : A<T> { + auto f() const { + auto l1 = [&](auto x) { A<T>::f(x); }; + auto l2 = [&](auto x) { A<T>::g(x); }; + static_assert(sizeof(l1) == sizeof(this), ""); + static_assert(sizeof(l2) == 1, ""); + l1(0); + l2(0); + } +}; + +template struct B<void>; diff --git a/gcc/testsuite/g++.dg/cpp2a/constexpr-union2.C b/gcc/testsuite/g++.dg/cpp2a/constexpr-union2.C new file mode 100644 index 0000000..1712395 --- /dev/null +++ b/gcc/testsuite/g++.dg/cpp2a/constexpr-union2.C @@ -0,0 +1,30 @@ +// PR c++/101631 +// { dg-do compile { target c++20 } } + +struct sso { + union { + int buf[10]; + int* alloc; + }; +}; + +constexpr bool direct() { + sso val; + val.alloc = nullptr; + val.buf[5] = 42; + return true; +} +constexpr bool ok = direct(); + + +constexpr void perform_assignment(int& left, int right) noexcept { + left = right; // { dg-error "accessing .+ member instead of initialized" } +} + +constexpr bool indirect() { + sso val; + val.alloc = nullptr; + perform_assignment(val.buf[5], 42); // { dg-message "in .constexpr. expansion" } + return true; +} +constexpr bool err = indirect(); // { dg-message "in .constexpr. expansion" } diff --git a/gcc/testsuite/g++.dg/cpp2a/constexpr-union3.C b/gcc/testsuite/g++.dg/cpp2a/constexpr-union3.C new file mode 100644 index 0000000..6d30bb2 --- /dev/null +++ b/gcc/testsuite/g++.dg/cpp2a/constexpr-union3.C @@ -0,0 +1,45 @@ +// { dg-do compile { target c++20 } } + +struct S +{ + union { + char buf[8]; + char* ptr; + }; + unsigned len; + + constexpr S(const char* s, unsigned n) + { + char* p; + if (n > 7) + p = ptr = new char[n+1]; + else + p = buf; + for (len = 0; len < n; ++len) + p[len] = s[len]; // { dg-error "accessing uninitialized member" } + p[len] = '\0'; + } + + constexpr ~S() + { + if (len > 7) + delete[] ptr; + } +}; + +constexpr bool test1() +{ + S s("test", 4); // { dg-message "in .constexpr. expansion" } + return true; +} + +constexpr bool a = test1(); // { dg-message "in .constexpr. 
expansion" } + + +constexpr bool test2() +{ + S s("hello world", 11); + return true; +} + +constexpr bool b = test2(); diff --git a/gcc/testsuite/g++.dg/cpp2a/constexpr-union4.C b/gcc/testsuite/g++.dg/cpp2a/constexpr-union4.C new file mode 100644 index 0000000..429ab20 --- /dev/null +++ b/gcc/testsuite/g++.dg/cpp2a/constexpr-union4.C @@ -0,0 +1,29 @@ +// { dg-do compile { target c++20 } } + +// from [class.union.general] p5 + +union A { int x; int y[4]; }; +struct B { A a; }; +union C { B b; int k; }; +constexpr int f() { + C c; // does not start lifetime of any union member + c.b.a.y[3] = 4; // OK, S(c.b.a.y[3]) contains c.b and c.b.a.y; + // creates objects to hold union members c.b and c.b.a.y + return c.b.a.y[3]; // OK, c.b.a.y refers to newly created object (see [basic.life]) +} +constexpr int a = f(); + +struct X { const int a; int b; }; +union Y { X x; int k; };// { dg-message "does not implicitly begin its lifetime" } +constexpr int g() { + Y y = { { 1, 2 } }; // OK, y.x is active union member ([class.mem]) + int n = y.x.a; + y.k = 4; // OK, ends lifetime of y.x, y.k is active member of union + + y.x.b = n; // { dg-error "accessing .* member instead of initialized .* member" } + // undefined behavior: y.x.b modified outside its lifetime, + // S(y.x.b) is empty because X's default constructor is deleted, + // so union member y.x's lifetime does not implicitly start + return 0; +} +constexpr int b = g(); // { dg-message "in .constexpr. expansion" } diff --git a/gcc/testsuite/g++.dg/cpp2a/constexpr-union5.C b/gcc/testsuite/g++.dg/cpp2a/constexpr-union5.C new file mode 100644 index 0000000..7e42522 --- /dev/null +++ b/gcc/testsuite/g++.dg/cpp2a/constexpr-union5.C @@ -0,0 +1,80 @@ +// { dg-do compile { target c++20 } } + +union U { int a; int b; int c[2]; }; + +constexpr int test1() { + U u; + u.a = 10; + *&u.b = 20; // { dg-error "accessing" } + return u.b; +} +constexpr int x1 = test1(); // { dg-message "in .constexpr. expansion" } + +constexpr int test2() { + U u; + u.a = 10; + (0, u.b) = 20; // { dg-error "accessing" } + return u.b; +} +constexpr int x2 = test2(); // { dg-message "in .constexpr. expansion" } + +constexpr int test3() { + U u; + u.a = 0; + int* p = &u.b; + p[u.a] = 10; // { dg-error "accessing" } + return u.b; +} +constexpr int x3 = test3(); // { dg-message "in .constexpr. expansion" } + +constexpr int test4() { + U u; + u.a = 0; + int* p = &u.b; + u.a[p] = 10; // { dg-error "accessing" } + return u.b; +} +constexpr int x4 = test4(); // { dg-message "in .constexpr. expansion" } + +struct S { U u[10]; }; +constexpr int test5() { + S s; + s.u[4].a = 10; + 6[s.u].b = 15; + return 4[s.u].a + s.u[6].b; +} +static_assert(test5() == 25); + +constexpr int test6() { + U u; + u.a = 5; + u.c[0] = 3; + 1[u.c] = 8; + return 1[u.c] + u.c[0]; +} +static_assert(test6() == 11); + +constexpr int test7() { + U u; // default initialisation leaves no member initialised + int* p = &u.a; + *p = 10; // { dg-error "accessing" } + return *p; +} +constexpr int x7 = test7(); // { dg-message "in .constexpr. 
expansion" } + +constexpr int test8() { + U u {}; // value initialisation initialises first member + int* p = &u.a; + *p = 8; + return *p; +} +static_assert(test8() == 8); + +union V { int :0; static int x; void foo(); int a; }; +constexpr int test9() { + V v {}; // should skip zero-width bit fields, static members, and functions + int* p = &v.a; + *p = 9; + return *p; +} +static_assert(test9() == 9); diff --git a/gcc/testsuite/g++.dg/cpp2a/constexpr-union6.C b/gcc/testsuite/g++.dg/cpp2a/constexpr-union6.C new file mode 100644 index 0000000..00bda53 --- /dev/null +++ b/gcc/testsuite/g++.dg/cpp2a/constexpr-union6.C @@ -0,0 +1,53 @@ +// { dg-do compile { target c++20 } } +// PR c++/102286 + +#include "construct_at.h" + +struct S { const int a; int b; }; +union U { int k; S s; }; + +constexpr int test1() { + U u {}; + std::construct_at(&u.s, S{ 1, 2 }); + return u.s.b; +} +static_assert(test1() == 2); + +constexpr int test2() { + U u {}; + int* p = &u.s.b; + std::construct_at(p, 5); // { dg-message "in .constexpr. expansion" } + return u.s.b; +} +constexpr int x2 = test2(); // { dg-message "in .constexpr. expansion" } + +constexpr void foo(S* s) { + s->b = 10; // { dg-error "accessing .U::s. member instead of initialized .U::k." } +} +constexpr int test3() { + U u {}; + foo(&u.s); // { dg-message "in .constexpr. expansion" } + return u.s.b; +} +constexpr int x3 = test3(); // { dg-message "in .constexpr. expansion" } + +struct S2 { int a; int b; }; +union U2 { int k; S2 s; }; +constexpr int test4() { + U2 u; + int* p = &u.s.b; + std::construct_at(p, 8); // { dg-message "in .constexpr. expansion" } + return u.s.b; +}; +constexpr int x4 = test4(); // { dg-message "in .constexpr. expansion" } + +constexpr int test5() { + union { + int data[1]; + } u; + std::construct_at(u.data, 0); // { dg-message "in .constexpr. expansion" } + return 0; +} +constexpr int x5 = test5(); // { dg-message "in .constexpr. 
expansion" } + +// { dg-error "accessing (uninitialized member|.* member instead of)" "" { target *-*-* } 0 } diff --git a/gcc/testsuite/g++.dg/diagnostic/sys-narrow.h b/gcc/testsuite/g++.dg/diagnostic/sys-narrow.h new file mode 100644 index 0000000..ff042c5 --- /dev/null +++ b/gcc/testsuite/g++.dg/diagnostic/sys-narrow.h @@ -0,0 +1,2 @@ +#pragma GCC system_header +int i = { 2.4 }; // C++11 error: narrowing conversion diff --git a/gcc/testsuite/g++.dg/diagnostic/sys-narrow1.C b/gcc/testsuite/g++.dg/diagnostic/sys-narrow1.C new file mode 100644 index 0000000..7d3bca9 --- /dev/null +++ b/gcc/testsuite/g++.dg/diagnostic/sys-narrow1.C @@ -0,0 +1,4 @@ +// { dg-do compile { target c++11 } } + +// { dg-error "narrowing" "" { target *-*-* } 2 } +#include "sys-narrow.h" diff --git a/gcc/testsuite/g++.dg/diagnostic/sys-narrow1a.C b/gcc/testsuite/g++.dg/diagnostic/sys-narrow1a.C new file mode 100644 index 0000000..58cdaf2 --- /dev/null +++ b/gcc/testsuite/g++.dg/diagnostic/sys-narrow1a.C @@ -0,0 +1,5 @@ +// { dg-do compile { target c++11 } } +// { dg-additional-options "-w" } + +// { dg-error "narrowing" "" { target *-*-* } 2 } +#include "sys-narrow.h" diff --git a/gcc/testsuite/g++.dg/diagnostic/sys-narrow1b.C b/gcc/testsuite/g++.dg/diagnostic/sys-narrow1b.C new file mode 100644 index 0000000..8528f5d3 --- /dev/null +++ b/gcc/testsuite/g++.dg/diagnostic/sys-narrow1b.C @@ -0,0 +1,5 @@ +// { dg-do compile { target c++11 } } +// { dg-additional-options "-Wno-error" } + +// { dg-error "narrowing" "" { target *-*-* } 2 } +#include "sys-narrow.h" diff --git a/gcc/testsuite/g++.dg/diagnostic/sys-narrow1c.C b/gcc/testsuite/g++.dg/diagnostic/sys-narrow1c.C new file mode 100644 index 0000000..1243eb0 --- /dev/null +++ b/gcc/testsuite/g++.dg/diagnostic/sys-narrow1c.C @@ -0,0 +1,5 @@ +// { dg-do compile { target c++11 } } +// { dg-additional-options "-Wno-error=narrowing" } + +// No diagnostic. +#include "sys-narrow.h" diff --git a/gcc/testsuite/g++.dg/diagnostic/sys-narrow1d.C b/gcc/testsuite/g++.dg/diagnostic/sys-narrow1d.C new file mode 100644 index 0000000..0653df9 --- /dev/null +++ b/gcc/testsuite/g++.dg/diagnostic/sys-narrow1d.C @@ -0,0 +1,5 @@ +// { dg-do compile { target c++11 } } +// { dg-options "-fpermissive" } + +// No diagnostic. 
+#include "sys-narrow.h" diff --git a/gcc/testsuite/g++.dg/diagnostic/sys-narrow1e.C b/gcc/testsuite/g++.dg/diagnostic/sys-narrow1e.C new file mode 100644 index 0000000..d219829 --- /dev/null +++ b/gcc/testsuite/g++.dg/diagnostic/sys-narrow1e.C @@ -0,0 +1,5 @@ +// { dg-do compile { target c++11 } } +// { dg-options "-fpermissive -Wsystem-headers" } + +// { dg-warning "narrowing" "" { target *-*-* } 2 } +#include "sys-narrow.h" diff --git a/gcc/testsuite/g++.dg/diagnostic/sys-narrow1f.C b/gcc/testsuite/g++.dg/diagnostic/sys-narrow1f.C new file mode 100644 index 0000000..28f7fe2 --- /dev/null +++ b/gcc/testsuite/g++.dg/diagnostic/sys-narrow1f.C @@ -0,0 +1,5 @@ +// { dg-do compile { target c++11 } } +// { dg-options "-Wno-narrowing" } + +// No diagnostic +#include "sys-narrow.h" diff --git a/gcc/testsuite/g++.dg/diagnostic/sys-narrow1g.C b/gcc/testsuite/g++.dg/diagnostic/sys-narrow1g.C new file mode 100644 index 0000000..d052bea --- /dev/null +++ b/gcc/testsuite/g++.dg/diagnostic/sys-narrow1g.C @@ -0,0 +1,5 @@ +// { dg-do compile { target c++11 } } +// { dg-options "-Wno-error=narrowing -Wsystem-headers" } + +// { dg-warning "narrowing" "" { target *-*-* } 2 } +#include "sys-narrow.h" diff --git a/gcc/testsuite/g++.dg/diagnostic/sys-narrow1h.C b/gcc/testsuite/g++.dg/diagnostic/sys-narrow1h.C new file mode 100644 index 0000000..e96ee1f --- /dev/null +++ b/gcc/testsuite/g++.dg/diagnostic/sys-narrow1h.C @@ -0,0 +1,6 @@ +// { dg-do compile { target c++11 } } +// { dg-options "-Wno-error=narrowing -w" } + +// No diagnostic +int i = { 2.4 }; // C++11 error: narrowing conversion + diff --git a/gcc/testsuite/g++.dg/diagnostic/sys-narrow1i.C b/gcc/testsuite/g++.dg/diagnostic/sys-narrow1i.C new file mode 100644 index 0000000..f7d9585 --- /dev/null +++ b/gcc/testsuite/g++.dg/diagnostic/sys-narrow1i.C @@ -0,0 +1,6 @@ +// { dg-do compile { target c++11 } } +// { dg-options "-fpermissive -w" } + +// No diagnostic +int i = { 2.4 }; // C++11 error: narrowing conversion + diff --git a/gcc/testsuite/g++.dg/ext/integer-pack2.C b/gcc/testsuite/g++.dg/ext/integer-pack2.C index 406e195..fa6a881 100644 --- a/gcc/testsuite/g++.dg/ext/integer-pack2.C +++ b/gcc/testsuite/g++.dg/ext/integer-pack2.C @@ -1,5 +1,5 @@ // { dg-do compile { target { c++11 && int32 } } } -// { dg-options -w } +// { dg-options "-fpermissive -w" } template<typename T, T...> struct integer_sequence { }; template<typename T, T num> diff --git a/gcc/testsuite/g++.dg/harden-cfr-throw-always-O0.C b/gcc/testsuite/g++.dg/harden-cfr-throw-always-O0.C new file mode 100644 index 0000000..e3c109b --- /dev/null +++ b/gcc/testsuite/g++.dg/harden-cfr-throw-always-O0.C @@ -0,0 +1,13 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=always -fdump-tree-hardcfr -ffat-lto-objects -O0" } */ + +/* Check that we insert cleanups for checking around the bodies of + maybe-throwing functions, and also checking before noreturn + calls. h2 and h2b get an extra resx without ehcleanup. 
*/ + +#define NO_OPTIMIZE + +#include "torture/harden-cfr-throw.C" + +/* { dg-final { scan-tree-dump-times "hardcfr_check" 16 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "builtin_trap" 1 "hardcfr" } } */ diff --git a/gcc/testsuite/g++.dg/harden-cfr-throw-returning-O0.C b/gcc/testsuite/g++.dg/harden-cfr-throw-returning-O0.C new file mode 100644 index 0000000..207bdb7 --- /dev/null +++ b/gcc/testsuite/g++.dg/harden-cfr-throw-returning-O0.C @@ -0,0 +1,12 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -foptimize-sibling-calls -fdump-tree-hardcfr -O0" } */ + +/* -fhardcfr-check-returning-calls gets implicitly disabled because, + -at O0, -foptimize-sibling-calls has no effect. */ + +#define NO_OPTIMIZE + +#include "torture/harden-cfr-throw.C" + +/* { dg-final { scan-tree-dump-times "hardcfr_check" 12 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "builtin_trap" 1 "hardcfr" } } */ diff --git a/gcc/testsuite/g++.dg/harden-cfr-throw-returning-enabled-O0.C b/gcc/testsuite/g++.dg/harden-cfr-throw-returning-enabled-O0.C new file mode 100644 index 0000000..b2df689 --- /dev/null +++ b/gcc/testsuite/g++.dg/harden-cfr-throw-returning-enabled-O0.C @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-returning-calls -fdump-tree-hardcfr -O0" } */ + +/* Explicitly enable -fhardcfr-check-returning-calls -at O0. */ + +#include "torture/harden-cfr-throw.C" + +/* Same expectations as those in torture/harden-cfr-throw-returning.C. */ + +/* { dg-final { scan-tree-dump-times "hardcfr_check" 10 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "builtin_trap" 2 "hardcfr" } } */ diff --git a/gcc/testsuite/g++.dg/modules/decltype-1_a.C b/gcc/testsuite/g++.dg/modules/decltype-1_a.C new file mode 100644 index 0000000..ca66e8b --- /dev/null +++ b/gcc/testsuite/g++.dg/modules/decltype-1_a.C @@ -0,0 +1,28 @@ +// PR c++/105322 +// { dg-module-do link +// { dg-additional-options -fmodules-ts } +// { dg-module-cmi pr105322.Decltype } + +export module pr105322.Decltype; + +auto f() { + struct A { int m; + int get () { return m; } + }; + return A{}; +} + +export +inline void g1() { + auto r = decltype(f()){0}; +} + +export +inline void g2() { + auto r = f().m; +} + +export +inline void g3() { + auto r = f().get(); +} diff --git a/gcc/testsuite/g++.dg/modules/decltype-1_b.C b/gcc/testsuite/g++.dg/modules/decltype-1_b.C new file mode 100644 index 0000000..6bebe13 --- /dev/null +++ b/gcc/testsuite/g++.dg/modules/decltype-1_b.C @@ -0,0 +1,10 @@ +// PR c++/105322 +// { dg-additional-options -fmodules-ts } + +import pr105322.Decltype; + +int main() { + g1(); + g2(); + g3(); +} diff --git a/gcc/testsuite/g++.dg/modules/lambda-5_a.C b/gcc/testsuite/g++.dg/modules/lambda-5_a.C new file mode 100644 index 0000000..6b589d4 --- /dev/null +++ b/gcc/testsuite/g++.dg/modules/lambda-5_a.C @@ -0,0 +1,24 @@ +// PR c++/105322 +// { dg-module-do link +// { dg-additional-options -fmodules-ts } +// { dg-module-cmi pr105322.Lambda } + +export module pr105322.Lambda; + +struct A { }; + +export +inline void f1() { + A a; + auto g1 = [a] { }; // used to ICE here during stream out +} + +export +template<class...> +void f2() { + A a; + auto g2 = [a] { }; +} + +export +inline auto g3 = [a=A{}] { }; diff --git a/gcc/testsuite/g++.dg/modules/lambda-5_b.C b/gcc/testsuite/g++.dg/modules/lambda-5_b.C new file mode 100644 index 0000000..a7ce709 --- /dev/null +++ b/gcc/testsuite/g++.dg/modules/lambda-5_b.C @@ -0,0 +1,10 @@ +// PR c++/105322 +// { 
dg-additional-options -fmodules-ts } + +import pr105322.Lambda; + +int main() { + f1(); + f2(); + g3(); +} diff --git a/gcc/testsuite/g++.dg/template/crash127.C b/gcc/testsuite/g++.dg/template/crash127.C index b7c0325..fcf72d8 100644 --- a/gcc/testsuite/g++.dg/template/crash127.C +++ b/gcc/testsuite/g++.dg/template/crash127.C @@ -16,7 +16,6 @@ struct C : public A { B < &A::A > b; // { dg-error "taking address of constructor 'A::A" "" { target c++98_only } } // { dg-error "taking address of constructor 'constexpr A::A" "" { target c++11 } .-1 } + // { dg-error "template argument 1 is invalid" "" { target *-*-* } .-2 } } }; - -template class C < int >; diff --git a/gcc/testsuite/g++.dg/template/sfinae-dr657.C b/gcc/testsuite/g++.dg/template/sfinae-dr657.C index 36c11e6..bb19108 100644 --- a/gcc/testsuite/g++.dg/template/sfinae-dr657.C +++ b/gcc/testsuite/g++.dg/template/sfinae-dr657.C @@ -1,7 +1,6 @@ -// DR 657 SUPERSEDED BY DR 1646 +// DR 657 SUPERSEDED BY P0929 // Test that a return or parameter type with abstract class type DOES NOT cause -// a deduction failure, but there is no implicit conversion sequence for -// a parameter of abstract class type. +// a deduction failure or conversion failure. struct A { @@ -19,5 +18,5 @@ template<class T> int arg(...); int main() { int i = declval<A>(); // { dg-error "ambiguous" } - i = arg<A>(1); + i = arg<A>(1); // { dg-error "abstract" } } diff --git a/gcc/testsuite/g++.dg/torture/harden-cfr-noret-always-no-nothrow.C b/gcc/testsuite/g++.dg/torture/harden-cfr-noret-always-no-nothrow.C new file mode 100644 index 0000000..0d35920 --- /dev/null +++ b/gcc/testsuite/g++.dg/torture/harden-cfr-noret-always-no-nothrow.C @@ -0,0 +1,16 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=always -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that C++ does NOT make for implicit nothrow in noreturn + handling. */ + +#include "harden-cfr-noret-no-nothrow.C" + +/* All 3 noreturn calls. */ +/* { dg-final { scan-tree-dump-times "Bypassing cleanup" 3 "hardcfr" } } */ +/* Out-of-line checks in f. */ +/* { dg-final { scan-tree-dump-times "Inserting out-of-line check in block \[0-9]*'s edge to exit" 1 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */ +/* Inline checks in h and h2. */ +/* { dg-final { scan-tree-dump-times "Inserting inline check before stmt" 2 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */ diff --git a/gcc/testsuite/g++.dg/torture/harden-cfr-noret-never-no-nothrow.C b/gcc/testsuite/g++.dg/torture/harden-cfr-noret-never-no-nothrow.C new file mode 100644 index 0000000..b7d247f --- /dev/null +++ b/gcc/testsuite/g++.dg/torture/harden-cfr-noret-never-no-nothrow.C @@ -0,0 +1,18 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that C++ does NOT make for implicit nothrow in noreturn + handling. Expected results for =never and =nothrow are the same, + since the functions are not nothrow. */ + +#include "harden-cfr-noret-no-nothrow.C" + +/* All 3 noreturn calls. */ +/* { dg-final { scan-tree-dump-times "Associated cleanup" 3 "hardcfr" } } */ +/* Out-of-line checks in f. 
*/ +/* { dg-final { scan-tree-dump-times "Inserting out-of-line check in block \[0-9]*'s edge to exit" 1 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "Inserting out-of-line check before stmt" 1 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */ +/* Inline checks in h and h2. */ +/* { dg-final { scan-tree-dump-times "Inserting inline check before stmt" 2 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */ diff --git a/gcc/testsuite/g++.dg/torture/harden-cfr-noret-no-nothrow.C b/gcc/testsuite/g++.dg/torture/harden-cfr-noret-no-nothrow.C new file mode 100644 index 0000000..62c58cf --- /dev/null +++ b/gcc/testsuite/g++.dg/torture/harden-cfr-noret-no-nothrow.C @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=nothrow -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that C++ does NOT make for implicit nothrow in noreturn + handling. */ + +#define ATTR_NOTHROW_OPT + +#if ! __OPTIMIZE__ +void __attribute__ ((__noreturn__)) h (void); +#endif + +#include "../../c-c++-common/torture/harden-cfr-noret.c" + +/* All 3 noreturn calls. */ +/* { dg-final { scan-tree-dump-times "Associated cleanup" 3 "hardcfr" } } */ +/* Out-of-line checks in f. */ +/* { dg-final { scan-tree-dump-times "Inserting out-of-line check in block \[0-9]*'s edge to exit" 1 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "Inserting out-of-line check before stmt" 1 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */ +/* Inline checks in h and h2. */ +/* { dg-final { scan-tree-dump-times "Inserting inline check before stmt" 2 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */ diff --git a/gcc/testsuite/g++.dg/torture/harden-cfr-throw-always.C b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-always.C new file mode 100644 index 0000000..4d303e7 --- /dev/null +++ b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-always.C @@ -0,0 +1,13 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-returning-calls -fhardcfr-check-noreturn-calls=always -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that we insert cleanups for checking around the bodies of + maybe-throwing functions, and also checking before noreturn + calls. */ + +#include "harden-cfr-throw.C" + +/* { dg-final { scan-tree-dump-times "hardcfr_check" 14 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "builtin_trap" 1 "hardcfr" } } */ +/* h, h2, h2b, and h4. */ +/* { dg-final { scan-tree-dump-times "Bypassing" 4 "hardcfr" } } */ diff --git a/gcc/testsuite/g++.dg/torture/harden-cfr-throw-never.C b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-never.C new file mode 100644 index 0000000..81c1b1a --- /dev/null +++ b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-never.C @@ -0,0 +1,12 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-returning-calls -fhardcfr-check-noreturn-calls=never -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that we insert cleanups for checking around the bodies of + maybe-throwing functions, without checking before noreturn + calls. 
*/ + +#include "harden-cfr-throw.C" + +/* { dg-final { scan-tree-dump-times "hardcfr_check" 12 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "builtin_trap" 1 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "Bypassing" 0 "hardcfr" } } */ diff --git a/gcc/testsuite/g++.dg/torture/harden-cfr-throw-no-xthrow-expected.C b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-no-xthrow-expected.C new file mode 100644 index 0000000..de37b2a --- /dev/null +++ b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-no-xthrow-expected.C @@ -0,0 +1,16 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-returning-calls -fhardcfr-check-noreturn-calls=no-xthrow -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that we insert cleanups for checking around the bodies of + maybe-throwing functions, and also checking before noreturn + calls. */ + +extern void __attribute__ ((__noreturn__, __expected_throw__)) g (void); +extern void __attribute__ ((__noreturn__, __expected_throw__)) g2 (void); + +#include "harden-cfr-throw.C" + +/* In f and h3, there are checkpoints at return and exception escape. . */ +/* { dg-final { scan-tree-dump-times "hardcfr_check" 4 "hardcfr" } } */ +/* Other functions get a single cleanup checkpoint. */ +/* { dg-final { scan-tree-dump-times "builtin_trap" 5 "hardcfr" } } */ diff --git a/gcc/testsuite/g++.dg/torture/harden-cfr-throw-no-xthrow.C b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-no-xthrow.C new file mode 100644 index 0000000..720498b --- /dev/null +++ b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-no-xthrow.C @@ -0,0 +1,12 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-returning-calls -fhardcfr-check-noreturn-calls=no-xthrow -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that we insert cleanups for checking around the bodies of + maybe-throwing functions, and also checking before noreturn + calls. */ + +#include "harden-cfr-throw.C" + +/* { dg-final { scan-tree-dump-times "hardcfr_check" 12 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "builtin_trap" 1 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "Bypassing" 0 "hardcfr" } } */ diff --git a/gcc/testsuite/g++.dg/torture/harden-cfr-throw-nocleanup.C b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-nocleanup.C new file mode 100644 index 0000000..9f35936 --- /dev/null +++ b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-nocleanup.C @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fno-hardcfr-check-exceptions -fno-hardcfr-check-returning-calls -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that we do not insert cleanups for checking around the bodies + of maybe-throwing functions. h4 doesn't get any checks, because we + don't have noreturn checking enabled. 
*/ + +#include "harden-cfr-throw.C" + +/* { dg-final { scan-tree-dump-times "hardcfr_check" 0 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "builtin_trap" 6 "hardcfr" } } */ diff --git a/gcc/testsuite/g++.dg/torture/harden-cfr-throw-nothrow.C b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-nothrow.C new file mode 100644 index 0000000..e1c2e8d --- /dev/null +++ b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-nothrow.C @@ -0,0 +1,11 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-returning-calls -fhardcfr-check-noreturn-calls=nothrow -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that we insert cleanups for checking around the bodies of + maybe-throwing functions, without checking before noreturn + calls. */ + +#include "harden-cfr-throw.C" + +/* { dg-final { scan-tree-dump-times "hardcfr_check" 12 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "builtin_trap" 1 "hardcfr" } } */ diff --git a/gcc/testsuite/g++.dg/torture/harden-cfr-throw-returning.C b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-returning.C new file mode 100644 index 0000000..37e4551 --- /dev/null +++ b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-returning.C @@ -0,0 +1,31 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -foptimize-sibling-calls -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that we insert cleanups for checking around the bodies of + maybe-throwing functions. These results depend on checking before + returning calls, which is only enabled when sibcall optimizations + are enabled, so change the optimization mode to -O1 for f and f2, + so that -foptimize-sibling-calls can take effect and enable + -fhardcfr-check-returning-calls, so that we get the same results. + There is a separate test for -O0. */ + +#if ! __OPTIMIZE__ +void __attribute__ ((__optimize__ (1, "-foptimize-sibling-calls"))) f(int i); +void __attribute__ ((__optimize__ (1, "-foptimize-sibling-calls"))) f2(int i); +void __attribute__ ((__optimize__ (1, "-foptimize-sibling-calls"))) h3(void); +#endif + +#include "harden-cfr-throw.C" + +/* f gets out-of-line checks before the unwrapped tail call and in the + else edge. */ +/* f2 gets out-of-line checks before both unwrapped tail calls. */ +/* h gets out-of-line checks before the implicit return and in the + cleanup block. */ +/* h2 and h2b get out-of-line checks before the cleanup returning + call, and in the cleanup block. */ +/* h3 gets an inline check before the __cxa_end_catch returning call. */ +/* h4 gets an inline check in the cleanup block. */ + +/* { dg-final { scan-tree-dump-times "hardcfr_check" 10 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "builtin_trap" 2 "hardcfr" } } */ diff --git a/gcc/testsuite/g++.dg/torture/harden-cfr-throw.C b/gcc/testsuite/g++.dg/torture/harden-cfr-throw.C new file mode 100644 index 0000000..8e46b90 --- /dev/null +++ b/gcc/testsuite/g++.dg/torture/harden-cfr-throw.C @@ -0,0 +1,73 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-returning-calls -fdump-tree-hardcfr -ffat-lto-objects" } */ + +#if ! __OPTIMIZE__ && ! defined NO_OPTIMIZE +/* Without optimization, functions with cleanups end up with an extra + resx that is not optimized out, so arrange to optimize them. 
*/ +void __attribute__ ((__optimize__ (1))) h2(void); +void __attribute__ ((__optimize__ (1))) h2b(void); +#endif + +/* Check that we insert cleanups for checking around the bodies of + maybe-throwing functions. */ + +extern void g (void); +extern void g2 (void); + +void f(int i) { + if (i) + g (); + /* Out-of-line checks here, and in the implicit handler. */ +} + +void f2(int i) { + if (i) + g (); + else + g2 (); + /* Out-of-line checks here, and in the implicit handler. */ +} + +void h(void) { + try { + g (); + } catch (...) { + throw; + } + /* Out-of-line checks here, and in the implicit handler. */ +} + +struct needs_cleanup { + ~needs_cleanup(); +}; + +void h2(void) { + needs_cleanup y; /* No check in the cleanup handler. */ + g(); + /* Out-of-line checks here, and in the implicit handler. */ +} + +extern void __attribute__ ((__nothrow__)) another_cleanup (void*); + +void h2b(void) { + int x __attribute__ ((cleanup (another_cleanup))); + g(); + /* Out-of-line checks here, and in the implicit handler. */ +} + +void h3(void) { + try { + throw 1; + } catch (...) { + } + /* Out-of-line checks here, and in the implicit handler. */ +} + +void h4(void) { + throw 1; + /* Inline check in the cleanup around the __cxa_throw noreturn call. */ +} + +/* { dg-final { scan-tree-dump-times "hardcfr_check" 12 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "builtin_trap" 1 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "Bypassing" 0 "hardcfr" } } */ diff --git a/gcc/testsuite/g++.dg/warn/Wsign-promo1.C b/gcc/testsuite/g++.dg/warn/Wsign-promo1.C new file mode 100644 index 0000000..51b76ee --- /dev/null +++ b/gcc/testsuite/g++.dg/warn/Wsign-promo1.C @@ -0,0 +1,15 @@ +// Check that we get joust warnings from comparing the final champ to a +// candidate between it and the previous champ. 
+ +// { dg-additional-options -Wsign-promo } + +struct A { A(int); }; + +enum E { e }; + +int f(int, A); +int f(unsigned, A); +int f(int, int); + +int i = f(e, 42); // { dg-warning "passing 'E'" } +// { dg-warning "in call to 'int f" "" { target *-*-* } .-1 } diff --git a/gcc/testsuite/gcc.c-torture/compile/20000403-1.c b/gcc/testsuite/gcc.c-torture/compile/20000403-1.c index cb56028..ef8f77d 100644 --- a/gcc/testsuite/gcc.c-torture/compile/20000403-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/20000403-1.c @@ -1,4 +1,5 @@ /* { dg-skip-if "too many arguments in function call" { bpf-*-* } } */ +/* { dg-additional-options "-std=gnu89" } */ struct utsname { char sysname[32 ]; diff --git a/gcc/testsuite/gcc.c-torture/compile/20000511-1.c b/gcc/testsuite/gcc.c-torture/compile/20000511-1.c index 11c1315..cbabe2f 100644 --- a/gcc/testsuite/gcc.c-torture/compile/20000511-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/20000511-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + typedef struct { char y; char x[32]; diff --git a/gcc/testsuite/gcc.c-torture/compile/20000804-1.c b/gcc/testsuite/gcc.c-torture/compile/20000804-1.c index 95bb0fa..c6f6497 100644 --- a/gcc/testsuite/gcc.c-torture/compile/20000804-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/20000804-1.c @@ -8,6 +8,7 @@ /* { dg-xfail-if "Inconsistent constraint on asm" { bpf-*-* } { "-O0" } { "" } } */ /* { dg-xfail-if "" { h8300-*-* } } */ /* { dg-require-stack-size "99*4+16" } */ +/* { dg-additional-options "-std=gnu89" } */ /* Copyright (C) 2000, 2003 Free Software Foundation */ __complex__ long long f () diff --git a/gcc/testsuite/gcc.c-torture/compile/20020418-1.c b/gcc/testsuite/gcc.c-torture/compile/20020418-1.c index df01e68..ee70a28 100644 --- a/gcc/testsuite/gcc.c-torture/compile/20020418-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/20020418-1.c @@ -3,6 +3,7 @@ was assigned a hard register only after expand_null_return was called, thus return pseudo was clobbered twice and the hard register not at all. */ +/* { dg-additional-options "-std=gnu89" } */ void baz (void); diff --git a/gcc/testsuite/gcc.c-torture/compile/20020927-1.c b/gcc/testsuite/gcc.c-torture/compile/20020927-1.c index b93d8a1..15b0787 100644 --- a/gcc/testsuite/gcc.c-torture/compile/20020927-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/20020927-1.c @@ -1,6 +1,7 @@ /* PR optimization/7520 */ /* ICE at -O3 on x86 due to register life problems caused by the return-without-value in bar. 
*/ +/* { dg-additional-options "-std=gnu89" } */ int foo () diff --git a/gcc/testsuite/gcc.c-torture/compile/20030109-1.c b/gcc/testsuite/gcc.c-torture/compile/20030109-1.c index 4df7d18..cee4381 100644 --- a/gcc/testsuite/gcc.c-torture/compile/20030109-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/20030109-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + void foo () { int x1, x2, x3; diff --git a/gcc/testsuite/gcc.c-torture/compile/20030224-1.c b/gcc/testsuite/gcc.c-torture/compile/20030224-1.c index 11bc3af..10c4619 100644 --- a/gcc/testsuite/gcc.c-torture/compile/20030224-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/20030224-1.c @@ -1,4 +1,6 @@ /* { dg-require-effective-target alloca } */ +/* { dg-additional-options "-std=gnu89" } */ + void zzz (char *s1, char *s2, int len, int *q) { int z = 5; diff --git a/gcc/testsuite/gcc.c-torture/compile/20030415-1.c b/gcc/testsuite/gcc.c-torture/compile/20030415-1.c index bcca4c8..733e0d7 100644 --- a/gcc/testsuite/gcc.c-torture/compile/20030415-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/20030415-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + float g(float f) { return fabs(f); diff --git a/gcc/testsuite/gcc.c-torture/compile/20030612-1.c b/gcc/testsuite/gcc.c-torture/compile/20030612-1.c index 8edbd92..1da8118 100644 --- a/gcc/testsuite/gcc.c-torture/compile/20030612-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/20030612-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + static inline void foo (long long const v0, long long const v1) { diff --git a/gcc/testsuite/gcc.c-torture/compile/20030917-1.c b/gcc/testsuite/gcc.c-torture/compile/20030917-1.c index 38b6598..fa38025 100644 --- a/gcc/testsuite/gcc.c-torture/compile/20030917-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/20030917-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + typedef struct string STR; typedef struct atbl ARRAY; struct string { diff --git a/gcc/testsuite/gcc.c-torture/compile/20031113-1.c b/gcc/testsuite/gcc.c-torture/compile/20031113-1.c index 74c031c..15c2cd5 100644 --- a/gcc/testsuite/gcc.c-torture/compile/20031113-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/20031113-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + /* On Darwin, the stub for simple_cst_equal was not being emitted at all causing the as to die and not create an object file. */ diff --git a/gcc/testsuite/gcc.c-torture/compile/20031220-2.c b/gcc/testsuite/gcc.c-torture/compile/20031220-2.c index 45c8277..6f53079 100644 --- a/gcc/testsuite/gcc.c-torture/compile/20031220-2.c +++ b/gcc/testsuite/gcc.c-torture/compile/20031220-2.c @@ -2,6 +2,7 @@ Orgin: Matt Thomas <matt@3am-software.com> This used to cause GCC to write out an instruction for i386 when using a L64 host which gas could not handle because GCC would write a full 64bit hex string out. 
*/ +/* { dg-additional-options "-std=gnu89" } */ float fabsf (float); diff --git a/gcc/testsuite/gcc.c-torture/compile/20040309-1.c b/gcc/testsuite/gcc.c-torture/compile/20040309-1.c index df8390f..d6ed376 100644 --- a/gcc/testsuite/gcc.c-torture/compile/20040309-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/20040309-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + static const char default_tupleseps[] = ", \t"; diff --git a/gcc/testsuite/gcc.c-torture/compile/20040310-1.c b/gcc/testsuite/gcc.c-torture/compile/20040310-1.c index f0c85f0..d7f4b10 100644 --- a/gcc/testsuite/gcc.c-torture/compile/20040310-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/20040310-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + void I_wacom () { char buffer[50], *p; diff --git a/gcc/testsuite/gcc.c-torture/compile/20040317-3.c b/gcc/testsuite/gcc.c-torture/compile/20040317-3.c index e6982c3..9757175 100644 --- a/gcc/testsuite/gcc.c-torture/compile/20040317-3.c +++ b/gcc/testsuite/gcc.c-torture/compile/20040317-3.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + I_wacom () { char buffer[50], *p; diff --git a/gcc/testsuite/gcc.c-torture/compile/20040817-1.c b/gcc/testsuite/gcc.c-torture/compile/20040817-1.c index 158596c..c99fc33 100644 --- a/gcc/testsuite/gcc.c-torture/compile/20040817-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/20040817-1.c @@ -1,4 +1,5 @@ /* PR 17051: SRA failed to rename the VOPS properly. */ +/* { dg-additional-options "-std=gnu89" } */ struct A { diff --git a/gcc/testsuite/gcc.c-torture/compile/20091215-1.c b/gcc/testsuite/gcc.c-torture/compile/20091215-1.c index 1df5098..017bb1e 100644 --- a/gcc/testsuite/gcc.c-torture/compile/20091215-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/20091215-1.c @@ -1,4 +1,5 @@ /* { dg-require-effective-target untyped_assembly } */ +/* { dg-additional-options "-std=gnu89" } */ void bar (); diff --git a/gcc/testsuite/gcc.c-torture/compile/86.c b/gcc/testsuite/gcc.c-torture/compile/86.c index deea85e..6b0017d 100644 --- a/gcc/testsuite/gcc.c-torture/compile/86.c +++ b/gcc/testsuite/gcc.c-torture/compile/86.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + m32 (a) int *a; { diff --git a/gcc/testsuite/gcc.c-torture/compile/900216-1.c b/gcc/testsuite/gcc.c-torture/compile/900216-1.c index 8a4bfb7..8796599 100644 --- a/gcc/testsuite/gcc.c-torture/compile/900216-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/900216-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + foo (p, a, b) unsigned short *p; { diff --git a/gcc/testsuite/gcc.c-torture/compile/900313-1.c b/gcc/testsuite/gcc.c-torture/compile/900313-1.c index 12252b4..82552dc 100644 --- a/gcc/testsuite/gcc.c-torture/compile/900313-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/900313-1.c @@ -1,5 +1,6 @@ /* { dg-require-effective-target alloca } */ /* { dg-skip-if "too many arguments in function call" { bpf-*-* } } */ +/* { dg-additional-options "-std=gnu89" } */ main () { diff --git a/gcc/testsuite/gcc.c-torture/compile/900407-1.c b/gcc/testsuite/gcc.c-torture/compile/900407-1.c index fdf4779..07cbe07 100644 --- a/gcc/testsuite/gcc.c-torture/compile/900407-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/900407-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + foo (a, b, p) int *p; { diff --git a/gcc/testsuite/gcc.c-torture/compile/900516-1.c b/gcc/testsuite/gcc.c-torture/compile/900516-1.c index 69e9614..b51b870 100644 --- a/gcc/testsuite/gcc.c-torture/compile/900516-1.c +++ 
b/gcc/testsuite/gcc.c-torture/compile/900516-1.c @@ -1 +1,3 @@ +/* { dg-additional-options "-std=gnu89" } */ + f(c){return!(c?2.0:1.0);} diff --git a/gcc/testsuite/gcc.c-torture/compile/920409-2.c b/gcc/testsuite/gcc.c-torture/compile/920409-2.c index ef2bdb8b..b6b8686 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920409-2.c +++ b/gcc/testsuite/gcc.c-torture/compile/920409-2.c @@ -1,2 +1,4 @@ +/* { dg-additional-options "-std=gnu89" } */ + double x(){int x1,x2;double v; if(((long)(x1-x2))<1)return -1.0;v=t(v);v=y(1,v>0.0?(int)v:((int)v-1));} diff --git a/gcc/testsuite/gcc.c-torture/compile/920415-1.c b/gcc/testsuite/gcc.c-torture/compile/920415-1.c index 96ac7fe..261020a 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920415-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/920415-1.c @@ -1,5 +1,6 @@ /* { dg-require-effective-target indirect_jumps } */ /* { dg-require-effective-target label_values } */ +/* { dg-additional-options "-std=gnu89" } */ extern void abort (void); extern void exit (int); diff --git a/gcc/testsuite/gcc.c-torture/compile/920428-1.c b/gcc/testsuite/gcc.c-torture/compile/920428-1.c index fdc478a..32b17cb 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920428-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/920428-1.c @@ -1 +1,3 @@ +/* { dg-additional-options "-std=gnu89" } */ + x(){char*q;return(long)q>>8&0xff;} diff --git a/gcc/testsuite/gcc.c-torture/compile/920428-5.c b/gcc/testsuite/gcc.c-torture/compile/920428-5.c index ff31379..4aa4f60 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920428-5.c +++ b/gcc/testsuite/gcc.c-torture/compile/920428-5.c @@ -1 +1,3 @@ +/* { dg-additional-options "-std=gnu89" } */ + typedef struct{unsigned b0:1;}*t;x(a,b)t a,b;{b->b0=a->b0;} diff --git a/gcc/testsuite/gcc.c-torture/compile/920428-7.c b/gcc/testsuite/gcc.c-torture/compile/920428-7.c index 69b1d97..47da940 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920428-7.c +++ b/gcc/testsuite/gcc.c-torture/compile/920428-7.c @@ -1 +1,3 @@ +/* { dg-additional-options "-std=gnu89" } */ + x(float*x){int a[4],i=0,j;for(j=0;j<2;j++){f(a[i++]);f(a[i++]);}} diff --git a/gcc/testsuite/gcc.c-torture/compile/920501-1.c b/gcc/testsuite/gcc.c-torture/compile/920501-1.c index 84cc122..6215852 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920501-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/920501-1.c @@ -1,3 +1,4 @@ /* { dg-require-effective-target label_values } */ +/* { dg-additional-options "-std=gnu89" } */ a(){int**b[]={&&c};c:;} diff --git a/gcc/testsuite/gcc.c-torture/compile/920501-13.c b/gcc/testsuite/gcc.c-torture/compile/920501-13.c index b5a6798e..e7670e3 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920501-13.c +++ b/gcc/testsuite/gcc.c-torture/compile/920501-13.c @@ -1 +1,3 @@ +/* { dg-additional-options "-std=gnu89" } */ + typedef struct{int i;}t;inline y(t u){}x(){t u;y(u);} diff --git a/gcc/testsuite/gcc.c-torture/compile/920501-15.c b/gcc/testsuite/gcc.c-torture/compile/920501-15.c index 81fd664..aaae9d3 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920501-15.c +++ b/gcc/testsuite/gcc.c-torture/compile/920501-15.c @@ -1 +1,3 @@ +/* { dg-additional-options "-std=gnu89" } */ + x(a)double a;{int i;return i>a?i:i+1;} diff --git a/gcc/testsuite/gcc.c-torture/compile/920501-16.c b/gcc/testsuite/gcc.c-torture/compile/920501-16.c index f82b752..732ef0f 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920501-16.c +++ b/gcc/testsuite/gcc.c-torture/compile/920501-16.c @@ -1 +1,3 @@ +/* { dg-additional-options "-std=gnu89" } */ + f(n){struct z{int a,b[n],c[n];};} diff --git 
a/gcc/testsuite/gcc.c-torture/compile/920501-18.c b/gcc/testsuite/gcc.c-torture/compile/920501-18.c index 70f7c0a..e2110b7 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920501-18.c +++ b/gcc/testsuite/gcc.c-torture/compile/920501-18.c @@ -1,2 +1,4 @@ +/* { dg-additional-options "-std=gnu89" } */ + union u{int i;float f;}; x(p)int p;{union u x;for(x.i=0;x.i<p;x.i++)if(x.f>(float)3.0)break;} diff --git a/gcc/testsuite/gcc.c-torture/compile/920501-20.c b/gcc/testsuite/gcc.c-torture/compile/920501-20.c index 170d050..dcbb039 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920501-20.c +++ b/gcc/testsuite/gcc.c-torture/compile/920501-20.c @@ -1 +1,3 @@ +/* { dg-additional-options "-std=gnu89" } */ + int*f(x)int*x;{if(x[4]){int h[1];if(setjmp(h))return x;}} diff --git a/gcc/testsuite/gcc.c-torture/compile/920501-6.c b/gcc/testsuite/gcc.c-torture/compile/920501-6.c index d0a4063..dcf00ef 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920501-6.c +++ b/gcc/testsuite/gcc.c-torture/compile/920501-6.c @@ -1 +1,3 @@ +/* { dg-additional-options "-std=gnu89" } */ + x(y,z)float*y;{*y=z;} diff --git a/gcc/testsuite/gcc.c-torture/compile/920501-7.c b/gcc/testsuite/gcc.c-torture/compile/920501-7.c index 0fac5f3..08b8535 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920501-7.c +++ b/gcc/testsuite/gcc.c-torture/compile/920501-7.c @@ -1,4 +1,5 @@ /* { dg-require-effective-target label_values } */ /* { dg-skip-if "no support for indirect jumps" { bpf-*-* } } */ +/* { dg-additional-options "-std=gnu89" } */ x(){if(&&e-&&b<0)x();b:goto*&&b;e:;} diff --git a/gcc/testsuite/gcc.c-torture/compile/920502-1.c b/gcc/testsuite/gcc.c-torture/compile/920502-1.c index ec2405a..7156702 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920502-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/920502-1.c @@ -1,3 +1,4 @@ /* { dg-require-effective-target indirect_jumps } */ +/* { dg-additional-options "-std=gnu89" } */ extern void*t[];x(i){goto*t[i];} diff --git a/gcc/testsuite/gcc.c-torture/compile/920502-2.c b/gcc/testsuite/gcc.c-torture/compile/920502-2.c index d3aa1e2..20c2f6e 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920502-2.c +++ b/gcc/testsuite/gcc.c-torture/compile/920502-2.c @@ -1 +1,3 @@ +/* { dg-additional-options "-std=gnu89" } */ + x(c){1LL<<c;} diff --git a/gcc/testsuite/gcc.c-torture/compile/920520-1.c b/gcc/testsuite/gcc.c-torture/compile/920520-1.c index 037102e..c069dcc 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920520-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/920520-1.c @@ -1,4 +1,5 @@ /* { dg-do compile } */ /* { dg-skip-if "" { pdp11-*-* } } */ +/* { dg-additional-options "-std=gnu89" } */ f(){asm("%0"::"r"(1.5F));}g(){asm("%0"::"r"(1.5));} diff --git a/gcc/testsuite/gcc.c-torture/compile/920521-1.c b/gcc/testsuite/gcc.c-torture/compile/920521-1.c index 6c4c99d..aba078f 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920521-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/920521-1.c @@ -1,3 +1,4 @@ /* { dg-do compile } */ +/* { dg-additional-options "-std=gnu89" } */ f(){asm("f":::"cc");}g(x,y){asm("g"::"%r"(x), "r"(y));} diff --git a/gcc/testsuite/gcc.c-torture/compile/920608-1.c b/gcc/testsuite/gcc.c-torture/compile/920608-1.c index cae1978..4ba8351 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920608-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/920608-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + foo (p) int *p; { diff --git a/gcc/testsuite/gcc.c-torture/compile/920617-1.c b/gcc/testsuite/gcc.c-torture/compile/920617-1.c index 2d84e8d..48e16e8 100644 --- 
a/gcc/testsuite/gcc.c-torture/compile/920617-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/920617-1.c @@ -1 +1,3 @@ +/* { dg-additional-options "-std=gnu89" } */ + f(){double*xp,y;*xp++=sqrt(y);} diff --git a/gcc/testsuite/gcc.c-torture/compile/920617-2.c b/gcc/testsuite/gcc.c-torture/compile/920617-2.c index 3a54509..590b8f9 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920617-2.c +++ b/gcc/testsuite/gcc.c-torture/compile/920617-2.c @@ -1 +1,3 @@ +/* { dg-additional-options "-std=gnu89" } */ + f(a,b,c,d)float a[],d;int b[],c;{} diff --git a/gcc/testsuite/gcc.c-torture/compile/920625-1.c b/gcc/testsuite/gcc.c-torture/compile/920625-1.c index 759a356..20ef35e 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920625-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/920625-1.c @@ -1,4 +1,5 @@ /* { dg-skip-if "too many arguments in function call" { bpf-*-* } } */ +/* { dg-additional-options "-std=gnu89" } */ typedef unsigned long int unsigned_word; typedef signed long int signed_word; diff --git a/gcc/testsuite/gcc.c-torture/compile/920625-2.c b/gcc/testsuite/gcc.c-torture/compile/920625-2.c index c0cb6db..40c3d9b 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920625-2.c +++ b/gcc/testsuite/gcc.c-torture/compile/920625-2.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + typedef char * caddr_t; typedef unsigned Cursor; typedef char *String; diff --git a/gcc/testsuite/gcc.c-torture/compile/920626-1.c b/gcc/testsuite/gcc.c-torture/compile/920626-1.c index 5441986..7c2c99f 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920626-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/920626-1.c @@ -1 +1,3 @@ +/* { dg-additional-options "-std=gnu89" } */ + f(x)unsigned x;{return x>>-5;} diff --git a/gcc/testsuite/gcc.c-torture/compile/920706-1.c b/gcc/testsuite/gcc.c-torture/compile/920706-1.c index 13b0ff1..b5e4fa0 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920706-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/920706-1.c @@ -1 +1,3 @@ +/* { dg-additional-options "-std=gnu89" } */ + f(){float i[2],o[1];g(o);return*o;} diff --git a/gcc/testsuite/gcc.c-torture/compile/920710-2.c b/gcc/testsuite/gcc.c-torture/compile/920710-2.c index dcaf1be..782bac6 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920710-2.c +++ b/gcc/testsuite/gcc.c-torture/compile/920710-2.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + union u { struct {unsigned h, l;} i; diff --git a/gcc/testsuite/gcc.c-torture/compile/920723-1.c b/gcc/testsuite/gcc.c-torture/compile/920723-1.c index 2d77875..4295124 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920723-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/920723-1.c @@ -1,5 +1,6 @@ /* { dg-add-options stack_size } */ /* { dg-require-stack-size "75*75*4" } */ +/* { dg-additional-options "-std=gnu89" } */ #if defined(STACK_SIZE) && STACK_SIZE < 65536 # define GITT_SIZE 75 diff --git a/gcc/testsuite/gcc.c-torture/compile/920808-1.c b/gcc/testsuite/gcc.c-torture/compile/920808-1.c index 17510a9..72b8e30 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920808-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/920808-1.c @@ -1 +1,3 @@ +/* { dg-additional-options "-std=gnu89" } */ + f(i){for(i=1;i<=2;({;}),i++){({;}),g();}} diff --git a/gcc/testsuite/gcc.c-torture/compile/920809-1.c b/gcc/testsuite/gcc.c-torture/compile/920809-1.c index ad35e12..fe5af52 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920809-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/920809-1.c @@ -1 +1,3 @@ +/* { dg-additional-options "-std=gnu89" } */ + f(x,y){memcpy (&x,&y,8192);} diff --git 
a/gcc/testsuite/gcc.c-torture/compile/920817-1.c b/gcc/testsuite/gcc.c-torture/compile/920817-1.c index 31a5733..be80730 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920817-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/920817-1.c @@ -1 +1,3 @@ +/* { dg-additional-options "-std=gnu89" } */ + int v;static inline f(){return 0;}g(){return f();}void h(){return v++;} diff --git a/gcc/testsuite/gcc.c-torture/compile/920831-1.c b/gcc/testsuite/gcc.c-torture/compile/920831-1.c index 945c521..c990e08 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920831-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/920831-1.c @@ -1,3 +1,4 @@ /* { dg-require-effective-target indirect_jumps } */ +/* { dg-additional-options "-std=gnu89" } */ f(x){goto*(void *)x;} diff --git a/gcc/testsuite/gcc.c-torture/compile/920917-1.c b/gcc/testsuite/gcc.c-torture/compile/920917-1.c index 139f60e..28e5211 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920917-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/920917-1.c @@ -1,4 +1,5 @@ /* { dg-require-effective-target untyped_assembly } */ +/* { dg-additional-options "-std=gnu89" } */ inline f(x){switch(x){case 6:case 4:case 3:case 1:;}return x;} g(){f(sizeof("xxxxxx"));} diff --git a/gcc/testsuite/gcc.c-torture/compile/920928-2.c b/gcc/testsuite/gcc.c-torture/compile/920928-2.c index a0964f9..ba7c868 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920928-2.c +++ b/gcc/testsuite/gcc.c-torture/compile/920928-2.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + typedef struct{struct{char*d;int b;}*i;}*t; double f(); g(p)t p; diff --git a/gcc/testsuite/gcc.c-torture/compile/920928-5.c b/gcc/testsuite/gcc.c-torture/compile/920928-5.c index b9f9dcb..f38c7cd 100644 --- a/gcc/testsuite/gcc.c-torture/compile/920928-5.c +++ b/gcc/testsuite/gcc.c-torture/compile/920928-5.c @@ -1,6 +1,7 @@ /* REPRODUCED:CC1:SIGNAL MACHINE:m68k OPTIONS:-fpcc-struct-return */ /* { dg-require-effective-target indirect_calls } */ +/* { dg-additional-options "-std=gnu89" } */ struct b{}; f(struct b(*f)()) diff --git a/gcc/testsuite/gcc.c-torture/compile/921012-1.c b/gcc/testsuite/gcc.c-torture/compile/921012-1.c index 7b5cb6b..4f7d797 100644 --- a/gcc/testsuite/gcc.c-torture/compile/921012-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/921012-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + f() { g(({int x;0;})); diff --git a/gcc/testsuite/gcc.c-torture/compile/921021-1.c b/gcc/testsuite/gcc.c-torture/compile/921021-1.c index 18dffeb..2cd6de5 100644 --- a/gcc/testsuite/gcc.c-torture/compile/921021-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/921021-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + void g(); f() diff --git a/gcc/testsuite/gcc.c-torture/compile/921024-1.c b/gcc/testsuite/gcc.c-torture/compile/921024-1.c index e723246..0d5ff95 100644 --- a/gcc/testsuite/gcc.c-torture/compile/921024-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/921024-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + long long f(s,r) { return *(long long*)(s+r); diff --git a/gcc/testsuite/gcc.c-torture/compile/921103-1.c b/gcc/testsuite/gcc.c-torture/compile/921103-1.c index 578e91a..0dbb9cc 100644 --- a/gcc/testsuite/gcc.c-torture/compile/921103-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/921103-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + struct { unsigned int f1, f2; } s; diff --git a/gcc/testsuite/gcc.c-torture/compile/921109-1.c b/gcc/testsuite/gcc.c-torture/compile/921109-1.c index eda4345..be04e68 100644 --- 
a/gcc/testsuite/gcc.c-torture/compile/921109-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/921109-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + typedef struct { double x, y; } p; typedef struct { int s; float r; } t; t *e, i; diff --git a/gcc/testsuite/gcc.c-torture/compile/921111-1.c b/gcc/testsuite/gcc.c-torture/compile/921111-1.c index 11b4674..798afe6 100644 --- a/gcc/testsuite/gcc.c-torture/compile/921111-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/921111-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + int ps; struct vp { int wa; diff --git a/gcc/testsuite/gcc.c-torture/compile/921116-2.c b/gcc/testsuite/gcc.c-torture/compile/921116-2.c index 9eac91c..49b4d4f 100644 --- a/gcc/testsuite/gcc.c-torture/compile/921116-2.c +++ b/gcc/testsuite/gcc.c-torture/compile/921116-2.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + typedef struct { long l[5]; } t; diff --git a/gcc/testsuite/gcc.c-torture/compile/921118-1.c b/gcc/testsuite/gcc.c-torture/compile/921118-1.c index 857c74e..d45fdf4 100644 --- a/gcc/testsuite/gcc.c-torture/compile/921118-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/921118-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + inline f(i) { h((long long) i * 2); diff --git a/gcc/testsuite/gcc.c-torture/compile/921202-1.c b/gcc/testsuite/gcc.c-torture/compile/921202-1.c index d97ddf6..83a3ad4 100644 --- a/gcc/testsuite/gcc.c-torture/compile/921202-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/921202-1.c @@ -1,4 +1,5 @@ /* { dg-require-stack-size "2055*3*8" } */ +/* { dg-additional-options "-std=gnu89" } */ f () { diff --git a/gcc/testsuite/gcc.c-torture/compile/921202-2.c b/gcc/testsuite/gcc.c-torture/compile/921202-2.c index 97d482b..2ad1440 100644 --- a/gcc/testsuite/gcc.c-torture/compile/921202-2.c +++ b/gcc/testsuite/gcc.c-torture/compile/921202-2.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + f(x, c) { for (;;) diff --git a/gcc/testsuite/gcc.c-torture/compile/921203-1.c b/gcc/testsuite/gcc.c-torture/compile/921203-1.c index 50fe3eb..5e60541 100644 --- a/gcc/testsuite/gcc.c-torture/compile/921203-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/921203-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + char dispstr[]; f() { diff --git a/gcc/testsuite/gcc.c-torture/compile/921203-2.c b/gcc/testsuite/gcc.c-torture/compile/921203-2.c index 929afb1..88b22c8 100644 --- a/gcc/testsuite/gcc.c-torture/compile/921203-2.c +++ b/gcc/testsuite/gcc.c-torture/compile/921203-2.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + typedef struct { char x; diff --git a/gcc/testsuite/gcc.c-torture/compile/921206-1.c b/gcc/testsuite/gcc.c-torture/compile/921206-1.c index 10fa5ea..4e9c3e0 100644 --- a/gcc/testsuite/gcc.c-torture/compile/921206-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/921206-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + double sqrt(double),fabs(double),sin(double); int sxs; int sys; diff --git a/gcc/testsuite/gcc.c-torture/compile/930109-1.c b/gcc/testsuite/gcc.c-torture/compile/930109-1.c index dbd15a1..e5ce10e 100644 --- a/gcc/testsuite/gcc.c-torture/compile/930109-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/930109-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + f(x) unsigned x; { diff --git a/gcc/testsuite/gcc.c-torture/compile/930111-1.c b/gcc/testsuite/gcc.c-torture/compile/930111-1.c index 2857419..5661cdf 100644 --- a/gcc/testsuite/gcc.c-torture/compile/930111-1.c +++ 
b/gcc/testsuite/gcc.c-torture/compile/930111-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + /* 2.3.3 crashes on 386 with -traditional */ f(a) char *a; diff --git a/gcc/testsuite/gcc.c-torture/compile/930117-1.c b/gcc/testsuite/gcc.c-torture/compile/930117-1.c index 06466f5..d8a023c 100644 --- a/gcc/testsuite/gcc.c-torture/compile/930117-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/930117-1.c @@ -1,4 +1,5 @@ /* { dg-require-effective-target indirect_calls } */ +/* { dg-additional-options "-std=gnu89" } */ f(x) { diff --git a/gcc/testsuite/gcc.c-torture/compile/930118-1.c b/gcc/testsuite/gcc.c-torture/compile/930118-1.c index b29543c..e1dbc1f 100644 --- a/gcc/testsuite/gcc.c-torture/compile/930118-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/930118-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + f() { __label__ l; diff --git a/gcc/testsuite/gcc.c-torture/compile/930120-1.c b/gcc/testsuite/gcc.c-torture/compile/930120-1.c index a1d8df8..c4fd6a4 100644 --- a/gcc/testsuite/gcc.c-torture/compile/930120-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/930120-1.c @@ -1,4 +1,5 @@ /* { dg-require-effective-target untyped_assembly } */ +/* { dg-additional-options "-std=gnu89" } */ union { short I[2]; long int L; diff --git a/gcc/testsuite/gcc.c-torture/compile/930217-1.c b/gcc/testsuite/gcc.c-torture/compile/930217-1.c index 0f64a04..cdad9f5 100644 --- a/gcc/testsuite/gcc.c-torture/compile/930217-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/930217-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + /* The bit-field below would have a problem if __INT_MAX__ is too small. */ #if __INT_MAX__ < 2147483647 diff --git a/gcc/testsuite/gcc.c-torture/compile/930325-1.c b/gcc/testsuite/gcc.c-torture/compile/930325-1.c index 24cea50..f493d49 100644 --- a/gcc/testsuite/gcc.c-torture/compile/930325-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/930325-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + typedef unsigned uint; inline diff --git a/gcc/testsuite/gcc.c-torture/compile/930411-1.c b/gcc/testsuite/gcc.c-torture/compile/930411-1.c index 6dd87ee..cf1a171 100644 --- a/gcc/testsuite/gcc.c-torture/compile/930411-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/930411-1.c @@ -1,4 +1,5 @@ /* { dg-require-effective-target untyped_assembly } */ +/* { dg-additional-options "-std=gnu89" } */ int heap; diff --git a/gcc/testsuite/gcc.c-torture/compile/930427-2.c b/gcc/testsuite/gcc.c-torture/compile/930427-2.c index 53b6337..ad084ca 100644 --- a/gcc/testsuite/gcc.c-torture/compile/930427-2.c +++ b/gcc/testsuite/gcc.c-torture/compile/930427-2.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + struct s { int f; }; diff --git a/gcc/testsuite/gcc.c-torture/compile/930503-2.c b/gcc/testsuite/gcc.c-torture/compile/930503-2.c index 1b0ea34..7eb58f8 100644 --- a/gcc/testsuite/gcc.c-torture/compile/930503-2.c +++ b/gcc/testsuite/gcc.c-torture/compile/930503-2.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + f() { struct { char x; } r; diff --git a/gcc/testsuite/gcc.c-torture/compile/930506-2.c b/gcc/testsuite/gcc.c-torture/compile/930506-2.c index bc982ac..aa56fe5 100644 --- a/gcc/testsuite/gcc.c-torture/compile/930506-2.c +++ b/gcc/testsuite/gcc.c-torture/compile/930506-2.c @@ -1,4 +1,5 @@ /* { dg-require-effective-target trampolines } */ +/* { dg-additional-options "-std=gnu89" } */ int f1() { diff --git a/gcc/testsuite/gcc.c-torture/compile/930513-2.c b/gcc/testsuite/gcc.c-torture/compile/930513-2.c index a7f508c..d81ae05 
100644 --- a/gcc/testsuite/gcc.c-torture/compile/930513-2.c +++ b/gcc/testsuite/gcc.c-torture/compile/930513-2.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + double g (); f (x) diff --git a/gcc/testsuite/gcc.c-torture/compile/930530-1.c b/gcc/testsuite/gcc.c-torture/compile/930530-1.c index 848e040..6d07c98 100644 --- a/gcc/testsuite/gcc.c-torture/compile/930530-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/930530-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + f () { struct { char a, b; } x; diff --git a/gcc/testsuite/gcc.c-torture/compile/930602-1.c b/gcc/testsuite/gcc.c-torture/compile/930602-1.c index 5a0eb21..3045e40 100644 --- a/gcc/testsuite/gcc.c-torture/compile/930602-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/930602-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + typedef struct { int f[8]; } T; diff --git a/gcc/testsuite/gcc.c-torture/compile/930618-1.c b/gcc/testsuite/gcc.c-torture/compile/930618-1.c index 74c2157..fa728f2 100644 --- a/gcc/testsuite/gcc.c-torture/compile/930618-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/930618-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + f (s) { int r; diff --git a/gcc/testsuite/gcc.c-torture/compile/930623-1.c b/gcc/testsuite/gcc.c-torture/compile/930623-1.c index dd45bbc..7736fa95 100644 --- a/gcc/testsuite/gcc.c-torture/compile/930623-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/930623-1.c @@ -1,5 +1,6 @@ /* { dg-require-effective-target untyped_assembly } */ /* { dg-skip-if "no __builtin_apply in eBPF" { bpf-*-* } } */ +/* { dg-additional-options "-std=gnu89" } */ g (a, b) {} diff --git a/gcc/testsuite/gcc.c-torture/compile/931003-1.c b/gcc/testsuite/gcc.c-torture/compile/931003-1.c index 637b9aa..b14e920 100644 --- a/gcc/testsuite/gcc.c-torture/compile/931003-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/931003-1.c @@ -1,4 +1,5 @@ /* { dg-require-stack-size "52*8" } */ +/* { dg-additional-options "-std=gnu89" } */ f (n, a) int n; diff --git a/gcc/testsuite/gcc.c-torture/compile/931013-1.c b/gcc/testsuite/gcc.c-torture/compile/931013-1.c index c237c0b..ab6138b 100644 --- a/gcc/testsuite/gcc.c-torture/compile/931013-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/931013-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + g (); f () diff --git a/gcc/testsuite/gcc.c-torture/compile/931013-2.c b/gcc/testsuite/gcc.c-torture/compile/931013-2.c index 8fc0db4..8c78fde 100644 --- a/gcc/testsuite/gcc.c-torture/compile/931013-2.c +++ b/gcc/testsuite/gcc.c-torture/compile/931013-2.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + f (unsigned short Z[48]) { int j; diff --git a/gcc/testsuite/gcc.c-torture/compile/931102-2.c b/gcc/testsuite/gcc.c-torture/compile/931102-2.c index f39b271..10ba03f 100644 --- a/gcc/testsuite/gcc.c-torture/compile/931102-2.c +++ b/gcc/testsuite/gcc.c-torture/compile/931102-2.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + typedef struct { int a; } VCR; diff --git a/gcc/testsuite/gcc.c-torture/compile/931203-1.c b/gcc/testsuite/gcc.c-torture/compile/931203-1.c index 3007ff5..b4fef4e 100644 --- a/gcc/testsuite/gcc.c-torture/compile/931203-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/931203-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + v (a, i) unsigned *a, i; { diff --git a/gcc/testsuite/gcc.c-torture/compile/940718-1.c b/gcc/testsuite/gcc.c-torture/compile/940718-1.c index 505280f..82953b1 100644 --- a/gcc/testsuite/gcc.c-torture/compile/940718-1.c +++ 
b/gcc/testsuite/gcc.c-torture/compile/940718-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + extern double log (double) __attribute__ ((const)); f (double x) diff --git a/gcc/testsuite/gcc.c-torture/compile/941014-1.c b/gcc/testsuite/gcc.c-torture/compile/941014-1.c index d317015..760c8aa 100644 --- a/gcc/testsuite/gcc.c-torture/compile/941014-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/941014-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + f (to) char *to; { diff --git a/gcc/testsuite/gcc.c-torture/compile/941014-2.c b/gcc/testsuite/gcc.c-torture/compile/941014-2.c index 01e9a67..c7ac0ac 100644 --- a/gcc/testsuite/gcc.c-torture/compile/941014-2.c +++ b/gcc/testsuite/gcc.c-torture/compile/941014-2.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + void f (n, ppt, xrot) { diff --git a/gcc/testsuite/gcc.c-torture/compile/941014-3.c b/gcc/testsuite/gcc.c-torture/compile/941014-3.c index 6dcb893..db1ac6c 100644 --- a/gcc/testsuite/gcc.c-torture/compile/941014-3.c +++ b/gcc/testsuite/gcc.c-torture/compile/941014-3.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + typedef unsigned char byte; typedef unsigned int uint; typedef unsigned long ulong; diff --git a/gcc/testsuite/gcc.c-torture/compile/941014-4.c b/gcc/testsuite/gcc.c-torture/compile/941014-4.c index 9c4cec8..f7cdcc0 100644 --- a/gcc/testsuite/gcc.c-torture/compile/941014-4.c +++ b/gcc/testsuite/gcc.c-torture/compile/941014-4.c @@ -1,4 +1,5 @@ /* { dg-require-effective-target label_values } */ +/* { dg-additional-options "-std=gnu89" } */ f (int *re) { diff --git a/gcc/testsuite/gcc.c-torture/compile/941111-1.c b/gcc/testsuite/gcc.c-torture/compile/941111-1.c index 3f0c28b..22ed2c4 100644 --- a/gcc/testsuite/gcc.c-torture/compile/941111-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/941111-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + main () { struct S { int i; char c; } obj1, obj2; diff --git a/gcc/testsuite/gcc.c-torture/compile/941113-1.c b/gcc/testsuite/gcc.c-torture/compile/941113-1.c index 5c2e0b6..78f7c05 100644 --- a/gcc/testsuite/gcc.c-torture/compile/941113-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/941113-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + typedef void foo (void); f (x) diff --git a/gcc/testsuite/gcc.c-torture/compile/950124-1.c b/gcc/testsuite/gcc.c-torture/compile/950124-1.c index e723954..1467e7a 100644 --- a/gcc/testsuite/gcc.c-torture/compile/950124-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/950124-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + f () { if (g ()) diff --git a/gcc/testsuite/gcc.c-torture/compile/950329-1.c b/gcc/testsuite/gcc.c-torture/compile/950329-1.c index 63ec6b2..adb3ad5 100644 --- a/gcc/testsuite/gcc.c-torture/compile/950329-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/950329-1.c @@ -1,4 +1,6 @@ /* { dg-require-effective-target untyped_assembly } */ +/* { dg-additional-options "-std=gnu89" } */ + f () { int i; diff --git a/gcc/testsuite/gcc.c-torture/compile/950612-1.c b/gcc/testsuite/gcc.c-torture/compile/950612-1.c index cb3cb0a..93e5e71 100644 --- a/gcc/testsuite/gcc.c-torture/compile/950612-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/950612-1.c @@ -1,3 +1,6 @@ +/* { dg-additional-options "-std=gnu89" } */ + + typedef enum { LODI, diff --git a/gcc/testsuite/gcc.c-torture/compile/950618-1.c b/gcc/testsuite/gcc.c-torture/compile/950618-1.c index 4229da4..3b83286 100644 --- a/gcc/testsuite/gcc.c-torture/compile/950618-1.c +++ 
b/gcc/testsuite/gcc.c-torture/compile/950618-1.c @@ -1,2 +1,4 @@ +/* { dg-additional-options "-std=gnu89" } */ + static __inline__ int f () { return g (); } int g () { return f (); } diff --git a/gcc/testsuite/gcc.c-torture/compile/950719-1.c b/gcc/testsuite/gcc.c-torture/compile/950719-1.c index e1ac117..ce5c7e1 100644 --- a/gcc/testsuite/gcc.c-torture/compile/950719-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/950719-1.c @@ -1,4 +1,5 @@ /* { dg-require-stack-size "4092+4" } */ +/* { dg-additional-options "-std=gnu89" } */ typedef struct { diff --git a/gcc/testsuite/gcc.c-torture/compile/950910-1.c b/gcc/testsuite/gcc.c-torture/compile/950910-1.c index 1be2aa5..75204bc 100644 --- a/gcc/testsuite/gcc.c-torture/compile/950910-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/950910-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + f (char *p) { char c; diff --git a/gcc/testsuite/gcc.c-torture/compile/950922-1.c b/gcc/testsuite/gcc.c-torture/compile/950922-1.c index 73c52f7..ecd987e 100644 --- a/gcc/testsuite/gcc.c-torture/compile/950922-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/950922-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + struct sw { const void *x; int r; diff --git a/gcc/testsuite/gcc.c-torture/compile/951106-1.c b/gcc/testsuite/gcc.c-torture/compile/951106-1.c index 09cba20..ee3b261 100644 --- a/gcc/testsuite/gcc.c-torture/compile/951106-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/951106-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + f (double a, double b) { g (a, 0, b); diff --git a/gcc/testsuite/gcc.c-torture/compile/951222-1.c b/gcc/testsuite/gcc.c-torture/compile/951222-1.c index f1818e3..60a2969 100644 --- a/gcc/testsuite/gcc.c-torture/compile/951222-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/951222-1.c @@ -1,4 +1,5 @@ /* { dg-require-stack-size "10000+3*8" } */ +/* { dg-additional-options "-std=gnu89" } */ extern long long foo (); diff --git a/gcc/testsuite/gcc.c-torture/compile/960106-1.c b/gcc/testsuite/gcc.c-torture/compile/960106-1.c index c8228aa..4f2f95d 100644 --- a/gcc/testsuite/gcc.c-torture/compile/960106-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/960106-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + f (a) { return (a & 1) && !(a & 2 & 4); diff --git a/gcc/testsuite/gcc.c-torture/compile/960319-1.c b/gcc/testsuite/gcc.c-torture/compile/960319-1.c index f3d95ab..345f8b9 100644 --- a/gcc/testsuite/gcc.c-torture/compile/960319-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/960319-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + static void f() { diff --git a/gcc/testsuite/gcc.c-torture/compile/960829-1.c b/gcc/testsuite/gcc.c-torture/compile/960829-1.c index 8c6163d..8bd71b0 100644 --- a/gcc/testsuite/gcc.c-torture/compile/960829-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/960829-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + f () { g (0, 0.0, 0.0, 0.0, 0.0); diff --git a/gcc/testsuite/gcc.c-torture/compile/970206-1.c b/gcc/testsuite/gcc.c-torture/compile/970206-1.c index 95196cd..233d330 100644 --- a/gcc/testsuite/gcc.c-torture/compile/970206-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/970206-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + struct Rect { int iA; diff --git a/gcc/testsuite/gcc.c-torture/compile/980825-1.c b/gcc/testsuite/gcc.c-torture/compile/980825-1.c index 34e35bf..4b3831c3 100644 --- a/gcc/testsuite/gcc.c-torture/compile/980825-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/980825-1.c @@ -1,3 +1,5 
@@ +/* { dg-additional-options "-std=gnu89" } */ + typedef enum { FALSE, TRUE } boolean; enum _errorTypes { FATAL = 1, WARNING = 2, PERROR = 4 }; typedef struct _optionValues { diff --git a/gcc/testsuite/gcc.c-torture/compile/990829-1.c b/gcc/testsuite/gcc.c-torture/compile/990829-1.c index 2252821..97579f0 100644 --- a/gcc/testsuite/gcc.c-torture/compile/990829-1.c +++ b/gcc/testsuite/gcc.c-torture/compile/990829-1.c @@ -1,3 +1,5 @@ +/* { dg-additional-options "-std=gnu89" } */ + struct x { int a:16; diff --git a/gcc/testsuite/gcc.c-torture/compile/991213-2.c b/gcc/testsuite/gcc.c-torture/compile/991213-2.c index dfbedf7d..d528530 100644 --- a/gcc/testsuite/gcc.c-torture/compile/991213-2.c +++ b/gcc/testsuite/gcc.c-torture/compile/991213-2.c @@ -1,4 +1,5 @@ /* { dg-require-effective-target indirect_calls } */ +/* { dg-additional-options "-std=gnu89" } */ typedef long __kernel_time_t; typedef __kernel_time_t time_t; diff --git a/gcc/testsuite/gcc.c-torture/execute/pr111863-1.c b/gcc/testsuite/gcc.c-torture/execute/pr111863-1.c new file mode 100644 index 0000000..4e27fe6 --- /dev/null +++ b/gcc/testsuite/gcc.c-torture/execute/pr111863-1.c @@ -0,0 +1,16 @@ +/* { dg-options " -fno-tree-ccp -fno-tree-dominator-opts -fno-tree-vrp" } */ + +__attribute__((noipa)) +int f(int a) +{ + a &= 2; + return a != 1; +} +int main(void) +{ + int t = f(1); + if (!t) + __builtin_abort(); + __builtin_printf("%d\n",t); + return 0; +} diff --git a/gcc/testsuite/gcc.dg/Wincompatible-pointer-types-2.c b/gcc/testsuite/gcc.dg/Wincompatible-pointer-types-2.c new file mode 100644 index 0000000..19276bb --- /dev/null +++ b/gcc/testsuite/gcc.dg/Wincompatible-pointer-types-2.c @@ -0,0 +1,10 @@ +/* { dg-do compile } */ +/* { dg-options "" } */ + +void * +f1 (int flag, int *a, long *b) +{ + return flag ? a : b; /* { dg-warning "pointer type mismatch in conditional expression \\\[-Wincompatible-pointer-types\\\]" } */ + /* { dg-note "first expression has type 'int \\*'" "" { target *-*-* } .-1 } */ + /* { dg-note "second expression has type 'long int \\*'" "" { target *-*-* } .-2 } */ +} diff --git a/gcc/testsuite/gcc.dg/Wincompatible-pointer-types-3.c b/gcc/testsuite/gcc.dg/Wincompatible-pointer-types-3.c new file mode 100644 index 0000000..3a6f35e --- /dev/null +++ b/gcc/testsuite/gcc.dg/Wincompatible-pointer-types-3.c @@ -0,0 +1,10 @@ +/* { dg-do compile } */ +/* { dg-options "-std=c90 -pedantic-errors" } */ + +void * +f1 (int flag, int *a, long *b) +{ + return flag ? a : b; /* { dg-error "pointer type mismatch in conditional expression \\\[-Wincompatible-pointer-types\\\]" } */ + /* { dg-note "first expression has type 'int \\*'" "" { target *-*-* } .-1 } */ + /* { dg-note "second expression has type 'long int \\*'" "" { target *-*-* } .-2 } */ +} diff --git a/gcc/testsuite/gcc.dg/Wincompatible-pointer-types-4.c b/gcc/testsuite/gcc.dg/Wincompatible-pointer-types-4.c new file mode 100644 index 0000000..c62a7e0 --- /dev/null +++ b/gcc/testsuite/gcc.dg/Wincompatible-pointer-types-4.c @@ -0,0 +1,10 @@ +/* { dg-do compile } */ +/* { dg-options "-std=c90 -pedantic-errors" } */ + +void * +f1 (int flag) +{ + return flag ? 
__builtin_memcpy : __builtin_memcmp; /* { dg-warning "pointer type mismatch in conditional expression \\\[-Wincompatible-pointer-types\\\]" } */ + /* { dg-note "first expression has type 'void \\* \\(\\*\\)\\(void \\*," "" { target *-*-* } .-1 } */ + /* { dg-note "second expression has type 'int \\(\\*\\)\\(const void \\*," "" { target *-*-* } .-2 } */ +} diff --git a/gcc/testsuite/gcc.dg/Wint-conversion-3.c b/gcc/testsuite/gcc.dg/Wint-conversion-3.c new file mode 100644 index 0000000..4e51476 --- /dev/null +++ b/gcc/testsuite/gcc.dg/Wint-conversion-3.c @@ -0,0 +1,14 @@ +/* { dg-do compile } */ +/* { dg-options "" } */ + +const char * +f1 (int flag) +{ + return flag ? "" : 1; /* { dg-warning "pointer/integer type mismatch in conditional expression \\\[-Wint-conversion\\\]" } */ +} + +const char * +f2 (int flag) +{ + return flag ? 1 : ""; /* { dg-warning "pointer/integer type mismatch in conditional expression \\\[-Wint-conversion\\\]" } */ +} diff --git a/gcc/testsuite/gcc.dg/gomp/pr110485.c b/gcc/testsuite/gcc.dg/gomp/pr110485.c new file mode 100644 index 0000000..ba6817a --- /dev/null +++ b/gcc/testsuite/gcc.dg/gomp/pr110485.c @@ -0,0 +1,19 @@ +/* PR 110485 */ +/* { dg-do compile } */ +/* { dg-additional-options "-Ofast -fdump-tree-vect-details" } */ +/* { dg-additional-options "-march=znver4 --param=vect-partial-vector-usage=1" { target x86_64-*-* } } */ +#pragma omp declare simd notinbranch uniform(p) +extern double __attribute__ ((const)) bar (double a, double p); + +double a[1024]; +double b[1024]; + +void foo (int n) +{ + #pragma omp simd + for (int i = 0; i < n; ++i) + a[i] = bar (b[i], 71.2); +} + +/* { dg-final { scan-tree-dump-not "MASK_LOAD" "vect" } } */ +/* { dg-final { scan-tree-dump "can't use a fully-masked loop because a non-masked simd clone was selected." 
"vect" { target x86_64-*-* } } } */ diff --git a/gcc/testsuite/gcc.dg/pr100532-1.c b/gcc/testsuite/gcc.dg/pr100532-1.c new file mode 100644 index 0000000..81e37c6 --- /dev/null +++ b/gcc/testsuite/gcc.dg/pr100532-1.c @@ -0,0 +1,7 @@ +/* { dg-do compile } */ +/* PR c/100532 */ + +typedef __SIZE_TYPE__ size_t; +void *memcpy(void[], const void *, size_t); /* { dg-error "declaration of type name" } */ +void c(void) { memcpy(c, "a", 2); } /* { dg-error "type of formal parameter" } */ + diff --git a/gcc/testsuite/gcc.dg/pr101285-1.c b/gcc/testsuite/gcc.dg/pr101285-1.c new file mode 100644 index 0000000..831e35f --- /dev/null +++ b/gcc/testsuite/gcc.dg/pr101285-1.c @@ -0,0 +1,10 @@ +/* { dg-do compile } */ +/* { dg-options "-W -Wall" } */ +const int b; +typedef void (*ft1)(int[b++]); /* { dg-error "read-only variable" } */ +void bar(int * z); +void baz() +{ + (ft1) bar; /* { dg-warning "statement with no effect" } */ +} + diff --git a/gcc/testsuite/gcc.dg/pr101364-1.c b/gcc/testsuite/gcc.dg/pr101364-1.c new file mode 100644 index 0000000..e7c94a0 --- /dev/null +++ b/gcc/testsuite/gcc.dg/pr101364-1.c @@ -0,0 +1,8 @@ +/* { dg-do compile } */ +/* { dg-options "-std=c90 "} */ + +void fruit(); /* { dg-message "previous declaration" } */ +void fruit( /* { dg-error "conflicting types for" } */ + int b[x], /* { dg-error "undeclared " } */ + short c) +{} /* { dg-message "an argument type that has a" } */ diff --git a/gcc/testsuite/gcc.dg/sso-18.c b/gcc/testsuite/gcc.dg/sso-18.c new file mode 100644 index 0000000..799a0c85 --- /dev/null +++ b/gcc/testsuite/gcc.dg/sso-18.c @@ -0,0 +1,16 @@ +/* { dg-do compile } */ +/* PR c/104822 */ + +#include <stddef.h> + +struct Sb { + int i; +} __attribute__((scalar_storage_order("big-endian"))); +struct Sl { + int i; +} __attribute__((scalar_storage_order("little-endian"))); + +/* Neither of these should warn about incompatible scalar storage order + as NULL pointers are compatiable with both endian. */ +struct Sb *pb = NULL; /* { dg-bogus "" } */ +struct Sl *pl = NULL; /* { dg-bogus "" } */ diff --git a/gcc/testsuite/gcc.dg/sso-19.c b/gcc/testsuite/gcc.dg/sso-19.c new file mode 100644 index 0000000..50f7b40 --- /dev/null +++ b/gcc/testsuite/gcc.dg/sso-19.c @@ -0,0 +1,17 @@ +/* { dg-do compile } */ +/* { dg-options "-std=c2x" } */ +/* PR c/104822 */ + +#include <stddef.h> + +struct Sb { + int i; +} __attribute__((scalar_storage_order("big-endian"))); +struct Sl { + int i; +} __attribute__((scalar_storage_order("little-endian"))); + +/* Neither of these should warn about incompatible scalar storage order + as NULL pointers are compatiable with both endian. */ +struct Sb *pb = nullptr; /* { dg-bogus "" } */ +struct Sl *pl = nullptr; /* { dg-bogus "" } */ diff --git a/gcc/testsuite/gcc.dg/torture/harden-cfr-noret-no-nothrow.c b/gcc/testsuite/gcc.dg/torture/harden-cfr-noret-no-nothrow.c new file mode 100644 index 0000000..8e4ee1f --- /dev/null +++ b/gcc/testsuite/gcc.dg/torture/harden-cfr-noret-no-nothrow.c @@ -0,0 +1,15 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=nothrow -fdump-tree-hardcfr -ffat-lto-objects" } */ + +/* Check that C makes for implicit nothrow in noreturn handling. */ + +#define ATTR_NOTHROW_OPT + +#include "../../c-c++-common/torture/harden-cfr-noret.c" + +/* One out-of-line check before the noreturn call in f, and another at the end + of f. 
*/ +/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */ +/* One inline check in h, before the noreturn call, and another in h2, before + or after the call, depending on noreturn detection. */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */ diff --git a/gcc/testsuite/gcc.dg/torture/harden-cfr-tail-ub.c b/gcc/testsuite/gcc.dg/torture/harden-cfr-tail-ub.c new file mode 100644 index 0000000..634d98f --- /dev/null +++ b/gcc/testsuite/gcc.dg/torture/harden-cfr-tail-ub.c @@ -0,0 +1,40 @@ +/* { dg-do compile } */ +/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-returning-calls -fno-hardcfr-check-exceptions -fdump-tree-hardcfr -ffat-lto-objects -Wno-return-type" } */ + +/* In C only, check some additional cases (comparing with + c-c++-common/torture/harden-cfr-tail.c) of falling off the end of non-void + function. C++ would issue an unreachable call in these cases. */ + +extern int g (int i); + +int f1(int i) { + /* Inline check before the returning call, that doesn't return anything. */ + g (i); + /* Implicit return without value, despite the return type; this combination + enables tail-calling of g, and is recognized as a returning call. */ +} + +extern void g2 (int i); + +int f2(int i) { + /* Inline check before the returning call, that disregards its return + value. */ + g2 (i); + /* Implicit return without value, despite the return type; this combination + enables tail-calling of g2, and is recognized as a returning call. */ +} + +int f3(int i) { + if (i) + /* Out-of-line check before the returning call. */ + return g (i); + /* Out-of-line check before implicit return. */ +} + +/* Out-of-line checks in f3, before returning calls and before return. */ +/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */ +/* Inline checking in all other functions. */ +/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */ +/* Check before tail-call in all functions, but f3 is out-of-line. 
*/ +/* { dg-final { scan-tree-dump-times "Inserting inline check before stmt" 2 "hardcfr" } } */ +/* { dg-final { scan-tree-dump-times "Inserting out-of-line check before stmt" 1 "hardcfr" } } */ diff --git a/gcc/testsuite/gcc.dg/torture/pr110243.c b/gcc/testsuite/gcc.dg/torture/pr110243.c new file mode 100644 index 0000000..07dffd9 --- /dev/null +++ b/gcc/testsuite/gcc.dg/torture/pr110243.c @@ -0,0 +1,22 @@ +/* { dg-do run } */ +/* { dg-require-effective-target lp64 } */ + +#define X 1100000000 +unsigned char a; +long b = X; +int c[9][1]; +unsigned d; +static long *e = &b, *f = &b; +int g() { + if (a && a <= '9') + return '0'; + if (a) + return 10; + return -1; +} +int main() { + d = 0; + for (; (int)*f -(X-1) + d < 9; d++) + c[g() + (int)*f + ((int)*e - X) -(X-1) + d] + [0] = 0; +} diff --git a/gcc/testsuite/gcc.dg/torture/pr111000.c b/gcc/testsuite/gcc.dg/torture/pr111000.c new file mode 100644 index 0000000..e6821e1 --- /dev/null +++ b/gcc/testsuite/gcc.dg/torture/pr111000.c @@ -0,0 +1,21 @@ +/* { dg-do run } */ + +volatile int a = 68; +int b, d, e; +int main() +{ + int t = a; + for (; d <= 6; d++) { + for (b = 0; b <= 6; b++) { + if (t >= 31) + e = d; + else if (d > (647 >> t)) + e = d; + else + e = 0; + } + } + if (e != 6) + __builtin_abort(); + return 0; +} diff --git a/gcc/testsuite/gcc.dg/torture/pr111336.c b/gcc/testsuite/gcc.dg/torture/pr111336.c new file mode 100644 index 0000000..a2fa122 --- /dev/null +++ b/gcc/testsuite/gcc.dg/torture/pr111336.c @@ -0,0 +1,25 @@ +/* { dg-do run } */ +/* { dg-require-effective-target lp64 } */ + +extern void abort (void); +int a, b; +long c = 3521733542; +int d[2]; +int e(int f, int g) { + if (f == 0) + return 0; + if (f > 200) + return 0; + if (g) + return 5 * f; + return 0; +} +int main() +{ + int h = 0; + for (; e((int)c + 773233762, c + 60) + 773163185 + h < 2; h++) + d[h] = b; + if (a != 0) + abort (); + return 0; +} diff --git a/gcc/testsuite/gcc.dg/torture/pr111383.c b/gcc/testsuite/gcc.dg/torture/pr111383.c new file mode 100644 index 0000000..cce873b --- /dev/null +++ b/gcc/testsuite/gcc.dg/torture/pr111383.c @@ -0,0 +1,29 @@ +/* { dg-do run } */ +/* { dg-require-effective-target int32plus } */ + +extern void abort (void); +int a, d = 1625015426; +char e; +short b; +short *f = &b, *g = &b; +void h(char *k) { + char c = *k; + for (; c;) + ; +} +int main() +{ + *g = 25330; + int i, j; + i = 0; + for (; *f + d - 1625040257 < 7;) + ; + for (; i < 4; i++) { + j = 0; + for (; (d - 1625015511) + (d - 1625015341) + j < 1; j++) + h(&e); + } + if (a != 0) + abort (); + return 0; +} diff --git a/gcc/testsuite/gcc.dg/torture/pr111445.c b/gcc/testsuite/gcc.dg/torture/pr111445.c new file mode 100644 index 0000000..320e0b9 --- /dev/null +++ b/gcc/testsuite/gcc.dg/torture/pr111445.c @@ -0,0 +1,29 @@ +/* { dg-do run } */ + +extern void abort (void); +short a, b; +unsigned char c = 255; +unsigned cnt; +void __attribute__((noipa)) +check (int x) +{ + if (x != 0) + abort (); + cnt++; +} +int main() +{ + int d; + unsigned char e; + d = 0; + for (; a >= 0; a--) { + int *f = &d; + *f = c; + } + e = 0; + for (; (unsigned char)(d - 255) + e <= 1; e++) + check (b); + if (cnt != 2) + abort (); + return 0; +} diff --git a/gcc/testsuite/gcc.dg/vect/costmodel/riscv/rvv/dynamic-lmul2-7.c b/gcc/testsuite/gcc.dg/vect/costmodel/riscv/rvv/dynamic-lmul2-7.c index 3dfc6f1..2a735d8 100644 --- a/gcc/testsuite/gcc.dg/vect/costmodel/riscv/rvv/dynamic-lmul2-7.c +++ b/gcc/testsuite/gcc.dg/vect/costmodel/riscv/rvv/dynamic-lmul2-7.c @@ -18,7 +18,8 @@ bar (int *x, int a, int b, 
int n) } /* { dg-final { scan-assembler {e32,m2} } } */ -/* { dg-final { scan-assembler-times {csrr} 1 } } */ +/* { dg-final { scan-assembler-not {jr} } } */ +/* { dg-final { scan-assembler-times {ret} 2 } } */ /* { dg-final { scan-tree-dump-times "Maximum lmul = 8" 1 "vect" } } */ /* { dg-final { scan-tree-dump-times "Maximum lmul = 4" 1 "vect" } } */ /* { dg-final { scan-tree-dump-times "Maximum lmul = 2" 1 "vect" } } */ diff --git a/gcc/testsuite/gcc.dg/vect/costmodel/riscv/rvv/pr111848.c b/gcc/testsuite/gcc.dg/vect/costmodel/riscv/rvv/pr111848.c new file mode 100644 index 0000000..b203ca9 --- /dev/null +++ b/gcc/testsuite/gcc.dg/vect/costmodel/riscv/rvv/pr111848.c @@ -0,0 +1,35 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -fdump-tree-vect-details" } */ + +void +f3 (uint8_t *restrict a, uint8_t *restrict b, + uint8_t *restrict c, uint8_t *restrict d, + int n) +{ + for (int i = 0; i < n; ++i) + { + a[i * 8] = c[i * 8] + d[i * 8]; + a[i * 8 + 1] = c[i * 8] + d[i * 8 + 1]; + a[i * 8 + 2] = c[i * 8 + 2] + d[i * 8 + 2]; + a[i * 8 + 3] = c[i * 8 + 2] + d[i * 8 + 3]; + a[i * 8 + 4] = c[i * 8 + 4] + d[i * 8 + 4]; + a[i * 8 + 5] = c[i * 8 + 4] + d[i * 8 + 5]; + a[i * 8 + 6] = c[i * 8 + 6] + d[i * 8 + 6]; + a[i * 8 + 7] = c[i * 8 + 6] + d[i * 8 + 7]; + b[i * 8] = c[i * 8 + 1] + d[i * 8]; + b[i * 8 + 1] = c[i * 8 + 1] + d[i * 8 + 1]; + b[i * 8 + 2] = c[i * 8 + 3] + d[i * 8 + 2]; + b[i * 8 + 3] = c[i * 8 + 3] + d[i * 8 + 3]; + b[i * 8 + 4] = c[i * 8 + 5] + d[i * 8 + 4]; + b[i * 8 + 5] = c[i * 8 + 5] + d[i * 8 + 5]; + b[i * 8 + 6] = c[i * 8 + 7] + d[i * 8 + 6]; + b[i * 8 + 7] = c[i * 8 + 7] + d[i * 8 + 7]; + } +} + +/* { dg-final { scan-assembler {e8,m4} } } */ +/* { dg-final { scan-assembler-not {jr} } } */ +/* { dg-final { scan-assembler-times {ret} 1 } } */ +/* { dg-final { scan-tree-dump-times "Maximum lmul = 4" 1 "vect" } } */ +/* { dg-final { scan-tree-dump-not "Maximum lmul = 2" "vect" } } */ +/* { dg-final { scan-tree-dump-not "Maximum lmul = 1" "vect" } } */ diff --git a/gcc/testsuite/gcc.dg/vect/pr111860.c b/gcc/testsuite/gcc.dg/vect/pr111860.c new file mode 100644 index 0000000..36f0774 --- /dev/null +++ b/gcc/testsuite/gcc.dg/vect/pr111860.c @@ -0,0 +1,16 @@ +/* { dg-do compile } */ + +int optimize_path_n, optimize_path_d; +int *optimize_path_d_0; +extern void path_threeOpt( long); +void optimize_path() { + int i; + long length; + i = 0; + for (; i <= optimize_path_n; i++) + optimize_path_d = 0; + i = 0; + for (; i < optimize_path_n; i++) + length += optimize_path_d_0[i]; + path_threeOpt(length); +} diff --git a/gcc/testsuite/gcc.dg/vect/pr111882.c b/gcc/testsuite/gcc.dg/vect/pr111882.c new file mode 100644 index 0000000..024ad57 --- /dev/null +++ b/gcc/testsuite/gcc.dg/vect/pr111882.c @@ -0,0 +1,15 @@ +/* { dg-do compile } */ +/* { dg-additional-options { -fdump-tree-ifcvt-all } } */ + +static void __attribute__((noipa)) f(int n) { + int i, j; + struct S { char d[n]; int a; int b : 17; int c : 12; }; + struct S A[100][1111]; + for (i = 0; i < 100; i++) { + asm volatile("" : : "g"(&A[0][0]) : "memory"); + for (j = 0; j < 1111; j++) A[i][j].b = 2; + } +} +void g(void) { f(1); } + +/* { dg-final { scan-tree-dump-not "Bitfield OK to lower" "ifcvt" } } */ diff --git a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s353.c b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s353.c index 98ba752..2c4fa3f 100644 --- a/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s353.c +++ b/gcc/testsuite/gcc.dg/vect/tsvc/vect-tsvc-s353.c @@ -44,4 +44,4 @@ int main (int argc, char **argv) return 0; } -/*
{ dg-final { scan-tree-dump "vectorized 1 loops" "vect" { xfail { ! riscv_v } } } } */ +/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" } } */ diff --git a/gcc/testsuite/gcc.dg/vect/vect-bitfield-read-1-not.c b/gcc/testsuite/gcc.dg/vect/vect-bitfield-read-1-not.c new file mode 100644 index 0000000..0d91067 --- /dev/null +++ b/gcc/testsuite/gcc.dg/vect/vect-bitfield-read-1-not.c @@ -0,0 +1,60 @@ +/* { dg-require-effective-target vect_shift } */ +/* { dg-require-effective-target vect_long_long } */ +/* { dg-additional-options { "-fdump-tree-ifcvt-all" } } */ + +#include <stdarg.h> +#include "tree-vect.h" + +extern void abort(void); + +struct s { + char a : 4; +}; + +#define N 32 +#define ELT0 {0} +#define ELT1 {1} +#define ELT2 {2} +#define ELT3 {3} +#define RES 56 +struct s A[N] + = { ELT0, ELT1, ELT2, ELT3, ELT0, ELT1, ELT2, ELT3, + ELT0, ELT1, ELT2, ELT3, ELT0, ELT1, ELT2, ELT3, + ELT0, ELT1, ELT2, ELT3, ELT0, ELT1, ELT2, ELT3, + ELT0, ELT1, ELT2, ELT3, ELT0, ELT1, ELT2, ELT3}; + +int __attribute__ ((noipa)) +f(struct s *ptr, unsigned n) { + int res = 0; + for (int i = 0; i < n; ++i) + { + switch (ptr[i].a) + { + case 0: + res += ptr[i].a + 1; + break; + case 1: + case 2: + case 3: + res += ptr[i].a; + break; + default: + return 0; + } + } + return res; +} + +int main (void) +{ + check_vect (); + + if (f(&A[0], N) != RES) + abort (); + + return 0; +} + +/* { dg-final { scan-tree-dump-not "Bitfield OK to lower." "ifcvt" } } */ + + diff --git a/gcc/testsuite/gcc.dg/vect/vect-bitfield-read-2-not.c b/gcc/testsuite/gcc.dg/vect/vect-bitfield-read-2-not.c new file mode 100644 index 0000000..4ac7b3f --- /dev/null +++ b/gcc/testsuite/gcc.dg/vect/vect-bitfield-read-2-not.c @@ -0,0 +1,49 @@ +/* { dg-require-effective-target vect_shift } */ +/* { dg-require-effective-target vect_long_long } */ +/* { dg-additional-options { "-fdump-tree-ifcvt-all" } } */ + +#include <stdarg.h> +#include "tree-vect.h" + +extern void abort(void); + +struct s { + char a : 4; +}; + +#define N 32 +#define ELT0 {0} +#define ELT1 {1} +#define ELT2 {2} +#define ELT3 {3} +#define RES 48 +struct s A[N] + = { ELT0, ELT1, ELT2, ELT3, ELT0, ELT1, ELT2, ELT3, + ELT0, ELT1, ELT2, ELT3, ELT0, ELT1, ELT2, ELT3, + ELT0, ELT1, ELT2, ELT3, ELT0, ELT1, ELT2, ELT3, + ELT0, ELT1, ELT2, ELT3, ELT0, ELT1, ELT2, ELT3}; + +int __attribute__ ((noipa)) +f(struct s *ptr, unsigned n) { + int res = 0; + for (int i = 0; i < n; ++i) + { + asm volatile ("" ::: "memory"); + res += ptr[i].a; + } + return res; +} + +int main (void) +{ + check_vect (); + + if (f(&A[0], N) != RES) + abort (); + + return 0; +} + +/* { dg-final { scan-tree-dump-not "Bitfield OK to lower." 
"ifcvt" } } */ + + diff --git a/gcc/testsuite/gcc.dg/vect/vect-bitfield-read-8.c b/gcc/testsuite/gcc.dg/vect/vect-bitfield-read-8.c new file mode 100644 index 0000000..52cfd33 --- /dev/null +++ b/gcc/testsuite/gcc.dg/vect/vect-bitfield-read-8.c @@ -0,0 +1,49 @@ +/* { dg-require-effective-target vect_int } */ +/* { dg-require-effective-target vect_shift } */ +/* { dg-additional-options { "-fdump-tree-ifcvt-all" } } */ + +#include <stdarg.h> +#include "tree-vect.h" + +extern void abort(void); + +struct s { int i : 31; }; + +#define ELT0 {0} +#define ELT1 {1} +#define ELT2 {2} +#define ELT3 {3} +#define ELT4 {4} +#define N 32 +#define RES 25 +struct s A[N] + = { ELT0, ELT1, ELT2, ELT3, ELT0, ELT1, ELT2, ELT3, + ELT0, ELT1, ELT2, ELT3, ELT0, ELT1, ELT2, ELT3, + ELT0, ELT1, ELT4, ELT3, ELT0, ELT1, ELT2, ELT3, + ELT0, ELT1, ELT2, ELT3, ELT0, ELT1, ELT2, ELT3}; + +int __attribute__ ((noipa)) +f(struct s *ptr, unsigned n) { + int res = 0; + for (int i = 0; i < n; ++i) + { + if (ptr[i].i == 4) + return res; + res += ptr[i].i; + } + + return res; +} + +int main (void) +{ + check_vect (); + + if (f(&A[0], N) != RES) + abort (); + + return 0; +} + +/* { dg-final { scan-tree-dump "Bitfield OK to lower." "ifcvt" } } */ + diff --git a/gcc/testsuite/gcc.dg/vect/vect-bitfield-read-9.c b/gcc/testsuite/gcc.dg/vect/vect-bitfield-read-9.c new file mode 100644 index 0000000..ab81469 --- /dev/null +++ b/gcc/testsuite/gcc.dg/vect/vect-bitfield-read-9.c @@ -0,0 +1,51 @@ +/* { dg-require-effective-target vect_shift } */ +/* { dg-require-effective-target vect_long_long } */ +/* { dg-additional-options { "-fdump-tree-ifcvt-all" } } */ + +#include <stdarg.h> +#include "tree-vect.h" + +extern void abort(void); + +struct s { + unsigned i : 31; + char a : 4; +}; + +#define N 32 +#define ELT0 {0x7FFFFFFFUL, 0} +#define ELT1 {0x7FFFFFFFUL, 1} +#define ELT2 {0x7FFFFFFFUL, 2} +#define ELT3 {0x7FFFFFFFUL, 3} +#define ELT4 {0x7FFFFFFFUL, 4} +#define RES 9 +struct s A[N] + = { ELT0, ELT4, ELT2, ELT3, ELT0, ELT1, ELT2, ELT3, + ELT0, ELT1, ELT2, ELT3, ELT0, ELT1, ELT2, ELT3, + ELT0, ELT1, ELT2, ELT3, ELT0, ELT1, ELT2, ELT3, + ELT0, ELT1, ELT2, ELT3, ELT0, ELT1, ELT2, ELT3}; + +int __attribute__ ((noipa)) +f(struct s *ptr, unsigned n) { + int res = 0; + for (int i = 0; i < n; ++i) + { + if (ptr[i].a) + return 9; + res += ptr[i].a; + } + return res; +} + +int main (void) +{ + check_vect (); + + if (f(&A[0], N) != RES) + abort (); + + return 0; +} + +/* { dg-final { scan-tree-dump "Bitfield OK to lower." "ifcvt" } } */ + diff --git a/gcc/testsuite/gcc.dg/vect/vect-gather-1.c b/gcc/testsuite/gcc.dg/vect/vect-gather-1.c index e3bbf5c..5f6640d 100644 --- a/gcc/testsuite/gcc.dg/vect/vect-gather-1.c +++ b/gcc/testsuite/gcc.dg/vect/vect-gather-1.c @@ -58,4 +58,4 @@ main (void) return 0; } -/* { dg-final { scan-tree-dump "Loop contains only SLP stmts" vect { target vect_gather_load_ifn } } } */ +/* { dg-final { scan-tree-dump "Loop contains only SLP stmts" vect } } */ diff --git a/gcc/testsuite/gcc.dg/vect/vect-gather-2.c b/gcc/testsuite/gcc.dg/vect/vect-gather-2.c index a1f6ba4..4c23b80 100644 --- a/gcc/testsuite/gcc.dg/vect/vect-gather-2.c +++ b/gcc/testsuite/gcc.dg/vect/vect-gather-2.c @@ -8,6 +8,7 @@ f1 (int *restrict y, int *restrict x1, int *restrict x2, { for (int i = 0; i < N; ++i) { + /* Different base. 
*/ y[i * 2] = x1[indices[i * 2]] + 1; y[i * 2 + 1] = x2[indices[i * 2 + 1]] + 2; } @@ -18,8 +19,9 @@ f2 (int *restrict y, int *restrict x, int *restrict indices) { for (int i = 0; i < N; ++i) { - y[i * 2] = x[indices[i * 2]] + 1; - y[i * 2 + 1] = x[indices[i * 2 + 1] * 2] + 2; + /* Different scale. */ + y[i * 2] = *(int *)((char *)x + (__UINTPTR_TYPE__)indices[i * 2] * 4) + 1; + y[i * 2 + 1] = *(int *)((char *)x + (__UINTPTR_TYPE__)indices[i * 2 + 1] * 2) + 2; } } @@ -28,9 +30,12 @@ f3 (int *restrict y, int *restrict x, int *restrict indices) { for (int i = 0; i < N; ++i) { + /* Different type. */ y[i * 2] = x[indices[i * 2]] + 1; - y[i * 2 + 1] = x[(unsigned int) indices[i * 2 + 1]] + 2; + y[i * 2 + 1] = x[((unsigned int *) indices)[i * 2 + 1]] + 2; } } -/* { dg-final { scan-tree-dump-not "Loop contains only SLP stmts" vect { target vect_gather_load_ifn } } } */ +/* { dg-final { scan-tree-dump-not "Loop contains only SLP stmts" vect } } */ +/* { dg-final { scan-tree-dump "different gather base" vect { target { ! vect_gather_load_ifn } } } } */ +/* { dg-final { scan-tree-dump "different gather scale" vect { target { ! vect_gather_load_ifn } } } } */ diff --git a/gcc/testsuite/gcc.dg/vect/vect-gather-3.c b/gcc/testsuite/gcc.dg/vect/vect-gather-3.c index adfef3bf..30ba678 100644 --- a/gcc/testsuite/gcc.dg/vect/vect-gather-3.c +++ b/gcc/testsuite/gcc.dg/vect/vect-gather-3.c @@ -62,4 +62,4 @@ main (void) return 0; } -/* { dg-final { scan-tree-dump "Loop contains only SLP stmts" vect { target { vect_gather_load_ifn && vect_masked_load } } } } */ +/* { dg-final { scan-tree-dump "Loop contains only SLP stmts" vect { target { { vect_gather_load_ifn || avx2 } && vect_masked_load } } } } */ diff --git a/gcc/testsuite/gcc.dg/vect/vect-gather-4.c b/gcc/testsuite/gcc.dg/vect/vect-gather-4.c index ee2e4e4..1ce63e6 100644 --- a/gcc/testsuite/gcc.dg/vect/vect-gather-4.c +++ b/gcc/testsuite/gcc.dg/vect/vect-gather-4.c @@ -39,10 +39,10 @@ f3 (int *restrict y, int *restrict x, int *restrict indices) y[i * 2] = (indices[i * 2] < N * 2 ? x[indices[i * 2]] + 1 : 1); - y[i * 2 + 1] = (indices[i * 2 + 1] < N * 2 - ? x[(unsigned int) indices[i * 2 + 1]] + 2 + y[i * 2 + 1] = (((unsigned int *)indices)[i * 2 + 1] < N * 2 + ? x[((unsigned int *) indices)[i * 2 + 1]] + 2 : 2); } } -/* { dg-final { scan-tree-dump-not "Loop contains only SLP stmts" vect { target vect_gather_load_ifn } } } */ +/* { dg-final { scan-tree-dump-not "Loop contains only SLP stmts" vect } } */ diff --git a/gcc/testsuite/gcc.dg/vect/vect-simd-clone-16f.c b/gcc/testsuite/gcc.dg/vect/vect-simd-clone-16f.c index 574698d..7cd29e8 100644 --- a/gcc/testsuite/gcc.dg/vect/vect-simd-clone-16f.c +++ b/gcc/testsuite/gcc.dg/vect/vect-simd-clone-16f.c @@ -7,9 +7,8 @@ #include "vect-simd-clone-16.c" /* Ensure the the in-branch simd clones are used on targets that support them. - Some targets use pairs of vectors and do twice the calls. */ -/* { dg-final { scan-tree-dump-times {[\n\r] [^\n]* = foo\.simdclone} 2 "vect" { target { ! { { i?86-*-* x86_64-*-* } && { ! lp64 } } } } } } */ -/* { dg-final { scan-tree-dump-times {[\n\r] [^\n]* = foo\.simdclone} 4 "vect" { target { { i?86*-*-* x86_64-*-* } && { ! lp64 } } } } } */ + */ +/* { dg-final { scan-tree-dump-times {[\n\r] [^\n]* = foo\.simdclone} 2 "vect" } } */ /* The LTO test produces two dump files and we scan the wrong one. 
*/ /* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */ diff --git a/gcc/testsuite/gcc.dg/vect/vect-simd-clone-17f.c b/gcc/testsuite/gcc.dg/vect/vect-simd-clone-17f.c index 8bb6d19..177521d 100644 --- a/gcc/testsuite/gcc.dg/vect/vect-simd-clone-17f.c +++ b/gcc/testsuite/gcc.dg/vect/vect-simd-clone-17f.c @@ -7,9 +7,8 @@ #include "vect-simd-clone-17.c" /* Ensure the the in-branch simd clones are used on targets that support them. - Some targets use pairs of vectors and do twice the calls. */ -/* { dg-final { scan-tree-dump-times {[\n\r] [^\n]* = foo\.simdclone} 2 "vect" { target { ! { { i?86-*-* x86_64-*-* } && { ! lp64 } } } } } } */ -/* { dg-final { scan-tree-dump-times {[\n\r] [^\n]* = foo\.simdclone} 4 "vect" { target { { i?86*-*-* x86_64-*-* } && { ! lp64 } } } } } */ + */ +/* { dg-final { scan-tree-dump-times {[\n\r] [^\n]* = foo\.simdclone} 2 "vect" } } */ /* The LTO test produces two dump files and we scan the wrong one. */ /* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */ diff --git a/gcc/testsuite/gcc.dg/vect/vect-simd-clone-18f.c b/gcc/testsuite/gcc.dg/vect/vect-simd-clone-18f.c index d34f23f..4dd5138 100644 --- a/gcc/testsuite/gcc.dg/vect/vect-simd-clone-18f.c +++ b/gcc/testsuite/gcc.dg/vect/vect-simd-clone-18f.c @@ -7,9 +7,8 @@ #include "vect-simd-clone-18.c" /* Ensure the the in-branch simd clones are used on targets that support them. - Some targets use pairs of vectors and do twice the calls. */ -/* { dg-final { scan-tree-dump-times {[\n\r] [^\n]* = foo\.simdclone} 2 "vect" { target { ! { { i?86-*-* x86_64-*-* } && { ! lp64 } } } } } } */ -/* { dg-final { scan-tree-dump-times {[\n\r] [^\n]* = foo\.simdclone} 4 "vect" { target { { i?86*-*-* x86_64-*-* } && { ! lp64 } } } } } */ + */ +/* { dg-final { scan-tree-dump-times {[\n\r] [^\n]* = foo\.simdclone} 2 "vect" } } */ /* The LTO test produces two dump files and we scan the wrong one. 
*/ /* { dg-skip-if "" { *-*-* } { "-flto" } { "" } } */ diff --git a/gcc/testsuite/gcc.target/aarch64/lr_free_1.c b/gcc/testsuite/gcc.target/aarch64/lr_free_1.c index 50dcf04..9949061 100644 --- a/gcc/testsuite/gcc.target/aarch64/lr_free_1.c +++ b/gcc/testsuite/gcc.target/aarch64/lr_free_1.c @@ -1,5 +1,5 @@ /* { dg-do run } */ -/* { dg-options "-fno-inline -O2 -fomit-frame-pointer -ffixed-x2 -ffixed-x3 -ffixed-x4 -ffixed-x5 -ffixed-x6 -ffixed-x7 -ffixed-x8 -ffixed-x9 -ffixed-x10 -ffixed-x11 -ffixed-x12 -ffixed-x13 -ffixed-x14 -ffixed-x15 -ffixed-x16 -ffixed-x17 -ffixed-x18 -ffixed-x19 -ffixed-x20 -ffixed-x21 -ffixed-x22 -ffixed-x23 -ffixed-x24 -ffixed-x25 -ffixed-x26 -ffixed-x27 -ffixed-28 -ffixed-29 --save-temps -mgeneral-regs-only -fno-ipa-cp -fno-schedule-fusion -fno-peephole2" } */ +/* { dg-options "-fno-inline -O2 -fomit-frame-pointer -ffixed-x2 -ffixed-x3 -ffixed-x4 -ffixed-x5 -ffixed-x6 -ffixed-x7 -ffixed-x8 -ffixed-x9 -ffixed-x10 -ffixed-x11 -ffixed-x12 -ffixed-x13 -ffixed-x14 -ffixed-x15 -ffixed-x16 -ffixed-x17 -ffixed-x18 -ffixed-x19 -ffixed-x20 -ffixed-x21 -ffixed-x22 -ffixed-x23 -ffixed-x24 -ffixed-x25 -ffixed-x26 -ffixed-x27 -ffixed-28 -ffixed-29 --save-temps -mgeneral-regs-only -fno-ipa-cp -fno-schedule-fusion -fno-peephole2 --param=aarch64-stp-policy=never" } */ extern void abort (); diff --git a/gcc/testsuite/gcc.target/aarch64/pr71727.c b/gcc/testsuite/gcc.target/aarch64/pr71727.c index 41fa72b..226258a 100644 --- a/gcc/testsuite/gcc.target/aarch64/pr71727.c +++ b/gcc/testsuite/gcc.target/aarch64/pr71727.c @@ -30,4 +30,4 @@ _start (void) } /* { dg-final { scan-assembler-times "mov\tx" 5 {target lp64} } } */ -/* { dg-final { scan-assembler-not "add\tx0, x0, :" {target lp64} } } */ +/* { dg-final { scan-assembler-not {st[rp]\tq[0-9]+} {target lp64} } } */ diff --git a/gcc/testsuite/gcc.target/aarch64/sve/pcs/args_9.c b/gcc/testsuite/gcc.target/aarch64/sve/pcs/args_9.c index ad9affa..942a44a 100644 --- a/gcc/testsuite/gcc.target/aarch64/sve/pcs/args_9.c +++ b/gcc/testsuite/gcc.target/aarch64/sve/pcs/args_9.c @@ -45,5 +45,5 @@ caller (int64_t *x0, int16_t *x1, svbool_t p0) return svcntp_b8 (res, res); } -/* { dg-final { scan-assembler {\tptrue\t(p[0-9]+)\.b, mul3\n\tstr\t\1, \[(x[0-9]+)\]\n.*\tstr\t\2, \[sp\]\n} } } */ -/* { dg-final { scan-assembler {\tptrue\t(p[0-9]+)\.h, mul3\n\tstr\t\1, \[(x[0-9]+)\]\n.*\tstr\t\2, \[sp, 8\]\n} } } */ +/* { dg-final { scan-assembler {\tptrue\t(p[0-9]+)\.b, mul3\n\tstr\t\1, \[(x[0-9]+)\]\n.*\t(?:str\t\2, \[sp\]|stp\t\2, x[0-9]+, \[sp\])\n} } } */ +/* { dg-final { scan-assembler {\tptrue\t(p[0-9]+)\.h, mul3\n\tstr\t\1, \[(x[0-9]+)\]\n.*\t(?:str\t\2, \[sp, 8\]|stp\tx[0-9]+, \2, \[sp\])\n} } } */ diff --git a/gcc/testsuite/gcc.target/i386/funcspec-56.inc b/gcc/testsuite/gcc.target/i386/funcspec-56.inc index c6307fb..eae6b13 100644 --- a/gcc/testsuite/gcc.target/i386/funcspec-56.inc +++ b/gcc/testsuite/gcc.target/i386/funcspec-56.inc @@ -199,6 +199,7 @@ extern void test_arch_goldmont_plus (void) __attribute__((__target__("arch=goldm extern void test_arch_tremont (void) __attribute__((__target__("arch=tremont"))); extern void test_arch_sierraforest (void) __attribute__((__target__("arch=sierraforest"))); extern void test_arch_grandridge (void) __attribute__((__target__("arch=grandridge"))); +extern void test_arch_clearwaterforest (void) __attribute__((__target__("arch=clearwaterforest"))); extern void test_arch_knl (void) __attribute__((__target__("arch=knl"))); extern void test_arch_knm (void) __attribute__((__target__("arch=knm"))); extern void 
test_arch_skylake (void) __attribute__((__target__("arch=skylake"))); @@ -216,7 +217,6 @@ extern void test_arch_graniterapids (void) __attribute__((__target__("arch=grani extern void test_arch_graniterapids_d (void) __attribute__((__target__("arch=graniterapids-d"))); extern void test_arch_arrowlake (void) __attribute__((__target__("arch=arrowlake"))); extern void test_arch_arrowlake_s (void) __attribute__((__target__("arch=arrowlake-s"))); -extern void test_arch_clearwaterforest (void) __attribute__((__target__("arch=clearwaterforest"))); extern void test_arch_pantherlake (void) __attribute__((__target__("arch=pantherlake"))); extern void test_arch_lujiazui (void) __attribute__((__target__("arch=lujiazui"))); extern void test_arch_k8 (void) __attribute__((__target__("arch=k8"))); diff --git a/gcc/testsuite/gcc.target/i386/pr106245-2.c b/gcc/testsuite/gcc.target/i386/pr106245-2.c new file mode 100644 index 0000000..47b0d27 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/pr106245-2.c @@ -0,0 +1,10 @@ +/* { dg-do compile } */ +/* { dg-options "-O2" } */ + +int f(int a) +{ + return (a << 31) >> 31; +} + +/* { dg-final { scan-assembler "andl" } } */ +/* { dg-final { scan-assembler "negl" } } */ diff --git a/gcc/testsuite/gcc.target/i386/pr106245-3.c b/gcc/testsuite/gcc.target/i386/pr106245-3.c new file mode 100644 index 0000000..4ec6342 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/pr106245-3.c @@ -0,0 +1,11 @@ +/* { dg-do compile { target ia32 } } */ +/* { dg-options "-O2" } */ + +long long f(long long a) +{ + return (a << 63) >> 63; +} + +/* { dg-final { scan-assembler "andl" } } */ +/* { dg-final { scan-assembler "negl" } } */ +/* { dg-final { scan-assembler "cltd" } } */ diff --git a/gcc/testsuite/gcc.target/i386/pr106245-4.c b/gcc/testsuite/gcc.target/i386/pr106245-4.c new file mode 100644 index 0000000..ef77ee5 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/pr106245-4.c @@ -0,0 +1,10 @@ +/* { dg-do compile { target { ! 
ia32 } } } */ +/* { dg-options "-O2" } */ + +long long f(long long a) +{ + return (a << 63) >> 63; +} + +/* { dg-final { scan-assembler "andl" } } */ +/* { dg-final { scan-assembler "negq" } } */ diff --git a/gcc/testsuite/gcc.target/i386/pr106245-5.c b/gcc/testsuite/gcc.target/i386/pr106245-5.c new file mode 100644 index 0000000..0351866 --- /dev/null +++ b/gcc/testsuite/gcc.target/i386/pr106245-5.c @@ -0,0 +1,11 @@ +/* { dg-do compile { target int128 } } */ +/* { dg-options "-O2" } */ + +__int128 f(__int128 a) +{ + return (a << 127) >> 127; +} + +/* { dg-final { scan-assembler "andl" } } */ +/* { dg-final { scan-assembler "negq" } } */ +/* { dg-final { scan-assembler "cqto" } } */ diff --git a/gcc/testsuite/gcc.target/i386/pr89229-5b.c b/gcc/testsuite/gcc.target/i386/pr89229-5b.c index 261f2e1..8a81585 100644 --- a/gcc/testsuite/gcc.target/i386/pr89229-5b.c +++ b/gcc/testsuite/gcc.target/i386/pr89229-5b.c @@ -3,4 +3,4 @@ #include "pr89229-5a.c" -/* { dg-final { scan-assembler-times "vmovdqa32\[^\n\r]*zmm1\[67]\[^\n\r]*zmm1\[67]" 1 } } */ +/* { dg-final { scan-assembler-times "vmovsd\[^\n\r]*xmm1\[67]\[^\n\r]*xmm1\[67]\[^\n\r]*xmm1\[67]" 1 } } */ diff --git a/gcc/testsuite/gcc.target/i386/pr89229-6b.c b/gcc/testsuite/gcc.target/i386/pr89229-6b.c index a74f716..0c27daa 100644 --- a/gcc/testsuite/gcc.target/i386/pr89229-6b.c +++ b/gcc/testsuite/gcc.target/i386/pr89229-6b.c @@ -3,4 +3,4 @@ #include "pr89229-6a.c" -/* { dg-final { scan-assembler-times "vmovaps\[^\n\r]*zmm1\[67]\[^\n\r]*zmm1\[67]" 1 } } */ +/* { dg-final { scan-assembler-times "vmovss\[^\n\r]*xmm1\[67]\[^\n\r]*xmm1\[67]\[^\n\r]*xmm1\[67]" 1 } } */ diff --git a/gcc/testsuite/gcc.target/i386/pr89229-7b.c b/gcc/testsuite/gcc.target/i386/pr89229-7b.c index d3a56e6..baba99e 100644 --- a/gcc/testsuite/gcc.target/i386/pr89229-7b.c +++ b/gcc/testsuite/gcc.target/i386/pr89229-7b.c @@ -3,4 +3,4 @@ #include "pr89229-7a.c" -/* { dg-final { scan-assembler-times "vmovdqa32\[^\n\r]*zmm1\[67]\[^\n\r]*zmm1\[67]" 1 } } */ +/* { dg-final { scan-assembler-times "vmovss\[^\n\r]*xmm1\[67]\[^\n\r]*xmm1\[67]\[^\n\r]*xmm1\[67]" 1 } } */ diff --git a/gcc/testsuite/gcc.target/loongarch/avg-ceil-lasx.c b/gcc/testsuite/gcc.target/loongarch/avg-ceil-lasx.c new file mode 100644 index 0000000..16db7bf --- /dev/null +++ b/gcc/testsuite/gcc.target/loongarch/avg-ceil-lasx.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-options "-O3 -mlasx" } */ +/* { dg-final { scan-assembler "xvavgr.b" } } */ +/* { dg-final { scan-assembler "xvavgr.bu" } } */ +/* { dg-final { scan-assembler "xvavgr.hu" } } */ +/* { dg-final { scan-assembler "xvavgr.h" } } */ + +#define N 1024 + +#define TEST(TYPE, NAME) \ + TYPE a_##NAME[N], b_##NAME[N], c_##NAME[N]; \ + void f_##NAME (void) \ + { \ + int i; \ + for (i = 0; i < N; i++) \ + a_##NAME[i] = (b_##NAME[i] + c_##NAME[i] + 1) >> 1; \ + } + +TEST(char, 1); +TEST(short, 2); +TEST(unsigned char, 3); +TEST(unsigned short, 4); diff --git a/gcc/testsuite/gcc.target/loongarch/avg-ceil-lsx.c b/gcc/testsuite/gcc.target/loongarch/avg-ceil-lsx.c new file mode 100644 index 0000000..94119c2 --- /dev/null +++ b/gcc/testsuite/gcc.target/loongarch/avg-ceil-lsx.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-options "-O3 -mlsx" } */ +/* { dg-final { scan-assembler "vavgr.b" } } */ +/* { dg-final { scan-assembler "vavgr.bu" } } */ +/* { dg-final { scan-assembler "vavgr.hu" } } */ +/* { dg-final { scan-assembler "vavgr.h" } } */ + +#define N 1024 + +#define TEST(TYPE, NAME) \ + TYPE a_##NAME[N], b_##NAME[N], c_##NAME[N]; \ + void f_##NAME 
(void) \ + { \ + int i; \ + for (i = 0; i < N; i++) \ + a_##NAME[i] = (b_##NAME[i] + c_##NAME[i] + 1) >> 1; \ + } + +TEST(char, 1); +TEST(short, 2); +TEST(unsigned char, 3); +TEST(unsigned short, 4); diff --git a/gcc/testsuite/gcc.target/loongarch/avg-floor-lasx.c b/gcc/testsuite/gcc.target/loongarch/avg-floor-lasx.c new file mode 100644 index 0000000..da68965 --- /dev/null +++ b/gcc/testsuite/gcc.target/loongarch/avg-floor-lasx.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-options "-O3 -mlasx" } */ +/* { dg-final { scan-assembler "xvavg.b" } } */ +/* { dg-final { scan-assembler "xvavg.bu" } } */ +/* { dg-final { scan-assembler "xvavg.hu" } } */ +/* { dg-final { scan-assembler "xvavg.h" } } */ + +#define N 1024 + +#define TEST(TYPE, NAME) \ + TYPE a_##NAME[N], b_##NAME[N], c_##NAME[N]; \ + void f_##NAME (void) \ + { \ + int i; \ + for (i = 0; i < N; i++) \ + a_##NAME[i] = (b_##NAME[i] + c_##NAME[i]) >> 1; \ + } + +TEST(char, 1); +TEST(short, 2); +TEST(unsigned char, 3); +TEST(unsigned short, 4); diff --git a/gcc/testsuite/gcc.target/loongarch/avg-floor-lsx.c b/gcc/testsuite/gcc.target/loongarch/avg-floor-lsx.c new file mode 100644 index 0000000..bbb9db5 --- /dev/null +++ b/gcc/testsuite/gcc.target/loongarch/avg-floor-lsx.c @@ -0,0 +1,22 @@ +/* { dg-do compile } */ +/* { dg-options "-O3 -mlsx" } */ +/* { dg-final { scan-assembler "vavg.b" } } */ +/* { dg-final { scan-assembler "vavg.bu" } } */ +/* { dg-final { scan-assembler "vavg.hu" } } */ +/* { dg-final { scan-assembler "vavg.h" } } */ + +#define N 1024 + +#define TEST(TYPE, NAME) \ + TYPE a_##NAME[N], b_##NAME[N], c_##NAME[N]; \ + void f_##NAME (void) \ + { \ + int i; \ + for (i = 0; i < N; i++) \ + a_##NAME[i] = (b_##NAME[i] + c_##NAME[i]) >> 1; \ + } + +TEST(char, 1); +TEST(short, 2); +TEST(unsigned char, 3); +TEST(unsigned short, 4); diff --git a/gcc/testsuite/gcc.target/loongarch/sad-lasx.c b/gcc/testsuite/gcc.target/loongarch/sad-lasx.c new file mode 100644 index 0000000..6c0cdfd --- /dev/null +++ b/gcc/testsuite/gcc.target/loongarch/sad-lasx.c @@ -0,0 +1,20 @@ +/* { dg-do compile } */ +/* { dg-options "-O3 -mlasx" } */ + +#define N 1024 + +#define TEST(SIGN) \ + SIGN char a_##SIGN[N], b_##SIGN[N]; \ + int f_##SIGN (void) \ + { \ + int i, sum = 0; \ + for (i = 0; i < N; i++) \ + sum += __builtin_abs (a_##SIGN[i] - b_##SIGN[i]);; \ + return sum; \ + } + +TEST(signed); +TEST(unsigned); + +/* { dg-final { scan-assembler {\txvabsd.bu\t} } } */ +/* { dg-final { scan-assembler {\txvabsd.b\t} } } */ diff --git a/gcc/testsuite/gcc.target/loongarch/sad-lsx.c b/gcc/testsuite/gcc.target/loongarch/sad-lsx.c new file mode 100644 index 0000000..b92110a --- /dev/null +++ b/gcc/testsuite/gcc.target/loongarch/sad-lsx.c @@ -0,0 +1,20 @@ +/* { dg-do compile } */ +/* { dg-options "-O3 -mlsx" } */ + +#define N 1024 + +#define TEST(SIGN) \ + SIGN char a_##SIGN[N], b_##SIGN[N]; \ + int f_##SIGN (void) \ + { \ + int i, sum = 0; \ + for (i = 0; i < N; i++) \ + sum += __builtin_abs (a_##SIGN[i] - b_##SIGN[i]);; \ + return sum; \ + } + +TEST(signed); +TEST(unsigned); + +/* { dg-final { scan-assembler {\tvabsd.bu\t} } } */ +/* { dg-final { scan-assembler {\tvabsd.b\t} } } */ diff --git a/gcc/testsuite/gcc.target/loongarch/vect-widen-add.c b/gcc/testsuite/gcc.target/loongarch/vect-widen-add.c new file mode 100644 index 0000000..0bf832d --- /dev/null +++ b/gcc/testsuite/gcc.target/loongarch/vect-widen-add.c @@ -0,0 +1,24 @@ +/* { dg-do compile } */ +/* { dg-options "-O3 -mlasx" } */ +/* { dg-final { scan-assembler "xvaddwev.w.h" } } */ +/* { dg-final { 
scan-assembler "xvaddwod.w.h" } } */ +/* { dg-final { scan-assembler "xvaddwev.w.hu" } } */ +/* { dg-final { scan-assembler "xvaddwod.w.hu" } } */ + +#include <stdint.h> + +#define SIZE 1024 + +void +wide_uadd (uint32_t *foo, uint16_t *a, uint16_t *b) +{ + for ( int i = 0; i < SIZE; i++) + foo[i] = a[i] + b[i]; +} + +void +wide_sadd (int32_t *foo, int16_t *a, int16_t *b) +{ + for ( int i = 0; i < SIZE; i++) + foo[i] = a[i] + b[i]; +} diff --git a/gcc/testsuite/gcc.target/loongarch/vect-widen-mul.c b/gcc/testsuite/gcc.target/loongarch/vect-widen-mul.c new file mode 100644 index 0000000..84b020e --- /dev/null +++ b/gcc/testsuite/gcc.target/loongarch/vect-widen-mul.c @@ -0,0 +1,24 @@ +/* { dg-do compile } */ +/* { dg-options "-O3 -mlasx" } */ +/* { dg-final { scan-assembler "xvmulwev.w.h" } } */ +/* { dg-final { scan-assembler "xvmulwod.w.h" } } */ +/* { dg-final { scan-assembler "xvmulwev.w.hu" } } */ +/* { dg-final { scan-assembler "xvmulwod.w.hu" } } */ + +#include <stdint.h> + +#define SIZE 1024 + +void +wide_umul (uint32_t *foo, uint16_t *a, uint16_t *b) +{ + for ( int i = 0; i < SIZE; i++) + foo[i] = a[i] * b[i]; +} + +void +wide_smul (int32_t *foo, int16_t *a, int16_t *b) +{ + for ( int i = 0; i < SIZE; i++) + foo[i] = a[i] * b[i]; +} diff --git a/gcc/testsuite/gcc.target/loongarch/vect-widen-sub.c b/gcc/testsuite/gcc.target/loongarch/vect-widen-sub.c new file mode 100644 index 0000000..69fc3a5 --- /dev/null +++ b/gcc/testsuite/gcc.target/loongarch/vect-widen-sub.c @@ -0,0 +1,24 @@ +/* { dg-do compile } */ +/* { dg-options "-O3 -mlasx" } */ +/* { dg-final { scan-assembler "xvsubwev.w.h" } } */ +/* { dg-final { scan-assembler "xvsubwod.w.h" } } */ +/* { dg-final { scan-assembler "xvsubwev.w.hu" } } */ +/* { dg-final { scan-assembler "xvsubwod.w.hu" } } */ + +#include <stdint.h> + +#define SIZE 1024 + +void +wide_usub (uint32_t *foo, uint16_t *a, uint16_t *b) +{ + for ( int i = 0; i < SIZE; i++) + foo[i] = a[i] - b[i]; +} + +void +wide_ssub (int32_t *foo, int16_t *a, int16_t *b) +{ + for ( int i = 0; i < SIZE; i++) + foo[i] = a[i] - b[i]; +} diff --git a/gcc/testsuite/gcc.target/riscv/arch-26.c b/gcc/testsuite/gcc.target/riscv/arch-26.c new file mode 100644 index 0000000..0b48bc9 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/arch-26.c @@ -0,0 +1,4 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32eh -mabi=ilp32e" } */ +int foo() {} +/* { dg-error "'-march=rv32eh': h extension requires i extension" "" { target *-*-* } 0 } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/perm-4.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/perm-4.c index 7ab3104..b235ec7 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/perm-4.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/perm-4.c @@ -55,7 +55,7 @@ TEST_ALL (PERMUTE) -/* { dg-final { scan-assembler-times {vrgather\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 19 } } */ +/* { dg-final { scan-assembler-times {vrgather\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 18 } } */ /* { dg-final { scan-assembler-times {vrgatherei16\.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 12 } } */ -/* { dg-final { scan-assembler-times {vrsub\.vi} 24 } } */ +/* { dg-final { scan-assembler-times {vrsub\.vi} 23 } } */ /* { dg-final { scan-assembler-times {vrsub\.vx} 7 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-0.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-0.c new file mode 100644 index 0000000..57af7db --- /dev/null +++ 
b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-0.c @@ -0,0 +1,536 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -std=c99 -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "../vls/def.h" + +DEF_OP_VV_VA (__builtin_shufflevector, v1qi, + 0) +DEF_OP_VV_VA (__builtin_shufflevector, v2qi, + 0, 1) +DEF_OP_VV_VA (__builtin_shufflevector, v4qi, + 0, 1, 2, 3) +DEF_OP_VV_VA (__builtin_shufflevector, v8qi, + 0, 1, 2, 3, 0, 1, 2, 3) +DEF_OP_VV_VA (__builtin_shufflevector, v16qi, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v32qi, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v64qi, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v128qi, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v256qi, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v512qi, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 
12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v1024qi, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 
14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v2048qi, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 
12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 
13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v4096qi, + 12, 13, 14, 
15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 
12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 
14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 
15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 
12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) + +/* { dg-final { scan-assembler-times {vrgather\.v[vi]\s+v[0-9]+,\s*v[0-9]+,\s*v?[0-9]+} 10 } } */ +/* { dg-final { scan-assembler-not {csrr} } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-1.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-1.c new file mode 100644 index 0000000..2619658 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-1.c @@ -0,0 +1,279 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -std=c99 -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "../vls/def.h" + +DEF_OP_VV_VA (__builtin_shufflevector, v1hi, + 0) +DEF_OP_VV_VA (__builtin_shufflevector, v2hi, + 0, 1) +DEF_OP_VV_VA (__builtin_shufflevector, v4hi, + 0, 1, 2, 3) +DEF_OP_VV_VA (__builtin_shufflevector, v8hi, + 0, 1, 2, 3, 0, 1, 2, 3) +DEF_OP_VV_VA 
(__builtin_shufflevector, v16hi, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v32hi, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v64hi, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v128hi, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v256hi, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v512hi, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 
12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v1024hi, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 
14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v2048hi, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 
+ 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 
13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) + +/* { dg-final { scan-assembler-times {vrgather\.v[vi]\s+v[0-9]+,\s*v[0-9]+,\s*v?[0-9]+} 9 } } */ +/* { dg-final { scan-assembler-not {csrr} } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-2.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-2.c new file mode 100644 index 0000000..16aadba --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-2.c @@ -0,0 +1,151 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -std=c99 -mabi=lp64d -O3 -fno-schedule-insns 
-fno-schedule-insns2" } */ + +#include "../vls/def.h" + +DEF_OP_VV_VA (__builtin_shufflevector, v1si, + 0) +DEF_OP_VV_VA (__builtin_shufflevector, v2si, + 0, 1) +DEF_OP_VV_VA (__builtin_shufflevector, v4si, + 0, 1, 2, 3) +DEF_OP_VV_VA (__builtin_shufflevector, v8si, + 0, 1, 2, 3, 0, 1, 2, 3) +DEF_OP_VV_VA (__builtin_shufflevector, v16si, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v32si, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v64si, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v128si, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v256si, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v512si, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 
13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v1024si, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 
15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) + +/* { dg-final { scan-assembler-times {vrgatherei16\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 3 } } */ +/* { dg-final { scan-assembler-times {vrgather\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 5 } } */ +/* { dg-final { scan-assembler-not {csrr} } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-3.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-3.c new file mode 100644 index 0000000..5c5e93a --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-3.c @@ -0,0 +1,86 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -std=c99 -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "../vls/def.h" + +DEF_OP_VV_VA (__builtin_shufflevector, v1di, + 0) +DEF_OP_VV_VA (__builtin_shufflevector, v2di, + 0, 1) +DEF_OP_VV_VA (__builtin_shufflevector, v4di, + 0, 1, 2, 3) +DEF_OP_VV_VA (__builtin_shufflevector, v8di, + 0, 1, 2, 3, 0, 1, 2, 3) +DEF_OP_VV_VA (__builtin_shufflevector, v16di, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v32di, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v64di, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v128di, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 
13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v256di, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v512di, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 
15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) + +/* { dg-final { scan-assembler-times {vrgatherei16\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 3 } } */ +/* { dg-final { scan-assembler-times {vrgather\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 4 } } */ +/* { dg-final { scan-assembler-not {csrr} } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-4.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-4.c new file mode 100644 index 0000000..6aa4a7e --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-4.c @@ -0,0 +1,279 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -std=c99 -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "../vls/def.h" + +DEF_OP_VV_VA (__builtin_shufflevector, v1hf, + 0) +DEF_OP_VV_VA (__builtin_shufflevector, v2hf, + 0, 1) +DEF_OP_VV_VA (__builtin_shufflevector, v4hf, + 0, 1, 2, 3) +DEF_OP_VV_VA (__builtin_shufflevector, v8hf, + 0, 1, 2, 3, 0, 1, 2, 3) +DEF_OP_VV_VA (__builtin_shufflevector, v16hf, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v32hf, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v64hf, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v128hf, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v256hf, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v512hf, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 
13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v1024hf, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 
15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v2048hf, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 
13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 
14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 
15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) + +/* { dg-final { scan-assembler-times {vrgather\.v[vi]\s+v[0-9]+,\s*v[0-9]+,\s*[v]?[0-9]+} 9 } } */ +/* { dg-final { scan-assembler-not {csrr} } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-5.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-5.c new file mode 100644 index 0000000..a31dad5 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-5.c @@ -0,0 +1,151 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -std=c99 -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "../vls/def.h" + +DEF_OP_VV_VA (__builtin_shufflevector, v1sf, + 0) +DEF_OP_VV_VA (__builtin_shufflevector, v2sf, + 0, 1) +DEF_OP_VV_VA (__builtin_shufflevector, v4sf, + 0, 1, 2, 3) +DEF_OP_VV_VA (__builtin_shufflevector, v8sf, + 0, 1, 2, 3, 0, 1, 2, 3) +DEF_OP_VV_VA (__builtin_shufflevector, v16sf, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v32sf, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v64sf, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v128sf, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v256sf, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 
15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v512sf, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v1024sf, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 
13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) + +/* { dg-final { scan-assembler-times {vrgatherei16\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 3 } } */ +/* { dg-final { scan-assembler-times {vrgather\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 5 } } */ +/* { dg-final { scan-assembler-not {csrr} } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-6.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-6.c new file mode 100644 index 0000000..8312b21 --- /dev/null +++ 
b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls-vlmax/pr111857-6.c @@ -0,0 +1,86 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv_zvfh_zvl4096b -std=c99 -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "../vls/def.h" + +DEF_OP_VV_VA (__builtin_shufflevector, v1df, + 0) +DEF_OP_VV_VA (__builtin_shufflevector, v2df, + 0, 1) +DEF_OP_VV_VA (__builtin_shufflevector, v4df, + 0, 1, 2, 3) +DEF_OP_VV_VA (__builtin_shufflevector, v8df, + 0, 1, 2, 3, 0, 1, 2, 3) +DEF_OP_VV_VA (__builtin_shufflevector, v16df, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v32df, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v64df, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v128df, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v256df, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) +DEF_OP_VV_VA (__builtin_shufflevector, v512df, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 
12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, + 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15) + +/* { dg-final { scan-assembler-times {vrgatherei16\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 3 } } */ +/* { dg-final { scan-assembler-times {vrgather\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+} 4 } } */ +/* { dg-final { scan-assembler-not {csrr} } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/def.h b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/def.h index 8dd5bcf..8ee8b7b 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/def.h +++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vls/def.h @@ -441,6 +441,12 @@ typedef double v512df __attribute__ ((vector_size (4096))); *(TYPE1 *) out = v; \ } +#define DEF_OP_VV_VA(OP, TYPE1, ...) \ + TYPE1 test_##OP##_##TYPE1 (TYPE1 a, TYPE1 b) \ + { \ + return OP (a, b, __VA_ARGS__); \ + } + #define DEF_REPEAT(TYPE1, TYPE2, NUM, ...) 
\ void init_##TYPE1##_##TYPE2##_##NUM (TYPE2 var0, TYPE2 var1, \ TYPE2 *__restrict out) \ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/scalar_move-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/scalar_move-1.c index 1834913..c833d89 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/base/scalar_move-1.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/base/scalar_move-1.c @@ -46,8 +46,8 @@ int32_t foo3 (int32_t *base, size_t vl) ** vl1re32\.v\tv[0-9]+,0\([a-x0-9]+\) ** vsetvli\tzero,[a-x0-9]+,e32,m1,t[au],m[au] ** vadd.vv\tv[0-9]+,\s*v[0-9]+,\s*v[0-9]+ -** vsetvli\tzero,[a-x0-9]+,e32,m2,t[au],m[au] ** vmv.x.s\t[a-x0-9]+,\s*v[0-9]+ +** vsetvli\tzero,[a-x0-9]+,e32,m2,t[au],m[au] ** vmv.v.x\tv[0-9]+,\s*[a-x0-9]+ ** vmv.x.s\t[a-x0-9]+,\s*v[0-9]+ ** ret diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-104.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-104.c new file mode 100644 index 0000000..fb3577d --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-104.c @@ -0,0 +1,35 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32 -fno-schedule-insns -fno-schedule-insns2 -fno-tree-vectorize" } */ + +#include "riscv_vector.h" + +void +foo (int cond, int vl, int *in, int *out, int n) +{ + if (cond > 30) + { + vint32m1_t v = __riscv_vle32_v_i32m1 ((int32_t *) in, vl); + __riscv_vse32_v_i32m1 ((int32_t *) out, v, vl); + } + else if (cond < 10) + { + vint8mf4_t v = __riscv_vle8_v_i8mf4 ((int8_t *) in, vl); + v = __riscv_vle8_v_i8mf4_tu (v, (int8_t *) in + 10, vl); + __riscv_vse8_v_i8mf4 ((int8_t *) out, v, vl); + } + else + { + vl = vl * 2; + } + + for (int i = 0; i < n; i += 1) + { + vint16mf2_t v = __riscv_vle16_v_i16mf2 ((int16_t *) in + i, vl); + v = __riscv_vle16_v_i16mf2_tu (v, (int16_t *) in + i + 10, vl); + v = __riscv_vadd_vv_i16mf2 (v, v, vl); + __riscv_vse16_v_i16mf2 ((int16_t *) out + i, v, vl); + } +} + +/* { dg-final { scan-assembler-not {vsetvli\s+zero,zero,e16,mf2,t[au],m[au]} { target { no-opts "-O0" no-opts "-Os" no-opts "-O1" no-opts "-g" no-opts "-funroll-loops" } } } } */ +/* { dg-final { scan-assembler-times {vsetvli} 3 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-g" no-opts "-funroll-loops" } } } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-105.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-105.c new file mode 100644 index 0000000..708a3bd --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-105.c @@ -0,0 +1,23 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32 -fno-schedule-insns -fno-schedule-insns2 -fno-tree-vectorize" } */ + +#include "riscv_vector.h" + +void +foo (int i, int n, int m, int32_t *in, int32_t *out) +{ + vint32m1_t v = __riscv_vle32_v_i32m1 (in, i); + __riscv_vse32_v_i32m1 (out, v, i); + for (; i < n; i += 1) + { + vint32m1_t v = __riscv_vle32_v_i32m1 (in + i, i); + __riscv_vse32_v_i32m1 (out + i, v, i); + for (int j = 0; j < m; j += 1) + { + vint32m1_t v = __riscv_vle32_v_i32m1 (in + i * n + j, j); + __riscv_vse32_v_i32m1 (out + i * n + j, v, i); + } + } +} + +/* { dg-final { scan-assembler-times {vsetvli} 4 { target { { any-opts "-O2" "-O3" } && { no-opts "-g" "-funroll-loops" } } } } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-106.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-106.c new file mode 100644 index 0000000..e63d52d --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-106.c @@ -0,0 +1,34 @@ +/* { dg-do compile } */ +/* { dg-options 
"-march=rv32gcv -mabi=ilp32 -fno-schedule-insns -fno-schedule-insns2 -fno-tree-vectorize" } */ + +#include "riscv_vector.h" + +void +foo (int vl, int n, int m, int32_t *in, int32_t *out) +{ + size_t avl; + if (m > 10) + { + avl = __riscv_vsetvl_e8mf4 (vl); + vint8mf4_t v = __riscv_vle8_v_i8mf4 ((int8_t *) in + 10, avl); + v = __riscv_vadd_vv_i8mf4 (v, v, avl); + __riscv_vse8_v_i8mf4 ((int8_t *) out + 10, v, avl); + } + else + { + avl = __riscv_vsetvl_e16mf2 (vl); + vint16mf2_t v = __riscv_vle16_v_i16mf2 ((int16_t *) in + 10, avl); + v = __riscv_vadd_vv_i16mf2 (v, v, avl); + __riscv_vse16_v_i16mf2 ((int16_t *) out + 10, v, avl); + } + + for (int i = 0; i < n; i += 1) + { + vint32m1_t v = __riscv_vle32_v_i32m1 (in + i, avl); + v = __riscv_vadd_vv_i32m1 (v, v, avl); + __riscv_vse32_v_i32m1 (out + i, v, avl); + } +} + +/* { dg-final { scan-assembler-times {vsetvli} 3 { target { { any-opts "-O2" "-O3" } && { no-opts "-g" "-funroll-loops" } } } } } */ +/* { dg-final { scan-assembler-times {vsetvli\tzero,zero,e32,m1,t[au],m[au]} 1 { target { { any-opts "-O2" "-O3" } && { no-opts "-g" "-funroll-loops" } } } } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-107.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-107.c new file mode 100644 index 0000000..e424b4b --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-107.c @@ -0,0 +1,41 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32 -fno-schedule-insns -fno-schedule-insns2 -fno-tree-vectorize" } */ + +#include "riscv_vector.h" + +void +foo (int vl, int n, int m, int32_t *in, int32_t *out) +{ + size_t avl; + if (m > 10) + { + avl = __riscv_vsetvl_e8mf4 (vl); + vint8mf4_t v = __riscv_vle8_v_i8mf4 ((int8_t *) in + 10, avl); + v = __riscv_vadd_vv_i8mf4 (v, v, avl); + __riscv_vse8_v_i8mf4 ((int8_t *) out + 10, v, avl); + } + else if (m > -10) + { + avl = __riscv_vsetvl_e16mf2 (vl); + vint16mf2_t v = __riscv_vle16_v_i16mf2 ((int16_t *) in + 10, avl); + v = __riscv_vadd_vv_i16mf2 (v, v, avl); + __riscv_vse16_v_i16mf2 ((int16_t *) out + 10, v, avl); + } + else + { + avl = __riscv_vsetvl_e64m2 (vl); + vint64m2_t v = __riscv_vle64_v_i64m2 ((int64_t *) in + 10, avl); + v = __riscv_vadd_vv_i64m2 (v, v, avl); + __riscv_vse64_v_i64m2 ((int64_t *) out + 10, v, avl); + } + + for (int i = 0; i < n; i += 1) + { + vint32m1_t v = __riscv_vle32_v_i32m1 (in + i, avl); + v = __riscv_vadd_vv_i32m1 (v, v, avl); + __riscv_vse32_v_i32m1 (out + i, v, avl); + } +} + +/* { dg-final { scan-assembler-times {vsetvli} 4 { target { { any-opts "-O2" "-O3" } && { no-opts "-g" "-funroll-loops" } } } } } */ +/* { dg-final { scan-assembler-times {vsetvli\tzero,zero,e32,m1,t[au],m[au]} 1 { target { { any-opts "-O2" "-O3" } && { no-opts "-g" "-funroll-loops" } } } } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-108.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-108.c new file mode 100644 index 0000000..95f1a95 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-108.c @@ -0,0 +1,41 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32 -fno-schedule-insns -fno-schedule-insns2 -fno-tree-vectorize" } */ + +#include "riscv_vector.h" + +void +foo (int vl, int n, int m, int32_t *in, int32_t *out) +{ + size_t avl; + if (m > 10) + { + avl = __riscv_vsetvl_e8mf4 (vl); + vint8mf4_t v = __riscv_vle8_v_i8mf4 ((int8_t *) in + 10, avl); + v = __riscv_vadd_vv_i8mf4 (v, v, avl); + __riscv_vse8_v_i8mf4 ((int8_t *) out + 10, v, avl); + } + else if (m > -10) + { + 
avl = __riscv_vsetvl_e16mf2 (vl); + vint16mf2_t v = __riscv_vle16_v_i16mf2 ((int16_t *) in + 10, avl); + v = __riscv_vadd_vv_i16mf2 (v, v, avl); + __riscv_vse16_v_i16mf2 ((int16_t *) out + 10, v, avl); + } + else + { + avl = __riscv_vsetvl_e64m4 (vl); + vint64m4_t v = __riscv_vle64_v_i64m4 ((int64_t *) in + 10, avl); + v = __riscv_vadd_vv_i64m4 (v, v, avl); + __riscv_vse64_v_i64m4 ((int64_t *) out + 10, v, avl); + } + + for (int i = 0; i < n; i += 1) + { + vint32m1_t v = __riscv_vle32_v_i32m1 (in + i, avl); + v = __riscv_vadd_vv_i32m1 (v, v, avl); + __riscv_vse32_v_i32m1 (out + i, v, avl); + } +} + +/* { dg-final { scan-assembler-times {vsetvli} 4 { target { { any-opts "-O2" "-O3" } && { no-opts "-g" "-funroll-loops" } } } } } */ +/* { dg-final { scan-assembler-not {vsetvli\tzero,zero,e32,m1,t[au],m[au]} { target { { any-opts "-O2" "-O3" } && { no-opts "-g" "-funroll-loops" } } } } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-109.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-109.c new file mode 100644 index 0000000..a45e52b --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-109.c @@ -0,0 +1,45 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32 -fno-schedule-insns -fno-schedule-insns2 -fno-tree-vectorize" } */ + +#include "riscv_vector.h" + +void +foo (int vl, int n, int m, int32_t *in, int32_t *out) +{ + size_t avl; + if (m > 10) + { + avl = __riscv_vsetvl_e8mf4 (vl); + vint8mf4_t v = __riscv_vle8_v_i8mf4 ((int8_t *) in + 10, avl); + v = __riscv_vadd_vv_i8mf4 (v, v, avl); + __riscv_vse8_v_i8mf4 ((int8_t *) out + 10, v, avl); + } + else if (m > -10) + { + avl = __riscv_vsetvl_e16mf2 (vl); + vint16mf2_t v = __riscv_vle16_v_i16mf2 ((int16_t *) in + 10, avl); + v = __riscv_vadd_vv_i16mf2 (v, v, avl); + __riscv_vse16_v_i16mf2 ((int16_t *) out + 10, v, avl); + } + else if (m > -100) + { + avl = __riscv_vsetvl_e64m4 (vl); + vint64m4_t v = __riscv_vle64_v_i64m4 ((int64_t *) in + 10, avl); + v = __riscv_vadd_vv_i64m4 (v, v, avl); + __riscv_vse64_v_i64m4 ((int64_t *) out + 10, v, avl); + } + else + { + avl = 123; + } + + for (int i = 0; i < n; i += 1) + { + vint32m1_t v = __riscv_vle32_v_i32m1 (in + i, avl); + v = __riscv_vadd_vv_i32m1 (v, v, avl); + __riscv_vse32_v_i32m1 (out + i, v, avl); + } +} + +/* { dg-final { scan-assembler-times {vsetvli} 4 { target { { any-opts "-O2" "-O3" } && { no-opts "-g" "-funroll-loops" } } } } } */ +/* { dg-final { scan-assembler-not {vsetvli\tzero,zero,e32,m1,t[au],m[au]} { target { { any-opts "-O2" "-O3" } && { no-opts "-g" "-funroll-loops" } } } } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-23.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-23.c index 708f04b..ec59d58 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-23.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-23.c @@ -7,7 +7,7 @@ void f (int8_t * restrict in, int8_t * restrict out, int n, int m, int cond) { vbool64_t mask = *(vbool64_t*) (in + 1000000); for (size_t j = 0; j < m; j++){ - + size_t vl = 101; for (size_t i = 0; i < n; i++) { @@ -20,7 +20,7 @@ void f (int8_t * restrict in, int8_t * restrict out, int n, int m, int cond) { vfloat32mf2_t v = __riscv_vle32_v_f32mf2 ((float *)(in + i + j + 200), vl); __riscv_vse32_v_f32mf2 ((float *)(out + i + j + 200), v, vl); - + vfloat32mf2_t v2 = __riscv_vle32_v_f32mf2_tumu (mask, v, (float *)(in + i + j + 300), vl); __riscv_vse32_v_f32mf2_m (mask, (float *)(out + i + j + 300), v2, vl); } @@ -29,6 +29,7 @@ void f 
(int8_t * restrict in, int8_t * restrict out, int n, int m, int cond) /* { dg-final { scan-assembler {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]} { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */ /* { dg-final { scan-assembler {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*mu} { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */ -/* { dg-final { scan-assembler-times {vsetvli} 4 { target { no-opts "-O0" no-opts "-Os" no-opts "-Oz" no-opts "-g" no-opts "-funroll-loops" } } } } */ +/* { dg-final { scan-assembler-times {vsetvli} 4 { target { { any-opts "-O1" } && { no-opts "-g" "-funroll-loops" } } } } } */ +/* { dg-final { scan-assembler-times {vsetvli} 3 { target { { any-opts "-Os" "-O2" } && { no-opts "-g" "-funroll-loops" } } } } } */ /* { dg-final { scan-assembler-times {li\s+[a-x0-9]+,101} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */ /* { dg-final { scan-assembler-times {li\s+[a-x0-9]+,102} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-46.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-46.c index 99fdd67..a60674d 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-46.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-46.c @@ -21,5 +21,6 @@ void f (int8_t * restrict in, int8_t * restrict out, int n, int cond, size_t vl) } } -/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]} 2 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */ +/* { dg-final { scan-assembler {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]} { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]} 2 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */ /* { dg-final { scan-assembler-times {vsetvli} 2 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-84.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-84.c index b3e90d2..a584dd9 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-84.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-84.c @@ -17,7 +17,6 @@ double f0 (int8_t * restrict in, int8_t * restrict out, int n, int m, unsigned c } /* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e8,\s*m2,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-Os" no-opts "-Oz" no-opts "-O1" no-opts "-g" no-opts "-funroll-loops" } } } } */ -/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*3,\s*e64,\s*m1,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-Os" no-opts "-Oz" no-opts "-O1" no-opts "-g" no-opts "-funroll-loops" } } } } */ -/* { dg-final { scan-assembler-times {vsetvli} 1 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-g" no-opts "-funroll-loops" } } } } */ -/* { dg-final { scan-assembler-times {vsetivli} 2 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-g" no-opts "-funroll-loops" } } } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*zero,\s*e64,\s*m1,\s*t[au],\s*m[au]} 2 { target { no-opts "-O0" no-opts "-Os" no-opts "-Oz" no-opts "-O1" no-opts "-g" no-opts "-funroll-loops" } } } } */ +/* { dg-final { scan-assembler-times {vsetvli} 3 { target { no-opts "-O0" no-opts "-O1" 
no-opts "-Os" no-opts "-Oz" no-opts "-g" no-opts "-funroll-loops" } } } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-89.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-89.c index 9f85088..65bff4d 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-89.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-89.c @@ -11,11 +11,11 @@ float f (int8_t * restrict in, int8_t * restrict out, int n, int m, unsigned con { vfloat32mf2_t v = __riscv_vle32_v_f32mf2 ((float *)(in + i + 200), __riscv_vsetvlmax_e32mf2 ()); __riscv_vse32_v_f32mf2 ((float *)(out + i + 200), v, __riscv_vsetvlmax_e32mf2 ()); - + vfloat32mf2_t v2 = __riscv_vle32_v_f32mf2_tumu (mask, v, (float *)(in + i + 300), __riscv_vsetvlmax_e32mf2 ()); __riscv_vse32_v_f32mf2_m (mask, (float *)(out + i + 300), v2, __riscv_vsetvlmax_e32mf2 ()); } - + vfloat32m1_t v = *(vfloat32m1_t*)(in + 300000); for (size_t i = 0; i < n; i++) { @@ -25,7 +25,7 @@ float f (int8_t * restrict in, int8_t * restrict out, int n, int m, unsigned con return __riscv_vfmv_f_s_f32m1_f32 (v); } -/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*3,\s*e32,\s*m1,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-Os" no-opts "-Oz" no-opts "-O1" no-opts "-g" no-opts "-funroll-loops" } } } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*3,\s*e32,\s*m1,\s*t[au],\s*m[au]} 2 { target { no-opts "-O0" no-opts "-Os" no-opts "-Oz" no-opts "-O1" no-opts "-g" no-opts "-funroll-loops" } } } } */ /* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e32,\s*mf2,\s*tu,\s*mu} 1 { target { no-opts "-O0" no-opts "-Os" no-opts "-Oz" no-opts "-O1" no-opts "-g" no-opts "-funroll-loops" } } } } */ /* { dg-final { scan-assembler-times {vsetvli} 1 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-g" no-opts "-funroll-loops" } } } } */ -/* { dg-final { scan-assembler-times {vsetivli} 1 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-g" no-opts "-funroll-loops" } } } } */ +/* { dg-final { scan-assembler-times {vsetivli} 2 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-g" no-opts "-funroll-loops" } } } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-95.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-95.c index 5dac25e..128a629 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-95.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/avl_single-95.c @@ -15,6 +15,6 @@ float f (int8_t * restrict in, int8_t * restrict out, int n, int m, unsigned con *(vfloat32m1_t*)(out + 100000) = v; } -/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*3,\s*e64,\s*m4,\s*t[au],\s*m[au]} 2 { target { no-opts "-O0" no-opts "-Os" no-opts "-Oz" no-opts "-O1" no-opts "-g" no-opts "-funroll-loops" } } } } */ +/* { dg-final { scan-assembler {vsetivli\s+zero,\s*3,\s*e32,\s*m2,\s*t[au],\s*m[au]} { target { no-opts "-O0" no-opts "-Os" no-opts "-Oz" no-opts "-O1" no-opts "-g" no-opts "-funroll-loops" } } } } */ /* { dg-final { scan-assembler-not {vsetvli} { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-g" no-opts "-funroll-loops" } } } } */ /* { dg-final { scan-assembler-times {vsetivli} 2 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-g" no-opts "-funroll-loops" } } } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/imm_bb_prop-1.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/imm_bb_prop-1.c index ed32a40..691980f 100644 --- 
a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/imm_bb_prop-1.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/imm_bb_prop-1.c @@ -16,17 +16,18 @@ void f (int8_t * restrict in, int8_t * restrict out, int n, int cond) for (int i = 0 ; i < n * n; i++) out[i] = out[i] + out[i]; - + for (int i = 0 ; i < n * n * n; i++) out[i] = out[i] * out[i]; for (int i = 0 ; i < n * n * n * n; i++) out[i] = out[i] * out[i]; - + for (int i = 0 ; i < n * n * n * n; i++) { vint8mf8_t v = __riscv_vle8_v_i8mf8 (in + 900 + i, 5); __riscv_vse8_v_i8mf8 (out + 900 + i, v, 5); } } -/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*5,\s*e8,\s*mf8,\s*tu,\s*m[au]} 2 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*5,\s*e8,\s*mf8,\s*t[au],\s*m[au]} 2 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */ +/* { dg-final { scan-assembler {vsetivli\s+zero,\s*5,\s*e8,\s*mf8,\s*tu,\s*m[au]} { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr109743-2.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr109743-2.c index 5f6647b..e87dc03 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr109743-2.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr109743-2.c @@ -23,5 +23,5 @@ void f (int32_t * a, int32_t * b, int n) } -/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]} 2 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-g" no-opts "-funroll-loops" } } } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*[a-x0-9]+,\s*(?:e8,mf4|e32,m1),\s*t[au],\s*m[au]} 2 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-g" no-opts "-funroll-loops" } } } } */ /* { dg-final { scan-assembler-times {vsetvli} 2 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-g" no-opts "-funroll-loops" } } } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr109773-1.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr109773-1.c index 8656e47..7a9882b 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr109773-1.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr109773-1.c @@ -17,4 +17,4 @@ void f (int32_t *a, int32_t *b, int n) } } -/* { dg-final { scan-assembler-times {vsetvli} 3 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-g" no-opts "-funroll-loops" } } } } */ +/* { dg-final { scan-assembler-times {vsetvli} 2 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-g" no-opts "-funroll-loops" } } } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr111037-1.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr111037-1.c index 0b7b32f..0b7b32f 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/base/pr111037-1.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr111037-1.c diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pr111037-2.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr111037-2.c index ac50da7..ac50da7 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/base/pr111037-2.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr111037-2.c diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr111037-3.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr111037-3.c new file mode 100644 index 0000000..0f40642 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr111037-3.c @@ -0,0 +1,16 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gc_zve64f_zvfh 
-mabi=ilp32d -O3" } */ + +#include "riscv_vector.h" + +void foo(_Float16 y, int16_t z, int64_t *i64p) +{ + vint64m1_t vx =__riscv_vle64_v_i64m1 (i64p, 1); + vx = __riscv_vadd_vv_i64m1 (vx, vx, 1); + vint16m1_t vz =__riscv_vmv_s_x_i16m1 (z, 1); + vfloat16m1_t vy =__riscv_vfmv_s_f_f16m1 (y, 1); + asm volatile ("# use %0 %1" : : "vr"(vx), "vr" (vy), "vr" (vz)); +} + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*1,\s*e64,\s*m1,\s*t[au],\s*m[au]} 1 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*zero,\s*e16,\s*m1,\s*t[au],\s*m[au]} 1 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr111037-4.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr111037-4.c new file mode 100644 index 0000000..1a0fcf7 --- /dev/null +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/pr111037-4.c @@ -0,0 +1,16 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gc_zve64f_zvfh -mabi=ilp32d -O3" } */ + +#include "riscv_vector.h" + +void foo(_Float16 y, int16_t z, int64_t *i64p) +{ + vint64m1_t vx =__riscv_vle64_v_i64m1 (i64p, 1); + vx = __riscv_vadd_vv_i64m1 (vx, vx, 1); + vfloat16m1_t vy =__riscv_vfmv_s_f_f16m1 (y, 1); + vint16m1_t vz =__riscv_vmv_s_x_i16m1 (z, 1); + asm volatile ("# use %0 %1" : : "vr"(vx), "vr" (vy), "vr" (vz)); +} + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*1,\s*e16,\s*mf4,\s*t[au],\s*m[au]} 1 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*zero,\s*e64,\s*m1,\s*t[au],\s*m[au]} 1 } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_back_prop-25.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_back_prop-25.c index 2ec9487..d0fdc5f 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_back_prop-25.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_back_prop-25.c @@ -88,8 +88,8 @@ void f (void * restrict in, void * restrict out, int n, int cond) } } -/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e8,\s*mf8,\s*t[au],\s*m[au]} 10 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ -/* { dg-final { scan-assembler-not {vsetvli\s+[a-x0-9]+,\s*zero,\s*e16,\s*mf4,\s*t[au],\s*m[au]} { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ -/* { dg-final { scan-assembler-not {vsetvli\s+[a-x0-9]+,\s*zero,\s*e32,\s*mf2,\s*t[au],\s*m[au]} { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ -/* { dg-final { scan-assembler-not {vsetvli\s+[a-x0-9]+,\s*zero,\s*e64,\s*m1,\s*t[au],\s*m[au]} { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ -/* { dg-final { scan-assembler-times {vsetvli} 19 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e8,\s*mf8,\s*t[au],\s*m[au]} 3 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e64,\s*m1,\s*t[au],\s*m[au]} 2 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e16,\s*mf4,\s*t[au],\s*m[au]} 2 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ +/* { 
dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e32,\s*mf2,\s*t[au],\s*m[au]} 3 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ +/* { dg-final { scan-assembler-times {vsetvli} 10 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_back_prop-26.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_back_prop-26.c index bcafce3..d0e7525 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_back_prop-26.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_back_prop-26.c @@ -80,8 +80,8 @@ void f (void * restrict in, void * restrict out, int n, int cond) } } -/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e8,\s*mf8,\s*t[au],\s*m[au]} 9 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ -/* { dg-final { scan-assembler-not {vsetvli\s+[a-x0-9]+,\s*zero,\s*e16,\s*mf4,\s*t[au],\s*m[au]} { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ -/* { dg-final { scan-assembler-not {vsetvli\s+[a-x0-9]+,\s*zero,\s*e32,\s*mf2,\s*t[au],\s*m[au]} { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ -/* { dg-final { scan-assembler-not {vsetvli\s+[a-x0-9]+,\s*zero,\s*e64,\s*m1,\s*t[au],\s*m[au]} { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ -/* { dg-final { scan-assembler-times {vsetvli} 17 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e8,\s*mf8,\s*t[au],\s*m[au]} 3 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e16,\s*mf4,\s*t[au],\s*m[au]} 2 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e32,\s*mf2,\s*t[au],\s*m[au]} 3 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e64,\s*m1,\s*t[au],\s*m[au]} 1 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ +/* { dg-final { scan-assembler-times {vsetvli} 9 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_conflict-12.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_conflict-12.c index d1611dd..00a7594 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_conflict-12.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_conflict-12.c @@ -36,4 +36,3 @@ void f2 (int32_t * restrict in, int32_t * restrict in2, int32_t * restrict out, } /* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e8,\s*mf8,\s*t[au],\s*m[au]} 2 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ -/* { dg-final { scan-assembler-times {vsetvli} 4 { target { no-opts "-O0" no-opts "-O1" no-opts 
"-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_conflict-3.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_conflict-3.c index 3e89cfc..88a565f 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_conflict-3.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vlmax_conflict-3.c @@ -27,4 +27,4 @@ void f2 (int32_t * restrict in, int32_t * restrict in2, int32_t * restrict out, } /* { dg-final { scan-assembler-times {vsetvli\s+[a-x0-9]+,\s*zero,\s*e8,\s*mf8,\s*t[au],\s*m[au]} 2 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ -/* { dg-final { scan-assembler-times {vsetvli} 4 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ +/* { dg-final { scan-assembler-times {vsetvli} 2 { target { no-opts "-O0" no-opts "-O1" no-opts "-Os" no-opts "-Oz" no-opts "-funroll-loops" no-opts "-g" } } } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vsetvl-13.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vsetvl-13.c index 6157a2c..1ea248d 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vsetvl-13.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vsetvl-13.c @@ -4,7 +4,7 @@ #include "riscv_vector.h" void foo(int32_t *in1, int32_t *in2, int32_t *in3, int32_t *out, size_t n, int cond, int avl) { - + size_t vl; if (cond) vl = __riscv_vsetvl_e32m1(avl); @@ -18,5 +18,5 @@ void foo(int32_t *in1, int32_t *in2, int32_t *in3, int32_t *out, size_t n, int c } } -/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]} 2 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */ /* { dg-final { scan-assembler-times {vsetvli} 2 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vsetvl-18.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vsetvl-18.c index 7ad277e..5799220 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vsetvl-18.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vsetvl-18.c @@ -16,5 +16,7 @@ void f(int8_t *base, int8_t *out, size_t vl, size_t m, size_t n) { } } -/* { dg-final { scan-assembler {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]} { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */ +/* { dg-final { scan-assembler {vsetvli\s+[a-x0-9]+,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]} { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */ /* { dg-final { scan-assembler {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]} { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */ +/* { dg-final { scan-assembler {vsetvli\s+[a-x0-9]+,\s*zero,\s*e8,\s*mf4,\s*t[au],\s*m[au]} { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */ +/* { dg-final { scan-assembler-times {vsetvli} 3 { target { { any-opts "-O2" "-O3" } && { no-opts "-g" "-funroll-loops" } } } } } */ diff --git a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vsetvl-23.c b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vsetvl-23.c index fc4015e..e3d069f 100644 --- a/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vsetvl-23.c +++ b/gcc/testsuite/gcc.target/riscv/rvv/vsetvl/vsetvl-23.c @@ -33,4 +33,4 @@ void f(int8_t *base, int8_t *out, 
size_t vl, size_t m, size_t k) { /* { dg-final { scan-assembler-times {slli\s+[a-x0-9]+,\s*[a-x0-9]+,\s*4} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */ /* { dg-final { scan-assembler-times {srli\s+[a-x0-9]+,\s*[a-x0-9]+,\s*8} 1 { target { no-opts "-O0" no-opts "-g" no-opts "-funroll-loops" } } } } */ -/* { dg-final { scan-assembler-times {vsetvli} 6 { target { no-opts "-O0" no-opts "-Os" no-opts "-Oz" no-opts "-g" no-opts "-funroll-loops" } } } } */ +/* { dg-final { scan-assembler-times {vsetvli} 5 { target { no-opts "-O0" no-opts "-Os" no-opts "-Oz" no-opts "-g" no-opts "-funroll-loops" } } } } */ diff --git a/gcc/testsuite/gfortran.dg/pr111891.f90 b/gcc/testsuite/gfortran.dg/pr111891.f90 new file mode 100644 index 0000000..1167ed6 --- /dev/null +++ b/gcc/testsuite/gfortran.dg/pr111891.f90 @@ -0,0 +1,21 @@ +! { dg-do compile } +! { dg-options "-O2" } +! { dg-additional-options "-mavx" { target avx } } + +!GCC$ builtin (powf) attributes simd (notinbranch) if('x86_64') + +PARAMETER (NX=3, G=1.4) +DIMENSION T(NX,NX), P(NX,NX) +INTEGER Apx +COMMON P, T + +DO i = 1, 3 + IF (i < 0.0 ) THEN + P(Apx,i) = i**G + T(Apx,i) = i**G + ELSE + P(Apx,i) = 0 + T(Apx,i) = 0 + ENDIF +ENDDO +END diff --git a/gcc/testsuite/gnat.dg/hardcfr.adb b/gcc/testsuite/gnat.dg/hardcfr.adb new file mode 100644 index 0000000..abe1605 --- /dev/null +++ b/gcc/testsuite/gnat.dg/hardcfr.adb @@ -0,0 +1,76 @@ +-- { dg-do run } +-- { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-exceptions -fdump-tree-hardcfr --param=hardcfr-max-blocks=22 --param=hardcfr-max-inline-blocks=12 -O0" } + +procedure HardCFR is + function F (I, J : Integer) return Integer is + begin + if (I < J) then + return 2 * I; + else + return 3 * J; + end if; + end F; + + function G (I : Natural; J : Integer) return Integer is + begin + case I is + when 0 => + return J * 2; + + when 1 => + return J * 3; + + when 2 => + return J * 5; + + when others => + return J * 7; + end case; + end G; + + function H (I : Natural; -- { dg-warning "has more than 22 blocks, the requested maximum" } + J : Integer) + return Integer is + begin + case I is + when 0 => + return J * 2; + + when 1 => + return J * 3; + + when 2 => + return J * 5; + + when 3 => + return J * 7; + + when 4 => + return J * 11; + + when 5 => + return J * 13; + + when 6 => + return J * 17; + + when 7 => + return J * 19; + + when others => + return J * 23; + end case; + end H; +begin + if (F (1, 2) /= 2 or else F (3, 2) /= 6 + or else G (2, 5) /= 25 or else H (4, 3) /= 33) + then + raise Program_Error; + end if; +end HardCFR; + +-- HardCFR and HardCFR.F: +-- { dg-final { scan-tree-dump-times ".builtin_trap" 2 "hardcfr" } } + +-- This is __builtin___hardcfr_check in HardCFR.G: +-- { dg-final { scan-tree-dump-times ".builtin " 1 "hardcfr" } } diff --git a/gcc/tree-core.h b/gcc/tree-core.h index 77417db..2c89b65 100644 --- a/gcc/tree-core.h +++ b/gcc/tree-core.h @@ -95,6 +95,9 @@ struct die_struct; /* Nonzero if this is a cold function. */ #define ECF_COLD (1 << 15) +/* Nonzero if this is a function expected to end with an exception. */ +#define ECF_XTHROW (1 << 16) + /* Call argument flags. */ /* Nonzero if the argument is not used by the function. */ diff --git a/gcc/tree-eh.cc b/gcc/tree-eh.cc index e8ceff3..1cb8e08 100644 --- a/gcc/tree-eh.cc +++ b/gcc/tree-eh.cc @@ -2274,7 +2274,7 @@ make_eh_dispatch_edges (geh_dispatch *stmt) /* Create the single EH edge from STMT to its nearest landing pad, if there is such a landing pad within the current function. 
*/ -void +edge make_eh_edges (gimple *stmt) { basic_block src, dst; @@ -2283,14 +2283,14 @@ make_eh_edges (gimple *stmt) lp_nr = lookup_stmt_eh_lp (stmt); if (lp_nr <= 0) - return; + return NULL; lp = get_eh_landing_pad_from_number (lp_nr); gcc_assert (lp != NULL); src = gimple_bb (stmt); dst = label_to_block (cfun, lp->post_landing_pad); - make_edge (src, dst, EDGE_EH); + return make_edge (src, dst, EDGE_EH); } /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree; diff --git a/gcc/tree-eh.h b/gcc/tree-eh.h index 771be50..1382568 100644 --- a/gcc/tree-eh.h +++ b/gcc/tree-eh.h @@ -30,7 +30,7 @@ extern bool remove_stmt_from_eh_lp (gimple *); extern int lookup_stmt_eh_lp_fn (struct function *, const gimple *); extern int lookup_stmt_eh_lp (const gimple *); extern bool make_eh_dispatch_edges (geh_dispatch *); -extern void make_eh_edges (gimple *); +extern edge make_eh_edges (gimple *); extern edge redirect_eh_edge (edge, basic_block); extern void redirect_eh_dispatch_edge (geh_dispatch *, edge, basic_block); extern bool operation_could_trap_helper_p (enum tree_code, bool, bool, bool, diff --git a/gcc/tree-if-conv.cc b/gcc/tree-if-conv.cc index c381d14..2627651 100644 --- a/gcc/tree-if-conv.cc +++ b/gcc/tree-if-conv.cc @@ -610,7 +610,7 @@ add_to_dst_predicate_list (class loop *loop, edge e, /* Return true if one of the successor edges of BB exits LOOP. */ static bool -bb_with_exit_edge_p (class loop *loop, basic_block bb) +bb_with_exit_edge_p (const class loop *loop, basic_block bb) { edge e; edge_iterator ei; @@ -1297,6 +1297,44 @@ get_loop_body_in_if_conv_order (const class loop *loop) } free (blocks_in_bfs_order); BITMAP_FREE (visited); + + /* Go through loop and reject if-conversion or lowering of bitfields if we + encounter statements we do not believe the vectorizer will be able to + handle. If adding a new type of statement here, make sure + 'ifcvt_local_dce' is also able to handle it propertly. */ + for (index = 0; index < loop->num_nodes; index++) + { + basic_block bb = blocks[index]; + gimple_stmt_iterator gsi; + + bool may_have_nonlocal_labels + = bb_with_exit_edge_p (loop, bb) || bb == loop->latch; + for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) + switch (gimple_code (gsi_stmt (gsi))) + { + case GIMPLE_LABEL: + if (!may_have_nonlocal_labels) + { + tree label + = gimple_label_label (as_a <glabel *> (gsi_stmt (gsi))); + if (DECL_NONLOCAL (label) || FORCED_LABEL (label)) + { + free (blocks); + return NULL; + } + } + /* Fallthru. */ + case GIMPLE_ASSIGN: + case GIMPLE_CALL: + case GIMPLE_DEBUG: + case GIMPLE_COND: + gimple_set_uid (gsi_stmt (gsi), 0); + break; + default: + free (blocks); + return NULL; + } + } return blocks; } @@ -1467,36 +1505,6 @@ if_convertible_loop_p_1 (class loop *loop, vec<data_reference_p> *refs) exit_bb = bb; } - for (i = 0; i < loop->num_nodes; i++) - { - basic_block bb = ifc_bbs[i]; - gimple_stmt_iterator gsi; - - bool may_have_nonlocal_labels - = bb_with_exit_edge_p (loop, bb) || bb == loop->latch; - for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) - switch (gimple_code (gsi_stmt (gsi))) - { - case GIMPLE_LABEL: - if (!may_have_nonlocal_labels) - { - tree label - = gimple_label_label (as_a <glabel *> (gsi_stmt (gsi))); - if (DECL_NONLOCAL (label) || FORCED_LABEL (label)) - return false; - } - /* Fallthru. 
*/ - case GIMPLE_ASSIGN: - case GIMPLE_CALL: - case GIMPLE_DEBUG: - case GIMPLE_COND: - gimple_set_uid (gsi_stmt (gsi), 0); - break; - default: - return false; - } - } - data_reference_p dr; innermost_DR_map @@ -1608,14 +1616,6 @@ if_convertible_loop_p (class loop *loop, vec<data_reference_p> *refs) return false; } - /* More than one loop exit is too much to handle. */ - if (!single_exit (loop)) - { - if (dump_file && (dump_flags & TDF_DETAILS)) - fprintf (dump_file, "multiple exits\n"); - return false; - } - /* If one of the loop header's edge is an exit edge then do not apply if-conversion. */ FOR_EACH_EDGE (e, ei, loop->header->succs) @@ -3495,6 +3495,7 @@ get_bitfield_rep (gassign *stmt, bool write, tree *bitpos, : gimple_assign_rhs1 (stmt); tree field_decl = TREE_OPERAND (comp_ref, 1); + tree ref_offset = component_ref_field_offset (comp_ref); tree rep_decl = DECL_BIT_FIELD_REPRESENTATIVE (field_decl); /* Bail out if the representative is not a suitable type for a scalar @@ -3509,6 +3510,15 @@ get_bitfield_rep (gassign *stmt, bool write, tree *bitpos, if (compare_tree_int (DECL_SIZE (field_decl), bf_prec) != 0) return NULL_TREE; + if (TREE_CODE (DECL_FIELD_OFFSET (rep_decl)) != INTEGER_CST + || TREE_CODE (ref_offset) != INTEGER_CST) + { + if (dump_file && (dump_flags & TDF_DETAILS)) + fprintf (dump_file, "\t Bitfield NOT OK to lower," + " offset is non-constant.\n"); + return NULL_TREE; + } + if (struct_expr) *struct_expr = TREE_OPERAND (comp_ref, 0); @@ -3529,7 +3539,7 @@ get_bitfield_rep (gassign *stmt, bool write, tree *bitpos, the structure and the container from the number of bits from the start of the structure and the actual bitfield member. */ tree bf_pos = fold_build2 (MULT_EXPR, bitsizetype, - DECL_FIELD_OFFSET (field_decl), + ref_offset, build_int_cst (bitsizetype, BITS_PER_UNIT)); bf_pos = fold_build2 (PLUS_EXPR, bitsizetype, bf_pos, DECL_FIELD_BIT_OFFSET (field_decl)); @@ -3736,9 +3746,6 @@ tree_if_conversion (class loop *loop, vec<gimple *> *preds) aggressive_if_conv = true; } - if (!single_exit (loop)) - goto cleanup; - /* If there are more than two BBs in the loop then there is at least one if to convert. */ if (loop->num_nodes > 2 @@ -3758,15 +3765,25 @@ tree_if_conversion (class loop *loop, vec<gimple *> *preds) if (loop->num_nodes > 2) { - need_to_ifcvt = true; + /* More than one loop exit is too much to handle. */ + if (!single_exit (loop)) + { + if (dump_file && (dump_flags & TDF_DETAILS)) + fprintf (dump_file, "Can not ifcvt due to multiple exits\n"); + } + else + { + need_to_ifcvt = true; - if (!if_convertible_loop_p (loop, &refs) || !dbg_cnt (if_conversion_tree)) - goto cleanup; + if (!if_convertible_loop_p (loop, &refs) + || !dbg_cnt (if_conversion_tree)) + goto cleanup; - if ((need_to_predicate || any_complicated_phi) - && ((!flag_tree_loop_vectorize && !loop->force_vectorize) - || loop->dont_vectorize)) - goto cleanup; + if ((need_to_predicate || any_complicated_phi) + && ((!flag_tree_loop_vectorize && !loop->force_vectorize) + || loop->dont_vectorize)) + goto cleanup; + } } if ((flag_tree_loop_vectorize || loop->force_vectorize) @@ -3864,7 +3881,8 @@ tree_if_conversion (class loop *loop, vec<gimple *> *preds) PHIs, those are to be kept in sync with the non-if-converted copy. ??? We'll still keep dead stores though. 
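A minimal sketch of the kind of loop the tree-if-conv.cc changes in this hunk now keep considering: the early return creates a second loop exit, and instead of rejecting the whole loop up front the pass merely reports "Can not ifcvt due to multiple exits" and skips the if-conversion step itself, while the rest of the analysis (such as bitfield lowering) can still run. Hypothetical example, not taken from the patch.

int
find42 (const int *a, int n)
{
  for (int i = 0; i < n; i++)
    if (a[i] == 42)	/* Early return: a second exit out of the loop.  */
      return i;
  return -1;
}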
*/ exit_bbs = BITMAP_ALLOC (NULL); - bitmap_set_bit (exit_bbs, single_exit (loop)->dest->index); + for (edge exit : get_loop_exit_edges (loop)) + bitmap_set_bit (exit_bbs, exit->dest->index); bitmap_set_bit (exit_bbs, loop->latch->index); std::pair <tree, tree> *name_pair; diff --git a/gcc/tree-nested.cc b/gcc/tree-nested.cc index 31c7b60..d2fe3fc 100644 --- a/gcc/tree-nested.cc +++ b/gcc/tree-nested.cc @@ -611,6 +611,14 @@ get_trampoline_type (struct nesting_info *info) if (trampoline_type) return trampoline_type; + /* When trampolines are created off-stack then the only thing we need in the + local frame is a single pointer. */ + if (flag_trampoline_impl == TRAMPOLINE_IMPL_HEAP) + { + trampoline_type = build_pointer_type (void_type_node); + return trampoline_type; + } + align = TRAMPOLINE_ALIGNMENT; size = TRAMPOLINE_SIZE; @@ -2790,17 +2798,27 @@ convert_tramp_reference_op (tree *tp, int *walk_subtrees, void *data) /* Compute the address of the field holding the trampoline. */ x = get_frame_field (info, target_context, x, &wi->gsi); - x = build_addr (x); - x = gsi_gimplify_val (info, x, &wi->gsi); - /* Do machine-specific ugliness. Normally this will involve - computing extra alignment, but it can really be anything. */ - if (descr) - builtin = builtin_decl_implicit (BUILT_IN_ADJUST_DESCRIPTOR); + /* APB: We don't need to do the adjustment calls when using off-stack + trampolines, any such adjustment will be done when the off-stack + trampoline is created. */ + if (!descr && flag_trampoline_impl == TRAMPOLINE_IMPL_HEAP) + x = gsi_gimplify_val (info, x, &wi->gsi); else - builtin = builtin_decl_implicit (BUILT_IN_ADJUST_TRAMPOLINE); - call = gimple_build_call (builtin, 1, x); - x = init_tmp_var_with_call (info, &wi->gsi, call); + { + x = build_addr (x); + + x = gsi_gimplify_val (info, x, &wi->gsi); + + /* Do machine-specific ugliness. Normally this will involve + computing extra alignment, but it can really be anything. */ + if (descr) + builtin = builtin_decl_implicit (BUILT_IN_ADJUST_DESCRIPTOR); + else + builtin = builtin_decl_implicit (BUILT_IN_ADJUST_TRAMPOLINE); + call = gimple_build_call (builtin, 1, x); + x = init_tmp_var_with_call (info, &wi->gsi, call); + } /* Cast back to the proper function type. */ x = build1 (NOP_EXPR, TREE_TYPE (t), x); @@ -3380,6 +3398,7 @@ build_init_call_stmt (struct nesting_info *info, tree decl, tree field, static void finalize_nesting_tree_1 (struct nesting_info *root) { + gimple_seq cleanup_list = NULL; gimple_seq stmt_list = NULL; gimple *stmt; tree context = root->context; @@ -3511,9 +3530,48 @@ finalize_nesting_tree_1 (struct nesting_info *root) if (!field) continue; - x = builtin_decl_implicit (BUILT_IN_INIT_TRAMPOLINE); - stmt = build_init_call_stmt (root, i->context, field, x); - gimple_seq_add_stmt (&stmt_list, stmt); + if (flag_trampoline_impl == TRAMPOLINE_IMPL_HEAP) + { + /* We pass a whole bunch of arguments to the builtin function that + creates the off-stack trampoline, these are + 1. The nested function chain value (that must be passed to the + nested function so it can find the function arguments). + 2. A pointer to the nested function implementation, + 3. The address in the local stack frame where we should write + the address of the trampoline. 
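A conceptual sketch of what the off-stack trampoline scheme described above amounts to at runtime. The helper names below are hypothetical stand-ins for the BUILT_IN_NESTED_PTR_CREATED/DELETED builtins and are stubbed so the example is self-contained; it illustrates the shape of the generated code, not actual GCC output. The local frame keeps a single pointer, one runtime call materializes the trampoline off the stack (arguments 1-3 above), and a paired call on the cleanup path releases it.

typedef int (*nested_fn) (int);

struct frame { void *nested_ptr; };	/* Only a pointer remains in the frame.  */

static int
nested_impl (int x)			/* Stand-in for the nested function.  */
{
  return x + 1;
}

static void
nested_ptr_created (void *chain, void *impl, void **slot)
{
  (void) chain;				/* arg1: the frame/static-chain address.  */
  *slot = impl;				/* arg2 stored through arg3 (mock).  */
}

static void
nested_ptr_deleted (void)		/* Cleanup emitted on scope exit (mock).  */
{
}

int
outer (int x)
{
  struct frame fr = { 0 };
  nested_ptr_created (&fr, (void *) nested_impl, &fr.nested_ptr);
  int r = ((nested_fn) fr.nested_ptr) (x);
  nested_ptr_deleted ();
  return r;
}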
+ + When this code was originally written I just kind of threw + everything at the builtin, figuring I'd work out what was + actually needed later, I think, the stack pointer could + certainly be dropped, arguments #2 and #4 are based off the + stack pointer anyway, so #1 doesn't seem to add much value. */ + tree arg1, arg2, arg3; + + gcc_assert (DECL_STATIC_CHAIN (i->context)); + arg1 = build_addr (root->frame_decl); + arg2 = build_addr (i->context); + + x = build3 (COMPONENT_REF, TREE_TYPE (field), + root->frame_decl, field, NULL_TREE); + arg3 = build_addr (x); + + x = builtin_decl_implicit (BUILT_IN_NESTED_PTR_CREATED); + stmt = gimple_build_call (x, 3, arg1, arg2, arg3); + gimple_seq_add_stmt (&stmt_list, stmt); + + /* This call to delete the nested function trampoline is added to + the cleanup list, and called when we exit the current scope. */ + x = builtin_decl_implicit (BUILT_IN_NESTED_PTR_DELETED); + stmt = gimple_build_call (x, 0); + gimple_seq_add_stmt (&cleanup_list, stmt); + } + else + { + /* Original code to initialise the on stack trampoline. */ + x = builtin_decl_implicit (BUILT_IN_INIT_TRAMPOLINE); + stmt = build_init_call_stmt (root, i->context, field, x); + gimple_seq_add_stmt (&stmt_list, stmt); + } } } @@ -3538,11 +3596,40 @@ finalize_nesting_tree_1 (struct nesting_info *root) /* If we created initialization statements, insert them. */ if (stmt_list) { - gbind *bind; - annotate_all_with_location (stmt_list, DECL_SOURCE_LOCATION (context)); - bind = gimple_seq_first_stmt_as_a_bind (gimple_body (context)); - gimple_seq_add_seq (&stmt_list, gimple_bind_body (bind)); - gimple_bind_set_body (bind, stmt_list); + if (flag_trampoline_impl == TRAMPOLINE_IMPL_HEAP) + { + /* Handle off-stack trampolines. */ + gbind *bind; + annotate_all_with_location (stmt_list, DECL_SOURCE_LOCATION (context)); + annotate_all_with_location (cleanup_list, DECL_SOURCE_LOCATION (context)); + bind = gimple_seq_first_stmt_as_a_bind (gimple_body (context)); + gimple_seq_add_seq (&stmt_list, gimple_bind_body (bind)); + + gimple_seq xxx_list = NULL; + + if (cleanup_list != NULL) + { + /* Maybe we shouldn't be creating this try/finally if -fno-exceptions is + in use. If this is the case, then maybe we should, instead, be + inserting the cleanup code onto every path out of this function? Not + yet figured out how we would do this. */ + gtry *t = gimple_build_try (stmt_list, cleanup_list, GIMPLE_TRY_FINALLY); + gimple_seq_add_stmt (&xxx_list, t); + } + else + xxx_list = stmt_list; + + gimple_bind_set_body (bind, xxx_list); + } + else + { + /* The traditional, on stack trampolines. 
*/ + gbind *bind; + annotate_all_with_location (stmt_list, DECL_SOURCE_LOCATION (context)); + bind = gimple_seq_first_stmt_as_a_bind (gimple_body (context)); + gimple_seq_add_seq (&stmt_list, gimple_bind_body (bind)); + gimple_bind_set_body (bind, stmt_list); + } } /* If a chain_decl was created, then it needs to be registered with diff --git a/gcc/tree-parloops.cc b/gcc/tree-parloops.cc index e495bbd..80f3dd6 100644 --- a/gcc/tree-parloops.cc +++ b/gcc/tree-parloops.cc @@ -2203,6 +2203,11 @@ create_loop_fn (location_t loc) DECL_CONTEXT (t) = decl; TREE_USED (t) = 1; DECL_ARGUMENTS (decl) = t; + DECL_FUNCTION_SPECIFIC_TARGET (decl) + = DECL_FUNCTION_SPECIFIC_TARGET (act_cfun->decl); + DECL_FUNCTION_SPECIFIC_OPTIMIZATION (decl) + = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (act_cfun->decl); + allocate_struct_function (decl, false); @@ -2526,14 +2531,15 @@ try_transform_to_exit_first_loop_alt (class loop *loop, tree nit_type = TREE_TYPE (nit); /* Figure out whether nit + 1 overflows. */ - if (TREE_CODE (nit) == INTEGER_CST) + if (poly_int_tree_p (nit)) { if (!tree_int_cst_equal (nit, TYPE_MAX_VALUE (nit_type))) { alt_bound = fold_build2_loc (UNKNOWN_LOCATION, PLUS_EXPR, nit_type, nit, build_one_cst (nit_type)); - gcc_assert (TREE_CODE (alt_bound) == INTEGER_CST); + gcc_assert (TREE_CODE (alt_bound) == INTEGER_CST + || TREE_CODE (alt_bound) == POLY_INT_CST); transform_to_exit_first_loop_alt (loop, reduction_list, alt_bound); return true; } diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h index 79a5f33..09e6ada 100644 --- a/gcc/tree-pass.h +++ b/gcc/tree-pass.h @@ -657,6 +657,8 @@ extern gimple_opt_pass *make_pass_gimple_isel (gcc::context *ctxt); extern gimple_opt_pass *make_pass_harden_compares (gcc::context *ctxt); extern gimple_opt_pass *make_pass_harden_conditional_branches (gcc::context *ctxt); +extern gimple_opt_pass *make_pass_harden_control_flow_redundancy (gcc::context + *ctxt); /* Current optimization pass. */ extern opt_pass *current_pass; diff --git a/gcc/tree-scalar-evolution.cc b/gcc/tree-scalar-evolution.cc index 7cafe5c..95a15fe 100644 --- a/gcc/tree-scalar-evolution.cc +++ b/gcc/tree-scalar-evolution.cc @@ -3286,7 +3286,8 @@ simple_iv_with_niters (class loop *wrto_loop, class loop *use_loop, type = TREE_TYPE (iv->base); e = TREE_OPERAND (iv->base, 0); - if (TREE_CODE (e) != PLUS_EXPR + if (!tree_nop_conversion_p (type, TREE_TYPE (e)) + || TREE_CODE (e) != PLUS_EXPR || TREE_CODE (TREE_OPERAND (e, 1)) != INTEGER_CST || !tree_int_cst_equal (iv->step, fold_convert (type, TREE_OPERAND (e, 1)))) diff --git a/gcc/tree-ssa-loop-im.cc b/gcc/tree-ssa-loop-im.cc index 49aeb68..396963b 100644 --- a/gcc/tree-ssa-loop-im.cc +++ b/gcc/tree-ssa-loop-im.cc @@ -400,6 +400,24 @@ movement_possibility_1 (gimple *stmt) || gimple_could_trap_p (stmt)) return MOVE_PRESERVE_EXECUTION; + if (is_gimple_assign (stmt)) + { + auto code = gimple_assign_rhs_code (stmt); + tree type = TREE_TYPE (gimple_assign_rhs1 (stmt)); + /* For shifts and rotates and possibly out-of-bound shift operands + we currently cannot rewrite them into something unconditionally + well-defined. */ + if ((code == LSHIFT_EXPR + || code == RSHIFT_EXPR + || code == LROTATE_EXPR + || code == RROTATE_EXPR) + && (TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST + /* We cannot use ranges at 'stmt' here. */ + || wi::ltu_p (wi::to_wide (gimple_assign_rhs2 (stmt)), + element_precision (type)))) + ret = MOVE_PRESERVE_EXECUTION; + } + /* Non local loads in a transaction cannot be hoisted out. 
Well, unless the load happens on every path out of the loop, but we don't take this into account yet. */ diff --git a/gcc/tree-ssa-loop-ivopts.cc b/gcc/tree-ssa-loop-ivopts.cc index 2c1f084..98e5b30 100644 --- a/gcc/tree-ssa-loop-ivopts.cc +++ b/gcc/tree-ssa-loop-ivopts.cc @@ -2829,12 +2829,29 @@ strip_offset_1 (tree expr, bool inside_addr, bool top_compref, else if (integer_zerop (op0)) { if (code == MINUS_EXPR) - expr = fold_build1 (NEGATE_EXPR, type, op1); + { + if (TYPE_OVERFLOW_UNDEFINED (type)) + { + type = unsigned_type_for (type); + op1 = fold_convert (type, op1); + } + expr = fold_build1 (NEGATE_EXPR, type, op1); + } else expr = op1; } else - expr = fold_build2 (code, type, op0, op1); + { + if (TYPE_OVERFLOW_UNDEFINED (type)) + { + type = unsigned_type_for (type); + if (code == POINTER_PLUS_EXPR) + code = PLUS_EXPR; + op0 = fold_convert (type, op0); + op1 = fold_convert (type, op1); + } + expr = fold_build2 (code, type, op0, op1); + } return fold_convert (orig_type, expr); @@ -2852,7 +2869,15 @@ strip_offset_1 (tree expr, bool inside_addr, bool top_compref, if (integer_zerop (op0)) expr = op0; else - expr = fold_build2 (MULT_EXPR, type, op0, op1); + { + if (TYPE_OVERFLOW_UNDEFINED (type)) + { + type = unsigned_type_for (type); + op0 = fold_convert (type, op0); + op1 = fold_convert (type, op1); + } + expr = fold_build2 (MULT_EXPR, type, op0, op1); + } return fold_convert (orig_type, expr); diff --git a/gcc/tree-vect-data-refs.cc b/gcc/tree-vect-data-refs.cc index 9607a9fb..d5c9c4a 100644 --- a/gcc/tree-vect-data-refs.cc +++ b/gcc/tree-vect-data-refs.cc @@ -97,6 +97,34 @@ vect_lanes_optab_supported_p (const char *name, convert_optab optab, return true; } +/* Helper function to identify a simd clone call. If this is a call to a + function with simd clones then return the corresponding cgraph_node, + otherwise return NULL. */ + +static cgraph_node* +simd_clone_call_p (gimple *stmt) +{ + gcall *call = dyn_cast <gcall *> (stmt); + if (!call) + return NULL; + + tree fndecl = NULL_TREE; + if (gimple_call_internal_p (call, IFN_MASK_CALL)) + fndecl = TREE_OPERAND (gimple_call_arg (stmt, 0), 0); + else + fndecl = gimple_call_fndecl (stmt); + + if (fndecl == NULL_TREE) + return NULL; + + cgraph_node *node = cgraph_node::get (fndecl); + if (node && node->simd_clones != NULL) + return node; + + return NULL; +} + + /* Return the smallest scalar part of STMT_INFO. This is used to determine the vectype of the stmt. 
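The movement_possibility_1 hunk above downgrades shifts and rotates with a possibly out-of-range count to MOVE_PRESERVE_EXECUTION, so they are not hoisted speculatively. A small illustration (not GCC code) of why: lifting "x >> n" out of the loop and above its guard would evaluate a shift whose count may be at least the operand's precision, which is undefined behaviour even though the guarded original never executes it.

unsigned
sum_shifted (unsigned x, unsigned n, int iters)
{
  unsigned s = 0;
  for (int i = 0; i < iters; i++)
    if (n < 32)		/* The shift is only well-defined on this path.  */
      s += x >> n;	/* Hoisting this above the guard would be UB for n >= 32.  */
  return s;
}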
We generally set the @@ -145,6 +173,23 @@ vect_get_smallest_scalar_type (stmt_vec_info stmt_info, tree scalar_type) scalar_type = rhs_type; } } + else if (cgraph_node *node = simd_clone_call_p (stmt_info->stmt)) + { + auto clone = node->simd_clones->simdclone; + for (unsigned int i = 0; i < clone->nargs; ++i) + { + if (clone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR) + { + tree arg_scalar_type = TREE_TYPE (clone->args[i].vector_type); + rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (arg_scalar_type)); + if (rhs < lhs) + { + scalar_type = arg_scalar_type; + lhs = rhs; + } + } + } + } else if (gcall *call = dyn_cast <gcall *> (stmt_info->stmt)) { unsigned int i = 0; diff --git a/gcc/tree-vect-loop-manip.cc b/gcc/tree-vect-loop-manip.cc index 1f7779b..9c25512 100644 --- a/gcc/tree-vect-loop-manip.cc +++ b/gcc/tree-vect-loop-manip.cc @@ -1633,6 +1633,21 @@ slpeel_tree_duplicate_loop_to_edge_cfg (class loop *loop, edge loop_exit, { tree new_arg = gimple_phi_arg (phi, 0)->def; new_phi_args.put (new_arg, gimple_phi_result (phi)); + + if (TREE_CODE (new_arg) != SSA_NAME) + continue; + /* If the PHI node dominates the loop then we shouldn't create + a new LC-SSSA PHI for it in the intermediate block. Unless the + the loop has been versioned. If it has then we need the PHI + node such that later when the loop guard is added the original + dominating PHI can be found. */ + basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (new_arg)); + if (loop == scalar_loop + && (!def_bb || !flow_bb_inside_loop_p (loop, def_bb))) + { + auto gsi = gsi_for_stmt (phi); + remove_phi_node (&gsi, true); + } } /* Copy the current loop LC PHI nodes between the original loop exit @@ -3220,7 +3235,7 @@ vect_do_peeling (loop_vec_info loop_vinfo, tree niters, tree nitersm1, /* Update the number of iterations for prolog loop. */ tree step_prolog = build_one_cst (TREE_TYPE (niters_prolog)); - vect_set_loop_condition (prolog, prolog_e, loop_vinfo, niters_prolog, + vect_set_loop_condition (prolog, prolog_e, NULL, niters_prolog, step_prolog, NULL_TREE, false); /* Skip the prolog loop. */ diff --git a/gcc/tree-vect-loop.cc b/gcc/tree-vect-loop.cc index ebab195..4a8b0a1 100644 --- a/gcc/tree-vect-loop.cc +++ b/gcc/tree-vect-loop.cc @@ -11361,9 +11361,12 @@ update_epilogue_loop_vinfo (class loop *epilogue, tree advance) /* Data references for gather loads and scatter stores do not use the updated offset we set using ADVANCE. Instead we have to make sure the reference in the data references point to the corresponding copy of - the original in the epilogue. */ - if (STMT_VINFO_MEMORY_ACCESS_TYPE (vect_stmt_to_vectorize (stmt_vinfo)) - == VMAT_GATHER_SCATTER) + the original in the epilogue. Make sure to update both + gather/scatters recognized by dataref analysis and also other + refs that get_load_store_type classified as VMAT_GATHER_SCATTER. 
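A hedged illustration of the vect_get_smallest_scalar_type extension above: when a statement is a call to a function with SIMD clones (simd_clone_call_p), the element types of the clones' vector arguments now take part in the smallest-scalar-type computation, so a clone argument narrower than the statement's result can lower the type that drives the vectorization factor. The clone below is hypothetical and assumes -fopenmp-simd.

#pragma omp declare simd notinbranch
short
halve (short x)
{
  return x / 2;
}

void
use (short *in, int *out, int n)
{
  for (int i = 0; i < n; i++)
    out[i] = halve (in[i]);	/* 'short' becomes the smallest scalar type.  */
}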
*/ + auto vstmt_vinfo = vect_stmt_to_vectorize (stmt_vinfo); + if (STMT_VINFO_MEMORY_ACCESS_TYPE (vstmt_vinfo) == VMAT_GATHER_SCATTER + || STMT_VINFO_GATHER_SCATTER_P (vstmt_vinfo)) { DR_REF (dr) = simplify_replace_tree (DR_REF (dr), NULL_TREE, NULL_TREE, diff --git a/gcc/tree-vect-patterns.cc b/gcc/tree-vect-patterns.cc index 6964c99..6b6b412 100644 --- a/gcc/tree-vect-patterns.cc +++ b/gcc/tree-vect-patterns.cc @@ -128,6 +128,7 @@ vect_init_pattern_stmt (vec_info *vinfo, gimple *pattern_stmt, STMT_VINFO_RELATED_STMT (pattern_stmt_info) = orig_stmt_info; STMT_VINFO_DEF_TYPE (pattern_stmt_info) = STMT_VINFO_DEF_TYPE (orig_stmt_info); + STMT_VINFO_TYPE (pattern_stmt_info) = STMT_VINFO_TYPE (orig_stmt_info); if (!STMT_VINFO_VECTYPE (pattern_stmt_info)) { gcc_assert (!vectype @@ -2539,6 +2540,10 @@ vect_recog_widen_sum_pattern (vec_info *vinfo, bf_value = BIT_FIELD_REF (container, bitsize, bitpos); result = (type_out) bf_value; + or + + if (BIT_FIELD_REF (container, bitsize, bitpos) `cmp` <constant>) + where type_out is a non-bitfield type, that is to say, it's precision matches 2^(TYPE_SIZE(type_out) - (TYPE_UNSIGNED (type_out) ? 1 : 2)). @@ -2548,6 +2553,10 @@ vect_recog_widen_sum_pattern (vec_info *vinfo, here it starts with: result = (type_out) bf_value; + or + + if (BIT_FIELD_REF (container, bitsize, bitpos) `cmp` <constant>) + Output: * TYPE_OUT: The vector type of the output of this pattern. @@ -2589,33 +2598,45 @@ vect_recog_widen_sum_pattern (vec_info *vinfo, The shifting is always optional depending on whether bitpos != 0. + When the original bitfield was inside a gcond then an new gcond is also + generated with the newly `result` as the operand to the comparison. + */ static gimple * vect_recog_bitfield_ref_pattern (vec_info *vinfo, stmt_vec_info stmt_info, tree *type_out) { - gassign *first_stmt = dyn_cast <gassign *> (stmt_info->stmt); - - if (!first_stmt) - return NULL; - - gassign *bf_stmt; - if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (first_stmt)) - && TREE_CODE (gimple_assign_rhs1 (first_stmt)) == SSA_NAME) + gimple *bf_stmt = NULL; + tree lhs = NULL_TREE; + tree ret_type = NULL_TREE; + gimple *stmt = STMT_VINFO_STMT (stmt_info); + if (gcond *cond_stmt = dyn_cast <gcond *> (stmt)) + { + tree op = gimple_cond_lhs (cond_stmt); + if (TREE_CODE (op) != SSA_NAME) + return NULL; + bf_stmt = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op)); + if (TREE_CODE (gimple_cond_rhs (cond_stmt)) != INTEGER_CST) + return NULL; + } + else if (is_gimple_assign (stmt) + && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt)) + && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME) { - gimple *second_stmt - = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (first_stmt)); + gimple *second_stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt)); bf_stmt = dyn_cast <gassign *> (second_stmt); - if (!bf_stmt - || gimple_assign_rhs_code (bf_stmt) != BIT_FIELD_REF) - return NULL; + lhs = gimple_assign_lhs (stmt); + ret_type = TREE_TYPE (lhs); } - else + + if (!bf_stmt + || gimple_assign_rhs_code (bf_stmt) != BIT_FIELD_REF) return NULL; tree bf_ref = gimple_assign_rhs1 (bf_stmt); tree container = TREE_OPERAND (bf_ref, 0); + ret_type = ret_type ? 
ret_type : TREE_TYPE (container); if (!bit_field_offset (bf_ref).is_constant () || !bit_field_size (bf_ref).is_constant () @@ -2629,8 +2650,6 @@ vect_recog_bitfield_ref_pattern (vec_info *vinfo, stmt_vec_info stmt_info, gimple *use_stmt, *pattern_stmt; use_operand_p use_p; - tree ret = gimple_assign_lhs (first_stmt); - tree ret_type = TREE_TYPE (ret); bool shift_first = true; tree container_type = TREE_TYPE (container); tree vectype = get_vectype_for_scalar_type (vinfo, container_type); @@ -2675,7 +2694,7 @@ vect_recog_bitfield_ref_pattern (vec_info *vinfo, stmt_vec_info stmt_info, /* If the only use of the result of this BIT_FIELD_REF + CONVERT is a PLUS_EXPR then do the shift last as some targets can combine the shift and add into a single instruction. */ - if (single_imm_use (gimple_assign_lhs (first_stmt), &use_p, &use_stmt)) + if (lhs && single_imm_use (lhs, &use_p, &use_stmt)) { if (gimple_code (use_stmt) == GIMPLE_ASSIGN && gimple_assign_rhs_code (use_stmt) == PLUS_EXPR) @@ -2748,6 +2767,19 @@ vect_recog_bitfield_ref_pattern (vec_info *vinfo, stmt_vec_info stmt_info, NOP_EXPR, result); } + if (!lhs) + { + append_pattern_def_seq (vinfo, stmt_info, pattern_stmt, vectype); + gcond *cond_stmt = dyn_cast <gcond *> (stmt_info->stmt); + tree cond_cst = gimple_cond_rhs (cond_stmt); + pattern_stmt + = gimple_build_cond (gimple_cond_code (cond_stmt), + gimple_get_lhs (pattern_stmt), + fold_convert (ret_type, cond_cst), + gimple_cond_true_label (cond_stmt), + gimple_cond_false_label (cond_stmt)); + } + *type_out = STMT_VINFO_VECTYPE (stmt_info); vect_pattern_detected ("bitfield_ref pattern", stmt_info->stmt); diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc index d081999..24bf658 100644 --- a/gcc/tree-vect-slp.cc +++ b/gcc/tree-vect-slp.cc @@ -283,10 +283,12 @@ typedef struct _slp_oprnd_info vec<tree> ops; /* Information about the first statement, its vector def-type, type, the operand itself in case it's constant, and an indication if it's a pattern - stmt. */ + stmt and gather/scatter info. */ tree first_op_type; enum vect_def_type first_dt; bool any_pattern; + bool first_gs_p; + gather_scatter_info first_gs_info; } *slp_oprnd_info; @@ -308,6 +310,7 @@ vect_create_oprnd_info (int nops, int group_size) oprnd_info->first_dt = vect_uninitialized_def; oprnd_info->first_op_type = NULL_TREE; oprnd_info->any_pattern = false; + oprnd_info->first_gs_p = false; oprnds_info.quick_push (oprnd_info); } @@ -507,6 +510,10 @@ static const int arg2_map[] = { 1, 2 }; static const int arg1_arg4_map[] = { 2, 1, 4 }; static const int arg3_arg2_map[] = { 2, 3, 2 }; static const int op1_op0_map[] = { 2, 1, 0 }; +static const int off_map[] = { 1, -3 }; +static const int off_op0_map[] = { 2, -3, 0 }; +static const int off_arg2_map[] = { 2, -3, 2 }; +static const int off_arg3_arg2_map[] = { 3, -3, 3, 2 }; static const int mask_call_maps[6][7] = { { 1, 1, }, { 2, 1, 2, }, @@ -524,11 +531,14 @@ static const int mask_call_maps[6][7] = { - for each child node, the index of the argument associated with that node. The special index -1 is the first operand of an embedded comparison and the special index -2 is the second operand of an embedded comparison. + The special indes -3 is the offset of a gather as analyzed by + vect_check_gather_scatter. SWAP is as for vect_get_and_check_slp_defs. 
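The off_map/off_op0_map/off_arg2_map/off_arg3_arg2_map tables and the special index -3 above let SLP discovery expose the gather/scatter offset computed by vect_check_gather_scatter as an explicit child operand, with the base and scale required to match across the group. A source shape that produces such an offset operand, an indexed (gather-style) load, might look like the following; illustrative only, not taken from the patch.

void
gather_loads (double *out, const double *base, const int *idx, int n)
{
  for (int i = 0; i < n; i++)
    out[i] = base[idx[i]];	/* Offset child: idx[i]; base and scale must agree
				   across the group for SLP to succeed.  */
}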
*/ static const int * -vect_get_operand_map (const gimple *stmt, unsigned char swap = 0) +vect_get_operand_map (const gimple *stmt, bool gather_scatter_p = false, + unsigned char swap = 0) { if (auto assign = dyn_cast<const gassign *> (stmt)) { @@ -538,6 +548,8 @@ vect_get_operand_map (const gimple *stmt, unsigned char swap = 0) if (TREE_CODE_CLASS (gimple_assign_rhs_code (assign)) == tcc_comparison && swap) return op1_op0_map; + if (gather_scatter_p) + return gimple_vdef (stmt) ? off_op0_map : off_map; } gcc_assert (!swap); if (auto call = dyn_cast<const gcall *> (stmt)) @@ -546,7 +558,7 @@ vect_get_operand_map (const gimple *stmt, unsigned char swap = 0) switch (gimple_call_internal_fn (call)) { case IFN_MASK_LOAD: - return arg2_map; + return gather_scatter_p ? off_arg2_map : arg2_map; case IFN_GATHER_LOAD: return arg1_map; @@ -555,7 +567,7 @@ vect_get_operand_map (const gimple *stmt, unsigned char swap = 0) return arg1_arg4_map; case IFN_MASK_STORE: - return arg3_arg2_map; + return gather_scatter_p ? off_arg3_arg2_map : arg3_arg2_map; case IFN_MASK_CALL: { @@ -609,6 +621,8 @@ vect_get_and_check_slp_defs (vec_info *vinfo, unsigned char swap, unsigned int i, number_of_oprnds; enum vect_def_type dt = vect_uninitialized_def; slp_oprnd_info oprnd_info; + gather_scatter_info gs_info; + unsigned int gs_op = -1u; unsigned int commutative_op = -1U; bool first = stmt_num == 0; @@ -618,7 +632,9 @@ vect_get_and_check_slp_defs (vec_info *vinfo, unsigned char swap, return -1; number_of_oprnds = gimple_num_args (stmt_info->stmt); - const int *map = vect_get_operand_map (stmt_info->stmt, swap); + const int *map + = vect_get_operand_map (stmt_info->stmt, + STMT_VINFO_GATHER_SCATTER_P (stmt_info), swap); if (map) number_of_oprnds = *map++; if (gcall *stmt = dyn_cast <gcall *> (stmt_info->stmt)) @@ -640,8 +656,30 @@ vect_get_and_check_slp_defs (vec_info *vinfo, unsigned char swap, enum vect_def_type *dts = XALLOCAVEC (enum vect_def_type, number_of_oprnds); for (i = 0; i < number_of_oprnds; i++) { + oprnd_info = (*oprnds_info)[i]; int opno = map ? map[i] : int (i); - if (opno < 0) + if (opno == -3) + { + gcc_assert (STMT_VINFO_GATHER_SCATTER_P (stmt_info)); + if (!is_a <loop_vec_info> (vinfo) + || !vect_check_gather_scatter (stmt_info, + as_a <loop_vec_info> (vinfo), + first ? 
&oprnd_info->first_gs_info + : &gs_info)) + return -1; + + if (first) + { + oprnd_info->first_gs_p = true; + oprnd = oprnd_info->first_gs_info.offset; + } + else + { + gs_op = i; + oprnd = gs_info.offset; + } + } + else if (opno < 0) oprnd = TREE_OPERAND (gimple_arg (stmt_info->stmt, 0), -1 - opno); else { @@ -658,8 +696,6 @@ vect_get_and_check_slp_defs (vec_info *vinfo, unsigned char swap, if (TREE_CODE (oprnd) == VIEW_CONVERT_EXPR) oprnd = TREE_OPERAND (oprnd, 0); - oprnd_info = (*oprnds_info)[i]; - stmt_vec_info def_stmt_info; if (!vect_is_simple_use (oprnd, vinfo, &dts[i], &def_stmt_info)) { @@ -792,6 +828,32 @@ vect_get_and_check_slp_defs (vec_info *vinfo, unsigned char swap, return 1; } + if ((gs_op == i) != oprnd_info->first_gs_p) + { + if (dump_enabled_p ()) + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "Build SLP failed: mixed gather and non-gather\n"); + return 1; + } + else if (gs_op == i) + { + if (!operand_equal_p (oprnd_info->first_gs_info.base, + gs_info.base)) + { + if (dump_enabled_p ()) + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "Build SLP failed: different gather base\n"); + return 1; + } + if (oprnd_info->first_gs_info.scale != gs_info.scale) + { + if (dump_enabled_p ()) + dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, + "Build SLP failed: different gather scale\n"); + return 1; + } + } + /* Not first stmt of the group, check that the def-stmt/s match the def-stmt/s of the first stmt. Allow different definition types for reduction chains: the first stmt must be a @@ -1235,6 +1297,9 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap, || rhs_code == INDIRECT_REF || rhs_code == COMPONENT_REF || rhs_code == MEM_REF))) + || (ldst_p + && (STMT_VINFO_GATHER_SCATTER_P (stmt_info) + != STMT_VINFO_GATHER_SCATTER_P (first_stmt_info))) || first_stmt_ldst_p != ldst_p || first_stmt_phi_p != phi_p) { @@ -1357,12 +1422,12 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap, if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)) && rhs_code != CFN_GATHER_LOAD && rhs_code != CFN_MASK_GATHER_LOAD + && !STMT_VINFO_GATHER_SCATTER_P (stmt_info) /* Not grouped loads are handled as externals for BB vectorization. For loop vectorization we can handle splats the same we handle single element interleaving. */ && (is_a <bb_vec_info> (vinfo) - || stmt_info != first_stmt_info - || STMT_VINFO_GATHER_SCATTER_P (stmt_info))) + || stmt_info != first_stmt_info)) { /* Not grouped load. 
*/ if (dump_enabled_p ()) @@ -1780,7 +1845,9 @@ vect_build_slp_tree_2 (vec_info *vinfo, slp_tree node, return NULL; nops = gimple_num_args (stmt_info->stmt); - if (const int *map = vect_get_operand_map (stmt_info->stmt)) + if (const int *map = vect_get_operand_map (stmt_info->stmt, + STMT_VINFO_GATHER_SCATTER_P + (stmt_info))) nops = map[0]; /* If the SLP node is a PHI (induction or reduction), terminate @@ -1858,6 +1925,8 @@ vect_build_slp_tree_2 (vec_info *vinfo, slp_tree node, gcc_assert (gimple_call_internal_p (stmt, IFN_MASK_LOAD) || gimple_call_internal_p (stmt, IFN_GATHER_LOAD) || gimple_call_internal_p (stmt, IFN_MASK_GATHER_LOAD)); + else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)) + gcc_assert (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info))); else { *max_nunits = this_max_nunits; diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc index e5ff44c..337b24c 100644 --- a/gcc/tree-vect-stmts.cc +++ b/gcc/tree-vect-stmts.cc @@ -2547,7 +2547,8 @@ vect_build_all_ones_mask (vec_info *vinfo, { if (TREE_CODE (masktype) == INTEGER_TYPE) return build_int_cst (masktype, -1); - else if (TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE) + else if (VECTOR_BOOLEAN_TYPE_P (masktype) + || TREE_CODE (TREE_TYPE (masktype)) == INTEGER_TYPE) { tree mask = build_int_cst (TREE_TYPE (masktype), -1); mask = build_vector_from_val (masktype, mask); @@ -2595,268 +2596,99 @@ vect_build_zero_merge_argument (vec_info *vinfo, /* Build a gather load call while vectorizing STMT_INFO. Insert new instructions before GSI and add them to VEC_STMT. GS_INFO describes the gather load operation. If the load is conditional, MASK is the - unvectorized condition and MASK_DT is its definition type, otherwise - MASK is null. */ + vectorized condition, otherwise MASK is null. PTR is the base + pointer and OFFSET is the vectorized offset. */ -static void -vect_build_gather_load_calls (vec_info *vinfo, stmt_vec_info stmt_info, - gimple_stmt_iterator *gsi, - gimple **vec_stmt, - gather_scatter_info *gs_info, - tree mask, - stmt_vector_for_cost *cost_vec) +static gimple * +vect_build_one_gather_load_call (vec_info *vinfo, stmt_vec_info stmt_info, + gimple_stmt_iterator *gsi, + gather_scatter_info *gs_info, + tree ptr, tree offset, tree mask) { - loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo); - class loop *loop = LOOP_VINFO_LOOP (loop_vinfo); tree vectype = STMT_VINFO_VECTYPE (stmt_info); - poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype); - int ncopies = vect_get_num_copies (loop_vinfo, vectype); - edge pe = loop_preheader_edge (loop); - enum { NARROW, NONE, WIDEN } modifier; - poly_uint64 gather_off_nunits - = TYPE_VECTOR_SUBPARTS (gs_info->offset_vectype); - - /* FIXME: Keep the previous costing way in vect_model_load_cost by costing - N scalar loads, but it should be tweaked to use target specific costs - on related gather load calls. 
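A scalar model (illustration only, not GCC code) of what one call built by vect_build_one_gather_load_call in this hunk computes from its five operands, merge source, base pointer, offset vector, mask and scale, element by element:

#include <string.h>

static void
gather_model (double *dst, const double *src_merge, const char *base,
	      const long *off, const unsigned char *mask, int scale, int n)
{
  for (int i = 0; i < n; i++)
    {
      if (mask[i])
	memcpy (&dst[i], base + off[i] * (long) scale, sizeof (double));
      else
	dst[i] = src_merge[i];	/* The merge operand supplies inactive lanes.  */
    }
}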
*/ - if (cost_vec) - { - unsigned int assumed_nunits = vect_nunits_for_cost (vectype); - unsigned int inside_cost; - inside_cost = record_stmt_cost (cost_vec, ncopies * assumed_nunits, - scalar_load, stmt_info, 0, vect_body); - if (dump_enabled_p ()) - dump_printf_loc (MSG_NOTE, vect_location, - "vect_model_load_cost: inside_cost = %d, " - "prologue_cost = 0 .\n", - inside_cost); - return; - } - tree arglist = TYPE_ARG_TYPES (TREE_TYPE (gs_info->decl)); tree rettype = TREE_TYPE (TREE_TYPE (gs_info->decl)); tree srctype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); - tree ptrtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); + /* ptrtype */ arglist = TREE_CHAIN (arglist); tree idxtype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); tree masktype = TREE_VALUE (arglist); arglist = TREE_CHAIN (arglist); tree scaletype = TREE_VALUE (arglist); - tree real_masktype = masktype; + tree var; gcc_checking_assert (types_compatible_p (srctype, rettype) && (!mask || TREE_CODE (masktype) == INTEGER_TYPE || types_compatible_p (srctype, masktype))); - if (mask) - masktype = truth_type_for (srctype); - tree mask_halftype = masktype; - tree perm_mask = NULL_TREE; - tree mask_perm_mask = NULL_TREE; - if (known_eq (nunits, gather_off_nunits)) - modifier = NONE; - else if (known_eq (nunits * 2, gather_off_nunits)) + tree op = offset; + if (!useless_type_conversion_p (idxtype, TREE_TYPE (op))) { - modifier = WIDEN; - - /* Currently widening gathers and scatters are only supported for - fixed-length vectors. */ - int count = gather_off_nunits.to_constant (); - vec_perm_builder sel (count, count, 1); - for (int i = 0; i < count; ++i) - sel.quick_push (i | (count / 2)); - - vec_perm_indices indices (sel, 1, count); - perm_mask = vect_gen_perm_mask_checked (gs_info->offset_vectype, - indices); - } - else if (known_eq (nunits, gather_off_nunits * 2)) - { - modifier = NARROW; - - /* Currently narrowing gathers and scatters are only supported for - fixed-length vectors. */ - int count = nunits.to_constant (); - vec_perm_builder sel (count, count, 1); - sel.quick_grow (count); - for (int i = 0; i < count; ++i) - sel[i] = i < count / 2 ? 
i : i + count / 2; - vec_perm_indices indices (sel, 2, count); - perm_mask = vect_gen_perm_mask_checked (vectype, indices); - - ncopies *= 2; - - if (mask && VECTOR_TYPE_P (real_masktype)) - { - for (int i = 0; i < count; ++i) - sel[i] = i | (count / 2); - indices.new_vector (sel, 2, count); - mask_perm_mask = vect_gen_perm_mask_checked (masktype, indices); - } - else if (mask) - mask_halftype = truth_type_for (gs_info->offset_vectype); - } - else - gcc_unreachable (); - - tree scalar_dest = gimple_get_lhs (stmt_info->stmt); - tree vec_dest = vect_create_destination_var (scalar_dest, vectype); - - tree ptr = fold_convert (ptrtype, gs_info->base); - if (!is_gimple_min_invariant (ptr)) - { - gimple_seq seq; - ptr = force_gimple_operand (ptr, &seq, true, NULL_TREE); - basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, seq); - gcc_assert (!new_bb); + gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)), + TYPE_VECTOR_SUBPARTS (idxtype))); + var = vect_get_new_ssa_name (idxtype, vect_simple_var); + op = build1 (VIEW_CONVERT_EXPR, idxtype, op); + gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + op = var; } - tree scale = build_int_cst (scaletype, gs_info->scale); - - tree vec_oprnd0 = NULL_TREE; - tree vec_mask = NULL_TREE; tree src_op = NULL_TREE; tree mask_op = NULL_TREE; - tree prev_res = NULL_TREE; - - if (!mask) - { - src_op = vect_build_zero_merge_argument (vinfo, stmt_info, rettype); - mask_op = vect_build_all_ones_mask (vinfo, stmt_info, masktype); - } - - auto_vec<tree> vec_oprnds0; - auto_vec<tree> vec_masks; - vect_get_vec_defs_for_operand (vinfo, stmt_info, - modifier == WIDEN ? ncopies / 2 : ncopies, - gs_info->offset, &vec_oprnds0); if (mask) - vect_get_vec_defs_for_operand (vinfo, stmt_info, - modifier == NARROW ? ncopies / 2 : ncopies, - mask, &vec_masks, masktype); - for (int j = 0; j < ncopies; ++j) { - tree op, var; - if (modifier == WIDEN && (j & 1)) - op = permute_vec_elements (vinfo, vec_oprnd0, vec_oprnd0, - perm_mask, stmt_info, gsi); - else - op = vec_oprnd0 = vec_oprnds0[modifier == WIDEN ? 
j / 2 : j]; - - if (!useless_type_conversion_p (idxtype, TREE_TYPE (op))) - { - gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)), - TYPE_VECTOR_SUBPARTS (idxtype))); - var = vect_get_new_ssa_name (idxtype, vect_simple_var); - op = build1 (VIEW_CONVERT_EXPR, idxtype, op); - gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); - op = var; - } - - if (mask) - { - if (mask_perm_mask && (j & 1)) - mask_op = permute_vec_elements (vinfo, mask_op, mask_op, - mask_perm_mask, stmt_info, gsi); - else - { - if (modifier == NARROW) - { - if ((j & 1) == 0) - vec_mask = vec_masks[j / 2]; - } - else - vec_mask = vec_masks[j]; - - mask_op = vec_mask; - if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask))) - { - poly_uint64 sub1 = TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op)); - poly_uint64 sub2 = TYPE_VECTOR_SUBPARTS (masktype); - gcc_assert (known_eq (sub1, sub2)); - var = vect_get_new_ssa_name (masktype, vect_simple_var); - mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op); - gassign *new_stmt - = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); - mask_op = var; - } - } - if (modifier == NARROW && !VECTOR_TYPE_P (real_masktype)) - { - var = vect_get_new_ssa_name (mask_halftype, vect_simple_var); - gassign *new_stmt - = gimple_build_assign (var, (j & 1) ? VEC_UNPACK_HI_EXPR - : VEC_UNPACK_LO_EXPR, - mask_op); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); - mask_op = var; - } - src_op = mask_op; - } - - tree mask_arg = mask_op; - if (masktype != real_masktype) + if (!useless_type_conversion_p (masktype, TREE_TYPE (mask))) { - tree utype, optype = TREE_TYPE (mask_op); - if (VECTOR_TYPE_P (real_masktype) - || TYPE_MODE (real_masktype) == TYPE_MODE (optype)) - utype = real_masktype; + tree utype, optype = TREE_TYPE (mask); + if (VECTOR_TYPE_P (masktype) + || TYPE_MODE (masktype) == TYPE_MODE (optype)) + utype = masktype; else utype = lang_hooks.types.type_for_mode (TYPE_MODE (optype), 1); var = vect_get_new_ssa_name (utype, vect_scalar_var); - mask_arg = build1 (VIEW_CONVERT_EXPR, utype, mask_op); + tree mask_arg = build1 (VIEW_CONVERT_EXPR, utype, mask); gassign *new_stmt - = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_arg); + = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_arg); vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); mask_arg = var; - if (!useless_type_conversion_p (real_masktype, utype)) + if (!useless_type_conversion_p (masktype, utype)) { gcc_assert (TYPE_PRECISION (utype) - <= TYPE_PRECISION (real_masktype)); - var = vect_get_new_ssa_name (real_masktype, vect_scalar_var); + <= TYPE_PRECISION (masktype)); + var = vect_get_new_ssa_name (masktype, vect_scalar_var); new_stmt = gimple_build_assign (var, NOP_EXPR, mask_arg); vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); mask_arg = var; } src_op = build_zero_cst (srctype); - } - gimple *new_stmt = gimple_build_call (gs_info->decl, 5, src_op, ptr, op, - mask_arg, scale); - - if (!useless_type_conversion_p (vectype, rettype)) - { - gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype), - TYPE_VECTOR_SUBPARTS (rettype))); - op = vect_get_new_ssa_name (rettype, vect_simple_var); - gimple_call_set_lhs (new_stmt, op); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); - var = make_ssa_name (vec_dest); - op = build1 (VIEW_CONVERT_EXPR, vectype, op); - new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op); 
- vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + mask_op = mask_arg; } else { - var = make_ssa_name (vec_dest, new_stmt); - gimple_call_set_lhs (new_stmt, var); - vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + src_op = mask; + mask_op = mask; } + } + else + { + src_op = vect_build_zero_merge_argument (vinfo, stmt_info, rettype); + mask_op = vect_build_all_ones_mask (vinfo, stmt_info, masktype); + } - if (modifier == NARROW) - { - if ((j & 1) == 0) - { - prev_res = var; - continue; - } - var = permute_vec_elements (vinfo, prev_res, var, perm_mask, - stmt_info, gsi); - new_stmt = SSA_NAME_DEF_STMT (var); - } + tree scale = build_int_cst (scaletype, gs_info->scale); + gimple *new_stmt = gimple_build_call (gs_info->decl, 5, src_op, ptr, op, + mask_op, scale); - STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt); + if (!useless_type_conversion_p (vectype, rettype)) + { + gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype), + TYPE_VECTOR_SUBPARTS (rettype))); + op = vect_get_new_ssa_name (rettype, vect_simple_var); + gimple_call_set_lhs (new_stmt, op); + vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); + op = build1 (VIEW_CONVERT_EXPR, vectype, op); + new_stmt = gimple_build_assign (NULL_TREE, VIEW_CONVERT_EXPR, op); } - *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0]; + + return new_stmt; } /* Build a scatter store call while vectorizing STMT_INFO. Insert new @@ -4126,16 +3958,6 @@ vect_simd_lane_linear (tree op, class loop *loop, } } -/* Return the number of elements in vector type VECTYPE, which is associated - with a SIMD clone. At present these vectors always have a constant - length. */ - -static unsigned HOST_WIDE_INT -simd_clone_subparts (tree vectype) -{ - return TYPE_VECTOR_SUBPARTS (vectype).to_constant (); -} - /* Function vectorizable_simd_clone_call. Check if STMT_INFO performs a function call that can be vectorized @@ -4166,7 +3988,7 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info, size_t i, nargs; tree lhs, rtype, ratype; vec<constructor_elt, va_gc> *ret_ctor_elts = NULL; - int arg_offset = 0; + int masked_call_offset = 0; /* Is STMT a vectorizable call? */ gcall *stmt = dyn_cast <gcall *> (stmt_info->stmt); @@ -4181,7 +4003,7 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info, gcc_checking_assert (TREE_CODE (fndecl) == ADDR_EXPR); fndecl = TREE_OPERAND (fndecl, 0); gcc_checking_assert (TREE_CODE (fndecl) == FUNCTION_DECL); - arg_offset = 1; + masked_call_offset = 1; } if (fndecl == NULL_TREE) return false; @@ -4209,7 +4031,7 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info, return false; /* Process function arguments. */ - nargs = gimple_call_num_args (stmt) - arg_offset; + nargs = gimple_call_num_args (stmt) - masked_call_offset; /* Bail out if the function has zero arguments. 
*/ if (nargs == 0) @@ -4231,7 +4053,7 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info, thisarginfo.op = NULL_TREE; thisarginfo.simd_lane_linear = false; - int op_no = i + arg_offset; + int op_no = i + masked_call_offset; if (slp_node) op_no = vect_slp_child_index_for_operand (stmt, op_no); if (!vect_is_simple_use (vinfo, stmt_info, slp_node, @@ -4248,7 +4070,13 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info, if (thisarginfo.dt == vect_constant_def || thisarginfo.dt == vect_external_def) { - gcc_assert (vec_stmt || thisarginfo.vectype == NULL_TREE); + /* With SLP we determine the vector type of constants/externals + at analysis time, handling conflicts via + vect_maybe_update_slp_op_vectype. At transform time + we have a vector type recorded for SLP. */ + gcc_assert (!vec_stmt + || !slp_node + || thisarginfo.vectype != NULL_TREE); if (!vec_stmt) thisarginfo.vectype = get_vectype_for_scalar_type (vinfo, TREE_TYPE (op), @@ -4313,16 +4141,6 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info, arginfo.quick_push (thisarginfo); } - if (loop_vinfo - && !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant ()) - { - if (dump_enabled_p ()) - dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location, - "not considering SIMD clones; not yet supported" - " for variable-width vectors.\n"); - return false; - } - poly_uint64 vf = loop_vinfo ? LOOP_VINFO_VECT_FACTOR (loop_vinfo) : 1; unsigned group_size = slp_node ? SLP_TREE_LANES (slp_node) : 1; unsigned int badness = 0; @@ -4335,9 +4153,10 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info, { unsigned int this_badness = 0; unsigned int num_calls; - if (!constant_multiple_p (vf * group_size, - n->simdclone->simdlen, &num_calls) - || n->simdclone->nargs != nargs) + if (!constant_multiple_p (vf * group_size, n->simdclone->simdlen, + &num_calls) + || (!n->simdclone->inbranch && (masked_call_offset > 0)) + || nargs != n->simdclone->nargs) continue; if (num_calls != 1) this_badness += exact_log2 (num_calls) * 4096; @@ -4354,7 +4173,8 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info, case SIMD_CLONE_ARG_TYPE_VECTOR: if (!useless_type_conversion_p (n->simdclone->args[i].orig_type, - TREE_TYPE (gimple_call_arg (stmt, i + arg_offset)))) + TREE_TYPE (gimple_call_arg (stmt, + i + masked_call_offset)))) i = -1; else if (arginfo[i].dt == vect_constant_def || arginfo[i].dt == vect_external_def @@ -4409,6 +4229,17 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info, } if (i == (size_t) -1) continue; + if (masked_call_offset == 0 + && n->simdclone->inbranch + && n->simdclone->nargs > nargs) + { + gcc_assert (n->simdclone->args[n->simdclone->nargs - 1].arg_type == + SIMD_CLONE_ARG_TYPE_MASK); + /* Penalize using a masked SIMD clone in a non-masked loop, that is + not in a branch, as we'd have to construct an all-true mask. 
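Context for the comment above and the penalty applied just after it: an 'inbranch' clone carries a trailing mask argument, so choosing it for a call site that is not under a condition means synthesizing an all-true mask, which is why such a match is penalized unless the loop ends up fully masked anyway. A hypothetical example assuming -fopenmp-simd:

#pragma omp declare simd inbranch
int
scale2 (int x)
{
  return 2 * x;
}

void
use_unconditional (int *a, int n)
{
  for (int i = 0; i < n; i++)
    a[i] = scale2 (a[i]);	/* Unconditional call: the clone's mask must be all-true.  */
}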
*/ + if (!loop_vinfo || !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)) + this_badness += 64; + } if (bestn == NULL || this_badness < badness) { bestn = n; @@ -4431,12 +4262,13 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info, || arginfo[i].dt == vect_external_def) && bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR) { - tree arg_type = TREE_TYPE (gimple_call_arg (stmt, i + arg_offset)); + tree arg_type = TREE_TYPE (gimple_call_arg (stmt, + i + masked_call_offset)); arginfo[i].vectype = get_vectype_for_scalar_type (vinfo, arg_type, slp_node); if (arginfo[i].vectype == NULL || !constant_multiple_p (bestn->simdclone->simdlen, - simd_clone_subparts (arginfo[i].vectype))) + TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))) return false; } @@ -4451,10 +4283,11 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info, if (bestn->simdclone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_MASK) { + tree clone_arg_vectype = bestn->simdclone->args[i].vector_type; if (bestn->simdclone->mask_mode == VOIDmode) { - if (simd_clone_subparts (bestn->simdclone->args[i].vector_type) - != simd_clone_subparts (arginfo[i].vectype)) + if (maybe_ne (TYPE_VECTOR_SUBPARTS (clone_arg_vectype), + TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))) { /* FORNOW we only have partial support for vector-type masks that can't hold all of simdlen. */ @@ -4471,7 +4304,7 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info, if (!SCALAR_INT_MODE_P (TYPE_MODE (arginfo[i].vectype)) || maybe_ne (exact_div (bestn->simdclone->simdlen, num_mask_args), - simd_clone_subparts (arginfo[i].vectype))) + TYPE_VECTOR_SUBPARTS (arginfo[i].vectype))) { /* FORNOW we only have partial support for integer-type masks that represent the same number of lanes as the @@ -4539,22 +4372,48 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info, if (gimple_vuse (stmt) && slp_node) vinfo->any_known_not_updated_vssa = true; simd_clone_info.safe_push (bestn->decl); - for (i = 0; i < nargs; i++) - if ((bestn->simdclone->args[i].arg_type - == SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP) - || (bestn->simdclone->args[i].arg_type - == SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP)) - { - simd_clone_info.safe_grow_cleared (i * 3 + 1, true); - simd_clone_info.safe_push (arginfo[i].op); - tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op)) - ? size_type_node : TREE_TYPE (arginfo[i].op); - tree ls = build_int_cst (lst, arginfo[i].linear_step); - simd_clone_info.safe_push (ls); - tree sll = arginfo[i].simd_lane_linear - ? boolean_true_node : boolean_false_node; - simd_clone_info.safe_push (sll); - } + for (i = 0; i < bestn->simdclone->nargs; i++) + { + switch (bestn->simdclone->args[i].arg_type) + { + default: + continue; + case SIMD_CLONE_ARG_TYPE_LINEAR_CONSTANT_STEP: + case SIMD_CLONE_ARG_TYPE_LINEAR_REF_CONSTANT_STEP: + { + auto &clone_info = STMT_VINFO_SIMD_CLONE_INFO (stmt_info); + clone_info.safe_grow_cleared (i * 3 + 1, true); + clone_info.safe_push (arginfo[i].op); + tree lst = POINTER_TYPE_P (TREE_TYPE (arginfo[i].op)) + ? size_type_node : TREE_TYPE (arginfo[i].op); + tree ls = build_int_cst (lst, arginfo[i].linear_step); + clone_info.safe_push (ls); + tree sll = arginfo[i].simd_lane_linear + ? 
boolean_true_node : boolean_false_node; + clone_info.safe_push (sll); + } + break; + case SIMD_CLONE_ARG_TYPE_MASK: + if (loop_vinfo + && LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)) + vect_record_loop_mask (loop_vinfo, + &LOOP_VINFO_MASKS (loop_vinfo), + ncopies, vectype, op); + + break; + } + } + + if (!bestn->simdclone->inbranch && loop_vinfo) + { + if (dump_enabled_p () + && LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)) + dump_printf_loc (MSG_NOTE, vect_location, + "can't use a fully-masked loop because a" + " non-masked simd clone was selected.\n"); + LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false; + } + STMT_VINFO_TYPE (stmt_info) = call_simd_clone_vec_info_type; DUMP_VECT_SCOPE ("vectorizable_simd_clone_call"); /* vect_model_simple_cost (vinfo, stmt_info, ncopies, @@ -4595,6 +4454,8 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info, vec_oprnds.safe_grow_cleared (nargs, true); for (j = 0; j < ncopies; ++j) { + poly_uint64 callee_nelements; + poly_uint64 caller_nelements; /* Build argument list for the vectorized call. */ if (j == 0) vargs.create (nargs); @@ -4605,21 +4466,23 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info, { unsigned int k, l, m, o; tree atype; - op = gimple_call_arg (stmt, i + arg_offset); + op = gimple_call_arg (stmt, i + masked_call_offset); switch (bestn->simdclone->args[i].arg_type) { case SIMD_CLONE_ARG_TYPE_VECTOR: atype = bestn->simdclone->args[i].vector_type; - o = vector_unroll_factor (nunits, - simd_clone_subparts (atype)); + caller_nelements = TYPE_VECTOR_SUBPARTS (arginfo[i].vectype); + callee_nelements = TYPE_VECTOR_SUBPARTS (atype); + o = vector_unroll_factor (nunits, callee_nelements); for (m = j * o; m < (j + 1) * o; m++) { - if (simd_clone_subparts (atype) - < simd_clone_subparts (arginfo[i].vectype)) + if (known_lt (callee_nelements, caller_nelements)) { poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (atype)); - k = (simd_clone_subparts (arginfo[i].vectype) - / simd_clone_subparts (atype)); + if (!constant_multiple_p (caller_nelements, + callee_nelements, &k)) + gcc_unreachable (); + gcc_assert ((k & (k - 1)) == 0); if (m == 0) { @@ -4650,8 +4513,9 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info, } else { - k = (simd_clone_subparts (atype) - / simd_clone_subparts (arginfo[i].vectype)); + if (!constant_multiple_p (callee_nelements, + caller_nelements, &k)) + gcc_unreachable (); gcc_assert ((k & (k - 1)) == 0); vec<constructor_elt, va_gc> *ctor_elts; if (k != 1) @@ -4682,14 +4546,14 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info, if (!useless_type_conversion_p (TREE_TYPE (vec_oprnd0), atype)) { - vec_oprnd0 - = build1 (VIEW_CONVERT_EXPR, atype, vec_oprnd0); + vec_oprnd0 = build1 (VIEW_CONVERT_EXPR, atype, + vec_oprnd0); gassign *new_stmt = gimple_build_assign (make_ssa_name (atype), vec_oprnd0); vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi); - vargs.safe_push (gimple_assign_lhs (new_stmt)); + vargs.safe_push (gimple_get_lhs (new_stmt)); } else vargs.safe_push (vec_oprnd0); @@ -4713,20 +4577,19 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info, tree elt_type = TREE_TYPE (atype); tree one = fold_convert (elt_type, integer_one_node); tree zero = fold_convert (elt_type, integer_zero_node); - o = vector_unroll_factor (nunits, - simd_clone_subparts (atype)); + callee_nelements = TYPE_VECTOR_SUBPARTS (atype); + caller_nelements = TYPE_VECTOR_SUBPARTS (arginfo[i].vectype); + o = 
@@ -4713,20 +4577,19 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
		  tree elt_type = TREE_TYPE (atype);
		  tree one = fold_convert (elt_type, integer_one_node);
		  tree zero = fold_convert (elt_type, integer_zero_node);
-		  o = vector_unroll_factor (nunits,
-					    simd_clone_subparts (atype));
+		  callee_nelements = TYPE_VECTOR_SUBPARTS (atype);
+		  caller_nelements = TYPE_VECTOR_SUBPARTS (arginfo[i].vectype);
+		  o = vector_unroll_factor (nunits, callee_nelements);
		  for (m = j * o; m < (j + 1) * o; m++)
		    {
-		      if (simd_clone_subparts (atype)
-			  < simd_clone_subparts (arginfo[i].vectype))
+		      if (maybe_lt (callee_nelements, caller_nelements))
			{
			  /* The mask type has fewer elements than simdlen.  */
			  /* FORNOW */
			  gcc_unreachable ();
			}
-		      else if (simd_clone_subparts (atype)
-			       == simd_clone_subparts (arginfo[i].vectype))
+		      else if (known_eq (callee_nelements, caller_nelements))
			{
			  /* The SIMD clone function has the same number of
			     elements as the current function.  */
@@ -4740,6 +4603,24 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
			      vec_oprnds_i[i] = 0;
			    }
			  vec_oprnd0 = vec_oprnds[i][vec_oprnds_i[i]++];
+			  if (loop_vinfo
+			      && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
+			    {
+			      vec_loop_masks *loop_masks
+				= &LOOP_VINFO_MASKS (loop_vinfo);
+			      tree loop_mask
+				= vect_get_loop_mask (loop_vinfo, gsi,
+						      loop_masks, ncopies,
+						      vectype, j);
+			      vec_oprnd0
+				= prepare_vec_mask (loop_vinfo,
+						    TREE_TYPE (loop_mask),
+						    loop_mask, vec_oprnd0,
+						    gsi);
+			      loop_vinfo->vec_cond_masked_set.add ({ vec_oprnd0,
+								     loop_mask });
+
+			    }
			  vec_oprnd0
			    = build3 (VEC_COND_EXPR, atype, vec_oprnd0,
				      build_vector_from_val (atype, one),
@@ -4764,9 +4645,9 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
		{
		  atype = bestn->simdclone->args[i].vector_type;
		  /* Guess the number of lanes represented by atype.  */
-		  unsigned HOST_WIDE_INT atype_subparts
+		  poly_uint64 atype_subparts
		    = exact_div (bestn->simdclone->simdlen,
-				 num_mask_args).to_constant ();
+				 num_mask_args);
		  o = vector_unroll_factor (nunits, atype_subparts);
		  for (m = j * o; m < (j + 1) * o; m++)
		    {
@@ -4779,16 +4660,16 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
						 &vec_oprnds[i]);
			  vec_oprnds_i[i] = 0;
			}
-		      if (atype_subparts
-			  < simd_clone_subparts (arginfo[i].vectype))
+		      if (maybe_lt (atype_subparts,
+				    TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)))
			{
			  /* The mask argument has fewer elements than the
			     input vector.  */
			  /* FORNOW */
			  gcc_unreachable ();
			}
-		      else if (atype_subparts
-			       == simd_clone_subparts (arginfo[i].vectype))
+		      else if (known_eq (atype_subparts,
+					 TYPE_VECTOR_SUBPARTS (arginfo[i].vectype)))
			{
			  /* The vector mask argument matches the input
			     in the number of lanes, but not necessarily
@@ -4903,11 +4784,69 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
	      }
	  }

+      if (masked_call_offset == 0
+	  && bestn->simdclone->inbranch
+	  && bestn->simdclone->nargs > nargs)
+	{
+	  unsigned long m, o;
+	  size_t mask_i = bestn->simdclone->nargs - 1;
+	  tree mask;
+	  gcc_assert (bestn->simdclone->args[mask_i].arg_type ==
+		      SIMD_CLONE_ARG_TYPE_MASK);
+
+	  tree masktype = bestn->simdclone->args[mask_i].vector_type;
+	  callee_nelements = TYPE_VECTOR_SUBPARTS (masktype);
+	  o = vector_unroll_factor (nunits, callee_nelements);
+	  for (m = j * o; m < (j + 1) * o; m++)
+	    {
+	      if (loop_vinfo && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
+		{
+		  vec_loop_masks *loop_masks = &LOOP_VINFO_MASKS (loop_vinfo);
+		  mask = vect_get_loop_mask (loop_vinfo, gsi, loop_masks,
+					     ncopies, vectype, j);
+		}
+	      else
+		mask = vect_build_all_ones_mask (vinfo, stmt_info, masktype);
+
+	      if (!useless_type_conversion_p (TREE_TYPE (mask), masktype))
+		{
+		  gassign *new_stmt;
+		  if (bestn->simdclone->mask_mode != VOIDmode)
+		    {
+		      /* This means we are dealing with integer mask modes.
+			 First convert to an integer type with the same size
+			 as the current vector type.  */
+		      unsigned HOST_WIDE_INT intermediate_size
+			= tree_to_uhwi (TYPE_SIZE (TREE_TYPE (mask)));
+		      tree mid_int_type =
+			build_nonstandard_integer_type (intermediate_size, 1);
+		      mask = build1 (VIEW_CONVERT_EXPR, mid_int_type, mask);
+		      new_stmt
+			= gimple_build_assign (make_ssa_name (mid_int_type),
+					       mask);
+		      gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
+		      /* Then zero-extend to the mask mode.  */
+		      mask = fold_build1 (NOP_EXPR, masktype,
+					  gimple_get_lhs (new_stmt));
+		    }
+		  else
+		    mask = build1 (VIEW_CONVERT_EXPR, masktype, mask);
+
+		  new_stmt = gimple_build_assign (make_ssa_name (masktype),
+						  mask);
+		  vect_finish_stmt_generation (vinfo, stmt_info,
					       new_stmt, gsi);
+		  mask = gimple_assign_lhs (new_stmt);
+		}
+	      vargs.safe_push (mask);
+	    }
+	}
+
      gcall *new_call = gimple_build_call_vec (fndecl, vargs);
      if (vec_dest)
	{
	  gcc_assert (ratype
-		      || known_eq (simd_clone_subparts (rtype), nunits));
+		      || known_eq (TYPE_VECTOR_SUBPARTS (rtype), nunits));
	  if (ratype)
	    new_temp = create_tmp_var (ratype);
	  else if (useless_type_conversion_p (vectype, rtype))
@@ -4921,13 +4860,13 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
      if (vec_dest)
	{
-	  if (!multiple_p (simd_clone_subparts (vectype), nunits))
+	  if (!multiple_p (TYPE_VECTOR_SUBPARTS (vectype), nunits))
	    {
	      unsigned int k, l;
	      poly_uint64 prec = GET_MODE_BITSIZE (TYPE_MODE (vectype));
	      poly_uint64 bytes = GET_MODE_SIZE (TYPE_MODE (vectype));
	      k = vector_unroll_factor (nunits,
-					simd_clone_subparts (vectype));
+					TYPE_VECTOR_SUBPARTS (vectype));
	      gcc_assert ((k & (k - 1)) == 0);
	      for (l = 0; l < k; l++)
		{
@@ -4957,10 +4896,12 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
	      vect_clobber_variable (vinfo, stmt_info, gsi, new_temp);
	      continue;
	    }
-	  else if (!multiple_p (nunits, simd_clone_subparts (vectype)))
+	  else if (!multiple_p (nunits, TYPE_VECTOR_SUBPARTS (vectype)))
	    {
-	      unsigned int k = (simd_clone_subparts (vectype)
-				/ simd_clone_subparts (rtype));
+	      unsigned int k;
+	      if (!constant_multiple_p (TYPE_VECTOR_SUBPARTS (vectype),
+					TYPE_VECTOR_SUBPARTS (rtype), &k))
+		gcc_unreachable ();
	      gcc_assert ((k & (k - 1)) == 0);
	      if ((j & (k - 1)) == 0)
		vec_alloc (ret_ctor_elts, k);
@@ -4968,7 +4909,7 @@ vectorizable_simd_clone_call (vec_info *vinfo, stmt_vec_info stmt_info,
		{
		  unsigned int m, o;
		  o = vector_unroll_factor (nunits,
-					    simd_clone_subparts (rtype));
+					    TYPE_VECTOR_SUBPARTS (rtype));
		  for (m = 0; m < o; m++)
		    {
		      tree tem = build4 (ARRAY_REF, rtype, new_temp,
@@ -10112,13 +10053,6 @@ vectorizable_load (vec_info *vinfo,
  dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info), *first_dr_info = NULL;
  ensure_base_align (dr_info);

-  if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
-    {
-      vect_build_gather_load_calls (vinfo, stmt_info, gsi, vec_stmt, &gs_info,
-				    mask, cost_vec);
-      return true;
-    }
-
  if (memory_access_type == VMAT_INVARIANT)
    {
      gcc_assert (!grouped_load && !mask && !bb_vinfo);
@@ -11016,6 +10950,134 @@ vectorizable_load (vec_info *vinfo,
		  new_stmt = call;
		  data_ref = NULL_TREE;
		}
+	      else if (gs_info.decl)
+		{
+		  /* The builtin decls path for gather is legacy, x86 only.  */
+		  gcc_assert (!final_len && nunits.is_constant ());
+		  if (costing_p)
+		    {
+		      unsigned int cnunits = vect_nunits_for_cost (vectype);
+		      inside_cost
+			= record_stmt_cost (cost_vec, cnunits, scalar_load,
+					    stmt_info, 0, vect_body);
+		      continue;
+		    }
+		  poly_uint64 offset_nunits
+		    = TYPE_VECTOR_SUBPARTS (gs_info.offset_vectype);
+		  if (known_eq (nunits, offset_nunits))
+		    {
+		      new_stmt = vect_build_one_gather_load_call
+				   (vinfo, stmt_info, gsi, &gs_info,
+				    dataref_ptr, vec_offsets[vec_num * j + i],
+				    final_mask);
+		      data_ref = NULL_TREE;
+		    }
+		  else if (known_eq (nunits, offset_nunits * 2))
+		    {
+		      /* We have an offset vector with half the number of
			 lanes but the builtins will produce full vectype
			 data with just the lower lanes filled.  */
+		      new_stmt = vect_build_one_gather_load_call
+				   (vinfo, stmt_info, gsi, &gs_info,
+				    dataref_ptr,
+				    vec_offsets[2 * vec_num * j + 2 * i],
+				    final_mask);
+		      tree low = make_ssa_name (vectype);
+		      gimple_set_lhs (new_stmt, low);
+		      vect_finish_stmt_generation (vinfo, stmt_info,
						   new_stmt, gsi);
+
+		      /* Now put the upper half of final_mask into its lower
			 half.  */
+		      if (final_mask
+			  && !SCALAR_INT_MODE_P
+				(TYPE_MODE (TREE_TYPE (final_mask))))
+			{
+			  int count = nunits.to_constant ();
+			  vec_perm_builder sel (count, count, 1);
+			  sel.quick_grow (count);
+			  for (int i = 0; i < count; ++i)
+			    sel[i] = i | (count / 2);
+			  vec_perm_indices indices (sel, 2, count);
+			  tree perm_mask = vect_gen_perm_mask_checked
+					     (TREE_TYPE (final_mask), indices);
+			  new_stmt = gimple_build_assign (NULL_TREE,
							  VEC_PERM_EXPR,
							  final_mask,
							  final_mask,
							  perm_mask);
+			  final_mask = make_ssa_name (TREE_TYPE (final_mask));
+			  gimple_set_lhs (new_stmt, final_mask);
+			  vect_finish_stmt_generation (vinfo, stmt_info,
						       new_stmt, gsi);
+			}
+		      else if (final_mask)
+			{
+			  new_stmt = gimple_build_assign (NULL_TREE,
							  VEC_UNPACK_HI_EXPR,
							  final_mask);
+			  final_mask = make_ssa_name
					 (truth_type_for (gs_info.offset_vectype));
+			  gimple_set_lhs (new_stmt, final_mask);
+			  vect_finish_stmt_generation (vinfo, stmt_info,
						       new_stmt, gsi);
+			}
+
+		      new_stmt = vect_build_one_gather_load_call
+				   (vinfo, stmt_info, gsi, &gs_info,
+				    dataref_ptr,
+				    vec_offsets[2 * vec_num * j + 2 * i + 1],
+				    final_mask);
+		      tree high = make_ssa_name (vectype);
+		      gimple_set_lhs (new_stmt, high);
+		      vect_finish_stmt_generation (vinfo, stmt_info,
						   new_stmt, gsi);
+
+		      /* Compose the low and high halves.  */
+		      int count = nunits.to_constant ();
+		      vec_perm_builder sel (count, count, 1);
+		      sel.quick_grow (count);
+		      for (int i = 0; i < count; ++i)
+			sel[i] = i < count / 2 ? i : i + count / 2;
+		      vec_perm_indices indices (sel, 2, count);
+		      tree perm_mask
+			= vect_gen_perm_mask_checked (vectype, indices);
+		      new_stmt = gimple_build_assign (NULL_TREE,
						      VEC_PERM_EXPR,
						      low, high, perm_mask);
+		      data_ref = NULL_TREE;
+		    }
+		  else if (known_eq (nunits * 2, offset_nunits))
+		    {
+		      /* We have an offset vector with double the number of
			 lanes.  Select the low/high part accordingly.  */
+		      vec_offset = vec_offsets[(vec_num * j + i) / 2];
+		      if ((vec_num * j + i) & 1)
+			{
+			  int count = offset_nunits.to_constant ();
+			  vec_perm_builder sel (count, count, 1);
+			  sel.quick_grow (count);
+			  for (int i = 0; i < count; ++i)
+			    sel[i] = i | (count / 2);
+			  vec_perm_indices indices (sel, 2, count);
+			  tree perm_mask = vect_gen_perm_mask_checked
+					     (TREE_TYPE (vec_offset), indices);
+			  new_stmt = gimple_build_assign (NULL_TREE,
							  VEC_PERM_EXPR,
							  vec_offset,
							  vec_offset,
							  perm_mask);
+			  vec_offset = make_ssa_name (TREE_TYPE (vec_offset));
+			  gimple_set_lhs (new_stmt, vec_offset);
+			  vect_finish_stmt_generation (vinfo, stmt_info,
						       new_stmt, gsi);
+			}
+		      new_stmt = vect_build_one_gather_load_call
+				   (vinfo, stmt_info, gsi, &gs_info,
+				    dataref_ptr, vec_offset, final_mask);
+		      data_ref = NULL_TREE;
+		    }
+		  else
+		    gcc_unreachable ();
+		}
	      else
		{
		  /* Emulated gather-scatter.  */
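
Illustration (editor's note, not part of the commit): the new gs_info.decl branch open-codes the legacy x86 gather builtins, including the cases where the offset vector and the data vector disagree on lane count. A loop of roughly this shape can reach that path; the -O3 -mavx2 setting mentioned in the comment is an assumption, not something stated by the commit.

    /* 32-bit floats gathered through 64-bit indices: each data vector covers
       twice as many lanes as one offset vector, so two gather calls are
       merged with a permute, much as in the hunk above (e.g. -O3 -mavx2).  */
    void
    gather_f32_i64 (float *restrict dst, const float *restrict src,
                    const long long *restrict idx, int n)
    {
      for (int i = 0; i < n; i++)
        dst[i] = src[idx[i]];
    }
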
diff --git a/gcc/tree.cc b/gcc/tree.cc
index 69369c6..f9fa7b7 100644
--- a/gcc/tree.cc
+++ b/gcc/tree.cc
@@ -9748,6 +9748,10 @@ set_call_expr_flags (tree decl, int flags)
			      DECL_ATTRIBUTES (decl));
  if ((flags & ECF_TM_PURE) && flag_tm)
    apply_tm_attr (decl, get_identifier ("transaction_pure"));
+  if ((flags & ECF_XTHROW))
+    DECL_ATTRIBUTES (decl)
+      = tree_cons (get_identifier ("expected_throw"),
+		   NULL, DECL_ATTRIBUTES (decl));
  /* Looping const or pure is implied by noreturn.
     There is currently no way to declare looping const or looping pure alone.  */
  gcc_assert (!(flags & ECF_LOOPING_CONST_OR_PURE)
@@ -9918,6 +9922,23 @@ build_common_builtin_nodes (void)
			"__builtin_nonlocal_goto",
			ECF_NORETURN | ECF_NOTHROW);

+  tree ptr_ptr_type_node = build_pointer_type (ptr_type_node);
+
+  ftype = build_function_type_list (void_type_node,
+				    ptr_type_node, // void *chain
+				    ptr_type_node, // void *func
+				    ptr_ptr_type_node, // void **dst
+				    NULL_TREE);
+  local_define_builtin ("__builtin_nested_func_ptr_created", ftype,
+			BUILT_IN_NESTED_PTR_CREATED,
+			"__builtin_nested_func_ptr_created", ECF_NOTHROW);
+
+  ftype = build_function_type_list (void_type_node,
+				    NULL_TREE);
+  local_define_builtin ("__builtin_nested_func_ptr_deleted", ftype,
+			BUILT_IN_NESTED_PTR_DELETED,
+			"__builtin_nested_func_ptr_deleted", ECF_NOTHROW);
+
  ftype = build_function_type_list (void_type_node,
				    ptr_type_node, ptr_type_node, NULL_TREE);
  local_define_builtin ("__builtin_setjmp_setup", ftype,
@@ -9960,7 +9981,8 @@ build_common_builtin_nodes (void)
      ftype = build_function_type_list (void_type_node, NULL_TREE);
      local_define_builtin ("__builtin_cxa_end_cleanup", ftype,
			    BUILT_IN_CXA_END_CLEANUP,
-			    "__cxa_end_cleanup", ECF_NORETURN | ECF_LEAF);
+			    "__cxa_end_cleanup",
+			    ECF_NORETURN | ECF_XTHROW | ECF_LEAF);
    }

  ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
@@ -9969,7 +9991,7 @@ build_common_builtin_nodes (void)
			((targetm_common.except_unwind_info (&global_options)
			  == UI_SJLJ)
			 ? "_Unwind_SjLj_Resume" : "_Unwind_Resume"),
-			ECF_NORETURN);
+			ECF_NORETURN | ECF_XTHROW);

  if (builtin_decl_explicit (BUILT_IN_RETURN_ADDRESS) == NULL_TREE)
    {
diff --git a/gcc/tree.h b/gcc/tree.h
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -2235,6 +2235,7 @@ class auto_suppress_location_wrappers
#define SET_TYPE_MODE(NODE, MODE) \
  (TYPE_CHECK (NODE)->type_common.mode = (MODE))

+extern unsigned int element_precision (const_tree);
extern machine_mode element_mode (const_tree);
extern machine_mode vector_type_mode (const_tree);
extern unsigned int vector_element_bits (const_tree);
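
Illustration (editor's note, not part of the commit): __builtin_nested_func_ptr_created and __builtin_nested_func_ptr_deleted, registered in the tree.cc hunk above, are the runtime hooks used when descriptors for GNU C nested functions are allocated on the heap rather than built as on-stack trampolines (selected by an option along the lines of -ftrampoline-impl=heap on targets that support it; treat the exact spelling as an assumption). The classic source pattern that needs them is a nested function whose address escapes:

    /* GNU C nested function: passing 'add_bias' as a function pointer forces
       the compiler to materialize a pointer to it; with the heap-based
       implementation, that pointer's lifetime is managed through the two
       builtins registered above.  */
    typedef int (*intfn) (int);

    static int
    apply (intfn f, int x)
    {
      return f (x);
    }

    int
    outer (int bias, int x)
    {
      int add_bias (int v) { return v + bias; }
      return apply (add_bias, x);
    }
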