aboutsummaryrefslogtreecommitdiff
path: root/gcc
diff options
context:
space:
mode:
Diffstat (limited to 'gcc')
-rw-r--r--gcc/ChangeLog239
-rw-r--r--gcc/DATESTAMP2
-rw-r--r--gcc/Makefile.in1
-rw-r--r--gcc/c-family/ChangeLog16
-rw-r--r--gcc/c-family/c-omp.cc4
-rw-r--r--gcc/c/ChangeLog5
-rw-r--r--gcc/c/c-typeck.cc13
-rw-r--r--gcc/config/i386/i386-expand.cc85
-rw-r--r--gcc/config/i386/i386-features.cc4
-rw-r--r--gcc/config/i386/i386-options.cc4
-rw-r--r--gcc/config/i386/i386.cc89
-rw-r--r--gcc/config/i386/i386.h4
-rw-r--r--gcc/config/i386/i386.md4
-rw-r--r--gcc/config/i386/mmx.md60
-rw-r--r--gcc/config/i386/predicates.md26
-rw-r--r--gcc/config/i386/sse.md2
-rw-r--r--gcc/config/loongarch/lasx.md4
-rw-r--r--gcc/config/loongarch/loongarch.cc126
-rw-r--r--gcc/config/loongarch/lsx.md4
-rw-r--r--gcc/config/s390/s390.cc16
-rw-r--r--gcc/config/s390/s390.md83
-rw-r--r--gcc/config/s390/vector.md60
-rw-r--r--gcc/cp/ChangeLog19
-rw-r--r--gcc/cp/pt.cc4
-rw-r--r--gcc/cp/semantics.cc8
-rw-r--r--gcc/diagnostic-state-to-dot.cc6
-rw-r--r--gcc/doc/sourcebuild.texi3
-rw-r--r--gcc/fortran/ChangeLog11
-rw-r--r--gcc/gimplify.cc321
-rw-r--r--gcc/internal-fn.cc27
-rw-r--r--gcc/internal-fn.h1
-rw-r--r--gcc/pta-andersen.cc2565
-rw-r--r--gcc/pta-andersen.h31
-rw-r--r--gcc/testsuite/ChangeLog392
-rw-r--r--gcc/testsuite/g++.dg/cpp0x/constexpr-array29.C13
-rw-r--r--gcc/testsuite/g++.target/aarch64/sme/sme_throw_1.C55
-rw-r--r--gcc/testsuite/g++.target/aarch64/sme/sme_throw_2.C4
-rw-r--r--gcc/testsuite/gcc.dg/20021014-1.c2
-rw-r--r--gcc/testsuite/gcc.dg/aru-2.c2
-rw-r--r--gcc/testsuite/gcc.dg/nest.c2
-rw-r--r--gcc/testsuite/gcc.dg/pr121035.c94
-rw-r--r--gcc/testsuite/gcc.dg/pr32450.c3
-rw-r--r--gcc/testsuite/gcc.dg/pr43643.c2
-rw-r--r--gcc/testsuite/gcc.dg/vect/bb-slp-39.c3
-rw-r--r--gcc/testsuite/gcc.dg/vect/pr121049.c25
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-reduc-cond-1.c59
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-reduc-cond-2.c61
-rw-r--r--gcc/testsuite/gcc.dg/vect/vect-reduc-cond-3.c56
-rw-r--r--gcc/testsuite/gcc.target/i386/pr104447.c3
-rw-r--r--gcc/testsuite/gcc.target/i386/pr113122-3.c3
-rw-r--r--gcc/testsuite/gcc.target/i386/pr119386-1.c4
-rw-r--r--gcc/testsuite/gcc.target/i386/pr119386-2.c4
-rw-r--r--gcc/testsuite/gcc.target/i386/pr121062-1.c34
-rw-r--r--gcc/testsuite/gcc.target/i386/pr121062-2.c14
-rw-r--r--gcc/testsuite/gcc.target/i386/pr121062-3a.c23
-rw-r--r--gcc/testsuite/gcc.target/i386/pr121062-3b.c6
-rw-r--r--gcc/testsuite/gcc.target/i386/pr121062-3c.c6
-rw-r--r--gcc/testsuite/gcc.target/i386/pr121062-4.c14
-rw-r--r--gcc/testsuite/gcc.target/i386/pr121062-5.c13
-rw-r--r--gcc/testsuite/gcc.target/i386/pr121062-6.c13
-rw-r--r--gcc/testsuite/gcc.target/i386/pr121062-7.c13
-rw-r--r--gcc/testsuite/gcc.target/loongarch/pr121064.c38
-rw-r--r--gcc/testsuite/gcc.target/s390/isfinite-isinf-isnormal-signbit-2.c6
-rw-r--r--gcc/testsuite/gcc.target/s390/isfinite-isinf-isnormal-signbit-3.c6
-rw-r--r--gcc/testsuite/gcc.target/s390/signbit-1.c40
-rw-r--r--gcc/testsuite/gcc.target/s390/signbit-2.c40
-rw-r--r--gcc/testsuite/gcc.target/s390/signbit-3.c152
-rw-r--r--gcc/testsuite/gcc.target/s390/signbit-4.c55
-rw-r--r--gcc/testsuite/gcc.target/s390/signbit-5.c35
-rw-r--r--gcc/testsuite/gcc.target/s390/signbit.h36
-rw-r--r--gcc/testsuite/gcc.target/s390/vector/vlgv-zero-extend-1.c71
-rw-r--r--gcc/testsuite/lib/target-supports.exp23
-rw-r--r--gcc/tree-if-conv.cc186
-rw-r--r--gcc/tree-inline.cc5
-rw-r--r--gcc/tree-pretty-print.cc8
-rw-r--r--gcc/tree-ssa-pre.cc21
-rw-r--r--gcc/tree-ssa-structalias.cc3366
-rw-r--r--gcc/tree-ssa-structalias.h217
-rw-r--r--gcc/tree-vect-slp.cc10
-rw-r--r--gcc/tree-vect-stmts.cc15
-rw-r--r--gcc/tree.h6
81 files changed, 5619 insertions, 3487 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 7459891..5ffa13e 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,242 @@
+2025-07-16 David Malcolm <dmalcolm@redhat.com>
+
+ * diagnostic-state-to-dot.cc (state_diagram::m_show_tags): Drop
+ unused field.
+
+2025-07-16 Kwok Cheung Yeung <kcyeung@baylibre.com>
+
+ * gimplify.cc (gimplify_omp_affinity): Use OMP_ITERATOR_DECL_P.
+ (compute_omp_iterator_count): New.
+ (build_omp_iterator_loop): New.
+ (gimplify_omp_depend): Use OMP_ITERATOR_DECL_P,
+ compute_omp_iterator_count and build_omp_iterator_loop.
+ * tree-inline.cc (copy_tree_body_r): Use OMP_ITERATOR_DECL_P.
+ * tree-pretty-print.cc (dump_omp_clause): Likewise.
+ * tree.h (OMP_ITERATOR_DECL_P): New macro.
+
+2025-07-16 Uros Bizjak <ubizjak@gmail.com>
+
+ PR target/121062
+ * config/i386/i386.cc (ix86_convert_const_vector_to_integer):
+ Handle E_V1SImode and E_V1DImode.
+ * config/i386/mmx.md (V_16_32_64): Add V1SI, V2BF and V1DI.
+ (mmxinsnmode): Add V1DI and V1SI.
+ Add V_16_32_64 splitter for constant vector loads from constant
+ vector pool.
+ (V_16_32_64:*mov<mode>_imm): Moved after V_16_32_64 splitter.
+ Replace lowpart_subreg with adjust_address.
+
+2025-07-16 H.J. Lu <hjl.tools@gmail.com>
+
+ PR target/120881
+ PR testsuite/121078
+ * config/i386/i386-options.cc (ix86_option_override_internal):
+ Warn -pg without -mfentry only on glibc targets.
+
+2025-07-16 Uros Bizjak <ubizjak@gmail.com>
+
+ * config/i386/i386-expand.cc (ix86_expand_move):
+ Use MEM_P predicate instead of open coding it.
+ (ix86_erase_embedded_rounding):
+ Use NONJUMP_INSN_P predicate instead of open coding it.
+ * config/i386/i386-features.cc (convertible_comparison_p):
+ Use REG_P predicate instead of open coding it.
+ * config/i386/i386.cc (ix86_rtx_costs):
+ Use SUBREG_P predicate instead of open coding it.
+
+2025-07-16 Uros Bizjak <ubizjak@gmail.com>
+
+ * config/i386/i386.cc (symbolic_reference_mentioned_p):
+ Use LABEL_REF_P predicate instead of open coding it.
+ (ix86_legitimate_constant_p): Ditto.
+ (legitimate_pic_address_disp_p): Ditto.
+ (ix86_legitimate_address_p): Ditto.
+ (legitimize_pic_address): Ditto.
+ (ix86_print_operand): Ditto.
+ (ix86_print_operand_address_as): Ditto.
+ (ix86_rip_relative_addr_p): Ditto.
+ * config/i386/i386.h (SYMBOLIC_CONST): Ditto.
+ * config/i386/i386.md (*anddi_1 to *andsi_1_zext splitter): Ditto.
+ * config/i386/predicates.md (symbolic_operand): Ditto.
+ (local_symbolic_operand): Ditto.
+ (vsib_address_operand): Ditto.
+
+2025-07-16 Uros Bizjak <ubizjak@gmail.com>
+
+ * config/i386/i386-expand.cc (ix86_expand_move):
+ Use SYMBOL_REF_P predicate instead of open coding it.
+ (ix86_split_long_move): Ditto.
+ (construct_plt_address): Ditto.
+ (ix86_expand_call): Ditto.
+ (ix86_notrack_prefixed_insn_p): Ditto.
+ * config/i386/i386-features.cc
+ (rest_of_insert_endbr_and_patchable_area): Ditto.
+ * config/i386/i386.cc (symbolic_reference_mentioned_p): Ditto.
+ (ix86_force_load_from_GOT_p): Ditto.
+ (ix86_legitimate_constant_p): Ditto.
+ (legitimate_pic_operand_p): Ditto.
+ (legitimate_pic_address_disp_p): Ditto.
+ (ix86_legitimate_address_p): Ditto.
+ (legitimize_pic_address): Ditto.
+ (ix86_legitimize_address): Ditto.
+ (ix86_delegitimize_tls_address): Ditto.
+ (ix86_print_operand): Ditto.
+ (ix86_print_operand_address_as): Ditto.
+ (ix86_rip_relative_addr_p): Ditto.
+ (symbolic_base_address_p): Ditto.
+ * config/i386/i386.h (SYMBOLIC_CONST): Ditto.
+ * config/i386/i386.md (*anddi_1 to *andsi_1_zext splitter): Ditto.
+ * config/i386/predicates.md (symbolic_operand): Ditto.
+ (local_symbolic_operand): Ditto.
+ (local_func_symbolic_operand): Ditto.
+
+2025-07-16 Uros Bizjak <ubizjak@gmail.com>
+
+ * config/i386/i386-expand.cc (ix86_expand_vector_logical_operator):
+ Use CONST_VECTOR_P instead of open coding it.
+ (ix86_expand_int_sse_cmp): Ditto.
+ (ix86_extract_perm_from_pool_constant): Ditto.
+ (ix86_split_to_parts): Ditto.
+ (const_vector_equal_evenodd_p): Ditto.
+ * config/i386/i386.cc (ix86_print_operand): Ditto.
+ * config/i386/predicates.md (zero_extended_scalar_load_operand): Ditto.
+ (float_vector_all_ones_operand): Ditto.
+ * config/i386/sse.md (avx512vl_vextractf128<mode>): Ditto.
+
+2025-07-16 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/121049
+ * internal-fn.h (widening_evenodd_fn_p): Declare.
+ * internal-fn.cc (widening_evenodd_fn_p): New function.
+ * tree-vect-stmts.cc (vectorizable_conversion): When using
+ an even/odd widening function disable loop masking.
+
+2025-07-16 Andrew Pinski <quic_apinski@quicinc.com>
+
+ PR tree-optimization/119920
+ PR tree-optimization/112324
+ PR tree-optimization/110015
+ * tree-if-conv.cc (find_different_opnum): New function.
+ (factor_out_operators): New function.
+ (predicate_scalar_phi): Call factor_out_operators when
+ there is only 2 elements of a phi.
+
+2025-07-16 Andrew Pinski <quic_apinski@quicinc.com>
+
+ * tree-if-conv.cc (fold_build_cond_expr): Return early if lhs and rhs
+ are the same.
+
+2025-07-16 Andrew Pinski <quic_apinski@quicinc.com>
+
+ * tree-if-conv.cc (combine_blocks): Remove predicated
+ dynamic array.
+
+2025-07-16 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/121116
+ * tree-vect-loop.cc (vectorizable_induction): Use the
+ step vector element type for further processing.
+
+2025-07-16 Andrew Stubbs <ams@baylibre.com>
+
+ * config/gcn/gcn-valu.md (add<mode>3_vcc_dup<exec_vcc>): Change
+ operand 2 to allow gcn_alu_operand. Swap the operands in the VCC
+ update RTL.
+ (add<mode>3_vcc_zext_dup): Likewise.
+ (add<mode>3_vcc_zext_dup_exec): Likewise.
+ (add<mode>3_vcc_zext_dup2): Likewise.
+ (add<mode>3_vcc_zext_dup2_exec): Likewise.
+
+2025-07-16 Spencer Abson <spencer.abson@arm.com>
+
+ PR target/117850
+ * config/aarch64/aarch64-builtins.cc (LO_HI_PAIRINGS): New, group the
+ lo/hi pairs from aarch64-builtin-pairs.def.
+ (aarch64_get_highpart_builtin): New function.
+ (aarch64_v128_highpart_ref): New function, helper to look for vector
+ highparts.
+ (aarch64_build_vector_cst): New function, helper to build duplicated
+ VECTOR_CSTs.
+ (aarch64_fold_lo_call_to_hi): New function.
+ (aarch64_general_gimple_fold_builtin): Add cases for the lo builtins
+ in aarch64-builtin-pairs.def.
+	* config/aarch64/aarch64-builtin-pairs.def: New file, declare the
+	pairs of lowpart-operating and highpart-operating builtins.
+
+2025-07-16 Alfie Richards <alfie.richards@arm.com>
+
+ * tree.cc (get_clone_versions): New function.
+ (get_clone_attr_versions): New function.
+ (get_version): New function.
+ * tree.h (get_clone_versions): New function.
+ (get_clone_attr_versions): New function.
+ (get_target_version): New function.
+
+2025-07-16 Alfie Richards <alfie.richards@arm.com>
+
+ * attribs.cc (make_attribute): Change arguments.
+ * attribs.h (make_attribute): Change arguments.
+
+2025-07-16 Alfie Richards <alfie.richards@arm.com>
+
+ * pretty-print.cc (format_phase_2): Add support for string_slice.
+ * vec.cc (string_slice::tokenize): New static method.
+ (string_slice::strcmp): New static method.
+ (string_slice::strip): New method.
+ (test_string_slice_initializers): New test.
+ (test_string_slice_tokenize): Ditto.
+ (test_string_slice_strcmp): Ditto.
+ (test_string_slice_equality): Ditto.
+ (test_string_slice_inequality): Ditto.
+ (test_string_slice_invalid): Ditto.
+ (test_string_slice_strip): Ditto.
+ (vec_cc_tests): Add new tests.
+ * vec.h (class string_slice): New class.
+
+2025-07-16 Robin Dapp <rdapp@ventanamicro.com>
+
+ PR middle-end/121065
+ * cfgexpand.cc (expand_debug_expr): Allow fixed-point modes for
+ RDIV_EXPR.
+ * optabs-tree.cc (optab_for_tree_code): Ditto.
+
+2025-07-16 Robin Dapp <rdapp@ventanamicro.com>
+
+ PR target/120297
+ * config/riscv/riscv-vsetvl.def: Do not forget ratio demand of
+ previous vsetvl.
+
+2025-07-16 Kyrylo Tkachov <ktkachov@nvidia.com>
+
+ * config/aarch64/aarch64-sve2.md (*aarch64_sve2_bsl2n_eon<mode>):
+ New pattern.
+ (*aarch64_sve2_eon_bsl2n_unpred<mode>): Likewise.
+
+2025-07-16 Kyrylo Tkachov <ktkachov@nvidia.com>
+
+ * config/aarch64/aarch64-sve2.md (*aarch64_sve2_unpred_nor<mode>):
+ New define_insn.
+ (*aarch64_sve2_nand_unpred<mode>): Likewise.
+
+2025-07-16 Jeremy Rifkin <jeremy@rifkin.dev>
+
+ PR c/82134
+	* gimplify.cc (gimplify_modify_expr): Add suppress_warning.
+	* tree-cfg.cc (do_warn_unused_result): Check warning_suppressed_p.
+
+2025-07-16 Haochen Jiang <haochen.jiang@intel.com>
+
+ * common/config/i386/i386-common.cc
+ (OPTION_MASK_ISA2_AMX_AVX512_SET): Do not set AVX10.2.
+ (OPTION_MASK_ISA2_AVX10_2_UNSET): Remove AMX-AVX512 unset.
+ (OPTION_MASK_ISA2_AVX512F_UNSET): Unset AMX-AVX512.
+ (ix86_handle_option): Imply AVX512F for AMX-AVX512.
+
+2025-07-16 Pan Li <pan2.li@intel.com>
+
+ * config/riscv/autovec.md (avg<mode>3_floor): Add new
+ pattern of avg3_floor for rvv DImode.
+
2025-07-15 David Malcolm <dmalcolm@redhat.com>
* spellcheck.cc: Define INCLUDE_ALGORITHM.
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index c02c887..fc3196f 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20250716
+20250717
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index bdb5292..05dfa08 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -1796,6 +1796,7 @@ OBJS = \
tree-ssa-sink.o \
tree-ssa-strlen.o \
tree-ssa-structalias.o \
+ pta-andersen.o \
tree-ssa-tail-merge.o \
tree-ssa-ter.o \
tree-ssa-threadbackward.o \
diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog
index 7e5c955..fb40698 100644
--- a/gcc/c-family/ChangeLog
+++ b/gcc/c-family/ChangeLog
@@ -1,3 +1,19 @@
+2025-07-16 Kwok Cheung Yeung <kcyeung@baylibre.com>
+
+ * c-omp.cc (c_finish_omp_depobj): Use OMP_ITERATOR_DECL_P.
+
+2025-07-16 Alfie Richards <alfie.richards@arm.com>
+
+ * c-attribs.cc (handle_target_clones_attribute): Change to use
+ get_clone_versions.
+
+2025-07-16 Alfie Richards <alfie.richards@arm.com>
+
+ * c-format.cc (local_string_slice_node): New node type.
+ (asm_fprintf_char_table): New entry.
+ (init_dynamic_diag_info): Add support for string_slice.
+ * c-format.h (T_STRING_SLICE): New node type.
+
2025-07-15 Jakub Jelinek <jakub@redhat.com>
Jason Merrill <jason@redhat.com>
diff --git a/gcc/c-family/c-omp.cc b/gcc/c-family/c-omp.cc
index 4352214..fe272888 100644
--- a/gcc/c-family/c-omp.cc
+++ b/gcc/c-family/c-omp.cc
@@ -769,9 +769,7 @@ c_finish_omp_depobj (location_t loc, tree depobj,
kind = OMP_CLAUSE_DEPEND_KIND (clause);
t = OMP_CLAUSE_DECL (clause);
gcc_assert (t);
- if (TREE_CODE (t) == TREE_LIST
- && TREE_PURPOSE (t)
- && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
+ if (OMP_ITERATOR_DECL_P (t))
{
error_at (OMP_CLAUSE_LOCATION (clause),
"%<iterator%> modifier may not be specified on "
diff --git a/gcc/c/ChangeLog b/gcc/c/ChangeLog
index c303d2fb..01edb4c 100644
--- a/gcc/c/ChangeLog
+++ b/gcc/c/ChangeLog
@@ -1,3 +1,8 @@
+2025-07-16 Kwok Cheung Yeung <kcyeung@baylibre.com>
+
+ * c-typeck.cc (handle_omp_array_sections): Use OMP_ITERATOR_DECL_P.
+ (c_finish_omp_clauses): Likewise.
+
2025-07-15 Jakub Jelinek <jakub@redhat.com>
Jason Merrill <jason@redhat.com>
diff --git a/gcc/c/c-typeck.cc b/gcc/c/c-typeck.cc
index 5d11e57..f161bd9 100644
--- a/gcc/c/c-typeck.cc
+++ b/gcc/c/c-typeck.cc
@@ -15637,9 +15637,7 @@ handle_omp_array_sections (tree &c, enum c_omp_region_type ort)
tree *tp = &OMP_CLAUSE_DECL (c);
if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_AFFINITY)
- && TREE_CODE (*tp) == TREE_LIST
- && TREE_PURPOSE (*tp)
- && TREE_CODE (TREE_PURPOSE (*tp)) == TREE_VEC)
+ && OMP_ITERATOR_DECL_P (*tp))
tp = &TREE_VALUE (*tp);
tree first = handle_omp_array_sections_1 (c, *tp, types,
maybe_zero_len, first_non_one,
@@ -16836,9 +16834,7 @@ c_finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
/* FALLTHRU */
case OMP_CLAUSE_AFFINITY:
t = OMP_CLAUSE_DECL (c);
- if (TREE_CODE (t) == TREE_LIST
- && TREE_PURPOSE (t)
- && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
+ if (OMP_ITERATOR_DECL_P (t))
{
if (TREE_PURPOSE (t) != last_iterators)
last_iterators_remove
@@ -16938,10 +16934,7 @@ c_finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
break;
}
}
- if (TREE_CODE (OMP_CLAUSE_DECL (c)) == TREE_LIST
- && TREE_PURPOSE (OMP_CLAUSE_DECL (c))
- && (TREE_CODE (TREE_PURPOSE (OMP_CLAUSE_DECL (c)))
- == TREE_VEC))
+ if (OMP_ITERATOR_DECL_P (OMP_CLAUSE_DECL (c)))
TREE_VALUE (OMP_CLAUSE_DECL (c)) = t;
else
OMP_CLAUSE_DECL (c) = t;
diff --git a/gcc/config/i386/i386-expand.cc b/gcc/config/i386/i386-expand.cc
index 8f15c1c..09aa9b1 100644
--- a/gcc/config/i386/i386-expand.cc
+++ b/gcc/config/i386/i386-expand.cc
@@ -387,7 +387,7 @@ ix86_expand_move (machine_mode mode, rtx operands[])
tmp = XEXP (op1, 0);
if (GET_CODE (tmp) != PLUS
- || GET_CODE (XEXP (tmp, 0)) != SYMBOL_REF)
+ || !SYMBOL_REF_P (XEXP (tmp, 0)))
break;
op1 = XEXP (tmp, 0);
@@ -487,7 +487,7 @@ ix86_expand_move (machine_mode mode, rtx operands[])
op1 = machopic_legitimize_pic_address (op1, mode,
tmp == op1 ? 0 : tmp);
}
- if (op0 != op1 && GET_CODE (op0) != MEM)
+ if (op0 != op1 && !MEM_P (op0))
{
rtx insn = gen_rtx_SET (op0, op1);
emit_insn (insn);
@@ -1396,11 +1396,11 @@ ix86_expand_vector_logical_operator (enum rtx_code code, machine_mode mode,
to cast them temporarily to integer vectors. */
if (op1
&& !TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL
- && (SUBREG_P (op2) || GET_CODE (op2) == CONST_VECTOR)
+ && (SUBREG_P (op2) || CONST_VECTOR_P (op2))
&& GET_MODE_CLASS (GET_MODE (SUBREG_REG (op1))) == MODE_VECTOR_FLOAT
&& GET_MODE_SIZE (GET_MODE (SUBREG_REG (op1))) == GET_MODE_SIZE (mode)
&& SUBREG_BYTE (op1) == 0
- && (GET_CODE (op2) == CONST_VECTOR
+ && (CONST_VECTOR_P (op2)
|| (GET_MODE (SUBREG_REG (op1)) == GET_MODE (SUBREG_REG (op2))
&& SUBREG_BYTE (op2) == 0))
&& can_create_pseudo_p ())
@@ -1415,7 +1415,7 @@ ix86_expand_vector_logical_operator (enum rtx_code code, machine_mode mode,
case E_V4DFmode:
case E_V8DFmode:
dst = gen_reg_rtx (GET_MODE (SUBREG_REG (op1)));
- if (GET_CODE (op2) == CONST_VECTOR)
+ if (CONST_VECTOR_P (op2))
{
op2 = gen_lowpart (GET_MODE (dst), op2);
op2 = force_reg (GET_MODE (dst), op2);
@@ -4918,7 +4918,7 @@ ix86_expand_int_sse_cmp (rtx dest, enum rtx_code code, rtx cop0, rtx cop1,
case LEU:
/* x <= cst can be handled as x < cst + 1 unless there is
wrap around in cst + 1. */
- if (GET_CODE (cop1) == CONST_VECTOR
+ if (CONST_VECTOR_P (cop1)
&& GET_MODE_INNER (mode) != TImode)
{
unsigned int n_elts = GET_MODE_NUNITS (mode), i;
@@ -4962,7 +4962,7 @@ ix86_expand_int_sse_cmp (rtx dest, enum rtx_code code, rtx cop0, rtx cop1,
case GEU:
/* x >= cst can be handled as x > cst - 1 unless there is
wrap around in cst - 1. */
- if (GET_CODE (cop1) == CONST_VECTOR
+ if (CONST_VECTOR_P (cop1)
&& GET_MODE_INNER (mode) != TImode)
{
unsigned int n_elts = GET_MODE_NUNITS (mode), i;
@@ -5033,9 +5033,9 @@ ix86_expand_int_sse_cmp (rtx dest, enum rtx_code code, rtx cop0, rtx cop1,
}
}
- if (GET_CODE (cop0) == CONST_VECTOR)
+ if (CONST_VECTOR_P (cop0))
cop0 = force_reg (mode, cop0);
- else if (GET_CODE (cop1) == CONST_VECTOR)
+ else if (CONST_VECTOR_P (cop1))
cop1 = force_reg (mode, cop1);
rtx optrue = op_true ? op_true : CONSTM1_RTX (data_mode);
@@ -5234,7 +5234,7 @@ ix86_expand_int_sse_cmp (rtx dest, enum rtx_code code, rtx cop0, rtx cop1,
if (*negate)
std::swap (op_true, op_false);
- if (GET_CODE (cop1) == CONST_VECTOR)
+ if (CONST_VECTOR_P (cop1))
cop1 = force_reg (mode, cop1);
/* Allow the comparison to be done in one mode, but the movcc to
@@ -6188,7 +6188,7 @@ ix86_extract_perm_from_pool_constant (int* perm, rtx mem)
rtx constant = get_pool_constant (XEXP (mem, 0));
- if (GET_CODE (constant) != CONST_VECTOR)
+ if (!CONST_VECTOR_P (constant))
return false;
/* There could be some rtx like
@@ -6198,7 +6198,7 @@ ix86_extract_perm_from_pool_constant (int* perm, rtx mem)
{
constant = simplify_subreg (mode, constant, GET_MODE (constant), 0);
- if (constant == nullptr || GET_CODE (constant) != CONST_VECTOR)
+ if (constant == nullptr || !CONST_VECTOR_P (constant))
return false;
}
@@ -6244,7 +6244,7 @@ ix86_split_to_parts (rtx operand, rtx *parts, machine_mode mode)
return size;
}
- if (GET_CODE (operand) == CONST_VECTOR)
+ if (CONST_VECTOR_P (operand))
{
scalar_int_mode imode = int_mode_for_mode (mode).require ();
/* Caution: if we looked through a constant pool memory above,
@@ -6378,7 +6378,7 @@ ix86_split_long_move (rtx operands[])
fp moves, that force all constants to memory to allow combining. */
if (MEM_P (operands[1])
- && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
+ && SYMBOL_REF_P (XEXP (operands[1], 0))
&& CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0)))
operands[1] = get_pool_constant (XEXP (operands[1], 0));
if (push_operand (operands[0], VOIDmode))
@@ -10245,7 +10245,7 @@ construct_plt_address (rtx symbol)
{
rtx tmp, unspec;
- gcc_assert (GET_CODE (symbol) == SYMBOL_REF);
+ gcc_assert (SYMBOL_REF_P (symbol));
gcc_assert (ix86_cmodel == CM_LARGE_PIC && !TARGET_PECOFF);
gcc_assert (Pmode == DImode);
@@ -10279,7 +10279,7 @@ ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
tree fndecl;
bool call_no_callee_saved_registers = false;
- if (GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
+ if (SYMBOL_REF_P (XEXP (fnaddr, 0)))
{
fndecl = SYMBOL_REF_DECL (XEXP (fnaddr, 0));
if (fndecl)
@@ -10316,7 +10316,7 @@ ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
if (TARGET_MACHO && !TARGET_64BIT)
{
#if TARGET_MACHO
- if (flag_pic && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
+ if (flag_pic && SYMBOL_REF_P (XEXP (fnaddr, 0)))
fnaddr = machopic_indirect_call_target (fnaddr);
#endif
}
@@ -10326,7 +10326,7 @@ ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
check if PLT was explicitly avoided via no-plt or "noplt" attribute, making
it an indirect call. */
if (flag_pic
- && GET_CODE (addr) == SYMBOL_REF
+ && SYMBOL_REF_P (addr)
&& ix86_call_use_plt_p (addr))
{
if (flag_plt
@@ -10400,7 +10400,7 @@ ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
if (ix86_cmodel == CM_LARGE_PIC
&& !TARGET_PECOFF
&& MEM_P (fnaddr)
- && GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF
+ && SYMBOL_REF_P (XEXP (fnaddr, 0))
&& !local_symbolic_operand (XEXP (fnaddr, 0), VOIDmode))
fnaddr = gen_rtx_MEM (QImode, construct_plt_address (XEXP (fnaddr, 0)));
/* Since x32 GOT slot is 64 bit with zero upper 32 bits, indirect
@@ -10503,7 +10503,7 @@ ix86_expand_call (rtx retval, rtx fnaddr, rtx callarg1,
}
if (TARGET_MACHO && TARGET_64BIT && !sibcall
- && ((GET_CODE (addr) == SYMBOL_REF && !SYMBOL_REF_LOCAL_P (addr))
+ && ((SYMBOL_REF_P (addr) && !SYMBOL_REF_LOCAL_P (addr))
|| !fndecl || TREE_PUBLIC (fndecl)))
{
/* We allow public functions defined in a TU to bind locally for PIC
@@ -12612,7 +12612,7 @@ ix86_expand_args_builtin (const struct builtin_description *d,
static rtx
ix86_erase_embedded_rounding (rtx pat)
{
- if (GET_CODE (pat) == INSN)
+ if (NONJUMP_INSN_P (pat))
pat = PATTERN (pat);
gcc_assert (GET_CODE (pat) == SET);
@@ -25327,7 +25327,7 @@ const_vector_equal_evenodd_p (rtx op)
{
machine_mode mode = GET_MODE (op);
int i, nunits = GET_MODE_NUNITS (mode);
- if (GET_CODE (op) != CONST_VECTOR
+ if (!CONST_VECTOR_P (op)
|| nunits != CONST_VECTOR_NUNITS (op))
return false;
for (i = 0; i < nunits; i += 2)
@@ -25670,7 +25670,7 @@ ix86_notrack_prefixed_insn_p (rtx_insn *insn)
/* Do not emit 'notrack' if it's not an indirect call. */
if (MEM_P (addr)
- && GET_CODE (XEXP (addr, 0)) == SYMBOL_REF)
+ && SYMBOL_REF_P (XEXP (addr, 0)))
return false;
else
return find_reg_note (insn, REG_CALL_NOCF_CHECK, 0);
@@ -26487,8 +26487,8 @@ do_mem_operand:
if (rtx_equal_p (op, args[2]))
return 0xaa;
/* Check if CONST_VECTOR is the ones-complement of args[2]. */
- if (GET_CODE (op) == CONST_VECTOR
- && GET_CODE (args[2]) == CONST_VECTOR
+ if (CONST_VECTOR_P (op)
+ && CONST_VECTOR_P (args[2])
&& rtx_equal_p (simplify_const_unary_operation (NOT, GET_MODE (op),
op, GET_MODE (op)),
args[2]))
@@ -26501,8 +26501,8 @@ do_mem_operand:
if (rtx_equal_p (op, args[0]))
return 0xf0;
/* Check if CONST_VECTOR is the ones-complement of args[0]. */
- if (GET_CODE (op) == CONST_VECTOR
- && GET_CODE (args[0]) == CONST_VECTOR
+ if (CONST_VECTOR_P (op)
+ && CONST_VECTOR_P (args[0])
&& rtx_equal_p (simplify_const_unary_operation (NOT, GET_MODE (op),
op, GET_MODE (op)),
args[0]))
@@ -26515,8 +26515,8 @@ do_mem_operand:
if (rtx_equal_p (op, args[1]))
return 0xcc;
/* Check if CONST_VECTOR is the ones-complement of args[1]. */
- if (GET_CODE (op) == CONST_VECTOR
- && GET_CODE (args[1]) == CONST_VECTOR
+ if (CONST_VECTOR_P (op)
+ && CONST_VECTOR_P (args[1])
&& rtx_equal_p (simplify_const_unary_operation (NOT, GET_MODE (op),
op, GET_MODE (op)),
args[1]))
@@ -26746,15 +26746,6 @@ ix86_expand_ternlog (machine_mode mode, rtx op0, rtx op1, rtx op2, int idx,
&& (!op2 || !side_effects_p (op2))
&& op0)
{
- if (GET_MODE (op0) != mode)
- op0 = gen_lowpart (mode, op0);
- if (!TARGET_64BIT && !register_operand (op0, mode))
- {
- /* Avoid force_reg (mode, op0). */
- rtx reg = gen_reg_rtx (mode);
- emit_move_insn (reg, op0);
- op0 = reg;
- }
emit_move_insn (target, gen_rtx_XOR (mode, op0, CONSTM1_RTX (mode)));
return target;
}
@@ -26779,15 +26770,6 @@ ix86_expand_ternlog (machine_mode mode, rtx op0, rtx op1, rtx op2, int idx,
&& (!op2 || !side_effects_p (op2))
&& op1)
{
- if (GET_MODE (op1) != mode)
- op1 = gen_lowpart (mode, op1);
- if (!TARGET_64BIT && !register_operand (op1, mode))
- {
- /* Avoid force_reg (mode, op1). */
- rtx reg = gen_reg_rtx (mode);
- emit_move_insn (reg, op1);
- op1 = reg;
- }
emit_move_insn (target, gen_rtx_XOR (mode, op1, CONSTM1_RTX (mode)));
return target;
}
@@ -26819,15 +26801,6 @@ ix86_expand_ternlog (machine_mode mode, rtx op0, rtx op1, rtx op2, int idx,
&& (!op1 || !side_effects_p (op1))
&& op2)
{
- if (GET_MODE (op2) != mode)
- op2 = gen_lowpart (mode, op2);
- if (!TARGET_64BIT && !register_operand (op2, mode))
- {
- /* Avoid force_reg (mode, op2). */
- rtx reg = gen_reg_rtx (mode);
- emit_move_insn (reg, op2);
- op2 = reg;
- }
emit_move_insn (target, gen_rtx_XOR (mode, op2, CONSTM1_RTX (mode)));
return target;
}
diff --git a/gcc/config/i386/i386-features.cc b/gcc/config/i386/i386-features.cc
index 734ab70..c131577 100644
--- a/gcc/config/i386/i386-features.cc
+++ b/gcc/config/i386/i386-features.cc
@@ -2141,7 +2141,7 @@ convertible_comparison_p (rtx_insn *insn, enum machine_mode mode)
gcc_assert (GET_CODE (src) == COMPARE);
- if (GET_CODE (dst) != REG
+ if (!REG_P (dst)
|| REGNO (dst) != FLAGS_REG
|| GET_MODE (dst) != CCZmode)
return false;
@@ -2953,7 +2953,7 @@ rest_of_insert_endbr_and_patchable_area (bool need_endbr,
/* Also generate ENDBRANCH for non-tail call which
may return via indirect branch. */
- if (GET_CODE (XEXP (fnaddr, 0)) == SYMBOL_REF)
+ if (SYMBOL_REF_P (XEXP (fnaddr, 0)))
fndecl = SYMBOL_REF_DECL (XEXP (fnaddr, 0));
if (fndecl == NULL_TREE)
fndecl = MEM_EXPR (fnaddr);
diff --git a/gcc/config/i386/i386-options.cc b/gcc/config/i386/i386-options.cc
index 5365849..d244b225 100644
--- a/gcc/config/i386/i386-options.cc
+++ b/gcc/config/i386/i386-options.cc
@@ -2852,12 +2852,16 @@ ix86_option_override_internal (bool main_args_p,
sorry ("%<-mno-fentry%> isn%'t compatible with SEH");
}
+#ifdef OPTION_GLIBC_P
+ /* -mfentry is supported only on glibc targets. */
if (!opts->x_flag_fentry
+ && OPTION_GLIBC_P (opts)
&& (TARGET_64BIT_P (opts->x_ix86_isa_flags) || !opts->x_flag_pic)
&& opts->x_flag_shrink_wrap
&& opts->x_profile_flag)
warning (0, "%<-pg%> without %<-mfentry%> may be unreliable with "
"shrink wrapping");
+#endif
if (TARGET_SEH && TARGET_CALL_MS2SYSV_XLOGUES)
sorry ("%<-mcall-ms2sysv-xlogues%> isn%'t currently supported with SEH");
diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc
index 313522b..49bd393 100644
--- a/gcc/config/i386/i386.cc
+++ b/gcc/config/i386/i386.cc
@@ -5979,7 +5979,7 @@ symbolic_reference_mentioned_p (rtx op)
const char *fmt;
int i;
- if (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == LABEL_REF)
+ if (SYMBOL_REF_P (op) || LABEL_REF_P (op))
return true;
fmt = GET_RTX_FORMAT (GET_CODE (op));
@@ -10724,8 +10724,7 @@ split_stack_prologue_scratch_regno (void)
static GTY(()) rtx split_stack_fn;
-/* A SYMBOL_REF for the more stack function when using the large
- model. */
+/* A SYMBOL_REF for the more stack function when using the large model. */
static GTY(()) rtx split_stack_fn_large;
@@ -11413,7 +11412,7 @@ ix86_force_load_from_GOT_p (rtx x, bool call_p)
&& (!flag_pic || this_is_asm_operands)
&& ix86_cmodel != CM_LARGE
&& ix86_cmodel != CM_LARGE_PIC
- && GET_CODE (x) == SYMBOL_REF
+ && SYMBOL_REF_P (x)
&& ((!call_p
&& (!ix86_direct_extern_access
|| (SYMBOL_REF_DECL (x)
@@ -11459,23 +11458,23 @@ ix86_legitimate_constant_p (machine_mode mode, rtx x)
case UNSPEC_TPOFF:
case UNSPEC_NTPOFF:
x = XVECEXP (x, 0, 0);
- return (GET_CODE (x) == SYMBOL_REF
+ return (SYMBOL_REF_P (x)
&& SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
case UNSPEC_DTPOFF:
x = XVECEXP (x, 0, 0);
- return (GET_CODE (x) == SYMBOL_REF
+ return (SYMBOL_REF_P (x)
&& SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC);
case UNSPEC_SECREL32:
x = XVECEXP (x, 0, 0);
- return GET_CODE (x) == SYMBOL_REF;
+ return SYMBOL_REF_P (x);
default:
return false;
}
/* We must have drilled down to a symbol. */
- if (GET_CODE (x) == LABEL_REF)
+ if (LABEL_REF_P (x))
return true;
- if (GET_CODE (x) != SYMBOL_REF)
+ if (!SYMBOL_REF_P (x))
return false;
/* FALLTHRU */
@@ -11602,11 +11601,11 @@ legitimate_pic_operand_p (rtx x)
return TARGET_64BIT;
case UNSPEC_TPOFF:
x = XVECEXP (inner, 0, 0);
- return (GET_CODE (x) == SYMBOL_REF
+ return (SYMBOL_REF_P (x)
&& SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_EXEC);
case UNSPEC_SECREL32:
x = XVECEXP (inner, 0, 0);
- return GET_CODE (x) == SYMBOL_REF;
+ return SYMBOL_REF_P (x);
case UNSPEC_MACHOPIC_OFFSET:
return legitimate_pic_address_disp_p (x);
default:
@@ -11657,7 +11656,7 @@ legitimate_pic_address_disp_p (rtx disp)
if (INTVAL (op1) >= 16*1024*1024
|| INTVAL (op1) < -16*1024*1024)
break;
- if (GET_CODE (op0) == LABEL_REF)
+ if (LABEL_REF_P (op0))
return true;
if (GET_CODE (op0) == CONST
&& GET_CODE (XEXP (op0, 0)) == UNSPEC
@@ -11666,7 +11665,7 @@ legitimate_pic_address_disp_p (rtx disp)
if (GET_CODE (op0) == UNSPEC
&& XINT (op0, 1) == UNSPEC_PCREL)
return true;
- if (GET_CODE (op0) != SYMBOL_REF)
+ if (!SYMBOL_REF_P (op0))
break;
/* FALLTHRU */
@@ -11731,8 +11730,8 @@ legitimate_pic_address_disp_p (rtx disp)
&& XINT (disp, 1) != UNSPEC_PLTOFF))
return false;
- if (GET_CODE (XVECEXP (disp, 0, 0)) != SYMBOL_REF
- && GET_CODE (XVECEXP (disp, 0, 0)) != LABEL_REF)
+ if (!SYMBOL_REF_P (XVECEXP (disp, 0, 0))
+ && !LABEL_REF_P (XVECEXP (disp, 0, 0)))
return false;
return true;
}
@@ -11760,14 +11759,14 @@ legitimate_pic_address_disp_p (rtx disp)
/* We need to check for both symbols and labels because VxWorks loads
text labels with @GOT rather than @GOTOFF. See gotoff_operand for
details. */
- return (GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
- || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF);
+ return (SYMBOL_REF_P (XVECEXP (disp, 0, 0))
+ || LABEL_REF_P (XVECEXP (disp, 0, 0)));
case UNSPEC_GOTOFF:
/* Refuse GOTOFF in 64bit mode since it is always 64bit when used.
While ABI specify also 32bit relocation but we don't produce it in
small PIC model at all. */
- if ((GET_CODE (XVECEXP (disp, 0, 0)) == SYMBOL_REF
- || GET_CODE (XVECEXP (disp, 0, 0)) == LABEL_REF)
+ if ((SYMBOL_REF_P (XVECEXP (disp, 0, 0))
+ || LABEL_REF_P (XVECEXP (disp, 0, 0)))
&& !TARGET_64BIT)
return !TARGET_PECOFF && gotoff_operand (XVECEXP (disp, 0, 0), Pmode);
return false;
@@ -11777,19 +11776,19 @@ legitimate_pic_address_disp_p (rtx disp)
if (saw_plus)
return false;
disp = XVECEXP (disp, 0, 0);
- return (GET_CODE (disp) == SYMBOL_REF
+ return (SYMBOL_REF_P (disp)
&& SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_INITIAL_EXEC);
case UNSPEC_NTPOFF:
disp = XVECEXP (disp, 0, 0);
- return (GET_CODE (disp) == SYMBOL_REF
+ return (SYMBOL_REF_P (disp)
&& SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_EXEC);
case UNSPEC_DTPOFF:
disp = XVECEXP (disp, 0, 0);
- return (GET_CODE (disp) == SYMBOL_REF
+ return (SYMBOL_REF_P (disp)
&& SYMBOL_REF_TLS_MODEL (disp) == TLS_MODEL_LOCAL_DYNAMIC);
case UNSPEC_SECREL32:
disp = XVECEXP (disp, 0, 0);
- return GET_CODE (disp) == SYMBOL_REF;
+ return SYMBOL_REF_P (disp);
}
return false;
@@ -12131,11 +12130,11 @@ ix86_legitimate_address_p (machine_mode, rtx addr, bool strict,
that never results in lea, this seems to be easier and
correct fix for crash to disable this test. */
}
- else if (GET_CODE (disp) != LABEL_REF
+ else if (!LABEL_REF_P (disp)
&& !CONST_INT_P (disp)
&& (GET_CODE (disp) != CONST
|| !ix86_legitimate_constant_p (Pmode, disp))
- && (GET_CODE (disp) != SYMBOL_REF
+ && (!SYMBOL_REF_P (disp)
|| !ix86_legitimate_constant_p (Pmode, disp)))
/* Displacement is not constant. */
return false;
@@ -12242,10 +12241,10 @@ legitimize_pic_address (rtx orig, rtx reg)
else
new_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, new_rtx);
}
- else if ((GET_CODE (addr) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (addr) == 0)
+ else if ((SYMBOL_REF_P (addr) && SYMBOL_REF_TLS_MODEL (addr) == 0)
/* We can't always use @GOTOFF for text labels
on VxWorks, see gotoff_operand. */
- || (TARGET_VXWORKS_VAROFF && GET_CODE (addr) == LABEL_REF))
+ || (TARGET_VXWORKS_VAROFF && LABEL_REF_P (addr)))
{
#if TARGET_PECOFF
rtx tmp = legitimize_pe_coff_symbol (addr, true);
@@ -12380,8 +12379,8 @@ legitimize_pic_address (rtx orig, rtx reg)
/* For %rip addressing, we have to use
just disp32, not base nor index. */
if (TARGET_64BIT
- && (GET_CODE (base) == SYMBOL_REF
- || GET_CODE (base) == LABEL_REF))
+ && (SYMBOL_REF_P (base)
+ || LABEL_REF_P (base)))
base = force_reg (mode, base);
if (GET_CODE (new_rtx) == PLUS
&& CONSTANT_P (XEXP (new_rtx, 1)))
@@ -12883,12 +12882,12 @@ ix86_legitimize_address (rtx x, rtx, machine_mode mode)
bool changed = false;
unsigned log;
- log = GET_CODE (x) == SYMBOL_REF ? SYMBOL_REF_TLS_MODEL (x) : 0;
+ log = SYMBOL_REF_P (x) ? SYMBOL_REF_TLS_MODEL (x) : 0;
if (log)
return legitimize_tls_address (x, (enum tls_model) log, false);
if (GET_CODE (x) == CONST
&& GET_CODE (XEXP (x, 0)) == PLUS
- && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
+ && SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
&& (log = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0))))
{
rtx t = legitimize_tls_address (XEXP (XEXP (x, 0), 0),
@@ -13305,7 +13304,7 @@ ix86_delegitimize_tls_address (rtx orig_x)
if (GET_CODE (unspec) != UNSPEC || XINT (unspec, 1) != UNSPEC_NTPOFF)
return orig_x;
x = XVECEXP (unspec, 0, 0);
- gcc_assert (GET_CODE (x) == SYMBOL_REF);
+ gcc_assert (SYMBOL_REF_P (x));
if (unspec != XEXP (addr.disp, 0))
x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.disp, 0), 1));
if (addr.index)
@@ -14669,7 +14668,7 @@ ix86_print_operand (FILE *file, rtx x, int code)
/* We have patterns that allow zero sets of memory, for instance.
In 64-bit mode, we should probably support all 8-byte vectors,
since we can in fact encode that into an immediate. */
- if (GET_CODE (x) == CONST_VECTOR)
+ if (CONST_VECTOR_P (x))
{
if (x != CONST0_RTX (GET_MODE (x)))
output_operand_lossage ("invalid vector immediate");
@@ -14699,8 +14698,8 @@ ix86_print_operand (FILE *file, rtx x, int code)
if (ASSEMBLER_DIALECT == ASM_ATT)
putc ('$', file);
}
- else if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF
- || GET_CODE (x) == LABEL_REF)
+ else if (GET_CODE (x) == CONST || SYMBOL_REF_P (x)
+ || LABEL_REF_P (x))
{
if (ASSEMBLER_DIALECT == ASM_ATT)
putc ('$', file);
@@ -14795,8 +14794,8 @@ ix86_print_operand_address_as (FILE *file, rtx addr,
&& CONST_INT_P (XEXP (XEXP (disp, 0), 1)))
symbol = XEXP (XEXP (disp, 0), 0);
- if (GET_CODE (symbol) == LABEL_REF
- || (GET_CODE (symbol) == SYMBOL_REF
+ if (LABEL_REF_P (symbol)
+ || (SYMBOL_REF_P (symbol)
&& SYMBOL_REF_TLS_MODEL (symbol) == 0))
base = pc_rtx;
}
@@ -14884,7 +14883,7 @@ ix86_print_operand_address_as (FILE *file, rtx addr,
{
if (flag_pic)
output_pic_addr_const (file, disp, 0);
- else if (GET_CODE (disp) == LABEL_REF)
+ else if (LABEL_REF_P (disp))
output_asm_label (disp);
else
output_addr_const (file, disp);
@@ -14920,7 +14919,7 @@ ix86_print_operand_address_as (FILE *file, rtx addr,
if (flag_pic)
output_pic_addr_const (file, disp, 0);
- else if (GET_CODE (disp) == LABEL_REF)
+ else if (LABEL_REF_P (disp))
output_asm_label (disp);
else if (CONST_INT_P (disp))
offset = disp;
@@ -16704,6 +16703,10 @@ ix86_convert_const_vector_to_integer (rtx op, machine_mode mode)
val = wi::insert (val, wv, innermode_bits * i, innermode_bits);
}
break;
+ case E_V1SImode:
+ case E_V1DImode:
+ op = CONST_VECTOR_ELT (op, 0);
+ return INTVAL (op);
case E_V2HFmode:
case E_V2BFmode:
case E_V4HFmode:
@@ -17679,8 +17682,8 @@ ix86_rip_relative_addr_p (struct ix86_address *parts)
&& CONST_INT_P (XEXP (symbol, 1)))
symbol = XEXP (symbol, 0);
- if (GET_CODE (symbol) == LABEL_REF
- || (GET_CODE (symbol) == SYMBOL_REF
+ if (LABEL_REF_P (symbol)
+ || (SYMBOL_REF_P (symbol)
&& SYMBOL_REF_TLS_MODEL (symbol) == 0)
|| (GET_CODE (symbol) == UNSPEC
&& (XINT (symbol, 1) == UNSPEC_GOTPCREL
@@ -23087,7 +23090,7 @@ ix86_rtx_costs (rtx x, machine_mode mode, int outer_code_i, int opno,
/* Make (subreg:V4SI (not:V16QI (reg:V16QI ..)) 0)
cost the same as register.
This is used by avx_cmp<mode>3_ltint_not. */
- if (GET_CODE (unsop0) == SUBREG)
+ if (SUBREG_P (unsop0))
unsop0 = XEXP (unsop0, 0);
if (GET_CODE (unsop0) == NOT)
unsop0 = XEXP (unsop0, 0);
@@ -26798,7 +26801,7 @@ ix86_reloc_rw_mask (void)
static bool
symbolic_base_address_p (rtx addr)
{
- if (GET_CODE (addr) == SYMBOL_REF)
+ if (SYMBOL_REF_P (addr))
return true;
if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_GOTOFF)
diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h
index bfc6c6f..791f3b9 100644
--- a/gcc/config/i386/i386.h
+++ b/gcc/config/i386/i386.h
@@ -1842,8 +1842,8 @@ typedef struct ix86_args {
#define STRIP_UNARY(X) (UNARY_P (X) ? XEXP (X, 0) : X)
#define SYMBOLIC_CONST(X) \
- (GET_CODE (X) == SYMBOL_REF \
- || GET_CODE (X) == LABEL_REF \
+ (SYMBOL_REF_P (X) \
+ || LABEL_REF_P (X) \
|| (GET_CODE (X) == CONST && symbolic_reference_mentioned_p (X)))
/* Max number of args passed in registers. If this is more than 3, we will
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index 83c438b..eb52699 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -12810,8 +12810,8 @@
(zero_extend:DI (and:SI (match_dup 1) (match_dup 2))))
(clobber (reg:CC FLAGS_REG))])]
{
- if (GET_CODE (operands[2]) == SYMBOL_REF
- || GET_CODE (operands[2]) == LABEL_REF)
+ if (SYMBOL_REF_P (operands[2])
+ || LABEL_REF_P (operands[2]))
{
operands[2] = shallow_copy_rtx (operands[2]);
PUT_MODE (operands[2], SImode);
diff --git a/gcc/config/i386/mmx.md b/gcc/config/i386/mmx.md
index 29a8cb5..1f97993 100644
--- a/gcc/config/i386/mmx.md
+++ b/gcc/config/i386/mmx.md
@@ -81,12 +81,13 @@
;; 4-byte and 2-byte QImode vector modes
(define_mode_iterator VI1_16_32 [V4QI V2QI])
-;; All 2-byte, 4-byte and 8-byte vector modes with more than 1 element
+;; All 2-byte, 4-byte and 8-byte vector modes.
(define_mode_iterator V_16_32_64
- [V2QI V4QI V2HI V2HF
+ [V2QI V4QI V2HI V1SI V2HF V2BF
(V8QI "TARGET_64BIT") (V4HI "TARGET_64BIT")
(V4HF "TARGET_64BIT") (V4BF "TARGET_64BIT")
- (V2SI "TARGET_64BIT") (V2SF "TARGET_64BIT")])
+ (V2SI "TARGET_64BIT") (V2SF "TARGET_64BIT")
+ (V1DI "TARGET_64BIT")])
;; V2S* modes
(define_mode_iterator V2FI [V2SF V2SI])
@@ -107,6 +108,7 @@
[(V8QI "DI") (V4QI "SI") (V2QI "HI")
(V4HI "DI") (V2HI "SI")
(V2SI "DI")
+ (V1DI "DI") (V1SI "SI")
(V4HF "DI") (V2HF "SI")
(V4BF "DI") (V2BF "SI")
(V2SF "DI")])
@@ -407,22 +409,6 @@
]
(symbol_ref "true")))])
-;; 16-bit, 32-bit and 64-bit constant vector stores. After reload,
-;; convert them to immediate integer stores.
-(define_insn_and_split "*mov<mode>_imm"
- [(set (match_operand:V_16_32_64 0 "memory_operand" "=m")
- (match_operand:V_16_32_64 1 "x86_64_const_vector_operand" "i"))]
- ""
- "#"
- "&& reload_completed"
- [(set (match_dup 0) (match_dup 1))]
-{
- HOST_WIDE_INT val = ix86_convert_const_vector_to_integer (operands[1],
- <MODE>mode);
- operands[1] = GEN_INT (val);
- operands[0] = lowpart_subreg (<mmxinsnmode>mode, operands[0], <MODE>mode);
-})
-
;; For TARGET_64BIT we always round up to 8 bytes.
(define_insn "*push<mode>2_rex64"
[(set (match_operand:V_32 0 "push_operand" "=X,X")
@@ -588,6 +574,42 @@
]
(symbol_ref "true")))])
+(define_split
+ [(set (match_operand:V_16_32_64 0 "general_reg_operand")
+ (match_operand:V_16_32_64 1 "memory_operand"))]
+ "reload_completed
+ && SYMBOL_REF_P (XEXP (operands[1], 0))
+ && CONSTANT_POOL_ADDRESS_P (XEXP (operands[1], 0))"
+ [(set (match_dup 0) (match_dup 1))]
+{
+ rtx op1 = avoid_constant_pool_reference (operands[1]);
+
+ if (!CONST_VECTOR_P (op1))
+ FAIL;
+
+ HOST_WIDE_INT val = ix86_convert_const_vector_to_integer (op1, <MODE>mode);
+
+ operands[0] = lowpart_subreg (<mmxinsnmode>mode, operands[0], <MODE>mode);
+ operands[1] = GEN_INT (val);
+})
+
+;; 16-bit, 32-bit and 64-bit constant vector stores. After reload,
+;; convert them to immediate integer stores.
+(define_insn_and_split "*mov<mode>_imm"
+ [(set (match_operand:V_16_32_64 0 "memory_operand" "=m")
+ (match_operand:V_16_32_64 1 "x86_64_const_vector_operand" "i"))]
+ ""
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 0) (match_dup 1))]
+{
+ rtx op1 = operands[1];
+ HOST_WIDE_INT val = ix86_convert_const_vector_to_integer (op1, <MODE>mode);
+
+ operands[0] = adjust_address (operands[0], <mmxinsnmode>mode, 0);
+ operands[1] = GEN_INT (val);
+})
+
;; We always round up to UNITS_PER_WORD bytes.
(define_insn "*pushv2qi2"
[(set (match_operand:V2QI 0 "push_operand" "=X,X")
diff --git a/gcc/config/i386/predicates.md b/gcc/config/i386/predicates.md
index 3afaf83..b2d2eec 100644
--- a/gcc/config/i386/predicates.md
+++ b/gcc/config/i386/predicates.md
@@ -573,8 +573,8 @@
case CONST:
op = XEXP (op, 0);
- if (GET_CODE (op) == SYMBOL_REF
- || GET_CODE (op) == LABEL_REF
+ if (SYMBOL_REF_P (op)
+ || LABEL_REF_P (op)
|| (GET_CODE (op) == UNSPEC
&& (XINT (op, 1) == UNSPEC_GOT
|| XINT (op, 1) == UNSPEC_GOTOFF
@@ -586,8 +586,8 @@
return false;
op = XEXP (op, 0);
- if (GET_CODE (op) == SYMBOL_REF
- || GET_CODE (op) == LABEL_REF)
+ if (SYMBOL_REF_P (op)
+ || LABEL_REF_P (op))
return true;
/* Only @GOTOFF gets offsets. */
if (GET_CODE (op) != UNSPEC
@@ -595,8 +595,8 @@
return false;
op = XVECEXP (op, 0, 0);
- if (GET_CODE (op) == SYMBOL_REF
- || GET_CODE (op) == LABEL_REF)
+ if (SYMBOL_REF_P (op)
+ || LABEL_REF_P (op))
return true;
return false;
@@ -614,10 +614,10 @@
&& CONST_INT_P (XEXP (XEXP (op, 0), 1)))
op = XEXP (XEXP (op, 0), 0);
- if (GET_CODE (op) == LABEL_REF)
+ if (LABEL_REF_P (op))
return true;
- if (GET_CODE (op) != SYMBOL_REF)
+ if (!SYMBOL_REF_P (op))
return false;
if (SYMBOL_REF_TLS_MODEL (op))
@@ -649,7 +649,7 @@
&& CONST_INT_P (XEXP (XEXP (op, 0), 1)))
op = XEXP (XEXP (op, 0), 0);
- if (GET_CODE (op) == SYMBOL_REF
+ if (SYMBOL_REF_P (op)
&& !SYMBOL_REF_FUNCTION_P (op))
return false;
@@ -1145,7 +1145,7 @@
unsigned n_elts;
op = avoid_constant_pool_reference (op);
- if (GET_CODE (op) != CONST_VECTOR)
+ if (!CONST_VECTOR_P (op))
return false;
n_elts = CONST_VECTOR_NUNITS (op);
@@ -1173,7 +1173,7 @@
if (MEM_P (op))
{
op = get_pool_constant (XEXP (op, 0));
- if (GET_CODE (op) != CONST_VECTOR)
+ if (!CONST_VECTOR_P (op))
return false;
if (GET_MODE (op) != mode
@@ -1422,8 +1422,8 @@
}
if (TARGET_64BIT
&& flag_pic
- && (GET_CODE (disp) == SYMBOL_REF
- || GET_CODE (disp) == LABEL_REF))
+ && (SYMBOL_REF_P (disp)
+ || LABEL_REF_P (disp)))
return false;
}
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index 252ba07..d88c3d6 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -12756,7 +12756,7 @@
lo insns have =m and 0C constraints. */
: (operands[2] != const0_rtx
|| (!rtx_equal_p (dest, operands[3])
- && GET_CODE (operands[3]) != CONST_VECTOR))))
+ && !CONST_VECTOR_P (operands[3])))))
dest = gen_reg_rtx (<ssehalfvecmode>mode);
switch (INTVAL (operands[2]))
{
diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
index 43e3ab0..3d71f30 100644
--- a/gcc/config/loongarch/lasx.md
+++ b/gcc/config/loongarch/lasx.md
@@ -2060,9 +2060,9 @@
[(set_attr "type" "simd_int_arith")
(set_attr "mode" "<MODE>")])
-(define_insn "lasx_xvshuf_<lasxfmt_f>"
+(define_insn "@lasx_xvshuf_<lasxfmt_f>"
[(set (match_operand:LASX_DWH 0 "register_operand" "=f")
- (unspec:LASX_DWH [(match_operand:LASX_DWH 1 "register_operand" "0")
+ (unspec:LASX_DWH [(match_operand:<VIMODE> 1 "register_operand" "0")
(match_operand:LASX_DWH 2 "register_operand" "f")
(match_operand:LASX_DWH 3 "register_operand" "f")]
UNSPEC_LASX_XVSHUF))]
diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
index f62e416..b00fcc7 100644
--- a/gcc/config/loongarch/loongarch.cc
+++ b/gcc/config/loongarch/loongarch.cc
@@ -8380,7 +8380,7 @@ static bool
loongarch_try_expand_lsx_vshuf_const (struct expand_vec_perm_d *d)
{
int i;
- rtx target, op0, op1, sel, tmp;
+ rtx target, op0, op1;
rtx rperm[MAX_VECT_LEN];
if (GET_MODE_SIZE (d->vmode) == 16)
@@ -8399,47 +8399,23 @@ loongarch_try_expand_lsx_vshuf_const (struct expand_vec_perm_d *d)
for (i = 0; i < d->nelt; i += 1)
rperm[i] = GEN_INT (d->perm[i]);
- if (d->vmode == E_V2DFmode)
- {
- sel = gen_rtx_CONST_VECTOR (E_V2DImode, gen_rtvec_v (d->nelt, rperm));
- tmp = simplify_gen_subreg (E_V2DImode, d->target, d->vmode, 0);
- emit_move_insn (tmp, sel);
- }
- else if (d->vmode == E_V4SFmode)
- {
- sel = gen_rtx_CONST_VECTOR (E_V4SImode, gen_rtvec_v (d->nelt, rperm));
- tmp = simplify_gen_subreg (E_V4SImode, d->target, d->vmode, 0);
- emit_move_insn (tmp, sel);
- }
+ machine_mode sel_mode = related_int_vector_mode (d->vmode)
+ .require ();
+ rtvec sel_v = gen_rtvec_v (d->nelt, rperm);
+
+ /* Despite vshuf.* (except vshuf.b) needs sel == target, we cannot
+ load sel into target right now: here we are dealing with
+ pseudo regs, and target may be the same pseudo as one of op0
+ or op1. Then we'd clobber the input. Instead, we use a new
+ pseudo reg here. The reload pass will look at the constraint
+ of vshuf.* and move sel into target first if needed. */
+ rtx sel = force_reg (sel_mode,
+ gen_rtx_CONST_VECTOR (sel_mode, sel_v));
+
+ if (d->vmode == E_V16QImode)
+ emit_insn (gen_lsx_vshuf_b (target, op1, op0, sel));
else
- {
- sel = gen_rtx_CONST_VECTOR (d->vmode, gen_rtvec_v (d->nelt, rperm));
- emit_move_insn (d->target, sel);
- }
-
- switch (d->vmode)
- {
- case E_V2DFmode:
- emit_insn (gen_lsx_vshuf_d_f (target, target, op1, op0));
- break;
- case E_V2DImode:
- emit_insn (gen_lsx_vshuf_d (target, target, op1, op0));
- break;
- case E_V4SFmode:
- emit_insn (gen_lsx_vshuf_w_f (target, target, op1, op0));
- break;
- case E_V4SImode:
- emit_insn (gen_lsx_vshuf_w (target, target, op1, op0));
- break;
- case E_V8HImode:
- emit_insn (gen_lsx_vshuf_h (target, target, op1, op0));
- break;
- case E_V16QImode:
- emit_insn (gen_lsx_vshuf_b (target, op1, op0, target));
- break;
- default:
- break;
- }
+ emit_insn (gen_lsx_vshuf (d->vmode, target, sel, op1, op0));
return true;
}
@@ -9435,7 +9411,7 @@ loongarch_expand_vec_perm_const (struct expand_vec_perm_d *d)
bool flag = false;
unsigned int i;
unsigned char idx;
- rtx target, op0, op1, sel, tmp;
+ rtx target, op0, op1;
rtx rperm[MAX_VECT_LEN];
unsigned int remapped[MAX_VECT_LEN];
unsigned char perm2[MAX_VECT_LEN];
@@ -9615,63 +9591,23 @@ loongarch_expand_vec_perm_const (struct expand_vec_perm_d *d)
expand_perm_const_end:
if (flag)
{
- /* Copy selector vector from memory to vector register for later insn
- gen function.
- If vector's element in floating point value, we cannot fit
- selector argument into insn gen function directly, because of the
- insn template definition. As a solution, generate a integral mode
- subreg of target, then copy selector vector (that is in integral
- mode) to this subreg. */
- switch (d->vmode)
- {
- case E_V4DFmode:
- sel = gen_rtx_CONST_VECTOR (E_V4DImode, gen_rtvec_v (d->nelt,
- rperm));
- tmp = simplify_gen_subreg (E_V4DImode, d->target, d->vmode, 0);
- emit_move_insn (tmp, sel);
- break;
- case E_V8SFmode:
- sel = gen_rtx_CONST_VECTOR (E_V8SImode, gen_rtvec_v (d->nelt,
- rperm));
- tmp = simplify_gen_subreg (E_V8SImode, d->target, d->vmode, 0);
- emit_move_insn (tmp, sel);
- break;
- default:
- sel = gen_rtx_CONST_VECTOR (d->vmode, gen_rtvec_v (d->nelt,
- rperm));
- emit_move_insn (d->target, sel);
- break;
- }
-
target = d->target;
op0 = d->op0;
op1 = d->one_vector_p ? d->op0 : d->op1;
- /* We FINALLY can generate xvshuf.* insn. */
- switch (d->vmode)
- {
- case E_V4DFmode:
- emit_insn (gen_lasx_xvshuf_d_f (target, target, op1, op0));
- break;
- case E_V4DImode:
- emit_insn (gen_lasx_xvshuf_d (target, target, op1, op0));
- break;
- case E_V8SFmode:
- emit_insn (gen_lasx_xvshuf_w_f (target, target, op1, op0));
- break;
- case E_V8SImode:
- emit_insn (gen_lasx_xvshuf_w (target, target, op1, op0));
- break;
- case E_V16HImode:
- emit_insn (gen_lasx_xvshuf_h (target, target, op1, op0));
- break;
- case E_V32QImode:
- emit_insn (gen_lasx_xvshuf_b (target, op1, op0, target));
- break;
- default:
- gcc_unreachable ();
- break;
- }
+ machine_mode sel_mode = related_int_vector_mode (d->vmode)
+ .require ();
+ rtvec sel_v = gen_rtvec_v (d->nelt, rperm);
+
+ /* See the comment in loongarch_expand_lsx_shuffle for why
+ we don't simply use a SUBREG to pun target. */
+ rtx sel = force_reg (sel_mode,
+ gen_rtx_CONST_VECTOR (sel_mode, sel_v));
+
+ if (d->vmode == E_V32QImode)
+ emit_insn (gen_lasx_xvshuf_b (target, op1, op0, sel));
+ else
+ emit_insn (gen_lasx_xvshuf (d->vmode, target, sel, op1, op0));
return true;
}
diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
index 407c868..fb0236b 100644
--- a/gcc/config/loongarch/lsx.md
+++ b/gcc/config/loongarch/lsx.md
@@ -535,9 +535,9 @@
DONE;
})
-(define_insn "lsx_vshuf_<lsxfmt_f>"
+(define_insn "@lsx_vshuf_<lsxfmt_f>"
[(set (match_operand:LSX_DWH 0 "register_operand" "=f")
- (unspec:LSX_DWH [(match_operand:LSX_DWH 1 "register_operand" "0")
+ (unspec:LSX_DWH [(match_operand:<VIMODE> 1 "register_operand" "0")
(match_operand:LSX_DWH 2 "register_operand" "f")
(match_operand:LSX_DWH 3 "register_operand" "f")]
UNSPEC_LSX_VSHUF))]
diff --git a/gcc/config/s390/s390.cc b/gcc/config/s390/s390.cc
index 737b176..b5e636c 100644
--- a/gcc/config/s390/s390.cc
+++ b/gcc/config/s390/s390.cc
@@ -3862,7 +3862,21 @@ s390_register_move_cost (machine_mode mode,
{
/* On s390, copy between fprs and gprs is expensive. */
- /* It becomes somewhat faster having ldgr/lgdr. */
+ /* With vector extensions any GPR<->VR load up to 8 bytes is supported. */
+ if (TARGET_VX && GET_MODE_SIZE (mode) <= 8)
+ {
+ /* ldgr/vlvgg take one cycle and vlvg[bhf] take two cycles. */
+ if (reg_classes_intersect_p (from, GENERAL_REGS)
+ && reg_classes_intersect_p (to, VEC_REGS))
+ return GET_MODE_SIZE (mode) == 8 ? 1 : 2;
+ /* lgdr/vlgv[fg] take three cycles and vlgv[bh] take five cycles. */
+ if (reg_classes_intersect_p (to, GENERAL_REGS)
+ && reg_classes_intersect_p (from, VEC_REGS))
+ return GET_MODE_SIZE (mode) >= 4 ? 3 : 4;
+ }
+
+ /* Without vector extensions it still becomes somewhat faster having
+ ldgr/lgdr. */
if (TARGET_Z10 && GET_MODE_SIZE (mode) == 8)
{
/* ldgr is single cycle. */
diff --git a/gcc/config/s390/s390.md b/gcc/config/s390/s390.md
index 02bc149..1edbfde 100644
--- a/gcc/config/s390/s390.md
+++ b/gcc/config/s390/s390.md
@@ -121,6 +121,7 @@
; Test Data Class (TDC)
UNSPEC_TDC_INSN
+ UNSPEC_SIGNBIT
; Byte-wise Population Count
UNSPEC_POPCNT
@@ -513,7 +514,7 @@
S390_TDC_INFINITY
S390_TDC_NORMAL_BFP])
-(define_int_attr tdc_insn [(S390_TDC_SIGNBIT_SET "signbit")
+(define_int_attr tdc_insn [(S390_TDC_SIGNBIT_SET "signbit_tdc")
(S390_TDC_FINITE "isfinite")
(S390_TDC_INFINITY "isinf")
(S390_TDC_NORMAL_BFP "isnormal")
@@ -3782,6 +3783,86 @@
(unspec:SI [(reg:CCZ CC_REGNUM)] UNSPEC_CC_TO_INT))]
"TARGET_HARD_DFP")
+(define_mode_iterator SIGNBIT_SINGLE [(SF "TARGET_HARD_FLOAT")
+ (SD "TARGET_HARD_DFP")])
+(define_expand "signbit<mode>2"
+ [(match_operand:SI 0 "register_operand")
+ (match_operand:SIGNBIT_SINGLE 1 "nonimmediate_operand")]
+ ""
+{
+ if (TARGET_VX && TARGET_64BIT)
+ {
+ emit_insn (gen_rtx_SET (operands[0], simplify_gen_subreg (SImode, operands[1], <MODE>mode, 0)));
+ emit_insn (gen_rtx_SET (operands[0], gen_rtx_LSHIFTRT (SImode, operands[0], GEN_INT (31))));
+ }
+ else if (TARGET_Z10 && TARGET_64BIT)
+ emit_insn (gen_signbit<mode>2_z10 (operands[0], operands[1]));
+ else
+ emit_insn (gen_signbit_tdc<mode>2 (operands[0], force_reg (<MODE>mode, operands[1])));
+ DONE;
+})
+
+(define_insn "signbit<mode>2_z10"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (unspec:SI [(match_operand:SIGNBIT_SINGLE 1 "nonimmediate_operand" "fRT")]
+ UNSPEC_SIGNBIT))]
+ "TARGET_Z10 && TARGET_64BIT"
+ "#")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand")
+ (unspec:SI [(match_operand:SIGNBIT_SINGLE 1 "register_operand")]
+ UNSPEC_SIGNBIT))]
+ "TARGET_Z10 && TARGET_64BIT && reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (lshiftrt:DI (match_dup 0) (const_int 63)))]
+{
+ operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
+ operands[1] = gen_rtx_REG (DImode, REGNO (operands[1]));
+})
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand")
+ (unspec:SI [(match_operand:SIGNBIT_SINGLE 1 "memory_operand")]
+ UNSPEC_SIGNBIT))]
+ "TARGET_Z10 && TARGET_64BIT && reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0) (lshiftrt:SI (match_dup 0) (const_int 31)))]
+{
+ operands[1] = change_address (operands[1], SImode, 0);
+})
+
+(define_mode_iterator SIGNBIT_DBL_TETRA [(DF "TARGET_HARD_FLOAT")
+ (TF "TARGET_HARD_FLOAT")
+ (DD "TARGET_HARD_DFP")
+ (TD "TARGET_HARD_DFP")])
+(define_expand "signbit<mode>2"
+ [(match_operand:SI 0 "register_operand")
+ (match_operand:SIGNBIT_DBL_TETRA 1 "nonimmediate_operand")]
+ ""
+{
+ if (TARGET_Z10 && TARGET_64BIT)
+ {
+ rtx reg_di = gen_reg_rtx (DImode);
+ if (<MODE>mode == TFmode || <MODE>mode == TDmode)
+ {
+ rtx reg_ti = gen_reg_rtx (TImode);
+ emit_insn (gen_rtx_SET (reg_ti, simplify_gen_subreg (TImode, operands[1], <MODE>mode, 0)));
+ emit_insn (gen_rtx_SET (reg_di, simplify_gen_subreg (DImode, reg_ti, TImode, 0)));
+ }
+ else
+ emit_insn (gen_rtx_SET (reg_di, simplify_gen_subreg (DImode, operands[1], <MODE>mode, 0)));
+ emit_insn (gen_rtx_SET (reg_di, gen_rtx_LSHIFTRT (DImode, reg_di, GEN_INT (63))));
+ rtx subreg = gen_rtx_SUBREG (SImode, reg_di, 4);
+ SUBREG_PROMOTED_VAR_P (subreg) = 1;
+ SUBREG_PROMOTED_SET (subreg, SRP_SIGNED_AND_UNSIGNED);
+ emit_insn (gen_rtx_SET (operands[0], subreg));
+ }
+ else
+ emit_insn (gen_signbit_tdc<mode>2 (operands[0], force_reg (<MODE>mode, operands[1])));
+ DONE;
+})
+
; This extracts CC into a GPR properly shifted. The actual IPM
; instruction will be issued by reload. The constraint of operand 1
; forces reload to use a GPR. So reload will issue a movcc insn for
diff --git a/gcc/config/s390/vector.md b/gcc/config/s390/vector.md
index c63360f..12bbeb6 100644
--- a/gcc/config/s390/vector.md
+++ b/gcc/config/s390/vector.md
@@ -149,13 +149,13 @@
; The instruction suffix for integer instructions and instructions
; which do not care about whether it is floating point or integer.
-(define_mode_attr bhfgq[(V1QI "b") (V2QI "b") (V4QI "b") (V8QI "b") (V16QI "b")
- (V1HI "h") (V2HI "h") (V4HI "h") (V8HI "h")
- (V1SI "f") (V2SI "f") (V4SI "f")
- (V1DI "g") (V2DI "g")
+(define_mode_attr bhfgq[(V1QI "b") (V2QI "b") (V4QI "b") (V8QI "b") (V16QI "b") (QI "b")
+ (V1HI "h") (V2HI "h") (V4HI "h") (V8HI "h") (HI "h")
+ (V1SI "f") (V2SI "f") (V4SI "f") (SI "f")
+ (V1DI "g") (V2DI "g") (DI "g")
(V1TI "q") (TI "q")
- (V1SF "f") (V2SF "f") (V4SF "f")
- (V1DF "g") (V2DF "g")
+ (V1SF "f") (V2SF "f") (V4SF "f") (SF "f")
+ (V1DF "g") (V2DF "g") (DF "g")
(V1TF "q") (TF "q")])
; This is for vmalhw. It gets an 'w' attached to avoid confusion with
@@ -501,6 +501,54 @@
SIL,SIL,RI,RI,RRE,RRE,RIL,RR,RXY,RXY,RIL")])
+; Instructions vlgvb, vlgvh, vlgvf zero all remaining bits of a GPR, i.e.,
+; an implicit zero extend is done.
+
+(define_insn "*movdi<mode>_zero_extend_A"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (zero_extend:DI (match_operand:SINT 1 "register_operand" "v")))]
+ "TARGET_VX"
+ "vlgv<bhfgq>\t%0,%v1,0"
+ [(set_attr "op_type" "VRS")])
+
+(define_insn "*movsi<mode>_zero_extend_A"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (zero_extend:SI (match_operand:HQI 1 "register_operand" "v")))]
+ "TARGET_VX"
+ "vlgv<bhfgq>\t%0,%v1,0"
+ [(set_attr "op_type" "VRS")])
+
+(define_mode_iterator VLGV_DI [V1QI V2QI V4QI V8QI V16QI
+ V1HI V2HI V4HI V8HI
+ V1SI V2SI V4SI])
+(define_insn "*movdi<mode>_zero_extend_B"
+ [(set (match_operand:DI 0 "register_operand" "=d")
+ (zero_extend:DI (vec_select:<non_vec>
+ (match_operand:VLGV_DI 1 "register_operand" "v")
+ (parallel [(match_operand:SI 2 "const_int_operand" "n")]))))]
+ "TARGET_VX"
+{
+ operands[2] = GEN_INT (UINTVAL (operands[2]) & (GET_MODE_NUNITS (<MODE>mode) - 1));
+ return "vlgv<bhfgq>\t%0,%v1,%Y2";
+}
+ [(set_attr "op_type" "VRS")
+ (set_attr "mnemonic" "vlgv<bhfgq>")])
+
+(define_mode_iterator VLGV_SI [V1QI V2QI V4QI V8QI V16QI
+ V1HI V2HI V4HI V8HI])
+(define_insn "*movsi<mode>_zero_extend_B"
+ [(set (match_operand:SI 0 "register_operand" "=d")
+ (zero_extend:SI (vec_select:<non_vec>
+ (match_operand:VLGV_SI 1 "register_operand" "v")
+ (parallel [(match_operand:SI 2 "const_int_operand" "n")]))))]
+ "TARGET_VX"
+{
+ operands[2] = GEN_INT (UINTVAL (operands[2]) & (GET_MODE_NUNITS (<MODE>mode) - 1));
+ return "vlgv<bhfgq>\t%0,%v1,%Y2";
+}
+ [(set_attr "op_type" "VRS")
+ (set_attr "mnemonic" "vlgv<bhfgq>")])
+
; vec_load_lanes?
; vec_store_lanes?
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index a934377..e9da5a6 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,22 @@
+2025-07-16 Kwok Cheung Yeung <kcyeung@baylibre.com>
+
+ * pt.cc (tsubst_omp_clause_decl): Use OMP_ITERATOR_DECL_P.
+ * semantics.cc (handle_omp_array_sections): Likewise.
+ (finish_omp_clauses): Likewise.
+
+2025-07-16 Alfie Richards <alfie.richards@arm.com>
+
+ * class.cc (add_method): Remove argument.
+ * cp-tree.h (maybe_version_functions): Ditto.
+ * decl.cc (decls_match): Ditto.
+ (maybe_version_functions): Ditto.
+
+2025-07-16 Jeremy Rifkin <jeremy@rifkin.dev>
+
+ PR c/82134
+ * call.cc (build_call_a): Add suppress_warning
+ * cp-gimplify.cc (cp_gimplify_expr): Add suppress_warning
+
2025-07-15 Jason Merrill <jason@redhat.com>
PR c++/44677
diff --git a/gcc/cp/pt.cc b/gcc/cp/pt.cc
index ca8d61d..d63fa68 100644
--- a/gcc/cp/pt.cc
+++ b/gcc/cp/pt.cc
@@ -17986,9 +17986,7 @@ tsubst_omp_clause_decl (tree decl, tree args, tsubst_flags_t complain,
return decl;
/* Handle OpenMP iterators. */
- if (TREE_CODE (decl) == TREE_LIST
- && TREE_PURPOSE (decl)
- && TREE_CODE (TREE_PURPOSE (decl)) == TREE_VEC)
+ if (OMP_ITERATOR_DECL_P (decl))
{
tree ret;
if (iterator_cache[0] == TREE_PURPOSE (decl))
diff --git a/gcc/cp/semantics.cc b/gcc/cp/semantics.cc
index 640e1ea..86b0904 100644
--- a/gcc/cp/semantics.cc
+++ b/gcc/cp/semantics.cc
@@ -6302,9 +6302,7 @@ handle_omp_array_sections (tree &c, enum c_omp_region_type ort)
tree *tp = &OMP_CLAUSE_DECL (c);
if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_AFFINITY)
- && TREE_CODE (*tp) == TREE_LIST
- && TREE_PURPOSE (*tp)
- && TREE_CODE (TREE_PURPOSE (*tp)) == TREE_VEC)
+ && OMP_ITERATOR_DECL_P (*tp))
tp = &TREE_VALUE (*tp);
tree first = handle_omp_array_sections_1 (c, *tp, types,
maybe_zero_len, first_non_one,
@@ -8824,9 +8822,7 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
/* FALLTHRU */
case OMP_CLAUSE_AFFINITY:
t = OMP_CLAUSE_DECL (c);
- if (TREE_CODE (t) == TREE_LIST
- && TREE_PURPOSE (t)
- && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
+ if (OMP_ITERATOR_DECL_P (t))
{
if (TREE_PURPOSE (t) != last_iterators)
last_iterators_remove
diff --git a/gcc/diagnostic-state-to-dot.cc b/gcc/diagnostic-state-to-dot.cc
index 8195c11..90ceaee 100644
--- a/gcc/diagnostic-state-to-dot.cc
+++ b/gcc/diagnostic-state-to-dot.cc
@@ -78,9 +78,7 @@ class state_diagram : public dot::graph
public:
state_diagram (const diagnostics::digraphs::digraph &input_state_graph,
const logical_location_manager &logical_loc_mgr)
- : m_logical_loc_mgr (logical_loc_mgr),
- // m_next_id (0),
- m_show_tags (false)
+ : m_logical_loc_mgr (logical_loc_mgr)
{
// "node [shape=plaintext]\n"
{
@@ -541,8 +539,6 @@ private:
std::map<diagnostics::digraphs::node *, dot::node_id> m_src_node_to_port_id;
std::map<diagnostics::digraphs::node *, dot::node_id> m_dst_node_to_port_id;
-
- bool m_show_tags;
};
std::unique_ptr<dot::graph>
diff --git a/gcc/doc/sourcebuild.texi b/gcc/doc/sourcebuild.texi
index 85fb810..a919304 100644
--- a/gcc/doc/sourcebuild.texi
+++ b/gcc/doc/sourcebuild.texi
@@ -2379,6 +2379,9 @@ whether it does so by default).
@item aarch64_sve2p1_hw
AArch64 target that is able to generate and execute SVE2.1 code (regardless of
whether it does so by default).
+@item aarch64_sme_hw
+AArch64 target that is able to generate and execute SME code (regardless of
+whether it does so by default).
@item aarch64_fjcvtzs_hw
AArch64 target that is able to generate and execute armv8.3-a FJCVTZS
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index 43212b6..33e12f1 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,14 @@
+2025-07-16 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/121060
+ * interface.cc (matching_typebound_op): Defer determination of
+ specific procedure until resolution by returning NULL.
+
+2025-07-16 Steve Kargl <sgk@troutmask.apl.washington.edu>
+
+ * decl.cc (gfc_match_import): Correct minor whitespace snafu
+ and fix NULL pointer dereferences in two places.
+
2025-07-15 Kwok Cheung Yeung <kcyeung@baylibre.com>
PR fortran/104428
diff --git a/gcc/gimplify.cc b/gcc/gimplify.cc
index fa9890e..910314b 100644
--- a/gcc/gimplify.cc
+++ b/gcc/gimplify.cc
@@ -9527,9 +9527,7 @@ gimplify_omp_affinity (tree *list_p, gimple_seq *pre_p)
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_AFFINITY)
{
tree t = OMP_CLAUSE_DECL (c);
- if (TREE_CODE (t) == TREE_LIST
- && TREE_PURPOSE (t)
- && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
+ if (OMP_ITERATOR_DECL_P (t))
{
if (TREE_VALUE (t) == null_pointer_node)
continue;
@@ -9634,6 +9632,155 @@ gimplify_omp_affinity (tree *list_p, gimple_seq *pre_p)
return;
}
+/* Returns a tree expression containing the total iteration count of the
+ OpenMP iterator IT. */
+
+static tree
+compute_omp_iterator_count (tree it, gimple_seq *pre_p)
+{
+ tree tcnt = size_one_node;
+ for (; it; it = TREE_CHAIN (it))
+ {
+ if (gimplify_expr (&TREE_VEC_ELT (it, 1), pre_p, NULL,
+ is_gimple_val, fb_rvalue) == GS_ERROR
+ || gimplify_expr (&TREE_VEC_ELT (it, 2), pre_p, NULL,
+ is_gimple_val, fb_rvalue) == GS_ERROR
+ || gimplify_expr (&TREE_VEC_ELT (it, 3), pre_p, NULL,
+ is_gimple_val, fb_rvalue) == GS_ERROR
+ || (gimplify_expr (&TREE_VEC_ELT (it, 4), pre_p, NULL,
+ is_gimple_val, fb_rvalue) == GS_ERROR))
+ return NULL_TREE;
+ tree var = TREE_VEC_ELT (it, 0);
+ tree begin = TREE_VEC_ELT (it, 1);
+ tree end = TREE_VEC_ELT (it, 2);
+ tree step = TREE_VEC_ELT (it, 3);
+ tree orig_step = TREE_VEC_ELT (it, 4);
+ tree type = TREE_TYPE (var);
+ tree stype = TREE_TYPE (step);
+ location_t loc = DECL_SOURCE_LOCATION (var);
+ tree endmbegin;
+ /* Compute count for this iterator as
+ orig_step > 0
+ ? (begin < end ? (end - begin + (step - 1)) / step : 0)
+ : (begin > end ? (end - begin + (step + 1)) / step : 0)
+ and compute product of those for the entire clause. */
+ if (POINTER_TYPE_P (type))
+ endmbegin = fold_build2_loc (loc, POINTER_DIFF_EXPR, stype, end, begin);
+ else
+ endmbegin = fold_build2_loc (loc, MINUS_EXPR, type, end, begin);
+ tree stepm1 = fold_build2_loc (loc, MINUS_EXPR, stype, step,
+ build_int_cst (stype, 1));
+ tree stepp1 = fold_build2_loc (loc, PLUS_EXPR, stype, step,
+ build_int_cst (stype, 1));
+ tree pos = fold_build2_loc (loc, PLUS_EXPR, stype,
+ unshare_expr (endmbegin), stepm1);
+ pos = fold_build2_loc (loc, TRUNC_DIV_EXPR, stype, pos, step);
+ tree neg = fold_build2_loc (loc, PLUS_EXPR, stype, endmbegin, stepp1);
+ if (TYPE_UNSIGNED (stype))
+ {
+ neg = fold_build1_loc (loc, NEGATE_EXPR, stype, neg);
+ step = fold_build1_loc (loc, NEGATE_EXPR, stype, step);
+ }
+ neg = fold_build2_loc (loc, TRUNC_DIV_EXPR, stype, neg, step);
+ step = NULL_TREE;
+ tree cond = fold_build2_loc (loc, LT_EXPR, boolean_type_node, begin, end);
+ pos = fold_build3_loc (loc, COND_EXPR, stype, cond, pos,
+ build_int_cst (stype, 0));
+ cond = fold_build2_loc (loc, LT_EXPR, boolean_type_node, end, begin);
+ neg = fold_build3_loc (loc, COND_EXPR, stype, cond, neg,
+ build_int_cst (stype, 0));
+ tree osteptype = TREE_TYPE (orig_step);
+ cond = fold_build2_loc (loc, GT_EXPR, boolean_type_node, orig_step,
+ build_int_cst (osteptype, 0));
+ tree cnt = fold_build3_loc (loc, COND_EXPR, stype, cond, pos, neg);
+ cnt = fold_convert_loc (loc, sizetype, cnt);
+ if (gimplify_expr (&cnt, pre_p, NULL, is_gimple_val,
+ fb_rvalue) == GS_ERROR)
+ return NULL_TREE;
+ tcnt = size_binop_loc (loc, MULT_EXPR, tcnt, cnt);
+ }
+ if (gimplify_expr (&tcnt, pre_p, NULL, is_gimple_val, fb_rvalue) == GS_ERROR)
+ return NULL_TREE;
+
+ return tcnt;
+}
+
+/* Build loops iterating over the space defined by the OpenMP iterator IT.
+ Returns a pointer to the BIND_EXPR_BODY in the innermost loop body.
+ LAST_BIND is set to point to the BIND_EXPR containing the whole loop. */
+
+static tree *
+build_omp_iterator_loop (tree it, gimple_seq *pre_p, tree *last_bind)
+{
+ if (*last_bind)
+ gimplify_and_add (*last_bind, pre_p);
+ tree block = TREE_VEC_ELT (it, 5);
+ *last_bind = build3 (BIND_EXPR, void_type_node,
+ BLOCK_VARS (block), NULL, block);
+ TREE_SIDE_EFFECTS (*last_bind) = 1;
+ tree *p = &BIND_EXPR_BODY (*last_bind);
+ for (; it; it = TREE_CHAIN (it))
+ {
+ tree var = TREE_VEC_ELT (it, 0);
+ tree begin = TREE_VEC_ELT (it, 1);
+ tree end = TREE_VEC_ELT (it, 2);
+ tree step = TREE_VEC_ELT (it, 3);
+ tree orig_step = TREE_VEC_ELT (it, 4);
+ tree type = TREE_TYPE (var);
+ location_t loc = DECL_SOURCE_LOCATION (var);
+ /* Emit:
+ var = begin;
+ goto cond_label;
+ beg_label:
+ ...
+ var = var + step;
+ cond_label:
+ if (orig_step > 0) {
+ if (var < end) goto beg_label;
+ } else {
+ if (var > end) goto beg_label;
+ }
+ for each iterator, with inner iterators added to
+ the ... above. */
+ tree beg_label = create_artificial_label (loc);
+ tree cond_label = NULL_TREE;
+ tree tem = build2_loc (loc, MODIFY_EXPR, void_type_node, var, begin);
+ append_to_statement_list_force (tem, p);
+ tem = build_and_jump (&cond_label);
+ append_to_statement_list_force (tem, p);
+ tem = build1 (LABEL_EXPR, void_type_node, beg_label);
+ append_to_statement_list (tem, p);
+ tree bind = build3 (BIND_EXPR, void_type_node, NULL_TREE,
+ NULL_TREE, NULL_TREE);
+ TREE_SIDE_EFFECTS (bind) = 1;
+ SET_EXPR_LOCATION (bind, loc);
+ append_to_statement_list_force (bind, p);
+ if (POINTER_TYPE_P (type))
+ tem = build2_loc (loc, POINTER_PLUS_EXPR, type,
+ var, fold_convert_loc (loc, sizetype, step));
+ else
+ tem = build2_loc (loc, PLUS_EXPR, type, var, step);
+ tem = build2_loc (loc, MODIFY_EXPR, void_type_node, var, tem);
+ append_to_statement_list_force (tem, p);
+ tem = build1 (LABEL_EXPR, void_type_node, cond_label);
+ append_to_statement_list (tem, p);
+ tree cond = fold_build2_loc (loc, LT_EXPR, boolean_type_node, var, end);
+ tree pos = fold_build3_loc (loc, COND_EXPR, void_type_node, cond,
+ build_and_jump (&beg_label), void_node);
+ cond = fold_build2_loc (loc, GT_EXPR, boolean_type_node, var, end);
+ tree neg = fold_build3_loc (loc, COND_EXPR, void_type_node, cond,
+ build_and_jump (&beg_label), void_node);
+ tree osteptype = TREE_TYPE (orig_step);
+ cond = fold_build2_loc (loc, GT_EXPR, boolean_type_node, orig_step,
+ build_int_cst (osteptype, 0));
+ tem = fold_build3_loc (loc, COND_EXPR, void_type_node, cond, pos, neg);
+ append_to_statement_list_force (tem, p);
+ p = &BIND_EXPR_BODY (bind);
+ }
+
+ return p;
+}
+
/* If *LIST_P contains any OpenMP depend clauses with iterators,
lower all the depend clauses by populating corresponding depend
array. Returns 0 if there are no such depend clauses, or
@@ -9678,89 +9825,13 @@ gimplify_omp_depend (tree *list_p, gimple_seq *pre_p)
tree t = OMP_CLAUSE_DECL (c);
if (first_loc == UNKNOWN_LOCATION)
first_loc = OMP_CLAUSE_LOCATION (c);
- if (TREE_CODE (t) == TREE_LIST
- && TREE_PURPOSE (t)
- && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
+ if (OMP_ITERATOR_DECL_P (t))
{
if (TREE_PURPOSE (t) != last_iter)
{
- tree tcnt = size_one_node;
- for (tree it = TREE_PURPOSE (t); it; it = TREE_CHAIN (it))
- {
- if (gimplify_expr (&TREE_VEC_ELT (it, 1), pre_p, NULL,
- is_gimple_val, fb_rvalue) == GS_ERROR
- || gimplify_expr (&TREE_VEC_ELT (it, 2), pre_p, NULL,
- is_gimple_val, fb_rvalue) == GS_ERROR
- || gimplify_expr (&TREE_VEC_ELT (it, 3), pre_p, NULL,
- is_gimple_val, fb_rvalue) == GS_ERROR
- || (gimplify_expr (&TREE_VEC_ELT (it, 4), pre_p, NULL,
- is_gimple_val, fb_rvalue)
- == GS_ERROR))
- return 2;
- tree var = TREE_VEC_ELT (it, 0);
- tree begin = TREE_VEC_ELT (it, 1);
- tree end = TREE_VEC_ELT (it, 2);
- tree step = TREE_VEC_ELT (it, 3);
- tree orig_step = TREE_VEC_ELT (it, 4);
- tree type = TREE_TYPE (var);
- tree stype = TREE_TYPE (step);
- location_t loc = DECL_SOURCE_LOCATION (var);
- tree endmbegin;
- /* Compute count for this iterator as
- orig_step > 0
- ? (begin < end ? (end - begin + (step - 1)) / step : 0)
- : (begin > end ? (end - begin + (step + 1)) / step : 0)
- and compute product of those for the entire depend
- clause. */
- if (POINTER_TYPE_P (type))
- endmbegin = fold_build2_loc (loc, POINTER_DIFF_EXPR,
- stype, end, begin);
- else
- endmbegin = fold_build2_loc (loc, MINUS_EXPR, type,
- end, begin);
- tree stepm1 = fold_build2_loc (loc, MINUS_EXPR, stype,
- step,
- build_int_cst (stype, 1));
- tree stepp1 = fold_build2_loc (loc, PLUS_EXPR, stype, step,
- build_int_cst (stype, 1));
- tree pos = fold_build2_loc (loc, PLUS_EXPR, stype,
- unshare_expr (endmbegin),
- stepm1);
- pos = fold_build2_loc (loc, TRUNC_DIV_EXPR, stype,
- pos, step);
- tree neg = fold_build2_loc (loc, PLUS_EXPR, stype,
- endmbegin, stepp1);
- if (TYPE_UNSIGNED (stype))
- {
- neg = fold_build1_loc (loc, NEGATE_EXPR, stype, neg);
- step = fold_build1_loc (loc, NEGATE_EXPR, stype, step);
- }
- neg = fold_build2_loc (loc, TRUNC_DIV_EXPR, stype,
- neg, step);
- step = NULL_TREE;
- tree cond = fold_build2_loc (loc, LT_EXPR,
- boolean_type_node,
- begin, end);
- pos = fold_build3_loc (loc, COND_EXPR, stype, cond, pos,
- build_int_cst (stype, 0));
- cond = fold_build2_loc (loc, LT_EXPR, boolean_type_node,
- end, begin);
- neg = fold_build3_loc (loc, COND_EXPR, stype, cond, neg,
- build_int_cst (stype, 0));
- tree osteptype = TREE_TYPE (orig_step);
- cond = fold_build2_loc (loc, GT_EXPR, boolean_type_node,
- orig_step,
- build_int_cst (osteptype, 0));
- tree cnt = fold_build3_loc (loc, COND_EXPR, stype,
- cond, pos, neg);
- cnt = fold_convert_loc (loc, sizetype, cnt);
- if (gimplify_expr (&cnt, pre_p, NULL, is_gimple_val,
- fb_rvalue) == GS_ERROR)
- return 2;
- tcnt = size_binop_loc (loc, MULT_EXPR, tcnt, cnt);
- }
- if (gimplify_expr (&tcnt, pre_p, NULL, is_gimple_val,
- fb_rvalue) == GS_ERROR)
+ tree tcnt = compute_omp_iterator_count (TREE_PURPOSE (t),
+ pre_p);
+ if (!tcnt)
return 2;
last_iter = TREE_PURPOSE (t);
last_count = tcnt;
@@ -9914,91 +9985,13 @@ gimplify_omp_depend (tree *list_p, gimple_seq *pre_p)
gcc_unreachable ();
}
tree t = OMP_CLAUSE_DECL (c);
- if (TREE_CODE (t) == TREE_LIST
- && TREE_PURPOSE (t)
- && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
+ if (OMP_ITERATOR_DECL_P (t))
{
if (TREE_PURPOSE (t) != last_iter)
{
- if (last_bind)
- gimplify_and_add (last_bind, pre_p);
- tree block = TREE_VEC_ELT (TREE_PURPOSE (t), 5);
- last_bind = build3 (BIND_EXPR, void_type_node,
- BLOCK_VARS (block), NULL, block);
- TREE_SIDE_EFFECTS (last_bind) = 1;
+ last_body = build_omp_iterator_loop (TREE_PURPOSE (t), pre_p,
+ &last_bind);
SET_EXPR_LOCATION (last_bind, OMP_CLAUSE_LOCATION (c));
- tree *p = &BIND_EXPR_BODY (last_bind);
- for (tree it = TREE_PURPOSE (t); it; it = TREE_CHAIN (it))
- {
- tree var = TREE_VEC_ELT (it, 0);
- tree begin = TREE_VEC_ELT (it, 1);
- tree end = TREE_VEC_ELT (it, 2);
- tree step = TREE_VEC_ELT (it, 3);
- tree orig_step = TREE_VEC_ELT (it, 4);
- tree type = TREE_TYPE (var);
- location_t loc = DECL_SOURCE_LOCATION (var);
- /* Emit:
- var = begin;
- goto cond_label;
- beg_label:
- ...
- var = var + step;
- cond_label:
- if (orig_step > 0) {
- if (var < end) goto beg_label;
- } else {
- if (var > end) goto beg_label;
- }
- for each iterator, with inner iterators added to
- the ... above. */
- tree beg_label = create_artificial_label (loc);
- tree cond_label = NULL_TREE;
- tem = build2_loc (loc, MODIFY_EXPR, void_type_node,
- var, begin);
- append_to_statement_list_force (tem, p);
- tem = build_and_jump (&cond_label);
- append_to_statement_list_force (tem, p);
- tem = build1 (LABEL_EXPR, void_type_node, beg_label);
- append_to_statement_list (tem, p);
- tree bind = build3 (BIND_EXPR, void_type_node, NULL_TREE,
- NULL_TREE, NULL_TREE);
- TREE_SIDE_EFFECTS (bind) = 1;
- SET_EXPR_LOCATION (bind, loc);
- append_to_statement_list_force (bind, p);
- if (POINTER_TYPE_P (type))
- tem = build2_loc (loc, POINTER_PLUS_EXPR, type,
- var, fold_convert_loc (loc, sizetype,
- step));
- else
- tem = build2_loc (loc, PLUS_EXPR, type, var, step);
- tem = build2_loc (loc, MODIFY_EXPR, void_type_node,
- var, tem);
- append_to_statement_list_force (tem, p);
- tem = build1 (LABEL_EXPR, void_type_node, cond_label);
- append_to_statement_list (tem, p);
- tree cond = fold_build2_loc (loc, LT_EXPR,
- boolean_type_node,
- var, end);
- tree pos
- = fold_build3_loc (loc, COND_EXPR, void_type_node,
- cond, build_and_jump (&beg_label),
- void_node);
- cond = fold_build2_loc (loc, GT_EXPR, boolean_type_node,
- var, end);
- tree neg
- = fold_build3_loc (loc, COND_EXPR, void_type_node,
- cond, build_and_jump (&beg_label),
- void_node);
- tree osteptype = TREE_TYPE (orig_step);
- cond = fold_build2_loc (loc, GT_EXPR, boolean_type_node,
- orig_step,
- build_int_cst (osteptype, 0));
- tem = fold_build3_loc (loc, COND_EXPR, void_type_node,
- cond, pos, neg);
- append_to_statement_list_force (tem, p);
- p = &BIND_EXPR_BODY (bind);
- }
- last_body = p;
}
last_iter = TREE_PURPOSE (t);
if (TREE_CODE (TREE_VALUE (t)) == COMPOUND_EXPR)
diff --git a/gcc/internal-fn.cc b/gcc/internal-fn.cc
index 044bdc2..114f5a9 100644
--- a/gcc/internal-fn.cc
+++ b/gcc/internal-fn.cc
@@ -4548,6 +4548,33 @@ widening_fn_p (code_helper code)
}
}
+/* Return true if this CODE describes an internal_fn that returns a vector with
+ elements twice as wide as the element size of the input vectors and operates
+ on even/odd parts of the input. */
+
+bool
+widening_evenodd_fn_p (code_helper code)
+{
+ if (!code.is_fn_code ())
+ return false;
+
+ if (!internal_fn_p ((combined_fn) code))
+ return false;
+
+ internal_fn fn = as_internal_fn ((combined_fn) code);
+ switch (fn)
+ {
+ #define DEF_INTERNAL_WIDENING_OPTAB_FN(NAME, F, S, SO, UO, T) \
+ case IFN_##NAME##_EVEN: \
+ case IFN_##NAME##_ODD: \
+ return true;
+ #include "internal-fn.def"
+
+ default:
+ return false;
+ }
+}
+
/* Return true if IFN_SET_EDOM is supported. */
bool
diff --git a/gcc/internal-fn.h b/gcc/internal-fn.h
index afd4f8e..02731ea 100644
--- a/gcc/internal-fn.h
+++ b/gcc/internal-fn.h
@@ -219,6 +219,7 @@ extern bool commutative_ternary_fn_p (internal_fn);
extern int first_commutative_argument (internal_fn);
extern bool associative_binary_fn_p (internal_fn);
extern bool widening_fn_p (code_helper);
+extern bool widening_evenodd_fn_p (code_helper);
extern bool set_edom_supported_p (void);
diff --git a/gcc/pta-andersen.cc b/gcc/pta-andersen.cc
new file mode 100644
index 0000000..0253f05
--- /dev/null
+++ b/gcc/pta-andersen.cc
@@ -0,0 +1,2565 @@
+/* Andersen-style solver for tree based points-to analysis
+ Copyright (C) 2005-2025 Free Software Foundation, Inc.
+ Contributed by Daniel Berlin <dberlin@dberlin.org>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+
+#include "tree-ssa-structalias.h"
+#include "pta-andersen.h"
+
+/* During variable substitution and the offline version of indirect
+ cycle finding, we create nodes to represent dereferences and
+ address taken constraints. These represent where these start and
+ end. */
+#define FIRST_REF_NODE (varmap).length ()
+#define LAST_REF_NODE (FIRST_REF_NODE + (FIRST_REF_NODE - 1))
+
+#define EXECUTE_IF_IN_NONNULL_BITMAP(a, b, c, d) \
+ if (a) \
+ EXECUTE_IF_SET_IN_BITMAP (a, b, c, d)
+
+using namespace pointer_analysis;
+
+/* Used for predecessor bitmaps. */
+static bitmap_obstack predbitmap_obstack;
+
+/* Used for per-solver-iteration bitmaps. */
+static bitmap_obstack iteration_obstack;
+
+typedef struct constraint_graph *constraint_graph_t;
+
+/* The constraint graph is represented as an array of bitmaps
+ containing successor nodes. */
+
+struct constraint_graph
+{
+ /* Size of this graph, which may be different than the number of
+ nodes in the variable map. */
+ unsigned int size;
+
+ /* Explicit successors of each node. */
+ bitmap *succs;
+
+ /* Implicit predecessors of each node (Used for variable
+ substitution). */
+ bitmap *implicit_preds;
+
+ /* Explicit predecessors of each node (Used for variable substitution). */
+ bitmap *preds;
+
+ /* Indirect cycle representatives, or -1 if the node has no indirect
+ cycles. */
+ int *indirect_cycles;
+
+ /* Representative node for a node. rep[a] == a unless the node has
+ been unified. */
+ unsigned int *rep;
+
+ /* Equivalence class representative for a label. This is used for
+ variable substitution. */
+ int *eq_rep;
+
+ /* Pointer equivalence label for a node. All nodes with the same
+ pointer equivalence label can be unified together at some point
+ (either during constraint optimization or after the constraint
+ graph is built). */
+ unsigned int *pe;
+
+ /* Pointer equivalence representative for a label. This is used to
+ handle nodes that are pointer equivalent but not location
+ equivalent. We can unite these once the addressof constraints
+ are transformed into initial points-to sets. */
+ int *pe_rep;
+
+ /* Pointer equivalence label for each node, used during variable
+ substitution. */
+ unsigned int *pointer_label;
+
+ /* Location equivalence label for each node, used during location
+ equivalence finding. */
+ unsigned int *loc_label;
+
+ /* Pointed-by set for each node, used during location equivalence
+ finding. This is pointed-by rather than pointed-to, because it
+ is constructed using the predecessor graph. */
+ bitmap *pointed_by;
+
+ /* Points to sets for pointer equivalence. This is *not* the actual
+ points-to sets for nodes. */
+ bitmap *points_to;
+
+ /* Bitmap of nodes where the bit is set if the node is a direct
+ node. Used for variable substitution. */
+ sbitmap direct_nodes;
+
+ /* Bitmap of nodes where the bit is set if the node is address
+ taken. Used for variable substitution. */
+ bitmap address_taken;
+
+ /* Vector of complex constraints for each graph node. Complex
+ constraints are those involving dereferences or offsets that are
+ not 0. */
+ vec<constraint_t> *complex;
+};
+
+static constraint_graph_t graph;
+
+static void unify_nodes (constraint_graph_t, unsigned int, unsigned int, bool);
+
+
+/* Return the representative node for NODE, if NODE has been unioned
+ with another NODE.
+ This function performs path compression along the way to finding
+ the representative. */
+
+static unsigned int
+find (unsigned int node)
+{
+ gcc_checking_assert (node < graph->size);
+ if (graph->rep[node] != node)
+ return graph->rep[node] = find (graph->rep[node]);
+ return node;
+}
+
+/* Union the TO and FROM nodes to the TO nodes.
+ Note that at some point in the future, we may want to do
+ union-by-rank, in which case we are going to have to return the
+ node we unified to. */
+
+static bool
+unite (unsigned int to, unsigned int from)
+{
+ gcc_checking_assert (to < graph->size && from < graph->size);
+ if (to != from && graph->rep[from] != to)
+ {
+ graph->rep[from] = to;
+ return true;
+ }
+ return false;
+}
+
+/* Perform path compression for all nodes in the node representatives
+ union-find structure. */
+
+static void
+union_find_compress_all (void)
+{
+ unsigned int i;
+ for (i = 0; i < graph->size; i++)
+ find (i);
+}
+
+/* Print the constraint graph in dot format. */
+
+static void
+dump_constraint_graph (FILE *file)
+{
+ unsigned int i;
+
+ /* Only print the graph if it has already been initialized: */
+ if (!graph)
+ return;
+
+ /* Prints the header of the dot file: */
+ fprintf (file, "strict digraph {\n");
+ fprintf (file, " node [\n shape = box\n ]\n");
+ fprintf (file, " edge [\n fontsize = \"12\"\n ]\n");
+ fprintf (file, "\n // List of nodes and complex constraints in "
+ "the constraint graph:\n");
+
+ /* The next lines print the nodes in the graph together with the
+ complex constraints attached to them. */
+ for (i = 1; i < graph->size; i++)
+ {
+ if (i == FIRST_REF_NODE)
+ continue;
+ if (find (i) != i)
+ continue;
+ if (i < FIRST_REF_NODE)
+ fprintf (file, "\"%s\"", get_varinfo (i)->name);
+ else
+ fprintf (file, "\"*%s\"", get_varinfo (i - FIRST_REF_NODE)->name);
+ if (graph->complex[i].exists ())
+ {
+ unsigned j;
+ constraint_t c;
+ fprintf (file, " [label=\"\\N\\n");
+ for (j = 0; graph->complex[i].iterate (j, &c); ++j)
+ {
+ dump_constraint (file, c);
+ fprintf (file, "\\l");
+ }
+ fprintf (file, "\"]");
+ }
+ fprintf (file, ";\n");
+ }
+
+ /* Go over the edges. */
+ fprintf (file, "\n // Edges in the constraint graph:\n");
+ for (i = 1; i < graph->size; i++)
+ {
+ unsigned j;
+ bitmap_iterator bi;
+ if (find (i) != i)
+ continue;
+ EXECUTE_IF_IN_NONNULL_BITMAP (graph->succs[i], 0, j, bi)
+ {
+ unsigned to = find (j);
+ if (i == to)
+ continue;
+ if (i < FIRST_REF_NODE)
+ fprintf (file, "\"%s\"", get_varinfo (i)->name);
+ else
+ fprintf (file, "\"*%s\"", get_varinfo (i - FIRST_REF_NODE)->name);
+ fprintf (file, " -> ");
+ if (to < FIRST_REF_NODE)
+ fprintf (file, "\"%s\"", get_varinfo (to)->name);
+ else
+ fprintf (file, "\"*%s\"", get_varinfo (to - FIRST_REF_NODE)->name);
+ fprintf (file, ";\n");
+ }
+ }
+
+ /* Prints the tail of the dot file. */
+ fprintf (file, "}\n");
+}
+
+/* Print out the constraint graph to stderr. */
+
+DEBUG_FUNCTION void
+debug_constraint_graph (void)
+{
+ dump_constraint_graph (stderr);
+}
+
+
+/* SOLVER FUNCTIONS
+
+ The solver is a simple worklist solver, that works on the following
+ algorithm:
+
+ sbitmap changed_nodes = all zeroes;
+ changed_count = 0;
+ For each node that is not already collapsed:
+ changed_count++;
+ set bit in changed nodes
+
+ while (changed_count > 0)
+ {
+ compute topological ordering for constraint graph
+
+ find and collapse cycles in the constraint graph (updating
+ changed if necessary)
+
+ for each node (n) in the graph in topological order:
+ changed_count--;
+
+ Process each complex constraint associated with the node,
+ updating changed if necessary.
+
+ For each outgoing edge from n, propagate the solution from n to
+ the destination of the edge, updating changed as necessary.
+
+ } */
+
+/* Return true if two constraint expressions A and B are equal. */
+
+static bool
+constraint_expr_equal (struct constraint_expr a, struct constraint_expr b)
+{
+ return a.type == b.type && a.var == b.var && a.offset == b.offset;
+}
+
+/* Return true if constraint expression A is less than constraint expression
+ B. This is just arbitrary, but consistent, in order to give them an
+ ordering. */
+
+static bool
+constraint_expr_less (struct constraint_expr a, struct constraint_expr b)
+{
+ if (a.type == b.type)
+ {
+ if (a.var == b.var)
+ return a.offset < b.offset;
+ else
+ return a.var < b.var;
+ }
+ else
+ return a.type < b.type;
+}
+
+/* Return true if constraint A is less than constraint B. This is just
+ arbitrary, but consistent, in order to give them an ordering. */
+
+static bool
+constraint_less (const constraint_t &a, const constraint_t &b)
+{
+ if (constraint_expr_less (a->lhs, b->lhs))
+ return true;
+ else if (constraint_expr_less (b->lhs, a->lhs))
+ return false;
+ else
+ return constraint_expr_less (a->rhs, b->rhs);
+}
+
+/* Return true if two constraints A and B are equal. */
+
+static bool
+constraint_equal (const constraint &a, const constraint &b)
+{
+ return constraint_expr_equal (a.lhs, b.lhs)
+ && constraint_expr_equal (a.rhs, b.rhs);
+}
+
+/* Find a constraint LOOKFOR in the sorted constraint vector VEC. */
+
+static constraint_t
+constraint_vec_find (vec<constraint_t> vec,
+ constraint &lookfor)
+{
+ unsigned int place;
+ constraint_t found;
+
+ if (!vec.exists ())
+ return NULL;
+
+ place = vec.lower_bound (&lookfor, constraint_less);
+ if (place >= vec.length ())
+ return NULL;
+ found = vec[place];
+ if (!constraint_equal (*found, lookfor))
+ return NULL;
+ return found;
+}
+
+/* Union two constraint vectors, TO and FROM.  Put the result in TO.
+   Returns true if TO set is changed.  */
+
+static bool
+constraint_set_union (vec<constraint_t> *to,
+ vec<constraint_t> *from)
+{
+ int i;
+ constraint_t c;
+ bool any_change = false;
+
+ FOR_EACH_VEC_ELT (*from, i, c)
+ {
+ if (constraint_vec_find (*to, *c) == NULL)
+ {
+ unsigned int place = to->lower_bound (c, constraint_less);
+ to->safe_insert (place, c);
+ any_change = true;
+ }
+ }
+ return any_change;
+}
+
+/* Expands the solution in SET to all sub-fields of variables included. */
+
+static bitmap
+solution_set_expand (bitmap set, bitmap *expanded)
+{
+ bitmap_iterator bi;
+ unsigned j;
+
+ if (*expanded)
+ return *expanded;
+
+ *expanded = BITMAP_ALLOC (&iteration_obstack);
+
+ /* In a first pass expand variables, once for each head to avoid
+ quadratic behavior, to include all sub-fields. */
+ unsigned prev_head = 0;
+ EXECUTE_IF_SET_IN_BITMAP (set, 0, j, bi)
+ {
+ varinfo_t v = get_varinfo (j);
+ if (v->is_artificial_var
+ || v->is_full_var)
+ continue;
+ if (v->head != prev_head)
+ {
+ varinfo_t head = get_varinfo (v->head);
+ unsigned num = 1;
+ for (varinfo_t n = vi_next (head); n != NULL; n = vi_next (n))
+ {
+ if (n->id != head->id + num)
+ {
+ /* Usually sub variables are adjacent but since we
+ create pointed-to restrict representatives there
+ can be gaps as well. */
+ bitmap_set_range (*expanded, head->id, num);
+ head = n;
+ num = 1;
+ }
+ else
+ num++;
+ }
+
+ bitmap_set_range (*expanded, head->id, num);
+ prev_head = v->head;
+ }
+ }
+
+ /* And finally set the rest of the bits from SET in an efficient way. */
+ bitmap_ior_into (*expanded, set);
+
+ return *expanded;
+}
+
+/* Union solution sets TO and DELTA, and add INC to each member of DELTA in the
+ process. */
+
+static bool
+set_union_with_increment (bitmap to, bitmap delta, HOST_WIDE_INT inc,
+ bitmap *expanded_delta)
+{
+ bool changed = false;
+ bitmap_iterator bi;
+ unsigned int i;
+
+ /* If the solution of DELTA contains anything it is good enough to transfer
+ this to TO. */
+ if (bitmap_bit_p (delta, anything_id))
+ return bitmap_set_bit (to, anything_id);
+
+ /* If the offset is unknown we have to expand the solution to
+ all subfields. */
+ if (inc == UNKNOWN_OFFSET)
+ {
+ delta = solution_set_expand (delta, expanded_delta);
+ changed |= bitmap_ior_into (to, delta);
+ return changed;
+ }
+
+ /* For non-zero offset union the offsetted solution into the destination. */
+ EXECUTE_IF_SET_IN_BITMAP (delta, 0, i, bi)
+ {
+ varinfo_t vi = get_varinfo (i);
+
+ /* If this is a variable with just one field just set its bit
+ in the result. */
+ if (vi->is_artificial_var
+ || vi->is_unknown_size_var
+ || vi->is_full_var)
+ changed |= bitmap_set_bit (to, i);
+ else
+ {
+ HOST_WIDE_INT fieldoffset = vi->offset + inc;
+ unsigned HOST_WIDE_INT size = vi->size;
+
+ /* If the offset makes the pointer point to before the
+ variable use offset zero for the field lookup. */
+ if (fieldoffset < 0)
+ vi = get_varinfo (vi->head);
+ else
+ vi = first_or_preceding_vi_for_offset (vi, fieldoffset);
+
+ do
+ {
+ changed |= bitmap_set_bit (to, vi->id);
+ if (vi->is_full_var
+ || vi->next == 0)
+ break;
+
+ /* We have to include all fields that overlap the current field
+ shifted by inc. */
+ vi = vi_next (vi);
+ }
+ while (vi->offset < fieldoffset + size);
+ }
+ }
+
+ return changed;
+}
+
+/* Insert constraint C into the list of complex constraints for graph
+ node VAR. */
+
+static void
+insert_into_complex (constraint_graph_t graph,
+ unsigned int var, constraint_t c)
+{
+ vec<constraint_t> complex = graph->complex[var];
+ unsigned int place = complex.lower_bound (c, constraint_less);
+
+ /* Only insert constraints that do not already exist. */
+ if (place >= complex.length ()
+ || !constraint_equal (*c, *complex[place]))
+ graph->complex[var].safe_insert (place, c);
+}
+
+
+/* Condense two variable nodes into a single variable node, by moving
+ all associated info from FROM to TO. Returns true if TO node's
+ constraint set changes after the merge. */
+
+static bool
+merge_node_constraints (constraint_graph_t graph, unsigned int to,
+ unsigned int from)
+{
+ unsigned int i;
+ constraint_t c;
+ bool any_change = false;
+
+ gcc_checking_assert (find (from) == to);
+
+ /* Move all complex constraints from src node into to node. */
+ FOR_EACH_VEC_ELT (graph->complex[from], i, c)
+ {
+ /* In complex constraints for node FROM, we may have either
+ a = *FROM, and *FROM = a, or an offseted constraint which are
+ always added to the rhs node's constraints. */
+
+ if (c->rhs.type == DEREF)
+ c->rhs.var = to;
+ else if (c->lhs.type == DEREF)
+ c->lhs.var = to;
+ else
+ c->rhs.var = to;
+
+ }
+ any_change = constraint_set_union (&graph->complex[to],
+ &graph->complex[from]);
+ graph->complex[from].release ();
+ return any_change;
+}
+
+/* Remove edges involving NODE from GRAPH. */
+
+static void
+clear_edges_for_node (constraint_graph_t graph, unsigned int node)
+{
+ if (graph->succs[node])
+ BITMAP_FREE (graph->succs[node]);
+}
+
+/* Merge GRAPH nodes FROM and TO into node TO. */
+
+static void
+merge_graph_nodes (constraint_graph_t graph, unsigned int to,
+ unsigned int from)
+{
+ if (graph->indirect_cycles[from] != -1)
+ {
+ /* If we have indirect cycles with the from node, and we have
+ none on the to node, the to node has indirect cycles from the
+ from node now that they are unified.
+ If indirect cycles exist on both, unify the nodes that they
+ are in a cycle with, since we know they are in a cycle with
+ each other. */
+ if (graph->indirect_cycles[to] == -1)
+ graph->indirect_cycles[to] = graph->indirect_cycles[from];
+ }
+
+ /* Merge all the successor edges. */
+ if (graph->succs[from])
+ {
+ if (!graph->succs[to])
+ graph->succs[to] = BITMAP_ALLOC (&pta_obstack);
+ bitmap_ior_into (graph->succs[to],
+ graph->succs[from]);
+ }
+
+ clear_edges_for_node (graph, from);
+}
+
+
+/* Add an indirect graph edge to GRAPH, going from TO to FROM if
+ it doesn't exist in the graph already. */
+
+static void
+add_implicit_graph_edge (constraint_graph_t graph, unsigned int to,
+ unsigned int from)
+{
+ if (to == from)
+ return;
+
+ if (!graph->implicit_preds[to])
+ graph->implicit_preds[to] = BITMAP_ALLOC (&predbitmap_obstack);
+
+ if (bitmap_set_bit (graph->implicit_preds[to], from))
+ stats.num_implicit_edges++;
+}
+
+/* Add a predecessor graph edge to GRAPH, going from TO to FROM if
+   it doesn't exist in the graph already.  The edge is recorded in
+   the predecessor bitmap of TO.  */
+
+static void
+add_pred_graph_edge (constraint_graph_t graph, unsigned int to,
+ unsigned int from)
+{
+ if (!graph->preds[to])
+ graph->preds[to] = BITMAP_ALLOC (&predbitmap_obstack);
+ bitmap_set_bit (graph->preds[to], from);
+}
+
+/* Add a graph edge to GRAPH, going from FROM to TO if
+ it doesn't exist in the graph already.
+ Return false if the edge already existed, true otherwise. */
+
+static bool
+add_graph_edge (constraint_graph_t graph, unsigned int to,
+ unsigned int from)
+{
+ if (to == from)
+ {
+ return false;
+ }
+ else
+ {
+ bool r = false;
+
+ if (!graph->succs[from])
+ graph->succs[from] = BITMAP_ALLOC (&pta_obstack);
+
+ /* The graph solving process does not avoid "triangles", thus
+ there can be multiple paths from a node to another involving
+ intermediate other nodes. That causes extra copying which is
+ most difficult to avoid when the intermediate node is ESCAPED
+ because there are no edges added from ESCAPED. Avoid
+ adding the direct edge FROM -> TO when we have FROM -> ESCAPED
+ and TO contains ESCAPED.
+ ??? Note this is only a heuristic, it does not prevent the
+	 situation from occurring.  The heuristic helps PR38474 and
+	 PR99912 significantly. */
+ if (to < FIRST_REF_NODE
+ && bitmap_bit_p (graph->succs[from], find (escaped_id))
+ && bitmap_bit_p (get_varinfo (find (to))->solution, escaped_id))
+ {
+ stats.num_avoided_edges++;
+ return false;
+ }
+
+ if (bitmap_set_bit (graph->succs[from], to))
+ {
+ r = true;
+ if (to < FIRST_REF_NODE && from < FIRST_REF_NODE)
+ stats.num_edges++;
+ }
+ return r;
+ }
+}
+
+/* Initialize the constraint graph structure to contain SIZE nodes.  */
+
+static void
+init_graph (unsigned int size)
+{
+  unsigned int j;
+
+  bitmap_obstack_initialize (&predbitmap_obstack);
+
+  graph = XCNEW (struct constraint_graph);
+  graph->size = size;
+  /* Successor edge bitmaps; allocated lazily per node.  */
+  graph->succs = XCNEWVEC (bitmap, graph->size);
+  graph->indirect_cycles = XNEWVEC (int, graph->size);
+  /* Union-find representative of each node.  */
+  graph->rep = XNEWVEC (unsigned int, graph->size);
+  /* ??? Macros do not support template types with multiple arguments,
+     so we use a typedef to work around it.  */
+  typedef vec<constraint_t> vec_constraint_t_heap;
+  graph->complex = XCNEWVEC (vec_constraint_t_heap, size);
+  graph->pe = XCNEWVEC (unsigned int, graph->size);
+  graph->pe_rep = XNEWVEC (int, graph->size);
+
+  /* Initially each node is its own representative, with no pointer
+     equivalence leader and no indirect cycle recorded.  */
+  for (j = 0; j < graph->size; j++)
+    {
+      graph->rep[j] = j;
+      graph->pe_rep[j] = -1;
+      graph->indirect_cycles[j] = -1;
+    }
+}
+
+/* Build the constraint graph, adding only predecessor edges right now.  */
+
+static void
+build_pred_graph (void)
+{
+  int i;
+  constraint_t c;
+  unsigned int j;
+
+  graph->implicit_preds = XCNEWVEC (bitmap, graph->size);
+  graph->preds = XCNEWVEC (bitmap, graph->size);
+  graph->pointer_label = XCNEWVEC (unsigned int, graph->size);
+  graph->loc_label = XCNEWVEC (unsigned int, graph->size);
+  graph->pointed_by = XCNEWVEC (bitmap, graph->size);
+  graph->points_to = XCNEWVEC (bitmap, graph->size);
+  graph->eq_rep = XNEWVEC (int, graph->size);
+  graph->direct_nodes = sbitmap_alloc (graph->size);
+  graph->address_taken = BITMAP_ALLOC (&predbitmap_obstack);
+  bitmap_clear (graph->direct_nodes);
+
+  /* All non-special variables start out as direct nodes; constraints
+     processed below may demote them.  */
+  for (j = 1; j < FIRST_REF_NODE; j++)
+    {
+      if (!get_varinfo (j)->is_special_var)
+        bitmap_set_bit (graph->direct_nodes, j);
+    }
+
+  for (j = 0; j < graph->size; j++)
+    graph->eq_rep[j] = -1;
+
+  for (j = 0; j < varmap.length (); j++)
+    graph->indirect_cycles[j] = -1;
+
+  FOR_EACH_VEC_ELT (constraints, i, c)
+    {
+      struct constraint_expr lhs = c->lhs;
+      struct constraint_expr rhs = c->rhs;
+      unsigned int lhsvar = lhs.var;
+      unsigned int rhsvar = rhs.var;
+
+      if (lhs.type == DEREF)
+        {
+          /* *x = y.  */
+          if (rhs.offset == 0 && lhs.offset == 0 && rhs.type == SCALAR)
+            {
+              /* Stores through ANYTHING are funneled through the
+                 dedicated STOREDANYTHING node.  */
+              if (lhs.var == anything_id)
+                add_pred_graph_edge (graph, storedanything_id, rhsvar);
+              else
+                add_pred_graph_edge (graph, FIRST_REF_NODE + lhsvar, rhsvar);
+            }
+        }
+      else if (rhs.type == DEREF)
+        {
+          /* x = *y */
+          if (rhs.offset == 0 && lhs.offset == 0 && lhs.type == SCALAR)
+            add_pred_graph_edge (graph, lhsvar, FIRST_REF_NODE + rhsvar);
+          else
+            bitmap_clear_bit (graph->direct_nodes, lhsvar);
+        }
+      else if (rhs.type == ADDRESSOF)
+        {
+          varinfo_t v;
+
+          /* x = &y */
+          if (graph->points_to[lhsvar] == NULL)
+            graph->points_to[lhsvar] = BITMAP_ALLOC (&predbitmap_obstack);
+          bitmap_set_bit (graph->points_to[lhsvar], rhsvar);
+
+          if (graph->pointed_by[rhsvar] == NULL)
+            graph->pointed_by[rhsvar] = BITMAP_ALLOC (&predbitmap_obstack);
+          bitmap_set_bit (graph->pointed_by[rhsvar], lhsvar);
+
+          /* Implicitly, *x = y */
+          add_implicit_graph_edge (graph, FIRST_REF_NODE + lhsvar, rhsvar);
+
+          /* All related variables are no longer direct nodes.  */
+          bitmap_clear_bit (graph->direct_nodes, rhsvar);
+          v = get_varinfo (rhsvar);
+          if (!v->is_full_var)
+            {
+              /* Taking the address of one field makes every field of
+                 the underlying variable indirect.  */
+              v = get_varinfo (v->head);
+              do
+                {
+                  bitmap_clear_bit (graph->direct_nodes, v->id);
+                  v = vi_next (v);
+                }
+              while (v != NULL);
+            }
+          bitmap_set_bit (graph->address_taken, rhsvar);
+        }
+      else if (lhsvar > anything_id
+               && lhsvar != rhsvar && lhs.offset == 0 && rhs.offset == 0)
+        {
+          /* x = y */
+          add_pred_graph_edge (graph, lhsvar, rhsvar);
+          /* Implicitly, *x = *y */
+          add_implicit_graph_edge (graph, FIRST_REF_NODE + lhsvar,
+                                   FIRST_REF_NODE + rhsvar);
+        }
+      else if (lhs.offset != 0 || rhs.offset != 0)
+        {
+          /* Offsetted copies cannot be modeled as pred edges;
+             conservatively mark the affected node indirect.  */
+          if (rhs.offset != 0)
+            bitmap_clear_bit (graph->direct_nodes, lhs.var);
+          else if (lhs.offset != 0)
+            bitmap_clear_bit (graph->direct_nodes, rhs.var);
+        }
+    }
+}
+
+/* Build the constraint graph, adding successor edges.  */
+
+static void
+build_succ_graph (void)
+{
+  unsigned i, t;
+  constraint_t c;
+
+  FOR_EACH_VEC_ELT (constraints, i, c)
+    {
+      struct constraint_expr lhs;
+      struct constraint_expr rhs;
+      unsigned int lhsvar;
+      unsigned int rhsvar;
+
+      /* NOTE(review): some entries may be NULL at this point —
+         presumably removed by earlier constraint rewriting; skip
+         them.  */
+      if (!c)
+        continue;
+
+      lhs = c->lhs;
+      rhs = c->rhs;
+      /* Map variables to their current union-find representatives.  */
+      lhsvar = find (lhs.var);
+      rhsvar = find (rhs.var);
+
+      if (lhs.type == DEREF)
+        {
+          if (rhs.offset == 0 && lhs.offset == 0 && rhs.type == SCALAR)
+            {
+              /* *ANYTHING = y is funneled through STOREDANYTHING.  */
+              if (lhs.var == anything_id)
+                add_graph_edge (graph, storedanything_id, rhsvar);
+              else
+                add_graph_edge (graph, FIRST_REF_NODE + lhsvar, rhsvar);
+            }
+        }
+      else if (rhs.type == DEREF)
+        {
+          if (rhs.offset == 0 && lhs.offset == 0 && lhs.type == SCALAR)
+            add_graph_edge (graph, lhsvar, FIRST_REF_NODE + rhsvar);
+        }
+      else if (rhs.type == ADDRESSOF)
+        {
+          /* x = &y: seed the solution directly, no edge needed.  */
+          gcc_checking_assert (find (rhs.var) == rhs.var);
+          bitmap_set_bit (get_varinfo (lhsvar)->solution, rhsvar);
+        }
+      else if (lhsvar > anything_id
+               && lhsvar != rhsvar && lhs.offset == 0 && rhs.offset == 0)
+        {
+          add_graph_edge (graph, lhsvar, rhsvar);
+        }
+    }
+
+  /* Add edges from STOREDANYTHING to all nodes that can receive pointers.  */
+  t = find (storedanything_id);
+  for (i = integer_id + 1; i < FIRST_REF_NODE; ++i)
+    {
+      if (get_varinfo (i)->may_have_pointers)
+        add_graph_edge (graph, find (i), t);
+    }
+
+  /* Everything stored to ANYTHING also potentially escapes.  */
+  add_graph_edge (graph, find (escaped_id), t);
+}
+
+
+/* Changed variables on the last iteration.  */
+static bitmap changed;
+
+/* Strongly Connected Component visitation info.  */
+
+class scc_info
+{
+public:
+  scc_info (size_t size);
+  ~scc_info ();
+
+  /* Nodes visited by the current DFS walk.  */
+  auto_sbitmap visited;
+  /* Nodes whose SCC has already been identified and processed.  */
+  auto_sbitmap deleted;
+  /* DFS number assigned to each node.  */
+  unsigned int *dfs;
+  /* Per-node SCC representative mapping, used by the offline
+     condensation phase.  */
+  unsigned int *node_mapping;
+  /* Next DFS number to hand out.  */
+  int current_index;
+  /* Stack of non-root nodes (Nuutila's variant of Tarjan keeps only
+     those on the stack).  */
+  auto_vec<unsigned> scc_stack;
+};
+
+
+/* Recursive routine to find strongly connected components in GRAPH.
+   SI is the SCC info to store the information in, and N is the id of current
+   graph node we are processing.
+
+   This is Tarjan's strongly connected component finding algorithm, as
+   modified by Nuutila to keep only non-root nodes on the stack.
+   The algorithm can be found in "On finding the strongly
+   connected components in a directed graph" by Esko Nuutila and Eljas
+   Soisalon-Soininen, in Information Processing Letters volume 49,
+   number 1, pages 9-14.  */
+
+static void
+scc_visit (constraint_graph_t graph, class scc_info *si, unsigned int n)
+{
+  unsigned int i;
+  bitmap_iterator bi;
+  unsigned int my_dfs;
+
+  bitmap_set_bit (si->visited, n);
+  si->dfs[n] = si->current_index ++;
+  my_dfs = si->dfs[n];
+
+  /* Visit all the successors.  */
+  EXECUTE_IF_IN_NONNULL_BITMAP (graph->succs[n], 0, i, bi)
+    {
+      unsigned int w;
+
+      /* Successor bits above LAST_REF_NODE are not graph nodes.  */
+      if (i > LAST_REF_NODE)
+        break;
+
+      w = find (i);
+      if (bitmap_bit_p (si->deleted, w))
+        continue;
+
+      if (!bitmap_bit_p (si->visited, w))
+        scc_visit (graph, si, w);
+
+      /* Propagate the lowest reachable DFS number upward.  */
+      unsigned int t = find (w);
+      gcc_checking_assert (find (n) == n);
+      if (si->dfs[t] < si->dfs[n])
+        si->dfs[n] = si->dfs[t];
+    }
+
+  /* See if any components have been identified.  */
+  if (si->dfs[n] == my_dfs)
+    {
+      /* N is the root of an SCC; pop its members off the stack.  */
+      if (si->scc_stack.length () > 0
+          && si->dfs[si->scc_stack.last ()] >= my_dfs)
+        {
+          bitmap scc = BITMAP_ALLOC (NULL);
+          unsigned int lowest_node;
+          bitmap_iterator bi;
+
+          bitmap_set_bit (scc, n);
+
+          while (si->scc_stack.length () != 0
+                 && si->dfs[si->scc_stack.last ()] >= my_dfs)
+            {
+              unsigned int w = si->scc_stack.pop ();
+
+              bitmap_set_bit (scc, w);
+            }
+
+          lowest_node = bitmap_first_set_bit (scc);
+          gcc_assert (lowest_node < FIRST_REF_NODE);
+
+          /* Collapse the SCC nodes into a single node, and mark the
+             indirect cycles.  */
+          EXECUTE_IF_SET_IN_BITMAP (scc, 0, i, bi)
+            {
+              if (i < FIRST_REF_NODE)
+                {
+                  if (unite (lowest_node, i))
+                    unify_nodes (graph, lowest_node, i, false);
+                }
+              else
+                {
+                  /* REF nodes cannot be unified directly; record the
+                     cycle so it can be collapsed during solving.  */
+                  unite (lowest_node, i);
+                  graph->indirect_cycles[i - FIRST_REF_NODE] = lowest_node;
+                }
+            }
+          bitmap_set_bit (si->deleted, lowest_node);
+        }
+      else
+        bitmap_set_bit (si->deleted, n);
+    }
+  else
+    si->scc_stack.safe_push (n);
+}
+
+/* Unify node FROM into node TO, updating the changed count if
+   necessary when UPDATE_CHANGED is true.  TO must already be its own
+   representative and distinct from FROM.  */
+
+static void
+unify_nodes (constraint_graph_t graph, unsigned int to, unsigned int from,
+             bool update_changed)
+{
+  gcc_checking_assert (to != from && find (to) == to);
+
+  if (dump_file && (dump_flags & TDF_DETAILS))
+    fprintf (dump_file, "Unifying %s to %s\n",
+             get_varinfo (from)->name,
+             get_varinfo (to)->name);
+
+  if (update_changed)
+    stats.unified_vars_dynamic++;
+  else
+    stats.unified_vars_static++;
+
+  merge_graph_nodes (graph, to, from);
+  if (merge_node_constraints (graph, to, from))
+    {
+      if (update_changed)
+        bitmap_set_bit (changed, to);
+    }
+
+  /* Mark TO as changed if FROM was changed.  If TO was already marked
+     as changed, decrease the changed count.  */
+
+  if (update_changed
+      && bitmap_clear_bit (changed, from))
+    bitmap_set_bit (changed, to);
+  varinfo_t fromvi = get_varinfo (from);
+  if (fromvi->solution)
+    {
+      /* If the solution changes because of the merging, we need to mark
+         the variable as changed.  */
+      varinfo_t tovi = get_varinfo (to);
+      if (bitmap_ior_into (tovi->solution, fromvi->solution))
+        {
+          if (update_changed)
+            bitmap_set_bit (changed, to);
+        }
+
+      /* FROM's solutions are dead after the merge.  */
+      BITMAP_FREE (fromvi->solution);
+      if (fromvi->oldsolution)
+        BITMAP_FREE (fromvi->oldsolution);
+
+      if (stats.iterations > 0
+          && tovi->oldsolution)
+        BITMAP_FREE (tovi->oldsolution);
+    }
+  /* The merge may have created a self edge; remove it.  */
+  if (graph->succs[to])
+    bitmap_clear_bit (graph->succs[to], to);
+}
+
+/* Add a copy edge FROM -> TO, optimizing special cases.  Returns TRUE
+   if the solution of TO changed.  */
+
+static bool
+solve_add_graph_edge (constraint_graph_t graph, unsigned int to,
+                      unsigned int from)
+{
+  /* Adding edges from the special vars is pointless.
+     They don't have sets that can change.  */
+  if (get_varinfo (from)->is_special_var)
+    return bitmap_ior_into (get_varinfo (to)->solution,
+                            get_varinfo (from)->solution);
+  /* Merging the solution from ESCAPED needlessly increases
+     the set.  Use ESCAPED as representative instead.  */
+  else if (from == find (escaped_id))
+    return bitmap_set_bit (get_varinfo (to)->solution, escaped_id);
+  /* Only add a real edge when FROM may hold pointers and the edge
+     is new; otherwise there is nothing to propagate.  */
+  else if (get_varinfo (from)->may_have_pointers
+           && add_graph_edge (graph, to, from))
+    return bitmap_ior_into (get_varinfo (to)->solution,
+                            get_varinfo (from)->solution);
+  return false;
+}
+
+/* Process a constraint C that represents x = *(y + off), using DELTA as the
+   starting solution for y.  *EXPANDED_DELTA caches the expansion of
+   DELTA when the dereference offset is unknown.  */
+
+static void
+do_sd_constraint (constraint_graph_t graph, constraint_t c,
+                  bitmap delta, bitmap *expanded_delta)
+{
+  unsigned int lhs = c->lhs.var;
+  bool flag = false;
+  bitmap sol = get_varinfo (lhs)->solution;
+  unsigned int j;
+  bitmap_iterator bi;
+  HOST_WIDE_INT roffset = c->rhs.offset;
+
+  /* Our IL does not allow this.  */
+  gcc_checking_assert (c->lhs.offset == 0);
+
+  /* If the solution of Y contains anything it is good enough to transfer
+     this to the LHS.  */
+  if (bitmap_bit_p (delta, anything_id))
+    {
+      flag |= bitmap_set_bit (sol, anything_id);
+      goto done;
+    }
+
+  /* If we do not know at which offset the rhs is dereferenced compute
+     the reachability set of DELTA, conservatively assuming it is
+     dereferenced at all valid offsets.  */
+  if (roffset == UNKNOWN_OFFSET)
+    {
+      delta = solution_set_expand (delta, expanded_delta);
+      /* No further offset processing is necessary.  */
+      roffset = 0;
+    }
+
+  /* For each variable j in delta (Sol(y)), add
+     an edge in the graph from j to x, and union Sol(j) into Sol(x).  */
+  EXECUTE_IF_SET_IN_BITMAP (delta, 0, j, bi)
+    {
+      varinfo_t v = get_varinfo (j);
+      HOST_WIDE_INT fieldoffset = v->offset + roffset;
+      unsigned HOST_WIDE_INT size = v->size;
+      unsigned int t;
+
+      if (v->is_full_var)
+        ;
+      else if (roffset != 0)
+        {
+          /* A negative shifted offset falls back to the variable's
+             first field.  */
+          if (fieldoffset < 0)
+            v = get_varinfo (v->head);
+          else
+            v = first_or_preceding_vi_for_offset (v, fieldoffset);
+        }
+
+      /* We have to include all fields that overlap the current field
+         shifted by roffset.  */
+      do
+        {
+          t = find (v->id);
+
+          flag |= solve_add_graph_edge (graph, lhs, t);
+
+          if (v->is_full_var
+              || v->next == 0)
+            break;
+
+          v = vi_next (v);
+        }
+      while (v->offset < fieldoffset + size);
+    }
+
+done:
+  /* If the LHS solution changed, mark the var as changed.  */
+  if (flag)
+    bitmap_set_bit (changed, lhs);
+}
+
+/* Process a constraint C that represents *(x + off) = y using DELTA
+   as the starting solution for x.  *EXPANDED_DELTA caches the
+   expansion of DELTA when the store offset is unknown.  */
+
+static void
+do_ds_constraint (constraint_t c, bitmap delta, bitmap *expanded_delta)
+{
+  unsigned int rhs = c->rhs.var;
+  bitmap sol = get_varinfo (rhs)->solution;
+  unsigned int j;
+  bitmap_iterator bi;
+  HOST_WIDE_INT loff = c->lhs.offset;
+  bool escaped_p = false;
+
+  /* Our IL does not allow this.  */
+  gcc_checking_assert (c->rhs.offset == 0);
+
+  /* If the solution of y contains ANYTHING simply use the ANYTHING
+     solution.  This avoids needlessly increasing the points-to sets.  */
+  if (bitmap_bit_p (sol, anything_id))
+    sol = get_varinfo (find (anything_id))->solution;
+
+  /* If the solution for x contains ANYTHING we have to merge the
+     solution of y into all pointer variables which we do via
+     STOREDANYTHING.  */
+  if (bitmap_bit_p (delta, anything_id))
+    {
+      unsigned t = find (storedanything_id);
+      if (solve_add_graph_edge (graph, t, rhs))
+        bitmap_set_bit (changed, t);
+      return;
+    }
+
+  /* If we do not know at which offset the rhs is dereferenced compute
+     the reachability set of DELTA, conservatively assuming it is
+     dereferenced at all valid offsets.  */
+  if (loff == UNKNOWN_OFFSET)
+    {
+      delta = solution_set_expand (delta, expanded_delta);
+      loff = 0;
+    }
+
+  /* For each member j of delta (Sol(x)), add an edge from y to j and
+     union Sol(y) into Sol(j) */
+  EXECUTE_IF_SET_IN_BITMAP (delta, 0, j, bi)
+    {
+      varinfo_t v = get_varinfo (j);
+      unsigned int t;
+      HOST_WIDE_INT fieldoffset = v->offset + loff;
+      unsigned HOST_WIDE_INT size = v->size;
+
+      if (v->is_full_var)
+        ;
+      else if (loff != 0)
+        {
+          /* A negative shifted offset falls back to the variable's
+             first field.  */
+          if (fieldoffset < 0)
+            v = get_varinfo (v->head);
+          else
+            v = first_or_preceding_vi_for_offset (v, fieldoffset);
+        }
+
+      /* We have to include all fields that overlap the current field
+         shifted by loff.  */
+      do
+        {
+          if (v->may_have_pointers)
+            {
+              /* If v is a global variable then this is an escape point.  */
+              if (v->is_global_var
+                  && !escaped_p)
+                {
+                  t = find (escaped_id);
+                  if (add_graph_edge (graph, t, rhs)
+                      && bitmap_ior_into (get_varinfo (t)->solution, sol))
+                    bitmap_set_bit (changed, t);
+                  /* Enough to let rhs escape once.  */
+                  escaped_p = true;
+                }
+
+              if (v->is_special_var)
+                break;
+
+              t = find (v->id);
+
+              if (solve_add_graph_edge (graph, t, rhs))
+                bitmap_set_bit (changed, t);
+            }
+
+          if (v->is_full_var
+              || v->next == 0)
+            break;
+
+          v = vi_next (v);
+        }
+      while (v->offset < fieldoffset + size);
+    }
+}
+
+/* Handle a non-simple (simple meaning requires no iteration),
+   constraint (IE *x = &y, x = *y, *x = y, and x = y with offsets involved).
+   DELTA is the starting solution of the dereferenced variable;
+   *EXPANDED_DELTA caches its expansion for unknown offsets.  */
+
+static void
+do_complex_constraint (constraint_graph_t graph, constraint_t c, bitmap delta,
+                       bitmap *expanded_delta)
+{
+  if (c->lhs.type == DEREF)
+    {
+      if (c->rhs.type == ADDRESSOF)
+        {
+          /* *x = &y is rewritten away before solving.  */
+          gcc_unreachable ();
+        }
+      else
+        {
+          /* *x = y */
+          do_ds_constraint (c, delta, expanded_delta);
+        }
+    }
+  else if (c->rhs.type == DEREF)
+    {
+      /* x = *y */
+      if (!(get_varinfo (c->lhs.var)->is_special_var))
+        do_sd_constraint (graph, c, delta, expanded_delta);
+    }
+  else
+    {
+      /* x = y + off: union the RHS solution shifted by the offset.  */
+      bitmap tmp;
+      bool flag = false;
+
+      gcc_checking_assert (c->rhs.type == SCALAR && c->lhs.type == SCALAR
+                           && c->rhs.offset != 0 && c->lhs.offset == 0);
+      tmp = get_varinfo (c->lhs.var)->solution;
+
+      flag = set_union_with_increment (tmp, delta, c->rhs.offset,
+                                       expanded_delta);
+
+      if (flag)
+        bitmap_set_bit (changed, c->lhs.var);
+    }
+}
+
+/* Initialize a new SCC info structure with room for SIZE nodes.
+   Every node initially maps to itself and has DFS number 0.  */
+
+scc_info::scc_info (size_t size) :
+  visited (size), deleted (size), current_index (0), scc_stack (1)
+{
+  bitmap_clear (visited);
+  bitmap_clear (deleted);
+  node_mapping = XNEWVEC (unsigned int, size);
+  dfs = XCNEWVEC (unsigned int, size);
+
+  for (size_t i = 0; i < size; i++)
+    node_mapping[i] = i;
+}
+
+/* Free the heap-allocated arrays of an SCC info structure; the
+   auto_sbitmap/auto_vec members clean up themselves.  */
+
+scc_info::~scc_info ()
+{
+  free (node_mapping);
+  free (dfs);
+}
+
+
+/* Find indirect cycles in GRAPH that occur, using strongly connected
+   components, and note them in the indirect cycles map.
+
+   This technique comes from Ben Hardekopf and Calvin Lin,
+   "It Pays to be Lazy: Fast and Accurate Pointer Analysis for Millions of
+   Lines of Code", submitted to PLDI 2007.  */
+
+static void
+find_indirect_cycles (constraint_graph_t graph)
+{
+  unsigned int i;
+  unsigned int size = graph->size;
+  scc_info si (size);
+
+  /* Only visit current representatives; unified nodes are covered by
+     their representative.  */
+  for (i = 0; i < MIN (LAST_REF_NODE, size); i++)
+    if (!bitmap_bit_p (si.visited, i) && find (i) == i)
+      scc_visit (graph, &si, i);
+}
+
+/* Visit the graph in topological order starting at node N, and store the
+   order in TOPO_ORDER using VISITED to indicate visited nodes.
+   Post-order DFS: N is pushed after all its (explicit and implicit)
+   successors.  */
+
+static void
+topo_visit (constraint_graph_t graph, vec<unsigned> &topo_order,
+            sbitmap visited, unsigned int n)
+{
+  bitmap_iterator bi;
+  unsigned int j;
+
+  bitmap_set_bit (visited, n);
+
+  if (graph->succs[n])
+    EXECUTE_IF_SET_IN_BITMAP (graph->succs[n], 0, j, bi)
+      {
+        unsigned k = find (j);
+        if (!bitmap_bit_p (visited, k))
+          topo_visit (graph, topo_order, visited, k);
+      }
+
+  /* Also consider copy with offset complex constraints as implicit edges.  */
+  for (auto c : graph->complex[n])
+    {
+      /* Constraints are ordered so that SCALAR = SCALAR appear first.  */
+      if (c->lhs.type != SCALAR || c->rhs.type != SCALAR)
+        break;
+      gcc_checking_assert (c->rhs.var == n);
+      unsigned k = find (c->lhs.var);
+      if (!bitmap_bit_p (visited, k))
+        topo_visit (graph, topo_order, visited, k);
+    }
+
+  topo_order.quick_push (n);
+}
+
+/* Compute a topological ordering for GRAPH, and return the result.  */
+
+static auto_vec<unsigned>
+compute_topo_order (constraint_graph_t graph)
+{
+  unsigned int i;
+  unsigned int size = graph->size;
+
+  auto_sbitmap visited (size);
+  bitmap_clear (visited);
+
+  /* For the heuristic in add_graph_edge to work optimally make sure to
+     first visit the connected component of the graph containing
+     ESCAPED.  Do this by extracting the connected component
+     with ESCAPED and append that to all other components as solve_graph
+     pops from the order.  */
+  auto_vec<unsigned> tail (size);
+  topo_visit (graph, tail, visited, find (escaped_id));
+
+  auto_vec<unsigned> topo_order (size);
+
+  /* Visit the remaining representative nodes.  */
+  for (i = 0; i != size; ++i)
+    if (!bitmap_bit_p (visited, i) && find (i) == i)
+      topo_visit (graph, topo_order, visited, i);
+
+  /* Appending TAIL last means ESCAPED's component is processed first
+     by the popping consumer.  */
+  topo_order.splice (tail);
+  return topo_order;
+}
+
+/* Structure used for hash value numbering of pointer equivalence
+   classes.  */
+
+typedef struct equiv_class_label
+{
+  /* Cached hash of LABELS, used for table lookup.  */
+  hashval_t hashcode;
+  /* Assigned equivalence class id; 0 means not yet assigned.  */
+  unsigned int equivalence_class;
+  /* The set of labels this entry stands for.  */
+  bitmap labels;
+} *equiv_class_label_t;
+typedef const struct equiv_class_label *const_equiv_class_label_t;
+
+/* Equiv_class_label hashtable helpers.  */
+
+struct equiv_class_hasher : nofree_ptr_hash <equiv_class_label>
+{
+  static inline hashval_t hash (const equiv_class_label *);
+  static inline bool equal (const equiv_class_label *,
+                            const equiv_class_label *);
+};
+
+/* A hashtable for mapping a bitmap of labels->pointer equivalence
+   classes.  */
+static hash_table<equiv_class_hasher> *pointer_equiv_class_table;
+
+/* A hashtable for mapping a bitmap of labels->location equivalence
+   classes.  */
+static hash_table<equiv_class_hasher> *location_equiv_class_table;
+
+/* Hash function for a equiv_class_label_t: return the hash cached at
+   insertion time.  */
+
+inline hashval_t
+equiv_class_hasher::hash (const equiv_class_label *ecl)
+{
+  return ecl->hashcode;
+}
+
+/* Equality function for two equiv_class_label_t's: compare the cheap
+   cached hash first, then the label bitmaps themselves.  */
+
+inline bool
+equiv_class_hasher::equal (const equiv_class_label *eql1,
+                           const equiv_class_label *eql2)
+{
+  return (eql1->hashcode == eql2->hashcode
+          && bitmap_equal_p (eql1->labels, eql2->labels));
+}
+
+/* Obstack holding equiv_class_label entries.  */
+struct obstack equiv_class_obstack;
+
+/* Lookup an equivalence class in TABLE by the bitmap of LABELS it
+   contains (hashed with bitmap_hash).  If no entry exists, insert a
+   fresh one with equivalence class 0, sharing (not copying) LABELS.
+   Return the entry.  */
+
+static equiv_class_label *
+equiv_class_lookup_or_add (hash_table<equiv_class_hasher> *table,
+                           bitmap labels)
+{
+  equiv_class_label **slot;
+  equiv_class_label ecl;
+
+  ecl.labels = labels;
+  ecl.hashcode = bitmap_hash (labels);
+  slot = table->find_slot (&ecl, INSERT);
+  if (!*slot)
+    {
+      *slot = XOBNEW (&equiv_class_obstack, struct equiv_class_label);
+      (*slot)->labels = labels;
+      (*slot)->hashcode = ecl.hashcode;
+      /* Class 0 means "not yet assigned"; callers assign real ids.  */
+      (*slot)->equivalence_class = 0;
+    }
+
+  return *slot;
+}
+
+
+/* Perform offline variable substitution.
+
+ This is a worst case quadratic time way of identifying variables
+ that must have equivalent points-to sets, including those caused by
+ static cycles, and single entry subgraphs, in the constraint graph.
+
+ The technique is described in "Exploiting Pointer and Location
+ Equivalence to Optimize Pointer Analysis. In the 14th International
+ Static Analysis Symposium (SAS), August 2007." It is known as the
+ "HU" algorithm, and is equivalent to value numbering the collapsed
+ constraint graph including evaluating unions.
+
+ The general method of finding equivalence classes is as follows:
+ Add fake nodes (REF nodes) and edges for *a = b and a = *b constraints.
+ Initialize all non-REF nodes to be direct nodes.
+ For each constraint a = a U {b}, we set pts(a) = pts(a) u {fresh
+ variable}
+ For each constraint containing the dereference, we also do the same
+ thing.
+
+ We then compute SCC's in the graph and unify nodes in the same SCC,
+ including pts sets.
+
+ For each non-collapsed node x:
+ Visit all unvisited explicit incoming edges.
+ Ignoring all non-pointers, set pts(x) = Union of pts(a) for y
+ where y->x.
+ Lookup the equivalence class for pts(x).
+ If we found one, equivalence_class(x) = found class.
+ Otherwise, equivalence_class(x) = new class, and new_class is
+ added to the lookup table.
+
+ All direct nodes with the same equivalence class can be replaced
+ with a single representative node.
+ All unlabeled nodes (label == 0) are not pointers and all edges
+ involving them can be eliminated.
+ We perform these optimizations during rewrite_constraints
+
+ In addition to pointer equivalence class finding, we also perform
+ location equivalence class finding. This is the set of variables
+ that always appear together in points-to sets. We use this to
+ compress the size of the points-to sets. */
+
+/* Current maximum pointer equivalence class id (next id to assign;
+   0 is reserved for "non-pointer").  */
+static int pointer_equiv_class;
+
+/* Current maximum location equivalence class id (next id to assign).  */
+static int location_equiv_class;
+
+/* Recursive routine to find strongly connected components in GRAPH,
+   and label its nodes with DFS numbers.  Works on the predecessor
+   (and implicit predecessor) graph; SCC members are mapped to a
+   single representative via SI->node_mapping and their bitmaps
+   merged into it.  */
+
+static void
+condense_visit (constraint_graph_t graph, class scc_info *si, unsigned int n)
+{
+  unsigned int i;
+  bitmap_iterator bi;
+  unsigned int my_dfs;
+
+  gcc_checking_assert (si->node_mapping[n] == n);
+  bitmap_set_bit (si->visited, n);
+  si->dfs[n] = si->current_index ++;
+  my_dfs = si->dfs[n];
+
+  /* Visit all the successors.  */
+  EXECUTE_IF_IN_NONNULL_BITMAP (graph->preds[n], 0, i, bi)
+    {
+      unsigned int w = si->node_mapping[i];
+
+      if (bitmap_bit_p (si->deleted, w))
+        continue;
+
+      if (!bitmap_bit_p (si->visited, w))
+        condense_visit (graph, si, w);
+
+      unsigned int t = si->node_mapping[w];
+      gcc_checking_assert (si->node_mapping[n] == n);
+      if (si->dfs[t] < si->dfs[n])
+        si->dfs[n] = si->dfs[t];
+    }
+
+  /* Visit all the implicit predecessors.  */
+  EXECUTE_IF_IN_NONNULL_BITMAP (graph->implicit_preds[n], 0, i, bi)
+    {
+      unsigned int w = si->node_mapping[i];
+
+      if (bitmap_bit_p (si->deleted, w))
+        continue;
+
+      if (!bitmap_bit_p (si->visited, w))
+        condense_visit (graph, si, w);
+
+      unsigned int t = si->node_mapping[w];
+      gcc_assert (si->node_mapping[n] == n);
+      if (si->dfs[t] < si->dfs[n])
+        si->dfs[n] = si->dfs[t];
+    }
+
+  /* See if any components have been identified.  */
+  if (si->dfs[n] == my_dfs)
+    {
+      if (si->scc_stack.length () != 0
+          && si->dfs[si->scc_stack.last ()] >= my_dfs)
+        {
+          /* Find the first node of the SCC and do non-bitmap work.  */
+          bool direct_p = true;
+          unsigned first = si->scc_stack.length ();
+          do
+            {
+              --first;
+              unsigned int w = si->scc_stack[first];
+              si->node_mapping[w] = n;
+              /* An SCC containing any indirect node is indirect.  */
+              if (!bitmap_bit_p (graph->direct_nodes, w))
+                direct_p = false;
+            }
+          while (first > 0
+                 && si->dfs[si->scc_stack[first - 1]] >= my_dfs);
+          if (!direct_p)
+            bitmap_clear_bit (graph->direct_nodes, n);
+
+          /* Want to reduce to node n, push that first.  */
+          si->scc_stack.reserve (1);
+          si->scc_stack.quick_push (si->scc_stack[first]);
+          si->scc_stack[first] = n;
+
+          /* Merge the members' bitmaps pairwise, halving the range
+             each round, so each bitmap is merged O(log scc_size)
+             times rather than all into one.  */
+          unsigned scc_size = si->scc_stack.length () - first;
+          unsigned split = scc_size / 2;
+          unsigned carry = scc_size - split * 2;
+          while (split > 0)
+            {
+              for (unsigned i = 0; i < split; ++i)
+                {
+                  unsigned a = si->scc_stack[first + i];
+                  unsigned b = si->scc_stack[first + split + carry + i];
+
+                  /* Unify our nodes.  */
+                  if (graph->preds[b])
+                    {
+                      if (!graph->preds[a])
+                        std::swap (graph->preds[a], graph->preds[b]);
+                      else
+                        bitmap_ior_into_and_free (graph->preds[a],
+                                                  &graph->preds[b]);
+                    }
+                  if (graph->implicit_preds[b])
+                    {
+                      if (!graph->implicit_preds[a])
+                        std::swap (graph->implicit_preds[a],
+                                   graph->implicit_preds[b]);
+                      else
+                        bitmap_ior_into_and_free (graph->implicit_preds[a],
+                                                  &graph->implicit_preds[b]);
+                    }
+                  if (graph->points_to[b])
+                    {
+                      if (!graph->points_to[a])
+                        std::swap (graph->points_to[a], graph->points_to[b]);
+                      else
+                        bitmap_ior_into_and_free (graph->points_to[a],
+                                                  &graph->points_to[b]);
+                    }
+                }
+              unsigned remain = split + carry;
+              split = remain / 2;
+              carry = remain - split * 2;
+            }
+          /* Actually pop the SCC.  */
+          si->scc_stack.truncate (first);
+        }
+      bitmap_set_bit (si->deleted, n);
+    }
+  else
+    si->scc_stack.safe_push (n);
+}
+
+/* Label pointer equivalences.
+
+   This performs a value numbering of the constraint graph to
+   discover which variables will always have the same points-to sets
+   under the current set of constraints.
+
+   The way it value numbers is to store the set of points-to bits
+   generated by the constraints and graph edges.  This is just used as a
+   hash and equality comparison.  The *actual set of points-to bits* is
+   completely irrelevant, in that we don't care about being able to
+   extract them later.
+
+   The equality values (currently bitmaps) just have to satisfy a few
+   constraints, the main ones being:
+   1. The combining operation must be order independent.
+   2. The end result of a given set of operations must be unique iff the
+      combination of input values is unique
+   3. Hashable.  */
+
+static void
+label_visit (constraint_graph_t graph, class scc_info *si, unsigned int n)
+{
+  unsigned int i, first_pred;
+  bitmap_iterator bi;
+
+  bitmap_set_bit (si->visited, n);
+
+  /* Label and union our incoming edges's points to sets.  */
+  first_pred = -1U;
+  EXECUTE_IF_IN_NONNULL_BITMAP (graph->preds[n], 0, i, bi)
+    {
+      unsigned int w = si->node_mapping[i];
+      if (!bitmap_bit_p (si->visited, w))
+        label_visit (graph, si, w);
+
+      /* Skip unused edges.  */
+      if (w == n || graph->pointer_label[w] == 0)
+        continue;
+
+      if (graph->points_to[w])
+        {
+          if (!graph->points_to[n])
+            {
+              /* Defer allocating a bitmap for N until there is more
+                 than one non-empty predecessor to merge.  */
+              if (first_pred == -1U)
+                first_pred = w;
+              else
+                {
+                  graph->points_to[n] = BITMAP_ALLOC (&predbitmap_obstack);
+                  bitmap_ior (graph->points_to[n],
+                              graph->points_to[first_pred],
+                              graph->points_to[w]);
+                }
+            }
+          else
+            bitmap_ior_into (graph->points_to[n], graph->points_to[w]);
+        }
+    }
+
+  /* Indirect nodes get fresh variables and a new pointer equiv class.  */
+  if (!bitmap_bit_p (graph->direct_nodes, n))
+    {
+      if (!graph->points_to[n])
+        {
+          graph->points_to[n] = BITMAP_ALLOC (&predbitmap_obstack);
+          if (first_pred != -1U)
+            bitmap_copy (graph->points_to[n], graph->points_to[first_pred]);
+        }
+      /* The fresh variable is represented by the node's REF bit.  */
+      bitmap_set_bit (graph->points_to[n], FIRST_REF_NODE + n);
+      graph->pointer_label[n] = pointer_equiv_class++;
+      equiv_class_label_t ecl;
+      ecl = equiv_class_lookup_or_add (pointer_equiv_class_table,
+                                       graph->points_to[n]);
+      ecl->equivalence_class = graph->pointer_label[n];
+      return;
+    }
+
+  /* If there was only a single non-empty predecessor the pointer equiv
+     class is the same.  */
+  if (!graph->points_to[n])
+    {
+      if (first_pred != -1U)
+        {
+          graph->pointer_label[n] = graph->pointer_label[first_pred];
+          /* Share the predecessor's bitmap rather than copying.  */
+          graph->points_to[n] = graph->points_to[first_pred];
+        }
+      return;
+    }
+
+  if (!bitmap_empty_p (graph->points_to[n]))
+    {
+      equiv_class_label_t ecl;
+      ecl = equiv_class_lookup_or_add (pointer_equiv_class_table,
+                                       graph->points_to[n]);
+      if (ecl->equivalence_class == 0)
+        ecl->equivalence_class = pointer_equiv_class++;
+      else
+        {
+          /* Reuse the canonical bitmap of the existing class.  */
+          BITMAP_FREE (graph->points_to[n]);
+          graph->points_to[n] = ecl->labels;
+        }
+      graph->pointer_label[n] = ecl->equivalence_class;
+    }
+}
+
+/* Print the pred graph in dot format to FILE, using SI's node mapping
+   to show only SCC representatives.  */
+
+static void
+dump_pred_graph (class scc_info *si, FILE *file)
+{
+  unsigned int i;
+
+  /* Only print the graph if it has already been initialized:  */
+  if (!graph)
+    return;
+
+  /* Prints the header of the dot file:  */
+  fprintf (file, "strict digraph {\n");
+  fprintf (file, "  node [\n    shape = box\n  ]\n");
+  fprintf (file, "  edge [\n    fontsize = \"12\"\n  ]\n");
+  fprintf (file, "\n  // List of nodes and complex constraints in "
+           "the constraint graph:\n");
+
+  /* The next lines print the nodes in the graph together with the
+     complex constraints attached to them.  */
+  for (i = 1; i < graph->size; i++)
+    {
+      if (i == FIRST_REF_NODE)
+        continue;
+      /* Skip nodes collapsed into another representative.  */
+      if (si->node_mapping[i] != i)
+        continue;
+      if (i < FIRST_REF_NODE)
+        fprintf (file, "\"%s\"", get_varinfo (i)->name);
+      else
+        fprintf (file, "\"*%s\"", get_varinfo (i - FIRST_REF_NODE)->name);
+      if (graph->points_to[i]
+          && !bitmap_empty_p (graph->points_to[i]))
+        {
+          if (i < FIRST_REF_NODE)
+            fprintf (file, "[label=\"%s = {", get_varinfo (i)->name);
+          else
+            fprintf (file, "[label=\"*%s = {",
+                     get_varinfo (i - FIRST_REF_NODE)->name);
+          unsigned j;
+          bitmap_iterator bi;
+          EXECUTE_IF_SET_IN_BITMAP (graph->points_to[i], 0, j, bi)
+            fprintf (file, " %d", j);
+          fprintf (file, " }\"]");
+        }
+      fprintf (file, ";\n");
+    }
+
+  /* Go over the edges.  */
+  fprintf (file, "\n  // Edges in the constraint graph:\n");
+  for (i = 1; i < graph->size; i++)
+    {
+      unsigned j;
+      bitmap_iterator bi;
+      if (si->node_mapping[i] != i)
+        continue;
+      EXECUTE_IF_IN_NONNULL_BITMAP (graph->preds[i], 0, j, bi)
+        {
+          unsigned from = si->node_mapping[j];
+          if (from < FIRST_REF_NODE)
+            fprintf (file, "\"%s\"", get_varinfo (from)->name);
+          else
+            fprintf (file, "\"*%s\"",
+                     get_varinfo (from - FIRST_REF_NODE)->name);
+          fprintf (file, " -> ");
+          if (i < FIRST_REF_NODE)
+            fprintf (file, "\"%s\"", get_varinfo (i)->name);
+          else
+            fprintf (file, "\"*%s\"", get_varinfo (i - FIRST_REF_NODE)->name);
+          fprintf (file, ";\n");
+        }
+    }
+
+  /* Prints the tail of the dot file.  */
+  fprintf (file, "}\n");
+}
+
+/* Perform offline variable substitution, discovering equivalence
+ classes, and eliminating non-pointer variables. */
+
+static class scc_info *
+perform_var_substitution (constraint_graph_t graph)
+{
+ unsigned int i;
+ unsigned int size = graph->size;
+ scc_info *si = new scc_info (size);
+
+ bitmap_obstack_initialize (&iteration_obstack);
+ gcc_obstack_init (&equiv_class_obstack);
+ pointer_equiv_class_table = new hash_table<equiv_class_hasher> (511);
+ location_equiv_class_table
+ = new hash_table<equiv_class_hasher> (511);
+ pointer_equiv_class = 1;
+ location_equiv_class = 1;
+
+ /* Condense the nodes, which means to find SCC's, count incoming
+ predecessors, and unite nodes in SCC's. */
+ for (i = 1; i < FIRST_REF_NODE; i++)
+ if (!bitmap_bit_p (si->visited, si->node_mapping[i]))
+ condense_visit (graph, si, si->node_mapping[i]);
+
+ if (dump_file && (dump_flags & TDF_GRAPH))
+ {
+ fprintf (dump_file, "\n\n// The constraint graph before var-substitution "
+ "in dot format:\n");
+ dump_pred_graph (si, dump_file);
+ fprintf (dump_file, "\n\n");
+ }
+
+ bitmap_clear (si->visited);
+ /* Actually the label the nodes for pointer equivalences. */
+ for (i = 1; i < FIRST_REF_NODE; i++)
+ if (!bitmap_bit_p (si->visited, si->node_mapping[i]))
+ label_visit (graph, si, si->node_mapping[i]);
+
+ /* Calculate location equivalence labels. */
+ for (i = 1; i < FIRST_REF_NODE; i++)
+ {
+ bitmap pointed_by;
+ bitmap_iterator bi;
+ unsigned int j;
+
+ if (!graph->pointed_by[i])
+ continue;
+ pointed_by = BITMAP_ALLOC (&iteration_obstack);
+
+ /* Translate the pointed-by mapping for pointer equivalence
+ labels. */
+ EXECUTE_IF_SET_IN_BITMAP (graph->pointed_by[i], 0, j, bi)
+ {
+ bitmap_set_bit (pointed_by,
+ graph->pointer_label[si->node_mapping[j]]);
+ }
+ /* The original pointed_by is now dead. */
+ BITMAP_FREE (graph->pointed_by[i]);
+
+ /* Look up the location equivalence label if one exists, or make
+ one otherwise. */
+ equiv_class_label_t ecl;
+ ecl = equiv_class_lookup_or_add (location_equiv_class_table, pointed_by);
+ if (ecl->equivalence_class == 0)
+ ecl->equivalence_class = location_equiv_class++;
+ else
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file, "Found location equivalence for node %s\n",
+ get_varinfo (i)->name);
+ BITMAP_FREE (pointed_by);
+ }
+ graph->loc_label[i] = ecl->equivalence_class;
+
+ }
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ for (i = 1; i < FIRST_REF_NODE; i++)
+ {
+ unsigned j = si->node_mapping[i];
+ if (j != i)
+ {
+ fprintf (dump_file, "%s node id %d ",
+ bitmap_bit_p (graph->direct_nodes, i)
+ ? "Direct" : "Indirect", i);
+ if (i < FIRST_REF_NODE)
+ fprintf (dump_file, "\"%s\"", get_varinfo (i)->name);
+ else
+ fprintf (dump_file, "\"*%s\"",
+ get_varinfo (i - FIRST_REF_NODE)->name);
+ fprintf (dump_file, " mapped to SCC leader node id %d ", j);
+ if (j < FIRST_REF_NODE)
+ fprintf (dump_file, "\"%s\"\n", get_varinfo (j)->name);
+ else
+ fprintf (dump_file, "\"*%s\"\n",
+ get_varinfo (j - FIRST_REF_NODE)->name);
+ }
+ else
+ {
+ fprintf (dump_file,
+ "Equivalence classes for %s node id %d ",
+ bitmap_bit_p (graph->direct_nodes, i)
+ ? "direct" : "indirect", i);
+ if (i < FIRST_REF_NODE)
+ fprintf (dump_file, "\"%s\"", get_varinfo (i)->name);
+ else
+ fprintf (dump_file, "\"*%s\"",
+ get_varinfo (i - FIRST_REF_NODE)->name);
+ fprintf (dump_file,
+ ": pointer %d, location %d\n",
+ graph->pointer_label[i], graph->loc_label[i]);
+ }
+ }
+
+ /* Quickly eliminate our non-pointer variables. */
+
+ for (i = 1; i < FIRST_REF_NODE; i++)
+ {
+ unsigned int node = si->node_mapping[i];
+
+ if (graph->pointer_label[node] == 0)
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ fprintf (dump_file,
+ "%s is a non-pointer variable, eliminating edges.\n",
+ get_varinfo (node)->name);
+ stats.nonpointer_vars++;
+ clear_edges_for_node (graph, node);
+ }
+ }
+
+ return si;
+}
+
+/* Free information that was only necessary for variable
+ substitution. */
+
+static void
+free_var_substitution_info (class scc_info *si)
+{
+ delete si;
+ free (graph->pointer_label);
+ free (graph->loc_label);
+ free (graph->pointed_by);
+ free (graph->points_to);
+ free (graph->eq_rep);
+ sbitmap_free (graph->direct_nodes);
+ delete pointer_equiv_class_table;
+ pointer_equiv_class_table = NULL;
+ delete location_equiv_class_table;
+ location_equiv_class_table = NULL;
+ obstack_free (&equiv_class_obstack, NULL);
+ bitmap_obstack_release (&iteration_obstack);
+}
+
+/* Return an existing node that is equivalent to NODE, which has
+ equivalence class LABEL, if one exists. Return NODE otherwise. */
+
+static unsigned int
+find_equivalent_node (constraint_graph_t graph,
+ unsigned int node, unsigned int label)
+{
+ /* If the address version of this variable is unused, we can
+ substitute it for anything else with the same label.
+ Otherwise, we know the pointers are equivalent, but not the
+ locations, and we can unite them later. */
+
+ if (!bitmap_bit_p (graph->address_taken, node))
+ {
+ gcc_checking_assert (label < graph->size);
+
+ if (graph->eq_rep[label] != -1)
+ {
+ /* Unify the two variables since we know they are equivalent. */
+ if (unite (graph->eq_rep[label], node))
+ unify_nodes (graph, graph->eq_rep[label], node, false);
+ return graph->eq_rep[label];
+ }
+ else
+ {
+ graph->eq_rep[label] = node;
+ graph->pe_rep[label] = node;
+ }
+ }
+ else
+ {
+ gcc_checking_assert (label < graph->size);
+ graph->pe[node] = label;
+ if (graph->pe_rep[label] == -1)
+ graph->pe_rep[label] = node;
+ }
+
+ return node;
+}
+
+/* Unite pointer equivalent but not location equivalent nodes in
+ GRAPH. This may only be performed once variable substitution is
+ finished. */
+
+static void
+unite_pointer_equivalences (constraint_graph_t graph)
+{
+ unsigned int i;
+
+ /* Go through the pointer equivalences and unite them to their
+ representative, if they aren't already. */
+ for (i = 1; i < FIRST_REF_NODE; i++)
+ {
+ unsigned int label = graph->pe[i];
+ if (label)
+ {
+ int label_rep = graph->pe_rep[label];
+
+ if (label_rep == -1)
+ continue;
+
+ label_rep = find (label_rep);
+ if (label_rep >= 0 && unite (label_rep, find (i)))
+ unify_nodes (graph, label_rep, i, false);
+ }
+ }
+}
+
+/* Move complex constraints to the GRAPH nodes they belong to. */
+
+static void
+move_complex_constraints (constraint_graph_t graph)
+{
+ int i;
+ constraint_t c;
+
+ FOR_EACH_VEC_ELT (constraints, i, c)
+ {
+ if (c)
+ {
+ struct constraint_expr lhs = c->lhs;
+ struct constraint_expr rhs = c->rhs;
+
+ if (lhs.type == DEREF)
+ {
+ insert_into_complex (graph, lhs.var, c);
+ }
+ else if (rhs.type == DEREF)
+ {
+ if (!(get_varinfo (lhs.var)->is_special_var))
+ insert_into_complex (graph, rhs.var, c);
+ }
+ else if (rhs.type != ADDRESSOF && lhs.var > anything_id
+ && (lhs.offset != 0 || rhs.offset != 0))
+ {
+ insert_into_complex (graph, rhs.var, c);
+ }
+ }
+ }
+}
+
+/* Optimize and rewrite complex constraints while performing
+ collapsing of equivalent nodes. SI is the SCC_INFO that is the
+ result of perform_variable_substitution. */
+
+static void
+rewrite_constraints (constraint_graph_t graph,
+ class scc_info *si)
+{
+ int i;
+ constraint_t c;
+
+ if (flag_checking)
+ {
+ for (unsigned int j = 0; j < graph->size; j++)
+ gcc_assert (find (j) == j);
+ }
+
+ FOR_EACH_VEC_ELT (constraints, i, c)
+ {
+ struct constraint_expr lhs = c->lhs;
+ struct constraint_expr rhs = c->rhs;
+ unsigned int lhsvar = find (lhs.var);
+ unsigned int rhsvar = find (rhs.var);
+ unsigned int lhsnode, rhsnode;
+ unsigned int lhslabel, rhslabel;
+
+ lhsnode = si->node_mapping[lhsvar];
+ rhsnode = si->node_mapping[rhsvar];
+ lhslabel = graph->pointer_label[lhsnode];
+ rhslabel = graph->pointer_label[rhsnode];
+
+ /* See if it is really a non-pointer variable, and if so, ignore
+ the constraint. */
+ if (lhslabel == 0)
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+
+ fprintf (dump_file, "%s is a non-pointer variable, "
+ "ignoring constraint:",
+ get_varinfo (lhs.var)->name);
+ dump_constraint (dump_file, c);
+ fprintf (dump_file, "\n");
+ }
+ constraints[i] = NULL;
+ continue;
+ }
+
+ if (rhslabel == 0)
+ {
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+
+ fprintf (dump_file, "%s is a non-pointer variable, "
+ "ignoring constraint:",
+ get_varinfo (rhs.var)->name);
+ dump_constraint (dump_file, c);
+ fprintf (dump_file, "\n");
+ }
+ constraints[i] = NULL;
+ continue;
+ }
+
+ lhsvar = find_equivalent_node (graph, lhsvar, lhslabel);
+ rhsvar = find_equivalent_node (graph, rhsvar, rhslabel);
+ c->lhs.var = lhsvar;
+ c->rhs.var = rhsvar;
+ }
+}
+
+/* Eliminate indirect cycles involving NODE. Return true if NODE was
+ part of an SCC, false otherwise. */
+
+static bool
+eliminate_indirect_cycles (unsigned int node)
+{
+ if (graph->indirect_cycles[node] != -1
+ && !bitmap_empty_p (get_varinfo (node)->solution))
+ {
+ unsigned int i;
+ auto_vec<unsigned> queue;
+ int queuepos;
+ unsigned int to = find (graph->indirect_cycles[node]);
+ bitmap_iterator bi;
+
+ /* We can't touch the solution set and call unify_nodes
+ at the same time, because unify_nodes is going to do
+ bitmap unions into it. */
+
+ EXECUTE_IF_SET_IN_BITMAP (get_varinfo (node)->solution, 0, i, bi)
+ {
+ if (find (i) == i && i != to)
+ {
+ if (unite (to, i))
+ queue.safe_push (i);
+ }
+ }
+
+ for (queuepos = 0;
+ queue.iterate (queuepos, &i);
+ queuepos++)
+ {
+ unify_nodes (graph, to, i, true);
+ }
+ return true;
+ }
+ return false;
+}
+
+/* Solve the constraint graph GRAPH using our worklist solver.
+ This is based on the PW* family of solvers from the "Efficient Field
+ Sensitive Pointer Analysis for C" paper.
+ It works by iterating over all the graph nodes, processing the complex
+ constraints and propagating the copy constraints, until everything stops
+ changing. This corresponds to steps 6-8 in the solving list given above. */
+
+static void
+solve_graph (constraint_graph_t graph)
+{
+ unsigned int size = graph->size;
+ unsigned int i;
+ bitmap pts;
+
+ changed = BITMAP_ALLOC (NULL);
+
+ /* Mark all initial non-collapsed nodes as changed. */
+ for (i = 1; i < size; i++)
+ {
+ varinfo_t ivi = get_varinfo (i);
+ if (find (i) == i && !bitmap_empty_p (ivi->solution)
+ && ((graph->succs[i] && !bitmap_empty_p (graph->succs[i]))
+ || graph->complex[i].length () > 0))
+ bitmap_set_bit (changed, i);
+ }
+
+ /* Allocate a bitmap to be used to store the changed bits. */
+ pts = BITMAP_ALLOC (&pta_obstack);
+
+ while (!bitmap_empty_p (changed))
+ {
+ unsigned int i;
+ stats.iterations++;
+
+ bitmap_obstack_initialize (&iteration_obstack);
+
+ auto_vec<unsigned> topo_order = compute_topo_order (graph);
+ while (topo_order.length () != 0)
+ {
+ i = topo_order.pop ();
+
+ /* If this variable is not a representative, skip it. */
+ if (find (i) != i)
+ continue;
+
+ /* In certain indirect cycle cases, we may merge this
+ variable to another. */
+ if (eliminate_indirect_cycles (i) && find (i) != i)
+ continue;
+
+ /* If the node has changed, we need to process the
+ complex constraints and outgoing edges again. For complex
+ constraints that modify i itself, like the common group of
+ callarg = callarg + UNKNOWN;
+ callarg = *callarg + UNKNOWN;
+ *callarg = callescape;
+ make sure to iterate immediately because that maximizes
+ cache reuse and expands the graph quickest, leading to
+ better visitation order in the next iteration. */
+ while (bitmap_clear_bit (changed, i))
+ {
+ bitmap solution;
+ vec<constraint_t> &complex = graph->complex[i];
+ varinfo_t vi = get_varinfo (i);
+ bool solution_empty;
+
+ /* Compute the changed set of solution bits. If anything
+ is in the solution just propagate that. */
+ if (bitmap_bit_p (vi->solution, anything_id))
+ {
+ /* If anything is also in the old solution there is
+ nothing to do.
+ ??? But we shouldn't have ended up with "changed" set ... */
+ if (vi->oldsolution
+ && bitmap_bit_p (vi->oldsolution, anything_id))
+ break;
+ bitmap_copy (pts, get_varinfo (find (anything_id))->solution);
+ }
+ else if (vi->oldsolution)
+ bitmap_and_compl (pts, vi->solution, vi->oldsolution);
+ else
+ bitmap_copy (pts, vi->solution);
+
+ if (bitmap_empty_p (pts))
+ break;
+
+ if (vi->oldsolution)
+ bitmap_ior_into (vi->oldsolution, pts);
+ else
+ {
+ vi->oldsolution = BITMAP_ALLOC (&oldpta_obstack);
+ bitmap_copy (vi->oldsolution, pts);
+ }
+
+ solution = vi->solution;
+ solution_empty = bitmap_empty_p (solution);
+
+ /* Process the complex constraints. */
+ hash_set<constraint_t> *cvisited = nullptr;
+ if (flag_checking)
+ cvisited = new hash_set<constraint_t>;
+ bitmap expanded_pts = NULL;
+ for (unsigned j = 0; j < complex.length (); ++j)
+ {
+ constraint_t c = complex[j];
+ /* At unification time only the directly involved nodes
+ will get their complex constraints updated. Update
+ our complex constraints now but keep the constraint
+ vector sorted and clear of duplicates. Also make
+ sure to evaluate each prevailing constraint only once. */
+ unsigned int new_lhs = find (c->lhs.var);
+ unsigned int new_rhs = find (c->rhs.var);
+ if (c->lhs.var != new_lhs || c->rhs.var != new_rhs)
+ {
+ constraint tem = *c;
+ tem.lhs.var = new_lhs;
+ tem.rhs.var = new_rhs;
+ unsigned int place
+ = complex.lower_bound (&tem, constraint_less);
+ c->lhs.var = new_lhs;
+ c->rhs.var = new_rhs;
+ if (place != j)
+ {
+ complex.ordered_remove (j);
+ if (j < place)
+ --place;
+ if (place < complex.length ())
+ {
+ if (constraint_equal (*complex[place], *c))
+ {
+ j--;
+ continue;
+ }
+ else
+ complex.safe_insert (place, c);
+ }
+ else
+ complex.quick_push (c);
+ if (place > j)
+ {
+ j--;
+ continue;
+ }
+ }
+ }
+
+ /* The only complex constraint that can change our
+ solution to non-empty, given an empty solution,
+ is a constraint where the lhs side is receiving
+ some set from elsewhere. */
+ if (cvisited && cvisited->add (c))
+ gcc_unreachable ();
+ if (!solution_empty || c->lhs.type != DEREF)
+ do_complex_constraint (graph, c, pts, &expanded_pts);
+ }
+ if (cvisited)
+ {
+ /* When checking, verify the order of constraints is
+ maintained and each constraint is evaluated exactly
+ once. */
+ for (unsigned j = 1; j < complex.length (); ++j)
+ gcc_assert (constraint_less (complex[j-1], complex[j]));
+ gcc_assert (cvisited->elements () == complex.length ());
+ delete cvisited;
+ }
+ BITMAP_FREE (expanded_pts);
+
+ solution_empty = bitmap_empty_p (solution);
+
+ if (!solution_empty)
+ {
+ bitmap_iterator bi;
+ unsigned eff_escaped_id = find (escaped_id);
+ unsigned j;
+
+ /* Propagate solution to all successors. */
+ unsigned to_remove = ~0U;
+ EXECUTE_IF_IN_NONNULL_BITMAP (graph->succs[i],
+ 0, j, bi)
+ {
+ if (to_remove != ~0U)
+ {
+ bitmap_clear_bit (graph->succs[i], to_remove);
+ to_remove = ~0U;
+ }
+ unsigned int to = find (j);
+ if (to != j)
+ {
+ /* Update the succ graph, avoiding duplicate
+ work. */
+ to_remove = j;
+ if (! bitmap_set_bit (graph->succs[i], to))
+ continue;
+ /* We eventually end up processing 'to' twice
+ as it is undefined whether bitmap iteration
+ iterates over bits set during iteration.
+ Play safe instead of doing tricks. */
+ }
+ /* Don't try to propagate to ourselves. */
+ if (to == i)
+ {
+ to_remove = j;
+ continue;
+ }
+ /* Early node unification can lead to edges from
+ escaped - remove them. */
+ if (i == eff_escaped_id)
+ {
+ to_remove = j;
+ if (bitmap_set_bit (get_varinfo (to)->solution,
+ escaped_id))
+ bitmap_set_bit (changed, to);
+ continue;
+ }
+
+ if (bitmap_ior_into (get_varinfo (to)->solution, pts))
+ bitmap_set_bit (changed, to);
+ }
+ if (to_remove != ~0U)
+ bitmap_clear_bit (graph->succs[i], to_remove);
+ }
+ }
+ }
+ bitmap_obstack_release (&iteration_obstack);
+ }
+
+ BITMAP_FREE (pts);
+ BITMAP_FREE (changed);
+ bitmap_obstack_release (&oldpta_obstack);
+}
+
+void
+delete_graph (void)
+{
+ unsigned int i;
+ for (i = 0; i < graph->size; i++)
+ graph->complex[i].release ();
+ free (graph->complex);
+
+ free (graph->succs);
+ free (graph->pe);
+ free (graph->pe_rep);
+ free (graph->indirect_cycles);
+ /* We are not doing free (graph->rep) since the representatives mapping is
+ needed outside of the solver too. */
+ free (graph);
+}
+
+/* Remove the REF and ADDRESS edges from GRAPH, as well as all the
+ predecessor edges. */
+
+static void
+remove_preds_and_fake_succs (constraint_graph_t graph)
+{
+ unsigned int i;
+
+ /* Clear the implicit ref and address nodes from the successor
+ lists. */
+ for (i = 1; i < FIRST_REF_NODE; i++)
+ {
+ if (graph->succs[i])
+ bitmap_clear_range (graph->succs[i], FIRST_REF_NODE,
+ FIRST_REF_NODE * 2);
+ }
+
+ /* Free the successor list for the non-ref nodes. */
+ for (i = FIRST_REF_NODE + 1; i < graph->size; i++)
+ {
+ if (graph->succs[i])
+ BITMAP_FREE (graph->succs[i]);
+ }
+
+ /* Now reallocate the size of the successor list, and blow away
+ the predecessor bitmaps. */
+ graph->size = varmap.length ();
+ graph->succs = XRESIZEVEC (bitmap, graph->succs, graph->size);
+
+ free (graph->implicit_preds);
+ graph->implicit_preds = NULL;
+ free (graph->preds);
+ graph->preds = NULL;
+ bitmap_obstack_release (&predbitmap_obstack);
+}
+
+namespace pointer_analysis {
+
+/* Solve the constraint set. The entry function of the solver. */
+
+void
+solve_constraints (void)
+{
+ class scc_info *si;
+
+ /* Sort varinfos so that ones that cannot be pointed to are last.
+ This makes bitmaps more efficient. */
+ unsigned int *map = XNEWVEC (unsigned int, varmap.length ());
+ for (unsigned i = 0; i < integer_id + 1; ++i)
+ map[i] = i;
+ /* Start with address-taken vars, followed by not address-taken vars
+ to move vars never appearing in the points-to solution bitmaps last. */
+ unsigned j = integer_id + 1;
+ for (unsigned i = integer_id + 1; i < varmap.length (); ++i)
+ if (varmap[varmap[i]->head]->address_taken)
+ map[i] = j++;
+ for (unsigned i = integer_id + 1; i < varmap.length (); ++i)
+ if (! varmap[varmap[i]->head]->address_taken)
+ map[i] = j++;
+ /* Shuffle varmap according to map. */
+ for (unsigned i = integer_id + 1; i < varmap.length (); ++i)
+ {
+ while (map[varmap[i]->id] != i)
+ std::swap (varmap[i], varmap[map[varmap[i]->id]]);
+ gcc_assert (bitmap_empty_p (varmap[i]->solution));
+ varmap[i]->id = i;
+ varmap[i]->next = map[varmap[i]->next];
+ varmap[i]->head = map[varmap[i]->head];
+ }
+ /* Finally rewrite constraints. */
+ for (unsigned i = 0; i < constraints.length (); ++i)
+ {
+ constraints[i]->lhs.var = map[constraints[i]->lhs.var];
+ constraints[i]->rhs.var = map[constraints[i]->rhs.var];
+ }
+ free (map);
+
+ if (dump_file)
+ fprintf (dump_file,
+ "\nCollapsing static cycles and doing variable "
+ "substitution\n");
+
+ init_graph (varmap.length () * 2);
+
+ if (dump_file)
+ fprintf (dump_file, "Building predecessor graph\n");
+ build_pred_graph ();
+
+ if (dump_file)
+ fprintf (dump_file, "Detecting pointer and location "
+ "equivalences\n");
+ si = perform_var_substitution (graph);
+
+ if (dump_file)
+ fprintf (dump_file, "Rewriting constraints and unifying "
+ "variables\n");
+ rewrite_constraints (graph, si);
+
+ build_succ_graph ();
+
+ free_var_substitution_info (si);
+
+ /* Attach complex constraints to graph nodes. */
+ move_complex_constraints (graph);
+
+ if (dump_file)
+ fprintf (dump_file, "Uniting pointer but not location equivalent "
+ "variables\n");
+ unite_pointer_equivalences (graph);
+
+ if (dump_file)
+ fprintf (dump_file, "Finding indirect cycles\n");
+ find_indirect_cycles (graph);
+
+ /* Implicit nodes and predecessors are no longer necessary at this
+ point. */
+ remove_preds_and_fake_succs (graph);
+
+ if (dump_file && (dump_flags & TDF_GRAPH))
+ {
+ fprintf (dump_file, "\n\n// The constraint graph before solve-graph "
+ "in dot format:\n");
+ dump_constraint_graph (dump_file);
+ fprintf (dump_file, "\n\n");
+ }
+
+ if (dump_file)
+ fprintf (dump_file, "Solving graph\n");
+
+ solve_graph (graph);
+
+ if (dump_file && (dump_flags & TDF_GRAPH))
+ {
+ fprintf (dump_file, "\n\n// The constraint graph after solve-graph "
+ "in dot format:\n");
+ dump_constraint_graph (dump_file);
+ fprintf (dump_file, "\n\n");
+ }
+
+ /* The mapping node -> representative is one of the outputs of the
+ solver. */
+ union_find_compress_all ();
+ var_rep = graph->rep;
+
+ delete_graph ();
+}
+
+} // namespace pointer_analysis
diff --git a/gcc/pta-andersen.h b/gcc/pta-andersen.h
new file mode 100644
index 0000000..190a273
--- /dev/null
+++ b/gcc/pta-andersen.h
@@ -0,0 +1,31 @@
+/* Andersen-style solver for tree based points-to analysis
+ Copyright (C) 2005-2025 Free Software Foundation, Inc.
+ Contributed by Daniel Berlin <dberlin@dberlin.org>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef PTA_ANDERSEN_H
+#define PTA_ANDERSEN_H
+
+namespace pointer_analysis {
+
+/* Solve the constraint set. */
+void solve_constraints (void);
+
+} // namespace pointer_analysis
+
+#endif /* PTA_ANDERSEN_H */
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 728f66f..b2adab5 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,395 @@
+2025-07-16 Uros Bizjak <ubizjak@gmail.com>
+
+ PR target/121062
+ * gcc.target/i386/pr121062-1.c: New test.
+ * gcc.target/i386/pr121062-2.c: Likewise.
+ * gcc.target/i386/pr121062-3a.c: Likewise.
+ * gcc.target/i386/pr121062-3b.c: Likewise.
+ * gcc.target/i386/pr121062-3c.c: Likewise.
+ * gcc.target/i386/pr121062-4.c: Likewise.
+ * gcc.target/i386/pr121062-5.c: Likewise.
+ * gcc.target/i386/pr121062-6.c: Likewise.
+ * gcc.target/i386/pr121062-7.c: Likewise.
+
+2025-07-16 H.J. Lu <hjl.tools@gmail.com>
+
+ PR target/120881
+ PR testsuite/121078
+ * gcc.dg/20021014-1.c (dg-additional-options): Add -mfentry
+ -fno-pic only on gnu/x86 targets.
+ * gcc.dg/aru-2.c (dg-additional-options): Likewise.
+ * gcc.dg/nest.c (dg-additional-options): Likewise.
+ * gcc.dg/pr32450.c (dg-additional-options): Likewise.
+ * gcc.dg/pr43643.c (dg-additional-options): Likewise.
+ * gcc.target/i386/pr104447.c (dg-additional-options): Likewise.
+ * gcc.target/i386/pr113122-3.c (dg-additional-options): Likewise.
+ * gcc.target/i386/pr119386-1.c (dg-additional-options): Add
+ -mfentry only on gnu targets.
+ * gcc.target/i386/pr119386-2.c (dg-additional-options): Likewise.
+
+2025-07-16 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/121049
+ * gcc.dg/vect/pr121049.c: New testcase.
+
+2025-07-16 Andrew Pinski <quic_apinski@quicinc.com>
+
+ PR tree-optimization/119920
+ PR tree-optimization/112324
+ PR tree-optimization/110015
+ * gcc.dg/vect/vect-reduc-cond-1.c: New test.
+ * gcc.dg/vect/vect-reduc-cond-2.c: New test.
+ * gcc.dg/vect/vect-reduc-cond-3.c: New test.
+
+2025-07-16 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/121116
+ * gcc.dg/torture/pr121116.c: New testcase.
+
+2025-07-16 Spencer Abson <spencer.abson@arm.com>
+
+ PR target/117850
+ * gcc.target/aarch64/simd/vabal_combine.c: Removed. This is
+ covered by fold_to_highpart_1.c
+ * gcc.target/aarch64/simd/fold_to_highpart_1.c: New test.
+ * gcc.target/aarch64/simd/fold_to_highpart_2.c: Likewise.
+ * gcc.target/aarch64/simd/fold_to_highpart_3.c: Likewise.
+ * gcc.target/aarch64/simd/fold_to_highpart_4.c: Likewise.
+ * gcc.target/aarch64/simd/fold_to_highpart_5.c: Likewise.
+ * gcc.target/aarch64/simd/fold_to_highpart_6.c: Likewise.
+
+2025-07-16 Alfie Richards <alfie.richards@arm.com>
+
+ * g++.dg/warn/Wformat-gcc_diag-1.C: Add string_slice "%B" format tests.
+
+2025-07-16 Robin Dapp <rdapp@ventanamicro.com>
+
+ PR middle-end/121065
+ * gcc.target/arm/pr121065.c: New test.
+
+2025-07-16 Robin Dapp <rdapp@ventanamicro.com>
+
+ PR target/120297
+ * gcc.target/riscv/rvv/pr120297.c: New test.
+
+2025-07-16 Kyrylo Tkachov <ktkachov@nvidia.com>
+
+ * gcc.target/aarch64/sve2/eon_bsl2n.c: New test.
+
+2025-07-16 Kyrylo Tkachov <ktkachov@nvidia.com>
+
+ * gcc.target/aarch64/sve2/nbsl_nor_nand_neon.c: New test.
+
+2025-07-16 Paul Thomas <pault@gcc.gnu.org>
+
+ PR fortran/121060
+ * gfortran.dg/associate_75.f90: New test.
+
+2025-07-16 Steve Kargl <sgk@troutmask.apl.washington.edu>
+
+ * gfortran.dg/import13.f90: New test.
+
+2025-07-16 Jeremy Rifkin <jeremy@rifkin.dev>
+
+ PR c/82134
+ * c-c++-common/attr-warn-unused-result-2.c: New test.
+
+2025-07-16 Haochen Jiang <haochen.jiang@intel.com>
+
+ * gcc.target/i386/amxavx512-cvtrowd2ps-2.c: Add -mavx512fp16 to
+ use FP16 related intrins for convert.
+ * gcc.target/i386/amxavx512-cvtrowps2bf16-2.c: Ditto.
+ * gcc.target/i386/amxavx512-cvtrowps2ph-2.c: Ditto.
+ * gcc.target/i386/amxavx512-movrow-2.c: Ditto.
+
+2025-07-16 Pan Li <pan2.li@intel.com>
+
+ * gcc.target/riscv/sat/sat_s_add-1-i16.c: Remove function-body
+ check and add no jmp label asm check.
+ * gcc.target/riscv/sat/sat_s_add-1-i32.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_add-1-i64.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_add-1-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_add-2-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_add-2-i32.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_add-2-i64.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_add-2-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_add-3-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_add-3-i32.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_add-3-i64.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_add-3-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_add-4-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_add-4-i32.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_add-4-i64.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_add-4-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_add_imm-1-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_add_imm-1-i32.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_add_imm-1-i64.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_add_imm-1-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_add_imm-2-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_add_imm-2-i32.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_add_imm-2-i64.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_add_imm-2-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_sub-1-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_sub-1-i32.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_sub-1-i64.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_sub-1-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_sub-2-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_sub-2-i32.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_sub-2-i64.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_sub-2-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_sub-3-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_sub-3-i32.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_sub-3-i64.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_sub-3-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_sub-4-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_sub-4-i32.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_sub-4-i64.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_sub-4-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-1-i16-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-1-i32-to-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-1-i32-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-1-i64-to-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-1-i64-to-i32.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-1-i64-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-2-i16-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-2-i32-to-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-2-i32-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-2-i64-to-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-2-i64-to-i32.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-2-i64-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-3-i16-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-3-i32-to-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-3-i32-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-3-i64-to-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-3-i64-to-i32.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-3-i64-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-4-i16-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-4-i32-to-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-4-i32-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-4-i64-to-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-4-i64-to-i32.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-4-i64-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-5-i16-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-5-i32-to-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-5-i32-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-5-i64-to-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-5-i64-to-i32.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-5-i64-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-6-i16-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-6-i32-to-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-6-i32-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-6-i64-to-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-6-i64-to-i32.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-6-i64-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-7-i16-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-7-i32-to-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-7-i32-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-7-i64-to-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-7-i64-to-i32.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-7-i64-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-8-i16-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-8-i32-to-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-8-i32-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-8-i64-to-i16.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-8-i64-to-i32.c: Ditto.
+ * gcc.target/riscv/sat/sat_s_trunc-8-i64-to-i8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-1-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-1-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-1-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-1-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-2-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-2-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-2-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-2-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-3-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-3-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-3-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-3-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-4-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-4-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-4-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-4-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-5-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-5-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-5-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-5-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-6-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-6-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-6-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-6-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-7-u16-from-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-7-u16-from-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-7-u32-from-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-7-u8-from-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-7-u8-from-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add-7-u8-from-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add_imm-1-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add_imm-1-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add_imm-1-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add_imm-1-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add_imm-2-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add_imm-2-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add_imm-2-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add_imm-2-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add_imm-3-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add_imm-3-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add_imm-3-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add_imm-3-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add_imm-4-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add_imm-4-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add_imm-4-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_add_imm-4-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_mul-1-u16-from-u128.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_mul-1-u32-from-u128.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_mul-1-u64-from-u128.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_mul-1-u8-from-u128.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-1-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-1-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-1-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-1-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-10-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-10-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-10-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-10-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-11-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-11-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-11-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-11-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-12-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-12-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-12-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-12-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-2-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-2-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-2-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-2-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-3-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-3-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-3-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-3-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-4-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-4-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-4-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-4-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-5-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-5-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-5-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-5-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-6-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-6-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-6-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-6-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-7-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-7-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-7-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-7-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-8-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-8-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-8-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-8-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-9-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-9-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-9-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub-9-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-1-u16-1.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-1-u16-2.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-1-u16-3.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-1-u16-4.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-1-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-1-u32-1.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-1-u32-2.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-1-u32-3.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-1-u32-4.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-1-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-1-u64-1.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-1-u64-2.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-1-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-1-u8-1.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-1-u8-2.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-1-u8-3.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-1-u8-4.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-1-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-2-u16-1.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-2-u16-2.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-2-u16-3.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-2-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-2-u32-1.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-2-u32-2.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-2-u32-3.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-2-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-2-u64-1.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-2-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-2-u8-1.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-2-u8-2.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-2-u8-3.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-2-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-3-u16-1.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-3-u16-2.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-3-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-3-u32-1.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-3-u32-2.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-3-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-3-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-3-u8-1.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-3-u8-2.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-3-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-4-u16-1.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-4-u16-2.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-4-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-4-u32-1.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-4-u32-2.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-4-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-4-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-4-u8-1.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-4-u8-2.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_sub_imm-4-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-1-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-1-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-1-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-1-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-2-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-2-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-2-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-2-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-3-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-3-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-3-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-3-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-4-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-4-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-4-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-4-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-5-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-5-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-5-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-5-u8.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-6-u16.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-6-u32.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-6-u64.c: Ditto.
+ * gcc.target/riscv/sat/sat_u_trunc-6-u8.c: Ditto.
+
+2025-07-16 Pan Li <pan2.li@intel.com>
+
+ * gcc.target/riscv/rvv/autovec/avg.h: Add int128 type when
+ xlen == 64.
+ * gcc.target/riscv/rvv/autovec/avg_ceil-run-1-i16-from-i32.c:
+ Suppress __int128 warning for run test.
+ * gcc.target/riscv/rvv/autovec/avg_ceil-run-1-i16-from-i64.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/avg_ceil-run-1-i32-from-i64.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/avg_ceil-run-1-i8-from-i16.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/avg_ceil-run-1-i8-from-i32.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/avg_ceil-run-1-i8-from-i64.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/avg_data.h: Fix one incorrect
+ test data.
+ * gcc.target/riscv/rvv/autovec/avg_floor-run-1-i16-from-i32.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/avg_floor-run-1-i16-from-i64.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/avg_floor-run-1-i32-from-i64.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/avg_floor-run-1-i8-from-i16.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/avg_floor-run-1-i8-from-i32.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/avg_floor-run-1-i8-from-i64.c: Ditto.
+ * gcc.target/riscv/rvv/autovec/avg_floor-1-i64-from-i128.c: New test.
+ * gcc.target/riscv/rvv/autovec/avg_floor-run-1-i64-from-i128.c: New test.
+
2025-07-15 David Malcolm <dmalcolm@redhat.com>
PR sarif-replay/120792
diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-array29.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-array29.C
new file mode 100644
index 0000000..714d050
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-array29.C
@@ -0,0 +1,13 @@
+// PR c++/87097
+// { dg-do compile { target c++11 } }
+
+struct A {
+ constexpr A() : data() {}
+ struct X { int n; };
+ X data[2];
+};
+
+static_assert((A(), true), "");
+static_assert(A().data[0].n == 0, "");
+static_assert(A().data[1].n == 0, "");
+constexpr A x;
diff --git a/gcc/testsuite/g++.target/aarch64/sme/sme_throw_1.C b/gcc/testsuite/g++.target/aarch64/sme/sme_throw_1.C
new file mode 100644
index 0000000..76f1e8b
--- /dev/null
+++ b/gcc/testsuite/g++.target/aarch64/sme/sme_throw_1.C
@@ -0,0 +1,55 @@
+/* { dg-do run { target { aarch64*-linux-gnu* && aarch64_sme_hw } } } */
+
+#include <signal.h>
+#include <arm_sme.h>
+
+static bool caught;
+
+[[gnu::noipa]] void thrower(int)
+{
+ throw 1;
+}
+
+[[gnu::noipa]] void bar()
+{
+ *(volatile int *)0 = 0;
+}
+
+[[gnu::noipa]] void foo()
+{
+ try
+ {
+ bar();
+ }
+ catch (int)
+ {
+ caught = true;
+ }
+}
+
+__arm_new("za") __arm_locally_streaming void sme_user()
+{
+ svbool_t all = svptrue_b8();
+ for (unsigned int i = 0; i < svcntb(); ++i)
+ {
+ svint8_t expected = svindex_s8(i + 1, i);
+ svwrite_hor_za8_m(0, i, all, expected);
+ }
+ foo();
+ for (unsigned int i = 0; i < svcntb(); ++i)
+ {
+ svint8_t expected = svindex_s8(i + 1, i);
+ svint8_t actual = svread_hor_za8_m(svdup_s8(0), all, 0, i);
+ if (svptest_any(all, svcmpne(all, expected, actual)))
+ __builtin_abort();
+ }
+ if (!caught)
+ __builtin_abort();
+}
+
+int main()
+{
+ signal(SIGSEGV, thrower);
+ sme_user();
+ return 0;
+}
diff --git a/gcc/testsuite/g++.target/aarch64/sme/sme_throw_2.C b/gcc/testsuite/g++.target/aarch64/sme/sme_throw_2.C
new file mode 100644
index 0000000..db3197c
--- /dev/null
+++ b/gcc/testsuite/g++.target/aarch64/sme/sme_throw_2.C
@@ -0,0 +1,4 @@
+/* { dg-do run { target { aarch64*-linux-gnu* && aarch64_sme_hw } } } */
+/* { dg-options "-O2" } */
+
+#include "sme_throw_1.C"
diff --git a/gcc/testsuite/gcc.dg/20021014-1.c b/gcc/testsuite/gcc.dg/20021014-1.c
index f5f6fcf..ee5d459 100644
--- a/gcc/testsuite/gcc.dg/20021014-1.c
+++ b/gcc/testsuite/gcc.dg/20021014-1.c
@@ -2,7 +2,7 @@
/* { dg-require-profiling "-p" } */
/* { dg-options "-O2 -p" } */
/* { dg-options "-O2 -p -static" { target hppa*-*-hpux* } } */
-/* { dg-additional-options "-mfentry -fno-pic" { target i?86-*-* x86_64-*-* } } */
+/* { dg-additional-options "-mfentry -fno-pic" { target i?86-*-gnu* x86_64-*-gnu* } } */
/* { dg-error "profiler" "No profiler support" { target xstormy16-*-* } 0 } */
/* { dg-message "" "consider using `-pg' instead of `-p' with gprof(1)" { target *-*-freebsd* } 0 } */
diff --git a/gcc/testsuite/gcc.dg/aru-2.c b/gcc/testsuite/gcc.dg/aru-2.c
index 102ece1..61898de 100644
--- a/gcc/testsuite/gcc.dg/aru-2.c
+++ b/gcc/testsuite/gcc.dg/aru-2.c
@@ -1,7 +1,7 @@
/* { dg-do run } */
/* { dg-require-profiling "-pg" } */
/* { dg-options "-O2 -pg" } */
-/* { dg-additional-options "-mfentry -fno-pic" { target i?86-*-* x86_64-*-* } } */
+/* { dg-additional-options "-mfentry -fno-pic" { target i?86-*-gnu* x86_64-*-gnu* } } */
static int __attribute__((noinline))
bar (int x)
diff --git a/gcc/testsuite/gcc.dg/nest.c b/gcc/testsuite/gcc.dg/nest.c
index 9221ed1..2dce65e 100644
--- a/gcc/testsuite/gcc.dg/nest.c
+++ b/gcc/testsuite/gcc.dg/nest.c
@@ -3,7 +3,7 @@
/* { dg-require-profiling "-pg" } */
/* { dg-options "-O2 -pg" } */
/* { dg-options "-O2 -pg -static" { target hppa*-*-hpux* } } */
-/* { dg-additional-options "-mfentry -fno-pic" { target i?86-*-* x86_64-*-* } } */
+/* { dg-additional-options "-mfentry -fno-pic" { target i?86-*-gnu* x86_64-*-gnu* } } */
/* { dg-error "profiler" "No profiler support" { target xstormy16-*-* } 0 } */
extern void abort (void);
diff --git a/gcc/testsuite/gcc.dg/pr121035.c b/gcc/testsuite/gcc.dg/pr121035.c
new file mode 100644
index 0000000..fc0edce
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr121035.c
@@ -0,0 +1,94 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fgimple" } */
+
+int printf(const char *, ...);
+int a, b, d;
+unsigned c;
+int __GIMPLE (ssa,startwith("pre"))
+main ()
+{
+ unsigned int g;
+ int f;
+ unsigned int _1;
+ unsigned int _2;
+ int _3;
+ int _4;
+ int _5;
+ unsigned int _6;
+ unsigned int _7;
+ int _10;
+ unsigned int _11;
+ _Bool _19;
+ _Bool _20;
+ _Bool _22;
+ int _25;
+
+ __BB(2):
+ _25 = a;
+ if (_25 != 0)
+ goto __BB11;
+ else
+ goto __BB10;
+
+ __BB(11):
+ goto __BB3;
+
+ __BB(3):
+ f_26 = __PHI (__BB12: f_18, __BB11: 0);
+ g_15 = c;
+ if (f_26 != 0)
+ goto __BB4;
+ else
+ goto __BB5;
+
+ __BB(4):
+ __builtin_putchar (48);
+ goto __BB5;
+
+ __BB(5):
+ _1 = c;
+ _2 = _1 << 1;
+ _3 = a;
+ _4 = d;
+ _5 = _3 * _4;
+ if (_5 != 0)
+ goto __BB7;
+ else
+ goto __BB6;
+
+ __BB(6):
+ goto __BB7;
+
+ __BB(7):
+ _11 = __PHI (__BB5: 0u, __BB6: 4294967295u);
+ _6 = g_15 * 4294967294u;
+ _7 = _6 | _11;
+ _20 = _3 != 0;
+ _19 = _7 != 0u;
+ _22 = _19 & _20;
+ if (_22 != _Literal (_Bool) 0)
+ goto __BB9;
+ else
+ goto __BB8;
+
+ __BB(8):
+ goto __BB9;
+
+ __BB(9):
+ _10 = __PHI (__BB7: 1, __BB8: 0);
+ b = _10;
+ f_18 = (int) _1;
+ if (_3 != 0)
+ goto __BB12;
+ else
+ goto __BB10;
+
+ __BB(12):
+ goto __BB3;
+
+ __BB(10):
+ return 0;
+
+}
+
+
diff --git a/gcc/testsuite/gcc.dg/pr32450.c b/gcc/testsuite/gcc.dg/pr32450.c
index 4aaeb7d..0af262f 100644
--- a/gcc/testsuite/gcc.dg/pr32450.c
+++ b/gcc/testsuite/gcc.dg/pr32450.c
@@ -3,7 +3,8 @@
/* { dg-do run } */
/* { dg-require-profiling "-pg" } */
/* { dg-options "-O2 -pg" } */
-/* { dg-options "-O2 -pg -mtune=core2 -mfentry -fno-pic" { target { i?86-*-* x86_64-*-* } } } */
+/* { dg-options "-O2 -pg -mtune=core2" { target { i?86-*-* x86_64-*-* } } } */
+/* { dg-additional-options "-mfentry -fno-pic" { target i?86-*-gnu* x86_64-*-gnu* } } */
/* { dg-options "-O2 -pg -static" { target hppa*-*-hpux* } } */
extern void abort (void);
diff --git a/gcc/testsuite/gcc.dg/pr43643.c b/gcc/testsuite/gcc.dg/pr43643.c
index a62586d..41c00c8 100644
--- a/gcc/testsuite/gcc.dg/pr43643.c
+++ b/gcc/testsuite/gcc.dg/pr43643.c
@@ -4,7 +4,7 @@
/* { dg-require-profiling "-pg" } */
/* { dg-options "-O2 -pg" } */
/* { dg-options "-O2 -pg -static" { target hppa*-*-hpux* } } */
-/* { dg-additional-options "-mfentry -fno-pic" { target i?86-*-* x86_64-*-* } } */
+/* { dg-additional-options "-mfentry -fno-pic" { target i?86-*-gnu* x86_64-*-gnu* } } */
extern char *strdup (const char *);
diff --git a/gcc/testsuite/gcc.dg/vect/bb-slp-39.c b/gcc/testsuite/gcc.dg/vect/bb-slp-39.c
index f05ce8f..255bb10 100644
--- a/gcc/testsuite/gcc.dg/vect/bb-slp-39.c
+++ b/gcc/testsuite/gcc.dg/vect/bb-slp-39.c
@@ -16,5 +16,4 @@ void foo (double *p)
}
/* See that we vectorize three SLP instances. */
-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 3 "slp2" { target { ! { s390*-*-* riscv*-*-* } } } } } */
-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 5 "slp2" { target { s390*-*-* riscv*-*-* } } } } */
+/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 3 "slp2" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr121049.c b/gcc/testsuite/gcc.dg/vect/pr121049.c
new file mode 100644
index 0000000..558c92a
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/pr121049.c
@@ -0,0 +1,25 @@
+/* { dg-additional-options "--param vect-partial-vector-usage=1" } */
+/* { dg-additional-options "-march=x86-64-v4" { target avx512f_runtime } } */
+
+#include "tree-vect.h"
+
+int mon_lengths[12] = { 1, 10, 100 };
+
+__attribute__ ((noipa)) long
+transtime (int mon)
+{
+ long value = 0;
+ for (int i = 0; i < mon; ++i)
+ value += mon_lengths[i] * 2l;
+ return value;
+}
+
+int
+main ()
+{
+ check_vect ();
+ if (transtime (3) != 222)
+ __builtin_abort ();
+ return 0;
+}
+
diff --git a/gcc/testsuite/gcc.dg/vect/vect-reduc-cond-1.c b/gcc/testsuite/gcc.dg/vect/vect-reduc-cond-1.c
new file mode 100644
index 0000000..d8356b4
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/vect-reduc-cond-1.c
@@ -0,0 +1,59 @@
+/* { dg-require-effective-target vect_int } */
+
+#include <stdarg.h>
+#include "tree-vect.h"
+
+/* PR tree-optimization/119920 */
+
+#define N 32
+
+unsigned int ub[N];
+
+/* Test vectorization of reduction of unsigned-int. */
+
+__attribute__ ((noinline, noipa))
+void init(void)
+{
+ #pragma GCC novector
+ for(int i = 0;i < N; i++)
+ ub[i] = i;
+}
+
+
+__attribute__ ((noinline, noipa))
+void main1 (unsigned int b, unsigned int c)
+{
+ int i;
+ unsigned int usum = 0;
+
+ init();
+
+ /* Summation. */
+ for (i = 0; i < N; i++) {
+ if ( ub[i] < N/2 )
+ {
+ usum += b;
+ }
+ else
+ {
+ usum += c;
+ }
+ }
+
+ /* check results: */
+ /* __builtin_printf("%d : %d\n", usum, (N/2*b + N/2*c)); */
+ if (usum != N/2*b + N/2*c)
+ abort ();
+}
+
+int main (void)
+{
+ check_vect ();
+
+ main1 (0, 0);
+ main1 (1, 1);
+ main1 (10, 1);
+ return 0;
+}
+
+/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { xfail { vect_no_int_add } } } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-reduc-cond-2.c b/gcc/testsuite/gcc.dg/vect/vect-reduc-cond-2.c
new file mode 100644
index 0000000..80c1dba
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/vect-reduc-cond-2.c
@@ -0,0 +1,61 @@
+/* { dg-require-effective-target vect_int } */
+/* { dg-additional-options "-fdump-tree-ifcvt-details" } */
+
+#include <stdarg.h>
+#include "tree-vect.h"
+
+/* PR tree-optimization/119920 */
+
+#define N 32
+
+unsigned int ub[N];
+unsigned int ua[N];
+
+/* Test vectorization of reduction of unsigned-int. */
+
+__attribute__ ((noinline, noipa))
+void init(void)
+{
+ #pragma GCC novector
+ for(int i = 0;i < N; i++) {
+ ub[i] = i;
+ ua[i] = 1;
+ }
+}
+
+
+__attribute__ ((noinline, noipa))
+void main1 (unsigned int b, unsigned int c)
+{
+ int i;
+ unsigned int usum = 0;
+
+ init();
+
+ /* Summation. */
+ for (i = 0; i < N; i++) {
+ unsigned t = ua[i];
+ if ( ub[i] < N/2 )
+ usum += b * t;
+ else
+ usum += c * t;
+ }
+
+ /* check results: */
+ /* __builtin_printf("%d : %d\n", usum, (N/2*b*1 + N/2*c*1)); */
+ if (usum != N/2*b + N/2*c)
+ abort ();
+}
+
+int main (void)
+{
+ check_vect ();
+
+ main1 (0, 0);
+ main1 (1, 1);
+ main1 (10, 1);
+ return 0;
+}
+
+/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { xfail { vect_no_int_add } } } } */
+/* { dg-final { scan-tree-dump-times "changed to factor operation out from COND_EXPR" 2 "ifcvt" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/vect-reduc-cond-3.c b/gcc/testsuite/gcc.dg/vect/vect-reduc-cond-3.c
new file mode 100644
index 0000000..e425869
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/vect-reduc-cond-3.c
@@ -0,0 +1,56 @@
+/* { dg-require-effective-target vect_int } */
+
+#include <stdarg.h>
+#include "tree-vect.h"
+
+/* PR tree-optimization/112324 */
+/* PR tree-optimization/110015 */
+
+#define N 32
+
+int ub[N];
+
+/* Test vectorization of reduction of int max with some extra code involed. */
+
+__attribute__ ((noinline, noipa))
+void init(void)
+{
+ #pragma GCC novector
+ for(int i = 0;i < N; i++)
+ ub[i] = (i&4) && (i&1) ? -i : i;
+}
+
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+__attribute__ ((noinline, noipa))
+void main1 (void)
+{
+ int i;
+ int max = 0;
+
+ init();
+
+ /* Summation. */
+ for (i = 0; i < N; i++) {
+ int tmp = ub[i];
+ if (tmp < 0)
+ max = MAX (-tmp, max);
+ else
+ max = MAX (tmp, max);
+ }
+
+ /* check results: */
+ /* __builtin_printf("%d : %d\n", max, N); */
+ if (max != N - 1)
+ abort ();
+}
+
+int main (void)
+{
+ check_vect ();
+
+ main1 ();
+ return 0;
+}
+
+/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { xfail { vect_no_int_min_max } } } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr104447.c b/gcc/testsuite/gcc.target/i386/pr104447.c
index f58170d..145ba90 100644
--- a/gcc/testsuite/gcc.target/i386/pr104447.c
+++ b/gcc/testsuite/gcc.target/i386/pr104447.c
@@ -1,6 +1,7 @@
/* { dg-do compile } */
/* { dg-require-profiling "-pg" } */
-/* { dg-options "-O2 -pg -mfentry -fno-pic" } */
+/* { dg-options "-O2 -pg" } */
+/* { dg-additional-options "-mfentry -fno-pic" { target *-*-gnu* } } */
int
bar (int x)
diff --git a/gcc/testsuite/gcc.target/i386/pr113122-3.c b/gcc/testsuite/gcc.target/i386/pr113122-3.c
index c46805d..87b76de 100644
--- a/gcc/testsuite/gcc.target/i386/pr113122-3.c
+++ b/gcc/testsuite/gcc.target/i386/pr113122-3.c
@@ -1,7 +1,8 @@
/* PR target/113122 */
/* { dg-do assemble { target *-*-linux* } } */
/* { dg-require-effective-target masm_intel } */
-/* { dg-options "-fprofile -mfentry -fno-pic -O2 -masm=intel" } */
+/* { dg-options "-fprofile -O2 -masm=intel" } */
+/* { dg-additional-options "-mfentry -fno-pic" { target *-*-gnu* } } */
void
func (void)
diff --git a/gcc/testsuite/gcc.target/i386/pr119386-1.c b/gcc/testsuite/gcc.target/i386/pr119386-1.c
index 39a3e1d..7a56eac 100644
--- a/gcc/testsuite/gcc.target/i386/pr119386-1.c
+++ b/gcc/testsuite/gcc.target/i386/pr119386-1.c
@@ -1,9 +1,9 @@
/* PR target/119386 */
/* { dg-do compile { target *-*-linux* } } */
/* { dg-options "-O2 -fpic -pg" } */
-/* { dg-additional-options "-mfentry" { target { ! ia32 } } } */
+/* { dg-additional-options "-mfentry" { target { *-*-gnu* && { ! ia32 } } } } */
/* { dg-final { scan-assembler "call\[ \t\]+mcount@PLT" { target ia32 } } } */
-/* { dg-final { scan-assembler "call\[ \t\]+__fentry__@PLT" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "call\[ \t\]+__fentry__@PLT" { target { *-*-gnu* && { ! ia32 } } } } } */
int
main ()
diff --git a/gcc/testsuite/gcc.target/i386/pr119386-2.c b/gcc/testsuite/gcc.target/i386/pr119386-2.c
index d516aa9..cddaaf0 100644
--- a/gcc/testsuite/gcc.target/i386/pr119386-2.c
+++ b/gcc/testsuite/gcc.target/i386/pr119386-2.c
@@ -1,8 +1,8 @@
/* PR target/119386 */
/* { dg-do compile { target *-*-linux* } } */
/* { dg-options "-O2 -fpic -fno-plt -pg" } */
-/* { dg-additional-options "-mfentry" { target { ! ia32 } } } */
-/* { dg-final { scan-assembler "call\[ \t\]+\\*__fentry__@GOTPCREL" { target { ! ia32 } } } } */
+/* { dg-additional-options "-mfentry" { target { *-*-gnu* && { ! ia32 } } } } */
+/* { dg-final { scan-assembler "call\[ \t\]+\\*__fentry__@GOTPCREL" { target { *-*-gnu* && { ! ia32 } } } } } */
/* { dg-final { scan-assembler "call\[ \t\]+\\*mcount@GOT\\(" { target ia32 } } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr121062-1.c b/gcc/testsuite/gcc.target/i386/pr121062-1.c
new file mode 100644
index 0000000..799f856
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr121062-1.c
@@ -0,0 +1,34 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -march=x86-64-v3" } */
+
+extern union {
+ int i;
+ float f;
+} int_as_float_u;
+
+extern int render_result_from_bake_w;
+extern int render_result_from_bake_h_seed_pass;
+extern float *render_result_from_bake_h_primitive;
+extern float *render_result_from_bake_h_seed;
+
+float
+int_as_float(int i)
+{
+ int_as_float_u.i = i;
+ return int_as_float_u.f;
+}
+
+void
+render_result_from_bake_h(int tx)
+{
+ while (render_result_from_bake_w) {
+ for (; tx < render_result_from_bake_w; tx++)
+ render_result_from_bake_h_primitive[1] =
+ render_result_from_bake_h_primitive[2] = int_as_float(-1);
+ if (render_result_from_bake_h_seed_pass) {
+ *render_result_from_bake_h_seed = 0;
+ }
+ }
+}
+
+/* { dg-final { scan-assembler-times "movq\[ \\t\]+\\\$-1, %r\[a-z0-9\]+" 2 { target { ! ia32 } } } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr121062-2.c b/gcc/testsuite/gcc.target/i386/pr121062-2.c
new file mode 100644
index 0000000..723d68a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr121062-2.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-Og -fno-dce -mtune=generic" } */
+
+typedef int __attribute__((__vector_size__ (4))) S;
+extern void bar (S);
+
+void
+foo ()
+{
+ bar ((S){-1});
+}
+
+/* { dg-final { scan-assembler-times "movl\[ \\t\]+\\\$-1, \\(%esp\\)" 1 { target ia32 } } } */
+/* { dg-final { scan-assembler-times "movl\[ \\t\]+\\\$-1, %edi" 1 { target { ! ia32 } } } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr121062-3a.c b/gcc/testsuite/gcc.target/i386/pr121062-3a.c
new file mode 100644
index 0000000..effd4ff
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr121062-3a.c
@@ -0,0 +1,23 @@
+/* { dg-do compile { target fpic } } */
+/* { dg-options "-O2 -march=x86-64 -fpic" } */
+
+typedef struct {
+ struct {
+ unsigned short lo4;
+ unsigned short lo3;
+ unsigned short lo2;
+ unsigned short lo1;
+ } i;
+} BID_BINARY80LDOUBLE;
+extern BID_BINARY80LDOUBLE __bid64_to_binary80_x_out;
+void
+__bid64_to_binary80 (void)
+{
+ __bid64_to_binary80_x_out.i.lo4
+ = __bid64_to_binary80_x_out.i.lo3
+ = __bid64_to_binary80_x_out.i.lo2
+ = __bid64_to_binary80_x_out.i.lo1 = 65535;
+}
+
+/* { dg-final { scan-assembler-times "movq\[ \\t\]+%xmm\[0-9\]+, " 1 { target ia32 } } } */
+/* { dg-final { scan-assembler-times "movq\[ \\t\]+\\\$-1, \\(%(e|r)\[a-z0-9\]+\\)" 1 { target { ! ia32 } } } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr121062-3b.c b/gcc/testsuite/gcc.target/i386/pr121062-3b.c
new file mode 100644
index 0000000..eb89b5d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr121062-3b.c
@@ -0,0 +1,6 @@
+/* { dg-do compile { target { fpic && lp64 } } } */
+/* { dg-options "-O2 -march=x86-64 -fno-pic -mcmodel=large" } */
+
+#include "pr121062-3a.c"
+
+/* { dg-final { scan-assembler-times "movq\[ \\t\]+\\\$-1, \\(%r\[a-z0-9\]+\\)" 1 } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr121062-3c.c b/gcc/testsuite/gcc.target/i386/pr121062-3c.c
new file mode 100644
index 0000000..4c07029
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr121062-3c.c
@@ -0,0 +1,6 @@
+/* { dg-do compile { target { fpic && lp64 } } } */
+/* { dg-options "-O2 -march=x86-64 -fpic -mcmodel=large" } */
+
+#include "pr121062-3a.c"
+
+/* { dg-final { scan-assembler-times "movq\[ \\t\]+\\\$-1, \\(%r\[a-z0-9\]+\\)" 1 } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr121062-4.c b/gcc/testsuite/gcc.target/i386/pr121062-4.c
new file mode 100644
index 0000000..77a0c2e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr121062-4.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -march=x86-64" } */
+
+typedef long long int __attribute__((__vector_size__ (8))) S;
+
+void
+foo (S *c)
+{
+ *c = (S){0x12345678badbeefULL};
+}
+
+
+/* { dg-final { scan-assembler-times "movq\[ \\t\]+%xmm\[0-9\]+, " 1 { target ia32 } } } */
+/* { dg-final { scan-assembler-times "movabsq\[ \\t\]+\\\$81985529250168559, %r\[a-z0-9\]+" 1 { target { ! ia32 } } } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr121062-5.c b/gcc/testsuite/gcc.target/i386/pr121062-5.c
new file mode 100644
index 0000000..22c09a6
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr121062-5.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -march=x86-64" } */
+
+typedef int __attribute__((__vector_size__ (4))) S;
+
+void
+foo (S *c)
+{
+ *c = (S){0x12345678};
+}
+
+
+/* { dg-final { scan-assembler-times "movl\[ \\t\]+\\\$305419896, \\(%(e|r)\[a-z0-9\]+\\)" 1 } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr121062-6.c b/gcc/testsuite/gcc.target/i386/pr121062-6.c
new file mode 100644
index 0000000..780b496
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr121062-6.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-Og -fno-dce -mtune=generic" } */
+
+typedef int __attribute__((__vector_size__ (8))) S;
+
+void
+foo (S *c)
+{
+ *c = (S){0x12345678,0xbadbeefULL};
+}
+
+/* { dg-final { scan-assembler-times "movq\[ \\t\]+%xmm\[0-9\]+, " 1 { target ia32 } } } */
+/* { dg-final { scan-assembler-times "movabsq\[ \\t\]+\\\$841538639400031864, %r\[a-z0-9\]+" 1 { target { ! ia32 } } } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr121062-7.c b/gcc/testsuite/gcc.target/i386/pr121062-7.c
new file mode 100644
index 0000000..f1834f8
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr121062-7.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -march=x86-64" } */
+
+typedef __bf16 __attribute__((__vector_size__ (4))) S;
+
+void
+foo (S *c)
+{
+ *c = (S){-0.1, 2.1};
+}
+
+
+/* { dg-final { scan-assembler-times "movl\[ \\t\]+\\\$1074183629, \\(%(e|r)\[a-z0-9\]+\\)" 1 } } */
diff --git a/gcc/testsuite/gcc.target/loongarch/pr121064.c b/gcc/testsuite/gcc.target/loongarch/pr121064.c
new file mode 100644
index 0000000..a466c7a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/loongarch/pr121064.c
@@ -0,0 +1,38 @@
+/* { dg-require-effective-target loongarch_sx_hw } */
+/* { dg-do run } */
+/* { dg-options "-march=loongarch64 -mfpu=64 -mlsx -O3" } */
+
+typedef __INT32_TYPE__ int32_t;
+typedef unsigned __INT32_TYPE__ uint32_t;
+
+__attribute__ ((noipa)) static int32_t
+long_filter_ehigh_3830_1 (int32_t *buffer, int length)
+{
+ int i, j;
+ int32_t dotprod = 0;
+ int32_t delay[4] = { 0 };
+ uint32_t coeffs[4] = { 0 };
+
+ for (i = 0; i < length; i++)
+ {
+ dotprod = 0;
+ for (j = 3; j >= 0; j--)
+ {
+ dotprod += delay[j] * coeffs[j];
+ coeffs[j] += ((delay[j] >> 31) | 1);
+ }
+ for (j = 3; j > 0; j--)
+ delay[j] = delay[j - 1];
+ delay[0] = buffer[i];
+ }
+
+ return dotprod;
+}
+
+int
+main ()
+{
+ int32_t buffer[] = { -1, 1 };
+ if (long_filter_ehigh_3830_1 (buffer, 2) != -1)
+ __builtin_trap ();
+}
diff --git a/gcc/testsuite/gcc.target/s390/isfinite-isinf-isnormal-signbit-2.c b/gcc/testsuite/gcc.target/s390/isfinite-isinf-isnormal-signbit-2.c
index 2ff5a37..e1c7806 100644
--- a/gcc/testsuite/gcc.target/s390/isfinite-isinf-isnormal-signbit-2.c
+++ b/gcc/testsuite/gcc.target/s390/isfinite-isinf-isnormal-signbit-2.c
@@ -3,8 +3,10 @@
#include "isfinite-isinf-isnormal-signbit.h"
-/* { dg-final { scan-assembler-times {tcxb\t%f[0-9]+,1365} 1 } } SIGNBIT long double */
-/* { dg-final { scan-assembler-times {tdcxt\t%f[0-9]+,1365} 1 } } SIGNBIT _Decimal128 */
+/* { dg-final { scan-assembler-times {tcxb\t%f[0-9]+,1365} 0 { target lp64 } } } SIGNBIT long double */
+/* { dg-final { scan-assembler-times {tdcxt\t%f[0-9]+,1365} 0 { target lp64 } } } SIGNBIT _Decimal128 */
+/* { dg-final { scan-assembler-times {tcxb\t%f[0-9]+,1365} 1 { target { ! lp64 } } } } SIGNBIT long double */
+/* { dg-final { scan-assembler-times {tdcxt\t%f[0-9]+,1365} 1 { target { ! lp64 } } } } SIGNBIT _Decimal128 */
/* { dg-final { scan-assembler-times {tcxb\t%f[0-9]+,4032} 1 } } ISFINITE long double */
/* { dg-final { scan-assembler-times {tdcxt\t%f[0-9]+,4032} 1 } } ISFINITE _Decimal128 */
/* { dg-final { scan-assembler-times {tcxb\t%f[0-9]+,48} 1 } } ISINF long double */
diff --git a/gcc/testsuite/gcc.target/s390/isfinite-isinf-isnormal-signbit-3.c b/gcc/testsuite/gcc.target/s390/isfinite-isinf-isnormal-signbit-3.c
index 8f67553..5c9986d 100644
--- a/gcc/testsuite/gcc.target/s390/isfinite-isinf-isnormal-signbit-3.c
+++ b/gcc/testsuite/gcc.target/s390/isfinite-isinf-isnormal-signbit-3.c
@@ -3,8 +3,10 @@
#include "isfinite-isinf-isnormal-signbit.h"
-/* { dg-final { scan-assembler-times {wftcixb\t%v[0-9]+,%v[0-9]+,1365} 1 } } */
-/* { dg-final { scan-assembler-times {tdcxt\t%f[0-9]+,1365} 1 } } */
+/* { dg-final { scan-assembler-times {wftcixb\t%v[0-9]+,%v[0-9]+,1365} 0 { target lp64 } } } */
+/* { dg-final { scan-assembler-times {tdcxt\t%f[0-9]+,1365} 0 { target lp64 } } } */
+/* { dg-final { scan-assembler-times {wftcixb\t%v[0-9]+,%v[0-9]+,1365} 1 { target { ! lp64 } } } } */
+/* { dg-final { scan-assembler-times {tdcxt\t%f[0-9]+,1365} 1 { target { ! lp64 } } } } */
/* { dg-final { scan-assembler-times {wftcixb\t%v[0-9]+,%v[0-9]+,4032} 1 } } */
/* { dg-final { scan-assembler-times {tdcxt\t%f[0-9]+,4032} 1 } } */
/* { dg-final { scan-assembler-times {wftcixb\t%v[0-9]+,%v[0-9]+,48} 1 } } */
diff --git a/gcc/testsuite/gcc.target/s390/signbit-1.c b/gcc/testsuite/gcc.target/s390/signbit-1.c
new file mode 100644
index 0000000..45f608a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/s390/signbit-1.c
@@ -0,0 +1,40 @@
+/* { dg-do run } */
+/* { dg-options "-O2 -march=z900 -save-temps" } */
+/* { dg-final { scan-assembler-times {\ttceb\t} 2 } } */
+/* { dg-final { scan-assembler-times {\ttcdb\t} 2 } } */
+/* { dg-final { scan-assembler-times {\ttcxb\t} 2 } } */
+
+/* Binary Floating-Point */
+
+__attribute__ ((noipa))
+int signbit_float_reg (float x) { return __builtin_signbit (x); }
+__attribute__ ((noipa))
+int signbit_float_mem (float *x) { return __builtin_signbit (*x); }
+__attribute__ ((noipa))
+int signbit_double_reg (double x) { return __builtin_signbit (x); }
+__attribute__ ((noipa))
+int signbit_double_mem (double *x) { return __builtin_signbit (*x); }
+
+__attribute__ ((noipa))
+int
+signbit_longdouble_reg (long double x)
+{
+ __asm__ ("" : "+f" (x));
+ return __builtin_signbit (x);
+}
+
+__attribute__ ((noipa))
+int signbit_longdouble_mem (long double *x) { return __builtin_signbit (*x); }
+
+#include "signbit.h"
+TEST (float, float, __builtin_inff(), __builtin_nanf("42"), 0.f, 42.f)
+TEST (double, double, __builtin_inf(), __builtin_nan("42"), 0., 42.)
+TEST (longdouble, long double, __builtin_infl(), __builtin_nanl("42"), 0.L, 42.L)
+
+int
+main (void)
+{
+ test_float ();
+ test_double ();
+ test_longdouble ();
+}
diff --git a/gcc/testsuite/gcc.target/s390/signbit-2.c b/gcc/testsuite/gcc.target/s390/signbit-2.c
new file mode 100644
index 0000000..488c477
--- /dev/null
+++ b/gcc/testsuite/gcc.target/s390/signbit-2.c
@@ -0,0 +1,40 @@
+/* { dg-do run } */
+/* { dg-options "-O2 -march=z9-ec -mzarch -save-temps" } */
+/* { dg-final { scan-assembler-times {\ttdcet\t} 2 } } */
+/* { dg-final { scan-assembler-times {\ttdcdt\t} 2 } } */
+/* { dg-final { scan-assembler-times {\ttdcxt\t} 2 } } */
+
+/* Decimal Floating-Point */
+
+__attribute__ ((noipa))
+int signbit_dec32_reg (_Decimal32 x) { return __builtin_signbit (x); }
+__attribute__ ((noipa))
+int signbit_dec32_mem (_Decimal32 *x) { return __builtin_signbit (*x); }
+__attribute__ ((noipa))
+int signbit_dec64_reg (_Decimal64 x) { return __builtin_signbit (x); }
+__attribute__ ((noipa))
+int signbit_dec64_mem (_Decimal64 *x) { return __builtin_signbit (*x); }
+
+__attribute__ ((noipa))
+int
+signbit_dec128_reg (_Decimal128 x)
+{
+ __asm__ ("" : "+f" (x));
+ return __builtin_signbit (x);
+}
+
+__attribute__ ((noipa))
+int signbit_dec128_mem (_Decimal128 *x) { return __builtin_signbit (*x); }
+
+#include "signbit.h"
+TEST (dec32, _Decimal32, __builtin_infd32(), __builtin_nand32("42"), 0.df, 42.df)
+TEST (dec64, _Decimal64, __builtin_infd64(), __builtin_nand64("42"), 0.dd, 42.dd)
+TEST (dec128, _Decimal128, __builtin_infd128(), __builtin_nand128("42"), 0.dl, 42.dl)
+
+int
+main (void)
+{
+ test_dec32 ();
+ test_dec64 ();
+ test_dec128 ();
+}
diff --git a/gcc/testsuite/gcc.target/s390/signbit-3.c b/gcc/testsuite/gcc.target/s390/signbit-3.c
new file mode 100644
index 0000000..2fad58b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/s390/signbit-3.c
@@ -0,0 +1,152 @@
+/* { dg-do run { target lp64 } } */
+/* { dg-options "-O2 -march=z10 -save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+/* Binary Floating-Point */
+
+/*
+** signbit_float_reg:
+** lgdr (%r[0-9]+),%f0
+** srlg (%r[0-9]+),\1,63
+** lgfr %r2,\2
+** br %r14
+*/
+__attribute__ ((noipa))
+int signbit_float_reg (float x) { return __builtin_signbit (x); }
+
+/*
+** signbit_float_mem:
+** l (%r[0-9]+),0\(%r2\)
+** srl \1,31
+** lgfr %r2,\1
+** br %r14
+*/
+__attribute__ ((noipa))
+int signbit_float_mem (float *x) { return __builtin_signbit (*x); }
+
+/*
+** signbit_double_reg:
+** lgdr (%r[0-9]+),%f0
+** srlg %r2,\1,63
+** br %r14
+*/
+__attribute__ ((noipa))
+int signbit_double_reg (double x) { return __builtin_signbit (x); }
+
+/*
+** signbit_double_mem:
+** lg (%r[0-9]+),0\(%r2\)
+** srlg %r2,\1,63
+** br %r14
+*/
+__attribute__ ((noipa))
+int signbit_double_mem (double *x) { return __builtin_signbit (*x); }
+
+/*
+** signbit_longdouble_reg:
+** ld %f0,0\(%r2\)
+** ld %f2,8\(%r2\)
+** lgdr (%r[0-9]+),%f0
+** srlg %r2,\1,63
+** br %r14
+*/
+__attribute__ ((noipa))
+int
+signbit_longdouble_reg (long double x)
+{
+ __asm__ ("" : "+f" (x));
+ return __builtin_signbit (x);
+}
+
+/*
+** signbit_longdouble_mem:
+** lg (%r[0-9]+),0\(%r2\)
+** srlg %r2,\1,63
+** br %r14
+*/
+__attribute__ ((noipa))
+int signbit_longdouble_mem (long double *x) { return __builtin_signbit (*x); }
+
+/* Decimal Floating-Point */
+
+/*
+** signbit_dec32_reg:
+** lgdr (%r[0-9]+),%f0
+** srlg (%r[0-9]+),\1,63
+** lgfr %r2,\2
+** br %r14
+*/
+__attribute__ ((noipa))
+int signbit_dec32_reg (_Decimal32 x) { return __builtin_signbit (x); }
+
+/*
+** signbit_dec32_mem:
+** l (%r[0-9]+),0\(%r2\)
+** srl \1,31
+** lgfr %r2,\1
+** br %r14
+*/
+__attribute__ ((noipa))
+int signbit_dec32_mem (_Decimal32 *x) { return __builtin_signbit (*x); }
+
+/*
+** signbit_dec64_reg:
+** lgdr (%r[0-9]+),%f0
+** srlg %r2,\1,63
+** br %r14
+*/
+__attribute__ ((noipa))
+int signbit_dec64_reg (_Decimal64 x) { return __builtin_signbit (x); }
+
+/*
+** signbit_dec64_mem:
+** lg (%r[0-9]+),0\(%r2\)
+** srlg %r2,\1,63
+** br %r14
+*/
+__attribute__ ((noipa))
+int signbit_dec64_mem (_Decimal64 *x) { return __builtin_signbit (*x); }
+
+/*
+** signbit_dec128_reg:
+** ld %f0,0\(%r2\)
+** ld %f2,8\(%r2\)
+** lgdr (%r[0-9]+),%f0
+** srlg %r2,\1,63
+** br %r14
+*/
+__attribute__ ((noipa))
+int
+signbit_dec128_reg (_Decimal128 x)
+{
+ __asm__ ("" : "+f" (x));
+ return __builtin_signbit (x);
+}
+
+/*
+** signbit_dec128_mem:
+** lg (%r[0-9]+),0\(%r2\)
+** srlg %r2,\1,63
+** br %r14
+*/
+__attribute__ ((noipa))
+int signbit_dec128_mem (_Decimal128 *x) { return __builtin_signbit (*x); }
+
+#include "signbit.h"
+TEST (float, float, __builtin_inff(), __builtin_nanf("42"), 0.f, 42.f)
+TEST (double, double, __builtin_inf(), __builtin_nan("42"), 0., 42.)
+TEST (longdouble, long double, __builtin_infl(), __builtin_nanl("42"), 0.L, 42.L)
+TEST (dec32, _Decimal32, __builtin_infd32(), __builtin_nand32("42"), 0.df, 42.df)
+TEST (dec64, _Decimal64, __builtin_infd64(), __builtin_nand64("42"), 0.dd, 42.dd)
+TEST (dec128, _Decimal128, __builtin_infd128(), __builtin_nand128("42"), 0.dl, 42.dl)
+
+int
+main (void)
+{
+ test_float ();
+ test_double ();
+ test_longdouble ();
+ test_dec32 ();
+ test_dec64 ();
+ test_dec128 ();
+}
diff --git a/gcc/testsuite/gcc.target/s390/signbit-4.c b/gcc/testsuite/gcc.target/s390/signbit-4.c
new file mode 100644
index 0000000..2cb743e
--- /dev/null
+++ b/gcc/testsuite/gcc.target/s390/signbit-4.c
@@ -0,0 +1,55 @@
+/* { dg-do run { target lp64 } } */
+/* { dg-require-effective-target s390_vx } */
+/* { dg-options "-O2 -march=z13 -save-temps" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+/* Binary Floating-Point */
+
+/*
+** signbit_float_reg:
+** vlgvf (%r[0-9]+),%v0,0
+** risbgn %r2,\1,64-1,128\+63,32\+1
+** br %r14
+*/
+__attribute__ ((noipa))
+int signbit_float_reg (float x) { return __builtin_signbit (x); }
+
+/*
+** signbit_float_mem:
+** l (%r[0-9]+),0\(%r2\)
+** risbgn %r2,\1,64-1,128\+63,32\+1
+** br %r14
+*/
+__attribute__ ((noipa))
+int signbit_float_mem (float *x) { return __builtin_signbit (*x); }
+
+/* Decimal Floating-Point */
+
+/*
+** signbit_dec32_reg:
+** vlgvf (%r[0-9]+),%v0,0
+** risbgn %r2,\1,64-1,128\+63,32\+1
+** br %r14
+*/
+__attribute__ ((noipa))
+int signbit_dec32_reg (_Decimal32 x) { return __builtin_signbit (x); }
+
+/*
+** signbit_dec32_mem:
+** l (%r[0-9]+),0\(%r2\)
+** risbgn %r2,\1,64-1,128\+63,32\+1
+** br %r14
+*/
+__attribute__ ((noipa))
+int signbit_dec32_mem (_Decimal32 *x) { return __builtin_signbit (*x); }
+
+#include "signbit.h"
+TEST (float, float, __builtin_inff(), __builtin_nanf("42"), 0.f, 42.f)
+TEST (dec32, _Decimal32, __builtin_infd32(), __builtin_nand32("42"), 0.df, 42.df)
+
+int
+main (void)
+{
+ test_float ();
+ test_dec32 ();
+}
diff --git a/gcc/testsuite/gcc.target/s390/signbit-5.c b/gcc/testsuite/gcc.target/s390/signbit-5.c
new file mode 100644
index 0000000..6840327
--- /dev/null
+++ b/gcc/testsuite/gcc.target/s390/signbit-5.c
@@ -0,0 +1,35 @@
+/* { dg-do run { target lp64 } } */
+/* { dg-options "-O2 -march=z14 -save-temps" } */
+
+/*
+** signbit_longdouble_reg:
+** ld %f0,0(%r2);ld %f2,8+0(%r2)
+** lgdr (%r[0-9]+),%f0
+** srlg %r2,\1,63
+** br %r14
+*/
+__attribute__ ((noipa))
+int
+signbit_longdouble_reg (long double x)
+{
+ __asm__ ("" : "+f" (x));
+ return __builtin_signbit (x);
+}
+
+/*
+** signbit_longdouble_mem:
+** lg (%r[0-9]+),0\(%r2\)
+** srlg %r2,\1,63
+** br %r14
+*/
+__attribute__ ((noipa))
+int signbit_longdouble_mem (long double *x) { return __builtin_signbit (*x); }
+
+#include "signbit.h"
+TEST (longdouble, long double, __builtin_infl(), __builtin_nanl("42"), 0.L, 42.L)
+
+int
+main (void)
+{
+ test_longdouble ();
+}
diff --git a/gcc/testsuite/gcc.target/s390/signbit.h b/gcc/testsuite/gcc.target/s390/signbit.h
new file mode 100644
index 0000000..730e387
--- /dev/null
+++ b/gcc/testsuite/gcc.target/s390/signbit.h
@@ -0,0 +1,36 @@
+#define TEST(T, U, I, N, C0, C42) \
+ void test_##T (void) \
+ { \
+ U tmp; \
+ int x; \
+ \
+ x = signbit_##T##_reg(C42); \
+ x += signbit_##T##_reg(C0); \
+ x += signbit_##T##_reg(I); \
+ x += signbit_##T##_reg(N); \
+ tmp = C42; \
+ x += signbit_##T##_mem(&tmp); \
+ tmp = C0; \
+ x += signbit_##T##_mem(&tmp); \
+ tmp = I; \
+ x += signbit_##T##_mem(&tmp); \
+ tmp = N; \
+ x += signbit_##T##_mem(&tmp); \
+ if (x != 0) \
+ __builtin_abort(); \
+ \
+ x = signbit_##T##_reg(-C42); \
+ x += signbit_##T##_reg(-C0); \
+ x += signbit_##T##_reg(-I); \
+ x += signbit_##T##_reg(-N); \
+ tmp = -C42; \
+ x += signbit_##T##_mem(&tmp); \
+ tmp = -C0; \
+ x += signbit_##T##_mem(&tmp); \
+ tmp = -I; \
+ x += signbit_##T##_mem(&tmp); \
+ tmp = -N; \
+ x += signbit_##T##_mem(&tmp); \
+ if (x != 8) \
+ __builtin_abort(); \
+ }
diff --git a/gcc/testsuite/gcc.target/s390/vector/vlgv-zero-extend-1.c b/gcc/testsuite/gcc.target/s390/vector/vlgv-zero-extend-1.c
new file mode 100644
index 0000000..11df6c1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/s390/vector/vlgv-zero-extend-1.c
@@ -0,0 +1,71 @@
+/* { dg-do compile } */
+/* { dg-require-effective-target s390_vx } */
+/* { dg-additional-options "-O2" } */
+/* { dg-final { scan-assembler-not {\tllg?[fhc]r\t} } } */
+
+typedef unsigned char __attribute__ ((vector_size (1))) V1QI;
+typedef unsigned char __attribute__ ((vector_size (2))) V2QI;
+typedef unsigned char __attribute__ ((vector_size (4))) V4QI;
+typedef unsigned char __attribute__ ((vector_size (8))) V8QI;
+typedef unsigned char __attribute__ ((vector_size (16))) V16QI;
+
+typedef unsigned short __attribute__ ((vector_size (2))) V1HI;
+typedef unsigned short __attribute__ ((vector_size (4))) V2HI;
+typedef unsigned short __attribute__ ((vector_size (8))) V4HI;
+typedef unsigned short __attribute__ ((vector_size (16))) V8HI;
+
+typedef unsigned int __attribute__ ((vector_size (4))) V1SI;
+typedef unsigned int __attribute__ ((vector_size (8))) V2SI;
+typedef unsigned int __attribute__ ((vector_size (16))) V4SI;
+
+unsigned short ushort;
+unsigned int uint;
+
+#define TEST(T, U, I) \
+ unsigned T test_ ## I ## _ ## U (U x) { return x[I]; } \
+ void test_ ## I ## _ ## U ## _ushort (U x) { ushort = x[I]; } \
+ void test_ ## I ## _ ## U ## _uint (U x) { uint = x[I]; }
+
+#define TEST1(T, U) \
+ TEST(T, U, 0)
+
+#define TEST2(T, U) \
+ TEST1 (T, U) \
+ TEST(T, U, 1)
+
+#define TEST4(T, U) \
+ TEST2 (T, U) \
+ TEST(T, U, 2) \
+ TEST(T, U, 3)
+
+#define TEST8(T, U) \
+ TEST4 (T, U) \
+ TEST(T, U, 4) \
+ TEST(T, U, 5) \
+ TEST(T, U, 6) \
+ TEST(T, U, 7)
+
+#define TEST16(T, U) \
+ TEST8 (T, U) \
+ TEST(T, U, 9) \
+ TEST(T, U, 10) \
+ TEST(T, U, 11) \
+ TEST(T, U, 12) \
+ TEST(T, U, 13) \
+ TEST(T, U, 14) \
+ TEST(T, U, 15)
+
+TEST1 (char, V1QI)
+TEST2 (char, V2QI)
+TEST4 (char, V4QI)
+TEST8 (char, V8QI)
+TEST16 (char, V16QI)
+
+TEST1 (short, V1HI)
+TEST2 (short, V2HI)
+TEST4 (short, V4HI)
+TEST8 (short, V8HI)
+
+TEST1 (int, V1SI)
+TEST2 (int, V2SI)
+TEST4 (int, V4SI)
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index 4486a6a..65d2e67 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -5800,6 +5800,13 @@ proc add_options_for_aarch64_sve { flags } {
return "$flags -march=armv8.2-a+sve"
}
+proc add_options_for_aarch64_sme { flags } {
+ if { ![istarget aarch64*-*-*] || [check_effective_target_aarch64_sme] } {
+ return "$flags"
+ }
+ return "$flags -march=armv9-a+sme"
+}
+
# Return 1 if this is an ARM target supporting the FP16 alternative
# format. Some multilibs may be incompatible with the options needed. Also
# set et_arm_fp16_alternative_flags to the best options to add.
@@ -6539,6 +6546,22 @@ foreach N { 128 256 512 1024 2048 } {
}]
}
+# Return true if this is an AArch64 target that can run SME code.
+
+proc check_effective_target_aarch64_sme_hw { } {
+ if { ![istarget aarch64*-*-*] } {
+ return 0
+ }
+ return [check_runtime aarch64_sme_hw_available {
+ int
+ main (void)
+ {
+ asm volatile ("rdsvl x0, #1");
+ return 0;
+ }
+ } [add_options_for_aarch64_sme ""]]
+}
+
proc check_effective_target_arm_neonv2_hw { } {
return [check_runtime arm_neon_hwv2_available {
#include "arm_neon.h"
diff --git a/gcc/tree-if-conv.cc b/gcc/tree-if-conv.cc
index 366e959..ba25c19 100644
--- a/gcc/tree-if-conv.cc
+++ b/gcc/tree-if-conv.cc
@@ -2114,6 +2114,187 @@ gen_phi_arg_condition (gphi *phi, ifcvt_arg_entry_t &arg,
return cond;
}
+/* Find the operand which is different between ARG0_OP and ARG1_OP.
+ Returns the operand num where the difference is.
+   Set NEW_ARG0 and NEW_ARG1 from the different argument.
+ Returns -1 if none is found.
+ If ARG0_OP/ARG1_OP is commutative also try swapping the
+ two commutative operands and return the operand number where
+ the difference happens in ARG0_OP. */
+
+static int
+find_different_opnum (const gimple_match_op &arg0_op,
+ const gimple_match_op &arg1_op,
+ tree *new_arg0, tree *new_arg1)
+{
+ unsigned opnum = -1;
+ unsigned first;
+ first = first_commutative_argument (arg1_op.code, arg1_op.type);
+ for (unsigned i = 0; i < arg0_op.num_ops; i++)
+ {
+ if (!operand_equal_for_phi_arg_p (arg0_op.ops[i],
+ arg1_op.ops[i]))
+ {
+ /* Can handle only one non equal operand. */
+ if (opnum != -1u)
+ {
+	      /* Though if opnum is right before i and opnum is equal
+		 to the first commutative argument, handle commutative
+		 specially. */
+ if (i == opnum + 1 && opnum == first)
+ goto commutative;
+ return -1;
+ }
+ opnum = i;
+ }
+ }
+  /* If all operands are equal, only do this if there was a single
+     operand. */
+ if (opnum == -1u)
+ {
+ if (arg0_op.num_ops != 1)
+ return -1;
+ opnum = 0;
+ }
+ *new_arg0 = arg0_op.ops[opnum];
+ *new_arg1 = arg1_op.ops[opnum];
+ return opnum;
+
+/* Handle commutative operations. */
+commutative:
+ gcc_assert (first != -1u);
+
+ /* Check the rest of the arguments to make sure they are the same. */
+ for (unsigned i = first + 2; i < arg0_op.num_ops; i++)
+ if (!operand_equal_for_phi_arg_p (arg0_op.ops[i],
+ arg1_op.ops[i]))
+ return -1;
+
+ /* If the arg0[first+1] and arg1[first] are the same
+ then the one which is different is arg0[first] and arg1[first+1]
+ return first since this is based on arg0. */
+ if (operand_equal_for_phi_arg_p (arg0_op.ops[first + 1],
+ arg1_op.ops[first]))
+ {
+ *new_arg0 = arg0_op.ops[first];
+ *new_arg1 = arg1_op.ops[first + 1];
+ return first;
+ }
+ /* If the arg0[first] and arg1[first+1] are the same
+ then the one which is different is arg0[first+1] and arg1[first]
+ return first+1 since this is based on arg0. */
+ if (operand_equal_for_phi_arg_p (arg0_op.ops[first],
+ arg1_op.ops[first + 1]))
+ {
+ *new_arg0 = arg0_op.ops[first + 1];
+ *new_arg1 = arg1_op.ops[first];
+ return first + 1;
+ }
+ return -1;
+}
+
+/* Factors out an operation from *ARG0 and *ARG1 and
+ create the new statement at GSI. *RES is the
+ result of that new statement. Update *ARG0 and *ARG1
+ and *RES to the new values if the factoring happened.
+ Loops until all of the factoring is completed. */
+
+static void
+factor_out_operators (tree *res, gimple_stmt_iterator *gsi,
+ tree *arg0, tree *arg1, gphi *phi)
+{
+ gimple_match_op arg0_op, arg1_op;
+ bool repeated = false;
+
+again:
+ if (TREE_CODE (*arg0) != SSA_NAME || TREE_CODE (*arg1) != SSA_NAME)
+ return;
+
+ if (operand_equal_p (*arg0, *arg1))
+ return;
+
+  /* If either arg has > 1 use, then this transformation actually
+ increases the number of expressions evaluated at runtime. */
+ if (repeated
+ ? (!has_zero_uses (*arg0) || !has_zero_uses (*arg1))
+ : (!has_single_use (*arg0) || !has_single_use (*arg1)))
+ return;
+
+ gimple *arg0_def_stmt = SSA_NAME_DEF_STMT (*arg0);
+ if (!gimple_extract_op (arg0_def_stmt, &arg0_op))
+ return;
+
+  /* At this point there should be no ssa names occurring in abnormals. */
+ gcc_assert (!arg0_op.operands_occurs_in_abnormal_phi ());
+
+ gimple *arg1_def_stmt = SSA_NAME_DEF_STMT (*arg1);
+ if (!gimple_extract_op (arg1_def_stmt, &arg1_op))
+ return;
+
+  /* At this point there should be no ssa names occurring in abnormals. */
+ gcc_assert (!arg1_op.operands_occurs_in_abnormal_phi ());
+
+  /* No factoring can happen if the codes are different
+     or the number of operands differ. */
+ if (arg1_op.code != arg0_op.code
+ || arg1_op.num_ops != arg0_op.num_ops)
+ return;
+
+ tree new_arg0, new_arg1;
+ int opnum = find_different_opnum (arg0_op, arg1_op, &new_arg0, &new_arg1);
+ if (opnum == -1)
+ return;
+
+ if (!types_compatible_p (TREE_TYPE (new_arg0), TREE_TYPE (new_arg1)))
+ return;
+ tree new_res = make_ssa_name (TREE_TYPE (new_arg0), NULL);
+
+ /* Create the operation stmt if possible and insert it. */
+
+ gimple_match_op new_op = arg0_op;
+ new_op.ops[opnum] = new_res;
+ gimple_seq seq = NULL;
+ tree result = *res;
+ result = maybe_push_res_to_seq (&new_op, &seq, result);
+
+ /* If we can't create the new statement, release the temp name
+ and return back. */
+ if (!result)
+ {
+ release_ssa_name (new_res);
+ return;
+ }
+ gsi_insert_seq_before (gsi, seq, GSI_CONTINUE_LINKING);
+
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file, "PHI ");
+ print_generic_expr (dump_file, gimple_phi_result (phi));
+ fprintf (dump_file,
+ " changed to factor operation out from COND_EXPR.\n");
+ fprintf (dump_file, "New stmt with OPERATION that defines ");
+ print_generic_expr (dump_file, result);
+ fprintf (dump_file, ".\n");
+ }
+
+ /* Remove the old operation(s) that has single use. */
+ gimple_stmt_iterator gsi_for_def;
+
+ gsi_for_def = gsi_for_stmt (arg0_def_stmt);
+ gsi_remove (&gsi_for_def, true);
+ release_defs (arg0_def_stmt);
+ gsi_for_def = gsi_for_stmt (arg1_def_stmt);
+ gsi_remove (&gsi_for_def, true);
+ release_defs (arg1_def_stmt);
+
+ /* Update the arguments and try again. */
+ *arg0 = new_arg0;
+ *arg1 = new_arg1;
+ *res = new_res;
+ repeated = true;
+ goto again;
+}
+
/* Create the smallest nested conditional possible. On pre-order we record
which conditionals are live, and on post-order rewrite the chain by removing
already active conditions.
@@ -2293,6 +2474,11 @@ predicate_scalar_phi (gphi *phi, gimple_stmt_iterator *gsi, bool loop_versioned)
arg0 = gimple_phi_arg_def (phi, 0);
arg1 = gimple_phi_arg_def (phi, 1);
}
+
+ /* Factor out operand if possible. This can only be done easily
+ for PHI with 2 elements. */
+ factor_out_operators (&res, gsi, &arg0, &arg1, phi);
+
if (is_cond_scalar_reduction (phi, &reduc, arg0, arg1,
&op0, &op1, false, &has_nop,
&nop_reduc))
diff --git a/gcc/tree-inline.cc b/gcc/tree-inline.cc
index e8fe035..08e6421 100644
--- a/gcc/tree-inline.cc
+++ b/gcc/tree-inline.cc
@@ -1460,10 +1460,7 @@ copy_tree_body_r (tree *tp, int *walk_subtrees, void *data)
|| OMP_CLAUSE_CODE (*tp) == OMP_CLAUSE_DEPEND))
{
tree t = OMP_CLAUSE_DECL (*tp);
- if (t
- && TREE_CODE (t) == TREE_LIST
- && TREE_PURPOSE (t)
- && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
+ if (t && OMP_ITERATOR_DECL_P (t))
{
*walk_subtrees = 0;
OMP_CLAUSE_DECL (*tp) = copy_node (t);
diff --git a/gcc/tree-pretty-print.cc b/gcc/tree-pretty-print.cc
index fadafd6..50d0851 100644
--- a/gcc/tree-pretty-print.cc
+++ b/gcc/tree-pretty-print.cc
@@ -902,9 +902,7 @@ dump_omp_clause (pretty_printer *pp, tree clause, int spc, dump_flags_t flags)
pp_string (pp, "affinity(");
{
tree t = OMP_CLAUSE_DECL (clause);
- if (TREE_CODE (t) == TREE_LIST
- && TREE_PURPOSE (t)
- && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
+ if (OMP_ITERATOR_DECL_P (t))
{
dump_omp_iterators (pp, TREE_PURPOSE (t), spc, flags);
pp_colon (pp);
@@ -944,9 +942,7 @@ dump_omp_clause (pretty_printer *pp, tree clause, int spc, dump_flags_t flags)
}
{
tree t = OMP_CLAUSE_DECL (clause);
- if (TREE_CODE (t) == TREE_LIST
- && TREE_PURPOSE (t)
- && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
+ if (OMP_ITERATOR_DECL_P (t))
{
dump_omp_iterators (pp, TREE_PURPOSE (t), spc, flags);
pp_colon (pp);
diff --git a/gcc/tree-ssa-pre.cc b/gcc/tree-ssa-pre.cc
index f6c531e..9933173 100644
--- a/gcc/tree-ssa-pre.cc
+++ b/gcc/tree-ssa-pre.cc
@@ -2773,16 +2773,17 @@ find_or_generate_expression (basic_block block, tree op, gimple_seq *stmts)
bitmap exprset = value_expressions[lookfor];
bitmap_iterator bi;
unsigned int i;
- EXECUTE_IF_SET_IN_BITMAP (exprset, 0, i, bi)
- {
- pre_expr temp = expression_for_id (i);
- /* We cannot insert random REFERENCE expressions at arbitrary
- places. We can insert NARYs which eventually re-materializes
- its operand values. */
- if (temp->kind == NARY)
- return create_expression_by_pieces (block, temp, stmts,
- TREE_TYPE (op));
- }
+ if (exprset)
+ EXECUTE_IF_SET_IN_BITMAP (exprset, 0, i, bi)
+ {
+ pre_expr temp = expression_for_id (i);
+ /* We cannot insert random REFERENCE expressions at arbitrary
+ places. We can insert NARYs which eventually re-materializes
+ its operand values. */
+ if (temp->kind == NARY)
+ return create_expression_by_pieces (block, temp, stmts,
+ TREE_TYPE (op));
+ }
/* Defer. */
return NULL_TREE;
diff --git a/gcc/tree-ssa-structalias.cc b/gcc/tree-ssa-structalias.cc
index 0215243..fd22a94 100644
--- a/gcc/tree-ssa-structalias.cc
+++ b/gcc/tree-ssa-structalias.cc
@@ -48,6 +48,9 @@
#include "ipa-modref.h"
#include "attr-fnspec.h"
+#include "tree-ssa-structalias.h"
+#include "pta-andersen.h"
+
/* The idea behind this analyzer is to generate set constraints from the
program, then solve the resulting constraints in order to generate the
points-to sets.
@@ -201,172 +204,357 @@
And probably more. */
-static bool use_field_sensitive = true;
-static int in_ipa_mode = 0;
-
-/* Used for predecessor bitmaps. */
-static bitmap_obstack predbitmap_obstack;
+namespace pointer_analysis {
/* Used for points-to sets. */
-static bitmap_obstack pta_obstack;
+bitmap_obstack pta_obstack;
-/* Used for oldsolution members of variables. */
-static bitmap_obstack oldpta_obstack;
+/* Used for oldsolution members of variables. */
+bitmap_obstack oldpta_obstack;
-/* Used for per-solver-iteration bitmaps. */
-static bitmap_obstack iteration_obstack;
+/* Table of variable info structures for constraint variables.
+ Indexed directly by variable info id. */
+vec<varinfo_t> varmap;
-static unsigned int create_variable_info_for (tree, const char *, bool);
-typedef struct constraint_graph *constraint_graph_t;
-static void unify_nodes (constraint_graph_t, unsigned int, unsigned int, bool);
+/* List of constraints that we use to build the constraint graph from. */
+vec<constraint_t> constraints;
+
+/* Map from trees to variable infos. */
+static hash_map<tree, varinfo_t> *vi_for_tree;
-struct constraint;
-typedef struct constraint *constraint_t;
+/* The representative variable for a variable. The points-to solution for a
+ var can be found in its rep. Trivially, a var can be its own rep.
+ The solver provides this array once it is done solving. */
+unsigned int *var_rep;
-#define EXECUTE_IF_IN_NONNULL_BITMAP(a, b, c, d) \
- if (a) \
- EXECUTE_IF_SET_IN_BITMAP (a, b, c, d)
+struct constraint_stats stats;
-static struct constraint_stats
+/* Find the first varinfo in the same variable as START that overlaps with
+ OFFSET. Return NULL if we can't find one. */
+
+varinfo_t
+first_vi_for_offset (varinfo_t start, unsigned HOST_WIDE_INT offset)
{
- unsigned int total_vars;
- unsigned int nonpointer_vars;
- unsigned int unified_vars_static;
- unsigned int unified_vars_dynamic;
- unsigned int iterations;
- unsigned int num_edges;
- unsigned int num_implicit_edges;
- unsigned int num_avoided_edges;
- unsigned int points_to_sets_created;
-} stats;
-
-struct variable_info
+ /* If the offset is outside of the variable, bail out. */
+ if (offset >= start->fullsize)
+ return NULL;
+
+ /* If we cannot reach offset from start, lookup the first field
+ and start from there. */
+ if (start->offset > offset)
+ start = get_varinfo (start->head);
+
+ while (start)
+ {
+ /* We may not find a variable in the field list with the actual
+ offset when we have glommed a structure to a variable.
+ In that case, however, offset should still be within the size
+ of the variable. */
+ if (offset >= start->offset
+ && (offset - start->offset) < start->size)
+ return start;
+
+ start = vi_next (start);
+ }
+
+ return NULL;
+}
+
+/* Find the first varinfo in the same variable as START that overlaps with
+ OFFSET. If there is no such varinfo the varinfo directly preceding
+ OFFSET is returned. */
+
+varinfo_t
+first_or_preceding_vi_for_offset (varinfo_t start,
+ unsigned HOST_WIDE_INT offset)
{
- /* ID of this variable */
- unsigned int id;
+ /* If we cannot reach offset from start, lookup the first field
+ and start from there. */
+ if (start->offset > offset)
+ start = get_varinfo (start->head);
- /* True if this is a variable created by the constraint analysis, such as
- heap variables and constraints we had to break up. */
- unsigned int is_artificial_var : 1;
+ /* We may not find a variable in the field list with the actual
+ offset when we have glommed a structure to a variable.
+ In that case, however, offset should still be within the size
+ of the variable.
+ If we got beyond the offset we look for return the field
+ directly preceding offset which may be the last field. */
+ while (start->next
+ && offset >= start->offset
+ && !((offset - start->offset) < start->size))
+ start = vi_next (start);
- /* True if this is a special variable whose solution set should not be
- changed. */
- unsigned int is_special_var : 1;
+ return start;
+}
- /* True for variables whose size is not known or variable. */
- unsigned int is_unknown_size_var : 1;
+/* Print out constraint C to FILE. */
- /* True for (sub-)fields that represent a whole variable. */
- unsigned int is_full_var : 1;
+void
+dump_constraint (FILE *file, constraint_t c)
+{
+ if (c->lhs.type == ADDRESSOF)
+ fprintf (file, "&");
+ else if (c->lhs.type == DEREF)
+ fprintf (file, "*");
+ if (dump_file)
+ fprintf (file, "%s", get_varinfo (c->lhs.var)->name);
+ else
+ fprintf (file, "V%d", c->lhs.var);
+ if (c->lhs.offset == UNKNOWN_OFFSET)
+ fprintf (file, " + UNKNOWN");
+ else if (c->lhs.offset != 0)
+ fprintf (file, " + " HOST_WIDE_INT_PRINT_DEC, c->lhs.offset);
+ fprintf (file, " = ");
+ if (c->rhs.type == ADDRESSOF)
+ fprintf (file, "&");
+ else if (c->rhs.type == DEREF)
+ fprintf (file, "*");
+ if (dump_file)
+ fprintf (file, "%s", get_varinfo (c->rhs.var)->name);
+ else
+ fprintf (file, "V%d", c->rhs.var);
+ if (c->rhs.offset == UNKNOWN_OFFSET)
+ fprintf (file, " + UNKNOWN");
+ else if (c->rhs.offset != 0)
+ fprintf (file, " + " HOST_WIDE_INT_PRINT_DEC, c->rhs.offset);
+}
- /* True if this is a heap variable. */
- unsigned int is_heap_var : 1;
+/* Print out constraint C to stderr. */
- /* True if this is a register variable. */
- unsigned int is_reg_var : 1;
+DEBUG_FUNCTION void
+debug_constraint (constraint_t c)
+{
+ dump_constraint (stderr, c);
+ fprintf (stderr, "\n");
+}
- /* True if this field may contain pointers. */
- unsigned int may_have_pointers : 1;
+/* Print out all constraints to FILE. */
- /* True if this field has only restrict qualified pointers. */
- unsigned int only_restrict_pointers : 1;
+void
+dump_constraints (FILE *file, int from)
+{
+ int i;
+ constraint_t c;
+ for (i = from; constraints.iterate (i, &c); i++)
+ if (c)
+ {
+ dump_constraint (file, c);
+ fprintf (file, "\n");
+ }
+}
- /* True if this represents a heap var created for a restrict qualified
- pointer. */
- unsigned int is_restrict_var : 1;
+/* Print out all constraints to stderr. */
- /* True if this represents a global variable. */
- unsigned int is_global_var : 1;
+DEBUG_FUNCTION void
+debug_constraints (void)
+{
+ dump_constraints (stderr, 0);
+}
- /* True if this represents a module escape point for IPA analysis. */
- unsigned int is_ipa_escape_point : 1;
+/* Print out the points-to solution for VAR to FILE. */
- /* True if this represents a IPA function info. */
- unsigned int is_fn_info : 1;
+void
+dump_solution_for_var (FILE *file, unsigned int var)
+{
+ varinfo_t vi = get_varinfo (var);
+ unsigned int i;
+ bitmap_iterator bi;
- /* True if this appears as RHS in a ADDRESSOF constraint. */
- unsigned int address_taken : 1;
+ /* Dump the solution for unified vars anyway, this avoids difficulties
+ in scanning dumps in the testsuite. */
+ fprintf (file, "%s = { ", vi->name);
+ vi = get_varinfo (var_rep[var]);
+ EXECUTE_IF_SET_IN_BITMAP (vi->solution, 0, i, bi)
+ fprintf (file, "%s ", get_varinfo (i)->name);
+ fprintf (file, "}");
- /* ??? Store somewhere better. */
- unsigned short ruid;
+ /* But note when the variable was unified. */
+ if (vi->id != var)
+ fprintf (file, " same as %s", vi->name);
- /* The ID of the variable for the next field in this structure
- or zero for the last field in this structure. */
- unsigned next;
+ fprintf (file, "\n");
+}
- /* The ID of the variable for the first field in this structure. */
- unsigned head;
+/* Print the points-to solution for VAR to stderr. */
- /* Offset of this variable, in bits, from the base variable */
- unsigned HOST_WIDE_INT offset;
+DEBUG_FUNCTION void
+debug_solution_for_var (unsigned int var)
+{
+ dump_solution_for_var (stderr, var);
+}
- /* Size of the variable, in bits. */
- unsigned HOST_WIDE_INT size;
+/* Dump stats information to OUTFILE. */
- /* Full size of the base variable, in bits. */
- unsigned HOST_WIDE_INT fullsize;
+void
+dump_sa_stats (FILE *outfile)
+{
+ fprintf (outfile, "Points-to Stats:\n");
+ fprintf (outfile, "Total vars: %d\n", stats.total_vars);
+ fprintf (outfile, "Non-pointer vars: %d\n",
+ stats.nonpointer_vars);
+ fprintf (outfile, "Statically unified vars: %d\n",
+ stats.unified_vars_static);
+ fprintf (outfile, "Dynamically unified vars: %d\n",
+ stats.unified_vars_dynamic);
+ fprintf (outfile, "Iterations: %d\n", stats.iterations);
+ fprintf (outfile, "Number of edges: %d\n", stats.num_edges);
+ fprintf (outfile, "Number of implicit edges: %d\n",
+ stats.num_implicit_edges);
+ fprintf (outfile, "Number of avoided edges: %d\n",
+ stats.num_avoided_edges);
+}
- /* In IPA mode the shadow UID in case the variable needs to be duplicated in
- the final points-to solution because it reaches its containing
- function recursively. Zero if none is needed. */
- unsigned int shadow_var_uid;
+/* Dump points-to information to OUTFILE. */
- /* Name of this variable */
- const char *name;
+void
+dump_sa_points_to_info (FILE *outfile)
+{
+ fprintf (outfile, "\nPoints-to sets\n\n");
- /* Tree that this variable is associated with. */
- tree decl;
+ for (unsigned i = 1; i < varmap.length (); i++)
+ {
+ varinfo_t vi = get_varinfo (i);
+ if (!vi->may_have_pointers)
+ continue;
+ dump_solution_for_var (outfile, i);
+ }
+}
- /* Points-to set for this variable. */
- bitmap solution;
- /* Old points-to set for this variable. */
- bitmap oldsolution;
-};
-typedef struct variable_info *varinfo_t;
+/* Debug points-to information to stderr. */
-static varinfo_t first_vi_for_offset (varinfo_t, unsigned HOST_WIDE_INT);
-static varinfo_t first_or_preceding_vi_for_offset (varinfo_t,
- unsigned HOST_WIDE_INT);
-static varinfo_t lookup_vi_for_tree (tree);
-static inline bool type_can_have_subvars (const_tree);
-static void make_param_constraints (varinfo_t);
+DEBUG_FUNCTION void
+debug_sa_points_to_info (void)
+{
+ dump_sa_points_to_info (stderr);
+}
-/* Pool of variable info structures. */
-static object_allocator<variable_info> variable_info_pool
- ("Variable info pool");
+/* Dump varinfo VI to FILE. */
-/* Map varinfo to final pt_solution. */
-static hash_map<varinfo_t, pt_solution *> *final_solutions;
-struct obstack final_solutions_obstack;
+void
+dump_varinfo (FILE *file, varinfo_t vi)
+{
+ if (vi == NULL)
+ return;
-/* Table of variable info structures for constraint variables.
- Indexed directly by variable info id. */
-static vec<varinfo_t> varmap;
+ fprintf (file, "%u: %s\n", vi->id, vi->name);
-/* Return the varmap element N */
+ const char *sep = " ";
+ if (vi->is_artificial_var)
+ fprintf (file, "%sartificial", sep);
+ if (vi->is_special_var)
+ fprintf (file, "%sspecial", sep);
+ if (vi->is_unknown_size_var)
+ fprintf (file, "%sunknown-size", sep);
+ if (vi->is_full_var)
+ fprintf (file, "%sfull", sep);
+ if (vi->is_heap_var)
+ fprintf (file, "%sheap", sep);
+ if (vi->may_have_pointers)
+ fprintf (file, "%smay-have-pointers", sep);
+ if (vi->only_restrict_pointers)
+ fprintf (file, "%sonly-restrict-pointers", sep);
+ if (vi->is_restrict_var)
+ fprintf (file, "%sis-restrict-var", sep);
+ if (vi->is_global_var)
+ fprintf (file, "%sglobal", sep);
+ if (vi->is_ipa_escape_point)
+ fprintf (file, "%sipa-escape-point", sep);
+ if (vi->is_fn_info)
+ fprintf (file, "%sfn-info", sep);
+ if (vi->ruid)
+ fprintf (file, "%srestrict-uid:%u", sep, vi->ruid);
+ if (vi->next)
+ fprintf (file, "%snext:%u", sep, vi->next);
+ if (vi->head != vi->id)
+ fprintf (file, "%shead:%u", sep, vi->head);
+ if (vi->offset)
+ fprintf (file, "%soffset:" HOST_WIDE_INT_PRINT_DEC, sep, vi->offset);
+ if (vi->size != ~HOST_WIDE_INT_0U)
+ fprintf (file, "%ssize:" HOST_WIDE_INT_PRINT_DEC, sep, vi->size);
+ if (vi->fullsize != ~HOST_WIDE_INT_0U && vi->fullsize != vi->size)
+ fprintf (file, "%sfullsize:" HOST_WIDE_INT_PRINT_DEC, sep,
+ vi->fullsize);
+ fprintf (file, "\n");
-static inline varinfo_t
-get_varinfo (unsigned int n)
+ if (vi->solution && !bitmap_empty_p (vi->solution))
+ {
+ bitmap_iterator bi;
+ unsigned i;
+ fprintf (file, " solution: {");
+ EXECUTE_IF_SET_IN_BITMAP (vi->solution, 0, i, bi)
+ fprintf (file, " %u", i);
+ fprintf (file, " }\n");
+ }
+
+ if (vi->oldsolution && !bitmap_empty_p (vi->oldsolution)
+ && !bitmap_equal_p (vi->solution, vi->oldsolution))
+ {
+ bitmap_iterator bi;
+ unsigned i;
+ fprintf (file, " oldsolution: {");
+ EXECUTE_IF_SET_IN_BITMAP (vi->oldsolution, 0, i, bi)
+ fprintf (file, " %u", i);
+ fprintf (file, " }\n");
+ }
+}
+
+/* Dump varinfo VI to stderr. */
+
+DEBUG_FUNCTION void
+debug_varinfo (varinfo_t vi)
+{
+ dump_varinfo (stderr, vi);
+}
+
+/* Dump varmap to FILE. */
+
+void
+dump_varmap (FILE *file)
{
- return varmap[n];
+ if (varmap.length () == 0)
+ return;
+
+ fprintf (file, "variables:\n");
+
+ for (unsigned int i = 0; i < varmap.length (); ++i)
+ {
+ varinfo_t vi = get_varinfo (i);
+ dump_varinfo (file, vi);
+ }
+
+ fprintf (file, "\n");
}
-/* Return the next variable in the list of sub-variables of VI
- or NULL if VI is the last sub-variable. */
+/* Dump varmap to stderr. */
-static inline varinfo_t
-vi_next (varinfo_t vi)
+DEBUG_FUNCTION void
+debug_varmap (void)
{
- return get_varinfo (vi->next);
+ dump_varmap (stderr);
}
-/* Static IDs for the special variables. Variable ID zero is unused
- and used as terminator for the sub-variable chain. */
-enum { nothing_id = 1, anything_id = 2, string_id = 3,
- escaped_id = 4, nonlocal_id = 5, escaped_return_id = 6,
- storedanything_id = 7, integer_id = 8 };
+} // namespace pointer_analysis
+
+
+using namespace pointer_analysis;
+
+static bool use_field_sensitive = true;
+static int in_ipa_mode = 0;
+
+static unsigned int create_variable_info_for (tree, const char *, bool);
+static varinfo_t lookup_vi_for_tree (tree);
+static inline bool type_can_have_subvars (const_tree);
+static void make_param_constraints (varinfo_t);
+
+/* Pool of variable info structures. */
+static object_allocator<variable_info> variable_info_pool
+ ("Variable info pool");
+
+/* Map varinfo to final pt_solution. */
+static hash_map<varinfo_t, pt_solution *> *final_solutions;
+static struct obstack final_solutions_obstack;
/* Return a new variable info structure consisting for a variable
named NAME, and using constraint graph node NODE. Append it
@@ -502,166 +690,15 @@ get_call_clobber_vi (gcall *call)
}
-enum constraint_expr_type {SCALAR, DEREF, ADDRESSOF};
-
-/* An expression that appears in a constraint. */
-
-struct constraint_expr
-{
- /* Constraint type. */
- constraint_expr_type type;
-
- /* Variable we are referring to in the constraint. */
- unsigned int var;
-
- /* Offset, in bits, of this constraint from the beginning of
- variables it ends up referring to.
-
- IOW, in a deref constraint, we would deref, get the result set,
- then add OFFSET to each member. */
- HOST_WIDE_INT offset;
-};
-
-/* Use 0x8000... as special unknown offset. */
-#define UNKNOWN_OFFSET HOST_WIDE_INT_MIN
-
-typedef struct constraint_expr ce_s;
static void get_constraint_for_1 (tree, vec<ce_s> *, bool, bool);
static void get_constraint_for (tree, vec<ce_s> *);
static void get_constraint_for_rhs (tree, vec<ce_s> *);
static void do_deref (vec<ce_s> *);
-/* Our set constraints are made up of two constraint expressions, one
- LHS, and one RHS.
-
- As described in the introduction, our set constraints each represent an
- operation between set valued variables.
-*/
-struct constraint
-{
- struct constraint_expr lhs;
- struct constraint_expr rhs;
-};
+/* Allocator for 'constraints' vector. */
-/* List of constraints that we use to build the constraint graph from. */
-
-static vec<constraint_t> constraints;
static object_allocator<constraint> constraint_pool ("Constraint pool");
-/* The constraint graph is represented as an array of bitmaps
- containing successor nodes. */
-
-struct constraint_graph
-{
- /* Size of this graph, which may be different than the number of
- nodes in the variable map. */
- unsigned int size;
-
- /* Explicit successors of each node. */
- bitmap *succs;
-
- /* Implicit predecessors of each node (Used for variable
- substitution). */
- bitmap *implicit_preds;
-
- /* Explicit predecessors of each node (Used for variable substitution). */
- bitmap *preds;
-
- /* Indirect cycle representatives, or -1 if the node has no indirect
- cycles. */
- int *indirect_cycles;
-
- /* Representative node for a node. rep[a] == a unless the node has
- been unified. */
- unsigned int *rep;
-
- /* Equivalence class representative for a label. This is used for
- variable substitution. */
- int *eq_rep;
-
- /* Pointer equivalence label for a node. All nodes with the same
- pointer equivalence label can be unified together at some point
- (either during constraint optimization or after the constraint
- graph is built). */
- unsigned int *pe;
-
- /* Pointer equivalence representative for a label. This is used to
- handle nodes that are pointer equivalent but not location
- equivalent. We can unite these once the addressof constraints
- are transformed into initial points-to sets. */
- int *pe_rep;
-
- /* Pointer equivalence label for each node, used during variable
- substitution. */
- unsigned int *pointer_label;
-
- /* Location equivalence label for each node, used during location
- equivalence finding. */
- unsigned int *loc_label;
-
- /* Pointed-by set for each node, used during location equivalence
- finding. This is pointed-by rather than pointed-to, because it
- is constructed using the predecessor graph. */
- bitmap *pointed_by;
-
- /* Points to sets for pointer equivalence. This is *not* the actual
- points-to sets for nodes. */
- bitmap *points_to;
-
- /* Bitmap of nodes where the bit is set if the node is a direct
- node. Used for variable substitution. */
- sbitmap direct_nodes;
-
- /* Bitmap of nodes where the bit is set if the node is address
- taken. Used for variable substitution. */
- bitmap address_taken;
-
- /* Vector of complex constraints for each graph node. Complex
- constraints are those involving dereferences or offsets that are
- not 0. */
- vec<constraint_t> *complex;
-};
-
-static constraint_graph_t graph;
-
-/* During variable substitution and the offline version of indirect
- cycle finding, we create nodes to represent dereferences and
- address taken constraints. These represent where these start and
- end. */
-#define FIRST_REF_NODE (varmap).length ()
-#define LAST_REF_NODE (FIRST_REF_NODE + (FIRST_REF_NODE - 1))
-
-/* Return the representative node for NODE, if NODE has been unioned
- with another NODE.
- This function performs path compression along the way to finding
- the representative. */
-
-static unsigned int
-find (unsigned int node)
-{
- gcc_checking_assert (node < graph->size);
- if (graph->rep[node] != node)
- return graph->rep[node] = find (graph->rep[node]);
- return node;
-}
-
-/* Union the TO and FROM nodes to the TO nodes.
- Note that at some point in the future, we may want to do
- union-by-rank, in which case we are going to have to return the
- node we unified to. */
-
-static bool
-unite (unsigned int to, unsigned int from)
-{
- gcc_checking_assert (to < graph->size && from < graph->size);
- if (to != from && graph->rep[from] != to)
- {
- graph->rep[from] = to;
- return true;
- }
- return false;
-}
-
/* Create a new constraint consisting of LHS and RHS expressions. */
static constraint_t
@@ -674,2312 +711,6 @@ new_constraint (const struct constraint_expr lhs,
return ret;
}
-/* Print out constraint C to FILE. */
-
-static void
-dump_constraint (FILE *file, constraint_t c)
-{
- if (c->lhs.type == ADDRESSOF)
- fprintf (file, "&");
- else if (c->lhs.type == DEREF)
- fprintf (file, "*");
- if (dump_file)
- fprintf (file, "%s", get_varinfo (c->lhs.var)->name);
- else
- fprintf (file, "V%d", c->lhs.var);
- if (c->lhs.offset == UNKNOWN_OFFSET)
- fprintf (file, " + UNKNOWN");
- else if (c->lhs.offset != 0)
- fprintf (file, " + " HOST_WIDE_INT_PRINT_DEC, c->lhs.offset);
- fprintf (file, " = ");
- if (c->rhs.type == ADDRESSOF)
- fprintf (file, "&");
- else if (c->rhs.type == DEREF)
- fprintf (file, "*");
- if (dump_file)
- fprintf (file, "%s", get_varinfo (c->rhs.var)->name);
- else
- fprintf (file, "V%d", c->rhs.var);
- if (c->rhs.offset == UNKNOWN_OFFSET)
- fprintf (file, " + UNKNOWN");
- else if (c->rhs.offset != 0)
- fprintf (file, " + " HOST_WIDE_INT_PRINT_DEC, c->rhs.offset);
-}
-
-
-void debug_constraint (constraint_t);
-void debug_constraints (void);
-void debug_constraint_graph (void);
-void debug_solution_for_var (unsigned int);
-void debug_sa_points_to_info (void);
-void debug_varinfo (varinfo_t);
-void debug_varmap (void);
-
-/* Print out constraint C to stderr. */
-
-DEBUG_FUNCTION void
-debug_constraint (constraint_t c)
-{
- dump_constraint (stderr, c);
- fprintf (stderr, "\n");
-}
-
-/* Print out all constraints to FILE */
-
-static void
-dump_constraints (FILE *file, int from)
-{
- int i;
- constraint_t c;
- for (i = from; constraints.iterate (i, &c); i++)
- if (c)
- {
- dump_constraint (file, c);
- fprintf (file, "\n");
- }
-}
-
-/* Print out all constraints to stderr. */
-
-DEBUG_FUNCTION void
-debug_constraints (void)
-{
- dump_constraints (stderr, 0);
-}
-
-/* Print the constraint graph in dot format. */
-
-static void
-dump_constraint_graph (FILE *file)
-{
- unsigned int i;
-
- /* Only print the graph if it has already been initialized: */
- if (!graph)
- return;
-
- /* Prints the header of the dot file: */
- fprintf (file, "strict digraph {\n");
- fprintf (file, " node [\n shape = box\n ]\n");
- fprintf (file, " edge [\n fontsize = \"12\"\n ]\n");
- fprintf (file, "\n // List of nodes and complex constraints in "
- "the constraint graph:\n");
-
- /* The next lines print the nodes in the graph together with the
- complex constraints attached to them. */
- for (i = 1; i < graph->size; i++)
- {
- if (i == FIRST_REF_NODE)
- continue;
- if (find (i) != i)
- continue;
- if (i < FIRST_REF_NODE)
- fprintf (file, "\"%s\"", get_varinfo (i)->name);
- else
- fprintf (file, "\"*%s\"", get_varinfo (i - FIRST_REF_NODE)->name);
- if (graph->complex[i].exists ())
- {
- unsigned j;
- constraint_t c;
- fprintf (file, " [label=\"\\N\\n");
- for (j = 0; graph->complex[i].iterate (j, &c); ++j)
- {
- dump_constraint (file, c);
- fprintf (file, "\\l");
- }
- fprintf (file, "\"]");
- }
- fprintf (file, ";\n");
- }
-
- /* Go over the edges. */
- fprintf (file, "\n // Edges in the constraint graph:\n");
- for (i = 1; i < graph->size; i++)
- {
- unsigned j;
- bitmap_iterator bi;
- if (find (i) != i)
- continue;
- EXECUTE_IF_IN_NONNULL_BITMAP (graph->succs[i], 0, j, bi)
- {
- unsigned to = find (j);
- if (i == to)
- continue;
- if (i < FIRST_REF_NODE)
- fprintf (file, "\"%s\"", get_varinfo (i)->name);
- else
- fprintf (file, "\"*%s\"", get_varinfo (i - FIRST_REF_NODE)->name);
- fprintf (file, " -> ");
- if (to < FIRST_REF_NODE)
- fprintf (file, "\"%s\"", get_varinfo (to)->name);
- else
- fprintf (file, "\"*%s\"", get_varinfo (to - FIRST_REF_NODE)->name);
- fprintf (file, ";\n");
- }
- }
-
- /* Prints the tail of the dot file. */
- fprintf (file, "}\n");
-}
-
-/* Print out the constraint graph to stderr. */
-
-DEBUG_FUNCTION void
-debug_constraint_graph (void)
-{
- dump_constraint_graph (stderr);
-}
-
-/* SOLVER FUNCTIONS
-
- The solver is a simple worklist solver, that works on the following
- algorithm:
-
- sbitmap changed_nodes = all zeroes;
- changed_count = 0;
- For each node that is not already collapsed:
- changed_count++;
- set bit in changed nodes
-
- while (changed_count > 0)
- {
- compute topological ordering for constraint graph
-
- find and collapse cycles in the constraint graph (updating
- changed if necessary)
-
- for each node (n) in the graph in topological order:
- changed_count--;
-
- Process each complex constraint associated with the node,
- updating changed if necessary.
-
- For each outgoing edge from n, propagate the solution from n to
- the destination of the edge, updating changed as necessary.
-
- } */
-
-/* Return true if two constraint expressions A and B are equal. */
-
-static bool
-constraint_expr_equal (struct constraint_expr a, struct constraint_expr b)
-{
- return a.type == b.type && a.var == b.var && a.offset == b.offset;
-}
-
-/* Return true if constraint expression A is less than constraint expression
- B. This is just arbitrary, but consistent, in order to give them an
- ordering. */
-
-static bool
-constraint_expr_less (struct constraint_expr a, struct constraint_expr b)
-{
- if (a.type == b.type)
- {
- if (a.var == b.var)
- return a.offset < b.offset;
- else
- return a.var < b.var;
- }
- else
- return a.type < b.type;
-}
-
-/* Return true if constraint A is less than constraint B. This is just
- arbitrary, but consistent, in order to give them an ordering. */
-
-static bool
-constraint_less (const constraint_t &a, const constraint_t &b)
-{
- if (constraint_expr_less (a->lhs, b->lhs))
- return true;
- else if (constraint_expr_less (b->lhs, a->lhs))
- return false;
- else
- return constraint_expr_less (a->rhs, b->rhs);
-}
-
-/* Return true if two constraints A and B are equal. */
-
-static bool
-constraint_equal (const constraint &a, const constraint &b)
-{
- return constraint_expr_equal (a.lhs, b.lhs)
- && constraint_expr_equal (a.rhs, b.rhs);
-}
-
-
-/* Find a constraint LOOKFOR in the sorted constraint vector VEC */
-
-static constraint_t
-constraint_vec_find (vec<constraint_t> vec,
- constraint &lookfor)
-{
- unsigned int place;
- constraint_t found;
-
- if (!vec.exists ())
- return NULL;
-
- place = vec.lower_bound (&lookfor, constraint_less);
- if (place >= vec.length ())
- return NULL;
- found = vec[place];
- if (!constraint_equal (*found, lookfor))
- return NULL;
- return found;
-}
-
-/* Union two constraint vectors, TO and FROM. Put the result in TO.
- Returns true of TO set is changed. */
-
-static bool
-constraint_set_union (vec<constraint_t> *to,
- vec<constraint_t> *from)
-{
- int i;
- constraint_t c;
- bool any_change = false;
-
- FOR_EACH_VEC_ELT (*from, i, c)
- {
- if (constraint_vec_find (*to, *c) == NULL)
- {
- unsigned int place = to->lower_bound (c, constraint_less);
- to->safe_insert (place, c);
- any_change = true;
- }
- }
- return any_change;
-}
-
-/* Expands the solution in SET to all sub-fields of variables included. */
-
-static bitmap
-solution_set_expand (bitmap set, bitmap *expanded)
-{
- bitmap_iterator bi;
- unsigned j;
-
- if (*expanded)
- return *expanded;
-
- *expanded = BITMAP_ALLOC (&iteration_obstack);
-
- /* In a first pass expand variables, once for each head to avoid
- quadratic behavior, to include all sub-fields. */
- unsigned prev_head = 0;
- EXECUTE_IF_SET_IN_BITMAP (set, 0, j, bi)
- {
- varinfo_t v = get_varinfo (j);
- if (v->is_artificial_var
- || v->is_full_var)
- continue;
- if (v->head != prev_head)
- {
- varinfo_t head = get_varinfo (v->head);
- unsigned num = 1;
- for (varinfo_t n = vi_next (head); n != NULL; n = vi_next (n))
- {
- if (n->id != head->id + num)
- {
- /* Usually sub variables are adjacent but since we
- create pointed-to restrict representatives there
- can be gaps as well. */
- bitmap_set_range (*expanded, head->id, num);
- head = n;
- num = 1;
- }
- else
- num++;
- }
-
- bitmap_set_range (*expanded, head->id, num);
- prev_head = v->head;
- }
- }
-
- /* And finally set the rest of the bits from SET in an efficient way. */
- bitmap_ior_into (*expanded, set);
-
- return *expanded;
-}
-
-/* Union solution sets TO and DELTA, and add INC to each member of DELTA in the
- process. */
-
-static bool
-set_union_with_increment (bitmap to, bitmap delta, HOST_WIDE_INT inc,
- bitmap *expanded_delta)
-{
- bool changed = false;
- bitmap_iterator bi;
- unsigned int i;
-
- /* If the solution of DELTA contains anything it is good enough to transfer
- this to TO. */
- if (bitmap_bit_p (delta, anything_id))
- return bitmap_set_bit (to, anything_id);
-
- /* If the offset is unknown we have to expand the solution to
- all subfields. */
- if (inc == UNKNOWN_OFFSET)
- {
- delta = solution_set_expand (delta, expanded_delta);
- changed |= bitmap_ior_into (to, delta);
- return changed;
- }
-
- /* For non-zero offset union the offsetted solution into the destination. */
- EXECUTE_IF_SET_IN_BITMAP (delta, 0, i, bi)
- {
- varinfo_t vi = get_varinfo (i);
-
- /* If this is a variable with just one field just set its bit
- in the result. */
- if (vi->is_artificial_var
- || vi->is_unknown_size_var
- || vi->is_full_var)
- changed |= bitmap_set_bit (to, i);
- else
- {
- HOST_WIDE_INT fieldoffset = vi->offset + inc;
- unsigned HOST_WIDE_INT size = vi->size;
-
- /* If the offset makes the pointer point to before the
- variable use offset zero for the field lookup. */
- if (fieldoffset < 0)
- vi = get_varinfo (vi->head);
- else
- vi = first_or_preceding_vi_for_offset (vi, fieldoffset);
-
- do
- {
- changed |= bitmap_set_bit (to, vi->id);
- if (vi->is_full_var
- || vi->next == 0)
- break;
-
- /* We have to include all fields that overlap the current field
- shifted by inc. */
- vi = vi_next (vi);
- }
- while (vi->offset < fieldoffset + size);
- }
- }
-
- return changed;
-}
-
-/* Insert constraint C into the list of complex constraints for graph
- node VAR. */
-
-static void
-insert_into_complex (constraint_graph_t graph,
- unsigned int var, constraint_t c)
-{
- vec<constraint_t> complex = graph->complex[var];
- unsigned int place = complex.lower_bound (c, constraint_less);
-
- /* Only insert constraints that do not already exist. */
- if (place >= complex.length ()
- || !constraint_equal (*c, *complex[place]))
- graph->complex[var].safe_insert (place, c);
-}
-
-
-/* Condense two variable nodes into a single variable node, by moving
- all associated info from FROM to TO. Returns true if TO node's
- constraint set changes after the merge. */
-
-static bool
-merge_node_constraints (constraint_graph_t graph, unsigned int to,
- unsigned int from)
-{
- unsigned int i;
- constraint_t c;
- bool any_change = false;
-
- gcc_checking_assert (find (from) == to);
-
- /* Move all complex constraints from src node into to node */
- FOR_EACH_VEC_ELT (graph->complex[from], i, c)
- {
- /* In complex constraints for node FROM, we may have either
- a = *FROM, and *FROM = a, or an offseted constraint which are
- always added to the rhs node's constraints. */
-
- if (c->rhs.type == DEREF)
- c->rhs.var = to;
- else if (c->lhs.type == DEREF)
- c->lhs.var = to;
- else
- c->rhs.var = to;
-
- }
- any_change = constraint_set_union (&graph->complex[to],
- &graph->complex[from]);
- graph->complex[from].release ();
- return any_change;
-}
-
-
-/* Remove edges involving NODE from GRAPH. */
-
-static void
-clear_edges_for_node (constraint_graph_t graph, unsigned int node)
-{
- if (graph->succs[node])
- BITMAP_FREE (graph->succs[node]);
-}
-
-/* Merge GRAPH nodes FROM and TO into node TO. */
-
-static void
-merge_graph_nodes (constraint_graph_t graph, unsigned int to,
- unsigned int from)
-{
- if (graph->indirect_cycles[from] != -1)
- {
- /* If we have indirect cycles with the from node, and we have
- none on the to node, the to node has indirect cycles from the
- from node now that they are unified.
- If indirect cycles exist on both, unify the nodes that they
- are in a cycle with, since we know they are in a cycle with
- each other. */
- if (graph->indirect_cycles[to] == -1)
- graph->indirect_cycles[to] = graph->indirect_cycles[from];
- }
-
- /* Merge all the successor edges. */
- if (graph->succs[from])
- {
- if (!graph->succs[to])
- graph->succs[to] = BITMAP_ALLOC (&pta_obstack);
- bitmap_ior_into (graph->succs[to],
- graph->succs[from]);
- }
-
- clear_edges_for_node (graph, from);
-}
-
-
-/* Add an indirect graph edge to GRAPH, going from TO to FROM if
- it doesn't exist in the graph already. */
-
-static void
-add_implicit_graph_edge (constraint_graph_t graph, unsigned int to,
- unsigned int from)
-{
- if (to == from)
- return;
-
- if (!graph->implicit_preds[to])
- graph->implicit_preds[to] = BITMAP_ALLOC (&predbitmap_obstack);
-
- if (bitmap_set_bit (graph->implicit_preds[to], from))
- stats.num_implicit_edges++;
-}
-
-/* Add a predecessor graph edge to GRAPH, going from TO to FROM if
- it doesn't exist in the graph already.
- Return false if the edge already existed, true otherwise. */
-
-static void
-add_pred_graph_edge (constraint_graph_t graph, unsigned int to,
- unsigned int from)
-{
- if (!graph->preds[to])
- graph->preds[to] = BITMAP_ALLOC (&predbitmap_obstack);
- bitmap_set_bit (graph->preds[to], from);
-}
-
-/* Add a graph edge to GRAPH, going from FROM to TO if
- it doesn't exist in the graph already.
- Return false if the edge already existed, true otherwise. */
-
-static bool
-add_graph_edge (constraint_graph_t graph, unsigned int to,
- unsigned int from)
-{
- if (to == from)
- {
- return false;
- }
- else
- {
- bool r = false;
-
- if (!graph->succs[from])
- graph->succs[from] = BITMAP_ALLOC (&pta_obstack);
-
- /* The graph solving process does not avoid "triangles", thus
- there can be multiple paths from a node to another involving
- intermediate other nodes. That causes extra copying which is
- most difficult to avoid when the intermediate node is ESCAPED
- because there are no edges added from ESCAPED. Avoid
- adding the direct edge FROM -> TO when we have FROM -> ESCAPED
- and TO contains ESCAPED.
- ??? Note this is only a heuristic, it does not prevent the
- situation from occuring. The heuristic helps PR38474 and
- PR99912 significantly. */
- if (to < FIRST_REF_NODE
- && bitmap_bit_p (graph->succs[from], find (escaped_id))
- && bitmap_bit_p (get_varinfo (find (to))->solution, escaped_id))
- {
- stats.num_avoided_edges++;
- return false;
- }
-
- if (bitmap_set_bit (graph->succs[from], to))
- {
- r = true;
- if (to < FIRST_REF_NODE && from < FIRST_REF_NODE)
- stats.num_edges++;
- }
- return r;
- }
-}
-
-
-/* Initialize the constraint graph structure to contain SIZE nodes. */
-
-static void
-init_graph (unsigned int size)
-{
- unsigned int j;
-
- graph = XCNEW (struct constraint_graph);
- graph->size = size;
- graph->succs = XCNEWVEC (bitmap, graph->size);
- graph->indirect_cycles = XNEWVEC (int, graph->size);
- graph->rep = XNEWVEC (unsigned int, graph->size);
- /* ??? Macros do not support template types with multiple arguments,
- so we use a typedef to work around it. */
- typedef vec<constraint_t> vec_constraint_t_heap;
- graph->complex = XCNEWVEC (vec_constraint_t_heap, size);
- graph->pe = XCNEWVEC (unsigned int, graph->size);
- graph->pe_rep = XNEWVEC (int, graph->size);
-
- for (j = 0; j < graph->size; j++)
- {
- graph->rep[j] = j;
- graph->pe_rep[j] = -1;
- graph->indirect_cycles[j] = -1;
- }
-}
-
-/* Build the constraint graph, adding only predecessor edges right now. */
-
-static void
-build_pred_graph (void)
-{
- int i;
- constraint_t c;
- unsigned int j;
-
- graph->implicit_preds = XCNEWVEC (bitmap, graph->size);
- graph->preds = XCNEWVEC (bitmap, graph->size);
- graph->pointer_label = XCNEWVEC (unsigned int, graph->size);
- graph->loc_label = XCNEWVEC (unsigned int, graph->size);
- graph->pointed_by = XCNEWVEC (bitmap, graph->size);
- graph->points_to = XCNEWVEC (bitmap, graph->size);
- graph->eq_rep = XNEWVEC (int, graph->size);
- graph->direct_nodes = sbitmap_alloc (graph->size);
- graph->address_taken = BITMAP_ALLOC (&predbitmap_obstack);
- bitmap_clear (graph->direct_nodes);
-
- for (j = 1; j < FIRST_REF_NODE; j++)
- {
- if (!get_varinfo (j)->is_special_var)
- bitmap_set_bit (graph->direct_nodes, j);
- }
-
- for (j = 0; j < graph->size; j++)
- graph->eq_rep[j] = -1;
-
- for (j = 0; j < varmap.length (); j++)
- graph->indirect_cycles[j] = -1;
-
- FOR_EACH_VEC_ELT (constraints, i, c)
- {
- struct constraint_expr lhs = c->lhs;
- struct constraint_expr rhs = c->rhs;
- unsigned int lhsvar = lhs.var;
- unsigned int rhsvar = rhs.var;
-
- if (lhs.type == DEREF)
- {
- /* *x = y. */
- if (rhs.offset == 0 && lhs.offset == 0 && rhs.type == SCALAR)
- {
- if (lhs.var == anything_id)
- add_pred_graph_edge (graph, storedanything_id, rhsvar);
- else
- add_pred_graph_edge (graph, FIRST_REF_NODE + lhsvar, rhsvar);
- }
- }
- else if (rhs.type == DEREF)
- {
- /* x = *y */
- if (rhs.offset == 0 && lhs.offset == 0 && lhs.type == SCALAR)
- add_pred_graph_edge (graph, lhsvar, FIRST_REF_NODE + rhsvar);
- else
- bitmap_clear_bit (graph->direct_nodes, lhsvar);
- }
- else if (rhs.type == ADDRESSOF)
- {
- varinfo_t v;
-
- /* x = &y */
- if (graph->points_to[lhsvar] == NULL)
- graph->points_to[lhsvar] = BITMAP_ALLOC (&predbitmap_obstack);
- bitmap_set_bit (graph->points_to[lhsvar], rhsvar);
-
- if (graph->pointed_by[rhsvar] == NULL)
- graph->pointed_by[rhsvar] = BITMAP_ALLOC (&predbitmap_obstack);
- bitmap_set_bit (graph->pointed_by[rhsvar], lhsvar);
-
- /* Implicitly, *x = y */
- add_implicit_graph_edge (graph, FIRST_REF_NODE + lhsvar, rhsvar);
-
- /* All related variables are no longer direct nodes. */
- bitmap_clear_bit (graph->direct_nodes, rhsvar);
- v = get_varinfo (rhsvar);
- if (!v->is_full_var)
- {
- v = get_varinfo (v->head);
- do
- {
- bitmap_clear_bit (graph->direct_nodes, v->id);
- v = vi_next (v);
- }
- while (v != NULL);
- }
- bitmap_set_bit (graph->address_taken, rhsvar);
- }
- else if (lhsvar > anything_id
- && lhsvar != rhsvar && lhs.offset == 0 && rhs.offset == 0)
- {
- /* x = y */
- add_pred_graph_edge (graph, lhsvar, rhsvar);
- /* Implicitly, *x = *y */
- add_implicit_graph_edge (graph, FIRST_REF_NODE + lhsvar,
- FIRST_REF_NODE + rhsvar);
- }
- else if (lhs.offset != 0 || rhs.offset != 0)
- {
- if (rhs.offset != 0)
- bitmap_clear_bit (graph->direct_nodes, lhs.var);
- else if (lhs.offset != 0)
- bitmap_clear_bit (graph->direct_nodes, rhs.var);
- }
- }
-}
-
-/* Build the constraint graph, adding successor edges. */
-
-static void
-build_succ_graph (void)
-{
- unsigned i, t;
- constraint_t c;
-
- FOR_EACH_VEC_ELT (constraints, i, c)
- {
- struct constraint_expr lhs;
- struct constraint_expr rhs;
- unsigned int lhsvar;
- unsigned int rhsvar;
-
- if (!c)
- continue;
-
- lhs = c->lhs;
- rhs = c->rhs;
- lhsvar = find (lhs.var);
- rhsvar = find (rhs.var);
-
- if (lhs.type == DEREF)
- {
- if (rhs.offset == 0 && lhs.offset == 0 && rhs.type == SCALAR)
- {
- if (lhs.var == anything_id)
- add_graph_edge (graph, storedanything_id, rhsvar);
- else
- add_graph_edge (graph, FIRST_REF_NODE + lhsvar, rhsvar);
- }
- }
- else if (rhs.type == DEREF)
- {
- if (rhs.offset == 0 && lhs.offset == 0 && lhs.type == SCALAR)
- add_graph_edge (graph, lhsvar, FIRST_REF_NODE + rhsvar);
- }
- else if (rhs.type == ADDRESSOF)
- {
- /* x = &y */
- gcc_checking_assert (find (rhs.var) == rhs.var);
- bitmap_set_bit (get_varinfo (lhsvar)->solution, rhsvar);
- }
- else if (lhsvar > anything_id
- && lhsvar != rhsvar && lhs.offset == 0 && rhs.offset == 0)
- {
- add_graph_edge (graph, lhsvar, rhsvar);
- }
- }
-
- /* Add edges from STOREDANYTHING to all nodes that can receive pointers. */
- t = find (storedanything_id);
- for (i = integer_id + 1; i < FIRST_REF_NODE; ++i)
- {
- if (get_varinfo (i)->may_have_pointers)
- add_graph_edge (graph, find (i), t);
- }
-
- /* Everything stored to ANYTHING also potentially escapes. */
- add_graph_edge (graph, find (escaped_id), t);
-}
-
-
-/* Changed variables on the last iteration. */
-static bitmap changed;
-
-/* Strongly Connected Component visitation info. */
-
-class scc_info
-{
-public:
- scc_info (size_t size);
- ~scc_info ();
-
- auto_sbitmap visited;
- auto_sbitmap deleted;
- unsigned int *dfs;
- unsigned int *node_mapping;
- int current_index;
- auto_vec<unsigned> scc_stack;
-};
-
-
-/* Recursive routine to find strongly connected components in GRAPH.
- SI is the SCC info to store the information in, and N is the id of current
- graph node we are processing.
-
- This is Tarjan's strongly connected component finding algorithm, as
- modified by Nuutila to keep only non-root nodes on the stack.
- The algorithm can be found in "On finding the strongly connected
- connected components in a directed graph" by Esko Nuutila and Eljas
- Soisalon-Soininen, in Information Processing Letters volume 49,
- number 1, pages 9-14. */
-
-static void
-scc_visit (constraint_graph_t graph, class scc_info *si, unsigned int n)
-{
- unsigned int i;
- bitmap_iterator bi;
- unsigned int my_dfs;
-
- bitmap_set_bit (si->visited, n);
- si->dfs[n] = si->current_index ++;
- my_dfs = si->dfs[n];
-
- /* Visit all the successors. */
- EXECUTE_IF_IN_NONNULL_BITMAP (graph->succs[n], 0, i, bi)
- {
- unsigned int w;
-
- if (i > LAST_REF_NODE)
- break;
-
- w = find (i);
- if (bitmap_bit_p (si->deleted, w))
- continue;
-
- if (!bitmap_bit_p (si->visited, w))
- scc_visit (graph, si, w);
-
- unsigned int t = find (w);
- gcc_checking_assert (find (n) == n);
- if (si->dfs[t] < si->dfs[n])
- si->dfs[n] = si->dfs[t];
- }
-
- /* See if any components have been identified. */
- if (si->dfs[n] == my_dfs)
- {
- if (si->scc_stack.length () > 0
- && si->dfs[si->scc_stack.last ()] >= my_dfs)
- {
- bitmap scc = BITMAP_ALLOC (NULL);
- unsigned int lowest_node;
- bitmap_iterator bi;
-
- bitmap_set_bit (scc, n);
-
- while (si->scc_stack.length () != 0
- && si->dfs[si->scc_stack.last ()] >= my_dfs)
- {
- unsigned int w = si->scc_stack.pop ();
-
- bitmap_set_bit (scc, w);
- }
-
- lowest_node = bitmap_first_set_bit (scc);
- gcc_assert (lowest_node < FIRST_REF_NODE);
-
- /* Collapse the SCC nodes into a single node, and mark the
- indirect cycles. */
- EXECUTE_IF_SET_IN_BITMAP (scc, 0, i, bi)
- {
- if (i < FIRST_REF_NODE)
- {
- if (unite (lowest_node, i))
- unify_nodes (graph, lowest_node, i, false);
- }
- else
- {
- unite (lowest_node, i);
- graph->indirect_cycles[i - FIRST_REF_NODE] = lowest_node;
- }
- }
- bitmap_set_bit (si->deleted, lowest_node);
- }
- else
- bitmap_set_bit (si->deleted, n);
- }
- else
- si->scc_stack.safe_push (n);
-}
-
-/* Unify node FROM into node TO, updating the changed count if
- necessary when UPDATE_CHANGED is true. */
-
-static void
-unify_nodes (constraint_graph_t graph, unsigned int to, unsigned int from,
- bool update_changed)
-{
- gcc_checking_assert (to != from && find (to) == to);
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "Unifying %s to %s\n",
- get_varinfo (from)->name,
- get_varinfo (to)->name);
-
- if (update_changed)
- stats.unified_vars_dynamic++;
- else
- stats.unified_vars_static++;
-
- merge_graph_nodes (graph, to, from);
- if (merge_node_constraints (graph, to, from))
- {
- if (update_changed)
- bitmap_set_bit (changed, to);
- }
-
- /* Mark TO as changed if FROM was changed. If TO was already marked
- as changed, decrease the changed count. */
-
- if (update_changed
- && bitmap_clear_bit (changed, from))
- bitmap_set_bit (changed, to);
- varinfo_t fromvi = get_varinfo (from);
- if (fromvi->solution)
- {
- /* If the solution changes because of the merging, we need to mark
- the variable as changed. */
- varinfo_t tovi = get_varinfo (to);
- if (bitmap_ior_into (tovi->solution, fromvi->solution))
- {
- if (update_changed)
- bitmap_set_bit (changed, to);
- }
-
- BITMAP_FREE (fromvi->solution);
- if (fromvi->oldsolution)
- BITMAP_FREE (fromvi->oldsolution);
-
- if (stats.iterations > 0
- && tovi->oldsolution)
- BITMAP_FREE (tovi->oldsolution);
- }
- if (graph->succs[to])
- bitmap_clear_bit (graph->succs[to], to);
-}
-
-/* Add a copy edge FROM -> TO, optimizing special cases. Returns TRUE
- if the solution of TO changed. */
-
-static bool
-solve_add_graph_edge (constraint_graph_t graph, unsigned int to,
- unsigned int from)
-{
- /* Adding edges from the special vars is pointless.
- They don't have sets that can change. */
- if (get_varinfo (from)->is_special_var)
- return bitmap_ior_into (get_varinfo (to)->solution,
- get_varinfo (from)->solution);
- /* Merging the solution from ESCAPED needlessly increases
- the set. Use ESCAPED as representative instead. */
- else if (from == find (escaped_id))
- return bitmap_set_bit (get_varinfo (to)->solution, escaped_id);
- else if (get_varinfo (from)->may_have_pointers
- && add_graph_edge (graph, to, from))
- return bitmap_ior_into (get_varinfo (to)->solution,
- get_varinfo (from)->solution);
- return false;
-}
-
-/* Process a constraint C that represents x = *(y + off), using DELTA as the
- starting solution for y. */
-
-static void
-do_sd_constraint (constraint_graph_t graph, constraint_t c,
- bitmap delta, bitmap *expanded_delta)
-{
- unsigned int lhs = c->lhs.var;
- bool flag = false;
- bitmap sol = get_varinfo (lhs)->solution;
- unsigned int j;
- bitmap_iterator bi;
- HOST_WIDE_INT roffset = c->rhs.offset;
-
- /* Our IL does not allow this. */
- gcc_checking_assert (c->lhs.offset == 0);
-
- /* If the solution of Y contains anything it is good enough to transfer
- this to the LHS. */
- if (bitmap_bit_p (delta, anything_id))
- {
- flag |= bitmap_set_bit (sol, anything_id);
- goto done;
- }
-
- /* If we do not know at with offset the rhs is dereferenced compute
- the reachability set of DELTA, conservatively assuming it is
- dereferenced at all valid offsets. */
- if (roffset == UNKNOWN_OFFSET)
- {
- delta = solution_set_expand (delta, expanded_delta);
- /* No further offset processing is necessary. */
- roffset = 0;
- }
-
- /* For each variable j in delta (Sol(y)), add
- an edge in the graph from j to x, and union Sol(j) into Sol(x). */
- EXECUTE_IF_SET_IN_BITMAP (delta, 0, j, bi)
- {
- varinfo_t v = get_varinfo (j);
- HOST_WIDE_INT fieldoffset = v->offset + roffset;
- unsigned HOST_WIDE_INT size = v->size;
- unsigned int t;
-
- if (v->is_full_var)
- ;
- else if (roffset != 0)
- {
- if (fieldoffset < 0)
- v = get_varinfo (v->head);
- else
- v = first_or_preceding_vi_for_offset (v, fieldoffset);
- }
-
- /* We have to include all fields that overlap the current field
- shifted by roffset. */
- do
- {
- t = find (v->id);
-
- flag |= solve_add_graph_edge (graph, lhs, t);
-
- if (v->is_full_var
- || v->next == 0)
- break;
-
- v = vi_next (v);
- }
- while (v->offset < fieldoffset + size);
- }
-
-done:
- /* If the LHS solution changed, mark the var as changed. */
- if (flag)
- bitmap_set_bit (changed, lhs);
-}
-
-/* Process a constraint C that represents *(x + off) = y using DELTA
- as the starting solution for x. */
-
-static void
-do_ds_constraint (constraint_t c, bitmap delta, bitmap *expanded_delta)
-{
- unsigned int rhs = c->rhs.var;
- bitmap sol = get_varinfo (rhs)->solution;
- unsigned int j;
- bitmap_iterator bi;
- HOST_WIDE_INT loff = c->lhs.offset;
- bool escaped_p = false;
-
- /* Our IL does not allow this. */
- gcc_checking_assert (c->rhs.offset == 0);
-
- /* If the solution of y contains ANYTHING simply use the ANYTHING
- solution. This avoids needlessly increasing the points-to sets. */
- if (bitmap_bit_p (sol, anything_id))
- sol = get_varinfo (find (anything_id))->solution;
-
- /* If the solution for x contains ANYTHING we have to merge the
- solution of y into all pointer variables which we do via
- STOREDANYTHING. */
- if (bitmap_bit_p (delta, anything_id))
- {
- unsigned t = find (storedanything_id);
- if (solve_add_graph_edge (graph, t, rhs))
- bitmap_set_bit (changed, t);
- return;
- }
-
- /* If we do not know at with offset the rhs is dereferenced compute
- the reachability set of DELTA, conservatively assuming it is
- dereferenced at all valid offsets. */
- if (loff == UNKNOWN_OFFSET)
- {
- delta = solution_set_expand (delta, expanded_delta);
- loff = 0;
- }
-
- /* For each member j of delta (Sol(x)), add an edge from y to j and
- union Sol(y) into Sol(j) */
- EXECUTE_IF_SET_IN_BITMAP (delta, 0, j, bi)
- {
- varinfo_t v = get_varinfo (j);
- unsigned int t;
- HOST_WIDE_INT fieldoffset = v->offset + loff;
- unsigned HOST_WIDE_INT size = v->size;
-
- if (v->is_full_var)
- ;
- else if (loff != 0)
- {
- if (fieldoffset < 0)
- v = get_varinfo (v->head);
- else
- v = first_or_preceding_vi_for_offset (v, fieldoffset);
- }
-
- /* We have to include all fields that overlap the current field
- shifted by loff. */
- do
- {
- if (v->may_have_pointers)
- {
- /* If v is a global variable then this is an escape point. */
- if (v->is_global_var
- && !escaped_p)
- {
- t = find (escaped_id);
- if (add_graph_edge (graph, t, rhs)
- && bitmap_ior_into (get_varinfo (t)->solution, sol))
- bitmap_set_bit (changed, t);
- /* Enough to let rhs escape once. */
- escaped_p = true;
- }
-
- if (v->is_special_var)
- break;
-
- t = find (v->id);
-
- if (solve_add_graph_edge (graph, t, rhs))
- bitmap_set_bit (changed, t);
- }
-
- if (v->is_full_var
- || v->next == 0)
- break;
-
- v = vi_next (v);
- }
- while (v->offset < fieldoffset + size);
- }
-}
-
-/* Handle a non-simple (simple meaning requires no iteration),
- constraint (IE *x = &y, x = *y, *x = y, and x = y with offsets involved). */
-
-static void
-do_complex_constraint (constraint_graph_t graph, constraint_t c, bitmap delta,
- bitmap *expanded_delta)
-{
- if (c->lhs.type == DEREF)
- {
- if (c->rhs.type == ADDRESSOF)
- {
- gcc_unreachable ();
- }
- else
- {
- /* *x = y */
- do_ds_constraint (c, delta, expanded_delta);
- }
- }
- else if (c->rhs.type == DEREF)
- {
- /* x = *y */
- if (!(get_varinfo (c->lhs.var)->is_special_var))
- do_sd_constraint (graph, c, delta, expanded_delta);
- }
- else
- {
- bitmap tmp;
- bool flag = false;
-
- gcc_checking_assert (c->rhs.type == SCALAR && c->lhs.type == SCALAR
- && c->rhs.offset != 0 && c->lhs.offset == 0);
- tmp = get_varinfo (c->lhs.var)->solution;
-
- flag = set_union_with_increment (tmp, delta, c->rhs.offset,
- expanded_delta);
-
- if (flag)
- bitmap_set_bit (changed, c->lhs.var);
- }
-}
-
-/* Initialize and return a new SCC info structure. */
-
-scc_info::scc_info (size_t size) :
- visited (size), deleted (size), current_index (0), scc_stack (1)
-{
- bitmap_clear (visited);
- bitmap_clear (deleted);
- node_mapping = XNEWVEC (unsigned int, size);
- dfs = XCNEWVEC (unsigned int, size);
-
- for (size_t i = 0; i < size; i++)
- node_mapping[i] = i;
-}
-
-/* Free an SCC info structure pointed to by SI */
-
-scc_info::~scc_info ()
-{
- free (node_mapping);
- free (dfs);
-}
-
-
-/* Find indirect cycles in GRAPH that occur, using strongly connected
- components, and note them in the indirect cycles map.
-
- This technique comes from Ben Hardekopf and Calvin Lin,
- "It Pays to be Lazy: Fast and Accurate Pointer Analysis for Millions of
- Lines of Code", submitted to PLDI 2007. */
-
-static void
-find_indirect_cycles (constraint_graph_t graph)
-{
- unsigned int i;
- unsigned int size = graph->size;
- scc_info si (size);
-
- for (i = 0; i < MIN (LAST_REF_NODE, size); i ++ )
- if (!bitmap_bit_p (si.visited, i) && find (i) == i)
- scc_visit (graph, &si, i);
-}
-
-/* Visit the graph in topological order starting at node N, and store the
- order in TOPO_ORDER using VISITED to indicate visited nodes. */
-
-static void
-topo_visit (constraint_graph_t graph, vec<unsigned> &topo_order,
- sbitmap visited, unsigned int n)
-{
- bitmap_iterator bi;
- unsigned int j;
-
- bitmap_set_bit (visited, n);
-
- if (graph->succs[n])
- EXECUTE_IF_SET_IN_BITMAP (graph->succs[n], 0, j, bi)
- {
- unsigned k = find (j);
- if (!bitmap_bit_p (visited, k))
- topo_visit (graph, topo_order, visited, k);
- }
-
- /* Also consider copy with offset complex constraints as implicit edges. */
- for (auto c : graph->complex[n])
- {
- /* Constraints are ordered so that SCALAR = SCALAR appear first. */
- if (c->lhs.type != SCALAR || c->rhs.type != SCALAR)
- break;
- gcc_checking_assert (c->rhs.var == n);
- unsigned k = find (c->lhs.var);
- if (!bitmap_bit_p (visited, k))
- topo_visit (graph, topo_order, visited, k);
- }
-
- topo_order.quick_push (n);
-}
-
-/* Compute a topological ordering for GRAPH, and return the result. */
-
-static auto_vec<unsigned>
-compute_topo_order (constraint_graph_t graph)
-{
- unsigned int i;
- unsigned int size = graph->size;
-
- auto_sbitmap visited (size);
- bitmap_clear (visited);
-
- /* For the heuristic in add_graph_edge to work optimally make sure to
- first visit the connected component of the graph containing
- ESCAPED. Do this by extracting the connected component
- with ESCAPED and append that to all other components as solve_graph
- pops from the order. */
- auto_vec<unsigned> tail (size);
- topo_visit (graph, tail, visited, find (escaped_id));
-
- auto_vec<unsigned> topo_order (size);
-
- for (i = 0; i != size; ++i)
- if (!bitmap_bit_p (visited, i) && find (i) == i)
- topo_visit (graph, topo_order, visited, i);
-
- topo_order.splice (tail);
- return topo_order;
-}
-
-/* Structure used to for hash value numbering of pointer equivalence
- classes. */
-
-typedef struct equiv_class_label
-{
- hashval_t hashcode;
- unsigned int equivalence_class;
- bitmap labels;
-} *equiv_class_label_t;
-typedef const struct equiv_class_label *const_equiv_class_label_t;
-
-/* Equiv_class_label hashtable helpers. */
-
-struct equiv_class_hasher : nofree_ptr_hash <equiv_class_label>
-{
- static inline hashval_t hash (const equiv_class_label *);
- static inline bool equal (const equiv_class_label *,
- const equiv_class_label *);
-};
-
-/* Hash function for a equiv_class_label_t */
-
-inline hashval_t
-equiv_class_hasher::hash (const equiv_class_label *ecl)
-{
- return ecl->hashcode;
-}
-
-/* Equality function for two equiv_class_label_t's. */
-
-inline bool
-equiv_class_hasher::equal (const equiv_class_label *eql1,
- const equiv_class_label *eql2)
-{
- return (eql1->hashcode == eql2->hashcode
- && bitmap_equal_p (eql1->labels, eql2->labels));
-}
-
-/* A hashtable for mapping a bitmap of labels->pointer equivalence
- classes. */
-static hash_table<equiv_class_hasher> *pointer_equiv_class_table;
-
-/* A hashtable for mapping a bitmap of labels->location equivalence
- classes. */
-static hash_table<equiv_class_hasher> *location_equiv_class_table;
-
-struct obstack equiv_class_obstack;
-
-/* Lookup a equivalence class in TABLE by the bitmap of LABELS with
- hash HAS it contains. Sets *REF_LABELS to the bitmap LABELS
- is equivalent to. */
-
-static equiv_class_label *
-equiv_class_lookup_or_add (hash_table<equiv_class_hasher> *table,
- bitmap labels)
-{
- equiv_class_label **slot;
- equiv_class_label ecl;
-
- ecl.labels = labels;
- ecl.hashcode = bitmap_hash (labels);
- slot = table->find_slot (&ecl, INSERT);
- if (!*slot)
- {
- *slot = XOBNEW (&equiv_class_obstack, struct equiv_class_label);
- (*slot)->labels = labels;
- (*slot)->hashcode = ecl.hashcode;
- (*slot)->equivalence_class = 0;
- }
-
- return *slot;
-}
-
-/* Perform offline variable substitution.
-
- This is a worst case quadratic time way of identifying variables
- that must have equivalent points-to sets, including those caused by
- static cycles, and single entry subgraphs, in the constraint graph.
-
- The technique is described in "Exploiting Pointer and Location
- Equivalence to Optimize Pointer Analysis. In the 14th International
- Static Analysis Symposium (SAS), August 2007." It is known as the
- "HU" algorithm, and is equivalent to value numbering the collapsed
- constraint graph including evaluating unions.
-
- The general method of finding equivalence classes is as follows:
- Add fake nodes (REF nodes) and edges for *a = b and a = *b constraints.
- Initialize all non-REF nodes to be direct nodes.
- For each constraint a = a U {b}, we set pts(a) = pts(a) u {fresh
- variable}
- For each constraint containing the dereference, we also do the same
- thing.
-
- We then compute SCC's in the graph and unify nodes in the same SCC,
- including pts sets.
-
- For each non-collapsed node x:
- Visit all unvisited explicit incoming edges.
- Ignoring all non-pointers, set pts(x) = Union of pts(a) for y
- where y->x.
- Lookup the equivalence class for pts(x).
- If we found one, equivalence_class(x) = found class.
- Otherwise, equivalence_class(x) = new class, and new_class is
- added to the lookup table.
-
- All direct nodes with the same equivalence class can be replaced
- with a single representative node.
- All unlabeled nodes (label == 0) are not pointers and all edges
- involving them can be eliminated.
- We perform these optimizations during rewrite_constraints
-
- In addition to pointer equivalence class finding, we also perform
- location equivalence class finding. This is the set of variables
- that always appear together in points-to sets. We use this to
- compress the size of the points-to sets. */
-
-/* Current maximum pointer equivalence class id. */
-static int pointer_equiv_class;
-
-/* Current maximum location equivalence class id. */
-static int location_equiv_class;
-
-/* Recursive routine to find strongly connected components in GRAPH,
- and label it's nodes with DFS numbers. */
-
-static void
-condense_visit (constraint_graph_t graph, class scc_info *si, unsigned int n)
-{
- unsigned int i;
- bitmap_iterator bi;
- unsigned int my_dfs;
-
- gcc_checking_assert (si->node_mapping[n] == n);
- bitmap_set_bit (si->visited, n);
- si->dfs[n] = si->current_index ++;
- my_dfs = si->dfs[n];
-
- /* Visit all the successors. */
- EXECUTE_IF_IN_NONNULL_BITMAP (graph->preds[n], 0, i, bi)
- {
- unsigned int w = si->node_mapping[i];
-
- if (bitmap_bit_p (si->deleted, w))
- continue;
-
- if (!bitmap_bit_p (si->visited, w))
- condense_visit (graph, si, w);
-
- unsigned int t = si->node_mapping[w];
- gcc_checking_assert (si->node_mapping[n] == n);
- if (si->dfs[t] < si->dfs[n])
- si->dfs[n] = si->dfs[t];
- }
-
- /* Visit all the implicit predecessors. */
- EXECUTE_IF_IN_NONNULL_BITMAP (graph->implicit_preds[n], 0, i, bi)
- {
- unsigned int w = si->node_mapping[i];
-
- if (bitmap_bit_p (si->deleted, w))
- continue;
-
- if (!bitmap_bit_p (si->visited, w))
- condense_visit (graph, si, w);
-
- unsigned int t = si->node_mapping[w];
- gcc_assert (si->node_mapping[n] == n);
- if (si->dfs[t] < si->dfs[n])
- si->dfs[n] = si->dfs[t];
- }
-
- /* See if any components have been identified. */
- if (si->dfs[n] == my_dfs)
- {
- if (si->scc_stack.length () != 0
- && si->dfs[si->scc_stack.last ()] >= my_dfs)
- {
- /* Find the first node of the SCC and do non-bitmap work. */
- bool direct_p = true;
- unsigned first = si->scc_stack.length ();
- do
- {
- --first;
- unsigned int w = si->scc_stack[first];
- si->node_mapping[w] = n;
- if (!bitmap_bit_p (graph->direct_nodes, w))
- direct_p = false;
- }
- while (first > 0
- && si->dfs[si->scc_stack[first - 1]] >= my_dfs);
- if (!direct_p)
- bitmap_clear_bit (graph->direct_nodes, n);
-
- /* Want to reduce to node n, push that first. */
- si->scc_stack.reserve (1);
- si->scc_stack.quick_push (si->scc_stack[first]);
- si->scc_stack[first] = n;
-
- unsigned scc_size = si->scc_stack.length () - first;
- unsigned split = scc_size / 2;
- unsigned carry = scc_size - split * 2;
- while (split > 0)
- {
- for (unsigned i = 0; i < split; ++i)
- {
- unsigned a = si->scc_stack[first + i];
- unsigned b = si->scc_stack[first + split + carry + i];
-
- /* Unify our nodes. */
- if (graph->preds[b])
- {
- if (!graph->preds[a])
- std::swap (graph->preds[a], graph->preds[b]);
- else
- bitmap_ior_into_and_free (graph->preds[a],
- &graph->preds[b]);
- }
- if (graph->implicit_preds[b])
- {
- if (!graph->implicit_preds[a])
- std::swap (graph->implicit_preds[a],
- graph->implicit_preds[b]);
- else
- bitmap_ior_into_and_free (graph->implicit_preds[a],
- &graph->implicit_preds[b]);
- }
- if (graph->points_to[b])
- {
- if (!graph->points_to[a])
- std::swap (graph->points_to[a], graph->points_to[b]);
- else
- bitmap_ior_into_and_free (graph->points_to[a],
- &graph->points_to[b]);
- }
- }
- unsigned remain = split + carry;
- split = remain / 2;
- carry = remain - split * 2;
- }
- /* Actually pop the SCC. */
- si->scc_stack.truncate (first);
- }
- bitmap_set_bit (si->deleted, n);
- }
- else
- si->scc_stack.safe_push (n);
-}
-
-/* Label pointer equivalences.
-
- This performs a value numbering of the constraint graph to
- discover which variables will always have the same points-to sets
- under the current set of constraints.
-
- The way it value numbers is to store the set of points-to bits
- generated by the constraints and graph edges. This is just used as a
- hash and equality comparison. The *actual set of points-to bits* is
- completely irrelevant, in that we don't care about being able to
- extract them later.
-
- The equality values (currently bitmaps) just have to satisfy a few
- constraints, the main ones being:
- 1. The combining operation must be order independent.
- 2. The end result of a given set of operations must be unique iff the
- combination of input values is unique
- 3. Hashable. */
-
-static void
-label_visit (constraint_graph_t graph, class scc_info *si, unsigned int n)
-{
- unsigned int i, first_pred;
- bitmap_iterator bi;
-
- bitmap_set_bit (si->visited, n);
-
- /* Label and union our incoming edges's points to sets. */
- first_pred = -1U;
- EXECUTE_IF_IN_NONNULL_BITMAP (graph->preds[n], 0, i, bi)
- {
- unsigned int w = si->node_mapping[i];
- if (!bitmap_bit_p (si->visited, w))
- label_visit (graph, si, w);
-
- /* Skip unused edges */
- if (w == n || graph->pointer_label[w] == 0)
- continue;
-
- if (graph->points_to[w])
- {
- if (!graph->points_to[n])
- {
- if (first_pred == -1U)
- first_pred = w;
- else
- {
- graph->points_to[n] = BITMAP_ALLOC (&predbitmap_obstack);
- bitmap_ior (graph->points_to[n],
- graph->points_to[first_pred],
- graph->points_to[w]);
- }
- }
- else
- bitmap_ior_into (graph->points_to[n], graph->points_to[w]);
- }
- }
-
- /* Indirect nodes get fresh variables and a new pointer equiv class. */
- if (!bitmap_bit_p (graph->direct_nodes, n))
- {
- if (!graph->points_to[n])
- {
- graph->points_to[n] = BITMAP_ALLOC (&predbitmap_obstack);
- if (first_pred != -1U)
- bitmap_copy (graph->points_to[n], graph->points_to[first_pred]);
- }
- bitmap_set_bit (graph->points_to[n], FIRST_REF_NODE + n);
- graph->pointer_label[n] = pointer_equiv_class++;
- equiv_class_label_t ecl;
- ecl = equiv_class_lookup_or_add (pointer_equiv_class_table,
- graph->points_to[n]);
- ecl->equivalence_class = graph->pointer_label[n];
- return;
- }
-
- /* If there was only a single non-empty predecessor the pointer equiv
- class is the same. */
- if (!graph->points_to[n])
- {
- if (first_pred != -1U)
- {
- graph->pointer_label[n] = graph->pointer_label[first_pred];
- graph->points_to[n] = graph->points_to[first_pred];
- }
- return;
- }
-
- if (!bitmap_empty_p (graph->points_to[n]))
- {
- equiv_class_label_t ecl;
- ecl = equiv_class_lookup_or_add (pointer_equiv_class_table,
- graph->points_to[n]);
- if (ecl->equivalence_class == 0)
- ecl->equivalence_class = pointer_equiv_class++;
- else
- {
- BITMAP_FREE (graph->points_to[n]);
- graph->points_to[n] = ecl->labels;
- }
- graph->pointer_label[n] = ecl->equivalence_class;
- }
-}
-
-/* Print the pred graph in dot format. */
-
-static void
-dump_pred_graph (class scc_info *si, FILE *file)
-{
- unsigned int i;
-
- /* Only print the graph if it has already been initialized: */
- if (!graph)
- return;
-
- /* Prints the header of the dot file: */
- fprintf (file, "strict digraph {\n");
- fprintf (file, " node [\n shape = box\n ]\n");
- fprintf (file, " edge [\n fontsize = \"12\"\n ]\n");
- fprintf (file, "\n // List of nodes and complex constraints in "
- "the constraint graph:\n");
-
- /* The next lines print the nodes in the graph together with the
- complex constraints attached to them. */
- for (i = 1; i < graph->size; i++)
- {
- if (i == FIRST_REF_NODE)
- continue;
- if (si->node_mapping[i] != i)
- continue;
- if (i < FIRST_REF_NODE)
- fprintf (file, "\"%s\"", get_varinfo (i)->name);
- else
- fprintf (file, "\"*%s\"", get_varinfo (i - FIRST_REF_NODE)->name);
- if (graph->points_to[i]
- && !bitmap_empty_p (graph->points_to[i]))
- {
- if (i < FIRST_REF_NODE)
- fprintf (file, "[label=\"%s = {", get_varinfo (i)->name);
- else
- fprintf (file, "[label=\"*%s = {",
- get_varinfo (i - FIRST_REF_NODE)->name);
- unsigned j;
- bitmap_iterator bi;
- EXECUTE_IF_SET_IN_BITMAP (graph->points_to[i], 0, j, bi)
- fprintf (file, " %d", j);
- fprintf (file, " }\"]");
- }
- fprintf (file, ";\n");
- }
-
- /* Go over the edges. */
- fprintf (file, "\n // Edges in the constraint graph:\n");
- for (i = 1; i < graph->size; i++)
- {
- unsigned j;
- bitmap_iterator bi;
- if (si->node_mapping[i] != i)
- continue;
- EXECUTE_IF_IN_NONNULL_BITMAP (graph->preds[i], 0, j, bi)
- {
- unsigned from = si->node_mapping[j];
- if (from < FIRST_REF_NODE)
- fprintf (file, "\"%s\"", get_varinfo (from)->name);
- else
- fprintf (file, "\"*%s\"", get_varinfo (from - FIRST_REF_NODE)->name);
- fprintf (file, " -> ");
- if (i < FIRST_REF_NODE)
- fprintf (file, "\"%s\"", get_varinfo (i)->name);
- else
- fprintf (file, "\"*%s\"", get_varinfo (i - FIRST_REF_NODE)->name);
- fprintf (file, ";\n");
- }
- }
-
- /* Prints the tail of the dot file. */
- fprintf (file, "}\n");
-}
-
-/* Perform offline variable substitution, discovering equivalence
- classes, and eliminating non-pointer variables. */
-
-static class scc_info *
-perform_var_substitution (constraint_graph_t graph)
-{
- unsigned int i;
- unsigned int size = graph->size;
- scc_info *si = new scc_info (size);
-
- bitmap_obstack_initialize (&iteration_obstack);
- gcc_obstack_init (&equiv_class_obstack);
- pointer_equiv_class_table = new hash_table<equiv_class_hasher> (511);
- location_equiv_class_table
- = new hash_table<equiv_class_hasher> (511);
- pointer_equiv_class = 1;
- location_equiv_class = 1;
-
- /* Condense the nodes, which means to find SCC's, count incoming
- predecessors, and unite nodes in SCC's. */
- for (i = 1; i < FIRST_REF_NODE; i++)
- if (!bitmap_bit_p (si->visited, si->node_mapping[i]))
- condense_visit (graph, si, si->node_mapping[i]);
-
- if (dump_file && (dump_flags & TDF_GRAPH))
- {
- fprintf (dump_file, "\n\n// The constraint graph before var-substitution "
- "in dot format:\n");
- dump_pred_graph (si, dump_file);
- fprintf (dump_file, "\n\n");
- }
-
- bitmap_clear (si->visited);
- /* Actually the label the nodes for pointer equivalences */
- for (i = 1; i < FIRST_REF_NODE; i++)
- if (!bitmap_bit_p (si->visited, si->node_mapping[i]))
- label_visit (graph, si, si->node_mapping[i]);
-
- /* Calculate location equivalence labels. */
- for (i = 1; i < FIRST_REF_NODE; i++)
- {
- bitmap pointed_by;
- bitmap_iterator bi;
- unsigned int j;
-
- if (!graph->pointed_by[i])
- continue;
- pointed_by = BITMAP_ALLOC (&iteration_obstack);
-
- /* Translate the pointed-by mapping for pointer equivalence
- labels. */
- EXECUTE_IF_SET_IN_BITMAP (graph->pointed_by[i], 0, j, bi)
- {
- bitmap_set_bit (pointed_by,
- graph->pointer_label[si->node_mapping[j]]);
- }
- /* The original pointed_by is now dead. */
- BITMAP_FREE (graph->pointed_by[i]);
-
- /* Look up the location equivalence label if one exists, or make
- one otherwise. */
- equiv_class_label_t ecl;
- ecl = equiv_class_lookup_or_add (location_equiv_class_table, pointed_by);
- if (ecl->equivalence_class == 0)
- ecl->equivalence_class = location_equiv_class++;
- else
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file, "Found location equivalence for node %s\n",
- get_varinfo (i)->name);
- BITMAP_FREE (pointed_by);
- }
- graph->loc_label[i] = ecl->equivalence_class;
-
- }
-
- if (dump_file && (dump_flags & TDF_DETAILS))
- for (i = 1; i < FIRST_REF_NODE; i++)
- {
- unsigned j = si->node_mapping[i];
- if (j != i)
- {
- fprintf (dump_file, "%s node id %d ",
- bitmap_bit_p (graph->direct_nodes, i)
- ? "Direct" : "Indirect", i);
- if (i < FIRST_REF_NODE)
- fprintf (dump_file, "\"%s\"", get_varinfo (i)->name);
- else
- fprintf (dump_file, "\"*%s\"",
- get_varinfo (i - FIRST_REF_NODE)->name);
- fprintf (dump_file, " mapped to SCC leader node id %d ", j);
- if (j < FIRST_REF_NODE)
- fprintf (dump_file, "\"%s\"\n", get_varinfo (j)->name);
- else
- fprintf (dump_file, "\"*%s\"\n",
- get_varinfo (j - FIRST_REF_NODE)->name);
- }
- else
- {
- fprintf (dump_file,
- "Equivalence classes for %s node id %d ",
- bitmap_bit_p (graph->direct_nodes, i)
- ? "direct" : "indirect", i);
- if (i < FIRST_REF_NODE)
- fprintf (dump_file, "\"%s\"", get_varinfo (i)->name);
- else
- fprintf (dump_file, "\"*%s\"",
- get_varinfo (i - FIRST_REF_NODE)->name);
- fprintf (dump_file,
- ": pointer %d, location %d\n",
- graph->pointer_label[i], graph->loc_label[i]);
- }
- }
-
- /* Quickly eliminate our non-pointer variables. */
-
- for (i = 1; i < FIRST_REF_NODE; i++)
- {
- unsigned int node = si->node_mapping[i];
-
- if (graph->pointer_label[node] == 0)
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- fprintf (dump_file,
- "%s is a non-pointer variable, eliminating edges.\n",
- get_varinfo (node)->name);
- stats.nonpointer_vars++;
- clear_edges_for_node (graph, node);
- }
- }
-
- return si;
-}
-
-/* Free information that was only necessary for variable
- substitution. */
-
-static void
-free_var_substitution_info (class scc_info *si)
-{
- delete si;
- free (graph->pointer_label);
- free (graph->loc_label);
- free (graph->pointed_by);
- free (graph->points_to);
- free (graph->eq_rep);
- sbitmap_free (graph->direct_nodes);
- delete pointer_equiv_class_table;
- pointer_equiv_class_table = NULL;
- delete location_equiv_class_table;
- location_equiv_class_table = NULL;
- obstack_free (&equiv_class_obstack, NULL);
- bitmap_obstack_release (&iteration_obstack);
-}
-
-/* Return an existing node that is equivalent to NODE, which has
- equivalence class LABEL, if one exists. Return NODE otherwise. */
-
-static unsigned int
-find_equivalent_node (constraint_graph_t graph,
- unsigned int node, unsigned int label)
-{
- /* If the address version of this variable is unused, we can
- substitute it for anything else with the same label.
- Otherwise, we know the pointers are equivalent, but not the
- locations, and we can unite them later. */
-
- if (!bitmap_bit_p (graph->address_taken, node))
- {
- gcc_checking_assert (label < graph->size);
-
- if (graph->eq_rep[label] != -1)
- {
- /* Unify the two variables since we know they are equivalent. */
- if (unite (graph->eq_rep[label], node))
- unify_nodes (graph, graph->eq_rep[label], node, false);
- return graph->eq_rep[label];
- }
- else
- {
- graph->eq_rep[label] = node;
- graph->pe_rep[label] = node;
- }
- }
- else
- {
- gcc_checking_assert (label < graph->size);
- graph->pe[node] = label;
- if (graph->pe_rep[label] == -1)
- graph->pe_rep[label] = node;
- }
-
- return node;
-}
-
-/* Unite pointer equivalent but not location equivalent nodes in
- GRAPH. This may only be performed once variable substitution is
- finished. */
-
-static void
-unite_pointer_equivalences (constraint_graph_t graph)
-{
- unsigned int i;
-
- /* Go through the pointer equivalences and unite them to their
- representative, if they aren't already. */
- for (i = 1; i < FIRST_REF_NODE; i++)
- {
- unsigned int label = graph->pe[i];
- if (label)
- {
- int label_rep = graph->pe_rep[label];
-
- if (label_rep == -1)
- continue;
-
- label_rep = find (label_rep);
- if (label_rep >= 0 && unite (label_rep, find (i)))
- unify_nodes (graph, label_rep, i, false);
- }
- }
-}
-
-/* Move complex constraints to the GRAPH nodes they belong to. */
-
-static void
-move_complex_constraints (constraint_graph_t graph)
-{
- int i;
- constraint_t c;
-
- FOR_EACH_VEC_ELT (constraints, i, c)
- {
- if (c)
- {
- struct constraint_expr lhs = c->lhs;
- struct constraint_expr rhs = c->rhs;
-
- if (lhs.type == DEREF)
- {
- insert_into_complex (graph, lhs.var, c);
- }
- else if (rhs.type == DEREF)
- {
- if (!(get_varinfo (lhs.var)->is_special_var))
- insert_into_complex (graph, rhs.var, c);
- }
- else if (rhs.type != ADDRESSOF && lhs.var > anything_id
- && (lhs.offset != 0 || rhs.offset != 0))
- {
- insert_into_complex (graph, rhs.var, c);
- }
- }
- }
-}
-
-
-/* Optimize and rewrite complex constraints while performing
- collapsing of equivalent nodes. SI is the SCC_INFO that is the
- result of perform_variable_substitution. */
-
-static void
-rewrite_constraints (constraint_graph_t graph,
- class scc_info *si)
-{
- int i;
- constraint_t c;
-
- if (flag_checking)
- {
- for (unsigned int j = 0; j < graph->size; j++)
- gcc_assert (find (j) == j);
- }
-
- FOR_EACH_VEC_ELT (constraints, i, c)
- {
- struct constraint_expr lhs = c->lhs;
- struct constraint_expr rhs = c->rhs;
- unsigned int lhsvar = find (lhs.var);
- unsigned int rhsvar = find (rhs.var);
- unsigned int lhsnode, rhsnode;
- unsigned int lhslabel, rhslabel;
-
- lhsnode = si->node_mapping[lhsvar];
- rhsnode = si->node_mapping[rhsvar];
- lhslabel = graph->pointer_label[lhsnode];
- rhslabel = graph->pointer_label[rhsnode];
-
- /* See if it is really a non-pointer variable, and if so, ignore
- the constraint. */
- if (lhslabel == 0)
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
-
- fprintf (dump_file, "%s is a non-pointer variable, "
- "ignoring constraint:",
- get_varinfo (lhs.var)->name);
- dump_constraint (dump_file, c);
- fprintf (dump_file, "\n");
- }
- constraints[i] = NULL;
- continue;
- }
-
- if (rhslabel == 0)
- {
- if (dump_file && (dump_flags & TDF_DETAILS))
- {
-
- fprintf (dump_file, "%s is a non-pointer variable, "
- "ignoring constraint:",
- get_varinfo (rhs.var)->name);
- dump_constraint (dump_file, c);
- fprintf (dump_file, "\n");
- }
- constraints[i] = NULL;
- continue;
- }
-
- lhsvar = find_equivalent_node (graph, lhsvar, lhslabel);
- rhsvar = find_equivalent_node (graph, rhsvar, rhslabel);
- c->lhs.var = lhsvar;
- c->rhs.var = rhsvar;
- }
-}
-
-/* Eliminate indirect cycles involving NODE. Return true if NODE was
- part of an SCC, false otherwise. */
-
-static bool
-eliminate_indirect_cycles (unsigned int node)
-{
- if (graph->indirect_cycles[node] != -1
- && !bitmap_empty_p (get_varinfo (node)->solution))
- {
- unsigned int i;
- auto_vec<unsigned> queue;
- int queuepos;
- unsigned int to = find (graph->indirect_cycles[node]);
- bitmap_iterator bi;
-
- /* We can't touch the solution set and call unify_nodes
- at the same time, because unify_nodes is going to do
- bitmap unions into it. */
-
- EXECUTE_IF_SET_IN_BITMAP (get_varinfo (node)->solution, 0, i, bi)
- {
- if (find (i) == i && i != to)
- {
- if (unite (to, i))
- queue.safe_push (i);
- }
- }
-
- for (queuepos = 0;
- queue.iterate (queuepos, &i);
- queuepos++)
- {
- unify_nodes (graph, to, i, true);
- }
- return true;
- }
- return false;
-}
-
-/* Solve the constraint graph GRAPH using our worklist solver.
- This is based on the PW* family of solvers from the "Efficient Field
- Sensitive Pointer Analysis for C" paper.
- It works by iterating over all the graph nodes, processing the complex
- constraints and propagating the copy constraints, until everything stops
- changed. This corresponds to steps 6-8 in the solving list given above. */
-
-static void
-solve_graph (constraint_graph_t graph)
-{
- unsigned int size = graph->size;
- unsigned int i;
- bitmap pts;
-
- changed = BITMAP_ALLOC (NULL);
-
- /* Mark all initial non-collapsed nodes as changed. */
- for (i = 1; i < size; i++)
- {
- varinfo_t ivi = get_varinfo (i);
- if (find (i) == i && !bitmap_empty_p (ivi->solution)
- && ((graph->succs[i] && !bitmap_empty_p (graph->succs[i]))
- || graph->complex[i].length () > 0))
- bitmap_set_bit (changed, i);
- }
-
- /* Allocate a bitmap to be used to store the changed bits. */
- pts = BITMAP_ALLOC (&pta_obstack);
-
- while (!bitmap_empty_p (changed))
- {
- unsigned int i;
- stats.iterations++;
-
- bitmap_obstack_initialize (&iteration_obstack);
-
- auto_vec<unsigned> topo_order = compute_topo_order (graph);
- while (topo_order.length () != 0)
- {
- i = topo_order.pop ();
-
- /* If this variable is not a representative, skip it. */
- if (find (i) != i)
- continue;
-
- /* In certain indirect cycle cases, we may merge this
- variable to another. */
- if (eliminate_indirect_cycles (i) && find (i) != i)
- continue;
-
- /* If the node has changed, we need to process the
- complex constraints and outgoing edges again. For complex
- constraints that modify i itself, like the common group of
- callarg = callarg + UNKNOWN;
- callarg = *callarg + UNKNOWN;
- *callarg = callescape;
- make sure to iterate immediately because that maximizes
- cache reuse and expands the graph quickest, leading to
- better visitation order in the next iteration. */
- while (bitmap_clear_bit (changed, i))
- {
- bitmap solution;
- vec<constraint_t> &complex = graph->complex[i];
- varinfo_t vi = get_varinfo (i);
- bool solution_empty;
-
- /* Compute the changed set of solution bits. If anything
- is in the solution just propagate that. */
- if (bitmap_bit_p (vi->solution, anything_id))
- {
- /* If anything is also in the old solution there is
- nothing to do.
- ??? But we shouldn't ended up with "changed" set ... */
- if (vi->oldsolution
- && bitmap_bit_p (vi->oldsolution, anything_id))
- break;
- bitmap_copy (pts, get_varinfo (find (anything_id))->solution);
- }
- else if (vi->oldsolution)
- bitmap_and_compl (pts, vi->solution, vi->oldsolution);
- else
- bitmap_copy (pts, vi->solution);
-
- if (bitmap_empty_p (pts))
- break;
-
- if (vi->oldsolution)
- bitmap_ior_into (vi->oldsolution, pts);
- else
- {
- vi->oldsolution = BITMAP_ALLOC (&oldpta_obstack);
- bitmap_copy (vi->oldsolution, pts);
- }
-
- solution = vi->solution;
- solution_empty = bitmap_empty_p (solution);
-
- /* Process the complex constraints */
- hash_set<constraint_t> *cvisited = nullptr;
- if (flag_checking)
- cvisited = new hash_set<constraint_t>;
- bitmap expanded_pts = NULL;
- for (unsigned j = 0; j < complex.length (); ++j)
- {
- constraint_t c = complex[j];
- /* At unification time only the directly involved nodes
- will get their complex constraints updated. Update
- our complex constraints now but keep the constraint
- vector sorted and clear of duplicates. Also make
- sure to evaluate each prevailing constraint only once. */
- unsigned int new_lhs = find (c->lhs.var);
- unsigned int new_rhs = find (c->rhs.var);
- if (c->lhs.var != new_lhs || c->rhs.var != new_rhs)
- {
- constraint tem = *c;
- tem.lhs.var = new_lhs;
- tem.rhs.var = new_rhs;
- unsigned int place
- = complex.lower_bound (&tem, constraint_less);
- c->lhs.var = new_lhs;
- c->rhs.var = new_rhs;
- if (place != j)
- {
- complex.ordered_remove (j);
- if (j < place)
- --place;
- if (place < complex.length ())
- {
- if (constraint_equal (*complex[place], *c))
- {
- j--;
- continue;
- }
- else
- complex.safe_insert (place, c);
- }
- else
- complex.quick_push (c);
- if (place > j)
- {
- j--;
- continue;
- }
- }
- }
-
- /* The only complex constraint that can change our
- solution to non-empty, given an empty solution,
- is a constraint where the lhs side is receiving
- some set from elsewhere. */
- if (cvisited && cvisited->add (c))
- gcc_unreachable ();
- if (!solution_empty || c->lhs.type != DEREF)
- do_complex_constraint (graph, c, pts, &expanded_pts);
- }
- if (cvisited)
- {
- /* When checking, verify the order of constraints is
- maintained and each constraint is evaluated exactly
- once. */
- for (unsigned j = 1; j < complex.length (); ++j)
- gcc_assert (constraint_less (complex[j-1], complex[j]));
- gcc_assert (cvisited->elements () == complex.length ());
- delete cvisited;
- }
- BITMAP_FREE (expanded_pts);
-
- solution_empty = bitmap_empty_p (solution);
-
- if (!solution_empty)
- {
- bitmap_iterator bi;
- unsigned eff_escaped_id = find (escaped_id);
- unsigned j;
-
- /* Propagate solution to all successors. */
- unsigned to_remove = ~0U;
- EXECUTE_IF_IN_NONNULL_BITMAP (graph->succs[i],
- 0, j, bi)
- {
- if (to_remove != ~0U)
- {
- bitmap_clear_bit (graph->succs[i], to_remove);
- to_remove = ~0U;
- }
- unsigned int to = find (j);
- if (to != j)
- {
- /* Update the succ graph, avoiding duplicate
- work. */
- to_remove = j;
- if (! bitmap_set_bit (graph->succs[i], to))
- continue;
- /* We eventually end up processing 'to' twice
- as it is undefined whether bitmap iteration
- iterates over bits set during iteration.
- Play safe instead of doing tricks. */
- }
- /* Don't try to propagate to ourselves. */
- if (to == i)
- {
- to_remove = j;
- continue;
- }
- /* Early node unification can lead to edges from
- escaped - remove them. */
- if (i == eff_escaped_id)
- {
- to_remove = j;
- if (bitmap_set_bit (get_varinfo (to)->solution,
- escaped_id))
- bitmap_set_bit (changed, to);
- continue;
- }
-
- if (bitmap_ior_into (get_varinfo (to)->solution, pts))
- bitmap_set_bit (changed, to);
- }
- if (to_remove != ~0U)
- bitmap_clear_bit (graph->succs[i], to_remove);
- }
- }
- }
- bitmap_obstack_release (&iteration_obstack);
- }
-
- BITMAP_FREE (pts);
- BITMAP_FREE (changed);
- bitmap_obstack_release (&oldpta_obstack);
-}
-
-/* Map from trees to variable infos. */
-static hash_map<tree, varinfo_t> *vi_for_tree;
-
-
/* Insert ID as the variable id for tree T in the vi_for_tree map. */
static void
@@ -3003,7 +734,7 @@ lookup_vi_for_tree (tree t)
return *slot;
}
-/* Return a printable name for DECL */
+/* Return a printable name for DECL. */
static const char *
alias_get_name (tree decl)
@@ -3191,10 +922,10 @@ process_constraint (constraint_t t)
if (!get_varinfo (lhs.var)->may_have_pointers)
return;
- /* This can happen in our IR with things like n->a = *p */
+ /* This can happen in our IR with things like n->a = *p. */
if (rhs.type == DEREF && lhs.type == DEREF && rhs.var != anything_id)
{
- /* Split into tmp = *rhs, *lhs = tmp */
+ /* Split into tmp = *rhs, *lhs = tmp. */
struct constraint_expr tmplhs;
tmplhs = new_scalar_tmp_constraint_exp ("doubledereftmp", true);
process_constraint (new_constraint (tmplhs, rhs));
@@ -3202,7 +933,7 @@ process_constraint (constraint_t t)
}
else if ((rhs.type != SCALAR || rhs.offset != 0) && lhs.type == DEREF)
{
- /* Split into tmp = &rhs, *lhs = tmp */
+ /* Split into tmp = &rhs, *lhs = tmp. */
struct constraint_expr tmplhs;
tmplhs = new_scalar_tmp_constraint_exp ("derefaddrtmp", true);
process_constraint (new_constraint (tmplhs, rhs));
@@ -3370,7 +1101,7 @@ get_constraint_for_component_ref (tree t, vec<ce_s> *results,
tree forzero;
/* Some people like to do cute things like take the address of
- &0->a.b */
+ &0->a.b. */
forzero = t;
while (handled_component_p (forzero)
|| INDIRECT_REF_P (forzero)
@@ -3444,7 +1175,7 @@ get_constraint_for_component_ref (tree t, vec<ce_s> *results,
{
/* In languages like C, you can access one past the end of an
array. You aren't allowed to dereference it, so we can
- ignore this constraint. When we handle pointer subtraction,
+ ignore this constraint. When we handle pointer subtraction,
we may have to do something cute here. */
if (maybe_lt (poly_uint64 (bitpos), get_varinfo (result.var)->fullsize)
@@ -3481,7 +1212,7 @@ get_constraint_for_component_ref (tree t, vec<ce_s> *results,
results->safe_push (cexpr);
}
else if (results->length () == 0)
- /* Assert that we found *some* field there. The user couldn't be
+ /* Assert that we found *some* field there. The user couldn't be
accessing *only* padding. */
/* Still the user could access one past the end of an array
embedded in a struct resulting in accessing *only* padding. */
@@ -3535,7 +1266,7 @@ get_constraint_for_component_ref (tree t, vec<ce_s> *results,
/* Dereference the constraint expression CONS, and return the result.
DEREF (ADDRESSOF) = SCALAR
DEREF (SCALAR) = DEREF
- DEREF (DEREF) = (temp = DEREF1; result = DEREF(temp))
+ DEREF (DEREF) = (temp = DEREF1; result = DEREF (temp))
This is needed so that we can handle dereferencing DEREF constraints. */
static void
@@ -3594,7 +1325,7 @@ get_constraint_for_1 (tree t, vec<ce_s> *results, bool address_p,
point to anything by itself. That is, of course, unless it is an
integer constant being treated as a pointer, in which case, we
will return that this is really the addressof anything. This
- happens below, since it will fall into the default case. The only
+ happens below, since it will fall into the default case. The only
case we know something about an integer treated like a pointer is
when it is the NULL pointer, and then we just say it points to
NULL.
@@ -3745,7 +1476,7 @@ get_constraint_for_1 (tree t, vec<ce_s> *results, bool address_p,
tmp.truncate (0);
}
/* We do not know whether the constructor was complete,
- so technically we have to add &NOTHING or &ANYTHING
+ so technically we have to add &NOTHING or &ANYTHING
like we do for an empty constructor as well. */
return;
}
@@ -4144,7 +1875,7 @@ get_function_part_constraint (varinfo_t fi, unsigned part)
/* Produce constraints for argument ARG of call STMT with eaf flags
FLAGS. RESULTS is array holding constraints for return value.
CALLESCAPE_ID is variable where call loocal escapes are added.
- WRITES_GLOVEL_MEMORY is true if callee may write global memory. */
+ WRITES_GLOVEL_MEMORY is true if callee may write global memory. */
static void
handle_call_arg (gcall *stmt, tree arg, vec<ce_s> *results, int flags,
@@ -4510,7 +2241,7 @@ handle_lhs_call (gcall *stmt, tree lhs, int flags, vec<ce_s> &rhsc,
rhsc.truncate (0);
vi = make_heapvar ("HEAP", true);
/* We are marking allocated storage local, we deal with it becoming
- global by escaping and setting of vars_contains_escaped_heap. */
+ global by escaping and setting of vars_contains_escaped_heap. */
DECL_EXTERNAL (vi->decl) = 0;
vi->is_global_var = 0;
/* If this is not a real malloc call assume the memory was
@@ -4698,8 +2429,8 @@ find_func_aliases_for_builtin_call (struct function *fn, gcall *t)
}
case BUILT_IN_STACK_SAVE:
case BUILT_IN_STACK_RESTORE:
- /* Nothing interesting happens. */
- return true;
+ /* Nothing interesting happens. */
+ return true;
case BUILT_IN_ALLOCA:
case BUILT_IN_ALLOCA_WITH_ALIGN:
case BUILT_IN_ALLOCA_WITH_ALIGN_AND_MAX:
@@ -4723,7 +2454,7 @@ find_func_aliases_for_builtin_call (struct function *fn, gcall *t)
return true;
}
case BUILT_IN_POSIX_MEMALIGN:
- {
+ {
tree ptrptr = gimple_call_arg (t, 0);
get_constraint_for (ptrptr, &lhsc);
do_deref (&lhsc);
@@ -4806,7 +2537,7 @@ find_func_aliases_for_builtin_call (struct function *fn, gcall *t)
}
break;
/* String / character search functions return a pointer into the
- source string or NULL. */
+ source string or NULL. */
case BUILT_IN_INDEX:
case BUILT_IN_STRCHR:
case BUILT_IN_STRRCHR:
@@ -4827,7 +2558,7 @@ find_func_aliases_for_builtin_call (struct function *fn, gcall *t)
}
return true;
/* Pure functions that return something not based on any object and
- that use the memory pointed to by their arguments (but not
+ that use the memory pointed to by their arguments (but not
transitively). */
case BUILT_IN_STRCMP:
case BUILT_IN_STRCMP_EQ:
@@ -5192,7 +2923,7 @@ find_func_aliases (struct function *fn, gimple *origt)
}
}
/* In IPA mode, we need to generate constraints to pass call
- arguments through their calls. There are two cases,
+ arguments through their calls. There are two cases,
either a GIMPLE_CALL returning a value, or just a plain
GIMPLE_CALL when we are not.
@@ -5445,7 +3176,7 @@ find_func_clobbers (struct function *fn, gimple *origt)
|| (TREE_CODE (tem) == MEM_REF
&& !(TREE_CODE (TREE_OPERAND (tem, 0)) == ADDR_EXPR
&& auto_var_in_fn_p
- (TREE_OPERAND (TREE_OPERAND (tem, 0), 0), fn->decl))))
+ (TREE_OPERAND (TREE_OPERAND (tem, 0), 0), fn->decl))))
{
struct constraint_expr lhsc, *rhsp;
unsigned i;
@@ -5474,7 +3205,7 @@ find_func_clobbers (struct function *fn, gimple *origt)
|| (TREE_CODE (tem) == MEM_REF
&& !(TREE_CODE (TREE_OPERAND (tem, 0)) == ADDR_EXPR
&& auto_var_in_fn_p
- (TREE_OPERAND (TREE_OPERAND (tem, 0), 0), fn->decl))))
+ (TREE_OPERAND (TREE_OPERAND (tem, 0), 0), fn->decl))))
{
struct constraint_expr lhs, *rhsp;
unsigned i;
@@ -5786,65 +3517,6 @@ find_func_clobbers (struct function *fn, gimple *origt)
}
-/* Find the first varinfo in the same variable as START that overlaps with
- OFFSET. Return NULL if we can't find one. */
-
-static varinfo_t
-first_vi_for_offset (varinfo_t start, unsigned HOST_WIDE_INT offset)
-{
- /* If the offset is outside of the variable, bail out. */
- if (offset >= start->fullsize)
- return NULL;
-
- /* If we cannot reach offset from start, lookup the first field
- and start from there. */
- if (start->offset > offset)
- start = get_varinfo (start->head);
-
- while (start)
- {
- /* We may not find a variable in the field list with the actual
- offset when we have glommed a structure to a variable.
- In that case, however, offset should still be within the size
- of the variable. */
- if (offset >= start->offset
- && (offset - start->offset) < start->size)
- return start;
-
- start = vi_next (start);
- }
-
- return NULL;
-}
-
-/* Find the first varinfo in the same variable as START that overlaps with
- OFFSET. If there is no such varinfo the varinfo directly preceding
- OFFSET is returned. */
-
-static varinfo_t
-first_or_preceding_vi_for_offset (varinfo_t start,
- unsigned HOST_WIDE_INT offset)
-{
- /* If we cannot reach offset from start, lookup the first field
- and start from there. */
- if (start->offset > offset)
- start = get_varinfo (start->head);
-
- /* We may not find a variable in the field list with the actual
- offset when we have glommed a structure to a variable.
- In that case, however, offset should still be within the size
- of the variable.
- If we got beyond the offset we look for return the field
- directly preceding offset which may be the last field. */
- while (start->next
- && offset >= start->offset
- && !((offset - start->offset) < start->size))
- start = vi_next (start);
-
- return start;
-}
-
-
/* This structure is used during pushing fields onto the fieldstack
to track the offset of the field, since bitpos_of_field gives it
relative to its immediate containing type, and we want it relative
@@ -5871,7 +3543,7 @@ struct fieldoff
typedef struct fieldoff fieldoff_s;
-/* qsort comparison function for two fieldoff's PA and PB */
+/* qsort comparison function for two fieldoff's PA and PB. */
static int
fieldoff_compare (const void *pa, const void *pb)
@@ -6599,38 +4271,6 @@ create_variable_info_for (tree decl, const char *name, bool add_id)
return id;
}
-/* Print out the points-to solution for VAR to FILE. */
-
-static void
-dump_solution_for_var (FILE *file, unsigned int var)
-{
- varinfo_t vi = get_varinfo (var);
- unsigned int i;
- bitmap_iterator bi;
-
- /* Dump the solution for unified vars anyway, this avoids difficulties
- in scanning dumps in the testsuite. */
- fprintf (file, "%s = { ", vi->name);
- vi = get_varinfo (find (var));
- EXECUTE_IF_SET_IN_BITMAP (vi->solution, 0, i, bi)
- fprintf (file, "%s ", get_varinfo (i)->name);
- fprintf (file, "}");
-
- /* But note when the variable was unified. */
- if (vi->id != var)
- fprintf (file, " same as %s", vi->name);
-
- fprintf (file, "\n");
-}
-
-/* Print the points-to solution for VAR to stderr. */
-
-DEBUG_FUNCTION void
-debug_solution_for_var (unsigned int var)
-{
- dump_solution_for_var (stderr, var);
-}
-
/* Register the constraints for function parameter related VI. */
static void
@@ -6718,7 +4358,7 @@ struct shared_bitmap_hasher : free_ptr_hash <shared_bitmap_info>
const shared_bitmap_info *);
};
-/* Hash function for a shared_bitmap_info_t */
+/* Hash function for a shared_bitmap_info_t. */
inline hashval_t
shared_bitmap_hasher::hash (const shared_bitmap_info *bi)
@@ -6726,7 +4366,7 @@ shared_bitmap_hasher::hash (const shared_bitmap_info *bi)
return bi->hashcode;
}
-/* Equality function for two shared_bitmap_info_t's. */
+/* Equality function for two shared_bitmap_info_t's. */
inline bool
shared_bitmap_hasher::equal (const shared_bitmap_info *sbi1,
@@ -6784,8 +4424,8 @@ set_uids_in_ptset (bitmap into, bitmap from, struct pt_solution *pt,
{
unsigned int i;
bitmap_iterator bi;
- varinfo_t escaped_vi = get_varinfo (find (escaped_id));
- varinfo_t escaped_return_vi = get_varinfo (find (escaped_return_id));
+ varinfo_t escaped_vi = get_varinfo (var_rep[escaped_id]);
+ varinfo_t escaped_return_vi = get_varinfo (var_rep[escaped_return_id]);
bool everything_escaped
= escaped_vi->solution && bitmap_bit_p (escaped_vi->solution, anything_id);
@@ -6883,7 +4523,7 @@ find_what_var_points_to (tree fndecl, varinfo_t orig_vi)
/* This variable may have been collapsed, let's get the real
variable. */
- vi = get_varinfo (find (orig_vi->id));
+ vi = get_varinfo (var_rep[orig_vi->id]);
/* See if we have already computed the solution and return it. */
pt_solution **slot = &final_solutions->get_or_insert (vi);
@@ -6910,7 +4550,7 @@ find_what_var_points_to (tree fndecl, varinfo_t orig_vi)
else
pt->escaped = 1;
/* Expand some special vars of ESCAPED in-place here. */
- varinfo_t evi = get_varinfo (find (escaped_id));
+ varinfo_t evi = get_varinfo (var_rep[escaped_id]);
if (bitmap_bit_p (evi->solution, nonlocal_id))
pt->nonlocal = 1;
}
@@ -7137,7 +4777,7 @@ pt_solution_includes_global (struct pt_solution *pt, bool escaped_local_p)
|| pt->nonlocal
|| pt->vars_contains_nonlocal
/* The following is a hack to make the malloc escape hack work.
- In reality we'd need different sets for escaped-through-return
+ In reality we'd need different sets for escaped-through-return
and escaped-to-callees and passes would need to be updated. */
|| pt->vars_contains_escaped_heap)
return true;
@@ -7273,52 +4913,6 @@ pt_solutions_intersect (struct pt_solution *pt1, struct pt_solution *pt2)
return res;
}
-/* Dump stats information to OUTFILE. */
-
-static void
-dump_sa_stats (FILE *outfile)
-{
- fprintf (outfile, "Points-to Stats:\n");
- fprintf (outfile, "Total vars: %d\n", stats.total_vars);
- fprintf (outfile, "Non-pointer vars: %d\n",
- stats.nonpointer_vars);
- fprintf (outfile, "Statically unified vars: %d\n",
- stats.unified_vars_static);
- fprintf (outfile, "Dynamically unified vars: %d\n",
- stats.unified_vars_dynamic);
- fprintf (outfile, "Iterations: %d\n", stats.iterations);
- fprintf (outfile, "Number of edges: %d\n", stats.num_edges);
- fprintf (outfile, "Number of implicit edges: %d\n",
- stats.num_implicit_edges);
- fprintf (outfile, "Number of avoided edges: %d\n",
- stats.num_avoided_edges);
-}
-
-/* Dump points-to information to OUTFILE. */
-
-static void
-dump_sa_points_to_info (FILE *outfile)
-{
- fprintf (outfile, "\nPoints-to sets\n\n");
-
- for (unsigned i = 1; i < varmap.length (); i++)
- {
- varinfo_t vi = get_varinfo (i);
- if (!vi->may_have_pointers)
- continue;
- dump_solution_for_var (outfile, i);
- }
-}
-
-
-/* Debug points-to information to stderr. */
-
-DEBUG_FUNCTION void
-debug_sa_points_to_info (void)
-{
- dump_sa_points_to_info (stderr);
-}
-
/* Initialize the always-existing constraint variables for NULL
ANYTHING, READONLY, and INTEGER */
@@ -7511,7 +5105,7 @@ init_base_vars (void)
process_constraint (new_constraint (lhs, rhs));
}
-/* Initialize things necessary to perform PTA */
+/* Initialize things necessary to perform PTA. */
static void
init_alias_vars (void)
@@ -7520,7 +5114,6 @@ init_alias_vars (void)
bitmap_obstack_initialize (&pta_obstack);
bitmap_obstack_initialize (&oldpta_obstack);
- bitmap_obstack_initialize (&predbitmap_obstack);
constraints.create (8);
varmap.create (8);
@@ -7537,144 +5130,6 @@ init_alias_vars (void)
gcc_obstack_init (&final_solutions_obstack);
}
-/* Remove the REF and ADDRESS edges from GRAPH, as well as all the
- predecessor edges. */
-
-static void
-remove_preds_and_fake_succs (constraint_graph_t graph)
-{
- unsigned int i;
-
- /* Clear the implicit ref and address nodes from the successor
- lists. */
- for (i = 1; i < FIRST_REF_NODE; i++)
- {
- if (graph->succs[i])
- bitmap_clear_range (graph->succs[i], FIRST_REF_NODE,
- FIRST_REF_NODE * 2);
- }
-
- /* Free the successor list for the non-ref nodes. */
- for (i = FIRST_REF_NODE + 1; i < graph->size; i++)
- {
- if (graph->succs[i])
- BITMAP_FREE (graph->succs[i]);
- }
-
- /* Now reallocate the size of the successor list as, and blow away
- the predecessor bitmaps. */
- graph->size = varmap.length ();
- graph->succs = XRESIZEVEC (bitmap, graph->succs, graph->size);
-
- free (graph->implicit_preds);
- graph->implicit_preds = NULL;
- free (graph->preds);
- graph->preds = NULL;
- bitmap_obstack_release (&predbitmap_obstack);
-}
-
-/* Solve the constraint set. */
-
-static void
-solve_constraints (void)
-{
- class scc_info *si;
-
- /* Sort varinfos so that ones that cannot be pointed to are last.
- This makes bitmaps more efficient. */
- unsigned int *map = XNEWVEC (unsigned int, varmap.length ());
- for (unsigned i = 0; i < integer_id + 1; ++i)
- map[i] = i;
- /* Start with address-taken vars, followed by not address-taken vars
- to move vars never appearing in the points-to solution bitmaps last. */
- unsigned j = integer_id + 1;
- for (unsigned i = integer_id + 1; i < varmap.length (); ++i)
- if (varmap[varmap[i]->head]->address_taken)
- map[i] = j++;
- for (unsigned i = integer_id + 1; i < varmap.length (); ++i)
- if (! varmap[varmap[i]->head]->address_taken)
- map[i] = j++;
- /* Shuffle varmap according to map. */
- for (unsigned i = integer_id + 1; i < varmap.length (); ++i)
- {
- while (map[varmap[i]->id] != i)
- std::swap (varmap[i], varmap[map[varmap[i]->id]]);
- gcc_assert (bitmap_empty_p (varmap[i]->solution));
- varmap[i]->id = i;
- varmap[i]->next = map[varmap[i]->next];
- varmap[i]->head = map[varmap[i]->head];
- }
- /* Finally rewrite constraints. */
- for (unsigned i = 0; i < constraints.length (); ++i)
- {
- constraints[i]->lhs.var = map[constraints[i]->lhs.var];
- constraints[i]->rhs.var = map[constraints[i]->rhs.var];
- }
- free (map);
-
- if (dump_file)
- fprintf (dump_file,
- "\nCollapsing static cycles and doing variable "
- "substitution\n");
-
- init_graph (varmap.length () * 2);
-
- if (dump_file)
- fprintf (dump_file, "Building predecessor graph\n");
- build_pred_graph ();
-
- if (dump_file)
- fprintf (dump_file, "Detecting pointer and location "
- "equivalences\n");
- si = perform_var_substitution (graph);
-
- if (dump_file)
- fprintf (dump_file, "Rewriting constraints and unifying "
- "variables\n");
- rewrite_constraints (graph, si);
-
- build_succ_graph ();
-
- free_var_substitution_info (si);
-
- /* Attach complex constraints to graph nodes. */
- move_complex_constraints (graph);
-
- if (dump_file)
- fprintf (dump_file, "Uniting pointer but not location equivalent "
- "variables\n");
- unite_pointer_equivalences (graph);
-
- if (dump_file)
- fprintf (dump_file, "Finding indirect cycles\n");
- find_indirect_cycles (graph);
-
- /* Implicit nodes and predecessors are no longer necessary at this
- point. */
- remove_preds_and_fake_succs (graph);
-
- if (dump_file && (dump_flags & TDF_GRAPH))
- {
- fprintf (dump_file, "\n\n// The constraint graph before solve-graph "
- "in dot format:\n");
- dump_constraint_graph (dump_file);
- fprintf (dump_file, "\n\n");
- }
-
- if (dump_file)
- fprintf (dump_file, "Solving graph\n");
-
- solve_graph (graph);
-
- if (dump_file && (dump_flags & TDF_GRAPH))
- {
- fprintf (dump_file, "\n\n// The constraint graph after solve-graph "
- "in dot format:\n");
- dump_constraint_graph (dump_file);
- fprintf (dump_file, "\n\n");
- }
-}
-
/* Create points-to sets for the current function. See the comments
at the start of the file for an algorithmic overview. */
@@ -7845,8 +5300,6 @@ compute_points_to_sets (void)
static void
delete_points_to_sets (void)
{
- unsigned int i;
-
delete shared_bitmap_table;
shared_bitmap_table = NULL;
if (dump_file && (dump_flags & TDF_STATS))
@@ -7858,16 +5311,7 @@ delete_points_to_sets (void)
bitmap_obstack_release (&pta_obstack);
constraints.release ();
- for (i = 0; i < graph->size; i++)
- graph->complex[i].release ();
- free (graph->complex);
-
- free (graph->rep);
- free (graph->succs);
- free (graph->pe);
- free (graph->pe_rep);
- free (graph->indirect_cycles);
- free (graph);
+ free (var_rep);
varmap.release ();
variable_info_pool.release ();
@@ -7914,14 +5358,14 @@ visit_loadstore (gimple *, tree base, tree ref, void *data)
if (! vi)
return false;
- vi = get_varinfo (find (vi->id));
+ vi = get_varinfo (var_rep[vi->id]);
if (bitmap_intersect_p (rvars, vi->solution)
|| (escaped_p && bitmap_bit_p (vi->solution, escaped_id)))
return false;
}
/* Do not overwrite existing cliques (that includes clique, base
- pairs we just set). */
+ pairs we just set). */
if (MR_DEPENDENCE_CLIQUE (base) == 0)
{
MR_DEPENDENCE_CLIQUE (base) = clique;
@@ -8050,7 +5494,7 @@ compute_dependence_clique (void)
varinfo_t vi = lookup_vi_for_tree (p);
if (!vi)
continue;
- vi = get_varinfo (find (vi->id));
+ vi = get_varinfo (var_rep[vi->id]);
bitmap_iterator bi;
unsigned j;
varinfo_t restrict_var = NULL;
@@ -8101,11 +5545,11 @@ compute_dependence_clique (void)
maybe_set_dependence_info);
if (used)
{
- /* Add all subvars to the set of restrict pointed-to set. */
+ /* Add all subvars to the set of restrict pointed-to set. */
for (unsigned sv = restrict_var->head; sv != 0;
sv = get_varinfo (sv)->next)
bitmap_set_bit (rvars, sv);
- varinfo_t escaped = get_varinfo (find (escaped_id));
+ varinfo_t escaped = get_varinfo (var_rep[escaped_id]);
if (bitmap_bit_p (escaped->solution, restrict_var->id))
escaped_p = true;
}
@@ -8261,7 +5705,7 @@ struct pt_solution ipa_escaped_pt
= { true, false, false, false, false, false,
false, false, false, false, false, NULL };
-/* Associate node with varinfo DATA. Worker for
+/* Associate node with varinfo DATA. Worker for
cgraph_for_symbol_thunks_and_aliases. */
static bool
associate_varinfo_to_alias (struct cgraph_node *node, void *data)
@@ -8275,111 +5719,6 @@ associate_varinfo_to_alias (struct cgraph_node *node, void *data)
return false;
}
-/* Dump varinfo VI to FILE. */
-
-static void
-dump_varinfo (FILE *file, varinfo_t vi)
-{
- if (vi == NULL)
- return;
-
- fprintf (file, "%u: %s\n", vi->id, vi->name);
-
- const char *sep = " ";
- if (vi->is_artificial_var)
- fprintf (file, "%sartificial", sep);
- if (vi->is_special_var)
- fprintf (file, "%sspecial", sep);
- if (vi->is_unknown_size_var)
- fprintf (file, "%sunknown-size", sep);
- if (vi->is_full_var)
- fprintf (file, "%sfull", sep);
- if (vi->is_heap_var)
- fprintf (file, "%sheap", sep);
- if (vi->may_have_pointers)
- fprintf (file, "%smay-have-pointers", sep);
- if (vi->only_restrict_pointers)
- fprintf (file, "%sonly-restrict-pointers", sep);
- if (vi->is_restrict_var)
- fprintf (file, "%sis-restrict-var", sep);
- if (vi->is_global_var)
- fprintf (file, "%sglobal", sep);
- if (vi->is_ipa_escape_point)
- fprintf (file, "%sipa-escape-point", sep);
- if (vi->is_fn_info)
- fprintf (file, "%sfn-info", sep);
- if (vi->ruid)
- fprintf (file, "%srestrict-uid:%u", sep, vi->ruid);
- if (vi->next)
- fprintf (file, "%snext:%u", sep, vi->next);
- if (vi->head != vi->id)
- fprintf (file, "%shead:%u", sep, vi->head);
- if (vi->offset)
- fprintf (file, "%soffset:" HOST_WIDE_INT_PRINT_DEC, sep, vi->offset);
- if (vi->size != ~HOST_WIDE_INT_0U)
- fprintf (file, "%ssize:" HOST_WIDE_INT_PRINT_DEC, sep, vi->size);
- if (vi->fullsize != ~HOST_WIDE_INT_0U && vi->fullsize != vi->size)
- fprintf (file, "%sfullsize:" HOST_WIDE_INT_PRINT_DEC, sep,
- vi->fullsize);
- fprintf (file, "\n");
-
- if (vi->solution && !bitmap_empty_p (vi->solution))
- {
- bitmap_iterator bi;
- unsigned i;
- fprintf (file, " solution: {");
- EXECUTE_IF_SET_IN_BITMAP (vi->solution, 0, i, bi)
- fprintf (file, " %u", i);
- fprintf (file, " }\n");
- }
-
- if (vi->oldsolution && !bitmap_empty_p (vi->oldsolution)
- && !bitmap_equal_p (vi->solution, vi->oldsolution))
- {
- bitmap_iterator bi;
- unsigned i;
- fprintf (file, " oldsolution: {");
- EXECUTE_IF_SET_IN_BITMAP (vi->oldsolution, 0, i, bi)
- fprintf (file, " %u", i);
- fprintf (file, " }\n");
- }
-}
-
-/* Dump varinfo VI to stderr. */
-
-DEBUG_FUNCTION void
-debug_varinfo (varinfo_t vi)
-{
- dump_varinfo (stderr, vi);
-}
-
-/* Dump varmap to FILE. */
-
-static void
-dump_varmap (FILE *file)
-{
- if (varmap.length () == 0)
- return;
-
- fprintf (file, "variables:\n");
-
- for (unsigned int i = 0; i < varmap.length (); ++i)
- {
- varinfo_t vi = get_varinfo (i);
- dump_varinfo (file, vi);
- }
-
- fprintf (file, "\n");
-}
-
-/* Dump varmap to stderr. */
-
-DEBUG_FUNCTION void
-debug_varmap (void)
-{
- dump_varmap (stderr);
-}
-
/* Compute whether node is refered to non-locally. Worker for
cgraph_for_symbol_thunks_and_aliases. */
static bool
@@ -8493,7 +5832,7 @@ ipa_pta_execute (void)
varinfo_t vi = get_vi_for_tree (var->decl);
/* For the purpose of IPA PTA unit-local globals are not
- escape points. */
+ escape points. */
bool nonlocal_p = (DECL_EXTERNAL (var->decl)
|| TREE_PUBLIC (var->decl)
|| var->used_from_other_partition
@@ -8592,7 +5931,7 @@ ipa_pta_execute (void)
for (varinfo_t ai = first_vi_for_offset (fi, fi_parm_base);
ai; ai = vi_next (ai))
{
- varinfo_t vi = get_varinfo (find (ai->id));
+ varinfo_t vi = get_varinfo (var_rep[ai->id]);
bitmap_iterator bi;
unsigned j;
EXECUTE_IF_SET_IN_BITMAP (vi->solution, 0, j, bi)
@@ -8608,12 +5947,12 @@ ipa_pta_execute (void)
}
}
/* As well as global variables which are another way of passing
- arguments to recursive invocations. */
+ arguments to recursive invocations. */
else if (fi->is_global_var)
{
for (varinfo_t ai = fi; ai; ai = vi_next (ai))
{
- varinfo_t vi = get_varinfo (find (ai->id));
+ varinfo_t vi = get_varinfo (var_rep[ai->id]);
bitmap_iterator bi;
unsigned j;
EXECUTE_IF_SET_IN_BITMAP (vi->solution, 0, j, bi)
@@ -8708,10 +6047,10 @@ ipa_pta_execute (void)
{
*gimple_call_clobber_set (stmt)
= find_what_var_points_to
- (node->decl, first_vi_for_offset (fi, fi_clobbers));
+ (node->decl, first_vi_for_offset (fi, fi_clobbers));
*gimple_call_use_set (stmt)
= find_what_var_points_to
- (node->decl, first_vi_for_offset (fi, fi_uses));
+ (node->decl, first_vi_for_offset (fi, fi_uses));
}
/* Handle direct calls to external functions. */
else if (decl && (!fi || fi->decl))
@@ -8738,7 +6077,8 @@ ipa_pta_execute (void)
}
pt = gimple_call_clobber_set (stmt);
- if (gimple_call_flags (stmt) & (ECF_CONST|ECF_PURE|ECF_NOVOPS))
+ if (gimple_call_flags (stmt) &
+ (ECF_CONST|ECF_PURE|ECF_NOVOPS))
memset (pt, 0, sizeof (struct pt_solution));
else if ((vi = lookup_call_clobber_vi (stmt)) != NULL)
{
@@ -8763,7 +6103,7 @@ ipa_pta_execute (void)
{
/* We need to accumulate all clobbers/uses of all possible
callees. */
- fi = get_varinfo (find (fi->id));
+ fi = get_varinfo (var_rep[fi->id]);
/* If we cannot constrain the set of functions we'll end up
calling we end up using/clobbering everything. */
if (bitmap_bit_p (fi->solution, anything_id)
@@ -8823,7 +6163,7 @@ ipa_pta_execute (void)
fn->gimple_df->ipa_pta = true;
/* We have to re-set the final-solution cache after each function
- because what is a "global" is dependent on function context. */
+ because what is a "global" is dependent on function context. */
final_solutions->empty ();
obstack_free (&final_solutions_obstack, NULL);
gcc_obstack_init (&final_solutions_obstack);
diff --git a/gcc/tree-ssa-structalias.h b/gcc/tree-ssa-structalias.h
new file mode 100644
index 0000000..4104bad
--- /dev/null
+++ b/gcc/tree-ssa-structalias.h
@@ -0,0 +1,217 @@
+/* Tree based points-to analysis
+ Copyright (C) 2005-2025 Free Software Foundation, Inc.
+ Contributed by Daniel Berlin <dberlin@dberlin.org>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* NOTE: This file declares the internal interface of the points-to analyzer.
+ Outward-facing function declarations can be found in tree-ssa-alias.h. */
+
+#ifndef TREE_SSA_STRUCTALIAS_H
+#define TREE_SSA_STRUCTALIAS_H
+
+namespace pointer_analysis {
+
+enum constraint_expr_type {SCALAR, DEREF, ADDRESSOF};
+
+/* Static IDs for the special variables. Variable ID zero is unused
+ and used as terminator for the sub-variable chain. */
+enum { nothing_id = 1, anything_id = 2, string_id = 3,
+ escaped_id = 4, nonlocal_id = 5, escaped_return_id = 6,
+ storedanything_id = 7, integer_id = 8 };
+
+/* Use 0x8000... as special unknown offset. */
+#define UNKNOWN_OFFSET HOST_WIDE_INT_MIN
+
+/* An expression that appears in a constraint. */
+
+struct constraint_expr
+{
+ /* Constraint type. */
+ constraint_expr_type type;
+
+ /* Variable we are referring to in the constraint. */
+ unsigned int var;
+
+ /* Offset, in bits, of this constraint from the beginning of
+ variables it ends up referring to.
+
+ IOW, in a deref constraint, we would deref, get the result set,
+ then add OFFSET to each member. */
+ HOST_WIDE_INT offset;
+};
+typedef struct constraint_expr ce_s;
+
+/* Our set constraints are made up of two constraint expressions, one
+ LHS, and one RHS.
+
+ As described in the introduction in tree-ssa-structalias.cc, our set
+ constraints each represent an operation between set valued variables.
+*/
+struct constraint
+{
+ struct constraint_expr lhs;
+ struct constraint_expr rhs;
+};
+typedef struct constraint *constraint_t;
+
+struct variable_info
+{
+ /* ID of this variable. */
+ unsigned int id;
+
+ /* True if this is a variable created by the constraint analysis, such as
+ heap variables and constraints we had to break up. */
+ unsigned int is_artificial_var : 1;
+
+ /* True if this is a special variable whose solution set should not be
+ changed. */
+ unsigned int is_special_var : 1;
+
+ /* True for variables whose size is not known or variable. */
+ unsigned int is_unknown_size_var : 1;
+
+ /* True for (sub-)fields that represent a whole variable. */
+ unsigned int is_full_var : 1;
+
+ /* True if this is a heap variable. */
+ unsigned int is_heap_var : 1;
+
+ /* True if this is a register variable. */
+ unsigned int is_reg_var : 1;
+
+ /* True if this field may contain pointers. */
+ unsigned int may_have_pointers : 1;
+
+ /* True if this field has only restrict qualified pointers. */
+ unsigned int only_restrict_pointers : 1;
+
+ /* True if this represents a heap var created for a restrict qualified
+ pointer. */
+ unsigned int is_restrict_var : 1;
+
+ /* True if this represents a global variable. */
+ unsigned int is_global_var : 1;
+
+ /* True if this represents a module escape point for IPA analysis. */
+ unsigned int is_ipa_escape_point : 1;
+
+ /* True if this represents a IPA function info. */
+ unsigned int is_fn_info : 1;
+
+ /* True if this appears as RHS in a ADDRESSOF constraint. */
+ unsigned int address_taken : 1;
+
+ /* ??? Store somewhere better. */
+ unsigned short ruid;
+
+ /* The ID of the variable for the next field in this structure
+ or zero for the last field in this structure. */
+ unsigned next;
+
+ /* The ID of the variable for the first field in this structure. */
+ unsigned head;
+
+ /* Offset of this variable, in bits, from the base variable. */
+ unsigned HOST_WIDE_INT offset;
+
+ /* Size of the variable, in bits. */
+ unsigned HOST_WIDE_INT size;
+
+ /* Full size of the base variable, in bits. */
+ unsigned HOST_WIDE_INT fullsize;
+
+ /* In IPA mode the shadow UID in case the variable needs to be duplicated in
+ the final points-to solution because it reaches its containing
+ function recursively. Zero if none is needed. */
+ unsigned int shadow_var_uid;
+
+ /* Name of this variable. */
+ const char *name;
+
+ /* Tree that this variable is associated with. */
+ tree decl;
+
+ /* Points-to set for this variable. */
+ bitmap solution;
+
+ /* Old points-to set for this variable. */
+ bitmap oldsolution;
+};
+typedef struct variable_info *varinfo_t;
+
+struct constraint_stats
+{
+ unsigned int total_vars;
+ unsigned int nonpointer_vars;
+ unsigned int unified_vars_static;
+ unsigned int unified_vars_dynamic;
+ unsigned int iterations;
+ unsigned int num_edges;
+ unsigned int num_implicit_edges;
+ unsigned int num_avoided_edges;
+ unsigned int points_to_sets_created;
+};
+
+extern struct constraint_stats stats;
+
+extern bitmap_obstack pta_obstack;
+extern bitmap_obstack oldpta_obstack;
+
+extern vec<varinfo_t> varmap;
+extern vec<constraint_t> constraints;
+extern unsigned int *var_rep;
+
+
+/* Return the varmap element N. */
+
+inline varinfo_t
+get_varinfo (unsigned int n)
+{
+ return varmap[n];
+}
+
+/* Return the next variable in the list of sub-variables of VI
+ or NULL if VI is the last sub-variable. */
+
+inline varinfo_t
+vi_next (varinfo_t vi)
+{
+ return get_varinfo (vi->next);
+}
+
+varinfo_t first_vi_for_offset (varinfo_t start,
+ unsigned HOST_WIDE_INT offset);
+varinfo_t first_or_preceding_vi_for_offset (varinfo_t start,
+ unsigned HOST_WIDE_INT offset);
+void dump_constraint (FILE *file, constraint_t c);
+void dump_constraints (FILE *file, int from);
+void dump_solution_for_var (FILE *file, unsigned int var);
+void dump_sa_stats (FILE *outfile);
+void dump_sa_points_to_info (FILE *outfile);
+void dump_varinfo (FILE *file, varinfo_t vi);
+void dump_varmap (FILE *file);
+void debug_constraint (constraint_t);
+void debug_constraints (void);
+void debug_solution_for_var (unsigned int);
+void debug_sa_points_to_info (void);
+void debug_varinfo (varinfo_t);
+void debug_varmap (void);
+
+} // namespace pointer_analysis
+
+#endif /* TREE_SSA_STRUCTALIAS_H */
diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc
index ad753869..fe67d4d 100644
--- a/gcc/tree-vect-slp.cc
+++ b/gcc/tree-vect-slp.cc
@@ -1114,6 +1114,16 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
matches[0] = false;
return false;
}
+ if (is_a <bb_vec_info> (vinfo)
+ && known_le (TYPE_VECTOR_SUBPARTS (vectype), 1U))
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "Build SLP failed: not using single lane "
+ "vector type %T\n", vectype);
+ matches[0] = false;
+ return false;
+ }
/* Record nunits required but continue analysis, producing matches[]
as if nunits was not an issue. This allows splitting of groups
to happen. */
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index e2dcfaa..f0d3105 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -5460,6 +5460,7 @@ vectorizable_conversion (vec_info *vinfo,
vec<tree> vec_oprnds1 = vNULL;
tree vop0;
bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
+ loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
int multi_step_cvt = 0;
vec<tree> interm_types = vNULL;
tree intermediate_type, cvt_type = NULL_TREE;
@@ -5802,6 +5803,20 @@ vectorizable_conversion (vec_info *vinfo,
gcc_unreachable ();
}
+ if (modifier == WIDEN
+ && loop_vinfo
+ && LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)
+ && (code1 == VEC_WIDEN_MULT_EVEN_EXPR
+ || widening_evenodd_fn_p (code1)))
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "can't use a fully-masked loop because"
+ " widening operation on even/odd elements"
+ " mixes up lanes.\n");
+ LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
+ }
+
if (!vec_stmt) /* transformation not required. */
{
if (!vect_maybe_update_slp_op_vectype (slp_op0, vectype_in)
diff --git a/gcc/tree.h b/gcc/tree.h
index 50fc2d354..c0e434b 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -2233,6 +2233,12 @@ class auto_suppress_location_wrappers
#define OMP_CLAUSE_OPERAND(NODE, I) \
OMP_CLAUSE_ELT_CHECK (NODE, I)
+/* True if the clause decl NODE contains an OpenMP iterator. */
+#define OMP_ITERATOR_DECL_P(NODE) \
+ (TREE_CODE (NODE) == TREE_LIST \
+ && TREE_PURPOSE (NODE) \
+ && TREE_CODE (TREE_PURPOSE (NODE)) == TREE_VEC)
+
/* In a BLOCK (scope) node:
Variables declared in the scope NODE. */
#define BLOCK_VARS(NODE) (BLOCK_CHECK (NODE)->block.vars)