author     Tobias Burnus <tobias@codesourcery.com>    2022-10-18 10:00:17 +0200
committer  Tobias Burnus <tobias@codesourcery.com>    2022-10-18 10:00:17 +0200
commit     764db096e7f6f984d24ed0bfd03c4a6a2ad4fa78 (patch)
tree       52d3bca708fb2949eb5679eda44ead078ddba299 /gcc
parent     cc4b1e41c766ffcbcb60caec06e1d65cfe89a802 (diff)
parent     912bdd5cfb92f6dd58accd755ad14f47c0df619e (diff)
Merge branch 'releases/gcc-12' into devel/omp/gcc-12
Merge up to r12-8843-g912bdd5cfb92f6dd58accd755ad14f47c0df619e (18th Oct 2022)
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog                                    | 157
-rw-r--r--  gcc/DATESTAMP                                    |   2
-rw-r--r--  gcc/config/arm/mve.md                            |  30
-rw-r--r--  gcc/config/rs6000/rs6000-call.cc                 |   6
-rw-r--r--  gcc/config/sparc/sparc.cc                        |  24
-rw-r--r--  gcc/cp/ChangeLog                                 |   9
-rw-r--r--  gcc/cp/except.cc                                 |   7
-rw-r--r--  gcc/fortran/ChangeLog                            |  31
-rw-r--r--  gcc/fortran/trans-expr.cc                        |  65
-rw-r--r--  gcc/fortran/trans.h                              |   3
-rw-r--r--  gcc/testsuite/ChangeLog                          | 115
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/initlist-defarg3.C    |  13
-rw-r--r--  gcc/testsuite/g++.dg/torture/pr106922.C          |  48
-rw-r--r--  gcc/testsuite/g++.dg/tree-ssa/pr106922.C         |  90
-rw-r--r--  gcc/testsuite/g++.dg/uninit-pr105937.C           | 235
-rw-r--r--  gcc/testsuite/gcc.dg/pr107107.c                  |  25
-rw-r--r--  gcc/testsuite/gcc.dg/torture/pr106892.c          |  30
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-100.c      |  25
-rw-r--r--  gcc/testsuite/gcc.dg/vect/pr107160.c             |  41
-rw-r--r--  gcc/testsuite/gcc.dg/vect/pr107212-1.c           |  27
-rw-r--r--  gcc/testsuite/gcc.dg/vect/pr107212-2.c           |  23
-rw-r--r--  gcc/testsuite/gfortran.dg/intent_optimize_4.f90  |  43
-rw-r--r--  gcc/testsuite/gfortran.dg/intent_out_15.f90      |  27
-rw-r--r--  gcc/testsuite/gfortran.dg/pr106934.f90           |   7
-rw-r--r--  gcc/testsuite/gfortran.dg/vect/pr107254.f90      |  49
-rw-r--r--  gcc/tree-predcom.cc                              |  18
-rw-r--r--  gcc/tree-ssa-pre.cc                              |  18
-rw-r--r--  gcc/tree-ssa-sccvn.cc                            | 118
-rw-r--r--  gcc/tree-ssa-uninit.cc                           |  14
-rw-r--r--  gcc/tree-ssa.cc                                  |   6
-rw-r--r--  gcc/tree-vect-loop.cc                            |  23
-rw-r--r--  gcc/tree-vect-slp.cc                             |  33
32 files changed, 1234 insertions, 128 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index d2b7bc1..5a83f18 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,160 @@
+2022-10-17 Pat Haugen <pthaugen@linux.ibm.com>
+
+ Backported from master:
+ 2022-05-17 Pat Haugen <pthaugen@linux.ibm.com>
+
+ PR target/99685
+ * config/rs6000/rs6000-call.cc (rs6000_function_arg_advance_1): Bump
+ register count when not splitting IEEE 128-bit Complex.
+
+2022-10-17 Richard Biener <rguenther@suse.de>
+
+ Backported from master:
+ 2022-10-14 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/107254
+ * tree-vect-slp.cc (vect_slp_analyze_node_operations_1):
+ For permutes also analyze live lanes.
+ (vect_schedule_slp_node): For permutes also code generate
+ live lane extracts.
+
+2022-10-17 Richard Biener <rguenther@suse.de>
+
+ Backported from master:
+ 2022-10-11 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/107212
+ * tree-vect-loop.cc (vectorizable_reduction): Make sure to
+ set STMT_VINFO_REDUC_DEF for all live lanes in a SLP
+ reduction.
+ (vectorizable_live_operation): Do not pun to the SLP
+ node representative for reduction epilogue generation.
+
+2022-10-17 Richard Biener <rguenther@suse.de>
+
+ Backported from master:
+ 2022-10-13 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/107160
+ * tree-vect-loop.cc (vect_create_epilog_for_reduction):
+ Do not register accumulator if we failed to reduce it
+ to a single vector.
+
+2022-10-17 Richard Biener <rguenther@suse.de>
+
+ Backported from master:
+ 2022-10-06 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/107107
+ * tree-ssa-sccvn.cc (visit_reference_op_store): Do not
+ affect value-numbering when doing the tail merging
+ MODIFY_EXPR lookup.
+
+2022-10-17 Richard Biener <rguenther@suse.de>
+
+ Backported from master:
+ 2022-09-23 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106922
+ * tree-ssa-sccvn.cc (vn_reference_lookup_3): Allow
+ an arbitrary number of same valued skipped stores.
+
+2022-10-17 Richard Biener <rguenther@suse.de>
+
+ Backported from master:
+ 2022-09-22 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106922
+ * tree-ssa-sccvn.cc (vn_walk_cb_data::same_val): New member.
+ (vn_walk_cb_data::finish): Perform delayed verification of
+ a skipped may-alias.
+ (vn_reference_lookup_pieces): Likewise.
+ (vn_reference_lookup): Likewise.
+ (vn_reference_lookup_3): When skipping stores of the same
+ value also handle constant stores that are more than a
+ single VDEF away by delaying the verification.
+
+2022-10-14 Eric Botcazou <ebotcazou@adacore.com>
+
+ PR target/107248
+ * config/sparc/sparc.cc (sparc_expand_prologue): Emit a frame
+ blockage for leaf functions.
+ (sparc_flat_expand_prologue): Emit frame instead of full blockage.
+ (sparc_expand_epilogue): Emit a frame blockage for leaf functions.
+ (sparc_flat_expand_epilogue): Emit frame instead of full blockage.
+
+2022-10-13 Tobias Burnus <tobias@codesourcery.com>
+
+ Backported from master:
+ 2022-10-04 Tobias Burnus <tobias@codesourcery.com>
+
+ * doc/install.texi (Specific): Add missing items to bullet list.
+ (amdgcn): Update LLVM requirements, use version not date for newlib.
+ (nvptx): Use version not git hash for newlib.
+
+2022-10-11 Christophe Lyon <christophe.lyon@arm.com>
+
+ * config/arm/mve.md (mve_vqshluq_n_s<mode>): Use
+ MVE_pred/MVE_constraint instead of mve_imm_7/Ra.
+ (mve_vqshluq_m_n_s<mode>): Likewise.
+ (mve_vqrshrnbq_n_<supf><mode>): Use MVE_pred3/MVE_constraint3
+ instead of mve_imm_8/Rb.
+ (mve_vqrshrunbq_n_s<mode>): Likewise.
+ (mve_vqrshrntq_n_<supf><mode>): Likewise.
+ (mve_vqrshruntq_n_s<mode>): Likewise.
+ (mve_vrshrnbq_n_<supf><mode>): Likewise.
+ (mve_vrshrntq_n_<supf><mode>): Likewise.
+ (mve_vqrshrnbq_m_n_<supf><mode>): Likewise.
+ (mve_vqrshrntq_m_n_<supf><mode>): Likewise.
+ (mve_vrshrnbq_m_n_<supf><mode>): Likewise.
+ (mve_vrshrntq_m_n_<supf><mode>): Likewise.
+ (mve_vqrshrunbq_m_n_s<mode>): Likewise.
+ (mve_vsriq_n_<supf><mode>): Use MVE_pred2/MVE_constraint2 instead
+ of mve_imm_selective_upto_8/Rg.
+ (mve_vsriq_m_n_<supf><mode>): Likewise.
+ (cherry-picked from c3fb6658c7670e446f2fd00984404d971e416b3c)
+
+2022-10-11 Richard Biener <rguenther@suse.de>
+
+ Backported from master:
+ 2022-09-14 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106934
+ * tree-ssa.cc (non_rewritable_mem_ref_base): Avoid BIT_FIELD_REFs
+ of bitfields.
+ (maybe_rewrite_mem_ref_base): Likewise.
+
+2022-10-11 Richard Biener <rguenther@suse.de>
+
+ Backported from master:
+ 2022-09-15 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106922
+ * tree-ssa-pre.cc (translate_vuse_through_block): Only
+ keep the VUSE if its def dominates PHIBLOCK.
+ (prune_clobbered_mems): Rewrite logic so we check whether
+ a value dies in a block when the VUSE def doesn't dominate it.
+
+2022-10-11 Richard Biener <rguenther@suse.de>
+
+ Backported from master:
+ 2022-09-09 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106892
+ * tree-predcom.cc (ref_at_iteration): Do not associate the
+ constant part of the offset into the MEM_REF offset
+ operand, across a non-zero offset.
+
+2022-10-11 Richard Biener <rguenther@suse.de>
+
+ Backported from master:
+ 2022-08-22 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/105937
+ * tree-ssa-uninit.cc (find_uninit_use): Do not queue PHIs
+ on backedges.
+ (execute_late_warn_uninitialized): Mark backedges.
+
2022-10-03 Sergei Trofimovich <siarheit@google.com>
Backported from master:
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index ee52440..e1b70eb 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20221011
+20221018
diff --git a/gcc/config/arm/mve.md b/gcc/config/arm/mve.md
index f16991c..469e7e7 100644
--- a/gcc/config/arm/mve.md
+++ b/gcc/config/arm/mve.md
@@ -1617,7 +1617,7 @@
[
(set (match_operand:MVE_2 0 "s_register_operand" "=w")
(unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "w")
- (match_operand:SI 2 "mve_imm_7" "Ra")]
+ (match_operand:SI 2 "<MVE_pred>" "<MVE_constraint>")]
VQSHLUQ_N_S))
]
"TARGET_HAVE_MVE"
@@ -2608,7 +2608,7 @@
(set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
(match_operand:MVE_5 2 "s_register_operand" "w")
- (match_operand:SI 3 "mve_imm_8" "Rb")]
+ (match_operand:SI 3 "<MVE_pred3>" "<MVE_constraint3>")]
VQRSHRNBQ_N))
]
"TARGET_HAVE_MVE"
@@ -2623,7 +2623,7 @@
(set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
(match_operand:MVE_5 2 "s_register_operand" "w")
- (match_operand:SI 3 "mve_imm_8" "Rb")]
+ (match_operand:SI 3 "<MVE_pred3>" "<MVE_constraint3>")]
VQRSHRUNBQ_N_S))
]
"TARGET_HAVE_MVE"
@@ -3563,7 +3563,7 @@
(set (match_operand:MVE_2 0 "s_register_operand" "=w")
(unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
(match_operand:MVE_2 2 "s_register_operand" "w")
- (match_operand:SI 3 "mve_imm_selective_upto_8" "Rg")]
+ (match_operand:SI 3 "<MVE_pred2>" "<MVE_constraint2>")]
VSRIQ_N))
]
"TARGET_HAVE_MVE"
@@ -4466,7 +4466,7 @@
(set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
(match_operand:MVE_5 2 "s_register_operand" "w")
- (match_operand:SI 3 "mve_imm_8" "Rb")]
+ (match_operand:SI 3 "<MVE_pred3>" "<MVE_constraint3>")]
VQRSHRNTQ_N))
]
"TARGET_HAVE_MVE"
@@ -4482,7 +4482,7 @@
(set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
(match_operand:MVE_5 2 "s_register_operand" "w")
- (match_operand:SI 3 "mve_imm_8" "Rb")]
+ (match_operand:SI 3 "<MVE_pred3>" "<MVE_constraint3>")]
VQRSHRUNTQ_N_S))
]
"TARGET_HAVE_MVE"
@@ -4770,7 +4770,7 @@
(set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
(match_operand:MVE_5 2 "s_register_operand" "w")
- (match_operand:SI 3 "mve_imm_8" "Rb")]
+ (match_operand:SI 3 "<MVE_pred3>" "<MVE_constraint3>")]
VRSHRNBQ_N))
]
"TARGET_HAVE_MVE"
@@ -4786,7 +4786,7 @@
(set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
(match_operand:MVE_5 2 "s_register_operand" "w")
- (match_operand:SI 3 "mve_imm_8" "Rb")]
+ (match_operand:SI 3 "<MVE_pred3>" "<MVE_constraint3>")]
VRSHRNTQ_N))
]
"TARGET_HAVE_MVE"
@@ -4980,7 +4980,7 @@
(set (match_operand:MVE_2 0 "s_register_operand" "=w")
(unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
(match_operand:MVE_2 2 "s_register_operand" "w")
- (match_operand:SI 3 "mve_imm_7" "Ra")
+ (match_operand:SI 3 "<MVE_pred>" "<MVE_constraint>")
(match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
VQSHLUQ_M_N_S))
]
@@ -5012,7 +5012,7 @@
(set (match_operand:MVE_2 0 "s_register_operand" "=w")
(unspec:MVE_2 [(match_operand:MVE_2 1 "s_register_operand" "0")
(match_operand:MVE_2 2 "s_register_operand" "w")
- (match_operand:SI 3 "mve_imm_selective_upto_8" "Rg")
+ (match_operand:SI 3 "<MVE_pred2>" "<MVE_constraint2>")
(match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
VSRIQ_M_N))
]
@@ -6131,7 +6131,7 @@
(set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
(match_operand:MVE_5 2 "s_register_operand" "w")
- (match_operand:SI 3 "mve_imm_8" "Rb")
+ (match_operand:SI 3 "<MVE_pred3>" "<MVE_constraint3>")
(match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
VQRSHRNBQ_M_N))
]
@@ -6148,7 +6148,7 @@
(set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
(match_operand:MVE_5 2 "s_register_operand" "w")
- (match_operand:SI 3 "mve_imm_8" "Rb")
+ (match_operand:SI 3 "<MVE_pred3>" "<MVE_constraint3>")
(match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
VQRSHRNTQ_M_N))
]
@@ -6216,7 +6216,7 @@
(set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
(match_operand:MVE_5 2 "s_register_operand" "w")
- (match_operand:SI 3 "mve_imm_8" "Rb")
+ (match_operand:SI 3 "<MVE_pred3>" "<MVE_constraint3>")
(match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
VRSHRNBQ_M_N))
]
@@ -6233,7 +6233,7 @@
(set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
(match_operand:MVE_5 2 "s_register_operand" "w")
- (match_operand:SI 3 "mve_imm_8" "Rb")
+ (match_operand:SI 3 "<MVE_pred3>" "<MVE_constraint3>")
(match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
VRSHRNTQ_M_N))
]
@@ -6454,7 +6454,7 @@
(set (match_operand:<V_narrow_pack> 0 "s_register_operand" "=w")
(unspec:<V_narrow_pack> [(match_operand:<V_narrow_pack> 1 "s_register_operand" "0")
(match_operand:MVE_5 2 "s_register_operand" "w")
- (match_operand:SI 3 "mve_imm_8" "Rb")
+ (match_operand:SI 3 "<MVE_pred3>" "<MVE_constraint3>")
(match_operand:<MVE_VPRED> 4 "vpr_register_operand" "Up")]
VQRSHRUNBQ_M_N_S))
]
diff --git a/gcc/config/rs6000/rs6000-call.cc b/gcc/config/rs6000/rs6000-call.cc
index f06c692..d27df7b 100644
--- a/gcc/config/rs6000/rs6000-call.cc
+++ b/gcc/config/rs6000/rs6000-call.cc
@@ -1111,6 +1111,12 @@ rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
{
cum->vregno += n_elts;
+ /* If we are not splitting Complex IEEE128 args then account for the
+ fact that they are passed in 2 VSX regs. */
+ if (!targetm.calls.split_complex_arg && type
+ && TREE_CODE (type) == COMPLEX_TYPE && elt_mode == KCmode)
+ cum->vregno++;
+
if (!TARGET_ALTIVEC)
error ("cannot pass argument in vector register because"
" altivec instructions are disabled, use %qs"
diff --git a/gcc/config/sparc/sparc.cc b/gcc/config/sparc/sparc.cc
index aca925b..a475373 100644
--- a/gcc/config/sparc/sparc.cc
+++ b/gcc/config/sparc/sparc.cc
@@ -6050,6 +6050,9 @@ sparc_expand_prologue (void)
}
RTX_FRAME_RELATED_P (insn) = 1;
+
+ /* Ensure no memory access is done before the frame is established. */
+ emit_insn (gen_frame_blockage ());
}
else
{
@@ -6064,13 +6067,7 @@ sparc_expand_prologue (void)
/* %sp is not the CFA register anymore. */
emit_insn (gen_stack_pointer_inc (GEN_INT (4096 - size)));
- /* Make sure no %fp-based store is issued until after the frame is
- established. The offset between the frame pointer and the stack
- pointer is calculated relative to the value of the stack pointer
- at the end of the function prologue, and moving instructions that
- access the stack via the frame pointer between the instructions
- that decrement the stack pointer could result in accessing the
- register window save area, which is volatile. */
+ /* Likewise. */
emit_insn (gen_frame_blockage ());
}
else
@@ -6166,8 +6163,8 @@ sparc_flat_expand_prologue (void)
}
RTX_FRAME_RELATED_P (insn) = 1;
- /* Ensure nothing is scheduled until after the frame is established. */
- emit_insn (gen_blockage ());
+ /* Ensure no memory access is done before the frame is established. */
+ emit_insn (gen_frame_blockage ());
if (frame_pointer_needed)
{
@@ -6254,6 +6251,9 @@ sparc_expand_epilogue (bool for_eh)
; /* do nothing. */
else if (sparc_leaf_function_p)
{
+ /* Ensure no memory access is done after the frame is destroyed. */
+ emit_insn (gen_frame_blockage ());
+
if (size <= 4096)
emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
else if (size <= 8192)
@@ -6304,15 +6304,15 @@ sparc_flat_expand_epilogue (bool for_eh)
; /* do nothing. */
else if (frame_pointer_needed)
{
- /* Make sure the frame is destroyed after everything else is done. */
- emit_insn (gen_blockage ());
+ /* Ensure no memory access is done after the frame is destroyed. */
+ emit_insn (gen_frame_blockage ());
emit_move_insn (stack_pointer_rtx, gen_rtx_REG (Pmode, 1));
}
else
{
/* Likewise. */
- emit_insn (gen_blockage ());
+ emit_insn (gen_frame_blockage ());
if (size <= 4096)
emit_insn (gen_stack_pointer_inc (GEN_INT (size)));
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index bc2465f..9dc4d05 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,12 @@
+2022-10-13 Marek Polacek <polacek@redhat.com>
+
+ Backported from master:
+ 2022-10-13 Marek Polacek <polacek@redhat.com>
+
+ PR c++/106925
+ * except.cc (maybe_splice_retval_cleanup): Check current_function_decl.
+ Make the bool const.
+
2022-09-29 Jason Merrill <jason@redhat.com>
* class.cc (check_methods): Call constraints_satisfied_p.
diff --git a/gcc/cp/except.cc b/gcc/cp/except.cc
index da0a65c..58d8772 100644
--- a/gcc/cp/except.cc
+++ b/gcc/cp/except.cc
@@ -1322,9 +1322,12 @@ maybe_splice_retval_cleanup (tree compound_stmt)
{
/* If we need a cleanup for the return value, add it in at the same level as
pushdecl_outermost_localscope. And also in try blocks. */
- bool function_body
+ const bool function_body
= (current_binding_level->level_chain
- && current_binding_level->level_chain->kind == sk_function_parms);
+ && current_binding_level->level_chain->kind == sk_function_parms
+ /* When we're processing a default argument, c_f_d may not have been
+ set. */
+ && current_function_decl);
if ((function_body || current_binding_level->kind == sk_try)
&& !DECL_CONSTRUCTOR_P (current_function_decl)
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index ce03cb2..edf507b 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,34 @@
+2022-10-12 Mikael Morin <mikael@gcc.gnu.org>
+
+ Backported from master:
+ 2022-09-25 Mikael Morin <mikael@gcc.gnu.org>
+
+ PR fortran/106817
+ * trans-expr.cc (gfc_conv_procedure_call): Collect all clobbers
+ to their own separate block. Append the block of clobbers to
+ the procedure preliminary block after the argument evaluation
+ codes for all the arguments.
+
+2022-10-12 Mikael Morin <mikael@gcc.gnu.org>
+
+ Backported from master:
+ 2022-09-25 Mikael Morin <mikael@gcc.gnu.org>
+
+ PR fortran/105012
+ * trans-expr.cc (gfc_conv_procedure_call): Retrieve variable
+ from the just calculated variable reference.
+
+2022-10-12 Mikael Morin <mikael@gcc.gnu.org>
+
+ Backported from master:
+ 2022-09-25 Mikael Morin <mikael@gcc.gnu.org>
+
+ * trans.h (gfc_conv_expr_reference): Remove add_clobber
+ argument.
+ * trans-expr.cc (gfc_conv_expr_reference): Ditto. Inline code
+ depending on add_clobber and conditions controlling it ...
+ (gfc_conv_procedure_call): ... to here.
+
2022-10-08 José Rui Faustino de Sousa <jrfsousa@gmail.com>
Backported from master:
diff --git a/gcc/fortran/trans-expr.cc b/gcc/fortran/trans-expr.cc
index 05d57fb..0fb82f5 100644
--- a/gcc/fortran/trans-expr.cc
+++ b/gcc/fortran/trans-expr.cc
@@ -6029,7 +6029,6 @@ gfc_conv_procedure_call (gfc_se * se, gfc_symbol * sym,
gfc_charlen cl;
gfc_expr *e;
gfc_symbol *fsym;
- stmtblock_t post;
enum {MISSING = 0, ELEMENTAL, SCALAR, SCALAR_POINTER, ARRAY};
gfc_component *comp = NULL;
int arglen;
@@ -6073,7 +6072,9 @@ gfc_conv_procedure_call (gfc_se * se, gfc_symbol * sym,
else
info = NULL;
+ stmtblock_t post, clobbers;
gfc_init_block (&post);
+ gfc_init_block (&clobbers);
gfc_init_interface_mapping (&mapping);
if (!comp)
{
@@ -6406,7 +6407,7 @@ gfc_conv_procedure_call (gfc_se * se, gfc_symbol * sym,
&& e->symtree->n.sym->attr.pointer))
&& fsym && fsym->attr.target)
/* Make sure the function only gets called once. */
- gfc_conv_expr_reference (&parmse, e, false);
+ gfc_conv_expr_reference (&parmse, e);
else if (e->expr_type == EXPR_FUNCTION
&& e->symtree->n.sym->result
&& e->symtree->n.sym->result != e->symtree->n.sym
@@ -6513,22 +6514,37 @@ gfc_conv_procedure_call (gfc_se * se, gfc_symbol * sym,
}
else
{
- bool add_clobber;
- add_clobber = fsym && fsym->attr.intent == INTENT_OUT
- && !fsym->attr.allocatable && !fsym->attr.pointer
- && e->symtree && e->symtree->n.sym
- && !e->symtree->n.sym->attr.dimension
- && !e->symtree->n.sym->attr.pointer
- && !e->symtree->n.sym->attr.allocatable
- /* See PR 41453. */
- && !e->symtree->n.sym->attr.dummy
- /* FIXME - PR 87395 and PR 41453 */
- && e->symtree->n.sym->attr.save == SAVE_NONE
- && !e->symtree->n.sym->attr.associate_var
- && e->ts.type != BT_CHARACTER && e->ts.type != BT_DERIVED
- && e->ts.type != BT_CLASS && !sym->attr.elemental;
-
- gfc_conv_expr_reference (&parmse, e, add_clobber);
+ gfc_conv_expr_reference (&parmse, e);
+
+ if (fsym
+ && fsym->attr.intent == INTENT_OUT
+ && !fsym->attr.allocatable
+ && !fsym->attr.pointer
+ && e->expr_type == EXPR_VARIABLE
+ && e->ref == NULL
+ && e->symtree
+ && e->symtree->n.sym
+ && !e->symtree->n.sym->attr.dimension
+ && !e->symtree->n.sym->attr.pointer
+ && !e->symtree->n.sym->attr.allocatable
+ /* See PR 41453. */
+ && !e->symtree->n.sym->attr.dummy
+ /* FIXME - PR 87395 and PR 41453 */
+ && e->symtree->n.sym->attr.save == SAVE_NONE
+ && !e->symtree->n.sym->attr.associate_var
+ && e->ts.type != BT_CHARACTER
+ && e->ts.type != BT_DERIVED
+ && e->ts.type != BT_CLASS
+ && !sym->attr.elemental)
+ {
+ tree var;
+ /* FIXME: This fails if var is passed by reference, see PR
+ 41453. */
+ var = build_fold_indirect_ref_loc (input_location,
+ parmse.expr);
+ tree clobber = build_clobber (TREE_TYPE (var));
+ gfc_add_modify (&clobbers, var, clobber);
+ }
}
/* Catch base objects that are not variables. */
if (e->ts.type == BT_CLASS
@@ -7395,6 +7411,7 @@ gfc_conv_procedure_call (gfc_se * se, gfc_symbol * sym,
vec_safe_push (arglist, parmse.expr);
}
+ gfc_add_block_to_block (&se->pre, &clobbers);
gfc_finish_interface_mapping (&mapping, &se->pre, &se->post);
if (comp)
@@ -9495,7 +9512,7 @@ gfc_conv_expr_type (gfc_se * se, gfc_expr * expr, tree type)
values only. */
void
-gfc_conv_expr_reference (gfc_se * se, gfc_expr * expr, bool add_clobber)
+gfc_conv_expr_reference (gfc_se * se, gfc_expr * expr)
{
gfc_ss *ss;
tree var;
@@ -9536,16 +9553,6 @@ gfc_conv_expr_reference (gfc_se * se, gfc_expr * expr, bool add_clobber)
gfc_add_block_to_block (&se->pre, &se->post);
se->expr = var;
}
- else if (add_clobber && expr->ref == NULL)
- {
- tree clobber;
- tree var;
- /* FIXME: This fails if var is passed by reference, see PR
- 41453. */
- var = expr->symtree->n.sym->backend_decl;
- clobber = build_clobber (TREE_TYPE (var));
- gfc_add_modify (&se->pre, var, clobber);
- }
return;
}
diff --git a/gcc/fortran/trans.h b/gcc/fortran/trans.h
index 2833459..e968167 100644
--- a/gcc/fortran/trans.h
+++ b/gcc/fortran/trans.h
@@ -500,8 +500,7 @@ tree gfc_build_compare_string (tree, tree, tree, tree, int, enum tree_code);
void gfc_conv_expr (gfc_se * se, gfc_expr * expr);
void gfc_conv_expr_val (gfc_se * se, gfc_expr * expr);
void gfc_conv_expr_lhs (gfc_se * se, gfc_expr * expr);
-void gfc_conv_expr_reference (gfc_se * se, gfc_expr * expr,
- bool add_clobber = false);
+void gfc_conv_expr_reference (gfc_se * se, gfc_expr * expr);
void gfc_conv_expr_type (gfc_se * se, gfc_expr *, tree);
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 8a09e5c..831106e 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,118 @@
+2022-10-17 Richard Biener <rguenther@suse.de>
+
+ Backported from master:
+ 2022-10-14 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/107254
+ * gfortran.dg/vect/pr107254.f90: New testcase.
+
+2022-10-17 Richard Biener <rguenther@suse.de>
+
+ Backported from master:
+ 2022-10-11 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/107212
+ * gcc.dg/vect/pr107212-1.c: New testcase.
+ * gcc.dg/vect/pr107212-2.c: Likewise.
+
+2022-10-17 Richard Biener <rguenther@suse.de>
+
+ Backported from master:
+ 2022-10-13 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/107160
+ * gcc.dg/vect/pr107160.c: New testcase.
+
+2022-10-17 Richard Biener <rguenther@suse.de>
+
+ Backported from master:
+ 2022-10-06 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/107107
+ * gcc.dg/pr107107.c: New testcase.
+
+2022-10-17 Richard Biener <rguenther@suse.de>
+
+ Backported from master:
+ 2022-09-23 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106922
+ * g++.dg/torture/pr106922.C: New testcase.
+
+2022-10-17 Jakub Jelinek <jakub@redhat.com>
+
+ Backported from master:
+ 2022-09-23 Jakub Jelinek <jakub@redhat.com>
+
+ PR tree-optimization/106922
+ * g++.dg/tree-ssa/pr106922.C: Scan in cddce3 dump rather than
+ dce3. Remove -fdump-tree-pre-details from dg-options.
+
+2022-10-17 Richard Biener <rguenther@suse.de>
+
+ Backported from master:
+ 2022-09-22 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106922
+ * gcc.dg/tree-ssa/ssa-fre-100.c: New testcase.
+ * g++.dg/tree-ssa/pr106922.C: Adjust.
+
+2022-10-13 Marek Polacek <polacek@redhat.com>
+
+ Backported from master:
+ 2022-10-13 Marek Polacek <polacek@redhat.com>
+
+ PR c++/106925
+ * g++.dg/cpp0x/initlist-defarg3.C: New test.
+
+2022-10-12 Mikael Morin <mikael@gcc.gnu.org>
+
+ Backported from master:
+ 2022-09-25 Mikael Morin <mikael@gcc.gnu.org>
+
+ PR fortran/106817
+ * gfortran.dg/intent_optimize_4.f90: New test.
+
+2022-10-12 Mikael Morin <mikael@gcc.gnu.org>
+
+ Backported from master:
+ 2022-09-25 Mikael Morin <mikael@gcc.gnu.org>
+
+ PR fortran/105012
+ * gfortran.dg/intent_out_15.f90: New test.
+
+2022-10-11 Richard Biener <rguenther@suse.de>
+
+ Backported from master:
+ 2022-09-14 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106934
+ * gfortran.dg/pr106934.f90: New testcase.
+
+2022-10-11 Richard Biener <rguenther@suse.de>
+
+ Backported from master:
+ 2022-09-15 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106922
+ * g++.dg/tree-ssa/pr106922.C: New testcase.
+
+2022-10-11 Richard Biener <rguenther@suse.de>
+
+ Backported from master:
+ 2022-09-09 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/106892
+ * gcc.dg/torture/pr106892.c: New testcase.
+
+2022-10-11 Richard Biener <rguenther@suse.de>
+
+ Backported from master:
+ 2022-08-22 Richard Biener <rguenther@suse.de>
+
+ PR tree-optimization/105937
+ * g++.dg/uninit-pr105937.C: New testcase.
+
2022-10-08 José Rui Faustino de Sousa <jrfsousa@gmail.com>
Backported from master:
diff --git a/gcc/testsuite/g++.dg/cpp0x/initlist-defarg3.C b/gcc/testsuite/g++.dg/cpp0x/initlist-defarg3.C
new file mode 100644
index 0000000..5c3e886
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/initlist-defarg3.C
@@ -0,0 +1,13 @@
+// PR c++/106925
+// { dg-do compile { target c++11 } }
+
+struct Foo;
+template <int _Nm> struct __array_traits { typedef Foo _Type[_Nm]; };
+template <int _Nm> struct array {
+ typename __array_traits<_Nm>::_Type _M_elems;
+};
+template <int size> struct MyVector { array<size> data{}; };
+struct Foo {
+ float a{0};
+};
+void foo(MyVector<1> = MyVector<1>());
diff --git a/gcc/testsuite/g++.dg/torture/pr106922.C b/gcc/testsuite/g++.dg/torture/pr106922.C
new file mode 100644
index 0000000..046fc6c
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/pr106922.C
@@ -0,0 +1,48 @@
+// { dg-do compile }
+// { dg-require-effective-target c++17 }
+// { dg-additional-options "-Wall" }
+// -O1 doesn't iterate VN and thus has bogus uninit diagnostics
+// { dg-skip-if "" { *-*-* } { "-O1" } { "" } }
+
+#include <vector>
+
+#include <optional>
+template <class T>
+using Optional = std::optional<T>;
+
+#include <sstream>
+
+struct MyOptionalStructWithInt {
+ int myint; /* works without this */
+ Optional<std::vector<std::string>> myoptional;
+};
+
+struct MyOptionalsStruct {
+ MyOptionalStructWithInt external1;
+ MyOptionalStructWithInt external2;
+};
+
+struct MyStruct { };
+std::ostream &operator << (std::ostream &os, const MyStruct &myStruct);
+
+std::vector<MyStruct> getMyStructs();
+
+void test()
+{
+ MyOptionalsStruct externals;
+ MyOptionalStructWithInt internal1;
+ MyOptionalStructWithInt internal2;
+
+ std::vector<MyStruct> myStructs;
+ myStructs = getMyStructs();
+
+ for (const auto& myStruct : myStructs)
+ {
+ std::stringstream address_stream;
+ address_stream << myStruct;
+ internal1.myint = internal2.myint = 0;
+ externals.external1 = internal1;
+ externals.external2 = internal2;
+ externals.external2 = internal2;
+ }
+}
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr106922.C b/gcc/testsuite/g++.dg/tree-ssa/pr106922.C
new file mode 100644
index 0000000..2aec497
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tree-ssa/pr106922.C
@@ -0,0 +1,90 @@
+// { dg-require-effective-target c++20 }
+// { dg-options "-O2 -fdump-tree-cddce3" }
+
+template <typename> struct __new_allocator {
+ void deallocate(int *, int) { operator delete(0); }
+};
+template <typename _Tp> using __allocator_base = __new_allocator<_Tp>;
+template <typename> struct allocator : __allocator_base<int> {
+ [[__gnu__::__always_inline__]] void deallocate(int *__p, int __n) {
+ __allocator_base<int>::deallocate(__p, __n);
+ }
+};
+template <typename> struct allocator_traits;
+template <typename _Tp> struct allocator_traits<allocator<_Tp>> {
+ using allocator_type = allocator<_Tp>;
+ using pointer = _Tp *;
+ using size_type = int;
+ template <typename _Up> using rebind_alloc = allocator<_Up>;
+ static void deallocate(allocator_type &__a, pointer __p, size_type __n) {
+ __a.deallocate(__p, __n);
+ }
+};
+template <typename _Alloc> struct __alloc_traits : allocator_traits<_Alloc> {
+ typedef allocator_traits<_Alloc> _Base_type;
+ template <typename _Tp> struct rebind {
+ typedef _Base_type::template rebind_alloc<_Tp> other;
+ };
+};
+long _M_deallocate___n;
+struct _Vector_base {
+ typedef __alloc_traits<allocator<int>>::rebind<int>::other _Tp_alloc_type;
+ typedef __alloc_traits<_Tp_alloc_type>::pointer pointer;
+ struct _Vector_impl_data {
+ pointer _M_start;
+ };
+ struct _Vector_impl : _Tp_alloc_type, _Vector_impl_data {};
+ ~_Vector_base() { _M_deallocate(_M_impl._M_start); }
+ _Vector_impl _M_impl;
+ void _M_deallocate(pointer __p) {
+ if (__p)
+ __alloc_traits<_Tp_alloc_type>::deallocate(_M_impl, __p,
+ _M_deallocate___n);
+ }
+};
+struct vector : _Vector_base {};
+struct aligned_storage {
+ int dummy_;
+ int *ptr_ref0;
+ vector &ref() {
+ vector *__trans_tmp_2;
+ void *__trans_tmp_1 = &dummy_;
+ union {
+ void *ap_pvoid;
+ vector *as_ptype;
+ } caster{__trans_tmp_1};
+ __trans_tmp_2 = caster.as_ptype;
+ return *__trans_tmp_2;
+ }
+};
+struct optional_base {
+ optional_base operator=(optional_base &) {
+ bool __trans_tmp_3 = m_initialized;
+ if (__trans_tmp_3)
+ m_initialized = false;
+ return *this;
+ }
+ ~optional_base() {
+ if (m_initialized)
+ m_storage.ref().~vector();
+ }
+ bool m_initialized;
+ aligned_storage m_storage;
+};
+struct optional : optional_base {
+ optional() : optional_base() {}
+};
+template <class> using Optional = optional;
+struct Trans_NS___cxx11_basic_stringstream {};
+void operator<<(Trans_NS___cxx11_basic_stringstream, int);
+int testfunctionfoo_myStructs[10];
+void testfunctionfoo() {
+ Optional<char> external, internal;
+ for (auto myStruct : testfunctionfoo_myStructs) {
+ Trans_NS___cxx11_basic_stringstream address_stream;
+ address_stream << myStruct;
+ external = internal;
+ }
+}
+
+// { dg-final { scan-tree-dump-not "m_initialized" "cddce3" } }
diff --git a/gcc/testsuite/g++.dg/uninit-pr105937.C b/gcc/testsuite/g++.dg/uninit-pr105937.C
new file mode 100644
index 0000000..26b4f74
--- /dev/null
+++ b/gcc/testsuite/g++.dg/uninit-pr105937.C
@@ -0,0 +1,235 @@
+// { dg-do compile }
+// { dg-require-effective-target c++17 }
+// { dg-options "-O2 -Wall" }
+
+#include <stdint.h>
+#include <optional>
+#include <string_view>
+
+using utf8 = char;
+
+enum
+{
+ FONT_SIZE_TINY = 2,
+ FONT_SIZE_SMALL = 0,
+ FONT_SIZE_MEDIUM = 1,
+ FONT_SIZE_COUNT = 3
+};
+
+constexpr const uint16_t FONT_SPRITE_GLYPH_COUNT = 224;
+
+enum class FontSpriteBase : int16_t
+{
+ MEDIUM_EXTRA_DARK = -2,
+ MEDIUM_DARK = -1,
+
+ TINY = FONT_SIZE_TINY * FONT_SPRITE_GLYPH_COUNT,
+ SMALL = FONT_SIZE_SMALL * FONT_SPRITE_GLYPH_COUNT,
+ MEDIUM = FONT_SIZE_MEDIUM * FONT_SPRITE_GLYPH_COUNT,
+};
+
+struct TTFSurface;
+
+class CodepointView
+{
+private:
+ std::string_view _str;
+
+public:
+ class iterator
+ {
+ private:
+ std::string_view _str;
+ size_t _index;
+
+ public:
+ iterator(std::string_view str, size_t index)
+ : _str(str)
+ , _index(index)
+ {
+ }
+
+ bool operator==(const iterator& rhs) const
+ {
+ return _index == rhs._index;
+ }
+ bool operator!=(const iterator& rhs) const
+ {
+ return _index != rhs._index;
+ }
+ char32_t operator*() const
+ {
+ return GetNextCodepoint(&_str[_index], nullptr);
+ }
+ iterator& operator++()
+ {
+ return *this;
+ }
+ iterator operator++(int)
+ {
+ auto result = *this;
+ if (_index < _str.size())
+ {
+ const utf8* nextch;
+ GetNextCodepoint(&_str[_index], &nextch);
+ _index = nextch - _str.data();
+ }
+ return result;
+ }
+
+ size_t GetIndex() const
+ {
+ return _index;
+ }
+
+ static char32_t GetNextCodepoint(const char* ch, const char** next);
+ };
+
+ CodepointView(std::string_view str)
+ : _str(str)
+ {
+ }
+
+ iterator begin() const
+ {
+ return iterator(_str, 0);
+ }
+
+ iterator end() const
+ {
+ return iterator(_str, _str.size());
+ }
+};
+
+struct InternalTTFFont;
+using TTF_Font = InternalTTFFont;
+struct TTFFontDescriptor
+{
+ const utf8* filename;
+ const utf8* font_name;
+ int32_t ptSize;
+ int32_t offset_x;
+ int32_t offset_y;
+ int32_t line_height;
+ int32_t hinting_threshold;
+ TTF_Font* font;
+};
+using codepoint_t = uint32_t;
+
+#define abstract = 0
+
+struct ITTF
+{
+ virtual ~ITTF() = default;
+ virtual TTFFontDescriptor* ttf_get_font_from_sprite_base(FontSpriteBase spriteBase) abstract;
+ virtual TTFSurface* ttf_surface_cache_get_or_add(TTF_Font* font, std::string_view text) abstract;
+};
+
+namespace OpenRCT2 {
+ struct IContext
+ {
+ virtual ~IContext() = default;
+
+ virtual ITTF* GetTTF() abstract;
+ };
+}
+
+static void ttf_draw_string_raw_ttf(OpenRCT2::IContext* context, std::string_view text)
+{
+ TTFFontDescriptor* fontDesc = context->GetTTF()->ttf_get_font_from_sprite_base(FontSpriteBase::MEDIUM_EXTRA_DARK);
+ if (fontDesc->font == nullptr)
+ {
+ return;
+ }
+
+ TTFSurface* surface = context->GetTTF()->ttf_surface_cache_get_or_add(fontDesc->font, text);
+ if (surface == nullptr)
+ return;
+}
+
+namespace UnicodeChar
+{
+ // Punctuation
+ constexpr char32_t leftguillemet = 0xAB;
+ constexpr char32_t rightguillemet = 0xBB;
+ constexpr char32_t german_quote_open = 0x201E;
+ constexpr char32_t quote_open = 0x201C;
+ constexpr char32_t quote_close = 0x201D;
+
+ // Dingbats
+ constexpr char32_t up = 0x25B2;
+ constexpr char32_t small_up = 0x25B4;
+ constexpr char32_t right = 0x25B6;
+ constexpr char32_t down = 0x25BC;
+ constexpr char32_t small_down = 0x25BE;
+ constexpr char32_t left = 0x25C0;
+ constexpr char32_t tick = 0x2713;
+ constexpr char32_t plus = 0x2795;
+ constexpr char32_t minus = 0x2796;
+
+ // Emoji
+ constexpr char32_t cross = 0x274C;
+ constexpr char32_t variation_selector = 0xFE0F;
+ constexpr char32_t eye = 0x1F441;
+ constexpr char32_t road = 0x1F6E3;
+ constexpr char32_t railway = 0x1F6E4;
+}; // namespace UnicodeChar
+
+
+static bool ShouldUseSpriteForCodepoint(char32_t codepoint)
+{
+ switch (codepoint)
+ {
+ case UnicodeChar::up:
+ case UnicodeChar::down:
+ case UnicodeChar::leftguillemet:
+ case UnicodeChar::tick:
+ case UnicodeChar::cross:
+ case UnicodeChar::right:
+ case UnicodeChar::rightguillemet:
+ case UnicodeChar::small_up:
+ case UnicodeChar::small_down:
+ case UnicodeChar::left:
+ case UnicodeChar::quote_open:
+ case UnicodeChar::quote_close:
+ case UnicodeChar::german_quote_open:
+ case UnicodeChar::plus:
+ case UnicodeChar::minus:
+ case UnicodeChar::variation_selector:
+ case UnicodeChar::eye:
+ case UnicodeChar::road:
+ case UnicodeChar::railway:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void ttf_process_string_literal(OpenRCT2::IContext* context, std::string_view text)
+{
+ CodepointView codepoints(text);
+ std::optional<size_t> ttfRunIndex;
+ for (auto it = codepoints.begin(); it != codepoints.end(); it++)
+ {
+ auto codepoint = *it;
+ if (ShouldUseSpriteForCodepoint(codepoint))
+ {
+ if (ttfRunIndex.has_value())
+ {
+ // Draw the TTF run
+ auto len = it.GetIndex() - ttfRunIndex.value(); // { dg-bogus "may be used uninitialized" }
+ ttf_draw_string_raw_ttf(context, text.substr(ttfRunIndex.value(), len));
+ ttfRunIndex = std::nullopt;
+ }
+
+ // Draw the sprite font glyph
+ }
+ else
+ {
+ if (!ttfRunIndex.has_value())
+ {
+ ttfRunIndex = it.GetIndex();
+ }
+ }
+ }
+}
diff --git a/gcc/testsuite/gcc.dg/pr107107.c b/gcc/testsuite/gcc.dg/pr107107.c
new file mode 100644
index 0000000..5ad6a63
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr107107.c
@@ -0,0 +1,25 @@
+/* { dg-do run } */
+/* { dg-options "-O2 -ftree-tail-merge" } */
+
+static inline void set_longish(int is_long_long, void *p, long x)
+{
+ if (is_long_long)
+ *(long long*)p = x;
+ else
+ *(long*)p = x;
+}
+static long test(long long *p, int index, int mode)
+{
+ *p = 1;
+ set_longish(mode, p+index, 2);
+ return *p;
+}
+long (*volatile vtest)(long long*, int, int) = test;
+int main(void)
+{
+ long long x;
+ long result = vtest(&x, 0, 1);
+ if (result != 2 || x != 2)
+ __builtin_abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/torture/pr106892.c b/gcc/testsuite/gcc.dg/torture/pr106892.c
new file mode 100644
index 0000000..73a66a0
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/pr106892.c
@@ -0,0 +1,30 @@
+/* { dg-do run } */
+
+int a, b, c, d, e;
+int f[8];
+static int g() {
+ while (a)
+ a >>= 4;
+ return 0;
+}
+static int h(int i) {
+ if (i >= '0')
+ return i - '0';
+ //__builtin_unreachable ();
+}
+void __attribute__((noipa)) j(int i) {
+ for (b = 2; g() <= 7; b++)
+ if (i) {
+ for (; e <= 7; e++)
+ for (c = 1; c <= 7; c++) {
+ d = h(b + '0');
+ f[-d + 4] ^= 3;
+ }
+ return;
+ }
+}
+int main() {
+ j(1);
+ if (f[2] != 0)
+ __builtin_abort ();
+}
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-100.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-100.c
new file mode 100644
index 0000000..ead7654
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-100.c
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-fre1" } */
+
+float bar, baz;
+void foo (int *p, int n)
+{
+ *p = 0;
+ do
+ {
+ bar = 1.;
+ /* When iterating we should have optimistically value-numbered
+ *p to zero, on the second iteration we have to prove the
+ store below does not affect the value of this load though.
+ We can compare the stored value against the value from the
+ previous iteration instead relying on a non-walking lookup. */
+ if (*p)
+ {
+ baz = 2.;
+ *p = 0;
+ }
+ }
+ while (--n);
+}
+
+/* { dg-final { scan-tree-dump-not "baz" "fre1" } } */
diff --git a/gcc/testsuite/gcc.dg/vect/pr107160.c b/gcc/testsuite/gcc.dg/vect/pr107160.c
new file mode 100644
index 0000000..4f9f853c
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/pr107160.c
@@ -0,0 +1,41 @@
+/* { dg-do run } */
+
+#include <math.h>
+
+#define N 128
+float fl[N];
+
+__attribute__ ((noipa)) void
+init ()
+{
+ for (int i = 0; i < N; i++)
+ fl[i] = i;
+}
+
+__attribute__ ((noipa)) float
+foo (int n1)
+{
+ float sum0, sum1, sum2, sum3;
+ sum0 = sum1 = sum2 = sum3 = 0.0f;
+
+ int n = (n1 / 4) * 4;
+ for (int i = 0; i < n; i += 4)
+ {
+ sum0 += fabs (fl[i]);
+ sum1 += fabs (fl[i + 1]);
+ sum2 += fabs (fl[i + 2]);
+ sum3 += fabs (fl[i + 3]);
+ }
+
+ return sum0 + sum1 + sum2 + sum3;
+}
+
+int
+main ()
+{
+ init ();
+ float res = foo (80);
+ if (res != 3160)
+ __builtin_abort ();
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vect/pr107212-1.c b/gcc/testsuite/gcc.dg/vect/pr107212-1.c
new file mode 100644
index 0000000..5343f9b
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/pr107212-1.c
@@ -0,0 +1,27 @@
+/* { dg-do run } */
+
+#include "tree-vect.h"
+
+int main()
+{
+ check_vect ();
+
+ unsigned int tab[6][2] = { {69, 73}, {36, 40}, {24, 16},
+ {16, 11}, {4, 5}, {3, 1} };
+
+ int sum_0 = 0;
+ int sum_1 = 0;
+
+ for(int t=0; t<6; t++) {
+ sum_0 += tab[t][0];
+ sum_1 += tab[t][1];
+ }
+
+ int x1 = (sum_0 < 100);
+ int x2 = (sum_0 > 200);
+
+ if (x1 || x2 || sum_1 != 146)
+ __builtin_abort ();
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.dg/vect/pr107212-2.c b/gcc/testsuite/gcc.dg/vect/pr107212-2.c
new file mode 100644
index 0000000..109c2b9
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/vect/pr107212-2.c
@@ -0,0 +1,23 @@
+/* { dg-do run } */
+
+#include "tree-vect.h"
+
+int sum_1 = 0;
+
+int main()
+{
+ check_vect ();
+
+ unsigned int tab[6][2] = {{150, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}};
+
+ int sum_0 = 0;
+
+ for (int t = 0; t < 6; t++) {
+ sum_0 += tab[t][0];
+ sum_1 += tab[t][0];
+ }
+
+ if (sum_0 < 100 || sum_0 > 200)
+ __builtin_abort();
+ return 0;
+}
diff --git a/gcc/testsuite/gfortran.dg/intent_optimize_4.f90 b/gcc/testsuite/gfortran.dg/intent_optimize_4.f90
new file mode 100644
index 0000000..effbaa1
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/intent_optimize_4.f90
@@ -0,0 +1,43 @@
+! { dg-do run }
+! { dg-additional-options "-fdump-tree-original" }
+! { dg-final { scan-tree-dump-times "CLOBBER" 2 "original" } }
+!
+! PR fortran/106817
+! Check that for an actual argument whose dummy is INTENT(OUT),
+! the clobber that is emitted in the caller before a procedure call
+! happens after any expression depending on the argument value has been
+! evaluated.
+!
+
+module m
+ implicit none
+contains
+ subroutine copy1(out, in)
+ integer, intent(in) :: in
+ integer, intent(out) :: out
+ out = in
+ end subroutine copy1
+ subroutine copy2(in, out)
+ integer, intent(in) :: in
+ integer, intent(out) :: out
+ out = in
+ end subroutine copy2
+end module m
+
+program p
+ use m
+ implicit none
+ integer :: a, b
+
+ ! Clobbering of a should happen after a+1 has been evaluated.
+ a = 3
+ call copy1(a, a+1)
+ if (a /= 4) stop 1
+
+ ! Clobbering order does not depend on the order of arguments.
+ ! It should also come last with reversed arguments.
+ b = 12
+ call copy2(b+1, b)
+ if (b /= 13) stop 2
+
+end program p
diff --git a/gcc/testsuite/gfortran.dg/intent_out_15.f90 b/gcc/testsuite/gfortran.dg/intent_out_15.f90
new file mode 100644
index 0000000..64334e6
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/intent_out_15.f90
@@ -0,0 +1,27 @@
+! { dg-do compile }
+! { dg-additional-options "-fdump-tree-original" }
+!
+! PR fortran/105012
+! The following case was triggering an ICE because of a clobber
+! on the DERFC function decl instead of its result.
+
+module error_function
+integer, parameter :: r8 = selected_real_kind(12) ! 8 byte real
+contains
+SUBROUTINE CALERF_r8(ARG, RESULT, JINT)
+ integer, parameter :: rk = r8
+ real(rk), intent(in) :: arg
+ real(rk), intent(out) :: result
+ IF (Y .LE. THRESH) THEN
+ END IF
+end SUBROUTINE CALERF_r8
+FUNCTION DERFC(X)
+ integer, parameter :: rk = r8 ! 8 byte real
+ real(rk), intent(in) :: X
+ real(rk) :: DERFC
+ CALL CALERF_r8(X, DERFC, JINT)
+END FUNCTION DERFC
+end module error_function
+
+! { dg-final { scan-tree-dump-times "CLOBBER" 1 "original" } }
+! { dg-final { scan-tree-dump "__result_derfc = {CLOBBER};" "original" } }
diff --git a/gcc/testsuite/gfortran.dg/pr106934.f90 b/gcc/testsuite/gfortran.dg/pr106934.f90
new file mode 100644
index 0000000..ac58a3e
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pr106934.f90
@@ -0,0 +1,7 @@
+! { dg-do compile }
+! { dg-options "-O" }
+subroutine s
+ logical(1) :: a = .true.
+ logical(2) :: b
+ a = transfer(b, a)
+end
diff --git a/gcc/testsuite/gfortran.dg/vect/pr107254.f90 b/gcc/testsuite/gfortran.dg/vect/pr107254.f90
new file mode 100644
index 0000000..85bcb5f
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/vect/pr107254.f90
@@ -0,0 +1,49 @@
+! { dg-do run }
+
+subroutine dlartg( f, g, s, r )
+ implicit none
+ double precision :: f, g, r, s
+ double precision :: d, p
+
+ d = sqrt( f*f + g*g )
+ p = 1.d0 / d
+ if( abs( f ) > 1 ) then
+ s = g*sign( p, f )
+ r = sign( d, f )
+ else
+ s = g*sign( p, f )
+ r = sign( d, f )
+ end if
+end subroutine
+
+subroutine dhgeqz( n, h, t )
+ implicit none
+ integer n
+ double precision h( n, * ), t( n, * )
+ integer jc
+ double precision c, s, temp, temp2, tempr
+ temp2 = 10d0
+ call dlartg( 10d0, temp2, s, tempr )
+ c = 0.9d0
+ s = 1.d0
+ do jc = 1, n
+ temp = c*h( 1, jc ) + s*h( 2, jc )
+ h( 2, jc ) = -s*h( 1, jc ) + c*h( 2, jc )
+ h( 1, jc ) = temp
+ temp2 = c*t( 1, jc ) + s*t( 2, jc )
+ t( 2, jc ) = -s*t( 1, jc ) + c*t( 2, jc )
+ t( 1, jc ) = temp2
+ enddo
+end subroutine dhgeqz
+
+program test
+ implicit none
+ double precision h(2,2), t(2,2)
+ h = 0
+ t(1,1) = 1
+ t(2,1) = 0
+ t(1,2) = 0
+ t(2,2) = 0
+ call dhgeqz( 2, h, t )
+ if (t(2,2).ne.0) STOP 1
+end program test
diff --git a/gcc/tree-predcom.cc b/gcc/tree-predcom.cc
index fb45725..3666b90 100644
--- a/gcc/tree-predcom.cc
+++ b/gcc/tree-predcom.cc
@@ -1771,10 +1771,24 @@ ref_at_iteration (data_reference_p dr, int iter,
ref = TREE_OPERAND (ref, 0);
}
}
- tree addr = fold_build_pointer_plus (DR_BASE_ADDRESS (dr), off);
+ /* We may not associate the constant offset across the pointer plus
+ expression because that might form a pointer to before the object
+ then. But for some cases we can retain that to allow tree_could_trap_p
+ to return false - see gcc.dg/tree-ssa/predcom-1.c */
+ tree addr, alias_ptr;
+ if (integer_zerop (off))
+ {
+ alias_ptr = fold_convert (reference_alias_ptr_type (ref), coff);
+ addr = DR_BASE_ADDRESS (dr);
+ }
+ else
+ {
+ alias_ptr = build_zero_cst (reference_alias_ptr_type (ref));
+ off = size_binop (PLUS_EXPR, off, coff);
+ addr = fold_build_pointer_plus (DR_BASE_ADDRESS (dr), off);
+ }
addr = force_gimple_operand_1 (unshare_expr (addr), stmts,
is_gimple_mem_ref_addr, NULL_TREE);
- tree alias_ptr = fold_convert (reference_alias_ptr_type (ref), coff);
tree type = build_aligned_type (TREE_TYPE (ref),
get_object_alignment (ref));
ref = build2 (MEM_REF, type, addr, alias_ptr);
diff --git a/gcc/tree-ssa-pre.cc b/gcc/tree-ssa-pre.cc
index f3ce460..a595d0d 100644
--- a/gcc/tree-ssa-pre.cc
+++ b/gcc/tree-ssa-pre.cc
@@ -1249,7 +1249,11 @@ translate_vuse_through_block (vec<vn_reference_op_s> operands,
if (same_valid)
*same_valid = true;
- if (gimple_bb (phi) != phiblock)
+ /* If value-numbering provided a memory state for this
+ that dominates PHIBLOCK we can just use that. */
+ if (gimple_nop_p (phi)
+ || (gimple_bb (phi) != phiblock
+ && dominated_by_p (CDI_DOMINATORS, phiblock, gimple_bb (phi))))
return vuse;
/* We have pruned expressions that are killed in PHIBLOCK via
@@ -2044,11 +2048,13 @@ prune_clobbered_mems (bitmap_set_t set, basic_block block)
{
gimple *def_stmt = SSA_NAME_DEF_STMT (ref->vuse);
if (!gimple_nop_p (def_stmt)
- && ((gimple_bb (def_stmt) != block
- && !dominated_by_p (CDI_DOMINATORS,
- block, gimple_bb (def_stmt)))
- || (gimple_bb (def_stmt) == block
- && value_dies_in_block_x (expr, block))))
+ /* If value-numbering provided a memory state for this
+ that dominates BLOCK we're done, otherwise we have
+ to check if the value dies in BLOCK. */
+ && !(gimple_bb (def_stmt) != block
+ && dominated_by_p (CDI_DOMINATORS,
+ block, gimple_bb (def_stmt)))
+ && value_dies_in_block_x (expr, block))
to_remove = i;
}
/* If the REFERENCE may trap make sure the block does not contain
diff --git a/gcc/tree-ssa-sccvn.cc b/gcc/tree-ssa-sccvn.cc
index 2bcf44c..a63f5c8 100644
--- a/gcc/tree-ssa-sccvn.cc
+++ b/gcc/tree-ssa-sccvn.cc
@@ -1802,7 +1802,8 @@ struct vn_walk_cb_data
vn_lookup_kind vn_walk_kind_, bool tbaa_p_, tree mask_,
bool redundant_store_removal_p_)
: vr (vr_), last_vuse_ptr (last_vuse_ptr_), last_vuse (NULL_TREE),
- mask (mask_), masked_result (NULL_TREE), vn_walk_kind (vn_walk_kind_),
+ mask (mask_), masked_result (NULL_TREE), same_val (NULL_TREE),
+ vn_walk_kind (vn_walk_kind_),
tbaa_p (tbaa_p_), redundant_store_removal_p (redundant_store_removal_p_),
saved_operands (vNULL), first_set (-2), first_base_set (-2),
known_ranges (NULL)
@@ -1862,6 +1863,7 @@ struct vn_walk_cb_data
tree last_vuse;
tree mask;
tree masked_result;
+ tree same_val;
vn_lookup_kind vn_walk_kind;
bool tbaa_p;
bool redundant_store_removal_p;
@@ -1900,6 +1902,8 @@ vn_walk_cb_data::finish (alias_set_type set, alias_set_type base_set, tree val)
masked_result = val;
return (void *) -1;
}
+ if (same_val && !operand_equal_p (val, same_val))
+ return (void *) -1;
vec<vn_reference_op_s> &operands
= saved_operands.exists () ? saved_operands : vr->operands;
return vn_reference_lookup_or_insert_for_pieces (last_vuse, set, base_set,
@@ -2666,36 +2670,61 @@ vn_reference_lookup_3 (ao_ref *ref, tree vuse, void *data_,
and return the found value. */
if (is_gimple_reg_type (TREE_TYPE (lhs))
&& types_compatible_p (TREE_TYPE (lhs), vr->type)
- && (ref->ref || data->orig_ref.ref))
- {
- tree *saved_last_vuse_ptr = data->last_vuse_ptr;
- /* Do not update last_vuse_ptr in vn_reference_lookup_2. */
- data->last_vuse_ptr = NULL;
- tree saved_vuse = vr->vuse;
- hashval_t saved_hashcode = vr->hashcode;
- void *res = vn_reference_lookup_2 (ref, gimple_vuse (def_stmt), data);
- /* Need to restore vr->vuse and vr->hashcode. */
- vr->vuse = saved_vuse;
- vr->hashcode = saved_hashcode;
- data->last_vuse_ptr = saved_last_vuse_ptr;
- if (res && res != (void *)-1)
+ && (ref->ref || data->orig_ref.ref)
+ && !data->mask
+ && data->partial_defs.is_empty ()
+ && multiple_p (get_object_alignment
+ (ref->ref ? ref->ref : data->orig_ref.ref),
+ ref->size)
+ && multiple_p (get_object_alignment (lhs), ref->size))
+ {
+ tree rhs = gimple_assign_rhs1 (def_stmt);
+ /* ??? We may not compare to ahead values which might be from
+ a different loop iteration but only to loop invariants. Use
+ CONSTANT_CLASS_P (unvalueized!) as conservative approximation.
+ The one-hop lookup below doesn't have this issue since there's
+ a virtual PHI before we ever reach a backedge to cross.
+ We can skip multiple defs as long as they are from the same
+ value though. */
+ if (data->same_val
+ && !operand_equal_p (data->same_val, rhs))
+ ;
+ else if (CONSTANT_CLASS_P (rhs))
{
- vn_reference_t vnresult = (vn_reference_t) res;
- tree rhs = gimple_assign_rhs1 (def_stmt);
- if (TREE_CODE (rhs) == SSA_NAME)
- rhs = SSA_VAL (rhs);
- if (vnresult->result
- && operand_equal_p (vnresult->result, rhs, 0)
- /* We have to honor our promise about union type punning
- and also support arbitrary overlaps with
- -fno-strict-aliasing. So simply resort to alignment to
- rule out overlaps. Do this check last because it is
- quite expensive compared to the hash-lookup above. */
- && multiple_p (get_object_alignment
- (ref->ref ? ref->ref : data->orig_ref.ref),
- ref->size)
- && multiple_p (get_object_alignment (lhs), ref->size))
- return res;
+ if (dump_file && (dump_flags & TDF_DETAILS))
+ {
+ fprintf (dump_file,
+ "Skipping possible redundant definition ");
+ print_gimple_stmt (dump_file, def_stmt, 0);
+ }
+ /* Delay the actual compare of the values to the end of the walk
+ but do not update last_vuse from here. */
+ data->last_vuse_ptr = NULL;
+ data->same_val = rhs;
+ return NULL;
+ }
+ else
+ {
+ tree *saved_last_vuse_ptr = data->last_vuse_ptr;
+ /* Do not update last_vuse_ptr in vn_reference_lookup_2. */
+ data->last_vuse_ptr = NULL;
+ tree saved_vuse = vr->vuse;
+ hashval_t saved_hashcode = vr->hashcode;
+ void *res = vn_reference_lookup_2 (ref, gimple_vuse (def_stmt),
+ data);
+ /* Need to restore vr->vuse and vr->hashcode. */
+ vr->vuse = saved_vuse;
+ vr->hashcode = saved_hashcode;
+ data->last_vuse_ptr = saved_last_vuse_ptr;
+ if (res && res != (void *)-1)
+ {
+ vn_reference_t vnresult = (vn_reference_t) res;
+ if (TREE_CODE (rhs) == SSA_NAME)
+ rhs = SSA_VAL (rhs);
+ if (vnresult->result
+ && operand_equal_p (vnresult->result, rhs, 0))
+ return res;
+ }
}
}
}
@@ -3656,6 +3685,14 @@ vn_reference_lookup_pieces (tree vuse, alias_set_type set,
if (ops_for_ref != shared_lookup_references)
ops_for_ref.release ();
gcc_checking_assert (vr1.operands == shared_lookup_references);
+ if (*vnresult
+ && data.same_val
+ && (!(*vnresult)->result
+ || !operand_equal_p ((*vnresult)->result, data.same_val)))
+ {
+ *vnresult = NULL;
+ return NULL_TREE;
+ }
}
if (*vnresult)
@@ -3734,6 +3771,10 @@ vn_reference_lookup (tree op, tree vuse, vn_lookup_kind kind,
if (wvnresult)
{
gcc_assert (mask == NULL_TREE);
+ if (data.same_val
+ && (!wvnresult->result
+ || !operand_equal_p (wvnresult->result, data.same_val)))
+ return NULL_TREE;
if (vnresult)
*vnresult = wvnresult;
return wvnresult->result;
@@ -5449,19 +5490,6 @@ visit_reference_op_store (tree lhs, tree op, gimple *stmt)
if (!resultsame)
{
- /* Only perform the following when being called from PRE
- which embeds tail merging. */
- if (default_vn_walk_kind == VN_WALK)
- {
- assign = build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, op);
- vn_reference_lookup (assign, vuse, VN_NOWALK, &vnresult, false);
- if (vnresult)
- {
- VN_INFO (vdef)->visited = true;
- return set_ssa_val_to (vdef, vnresult->result_vdef);
- }
- }
-
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf (dump_file, "No store match\n");
@@ -5486,7 +5514,9 @@ visit_reference_op_store (tree lhs, tree op, gimple *stmt)
if (default_vn_walk_kind == VN_WALK)
{
assign = build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, op);
- vn_reference_insert (assign, lhs, vuse, vdef);
+ vn_reference_lookup (assign, vuse, VN_NOWALK, &vnresult, false);
+ if (!vnresult)
+ vn_reference_insert (assign, lhs, vuse, vdef);
}
}
else
diff --git a/gcc/tree-ssa-uninit.cc b/gcc/tree-ssa-uninit.cc
index f326f17..30aafda 100644
--- a/gcc/tree-ssa-uninit.cc
+++ b/gcc/tree-ssa-uninit.cc
@@ -41,6 +41,7 @@ along with GCC; see the file COPYING3. If not see
#include "gimple-predicate-analysis.h"
#include "domwalk.h"
#include "tree-ssa-sccvn.h"
+#include "cfganal.h"
/* This implements the pass that does predicate aware warning on uses of
possibly uninitialized variables. The pass first collects the set of
@@ -1191,8 +1192,16 @@ find_uninit_use (gphi *phi, unsigned uninit_opnds,
basic_block use_bb;
if (gphi *use_phi = dyn_cast<gphi *> (use_stmt))
- use_bb = gimple_phi_arg_edge (use_phi,
- PHI_ARG_INDEX_FROM_USE (use_p))->src;
+ {
+ edge e = gimple_phi_arg_edge (use_phi,
+ PHI_ARG_INDEX_FROM_USE (use_p));
+ use_bb = e->src;
+ /* Do not look for uses in the next iteration of a loop, predicate
+ analysis will not use the appropriate predicates to prove
+ reachability. */
+ if (e->flags & EDGE_DFS_BACK)
+ continue;
+ }
else
use_bb = gimple_bb (use_stmt);
@@ -1339,6 +1348,7 @@ execute_late_warn_uninitialized (function *fun)
/* Mark all edges executable, warn_uninitialized_vars will skip
unreachable blocks. */
set_all_edges_as_executable (fun);
+ mark_dfs_back_edges (fun);
/* Re-do the plain uninitialized variable check, as optimization may have
straightened control flow. Do this first so that we don't accidentally
diff --git a/gcc/tree-ssa.cc b/gcc/tree-ssa.cc
index a362a0a..ab8b1c2 100644
--- a/gcc/tree-ssa.cc
+++ b/gcc/tree-ssa.cc
@@ -1459,6 +1459,8 @@ maybe_rewrite_mem_ref_base (tree *tp, bitmap suitable_for_renaming)
&& (! INTEGRAL_TYPE_P (TREE_TYPE (*tp))
|| (wi::to_offset (TYPE_SIZE (TREE_TYPE (*tp)))
== TYPE_PRECISION (TREE_TYPE (*tp))))
+ && (! INTEGRAL_TYPE_P (TREE_TYPE (sym))
+ || type_has_mode_precision_p (TREE_TYPE (sym)))
&& wi::umod_trunc (wi::to_offset (TYPE_SIZE (TREE_TYPE (*tp))),
BITS_PER_UNIT) == 0)
{
@@ -1531,6 +1533,10 @@ non_rewritable_mem_ref_base (tree ref)
&& (! INTEGRAL_TYPE_P (TREE_TYPE (base))
|| (wi::to_offset (TYPE_SIZE (TREE_TYPE (base)))
== TYPE_PRECISION (TREE_TYPE (base))))
+ /* ??? Likewise for extracts from bitfields, we'd have
+ to pun the base object to a size precision mode first. */
+ && (! INTEGRAL_TYPE_P (TREE_TYPE (decl))
+ || type_has_mode_precision_p (TREE_TYPE (decl)))
&& wi::umod_trunc (wi::to_offset (TYPE_SIZE (TREE_TYPE (base))),
BITS_PER_UNIT) == 0)
return NULL_TREE;
diff --git a/gcc/tree-vect-loop.cc b/gcc/tree-vect-loop.cc
index d1c19ce..001f7dd 100644
--- a/gcc/tree-vect-loop.cc
+++ b/gcc/tree-vect-loop.cc
@@ -6083,7 +6083,8 @@ vect_create_epilog_for_reduction (loop_vec_info loop_vinfo,
}
/* Record this operation if it could be reused by the epilogue loop. */
- if (STMT_VINFO_REDUC_TYPE (reduc_info) == TREE_CODE_REDUCTION)
+ if (STMT_VINFO_REDUC_TYPE (reduc_info) == TREE_CODE_REDUCTION
+ && vec_num == 1)
loop_vinfo->reusable_accumulators.put (scalar_results[0],
{ orig_reduc_input, reduc_info });
@@ -6678,10 +6679,20 @@ vectorizable_reduction (loop_vec_info loop_vinfo,
}
if (!REDUC_GROUP_FIRST_ELEMENT (vdef))
only_slp_reduc_chain = false;
- /* ??? For epilogue generation live members of the chain need
+ /* For epilogue generation live members of the chain need
to point back to the PHI via their original stmt for
- info_for_reduction to work. */
- if (STMT_VINFO_LIVE_P (vdef))
+ info_for_reduction to work. For SLP we need to look at
+ all lanes here - even though we only will vectorize from
+ the SLP node with live lane zero the other live lanes also
+ need to be identified as part of a reduction to be able
+ to skip code generation for them. */
+ if (slp_for_stmt_info)
+ {
+ for (auto s : SLP_TREE_SCALAR_STMTS (slp_for_stmt_info))
+ if (STMT_VINFO_LIVE_P (s))
+ STMT_VINFO_REDUC_DEF (vect_orig_stmt (s)) = phi_info;
+ }
+ else if (STMT_VINFO_LIVE_P (vdef))
STMT_VINFO_REDUC_DEF (def) = phi_info;
gimple_match_op op;
if (!gimple_extract_op (vdef->stmt, &op))
@@ -8754,10 +8765,6 @@ vectorizable_live_operation (vec_info *vinfo,
all involved stmts together. */
else if (slp_index != 0)
return true;
- else
- /* For SLP reductions the meta-info is attached to
- the representative. */
- stmt_info = SLP_TREE_REPRESENTATIVE (slp_node);
}
stmt_vec_info reduc_info = info_for_reduction (loop_vinfo, stmt_info);
gcc_assert (reduc_info->is_reduc_info);
diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc
index 0223056..ec9c10a 100644
--- a/gcc/tree-vect-slp.cc
+++ b/gcc/tree-vect-slp.cc
@@ -4504,7 +4504,23 @@ vect_slp_analyze_node_operations_1 (vec_info *vinfo, slp_tree node,
/* Handle purely internal nodes. */
if (SLP_TREE_CODE (node) == VEC_PERM_EXPR)
- return vectorizable_slp_permutation (vinfo, NULL, node, cost_vec);
+ {
+ if (!vectorizable_slp_permutation (vinfo, NULL, node, cost_vec))
+ return false;
+
+ stmt_vec_info slp_stmt_info;
+ unsigned int i;
+ FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, slp_stmt_info)
+ {
+ if (STMT_VINFO_LIVE_P (slp_stmt_info)
+ && !vectorizable_live_operation (vinfo,
+ slp_stmt_info, NULL, node,
+ node_instance, i,
+ false, cost_vec))
+ return false;
+ }
+ return true;
+ }
gcc_assert (STMT_SLP_TYPE (stmt_info) != loop_vect);
@@ -7344,8 +7360,6 @@ vect_schedule_slp_node (vec_info *vinfo,
}
}
- bool done_p = false;
-
/* Handle purely internal nodes. */
if (SLP_TREE_CODE (node) == VEC_PERM_EXPR)
{
@@ -7356,9 +7370,18 @@ vect_schedule_slp_node (vec_info *vinfo,
but open-code it here (partly). */
bool done = vectorizable_slp_permutation (vinfo, &si, node, NULL);
gcc_assert (done);
- done_p = true;
+ stmt_vec_info slp_stmt_info;
+ unsigned int i;
+ FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, slp_stmt_info)
+ if (STMT_VINFO_LIVE_P (slp_stmt_info))
+ {
+ done = vectorizable_live_operation (vinfo,
+ slp_stmt_info, &si, node,
+ instance, i, true, NULL);
+ gcc_assert (done);
+ }
}
- if (!done_p)
+ else
vect_transform_stmt (vinfo, stmt_info, &si, node, instance);
}