author     Martin Liska <mliska@suse.cz>    2021-06-08 09:47:03 +0200
committer  Martin Liska <mliska@suse.cz>    2021-06-08 09:47:03 +0200
commit     8451c23d2872e99371728bd9ccddec7661b11c63 (patch)
tree       7e7a2c3143ebed71635690bd4387d7ceca0805e0 /gcc
parent     6467a4e9a6cceae84be71007d11dfc61b47b43a4 (diff)
parent     7191e63d051b18c8f62ff73916345fc623a1bf7e (diff)
Merge branch 'master' into devel/sphinx
Diffstat (limited to 'gcc')
-rw-r--r--  gcc/ChangeLog                                  | 120
-rw-r--r--  gcc/DATESTAMP                                  |   2
-rw-r--r--  gcc/bitmap.c                                   | 108
-rw-r--r--  gcc/bitmap.h                                   |   7
-rw-r--r--  gcc/c-family/ChangeLog                         |   5
-rw-r--r--  gcc/c/ChangeLog                                |   6
-rw-r--r--  gcc/c/c-typeck.c                               |   2
-rw-r--r--  gcc/config/h8300/movepush.md                   |  53
-rw-r--r--  gcc/config/i386/i386-expand.c                  |   9
-rw-r--r--  gcc/config/i386/mmx.md                         |  12
-rw-r--r--  gcc/cp/ChangeLog                               |   7
-rw-r--r--  gcc/cp/init.c                                  |   2
-rw-r--r--  gcc/cp/lambda.c                                |   6
-rw-r--r--  gcc/cp/module.cc                               |   6
-rw-r--r--  gcc/cp/name-lookup.c                           |  24
-rw-r--r--  gcc/cp/parser.c                                |  24
-rw-r--r--  gcc/cp/pt.c                                    |   1
-rw-r--r--  gcc/cp/semantics.c                             |   6
-rw-r--r--  gcc/doc/invoke.texi                            |   3
-rw-r--r--  gcc/fortran/intrinsic.texi                     |   2
-rw-r--r--  gcc/fortran/trans-expr.c                       |   2
-rw-r--r--  gcc/genautomata.c                              |   2
-rw-r--r--  gcc/gimple-range-cache.cc                      | 147
-rw-r--r--  gcc/gimple-range-cache.h                       |   1
-rw-r--r--  gcc/objc/ChangeLog                             |   5
-rw-r--r--  gcc/objcp/ChangeLog                            |   5
-rw-r--r--  gcc/params.opt                                 |   4
-rw-r--r--  gcc/testsuite/ChangeLog                        |  52
-rw-r--r--  gcc/testsuite/g++.dg/cpp0x/alias-decl-73.C     |   9
-rw-r--r--  gcc/testsuite/g++.dg/template/access38.C       |  15
-rw-r--r--  gcc/testsuite/g++.dg/template/lookup17.C       |  18
-rw-r--r--  gcc/testsuite/g++.target/i386/pr100885.C       |   2
-rw-r--r--  gcc/testsuite/gcc.dg/format/strfmon-1.c        |   2
-rw-r--r--  gcc/testsuite/gcc.dg/sso-14.c                  |   7
-rw-r--r--  gcc/testsuite/gcc.dg/tree-ssa/pr100794.c       |  20
-rw-r--r--  gcc/testsuite/gcc.target/i386/pr100637-5b.c    |  25
-rw-r--r--  gcc/testsuite/gcc.target/i386/pr100637-5w.c    |  25
-rw-r--r--  gcc/testsuite/gcc.target/i386/pr82735-3.c      |   2
-rw-r--r--  gcc/testsuite/gcc.target/i386/pr82735-4.c      |   2
-rw-r--r--  gcc/testsuite/gcc.target/i386/pr82735-5.c      |   2
-rw-r--r--  gcc/testsuite/gfortran.dg/char4-subscript.f90  |   2
-rw-r--r--  gcc/tree-predcom.c                             | 100
42 files changed, 749 insertions, 105 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 1c7dc42..b9001dd 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,123 @@
+2021-06-07 Andrew MacLeod <amacleod@redhat.com>
+
+ * gimple-range-cache.cc (class sbr_sparse_bitmap): New.
+ (sbr_sparse_bitmap::sbr_sparse_bitmap): New.
+ (sbr_sparse_bitmap::bitmap_set_quad): New.
+ (sbr_sparse_bitmap::bitmap_get_quad): New.
+ (sbr_sparse_bitmap::set_bb_range): New.
+ (sbr_sparse_bitmap::get_bb_range): New.
+ (sbr_sparse_bitmap::bb_range_p): New.
+ (block_range_cache::block_range_cache): initialize bitmap obstack.
+ (block_range_cache::~block_range_cache): Destruct obstack.
+ (block_range_cache::set_bb_range): Decide when to utilize the
+ sparse on entry cache.
+ * gimple-range-cache.h (block_range_cache): Add bitmap obstack.
+ * params.opt (-param=evrp-sparse-threshold): New.
+
+2021-06-07 Andrew MacLeod <amacleod@redhat.com>
+
+ * bitmap.c (bitmap_set_aligned_chunk): New.
+ (bitmap_get_aligned_chunk): New.
+ (test_aligned_chunk): New.
+ (bitmap_c_tests): Call test_aligned_chunk.
+ * bitmap.h (bitmap_set_aligned_chunk, bitmap_get_aligned_chunk): New.
+
+2021-06-07 Uroš Bizjak <ubizjak@gmail.com>
+
+ PR target/100637
+ * config/i386/i386-expand.c (ix86_expand_vector_init_duplicate):
+ Handle V4QI mode.
+ (ix86_expand_vector_init_one_nonzero): Ditto.
+ (ix86_expand_vector_init_one_var): Ditto.
+ (ix86_expand_vector_init_general): Ditto.
+ * config/i386/mmx.md (vec_initv4qiqi): New expander.
+
+2021-06-07 Jeff Law <jeffreyalaw@gmail.com>
+
+ * config/h8300/movepush.md: Change most _clobber_flags
+ patterns to instead use <cczn> subst.
+ (movsi_cczn): New pattern with usable CC cases split out.
+ (movsi_h8sx_cczn): Likewise.
+
+2021-06-07 Martin Liska <mliska@suse.cz>
+
+ * common/common-target.def: Split long lines and replace them
+ with '\n\'.
+ * target.def: Likewise.
+ * doc/tm.texi: Re-generated.
+
+2021-06-07 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/100887
+ * fold-const.c (fold_read_from_vector): Return NULL if trying to
+ read from a CONSTRUCTOR with vector type elements.
+
+2021-06-07 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/100898
+ * tree-inline.c (copy_bb): Only use gimple_call_arg_ptr if memcpy
+ should copy any arguments. Don't call gimple_call_num_args
+ on id->call_stmt or call_stmt more than once.
+
+2021-06-07 liuhongt <hongtao.liu@intel.com>
+
+ PR target/100885
+ * config/i386/sse.md (*sse4_1_zero_extendv8qiv8hi2_3): Refine
+ constraints.
+ (<insn>v4siv4di2): Delete constraints for define_expand.
+
+2021-06-07 liuhongt <hongtao.liu@intel.com>
+
+ PR target/82735
+ * config/i386/i386-expand.c (ix86_expand_builtin): Remove
+ assignment of cfun->machine->has_explicit_vzeroupper.
+ * config/i386/i386-features.c
+ (ix86_add_reg_usage_to_vzerouppers): Delete.
+ (ix86_add_reg_usage_to_vzeroupper): Ditto.
+ (rest_of_handle_insert_vzeroupper): Remove
+ ix86_add_reg_usage_to_vzerouppers, add df_analyze at the end
+ of the function.
+ (gate): Remove cfun->machine->has_explicit_vzeroupper.
+ * config/i386/i386-protos.h (ix86_expand_avx_vzeroupper):
+ Declared.
+ * config/i386/i386.c (ix86_insn_callee_abi): New function.
+ (ix86_initialize_callee_abi): Ditto.
+ (ix86_expand_avx_vzeroupper): Ditto.
+ (ix86_hard_regno_call_part_clobbered): Adjust for vzeroupper
+ ABI.
+ (TARGET_INSN_CALLEE_ABI): Define as ix86_insn_callee_abi.
+ (ix86_emit_mode_set): Call ix86_expand_avx_vzeroupper
+ directly.
+ * config/i386/i386.h (struct GTY(()) machine_function): Delete
+ has_explicit_vzeroupper.
+ * config/i386/i386.md (enum unspec): New member
+ UNSPEC_CALLEE_ABI.
+ (ABI_DEFAULT,ABI_VZEROUPPER,ABI_UNKNOWN): New
+ define_constants for insn callee abi index.
+ * config/i386/predicates.md (vzeroupper_pattern): Adjust.
+ * config/i386/sse.md (UNSPECV_VZEROUPPER): Deleted.
+ (avx_vzeroupper): Call ix86_expand_avx_vzeroupper.
+ (*avx_vzeroupper): Rename to ..
+ (avx_vzeroupper_callee_abi): .. this, and adjust pattern as
+ call_insn which has a special vzeroupper ABI.
+ (*avx_vzeroupper_1): Deleted.
+
+2021-06-07 liuhongt <hongtao.liu@intel.com>
+
+ PR target/82735
+ * df-scan.c (df_get_call_refs): When call_insn is a fake call,
+ it won't use stack pointer reg.
+ * final.c (leaf_function_p): When call_insn is a fake call, it
+ won't affect caller as a leaf function.
+ * reg-stack.c (callee_clobbers_any_stack_reg): New.
+ (subst_stack_regs): When call_insn doesn't clobber any stack
+ reg, don't clear the arguments.
+ * rtl.c (shallow_copy_rtx): Don't clear flag used when orig is
+ an insn.
+ * shrink-wrap.c (requires_stack_frame_p): No need for stack
+ frame for a fake call.
+ * rtl.h (FAKE_CALL_P): New macro.
+
2021-06-06 Eric Botcazou <ebotcazou@adacore.com>
* config/sparc/sparc-protos.h (order_regs_for_local_alloc): Rename
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index 0c8a502..504c3fa 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20210607
+20210608
diff --git a/gcc/bitmap.c b/gcc/bitmap.c
index 5a650cd..b915fdf 100644
--- a/gcc/bitmap.c
+++ b/gcc/bitmap.c
@@ -1004,6 +1004,83 @@ bitmap_bit_p (const_bitmap head, int bit)
return (ptr->bits[word_num] >> bit_num) & 1;
}
+/* Set CHUNK_SIZE bits at a time in bitmap HEAD.
+ Store CHUNK_VALUE starting at bits CHUNK * chunk_size.
+ This is the set routine for viewing bitmap as a multi-bit sparse array. */
+
+void
+bitmap_set_aligned_chunk (bitmap head, unsigned int chunk,
+ unsigned int chunk_size, BITMAP_WORD chunk_value)
+{
+ // Ensure chunk size is a power of 2 and fits in BITMAP_WORD.
+ gcc_checking_assert (pow2p_hwi (chunk_size));
+ gcc_checking_assert (chunk_size < (sizeof (BITMAP_WORD) * CHAR_BIT));
+
+ // Ensure chunk_value is within range of chunk_size bits.
+ BITMAP_WORD max_value = (1 << chunk_size) - 1;
+ gcc_checking_assert (chunk_value <= max_value);
+
+ unsigned bit = chunk * chunk_size;
+ unsigned indx = bit / BITMAP_ELEMENT_ALL_BITS;
+ bitmap_element *ptr;
+ if (!head->tree_form)
+ ptr = bitmap_list_find_element (head, indx);
+ else
+ ptr = bitmap_tree_find_element (head, indx);
+ unsigned word_num = bit / BITMAP_WORD_BITS % BITMAP_ELEMENT_WORDS;
+ unsigned bit_num = bit % BITMAP_WORD_BITS;
+ BITMAP_WORD bit_val = chunk_value << bit_num;
+ BITMAP_WORD mask = ~(max_value << bit_num);
+
+ if (ptr != 0)
+ {
+ ptr->bits[word_num] &= mask;
+ ptr->bits[word_num] |= bit_val;
+ return;
+ }
+
+ ptr = bitmap_element_allocate (head);
+ ptr->indx = bit / BITMAP_ELEMENT_ALL_BITS;
+ ptr->bits[word_num] = bit_val;
+ if (!head->tree_form)
+ bitmap_list_link_element (head, ptr);
+ else
+ bitmap_tree_link_element (head, ptr);
+}
+
+/* This is the get routine for viewing bitmap as a multi-bit sparse array.
+ Return a set of CHUNK_SIZE consecutive bits from HEAD, starting at bit
+ CHUNK * chunk_size. */
+
+BITMAP_WORD
+bitmap_get_aligned_chunk (const_bitmap head, unsigned int chunk,
+ unsigned int chunk_size)
+{
+ // Ensure chunk size is a power of 2, fits in BITMAP_WORD and is in range.
+ gcc_checking_assert (pow2p_hwi (chunk_size));
+ gcc_checking_assert (chunk_size < (sizeof (BITMAP_WORD) * CHAR_BIT));
+
+ BITMAP_WORD max_value = (1 << chunk_size) - 1;
+ unsigned bit = chunk * chunk_size;
+ unsigned int indx = bit / BITMAP_ELEMENT_ALL_BITS;
+ const bitmap_element *ptr;
+ unsigned bit_num;
+ unsigned word_num;
+
+ if (!head->tree_form)
+ ptr = bitmap_list_find_element (const_cast<bitmap> (head), indx);
+ else
+ ptr = bitmap_tree_find_element (const_cast<bitmap> (head), indx);
+ if (ptr == 0)
+ return 0;
+
+ bit_num = bit % BITMAP_WORD_BITS;
+ word_num = bit / BITMAP_WORD_BITS % BITMAP_ELEMENT_WORDS;
+
+ // Return the requested CHUNK_SIZE bits.
+ return (ptr->bits[word_num] >> bit_num) & max_value;
+}
+
#if GCC_VERSION < 3400
/* Table of number of set bits in a character, indexed by value of char. */
static const unsigned char popcount_table[] =
@@ -2857,6 +2934,33 @@ test_bitmap_single_bit_set_p ()
ASSERT_EQ (1066, bitmap_first_set_bit (b));
}
+/* Verify accessing aligned bit chunks works as expected. */
+
+static void
+test_aligned_chunk (unsigned num_bits)
+{
+ bitmap b = bitmap_gc_alloc ();
+ int limit = 1 << num_bits;
+
+ int index = 3;
+ for (int x = 0; x < limit; x++)
+ {
+ bitmap_set_aligned_chunk (b, index, num_bits, (BITMAP_WORD) x);
+ ASSERT_TRUE ((int) bitmap_get_aligned_chunk (b, index, num_bits) == x);
+ ASSERT_TRUE ((int) bitmap_get_aligned_chunk (b, index + 1,
+ num_bits) == 0);
+ ASSERT_TRUE ((int) bitmap_get_aligned_chunk (b, index - 1,
+ num_bits) == 0);
+ index += 3;
+ }
+ index = 3;
+ for (int x = 0; x < limit ; x++)
+ {
+ ASSERT_TRUE ((int) bitmap_get_aligned_chunk (b, index, num_bits) == x);
+ index += 3;
+ }
+}
+
/* Run all of the selftests within this file. */
void
@@ -2867,6 +2971,10 @@ bitmap_c_tests ()
test_clear_bit_in_middle ();
test_copying ();
test_bitmap_single_bit_set_p ();
+ /* Test 2, 4 and 8 bit aligned chunks. */
+ test_aligned_chunk (2);
+ test_aligned_chunk (4);
+ test_aligned_chunk (8);
}
} // namespace selftest
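
Note on the bitmap.c additions above: bitmap_set_aligned_chunk and bitmap_get_aligned_chunk view a bitmap as a sparse array of fixed-width fields. The following is a minimal standalone sketch of the chunk arithmetic only, assuming CHUNK_SIZE is a power of two so a field never straddles a word; it uses a single uint64_t in place of GCC's bitmap_element machinery, and set_chunk/get_chunk are hypothetical names, not the GCC API.

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  /* Store VALUE into the CHUNK-th CHUNK_SIZE-bit field of WORD.  The real
     routines index across bitmap elements; this toy covers one word only.  */
  static void
  set_chunk (uint64_t &word, unsigned chunk, unsigned chunk_size, uint64_t value)
  {
    uint64_t max_value = ((uint64_t) 1 << chunk_size) - 1;
    assert (value <= max_value);
    unsigned bit = chunk * chunk_size;
    word &= ~(max_value << bit);   /* Clear the old field.  */
    word |= value << bit;          /* Store the new value.  */
  }

  /* Return the CHUNK-th CHUNK_SIZE-bit field of WORD.  */
  static uint64_t
  get_chunk (uint64_t word, unsigned chunk, unsigned chunk_size)
  {
    uint64_t max_value = ((uint64_t) 1 << chunk_size) - 1;
    return (word >> (chunk * chunk_size)) & max_value;
  }

  int
  main ()
  {
    uint64_t w = 0;
    set_chunk (w, 3, 4, 0xA);   /* Quad 3 holds 10.  */
    set_chunk (w, 0, 4, 0x5);   /* Quad 0 holds 5.  */
    printf ("%llu %llu %llu\n",
            (unsigned long long) get_chunk (w, 3, 4),
            (unsigned long long) get_chunk (w, 0, 4),
            (unsigned long long) get_chunk (w, 1, 4));   /* Prints: 10 5 0  */
    return 0;
  }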
diff --git a/gcc/bitmap.h b/gcc/bitmap.h
index 2613855..0846f79 100644
--- a/gcc/bitmap.h
+++ b/gcc/bitmap.h
@@ -438,6 +438,13 @@ extern bool bitmap_set_bit (bitmap, int);
/* Return true if a bit is set in a bitmap. */
extern int bitmap_bit_p (const_bitmap, int);
+/* Set and get multiple bit values in a sparse bitmap. This allows a bitmap to
+ function as a sparse array of bit patterns, where each pattern is a
+ power-of-2 number of bits wide. This is more efficient than performing
+ the accesses as multiple individual bit operations. */
+void bitmap_set_aligned_chunk (bitmap, unsigned int, unsigned int, BITMAP_WORD);
+BITMAP_WORD bitmap_get_aligned_chunk (const_bitmap, unsigned int, unsigned int);
+
/* Debug functions to print a bitmap. */
extern void debug_bitmap (const_bitmap);
extern void debug_bitmap_file (FILE *, const_bitmap);
diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog
index 3938ef1..8c9b355 100644
--- a/gcc/c-family/ChangeLog
+++ b/gcc/c-family/ChangeLog
@@ -1,3 +1,8 @@
+2021-06-07 Martin Liska <mliska@suse.cz>
+
+ * c-target.def: Split long lines and replace them
+ with '\n\'.
+
2021-06-04 Martin Sebor <msebor@redhat.com>
PR c/100783
diff --git a/gcc/c/ChangeLog b/gcc/c/ChangeLog
index ff665e3..27f8838 100644
--- a/gcc/c/ChangeLog
+++ b/gcc/c/ChangeLog
@@ -1,3 +1,9 @@
+2021-06-07 Eric Botcazou <ebotcazou@adacore.com>
+
+ PR c/100920
+ * c-typeck.c (convert_for_assignment): Test fndecl_built_in_p to
+ spot built-in functions.
+
2021-06-06 Jakub Jelinek <jakub@redhat.com>
PR c/100902
diff --git a/gcc/c/c-typeck.c b/gcc/c/c-typeck.c
index daa2e12..845d50f 100644
--- a/gcc/c/c-typeck.c
+++ b/gcc/c/c-typeck.c
@@ -7303,7 +7303,7 @@ convert_for_assignment (location_t location, location_t expr_loc, tree type,
/* Do not warn for built-in functions, for example memcpy, since we
control how they behave and they can be useful in this area. */
if (TREE_CODE (rname) != FUNCTION_DECL
- || !DECL_IS_UNDECLARED_BUILTIN (rname))
+ || !fndecl_built_in_p (rname))
warning_at (location, OPT_Wscalar_storage_order,
"passing argument %d of %qE from incompatible "
"scalar storage order", parmnum, rname);
diff --git a/gcc/config/h8300/movepush.md b/gcc/config/h8300/movepush.md
index 9ce00fb..ada4ddd 100644
--- a/gcc/config/h8300/movepush.md
+++ b/gcc/config/h8300/movepush.md
@@ -13,7 +13,7 @@
[(parallel [(set (match_dup 0) (match_dup 1))
(clobber (reg:CC CC_REG))])])
-(define_insn "*movqi_clobber_flags"
+(define_insn "*movqi<cczn>"
[(set (match_operand:QI 0 "general_operand_dst" "=r,r ,<,r,r,m")
(match_operand:QI 1 "general_operand_src" " I,r>,r,n,m,r"))
(clobber (reg:CC CC_REG))]
@@ -36,7 +36,7 @@
[(parallel [(set (match_dup 0) (match_dup 1))
(clobber (reg:CC CC_REG))])])
-(define_insn "*movqi_h8sx_clobber_flags"
+(define_insn "*movqi_h8sx<cczn>"
[(set (match_operand:QI 0 "general_operand_dst" "=Z,rQ")
(match_operand:QI 1 "general_operand_src" "P4>X,rQi"))
(clobber (reg:CC CC_REG))]
@@ -74,7 +74,7 @@
(clobber (reg:CC CC_REG))])])
-(define_insn "movstrictqi_clobber_flags"
+(define_insn "*movstrictqi<cczn>"
[(set (strict_low_part (match_operand:QI 0 "general_operand_dst" "+r,r"))
(match_operand:QI 1 "general_operand_src" "I,rmi>"))
(clobber (reg:CC CC_REG))]
@@ -97,7 +97,7 @@
[(parallel [(set (match_dup 0) (match_dup 1))
(clobber (reg:CC CC_REG))])])
-(define_insn "*movhi_clobber_flags"
+(define_insn "*movhi<cczn>"
[(set (match_operand:HI 0 "general_operand_dst" "=r,r,<,r,r,m")
(match_operand:HI 1 "general_operand_src" "I,r>,r,i,m,r"))
(clobber (reg:CC CC_REG))]
@@ -121,7 +121,7 @@
[(parallel [(set (match_dup 0) (match_dup 1))
(clobber (reg:CC CC_REG))])])
-(define_insn "*movhi_h8sx_clobber_flags"
+(define_insn "*movhi_h8sx<cczn>"
[(set (match_operand:HI 0 "general_operand_dst" "=r,r,Z,Q,rQ")
(match_operand:HI 1 "general_operand_src" "I,P3>X,P4>X,IP8>X,rQi"))
(clobber (reg:CC CC_REG))]
@@ -144,7 +144,7 @@
[(parallel [(set (strict_low_part (match_dup 0)) (match_dup 1))
(clobber (reg:CC CC_REG))])])
-(define_insn "movstricthi_clobber_flags"
+(define_insn "*movstricthi<cczn>"
[(set (strict_low_part (match_operand:HI 0 "general_operand_dst" "+r,r,r"))
(match_operand:HI 1 "general_operand_src" "I,P3>X,rmi"))
(clobber (reg:CC CC_REG))]
@@ -168,8 +168,8 @@
(clobber (reg:CC CC_REG))])])
(define_insn "*movsi_clobber_flags"
- [(set (match_operand:SI 0 "general_operand_dst" "=r,r,r,<,r,r,m,*a,*a,r")
- (match_operand:SI 1 "general_operand_src" "I,r,i,r,>,m,r,I,r,*a"))
+ [(set (match_operand:SI 0 "general_operand_dst" "=r,r,r,<,r,r,m,*a,*a, r")
+ (match_operand:SI 1 "general_operand_src" " I,r,i,r,>,m,r, I, r,*a"))
(clobber (reg:CC CC_REG))]
"(TARGET_H8300S || TARGET_H8300H) && !TARGET_H8300SX
&& h8300_move_ok (operands[0], operands[1])"
@@ -235,6 +235,25 @@
}
[(set (attr "length") (symbol_ref "compute_mov_length (operands)"))])
+(define_insn "*movsi_cczn"
+ [(set (reg:CCZN CC_REG)
+ (compare:CCZN
+ (match_operand:SI 1 "general_operand_src" " I,r,i,r,>,m,r")
+ (const_int 0)))
+ (set (match_operand:SI 0 "general_operand_dst" "=r,r,r,<,r,r,m")
+ (match_dup 1))]
+ "(TARGET_H8300S || TARGET_H8300H) && !TARGET_H8300SX
+ && h8300_move_ok (operands[0], operands[1])"
+ "@
+ sub.l %S0,%S0
+ mov.l %S1,%S0
+ mov.l %S1,%S0
+ mov.l %S1,%S0
+ mov.l %S1,%S0
+ mov.l %S1,%S0
+ mov.l %S1,%S0"
+ [(set (attr "length") (symbol_ref "compute_mov_length (operands)"))])
+
(define_insn_and_split "*movsi_h8sx"
[(set (match_operand:SI 0 "general_operand_dst" "=r,r,Q,rQ,*a,*a,r")
(match_operand:SI 1 "general_operand_src" "I,P3>X,IP8>X,rQi,I,r,*a"))]
@@ -260,6 +279,22 @@
[(set_attr "length_table" "*,*,short_immediate,movl,*,*,*")
(set_attr "length" "2,2,*,*,2,6,4")])
+(define_insn "*movsi_h8sx_ccnz"
+ [(set (reg:CCZN CC_REG)
+ (compare:CCZN
+ (match_operand:SI 1 "general_operand_src" "I,P3>X,IP8>X,rQi")
+ (const_int 0)))
+ (set (match_operand:SI 0 "general_operand_dst" "=r,r,Q,rQ")
+ (match_dup 1))]
+ "TARGET_H8300SX"
+ "@
+ sub.l %S0,%S0
+ mov.l %S1:3,%S0
+ mov.l %S1,%S0
+ mov.l %S1,%S0"
+ [(set_attr "length_table" "*,*,short_immediate,movl")
+ (set_attr "length" "2,2,*,*")])
+
(define_insn_and_split "*movsf_h8sx"
[(set (match_operand:SF 0 "general_operand_dst" "=r,rQ")
(match_operand:SF 1 "general_operand_src" "G,rQi"))]
@@ -326,7 +361,7 @@
(match_dup 0))
(clobber (reg:CC CC_REG))])])
-(define_insn "*push1_<QHI:mode>_clobber_flags"
+(define_insn "*push1_<QHI:mode><cczn>"
[(set (mem:QHI
(pre_modify:P
(reg:P SP_REG)
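
Note on the movepush.md changes above: the <cczn> subst and the new *movsi_cczn / *movsi_h8sx_ccnz patterns record that the move itself sets the Z/N condition flags, so a following compare against zero can be dropped when those flags suffice. A hypothetical C fragment of the shape such patterns target on H8/300 (illustrative only, not part of the patch):

  /* The mov.l that loads *p sets Z/N as a side effect; with a CCZN move
     pattern the test against zero can reuse those flags instead of a
     separate compare instruction.  */
  long
  load_and_test (long *p)
  {
    long v = *p;
    if (v == 0)
      return 1;
    return v + 2;
  }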
diff --git a/gcc/config/i386/i386-expand.c b/gcc/config/i386/i386-expand.c
index fb0676f..c3ce21b 100644
--- a/gcc/config/i386/i386-expand.c
+++ b/gcc/config/i386/i386-expand.c
@@ -13733,6 +13733,7 @@ ix86_expand_vector_init_duplicate (bool mmx_ok, machine_mode mode,
return false;
case E_V8QImode:
+ case E_V4QImode:
if (!mmx_ok)
return false;
goto widen;
@@ -13878,6 +13879,9 @@ ix86_expand_vector_init_one_nonzero (bool mmx_ok, machine_mode mode,
case E_V4HImode:
use_vector_set = TARGET_SSE || TARGET_3DNOW_A;
break;
+ case E_V4QImode:
+ use_vector_set = TARGET_SSE4_1;
+ break;
case E_V32QImode:
case E_V16HImode:
use_vector_set = TARGET_AVX;
@@ -14086,6 +14090,10 @@ ix86_expand_vector_init_one_var (bool mmx_ok, machine_mode mode,
break;
wmode = V4HImode;
goto widen;
+ case E_V4QImode:
+ if (TARGET_SSE4_1)
+ break;
+ wmode = V2HImode;
widen:
/* There's no way to set one QImode entry easily. Combine
the variable value with its adjacent constant value, and
@@ -14535,6 +14543,7 @@ quarter:
case E_V8QImode:
case E_V2HImode:
+ case E_V4QImode:
break;
default:
diff --git a/gcc/config/i386/mmx.md b/gcc/config/i386/mmx.md
index c3fd280..0a17a54 100644
--- a/gcc/config/i386/mmx.md
+++ b/gcc/config/i386/mmx.md
@@ -3369,7 +3369,17 @@
(match_operand 1)]
"TARGET_SSE2"
{
- ix86_expand_vector_init (false, operands[0],
+ ix86_expand_vector_init (TARGET_MMX_WITH_SSE, operands[0],
+ operands[1]);
+ DONE;
+})
+
+(define_expand "vec_initv4qiqi"
+ [(match_operand:V4QI 0 "register_operand")
+ (match_operand 1)]
+ "TARGET_SSE2"
+{
+ ix86_expand_vector_init (TARGET_MMX_WITH_SSE, operands[0],
operands[1]);
DONE;
})
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index 7542375..225b8917 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,10 @@
+2021-06-07 Patrick Palka <ppalka@redhat.com>
+
+ PR c++/100918
+ * parser.c (cp_parser_lookup_name): Check access of the lookup
+ result before we potentially adjust an injected-class-name to
+ its TEMPLATE_DECL.
+
2021-06-06 Jakub Jelinek <jakub@redhat.com>
PR c/100902
diff --git a/gcc/cp/init.c b/gcc/cp/init.c
index b112328..1b161d5 100644
--- a/gcc/cp/init.c
+++ b/gcc/cp/init.c
@@ -2214,7 +2214,7 @@ build_offset_ref (tree type, tree member, bool address_p,
if (!ok)
return error_mark_node;
if (DECL_STATIC_FUNCTION_P (t))
- return t;
+ return member;
member = t;
}
else
diff --git a/gcc/cp/lambda.c b/gcc/cp/lambda.c
index 4a1e090..2e9d38b 100644
--- a/gcc/cp/lambda.c
+++ b/gcc/cp/lambda.c
@@ -1338,9 +1338,9 @@ is_lambda_ignored_entity (tree val)
/* None of the lookups that use qualify_lookup want the op() from the
lambda; they want the one from the enclosing class. */
- val = OVL_FIRST (val);
- if (LAMBDA_FUNCTION_P (val))
- return true;
+ if (tree fns = maybe_get_fns (val))
+ if (LAMBDA_FUNCTION_P (OVL_FIRST (fns)))
+ return true;
return false;
}
diff --git a/gcc/cp/module.cc b/gcc/cp/module.cc
index f0fb014..f259515 100644
--- a/gcc/cp/module.cc
+++ b/gcc/cp/module.cc
@@ -2820,12 +2820,16 @@ struct merge_key {
struct duplicate_hash : nodel_ptr_hash<tree_node>
{
+#if 0
+ /* This breaks variadic bases in the xtreme_header tests. Since ::equal is
+ the default pointer_hash::equal, let's use the default hash as well. */
inline static hashval_t hash (value_type decl)
{
if (TREE_CODE (decl) == TREE_BINFO)
decl = TYPE_NAME (BINFO_TYPE (decl));
return hashval_t (DECL_UID (decl));
}
+#endif
};
/* Hashmap of merged duplicates. Usually decls, but can contain
@@ -8908,7 +8912,7 @@ trees_in::tree_value ()
dump (dumper::MERGE)
&& dump ("Deduping binfo %N[%u]", type, ix);
existing = TYPE_BINFO (type);
- while (existing && ix)
+ while (existing && ix--)
existing = TREE_CHAIN (existing);
if (existing)
register_duplicate (t, existing);
diff --git a/gcc/cp/name-lookup.c b/gcc/cp/name-lookup.c
index 241ad2b..1be5f3d 100644
--- a/gcc/cp/name-lookup.c
+++ b/gcc/cp/name-lookup.c
@@ -5236,7 +5236,7 @@ set_inherited_value_binding_p (cxx_binding *binding, tree decl,
{
tree context;
- if (TREE_CODE (decl) == OVERLOAD)
+ if (is_overloaded_fn (decl))
context = ovl_scope (decl);
else
{
@@ -5338,28 +5338,6 @@ get_class_binding (tree name, cp_binding_level *scope)
/*protect=*/2, /*want_type=*/false,
tf_warning_or_error);
- if (value_binding
- && (TREE_CODE (value_binding) == TYPE_DECL
- || DECL_CLASS_TEMPLATE_P (value_binding)
- || (TREE_CODE (value_binding) == TREE_LIST
- && TREE_TYPE (value_binding) == error_mark_node
- && (TREE_CODE (TREE_VALUE (value_binding))
- == TYPE_DECL))))
- /* We found a type binding, even when looking for a non-type
- binding. This means that we already processed this binding
- above. */
- ;
- else if (value_binding)
- {
- if (TREE_CODE (value_binding) == TREE_LIST
- && TREE_TYPE (value_binding) == error_mark_node)
- /* NAME is ambiguous. */
- ;
- else if (BASELINK_P (value_binding))
- /* NAME is some overloaded functions. */
- value_binding = BASELINK_FUNCTIONS (value_binding);
- }
-
/* If we found either a type binding or a value binding, create a
new binding object. */
if (type_binding || value_binding)
diff --git a/gcc/cp/parser.c b/gcc/cp/parser.c
index 0649bf9..24f248a 100644
--- a/gcc/cp/parser.c
+++ b/gcc/cp/parser.c
@@ -29505,6 +29505,19 @@ cp_parser_lookup_name (cp_parser *parser, tree name,
if (!decl || decl == error_mark_node)
return error_mark_node;
+ /* If we have resolved the name of a member declaration, check to
+ see if the declaration is accessible. When the name resolves to
+ set of overloaded functions, accessibility is checked when
+ overload resolution is done. If we have a TREE_LIST, then the lookup
+ is either ambiguous or it found multiple injected-class-names, the
+ accessibility of which is trivially satisfied.
+
+ During an explicit instantiation, access is not checked at all,
+ as per [temp.explicit]. */
+ if (DECL_P (decl))
+ check_accessibility_of_qualified_id (decl, object_type, parser->scope,
+ tf_warning_or_error);
+
/* Pull out the template from an injected-class-name (or multiple). */
if (is_template)
decl = maybe_get_template_decl_from_type_decl (decl);
@@ -29531,17 +29544,6 @@ cp_parser_lookup_name (cp_parser *parser, tree name,
|| TREE_CODE (decl) == UNBOUND_CLASS_TEMPLATE
|| BASELINK_P (decl));
- /* If we have resolved the name of a member declaration, check to
- see if the declaration is accessible. When the name resolves to
- set of overloaded functions, accessibility is checked when
- overload resolution is done.
-
- During an explicit instantiation, access is not checked at all,
- as per [temp.explicit]. */
- if (DECL_P (decl))
- check_accessibility_of_qualified_id (decl, object_type, parser->scope,
- tf_warning_or_error);
-
maybe_record_typedef_use (decl);
return cp_expr (decl, name_location);
diff --git a/gcc/cp/pt.c b/gcc/cp/pt.c
index 2ae886d..b0155a9 100644
--- a/gcc/cp/pt.c
+++ b/gcc/cp/pt.c
@@ -9597,6 +9597,7 @@ lookup_template_function (tree fns, tree arglist)
if (BASELINK_P (fns))
{
+ fns = copy_node (fns);
BASELINK_FUNCTIONS (fns) = build2 (TEMPLATE_ID_EXPR,
unknown_type_node,
BASELINK_FUNCTIONS (fns),
diff --git a/gcc/cp/semantics.c b/gcc/cp/semantics.c
index d08c1dd..f506a23 100644
--- a/gcc/cp/semantics.c
+++ b/gcc/cp/semantics.c
@@ -3663,8 +3663,10 @@ baselink_for_fns (tree fns)
cl = currently_open_derived_class (scope);
if (!cl)
cl = scope;
- cl = TYPE_BINFO (cl);
- return build_baselink (cl, cl, fns, /*optype=*/NULL_TREE);
+ tree access_path = TYPE_BINFO (cl);
+ tree conv_path = (cl == scope ? access_path
+ : lookup_base (cl, scope, ba_any, NULL, tf_none));
+ return build_baselink (conv_path, access_path, fns, /*optype=*/NULL_TREE);
}
/* Returns true iff DECL is a variable from a function outside
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index d725e76..0e34962 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -14170,6 +14170,9 @@ we may be able to devirtualize speculatively.
The maximum number of assertions to add along the default edge of a switch
statement during VRP.
+@item evrp-sparse-threshold
+Maximum number of basic blocks before EVRP uses a sparse cache.
+
@item evrp-mode
Specifies the mode Early VRP should operate in.
diff --git a/gcc/fortran/intrinsic.texi b/gcc/fortran/intrinsic.texi
index 81866d6..0a52b49 100644
--- a/gcc/fortran/intrinsic.texi
+++ b/gcc/fortran/intrinsic.texi
@@ -486,7 +486,7 @@ Inquiry function
@item @emph{Arguments}:
@multitable @columnfractions .15 .70
@item @var{NAME} @tab Scalar @code{CHARACTER} of default kind with the
-file name. Tailing blank are ignored unless the character @code{achar(0)}
+file name. Trailing blanks are ignored unless the character @code{achar(0)}
is present, then all characters up to and excluding @code{achar(0)} are
used as file name.
@item @var{MODE} @tab Scalar @code{CHARACTER} of default kind with the
diff --git a/gcc/fortran/trans-expr.c b/gcc/fortran/trans-expr.c
index e3bc886..de406ad 100644
--- a/gcc/fortran/trans-expr.c
+++ b/gcc/fortran/trans-expr.c
@@ -9513,7 +9513,7 @@ gfc_trans_pointer_assignment (gfc_expr * expr1, gfc_expr * expr2)
{
gfc_add_data_component (expr2);
/* The following is required as gfc_add_data_component doesn't
- update ts.type if there is a tailing REF_ARRAY. */
+ update ts.type if there is a trailing REF_ARRAY. */
expr2->ts.type = BT_DERIVED;
}
diff --git a/gcc/genautomata.c b/gcc/genautomata.c
index e6295e6..6bbfc68 100644
--- a/gcc/genautomata.c
+++ b/gcc/genautomata.c
@@ -6787,7 +6787,7 @@ create_automata (void)
finish_regexp_representation calls. */
/* This recursive function forms string representation of regexp
- (without tailing '\0'). */
+ (without trailing '\0'). */
static void
form_regexp (regexp_t regexp)
{
diff --git a/gcc/gimple-range-cache.cc b/gcc/gimple-range-cache.cc
index c58acf4..249515f 100644
--- a/gcc/gimple-range-cache.cc
+++ b/gcc/gimple-range-cache.cc
@@ -235,12 +235,140 @@ sbr_vector::bb_range_p (const basic_block bb)
return m_tab[bb->index] != NULL;
}
+// This class implements the on entry cache via a sparse bitmap.
+// It uses the quad bit routines to access 4 bits at a time.
+// A value of 0 (the default) means there is no entry, and a value of
+// 1 thru SBR_NUM represents an element in the m_range vector.
+// Varying is given the first value (1) and pre-cached.
+// SBR_NUM + 1 represents the value of UNDEFINED, and is never stored.
+// SBR_NUM is the number of values that can be cached.
+// Indexes are 1..SBR_NUM and are stored locally at m_range[0..SBR_NUM-1]
+
+#define SBR_NUM 14
+#define SBR_UNDEF SBR_NUM + 1
+#define SBR_VARYING 1
+
+class sbr_sparse_bitmap : public ssa_block_ranges
+{
+public:
+ sbr_sparse_bitmap (tree t, irange_allocator *allocator, bitmap_obstack *bm);
+ virtual void set_bb_range (const basic_block bb, const irange &r) OVERRIDE;
+ virtual bool get_bb_range (irange &r, const basic_block bb) OVERRIDE;
+ virtual bool bb_range_p (const basic_block bb) OVERRIDE;
+private:
+ void bitmap_set_quad (bitmap head, int quad, int quad_value);
+ int bitmap_get_quad (const_bitmap head, int quad);
+ irange_allocator *m_irange_allocator;
+ irange *m_range[SBR_NUM];
+ bitmap bitvec;
+ tree m_type;
+};
+
+// Initialize a block cache for an ssa_name of type T.
+
+sbr_sparse_bitmap::sbr_sparse_bitmap (tree t, irange_allocator *allocator,
+ bitmap_obstack *bm)
+{
+ gcc_checking_assert (TYPE_P (t));
+ m_type = t;
+ bitvec = BITMAP_ALLOC (bm);
+ m_irange_allocator = allocator;
+ // Pre-cache varying.
+ m_range[0] = m_irange_allocator->allocate (2);
+ m_range[0]->set_varying (t);
+ // Pre-cache zero and non-zero values for pointers.
+ if (POINTER_TYPE_P (t))
+ {
+ m_range[1] = m_irange_allocator->allocate (2);
+ m_range[1]->set_nonzero (t);
+ m_range[2] = m_irange_allocator->allocate (2);
+ m_range[2]->set_zero (t);
+ }
+ else
+ m_range[1] = m_range[2] = NULL;
+ // Clear SBR_NUM entries.
+ for (int x = 3; x < SBR_NUM; x++)
+ m_range[x] = 0;
+}
+
+// Set 4 bit values in a sparse bitmap. This allows a bitmap to
+// function as a sparse array of 4 bit values.
+// QUAD is the index, QUAD_VALUE is the 4 bit value to set.
+
+inline void
+sbr_sparse_bitmap::bitmap_set_quad (bitmap head, int quad, int quad_value)
+{
+ bitmap_set_aligned_chunk (head, quad, 4, (BITMAP_WORD) quad_value);
+}
+
+// Get a 4 bit value from a sparse bitmap. This allows a bitmap to
+// function as a sparse array of 4 bit values.
+// QUAD is the index.
+inline int
+sbr_sparse_bitmap::bitmap_get_quad (const_bitmap head, int quad)
+{
+ return (int) bitmap_get_aligned_chunk (head, quad, 4);
+}
+
+// Set the range on entry to basic block BB to R.
+
+void
+sbr_sparse_bitmap::set_bb_range (const basic_block bb, const irange &r)
+{
+ if (r.undefined_p ())
+ {
+ bitmap_set_quad (bitvec, bb->index, SBR_UNDEF);
+ return;
+ }
+
+ // Loop thru the values to see if R is already present.
+ for (int x = 0; x < SBR_NUM; x++)
+ if (!m_range[x] || r == *(m_range[x]))
+ {
+ if (!m_range[x])
+ m_range[x] = m_irange_allocator->allocate (r);
+ bitmap_set_quad (bitvec, bb->index, x + 1);
+ return;
+ }
+ // All values are taken, default to VARYING.
+ bitmap_set_quad (bitvec, bb->index, SBR_VARYING);
+ return;
+}
+
+// Return the range associated with block BB in R. Return false if
+// there is no range.
+
+bool
+sbr_sparse_bitmap::get_bb_range (irange &r, const basic_block bb)
+{
+ int value = bitmap_get_quad (bitvec, bb->index);
+
+ if (!value)
+ return false;
+
+ gcc_checking_assert (value <= SBR_UNDEF);
+ if (value == SBR_UNDEF)
+ r.set_undefined ();
+ else
+ r = *(m_range[value - 1]);
+ return true;
+}
+
+// Return true if a range is present.
+
+bool
+sbr_sparse_bitmap::bb_range_p (const basic_block bb)
+{
+ return (bitmap_get_quad (bitvec, bb->index) != 0);
+}
+
// -------------------------------------------------------------------------
// Initialize the block cache.
block_range_cache::block_range_cache ()
{
+ bitmap_obstack_initialize (&m_bitmaps);
m_ssa_ranges.create (0);
m_ssa_ranges.safe_grow_cleared (num_ssa_names);
m_irange_allocator = new irange_allocator;
@@ -253,6 +381,7 @@ block_range_cache::~block_range_cache ()
delete m_irange_allocator;
// Release the vector itself.
m_ssa_ranges.release ();
+ bitmap_obstack_release (&m_bitmaps);
}
// Set the range for NAME on entry to block BB to R.
@@ -268,9 +397,21 @@ block_range_cache::set_bb_range (tree name, const basic_block bb,
if (!m_ssa_ranges[v])
{
- void *r = m_irange_allocator->get_memory (sizeof (sbr_vector));
- m_ssa_ranges[v] = new (r) sbr_vector (TREE_TYPE (name),
- m_irange_allocator);
+ // Use sparse representation if there are too many basic blocks.
+ if (last_basic_block_for_fn (cfun) > param_evrp_sparse_threshold)
+ {
+ void *r = m_irange_allocator->get_memory (sizeof (sbr_sparse_bitmap));
+ m_ssa_ranges[v] = new (r) sbr_sparse_bitmap (TREE_TYPE (name),
+ m_irange_allocator,
+ &m_bitmaps);
+ }
+ else
+ {
+ // Otherwise use the default vector implementation.
+ void *r = m_irange_allocator->get_memory (sizeof (sbr_vector));
+ m_ssa_ranges[v] = new (r) sbr_vector (TREE_TYPE (name),
+ m_irange_allocator);
+ }
}
m_ssa_ranges[v]->set_bb_range (bb, r);
}
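
Note on the sbr_sparse_bitmap cache above: each basic block stores only a 4-bit tag, where 0 means no on-entry range, 1..SBR_NUM is an index into a small table of interned ranges (varying pre-cached at index 1), SBR_NUM + 1 means undefined, and a full table degrades the entry to varying. Which representation is used is decided per SSA name against the new --param=evrp-sparse-threshold (default 800). Below is a minimal standalone sketch of that interning scheme only; std::string stands in for irange, and SparseCache and its members are hypothetical names, not GCC code.

  #include <array>
  #include <cstdio>
  #include <map>
  #include <string>

  struct SparseCache
  {
    static const int SBR_NUM = 14;            /* Distinct cacheable ranges.  */
    static const int SBR_UNDEF = SBR_NUM + 1;
    static const int SBR_VARYING = 1;

    std::array<std::string, SBR_NUM> values;  /* values[0] pre-set to varying.  */
    std::map<int, int> tag;                   /* Block index -> 4-bit tag.  */

    SparseCache () { values[0] = "VARYING"; }

    void set (int bb, const std::string &r)
    {
      if (r.empty ())                         /* Stands in for undefined_p ().  */
        { tag[bb] = SBR_UNDEF; return; }
      for (int x = 0; x < SBR_NUM; x++)
        if (values[x].empty () || values[x] == r)
          {
            if (values[x].empty ())
              values[x] = r;                  /* Intern a new range.  */
            tag[bb] = x + 1;                  /* Tags are 1-based.  */
            return;
          }
      tag[bb] = SBR_VARYING;                  /* Table full: degrade to varying.  */
    }

    std::string get (int bb)
    {
      std::map<int, int>::iterator it = tag.find (bb);
      if (it == tag.end ())
        return "<none>";
      if (it->second == SBR_UNDEF)
        return "<undefined>";
      return values[it->second - 1];
    }
  };

  int
  main ()
  {
    SparseCache c;
    c.set (7, "[1, 10]");
    c.set (9, "");
    printf ("%s %s %s\n", c.get (7).c_str (), c.get (9).c_str (),
            c.get (3).c_str ());              /* Prints: [1, 10] <undefined> <none>  */
    return 0;
  }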
diff --git a/gcc/gimple-range-cache.h b/gcc/gimple-range-cache.h
index 4af461d..ce4449a 100644
--- a/gcc/gimple-range-cache.h
+++ b/gcc/gimple-range-cache.h
@@ -61,6 +61,7 @@ private:
ssa_block_ranges &get_block_ranges (tree name);
ssa_block_ranges *query_block_ranges (tree name);
irange_allocator *m_irange_allocator;
+ bitmap_obstack m_bitmaps;
};
// This global cache is used with the range engine as markers for what
diff --git a/gcc/objc/ChangeLog b/gcc/objc/ChangeLog
index 7ae02af..6a3d66e 100644
--- a/gcc/objc/ChangeLog
+++ b/gcc/objc/ChangeLog
@@ -1,3 +1,8 @@
+2021-06-07 Bernd Edlinger <bernd.edlinger@softing.com>
+
+ * Make-lang.in (cc1obj-checksum.c): For stage-final re-use
+ the checksum from the previous stage.
+
2021-05-20 Indu Bhagat <indu.bhagat@oracle.com>
* objc-act.c (synth_module_prologue): Use uint32_t instead of enum
diff --git a/gcc/objcp/ChangeLog b/gcc/objcp/ChangeLog
index 44b412b..f3ef33a 100644
--- a/gcc/objcp/ChangeLog
+++ b/gcc/objcp/ChangeLog
@@ -1,3 +1,8 @@
+2021-06-07 Bernd Edlinger <bernd.edlinger@softing.com>
+
+ * Make-lang.in (cc1objplus-checksum.c): For stage-final re-use
+ the checksum from the previous stage.
+
2021-01-05 Jakub Jelinek <jakub@redhat.com>
* Make-lang.in (cc1objplus-checksum, cc1objplus$(exeext): Add
diff --git a/gcc/params.opt b/gcc/params.opt
index 0d0dcd2..18e6036 100644
--- a/gcc/params.opt
+++ b/gcc/params.opt
@@ -126,6 +126,10 @@ Maximum size (in bytes) of objects tracked bytewise by dead store elimination.
Common Joined UInteger Var(param_early_inlining_insns) Init(6) Optimization Param
Maximal estimated growth of function body caused by early inlining of single call.
+-param=evrp-sparse-threshold=
+Common Joined UInteger Var(param_evrp_sparse_threshold) Init(800) Optimization Param
+Maximum number of basic blocks before EVRP uses a sparse cache.
+
-param=evrp-mode=
Common Joined Var(param_evrp_mode) Enum(evrp_mode) Init(EVRP_MODE_EVRP_FIRST) Param Optimization
--param=evrp-mode=[legacy|ranger|legacy-first|ranger-first|ranger-trace|ranger-debug|trace|debug] Specifies the mode Early VRP should operate in.
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index d4f10b1..272bad0 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,55 @@
+2021-06-07 Uroš Bizjak <ubizjak@gmail.com>
+
+ PR target/100637
+ * gcc.target/i386/pr100637-5b.c: New test.
+ * gcc.target/i386/pr100637-5w.c: Ditto.
+
+2021-06-07 H.J. Lu <hjl.tools@gmail.com>
+
+ PR target/82735
+ * gcc.target/i386/pr82735-3.c: Don't compile for x32.
+ * gcc.target/i386/pr82735-4.c: Likewise.
+ * gcc.target/i386/pr82735-5.c: Likewise.
+
+2021-06-07 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc.dg/sso-14.c: Adjust.
+
+2021-06-07 Patrick Palka <ppalka@redhat.com>
+
+ PR c++/100918
+ * g++.dg/template/access38.C: New test.
+
+2021-06-07 H.J. Lu <hjl.tools@gmail.com>
+
+ PR target/100885
+ * g++.target/i386/pr100885.C (_mm_set_epi64): Cast __m64 to long
+ long.
+
+2021-06-07 Jakub Jelinek <jakub@redhat.com>
+
+ PR target/100887
+ * gcc.dg/pr100887.c: New test.
+
+2021-06-07 Jakub Jelinek <jakub@redhat.com>
+
+ PR middle-end/100898
+ * g++.dg/ext/va-arg-pack-3.C: New test.
+
+2021-06-07 liuhongt <hongtao.liu@intel.com>
+
+ PR target/100885
+ * g++.target/i386/pr100885.C: New test.
+
+2021-06-07 liuhongt <hongtao.liu@intel.com>
+
+ PR target/82735
+ * gcc.target/i386/pr82735-1.c: New test.
+ * gcc.target/i386/pr82735-2.c: New test.
+ * gcc.target/i386/pr82735-3.c: New test.
+ * gcc.target/i386/pr82735-4.c: New test.
+ * gcc.target/i386/pr82735-5.c: New test.
+
2021-06-06 Jakub Jelinek <jakub@redhat.com>
* gcc.dg/gomp/scan-1.c: New test.
diff --git a/gcc/testsuite/g++.dg/cpp0x/alias-decl-73.C b/gcc/testsuite/g++.dg/cpp0x/alias-decl-73.C
new file mode 100644
index 0000000..aae7786
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp0x/alias-decl-73.C
@@ -0,0 +1,9 @@
+// PR c++/100102
+// { dg-do compile { target c++11 } }
+
+template <bool B1> using a = int;
+template <class T3, class T4> struct k {
+ static long o();
+ template <class T5> using n = a<bool(k::o)>;
+ n<int> q;
+};
diff --git a/gcc/testsuite/g++.dg/template/access38.C b/gcc/testsuite/g++.dg/template/access38.C
new file mode 100644
index 0000000..488f865
--- /dev/null
+++ b/gcc/testsuite/g++.dg/template/access38.C
@@ -0,0 +1,15 @@
+// PR c++/100918
+
+struct Outer {
+ template<class T>
+ struct Inner { ~Inner(); };
+};
+
+template<>
+Outer::Inner<int>::~Inner<int>() { } // { dg-error "template-id" "" { target c++20 } }
+
+template<class T>
+Outer::Inner<T>::~Inner<T>() { } // { dg-error "template-id" "" { target c++20 } }
+
+Outer::Inner<int> x;
+Outer::Inner<char> y;
diff --git a/gcc/testsuite/g++.dg/template/lookup17.C b/gcc/testsuite/g++.dg/template/lookup17.C
new file mode 100644
index 0000000..b8571b9
--- /dev/null
+++ b/gcc/testsuite/g++.dg/template/lookup17.C
@@ -0,0 +1,18 @@
+// PR c++/91706
+// { dg-do compile { target c++11 } }
+// { dg-additional-options -g }
+
+template <bool> struct A;
+
+struct B { static constexpr bool g = false; };
+
+struct C {
+ template <typename> static B c ();
+};
+
+template <class T> struct D : C {
+ using c = decltype (c<T>());
+ using E = A<c::g>;
+};
+
+D<int> g;
diff --git a/gcc/testsuite/g++.target/i386/pr100885.C b/gcc/testsuite/g++.target/i386/pr100885.C
index 08a5bdd..bec08f7 100644
--- a/gcc/testsuite/g++.target/i386/pr100885.C
+++ b/gcc/testsuite/g++.target/i386/pr100885.C
@@ -33,7 +33,7 @@ protected:
}
};
__m128i _mm_set_epi64(__m64 __q0) {
- __m128i __trans_tmp_5{(long)__q0};
+ __m128i __trans_tmp_5{(long long)__q0};
return __trans_tmp_5;
}
long _mm_storel_epi64___P, Draw_dsts;
diff --git a/gcc/testsuite/gcc.dg/format/strfmon-1.c b/gcc/testsuite/gcc.dg/format/strfmon-1.c
index 934242a..a790db5 100644
--- a/gcc/testsuite/gcc.dg/format/strfmon-1.c
+++ b/gcc/testsuite/gcc.dg/format/strfmon-1.c
@@ -57,7 +57,7 @@ foo (char *s, size_t m, double d, long double ld)
strfmon (s, m, "%n%n", d); /* { dg-warning "matching" "too few args" } */
strfmon (s, m, ""); /* { dg-warning "zero-length" "empty" } */
strfmon (s, m, NULL); /* { dg-warning "null" "null format string" } */
- strfmon (s, m, "%"); /* { dg-warning "trailing" "tailing %" } */
+ strfmon (s, m, "%"); /* { dg-warning "trailing" "trailing %" } */
strfmon (s, m, "%n\0", d); /* { dg-warning "embedded" "embedded NUL" } */
strfmon (s, m, "%^^n", d); /* { dg-warning "repeated" "repeated flag" } */
}
diff --git a/gcc/testsuite/gcc.dg/sso-14.c b/gcc/testsuite/gcc.dg/sso-14.c
index af98145..8941946 100644
--- a/gcc/testsuite/gcc.dg/sso-14.c
+++ b/gcc/testsuite/gcc.dg/sso-14.c
@@ -5,6 +5,7 @@
#include <stddef.h>
#include <stdlib.h>
+#include <string.h>
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define REV_ENDIANNESS __attribute__((scalar_storage_order("big-endian")))
@@ -42,12 +43,14 @@ int main(void)
{
t_s12 *msg1 = __builtin_alloca(10);
t_u12 *msg2 = __builtin_alloca(10);
+ int same;
msg1 = malloc (sizeof (t_s12));
msg2 = malloc (sizeof (t_u12));
- msg1->a[0].val = 0;
- msg2->a[0].val = 0;
+ memset (msg1, 0, sizeof (t_s12));
+ memcpy (msg2, &msg1, sizeof (t_s12));
+ same = memcmp (msg1, msg2, sizeof (t_s12));
return 0;
}
diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr100794.c b/gcc/testsuite/gcc.dg/tree-ssa/pr100794.c
new file mode 100644
index 0000000..6f707ae
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/tree-ssa/pr100794.c
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-loop-vectorize -fdump-tree-pcom-details -fdisable-tree-vect" } */
+
+extern double arr[100];
+extern double foo (double, double);
+extern double sum;
+
+void
+test (int i_0, int i_n)
+{
+ int i;
+ for (i = i_0; i < i_n - 1; i++)
+ {
+ double a = arr[i];
+ double b = arr[i + 1];
+ sum += a * b;
+ }
+}
+
+/* { dg-final { scan-tree-dump "Executing predictive commoning without unrolling" "pcom" } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr100637-5b.c b/gcc/testsuite/gcc.target/i386/pr100637-5b.c
new file mode 100644
index 0000000..3e6cc8f
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr100637-5b.c
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -msse2" } */
+
+typedef char S;
+typedef S V __attribute__((vector_size(4 * sizeof(S))));
+
+V duplicate (S a)
+{
+ return (V) { a, a, a, a };
+}
+
+V one_nonzero (S a)
+{
+ return (V) { 0, a };
+}
+
+V one_var (S a)
+{
+ return (V) { 1, a };
+}
+
+V general (S a, S b, S c, S d)
+{
+ return (V) { a, b, c, d };
+}
diff --git a/gcc/testsuite/gcc.target/i386/pr100637-5w.c b/gcc/testsuite/gcc.target/i386/pr100637-5w.c
new file mode 100644
index 0000000..3f67738
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/pr100637-5w.c
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -msse2" } */
+
+typedef short S;
+typedef S V __attribute__((vector_size(2 * sizeof(S))));
+
+V duplicate (S a)
+{
+ return (V) { a, a };
+}
+
+V one_nonzero (S a)
+{
+ return (V) { 0, a };
+}
+
+V one_var (S a)
+{
+ return (V) { 1, a };
+}
+
+V general (S a, S b)
+{
+ return (V) { a, b };
+}
diff --git a/gcc/testsuite/gcc.target/i386/pr82735-3.c b/gcc/testsuite/gcc.target/i386/pr82735-3.c
index e3f801e..854087d 100644
--- a/gcc/testsuite/gcc.target/i386/pr82735-3.c
+++ b/gcc/testsuite/gcc.target/i386/pr82735-3.c
@@ -1,4 +1,4 @@
-/* { dg-do compile } */
+/* { dg-do compile { target { ! x32 } } } */
/* { dg-options "-mavx -O2 -mabi=ms" } */
/* { dg-final { scan-assembler-not {(?n)xmm([6-9]|1[0-5])} } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr82735-4.c b/gcc/testsuite/gcc.target/i386/pr82735-4.c
index 78c0a6c..2218ffc 100644
--- a/gcc/testsuite/gcc.target/i386/pr82735-4.c
+++ b/gcc/testsuite/gcc.target/i386/pr82735-4.c
@@ -1,4 +1,4 @@
-/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-do compile { target { lp64 } } } */
/* { dg-options "-mavx -O2 -mabi=ms -mno-avx512f -masm=att" } */
/* { dg-final { scan-assembler-times {(?n)(?:vmovdqa[1-9]*|vmovap[sd])[\t ]*%xmm[0-9]+, [0-9]*\(%rsp\)} 10 } } */
/* { dg-final { scan-assembler-times {(?n)(?:vmovdqa[1-9]*|vmovap[sd])[\t ]*[0-9]*\(%rsp\), %xmm[0-9]+} 10 } } */
diff --git a/gcc/testsuite/gcc.target/i386/pr82735-5.c b/gcc/testsuite/gcc.target/i386/pr82735-5.c
index 2a58cbe..a9ef05f 100644
--- a/gcc/testsuite/gcc.target/i386/pr82735-5.c
+++ b/gcc/testsuite/gcc.target/i386/pr82735-5.c
@@ -1,4 +1,4 @@
-/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-do compile { target { lp64 } } } */
/* { dg-options "-mavx -O2 -mabi=ms -mno-avx512f -masm=att" } */
/* { dg-final { scan-assembler-times {(?n)(?:vmovdqa[1-9]*|vmovap[sd])[\t ]*%xmm[0-9]+, [0-9]*\(%rsp\)} 10 } } */
/* { dg-final { scan-assembler-times {(?n)(?:vmovdqa[1-9]*|vmovap[sd])[\t ]*[0-9]*\(%rsp\), %xmm[0-9]+} 10 } } */
diff --git a/gcc/testsuite/gfortran.dg/char4-subscript.f90 b/gcc/testsuite/gfortran.dg/char4-subscript.f90
index fd1cf69..b4e2d11 100644
--- a/gcc/testsuite/gfortran.dg/char4-subscript.f90
+++ b/gcc/testsuite/gfortran.dg/char4-subscript.f90
@@ -22,7 +22,7 @@ if (ichar(var%str2(5:5)) /= int(Z'1F608')) stop 2
deallocate(var%str2)
end
-! Note: the last '\x00' is regarded as string terminator, hence, the tailing \0 byte is not in the dump
+! Note: the last '\x00' is regarded as string terminator, hence, the trailing \0 byte is not in the dump
! { dg-final { scan-tree-dump { \(\*var\.str2\)\[1\]{lb: 1 sz: 4} = "(d\\x00\\x00|\\x00\\x00\\x00d)"\[1\]{lb: 1 sz: 4};} "original" } }
! { dg-final { scan-tree-dump { __builtin_memmove \(\(void \*\) &\(\*var.str2\)\[2\]{lb: 1 sz: 4}, \(void \*\) &"(e\\x00\\x00\\x00f\\x00\\x00|\\x00\\x00\\x00e\\x00\\x00\\x00f)"\[1\]{lb: 1 sz: 4}, 8\);} "original" } }
diff --git a/gcc/tree-predcom.c b/gcc/tree-predcom.c
index 5482f50..ac1674d 100644
--- a/gcc/tree-predcom.c
+++ b/gcc/tree-predcom.c
@@ -2280,8 +2280,6 @@ execute_pred_commoning (class loop *loop, vec<chain_p> chains,
remove_stmt (a->stmt);
}
}
-
- update_ssa (TODO_update_ssa_only_virtuals);
}
/* For each reference in CHAINS, if its defining statement is
@@ -3174,21 +3172,21 @@ insert_init_seqs (class loop *loop, vec<chain_p> chains)
}
}
-/* Performs predictive commoning for LOOP. Sets bit 1<<0 of return value
- if LOOP was unrolled; Sets bit 1<<1 of return value if loop closed ssa
- form was corrupted. */
+/* Performs predictive commoning for LOOP. Sets bit 1<<1 of return value
+ if LOOP was unrolled; Sets bit 1<<2 of return value if loop closed ssa
+ form was corrupted. Non-zero return value indicates some changes were
+ applied to this loop. */
static unsigned
-tree_predictive_commoning_loop (class loop *loop)
+tree_predictive_commoning_loop (class loop *loop, bool allow_unroll_p)
{
vec<data_reference_p> datarefs;
vec<ddr_p> dependences;
struct component *components;
vec<chain_p> chains = vNULL;
- unsigned unroll_factor;
+ unsigned unroll_factor = 0;
class tree_niter_desc desc;
bool unroll = false, loop_closed_ssa = false;
- edge exit;
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file, "Processing loop %d\n", loop->num);
@@ -3244,13 +3242,22 @@ tree_predictive_commoning_loop (class loop *loop)
determine_roots (loop, components, &chains);
release_components (components);
+ auto cleanup = [&]() {
+ release_chains (chains);
+ free_data_refs (datarefs);
+ BITMAP_FREE (looparound_phis);
+ free_affine_expand_cache (&name_expansions);
+ };
+
if (!chains.exists ())
{
if (dump_file && (dump_flags & TDF_DETAILS))
fprintf (dump_file,
"Predictive commoning failed: no suitable chains\n");
- goto end;
+ cleanup ();
+ return 0;
}
+
prepare_initializers (loop, chains);
loop_closed_ssa = prepare_finalizers (loop, chains);
@@ -3265,13 +3272,13 @@ tree_predictive_commoning_loop (class loop *loop)
dump_chains (dump_file, chains);
}
- /* Determine the unroll factor, and if the loop should be unrolled, ensure
- that its number of iterations is divisible by the factor. */
- unroll_factor = determine_unroll_factor (chains);
- scev_reset ();
- unroll = (unroll_factor > 1
- && can_unroll_loop_p (loop, unroll_factor, &desc));
- exit = single_dom_exit (loop);
+ if (allow_unroll_p)
+ /* Determine the unroll factor, and if the loop should be unrolled, ensure
+ that its number of iterations is divisible by the factor. */
+ unroll_factor = determine_unroll_factor (chains);
+
+ if (unroll_factor > 1)
+ unroll = can_unroll_loop_p (loop, unroll_factor, &desc);
/* Execute the predictive commoning transformations, and possibly unroll the
loop. */
@@ -3285,8 +3292,6 @@ tree_predictive_commoning_loop (class loop *loop)
dta.chains = chains;
dta.tmp_vars = tmp_vars;
- update_ssa (TODO_update_ssa_only_virtuals);
-
/* Cfg manipulations performed in tree_transform_and_unroll_loop before
execute_pred_commoning_cbck is called may cause phi nodes to be
reallocated, which is a problem since CHAINS may point to these
@@ -3295,6 +3300,7 @@ tree_predictive_commoning_loop (class loop *loop)
the phi nodes in execute_pred_commoning_cbck. A bit hacky. */
replace_phis_by_defined_names (chains);
+ edge exit = single_dom_exit (loop);
tree_transform_and_unroll_loop (loop, unroll_factor, exit, &desc,
execute_pred_commoning_cbck, &dta);
eliminate_temp_copies (loop, tmp_vars);
@@ -3307,20 +3313,15 @@ tree_predictive_commoning_loop (class loop *loop)
execute_pred_commoning (loop, chains, tmp_vars);
}
-end: ;
- release_chains (chains);
- free_data_refs (datarefs);
- BITMAP_FREE (looparound_phis);
+ cleanup ();
- free_affine_expand_cache (&name_expansions);
-
- return (unroll ? 1 : 0) | (loop_closed_ssa ? 2 : 0);
+ return (unroll ? 2 : 1) | (loop_closed_ssa ? 4 : 1);
}
/* Runs predictive commoning. */
unsigned
-tree_predictive_commoning (void)
+tree_predictive_commoning (bool allow_unroll_p)
{
class loop *loop;
unsigned ret = 0, changed = 0;
@@ -3329,18 +3330,25 @@ tree_predictive_commoning (void)
FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
if (optimize_loop_for_speed_p (loop))
{
- changed |= tree_predictive_commoning_loop (loop);
+ changed |= tree_predictive_commoning_loop (loop, allow_unroll_p);
}
free_original_copy_tables ();
if (changed > 0)
{
- scev_reset ();
+ ret = TODO_update_ssa_only_virtuals;
+ /* Some loop(s) got unrolled. */
if (changed > 1)
- rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
+ {
+ scev_reset ();
- ret = TODO_cleanup_cfg;
+ /* Need to fix up loop closed SSA. */
+ if (changed >= 4)
+ rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
+
+ ret |= TODO_cleanup_cfg;
+ }
}
return ret;
@@ -3349,12 +3357,12 @@ tree_predictive_commoning (void)
/* Predictive commoning Pass. */
static unsigned
-run_tree_predictive_commoning (struct function *fun)
+run_tree_predictive_commoning (struct function *fun, bool allow_unroll_p)
{
if (number_of_loops (fun) <= 1)
return 0;
- return tree_predictive_commoning ();
+ return tree_predictive_commoning (allow_unroll_p);
}
namespace {
@@ -3369,7 +3377,7 @@ const pass_data pass_data_predcom =
0, /* properties_provided */
0, /* properties_destroyed */
0, /* todo_flags_start */
- TODO_update_ssa_only_virtuals, /* todo_flags_finish */
+ 0, /* todo_flags_finish */
};
class pass_predcom : public gimple_opt_pass
@@ -3380,11 +3388,27 @@ public:
{}
/* opt_pass methods: */
- virtual bool gate (function *) { return flag_predictive_commoning != 0; }
- virtual unsigned int execute (function *fun)
- {
- return run_tree_predictive_commoning (fun);
- }
+ virtual bool
+ gate (function *)
+ {
+ if (flag_predictive_commoning != 0)
+ return true;
+ /* Loop vectorization enables predictive commoning implicitly
+ only if predictive commoning isn't set explicitly, and it
+ doesn't allow unrolling. */
+ if (flag_tree_loop_vectorize
+ && !global_options_set.x_flag_predictive_commoning)
+ return true;
+
+ return false;
+ }
+
+ virtual unsigned int
+ execute (function *fun)
+ {
+ bool allow_unroll_p = flag_predictive_commoning != 0;
+ return run_tree_predictive_commoning (fun, allow_unroll_p);
+ }
}; // class pass_predcom
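
Note on the tree-predcom.c interface change above: tree_predictive_commoning_loop now returns a small bit set (nonzero means the loop changed, bit 1<<1 means it was unrolled, bit 1<<2 means loop-closed SSA needs repair), the caller ORs these across loops to decide which TODOs to run, and the pass gate also fires when -ftree-loop-vectorize implicitly enables commoning without unrolling. A toy model of that flag protocol, using hypothetical names rather than the GCC functions:

  #include <cstdio>

  /* Per-loop return bits and how the driver reacts to the accumulated set.  */
  enum { CHANGED = 1, UNROLLED = 2, LCSSA_BROKEN = 4 };

  static unsigned
  process_loop (bool changed, bool unrolled, bool lcssa_broken)
  {
    if (!changed)
      return 0;
    return CHANGED | (unrolled ? UNROLLED : 0) | (lcssa_broken ? LCSSA_BROKEN : 0);
  }

  int
  main ()
  {
    unsigned acc = 0;
    acc |= process_loop (true, false, false);  /* Plain commoning, no unrolling.  */
    acc |= process_loop (true, true, true);    /* Unrolled; LC-SSA must be rebuilt.  */

    if (acc)                                   /* Something changed: virtual SSA update.  */
      printf ("TODO_update_ssa_only_virtuals\n");
    if (acc & UNROLLED)                        /* Unrolling: reset SCEV, clean up CFG.  */
      {
        printf ("scev_reset + TODO_cleanup_cfg\n");
        if (acc & LCSSA_BROKEN)                /* And rebuild loop-closed SSA if needed.  */
          printf ("rewrite_into_loop_closed_ssa\n");
      }
    return 0;
  }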