Diffstat (limited to 'gcc')
-rw-r--r--  gcc/Makefile.in | 1
-rw-r--r--  gcc/builtins.def | 6
-rw-r--r--  gcc/cobol/Make-lang.in | 3
-rw-r--r--  gcc/cobol/TODO | 33
-rw-r--r--  gcc/cobol/cbldiag.h | 6
-rw-r--r--  gcc/cobol/cdf-copy.cc | 6
-rw-r--r--  gcc/cobol/cdf.y | 8
-rw-r--r--  gcc/cobol/cdfval.h | 2
-rw-r--r--  gcc/cobol/copybook.h | 9
-rw-r--r--  gcc/cobol/except.cc | 2
-rw-r--r--  gcc/cobol/exceptg.h | 6
-rw-r--r--  gcc/cobol/genapi.cc | 157
-rw-r--r--  gcc/cobol/inspect.h | 8
-rw-r--r--  gcc/cobol/lexio.cc | 9
-rw-r--r--  gcc/cobol/lexio.h | 14
-rw-r--r--  gcc/cobol/parse.y | 14
-rw-r--r--  gcc/cobol/scan.l | 232
-rw-r--r--  gcc/cobol/scan_ante.h | 13
-rw-r--r--  gcc/cobol/scan_post.h | 7
-rw-r--r--  gcc/cobol/symbols.cc | 46
-rw-r--r--  gcc/cobol/symbols.h | 80
-rw-r--r--  gcc/cobol/symfind.cc | 12
-rw-r--r--  gcc/cobol/util.cc | 16
-rw-r--r--  gcc/cobol/util.h | 1
-rw-r--r--  gcc/config/riscv/riscv-v.cc | 1
-rw-r--r--  gcc/config/riscv/riscv.cc | 1
-rw-r--r--  gcc/config/riscv/vector-iterators.md | 2
-rw-r--r--  gcc/config/rs6000/rs6000-builtin.cc | 24
-rw-r--r--  gcc/cp/decl2.cc | 33
-rw-r--r--  gcc/diagnostic-format-html.cc | 332
-rw-r--r--  gcc/fortran/f95-lang.cc | 3
-rw-r--r--  gcc/fortran/gfortran.h | 6
-rw-r--r--  gcc/fortran/options.cc | 4
-rw-r--r--  gcc/fortran/trans-expr.cc | 10
-rw-r--r--  gcc/gimple-fold.cc | 82
-rw-r--r--  gcc/gimple-fold.h | 1
-rw-r--r--  gcc/m2/gm2-compiler/M2Quads.mod | 60
-rw-r--r--  gcc/omp-builtins.def | 9
-rw-r--r--  gcc/selftest-run-tests.cc | 1
-rw-r--r--  gcc/selftest.h | 15
-rw-r--r--  gcc/testsuite/c-c++-common/gomp/omp_get_num_devices_initial_device-2.c | 29
-rw-r--r--  gcc/testsuite/c-c++-common/gomp/omp_get_num_devices_initial_device.c | 32
-rw-r--r--  gcc/testsuite/g++.dg/cpp1z/constexpr-if39.C | 30
-rw-r--r--  gcc/testsuite/gcc.target/arc/fma-1.c | 3
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-rv32gcv-nofm.c | 4
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-rv32gcv.c | 4
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-rv64gcv-nofm.c | 4
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-rv64gcv.c | 4
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-u16.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-u32.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-u64.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-u8.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-2-u16.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-2-u32.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-2-u64.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-2-u8.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-3-u16.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-3-u32.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-3-u64.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-3-u8.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-u16.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-u32.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-u64.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-u8.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-u16.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-u32.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-u64.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-u8.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-u16.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-u32.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-u64.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-u8.c | 2
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_binary_data.h | 196
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vdiv-run-1-u16.c | 15
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vdiv-run-1-u32.c | 15
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vdiv-run-1-u64.c | 15
-rw-r--r--  gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vdiv-run-1-u8.c | 15
-rw-r--r--  gcc/testsuite/gfortran.dg/gomp/omp_get_num_devices_initial_device-2.f90 | 21
-rw-r--r--  gcc/testsuite/gfortran.dg/gomp/omp_get_num_devices_initial_device.f90 | 24
-rw-r--r--  gcc/testsuite/gm2/iso/fail/badreturn.mod | 5
-rw-r--r--  gcc/testsuite/gm2/iso/fail/badreturn2.mod | 12
-rw-r--r--  gcc/testsuite/gm2/iso/pass/modulereturn.mod | 5
-rw-r--r--  gcc/testsuite/gm2/iso/pass/modulereturn2.mod | 10
-rw-r--r--  gcc/text-art/widget.cc | 10
-rw-r--r--  gcc/text-art/widget.h | 11
-rw-r--r--  gcc/tree-inline.cc | 2
-rw-r--r--  gcc/tree-ssa-phiopt.cc | 15
-rw-r--r--  gcc/tree-vect-stmts.cc | 1646
-rw-r--r--  gcc/xml.cc | 367
-rw-r--r--  gcc/xml.h | 8
90 files changed, 2111 insertions, 1714 deletions
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 49869531..fe20b65 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -1862,6 +1862,7 @@ OBJS-libcommon = diagnostic-spec.o diagnostic.o diagnostic-color.o \
edit-context.o \
pretty-print.o intl.o \
json.o json-parsing.o \
+ xml.o \
sbitmap.o \
vec.o input.o hash-table.o ggc-none.o memory-block.o \
selftest.o selftest-diagnostic.o sort.o \
diff --git a/gcc/builtins.def b/gcc/builtins.def
index fdcad54..59a43a1 100644
--- a/gcc/builtins.def
+++ b/gcc/builtins.def
@@ -217,6 +217,8 @@ along with GCC; see the file COPYING3. If not see
DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
false, true, true, ATTRS, false, \
flag_openacc)
+/* Set NONANSI_P = false to enable the builtins also with -fno-nonansi-builtins,
+ esp. as -std=c++../c.. imply that flag and -fopenacc should be orthogonal. */
#undef DEF_GOACC_BUILTIN_COMPILER
#define DEF_GOACC_BUILTIN_COMPILER(ENUM, NAME, TYPE, ATTRS) \
DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
@@ -232,10 +234,12 @@ along with GCC; see the file COPYING3. If not see
(flag_openacc \
|| flag_openmp \
|| flag_tree_parallelize_loops > 1))
+/* Set NONANSI_P = false to enable the builtins also with -fno-nonansi-builtins,
+ esp. as -std=c++../c.. imply that flag and -fopenmp should be orthogonal. */
#undef DEF_GOMP_BUILTIN_COMPILER
#define DEF_GOMP_BUILTIN_COMPILER(ENUM, NAME, TYPE, ATTRS) \
DEF_BUILTIN (ENUM, "__builtin_" NAME, BUILT_IN_NORMAL, TYPE, TYPE, \
- flag_openmp, true, true, ATTRS, false, flag_openmp)
+ flag_openmp, true, false, ATTRS, false, flag_openmp)
/* Builtin used by the implementation of GNU TM. These
functions are mapped to the actual implementation of the STM library. */
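A sketch of what flipping NONANSI_P buys, modeled loosely on the new c-c++-common/gomp tests in the diffstat (their exact dg- directives are not reproduced here): the OpenMP/OpenACC builtins must remain recognizable even under a strict -std= mode, which implies -fno-nonansi-builtins, so calls like these can still be folded on the host.

    /* Compile with: -fopenmp -std=c99 (the strict mode implies
       -fno-nonansi-builtins; the builtins must survive that).  */
    extern int omp_get_initial_device (void);
    extern int omp_get_num_devices (void);

    int
    all_on_host (void)
    {
      /* Both calls are recognized as builtins, so the middle end may
         fold this comparison when no offload devices are configured.  */
      return omp_get_initial_device () == omp_get_num_devices ();
    }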
diff --git a/gcc/cobol/Make-lang.in b/gcc/cobol/Make-lang.in
index a474123..993e4c6 100644
--- a/gcc/cobol/Make-lang.in
+++ b/gcc/cobol/Make-lang.in
@@ -159,8 +159,7 @@ FLEX_WARNING = warning, dangerous trailing context
cobol/scan.cc: cobol/scan.l
$(FLEX) -o$@ $(LFLAGS) $< 2>$@~ || { cat $@~ >&1; exit 1; }
awk '! /$(FLEX_WARNING)/ {print > "/dev/stderr"; nerr++} \
- END {print "$(FLEX):", NR, "messages" > "/dev/stderr"; \
- exit nerr}' $@~
+ END {print "$(FLEX):", NR, "messages" > "/dev/stderr"}' $@~
@rm $@~
diff --git a/gcc/cobol/TODO b/gcc/cobol/TODO
new file mode 100644
index 0000000..02ee0e2
--- /dev/null
+++ b/gcc/cobol/TODO
@@ -0,0 +1,33 @@
+Below is the work to be done, hopefully all of it in 2025 for
+GCC 16. The items are roughly in priority order, in that addressing the more
+technical issues may illuminate ways to attack the more amorphous ones.
+
+Portability:
+ - host/target, for cross-compilation
+ - OS portability, BSD, macOS, Solaris
+ - 64-bit portability, LE
+ - 64-bit portability, BE
+ - 2025 goal: Compile & run on primary & secondary GCC 15 platforms
+ https://www.gnu.org/software/gcc/gcc-15/criteria.html
+
+Correctness:
+ - LTO ODR, PR 119215
+ - cppcheck
+ - valgrind
+ - -static produces dynamic
+
+Efficiency:
+ - Code size for MOVE 'a' TO FOO(1,1)
+ - EC checking
+
+COBOL Features:
+ - XML, JSON
+ - MF system functions
+ - National characters (and Unicode, for IBM)
+ - GLOBAL and PERFORM declaratives
+ - dialect feature names (to enable and enumerate)
+
+GCC features:
+ - make check-nist
+ - -Werror, -Wno-<foo>
+ - -fEC-ALL, -fno-EC-I-O
diff --git a/gcc/cobol/cbldiag.h b/gcc/cobol/cbldiag.h
index 3cb54e7..2d2ff4c 100644
--- a/gcc/cobol/cbldiag.h
+++ b/gcc/cobol/cbldiag.h
@@ -93,9 +93,9 @@ void cbl_unimplemented(const char *gmsgid, ...); // error
void cbl_unimplemented_at( const YYLTYPE& loc, const char *gmsgid, ... );
/*
- * dbgmsg produce messages not intended for the user. They cannot
- * be localized and fwrite directly to standard out. dbgmsg is activated by
- * -fflex-debug or -fyacc-debug.
+ * dbgmsg produces messages not intended for the user. They cannot be localized
+ * and fwrite directly to standard error. dbgmsg is activated by -fflex-debug
+ * or -fyacc-debug.
*/
void dbgmsg( const char fmt[], ... ) ATTRIBUTE_PRINTF_1;
diff --git a/gcc/cobol/cdf-copy.cc b/gcc/cobol/cdf-copy.cc
index 99f5866..3f5ae30 100644
--- a/gcc/cobol/cdf-copy.cc
+++ b/gcc/cobol/cdf-copy.cc
@@ -304,9 +304,9 @@ copybook_elem_t::open_file( const char directory[], bool literally ) {
dbgmsg("found copybook file %s", filename);
this->source.name = xstrdup(filename);
if( ! cobol_filename(this->source.name, inode_of(fd)) ) {
- error_msg(source.loc, "recursive copybook: '%s' includes itself", this->source);
- (void)! close(fd);
- fd = -1;
+ error_msg(source.loc, "recursive copybook: '%s' includes itself", this->source);
+ (void)! close(fd);
+ fd = -1;
}
dbgmsg("%s: opened %s as fd %d", __func__, source.name, fd);
return fd;
diff --git a/gcc/cobol/cdf.y b/gcc/cobol/cdf.y
index 7680f48..0440d02 100644
--- a/gcc/cobol/cdf.y
+++ b/gcc/cobol/cdf.y
@@ -263,7 +263,7 @@ top: partials { YYACCEPT; }
}
| copy error {
error_msg(@error, "COPY directive must end in a '.'");
- YYACCEPT;
+ YYABORT;
}
| completes { YYACCEPT; }
;
@@ -584,7 +584,7 @@ copybook_name: COPY name_one[src]
if( -1 == copybook.open(@src, $src.string) ) {
error_msg(@src, "could not open copybook file "
"for '%s'", $src.string);
- YYERROR;
+ YYABORT;
}
}
| COPY name_one[src] IN name_one[lib]
@@ -593,7 +593,7 @@ copybook_name: COPY name_one[src]
if( -1 == copybook.open(@src, $src.string) ) {
error_msg(@src, "could not open copybook file "
"for '%s' in '%'s'", $src.string, $lib.string);
- YYERROR;
+ YYABORT;
}
}
;
@@ -864,7 +864,7 @@ static int ydflex(void) {
}
bool
-cdf_value( const char name[], cdfval_t value ) {
+cdf_value( const char name[], const cdfval_t& value ) {
auto p = dictionary.find(name);
if( p != dictionary.end() ) return false;
diff --git a/gcc/cobol/cdfval.h b/gcc/cobol/cdfval.h
index 76ed7da..09c21ab 100644
--- a/gcc/cobol/cdfval.h
+++ b/gcc/cobol/cdfval.h
@@ -116,6 +116,6 @@ const cdfval_t *
cdf_value( const char name[] );
bool
-cdf_value( const char name[], cdfval_t value );
+cdf_value( const char name[], const cdfval_t& value );
#endif
diff --git a/gcc/cobol/copybook.h b/gcc/cobol/copybook.h
index a4b1117..fa91fe5 100644
--- a/gcc/cobol/copybook.h
+++ b/gcc/cobol/copybook.h
@@ -62,7 +62,7 @@ class copybook_elem_t {
struct copybook_loc_t {
YYLTYPE loc;
const char *name;
- copybook_loc_t() : name(NULL) {}
+ copybook_loc_t() : loc(), name(NULL) {}
} source, library;
bool suppress;
static std::list<const char *> suffixes;
@@ -74,12 +74,11 @@ class copybook_elem_t {
copybook_elem_t()
: suppress(false)
+ , literally()
, fd(-1)
, nsubexpr(0)
, regex_text(NULL)
- {
- literally = {};
- }
+ {}
void clear() {
suppress = false;
@@ -130,7 +129,7 @@ private:
class uppername_t {
std::string upper;
public:
- uppername_t( const std::string input ) : upper(input) {
+ explicit uppername_t( const std::string& input ) : upper(input) {
std::transform(input.begin(), input.end(), upper.begin(),
[]( char ch ) { return TOUPPER(ch); } );
}
diff --git a/gcc/cobol/except.cc b/gcc/cobol/except.cc
index d477139..d2bc24a 100644
--- a/gcc/cobol/except.cc
+++ b/gcc/cobol/except.cc
@@ -51,7 +51,7 @@ static const ec_descr_t *
ec_type_descr( ec_type_t type ) {
auto p = std::find( __gg__exception_table, __gg__exception_table_end, type );
if( p == __gg__exception_table_end ) {
- cbl_internal_error("no such exception: 0x%04x", type);
+ cbl_internal_error("no such exception: 0x%x", type);
}
return p;
}
diff --git a/gcc/cobol/exceptg.h b/gcc/cobol/exceptg.h
index e29e056..f90cc28 100644
--- a/gcc/cobol/exceptg.h
+++ b/gcc/cobol/exceptg.h
@@ -58,8 +58,8 @@ class exception_turn_t {
exception_turn_t() : enabled(false), location(false) {};
- exception_turn_t( ec_type_t ec, bool enabled = true )
- : enabled(enabled)
+ explicit exception_turn_t( ec_type_t ec, bool enabled = true )
+ : enabled(enabled), location(false)
{
add_exception(ec);
}
@@ -74,7 +74,7 @@ class exception_turn_t {
const ec_filemap_t& exception_files() const { return exceptions; }
- bool add_exception( ec_type_t type, const filelist_t files = filelist_t() ) {
+ bool add_exception( ec_type_t type, const filelist_t& files = filelist_t() ) {
ec_disposition_t disposition = ec_type_disposition(type);
if( disposition != ec_implemented(disposition) ) {
cbl_unimplementedw("CDF: exception '%s'", ec_type_str(type));
diff --git a/gcc/cobol/genapi.cc b/gcc/cobol/genapi.cc
index bde8151..595aa61 100644
--- a/gcc/cobol/genapi.cc
+++ b/gcc/cobol/genapi.cc
@@ -1019,10 +1019,63 @@ parser_compile_dcls( const std::vector<uint64_t>& dcls )
return retval;
}
-static void store_location_stuff(const cbl_name_t statement_name);
+static void
+store_location_stuff(const cbl_name_t statement_name)
+ {
+ if( exception_location_active && !current_declarative_section_name() )
+ {
+ // We need to establish some stuff for EXCEPTION- function processing
+
+ gg_assign(var_decl_exception_program_id,
+ gg_string_literal(current_function->our_unmangled_name));
+
+ if( strstr(current_function->current_section->label->name, "_implicit")
+ != current_function->current_section->label->name )
+ {
+ gg_assign(var_decl_exception_section,
+ gg_string_literal(current_function->current_section->label->name));
+ }
+ else
+ {
+ gg_assign(var_decl_exception_section,
+ gg_cast(build_pointer_type(CHAR_P),null_pointer_node));
+ }
+
+ if( strstr(current_function->current_paragraph->label->name, "_implicit")
+ != current_function->current_paragraph->label->name )
+ {
+ gg_assign(var_decl_exception_paragraph,
+ gg_string_literal(current_function->current_paragraph->label->name));
+ }
+ else
+ {
+ gg_assign(var_decl_exception_paragraph,
+ gg_cast(build_pointer_type(CHAR_P), null_pointer_node));
+ }
+
+ gg_assign(var_decl_exception_source_file,
+ gg_string_literal(current_filename.back().c_str()));
+ gg_assign(var_decl_exception_line_number, build_int_cst_type(INT,
+ CURRENT_LINE_NUMBER));
+ gg_assign(var_decl_exception_statement, gg_string_literal(statement_name));
+ }
+ }
+
+static
+void
+set_exception_environment( tree ecs, tree dcls )
+ {
+ gg_call(VOID,
+ "__gg__set_exception_environment",
+ ecs ? gg_get_address_of(ecs) : null_pointer_node,
+ dcls ? gg_get_address_of(dcls) : null_pointer_node,
+ NULL_TREE);
+ }
void
-parser_statement_begin( const cbl_name_t statement_name, tree ecs, tree dcls )
+parser_statement_begin( const cbl_name_t statement_name,
+ tree ecs,
+ tree dcls )
{
SHOW_PARSE
{
@@ -1052,6 +1105,35 @@ parser_statement_begin( const cbl_name_t statement_name, tree ecs, tree dcls )
TRACE1_END
}
+ gcc_assert( gg_trans_unit.function_stack.size() );
+
+ // In the cases where enabled_exceptions.size() is non-zero, or when
+ // there is a possibility of an EC-I-O exception because this is a file
+ // operation, we need to store the location information and do the exception
+ // overhead:
+
+ static const std::set<std::string> file_ops =
+ {
+ "OPEN",
+ "CLOSE",
+ "READ",
+ "WRITE",
+ "DELETE",
+ "REWRITE",
+ "START",
+ };
+
+ // Performance note: By doing exception processing only when necessary
+ // the execution time of a program doing two-billion simple adds in an inner
+ // loop dropped from 3.8 seconds to 0.175 seconds.
+
+ bool exception_processing = enabled_exceptions.size() ;
+
+ if( !exception_processing )
+ {
+ exception_processing = file_ops.find(statement_name) != file_ops.end();
+ }
+
if( gg_get_current_line_number() == DEFAULT_LINE_NUMBER )
{
// This code is intended to prevent GDB anomalies when the first line of a
@@ -1064,23 +1146,17 @@ parser_statement_begin( const cbl_name_t statement_name, tree ecs, tree dcls )
// Each file I-O routine calls store_location_stuff explicitly, because
// those exceptions can't be defeated.
- if( enabled_exceptions.size() )
+ if( exception_processing )
{
store_location_stuff(statement_name);
}
gg_set_current_line_number(CURRENT_LINE_NUMBER);
- // if( ecs || dcls || sv_is_i_o )
+ if( exception_processing )
{
- gg_call(VOID,
- "__gg__set_exception_environment",
- ecs ? gg_get_address_of(ecs) : null_pointer_node,
- dcls ? gg_get_address_of(dcls) : null_pointer_node,
- NULL_TREE);
+ set_exception_environment(ecs, dcls);
}
-
- gcc_assert( gg_trans_unit.function_stack.size() );
sv_is_i_o = false;
}
@@ -7833,12 +7909,13 @@ parser_perform_conditional( struct cbl_perform_tgt_t *tgt )
SHOW_PARSE_END
}
- size_t i = tgt->addresses.number_of_conditionals;
+ unsigned int i = tgt->addresses.number_of_conditionals;
if( !(i < MAXIMUM_UNTILS) )
{
- cbl_internal_error("%s:%d: %zu exceeds MAXIMUM_UNTILS of %d, line %d",
- __func__, __LINE__, i, MAXIMUM_UNTILS, CURRENT_LINE_NUMBER);
+ cbl_internal_error("%s:%d: %u exceeds MAXIMUM_UNTILS of %d, line %d",
+ __func__, __LINE__,
+ i, MAXIMUM_UNTILS, CURRENT_LINE_NUMBER);
}
gcc_assert(i < MAXIMUM_UNTILS);
@@ -7882,7 +7959,7 @@ parser_perform_conditional_end( struct cbl_perform_tgt_t *tgt )
SHOW_PARSE_END
}
- size_t i = tgt->addresses.number_of_conditionals;
+ unsigned int i = tgt->addresses.number_of_conditionals;
gcc_assert(i);
// We need to cap off the prior conditional in this chain of conditionals
@@ -13427,48 +13504,6 @@ parser_set_numeric(struct cbl_field_t *tgt, ssize_t value)
NULL_TREE );
}
-static void
-store_location_stuff(const cbl_name_t statement_name)
- {
- if( exception_location_active && !current_declarative_section_name() )
- {
- // We need to establish some stuff for EXCEPTION- function processing
-
- gg_assign(var_decl_exception_program_id,
- gg_string_literal(current_function->our_unmangled_name));
-
- if( strstr(current_function->current_section->label->name, "_implicit")
- != current_function->current_section->label->name )
- {
- gg_assign(var_decl_exception_section,
- gg_string_literal(current_function->current_section->label->name));
- }
- else
- {
- gg_assign(var_decl_exception_section,
- gg_cast(build_pointer_type(CHAR_P),null_pointer_node));
- }
-
- if( strstr(current_function->current_paragraph->label->name, "_implicit")
- != current_function->current_paragraph->label->name )
- {
- gg_assign(var_decl_exception_paragraph,
- gg_string_literal(current_function->current_paragraph->label->name));
- }
- else
- {
- gg_assign(var_decl_exception_paragraph,
- gg_cast(build_pointer_type(CHAR_P), null_pointer_node));
- }
-
- gg_assign(var_decl_exception_source_file,
- gg_string_literal(current_filename.back().c_str()));
- gg_assign(var_decl_exception_line_number, build_int_cst_type(INT,
- CURRENT_LINE_NUMBER));
- gg_assign(var_decl_exception_statement, gg_string_literal(statement_name));
- }
- }
-
void
parser_exception_clear()
{
@@ -13548,9 +13583,17 @@ parser_check_fatal_exception()
TRACE1_END
}
+ // Performance note:
+ // A simple program that does two billion additions of 32-bit binary numbers
+ // in its innermost loop had an execution time of 19.5 seconds. By putting in
+ // the if() statement, that was reduced to 3.8 seconds.
+
+ if( enabled_exceptions.size() || sv_is_i_o )
+ {
gg_call(VOID,
"__gg__check_fatal_exception",
NULL_TREE);
+ }
}
void
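A condensed model of the gating introduced in parser_statement_begin and parser_check_fatal_exception above, with names taken from those hunks: per-statement exception bookkeeping now runs only when it can matter, which is where the quoted 3.8s-to-0.175s improvement comes from.

    #include <cstddef>
    #include <set>
    #include <string>

    // File operations can raise EC-I-O even when no EC is TURNed on.
    static const std::set<std::string> file_ops =
      { "OPEN", "CLOSE", "READ", "WRITE", "DELETE", "REWRITE", "START" };

    // Mirrors the new test: skip store_location_stuff and
    // __gg__set_exception_environment unless an EC is enabled or the
    // statement is a file operation.
    static bool
    needs_exception_overhead (std::size_t n_enabled_ecs,
                              const std::string &statement_name)
    {
      return n_enabled_ecs != 0
             || file_ops.find (statement_name) != file_ops.end ();
    }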
diff --git a/gcc/cobol/inspect.h b/gcc/cobol/inspect.h
index 9e86a0b..fb8fda4 100644
--- a/gcc/cobol/inspect.h
+++ b/gcc/cobol/inspect.h
@@ -102,8 +102,8 @@ struct cbx_inspect_match_t {
cbx_inspect_match_t(
const DATA& matching = DATA(),
- cbx_inspect_qual_t<DATA> before = cbx_inspect_qual_t<DATA>(),
- cbx_inspect_qual_t<DATA> after = cbx_inspect_qual_t<DATA>()
+ const cbx_inspect_qual_t<DATA>& before = cbx_inspect_qual_t<DATA>(),
+ const cbx_inspect_qual_t<DATA>& after = cbx_inspect_qual_t<DATA>()
)
: matching(matching)
, before(before)
@@ -192,7 +192,7 @@ typedef cbx_inspect_oper_t<cbl_refer_t> cbl_inspect_oper_t;
template <typename DATA>
struct cbx_inspect_t {
DATA tally; // identifier-2: NULL without a tally
- size_t nbound; // Each FOR or REPLACING operation starts with a cbl_inspect_bound_t
+ size_t nbound; // FOR and REPLACING start with a cbl_inspect_bound_t
cbx_inspect_oper_t<DATA> *opers;
cbx_inspect_t( const DATA& tally = DATA() )
@@ -200,7 +200,7 @@ struct cbx_inspect_t {
, nbound(0)
, opers(NULL)
{}
- cbx_inspect_t( const DATA& tally, cbx_inspect_oper_t<DATA> oper )
+ cbx_inspect_t( const DATA& tally, const cbx_inspect_oper_t<DATA>& oper )
: tally(tally)
, nbound(1)
, opers(NULL)
diff --git a/gcc/cobol/lexio.cc b/gcc/cobol/lexio.cc
index 6b2d1fb..13de5b6 100644
--- a/gcc/cobol/lexio.cc
+++ b/gcc/cobol/lexio.cc
@@ -1905,9 +1905,12 @@ cdftext::process_file( filespan_t mfile, int output, bool second_pass ) {
segments.front().pend, '\n');
nlines.after = std::count(segments.back().p, segments.back().pend, '\n');
if( nlines.delta() < 0 ) {
- yywarn("line %zu: REPLACED %zu lines with %zu lines, "
- "line count off by %d", mfile.lineno(),
- nlines.before, nlines.after, nlines.delta());
+ yywarn("line %lu: REPLACED %lu lines with %lu lines, "
+ "line count off by %d",
+ gb4(mfile.lineno()),
+ gb4(nlines.before),
+ gb4(nlines.after),
+ nlines.delta());
}
int nnl = nlines.delta();
while( nnl-- > 0 ) {
diff --git a/gcc/cobol/lexio.h b/gcc/cobol/lexio.h
index ed642af..a7d2b72 100644
--- a/gcc/cobol/lexio.h
+++ b/gcc/cobol/lexio.h
@@ -110,19 +110,7 @@ struct bytespan_t {
}
};
-/* Location type. Borrowed from parse.h as generated by Bison. */
-#if ! defined YYLTYPE && ! defined YYLTYPE_IS_DECLARED
-typedef struct YYLTYPE YYLTYPE;
-struct YYLTYPE
-{
- int first_line;
- int first_column;
- int last_line;
- int last_column;
-};
-# define YYLTYPE_IS_DECLARED 1
-# define YYLTYPE_IS_TRIVIAL 1
-#endif
+// YYLTYPE supplied by cbldiag.h. Borrowed from parse.h as generated by Bison.
struct filespan_t : public bytespan_t {
char *cur, *eol, *quote;
diff --git a/gcc/cobol/parse.y b/gcc/cobol/parse.y
index a3195fe..719b94d 100644
--- a/gcc/cobol/parse.y
+++ b/gcc/cobol/parse.y
@@ -1453,6 +1453,7 @@ id_div: cdf_words IDENTIFICATION_DIV '.' program_id
cdf_words: %empty
| cobol_words
+ /* | error { error_msg(@1, "not a COBOL-WORD"); } */
;
cobol_words: cobol_words1
| cobol_words cobol_words1
@@ -2298,8 +2299,8 @@ config_paragraph:
}
}
}
- | REPOSITORY '.'
- | REPOSITORY '.' repo_members '.'
+ | REPOSITORY dot
+ | REPOSITORY dot repo_members '.'
;
repo_members: repo_member
@@ -2950,7 +2951,7 @@ fd_clause: record_desc
f->attr |= external_e;
cbl_unimplemented("AS LITERAL");
}
- | fd_linage
+ | fd_linage { cbl_unimplemented("LINAGE"); }
| fd_report {
cbl_unimplemented("REPORT WRITER");
YYERROR;
@@ -3888,10 +3889,11 @@ data_clauses: data_clause
auto redefined = symbol_redefines(field);
if( redefined && redefined->type == FldPointer ) {
if( yydebug ) {
- yywarn("expanding %s size from %u bytes to %zu "
- "because it redefines %s with USAGE POINTER",
+ yywarn("expanding %s size from %u bytes to "
+ HOST_WIDE_INT_PRINT " "
+ "because it redefines %s with USAGE POINTER",
field->name, field->size(),
- (size_t)int_size_in_bytes(ptr_type_node),
+ int_size_in_bytes(ptr_type_node),
redefined->name);
}
field->embiggen();
diff --git a/gcc/cobol/scan.l b/gcc/cobol/scan.l
index c11f66e..52a0b94 100644
--- a/gcc/cobol/scan.l
+++ b/gcc/cobol/scan.l
@@ -79,6 +79,8 @@ nonseq (([''][[:alnum:]]+][''])|([""][[:alnum:]]+[""]))
INTEGER 0*[1-9][[:digit:]]*
INTEGERZ [[:digit:]]+
+NONWORD [^[:alnum:]$_-]+
+
SPC [[:space:]]+
OSPC [[:space:]]*
EOL \r?\n
@@ -1795,126 +1797,128 @@ USE({SPC}FOR)? { return USE; }
}
<function>{
-
-
- ABS{OSPC}/[(]? { pop_return ABS; }
- ACOS{OSPC}/[(]? { pop_return ACOS; }
- ANNUITY{OSPC}/[(]? { pop_return ANNUITY; }
- ASIN{OSPC}/[(]? { pop_return ASIN; }
- ATAN{OSPC}/[(]? { pop_return ATAN; }
- BASECONVERT{OSPC}/[(]? { pop_return BASECONVERT; }
- BIT-OF{OSPC}/[(]? { pop_return BIT_OF; }
- BIT-TO-CHAR{OSPC}/[(]? { pop_return BIT_TO_CHAR; }
- BOOLEAN-OF-INTEGER{OSPC}/[(]? { pop_return BOOLEAN_OF_INTEGER; }
- BYTE-LENGTH{OSPC}/[(]? { pop_return BYTE_LENGTH; }
- CHAR-NATIONAL{OSPC}/[(]? { pop_return CHAR_NATIONAL; }
- CHAR{OSPC}/[(]? { pop_return CHAR; }
- COMBINED-DATETIME{OSPC}/[(]? { pop_return COMBINED_DATETIME; }
- CONCAT{OSPC}/[(]? { pop_return CONCAT; }
- CONTENT-LENGTH{OSPC}/[(]? { pop_return NO_CONDITION; /* GNU only*/ }
- CONTENT-OF{OSPC}/[(]? { pop_return NO_CONDITION; /* GNU only*/ }
- CONVERT{OSPC}/[(]? { pop_return CONVERT; }
- COS{OSPC}/[(]? { pop_return COS; }
- CURRENCY-SYBOL{OSPC}/[(]? { pop_return NO_CONDITION; /* GNU only*/ }
- CURRENT-DATE{OSPC}/[(]? { pop_return CURRENT_DATE; }
- DATE-OF-INTEGER{OSPC}/[(]? { pop_return DATE_OF_INTEGER; }
- DATE-TO-YYYYMMDD{OSPC}/[(]? { pop_return DATE_TO_YYYYMMDD; }
- DAY-OF-INTEGER{OSPC}/[(]? { pop_return DAY_OF_INTEGER; }
- DAY-TO-YYYYDDD{OSPC}/[(]? { pop_return DAY_TO_YYYYDDD; }
- DISPLAY-OF{OSPC}/[(]? { pop_return DISPLAY_OF; }
- E{OSPC}/[(]? { pop_return E; }
-
- EXCEPTION-FILE-N{OSPC}/[(]? { pop_return EXCEPTION_FILE_N; }
- EXCEPTION-FILE{OSPC}/[(]? { pop_return EXCEPTION_FILE; }
- EXCEPTION-LOCATION-N{OSPC}/[(]? { pop_return EXCEPTION_LOCATION_N; }
- EXCEPTION-LOCATION{OSPC}/[(]? { pop_return EXCEPTION_LOCATION; }
- EXCEPTION-STATEMENT{OSPC}/[(]? { pop_return EXCEPTION_STATEMENT; }
- EXCEPTION-STATUS{OSPC}/[(]? { pop_return EXCEPTION_STATUS; }
-
- EXP{OSPC}/[(]? { pop_return EXP; }
- EXP10{OSPC}/[(]? { pop_return EXP10; }
- FACTORIAL{OSPC}/[(]? { pop_return FACTORIAL; }
- FIND-STRING{OSPC}/[(]? { pop_return FIND_STRING; }
-
- FORMATTED-CURRENT-DATE{OSPC}/[(]? { BEGIN(datetime_fmt); return FORMATTED_CURRENT_DATE; }
- FORMATTED-DATE{OSPC}/[(]? { BEGIN(datetime_fmt); return FORMATTED_DATE; }
- FORMATTED-DATETIME{OSPC}/[(]? { BEGIN(datetime_fmt); return FORMATTED_DATETIME; }
- FORMATTED-TIME{OSPC}/[(]? { BEGIN(datetime_fmt); return FORMATTED_TIME; }
- FRACTION-PART{OSPC}/[(]? { pop_return FRACTION_PART; }
-
- HEX-OF{OSPC}/[(]? { pop_return HEX_OF; }
- HEX-TO-CHAR{OSPC}/[(]? { pop_return HEX_TO_CHAR; }
- HIGHEST-ALGEBRAIC{OSPC}/[(]? { pop_return HIGHEST_ALGEBRAIC; }
-
- INTEGER{OSPC}/[(]? { pop_return INTEGER; }
- INTEGER-OF-BOOLEAN{OSPC}/[(]? { pop_return INTEGER_OF_BOOLEAN; }
- INTEGER-OF-DATE{OSPC}/[(]? { pop_return INTEGER_OF_DATE; }
- INTEGER-OF-DAY{OSPC}/[(]? { pop_return INTEGER_OF_DAY; }
- INTEGER-OF-FORMATTED-DATE{OSPC}/[(]? { BEGIN(datetime_fmt); return INTEGER_OF_FORMATTED_DATE; }
- INTEGER-PART{OSPC}/[(]? { pop_return INTEGER_PART; }
- LENGTH{OSPC}/[(]? { pop_return LENGTH; }
- LOCALE-COMPARE{OSPC}/[(]? { pop_return LOCALE_COMPARE; }
- LOCALE-DATE{OSPC}/[(]? { pop_return LOCALE_DATE; }
- LOCALE-TIME{OSPC}/[(]? { pop_return LOCALE_TIME; }
- LOCALE-TIME-FROM-SECONDS{OSPC}/[(]? { pop_return LOCALE_TIME_FROM_SECONDS; }
- LOG{OSPC}/[(]? { pop_return LOG; }
- LOG10{OSPC}/[(]? { pop_return LOG10; }
- LOWER-CASE{OSPC}/[(]? { pop_return LOWER_CASE; }
- LOWEST-ALGEBRAIC{OSPC}/[(]? { pop_return LOWEST_ALGEBRAIC; }
- MAX{OSPC}/[(]? { pop_return MAXX; }
- MEAN{OSPC}/[(]? { pop_return MEAN; }
- MEDIAN{OSPC}/[(]? { pop_return MEDIAN; }
- MIDRANGE{OSPC}/[(]? { pop_return MIDRANGE; }
- MIN{OSPC}/[(]? { pop_return MINN; }
- MOD{OSPC}/[(]? { pop_return MOD; }
- MODULE-NAME{OSPC}/[(]? { pop_return MODULE_NAME; }
- NATIONAL-OF{OSPC}/[(]? { pop_return NATIONAL_OF; }
- NUMVAL{OSPC}/[(]? { pop_return NUMVAL; }
- NUMVAL-C{OSPC}/[(]? { pop_return NUMVAL_C; }
- NUMVAL-F{OSPC}/[(]? { pop_return NUMVAL_F; }
- ORD{OSPC}/[(]? { pop_return ORD; }
- ORD-MAX{OSPC}/[(]? { pop_return ORD_MAX; }
- ORD-MIN{OSPC}/[(]? { pop_return ORD_MIN; }
- PI{OSPC}/[(]? { pop_return PI; }
- PRESENT-VALUE{OSPC}/[(]? { pop_return PRESENT_VALUE; }
+ ABS/{NONWORD} { pop_return ABS; }
+ ACOS/{NONWORD} { pop_return ACOS; }
+ ANNUITY/{NONWORD} { pop_return ANNUITY; }
+ ASIN/{NONWORD} { pop_return ASIN; }
+ ATAN/{NONWORD} { pop_return ATAN; }
+ BASECONVERT/{NONWORD} { pop_return BASECONVERT; }
+ BIT-OF/{NONWORD} { pop_return BIT_OF; }
+ BIT-TO-CHAR/{NONWORD} { pop_return BIT_TO_CHAR; }
+ BOOLEAN-OF-INTEGER/{NONWORD} { pop_return BOOLEAN_OF_INTEGER; }
+ BYTE-LENGTH/{NONWORD} { pop_return BYTE_LENGTH; }
+ CHAR-NATIONAL/{NONWORD} { pop_return CHAR_NATIONAL; }
+ CHAR/{NONWORD} { pop_return CHAR; }
+ COMBINED-DATETIME/{NONWORD} { pop_return COMBINED_DATETIME; }
+ CONCAT/{NONWORD} { pop_return CONCAT; }
+ CONTENT-LENGTH/{NONWORD} { pop_return NO_CONDITION; /* GNU only*/ }
+ CONTENT-OF/{NONWORD} { pop_return NO_CONDITION; /* GNU only*/ }
+ CONVERT/{NONWORD} { pop_return CONVERT; }
+ COS/{NONWORD} { pop_return COS; }
+ CURRENCY-SYBOL/{NONWORD} { pop_return NO_CONDITION; /* GNU only*/ }
+ CURRENT-DATE/{NONWORD} { pop_return CURRENT_DATE; }
+ DATE-OF-INTEGER/{NONWORD} { pop_return DATE_OF_INTEGER; }
+ DATE-TO-YYYYMMDD/{NONWORD} { pop_return DATE_TO_YYYYMMDD; }
+ DAY-OF-INTEGER/{NONWORD} { pop_return DAY_OF_INTEGER; }
+ DAY-TO-YYYYDDD/{NONWORD} { pop_return DAY_TO_YYYYDDD; }
+ DISPLAY-OF/{NONWORD} { pop_return DISPLAY_OF; }
+ E/{NONWORD} { pop_return E; }
+
+ EXCEPTION-FILE-N/{NONWORD} { pop_return EXCEPTION_FILE_N; }
+ EXCEPTION-FILE/{NONWORD} { pop_return EXCEPTION_FILE; }
+ EXCEPTION-LOCATION-N/{NONWORD} { pop_return EXCEPTION_LOCATION_N; }
+ EXCEPTION-LOCATION/{NONWORD} { pop_return EXCEPTION_LOCATION; }
+ EXCEPTION-STATEMENT/{NONWORD} { pop_return EXCEPTION_STATEMENT; }
+ EXCEPTION-STATUS/{NONWORD} { pop_return EXCEPTION_STATUS; }
+
+ EXP/{NONWORD} { pop_return EXP; }
+ EXP10/{NONWORD} { pop_return EXP10; }
+ FACTORIAL/{NONWORD} { pop_return FACTORIAL; }
+ FIND-STRING/{NONWORD} { pop_return FIND_STRING; }
+
+ FORMATTED-CURRENT-DATE/{NONWORD} { BEGIN(datetime_fmt);
+ return FORMATTED_CURRENT_DATE; }
+ FORMATTED-DATE/{NONWORD} { BEGIN(datetime_fmt); return FORMATTED_DATE; }
+ FORMATTED-DATETIME/{NONWORD} { BEGIN(datetime_fmt); return FORMATTED_DATETIME; }
+ FORMATTED-TIME/{NONWORD} { BEGIN(datetime_fmt); return FORMATTED_TIME; }
+ FRACTION-PART/{NONWORD} { pop_return FRACTION_PART; }
+
+ HEX-OF/{NONWORD} { pop_return HEX_OF; }
+ HEX-TO-CHAR/{NONWORD} { pop_return HEX_TO_CHAR; }
+ HIGHEST-ALGEBRAIC/{NONWORD} { pop_return HIGHEST_ALGEBRAIC; }
+
+ INTEGER/{NONWORD} { pop_return INTEGER; }
+ INTEGER-OF-BOOLEAN/{NONWORD} { pop_return INTEGER_OF_BOOLEAN; }
+ INTEGER-OF-DATE/{NONWORD} { pop_return INTEGER_OF_DATE; }
+ INTEGER-OF-DAY/{NONWORD} { pop_return INTEGER_OF_DAY; }
+ INTEGER-OF-FORMATTED-DATE/{NONWORD} { BEGIN(datetime_fmt);
+ return INTEGER_OF_FORMATTED_DATE; }
+ INTEGER-PART/{NONWORD} { pop_return INTEGER_PART; }
+ LENGTH/{NONWORD} { pop_return LENGTH; }
+ LOCALE-COMPARE/{NONWORD} { pop_return LOCALE_COMPARE; }
+ LOCALE-DATE/{NONWORD} { pop_return LOCALE_DATE; }
+ LOCALE-TIME/{NONWORD} { pop_return LOCALE_TIME; }
+ LOCALE-TIME-FROM-SECONDS/{NONWORD} { pop_return LOCALE_TIME_FROM_SECONDS; }
+ LOG/{NONWORD} { pop_return LOG; }
+ LOG10/{NONWORD} { pop_return LOG10; }
+ LOWER-CASE/{NONWORD} { pop_return LOWER_CASE; }
+ LOWEST-ALGEBRAIC/{NONWORD} { pop_return LOWEST_ALGEBRAIC; }
+ MAX/{NONWORD} { pop_return MAXX; }
+ MEAN/{NONWORD} { pop_return MEAN; }
+ MEDIAN/{NONWORD} { pop_return MEDIAN; }
+ MIDRANGE/{NONWORD} { pop_return MIDRANGE; }
+ MIN/{NONWORD} { pop_return MINN; }
+ MOD/{NONWORD} { pop_return MOD; }
+ MODULE-NAME/{NONWORD} { pop_return MODULE_NAME; }
+ NATIONAL-OF/{NONWORD} { pop_return NATIONAL_OF; }
+ NUMVAL/{NONWORD} { pop_return NUMVAL; }
+ NUMVAL-C/{NONWORD} { pop_return NUMVAL_C; }
+ NUMVAL-F/{NONWORD} { pop_return NUMVAL_F; }
+ ORD/{NONWORD} { pop_return ORD; }
+ ORD-MAX/{NONWORD} { pop_return ORD_MAX; }
+ ORD-MIN/{NONWORD} { pop_return ORD_MIN; }
+ PI/{NONWORD} { pop_return PI; }
+ PRESENT-VALUE/{NONWORD} { pop_return PRESENT_VALUE; }
RANDOM{OSPC}{PARENS} { pop_return RANDOM; }
RANDOM{OSPC}[(] { pop_return RANDOM_SEED; }
RANDOM { pop_return RANDOM; }
- RANGE{OSPC}/[(]? { pop_return RANGE; }
- REM{OSPC}/[(]? { pop_return REM; }
- REVERSE{OSPC}/[(]? { pop_return REVERSE; }
- SECONDS-FROM-FORMATTED-TIME{OSPC}/[(]? { BEGIN(datetime_fmt);
+ RANGE/{NONWORD} { pop_return RANGE; }
+ REM/{NONWORD} { pop_return REM; }
+ REVERSE/{NONWORD} { pop_return REVERSE; }
+ SECONDS-FROM-FORMATTED-TIME/{NONWORD} { BEGIN(datetime_fmt);
return SECONDS_FROM_FORMATTED_TIME; }
- SECONDS-PAST-MIDNIGHT{OSPC}/[(]? { pop_return SECONDS_PAST_MIDNIGHT; }
- SIGN{OSPC}/[(]? { pop_return SIGN; }
- SIN{OSPC}/[(]? { pop_return SIN; }
- SMALLEST-ALGEBRAIC{OSPC}/[(]? { pop_return SMALLEST_ALGEBRAIC; }
- SQRT{OSPC}/[(]? { pop_return SQRT; }
- STANDARD-COMPARE{OSPC}/[(]? { pop_return STANDARD_COMPARE; }
- STANDARD-DEVIATION{OSPC}/[(]? { pop_return STANDARD_DEVIATION; }
- SUBSTITUTE{OSPC}/[(]? { pop_return SUBSTITUTE; }
- SUM{OSPC}/[(]? { pop_return SUM; }
- TAN{OSPC}/[(]? { pop_return TAN; }
- TEST-DATE-YYYYMMDD{OSPC}/[(]? { pop_return TEST_DATE_YYYYMMDD; }
- TEST-DAY-YYYYDDD{OSPC}/[(]? { pop_return TEST_DAY_YYYYDDD; }
- TEST-FORMATTED-DATETIME{OSPC}/[(]? { BEGIN(datetime_fmt); return TEST_FORMATTED_DATETIME; }
- TEST-NUMVAL{OSPC}/[(]? { pop_return TEST_NUMVAL; }
- TEST-NUMVAL-C{OSPC}/[(]? { pop_return TEST_NUMVAL_C; }
- TEST-NUMVAL-F{OSPC}/[(]? { pop_return TEST_NUMVAL_F; }
- TRIM{OSPC}/[(]? { pop_return TRIM; }
- ULENGTH{OSPC}/[(]? { pop_return ULENGTH; }
- UPOS{OSPC}/[(]? { pop_return UPOS; }
- UPPER-CASE{OSPC}/[(]? { pop_return UPPER_CASE; }
- USUBSTR{OSPC}/[(]? { pop_return USUBSTR; }
- USUPPLEMENTARY{OSPC}/[(]? { pop_return USUPPLEMENTARY; }
- UUID4{OSPC}/[(]? { pop_return UUID4; }
- UVALID{OSPC}/[(]? { pop_return UVALID; }
- UWIDTH{OSPC}/[(]? { pop_return UWIDTH; }
- VARIANCE{OSPC}/[(]? { pop_return VARIANCE; }
- WHEN-COMPILED{OSPC}/[(]? { pop_return WHEN_COMPILED; }
- YEAR-TO-YYYY{OSPC}/[(]? { pop_return YEAR_TO_YYYY; }
+ SECONDS-PAST-MIDNIGHT/{NONWORD} { pop_return SECONDS_PAST_MIDNIGHT; }
+ SIGN/{NONWORD} { pop_return SIGN; }
+ SIN/{NONWORD} { pop_return SIN; }
+ SMALLEST-ALGEBRAIC/{NONWORD} { pop_return SMALLEST_ALGEBRAIC; }
+ SQRT/{NONWORD} { pop_return SQRT; }
+ STANDARD-COMPARE/{NONWORD} { pop_return STANDARD_COMPARE; }
+ STANDARD-DEVIATION/{NONWORD} { pop_return STANDARD_DEVIATION; }
+ SUBSTITUTE/{NONWORD} { pop_return SUBSTITUTE; }
+ SUM/{NONWORD} { pop_return SUM; }
+ TAN/{NONWORD} { pop_return TAN; }
+ TEST-DATE-YYYYMMDD/{NONWORD} { pop_return TEST_DATE_YYYYMMDD; }
+ TEST-DAY-YYYYDDD/{NONWORD} { pop_return TEST_DAY_YYYYDDD; }
+ TEST-FORMATTED-DATETIME/{NONWORD} { BEGIN(datetime_fmt); return TEST_FORMATTED_DATETIME; }
+ TEST-NUMVAL/{NONWORD} { pop_return TEST_NUMVAL; }
+ TEST-NUMVAL-C/{NONWORD} { pop_return TEST_NUMVAL_C; }
+ TEST-NUMVAL-F/{NONWORD} { pop_return TEST_NUMVAL_F; }
+ TRIM/{NONWORD} { pop_return TRIM; }
+ ULENGTH/{NONWORD} { pop_return ULENGTH; }
+ UPOS/{NONWORD} { pop_return UPOS; }
+ UPPER-CASE/{NONWORD} { pop_return UPPER_CASE; }
+ USUBSTR/{NONWORD} { pop_return USUBSTR; }
+ USUPPLEMENTARY/{NONWORD} { pop_return USUPPLEMENTARY; }
+ UUID4/{NONWORD} { pop_return UUID4; }
+ UVALID/{NONWORD} { pop_return UVALID; }
+ UWIDTH/{NONWORD} { pop_return UWIDTH; }
+ VARIANCE/{NONWORD} { pop_return VARIANCE; }
+ WHEN-COMPILED/{NONWORD} { pop_return WHEN_COMPILED; }
+ YEAR-TO-YYYY/{NONWORD} { pop_return YEAR_TO_YYYY; }
+
+ /* Matches above include NONWORD because otherwise the NAME tests below would match longer. */
{NAME}{OSPC}/[(] { /* If /{OSPC}, "dangerous trailing context" */
auto name = null_trim(xstrdup(yytext));
diff --git a/gcc/cobol/scan_ante.h b/gcc/cobol/scan_ante.h
index d2faf5a..7f11532 100644
--- a/gcc/cobol/scan_ante.h
+++ b/gcc/cobol/scan_ante.h
@@ -313,8 +313,9 @@ bool scanner_normal() { return parsing.normal(); }
void scanner_parsing( int token, bool tf ) {
parsing.push( cdf_status_t(token, tf) );
if( yydebug ) {
- yywarn("%10s: parsing now %5s, depth %zu",
- keyword_str(token), boolalpha(parsing.on()), parsing.size());
+ yywarn("%10s: parsing now %5s, depth %lu",
+ keyword_str(token), boolalpha(parsing.on()),
+ gb4(parsing.size()));
parsing.splat();
}
}
@@ -336,8 +337,9 @@ void scanner_parsing_pop() {
}
parsing.pop();
if( yydebug ) {
- yywarn("%10s: parsing now %5s, depth %zu",
- keyword_str(CDF_END_IF), boolalpha(parsing.on()), parsing.size());
+ yywarn("%10s: parsing now %5s, depth %lu",
+ keyword_str(CDF_END_IF), boolalpha(parsing.on()),
+ gb4(parsing.size()));
parsing.splat();
}
}
@@ -577,7 +579,8 @@ binary_integer_usage( const char name[]) {
std::transform(name, name + strlen(name), uname, ftoupper);
dbgmsg("%s:%d: checking %s in %zu keyword_aliases",
- __func__, __LINE__, uname, keyword_aliases.size() );
+ __func__, __LINE__, uname,
+ keyword_aliases.size() );
std::string key = uname;
auto alias = keyword_aliases.find(key);
diff --git a/gcc/cobol/scan_post.h b/gcc/cobol/scan_post.h
index 85feac8..dc31519 100644
--- a/gcc/cobol/scan_post.h
+++ b/gcc/cobol/scan_post.h
@@ -260,13 +260,12 @@ prelex() {
while( is_cdf_token(token) ) {
if( ! run_cdf(token) ) {
- dbgmsg( ">>CDF parser failed" );
- return NO_CONDITION;
+ dbgmsg( ">>CDF parser failed, ydfchar %d", ydfchar );
}
// Return the CDF's discarded lookahead token, if extant.
token = ydfchar > 0? ydfchar : next_token();
if( token == NO_CONDITION && parsing.at_eof() ) {
- return token = YYEOF;
+ return YYEOF;
}
// Reenter cdf parser only if next token could affect parsing state.
@@ -375,7 +374,7 @@ yylex(void) {
token = prelex();
if( yy_flex_debug ) {
if( parsing.in_cdf() ) {
- dbgmsg( "%s:%d: %s routing %s to CDF parser", __func__, __LINE__,
+ dbgmsg( "%s:%d: <%s> routing %s to CDF parser", __func__, __LINE__,
start_condition_is(), keyword_str(token) );
} else if( !parsing.on() ) {
dbgmsg( "eating %s because conditional compilation is FALSE",
diff --git a/gcc/cobol/symbols.cc b/gcc/cobol/symbols.cc
index e540b40..2b9888c 100644
--- a/gcc/cobol/symbols.cc
+++ b/gcc/cobol/symbols.cc
@@ -56,7 +56,7 @@ class symbol_pair_t
{
const symbol_elem_t *first, *last;
public:
- symbol_pair_t( const symbol_elem_t * first, const symbol_elem_t * end = NULL )
+ explicit symbol_pair_t( const symbol_elem_t * first, const symbol_elem_t * end = NULL )
: first(first), last(end)
{}
@@ -160,8 +160,8 @@ symbol_table_extend() {
off_t len = symbols.size();
if( 0 != ftruncate(symbols.fd, len) ) {
- cbl_err( "%s:%d:could not extend symbol table to %zu elements",
- __func__, __LINE__, symbols.capacity);
+ cbl_err( "%s:%d: could not extend symbol table to %lu elements",
+ __func__, __LINE__, gb4(symbols.capacity));
}
/*
@@ -280,7 +280,7 @@ class group_size_t {
enum { constq = constant_e | quoted_e };
static symbol_elem_t
-elementize( cbl_field_t& field ) {
+elementize( const cbl_field_t& field ) {
symbol_elem_t sym (SymField);
sym.elem.field = field;
return sym;
@@ -907,7 +907,7 @@ end_of_group( const cbl_field_t *group, const cbl_field_t *field ) {
class eog_t {
const cbl_field_t * group;
public:
- eog_t( const symbol_elem_t *e ) : group(cbl_field_of(e)) {}
+ explicit eog_t( const symbol_elem_t *e ) : group(cbl_field_of(e)) {}
bool operator()( symbol_elem_t& e ) {
return e.type == SymField && end_of_group(group, cbl_field_of(&e));
@@ -1339,19 +1339,18 @@ immediately_follows( const cbl_field_t *field ) {
bool
is_variable_length( const cbl_field_t *field ) {
- bool odo = false;
- std::find_if( symbol_at(field_index(field)) + 1, symbols_end(),
- [&odo, field]( const auto& elem ) {
- if( elem.type == SymField ) {
- auto f = cbl_field_of(&elem);
- if( f->level <= field->level ) return true;
- if( f->occurs.depending_on ) {
- odo = true;
- return true;
- }
- }
- return false;
- } );
+ // RENAMES may be included in end_of_group.
+ size_t isym = field_index(field), esym = end_of_group(isym);
+ bool odo = std::any_of( symbol_at(isym) + 1, symbol_at_impl(esym),
+ [field]( const auto& elem ) {
+ if( elem.type == SymField ) {
+ auto f = cbl_field_of(&elem);
+ if( field->level < f->level ) { // exclude RENAMES
+ return 0 < f->occurs.depending_on;
+ }
+ }
+ return false;
+ } );
return odo;
}
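A standalone model of the new any_of scan (field_t here is a stand-in for cbl_field_t, reduced to the two members the predicate reads): a group is variable-length iff some entry subordinate to it, at a deeper level, names an OCCURS DEPENDING ON target, while same-or-shallower levels such as RENAMES entries are excluded by the level test.

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    struct field_t { int level; std::size_t depending_on; };

    static bool
    is_variable_length (const field_t &group,
                        const std::vector<field_t> &members)
    {
      return std::any_of (members.begin (), members.end (),
                          [&group] (const field_t &f)
                          { return group.level < f.level
                                   && f.depending_on != 0; });
    }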
@@ -1704,7 +1703,6 @@ symbols_update( size_t first, bool parsed_ok ) {
case 1:
pend = calculate_capacity(p);
if( dialect_mf() && is_table(field) ) {
- cbl_field_t *field = cbl_field_of(p);
if( field->data.memsize < field->size() ) {
field->data.memsize = field->size();
}
@@ -2102,7 +2100,7 @@ class parent_elem_set
private:
size_t parent_index;
public:
- parent_elem_set( size_t parent_index )
+ explicit parent_elem_set( size_t parent_index )
: parent_index(parent_index)
{}
void operator()( struct symbol_elem_t& e ) {
@@ -2419,9 +2417,9 @@ symbol_file_add( size_t program, cbl_file_t *file ) {
return e;
}
-struct symbol_elem_t *
-symbol_alphabet_add( size_t program, struct cbl_alphabet_t *alphabet ) {
- struct symbol_elem_t sym{ SymAlphabet, program };
+symbol_elem_t *
+symbol_alphabet_add( size_t program, const cbl_alphabet_t *alphabet ) {
+ symbol_elem_t sym{ SymAlphabet, program };
sym.elem.alphabet = *alphabet;
return symbol_add(&sym);
}
@@ -3230,7 +3228,6 @@ parser_symbol_add2( cbl_field_t *field ) {
static cbl_field_t *
new_literal_add( const char initial[], uint32_t len, enum cbl_field_attr_t attr ) {
- static char empty[2] = "\0";
cbl_field_t *field = NULL;
if( !(attr & quoted_e) )
{
@@ -3240,6 +3237,7 @@ new_literal_add( const char initial[], uint32_t len, enum cbl_field_attr_t attr
}
else
{
+ static char empty[2] = "\0";
field = new_temporary_impl(FldLiteralA);
field->attr |= attr;
field->data.initial = len > 0? initial : empty;
diff --git a/gcc/cobol/symbols.h b/gcc/cobol/symbols.h
index 059d4aa..154c9fe 100644
--- a/gcc/cobol/symbols.h
+++ b/gcc/cobol/symbols.h
@@ -173,7 +173,7 @@ class cbl_domain_elem_t {
{
if( value && ! is_numeric ) {
auto s = consistent_encoding_check(loc, value);
- if( s ) value = s;
+ if( s ) this->value = s;
}
}
const char *name() const { return value; }
@@ -641,7 +641,7 @@ struct cbl_refer_t;
struct cbl_span_t {
cbl_refer_t *from, *len;
- cbl_span_t( cbl_refer_t *from, cbl_refer_t *len = NULL )
+ explicit cbl_span_t( cbl_refer_t *from, cbl_refer_t *len = NULL )
: from(from), len(len) {};
bool is_active() const { return !( from == NULL && len == NULL ); }
@@ -660,12 +660,12 @@ struct cbl_refer_t {
cbl_span_t refmod; // substring bounds
cbl_refer_t()
- : field(NULL), prog_func(NULL)
+ : loc(), field(NULL), prog_func(NULL)
, all(NULL), addr_of(false)
, nsubscript(0), subscripts(NULL), refmod(NULL)
{}
cbl_refer_t( cbl_field_t *field, bool all = false )
- : field(field), prog_func(NULL)
+ : loc(), field(field), prog_func(NULL)
, all(all), addr_of(false)
, nsubscript(0), subscripts(NULL), refmod(NULL)
{}
@@ -675,14 +675,14 @@ struct cbl_refer_t {
, nsubscript(0), subscripts(NULL), refmod(NULL)
{}
cbl_refer_t( cbl_field_t *field, cbl_span_t& refmod )
- : field(field), prog_func(NULL)
+ : loc(), field(field), prog_func(NULL)
, all(false), addr_of(false)
, nsubscript(0), subscripts(NULL), refmod(refmod)
{}
cbl_refer_t( cbl_field_t *field,
size_t nsubscript, cbl_refer_t *subscripts,
cbl_span_t refmod = cbl_span_t(NULL) )
- : field(field), prog_func(NULL)
+ : loc(), field(field), prog_func(NULL)
, all(false), addr_of(false)
, nsubscript(nsubscript) , subscripts( new cbl_refer_t[nsubscript] )
, refmod(refmod)
@@ -690,7 +690,7 @@ struct cbl_refer_t {
std::copy(subscripts, subscripts + nsubscript, this->subscripts);
}
explicit cbl_refer_t( cbl_label_t *prog_func, bool addr_of = true )
- : field(NULL), prog_func(prog_func)
+ : loc(), field(NULL), prog_func(prog_func)
, all(false), addr_of(addr_of)
, nsubscript(0), subscripts(NULL), refmod(cbl_span_t(NULL))
{}
@@ -1419,10 +1419,10 @@ struct cbl_alphabet_t {
add_sequence( const YYLTYPE& loc, const unsigned char seq[] ) {
if( low_index == 0 ) low_index = seq[0];
- unsigned char high_value = last_index > 0? alphabet[last_index] + 1 : 0;
+ unsigned char last = last_index > 0? alphabet[last_index] + 1 : 0;
for( const unsigned char *p = seq; !end_of_string(p); p++ ) {
- assign(loc, *p, high_value++);
+ assign(loc, *p, last++);
}
}
@@ -1430,10 +1430,10 @@ struct cbl_alphabet_t {
add_interval( const YYLTYPE& loc, unsigned char low, unsigned char high ) {
if( low_index == 0 ) low_index = low;
- unsigned char high_value = alphabet[last_index];
+ unsigned char last = alphabet[last_index];
for( unsigned char ch = low; ch < high; ch++ ) {
- assign(loc, ch, high_value++);
+ assign(loc, ch, last++);
}
}
@@ -1524,15 +1524,6 @@ struct cbl_file_key_t {
fields[0] = field;
memset(name, '\0', sizeof(name));
}
- cbl_file_key_t( const cbl_file_key_t *that )
- : unique(that->unique)
- , leftmost(that->leftmost)
- , nfield(that->nfield)
- {
- memcpy(name, that->name, sizeof(name));
- fields = new size_t[nfield];
- std::copy( that->fields, that->fields + that->nfield, fields );
- }
cbl_file_key_t( cbl_name_t name,
const std::list<cbl_field_t *>& fields,
@@ -1636,10 +1627,7 @@ struct symbol_elem_t {
cbl_alphabet_t alphabet;
cbl_file_t file;
cbl_section_t section;
- symbol_elem_u() {
- static const cbl_field_t empty = {};
- field = empty;
- }
+ symbol_elem_u() : field() {}
} elem;
symbol_elem_t( symbol_type_t type = SymField, size_t program = 0 )
@@ -1926,7 +1914,7 @@ struct cbl_until_addresses_t {
struct cbl_label_addresses_t test; // The test at the bottom of the body
struct cbl_label_addresses_t testA; // Starting point of a TEST_AFTER loop
struct cbl_label_addresses_t setup; // The actual entry point
- size_t number_of_conditionals;
+ unsigned int number_of_conditionals;
struct cbl_label_addresses_t condover[MAXIMUM_UNTILS]; // Jumping over the conditional
struct cbl_label_addresses_t condinto[MAXIMUM_UNTILS]; // Jumping into the conditional
struct cbl_label_addresses_t condback[MAXIMUM_UNTILS]; // Jumping back from the conditional
@@ -1990,7 +1978,7 @@ struct cbl_prog_hier_t {
struct program_label_t {
size_t ordinal;
cbl_label_t label;
- program_label_t() : ordinal(0) {}
+ program_label_t() : ordinal(0), label() {}
program_label_t( const symbol_elem_t& e ) {
ordinal = symbol_index(&e);
label = e.elem.label;
@@ -2008,13 +1996,11 @@ struct cbl_prog_hier_t {
struct cbl_perform_tgt_t {
struct cbl_until_addresses_t addresses;
- cbl_perform_tgt_t() : ifrom(0), ito(0) {}
- cbl_perform_tgt_t( cbl_label_t * from, cbl_label_t *to = NULL )
- : ifrom( from? symbol_index(symbol_elem_of(from)) : 0 )
+ cbl_perform_tgt_t() : addresses(), ifrom(0), ito(0) {}
+ explicit cbl_perform_tgt_t( cbl_label_t * from, cbl_label_t *to = NULL )
+ : addresses(), ifrom( from? symbol_index(symbol_elem_of(from)) : 0 )
, ito( to? symbol_index(symbol_elem_of(to)) : 0 )
- {
- addresses = {};
- }
+ {}
cbl_label_t * from( cbl_label_t * label ) {
ifrom = symbol_index(symbol_elem_of(label));
@@ -2252,21 +2238,21 @@ size_t symbols_update( size_t first, bool parsed_ok = true );
void symbol_table_init(void);
void symbol_table_check(void);
-struct symbol_elem_t * symbol_typedef_add( size_t program,
- struct cbl_field_t *field );
-struct symbol_elem_t * symbol_field_add( size_t program,
- struct cbl_field_t *field );
-struct cbl_label_t * symbol_label_add( size_t program,
- struct cbl_label_t *label );
-struct cbl_label_t * symbol_program_add( size_t program, cbl_label_t *input );
-struct symbol_elem_t * symbol_special_add( size_t program,
- struct cbl_special_name_t *special );
-struct symbol_elem_t * symbol_alphabet_add( size_t program,
- struct cbl_alphabet_t *alphabet );
-struct symbol_elem_t * symbol_file_add( size_t program,
- struct cbl_file_t *file );
-struct symbol_elem_t * symbol_section_add( size_t program,
- struct cbl_section_t *section );
+symbol_elem_t * symbol_typedef_add( size_t program,
+ cbl_field_t *field );
+symbol_elem_t * symbol_field_add( size_t program,
+ cbl_field_t *field );
+cbl_label_t * symbol_label_add( size_t program,
+ cbl_label_t *label );
+cbl_label_t * symbol_program_add( size_t program, cbl_label_t *input );
+symbol_elem_t * symbol_special_add( size_t program,
+ cbl_special_name_t *special );
+symbol_elem_t * symbol_alphabet_add( size_t program,
+ const cbl_alphabet_t *alphabet );
+symbol_elem_t * symbol_file_add( size_t program,
+ cbl_file_t *file );
+symbol_elem_t * symbol_section_add( size_t program,
+ cbl_section_t *section );
void symbol_field_location( size_t ifield, const YYLTYPE& loc );
YYLTYPE symbol_field_location( size_t ifield );
diff --git a/gcc/cobol/symfind.cc b/gcc/cobol/symfind.cc
index b4b1b3a..224f3ff 100644
--- a/gcc/cobol/symfind.cc
+++ b/gcc/cobol/symfind.cc
@@ -275,8 +275,8 @@ update_symbol_map( symbol_elem_t *e ) {
class is_name {
const char *name;
public:
- is_name( const char *name ) : name(name) {}
- bool operator()( symbol_map_t::value_type& elem ) {
+ explicit is_name( const char *name ) : name(name) {}
+ bool operator()( const symbol_map_t::value_type& elem ) {
const bool tf = elem.first == name;
return tf;
}
@@ -298,7 +298,7 @@ class reduce_ancestry {
static symbol_map_t::mapped_type
candidates_only( const symbol_map_t::value_type& elem ) { return elem.second; }
public:
- reduce_ancestry( const symbol_map_t& groups )
+ explicit reduce_ancestry( const symbol_map_t& groups )
: candidates( groups.size() )
{
std::transform( groups.begin(), groups.end(), candidates.begin(),
@@ -331,7 +331,7 @@ public:
class different_program {
size_t program;
public:
- different_program( size_t program ) : program(program) {}
+ explicit different_program( size_t program ) : program(program) {}
bool operator()( const symbol_map_t::value_type& item ) const {
return ! item.first.same_program(program);
}
@@ -346,7 +346,7 @@ class in_scope {
}
public:
- in_scope( size_t program ) : program(program) {}
+ explicit in_scope( size_t program ) : program(program) {}
// A symbol is in scope if it's defined by this program or by an ancestor.
bool operator()( const symbol_map_t::value_type& item ) const {
@@ -561,7 +561,7 @@ symbol_find( size_t program, std::list<const char *> names ) {
class in_group {
size_t group;
public:
- in_group( size_t group ) : group(group) {}
+ explicit in_group( size_t group ) : group(group) {}
bool operator()( symbol_map_t::const_reference elem ) const {
return 0 < std::count( elem.second.begin(),
diff --git a/gcc/cobol/util.cc b/gcc/cobol/util.cc
index e92f069..82a72f5 100644
--- a/gcc/cobol/util.cc
+++ b/gcc/cobol/util.cc
@@ -95,6 +95,22 @@ get_current_dir_name ()
}
#endif
+/*
+ * For printing messages, usually the size of the thing is some kind of string
+ * length, and doesn't really need a size_t. For message formatting, use a
+ * simple unsigned long, and warn if that's no good. "gb4" here stands for
+ * "4 Gigabytes".
+ */
+unsigned long
+gb4( size_t input ) {
+ if( input != static_cast<unsigned long>(input) ) {
+ yywarn("size too large to print: %lx:%lx",
+ (unsigned long)(input >> (4 * sizeof(unsigned long))),
+ static_cast<unsigned long>(input));
+ }
+ return input;
+}
+
const char *
symbol_type_str( enum symbol_type_t type )
{
diff --git a/gcc/cobol/util.h b/gcc/cobol/util.h
index 9a968ea..44db645 100644
--- a/gcc/cobol/util.h
+++ b/gcc/cobol/util.h
@@ -48,5 +48,6 @@ void cobol_set_pp_option(int opt);
const char * cobol_filename_restore();
const char * cobol_lineno_save();
+unsigned long gb4( size_t input );
#endif
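The helper exists so the message formats above can use plain %lu rather than %zu; a standalone model of the intended round trip (the fprintf stands in for the real varargs yywarn):

    #include <cstdio>
    #include <cstddef>

    // Narrow a size_t to unsigned long for a "%lu" argument; warn if the
    // value does not survive the narrowing (possible on hosts where
    // unsigned long is 32 bits).
    static unsigned long
    gb4 (std::size_t input)
    {
      if (input != static_cast<unsigned long> (input))
        std::fprintf (stderr, "size too large to print: %zx\n", input);
      return static_cast<unsigned long> (input);
    }

    int
    main (void)
    {
      std::size_t depth = 3;
      std::printf ("parsing now, depth %lu\n", gb4 (depth));
      return 0;
    }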
diff --git a/gcc/config/riscv/riscv-v.cc b/gcc/config/riscv/riscv-v.cc
index a41317f..6a7eb71 100644
--- a/gcc/config/riscv/riscv-v.cc
+++ b/gcc/config/riscv/riscv-v.cc
@@ -5568,6 +5568,7 @@ expand_vx_binary_vec_vec_dup (rtx op_0, rtx op_1, rtx op_2,
case XOR:
case MULT:
case DIV:
+ case UDIV:
icode = code_for_pred_scalar (code, mode);
break;
default:
diff --git a/gcc/config/riscv/riscv.cc b/gcc/config/riscv/riscv.cc
index 413eae0..99eeba6 100644
--- a/gcc/config/riscv/riscv.cc
+++ b/gcc/config/riscv/riscv.cc
@@ -3943,6 +3943,7 @@ riscv_rtx_costs (rtx x, machine_mode mode, int outer_code, int opno ATTRIBUTE_UN
switch (GET_CODE (op))
{
case DIV:
+ case UDIV:
*total = get_vector_binary_rtx_cost (op, scalar2vr_cost);
break;
default:
diff --git a/gcc/config/riscv/vector-iterators.md b/gcc/config/riscv/vector-iterators.md
index 86f31f3..36301b0 100644
--- a/gcc/config/riscv/vector-iterators.md
+++ b/gcc/config/riscv/vector-iterators.md
@@ -4042,7 +4042,7 @@
])
(define_code_iterator any_int_binop_no_shift_v_vdup [
- plus minus and ior xor mult div
+ plus minus and ior xor mult div udiv
])
(define_code_iterator any_int_binop_no_shift_vdup_v [
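Taken together, the three RISC-V hunks teach the vector-scalar (vx) combine path and its cost model about unsigned division; a loop of the shape the new vx-*-u*.c tests exercise can then divide by the broadcast scalar directly instead of first materializing a full vector:

    /* Expected (with this patch) to vectorize to vdivu.vx; the exact
       target options live in the testsuite files listed above.  */
    void
    vdiv_vx_u32 (unsigned int *a, unsigned int b, int n)
    {
      for (int i = 0; i < n; i++)
        a[i] = a[i] / b;
    }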
diff --git a/gcc/config/rs6000/rs6000-builtin.cc b/gcc/config/rs6000/rs6000-builtin.cc
index 1118023..bc1580f 100644
--- a/gcc/config/rs6000/rs6000-builtin.cc
+++ b/gcc/config/rs6000/rs6000-builtin.cc
@@ -915,7 +915,7 @@ fold_build_vec_cmp (tree_code code, tree type, tree arg0, tree arg1,
tree cmp_type = truth_type_for (type);
tree zero_vec = build_zero_cst (type);
tree minus_one_vec = build_minus_one_cst (type);
- tree temp = create_tmp_reg_or_ssa_name (cmp_type);
+ tree temp = make_ssa_name (cmp_type);
gimple *g = gimple_build_assign (temp, code, arg0, arg1);
gsi_insert_before (gsi, g, GSI_SAME_STMT);
return fold_build3 (VEC_COND_EXPR, type, temp, minus_one_vec, zero_vec);
@@ -1106,7 +1106,7 @@ rs6000_gimple_fold_mma_builtin (gimple_stmt_iterator *gsi,
if (TREE_TYPE (src_ptr) != src_type)
src_ptr = build1 (NOP_EXPR, src_type, src_ptr);
- tree src = create_tmp_reg_or_ssa_name (TREE_TYPE (src_type));
+ tree src = make_ssa_name (TREE_TYPE (src_type));
gimplify_assign (src, build_simple_mem_ref (src_ptr), &new_seq);
/* If we are not disassembling an accumulator/pair or our destination is
@@ -1130,7 +1130,7 @@ rs6000_gimple_fold_mma_builtin (gimple_stmt_iterator *gsi,
{
new_decl = rs6000_builtin_decls[RS6000_BIF_XXMFACC_INTERNAL];
new_call = gimple_build_call (new_decl, 1, src);
- src = create_tmp_reg_or_ssa_name (vector_quad_type_node);
+ src = make_ssa_name (vector_quad_type_node);
gimple_call_set_lhs (new_call, src);
gimple_seq_add_stmt (&new_seq, new_call);
}
@@ -1146,7 +1146,7 @@ rs6000_gimple_fold_mma_builtin (gimple_stmt_iterator *gsi,
unsigned index = WORDS_BIG_ENDIAN ? i : nvec - 1 - i;
tree dst = build2 (MEM_REF, unsigned_V16QI_type_node, dst_base,
build_int_cst (dst_type, index * 16));
- tree dstssa = create_tmp_reg_or_ssa_name (unsigned_V16QI_type_node);
+ tree dstssa = make_ssa_name (unsigned_V16QI_type_node);
new_call = gimple_build_call (new_decl, 2, src,
build_int_cstu (uint16_type_node, i));
gimple_call_set_lhs (new_call, dstssa);
@@ -1204,7 +1204,7 @@ rs6000_gimple_fold_mma_builtin (gimple_stmt_iterator *gsi,
{
/* This built-in has a pass-by-reference accumulator input, so load it
into a temporary accumulator for use as a pass-by-value input. */
- op[0] = create_tmp_reg_or_ssa_name (vector_quad_type_node);
+ op[0] = make_ssa_name (vector_quad_type_node);
for (unsigned i = 1; i < nopnds; i++)
op[i] = gimple_call_arg (stmt, i);
gimplify_assign (op[0], build_simple_mem_ref (acc), &new_seq);
@@ -1252,9 +1252,9 @@ rs6000_gimple_fold_mma_builtin (gimple_stmt_iterator *gsi,
}
if (fncode == RS6000_BIF_BUILD_PAIR || fncode == RS6000_BIF_ASSEMBLE_PAIR_V)
- lhs = create_tmp_reg_or_ssa_name (vector_pair_type_node);
+ lhs = make_ssa_name (vector_pair_type_node);
else
- lhs = create_tmp_reg_or_ssa_name (vector_quad_type_node);
+ lhs = make_ssa_name (vector_quad_type_node);
gimple_call_set_lhs (new_call, lhs);
gimple_seq_add_stmt (&new_seq, new_call);
gimplify_assign (build_simple_mem_ref (acc), lhs, &new_seq);
@@ -1450,7 +1450,7 @@ rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
arg0 = gimple_call_arg (stmt, 0);
arg1 = gimple_call_arg (stmt, 1);
lhs = gimple_call_lhs (stmt);
- temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
+ temp = make_ssa_name (TREE_TYPE (arg1));
g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
gimple_set_location (g, gimple_location (stmt));
gsi_insert_before (gsi, g, GSI_SAME_STMT);
@@ -1472,7 +1472,7 @@ rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
arg0 = gimple_call_arg (stmt, 0);
arg1 = gimple_call_arg (stmt, 1);
lhs = gimple_call_lhs (stmt);
- temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
+ temp = make_ssa_name (TREE_TYPE (arg1));
g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
gimple_set_location (g, gimple_location (stmt));
gsi_insert_before (gsi, g, GSI_SAME_STMT);
@@ -1512,7 +1512,7 @@ rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
arg0 = gimple_call_arg (stmt, 0);
arg1 = gimple_call_arg (stmt, 1);
lhs = gimple_call_lhs (stmt);
- temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
+ temp = make_ssa_name (TREE_TYPE (arg1));
g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
gimple_set_location (g, gimple_location (stmt));
gsi_insert_before (gsi, g, GSI_SAME_STMT);
@@ -1552,7 +1552,7 @@ rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
arg0 = gimple_call_arg (stmt, 0);
arg1 = gimple_call_arg (stmt, 1);
lhs = gimple_call_lhs (stmt);
- temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
+ temp = make_ssa_name (TREE_TYPE (arg1));
g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
gimple_set_location (g, gimple_location (stmt));
gsi_insert_before (gsi, g, GSI_SAME_STMT);
@@ -1643,7 +1643,7 @@ rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
arg0 = gimple_call_arg (stmt, 0);
arg1 = gimple_call_arg (stmt, 1);
lhs = gimple_call_lhs (stmt);
- temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
+ temp = make_ssa_name (TREE_TYPE (arg1));
g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
gimple_set_location (g, gimple_location (stmt));
gsi_insert_before (gsi, g, GSI_SAME_STMT);
diff --git a/gcc/cp/decl2.cc b/gcc/cp/decl2.cc
index e3fbc40..2bbc618 100644
--- a/gcc/cp/decl2.cc
+++ b/gcc/cp/decl2.cc
@@ -6269,6 +6269,33 @@ mark_single_function (tree expr, tsubst_flags_t complain)
return true;
}
+/* True iff we have started, but not finished, defining FUNCTION_DECL DECL. */
+
+bool
+fn_being_defined (tree decl)
+{
+ /* DECL_INITIAL is set to error_mark_node in grokfndecl for a definition, and
+ changed to BLOCK by poplevel at the end of the function. */
+ return (TREE_CODE (decl) == FUNCTION_DECL
+ && DECL_INITIAL (decl) == error_mark_node);
+}
+
+/* True if DECL is an instantiation of a function template currently being
+ defined. */
+
+bool
+fn_template_being_defined (tree decl)
+{
+ if (TREE_CODE (decl) != FUNCTION_DECL
+ || !DECL_LANG_SPECIFIC (decl)
+ || !DECL_TEMPLOID_INSTANTIATION (decl)
+ || DECL_TEMPLATE_INSTANTIATED (decl))
+ return false;
+ tree tinfo = DECL_TEMPLATE_INFO (decl);
+ tree pattern = DECL_TEMPLATE_RESULT (TI_TEMPLATE (tinfo));
+ return fn_being_defined (pattern);
+}
+
/* Mark DECL (either a _DECL or a BASELINK) as "used" in the program.
If DECL is a specialization or implicitly declared class member,
generate the actual definition. Return false if something goes
@@ -6422,6 +6449,9 @@ mark_used (tree decl, tsubst_flags_t complain /* = tf_warning_or_error */)
maybe_instantiate_decl (decl);
if (!decl_dependent_p (decl)
+ /* Don't require this yet for an instantiation of a function template
+ we're currently defining (c++/120555). */
+ && !fn_template_being_defined (decl)
&& !require_deduced_type (decl, complain))
return false;
@@ -6436,9 +6466,6 @@ mark_used (tree decl, tsubst_flags_t complain /* = tf_warning_or_error */)
&& uses_template_parms (DECL_TI_ARGS (decl)))
return true;
- if (!require_deduced_type (decl, complain))
- return false;
-
if (builtin_pack_fn_p (decl))
{
error ("use of built-in parameter pack %qD outside of a template",
diff --git a/gcc/diagnostic-format-html.cc b/gcc/diagnostic-format-html.cc
index 05d4273..ea2dbbb 100644
--- a/gcc/diagnostic-format-html.cc
+++ b/gcc/diagnostic-format-html.cc
@@ -49,256 +49,6 @@ html_generation_options::html_generation_options ()
{
}
-namespace xml {
-
-/* Disable warnings about quoting issues in the pp_xxx calls below
- that (intentionally) don't follow GCC diagnostic conventions. */
-#if __GNUC__ >= 10
-# pragma GCC diagnostic push
-# pragma GCC diagnostic ignored "-Wformat-diag"
-#endif
-
-
-/* Implementation. */
-
-static void
-write_escaped_text (pretty_printer *pp, const char *text)
-{
- gcc_assert (text);
-
- for (const char *p = text; *p; ++p)
- {
- char ch = *p;
- switch (ch)
- {
- default:
- pp_character (pp, ch);
- break;
- case '\'':
- pp_string (pp, "&apos;");
- break;
- case '"':
- pp_string (pp, "&quot;");
- break;
- case '&':
- pp_string (pp, "&amp;");
- break;
- case '<':
- pp_string (pp, "&lt;");
- break;
- case '>':
- pp_string (pp, "&gt;");
- break;
- }
- }
-}
-
-/* struct node. */
-
-void
-node::dump (FILE *out) const
-{
- pretty_printer pp;
- pp.set_output_stream (out);
- write_as_xml (&pp, 0, true);
- pp_flush (&pp);
-}
-
-/* struct text : public node. */
-
-void
-text::write_as_xml (pretty_printer *pp, int depth, bool indent) const
-{
- if (indent)
- {
- for (int i = 0; i < depth; ++i)
- pp_string (pp, " ");
- }
- write_escaped_text (pp, m_str.c_str ());
- if (indent)
- pp_newline (pp);
-}
-
-/* struct node_with_children : public node. */
-
-void
-node_with_children::add_child (std::unique_ptr<node> node)
-{
- gcc_assert (node.get ());
- m_children.push_back (std::move (node));
-}
-
-void
-node_with_children::add_text (std::string str)
-{
- // Consolidate runs of text
- if (!m_children.empty ())
- if (text *t = m_children.back ()->dyn_cast_text ())
- {
- t->m_str += std::move (str);
- return;
- }
- add_child (std::make_unique <text> (std::move (str)));
-}
-
-
-/* struct document : public node_with_children. */
-
-void
-document::write_as_xml (pretty_printer *pp, int depth, bool indent) const
-{
- pp_string (pp, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
- pp_string (pp, "<!DOCTYPE html\n"
- " PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\"\n"
- " \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">");
- if (indent)
- pp_newline (pp);
- for (auto &iter : m_children)
- iter->write_as_xml (pp, depth, indent);
-}
-
-/* struct element : public node_with_children. */
-
-void
-element::write_as_xml (pretty_printer *pp, int depth, bool indent) const
-{
- if (indent)
- {
- for (int i = 0; i < depth; ++i)
- pp_string (pp, " ");
- }
-
- pp_printf (pp, "<%s", m_kind.c_str ());
- for (auto &key : m_key_insertion_order)
- {
- auto iter = m_attributes.find (key);
- if (iter != m_attributes.end ())
- {
- pp_printf (pp, " %s=\"", key.c_str ());
- write_escaped_text (pp, iter->second.c_str ());
- pp_string (pp, "\"");
- }
- }
- if (m_children.empty ())
- pp_string (pp, "/>");
- else
- {
- const bool indent_children = m_preserve_whitespace ? false : indent;
- pp_string (pp, ">");
- if (indent_children)
- pp_newline (pp);
- for (auto &child : m_children)
- child->write_as_xml (pp, depth + 1, indent_children);
- if (indent_children)
- {
- for (int i = 0; i < depth; ++i)
- pp_string (pp, " ");
- }
- pp_printf (pp, "</%s>", m_kind.c_str ());
- }
-
- if (indent)
- pp_newline (pp);
-}
-
-void
-element::set_attr (const char *name, std::string value)
-{
- auto iter = m_attributes.find (name);
- if (iter == m_attributes.end ())
- m_key_insertion_order.push_back (name);
- m_attributes[name] = std::move (value);
-}
-
-// struct raw : public node
-
-void
-raw::write_as_xml (pretty_printer *pp,
- int /*depth*/, bool /*indent*/) const
-{
- pp_string (pp, m_xml_src.c_str ());
-}
-
-#if __GNUC__ >= 10
-# pragma GCC diagnostic pop
-#endif
-
-// class printer
-
-printer::printer (element &insertion_point)
-{
- m_open_tags.push_back (&insertion_point);
-}
-
-void
-printer::push_tag (std::string name,
- bool preserve_whitespace)
-{
- push_element
- (std::make_unique<element> (std::move (name),
- preserve_whitespace));
-}
-
-void
-printer::push_tag_with_class (std::string name, std::string class_,
- bool preserve_whitespace)
-{
- auto new_element
- = std::make_unique<element> (std::move (name),
- preserve_whitespace);
- new_element->set_attr ("class", class_);
- push_element (std::move (new_element));
-}
-
-void
-printer::pop_tag ()
-{
- m_open_tags.pop_back ();
-}
-
-void
-printer::set_attr (const char *name, std::string value)
-{
- m_open_tags.back ()->set_attr (name, value);
-}
-
-void
-printer::add_text (std::string text)
-{
- element *parent = m_open_tags.back ();
- parent->add_text (std::move (text));
-}
-
-void
-printer::add_raw (std::string text)
-{
- element *parent = m_open_tags.back ();
- parent->add_child (std::make_unique<xml::raw> (std::move (text)));
-}
-
-void
-printer::push_element (std::unique_ptr<element> new_element)
-{
- element *parent = m_open_tags.back ();
- m_open_tags.push_back (new_element.get ());
- parent->add_child (std::move (new_element));
-}
-
-void
-printer::append (std::unique_ptr<node> new_node)
-{
- element *parent = m_open_tags.back ();
- parent->add_child (std::move (new_node));
-}
-
-element *
-printer::get_insertion_point () const
-{
- return m_open_tags.back ();
-}
-
-} // namespace xml
-
class html_builder;
/* Concrete buffering implementation subclass for HTML output. */
@@ -569,6 +319,24 @@ const char * const HTML_SCRIPT
" });\n"
" highlight_current_focus_idx ();\n");
+struct html_doctypedecl : public xml::doctypedecl
+{
+ void write_as_xml (pretty_printer *pp,
+ int depth, bool indent) const final override
+ {
+ if (indent)
+ {
+ for (int i = 0; i < depth; ++i)
+ pp_string (pp, " ");
+ }
+ pp_string (pp, "<!DOCTYPE html\n"
+ " PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\"\n"
+ " \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">");
+ if (indent)
+ pp_newline (pp);
+ }
+};
+
/* html_builder's ctor. */
html_builder::html_builder (diagnostic_context &context,
@@ -586,6 +354,7 @@ html_builder::html_builder (diagnostic_context &context,
gcc_assert (m_line_maps);
m_document = std::make_unique<xml::document> ();
+ m_document->m_doctypedecl = std::make_unique<html_doctypedecl> ();
{
auto html_element = std::make_unique<xml::element> ("html", false);
html_element->set_attr ("xmlns",
@@ -1288,67 +1057,6 @@ test_metadata ()
}
}
-static void
-test_printer ()
-{
- xml::element top ("top", false);
- xml::printer xp (top);
- xp.push_tag ("foo");
- xp.add_text ("hello");
- xp.push_tag ("bar");
- xp.set_attr ("size", "3");
- xp.set_attr ("color", "red");
- xp.add_text ("world");
- xp.push_tag ("baz");
- xp.pop_tag ();
- xp.pop_tag ();
- xp.pop_tag ();
-
- pretty_printer pp;
- top.write_as_xml (&pp, 0, true);
- ASSERT_STREQ
- (pp_formatted_text (&pp),
- "<top>\n"
- " <foo>\n"
- " hello\n"
- " <bar size=\"3\" color=\"red\">\n"
- " world\n"
- " <baz/>\n"
- " </bar>\n"
- " </foo>\n"
- "</top>\n");
-}
-
-// Verify that element attributes preserve insertion order.
-
-static void
-test_attribute_ordering ()
-{
- xml::element top ("top", false);
- xml::printer xp (top);
- xp.push_tag ("chronological");
- xp.set_attr ("maldon", "991");
- xp.set_attr ("hastings", "1066");
- xp.set_attr ("edgehill", "1642");
- xp.set_attr ("naseby", "1645");
- xp.pop_tag ();
- xp.push_tag ("alphabetical");
- xp.set_attr ("edgehill", "1642");
- xp.set_attr ("hastings", "1066");
- xp.set_attr ("maldon", "991");
- xp.set_attr ("naseby", "1645");
- xp.pop_tag ();
-
- pretty_printer pp;
- top.write_as_xml (&pp, 0, true);
- ASSERT_STREQ
- (pp_formatted_text (&pp),
- "<top>\n"
- " <chronological maldon=\"991\" hastings=\"1066\" edgehill=\"1642\" naseby=\"1645\"/>\n"
- " <alphabetical edgehill=\"1642\" hastings=\"1066\" maldon=\"991\" naseby=\"1645\"/>\n"
- "</top>\n");
-}
-
/* Run all of the selftests within this file. */
void
@@ -1357,8 +1065,6 @@ diagnostic_format_html_cc_tests ()
auto_fix_quotes fix_quotes;
test_simple_log ();
test_metadata ();
- test_printer ();
- test_attribute_ordering ();
}
} // namespace selftest
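For orientation (not part of the patch): combining the relocated xml::document output (presumably now in the xml.cc whose selftests are registered below via xml_cc_tests) with the html_doctypedecl added above, the serialized report still opens with the same prolog as before the refactoring:

    <?xml version="1.0" encoding="UTF-8"?>
    <!DOCTYPE html
       PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
       "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">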
diff --git a/gcc/fortran/f95-lang.cc b/gcc/fortran/f95-lang.cc
index 1f09553..bb4ce6d 100644
--- a/gcc/fortran/f95-lang.cc
+++ b/gcc/fortran/f95-lang.cc
@@ -564,7 +564,7 @@ gfc_builtin_function (tree decl)
return decl;
}
-/* So far we need just these 10 attribute types. */
+/* So far we need just these 12 attribute types. */
#define ATTR_NULL 0
#define ATTR_LEAF_LIST (ECF_LEAF)
#define ATTR_NOTHROW_LEAF_LIST (ECF_NOTHROW | ECF_LEAF)
@@ -580,6 +580,7 @@ gfc_builtin_function (tree decl)
#define ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST \
(ECF_COLD | ECF_NORETURN | \
ECF_NOTHROW | ECF_LEAF)
+#define ATTR_PURE_NOTHROW_LIST (ECF_PURE | ECF_NOTHROW)
static void
gfc_define_builtin (const char *name, tree type, enum built_in_function code,
diff --git a/gcc/fortran/gfortran.h b/gcc/fortran/gfortran.h
index e461aa6..f73b5f9 100644
--- a/gcc/fortran/gfortran.h
+++ b/gcc/fortran/gfortran.h
@@ -3302,8 +3302,10 @@ typedef struct
int flag_init_logical;
int flag_init_character;
char flag_init_character_value;
- bool disable_omp_is_initial_device;
- bool disable_acc_on_device;
+ bool disable_omp_is_initial_device:1;
+ bool disable_omp_get_initial_device:1;
+ bool disable_omp_get_num_devices:1;
+ bool disable_acc_on_device:1;
int fpe;
int fpe_summary;
diff --git a/gcc/fortran/options.cc b/gcc/fortran/options.cc
index ddddc1c..d3c9066 100644
--- a/gcc/fortran/options.cc
+++ b/gcc/fortran/options.cc
@@ -883,6 +883,10 @@ gfc_handle_option (size_t scode, const char *arg, HOST_WIDE_INT value,
return false; /* Not supported. */
if (!strcmp ("omp_is_initial_device", arg))
gfc_option.disable_omp_is_initial_device = true;
+ else if (!strcmp ("omp_get_initial_device", arg))
+ gfc_option.disable_omp_get_initial_device = true;
+ else if (!strcmp ("omp_get_num_devices", arg))
+ gfc_option.disable_omp_get_num_devices = true;
else if (!strcmp ("acc_on_device", arg))
gfc_option.disable_acc_on_device = true;
else
diff --git a/gcc/fortran/trans-expr.cc b/gcc/fortran/trans-expr.cc
index 74d4265..c8a2076 100644
--- a/gcc/fortran/trans-expr.cc
+++ b/gcc/fortran/trans-expr.cc
@@ -4635,6 +4635,16 @@ get_builtin_fn (gfc_symbol * sym)
&& !strcmp (sym->name, "omp_is_initial_device"))
return builtin_decl_explicit (BUILT_IN_OMP_IS_INITIAL_DEVICE);
+ if (!gfc_option.disable_omp_get_initial_device
+ && flag_openmp && sym->attr.function && sym->ts.type == BT_INTEGER
+ && !strcmp (sym->name, "omp_get_initial_device"))
+ return builtin_decl_explicit (BUILT_IN_OMP_GET_INITIAL_DEVICE);
+
+ if (!gfc_option.disable_omp_get_num_devices
+ && flag_openmp && sym->attr.function && sym->ts.type == BT_INTEGER
+ && !strcmp (sym->name, "omp_get_num_devices"))
+ return builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_DEVICES);
+
if (!gfc_option.disable_acc_on_device
&& flag_openacc && sym->attr.function && sym->ts.type == BT_LOGICAL
&& !strcmp (sym->name, "acc_on_device_h"))
diff --git a/gcc/gimple-fold.cc b/gcc/gimple-fold.cc
index 185f9db..729080a 100644
--- a/gcc/gimple-fold.cc
+++ b/gcc/gimple-fold.cc
@@ -191,16 +191,6 @@ can_refer_decl_in_current_unit_p (tree decl, tree from_decl)
return !node || !node->inlined_to;
}
-/* Create a temporary for TYPE for a statement STMT. If the current function
- is in SSA form, a SSA name is created. Otherwise a temporary register
- is made. */
-
-tree
-create_tmp_reg_or_ssa_name (tree type, gimple *stmt)
-{
- return make_ssa_name (type, stmt);
-}
-
/* CVAL is value taken from DECL_INITIAL of variable. Try to transform it into
acceptable form for is_gimple_min_invariant.
FROM_DECL (if non-NULL) specify variable whose constructor contains CVAL. */
@@ -1038,8 +1028,7 @@ gimple_fold_builtin_memory_op (gimple_stmt_iterator *gsi,
{
new_stmt = gimple_build_assign (NULL_TREE, srcmem);
srcmem
- = create_tmp_reg_or_ssa_name (TREE_TYPE (srcmem),
- new_stmt);
+ = make_ssa_name (TREE_TYPE (srcmem), new_stmt);
gimple_assign_set_lhs (new_stmt, srcmem);
gimple_set_vuse (new_stmt, gimple_vuse (stmt));
gimple_set_location (new_stmt, loc);
@@ -1308,8 +1297,7 @@ gimple_fold_builtin_memory_op (gimple_stmt_iterator *gsi,
if (! is_gimple_min_invariant (srcvar))
{
new_stmt = gimple_build_assign (NULL_TREE, srcvar);
- srcvar = create_tmp_reg_or_ssa_name (TREE_TYPE (srcvar),
- new_stmt);
+ srcvar = make_ssa_name (TREE_TYPE (srcvar), new_stmt);
gimple_assign_set_lhs (new_stmt, srcvar);
gimple_set_vuse (new_stmt, gimple_vuse (stmt));
gimple_set_location (new_stmt, loc);
@@ -2281,7 +2269,7 @@ gimple_fold_builtin_strchr (gimple_stmt_iterator *gsi, bool is_strrchr)
gimple_seq stmts = NULL;
gimple *new_stmt = gimple_build_call (strlen_fn, 1, str);
gimple_set_location (new_stmt, loc);
- len = create_tmp_reg_or_ssa_name (size_type_node);
+ len = make_ssa_name (size_type_node);
gimple_call_set_lhs (new_stmt, len);
gimple_seq_add_stmt_without_update (&stmts, new_stmt);
@@ -2427,7 +2415,7 @@ gimple_fold_builtin_strcat (gimple_stmt_iterator *gsi, tree dst, tree src)
gimple_seq stmts = NULL, stmts2;
gimple *repl = gimple_build_call (strlen_fn, 1, dst);
gimple_set_location (repl, loc);
- newdst = create_tmp_reg_or_ssa_name (size_type_node);
+ newdst = make_ssa_name (size_type_node);
gimple_call_set_lhs (repl, newdst);
gimple_seq_add_stmt_without_update (&stmts, repl);
@@ -2660,7 +2648,7 @@ gimple_load_first_char (location_t loc, tree str, gimple_seq *stmts)
tree temp = fold_build2_loc (loc, MEM_REF, cst_uchar_node, str, off0);
gassign *stmt = gimple_build_assign (NULL_TREE, temp);
- var = create_tmp_reg_or_ssa_name (cst_uchar_node, stmt);
+ var = make_ssa_name (cst_uchar_node, stmt);
gimple_assign_set_lhs (stmt, var);
gimple_seq_add_stmt_without_update (stmts, stmt);
@@ -2831,7 +2819,7 @@ gimple_fold_builtin_string_compare (gimple_stmt_iterator *gsi)
if (lhs)
{
- tree c = create_tmp_reg_or_ssa_name (integer_type_node);
+ tree c = make_ssa_name (integer_type_node);
stmt = gimple_build_assign (c, NOP_EXPR, var);
gimple_seq_add_stmt_without_update (&stmts, stmt);
@@ -2853,11 +2841,11 @@ gimple_fold_builtin_string_compare (gimple_stmt_iterator *gsi)
if (lhs)
{
- tree c1 = create_tmp_reg_or_ssa_name (integer_type_node);
+ tree c1 = make_ssa_name (integer_type_node);
gassign *convert1 = gimple_build_assign (c1, NOP_EXPR, temp1);
gimple_seq_add_stmt_without_update (&stmts, convert1);
- tree c2 = create_tmp_reg_or_ssa_name (integer_type_node);
+ tree c2 = make_ssa_name (integer_type_node);
gassign *convert2 = gimple_build_assign (c2, NOP_EXPR, temp2);
gimple_seq_add_stmt_without_update (&stmts, convert2);
@@ -4097,6 +4085,40 @@ gimple_fold_builtin_omp_is_initial_device (gimple_stmt_iterator *gsi)
return false;
}
+/* omp_get_initial_device was the same as omp_get_num_devices in
+   OpenMP 5.0 (implicitly) and 5.1 (explicitly); since 6.0 it is
+   unspecified whether -1 or omp_get_num_devices() is returned.  For
+   better backward compatibility, use omp_get_num_devices() on the
+   host - and -1 on the device (where the result is unspecified).  */
+
+static bool
+gimple_fold_builtin_omp_get_initial_device (gimple_stmt_iterator *gsi)
+{
+#if ACCEL_COMPILER
+ replace_call_with_value (gsi, build_int_cst (integer_type_node, -1));
+#else
+ if (!ENABLE_OFFLOADING)
+ replace_call_with_value (gsi, integer_zero_node);
+ else
+ {
+ tree fn = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_DEVICES);
+ gcall *repl = gimple_build_call (fn, 0);
+ replace_call_with_call_and_fold (gsi, repl);
+ }
+#endif
+ return true;
+}
+
+static bool
+gimple_fold_builtin_omp_get_num_devices (gimple_stmt_iterator *gsi)
+{
+ if (!ENABLE_OFFLOADING)
+ {
+ replace_call_with_value (gsi, integer_zero_node);
+ return true;
+ }
+ return false;
+}
/* Fold a call to __builtin_acc_on_device. */
@@ -5341,6 +5363,12 @@ gimple_fold_builtin (gimple_stmt_iterator *gsi)
case BUILT_IN_OMP_IS_INITIAL_DEVICE:
return gimple_fold_builtin_omp_is_initial_device (gsi);
+ case BUILT_IN_OMP_GET_INITIAL_DEVICE:
+ return gimple_fold_builtin_omp_get_initial_device (gsi);
+
+ case BUILT_IN_OMP_GET_NUM_DEVICES:
+ return gimple_fold_builtin_omp_get_num_devices (gsi);
+
case BUILT_IN_REALLOC:
return gimple_fold_builtin_realloc (gsi);
@@ -10647,7 +10675,7 @@ gimple_build (gimple_stmt_iterator *gsi,
gsi->bb ? follow_all_ssa_edges : gimple_build_valueize);
if (!res)
{
- res = create_tmp_reg_or_ssa_name (type);
+ res = make_ssa_name (type);
gimple *stmt;
if (code == REALPART_EXPR
|| code == IMAGPART_EXPR
@@ -10679,7 +10707,7 @@ gimple_build (gimple_stmt_iterator *gsi,
gsi->bb ? follow_all_ssa_edges : gimple_build_valueize);
if (!res)
{
- res = create_tmp_reg_or_ssa_name (type);
+ res = make_ssa_name (type);
gimple *stmt = gimple_build_assign (res, code, op0, op1);
gimple_set_location (stmt, loc);
gimple_seq_add_stmt_without_update (&seq, stmt);
@@ -10706,7 +10734,7 @@ gimple_build (gimple_stmt_iterator *gsi,
gsi->bb ? follow_all_ssa_edges : gimple_build_valueize);
if (!res)
{
- res = create_tmp_reg_or_ssa_name (type);
+ res = make_ssa_name (type);
gimple *stmt;
if (code == BIT_FIELD_REF)
stmt = gimple_build_assign (res, code,
@@ -10742,7 +10770,7 @@ gimple_build (gimple_stmt_iterator *gsi,
}
if (!VOID_TYPE_P (type))
{
- res = create_tmp_reg_or_ssa_name (type);
+ res = make_ssa_name (type);
gimple_call_set_lhs (stmt, res);
}
gimple_set_location (stmt, loc);
@@ -10777,7 +10805,7 @@ gimple_build (gimple_stmt_iterator *gsi,
}
if (!VOID_TYPE_P (type))
{
- res = create_tmp_reg_or_ssa_name (type);
+ res = make_ssa_name (type);
gimple_call_set_lhs (stmt, res);
}
gimple_set_location (stmt, loc);
@@ -10814,7 +10842,7 @@ gimple_build (gimple_stmt_iterator *gsi,
}
if (!VOID_TYPE_P (type))
{
- res = create_tmp_reg_or_ssa_name (type);
+ res = make_ssa_name (type);
gimple_call_set_lhs (stmt, res);
}
gimple_set_location (stmt, loc);
@@ -10852,7 +10880,7 @@ gimple_build (gimple_stmt_iterator *gsi,
}
if (!VOID_TYPE_P (type))
{
- res = create_tmp_reg_or_ssa_name (type);
+ res = make_ssa_name (type);
gimple_call_set_lhs (stmt, res);
}
gimple_set_location (stmt, loc);
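Usage illustration (not part of the patch; the behavior matches the new c-c++-common/gomp tests below). With -fopenmp at -O1, a compiler configured without offloading folds both new builtins to the constant 0, while an offloading-enabled compiler rewrites omp_get_initial_device () into a call to omp_get_num_devices ():

    extern int omp_get_initial_device (void);
    extern int omp_get_num_devices (void);

    int
    host_device_number (void)
    {
      /* Without configured offloading this whole function becomes
         "return 0"; with offloading it becomes a single call to
         omp_get_num_devices ().  */
      return omp_get_initial_device ();
    }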
diff --git a/gcc/gimple-fold.h b/gcc/gimple-fold.h
index 8b1e246b..e3cf1f6 100644
--- a/gcc/gimple-fold.h
+++ b/gcc/gimple-fold.h
@@ -22,7 +22,6 @@ along with GCC; see the file COPYING3. If not see
#ifndef GCC_GIMPLE_FOLD_H
#define GCC_GIMPLE_FOLD_H
-extern tree create_tmp_reg_or_ssa_name (tree, gimple *stmt = NULL);
extern tree canonicalize_constructor_val (tree, tree);
extern tree get_symbol_constant_value (tree);
struct c_strlen_data;
diff --git a/gcc/m2/gm2-compiler/M2Quads.mod b/gcc/m2/gm2-compiler/M2Quads.mod
index 3c29fdd..b5455d0 100644
--- a/gcc/m2/gm2-compiler/M2Quads.mod
+++ b/gcc/m2/gm2-compiler/M2Quads.mod
@@ -11299,6 +11299,35 @@ END CheckReturnType ;
(*
+   BuildReturnLower - check the return type and value to ensure type
+                      compatibility and that no range overflow occurs.
+*)
+
+PROCEDURE BuildReturnLower (tokcombined, tokexpr: CARDINAL; e1, t1: CARDINAL) ;
+VAR
+ e2, t2: CARDINAL ;
+BEGIN
+ (* This will check that the type returned is compatible with
+ the formal return type of the procedure. *)
+ CheckReturnType (tokcombined, CurrentProc, e1, t1) ;
+ (* Dereference LeftValue if necessary. *)
+ IF GetMode (e1) = LeftValue
+ THEN
+ t2 := GetSType (CurrentProc) ;
+ e2 := MakeTemporary (tokexpr, RightValue) ;
+ PutVar(e2, t2) ;
+ CheckPointerThroughNil (tokexpr, e1) ;
+ doIndrX (tokexpr, e2, e1) ;
+ e1 := e2
+ END ;
+ (* Here we check the data contents to ensure no overflow. *)
+ BuildRange (InitReturnRangeCheck (tokcombined, CurrentProc, e1)) ;
+ GenQuadOtok (tokcombined, ReturnValueOp, e1, NulSym, CurrentProc, FALSE,
+ tokcombined, UnknownTokenNo, GetDeclaredMod (CurrentProc))
+END BuildReturnLower ;
+
+
+(*
BuildReturn - Builds the Return part of the procedure.
tokreturn is the location of the RETURN keyword.
The Stack is expected to contain:
@@ -11317,7 +11346,6 @@ PROCEDURE BuildReturn (tokreturn: CARDINAL) ;
VAR
tokcombined,
tokexpr : CARDINAL ;
- e2, t2,
e1, t1,
t, f,
Des : CARDINAL ;
@@ -11337,26 +11365,18 @@ BEGIN
tokcombined := MakeVirtualTok (tokreturn, tokreturn, tokexpr) ;
IF e1 # NulSym
THEN
- (* this will check that the type returned is compatible with
- the formal return type of the procedure. *)
- CheckReturnType (tokcombined, CurrentProc, e1, t1) ;
- (* dereference LeftValue if necessary *)
- IF GetMode (e1) = LeftValue
- THEN
- t2 := GetSType (CurrentProc) ;
- e2 := MakeTemporary (tokexpr, RightValue) ;
- PutVar(e2, t2) ;
- CheckPointerThroughNil (tokexpr, e1) ;
- doIndrX (tokexpr, e2, e1) ;
- (* here we check the data contents to ensure no overflow. *)
- BuildRange (InitReturnRangeCheck (tokcombined, CurrentProc, e2)) ;
- GenQuadOtok (tokcombined, ReturnValueOp, e2, NulSym, CurrentProc, FALSE,
- tokcombined, UnknownTokenNo, GetDeclaredMod (CurrentProc))
+ (* Check we are in a procedure scope and that the procedure has a return type. *)
+ IF CurrentProc = NulSym
+ THEN
+ MetaErrorT0 (tokcombined,
+ '{%1E} attempting to return a value when not in a procedure scope')
+ ELSIF GetSType (CurrentProc) = NulSym
+ THEN
+ MetaErrorT1 (tokcombined,
+ 'attempting to return a value from procedure {%1Ea} which does not have a return type',
+ CurrentProc)
ELSE
- (* here we check the data contents to ensure no overflow. *)
- BuildRange (InitReturnRangeCheck (tokcombined, CurrentProc, e1)) ;
- GenQuadOtok (tokcombined, ReturnValueOp, e1, NulSym, CurrentProc, FALSE,
- tokcombined, UnknownTokenNo, GetDeclaredMod (CurrentProc))
+ BuildReturnLower (tokcombined, tokexpr, e1, t1)
END
END ;
GenQuadO (tokcombined, GotoOp, NulSym, NulSym, PopWord (ReturnStack), FALSE) ;
diff --git a/gcc/omp-builtins.def b/gcc/omp-builtins.def
index f73fb7b..db1ec96 100644
--- a/gcc/omp-builtins.def
+++ b/gcc/omp-builtins.def
@@ -71,7 +71,12 @@ DEF_GOACC_BUILTIN_ONLY (BUILT_IN_GOACC_SINGLE_COPY_END, "GOACC_single_copy_end",
DEF_GOMP_BUILTIN_COMPILER (BUILT_IN_OMP_IS_INITIAL_DEVICE,
"omp_is_initial_device", BT_FN_INT,
- ATTR_CONST_NOTHROW_LEAF_LIST)
+ ATTR_CONST_NOTHROW_LIST)
+DEF_GOMP_BUILTIN_COMPILER (BUILT_IN_OMP_GET_INITIAL_DEVICE,
+ "omp_get_initial_device", BT_FN_INT,
+ ATTR_PURE_NOTHROW_LIST)
+DEF_GOMP_BUILTIN_COMPILER (BUILT_IN_OMP_GET_NUM_DEVICES, "omp_get_num_devices",
+ BT_FN_INT, ATTR_PURE_NOTHROW_LIST)
DEF_GOMP_BUILTIN (BUILT_IN_OMP_GET_THREAD_NUM, "omp_get_thread_num",
BT_FN_INT, ATTR_CONST_NOTHROW_LEAF_LIST)
DEF_GOMP_BUILTIN (BUILT_IN_OMP_GET_NUM_THREADS, "omp_get_num_threads",
@@ -88,8 +93,6 @@ DEF_GOMP_BUILTIN (BUILT_IN_OMP_SET_DEFAULT_DEVICE, "omp_set_default_device",
BT_FN_INT, ATTR_NOTHROW_LEAF_LIST)
DEF_GOMP_BUILTIN (BUILT_IN_OMP_GET_INTEROP_INT, "omp_get_interop_int",
BT_FN_PTRMODE_PTR_INT_PTR, ATTR_NOTHROW_LEAF_LIST)
-DEF_GOMP_BUILTIN (BUILT_IN_OMP_GET_NUM_DEVICES, "omp_get_num_devices",
- BT_FN_INT, ATTR_NOTHROW_LEAF_LIST)
DEF_GOMP_BUILTIN (BUILT_IN_GOMP_ATOMIC_START, "GOMP_atomic_start",
BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
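Aside on the attribute change above (illustrative; device_count below is a hypothetical stand-in, not a patch symbol): registering omp_get_num_devices with ATTR_PURE_NOTHROW_LIST is what allows repeated calls with no intervening side effects to be combined, which the new tests rely on when they expect omp_get_num_devices () != omp_get_num_devices () to fold away:

    /* A pure function may read global state but has no side effects.  */
    extern int device_count (void) __attribute__ ((pure));

    int
    count_is_unstable (void)
    {
      /* No stores between the two calls, so the optimizer may reuse the
         first result and fold the comparison to 0.  */
      return device_count () != device_count ();
    }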
diff --git a/gcc/selftest-run-tests.cc b/gcc/selftest-run-tests.cc
index 0090e56..df49a67 100644
--- a/gcc/selftest-run-tests.cc
+++ b/gcc/selftest-run-tests.cc
@@ -80,6 +80,7 @@ selftest::run_tests ()
optinfo_emit_json_cc_tests ();
ordered_hash_map_tests_cc_tests ();
splay_tree_cc_tests ();
+ xml_cc_tests ();
/* Mid-level data structures. */
input_cc_tests ();
diff --git a/gcc/selftest.h b/gcc/selftest.h
index cd85840..94acf62 100644
--- a/gcc/selftest.h
+++ b/gcc/selftest.h
@@ -220,6 +220,7 @@ extern void attribs_cc_tests ();
extern void bitmap_cc_tests ();
extern void cgraph_cc_tests ();
extern void convert_cc_tests ();
+extern void dbgcnt_cc_tests ();
extern void diagnostic_color_cc_tests ();
extern void diagnostic_format_html_cc_tests ();
extern void diagnostic_format_json_cc_tests ();
@@ -237,12 +238,15 @@ extern void gcc_attribute_urlifier_cc_tests ();
extern void gcc_urlifier_cc_tests ();
extern void ggc_tests_cc_tests ();
extern void gimple_cc_tests ();
+extern void gimple_range_tests ();
extern void hash_map_tests_cc_tests ();
extern void hash_set_tests_cc_tests ();
extern void input_cc_tests ();
+extern void ipa_modref_tree_cc_tests ();
extern void json_cc_tests ();
-extern void lazy_diagnostic_path_cc_tests ();
extern void json_parser_cc_tests ();
+extern void lazy_diagnostic_path_cc_tests ();
+extern void opt_suggestions_cc_tests ();
extern void optinfo_emit_json_cc_tests ();
extern void opts_cc_tests ();
extern void opts_diagnostic_cc_tests ();
@@ -250,11 +254,10 @@ extern void ordered_hash_map_tests_cc_tests ();
extern void path_coverage_cc_tests ();
extern void predict_cc_tests ();
extern void pretty_print_cc_tests ();
-extern void range_tests ();
extern void range_op_tests ();
-extern void relation_tests ();
-extern void gimple_range_tests ();
+extern void range_tests ();
extern void read_rtl_function_cc_tests ();
+extern void relation_tests ();
extern void rtl_tests_cc_tests ();
extern void sbitmap_cc_tests ();
extern void selftest_cc_tests ();
@@ -273,9 +276,7 @@ extern void typed_splay_tree_cc_tests ();
extern void vec_cc_tests ();
extern void vec_perm_indices_cc_tests ();
extern void wide_int_cc_tests ();
-extern void opt_suggestions_cc_tests ();
-extern void dbgcnt_cc_tests ();
-extern void ipa_modref_tree_cc_tests ();
+extern void xml_cc_tests ();
extern int num_passes;
diff --git a/gcc/testsuite/c-c++-common/gomp/omp_get_num_devices_initial_device-2.c b/gcc/testsuite/c-c++-common/gomp/omp_get_num_devices_initial_device-2.c
new file mode 100644
index 0000000..891f5cf
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/gomp/omp_get_num_devices_initial_device-2.c
@@ -0,0 +1,29 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-O1 -fdump-tree-optimized -fno-builtin-omp_get_num_devices -fno-builtin-omp_get_initial_device" } */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern int omp_get_initial_device ();
+extern int omp_get_num_devices ();
+#ifdef __cplusplus
+}
+#endif
+
+int f()
+{
+/* The following assumes that omp_get_initial_device () will not return
+ omp_initial_device (== -1), which is also permitted since OpenMP 6.0. */
+ if (omp_get_initial_device () != omp_get_num_devices ()) __builtin_abort ();
+
+ if (omp_get_num_devices () != omp_get_num_devices ()) __builtin_abort ();
+
+ if (omp_get_initial_device () != omp_get_initial_device ()) __builtin_abort ();
+
+ return omp_get_num_devices ();
+}
+
+/* { dg-final { scan-tree-dump-times "abort" 3 "optimized" } } */
+
+/* { dg-final { scan-tree-dump-times "omp_get_num_devices" 4 "optimized" } } */
+/* { dg-final { scan-tree-dump-times "omp_get_initial_device" 3 "optimized" } } */
diff --git a/gcc/testsuite/c-c++-common/gomp/omp_get_num_devices_initial_device.c b/gcc/testsuite/c-c++-common/gomp/omp_get_num_devices_initial_device.c
new file mode 100644
index 0000000..4b17143
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/gomp/omp_get_num_devices_initial_device.c
@@ -0,0 +1,32 @@
+/* { dg-do compile } */
+/* { dg-additional-options "-O1 -fdump-tree-optimized" } */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+extern int omp_get_initial_device ();
+extern int omp_get_num_devices ();
+#ifdef __cplusplus
+}
+#endif
+
+int f()
+{
+/* The following assumes that omp_get_initial_device () will not return
+ omp_initial_device (== -1), which is also permitted since OpenMP 6.0. */
+ if (omp_get_initial_device () != omp_get_num_devices ()) __builtin_abort ();
+
+ if (omp_get_num_devices () != omp_get_num_devices ()) __builtin_abort ();
+
+ if (omp_get_initial_device () != omp_get_initial_device ()) __builtin_abort ();
+
+ return omp_get_num_devices ();
+}
+
+/* { dg-final { scan-tree-dump-not "abort" "optimized" } } */
+
+/* { dg-final { scan-tree-dump-not "omp_get_num_devices;" "optimized" { target { ! offloading_enabled } } } } */
+/* { dg-final { scan-tree-dump "return 0;" "optimized" { target { ! offloading_enabled } } } } */
+
+/* { dg-final { scan-tree-dump-times "omp_get_num_devices;" 1 "optimized" { target offloading_enabled } } } */
+/* { dg-final { scan-tree-dump "_1 = __builtin_omp_get_num_devices \\(\\);\[\\r\\n\]+\[ \]+return _1;" "optimized" { target offloading_enabled } } } */
diff --git a/gcc/testsuite/g++.dg/cpp1z/constexpr-if39.C b/gcc/testsuite/g++.dg/cpp1z/constexpr-if39.C
new file mode 100644
index 0000000..38ae7a0
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp1z/constexpr-if39.C
@@ -0,0 +1,30 @@
+// PR c++/120555
+// { dg-do compile { target c++17 } }
+
+struct A { int m; };
+
+template<class T>
+constexpr auto f() {
+ if constexpr (sizeof(T) == sizeof(int))
+ return 1;
+ else
+ return A{f<int>()};
+}
+
+static_assert(f<bool>().m == 1);
+static_assert(f<int>() == 1);
+
+template <class T> constexpr auto g();
+
+template<class T>
+constexpr auto f2() {
+ if constexpr (sizeof(T) == sizeof(int))
+ return 1;
+ else
+ return A{g<int>()}; // { dg-error "auto" }
+}
+
+template <class T> constexpr auto g() { return A{1}; }
+
+static_assert(f2<bool>().m == 1);
+static_assert(f2<int>() == 1);
diff --git a/gcc/testsuite/gcc.target/arc/fma-1.c b/gcc/testsuite/gcc.target/arc/fma-1.c
index c195ad9..b32989f 100644
--- a/gcc/testsuite/gcc.target/arc/fma-1.c
+++ b/gcc/testsuite/gcc.target/arc/fma-1.c
@@ -2,7 +2,8 @@
/* { dg-skip-if "FPU not available" { arc700 || arc6xx } } */
/* { dg-options "-s -std=gnu11 -O2 -frounding-math -mfpu=fpus_all" } */
-const float a, b = 7.8539818525e01;
+const float b = 7.8539818525e01;
+extern const float a;
/* Check if the fma operation is generated correctly. */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-rv32gcv-nofm.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-rv32gcv-nofm.c
index 4685ed2..a8be5ed 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-rv32gcv-nofm.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-rv32gcv-nofm.c
@@ -5,8 +5,8 @@
/* { dg-final { scan-assembler-times {\tvdiv\.vv} 8 } } */
/* { dg-final { scan-assembler-not {\tvdiv\.vx} } } */
-/* { dg-final { scan-assembler-times {\tvdivu\.vv} 5 } } */
-/* { dg-final { scan-assembler-times {\tvdivu\.vx} 3 } } */
+/* { dg-final { scan-assembler-times {\tvdivu\.vv} 8 } } */
+/* { dg-final { scan-assembler-not {\tvdivu\.vx} } } */
/* { dg-final { scan-assembler-times {\tvfdiv\.vv} 6 } } */
/* { dg-final { scan-assembler-not {\tvfdiv\.vf} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-rv32gcv.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-rv32gcv.c
index 59c48d2..7feee0e 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-rv32gcv.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-rv32gcv.c
@@ -5,8 +5,8 @@
/* { dg-final { scan-assembler-times {\tvdiv\.vv} 8 } } */
/* { dg-final { scan-assembler-not {\tvdiv\.vx} } } */
-/* { dg-final { scan-assembler-times {\tvdivu\.vv} 5 } } */
-/* { dg-final { scan-assembler-times {\tvdivu\.vx} 3 } } */
+/* { dg-final { scan-assembler-times {\tvdivu\.vv} 8 } } */
+/* { dg-final { scan-assembler-not {\tvdivu\.vx} } } */
/* Division by constant is done by calculating a reciprocal and
then multiplying. Hence we do not expect 6 vfdivs. */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-rv64gcv-nofm.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-rv64gcv-nofm.c
index b574dc4..766b17f 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-rv64gcv-nofm.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-rv64gcv-nofm.c
@@ -5,8 +5,8 @@
/* { dg-final { scan-assembler-times {\tvdiv\.vv} 8 } } */
/* { dg-final { scan-assembler-not {\tvdiv\.vx} } } */
-/* { dg-final { scan-assembler-times {\tvdivu\.vv} 4 } } */
-/* { dg-final { scan-assembler-times {\tvdivu\.vx} 4 } } */
+/* { dg-final { scan-assembler-times {\tvdivu\.vv} 8 } } */
+/* { dg-final { scan-assembler-not {\tvdivu\.vx} } } */
/* { dg-final { scan-assembler-times {\tvfdiv\.vv} 6 } } */
/* { dg-final { scan-assembler-not {\tvfdiv\.vf} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-rv64gcv.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-rv64gcv.c
index 9b46c6b..c59c664 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-rv64gcv.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/binop/vdiv-rv64gcv.c
@@ -5,8 +5,8 @@
/* { dg-final { scan-assembler-times {\tvdiv\.vv} 8 } } */
/* { dg-final { scan-assembler-not {\tvdiv\.vx} } } */
-/* { dg-final { scan-assembler-times {\tvdivu\.vv} 4 } } */
-/* { dg-final { scan-assembler-times {\tvdivu\.vx} 4 } } */
+/* { dg-final { scan-assembler-times {\tvdivu\.vv} 8 } } */
+/* { dg-final { scan-assembler-not {\tvdivu\.vx} } } */
/* Division by constant is done by calculating a reciprocal and
then multiplying. Hence we do not expect 6 vfdivs. */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-u16.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-u16.c
index 7e107d3..92fbf22 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-u16.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-u16.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_0_WRAP(T, -, rsub);
DEF_VX_BINARY_CASE_0_WRAP(T, &, and)
DEF_VX_BINARY_CASE_0_WRAP(T, |, or)
DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
+DEF_VX_BINARY_CASE_0_WRAP(T, /, div)
/* { dg-final { scan-assembler-times {vadd.vx} 1 } } */
/* { dg-final { scan-assembler-times {vsub.vx} 1 } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
/* { dg-final { scan-assembler-times {vand.vx} 1 } } */
/* { dg-final { scan-assembler-times {vor.vx} 1 } } */
/* { dg-final { scan-assembler-times {vxor.vx} 1 } } */
+/* { dg-final { scan-assembler-times {vdivu.vx} 1 } } */
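Sketch of what the new test line exercises (assumed expansion; DEF_VX_BINARY_CASE_0_WRAP lives in the suite's shared headers, which this commit does not touch): a loop dividing a vector by a loop-invariant scalar, which for unsigned element types should now be vectorized to a single vdivu.vx:

    #include <stdint.h>

    /* Assumed shape of the generated case for T = uint16_t, op /, name div.  */
    void
    test_binary_vx_div (uint16_t *__restrict out, uint16_t *__restrict in,
                        uint16_t x, unsigned n)
    {
      for (unsigned i = 0; i < n; i++)
        out[i] = in[i] / x;  /* expected: vdivu.vx  */
    }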
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-u32.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-u32.c
index f8ffab7..f487b42 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-u32.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-u32.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_0_WRAP(T, -, rsub);
DEF_VX_BINARY_CASE_0_WRAP(T, &, and)
DEF_VX_BINARY_CASE_0_WRAP(T, |, or)
DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
+DEF_VX_BINARY_CASE_0_WRAP(T, /, div)
/* { dg-final { scan-assembler-times {vadd.vx} 1 } } */
/* { dg-final { scan-assembler-times {vsub.vx} 1 } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
/* { dg-final { scan-assembler-times {vand.vx} 1 } } */
/* { dg-final { scan-assembler-times {vor.vx} 1 } } */
/* { dg-final { scan-assembler-times {vxor.vx} 1 } } */
+/* { dg-final { scan-assembler-times {vdivu.vx} 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-u64.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-u64.c
index 31d2945..761d25c 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-u64.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-u64.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_0_WRAP(T, -, rsub);
DEF_VX_BINARY_CASE_0_WRAP(T, &, and)
DEF_VX_BINARY_CASE_0_WRAP(T, |, or)
DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
+DEF_VX_BINARY_CASE_0_WRAP(T, /, div)
/* { dg-final { scan-assembler-times {vadd.vx} 1 } } */
/* { dg-final { scan-assembler-times {vsub.vx} 1 } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
/* { dg-final { scan-assembler-times {vand.vx} 1 } } */
/* { dg-final { scan-assembler-times {vor.vx} 1 } } */
/* { dg-final { scan-assembler-times {vxor.vx} 1 } } */
+/* { dg-final { scan-assembler-times {vdivu.vx} 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-u8.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-u8.c
index 59e0334..0018bd2 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-u8.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-1-u8.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_0_WRAP(T, -, rsub);
DEF_VX_BINARY_CASE_0_WRAP(T, &, and)
DEF_VX_BINARY_CASE_0_WRAP(T, |, or)
DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
+DEF_VX_BINARY_CASE_0_WRAP(T, /, div)
/* { dg-final { scan-assembler-times {vadd.vx} 1 } } */
/* { dg-final { scan-assembler-times {vsub.vx} 1 } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
/* { dg-final { scan-assembler-times {vand.vx} 1 } } */
/* { dg-final { scan-assembler-times {vor.vx} 1 } } */
/* { dg-final { scan-assembler-times {vxor.vx} 1 } } */
+/* { dg-final { scan-assembler-times {vdivu.vx} 1 } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-2-u16.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-2-u16.c
index 9612e3f..554acac 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-2-u16.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-2-u16.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_0_WRAP(T, -, rsub)
DEF_VX_BINARY_CASE_0_WRAP(T, &, and)
DEF_VX_BINARY_CASE_0_WRAP(T, |, or)
DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
+DEF_VX_BINARY_CASE_0_WRAP(T, /, div)
/* { dg-final { scan-assembler-not {vadd.vx} } } */
/* { dg-final { scan-assembler-not {vsub.vx} } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
/* { dg-final { scan-assembler-not {vand.vx} } } */
/* { dg-final { scan-assembler-not {vor.vx} } } */
/* { dg-final { scan-assembler-not {vxor.vx} } } */
+/* { dg-final { scan-assembler-not {vdivu.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-2-u32.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-2-u32.c
index 9641802..0e74a3b 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-2-u32.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-2-u32.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_0_WRAP(T, -, rsub)
DEF_VX_BINARY_CASE_0_WRAP(T, &, and)
DEF_VX_BINARY_CASE_0_WRAP(T, |, or)
DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
+DEF_VX_BINARY_CASE_0_WRAP(T, /, div)
/* { dg-final { scan-assembler-not {vadd.vx} } } */
/* { dg-final { scan-assembler-not {vsub.vx} } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
/* { dg-final { scan-assembler-not {vand.vx} } } */
/* { dg-final { scan-assembler-not {vor.vx} } } */
/* { dg-final { scan-assembler-not {vxor.vx} } } */
+/* { dg-final { scan-assembler-not {vdivu.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-2-u64.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-2-u64.c
index 0d173e0..8e831ce 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-2-u64.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-2-u64.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_0_WRAP(T, -, rsub)
DEF_VX_BINARY_CASE_0_WRAP(T, &, and)
DEF_VX_BINARY_CASE_0_WRAP(T, |, or)
DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
+DEF_VX_BINARY_CASE_0_WRAP(T, /, div)
/* { dg-final { scan-assembler-not {vadd.vx} } } */
/* { dg-final { scan-assembler-not {vsub.vx} } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
/* { dg-final { scan-assembler-not {vand.vx} } } */
/* { dg-final { scan-assembler-not {vor.vx} } } */
/* { dg-final { scan-assembler-not {vxor.vx} } } */
+/* { dg-final { scan-assembler-not {vdivu.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-2-u8.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-2-u8.c
index 931295e..dad7562 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-2-u8.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-2-u8.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_0_WRAP(T, -, rsub)
DEF_VX_BINARY_CASE_0_WRAP(T, &, and)
DEF_VX_BINARY_CASE_0_WRAP(T, |, or)
DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
+DEF_VX_BINARY_CASE_0_WRAP(T, /, div)
/* { dg-final { scan-assembler-not {vadd.vx} } } */
/* { dg-final { scan-assembler-not {vsub.vx} } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
/* { dg-final { scan-assembler-not {vand.vx} } } */
/* { dg-final { scan-assembler-not {vor.vx} } } */
/* { dg-final { scan-assembler-not {vxor.vx} } } */
+/* { dg-final { scan-assembler-not {vdivu.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-3-u16.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-3-u16.c
index 24b4aa7..873fbf3 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-3-u16.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-3-u16.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_0_WRAP(T, -, rsub)
DEF_VX_BINARY_CASE_0_WRAP(T, &, and)
DEF_VX_BINARY_CASE_0_WRAP(T, |, or)
DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
+DEF_VX_BINARY_CASE_0_WRAP(T, /, div)
/* { dg-final { scan-assembler-not {vadd.vx} } } */
/* { dg-final { scan-assembler-not {vsub.vx} } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
/* { dg-final { scan-assembler-not {vand.vx} } } */
/* { dg-final { scan-assembler-not {vor.vx} } } */
/* { dg-final { scan-assembler-not {vxor.vx} } } */
+/* { dg-final { scan-assembler-not {vdivu.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-3-u32.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-3-u32.c
index 0c1552a..cbf35e1 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-3-u32.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-3-u32.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_0_WRAP(T, -, rsub)
DEF_VX_BINARY_CASE_0_WRAP(T, &, and)
DEF_VX_BINARY_CASE_0_WRAP(T, |, or)
DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
+DEF_VX_BINARY_CASE_0_WRAP(T, /, div)
/* { dg-final { scan-assembler-not {vadd.vx} } } */
/* { dg-final { scan-assembler-not {vsub.vx} } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
/* { dg-final { scan-assembler-not {vand.vx} } } */
/* { dg-final { scan-assembler-not {vor.vx} } } */
/* { dg-final { scan-assembler-not {vxor.vx} } } */
+/* { dg-final { scan-assembler-not {vdivu.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-3-u64.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-3-u64.c
index 8364f19..3ccf42b 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-3-u64.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-3-u64.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_0(T, -, rsub)
DEF_VX_BINARY_CASE_0_WRAP(T, &, and)
DEF_VX_BINARY_CASE_0_WRAP(T, |, or)
DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
+DEF_VX_BINARY_CASE_0_WRAP(T, /, div)
/* { dg-final { scan-assembler-not {vadd.vx} } } */
/* { dg-final { scan-assembler-not {vsub.vx} } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
/* { dg-final { scan-assembler-not {vand.vx} } } */
/* { dg-final { scan-assembler-not {vor.vx} } } */
/* { dg-final { scan-assembler-not {vxor.vx} } } */
+/* { dg-final { scan-assembler-not {vdivu.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-3-u8.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-3-u8.c
index 8a31111..50825ef 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-3-u8.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-3-u8.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_0_WRAP(T, -, rsub)
DEF_VX_BINARY_CASE_0_WRAP(T, &, and)
DEF_VX_BINARY_CASE_0_WRAP(T, |, or)
DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
+DEF_VX_BINARY_CASE_0_WRAP(T, /, div)
/* { dg-final { scan-assembler-not {vadd.vx} } } */
/* { dg-final { scan-assembler-not {vsub.vx} } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_0_WRAP(T, ^, xor)
/* { dg-final { scan-assembler-not {vand.vx} } } */
/* { dg-final { scan-assembler-not {vor.vx} } } */
/* { dg-final { scan-assembler-not {vxor.vx} } } */
+/* { dg-final { scan-assembler-not {vdivu.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-u16.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-u16.c
index 4bc0850..58e4a1e 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-u16.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-u16.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_1_WRAP(T, -, rsub, VX_BINARY_REVERSE_BODY_X16)
DEF_VX_BINARY_CASE_1_WRAP(T, &, and, VX_BINARY_BODY_X16)
DEF_VX_BINARY_CASE_1_WRAP(T, |, or, VX_BINARY_BODY_X16)
DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY_X16)
+DEF_VX_BINARY_CASE_1_WRAP(T, /, div, VX_BINARY_BODY_X16)
/* { dg-final { scan-assembler {vadd.vx} } } */
/* { dg-final { scan-assembler {vsub.vx} } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY_X16)
/* { dg-final { scan-assembler {vand.vx} } } */
/* { dg-final { scan-assembler {vor.vx} } } */
/* { dg-final { scan-assembler {vxor.vx} } } */
+/* { dg-final { scan-assembler {vdivu.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-u32.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-u32.c
index 255273d..3d5f535 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-u32.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-u32.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_1_WRAP(T, -, rsub, VX_BINARY_REVERSE_BODY_X4)
DEF_VX_BINARY_CASE_1_WRAP(T, &, and, VX_BINARY_BODY_X4)
DEF_VX_BINARY_CASE_1_WRAP(T, |, or, VX_BINARY_BODY_X4)
DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY_X4)
+DEF_VX_BINARY_CASE_1_WRAP(T, /, div, VX_BINARY_BODY_X4)
/* { dg-final { scan-assembler {vadd.vx} } } */
/* { dg-final { scan-assembler {vsub.vx} } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY_X4)
/* { dg-final { scan-assembler {vand.vx} } } */
/* { dg-final { scan-assembler {vor.vx} } } */
/* { dg-final { scan-assembler {vxor.vx} } } */
+/* { dg-final { scan-assembler {vdivu.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-u64.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-u64.c
index d21f61b..0edb925 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-u64.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-u64.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_1_WRAP(T, -, rsub, VX_BINARY_REVERSE_BODY)
DEF_VX_BINARY_CASE_1_WRAP(T, &, and, VX_BINARY_BODY)
DEF_VX_BINARY_CASE_1_WRAP(T, |, or, VX_BINARY_BODY)
DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY)
+DEF_VX_BINARY_CASE_1_WRAP(T, /, div, VX_BINARY_BODY)
/* { dg-final { scan-assembler {vadd.vx} } } */
/* { dg-final { scan-assembler {vsub.vx} } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY)
/* { dg-final { scan-assembler {vand.vx} } } */
/* { dg-final { scan-assembler {vor.vx} } } */
/* { dg-final { scan-assembler {vxor.vx} } } */
+/* { dg-final { scan-assembler {vdivu.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-u8.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-u8.c
index 51492ae..5a3c114 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-u8.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-4-u8.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_1_WRAP(T, -, rsub, VX_BINARY_REVERSE_BODY_X16)
DEF_VX_BINARY_CASE_1_WRAP(T, &, and, VX_BINARY_BODY_X16)
DEF_VX_BINARY_CASE_1_WRAP(T, |, or, VX_BINARY_BODY_X16)
DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY_X16)
+DEF_VX_BINARY_CASE_1_WRAP(T, /, div, VX_BINARY_BODY_X16)
/* { dg-final { scan-assembler {vadd.vx} } } */
/* { dg-final { scan-assembler {vsub.vx} } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY_X16)
/* { dg-final { scan-assembler {vand.vx} } } */
/* { dg-final { scan-assembler {vor.vx} } } */
/* { dg-final { scan-assembler {vxor.vx} } } */
+/* { dg-final { scan-assembler {vdivu.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-u16.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-u16.c
index 2ab0967..2559935 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-u16.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-u16.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_1_WRAP(T, -, rsub, VX_BINARY_REVERSE_BODY_X8)
DEF_VX_BINARY_CASE_1_WRAP(T, &, and, VX_BINARY_BODY_X8)
DEF_VX_BINARY_CASE_1_WRAP(T, |, or, VX_BINARY_BODY_X8)
DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY_X8)
+DEF_VX_BINARY_CASE_1_WRAP(T, /, div, VX_BINARY_BODY_X8)
/* { dg-final { scan-assembler {vadd.vx} } } */
/* { dg-final { scan-assembler {vsub.vx} } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY_X8)
/* { dg-final { scan-assembler {vand.vx} } } */
/* { dg-final { scan-assembler {vor.vx} } } */
/* { dg-final { scan-assembler {vxor.vx} } } */
+/* { dg-final { scan-assembler {vdivu.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-u32.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-u32.c
index 799c5db..594093f 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-u32.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-u32.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_1_WRAP(T, -, rsub, VX_BINARY_REVERSE_BODY_X4)
DEF_VX_BINARY_CASE_1_WRAP(T, &, and, VX_BINARY_BODY_X4)
DEF_VX_BINARY_CASE_1_WRAP(T, |, or, VX_BINARY_BODY_X4)
DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY_X4)
+DEF_VX_BINARY_CASE_1_WRAP(T, /, div, VX_BINARY_BODY_X4)
/* { dg-final { scan-assembler {vadd.vx} } } */
/* { dg-final { scan-assembler {vsub.vx} } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY_X4)
/* { dg-final { scan-assembler {vand.vx} } } */
/* { dg-final { scan-assembler {vor.vx} } } */
/* { dg-final { scan-assembler {vxor.vx} } } */
+/* { dg-final { scan-assembler {vdivu.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-u64.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-u64.c
index a5d25d21..a5be019 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-u64.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-u64.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_1_WRAP(T, -, rsub, VX_BINARY_REVERSE_BODY)
DEF_VX_BINARY_CASE_1_WRAP(T, &, and, VX_BINARY_BODY)
DEF_VX_BINARY_CASE_1_WRAP(T, |, or, VX_BINARY_BODY)
DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY)
+DEF_VX_BINARY_CASE_1_WRAP(T, /, div, VX_BINARY_BODY)
/* { dg-final { scan-assembler {vadd.vx} } } */
/* { dg-final { scan-assembler {vsub.vx} } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY)
/* { dg-final { scan-assembler {vand.vx} } } */
/* { dg-final { scan-assembler {vor.vx} } } */
/* { dg-final { scan-assembler {vxor.vx} } } */
+/* { dg-final { scan-assembler {vdivu.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-u8.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-u8.c
index 61d2b24..e0220ca 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-u8.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-5-u8.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_1_WRAP(T, -, rsub, VX_BINARY_REVERSE_BODY_X16)
DEF_VX_BINARY_CASE_1_WRAP(T, &, and, VX_BINARY_BODY_X16)
DEF_VX_BINARY_CASE_1_WRAP(T, |, or, VX_BINARY_BODY_X16)
DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY_X16)
+DEF_VX_BINARY_CASE_1_WRAP(T, /, div, VX_BINARY_BODY_X16)
/* { dg-final { scan-assembler {vadd.vx} } } */
/* { dg-final { scan-assembler {vsub.vx} } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY_X16)
/* { dg-final { scan-assembler {vand.vx} } } */
/* { dg-final { scan-assembler {vor.vx} } } */
/* { dg-final { scan-assembler {vxor.vx} } } */
+/* { dg-final { scan-assembler {vdivu.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-u16.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-u16.c
index 57220f2..d6a3510 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-u16.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-u16.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_1_WRAP(T, -, rsub, VX_BINARY_REVERSE_BODY_X8);
DEF_VX_BINARY_CASE_1_WRAP(T, &, and, VX_BINARY_BODY_X8)
DEF_VX_BINARY_CASE_1_WRAP(T, |, or, VX_BINARY_BODY_X8)
DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY_X8)
+DEF_VX_BINARY_CASE_1_WRAP(T, /, div, VX_BINARY_BODY_X8)
/* { dg-final { scan-assembler {vadd.vx} } } */
/* { dg-final { scan-assembler {vsub.vx} } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY_X8)
/* { dg-final { scan-assembler {vand.vx} } } */
/* { dg-final { scan-assembler {vor.vx} } } */
/* { dg-final { scan-assembler {vxor.vx} } } */
+/* { dg-final { scan-assembler {vdivu.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-u32.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-u32.c
index 45244a0..2886e3b 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-u32.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-u32.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_1_WRAP(T, -, rsub, VX_BINARY_REVERSE_BODY_X4);
DEF_VX_BINARY_CASE_1_WRAP(T, &, and, VX_BINARY_BODY_X4)
DEF_VX_BINARY_CASE_1_WRAP(T, |, or, VX_BINARY_BODY_X4)
DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY_X4)
+DEF_VX_BINARY_CASE_1_WRAP(T, /, div, VX_BINARY_BODY_X4)
/* { dg-final { scan-assembler {vadd.vx} } } */
/* { dg-final { scan-assembler {vsub.vx} } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY_X4)
/* { dg-final { scan-assembler {vand.vx} } } */
/* { dg-final { scan-assembler {vor.vx} } } */
/* { dg-final { scan-assembler {vxor.vx} } } */
+/* { dg-final { scan-assembler {vdivu.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-u64.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-u64.c
index 3bad130..c9ece25 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-u64.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-u64.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_1_WRAP(T, -, rsub, VX_BINARY_REVERSE_BODY);
DEF_VX_BINARY_CASE_1_WRAP(T, &, and, VX_BINARY_BODY)
DEF_VX_BINARY_CASE_1_WRAP(T, |, or, VX_BINARY_BODY)
DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY)
+DEF_VX_BINARY_CASE_1_WRAP(T, /, div, VX_BINARY_BODY)
/* { dg-final { scan-assembler-not {vadd.vx} } } */
/* { dg-final { scan-assembler-not {vsub.vx} } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY)
/* { dg-final { scan-assembler-not {vand.vx} } } */
/* { dg-final { scan-assembler-not {vor.vx} } } */
/* { dg-final { scan-assembler-not {vxor.vx} } } */
+/* { dg-final { scan-assembler-not {vdivu.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-u8.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-u8.c
index bb860dd..87a3ef8 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-u8.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx-6-u8.c
@@ -11,6 +11,7 @@ DEF_VX_BINARY_REVERSE_CASE_1_WRAP(T, -, rsub, VX_BINARY_REVERSE_BODY_X16);
DEF_VX_BINARY_CASE_1_WRAP(T, &, and, VX_BINARY_BODY_X16)
DEF_VX_BINARY_CASE_1_WRAP(T, |, or, VX_BINARY_BODY_X16)
DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY_X16)
+DEF_VX_BINARY_CASE_1_WRAP(T, /, div, VX_BINARY_BODY_X16)
/* { dg-final { scan-assembler {vadd.vx} } } */
/* { dg-final { scan-assembler {vsub.vx} } } */
@@ -18,3 +19,4 @@ DEF_VX_BINARY_CASE_1_WRAP(T, ^, xor, VX_BINARY_BODY_X16)
/* { dg-final { scan-assembler {vand.vx} } } */
/* { dg-final { scan-assembler {vor.vx} } } */
/* { dg-final { scan-assembler {vxor.vx} } } */
+/* { dg-final { scan-assembler {vdivu.vx} } } */
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_binary_data.h b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_binary_data.h
index ed8c562..5130709 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_binary_data.h
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_binary_data.h
@@ -2750,4 +2750,200 @@ int64_t TEST_BINARY_DATA(int64_t, div)[][3][N] =
},
};
+uint8_t TEST_BINARY_DATA(uint8_t, div)[][3][N] =
+{
+ {
+ { 2 },
+ {
+ 2, 2, 2, 2,
+ 1, 1, 1, 1,
+ 4, 4, 4, 4,
+ 7, 7, 7, 7,
+ },
+ {
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ 2, 2, 2, 2,
+ 3, 3, 3, 3,
+ },
+ },
+ {
+ { 127 },
+ {
+ 127, 127, 127, 127,
+ 1, 1, 1, 1,
+ 128, 128, 128, 128,
+ 2, 2, 2, 2,
+ },
+ {
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ },
+ },
+ {
+ { 128 },
+ {
+ 127, 127, 127, 127,
+ 255, 255, 255, 255,
+ 128, 128, 128, 128,
+ 1, 1, 1, 1,
+ },
+ {
+ 0, 0, 0, 0,
+ 1, 1, 1, 1,
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ },
+ },
+};
+
+uint16_t TEST_BINARY_DATA(uint16_t, div)[][3][N] =
+{
+ {
+ { 2 },
+ {
+ 2, 2, 2, 2,
+ 1, 1, 1, 1,
+ 4, 4, 4, 4,
+ 7, 7, 7, 7,
+ },
+ {
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ 2, 2, 2, 2,
+ 3, 3, 3, 3,
+ },
+ },
+ {
+ { 32767 },
+ {
+ 32767, 32767, 32767, 32767,
+ 1, 1, 1, 1,
+ 32768, 32768, 32768, 32768,
+ 2, 2, 2, 2,
+ },
+ {
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ },
+ },
+ {
+ { 32768 },
+ {
+ 32767, 32767, 32767, 32767,
+ 65535, 65535, 65535, 65535,
+ 32768, 32768, 32768, 32768,
+ 1, 1, 1, 1,
+ },
+ {
+ 0, 0, 0, 0,
+ 1, 1, 1, 1,
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ },
+ },
+};
+
+uint32_t TEST_BINARY_DATA(uint32_t, div)[][3][N] =
+{
+ {
+ { 2 },
+ {
+ 2, 2, 2, 2,
+ 1, 1, 1, 1,
+ 4, 4, 4, 4,
+ 7, 7, 7, 7,
+ },
+ {
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ 2, 2, 2, 2,
+ 3, 3, 3, 3,
+ },
+ },
+ {
+ { 2147483647 },
+ {
+ 2147483647, 2147483647, 2147483647, 2147483647,
+ 1, 1, 1, 1,
+ 2147483648, 2147483648, 2147483648, 2147483648,
+ 2, 2, 2, 2,
+ },
+ {
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ },
+ },
+ {
+ { 2147483648 },
+ {
+ 2147483647, 2147483647, 2147483647, 2147483647,
+ 4294967295, 4294967295, 4294967295, 4294967295,
+ 2147483648, 2147483648, 2147483648, 2147483648,
+ 1, 1, 1, 1,
+ },
+ {
+ 0, 0, 0, 0,
+ 1, 1, 1, 1,
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ },
+ },
+};
+
+uint64_t TEST_BINARY_DATA(uint64_t, div)[][3][N] =
+{
+ {
+ { 2 },
+ {
+ 2, 2, 2, 2,
+ 1, 1, 1, 1,
+ 4, 4, 4, 4,
+ 7, 7, 7, 7,
+ },
+ {
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ 2, 2, 2, 2,
+ 3, 3, 3, 3,
+ },
+ },
+ {
+ { 9223372036854775807ull },
+ {
+ 9223372036854775807ull, 9223372036854775807ull, 9223372036854775807ull, 9223372036854775807ull,
+ 1, 1, 1, 1,
+ 9223372036854775808ull, 9223372036854775808ull, 9223372036854775808ull, 9223372036854775808ull,
+ 2, 2, 2, 2,
+ },
+ {
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ },
+ },
+ {
+ { 9223372036854775808ull },
+ {
+ 9223372036854775807ull, 9223372036854775807ull, 9223372036854775807ull, 9223372036854775807ull,
+ 18446744073709551615ull, 18446744073709551615ull, 18446744073709551615ull, 18446744073709551615ull,
+ 9223372036854775808ull, 9223372036854775808ull, 9223372036854775808ull, 9223372036854775808ull,
+ 1, 1, 1, 1,
+ },
+ {
+ 0, 0, 0, 0,
+ 1, 1, 1, 1,
+ 1, 1, 1, 1,
+ 0, 0, 0, 0,
+ },
+ },
+};
+
#endif
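
Each entry in the new unsigned-division tables has the same shape as the existing signed data above it: row 0 carries the scalar divisor in its first element, row 1 the input vector, and row 2 the expected quotients. A minimal checker sketch, assuming the vx_binary_run.h harness consumes an entry in an equivalent way (illustrative only, not the real harness):

  /* Illustrative checker for one uint8_t entry; not the real harness
     from vx_binary_run.h.  */
  #include <stdint.h>
  #include <assert.h>
  #define N 16

  static void
  check_div_entry_u8 (const uint8_t entry[3][N])
  {
    uint8_t x = entry[0][0];                /* row 0: scalar divisor */
    for (int i = 0; i < N; i++)
      assert ((uint8_t) (entry[1][i] / x)   /* row 1: input vector */
              == entry[2][i]);              /* row 2: expected quotient */
  }
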
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vdiv-run-1-u16.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vdiv-run-1-u16.c
new file mode 100644
index 0000000..afb848d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vdiv-run-1-u16.c
@@ -0,0 +1,15 @@
+/* { dg-do run { target { riscv_v } } } */
+/* { dg-additional-options "-std=c99 --param=gpr2vr-cost=0" } */
+
+#include "vx_binary.h"
+#include "vx_binary_data.h"
+
+#define T uint16_t
+#define NAME div
+
+DEF_VX_BINARY_CASE_0_WRAP(T, /, NAME)
+
+#define TEST_DATA TEST_BINARY_DATA_WRAP(T, NAME)
+#define TEST_RUN(T, NAME, out, in, x, n) RUN_VX_BINARY_CASE_0_WRAP(T, NAME, out, in, x, n)
+
+#include "vx_binary_run.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vdiv-run-1-u32.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vdiv-run-1-u32.c
new file mode 100644
index 0000000..4acaa5b
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vdiv-run-1-u32.c
@@ -0,0 +1,15 @@
+/* { dg-do run { target { riscv_v } } } */
+/* { dg-additional-options "-std=c99 --param=gpr2vr-cost=0" } */
+
+#include "vx_binary.h"
+#include "vx_binary_data.h"
+
+#define T uint32_t
+#define NAME div
+
+DEF_VX_BINARY_CASE_0_WRAP(T, /, NAME)
+
+#define TEST_DATA TEST_BINARY_DATA_WRAP(T, NAME)
+#define TEST_RUN(T, NAME, out, in, x, n) RUN_VX_BINARY_CASE_0_WRAP(T, NAME, out, in, x, n)
+
+#include "vx_binary_run.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vdiv-run-1-u64.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vdiv-run-1-u64.c
new file mode 100644
index 0000000..335a909
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vdiv-run-1-u64.c
@@ -0,0 +1,15 @@
+/* { dg-do run { target { riscv_v } } } */
+/* { dg-additional-options "-std=c99 --param=gpr2vr-cost=0" } */
+
+#include "vx_binary.h"
+#include "vx_binary_data.h"
+
+#define T uint64_t
+#define NAME div
+
+DEF_VX_BINARY_CASE_0_WRAP(T, /, NAME)
+
+#define TEST_DATA TEST_BINARY_DATA_WRAP(T, NAME)
+#define TEST_RUN(T, NAME, out, in, x, n) RUN_VX_BINARY_CASE_0_WRAP(T, NAME, out, in, x, n)
+
+#include "vx_binary_run.h"
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vdiv-run-1-u8.c b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vdiv-run-1-u8.c
new file mode 100644
index 0000000..160d362
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/rvv/autovec/vx_vf/vx_vdiv-run-1-u8.c
@@ -0,0 +1,15 @@
+/* { dg-do run { target { riscv_v } } } */
+/* { dg-additional-options "-std=c99 --param=gpr2vr-cost=0" } */
+
+#include "vx_binary.h"
+#include "vx_binary_data.h"
+
+#define T uint8_t
+#define NAME div
+
+DEF_VX_BINARY_CASE_0_WRAP(T, /, NAME)
+
+#define TEST_DATA TEST_BINARY_DATA_WRAP(T, NAME)
+#define TEST_RUN(T, NAME, out, in, x, n) RUN_VX_BINARY_CASE_0_WRAP(T, NAME, out, in, x, n)
+
+#include "vx_binary_run.h"
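
The four new run tests differ only in the element type. The macro layer is assumed to expand to a plain scalar loop whose divisor stays loop-invariant in a GPR, which --param=gpr2vr-cost=0 then encourages the vectorizer to turn into vdivu.vx. A hypothetical expansion for the u8 case (the actual macro bodies live in vx_binary.h):

  /* Hypothetical expansion of DEF_VX_BINARY_CASE_0_WRAP (uint8_t, /, div);
     the real definition is in vx_binary.h.  */
  #include <stdint.h>

  void
  test_vx_binary_div_uint8_t (uint8_t * restrict out, uint8_t * restrict in,
                              uint8_t x, unsigned n)
  {
    for (unsigned i = 0; i < n; i++)
      out[i] = in[i] / x;   /* loop-invariant x should map to vdivu.vx */
  }
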
diff --git a/gcc/testsuite/gfortran.dg/gomp/omp_get_num_devices_initial_device-2.f90 b/gcc/testsuite/gfortran.dg/gomp/omp_get_num_devices_initial_device-2.f90
new file mode 100644
index 0000000..18613d4
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/gomp/omp_get_num_devices_initial_device-2.f90
@@ -0,0 +1,21 @@
+! { dg-do compile }
+! { dg-additional-options "-O1 -fdump-tree-optimized -fno-builtin-omp_get_num_devices -fno-builtin-omp_get_initial_device" }
+integer function f() result(ret)
+ interface
+ integer function omp_get_initial_device (); end
+ integer function omp_get_num_devices (); end
+ end interface
+
+ if (omp_get_initial_device () /= omp_get_num_devices ()) error stop
+
+ if (omp_get_num_devices () /= omp_get_num_devices ()) error stop
+
+ if (omp_get_initial_device () /= omp_get_initial_device ()) error stop
+
+ ret = omp_get_num_devices ()
+end
+
+! { dg-final { scan-tree-dump-times "error_stop" 3 "optimized" } }
+
+! { dg-final { scan-tree-dump-times "omp_get_num_devices" 4 "optimized" } }
+! { dg-final { scan-tree-dump-times "omp_get_initial_device" 3 "optimized" } }
diff --git a/gcc/testsuite/gfortran.dg/gomp/omp_get_num_devices_initial_device.f90 b/gcc/testsuite/gfortran.dg/gomp/omp_get_num_devices_initial_device.f90
new file mode 100644
index 0000000..5409f12
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/gomp/omp_get_num_devices_initial_device.f90
@@ -0,0 +1,24 @@
+! { dg-do compile }
+! { dg-additional-options "-O1 -fdump-tree-optimized" }
+integer function f() result(ret)
+ interface
+ integer function omp_get_initial_device (); end
+ integer function omp_get_num_devices (); end
+ end interface
+
+ if (omp_get_initial_device () /= omp_get_num_devices ()) error stop
+
+ if (omp_get_num_devices () /= omp_get_num_devices ()) error stop
+
+ if (omp_get_initial_device () /= omp_get_initial_device ()) error stop
+
+ ret = omp_get_num_devices ()
+end
+
+! { dg-final { scan-tree-dump-not "error_stop" "optimized" } }
+
+! { dg-final { scan-tree-dump-not "omp_get_num_devices;" "optimized" { target { ! offloading_enabled } } } }
+! { dg-final { scan-tree-dump "return 0;" "optimized" { target { ! offloading_enabled } } } }
+
+! { dg-final { scan-tree-dump-times "omp_get_num_devices;" 1 "optimized" { target offloading_enabled } } }
+! { dg-final { scan-tree-dump "_1 = __builtin_omp_get_num_devices \\(\\);\[\\r\\n\]+\[ \]+return _1;" "optimized" { target offloading_enabled } } }
diff --git a/gcc/testsuite/gm2/iso/fail/badreturn.mod b/gcc/testsuite/gm2/iso/fail/badreturn.mod
new file mode 100644
index 0000000..5417961
--- /dev/null
+++ b/gcc/testsuite/gm2/iso/fail/badreturn.mod
@@ -0,0 +1,5 @@
+MODULE badreturn ;
+
+BEGIN
+ RETURN 0
+END badreturn.
\ No newline at end of file
diff --git a/gcc/testsuite/gm2/iso/fail/badreturn2.mod b/gcc/testsuite/gm2/iso/fail/badreturn2.mod
new file mode 100644
index 0000000..a4b9008
--- /dev/null
+++ b/gcc/testsuite/gm2/iso/fail/badreturn2.mod
@@ -0,0 +1,12 @@
+MODULE badreturn2 ;
+
+
+PROCEDURE foo ;
+BEGIN
+ RETURN 0
+END foo ;
+
+
+BEGIN
+ foo
+END badreturn2.
diff --git a/gcc/testsuite/gm2/iso/pass/modulereturn.mod b/gcc/testsuite/gm2/iso/pass/modulereturn.mod
new file mode 100644
index 0000000..b39947d
--- /dev/null
+++ b/gcc/testsuite/gm2/iso/pass/modulereturn.mod
@@ -0,0 +1,5 @@
+MODULE modulereturn ;
+
+BEGIN
+ RETURN
+END modulereturn.
diff --git a/gcc/testsuite/gm2/iso/pass/modulereturn2.mod b/gcc/testsuite/gm2/iso/pass/modulereturn2.mod
new file mode 100644
index 0000000..934cfae
--- /dev/null
+++ b/gcc/testsuite/gm2/iso/pass/modulereturn2.mod
@@ -0,0 +1,10 @@
+MODULE modulereturn2 ;
+
+
+BEGIN
+ RETURN
+EXCEPT
+ RETURN
+FINALLY
+ RETURN
+END modulereturn2.
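
Taken together, these four tests pin down the ISO Modula-2 rule: a plain RETURN is permitted in a module's initialization, EXCEPT, and FINALLY blocks (the pass tests), while RETURN with an expression is only valid inside a function procedure, so returning 0 from a module body or from a proper procedure must be rejected (the fail tests).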
diff --git a/gcc/text-art/widget.cc b/gcc/text-art/widget.cc
index 3c68018..5d3e517 100644
--- a/gcc/text-art/widget.cc
+++ b/gcc/text-art/widget.cc
@@ -200,6 +200,15 @@ test_wrapper_widget ()
}
static void
+test_empty_wrapper_widget ()
+{
+ style_manager sm;
+ wrapper_widget w (nullptr);
+ canvas c (w.to_canvas (sm));
+ ASSERT_CANVAS_STREQ (c, false, "");
+}
+
+static void
test_vbox_1 ()
{
style_manager sm;
@@ -263,6 +272,7 @@ text_art_widget_cc_tests ()
test_test_widget ();
test_text_widget ();
test_wrapper_widget ();
+ test_empty_wrapper_widget ();
test_vbox_1 ();
test_vbox_2 ();
test_canvas_widget ();
diff --git a/gcc/text-art/widget.h b/gcc/text-art/widget.h
index 55655eb..ab7b1d3 100644
--- a/gcc/text-art/widget.h
+++ b/gcc/text-art/widget.h
@@ -146,15 +146,20 @@ class wrapper_widget : public widget
}
canvas::size_t calc_req_size () override
{
- return m_child->get_req_size ();
+ if (m_child)
+ return m_child->get_req_size ();
+ else
+ return canvas::size_t (0,0);
}
void update_child_alloc_rects () override
{
- m_child->set_alloc_rect (get_alloc_rect ());
+ if (m_child)
+ m_child->set_alloc_rect (get_alloc_rect ());
}
void paint_to_canvas (canvas &canvas) override
{
- m_child->paint_to_canvas (canvas);
+ if (m_child)
+ m_child->paint_to_canvas (canvas);
}
private:
std::unique_ptr<widget> m_child;
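
With these guards a wrapper_widget constructed around nullptr behaves as a null object: it requests a (0,0) size, allocates nothing, and paints nothing, which is what the new test_empty_wrapper_widget selftest verifies through the empty canvas. A minimal usage sketch mirroring that test (illustrative):

  /* Illustrative: a wrapper around nullptr now degrades to a
     zero-sized, paint-free null object.  */
  style_manager sm;
  wrapper_widget w (nullptr);     /* no child widget */
  canvas c (w.to_canvas (sm));    /* empty canvas rather than a crash */
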
diff --git a/gcc/tree-inline.cc b/gcc/tree-inline.cc
index 1a72e31..fa8f8fc 100644
--- a/gcc/tree-inline.cc
+++ b/gcc/tree-inline.cc
@@ -2239,7 +2239,7 @@ copy_bb (copy_body_data *id, basic_block bb,
}
else if (nargs != 0)
{
- tree newlhs = create_tmp_reg_or_ssa_name (integer_type_node);
+ tree newlhs = make_ssa_name (integer_type_node);
count = build_int_cst (integer_type_node, nargs);
new_stmt = gimple_build_assign (gimple_call_lhs (stmt),
PLUS_EXPR, newlhs, count);
diff --git a/gcc/tree-ssa-phiopt.cc b/gcc/tree-ssa-phiopt.cc
index bf493e1..2e4f9da 100644
--- a/gcc/tree-ssa-phiopt.cc
+++ b/gcc/tree-ssa-phiopt.cc
@@ -3605,7 +3605,8 @@ cond_store_replacement (basic_block middle_bb, basic_block join_bb,
static bool
cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
basic_block join_bb, gimple *then_assign,
- gimple *else_assign)
+ gimple *else_assign,
+ gphi *vphi)
{
tree lhs_base, lhs, then_rhs, else_rhs, name;
location_t then_locus, else_locus;
@@ -3672,6 +3673,14 @@ cond_if_else_store_replacement_1 (basic_block then_bb, basic_block else_bb,
add_phi_arg (newphi, else_rhs, EDGE_SUCC (else_bb, 0), else_locus);
new_stmt = gimple_build_assign (lhs, gimple_phi_result (newphi));
+ /* Update the vdef for the new store statement. */
+ tree newvphilhs = make_ssa_name (gimple_vop (cfun));
+ tree vdef = gimple_phi_result (vphi);
+ gimple_set_vuse (new_stmt, newvphilhs);
+ gimple_set_vdef (new_stmt, vdef);
+ gimple_phi_set_result (vphi, newvphilhs);
+ SSA_NAME_DEF_STMT (vdef) = new_stmt;
+ update_stmt (vphi);
if (dump_file && (dump_flags & TDF_DETAILS))
{
fprintf(dump_file, "to use phi:\n");
@@ -3782,7 +3791,7 @@ cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
gimple *else_assign = single_trailing_store_in_bb (else_bb, else_vdef);
if (else_assign)
return cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
- then_assign, else_assign);
+ then_assign, else_assign, vphi);
}
/* If either vectorization or if-conversion is disabled then do
@@ -3921,7 +3930,7 @@ cond_if_else_store_replacement (basic_block then_bb, basic_block else_bb,
then_store = store_pair.first;
else_store = store_pair.second;
res = cond_if_else_store_replacement_1 (then_bb, else_bb, join_bb,
- then_store, else_store);
+ then_store, else_store, vphi);
ok = ok || res;
}
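
The new vphi parameter, threaded through both call sites, lets the sunk store be spliced into the virtual SSA chain of the join block: the virtual PHI gets a fresh result which becomes the store's VUSE, while the PHI's old result is reused as the store's VDEF so downstream virtual uses stay valid. In GIMPLE dump notation the rewiring looks roughly like this (illustrative, C-comment form):

  /* Before sinking (virtual PHI merges the two arms):
       # .MEM_3 = PHI <.MEM_1(then), .MEM_2(else)>
     After cond_if_else_store_replacement_1 with the new splice:
       # .MEM_4 = PHI <.MEM_1(then), .MEM_2(else)>   <- fresh name
       # .MEM_3 = VDEF <.MEM_4>                      <- old PHI result
       *lhs = <PHI-selected value>;                  <- the sunk store  */
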
diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
index 3710694..f699d80 100644
--- a/gcc/tree-vect-stmts.cc
+++ b/gcc/tree-vect-stmts.cc
@@ -2097,310 +2097,246 @@ get_group_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
known at compile time. */
gcc_assert (!STMT_VINFO_STRIDED_P (first_stmt_info) || gap == 0);
- /* Stores can't yet have gaps. */
- gcc_assert (slp_node || vls_type == VLS_LOAD || gap == 0);
-
- if (slp_node)
+ /* For SLP vectorization we directly vectorize a subchain
+ without permutation. */
+ if (! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
+ first_dr_info = STMT_VINFO_DR_INFO (SLP_TREE_SCALAR_STMTS (slp_node)[0]);
+ if (STMT_VINFO_STRIDED_P (first_stmt_info))
+ /* Try to use consecutive accesses of as many elements as possible,
+ separated by the stride, until we have a complete vector.
+ Fall back to scalar accesses if that isn't possible. */
+ *memory_access_type = VMAT_STRIDED_SLP;
+ else
{
- /* For SLP vectorization we directly vectorize a subchain
- without permutation. */
- if (! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
- first_dr_info
- = STMT_VINFO_DR_INFO (SLP_TREE_SCALAR_STMTS (slp_node)[0]);
- if (STMT_VINFO_STRIDED_P (first_stmt_info))
- /* Try to use consecutive accesses of as many elements as possible,
- separated by the stride, until we have a complete vector.
- Fall back to scalar accesses if that isn't possible. */
- *memory_access_type = VMAT_STRIDED_SLP;
- else
+ int cmp = compare_step_with_zero (vinfo, stmt_info);
+ if (cmp < 0)
{
- int cmp = compare_step_with_zero (vinfo, stmt_info);
- if (cmp < 0)
+ if (single_element_p)
+ /* ??? The VMAT_CONTIGUOUS_REVERSE code generation is
+ only correct for single element "interleaving" SLP. */
+ *memory_access_type = get_negative_load_store_type
+ (vinfo, stmt_info, vectype, vls_type, 1,
+ &neg_ldst_offset);
+ else
{
- if (single_element_p)
- /* ??? The VMAT_CONTIGUOUS_REVERSE code generation is
- only correct for single element "interleaving" SLP. */
- *memory_access_type = get_negative_load_store_type
- (vinfo, stmt_info, vectype, vls_type, 1,
- &neg_ldst_offset);
+ /* Try to use consecutive accesses of DR_GROUP_SIZE elements,
+ separated by the stride, until we have a complete vector.
+ Fall back to scalar accesses if that isn't possible. */
+ if (multiple_p (nunits, group_size))
+ *memory_access_type = VMAT_STRIDED_SLP;
else
- {
- /* Try to use consecutive accesses of DR_GROUP_SIZE elements,
- separated by the stride, until we have a complete vector.
- Fall back to scalar accesses if that isn't possible. */
- if (multiple_p (nunits, group_size))
- *memory_access_type = VMAT_STRIDED_SLP;
- else
- *memory_access_type = VMAT_ELEMENTWISE;
- }
- }
- else if (cmp == 0 && loop_vinfo)
- {
- gcc_assert (vls_type == VLS_LOAD);
- *memory_access_type = VMAT_INVARIANT;
+ *memory_access_type = VMAT_ELEMENTWISE;
}
- /* Try using LOAD/STORE_LANES. */
- else if (slp_node->ldst_lanes
- && (*lanes_ifn
- = (vls_type == VLS_LOAD
- ? vect_load_lanes_supported (vectype, group_size,
- masked_p, elsvals)
- : vect_store_lanes_supported (vectype, group_size,
- masked_p))) != IFN_LAST)
- *memory_access_type = VMAT_LOAD_STORE_LANES;
- else if (!loop_vinfo && slp_node->avoid_stlf_fail)
+ }
+ else if (cmp == 0 && loop_vinfo)
+ {
+ gcc_assert (vls_type == VLS_LOAD);
+ *memory_access_type = VMAT_INVARIANT;
+ }
+ /* Try using LOAD/STORE_LANES. */
+ else if (slp_node->ldst_lanes
+ && (*lanes_ifn
+ = (vls_type == VLS_LOAD
+ ? vect_load_lanes_supported (vectype, group_size,
+ masked_p, elsvals)
+ : vect_store_lanes_supported (vectype, group_size,
+ masked_p))) != IFN_LAST)
+ *memory_access_type = VMAT_LOAD_STORE_LANES;
+ else if (!loop_vinfo && slp_node->avoid_stlf_fail)
+ {
+ *memory_access_type = VMAT_ELEMENTWISE;
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "using element-wise load to avoid disrupting "
+ "cross iteration store-to-load forwarding\n");
+ }
+ else
+ *memory_access_type = VMAT_CONTIGUOUS;
+
+  /* If this is single-element interleaving with an element
+     distance that leaves unused vector loads around, fall back
+     to elementwise access if possible - we would otherwise at
+     least create very sub-optimal code in that case (and
+     blow up memory, see PR65518).  */
+ if (loop_vinfo
+ && single_element_p
+ && (*memory_access_type == VMAT_CONTIGUOUS
+ || *memory_access_type == VMAT_CONTIGUOUS_REVERSE)
+ && maybe_gt (group_size, TYPE_VECTOR_SUBPARTS (vectype)))
+ {
+ if (SLP_TREE_LANES (slp_node) == 1)
{
*memory_access_type = VMAT_ELEMENTWISE;
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "using element-wise load to avoid disrupting "
- "cross iteration store-to-load forwarding\n");
+ "single-element interleaving not supported "
+ "for not adjacent vector loads, using "
+ "elementwise access\n");
}
else
- *memory_access_type = VMAT_CONTIGUOUS;
-
- /* If this is single-element interleaving with an element
- distance that leaves unused vector loads around fall back
- to elementwise access if possible - we otherwise least
- create very sub-optimal code in that case (and
- blow up memory, see PR65518). */
- if (loop_vinfo
- && single_element_p
- && (*memory_access_type == VMAT_CONTIGUOUS
- || *memory_access_type == VMAT_CONTIGUOUS_REVERSE)
- && maybe_gt (group_size, TYPE_VECTOR_SUBPARTS (vectype)))
- {
- if (SLP_TREE_LANES (slp_node) == 1)
- {
- *memory_access_type = VMAT_ELEMENTWISE;
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "single-element interleaving not supported "
- "for not adjacent vector loads, using "
- "elementwise access\n");
- }
- else
- {
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "single-element interleaving not supported "
- "for not adjacent vector loads\n");
- return false;
- }
- }
-
- /* For single-element interleaving also fall back to elementwise
- access in case we did not lower a permutation and cannot
- code generate it. */
- auto_vec<tree> temv;
- unsigned n_perms;
- if (loop_vinfo
- && single_element_p
- && SLP_TREE_LANES (slp_node) == 1
- && (*memory_access_type == VMAT_CONTIGUOUS
- || *memory_access_type == VMAT_CONTIGUOUS_REVERSE)
- && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
- && !vect_transform_slp_perm_load
- (loop_vinfo, slp_node, temv, NULL,
- LOOP_VINFO_VECT_FACTOR (loop_vinfo), true, &n_perms))
{
- *memory_access_type = VMAT_ELEMENTWISE;
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "single-element interleaving permutation not "
- "supported, using elementwise access\n");
- }
-
- overrun_p = (loop_vinfo && gap != 0
- && *memory_access_type != VMAT_ELEMENTWISE);
- if (overrun_p && vls_type != VLS_LOAD)
- {
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Grouped store with gaps requires"
- " non-consecutive accesses\n");
+ "single-element interleaving not supported "
+ "for not adjacent vector loads\n");
return false;
}
+ }
- unsigned HOST_WIDE_INT dr_size
- = vect_get_scalar_dr_size (first_dr_info);
- poly_int64 off = 0;
- if (*memory_access_type == VMAT_CONTIGUOUS_REVERSE)
- off = (TYPE_VECTOR_SUBPARTS (vectype) - 1) * -dr_size;
-
- /* An overrun is fine if the trailing elements are smaller
- than the alignment boundary B. Every vector access will
- be a multiple of B and so we are guaranteed to access a
- non-gap element in the same B-sized block. */
- if (overrun_p
- && gap < (vect_known_alignment_in_bytes (first_dr_info,
- vectype, off) / dr_size))
- overrun_p = false;
-
- /* When we have a contiguous access across loop iterations
- but the access in the loop doesn't cover the full vector
- we can end up with no gap recorded but still excess
- elements accessed, see PR103116. Make sure we peel for
- gaps if necessary and sufficient and give up if not.
-
- If there is a combination of the access not covering the full
- vector and a gap recorded then we may need to peel twice. */
- bool large_vector_overrun_p = false;
- if (loop_vinfo
- && (*memory_access_type == VMAT_CONTIGUOUS
- || *memory_access_type == VMAT_CONTIGUOUS_REVERSE)
- && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
- && !multiple_p (group_size * LOOP_VINFO_VECT_FACTOR (loop_vinfo),
- nunits))
- large_vector_overrun_p = overrun_p = true;
-
- /* If the gap splits the vector in half and the target
- can do half-vector operations avoid the epilogue peeling
- by simply loading half of the vector only. Usually
- the construction with an upper zero half will be elided. */
- dr_alignment_support alss;
- int misalign = dr_misalignment (first_dr_info, vectype, off);
- tree half_vtype;
- poly_uint64 remain;
- unsigned HOST_WIDE_INT tem, num;
- if (overrun_p
- && !masked_p
- && *memory_access_type != VMAT_LOAD_STORE_LANES
- && (((alss = vect_supportable_dr_alignment (vinfo, first_dr_info,
- vectype, misalign)))
- == dr_aligned
- || alss == dr_unaligned_supported)
- && can_div_trunc_p (group_size
- * LOOP_VINFO_VECT_FACTOR (loop_vinfo) - gap,
- nunits, &tem, &remain)
- && (known_eq (remain, 0u)
- || (known_ne (remain, 0u)
- && constant_multiple_p (nunits, remain, &num)
- && (vector_vector_composition_type (vectype, num,
- &half_vtype)
- != NULL_TREE))))
- overrun_p = false;
-
- if (overrun_p && !can_overrun_p)
- {
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Peeling for outer loop is not supported\n");
- return false;
- }
+ /* For single-element interleaving also fall back to elementwise
+ access in case we did not lower a permutation and cannot
+ code generate it. */
+ auto_vec<tree> temv;
+ unsigned n_perms;
+ if (loop_vinfo
+ && single_element_p
+ && SLP_TREE_LANES (slp_node) == 1
+ && (*memory_access_type == VMAT_CONTIGUOUS
+ || *memory_access_type == VMAT_CONTIGUOUS_REVERSE)
+ && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
+ && !vect_transform_slp_perm_load
+ (loop_vinfo, slp_node, temv, NULL,
+ LOOP_VINFO_VECT_FACTOR (loop_vinfo), true, &n_perms))
+ {
+ *memory_access_type = VMAT_ELEMENTWISE;
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "single-element interleaving permutation not "
+ "supported, using elementwise access\n");
+ }
- /* Peeling for gaps assumes that a single scalar iteration
- is enough to make sure the last vector iteration doesn't
- access excess elements. */
- if (overrun_p
- && (!can_div_trunc_p (group_size
- * LOOP_VINFO_VECT_FACTOR (loop_vinfo) - gap,
- nunits, &tem, &remain)
- || maybe_lt (remain + group_size, nunits)))
- {
- /* But peeling a single scalar iteration is enough if
- we can use the next power-of-two sized partial
- access and that is sufficiently small to be covered
- by the single scalar iteration. */
- unsigned HOST_WIDE_INT cnunits, cvf, cremain, cpart_size;
- if (masked_p
- || !nunits.is_constant (&cnunits)
- || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&cvf)
- || (((cremain = (group_size * cvf - gap) % cnunits), true)
- && ((cpart_size = (1 << ceil_log2 (cremain))), true)
- && (cremain + group_size < cpart_size
- || vector_vector_composition_type
- (vectype, cnunits / cpart_size,
- &half_vtype) == NULL_TREE)))
- {
- /* If all fails we can still resort to niter masking unless
- the vectors used are too big, so enforce the use of
- partial vectors. */
- if (LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)
- && !large_vector_overrun_p)
- {
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "peeling for gaps insufficient for "
- "access unless using partial "
- "vectors\n");
- LOOP_VINFO_MUST_USE_PARTIAL_VECTORS_P (loop_vinfo) = true;
- }
- else
- {
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "peeling for gaps insufficient for "
- "access\n");
- return false;
- }
- }
- else if (large_vector_overrun_p)
- {
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "can't operate on partial vectors because "
- "only unmasked loads handle access "
- "shortening required because of gaps at "
- "the end of the access\n");
- LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
- }
- }
+ overrun_p = (loop_vinfo && gap != 0
+ && *memory_access_type != VMAT_ELEMENTWISE);
+ if (overrun_p && vls_type != VLS_LOAD)
+ {
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "Grouped store with gaps requires"
+ " non-consecutive accesses\n");
+ return false;
}
- }
- else
- {
- /* We can always handle this case using elementwise accesses,
- but see if something more efficient is available. */
- *memory_access_type = VMAT_ELEMENTWISE;
-
- /* If there is a gap at the end of the group then these optimizations
- would access excess elements in the last iteration. */
- bool would_overrun_p = (gap != 0);
- /* An overrun is fine if the trailing elements are smaller than the
- alignment boundary B. Every vector access will be a multiple of B
- and so we are guaranteed to access a non-gap element in the
- same B-sized block. */
- if (would_overrun_p
- && !masked_p
- && gap < (vect_known_alignment_in_bytes (first_dr_info, vectype)
- / vect_get_scalar_dr_size (first_dr_info)))
- would_overrun_p = false;
- if (!STMT_VINFO_STRIDED_P (first_stmt_info)
- && (can_overrun_p || !would_overrun_p)
- && compare_step_with_zero (vinfo, stmt_info) > 0)
+ unsigned HOST_WIDE_INT dr_size = vect_get_scalar_dr_size (first_dr_info);
+ poly_int64 off = 0;
+ if (*memory_access_type == VMAT_CONTIGUOUS_REVERSE)
+ off = (TYPE_VECTOR_SUBPARTS (vectype) - 1) * -dr_size;
+
+ /* An overrun is fine if the trailing elements are smaller
+ than the alignment boundary B. Every vector access will
+ be a multiple of B and so we are guaranteed to access a
+ non-gap element in the same B-sized block. */
+ if (overrun_p
+ && gap < (vect_known_alignment_in_bytes (first_dr_info,
+ vectype, off) / dr_size))
+ overrun_p = false;
+
+ /* When we have a contiguous access across loop iterations
+ but the access in the loop doesn't cover the full vector
+ we can end up with no gap recorded but still excess
+ elements accessed, see PR103116. Make sure we peel for
+ gaps if necessary and sufficient and give up if not.
+
+ If there is a combination of the access not covering the full
+ vector and a gap recorded then we may need to peel twice. */
+ bool large_vector_overrun_p = false;
+ if (loop_vinfo
+ && (*memory_access_type == VMAT_CONTIGUOUS
+ || *memory_access_type == VMAT_CONTIGUOUS_REVERSE)
+ && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ()
+ && !multiple_p (group_size * LOOP_VINFO_VECT_FACTOR (loop_vinfo),
+ nunits))
+ large_vector_overrun_p = overrun_p = true;
+
+ /* If the gap splits the vector in half and the target
+ can do half-vector operations avoid the epilogue peeling
+ by simply loading half of the vector only. Usually
+ the construction with an upper zero half will be elided. */
+ dr_alignment_support alss;
+ int misalign = dr_misalignment (first_dr_info, vectype, off);
+ tree half_vtype;
+ poly_uint64 remain;
+ unsigned HOST_WIDE_INT tem, num;
+ if (overrun_p
+ && !masked_p
+ && *memory_access_type != VMAT_LOAD_STORE_LANES
+ && (((alss = vect_supportable_dr_alignment (vinfo, first_dr_info,
+ vectype, misalign)))
+ == dr_aligned
+ || alss == dr_unaligned_supported)
+ && can_div_trunc_p (group_size
+ * LOOP_VINFO_VECT_FACTOR (loop_vinfo) - gap,
+ nunits, &tem, &remain)
+ && (known_eq (remain, 0u)
+ || (known_ne (remain, 0u)
+ && constant_multiple_p (nunits, remain, &num)
+ && (vector_vector_composition_type (vectype, num, &half_vtype)
+ != NULL_TREE))))
+ overrun_p = false;
+
+ if (overrun_p && !can_overrun_p)
{
- /* First cope with the degenerate case of a single-element
- vector. */
- if (known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U))
- ;
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "Peeling for outer loop is not supported\n");
+ return false;
+ }
- else
- {
- /* Otherwise try using LOAD/STORE_LANES. */
- *lanes_ifn
- = vls_type == VLS_LOAD
- ? vect_load_lanes_supported (vectype, group_size, masked_p,
- elsvals)
- : vect_store_lanes_supported (vectype, group_size,
- masked_p);
- if (*lanes_ifn != IFN_LAST)
+ /* Peeling for gaps assumes that a single scalar iteration
+ is enough to make sure the last vector iteration doesn't
+ access excess elements. */
+ if (overrun_p
+ && (!can_div_trunc_p (group_size
+ * LOOP_VINFO_VECT_FACTOR (loop_vinfo) - gap,
+ nunits, &tem, &remain)
+ || maybe_lt (remain + group_size, nunits)))
+ {
+ /* But peeling a single scalar iteration is enough if
+ we can use the next power-of-two sized partial
+ access and that is sufficiently small to be covered
+ by the single scalar iteration. */
+ unsigned HOST_WIDE_INT cnunits, cvf, cremain, cpart_size;
+ if (masked_p
+ || !nunits.is_constant (&cnunits)
+ || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&cvf)
+ || (((cremain = (group_size * cvf - gap) % cnunits), true)
+ && ((cpart_size = (1 << ceil_log2 (cremain))), true)
+ && (cremain + group_size < cpart_size
+ || (vector_vector_composition_type (vectype,
+ cnunits / cpart_size,
+ &half_vtype)
+ == NULL_TREE))))
+ {
+ /* If all fails we can still resort to niter masking unless
+ the vectors used are too big, so enforce the use of
+ partial vectors. */
+ if (LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo)
+ && !large_vector_overrun_p)
{
- *memory_access_type = VMAT_LOAD_STORE_LANES;
- overrun_p = would_overrun_p;
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "peeling for gaps insufficient for "
+ "access unless using partial "
+ "vectors\n");
+ LOOP_VINFO_MUST_USE_PARTIAL_VECTORS_P (loop_vinfo) = true;
}
-
- /* If that fails, try using permuting loads. */
- else if (vls_type == VLS_LOAD
- ? vect_grouped_load_supported (vectype,
- single_element_p,
- group_size)
- : vect_grouped_store_supported (vectype, group_size))
+ else
{
- *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
- overrun_p = would_overrun_p;
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "peeling for gaps insufficient for "
+ "access\n");
+ return false;
}
}
+ else if (large_vector_overrun_p)
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "can't operate on partial vectors because "
+ "only unmasked loads handle access "
+ "shortening required because of gaps at "
+ "the end of the access\n");
+ LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo) = false;
+ }
}
}
@@ -2414,7 +2350,7 @@ get_group_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
if ((*memory_access_type == VMAT_ELEMENTWISE
|| *memory_access_type == VMAT_STRIDED_SLP)
&& single_element_p
- && (!slp_node || SLP_TREE_LANES (slp_node) == 1)
+ && SLP_TREE_LANES (slp_node) == 1
&& loop_vinfo
&& vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo,
masked_p, gs_info, elsvals))
@@ -2494,7 +2430,7 @@ static bool
get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
tree vectype, slp_tree slp_node,
bool masked_p, vec_load_store_type vls_type,
- unsigned int ncopies,
+ unsigned int,
vect_memory_access_type *memory_access_type,
poly_int64 *poffset,
dr_alignment_support *alignment_support_scheme,
@@ -2560,54 +2496,13 @@ get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
is irrelevant for them. */
*alignment_support_scheme = dr_unaligned_supported;
}
- else if (STMT_VINFO_GROUPED_ACCESS (stmt_info) || slp_node)
- {
- if (!get_group_load_store_type (vinfo, stmt_info, vectype, slp_node,
- masked_p,
- vls_type, memory_access_type, poffset,
- alignment_support_scheme,
- misalignment, gs_info, lanes_ifn,
- elsvals))
- return false;
- }
- else if (STMT_VINFO_STRIDED_P (stmt_info))
- {
- gcc_assert (!slp_node);
- if (loop_vinfo
- && vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo,
- masked_p, gs_info, elsvals))
- *memory_access_type = VMAT_GATHER_SCATTER;
- else
- *memory_access_type = VMAT_ELEMENTWISE;
- /* Alignment is irrelevant here. */
- *alignment_support_scheme = dr_unaligned_supported;
- }
- else
- {
- int cmp = compare_step_with_zero (vinfo, stmt_info);
- if (cmp == 0)
- {
- gcc_assert (vls_type == VLS_LOAD);
- *memory_access_type = VMAT_INVARIANT;
- /* Invariant accesses perform only component accesses, alignment
- is irrelevant for them. */
- *alignment_support_scheme = dr_unaligned_supported;
- }
- else
- {
- if (cmp < 0)
- *memory_access_type = get_negative_load_store_type
- (vinfo, stmt_info, vectype, vls_type, ncopies, poffset);
- else
- *memory_access_type = VMAT_CONTIGUOUS;
- *misalignment = dr_misalignment (STMT_VINFO_DR_INFO (stmt_info),
- vectype, *poffset);
- *alignment_support_scheme
- = vect_supportable_dr_alignment (vinfo,
- STMT_VINFO_DR_INFO (stmt_info),
- vectype, *misalignment);
- }
- }
+ else if (!get_group_load_store_type (vinfo, stmt_info, vectype, slp_node,
+ masked_p,
+ vls_type, memory_access_type, poffset,
+ alignment_support_scheme,
+ misalignment, gs_info, lanes_ifn,
+ elsvals))
+ return false;
if ((*memory_access_type == VMAT_ELEMENTWISE
|| *memory_access_type == VMAT_STRIDED_SLP)
@@ -2731,7 +2626,7 @@ get_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
such only the first load in the group is aligned, the rest are not.
Because of this the permutes may break the alignment requirements that
have been set, and as such we should, for now, reject them.  */
- if (slp_node && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
+ if (SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -8352,12 +8247,10 @@ vectorizable_store (vec_info *vinfo,
tree dataref_ptr = NULL_TREE;
tree dataref_offset = NULL_TREE;
gimple *ptr_incr = NULL;
- int ncopies;
int j;
stmt_vec_info first_stmt_info;
bool grouped_store;
unsigned int group_size, i;
- bool slp = (slp_node != NULL);
unsigned int vec_num;
bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
tree aggr_type;
@@ -8403,7 +8296,7 @@ vectorizable_store (vec_info *vinfo,
return false;
int mask_index = internal_fn_mask_index (ifn);
- if (mask_index >= 0 && slp_node)
+ if (mask_index >= 0)
mask_index = vect_slp_child_index_for_operand
(call, mask_index, STMT_VINFO_GATHER_SCATTER_P (stmt_info));
if (mask_index >= 0
@@ -8415,9 +8308,9 @@ vectorizable_store (vec_info *vinfo,
/* Cannot have hybrid store SLP -- that would mean storing to the
same location twice. */
- gcc_assert (slp == PURE_SLP_STMT (stmt_info));
+ gcc_assert (PURE_SLP_STMT (stmt_info));
- tree vectype = STMT_VINFO_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
+ tree vectype = SLP_TREE_VECTYPE (stmt_info), rhs_vectype = NULL_TREE;
poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
if (loop_vinfo)
@@ -8428,20 +8321,10 @@ vectorizable_store (vec_info *vinfo,
else
vf = 1;
- /* Multiple types in SLP are handled by creating the appropriate number of
- vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
- case of SLP. */
- if (slp)
- ncopies = 1;
- else
- ncopies = vect_get_num_copies (loop_vinfo, vectype);
-
- gcc_assert (ncopies >= 1);
-
/* FORNOW. This restriction should be relaxed. */
if (loop
&& nested_in_vect_loop_p (loop, stmt_info)
- && (ncopies > 1 || (slp && SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) > 1)))
+ && SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) > 1)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -8467,13 +8350,12 @@ vectorizable_store (vec_info *vinfo,
poly_int64 poffset;
internal_fn lanes_ifn;
if (!get_load_store_type (vinfo, stmt_info, vectype, slp_node, mask, vls_type,
- ncopies, &memory_access_type, &poffset,
+ 1, &memory_access_type, &poffset,
&alignment_support_scheme, &misalignment, &gs_info,
&lanes_ifn))
return false;
- if (slp_node
- && slp_node->ldst_lanes
+ if (slp_node->ldst_lanes
&& memory_access_type != VMAT_LOAD_STORE_LANES)
{
if (dump_enabled_p ())
@@ -8520,8 +8402,7 @@ vectorizable_store (vec_info *vinfo,
dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info), *first_dr_info = NULL;
grouped_store = (STMT_VINFO_GROUPED_ACCESS (stmt_info)
- && memory_access_type != VMAT_GATHER_SCATTER
- && (slp || memory_access_type != VMAT_CONTIGUOUS));
+ && memory_access_type != VMAT_GATHER_SCATTER);
if (grouped_store)
{
first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
@@ -8546,8 +8427,7 @@ vectorizable_store (vec_info *vinfo,
if (costing_p) /* transformation not required. */
{
STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info) = memory_access_type;
- if (slp_node)
- SLP_TREE_MEMORY_ACCESS_TYPE (slp_node) = memory_access_type;
+ SLP_TREE_MEMORY_ACCESS_TYPE (slp_node) = memory_access_type;
if (loop_vinfo
&& LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P (loop_vinfo))
@@ -8556,11 +8436,10 @@ vectorizable_store (vec_info *vinfo,
memory_access_type, &gs_info,
mask);
- if (slp_node
- && (!vect_maybe_update_slp_op_vectype (op_node, vectype)
- || (mask
- && !vect_maybe_update_slp_op_vectype (mask_node,
- mask_vectype))))
+ if (!vect_maybe_update_slp_op_vectype (op_node, vectype)
+ || (mask
+ && !vect_maybe_update_slp_op_vectype (mask_node,
+ mask_vectype)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -8578,22 +8457,8 @@ vectorizable_store (vec_info *vinfo,
"Vectorizing an unaligned access.\n");
STMT_VINFO_TYPE (stmt_info) = store_vec_info_type;
-
- /* As function vect_transform_stmt shows, for interleaving stores
- the whole chain is vectorized when the last store in the chain
- is reached, the other stores in the group are skipped. So we
- want to only cost the last one here, but it's not trivial to
- get the last, as it's equivalent to use the first one for
- costing, use the first one instead. */
- if (grouped_store
- && !slp
- && first_stmt_info != stmt_info)
- return true;
}
- if (slp_node)
- gcc_assert (memory_access_type == SLP_TREE_MEMORY_ACCESS_TYPE (stmt_info));
- else
- gcc_assert (memory_access_type == STMT_VINFO_MEMORY_ACCESS_TYPE (stmt_info));
+ gcc_assert (memory_access_type == SLP_TREE_MEMORY_ACCESS_TYPE (stmt_info));
/* Transform. */
@@ -8602,14 +8467,14 @@ vectorizable_store (vec_info *vinfo,
if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) >= 3)
{
gcc_assert (memory_access_type == VMAT_CONTIGUOUS);
- gcc_assert (!slp || SLP_TREE_LANES (slp_node) == 1);
+ gcc_assert (SLP_TREE_LANES (slp_node) == 1);
if (costing_p)
{
unsigned int inside_cost = 0, prologue_cost = 0;
if (vls_type == VLS_STORE_INVARIANT)
prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
stmt_info, 0, vect_prologue);
- vect_get_store_cost (vinfo, stmt_info, slp_node, ncopies,
+ vect_get_store_cost (vinfo, stmt_info, slp_node, 1,
alignment_support_scheme, misalignment,
&inside_cost, cost_vec);
@@ -8622,67 +8487,28 @@ vectorizable_store (vec_info *vinfo,
return true;
}
return vectorizable_scan_store (vinfo, stmt_info, slp_node,
- gsi, vec_stmt, ncopies);
+ gsi, vec_stmt, 1);
}
- if (grouped_store || slp)
- {
- /* FORNOW */
- gcc_assert (!grouped_store
- || !loop
- || !nested_in_vect_loop_p (loop, stmt_info));
+ /* FORNOW */
+ gcc_assert (!grouped_store
+ || !loop
+ || !nested_in_vect_loop_p (loop, stmt_info));
- if (slp)
- {
- grouped_store = false;
- /* VEC_NUM is the number of vect stmts to be created for this
- group. */
- vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
- first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
- gcc_assert (!STMT_VINFO_GROUPED_ACCESS (first_stmt_info)
- || (DR_GROUP_FIRST_ELEMENT (first_stmt_info)
- == first_stmt_info));
- first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
- op = vect_get_store_rhs (first_stmt_info);
- }
- else
- /* VEC_NUM is the number of vect stmts to be created for this
- group. */
- vec_num = group_size;
+ grouped_store = false;
+ /* VEC_NUM is the number of vect stmts to be created for this
+ group. */
+ vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+ first_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[0];
+ gcc_assert (!STMT_VINFO_GROUPED_ACCESS (first_stmt_info)
+ || (DR_GROUP_FIRST_ELEMENT (first_stmt_info) == first_stmt_info));
+ first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
+ op = vect_get_store_rhs (first_stmt_info);
- ref_type = get_group_alias_ptr_type (first_stmt_info);
- }
- else
- ref_type = reference_alias_ptr_type (DR_REF (first_dr_info->dr));
+ ref_type = get_group_alias_ptr_type (first_stmt_info);
if (!costing_p && dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location, "transform store. ncopies = %d\n",
- ncopies);
-
- /* Check if we need to update prologue cost for invariant,
- and update it accordingly if so. If it's not for
- interleaving store, we can just check vls_type; but if
- it's for interleaving store, need to check the def_type
- of the stored value since the current vls_type is just
- for first_stmt_info. */
- auto update_prologue_cost = [&](unsigned *prologue_cost, tree store_rhs)
- {
- gcc_assert (costing_p);
- if (slp)
- return;
- if (grouped_store)
- {
- gcc_assert (store_rhs);
- enum vect_def_type cdt;
- gcc_assert (vect_is_simple_use (store_rhs, vinfo, &cdt));
- if (cdt != vect_constant_def && cdt != vect_external_def)
- return;
- }
- else if (vls_type != VLS_STORE_INVARIANT)
- return;
- *prologue_cost += record_stmt_cost (cost_vec, 1, scalar_to_vec,
- slp_node, 0, vect_prologue);
- };
+ dump_printf_loc (MSG_NOTE, vect_location, "transform store.\n");
if (memory_access_type == VMAT_ELEMENTWISE
|| memory_access_type == VMAT_STRIDED_SLP)
@@ -8690,14 +8516,12 @@ vectorizable_store (vec_info *vinfo,
unsigned inside_cost = 0, prologue_cost = 0;
gimple_stmt_iterator incr_gsi;
bool insert_after;
- gimple *incr;
tree offvar = NULL_TREE;
tree ivstep;
tree running_off;
tree stride_base, stride_step, alias_off;
tree vec_oprnd = NULL_TREE;
tree dr_offset;
- unsigned int g;
/* Checked by get_load_store_type. */
unsigned int const_nunits = nunits.to_constant ();
@@ -8735,116 +8559,112 @@ vectorizable_store (vec_info *vinfo,
unsigned lnel = 1;
tree ltype = elem_type;
tree lvectype = vectype;
- if (slp)
- {
- HOST_WIDE_INT n = gcd (group_size, const_nunits);
- if (n == const_nunits)
- {
- int mis_align = dr_misalignment (first_dr_info, vectype);
- /* With VF > 1 we advance the DR by step, if that is constant
- and only aligned when performed VF times, DR alignment
- analysis can analyze this as aligned since it assumes
- contiguous accesses. But that is not how we code generate
- here, so adjust for this. */
- if (maybe_gt (vf, 1u)
- && !multiple_p (DR_STEP_ALIGNMENT (first_dr_info->dr),
- DR_TARGET_ALIGNMENT (first_dr_info)))
- mis_align = -1;
- dr_alignment_support dr_align
- = vect_supportable_dr_alignment (vinfo, dr_info, vectype,
- mis_align);
- if (dr_align == dr_aligned
- || dr_align == dr_unaligned_supported)
- {
- nstores = 1;
- lnel = const_nunits;
- ltype = vectype;
- lvectype = vectype;
- alignment_support_scheme = dr_align;
- misalignment = mis_align;
- }
- }
- else if (n > 1)
- {
- nstores = const_nunits / n;
- lnel = n;
- ltype = build_vector_type (elem_type, n);
+ HOST_WIDE_INT n = gcd (group_size, const_nunits);
+ if (n == const_nunits)
+ {
+ int mis_align = dr_misalignment (first_dr_info, vectype);
+ /* With VF > 1 we advance the DR by step, if that is constant
+ and only aligned when performed VF times, DR alignment
+ analysis can analyze this as aligned since it assumes
+ contiguous accesses. But that is not how we code generate
+ here, so adjust for this. */
+ if (maybe_gt (vf, 1u)
+ && !multiple_p (DR_STEP_ALIGNMENT (first_dr_info->dr),
+ DR_TARGET_ALIGNMENT (first_dr_info)))
+ mis_align = -1;
+ dr_alignment_support dr_align
+ = vect_supportable_dr_alignment (vinfo, dr_info, vectype,
+ mis_align);
+ if (dr_align == dr_aligned
+ || dr_align == dr_unaligned_supported)
+ {
+ nstores = 1;
+ lnel = const_nunits;
+ ltype = vectype;
lvectype = vectype;
- int mis_align = dr_misalignment (first_dr_info, ltype);
- if (maybe_gt (vf, 1u)
- && !multiple_p (DR_STEP_ALIGNMENT (first_dr_info->dr),
- DR_TARGET_ALIGNMENT (first_dr_info)))
- mis_align = -1;
- dr_alignment_support dr_align
- = vect_supportable_dr_alignment (vinfo, dr_info, ltype,
- mis_align);
alignment_support_scheme = dr_align;
misalignment = mis_align;
-
- /* First check if vec_extract optab doesn't support extraction
- of vector elts directly. */
- scalar_mode elmode = SCALAR_TYPE_MODE (elem_type);
- machine_mode vmode;
- if (!VECTOR_MODE_P (TYPE_MODE (vectype))
- || !related_vector_mode (TYPE_MODE (vectype), elmode,
- n).exists (&vmode)
- || (convert_optab_handler (vec_extract_optab,
- TYPE_MODE (vectype), vmode)
- == CODE_FOR_nothing)
- || !(dr_align == dr_aligned
- || dr_align == dr_unaligned_supported))
- {
- /* Try to avoid emitting an extract of vector elements
- by performing the extracts using an integer type of the
- same size, extracting from a vector of those and then
- re-interpreting it as the original vector type if
- supported. */
- unsigned lsize
- = n * GET_MODE_BITSIZE (elmode);
- unsigned int lnunits = const_nunits / n;
- /* If we can't construct such a vector fall back to
- element extracts from the original vector type and
- element size stores. */
- if (int_mode_for_size (lsize, 0).exists (&elmode)
- && VECTOR_MODE_P (TYPE_MODE (vectype))
- && related_vector_mode (TYPE_MODE (vectype), elmode,
- lnunits).exists (&vmode)
- && (convert_optab_handler (vec_extract_optab,
- vmode, elmode)
- != CODE_FOR_nothing))
- {
- nstores = lnunits;
- lnel = n;
- ltype = build_nonstandard_integer_type (lsize, 1);
- lvectype = build_vector_type (ltype, nstores);
- }
- /* Else fall back to vector extraction anyway.
- Fewer stores are more important than avoiding spilling
- of the vector we extract from. Compared to the
- construction case in vectorizable_load no store-forwarding
- issue exists here for reasonable archs. But only
- if the store is supported. */
- else if (!(dr_align == dr_aligned
- || dr_align == dr_unaligned_supported))
- {
- nstores = const_nunits;
- lnel = 1;
- ltype = elem_type;
- lvectype = vectype;
- }
- }
}
- unsigned align;
- if (alignment_support_scheme == dr_aligned)
- align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
- else
- align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
- /* Alignment is at most the access size if we do multiple stores. */
- if (nstores > 1)
- align = MIN (tree_to_uhwi (TYPE_SIZE_UNIT (ltype)), align);
- ltype = build_aligned_type (ltype, align * BITS_PER_UNIT);
- ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
}
+ else if (n > 1)
+ {
+ nstores = const_nunits / n;
+ lnel = n;
+ ltype = build_vector_type (elem_type, n);
+ lvectype = vectype;
+ int mis_align = dr_misalignment (first_dr_info, ltype);
+ if (maybe_gt (vf, 1u)
+ && !multiple_p (DR_STEP_ALIGNMENT (first_dr_info->dr),
+ DR_TARGET_ALIGNMENT (first_dr_info)))
+ mis_align = -1;
+ dr_alignment_support dr_align
+ = vect_supportable_dr_alignment (vinfo, dr_info, ltype,
+ mis_align);
+ alignment_support_scheme = dr_align;
+ misalignment = mis_align;
+
+ /* First check if vec_extract optab doesn't support extraction
+ of vector elts directly. */
+ scalar_mode elmode = SCALAR_TYPE_MODE (elem_type);
+ machine_mode vmode;
+ if (!VECTOR_MODE_P (TYPE_MODE (vectype))
+ || !related_vector_mode (TYPE_MODE (vectype), elmode,
+ n).exists (&vmode)
+ || (convert_optab_handler (vec_extract_optab,
+ TYPE_MODE (vectype), vmode)
+ == CODE_FOR_nothing)
+ || !(dr_align == dr_aligned
+ || dr_align == dr_unaligned_supported))
+ {
+ /* Try to avoid emitting an extract of vector elements
+ by performing the extracts using an integer type of the
+ same size, extracting from a vector of those and then
+ re-interpreting it as the original vector type if
+ supported. */
+ unsigned lsize = n * GET_MODE_BITSIZE (elmode);
+ unsigned int lnunits = const_nunits / n;
+ /* If we can't construct such a vector fall back to
+ element extracts from the original vector type and
+ element size stores. */
+ if (int_mode_for_size (lsize, 0).exists (&elmode)
+ && VECTOR_MODE_P (TYPE_MODE (vectype))
+ && related_vector_mode (TYPE_MODE (vectype), elmode,
+ lnunits).exists (&vmode)
+ && (convert_optab_handler (vec_extract_optab,
+ vmode, elmode)
+ != CODE_FOR_nothing))
+ {
+ nstores = lnunits;
+ lnel = n;
+ ltype = build_nonstandard_integer_type (lsize, 1);
+ lvectype = build_vector_type (ltype, nstores);
+ }
+ /* Else fall back to vector extraction anyway.
+ Fewer stores are more important than avoiding spilling
+ of the vector we extract from. Compared to the
+ construction case in vectorizable_load no store-forwarding
+ issue exists here for reasonable archs. But only
+ if the store is supported. */
+ else if (!(dr_align == dr_aligned
+ || dr_align == dr_unaligned_supported))
+ {
+ nstores = const_nunits;
+ lnel = 1;
+ ltype = elem_type;
+ lvectype = vectype;
+ }
+ }
+ }
+ unsigned align;
+ if (alignment_support_scheme == dr_aligned)
+ align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
+ else
+ align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
+ /* Alignment is at most the access size if we do multiple stores. */
+ if (nstores > 1)
+ align = MIN (tree_to_uhwi (TYPE_SIZE_UNIT (ltype)), align);
+ ltype = build_aligned_type (ltype, align * BITS_PER_UNIT);
+ int ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
if (!costing_p)
{
@@ -8858,7 +8678,6 @@ vectorizable_store (vec_info *vinfo,
ivstep = cse_and_gimplify_to_preheader (loop_vinfo, ivstep);
create_iv (stride_base, PLUS_EXPR, ivstep, NULL, loop, &incr_gsi,
insert_after, &offvar, NULL);
- incr = gsi_stmt (incr_gsi);
stride_step = cse_and_gimplify_to_preheader (loop_vinfo, stride_step);
}
@@ -8869,104 +8688,68 @@ vectorizable_store (vec_info *vinfo,
/* For costing some adjacent vector stores, we'd like to cost with
the total number of them once instead of cost each one by one. */
unsigned int n_adjacent_stores = 0;
- for (g = 0; g < group_size; g++)
+ running_off = offvar;
+ if (!costing_p)
+ vect_get_vec_defs (vinfo, next_stmt_info, slp_node, ncopies, op,
+ &vec_oprnds);
+ unsigned int group_el = 0;
+ unsigned HOST_WIDE_INT elsz
+ = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
+ for (j = 0; j < ncopies; j++)
{
- running_off = offvar;
if (!costing_p)
{
- if (g)
+ vec_oprnd = vec_oprnds[j];
+ /* Pun the vector to extract from if necessary. */
+ if (lvectype != vectype)
{
- tree size = TYPE_SIZE_UNIT (ltype);
- tree pos
- = fold_build2 (MULT_EXPR, sizetype, size_int (g), size);
- tree newoff = copy_ssa_name (running_off, NULL);
- incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
- running_off, pos);
- vect_finish_stmt_generation (vinfo, stmt_info, incr, gsi);
- running_off = newoff;
+ tree tem = make_ssa_name (lvectype);
+ tree cvt = build1 (VIEW_CONVERT_EXPR, lvectype, vec_oprnd);
+ gimple *pun = gimple_build_assign (tem, cvt);
+ vect_finish_stmt_generation (vinfo, stmt_info, pun, gsi);
+ vec_oprnd = tem;
}
}
- if (!slp)
- op = vect_get_store_rhs (next_stmt_info);
- if (!costing_p)
- vect_get_vec_defs (vinfo, next_stmt_info, slp_node, ncopies, op,
- &vec_oprnds);
- else
- update_prologue_cost (&prologue_cost, op);
- unsigned int group_el = 0;
- unsigned HOST_WIDE_INT
- elsz = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
- for (j = 0; j < ncopies; j++)
+ for (i = 0; i < nstores; i++)
{
- if (!costing_p)
+ if (costing_p)
{
- vec_oprnd = vec_oprnds[j];
- /* Pun the vector to extract from if necessary. */
- if (lvectype != vectype)
- {
- tree tem = make_ssa_name (lvectype);
- tree cvt
- = build1 (VIEW_CONVERT_EXPR, lvectype, vec_oprnd);
- gimple *pun = gimple_build_assign (tem, cvt);
- vect_finish_stmt_generation (vinfo, stmt_info, pun, gsi);
- vec_oprnd = tem;
- }
+ n_adjacent_stores++;
+ continue;
}
- for (i = 0; i < nstores; i++)
+ tree newref, newoff;
+ gimple *incr, *assign;
+ tree size = TYPE_SIZE (ltype);
+ /* Extract the i'th component. */
+ tree pos = fold_build2 (MULT_EXPR, bitsizetype,
+ bitsize_int (i), size);
+ tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
+ size, pos);
+
+ elem = force_gimple_operand_gsi (gsi, elem, true, NULL_TREE, true,
+ GSI_SAME_STMT);
+
+ tree this_off = build_int_cst (TREE_TYPE (alias_off),
+ group_el * elsz);
+ newref = build2 (MEM_REF, ltype, running_off, this_off);
+ vect_copy_ref_info (newref, DR_REF (first_dr_info->dr));
+
+ /* And store it to *running_off. */
+ assign = gimple_build_assign (newref, elem);
+ vect_finish_stmt_generation (vinfo, stmt_info, assign, gsi);
+
+ group_el += lnel;
+ if (group_el == group_size)
{
- if (costing_p)
- {
- n_adjacent_stores++;
- continue;
- }
- tree newref, newoff;
- gimple *incr, *assign;
- tree size = TYPE_SIZE (ltype);
- /* Extract the i'th component. */
- tree pos = fold_build2 (MULT_EXPR, bitsizetype,
- bitsize_int (i), size);
- tree elem = fold_build3 (BIT_FIELD_REF, ltype, vec_oprnd,
- size, pos);
-
- elem = force_gimple_operand_gsi (gsi, elem, true,
- NULL_TREE, true,
- GSI_SAME_STMT);
-
- tree this_off = build_int_cst (TREE_TYPE (alias_off),
- group_el * elsz);
- newref = build2 (MEM_REF, ltype,
- running_off, this_off);
- vect_copy_ref_info (newref, DR_REF (first_dr_info->dr));
-
- /* And store it to *running_off. */
- assign = gimple_build_assign (newref, elem);
- vect_finish_stmt_generation (vinfo, stmt_info, assign, gsi);
-
- group_el += lnel;
- if (! slp
- || group_el == group_size)
- {
- newoff = copy_ssa_name (running_off, NULL);
- incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
- running_off, stride_step);
- vect_finish_stmt_generation (vinfo, stmt_info, incr, gsi);
+ newoff = copy_ssa_name (running_off, NULL);
+ incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
+ running_off, stride_step);
+ vect_finish_stmt_generation (vinfo, stmt_info, incr, gsi);
- running_off = newoff;
- group_el = 0;
- }
- if (g == group_size - 1
- && !slp)
- {
- if (j == 0 && i == 0)
- *vec_stmt = assign;
- STMT_VINFO_VEC_STMTS (stmt_info).safe_push (assign);
- }
+ running_off = newoff;
+ group_el = 0;
}
}
- next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
- vec_oprnds.truncate(0);
- if (slp)
- break;
}
if (costing_p)
@@ -9107,7 +8890,7 @@ vectorizable_store (vec_info *vinfo,
if (memory_access_type == VMAT_LOAD_STORE_LANES)
{
- if (costing_p && slp_node)
+ if (costing_p)
/* Update all incoming store operand nodes, the general handling
above only handles the mask and the first store operand node. */
for (slp_tree child : SLP_TREE_CHILDREN (slp_node))
@@ -9123,49 +8906,18 @@ vectorizable_store (vec_info *vinfo,
/* For costing some adjacent vector stores, we'd like to cost with
the total number of them once instead of cost each one by one. */
unsigned int n_adjacent_stores = 0;
- if (slp)
- ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) / group_size;
+ int ncopies = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node) / group_size;
for (j = 0; j < ncopies; j++)
{
- gimple *new_stmt;
if (j == 0)
{
- /* For interleaved stores we collect vectorized defs for all
- the stores in the group in DR_CHAIN. DR_CHAIN is then used
- as an input to vect_permute_store_chain(). */
- stmt_vec_info next_stmt_info = first_stmt_info;
- for (i = 0; i < group_size; i++)
- {
- /* Since gaps are not supported for interleaved stores,
- DR_GROUP_SIZE is the exact number of stmts in the
- chain. Therefore, NEXT_STMT_INFO can't be NULL_TREE. */
- op = vect_get_store_rhs (next_stmt_info);
- if (costing_p)
- update_prologue_cost (&prologue_cost, op);
- else if (!slp)
- {
- vect_get_vec_defs_for_operand (vinfo, next_stmt_info,
- ncopies, op,
- gvec_oprnds[i]);
- vec_oprnd = (*gvec_oprnds[i])[0];
- dr_chain.quick_push (vec_oprnd);
- }
- next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
- }
-
if (!costing_p)
{
if (mask)
{
- if (slp_node)
- vect_get_slp_defs (mask_node, &vec_masks);
- else
- vect_get_vec_defs_for_operand (vinfo, stmt_info, ncopies,
- mask, &vec_masks,
- mask_vectype);
+ vect_get_slp_defs (mask_node, &vec_masks);
vec_mask = vec_masks[0];
}
-
dataref_ptr
= vect_create_data_ref_ptr (vinfo, first_stmt_info,
aggr_type, NULL, offset, &dummy,
@@ -9175,19 +8927,6 @@ vectorizable_store (vec_info *vinfo,
else if (!costing_p)
{
gcc_assert (!LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo));
- /* DR_CHAIN is then used as an input to
- vect_permute_store_chain(). */
- if (!slp)
- {
- /* We should have caught mismatched types earlier. */
- gcc_assert (
- useless_type_conversion_p (vectype, TREE_TYPE (vec_oprnd)));
- for (i = 0; i < group_size; i++)
- {
- vec_oprnd = (*gvec_oprnds[i])[j];
- dr_chain[i] = vec_oprnd;
- }
- }
if (mask)
vec_mask = vec_masks[j];
dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi,
@@ -9211,17 +8950,12 @@ vectorizable_store (vec_info *vinfo,
/* Store the individual vectors into the array. */
for (i = 0; i < group_size; i++)
{
- if (slp)
- {
- slp_tree child;
- if (i == 0 || !mask_node)
- child = SLP_TREE_CHILDREN (slp_node)[i];
- else
- child = SLP_TREE_CHILDREN (slp_node)[i + 1];
- vec_oprnd = SLP_TREE_VEC_DEFS (child)[j];
- }
+ slp_tree child;
+ if (i == 0 || !mask_node)
+ child = SLP_TREE_CHILDREN (slp_node)[i];
else
- vec_oprnd = dr_chain[i];
+ child = SLP_TREE_CHILDREN (slp_node)[i + 1];
+ vec_oprnd = SLP_TREE_VEC_DEFS (child)[j];
write_vector_array (vinfo, stmt_info, gsi, vec_oprnd, vec_array,
i);
}
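Each vector of the group is written into VEC_ARRAY by write_vector_array, and a single internal call then stores the whole array interleaved. A conceptual sketch of the interleaving semantics (int elements assumed; not GCC code, roughly what an AArch64 st2/st3/st4 performs):

  #include <array>

  template <int LANES, int NUNITS>
  void
  store_lanes (int *dst,
               const std::array<std::array<int, NUNITS>, LANES> &vecs)
  {
    // Lane i of every vector in the group lands adjacently in memory.
    for (int i = 0; i < NUNITS; ++i)
      for (int l = 0; l < LANES; ++l)
        *dst++ = vecs[l][i];
  }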
@@ -9287,14 +9021,9 @@ vectorizable_store (vec_info *vinfo,
}
gimple_call_set_nothrow (call, true);
vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
- new_stmt = call;
/* Record that VEC_ARRAY is now dead. */
vect_clobber_variable (vinfo, stmt_info, gsi, vec_array);
- if (j == 0 && !slp)
- *vec_stmt = new_stmt;
- if (!slp)
- STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
}
if (costing_p)
@@ -9318,7 +9047,7 @@ vectorizable_store (vec_info *vinfo,
gcc_assert (!grouped_store);
auto_vec<tree> vec_offsets;
unsigned int inside_cost = 0, prologue_cost = 0;
- int num_stmts = ncopies * vec_num;
+ int num_stmts = vec_num;
for (j = 0; j < num_stmts; j++)
{
gimple *new_stmt;
@@ -9332,21 +9061,9 @@ vectorizable_store (vec_info *vinfo,
/* Since the store is not grouped, DR_GROUP_SIZE is 1, and
DR_CHAIN is of size 1. */
gcc_assert (group_size == 1);
- if (slp_node)
- vect_get_slp_defs (op_node, gvec_oprnds[0]);
- else
- vect_get_vec_defs_for_operand (vinfo, first_stmt_info,
- num_stmts, op, gvec_oprnds[0]);
+ vect_get_slp_defs (op_node, gvec_oprnds[0]);
if (mask)
- {
- if (slp_node)
- vect_get_slp_defs (mask_node, &vec_masks);
- else
- vect_get_vec_defs_for_operand (vinfo, stmt_info,
- num_stmts,
- mask, &vec_masks,
- mask_vectype);
- }
+ vect_get_slp_defs (mask_node, &vec_masks);
if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
vect_get_gather_scatter_ops (loop_vinfo, loop, stmt_info,
@@ -9638,17 +9355,10 @@ vectorizable_store (vec_info *vinfo,
vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
}
- if (slp)
- slp_node->push_vec_def (new_stmt);
+ slp_node->push_vec_def (new_stmt);
}
-
- if (!slp && !costing_p)
- STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
}
- if (!slp && !costing_p)
- *vec_stmt = STMT_VINFO_VEC_STMTS (stmt_info)[0];
-
if (costing_p && dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_store_cost: inside_cost = %d, "
@@ -9669,330 +9379,262 @@ vectorizable_store (vec_info *vinfo,
unsigned int n_adjacent_stores = 0;
auto_vec<tree> result_chain (group_size);
auto_vec<tree, 1> vec_oprnds;
- for (j = 0; j < ncopies; j++)
+ gimple *new_stmt;
+ if (!costing_p)
{
- gimple *new_stmt;
- if (j == 0)
- {
- if (slp && !costing_p)
- {
- /* Get vectorized arguments for SLP_NODE. */
- vect_get_vec_defs (vinfo, stmt_info, slp_node, 1, op,
- &vec_oprnds, mask, &vec_masks);
- vec_oprnd = vec_oprnds[0];
- if (mask)
- vec_mask = vec_masks[0];
- }
- else
- {
- /* For interleaved stores we collect vectorized defs for all the
- stores in the group in DR_CHAIN. DR_CHAIN is then used as an
- input to vect_permute_store_chain().
-
- If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN
- is of size 1. */
- stmt_vec_info next_stmt_info = first_stmt_info;
- for (i = 0; i < group_size; i++)
- {
- /* Since gaps are not supported for interleaved stores,
- DR_GROUP_SIZE is the exact number of stmts in the chain.
- Therefore, NEXT_STMT_INFO can't be NULL_TREE. In case
- that there is no interleaving, DR_GROUP_SIZE is 1,
- and only one iteration of the loop will be executed. */
- op = vect_get_store_rhs (next_stmt_info);
- if (costing_p)
- update_prologue_cost (&prologue_cost, op);
- else
- {
- vect_get_vec_defs_for_operand (vinfo, next_stmt_info,
- ncopies, op,
- gvec_oprnds[i]);
- vec_oprnd = (*gvec_oprnds[i])[0];
- dr_chain.quick_push (vec_oprnd);
- }
- next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
- }
- if (mask && !costing_p)
- {
- vect_get_vec_defs_for_operand (vinfo, stmt_info, ncopies,
- mask, &vec_masks,
- mask_vectype);
- vec_mask = vec_masks[0];
- }
- }
+ /* Get vectorized arguments for SLP_NODE. */
+ vect_get_vec_defs (vinfo, stmt_info, slp_node, 1, op,
+ &vec_oprnds, mask, &vec_masks);
+ vec_oprnd = vec_oprnds[0];
+ if (mask)
+ vec_mask = vec_masks[0];
+ }
+ else
+ {
+ /* For interleaved stores we collect vectorized defs for all the
+ stores in the group in DR_CHAIN. DR_CHAIN is then used as an
+ input to vect_permute_store_chain().
- /* We should have catched mismatched types earlier. */
- gcc_assert (costing_p
- || useless_type_conversion_p (vectype,
- TREE_TYPE (vec_oprnd)));
- bool simd_lane_access_p
- = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) != 0;
- if (!costing_p
- && simd_lane_access_p
- && !loop_masks
- && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR
- && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0))
- && integer_zerop (get_dr_vinfo_offset (vinfo, first_dr_info))
- && integer_zerop (DR_INIT (first_dr_info->dr))
- && alias_sets_conflict_p (get_alias_set (aggr_type),
- get_alias_set (TREE_TYPE (ref_type))))
+ If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN
+ is of size 1. */
+ stmt_vec_info next_stmt_info = first_stmt_info;
+ for (i = 0; i < group_size; i++)
+ {
+ /* Since gaps are not supported for interleaved stores,
+ DR_GROUP_SIZE is the exact number of stmts in the chain.
+ Therefore, NEXT_STMT_INFO can't be NULL_TREE. If there
+ is no interleaving, DR_GROUP_SIZE is 1, and only one
+ iteration of the loop is executed. */
+ op = vect_get_store_rhs (next_stmt_info);
+ if (!costing_p)
{
- dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr));
- dataref_offset = build_int_cst (ref_type, 0);
+ vect_get_vec_defs_for_operand (vinfo, next_stmt_info,
+ 1, op, gvec_oprnds[i]);
+ vec_oprnd = (*gvec_oprnds[i])[0];
+ dr_chain.quick_push (vec_oprnd);
}
- else if (!costing_p)
- dataref_ptr
- = vect_create_data_ref_ptr (vinfo, first_stmt_info, aggr_type,
- simd_lane_access_p ? loop : NULL,
- offset, &dummy, gsi, &ptr_incr,
- simd_lane_access_p, bump);
+ next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
}
- else if (!costing_p)
+ if (mask && !costing_p)
{
- gcc_assert (!LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo));
- /* DR_CHAIN is then used as an input to vect_permute_store_chain().
- If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN is
- of size 1. */
- for (i = 0; i < group_size; i++)
- {
- vec_oprnd = (*gvec_oprnds[i])[j];
- dr_chain[i] = vec_oprnd;
- }
- if (mask)
- vec_mask = vec_masks[j];
- if (dataref_offset)
- dataref_offset = int_const_binop (PLUS_EXPR, dataref_offset, bump);
- else
- dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi,
- stmt_info, bump);
+ vect_get_vec_defs_for_operand (vinfo, stmt_info, 1,
+ mask, &vec_masks, mask_vectype);
+ vec_mask = vec_masks[0];
}
+ }
- new_stmt = NULL;
- if (grouped_store)
- {
- /* Permute. */
- gcc_assert (memory_access_type == VMAT_CONTIGUOUS_PERMUTE);
- if (costing_p)
- {
- int group_size = DR_GROUP_SIZE (first_stmt_info);
- int nstmts = ceil_log2 (group_size) * group_size;
- inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
- slp_node, 0, vect_body);
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "vect_model_store_cost: "
- "strided group_size = %d .\n",
- group_size);
- }
- else
- vect_permute_store_chain (vinfo, dr_chain, group_size, stmt_info,
- gsi, &result_chain);
- }
+ /* We should have caught mismatched types earlier. */
+ gcc_assert (costing_p
+ || useless_type_conversion_p (vectype, TREE_TYPE (vec_oprnd)));
+ bool simd_lane_access_p
+ = STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info) != 0;
+ if (!costing_p
+ && simd_lane_access_p
+ && !loop_masks
+ && TREE_CODE (DR_BASE_ADDRESS (first_dr_info->dr)) == ADDR_EXPR
+ && VAR_P (TREE_OPERAND (DR_BASE_ADDRESS (first_dr_info->dr), 0))
+ && integer_zerop (get_dr_vinfo_offset (vinfo, first_dr_info))
+ && integer_zerop (DR_INIT (first_dr_info->dr))
+ && alias_sets_conflict_p (get_alias_set (aggr_type),
+ get_alias_set (TREE_TYPE (ref_type))))
+ {
+ dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr));
+ dataref_offset = build_int_cst (ref_type, 0);
+ }
+ else if (!costing_p)
+ dataref_ptr = vect_create_data_ref_ptr (vinfo, first_stmt_info, aggr_type,
+ simd_lane_access_p ? loop : NULL,
+ offset, &dummy, gsi, &ptr_incr,
+ simd_lane_access_p, bump);
- stmt_vec_info next_stmt_info = first_stmt_info;
- for (i = 0; i < vec_num; i++)
+ new_stmt = NULL;
+ if (grouped_store)
+ {
+ /* Permute. */
+ gcc_assert (memory_access_type == VMAT_CONTIGUOUS_PERMUTE);
+ if (costing_p)
{
- if (!costing_p)
- {
- if (slp)
- vec_oprnd = vec_oprnds[i];
- else if (grouped_store)
- /* For grouped stores vectorized defs are interleaved in
- vect_permute_store_chain(). */
- vec_oprnd = result_chain[i];
- }
-
- if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
- {
- if (costing_p)
- inside_cost += record_stmt_cost (cost_vec, 1, vec_perm,
- slp_node, 0, vect_body);
- else
- {
- tree perm_mask = perm_mask_for_reverse (vectype);
- tree perm_dest = vect_create_destination_var (
- vect_get_store_rhs (stmt_info), vectype);
- tree new_temp = make_ssa_name (perm_dest);
-
- /* Generate the permute statement. */
- gimple *perm_stmt
- = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
- vec_oprnd, perm_mask);
- vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt,
- gsi);
+ int group_size = DR_GROUP_SIZE (first_stmt_info);
+ int nstmts = ceil_log2 (group_size) * group_size;
+ inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
+ slp_node, 0, vect_body);
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_NOTE, vect_location, "vect_model_store_cost: "
+ "strided group_size = %d .\n", group_size);
+ }
+ else
+ vect_permute_store_chain (vinfo, dr_chain, group_size, stmt_info,
+ gsi, &result_chain);
+ }
- perm_stmt = SSA_NAME_DEF_STMT (new_temp);
- vec_oprnd = new_temp;
- }
- }
+ for (i = 0; i < vec_num; i++)
+ {
+ if (!costing_p)
+ vec_oprnd = vec_oprnds[i];
+ if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
+ {
if (costing_p)
+ inside_cost += record_stmt_cost (cost_vec, 1, vec_perm,
+ slp_node, 0, vect_body);
+ else
{
- n_adjacent_stores++;
+ tree perm_mask = perm_mask_for_reverse (vectype);
+ tree perm_dest
+ = vect_create_destination_var (vect_get_store_rhs (stmt_info),
+ vectype);
+ tree new_temp = make_ssa_name (perm_dest);
- if (!slp)
- {
- next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
- if (!next_stmt_info)
- break;
- }
+ /* Generate the permute statement. */
+ gimple *perm_stmt
+ = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
+ vec_oprnd, perm_mask);
+ vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
- continue;
+ perm_stmt = SSA_NAME_DEF_STMT (new_temp);
+ vec_oprnd = new_temp;
}
+ }
- tree final_mask = NULL_TREE;
- tree final_len = NULL_TREE;
- tree bias = NULL_TREE;
- if (loop_masks)
- final_mask = vect_get_loop_mask (loop_vinfo, gsi, loop_masks,
- vec_num * ncopies, vectype,
- vec_num * j + i);
- if (slp && vec_mask)
- vec_mask = vec_masks[i];
- if (vec_mask)
- final_mask = prepare_vec_mask (loop_vinfo, mask_vectype, final_mask,
- vec_mask, gsi);
-
- if (i > 0)
- /* Bump the vector pointer. */
- dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi,
- stmt_info, bump);
+ if (costing_p)
+ {
+ n_adjacent_stores++;
+ continue;
+ }
- unsigned misalign;
- unsigned HOST_WIDE_INT align;
- align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
- if (alignment_support_scheme == dr_aligned)
- misalign = 0;
- else if (misalignment == DR_MISALIGNMENT_UNKNOWN)
- {
- align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
- misalign = 0;
- }
- else
- misalign = misalignment;
- if (dataref_offset == NULL_TREE
- && TREE_CODE (dataref_ptr) == SSA_NAME)
- set_ptr_info_alignment (get_ptr_info (dataref_ptr), align,
- misalign);
- align = least_bit_hwi (misalign | align);
-
- /* Compute IFN when LOOP_LENS or final_mask valid. */
- machine_mode vmode = TYPE_MODE (vectype);
- machine_mode new_vmode = vmode;
- internal_fn partial_ifn = IFN_LAST;
- if (loop_lens)
- {
- opt_machine_mode new_ovmode
- = get_len_load_store_mode (vmode, false, &partial_ifn);
- new_vmode = new_ovmode.require ();
- unsigned factor
- = (new_ovmode == vmode) ? 1 : GET_MODE_UNIT_SIZE (vmode);
- final_len = vect_get_loop_len (loop_vinfo, gsi, loop_lens,
- vec_num * ncopies, vectype,
- vec_num * j + i, factor);
- }
- else if (final_mask)
- {
- if (!can_vec_mask_load_store_p (
- vmode, TYPE_MODE (TREE_TYPE (final_mask)), false,
- &partial_ifn))
- gcc_unreachable ();
- }
+ tree final_mask = NULL_TREE;
+ tree final_len = NULL_TREE;
+ tree bias = NULL_TREE;
+ if (loop_masks)
+ final_mask = vect_get_loop_mask (loop_vinfo, gsi, loop_masks,
+ vec_num, vectype, i);
+ if (vec_mask)
+ vec_mask = vec_masks[i];
+ if (vec_mask)
+ final_mask = prepare_vec_mask (loop_vinfo, mask_vectype, final_mask,
+ vec_mask, gsi);
+
+ if (i > 0)
+ /* Bump the vector pointer. */
+ dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi,
+ stmt_info, bump);
+
+ unsigned misalign;
+ unsigned HOST_WIDE_INT align;
+ align = known_alignment (DR_TARGET_ALIGNMENT (first_dr_info));
+ if (alignment_support_scheme == dr_aligned)
+ misalign = 0;
+ else if (misalignment == DR_MISALIGNMENT_UNKNOWN)
+ {
+ align = dr_alignment (vect_dr_behavior (vinfo, first_dr_info));
+ misalign = 0;
+ }
+ else
+ misalign = misalignment;
+ if (dataref_offset == NULL_TREE
+ && TREE_CODE (dataref_ptr) == SSA_NAME)
+ set_ptr_info_alignment (get_ptr_info (dataref_ptr), align, misalign);
+ align = least_bit_hwi (misalign | align);
+
+ /* Compute the IFN when LOOP_LENS or final_mask is valid. */
+ machine_mode vmode = TYPE_MODE (vectype);
+ machine_mode new_vmode = vmode;
+ internal_fn partial_ifn = IFN_LAST;
+ if (loop_lens)
+ {
+ opt_machine_mode new_ovmode
+ = get_len_load_store_mode (vmode, false, &partial_ifn);
+ new_vmode = new_ovmode.require ();
+ unsigned factor
+ = (new_ovmode == vmode) ? 1 : GET_MODE_UNIT_SIZE (vmode);
+ final_len = vect_get_loop_len (loop_vinfo, gsi, loop_lens,
+ vec_num, vectype, i, factor);
+ }
+ else if (final_mask)
+ {
+ if (!can_vec_mask_load_store_p (vmode,
+ TYPE_MODE (TREE_TYPE (final_mask)),
+ false, &partial_ifn))
+ gcc_unreachable ();
+ }
- if (partial_ifn == IFN_MASK_LEN_STORE)
+ if (partial_ifn == IFN_MASK_LEN_STORE)
+ {
+ if (!final_len)
{
- if (!final_len)
- {
- /* Pass VF value to 'len' argument of
- MASK_LEN_STORE if LOOP_LENS is invalid. */
- final_len = size_int (TYPE_VECTOR_SUBPARTS (vectype));
- }
- if (!final_mask)
- {
- /* Pass all ones value to 'mask' argument of
- MASK_LEN_STORE if final_mask is invalid. */
- mask_vectype = truth_type_for (vectype);
- final_mask = build_minus_one_cst (mask_vectype);
- }
+ /* Pass the VF value to the 'len' argument of
+ MASK_LEN_STORE if LOOP_LENS is invalid. */
+ final_len = size_int (TYPE_VECTOR_SUBPARTS (vectype));
}
- if (final_len)
+ if (!final_mask)
{
- signed char biasval
- = LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
-
- bias = build_int_cst (intQI_type_node, biasval);
+ /* Pass an all-ones value to the 'mask' argument of
+ MASK_LEN_STORE if final_mask is invalid. */
+ mask_vectype = truth_type_for (vectype);
+ final_mask = build_minus_one_cst (mask_vectype);
}
+ }
+ if (final_len)
+ {
+ signed char biasval = LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS (loop_vinfo);
+ bias = build_int_cst (intQI_type_node, biasval);
+ }
- /* Arguments are ready. Create the new vector stmt. */
- if (final_len)
- {
- gcall *call;
- tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT);
- /* Need conversion if it's wrapped with VnQI. */
- if (vmode != new_vmode)
- {
- tree new_vtype
- = build_vector_type_for_mode (unsigned_intQI_type_node,
- new_vmode);
- tree var = vect_get_new_ssa_name (new_vtype, vect_simple_var);
- vec_oprnd = build1 (VIEW_CONVERT_EXPR, new_vtype, vec_oprnd);
- gassign *new_stmt
- = gimple_build_assign (var, VIEW_CONVERT_EXPR, vec_oprnd);
- vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
- vec_oprnd = var;
- }
-
- if (partial_ifn == IFN_MASK_LEN_STORE)
- call = gimple_build_call_internal (IFN_MASK_LEN_STORE, 6,
- dataref_ptr, ptr, final_mask,
- final_len, bias, vec_oprnd);
- else
- call = gimple_build_call_internal (IFN_LEN_STORE, 5,
- dataref_ptr, ptr, final_len,
- bias, vec_oprnd);
- gimple_call_set_nothrow (call, true);
- vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
- new_stmt = call;
- }
- else if (final_mask)
- {
- tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT);
- gcall *call
- = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
- ptr, final_mask, vec_oprnd);
- gimple_call_set_nothrow (call, true);
- vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
- new_stmt = call;
- }
- else
- {
- data_ref
- = fold_build2 (MEM_REF, vectype, dataref_ptr,
- dataref_offset ? dataref_offset
- : build_int_cst (ref_type, 0));
- if (alignment_support_scheme == dr_aligned)
- ;
- else
- TREE_TYPE (data_ref)
- = build_aligned_type (TREE_TYPE (data_ref),
- align * BITS_PER_UNIT);
- vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
- new_stmt = gimple_build_assign (data_ref, vec_oprnd);
+ /* Arguments are ready. Create the new vector stmt. */
+ if (final_len)
+ {
+ gcall *call;
+ tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT);
+ /* Need conversion if it's wrapped with VnQI. */
+ if (vmode != new_vmode)
+ {
+ tree new_vtype
+ = build_vector_type_for_mode (unsigned_intQI_type_node,
+ new_vmode);
+ tree var = vect_get_new_ssa_name (new_vtype, vect_simple_var);
+ vec_oprnd = build1 (VIEW_CONVERT_EXPR, new_vtype, vec_oprnd);
+ gassign *new_stmt
+ = gimple_build_assign (var, VIEW_CONVERT_EXPR, vec_oprnd);
vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
+ vec_oprnd = var;
}
- if (slp)
- continue;
-
- next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
- if (!next_stmt_info)
- break;
+ if (partial_ifn == IFN_MASK_LEN_STORE)
+ call = gimple_build_call_internal (IFN_MASK_LEN_STORE, 6,
+ dataref_ptr, ptr, final_mask,
+ final_len, bias, vec_oprnd);
+ else
+ call = gimple_build_call_internal (IFN_LEN_STORE, 5,
+ dataref_ptr, ptr, final_len,
+ bias, vec_oprnd);
+ gimple_call_set_nothrow (call, true);
+ vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
+ new_stmt = call;
}
- if (!slp && !costing_p)
+ else if (final_mask)
{
- if (j == 0)
- *vec_stmt = new_stmt;
- STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
+ tree ptr = build_int_cst (ref_type, align * BITS_PER_UNIT);
+ gcall *call
+ = gimple_build_call_internal (IFN_MASK_STORE, 4, dataref_ptr,
+ ptr, final_mask, vec_oprnd);
+ gimple_call_set_nothrow (call, true);
+ vect_finish_stmt_generation (vinfo, stmt_info, call, gsi);
+ new_stmt = call;
+ }
+ else
+ {
+ data_ref = fold_build2 (MEM_REF, vectype, dataref_ptr,
+ dataref_offset ? dataref_offset
+ : build_int_cst (ref_type, 0));
+ if (alignment_support_scheme == dr_aligned)
+ ;
+ else
+ TREE_TYPE (data_ref)
+ = build_aligned_type (TREE_TYPE (data_ref),
+ align * BITS_PER_UNIT);
+ vect_copy_ref_info (data_ref, DR_REF (first_dr_info->dr));
+ new_stmt = gimple_build_assign (data_ref, vec_oprnd);
+ vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi);
}
}
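The emission above selects the store form from the partial-vector state: a valid length yields IFN_MASK_LEN_STORE or IFN_LEN_STORE, a mask alone yields IFN_MASK_STORE, and otherwise a plain (possibly alignment-adjusted) vector assignment is built. A hedged sketch of that decision (the enum and function names are illustrative, not from the patch):

  enum class store_form { mask_len, len, mask, plain };

  static store_form
  choose_store_form (bool have_len, bool have_mask)
  {
    if (have_len)
      // When the target only provides MASK_LEN_STORE, the code above
      // synthesizes an all-ones mask, so have_mask is then true.
      return have_mask ? store_form::mask_len : store_form::len;
    if (have_mask)
      return store_form::mask;
    return store_form::plain;
  }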
@@ -10024,11 +9666,11 @@ vectorizable_store (vec_info *vinfo,
{
/* Spill. */
prologue_cost
- += record_stmt_cost (cost_vec, ncopies, vector_store,
+ += record_stmt_cost (cost_vec, 1, vector_store,
slp_node, 0, vect_epilogue);
/* Loads. */
prologue_cost
- += record_stmt_cost (cost_vec, ncopies * nregs, scalar_load,
+ += record_stmt_cost (cost_vec, nregs, scalar_load,
slp_node, 0, vect_epilogue);
}
}
diff --git a/gcc/xml.cc b/gcc/xml.cc
new file mode 100644
index 0000000..6c95288
--- /dev/null
+++ b/gcc/xml.cc
@@ -0,0 +1,367 @@
+/* XML support for diagnostics.
+ Copyright (C) 2024-2025 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#define INCLUDE_MAP
+#define INCLUDE_STRING
+#define INCLUDE_VECTOR
+#include "system.h"
+#include "coretypes.h"
+#include "xml.h"
+#include "xml-printer.h"
+#include "pretty-print.h"
+#include "selftest.h"
+
+namespace xml {
+
+/* Disable warnings about quoting issues in the pp_xxx calls below
+ that (intentionally) don't follow GCC diagnostic conventions. */
+#if __GNUC__ >= 10
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wformat-diag"
+#endif
+
+
+/* Implementation. */
+
+static void
+write_escaped_text (pretty_printer *pp, const char *text)
+{
+ gcc_assert (text);
+
+ for (const char *p = text; *p; ++p)
+ {
+ char ch = *p;
+ switch (ch)
+ {
+ default:
+ pp_character (pp, ch);
+ break;
+ case '\'':
+ pp_string (pp, "&apos;");
+ break;
+ case '"':
+ pp_string (pp, "&quot;");
+ break;
+ case '&':
+ pp_string (pp, "&amp;");
+ break;
+ case '<':
+ pp_string (pp, "&lt;");
+ break;
+ case '>':
+ pp_string (pp, "&gt;");
+ break;
+ }
+ }
+}
+
+/* struct node. */
+
+void
+node::dump (FILE *out) const
+{
+ pretty_printer pp;
+ pp.set_output_stream (out);
+ write_as_xml (&pp, 0, true);
+ pp_flush (&pp);
+}
+
+/* struct text : public node. */
+
+void
+text::write_as_xml (pretty_printer *pp, int depth, bool indent) const
+{
+ if (indent)
+ {
+ for (int i = 0; i < depth; ++i)
+ pp_string (pp, " ");
+ }
+ write_escaped_text (pp, m_str.c_str ());
+ if (indent)
+ pp_newline (pp);
+}
+
+/* struct node_with_children : public node. */
+
+void
+node_with_children::add_child (std::unique_ptr<node> node)
+{
+ gcc_assert (node.get ());
+ m_children.push_back (std::move (node));
+}
+
+void
+node_with_children::add_text (std::string str)
+{
+ // Consolidate runs of text
+ if (!m_children.empty ())
+ if (text *t = m_children.back ()->dyn_cast_text ())
+ {
+ t->m_str += std::move (str);
+ return;
+ }
+ add_child (std::make_unique <text> (std::move (str)));
+}
+
+
+/* struct document : public node_with_children. */
+
+void
+document::write_as_xml (pretty_printer *pp, int depth, bool indent) const
+{
+ pp_string (pp, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
+ if (m_doctypedecl)
+ m_doctypedecl->write_as_xml (pp, depth, indent);
+ for (auto &iter : m_children)
+ iter->write_as_xml (pp, depth, indent);
+}
+
+/* struct element : public node_with_children. */
+
+void
+element::write_as_xml (pretty_printer *pp, int depth, bool indent) const
+{
+ if (indent)
+ {
+ for (int i = 0; i < depth; ++i)
+ pp_string (pp, " ");
+ }
+
+ pp_printf (pp, "<%s", m_kind.c_str ());
+ for (auto &key : m_key_insertion_order)
+ {
+ auto iter = m_attributes.find (key);
+ if (iter != m_attributes.end ())
+ {
+ pp_printf (pp, " %s=\"", key.c_str ());
+ write_escaped_text (pp, iter->second.c_str ());
+ pp_string (pp, "\"");
+ }
+ }
+ if (m_children.empty ())
+ pp_string (pp, "/>");
+ else
+ {
+ const bool indent_children = m_preserve_whitespace ? false : indent;
+ pp_string (pp, ">");
+ if (indent_children)
+ pp_newline (pp);
+ for (auto &child : m_children)
+ child->write_as_xml (pp, depth + 1, indent_children);
+ if (indent_children)
+ {
+ for (int i = 0; i < depth; ++i)
+ pp_string (pp, " ");
+ }
+ pp_printf (pp, "</%s>", m_kind.c_str ());
+ }
+
+ if (indent)
+ pp_newline (pp);
+}
+
+void
+element::set_attr (const char *name, std::string value)
+{
+ auto iter = m_attributes.find (name);
+ if (iter == m_attributes.end ())
+ m_key_insertion_order.push_back (name);
+ m_attributes[name] = std::move (value);
+}
+
+// struct raw : public node
+
+void
+raw::write_as_xml (pretty_printer *pp,
+ int /*depth*/, bool /*indent*/) const
+{
+ pp_string (pp, m_xml_src.c_str ());
+}
+
+#if __GNUC__ >= 10
+# pragma GCC diagnostic pop
+#endif
+
+// class printer
+
+printer::printer (element &insertion_point)
+{
+ m_open_tags.push_back (&insertion_point);
+}
+
+void
+printer::push_tag (std::string name,
+ bool preserve_whitespace)
+{
+ push_element
+ (std::make_unique<element> (std::move (name),
+ preserve_whitespace));
+}
+
+void
+printer::push_tag_with_class (std::string name, std::string class_,
+ bool preserve_whitespace)
+{
+ auto new_element
+ = std::make_unique<element> (std::move (name),
+ preserve_whitespace);
+ new_element->set_attr ("class", class_);
+ push_element (std::move (new_element));
+}
+
+void
+printer::pop_tag ()
+{
+ m_open_tags.pop_back ();
+}
+
+void
+printer::set_attr (const char *name, std::string value)
+{
+ m_open_tags.back ()->set_attr (name, value);
+}
+
+void
+printer::add_text (std::string text)
+{
+ element *parent = m_open_tags.back ();
+ parent->add_text (std::move (text));
+}
+
+void
+printer::add_raw (std::string text)
+{
+ element *parent = m_open_tags.back ();
+ parent->add_child (std::make_unique<xml::raw> (std::move (text)));
+}
+
+void
+printer::push_element (std::unique_ptr<element> new_element)
+{
+ element *parent = m_open_tags.back ();
+ m_open_tags.push_back (new_element.get ());
+ parent->add_child (std::move (new_element));
+}
+
+void
+printer::append (std::unique_ptr<node> new_node)
+{
+ element *parent = m_open_tags.back ();
+ parent->add_child (std::move (new_node));
+}
+
+element *
+printer::get_insertion_point () const
+{
+ return m_open_tags.back ();
+}
+
+} // namespace xml
+
+#if CHECKING_P
+
+namespace selftest {
+
+static void
+test_no_dtd ()
+{
+ xml::document doc;
+ pretty_printer pp;
+ doc.write_as_xml (&pp, 0, true);
+ ASSERT_STREQ
+ (pp_formatted_text (&pp),
+ "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
+}
+
+static void
+test_printer ()
+{
+ xml::element top ("top", false);
+ xml::printer xp (top);
+ xp.push_tag ("foo");
+ xp.add_text ("hello");
+ xp.push_tag ("bar");
+ xp.set_attr ("size", "3");
+ xp.set_attr ("color", "red");
+ xp.add_text ("world");
+ xp.push_tag ("baz");
+ xp.pop_tag ();
+ xp.pop_tag ();
+ xp.pop_tag ();
+
+ pretty_printer pp;
+ top.write_as_xml (&pp, 0, true);
+ ASSERT_STREQ
+ (pp_formatted_text (&pp),
+ "<top>\n"
+ " <foo>\n"
+ " hello\n"
+ " <bar size=\"3\" color=\"red\">\n"
+ " world\n"
+ " <baz/>\n"
+ " </bar>\n"
+ " </foo>\n"
+ "</top>\n");
+}
+
+// Verify that element attributes preserve insertion order.
+
+static void
+test_attribute_ordering ()
+{
+ xml::element top ("top", false);
+ xml::printer xp (top);
+ xp.push_tag ("chronological");
+ xp.set_attr ("maldon", "991");
+ xp.set_attr ("hastings", "1066");
+ xp.set_attr ("edgehill", "1642");
+ xp.set_attr ("naseby", "1645");
+ xp.pop_tag ();
+ xp.push_tag ("alphabetical");
+ xp.set_attr ("edgehill", "1642");
+ xp.set_attr ("hastings", "1066");
+ xp.set_attr ("maldon", "991");
+ xp.set_attr ("naseby", "1645");
+ xp.pop_tag ();
+
+ pretty_printer pp;
+ top.write_as_xml (&pp, 0, true);
+ ASSERT_STREQ
+ (pp_formatted_text (&pp),
+ "<top>\n"
+ " <chronological maldon=\"991\" hastings=\"1066\" edgehill=\"1642\" naseby=\"1645\"/>\n"
+ " <alphabetical edgehill=\"1642\" hastings=\"1066\" maldon=\"991\" naseby=\"1645\"/>\n"
+ "</top>\n");
+}
+
+/* Run all of the selftests within this file. */
+
+void
+xml_cc_tests ()
+{
+ test_no_dtd ();
+ test_printer ();
+ test_attribute_ordering ();
+}
+
+} // namespace selftest
+
+#endif /* CHECKING_P */
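Read together with the selftests, the intended usage of the new classes is roughly as follows (a hedged sketch: emit_example and the FILE plumbing are illustrative, only the xml:: names and signatures come from the patch):

  #include "xml.h"

  static void
  emit_example (FILE *out)
  {
    xml::element top ("html", /*preserve_whitespace=*/false);
    xml::printer xp (top);
    xp.push_tag_with_class ("div", "diagnostic", /*preserve_whitespace=*/false);
    xp.set_attr ("id", "warning-1");
    xp.add_text ("unused variable ");
    xp.push_tag ("code", /*preserve_whitespace=*/true);
    xp.add_text ("x");
    xp.pop_tag ();   // </code>
    xp.pop_tag ();   // </div>
    top.dump (out);  // writes the indented XML to OUT
  }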
diff --git a/gcc/xml.h b/gcc/xml.h
index 523a44d..3c5813a 100644
--- a/gcc/xml.h
+++ b/gcc/xml.h
@@ -29,6 +29,7 @@ struct node;
struct node_with_children;
struct document;
struct element;
+ struct doctypedecl;
struct node
{
@@ -72,6 +73,13 @@ struct document : public node_with_children
{
void write_as_xml (pretty_printer *pp,
int depth, bool indent) const final override;
+
+ std::unique_ptr<doctypedecl> m_doctypedecl;
+};
+
+struct doctypedecl : public node
+{
+ // still abstract
};
struct element : public node_with_children
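doctypedecl stays abstract in this patch; a consumer is expected to subclass it and implement the node interface declared above. A hypothetical subclass (the doctype string is an assumption, not from the patch):

  struct html_doctypedecl : public xml::doctypedecl
  {
    void write_as_xml (pretty_printer *pp, int /*depth*/, bool /*indent*/)
      const final override
    {
      pp_string (pp, "<!DOCTYPE html>");
      pp_newline (pp);
    }
  };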