-rw-r--r--  gcc/ada/Make-generated.in | 4
-rw-r--r--  gcc/ada/gcc-interface/Makefile.in | 2
-rw-r--r--  gcc/cfgexpand.cc | 13
-rw-r--r--  gcc/common/config/riscv/riscv-common.cc | 221
-rw-r--r--  gcc/fortran/trans-intrinsic.cc | 51
-rw-r--r--  gcc/gimple-fold.cc | 26
-rw-r--r--  gcc/testsuite/c-c++-common/pr118868-1.c | 9
-rw-r--r--  gcc/testsuite/g++.dg/tree-ssa/pr119903-1.C | 24
-rw-r--r--  gcc/testsuite/gfortran.dg/pr120191_1.f90 | 614
-rw-r--r--  gcc/testsuite/gfortran.dg/pr120191_2.f90 | 84
-rw-r--r--  gcc/testsuite/gfortran.dg/pr120191_3.f90 | 23
-rw-r--r--  gcc/testsuite/gfortran.dg/pr120196.f90 | 26
-rw-r--r--  gcc/testsuite/gm2.dg/doc/examples/plugin/fail/doc-examples-plugin-fail.exp | 2
-rw-r--r--  gcc/tree-cfg.cc | 12
-rw-r--r--  gcc/tree-vect-loop.cc | 1020
-rw-r--r--  libgcobol/libgcobol.cc | 5
-rw-r--r--  libgfortran/generated/findloc2_s1.c | 4
-rw-r--r--  libgfortran/generated/findloc2_s4.c | 4
-rw-r--r--  libgfortran/generated/maxloc1_16_s1.c | 5
-rw-r--r--  libgfortran/generated/maxloc1_16_s4.c | 5
-rw-r--r--  libgfortran/generated/maxloc1_4_s1.c | 5
-rw-r--r--  libgfortran/generated/maxloc1_4_s4.c | 5
-rw-r--r--  libgfortran/generated/maxloc1_8_s1.c | 5
-rw-r--r--  libgfortran/generated/maxloc1_8_s4.c | 5
-rw-r--r--  libgfortran/generated/maxloc2_16_s1.c | 4
-rw-r--r--  libgfortran/generated/maxloc2_16_s4.c | 4
-rw-r--r--  libgfortran/generated/maxloc2_4_s1.c | 4
-rw-r--r--  libgfortran/generated/maxloc2_4_s4.c | 4
-rw-r--r--  libgfortran/generated/maxloc2_8_s1.c | 4
-rw-r--r--  libgfortran/generated/maxloc2_8_s4.c | 4
-rw-r--r--  libgfortran/generated/minloc1_16_s1.c | 5
-rw-r--r--  libgfortran/generated/minloc1_16_s4.c | 5
-rw-r--r--  libgfortran/generated/minloc1_4_s1.c | 5
-rw-r--r--  libgfortran/generated/minloc1_4_s4.c | 5
-rw-r--r--  libgfortran/generated/minloc1_8_s1.c | 5
-rw-r--r--  libgfortran/generated/minloc1_8_s4.c | 5
-rw-r--r--  libgfortran/generated/minloc2_16_s1.c | 4
-rw-r--r--  libgfortran/generated/minloc2_16_s4.c | 4
-rw-r--r--  libgfortran/generated/minloc2_4_s1.c | 4
-rw-r--r--  libgfortran/generated/minloc2_4_s4.c | 4
-rw-r--r--  libgfortran/generated/minloc2_8_s1.c | 4
-rw-r--r--  libgfortran/generated/minloc2_8_s4.c | 4
-rw-r--r--  libgfortran/m4/ifindloc2.m4 | 4
-rw-r--r--  libgfortran/m4/ifunction-s.m4 | 5
-rw-r--r--  libgfortran/m4/maxloc2s.m4 | 4
-rw-r--r--  libgfortran/m4/minloc2s.m4 | 4
-rw-r--r--  libiberty/regex.c | 2
47 files changed, 1282 insertions(+), 989 deletions(-)
diff --git a/gcc/ada/Make-generated.in b/gcc/ada/Make-generated.in
index 95c2a1d..5cb1b32 100644
--- a/gcc/ada/Make-generated.in
+++ b/gcc/ada/Make-generated.in
@@ -18,7 +18,7 @@ GEN_IL_FLAGS = -gnata -gnat2012 -gnatw.g -gnatyg -gnatU $(GEN_IL_INCLUDES)
ada/seinfo_tables.ads ada/seinfo_tables.adb ada/sinfo.h ada/einfo.h ada/nmake.ads ada/nmake.adb ada/seinfo.ads ada/sinfo-nodes.ads ada/sinfo-nodes.adb ada/einfo-entities.ads ada/einfo-entities.adb: ada/stamp-gen_il ; @true
ada/stamp-gen_il: $(fsrcdir)/ada/gen_il*
$(MKDIR) ada/gen_il
- cd ada/gen_il; gnatmake -q -g $(GEN_IL_FLAGS) gen_il-main
+ cd ada/gen_il; gnatmake -g $(GEN_IL_FLAGS) gen_il-main
# Ignore errors to work around finalization issues in older compilers
- cd ada/gen_il; ./gen_il-main
$(fsrcdir)/../move-if-change ada/gen_il/seinfo_tables.ads ada/seinfo_tables.ads
@@ -46,7 +46,7 @@ ada/stamp-snames : ada/snames.ads-tmpl ada/snames.adb-tmpl ada/snames.h-tmpl ada
-$(MKDIR) ada/bldtools/snamest
$(RM) $(addprefix ada/bldtools/snamest/,$(notdir $^))
$(CP) $^ ada/bldtools/snamest
- cd ada/bldtools/snamest && gnatmake -q xsnamest && ./xsnamest
+ cd ada/bldtools/snamest && gnatmake xsnamest && ./xsnamest
$(fsrcdir)/../move-if-change ada/bldtools/snamest/snames.ns ada/snames.ads
$(fsrcdir)/../move-if-change ada/bldtools/snamest/snames.nb ada/snames.adb
$(fsrcdir)/../move-if-change ada/bldtools/snamest/snames.nh ada/snames.h
diff --git a/gcc/ada/gcc-interface/Makefile.in b/gcc/ada/gcc-interface/Makefile.in
index 4ffdc1e..2c42cb1 100644
--- a/gcc/ada/gcc-interface/Makefile.in
+++ b/gcc/ada/gcc-interface/Makefile.in
@@ -634,7 +634,7 @@ OSCONS_EXTRACT=$(GCC_FOR_ADA_RTS) $(GNATLIBCFLAGS_FOR_C) -S s-oscons-tmplt.i
-$(MKDIR) ./bldtools/oscons
$(RM) $(addprefix ./bldtools/oscons/,$(notdir $^))
$(CP) $^ ./bldtools/oscons
- (cd ./bldtools/oscons ; gnatmake -q xoscons)
+ (cd ./bldtools/oscons ; gnatmake xoscons)
$(RTSDIR)/s-oscons.ads: ../stamp-gnatlib1-$(RTSDIR) s-oscons-tmplt.c gsocket.h ./bldtools/oscons/xoscons
$(RM) $(RTSDIR)/s-oscons-tmplt.i $(RTSDIR)/s-oscons-tmplt.s
diff --git a/gcc/cfgexpand.cc b/gcc/cfgexpand.cc
index 2b27076..277ef65 100644
--- a/gcc/cfgexpand.cc
+++ b/gcc/cfgexpand.cc
@@ -766,7 +766,12 @@ vars_ssa_cache::operator() (tree name)
/* If the cache exists for the use, don't try to recreate it. */
if (exists (use))
- continue;
+ {
+ /* Update the cache here; this can reduce the number of
+ times through the update loop below. */
+ update (old_name, use);
+ continue;
+ }
/* Create the cache bitmap for the use and also
so we don't go into an infinite loop for some phi nodes with loops. */
@@ -804,9 +809,11 @@ vars_ssa_cache::operator() (tree name)
bool changed;
do {
changed = false;
- for (auto &e : update_cache_list)
+ unsigned int i;
+ std::pair<tree,tree> *e;
+ FOR_EACH_VEC_ELT_REVERSE (update_cache_list, i, e)
{
- if (update (e.second, e.first))
+ if (update (e->second, e->first))
changed = true;
}
} while (changed);
diff --git a/gcc/common/config/riscv/riscv-common.cc b/gcc/common/config/riscv/riscv-common.cc
index d50aff4..3d3ca11 100644
--- a/gcc/common/config/riscv/riscv-common.cc
+++ b/gcc/common/config/riscv/riscv-common.cc
@@ -47,21 +47,20 @@ typedef int (gcc_options::*opt_var_ref_t);
typedef int (cl_target_option::*cl_opt_var_ref_t);
/* Types for recording extension to internal flag. */
-struct riscv_ext_flag_table_t
+struct riscv_extra_ext_flag_table_t
{
- riscv_ext_flag_table_t (const char *ext, opt_var_ref_t var_ref,
- cl_opt_var_ref_t cl_var_ref, int mask)
- : ext (ext), var_ref (var_ref), cl_var_ref (cl_var_ref), mask (mask)
- {}
- riscv_ext_flag_table_t (opt_var_ref_t var_ref,
- cl_opt_var_ref_t cl_var_ref, int mask)
- : ext (nullptr), var_ref (var_ref), cl_var_ref (cl_var_ref), mask (mask)
- {}
-
const char *ext;
opt_var_ref_t var_ref;
cl_opt_var_ref_t cl_var_ref;
int mask;
+};
+
+/* Types for recording extension to internal flag. */
+struct riscv_ext_flag_table_t
+{
+ opt_var_ref_t var_ref;
+ cl_opt_var_ref_t cl_var_ref;
+ int mask;
void clean (gcc_options *opts) const { opts->*var_ref &= ~mask; }
@@ -1475,76 +1474,12 @@ riscv_arch_str (bool version_p)
#define RISCV_EXT_FLAG_ENTRY(NAME, VAR, MASK) \
{NAME, &gcc_options::VAR, &cl_target_option::VAR, MASK}
-/* Mapping table between extension to internal flag. */
-static const riscv_ext_flag_table_t riscv_ext_flag_table[] =
+/* Mapping table from extension to internal flag.  Entries do not need to be
+ added here manually unless there is a special rule. */
+static const riscv_extra_ext_flag_table_t riscv_extra_ext_flag_table[] =
{
- RISCV_EXT_FLAG_ENTRY ("e", x_riscv_base_subext, MASK_RVE),
- RISCV_EXT_FLAG_ENTRY ("m", x_riscv_base_subext, MASK_MUL),
- RISCV_EXT_FLAG_ENTRY ("a", x_riscv_base_subext, MASK_ATOMIC),
- RISCV_EXT_FLAG_ENTRY ("f", x_riscv_base_subext, MASK_HARD_FLOAT),
- RISCV_EXT_FLAG_ENTRY ("d", x_riscv_base_subext, MASK_DOUBLE_FLOAT),
- RISCV_EXT_FLAG_ENTRY ("c", x_riscv_base_subext, MASK_RVC),
- RISCV_EXT_FLAG_ENTRY ("v", x_riscv_isa_flags, MASK_FULL_V),
- RISCV_EXT_FLAG_ENTRY ("v", x_riscv_isa_flags, MASK_VECTOR),
-
- RISCV_EXT_FLAG_ENTRY ("zicsr", x_riscv_zi_subext, MASK_ZICSR),
- RISCV_EXT_FLAG_ENTRY ("zifencei", x_riscv_zi_subext, MASK_ZIFENCEI),
- RISCV_EXT_FLAG_ENTRY ("zicond", x_riscv_zi_subext, MASK_ZICOND),
-
- RISCV_EXT_FLAG_ENTRY ("za64rs", x_riscv_za_subext, MASK_ZA64RS),
- RISCV_EXT_FLAG_ENTRY ("za128rs", x_riscv_za_subext, MASK_ZA128RS),
- RISCV_EXT_FLAG_ENTRY ("zawrs", x_riscv_za_subext, MASK_ZAWRS),
- RISCV_EXT_FLAG_ENTRY ("zaamo", x_riscv_za_subext, MASK_ZAAMO),
- RISCV_EXT_FLAG_ENTRY ("zalrsc", x_riscv_za_subext, MASK_ZALRSC),
- RISCV_EXT_FLAG_ENTRY ("zabha", x_riscv_za_subext, MASK_ZABHA),
- RISCV_EXT_FLAG_ENTRY ("zacas", x_riscv_za_subext, MASK_ZACAS),
- RISCV_EXT_FLAG_ENTRY ("zama16b", x_riscv_za_subext, MASK_ZAMA16B),
-
- RISCV_EXT_FLAG_ENTRY ("zba", x_riscv_zb_subext, MASK_ZBA),
- RISCV_EXT_FLAG_ENTRY ("zbb", x_riscv_zb_subext, MASK_ZBB),
- RISCV_EXT_FLAG_ENTRY ("zbc", x_riscv_zb_subext, MASK_ZBC),
- RISCV_EXT_FLAG_ENTRY ("zbs", x_riscv_zb_subext, MASK_ZBS),
-
- RISCV_EXT_FLAG_ENTRY ("zfinx", x_riscv_zinx_subext, MASK_ZFINX),
- RISCV_EXT_FLAG_ENTRY ("zdinx", x_riscv_zinx_subext, MASK_ZDINX),
- RISCV_EXT_FLAG_ENTRY ("zhinx", x_riscv_zinx_subext, MASK_ZHINX),
- RISCV_EXT_FLAG_ENTRY ("zhinxmin", x_riscv_zinx_subext, MASK_ZHINXMIN),
-
- RISCV_EXT_FLAG_ENTRY ("zbkb", x_riscv_zk_subext, MASK_ZBKB),
- RISCV_EXT_FLAG_ENTRY ("zbkc", x_riscv_zk_subext, MASK_ZBKC),
- RISCV_EXT_FLAG_ENTRY ("zbkx", x_riscv_zk_subext, MASK_ZBKX),
- RISCV_EXT_FLAG_ENTRY ("zknd", x_riscv_zk_subext, MASK_ZKND),
- RISCV_EXT_FLAG_ENTRY ("zkne", x_riscv_zk_subext, MASK_ZKNE),
- RISCV_EXT_FLAG_ENTRY ("zknh", x_riscv_zk_subext, MASK_ZKNH),
- RISCV_EXT_FLAG_ENTRY ("zkr", x_riscv_zk_subext, MASK_ZKR),
- RISCV_EXT_FLAG_ENTRY ("zksed", x_riscv_zk_subext, MASK_ZKSED),
- RISCV_EXT_FLAG_ENTRY ("zksh", x_riscv_zk_subext, MASK_ZKSH),
- RISCV_EXT_FLAG_ENTRY ("zkt", x_riscv_zk_subext, MASK_ZKT),
-
- RISCV_EXT_FLAG_ENTRY ("zihintntl", x_riscv_zi_subext, MASK_ZIHINTNTL),
- RISCV_EXT_FLAG_ENTRY ("zihintpause", x_riscv_zi_subext, MASK_ZIHINTPAUSE),
- RISCV_EXT_FLAG_ENTRY ("ziccamoa", x_riscv_zi_subext, MASK_ZICCAMOA),
- RISCV_EXT_FLAG_ENTRY ("ziccif", x_riscv_zi_subext, MASK_ZICCIF),
- RISCV_EXT_FLAG_ENTRY ("zicclsm", x_riscv_zi_subext, MASK_ZICCLSM),
- RISCV_EXT_FLAG_ENTRY ("ziccrse", x_riscv_zi_subext, MASK_ZICCRSE),
- RISCV_EXT_FLAG_ENTRY ("zilsd", x_riscv_zi_subext, MASK_ZILSD),
-
- RISCV_EXT_FLAG_ENTRY ("zicboz", x_riscv_zi_subext, MASK_ZICBOZ),
- RISCV_EXT_FLAG_ENTRY ("zicbom", x_riscv_zi_subext, MASK_ZICBOM),
- RISCV_EXT_FLAG_ENTRY ("zicbop", x_riscv_zi_subext, MASK_ZICBOP),
- RISCV_EXT_FLAG_ENTRY ("zic64b", x_riscv_zi_subext, MASK_ZIC64B),
-
- RISCV_EXT_FLAG_ENTRY ("zicfiss", x_riscv_zi_subext, MASK_ZICFISS),
- RISCV_EXT_FLAG_ENTRY ("zicfilp", x_riscv_zi_subext, MASK_ZICFILP),
-
- RISCV_EXT_FLAG_ENTRY ("zimop", x_riscv_zi_subext, MASK_ZIMOP),
- RISCV_EXT_FLAG_ENTRY ("zcmop", x_riscv_zc_subext, MASK_ZCMOP),
-
RISCV_EXT_FLAG_ENTRY ("zve32x", x_riscv_isa_flags, MASK_VECTOR),
- RISCV_EXT_FLAG_ENTRY ("zve32f", x_riscv_isa_flags, MASK_VECTOR),
- RISCV_EXT_FLAG_ENTRY ("zve64x", x_riscv_isa_flags, MASK_VECTOR),
- RISCV_EXT_FLAG_ENTRY ("zve64f", x_riscv_isa_flags, MASK_VECTOR),
- RISCV_EXT_FLAG_ENTRY ("zve64d", x_riscv_isa_flags, MASK_VECTOR),
+ RISCV_EXT_FLAG_ENTRY ("v", x_riscv_isa_flags, MASK_FULL_V),
/* We don't need to put complete ELEN/ELEN_FP info here, due to the
implication relation of vector extension.
@@ -1561,91 +1496,6 @@ static const riscv_ext_flag_table_t riscv_ext_flag_table[] =
RISCV_EXT_FLAG_ENTRY ("zvfhmin", x_riscv_vector_elen_flags, MASK_VECTOR_ELEN_FP_16),
RISCV_EXT_FLAG_ENTRY ("zvfh", x_riscv_vector_elen_flags, MASK_VECTOR_ELEN_FP_16),
- RISCV_EXT_FLAG_ENTRY ("zvbb", x_riscv_zvb_subext, MASK_ZVBB),
- RISCV_EXT_FLAG_ENTRY ("zvbc", x_riscv_zvb_subext, MASK_ZVBC),
- RISCV_EXT_FLAG_ENTRY ("zvkb", x_riscv_zvb_subext, MASK_ZVKB),
- RISCV_EXT_FLAG_ENTRY ("zvkg", x_riscv_zvk_subext, MASK_ZVKG),
- RISCV_EXT_FLAG_ENTRY ("zvkned", x_riscv_zvk_subext, MASK_ZVKNED),
- RISCV_EXT_FLAG_ENTRY ("zvknha", x_riscv_zvk_subext, MASK_ZVKNHA),
- RISCV_EXT_FLAG_ENTRY ("zvknhb", x_riscv_zvk_subext, MASK_ZVKNHB),
- RISCV_EXT_FLAG_ENTRY ("zvksed", x_riscv_zvk_subext, MASK_ZVKSED),
- RISCV_EXT_FLAG_ENTRY ("zvksh", x_riscv_zvk_subext, MASK_ZVKSH),
- RISCV_EXT_FLAG_ENTRY ("zvkn", x_riscv_zvk_subext, MASK_ZVKN),
- RISCV_EXT_FLAG_ENTRY ("zvknc", x_riscv_zvk_subext, MASK_ZVKNC),
- RISCV_EXT_FLAG_ENTRY ("zvkng", x_riscv_zvk_subext, MASK_ZVKNG),
- RISCV_EXT_FLAG_ENTRY ("zvks", x_riscv_zvk_subext, MASK_ZVKS),
- RISCV_EXT_FLAG_ENTRY ("zvksc", x_riscv_zvk_subext, MASK_ZVKSC),
- RISCV_EXT_FLAG_ENTRY ("zvksg", x_riscv_zvk_subext, MASK_ZVKSG),
- RISCV_EXT_FLAG_ENTRY ("zvkt", x_riscv_zvk_subext, MASK_ZVKT),
-
- RISCV_EXT_FLAG_ENTRY ("zvl32b", x_riscv_zvl_subext, MASK_ZVL32B),
- RISCV_EXT_FLAG_ENTRY ("zvl64b", x_riscv_zvl_subext, MASK_ZVL64B),
- RISCV_EXT_FLAG_ENTRY ("zvl128b", x_riscv_zvl_subext, MASK_ZVL128B),
- RISCV_EXT_FLAG_ENTRY ("zvl256b", x_riscv_zvl_subext, MASK_ZVL256B),
- RISCV_EXT_FLAG_ENTRY ("zvl512b", x_riscv_zvl_subext, MASK_ZVL512B),
- RISCV_EXT_FLAG_ENTRY ("zvl1024b", x_riscv_zvl_subext, MASK_ZVL1024B),
- RISCV_EXT_FLAG_ENTRY ("zvl2048b", x_riscv_zvl_subext, MASK_ZVL2048B),
- RISCV_EXT_FLAG_ENTRY ("zvl4096b", x_riscv_zvl_subext, MASK_ZVL4096B),
- RISCV_EXT_FLAG_ENTRY ("zvl8192b", x_riscv_zvl_subext, MASK_ZVL8192B),
- RISCV_EXT_FLAG_ENTRY ("zvl16384b", x_riscv_zvl_subext, MASK_ZVL16384B),
- RISCV_EXT_FLAG_ENTRY ("zvl32768b", x_riscv_zvl_subext, MASK_ZVL32768B),
- RISCV_EXT_FLAG_ENTRY ("zvl65536b", x_riscv_zvl_subext, MASK_ZVL65536B),
-
- RISCV_EXT_FLAG_ENTRY ("zfbfmin", x_riscv_zf_subext, MASK_ZFBFMIN),
- RISCV_EXT_FLAG_ENTRY ("zfhmin", x_riscv_zf_subext, MASK_ZFHMIN),
- RISCV_EXT_FLAG_ENTRY ("zfh", x_riscv_zf_subext, MASK_ZFH),
- RISCV_EXT_FLAG_ENTRY ("zvfbfmin", x_riscv_zvf_subext, MASK_ZVFBFMIN),
- RISCV_EXT_FLAG_ENTRY ("zvfbfwma", x_riscv_zvf_subext, MASK_ZVFBFWMA),
- RISCV_EXT_FLAG_ENTRY ("zvfhmin", x_riscv_zvf_subext, MASK_ZVFHMIN),
- RISCV_EXT_FLAG_ENTRY ("zvfh", x_riscv_zvf_subext, MASK_ZVFH),
-
- RISCV_EXT_FLAG_ENTRY ("zfa", x_riscv_zf_subext, MASK_ZFA),
-
- RISCV_EXT_FLAG_ENTRY ("zmmul", x_riscv_zm_subext, MASK_ZMMUL),
-
- /* Code-size reduction extensions. */
- RISCV_EXT_FLAG_ENTRY ("zca", x_riscv_zc_subext, MASK_ZCA),
- RISCV_EXT_FLAG_ENTRY ("zcb", x_riscv_zc_subext, MASK_ZCB),
- RISCV_EXT_FLAG_ENTRY ("zce", x_riscv_zc_subext, MASK_ZCE),
- RISCV_EXT_FLAG_ENTRY ("zcf", x_riscv_zc_subext, MASK_ZCF),
- RISCV_EXT_FLAG_ENTRY ("zcd", x_riscv_zc_subext, MASK_ZCD),
- RISCV_EXT_FLAG_ENTRY ("zcmp", x_riscv_zc_subext, MASK_ZCMP),
- RISCV_EXT_FLAG_ENTRY ("zcmt", x_riscv_zc_subext, MASK_ZCMT),
- RISCV_EXT_FLAG_ENTRY ("zclsd", x_riscv_zc_subext, MASK_ZCLSD),
-
- RISCV_EXT_FLAG_ENTRY ("svade", x_riscv_sv_subext, MASK_SVADE),
- RISCV_EXT_FLAG_ENTRY ("svadu", x_riscv_sv_subext, MASK_SVADU),
- RISCV_EXT_FLAG_ENTRY ("svinval", x_riscv_sv_subext, MASK_SVINVAL),
- RISCV_EXT_FLAG_ENTRY ("svnapot", x_riscv_sv_subext, MASK_SVNAPOT),
- RISCV_EXT_FLAG_ENTRY ("svvptc", x_riscv_sv_subext, MASK_SVVPTC),
-
- RISCV_EXT_FLAG_ENTRY ("ssnpm", x_riscv_ss_subext, MASK_SSNPM),
- RISCV_EXT_FLAG_ENTRY ("smnpm", x_riscv_sm_subext, MASK_SMNPM),
- RISCV_EXT_FLAG_ENTRY ("smmpm", x_riscv_sm_subext, MASK_SMMPM),
- RISCV_EXT_FLAG_ENTRY ("sspm", x_riscv_ss_subext, MASK_SSPM),
- RISCV_EXT_FLAG_ENTRY ("supm", x_riscv_su_subext, MASK_SUPM),
-
- RISCV_EXT_FLAG_ENTRY ("ztso", x_riscv_zt_subext, MASK_ZTSO),
-
- RISCV_EXT_FLAG_ENTRY ("xcvmac", x_riscv_xcv_subext, MASK_XCVMAC),
- RISCV_EXT_FLAG_ENTRY ("xcvalu", x_riscv_xcv_subext, MASK_XCVALU),
- RISCV_EXT_FLAG_ENTRY ("xcvelw", x_riscv_xcv_subext, MASK_XCVELW),
- RISCV_EXT_FLAG_ENTRY ("xcvsimd", x_riscv_xcv_subext, MASK_XCVSIMD),
- RISCV_EXT_FLAG_ENTRY ("xcvbi", x_riscv_xcv_subext, MASK_XCVBI),
-
- RISCV_EXT_FLAG_ENTRY ("xtheadba", x_riscv_xthead_subext, MASK_XTHEADBA),
- RISCV_EXT_FLAG_ENTRY ("xtheadbb", x_riscv_xthead_subext, MASK_XTHEADBB),
- RISCV_EXT_FLAG_ENTRY ("xtheadbs", x_riscv_xthead_subext, MASK_XTHEADBS),
- RISCV_EXT_FLAG_ENTRY ("xtheadcmo", x_riscv_xthead_subext, MASK_XTHEADCMO),
- RISCV_EXT_FLAG_ENTRY ("xtheadcondmov", x_riscv_xthead_subext, MASK_XTHEADCONDMOV),
- RISCV_EXT_FLAG_ENTRY ("xtheadfmemidx", x_riscv_xthead_subext, MASK_XTHEADFMEMIDX),
- RISCV_EXT_FLAG_ENTRY ("xtheadfmv", x_riscv_xthead_subext, MASK_XTHEADFMV),
- RISCV_EXT_FLAG_ENTRY ("xtheadint", x_riscv_xthead_subext, MASK_XTHEADINT),
- RISCV_EXT_FLAG_ENTRY ("xtheadmac", x_riscv_xthead_subext, MASK_XTHEADMAC),
- RISCV_EXT_FLAG_ENTRY ("xtheadmemidx", x_riscv_xthead_subext, MASK_XTHEADMEMIDX),
- RISCV_EXT_FLAG_ENTRY ("xtheadmempair", x_riscv_xthead_subext, MASK_XTHEADMEMPAIR),
- RISCV_EXT_FLAG_ENTRY ("xtheadsync", x_riscv_xthead_subext, MASK_XTHEADSYNC),
- RISCV_EXT_FLAG_ENTRY ("xtheadvector", x_riscv_xthead_subext, MASK_XTHEADVECTOR),
RISCV_EXT_FLAG_ENTRY ("xtheadvector", x_riscv_vector_elen_flags, MASK_VECTOR_ELEN_32),
RISCV_EXT_FLAG_ENTRY ("xtheadvector", x_riscv_vector_elen_flags, MASK_VECTOR_ELEN_64),
RISCV_EXT_FLAG_ENTRY ("xtheadvector", x_riscv_vector_elen_flags, MASK_VECTOR_ELEN_FP_32),
@@ -1659,14 +1509,6 @@ static const riscv_ext_flag_table_t riscv_ext_flag_table[] =
RISCV_EXT_FLAG_ENTRY ("xtheadvector", x_riscv_isa_flags, MASK_FULL_V),
RISCV_EXT_FLAG_ENTRY ("xtheadvector", x_riscv_isa_flags, MASK_VECTOR),
- RISCV_EXT_FLAG_ENTRY ("xventanacondops", x_riscv_xventana_subext, MASK_XVENTANACONDOPS),
-
- RISCV_EXT_FLAG_ENTRY ("xsfvcp", x_riscv_xsf_subext, MASK_XSFVCP),
- RISCV_EXT_FLAG_ENTRY ("xsfcease", x_riscv_xsf_subext, MASK_XSFCEASE),
- RISCV_EXT_FLAG_ENTRY ("xsfvqmaccqoq", x_riscv_xsf_subext, MASK_XSFVQMACCQOQ),
- RISCV_EXT_FLAG_ENTRY ("xsfvqmaccdod", x_riscv_xsf_subext, MASK_XSFVQMACCDOD),
- RISCV_EXT_FLAG_ENTRY ("xsfvfnrclipxfqf", x_riscv_xsf_subext, MASK_XSFVFNRCLIPXFQF),
-
{NULL, NULL, NULL, 0}
};
@@ -1675,9 +1517,9 @@ static void
apply_extra_extension_flags (const char *ext,
std::vector<riscv_ext_flag_table_t> &flag_table)
{
- const riscv_ext_flag_table_t *arch_ext_flag_tab;
- for (arch_ext_flag_tab = &riscv_ext_flag_table[0]; arch_ext_flag_tab->ext;
- ++arch_ext_flag_tab)
+ const riscv_extra_ext_flag_table_t *arch_ext_flag_tab;
+ for (arch_ext_flag_tab = &riscv_extra_ext_flag_table[0];
+ arch_ext_flag_tab->ext; ++arch_ext_flag_tab)
{
if (strcmp (arch_ext_flag_tab->ext, ext) == 0)
{
@@ -1714,24 +1556,21 @@ riscv_set_arch_by_subset_list (riscv_subset_list *subset_list,
{
if (opts)
{
- const riscv_ext_flag_table_t *arch_ext_flag_tab;
/* Clean up target flags before we set. */
- for (arch_ext_flag_tab = &riscv_ext_flag_table[0]; arch_ext_flag_tab->ext;
- ++arch_ext_flag_tab)
- opts->*arch_ext_flag_tab->var_ref &= ~arch_ext_flag_tab->mask;
+ for (const auto &[ext_name, ext_info] : riscv_ext_infos)
+ ext_info.clean_opts (opts);
if (subset_list->xlen () == 32)
opts->x_riscv_isa_flags &= ~MASK_64BIT;
else if (subset_list->xlen () == 64)
opts->x_riscv_isa_flags |= MASK_64BIT;
- for (arch_ext_flag_tab = &riscv_ext_flag_table[0];
- arch_ext_flag_tab->ext;
- ++arch_ext_flag_tab)
- {
- if (subset_list->lookup (arch_ext_flag_tab->ext))
- opts->*arch_ext_flag_tab->var_ref |= arch_ext_flag_tab->mask;
- }
+ for (const auto &[ext_name, ext_info] : riscv_ext_infos)
+ if (subset_list->lookup (ext_name.c_str ()))
+ {
+ /* Set the extension flag. */
+ ext_info.set_opts (opts);
+ }
}
}
@@ -1741,16 +1580,10 @@ bool
riscv_ext_is_subset (struct cl_target_option *opts,
struct cl_target_option *subset)
{
- const riscv_ext_flag_table_t *arch_ext_flag_tab;
- for (arch_ext_flag_tab = &riscv_ext_flag_table[0];
- arch_ext_flag_tab->ext;
- ++arch_ext_flag_tab)
+ for (const auto &[ext_name, ext_info] : riscv_ext_infos)
{
- if (subset->*arch_ext_flag_tab->cl_var_ref & arch_ext_flag_tab->mask)
- {
- if (!(opts->*arch_ext_flag_tab->cl_var_ref & arch_ext_flag_tab->mask))
- return false;
- }
+ if (ext_info.check_opts (opts) && !ext_info.check_opts (subset))
+ return false;
}
return true;
}
diff --git a/gcc/fortran/trans-intrinsic.cc b/gcc/fortran/trans-intrinsic.cc
index 440cbdd..fce5ee2 100644
--- a/gcc/fortran/trans-intrinsic.cc
+++ b/gcc/fortran/trans-intrinsic.cc
@@ -4715,22 +4715,6 @@ maybe_absent_optional_variable (gfc_expr *e)
}
-/* Remove unneeded kind= argument from actual argument list when the
- result conversion is dealt with in a different place. */
-
-static void
-strip_kind_from_actual (gfc_actual_arglist * actual)
-{
- for (gfc_actual_arglist *a = actual; a; a = a->next)
- {
- if (a && a->name && strcmp (a->name, "kind") == 0)
- {
- gfc_free_expr (a->expr);
- a->expr = NULL;
- }
- }
-}
-
/* Emit code for minloc or maxloc intrinsic. There are many different cases
we need to handle. For performance reasons we sometimes create two
loops instead of one, where the second one is much simpler.
@@ -4925,7 +4909,7 @@ gfc_conv_intrinsic_minmaxloc (gfc_se * se, gfc_expr * expr, enum tree_code op)
tree b_if, b_else;
tree back;
gfc_loopinfo loop, *ploop;
- gfc_actual_arglist *actual, *array_arg, *dim_arg, *mask_arg, *kind_arg;
+ gfc_actual_arglist *array_arg, *dim_arg, *mask_arg, *kind_arg;
gfc_actual_arglist *back_arg;
gfc_ss *arrayss = nullptr;
gfc_ss *maskss = nullptr;
@@ -4944,8 +4928,7 @@ gfc_conv_intrinsic_minmaxloc (gfc_se * se, gfc_expr * expr, enum tree_code op)
int n;
bool optional_mask;
- actual = expr->value.function.actual;
- array_arg = actual;
+ array_arg = expr->value.function.actual;
dim_arg = array_arg->next;
mask_arg = dim_arg->next;
kind_arg = mask_arg->next;
@@ -4954,14 +4937,16 @@ gfc_conv_intrinsic_minmaxloc (gfc_se * se, gfc_expr * expr, enum tree_code op)
bool dim_present = dim_arg->expr != nullptr;
bool nested_loop = dim_present && expr->rank > 0;
- /* The last argument, BACK, is passed by value. Ensure that
- by setting its name to %VAL. */
- for (gfc_actual_arglist *a = actual; a; a = a->next)
+ /* Remove kind. */
+ if (kind_arg->expr)
{
- if (a->next == NULL)
- a->name = "%VAL";
+ gfc_free_expr (kind_arg->expr);
+ kind_arg->expr = NULL;
}
+ /* Pass BACK argument by value. */
+ back_arg->name = "%VAL";
+
if (se->ss)
{
if (se->ss->info->useflags)
@@ -4983,25 +4968,19 @@ gfc_conv_intrinsic_minmaxloc (gfc_se * se, gfc_expr * expr, enum tree_code op)
}
}
- arrayexpr = actual->expr;
+ arrayexpr = array_arg->expr;
- /* Special case for character maxloc. Remove unneeded actual
- arguments, then call a library function. */
+ /* Special case for character maxloc. Remove unneeded "dim" actual
+ argument, then call a library function. */
if (arrayexpr->ts.type == BT_CHARACTER)
{
gcc_assert (expr->rank == 0);
- gfc_actual_arglist *a = actual;
- strip_kind_from_actual (a);
- while (a)
+ if (dim_arg->expr)
{
- if (a->name && strcmp (a->name, "dim") == 0)
- {
- gfc_free_expr (a->expr);
- a->expr = NULL;
- }
- a = a->next;
+ gfc_free_expr (dim_arg->expr);
+ dim_arg->expr = NULL;
}
gfc_conv_intrinsic_funcall (se, expr);
return;
diff --git a/gcc/gimple-fold.cc b/gcc/gimple-fold.cc
index e63fd6f..b8c1588 100644
--- a/gcc/gimple-fold.cc
+++ b/gcc/gimple-fold.cc
@@ -6276,6 +6276,32 @@ replace_stmt_with_simplification (gimple_stmt_iterator *gsi,
}
else if (!inplace)
{
+ /* For throwing comparisons, see if the GIMPLE_COND is the same as
+ the comparison would be.
+ This can happen due to the match pattern for
+ `(ne (cmp @0 @1) integer_zerop)` which creates a new expression
+ for the comparison. */
+ if (TREE_CODE_CLASS (code) == tcc_comparison
+ && flag_exceptions
+ && cfun->can_throw_non_call_exceptions
+ && operation_could_trap_p (code,
+ FLOAT_TYPE_P (TREE_TYPE (ops[0])),
+ false, NULL_TREE))
+ {
+ tree lhs = gimple_cond_lhs (cond_stmt);
+ if (gimple_cond_code (cond_stmt) == NE_EXPR
+ && TREE_CODE (lhs) == SSA_NAME
+ && INTEGRAL_TYPE_P (TREE_TYPE (lhs))
+ && integer_zerop (gimple_cond_rhs (cond_stmt)))
+ {
+ gimple *s = SSA_NAME_DEF_STMT (lhs);
+ if (is_gimple_assign (s)
+ && gimple_assign_rhs_code (s) == code
+ && operand_equal_p (gimple_assign_rhs1 (s), ops[0])
+ && operand_equal_p (gimple_assign_rhs2 (s), ops[1]))
+ return false;
+ }
+ }
tree res = maybe_push_res_to_seq (res_op, seq);
if (!res)
return false;
diff --git a/gcc/testsuite/c-c++-common/pr118868-1.c b/gcc/testsuite/c-c++-common/pr118868-1.c
new file mode 100644
index 0000000..d0a9e77f7
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/pr118868-1.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+
+/* PR middle-end/118868 */
+
+/* __builtin_assoc_barrier should work on pointers without any ICE */
+void *f(void *a)
+{
+ return __builtin_assoc_barrier(a);
+}
diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr119903-1.C b/gcc/testsuite/g++.dg/tree-ssa/pr119903-1.C
new file mode 100644
index 0000000..605f989
--- /dev/null
+++ b/gcc/testsuite/g++.dg/tree-ssa/pr119903-1.C
@@ -0,0 +1,24 @@
+// { dg-do compile { target c++11 } }
+// { dg-options "-O2 -fnon-call-exceptions -ftrapping-math -fdump-tree-optimized-eh" }
+
+// PR tree-optimization/119903
+// match and simplify would cause the internal throwable fp comparison
+// to become only external throwable and lose the landing pad.
+
+int f() noexcept;
+int g() noexcept;
+
+int m(double a)
+{
+ try {
+ if (a < 1.0)
+ return f();
+ return g();
+ }catch(...)
+ {
+ return -1;
+ }
+}
+
+// Make sure there is a landing pad for the non-call exception from the comparison.
+// { dg-final { scan-tree-dump "LP " "optimized" } }
diff --git a/gcc/testsuite/gfortran.dg/pr120191_1.f90 b/gcc/testsuite/gfortran.dg/pr120191_1.f90
new file mode 100644
index 0000000..13a787d
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pr120191_1.f90
@@ -0,0 +1,614 @@
+! PR fortran/120191
+! { dg-do run }
+
+ integer(kind=1) :: a1(10, 10, 10), b1(10)
+ integer(kind=2) :: a2(10, 10, 10), b2(10)
+ integer(kind=4) :: a4(10, 10, 10), b4(10)
+ integer(kind=8) :: a8(10, 10, 10), b8(10)
+ real(kind=4) :: r4(10, 10, 10), s4(10)
+ real(kind=8) :: r8(10, 10, 10), s8(10)
+ logical :: l1(10, 10, 10), l2(10), l3
+ l1 = .true.
+ l2 = .true.
+ l3 = .true.
+ a1 = 0
+ if (any (maxloc (a1) .ne. 1)) stop 1
+ if (any (maxloc (a1, back=.false.) .ne. 1)) stop 2
+ if (any (maxloc (a1, back=.true.) .ne. 10)) stop 3
+ if (any (maxloc (a1, kind=2) .ne. 1)) stop 4
+ if (any (maxloc (a1, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (maxloc (a1, kind=8, back=.true.) .ne. 10)) stop 6
+ if (any (maxloc (a1, 1) .ne. 1)) stop 7
+ if (any (maxloc (a1, 1, back=.false.) .ne. 1)) stop 8
+ if (any (maxloc (a1, 1, back=.true.) .ne. 10)) stop 9
+ if (any (maxloc (a1, 1, kind=1) .ne. 1)) stop 10
+ if (any (maxloc (a1, 1, kind=2, back=.false.) .ne. 1)) stop 11
+ if (any (maxloc (a1, 1, kind=4, back=.true.) .ne. 10)) stop 12
+ if (any (maxloc (a1, 1, l1) .ne. 1)) stop 13
+ if (any (maxloc (a1, 1, l1, back=.false.) .ne. 1)) stop 14
+ if (any (maxloc (a1, 1, l1, back=.true.) .ne. 10)) stop 15
+ if (any (maxloc (a1, 1, l1, kind=8) .ne. 1)) stop 16
+ if (any (maxloc (a1, 1, l1, 4, .false.) .ne. 1)) stop 17
+ if (any (maxloc (a1, 1, l1, 2, .true.) .ne. 10)) stop 18
+ if (any (maxloc (a1, 1, l3) .ne. 1)) stop 19
+ if (any (maxloc (a1, 1, l3, back=.false.) .ne. 1)) stop 20
+ if (any (maxloc (a1, 1, l3, back=.true.) .ne. 10)) stop 21
+ if (any (maxloc (a1, 1, l3, kind=8) .ne. 1)) stop 22
+ if (any (maxloc (a1, 1, l3, 4, .false.) .ne. 1)) stop 23
+ if (any (maxloc (a1, 1, l3, 2, .true.) .ne. 10)) stop 24
+ b1 = 0
+ if (any (maxloc (b1) .ne. 1)) stop 1
+ if (any (maxloc (b1, back=.false.) .ne. 1)) stop 2
+ if (any (maxloc (b1, back=.true.) .ne. 10)) stop 3
+ if (any (maxloc (b1, kind=2) .ne. 1)) stop 4
+ if (any (maxloc (b1, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (maxloc (b1, kind=8, back=.true.) .ne. 10)) stop 6
+ if (maxloc (b1, 1) .ne. 1) stop 7
+ if (maxloc (b1, 1, back=.false.) .ne. 1) stop 8
+ if (maxloc (b1, 1, back=.true.) .ne. 10) stop 9
+ if (maxloc (b1, 1, kind=1) .ne. 1) stop 10
+ if (maxloc (b1, 1, kind=2, back=.false.) .ne. 1) stop 11
+ if (maxloc (b1, 1, kind=4, back=.true.) .ne. 10) stop 12
+ if (maxloc (b1, 1, l2) .ne. 1) stop 13
+ if (maxloc (b1, 1, l2, back=.false.) .ne. 1) stop 14
+ if (maxloc (b1, 1, l2, back=.true.) .ne. 10) stop 15
+ if (maxloc (b1, 1, l2, kind=8) .ne. 1) stop 16
+ if (maxloc (b1, 1, l2, 4, .false.) .ne. 1) stop 17
+ if (maxloc (b1, 1, l2, 2, .true.) .ne. 10) stop 18
+ if (maxloc (b1, 1, l3) .ne. 1) stop 19
+ if (maxloc (b1, 1, l3, back=.false.) .ne. 1) stop 20
+ if (maxloc (b1, 1, l3, back=.true.) .ne. 10) stop 21
+ if (maxloc (b1, 1, l3, kind=8) .ne. 1) stop 22
+ if (maxloc (b1, 1, l3, 4, .false.) .ne. 1) stop 23
+ if (maxloc (b1, 1, l3, 2, .true.) .ne. 10) stop 24
+ a2 = 0
+ if (any (maxloc (a2) .ne. 1)) stop 1
+ if (any (maxloc (a2, back=.false.) .ne. 1)) stop 2
+ if (any (maxloc (a2, back=.true.) .ne. 10)) stop 3
+ if (any (maxloc (a2, kind=2) .ne. 1)) stop 4
+ if (any (maxloc (a2, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (maxloc (a2, kind=8, back=.true.) .ne. 10)) stop 6
+ if (any (maxloc (a2, 1) .ne. 1)) stop 7
+ if (any (maxloc (a2, 1, back=.false.) .ne. 1)) stop 8
+ if (any (maxloc (a2, 1, back=.true.) .ne. 10)) stop 9
+ if (any (maxloc (a2, 1, kind=1) .ne. 1)) stop 10
+ if (any (maxloc (a2, 1, kind=2, back=.false.) .ne. 1)) stop 11
+ if (any (maxloc (a2, 1, kind=4, back=.true.) .ne. 10)) stop 12
+ if (any (maxloc (a2, 1, l1) .ne. 1)) stop 13
+ if (any (maxloc (a2, 1, l1, back=.false.) .ne. 1)) stop 14
+ if (any (maxloc (a2, 1, l1, back=.true.) .ne. 10)) stop 15
+ if (any (maxloc (a2, 1, l1, kind=8) .ne. 1)) stop 16
+ if (any (maxloc (a2, 1, l1, 4, .false.) .ne. 1)) stop 17
+ if (any (maxloc (a2, 1, l1, 2, .true.) .ne. 10)) stop 18
+ if (any (maxloc (a2, 1, l3) .ne. 1)) stop 19
+ if (any (maxloc (a2, 1, l3, back=.false.) .ne. 1)) stop 20
+ if (any (maxloc (a2, 1, l3, back=.true.) .ne. 10)) stop 21
+ if (any (maxloc (a2, 1, l3, kind=8) .ne. 1)) stop 22
+ if (any (maxloc (a2, 1, l3, 4, .false.) .ne. 1)) stop 23
+ if (any (maxloc (a2, 1, l3, 2, .true.) .ne. 10)) stop 24
+ b2 = 0
+ if (any (maxloc (b2) .ne. 1)) stop 1
+ if (any (maxloc (b2, back=.false.) .ne. 1)) stop 2
+ if (any (maxloc (b2, back=.true.) .ne. 10)) stop 3
+ if (any (maxloc (b2, kind=2) .ne. 1)) stop 4
+ if (any (maxloc (b2, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (maxloc (b2, kind=8, back=.true.) .ne. 10)) stop 6
+ if (maxloc (b2, 1) .ne. 1) stop 7
+ if (maxloc (b2, 1, back=.false.) .ne. 1) stop 8
+ if (maxloc (b2, 1, back=.true.) .ne. 10) stop 9
+ if (maxloc (b2, 1, kind=1) .ne. 1) stop 10
+ if (maxloc (b2, 1, kind=2, back=.false.) .ne. 1) stop 11
+ if (maxloc (b2, 1, kind=4, back=.true.) .ne. 10) stop 12
+ if (maxloc (b2, 1, l2) .ne. 1) stop 13
+ if (maxloc (b2, 1, l2, back=.false.) .ne. 1) stop 14
+ if (maxloc (b2, 1, l2, back=.true.) .ne. 10) stop 15
+ if (maxloc (b2, 1, l2, kind=8) .ne. 1) stop 16
+ if (maxloc (b2, 1, l2, 4, .false.) .ne. 1) stop 17
+ if (maxloc (b2, 1, l2, 2, .true.) .ne. 10) stop 18
+ if (maxloc (b2, 1, l3) .ne. 1) stop 19
+ if (maxloc (b2, 1, l3, back=.false.) .ne. 1) stop 20
+ if (maxloc (b2, 1, l3, back=.true.) .ne. 10) stop 21
+ if (maxloc (b2, 1, l3, kind=8) .ne. 1) stop 22
+ if (maxloc (b2, 1, l3, 4, .false.) .ne. 1) stop 23
+ if (maxloc (b2, 1, l3, 2, .true.) .ne. 10) stop 24
+ a4 = 0
+ if (any (maxloc (a4) .ne. 1)) stop 1
+ if (any (maxloc (a4, back=.false.) .ne. 1)) stop 2
+ if (any (maxloc (a4, back=.true.) .ne. 10)) stop 3
+ if (any (maxloc (a4, kind=2) .ne. 1)) stop 4
+ if (any (maxloc (a4, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (maxloc (a4, kind=8, back=.true.) .ne. 10)) stop 6
+ if (any (maxloc (a4, 1) .ne. 1)) stop 7
+ if (any (maxloc (a4, 1, back=.false.) .ne. 1)) stop 8
+ if (any (maxloc (a4, 1, back=.true.) .ne. 10)) stop 9
+ if (any (maxloc (a4, 1, kind=1) .ne. 1)) stop 10
+ if (any (maxloc (a4, 1, kind=2, back=.false.) .ne. 1)) stop 11
+ if (any (maxloc (a4, 1, kind=4, back=.true.) .ne. 10)) stop 12
+ if (any (maxloc (a4, 1, l1) .ne. 1)) stop 13
+ if (any (maxloc (a4, 1, l1, back=.false.) .ne. 1)) stop 14
+ if (any (maxloc (a4, 1, l1, back=.true.) .ne. 10)) stop 15
+ if (any (maxloc (a4, 1, l1, kind=8) .ne. 1)) stop 16
+ if (any (maxloc (a4, 1, l1, 4, .false.) .ne. 1)) stop 17
+ if (any (maxloc (a4, 1, l1, 2, .true.) .ne. 10)) stop 18
+ if (any (maxloc (a4, 1, l3) .ne. 1)) stop 19
+ if (any (maxloc (a4, 1, l3, back=.false.) .ne. 1)) stop 20
+ if (any (maxloc (a4, 1, l3, back=.true.) .ne. 10)) stop 21
+ if (any (maxloc (a4, 1, l3, kind=8) .ne. 1)) stop 22
+ if (any (maxloc (a4, 1, l3, 4, .false.) .ne. 1)) stop 23
+ if (any (maxloc (a4, 1, l3, 2, .true.) .ne. 10)) stop 24
+ b4 = 0
+ if (any (maxloc (b4) .ne. 1)) stop 1
+ if (any (maxloc (b4, back=.false.) .ne. 1)) stop 2
+ if (any (maxloc (b4, back=.true.) .ne. 10)) stop 3
+ if (any (maxloc (b4, kind=2) .ne. 1)) stop 4
+ if (any (maxloc (b4, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (maxloc (b4, kind=8, back=.true.) .ne. 10)) stop 6
+ if (maxloc (b4, 1) .ne. 1) stop 7
+ if (maxloc (b4, 1, back=.false.) .ne. 1) stop 8
+ if (maxloc (b4, 1, back=.true.) .ne. 10) stop 9
+ if (maxloc (b4, 1, kind=1) .ne. 1) stop 10
+ if (maxloc (b4, 1, kind=2, back=.false.) .ne. 1) stop 11
+ if (maxloc (b4, 1, kind=4, back=.true.) .ne. 10) stop 12
+ if (maxloc (b4, 1, l2) .ne. 1) stop 13
+ if (maxloc (b4, 1, l2, back=.false.) .ne. 1) stop 14
+ if (maxloc (b4, 1, l2, back=.true.) .ne. 10) stop 15
+ if (maxloc (b4, 1, l2, kind=8) .ne. 1) stop 16
+ if (maxloc (b4, 1, l2, 4, .false.) .ne. 1) stop 17
+ if (maxloc (b4, 1, l2, 2, .true.) .ne. 10) stop 18
+ if (maxloc (b4, 1, l3) .ne. 1) stop 19
+ if (maxloc (b4, 1, l3, back=.false.) .ne. 1) stop 20
+ if (maxloc (b4, 1, l3, back=.true.) .ne. 10) stop 21
+ if (maxloc (b4, 1, l3, kind=8) .ne. 1) stop 22
+ if (maxloc (b4, 1, l3, 4, .false.) .ne. 1) stop 23
+ if (maxloc (b4, 1, l3, 2, .true.) .ne. 10) stop 24
+ a8 = 0
+ if (any (maxloc (a8) .ne. 1)) stop 1
+ if (any (maxloc (a8, back=.false.) .ne. 1)) stop 2
+ if (any (maxloc (a8, back=.true.) .ne. 10)) stop 3
+ if (any (maxloc (a8, kind=2) .ne. 1)) stop 4
+ if (any (maxloc (a8, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (maxloc (a8, kind=8, back=.true.) .ne. 10)) stop 6
+ if (any (maxloc (a8, 1) .ne. 1)) stop 7
+ if (any (maxloc (a8, 1, back=.false.) .ne. 1)) stop 8
+ if (any (maxloc (a8, 1, back=.true.) .ne. 10)) stop 9
+ if (any (maxloc (a8, 1, kind=1) .ne. 1)) stop 10
+ if (any (maxloc (a8, 1, kind=2, back=.false.) .ne. 1)) stop 11
+ if (any (maxloc (a8, 1, kind=4, back=.true.) .ne. 10)) stop 12
+ if (any (maxloc (a8, 1, l1) .ne. 1)) stop 13
+ if (any (maxloc (a8, 1, l1, back=.false.) .ne. 1)) stop 14
+ if (any (maxloc (a8, 1, l1, back=.true.) .ne. 10)) stop 15
+ if (any (maxloc (a8, 1, l1, kind=8) .ne. 1)) stop 16
+ if (any (maxloc (a8, 1, l1, 4, .false.) .ne. 1)) stop 17
+ if (any (maxloc (a8, 1, l1, 2, .true.) .ne. 10)) stop 18
+ if (any (maxloc (a8, 1, l3) .ne. 1)) stop 19
+ if (any (maxloc (a8, 1, l3, back=.false.) .ne. 1)) stop 20
+ if (any (maxloc (a8, 1, l3, back=.true.) .ne. 10)) stop 21
+ if (any (maxloc (a8, 1, l3, kind=8) .ne. 1)) stop 22
+ if (any (maxloc (a8, 1, l3, 4, .false.) .ne. 1)) stop 23
+ if (any (maxloc (a8, 1, l3, 2, .true.) .ne. 10)) stop 24
+ b8 = 0
+ if (any (maxloc (b8) .ne. 1)) stop 1
+ if (any (maxloc (b8, back=.false.) .ne. 1)) stop 2
+ if (any (maxloc (b8, back=.true.) .ne. 10)) stop 3
+ if (any (maxloc (b8, kind=2) .ne. 1)) stop 4
+ if (any (maxloc (b8, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (maxloc (b8, kind=8, back=.true.) .ne. 10)) stop 6
+ if (maxloc (b8, 1) .ne. 1) stop 7
+ if (maxloc (b8, 1, back=.false.) .ne. 1) stop 8
+ if (maxloc (b8, 1, back=.true.) .ne. 10) stop 9
+ if (maxloc (b8, 1, kind=1) .ne. 1) stop 10
+ if (maxloc (b8, 1, kind=2, back=.false.) .ne. 1) stop 11
+ if (maxloc (b8, 1, kind=4, back=.true.) .ne. 10) stop 12
+ if (maxloc (b8, 1, l2) .ne. 1) stop 13
+ if (maxloc (b8, 1, l2, back=.false.) .ne. 1) stop 14
+ if (maxloc (b8, 1, l2, back=.true.) .ne. 10) stop 15
+ if (maxloc (b8, 1, l2, kind=8) .ne. 1) stop 16
+ if (maxloc (b8, 1, l2, 4, .false.) .ne. 1) stop 17
+ if (maxloc (b8, 1, l2, 2, .true.) .ne. 10) stop 18
+ if (maxloc (b8, 1, l3) .ne. 1) stop 19
+ if (maxloc (b8, 1, l3, back=.false.) .ne. 1) stop 20
+ if (maxloc (b8, 1, l3, back=.true.) .ne. 10) stop 21
+ if (maxloc (b8, 1, l3, kind=8) .ne. 1) stop 22
+ if (maxloc (b8, 1, l3, 4, .false.) .ne. 1) stop 23
+ if (maxloc (b8, 1, l3, 2, .true.) .ne. 10) stop 24
+ r4 = 0.0
+ if (any (maxloc (r4) .ne. 1)) stop 1
+ if (any (maxloc (r4, back=.false.) .ne. 1)) stop 2
+ if (any (maxloc (r4, back=.true.) .ne. 10)) stop 3
+ if (any (maxloc (r4, kind=2) .ne. 1)) stop 4
+ if (any (maxloc (r4, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (maxloc (r4, kind=8, back=.true.) .ne. 10)) stop 6
+ if (any (maxloc (r4, 1) .ne. 1)) stop 7
+ if (any (maxloc (r4, 1, back=.false.) .ne. 1)) stop 8
+ if (any (maxloc (r4, 1, back=.true.) .ne. 10)) stop 9
+ if (any (maxloc (r4, 1, kind=1) .ne. 1)) stop 10
+ if (any (maxloc (r4, 1, kind=2, back=.false.) .ne. 1)) stop 11
+ if (any (maxloc (r4, 1, kind=4, back=.true.) .ne. 10)) stop 12
+ if (any (maxloc (r4, 1, l1) .ne. 1)) stop 13
+ if (any (maxloc (r4, 1, l1, back=.false.) .ne. 1)) stop 14
+ if (any (maxloc (r4, 1, l1, back=.true.) .ne. 10)) stop 15
+ if (any (maxloc (r4, 1, l1, kind=8) .ne. 1)) stop 16
+ if (any (maxloc (r4, 1, l1, 4, .false.) .ne. 1)) stop 17
+ if (any (maxloc (r4, 1, l1, 2, .true.) .ne. 10)) stop 18
+ if (any (maxloc (r4, 1, l3) .ne. 1)) stop 19
+ if (any (maxloc (r4, 1, l3, back=.false.) .ne. 1)) stop 20
+ if (any (maxloc (r4, 1, l3, back=.true.) .ne. 10)) stop 21
+ if (any (maxloc (r4, 1, l3, kind=8) .ne. 1)) stop 22
+ if (any (maxloc (r4, 1, l3, 4, .false.) .ne. 1)) stop 23
+ if (any (maxloc (r4, 1, l3, 2, .true.) .ne. 10)) stop 24
+ s4 = 0.0
+ if (any (maxloc (s4) .ne. 1)) stop 1
+ if (any (maxloc (s4, back=.false.) .ne. 1)) stop 2
+ if (any (maxloc (s4, back=.true.) .ne. 10)) stop 3
+ if (any (maxloc (s4, kind=2) .ne. 1)) stop 4
+ if (any (maxloc (s4, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (maxloc (s4, kind=8, back=.true.) .ne. 10)) stop 6
+ if (maxloc (s4, 1) .ne. 1) stop 7
+ if (maxloc (s4, 1, back=.false.) .ne. 1) stop 8
+ if (maxloc (s4, 1, back=.true.) .ne. 10) stop 9
+ if (maxloc (s4, 1, kind=1) .ne. 1) stop 10
+ if (maxloc (s4, 1, kind=2, back=.false.) .ne. 1) stop 11
+ if (maxloc (s4, 1, kind=4, back=.true.) .ne. 10) stop 12
+ if (maxloc (s4, 1, l2) .ne. 1) stop 13
+ if (maxloc (s4, 1, l2, back=.false.) .ne. 1) stop 14
+ if (maxloc (s4, 1, l2, back=.true.) .ne. 10) stop 15
+ if (maxloc (s4, 1, l2, kind=8) .ne. 1) stop 16
+ if (maxloc (s4, 1, l2, 4, .false.) .ne. 1) stop 17
+ if (maxloc (s4, 1, l2, 2, .true.) .ne. 10) stop 18
+ if (maxloc (s4, 1, l3) .ne. 1) stop 19
+ if (maxloc (s4, 1, l3, back=.false.) .ne. 1) stop 20
+ if (maxloc (s4, 1, l3, back=.true.) .ne. 10) stop 21
+ if (maxloc (s4, 1, l3, kind=8) .ne. 1) stop 22
+ if (maxloc (s4, 1, l3, 4, .false.) .ne. 1) stop 23
+ if (maxloc (s4, 1, l3, 2, .true.) .ne. 10) stop 24
+ r8 = 0.0
+ if (any (maxloc (r8) .ne. 1)) stop 1
+ if (any (maxloc (r8, back=.false.) .ne. 1)) stop 2
+ if (any (maxloc (r8, back=.true.) .ne. 10)) stop 3
+ if (any (maxloc (r8, kind=2) .ne. 1)) stop 4
+ if (any (maxloc (r8, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (maxloc (r8, kind=8, back=.true.) .ne. 10)) stop 6
+ if (any (maxloc (r8, 1) .ne. 1)) stop 7
+ if (any (maxloc (r8, 1, back=.false.) .ne. 1)) stop 8
+ if (any (maxloc (r8, 1, back=.true.) .ne. 10)) stop 9
+ if (any (maxloc (r8, 1, kind=1) .ne. 1)) stop 10
+ if (any (maxloc (r8, 1, kind=2, back=.false.) .ne. 1)) stop 11
+ if (any (maxloc (r8, 1, kind=4, back=.true.) .ne. 10)) stop 12
+ if (any (maxloc (r8, 1, l1) .ne. 1)) stop 13
+ if (any (maxloc (r8, 1, l1, back=.false.) .ne. 1)) stop 14
+ if (any (maxloc (r8, 1, l1, back=.true.) .ne. 10)) stop 15
+ if (any (maxloc (r8, 1, l1, kind=8) .ne. 1)) stop 16
+ if (any (maxloc (r8, 1, l1, 4, .false.) .ne. 1)) stop 17
+ if (any (maxloc (r8, 1, l1, 2, .true.) .ne. 10)) stop 18
+ if (any (maxloc (r8, 1, l3) .ne. 1)) stop 19
+ if (any (maxloc (r8, 1, l3, back=.false.) .ne. 1)) stop 20
+ if (any (maxloc (r8, 1, l3, back=.true.) .ne. 10)) stop 21
+ if (any (maxloc (r8, 1, l3, kind=8) .ne. 1)) stop 22
+ if (any (maxloc (r8, 1, l3, 4, .false.) .ne. 1)) stop 23
+ if (any (maxloc (r8, 1, l3, 2, .true.) .ne. 10)) stop 24
+ s8 = 0.0
+ if (any (maxloc (s8) .ne. 1)) stop 1
+ if (any (maxloc (s8, back=.false.) .ne. 1)) stop 2
+ if (any (maxloc (s8, back=.true.) .ne. 10)) stop 3
+ if (any (maxloc (s8, kind=2) .ne. 1)) stop 4
+ if (any (maxloc (s8, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (maxloc (s8, kind=8, back=.true.) .ne. 10)) stop 6
+ if (maxloc (s8, 1) .ne. 1) stop 7
+ if (maxloc (s8, 1, back=.false.) .ne. 1) stop 8
+ if (maxloc (s8, 1, back=.true.) .ne. 10) stop 9
+ if (maxloc (s8, 1, kind=1) .ne. 1) stop 10
+ if (maxloc (s8, 1, kind=2, back=.false.) .ne. 1) stop 11
+ if (maxloc (s8, 1, kind=4, back=.true.) .ne. 10) stop 12
+ if (maxloc (s8, 1, l2) .ne. 1) stop 13
+ if (maxloc (s8, 1, l2, back=.false.) .ne. 1) stop 14
+ if (maxloc (s8, 1, l2, back=.true.) .ne. 10) stop 15
+ if (maxloc (s8, 1, l2, kind=8) .ne. 1) stop 16
+ if (maxloc (s8, 1, l2, 4, .false.) .ne. 1) stop 17
+ if (maxloc (s8, 1, l2, 2, .true.) .ne. 10) stop 18
+ if (maxloc (s8, 1, l3) .ne. 1) stop 19
+ if (maxloc (s8, 1, l3, back=.false.) .ne. 1) stop 20
+ if (maxloc (s8, 1, l3, back=.true.) .ne. 10) stop 21
+ if (maxloc (s8, 1, l3, kind=8) .ne. 1) stop 22
+ if (maxloc (s8, 1, l3, 4, .false.) .ne. 1) stop 23
+ if (maxloc (s8, 1, l3, 2, .true.) .ne. 10) stop 24
+ a1 = 0
+ if (any (minloc (a1) .ne. 1)) stop 1
+ if (any (minloc (a1, back=.false.) .ne. 1)) stop 2
+ if (any (minloc (a1, back=.true.) .ne. 10)) stop 3
+ if (any (minloc (a1, kind=2) .ne. 1)) stop 4
+ if (any (minloc (a1, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (minloc (a1, kind=8, back=.true.) .ne. 10)) stop 6
+ if (any (minloc (a1, 1) .ne. 1)) stop 7
+ if (any (minloc (a1, 1, back=.false.) .ne. 1)) stop 8
+ if (any (minloc (a1, 1, back=.true.) .ne. 10)) stop 9
+ if (any (minloc (a1, 1, kind=1) .ne. 1)) stop 10
+ if (any (minloc (a1, 1, kind=2, back=.false.) .ne. 1)) stop 11
+ if (any (minloc (a1, 1, kind=4, back=.true.) .ne. 10)) stop 12
+ if (any (minloc (a1, 1, l1) .ne. 1)) stop 13
+ if (any (minloc (a1, 1, l1, back=.false.) .ne. 1)) stop 14
+ if (any (minloc (a1, 1, l1, back=.true.) .ne. 10)) stop 15
+ if (any (minloc (a1, 1, l1, kind=8) .ne. 1)) stop 16
+ if (any (minloc (a1, 1, l1, 4, .false.) .ne. 1)) stop 17
+ if (any (minloc (a1, 1, l1, 2, .true.) .ne. 10)) stop 18
+ if (any (minloc (a1, 1, l3) .ne. 1)) stop 19
+ if (any (minloc (a1, 1, l3, back=.false.) .ne. 1)) stop 20
+ if (any (minloc (a1, 1, l3, back=.true.) .ne. 10)) stop 21
+ if (any (minloc (a1, 1, l3, kind=8) .ne. 1)) stop 22
+ if (any (minloc (a1, 1, l3, 4, .false.) .ne. 1)) stop 23
+ if (any (minloc (a1, 1, l3, 2, .true.) .ne. 10)) stop 24
+ b1 = 0
+ if (any (minloc (b1) .ne. 1)) stop 1
+ if (any (minloc (b1, back=.false.) .ne. 1)) stop 2
+ if (any (minloc (b1, back=.true.) .ne. 10)) stop 3
+ if (any (minloc (b1, kind=2) .ne. 1)) stop 4
+ if (any (minloc (b1, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (minloc (b1, kind=8, back=.true.) .ne. 10)) stop 6
+ if (minloc (b1, 1) .ne. 1) stop 7
+ if (minloc (b1, 1, back=.false.) .ne. 1) stop 8
+ if (minloc (b1, 1, back=.true.) .ne. 10) stop 9
+ if (minloc (b1, 1, kind=1) .ne. 1) stop 10
+ if (minloc (b1, 1, kind=2, back=.false.) .ne. 1) stop 11
+ if (minloc (b1, 1, kind=4, back=.true.) .ne. 10) stop 12
+ if (minloc (b1, 1, l2) .ne. 1) stop 13
+ if (minloc (b1, 1, l2, back=.false.) .ne. 1) stop 14
+ if (minloc (b1, 1, l2, back=.true.) .ne. 10) stop 15
+ if (minloc (b1, 1, l2, kind=8) .ne. 1) stop 16
+ if (minloc (b1, 1, l2, 4, .false.) .ne. 1) stop 17
+ if (minloc (b1, 1, l2, 2, .true.) .ne. 10) stop 18
+ if (minloc (b1, 1, l3) .ne. 1) stop 19
+ if (minloc (b1, 1, l3, back=.false.) .ne. 1) stop 20
+ if (minloc (b1, 1, l3, back=.true.) .ne. 10) stop 21
+ if (minloc (b1, 1, l3, kind=8) .ne. 1) stop 22
+ if (minloc (b1, 1, l3, 4, .false.) .ne. 1) stop 23
+ if (minloc (b1, 1, l3, 2, .true.) .ne. 10) stop 24
+ a2 = 0
+ if (any (minloc (a2) .ne. 1)) stop 1
+ if (any (minloc (a2, back=.false.) .ne. 1)) stop 2
+ if (any (minloc (a2, back=.true.) .ne. 10)) stop 3
+ if (any (minloc (a2, kind=2) .ne. 1)) stop 4
+ if (any (minloc (a2, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (minloc (a2, kind=8, back=.true.) .ne. 10)) stop 6
+ if (any (minloc (a2, 1) .ne. 1)) stop 7
+ if (any (minloc (a2, 1, back=.false.) .ne. 1)) stop 8
+ if (any (minloc (a2, 1, back=.true.) .ne. 10)) stop 9
+ if (any (minloc (a2, 1, kind=1) .ne. 1)) stop 10
+ if (any (minloc (a2, 1, kind=2, back=.false.) .ne. 1)) stop 11
+ if (any (minloc (a2, 1, kind=4, back=.true.) .ne. 10)) stop 12
+ if (any (minloc (a2, 1, l1) .ne. 1)) stop 13
+ if (any (minloc (a2, 1, l1, back=.false.) .ne. 1)) stop 14
+ if (any (minloc (a2, 1, l1, back=.true.) .ne. 10)) stop 15
+ if (any (minloc (a2, 1, l1, kind=8) .ne. 1)) stop 16
+ if (any (minloc (a2, 1, l1, 4, .false.) .ne. 1)) stop 17
+ if (any (minloc (a2, 1, l1, 2, .true.) .ne. 10)) stop 18
+ if (any (minloc (a2, 1, l3) .ne. 1)) stop 19
+ if (any (minloc (a2, 1, l3, back=.false.) .ne. 1)) stop 20
+ if (any (minloc (a2, 1, l3, back=.true.) .ne. 10)) stop 21
+ if (any (minloc (a2, 1, l3, kind=8) .ne. 1)) stop 22
+ if (any (minloc (a2, 1, l3, 4, .false.) .ne. 1)) stop 23
+ if (any (minloc (a2, 1, l3, 2, .true.) .ne. 10)) stop 24
+ b2 = 0
+ if (any (minloc (b2) .ne. 1)) stop 1
+ if (any (minloc (b2, back=.false.) .ne. 1)) stop 2
+ if (any (minloc (b2, back=.true.) .ne. 10)) stop 3
+ if (any (minloc (b2, kind=2) .ne. 1)) stop 4
+ if (any (minloc (b2, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (minloc (b2, kind=8, back=.true.) .ne. 10)) stop 6
+ if (minloc (b2, 1) .ne. 1) stop 7
+ if (minloc (b2, 1, back=.false.) .ne. 1) stop 8
+ if (minloc (b2, 1, back=.true.) .ne. 10) stop 9
+ if (minloc (b2, 1, kind=1) .ne. 1) stop 10
+ if (minloc (b2, 1, kind=2, back=.false.) .ne. 1) stop 11
+ if (minloc (b2, 1, kind=4, back=.true.) .ne. 10) stop 12
+ if (minloc (b2, 1, l2) .ne. 1) stop 13
+ if (minloc (b2, 1, l2, back=.false.) .ne. 1) stop 14
+ if (minloc (b2, 1, l2, back=.true.) .ne. 10) stop 15
+ if (minloc (b2, 1, l2, kind=8) .ne. 1) stop 16
+ if (minloc (b2, 1, l2, 4, .false.) .ne. 1) stop 17
+ if (minloc (b2, 1, l2, 2, .true.) .ne. 10) stop 18
+ if (minloc (b2, 1, l3) .ne. 1) stop 19
+ if (minloc (b2, 1, l3, back=.false.) .ne. 1) stop 20
+ if (minloc (b2, 1, l3, back=.true.) .ne. 10) stop 21
+ if (minloc (b2, 1, l3, kind=8) .ne. 1) stop 22
+ if (minloc (b2, 1, l3, 4, .false.) .ne. 1) stop 23
+ if (minloc (b2, 1, l3, 2, .true.) .ne. 10) stop 24
+ a4 = 0
+ if (any (minloc (a4) .ne. 1)) stop 1
+ if (any (minloc (a4, back=.false.) .ne. 1)) stop 2
+ if (any (minloc (a4, back=.true.) .ne. 10)) stop 3
+ if (any (minloc (a4, kind=2) .ne. 1)) stop 4
+ if (any (minloc (a4, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (minloc (a4, kind=8, back=.true.) .ne. 10)) stop 6
+ if (any (minloc (a4, 1) .ne. 1)) stop 7
+ if (any (minloc (a4, 1, back=.false.) .ne. 1)) stop 8
+ if (any (minloc (a4, 1, back=.true.) .ne. 10)) stop 9
+ if (any (minloc (a4, 1, kind=1) .ne. 1)) stop 10
+ if (any (minloc (a4, 1, kind=2, back=.false.) .ne. 1)) stop 11
+ if (any (minloc (a4, 1, kind=4, back=.true.) .ne. 10)) stop 12
+ if (any (minloc (a4, 1, l1) .ne. 1)) stop 13
+ if (any (minloc (a4, 1, l1, back=.false.) .ne. 1)) stop 14
+ if (any (minloc (a4, 1, l1, back=.true.) .ne. 10)) stop 15
+ if (any (minloc (a4, 1, l1, kind=8) .ne. 1)) stop 16
+ if (any (minloc (a4, 1, l1, 4, .false.) .ne. 1)) stop 17
+ if (any (minloc (a4, 1, l1, 2, .true.) .ne. 10)) stop 18
+ if (any (minloc (a4, 1, l3) .ne. 1)) stop 19
+ if (any (minloc (a4, 1, l3, back=.false.) .ne. 1)) stop 20
+ if (any (minloc (a4, 1, l3, back=.true.) .ne. 10)) stop 21
+ if (any (minloc (a4, 1, l3, kind=8) .ne. 1)) stop 22
+ if (any (minloc (a4, 1, l3, 4, .false.) .ne. 1)) stop 23
+ if (any (minloc (a4, 1, l3, 2, .true.) .ne. 10)) stop 24
+ b4 = 0
+ if (any (minloc (b4) .ne. 1)) stop 1
+ if (any (minloc (b4, back=.false.) .ne. 1)) stop 2
+ if (any (minloc (b4, back=.true.) .ne. 10)) stop 3
+ if (any (minloc (b4, kind=2) .ne. 1)) stop 4
+ if (any (minloc (b4, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (minloc (b4, kind=8, back=.true.) .ne. 10)) stop 6
+ if (minloc (b4, 1) .ne. 1) stop 7
+ if (minloc (b4, 1, back=.false.) .ne. 1) stop 8
+ if (minloc (b4, 1, back=.true.) .ne. 10) stop 9
+ if (minloc (b4, 1, kind=1) .ne. 1) stop 10
+ if (minloc (b4, 1, kind=2, back=.false.) .ne. 1) stop 11
+ if (minloc (b4, 1, kind=4, back=.true.) .ne. 10) stop 12
+ if (minloc (b4, 1, l2) .ne. 1) stop 13
+ if (minloc (b4, 1, l2, back=.false.) .ne. 1) stop 14
+ if (minloc (b4, 1, l2, back=.true.) .ne. 10) stop 15
+ if (minloc (b4, 1, l2, kind=8) .ne. 1) stop 16
+ if (minloc (b4, 1, l2, 4, .false.) .ne. 1) stop 17
+ if (minloc (b4, 1, l2, 2, .true.) .ne. 10) stop 18
+ if (minloc (b4, 1, l3) .ne. 1) stop 19
+ if (minloc (b4, 1, l3, back=.false.) .ne. 1) stop 20
+ if (minloc (b4, 1, l3, back=.true.) .ne. 10) stop 21
+ if (minloc (b4, 1, l3, kind=8) .ne. 1) stop 22
+ if (minloc (b4, 1, l3, 4, .false.) .ne. 1) stop 23
+ if (minloc (b4, 1, l3, 2, .true.) .ne. 10) stop 24
+ a8 = 0
+ if (any (minloc (a8) .ne. 1)) stop 1
+ if (any (minloc (a8, back=.false.) .ne. 1)) stop 2
+ if (any (minloc (a8, back=.true.) .ne. 10)) stop 3
+ if (any (minloc (a8, kind=2) .ne. 1)) stop 4
+ if (any (minloc (a8, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (minloc (a8, kind=8, back=.true.) .ne. 10)) stop 6
+ if (any (minloc (a8, 1) .ne. 1)) stop 7
+ if (any (minloc (a8, 1, back=.false.) .ne. 1)) stop 8
+ if (any (minloc (a8, 1, back=.true.) .ne. 10)) stop 9
+ if (any (minloc (a8, 1, kind=1) .ne. 1)) stop 10
+ if (any (minloc (a8, 1, kind=2, back=.false.) .ne. 1)) stop 11
+ if (any (minloc (a8, 1, kind=4, back=.true.) .ne. 10)) stop 12
+ if (any (minloc (a8, 1, l1) .ne. 1)) stop 13
+ if (any (minloc (a8, 1, l1, back=.false.) .ne. 1)) stop 14
+ if (any (minloc (a8, 1, l1, back=.true.) .ne. 10)) stop 15
+ if (any (minloc (a8, 1, l1, kind=8) .ne. 1)) stop 16
+ if (any (minloc (a8, 1, l1, 4, .false.) .ne. 1)) stop 17
+ if (any (minloc (a8, 1, l1, 2, .true.) .ne. 10)) stop 18
+ if (any (minloc (a8, 1, l3) .ne. 1)) stop 19
+ if (any (minloc (a8, 1, l3, back=.false.) .ne. 1)) stop 20
+ if (any (minloc (a8, 1, l3, back=.true.) .ne. 10)) stop 21
+ if (any (minloc (a8, 1, l3, kind=8) .ne. 1)) stop 22
+ if (any (minloc (a8, 1, l3, 4, .false.) .ne. 1)) stop 23
+ if (any (minloc (a8, 1, l3, 2, .true.) .ne. 10)) stop 24
+ b8 = 0
+ if (any (minloc (b8) .ne. 1)) stop 1
+ if (any (minloc (b8, back=.false.) .ne. 1)) stop 2
+ if (any (minloc (b8, back=.true.) .ne. 10)) stop 3
+ if (any (minloc (b8, kind=2) .ne. 1)) stop 4
+ if (any (minloc (b8, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (minloc (b8, kind=8, back=.true.) .ne. 10)) stop 6
+ if (minloc (b8, 1) .ne. 1) stop 7
+ if (minloc (b8, 1, back=.false.) .ne. 1) stop 8
+ if (minloc (b8, 1, back=.true.) .ne. 10) stop 9
+ if (minloc (b8, 1, kind=1) .ne. 1) stop 10
+ if (minloc (b8, 1, kind=2, back=.false.) .ne. 1) stop 11
+ if (minloc (b8, 1, kind=4, back=.true.) .ne. 10) stop 12
+ if (minloc (b8, 1, l2) .ne. 1) stop 13
+ if (minloc (b8, 1, l2, back=.false.) .ne. 1) stop 14
+ if (minloc (b8, 1, l2, back=.true.) .ne. 10) stop 15
+ if (minloc (b8, 1, l2, kind=8) .ne. 1) stop 16
+ if (minloc (b8, 1, l2, 4, .false.) .ne. 1) stop 17
+ if (minloc (b8, 1, l2, 2, .true.) .ne. 10) stop 18
+ if (minloc (b8, 1, l3) .ne. 1) stop 19
+ if (minloc (b8, 1, l3, back=.false.) .ne. 1) stop 20
+ if (minloc (b8, 1, l3, back=.true.) .ne. 10) stop 21
+ if (minloc (b8, 1, l3, kind=8) .ne. 1) stop 22
+ if (minloc (b8, 1, l3, 4, .false.) .ne. 1) stop 23
+ if (minloc (b8, 1, l3, 2, .true.) .ne. 10) stop 24
+ r4 = 0.0
+ if (any (minloc (r4) .ne. 1)) stop 1
+ if (any (minloc (r4, back=.false.) .ne. 1)) stop 2
+ if (any (minloc (r4, back=.true.) .ne. 10)) stop 3
+ if (any (minloc (r4, kind=2) .ne. 1)) stop 4
+ if (any (minloc (r4, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (minloc (r4, kind=8, back=.true.) .ne. 10)) stop 6
+ if (any (minloc (r4, 1) .ne. 1)) stop 7
+ if (any (minloc (r4, 1, back=.false.) .ne. 1)) stop 8
+ if (any (minloc (r4, 1, back=.true.) .ne. 10)) stop 9
+ if (any (minloc (r4, 1, kind=1) .ne. 1)) stop 10
+ if (any (minloc (r4, 1, kind=2, back=.false.) .ne. 1)) stop 11
+ if (any (minloc (r4, 1, kind=4, back=.true.) .ne. 10)) stop 12
+ if (any (minloc (r4, 1, l1) .ne. 1)) stop 13
+ if (any (minloc (r4, 1, l1, back=.false.) .ne. 1)) stop 14
+ if (any (minloc (r4, 1, l1, back=.true.) .ne. 10)) stop 15
+ if (any (minloc (r4, 1, l1, kind=8) .ne. 1)) stop 16
+ if (any (minloc (r4, 1, l1, 4, .false.) .ne. 1)) stop 17
+ if (any (minloc (r4, 1, l1, 2, .true.) .ne. 10)) stop 18
+ if (any (minloc (r4, 1, l3) .ne. 1)) stop 19
+ if (any (minloc (r4, 1, l3, back=.false.) .ne. 1)) stop 20
+ if (any (minloc (r4, 1, l3, back=.true.) .ne. 10)) stop 21
+ if (any (minloc (r4, 1, l3, kind=8) .ne. 1)) stop 22
+ if (any (minloc (r4, 1, l3, 4, .false.) .ne. 1)) stop 23
+ if (any (minloc (r4, 1, l3, 2, .true.) .ne. 10)) stop 24
+ s4 = 0.0
+ if (any (minloc (s4) .ne. 1)) stop 1
+ if (any (minloc (s4, back=.false.) .ne. 1)) stop 2
+ if (any (minloc (s4, back=.true.) .ne. 10)) stop 3
+ if (any (minloc (s4, kind=2) .ne. 1)) stop 4
+ if (any (minloc (s4, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (minloc (s4, kind=8, back=.true.) .ne. 10)) stop 6
+ if (minloc (s4, 1) .ne. 1) stop 7
+ if (minloc (s4, 1, back=.false.) .ne. 1) stop 8
+ if (minloc (s4, 1, back=.true.) .ne. 10) stop 9
+ if (minloc (s4, 1, kind=1) .ne. 1) stop 10
+ if (minloc (s4, 1, kind=2, back=.false.) .ne. 1) stop 11
+ if (minloc (s4, 1, kind=4, back=.true.) .ne. 10) stop 12
+ if (minloc (s4, 1, l2) .ne. 1) stop 13
+ if (minloc (s4, 1, l2, back=.false.) .ne. 1) stop 14
+ if (minloc (s4, 1, l2, back=.true.) .ne. 10) stop 15
+ if (minloc (s4, 1, l2, kind=8) .ne. 1) stop 16
+ if (minloc (s4, 1, l2, 4, .false.) .ne. 1) stop 17
+ if (minloc (s4, 1, l2, 2, .true.) .ne. 10) stop 18
+ if (minloc (s4, 1, l3) .ne. 1) stop 19
+ if (minloc (s4, 1, l3, back=.false.) .ne. 1) stop 20
+ if (minloc (s4, 1, l3, back=.true.) .ne. 10) stop 21
+ if (minloc (s4, 1, l3, kind=8) .ne. 1) stop 22
+ if (minloc (s4, 1, l3, 4, .false.) .ne. 1) stop 23
+ if (minloc (s4, 1, l3, 2, .true.) .ne. 10) stop 24
+ r8 = 0.0
+ if (any (minloc (r8) .ne. 1)) stop 1
+ if (any (minloc (r8, back=.false.) .ne. 1)) stop 2
+ if (any (minloc (r8, back=.true.) .ne. 10)) stop 3
+ if (any (minloc (r8, kind=2) .ne. 1)) stop 4
+ if (any (minloc (r8, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (minloc (r8, kind=8, back=.true.) .ne. 10)) stop 6
+ if (any (minloc (r8, 1) .ne. 1)) stop 7
+ if (any (minloc (r8, 1, back=.false.) .ne. 1)) stop 8
+ if (any (minloc (r8, 1, back=.true.) .ne. 10)) stop 9
+ if (any (minloc (r8, 1, kind=1) .ne. 1)) stop 10
+ if (any (minloc (r8, 1, kind=2, back=.false.) .ne. 1)) stop 11
+ if (any (minloc (r8, 1, kind=4, back=.true.) .ne. 10)) stop 12
+ if (any (minloc (r8, 1, l1) .ne. 1)) stop 13
+ if (any (minloc (r8, 1, l1, back=.false.) .ne. 1)) stop 14
+ if (any (minloc (r8, 1, l1, back=.true.) .ne. 10)) stop 15
+ if (any (minloc (r8, 1, l1, kind=8) .ne. 1)) stop 16
+ if (any (minloc (r8, 1, l1, 4, .false.) .ne. 1)) stop 17
+ if (any (minloc (r8, 1, l1, 2, .true.) .ne. 10)) stop 18
+ if (any (minloc (r8, 1, l3) .ne. 1)) stop 19
+ if (any (minloc (r8, 1, l3, back=.false.) .ne. 1)) stop 20
+ if (any (minloc (r8, 1, l3, back=.true.) .ne. 10)) stop 21
+ if (any (minloc (r8, 1, l3, kind=8) .ne. 1)) stop 22
+ if (any (minloc (r8, 1, l3, 4, .false.) .ne. 1)) stop 23
+ if (any (minloc (r8, 1, l3, 2, .true.) .ne. 10)) stop 24
+ s8 = 0.0
+ if (any (minloc (s8) .ne. 1)) stop 1
+ if (any (minloc (s8, back=.false.) .ne. 1)) stop 2
+ if (any (minloc (s8, back=.true.) .ne. 10)) stop 3
+ if (any (minloc (s8, kind=2) .ne. 1)) stop 4
+ if (any (minloc (s8, kind=4, back=.false.) .ne. 1)) stop 5
+ if (any (minloc (s8, kind=8, back=.true.) .ne. 10)) stop 6
+ if (minloc (s8, 1) .ne. 1) stop 7
+ if (minloc (s8, 1, back=.false.) .ne. 1) stop 8
+ if (minloc (s8, 1, back=.true.) .ne. 10) stop 9
+ if (minloc (s8, 1, kind=1) .ne. 1) stop 10
+ if (minloc (s8, 1, kind=2, back=.false.) .ne. 1) stop 11
+ if (minloc (s8, 1, kind=4, back=.true.) .ne. 10) stop 12
+ if (minloc (s8, 1, l2) .ne. 1) stop 13
+ if (minloc (s8, 1, l2, back=.false.) .ne. 1) stop 14
+ if (minloc (s8, 1, l2, back=.true.) .ne. 10) stop 15
+ if (minloc (s8, 1, l2, kind=8) .ne. 1) stop 16
+ if (minloc (s8, 1, l2, 4, .false.) .ne. 1) stop 17
+ if (minloc (s8, 1, l2, 2, .true.) .ne. 10) stop 18
+ if (minloc (s8, 1, l3) .ne. 1) stop 19
+ if (minloc (s8, 1, l3, back=.false.) .ne. 1) stop 20
+ if (minloc (s8, 1, l3, back=.true.) .ne. 10) stop 21
+ if (minloc (s8, 1, l3, kind=8) .ne. 1) stop 22
+ if (minloc (s8, 1, l3, 4, .false.) .ne. 1) stop 23
+ if (minloc (s8, 1, l3, 2, .true.) .ne. 10) stop 24
+end
diff --git a/gcc/testsuite/gfortran.dg/pr120191_2.f90 b/gcc/testsuite/gfortran.dg/pr120191_2.f90
new file mode 100644
index 0000000..6334286
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pr120191_2.f90
@@ -0,0 +1,84 @@
+! PR fortran/120191
+! { dg-do run }
+
+ character(kind=1, len=2) :: a(4, 4, 4), b(4)
+ logical :: l(4, 4, 4), m, n(4)
+ a = 'aa'
+ b = 'aa'
+ l = .true.
+ m = .true.
+ n = .true.
+ if (any (maxloc (a) .ne. 1)) stop 1
+ if (any (maxloc (a, dim=1) .ne. 1)) stop 2
+ if (any (maxloc (a, 1) .ne. 1)) stop 3
+ if (any (maxloc (a, dim=1, mask=l, kind=4, back=.false.) .ne. 1)) stop 4
+ if (any (maxloc (a, 1, l, 4, .false.) .ne. 1)) stop 5
+ if (any (maxloc (a, dim=1, mask=m, kind=4, back=.false.) .ne. 1)) stop 6
+ if (any (maxloc (a, 1, m, 4, .false.) .ne. 1)) stop 7
+ if (any (maxloc (a, dim=1, mask=l, kind=4, back=.true.) .ne. 4)) stop 8
+ if (any (maxloc (a, 1, l, 4, .true.) .ne. 4)) stop 9
+ if (any (maxloc (a, dim=1, mask=m, kind=4, back=.true.) .ne. 4)) stop 10
+ if (any (maxloc (a, 1, m, 4, .true.) .ne. 4)) stop 11
+ if (any (maxloc (b) .ne. 1)) stop 12
+ if (maxloc (b, dim=1) .ne. 1) stop 13
+ if (maxloc (b, 1) .ne. 1) stop 14
+ if (maxloc (b, dim=1, mask=n, kind=4, back=.false.) .ne. 1) stop 15
+ if (maxloc (b, 1, n, 4, .false.) .ne. 1) stop 16
+ if (maxloc (b, dim=1, mask=m, kind=4, back=.false.) .ne. 1) stop 17
+ if (maxloc (b, 1, m, 4, .false.) .ne. 1) stop 18
+ if (maxloc (b, dim=1, mask=n, kind=4, back=.true.) .ne. 4) stop 19
+ if (maxloc (b, 1, n, 4, .true.) .ne. 4) stop 20
+ if (maxloc (b, dim=1, mask=m, kind=4, back=.true.) .ne. 4) stop 21
+ if (maxloc (b, 1, m, 4, .true.) .ne. 4) stop 22
+ l = .false.
+ m = .false.
+ n = .false.
+ if (any (maxloc (a, dim=1, mask=l, kind=4, back=.false.) .ne. 0)) stop 23
+ if (any (maxloc (a, 1, l, 4, .false.) .ne. 0)) stop 24
+ if (maxloc (b, dim=1, mask=n, kind=4, back=.false.) .ne. 0) stop 25
+ if (maxloc (b, 1, n, 4, .false.) .ne. 0) stop 26
+ if (maxloc (b, dim=1, mask=m, kind=4, back=.false.) .ne. 0) stop 27
+ if (maxloc (b, 1, m, 4, .false.) .ne. 0) stop 28
+ if (maxloc (b, dim=1, mask=n, kind=4, back=.true.) .ne. 0) stop 29
+ if (maxloc (b, 1, n, 4, .true.) .ne. 0) stop 30
+ if (maxloc (b, dim=1, mask=m, kind=4, back=.true.) .ne. 0) stop 31
+ if (maxloc (b, 1, m, 4, .true.) .ne. 0) stop 32
+ l = .true.
+ m = .true.
+ n = .true.
+ if (any (minloc (a) .ne. 1)) stop 1
+ if (any (minloc (a, dim=1) .ne. 1)) stop 2
+ if (any (minloc (a, 1) .ne. 1)) stop 3
+ if (any (minloc (a, dim=1, mask=l, kind=4, back=.false.) .ne. 1)) stop 4
+ if (any (minloc (a, 1, l, 4, .false.) .ne. 1)) stop 5
+ if (any (minloc (a, dim=1, mask=m, kind=4, back=.false.) .ne. 1)) stop 6
+ if (any (minloc (a, 1, m, 4, .false.) .ne. 1)) stop 7
+ if (any (minloc (a, dim=1, mask=l, kind=4, back=.true.) .ne. 4)) stop 8
+ if (any (minloc (a, 1, l, 4, .true.) .ne. 4)) stop 9
+ if (any (minloc (a, dim=1, mask=m, kind=4, back=.true.) .ne. 4)) stop 10
+ if (any (minloc (a, 1, m, 4, .true.) .ne. 4)) stop 11
+ if (any (minloc (b) .ne. 1)) stop 12
+ if (minloc (b, dim=1) .ne. 1) stop 13
+ if (minloc (b, 1) .ne. 1) stop 14
+ if (minloc (b, dim=1, mask=n, kind=4, back=.false.) .ne. 1) stop 15
+ if (minloc (b, 1, n, 4, .false.) .ne. 1) stop 16
+ if (minloc (b, dim=1, mask=m, kind=4, back=.false.) .ne. 1) stop 17
+ if (minloc (b, 1, m, 4, .false.) .ne. 1) stop 18
+ if (minloc (b, dim=1, mask=n, kind=4, back=.true.) .ne. 4) stop 19
+ if (minloc (b, 1, n, 4, .true.) .ne. 4) stop 20
+ if (minloc (b, dim=1, mask=m, kind=4, back=.true.) .ne. 4) stop 21
+ if (minloc (b, 1, m, 4, .true.) .ne. 4) stop 22
+ l = .false.
+ m = .false.
+ n = .false.
+ if (any (minloc (a, dim=1, mask=l, kind=4, back=.false.) .ne. 0)) stop 23
+ if (any (minloc (a, 1, l, 4, .false.) .ne. 0)) stop 24
+ if (minloc (b, dim=1, mask=n, kind=4, back=.false.) .ne. 0) stop 25
+ if (minloc (b, 1, n, 4, .false.) .ne. 0) stop 26
+ if (minloc (b, dim=1, mask=m, kind=4, back=.false.) .ne. 0) stop 27
+ if (minloc (b, 1, m, 4, .false.) .ne. 0) stop 28
+ if (minloc (b, dim=1, mask=n, kind=4, back=.true.) .ne. 0) stop 29
+ if (minloc (b, 1, n, 4, .true.) .ne. 0) stop 30
+ if (minloc (b, dim=1, mask=m, kind=4, back=.true.) .ne. 0) stop 31
+ if (minloc (b, 1, m, 4, .true.) .ne. 0) stop 32
+end
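
The two new tests above (and pr120191_3.f90 below) exercise MAXLOC and MINLOC on character arrays whose elements all compare equal, combining the DIM, MASK, KIND and BACK arguments.  As a rough guide to the expected values, here is a stand-alone C sketch of the semantics the tests rely on; the function name and the 1-D shape are illustrative only, not libgfortran entry points.  With ties, BACK=.false. selects the first position, BACK=.true. the last, and a mask that selects nothing yields 0.

#include <stdio.h>

/* Illustrative 1-based MAXLOC over a 1-D array with an optional mask
   and a BACK flag; not the libgfortran implementation.  */
static int
argmax_1based (const double *a, const int *mask, int n, int back)
{
  int pos = 0;                  /* 0 means "nothing selected".  */
  double best = 0.0;
  for (int i = 1; i <= n; i++)
    {
      if (mask && !mask[i - 1])
        continue;
      /* ">" keeps the first maximum; with BACK, ">=" keeps the last.  */
      if (pos == 0 || a[i - 1] > best || (back && a[i - 1] >= best))
        {
          best = a[i - 1];
          pos = i;
        }
    }
  return pos;
}

int
main (void)
{
  double a[4] = { 1.0, 1.0, 1.0, 1.0 };
  int none[4] = { 0, 0, 0, 0 };
  printf ("%d %d %d\n",
          argmax_1based (a, 0, 4, 0),     /* 1: first of the ties  */
          argmax_1based (a, 0, 4, 1),     /* 4: last of the ties   */
          argmax_1based (a, none, 4, 0)); /* 0: nothing selected   */
  return 0;
}

The stop codes above follow the same pattern: 1 for the forward searches, the extent of the searched dimension for BACK=.true., and 0 once the mask rules every element out.
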
diff --git a/gcc/testsuite/gfortran.dg/pr120191_3.f90 b/gcc/testsuite/gfortran.dg/pr120191_3.f90
new file mode 100644
index 0000000..26e4095
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pr120191_3.f90
@@ -0,0 +1,23 @@
+! PR fortran/120191
+! { dg-do run }
+
+ character(kind=1, len=2) :: a(4, 4, 4), b(4)
+ logical :: l(4, 4, 4), m, n(4)
+ a = 'aa'
+ b = 'aa'
+ l = .false.
+ m = .false.
+ n = .false.
+ if (any (maxloc (a, dim=1, mask=m, kind=4, back=.false.) .ne. 0)) stop 1
+ if (any (maxloc (a, 1, m, 4, .false.) .ne. 0)) stop 2
+ if (any (maxloc (a, dim=1, mask=l, kind=4, back=.true.) .ne. 0)) stop 3
+ if (any (maxloc (a, 1, l, 4, .true.) .ne. 0)) stop 4
+ if (any (maxloc (a, dim=1, mask=m, kind=4, back=.true.) .ne. 0)) stop 5
+ if (any (maxloc (a, 1, m, 4, .true.) .ne. 0)) stop 6
+ if (any (minloc (a, dim=1, mask=m, kind=4, back=.false.) .ne. 0)) stop 7
+ if (any (minloc (a, 1, m, 4, .false.) .ne. 0)) stop 8
+ if (any (minloc (a, dim=1, mask=l, kind=4, back=.true.) .ne. 0)) stop 9
+ if (any (minloc (a, 1, l, 4, .true.) .ne. 0)) stop 10
+ if (any (minloc (a, dim=1, mask=m, kind=4, back=.true.) .ne. 0)) stop 11
+ if (any (minloc (a, 1, m, 4, .true.) .ne. 0)) stop 12
+end
diff --git a/gcc/testsuite/gfortran.dg/pr120196.f90 b/gcc/testsuite/gfortran.dg/pr120196.f90
new file mode 100644
index 0000000..368c43a
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pr120196.f90
@@ -0,0 +1,26 @@
+! PR libfortran/120196
+! { dg-do run }
+
+program pr120196
+ character(len=:, kind=1), allocatable :: a(:), s
+ character(len=:, kind=4), allocatable :: b(:), t
+ logical, allocatable :: l(:)
+ logical :: m
+ allocate (character(len=16, kind=1) :: a(10), s)
+ allocate (l(10))
+ a(:) = ""
+ s = "*"
+ l = .true.
+ m = .true.
+ if (findloc (a, s, dim=1, back=.true.) .ne. 0) stop 1
+ if (findloc (a, s, mask=l, dim=1, back=.true.) .ne. 0) stop 2
+ if (findloc (a, s, mask=m, dim=1, back=.true.) .ne. 0) stop 3
+ deallocate (a, s)
+ allocate (character(len=16, kind=4) :: b(10), t)
+ b(:) = ""
+ t = "*"
+ if (findloc (b, t, dim=1, back=.true.) .ne. 0) stop 4
+ if (findloc (b, t, mask=l, dim=1, back=.true.) .ne. 0) stop 5
+ if (findloc (b, t, mask=m, dim=1, back=.true.) .ne. 0) stop 6
+ deallocate (b, t, l)
+end program pr120196
diff --git a/gcc/testsuite/gm2.dg/doc/examples/plugin/fail/doc-examples-plugin-fail.exp b/gcc/testsuite/gm2.dg/doc/examples/plugin/fail/doc-examples-plugin-fail.exp
index 8a41ff8..6ddf2d5 100644
--- a/gcc/testsuite/gm2.dg/doc/examples/plugin/fail/doc-examples-plugin-fail.exp
+++ b/gcc/testsuite/gm2.dg/doc/examples/plugin/fail/doc-examples-plugin-fail.exp
@@ -11,7 +11,7 @@ gm2_init_pim4 $srcdir/$subdir
dg-init
# If the --enable-plugin has not been enabled during configure, bail.
-if { ![gm2-dg-frontend-configure-check "enable-plugin" ] } {
+if { ![info exists TESTING_IN_BUILD_TREE] || ![info exists ENABLE_PLUGIN] } {
return
}
diff --git a/gcc/tree-cfg.cc b/gcc/tree-cfg.cc
index 6a95b82..928459a 100644
--- a/gcc/tree-cfg.cc
+++ b/gcc/tree-cfg.cc
@@ -3870,7 +3870,6 @@ verify_gimple_assign_unary (gassign *stmt)
case NEGATE_EXPR:
case ABS_EXPR:
case BIT_NOT_EXPR:
- case PAREN_EXPR:
case CONJ_EXPR:
/* Disallow pointer and offset types for many of the unary gimple. */
if (POINTER_TYPE_P (lhs_type)
@@ -3883,6 +3882,17 @@ verify_gimple_assign_unary (gassign *stmt)
}
break;
+ case PAREN_EXPR:
+      /* Disallow non-arithmetic types on PAREN_EXPR.  */
+ if (AGGREGATE_TYPE_P (lhs_type))
+ {
+ error ("invalid types for %qs", code_name);
+ debug_generic_expr (lhs_type);
+ debug_generic_expr (rhs1_type);
+ return true;
+ }
+ break;
+
case ABSU_EXPR:
if (!ANY_INTEGRAL_TYPE_P (lhs_type)
|| !TYPE_UNSIGNED (lhs_type)
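
The tree-cfg.cc hunk above gives PAREN_EXPR its own verifier case: pointer-typed operands are no longer rejected along with NEGATE_EXPR and friends, and only aggregate types remain invalid.  A minimal C sketch of the kind of input this is meant to accept, assuming the new c-c++-common/pr118868-1.c testcase exercises __builtin_assoc_barrier (the C/C++ construct GCC lowers to PAREN_EXPR) on a pointer value; the function name here is made up:

/* Hypothetical reduction of PR 118868: a reassociation barrier around
   a pointer.  Before the change the GIMPLE verifier rejected the
   resulting pointer-typed PAREN_EXPR; now only aggregate-typed
   operands are flagged.  */
void *
keep_pointer (void *p)
{
  return __builtin_assoc_barrier (p);
}
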
diff --git a/gcc/tree-vect-loop.cc b/gcc/tree-vect-loop.cc
index fe6f3cf..2d1a688 100644
--- a/gcc/tree-vect-loop.cc
+++ b/gcc/tree-vect-loop.cc
@@ -9698,7 +9698,7 @@ vectorizable_nonlinear_induction (loop_vec_info loop_vinfo,
gphi *phi = dyn_cast <gphi *> (stmt_info->stmt);
- tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+ tree vectype = SLP_TREE_VECTYPE (slp_node);
poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
enum vect_induction_op_type induction_type
= STMT_VINFO_LOOP_PHI_EVOLUTION_TYPE (stmt_info);
@@ -9723,7 +9723,7 @@ vectorizable_nonlinear_induction (loop_vec_info loop_vinfo,
/* TODO: Support multi-lane SLP for nonlinear iv. There should be separate
vector iv update for each iv and a permutation to generate wanted
vector iv. */
- if (slp_node && SLP_TREE_LANES (slp_node) > 1)
+ if (SLP_TREE_LANES (slp_node) > 1)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -9934,13 +9934,7 @@ vectorizable_nonlinear_induction (loop_vec_info loop_vinfo,
add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
UNKNOWN_LOCATION);
- if (slp_node)
- slp_node->push_vec_def (induction_phi);
- else
- {
- STMT_VINFO_VEC_STMTS (stmt_info).safe_push (induction_phi);
- *vec_stmt = induction_phi;
- }
+ slp_node->push_vec_def (induction_phi);
/* In case that vectorization factor (VF) is bigger than the number
of elements that we can fit in a vectype (nunits), we have to generate
@@ -9970,10 +9964,7 @@ vectorizable_nonlinear_induction (loop_vec_info loop_vinfo,
induction_type);
gsi_insert_seq_before (&si, stmts, GSI_SAME_STMT);
new_stmt = SSA_NAME_DEF_STMT (vec_def);
- if (slp_node)
- slp_node->push_vec_def (new_stmt);
- else
- STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
+ slp_node->push_vec_def (new_stmt);
}
}
@@ -9999,15 +9990,13 @@ vectorizable_induction (loop_vec_info loop_vinfo,
stmt_vector_for_cost *cost_vec)
{
class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
- unsigned ncopies;
bool nested_in_vect_loop = false;
class loop *iv_loop;
tree vec_def;
edge pe = loop_preheader_edge (loop);
basic_block new_bb;
- tree new_vec, vec_init = NULL_TREE, vec_step, t;
+ tree vec_init = NULL_TREE, vec_step, t;
tree new_name;
- gimple *new_stmt;
gphi *induction_phi;
tree induc_def, vec_dest;
poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
@@ -10034,15 +10023,9 @@ vectorizable_induction (loop_vec_info loop_vinfo,
return vectorizable_nonlinear_induction (loop_vinfo, stmt_info,
vec_stmt, slp_node, cost_vec);
- tree vectype = STMT_VINFO_VECTYPE (stmt_info);
+ tree vectype = SLP_TREE_VECTYPE (slp_node);
poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
- if (slp_node)
- ncopies = 1;
- else
- ncopies = vect_get_num_copies (loop_vinfo, vectype);
- gcc_assert (ncopies >= 1);
-
/* FORNOW. These restrictions should be relaxed. */
if (nested_in_vect_loop_p (loop, stmt_info))
{
@@ -10052,14 +10035,6 @@ vectorizable_induction (loop_vec_info loop_vinfo,
edge latch_e;
tree loop_arg;
- if (ncopies > 1)
- {
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "multiple types in nested loop.\n");
- return false;
- }
-
exit_phi = NULL;
latch_e = loop_latch_edge (loop->inner);
loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
@@ -10096,7 +10071,7 @@ vectorizable_induction (loop_vec_info loop_vinfo,
iv_loop = loop;
gcc_assert (iv_loop == (gimple_bb (phi))->loop_father);
- if (slp_node && (!nunits.is_constant () && SLP_TREE_LANES (slp_node) != 1))
+ if (!nunits.is_constant () && SLP_TREE_LANES (slp_node) != 1)
{
/* The current SLP code creates the step value element-by-element. */
if (dump_enabled_p ())
@@ -10152,41 +10127,28 @@ vectorizable_induction (loop_vec_info loop_vinfo,
if (!vec_stmt) /* transformation not required. */
{
unsigned inside_cost = 0, prologue_cost = 0;
- if (slp_node)
- {
- /* We eventually need to set a vector type on invariant
- arguments. */
- unsigned j;
- slp_tree child;
- FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (slp_node), j, child)
- if (!vect_maybe_update_slp_op_vectype
- (child, SLP_TREE_VECTYPE (slp_node)))
- {
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "incompatible vector types for "
- "invariants\n");
- return false;
- }
- /* loop cost for vec_loop. */
- inside_cost
- = record_stmt_cost (cost_vec,
- SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node),
- vector_stmt, stmt_info, 0, vect_body);
- /* prologue cost for vec_init (if not nested) and step. */
- prologue_cost = record_stmt_cost (cost_vec, 1 + !nested_in_vect_loop,
- scalar_to_vec,
- stmt_info, 0, vect_prologue);
- }
- else /* if (!slp_node) */
- {
- /* loop cost for vec_loop. */
- inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
- stmt_info, 0, vect_body);
- /* prologue cost for vec_init and vec_step. */
- prologue_cost = record_stmt_cost (cost_vec, 2, scalar_to_vec,
- stmt_info, 0, vect_prologue);
- }
+ /* We eventually need to set a vector type on invariant
+ arguments. */
+ unsigned j;
+ slp_tree child;
+ FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (slp_node), j, child)
+ if (!vect_maybe_update_slp_op_vectype
+ (child, SLP_TREE_VECTYPE (slp_node)))
+ {
+ if (dump_enabled_p ())
+ dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ "incompatible vector types for "
+ "invariants\n");
+ return false;
+ }
+ /* loop cost for vec_loop. */
+ inside_cost = record_stmt_cost (cost_vec,
+ SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node),
+ vector_stmt, stmt_info, 0, vect_body);
+ /* prologue cost for vec_init (if not nested) and step. */
+ prologue_cost = record_stmt_cost (cost_vec, 1 + !nested_in_vect_loop,
+ scalar_to_vec,
+ stmt_info, 0, vect_prologue);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_induction_cost: inside_cost = %d, "
@@ -10217,670 +10179,374 @@ vectorizable_induction (loop_vec_info loop_vinfo,
with group size 3 we need
[i0, i1, i2, i0 + S0] [i1 + S1, i2 + S2, i0 + 2*S0, i1 + 2*S1]
[i2 + 2*S2, i0 + 3*S0, i1 + 3*S1, i2 + 3*S2]. */
- if (slp_node)
+ gimple_stmt_iterator incr_si;
+ bool insert_after;
+ standard_iv_increment_position (iv_loop, &incr_si, &insert_after);
+
+ /* The initial values are vectorized, but any lanes > group_size
+ need adjustment. */
+ slp_tree init_node
+ = SLP_TREE_CHILDREN (slp_node)[pe->dest_idx];
+
+ /* Gather steps. Since we do not vectorize inductions as
+ cycles we have to reconstruct the step from SCEV data. */
+ unsigned group_size = SLP_TREE_LANES (slp_node);
+ tree *steps = XALLOCAVEC (tree, group_size);
+ tree *inits = XALLOCAVEC (tree, group_size);
+ stmt_vec_info phi_info;
+ FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, phi_info)
+ {
+ steps[i] = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (phi_info);
+ if (!init_node)
+ inits[i] = gimple_phi_arg_def (as_a<gphi *> (phi_info->stmt),
+ pe->dest_idx);
+ }
+
+ /* Now generate the IVs. */
+ unsigned nvects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
+ gcc_assert (multiple_p (nunits * nvects, group_size));
+ unsigned nivs;
+ unsigned HOST_WIDE_INT const_nunits;
+ if (nested_in_vect_loop)
+ nivs = nvects;
+ else if (nunits.is_constant (&const_nunits))
{
- gimple_stmt_iterator incr_si;
- bool insert_after;
- standard_iv_increment_position (iv_loop, &incr_si, &insert_after);
-
- /* The initial values are vectorized, but any lanes > group_size
- need adjustment. */
- slp_tree init_node
- = SLP_TREE_CHILDREN (slp_node)[pe->dest_idx];
-
- /* Gather steps. Since we do not vectorize inductions as
- cycles we have to reconstruct the step from SCEV data. */
- unsigned group_size = SLP_TREE_LANES (slp_node);
- tree *steps = XALLOCAVEC (tree, group_size);
- tree *inits = XALLOCAVEC (tree, group_size);
- stmt_vec_info phi_info;
- FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (slp_node), i, phi_info)
- {
- steps[i] = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (phi_info);
- if (!init_node)
- inits[i] = gimple_phi_arg_def (as_a<gphi *> (phi_info->stmt),
- pe->dest_idx);
- }
-
- /* Now generate the IVs. */
- unsigned nvects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
- gcc_assert (multiple_p (nunits * nvects, group_size));
- unsigned nivs;
- unsigned HOST_WIDE_INT const_nunits;
- if (nested_in_vect_loop)
- nivs = nvects;
- else if (nunits.is_constant (&const_nunits))
- {
- /* Compute the number of distinct IVs we need. First reduce
- group_size if it is a multiple of const_nunits so we get
- one IV for a group_size of 4 but const_nunits 2. */
- unsigned group_sizep = group_size;
- if (group_sizep % const_nunits == 0)
- group_sizep = group_sizep / const_nunits;
- nivs = least_common_multiple (group_sizep,
- const_nunits) / const_nunits;
- }
- else
- {
- gcc_assert (SLP_TREE_LANES (slp_node) == 1);
- nivs = 1;
- }
- gimple_seq init_stmts = NULL;
- tree lupdate_mul = NULL_TREE;
- if (!nested_in_vect_loop)
+ /* Compute the number of distinct IVs we need. First reduce
+ group_size if it is a multiple of const_nunits so we get
+ one IV for a group_size of 4 but const_nunits 2. */
+ unsigned group_sizep = group_size;
+ if (group_sizep % const_nunits == 0)
+ group_sizep = group_sizep / const_nunits;
+ nivs = least_common_multiple (group_sizep, const_nunits) / const_nunits;
+ }
+ else
+ {
+ gcc_assert (SLP_TREE_LANES (slp_node) == 1);
+ nivs = 1;
+ }
+ gimple_seq init_stmts = NULL;
+ tree lupdate_mul = NULL_TREE;
+ if (!nested_in_vect_loop)
+ {
+ if (nunits.is_constant (&const_nunits))
{
- if (nunits.is_constant (&const_nunits))
- {
- /* The number of iterations covered in one vector iteration. */
- unsigned lup_mul = (nvects * const_nunits) / group_size;
- lupdate_mul
- = build_vector_from_val (step_vectype,
- SCALAR_FLOAT_TYPE_P (stept)
- ? build_real_from_wide (stept, lup_mul,
- UNSIGNED)
- : build_int_cstu (stept, lup_mul));
- }
- else
- {
- if (SCALAR_FLOAT_TYPE_P (stept))
- {
- tree tem = build_int_cst (integer_type_node, vf);
- lupdate_mul = gimple_build (&init_stmts, FLOAT_EXPR,
- stept, tem);
- }
- else
- lupdate_mul = build_int_cst (stept, vf);
- lupdate_mul = gimple_build_vector_from_val (&init_stmts,
- step_vectype,
- lupdate_mul);
- }
+ /* The number of iterations covered in one vector iteration. */
+ unsigned lup_mul = (nvects * const_nunits) / group_size;
+ lupdate_mul
+ = build_vector_from_val (step_vectype,
+ SCALAR_FLOAT_TYPE_P (stept)
+ ? build_real_from_wide (stept, lup_mul,
+ UNSIGNED)
+ : build_int_cstu (stept, lup_mul));
}
- tree peel_mul = NULL_TREE;
- if (LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo))
+ else
{
if (SCALAR_FLOAT_TYPE_P (stept))
- peel_mul = gimple_build (&init_stmts, FLOAT_EXPR, stept,
- LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo));
- else
- peel_mul = gimple_convert (&init_stmts, stept,
- LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo));
- peel_mul = gimple_build_vector_from_val (&init_stmts,
- step_vectype, peel_mul);
-
- /* If early break then we have to create a new PHI which we can use as
- an offset to adjust the induction reduction in early exits.
-
- This is because when peeling for alignment using masking, the first
- few elements of the vector can be inactive. As such if we find the
- entry in the first iteration we have adjust the starting point of
- the scalar code.
-
- We do this by creating a new scalar PHI that keeps track of whether
- we are the first iteration of the loop (with the additional masking)
- or whether we have taken a loop iteration already.
-
- The generated sequence:
-
- pre-header:
- bb1:
- i_1 = <number of leading inactive elements>
-
- header:
- bb2:
- i_2 = PHI <i_1(bb1), 0(latch)>
- …
-
- early-exit:
- bb3:
- i_3 = iv_step * i_2 + PHI<vector-iv>
-
- The first part of the adjustment to create i_1 and i_2 are done here
- and the last part creating i_3 is done in
- vectorizable_live_operations when the induction extraction is
- materialized. */
- if (LOOP_VINFO_EARLY_BREAKS (loop_vinfo)
- && !LOOP_VINFO_MASK_NITERS_PFA_OFFSET (loop_vinfo))
{
- auto skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
- tree ty_skip_niters = TREE_TYPE (skip_niters);
- tree break_lhs_phi = vect_get_new_vect_var (ty_skip_niters,
- vect_scalar_var,
- "pfa_iv_offset");
- gphi *nphi = create_phi_node (break_lhs_phi, bb);
- add_phi_arg (nphi, skip_niters, pe, UNKNOWN_LOCATION);
- add_phi_arg (nphi, build_zero_cst (ty_skip_niters),
- loop_latch_edge (iv_loop), UNKNOWN_LOCATION);
-
- LOOP_VINFO_MASK_NITERS_PFA_OFFSET (loop_vinfo)
- = PHI_RESULT (nphi);
+ tree tem = build_int_cst (integer_type_node, vf);
+ lupdate_mul = gimple_build (&init_stmts, FLOAT_EXPR, stept, tem);
}
+ else
+ lupdate_mul = build_int_cst (stept, vf);
+ lupdate_mul = gimple_build_vector_from_val (&init_stmts, step_vectype,
+ lupdate_mul);
}
- tree step_mul = NULL_TREE;
- unsigned ivn;
- auto_vec<tree> vec_steps;
- for (ivn = 0; ivn < nivs; ++ivn)
+ }
+ tree peel_mul = NULL_TREE;
+ if (LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo))
+ {
+ if (SCALAR_FLOAT_TYPE_P (stept))
+ peel_mul = gimple_build (&init_stmts, FLOAT_EXPR, stept,
+ LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo));
+ else
+ peel_mul = gimple_convert (&init_stmts, stept,
+ LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo));
+ peel_mul = gimple_build_vector_from_val (&init_stmts,
+ step_vectype, peel_mul);
+
+ /* If early break then we have to create a new PHI which we can use as
+ an offset to adjust the induction reduction in early exits.
+
+ This is because when peeling for alignment using masking, the first
+ few elements of the vector can be inactive. As such if we find the
+	 entry in the first iteration we have to adjust the starting point of
+ the scalar code.
+
+ We do this by creating a new scalar PHI that keeps track of whether
+ we are the first iteration of the loop (with the additional masking)
+ or whether we have taken a loop iteration already.
+
+ The generated sequence:
+
+ pre-header:
+ bb1:
+ i_1 = <number of leading inactive elements>
+
+ header:
+ bb2:
+ i_2 = PHI <i_1(bb1), 0(latch)>
+ …
+
+ early-exit:
+ bb3:
+ i_3 = iv_step * i_2 + PHI<vector-iv>
+
+ The first part of the adjustment to create i_1 and i_2 are done here
+ and the last part creating i_3 is done in
+ vectorizable_live_operations when the induction extraction is
+ materialized. */
+ if (LOOP_VINFO_EARLY_BREAKS (loop_vinfo)
+ && !LOOP_VINFO_MASK_NITERS_PFA_OFFSET (loop_vinfo))
+ {
+ auto skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
+ tree ty_skip_niters = TREE_TYPE (skip_niters);
+ tree break_lhs_phi = vect_get_new_vect_var (ty_skip_niters,
+ vect_scalar_var,
+ "pfa_iv_offset");
+ gphi *nphi = create_phi_node (break_lhs_phi, bb);
+ add_phi_arg (nphi, skip_niters, pe, UNKNOWN_LOCATION);
+ add_phi_arg (nphi, build_zero_cst (ty_skip_niters),
+ loop_latch_edge (iv_loop), UNKNOWN_LOCATION);
+
+ LOOP_VINFO_MASK_NITERS_PFA_OFFSET (loop_vinfo) = PHI_RESULT (nphi);
+ }
+ }
+ tree step_mul = NULL_TREE;
+ unsigned ivn;
+ auto_vec<tree> vec_steps;
+ for (ivn = 0; ivn < nivs; ++ivn)
+ {
+ gimple_seq stmts = NULL;
+ bool invariant = true;
+ if (nunits.is_constant (&const_nunits))
{
- gimple_seq stmts = NULL;
- bool invariant = true;
- if (nunits.is_constant (&const_nunits))
+ tree_vector_builder step_elts (step_vectype, const_nunits, 1);
+ tree_vector_builder init_elts (vectype, const_nunits, 1);
+ tree_vector_builder mul_elts (step_vectype, const_nunits, 1);
+ for (unsigned eltn = 0; eltn < const_nunits; ++eltn)
{
- tree_vector_builder step_elts (step_vectype, const_nunits, 1);
- tree_vector_builder init_elts (vectype, const_nunits, 1);
- tree_vector_builder mul_elts (step_vectype, const_nunits, 1);
- for (unsigned eltn = 0; eltn < const_nunits; ++eltn)
- {
- /* The scalar steps of the IVs. */
- tree elt = steps[(ivn*const_nunits + eltn) % group_size];
- elt = gimple_convert (&init_stmts,
- TREE_TYPE (step_vectype), elt);
- step_elts.quick_push (elt);
- if (!init_node)
- {
- /* The scalar inits of the IVs if not vectorized. */
- elt = inits[(ivn*const_nunits + eltn) % group_size];
- if (!useless_type_conversion_p (TREE_TYPE (vectype),
- TREE_TYPE (elt)))
- elt = gimple_build (&init_stmts, VIEW_CONVERT_EXPR,
- TREE_TYPE (vectype), elt);
- init_elts.quick_push (elt);
- }
- /* The number of steps to add to the initial values. */
- unsigned mul_elt = (ivn*const_nunits + eltn) / group_size;
- mul_elts.quick_push (SCALAR_FLOAT_TYPE_P (stept)
- ? build_real_from_wide (stept, mul_elt,
- UNSIGNED)
- : build_int_cstu (stept, mul_elt));
- }
- vec_step = gimple_build_vector (&init_stmts, &step_elts);
- step_mul = gimple_build_vector (&init_stmts, &mul_elts);
+ /* The scalar steps of the IVs. */
+ tree elt = steps[(ivn*const_nunits + eltn) % group_size];
+ elt = gimple_convert (&init_stmts, TREE_TYPE (step_vectype), elt);
+ step_elts.quick_push (elt);
if (!init_node)
- vec_init = gimple_build_vector (&init_stmts, &init_elts);
- }
- else
- {
- if (init_node)
- ;
- else if (INTEGRAL_TYPE_P (TREE_TYPE (steps[0])))
- {
- new_name = gimple_convert (&init_stmts, stept, inits[0]);
- /* Build the initial value directly as a VEC_SERIES_EXPR. */
- vec_init = gimple_build (&init_stmts, VEC_SERIES_EXPR,
- step_vectype, new_name, steps[0]);
- if (!useless_type_conversion_p (vectype, step_vectype))
- vec_init = gimple_build (&init_stmts, VIEW_CONVERT_EXPR,
- vectype, vec_init);
- }
- else
{
- /* Build:
- [base, base, base, ...]
- + (vectype) [0, 1, 2, ...] * [step, step, step, ...]. */
- gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (steps[0])));
- gcc_assert (flag_associative_math);
- gcc_assert (index_vectype != NULL_TREE);
-
- tree index = build_index_vector (index_vectype, 0, 1);
- new_name = gimple_convert (&init_stmts, TREE_TYPE (steps[0]),
- inits[0]);
- tree base_vec = gimple_build_vector_from_val (&init_stmts,
- step_vectype,
- new_name);
- tree step_vec = gimple_build_vector_from_val (&init_stmts,
- step_vectype,
- steps[0]);
- vec_init = gimple_build (&init_stmts, FLOAT_EXPR,
- step_vectype, index);
- vec_init = gimple_build (&init_stmts, MULT_EXPR,
- step_vectype, vec_init, step_vec);
- vec_init = gimple_build (&init_stmts, PLUS_EXPR,
- step_vectype, vec_init, base_vec);
- if (!useless_type_conversion_p (vectype, step_vectype))
- vec_init = gimple_build (&init_stmts, VIEW_CONVERT_EXPR,
- vectype, vec_init);
+ /* The scalar inits of the IVs if not vectorized. */
+ elt = inits[(ivn*const_nunits + eltn) % group_size];
+ if (!useless_type_conversion_p (TREE_TYPE (vectype),
+ TREE_TYPE (elt)))
+ elt = gimple_build (&init_stmts, VIEW_CONVERT_EXPR,
+ TREE_TYPE (vectype), elt);
+ init_elts.quick_push (elt);
}
- /* iv_loop is nested in the loop to be vectorized. Generate:
- vec_step = [S, S, S, S] */
- t = unshare_expr (steps[0]);
- gcc_assert (CONSTANT_CLASS_P (t)
- || TREE_CODE (t) == SSA_NAME);
- vec_step = gimple_build_vector_from_val (&init_stmts,
- step_vectype, t);
- }
- vec_steps.safe_push (vec_step);
- if (peel_mul)
- {
- if (!step_mul)
- step_mul = peel_mul;
- else
- step_mul = gimple_build (&init_stmts,
- MINUS_EXPR, step_vectype,
- step_mul, peel_mul);
- }
-
- /* Create the induction-phi that defines the induction-operand. */
- vec_dest = vect_get_new_vect_var (vectype, vect_simple_var,
- "vec_iv_");
- induction_phi = create_phi_node (vec_dest, iv_loop->header);
- induc_def = PHI_RESULT (induction_phi);
-
- /* Create the iv update inside the loop */
- tree up = vec_step;
- if (lupdate_mul)
- {
- if (LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo))
- {
- /* When we're using loop_len produced by SELEC_VL, the
- non-final iterations are not always processing VF
- elements. So vectorize induction variable instead of
-
- _21 = vect_vec_iv_.6_22 + { VF, ... };
-
- We should generate:
-
- _35 = .SELECT_VL (ivtmp_33, VF);
- vect_cst__22 = [vec_duplicate_expr] _35;
- _21 = vect_vec_iv_.6_22 + vect_cst__22; */
- vec_loop_lens *lens = &LOOP_VINFO_LENS (loop_vinfo);
- tree len = vect_get_loop_len (loop_vinfo, NULL, lens, 1,
- vectype, 0, 0);
- if (SCALAR_FLOAT_TYPE_P (stept))
- expr = gimple_build (&stmts, FLOAT_EXPR, stept, len);
- else
- expr = gimple_convert (&stmts, stept, len);
- lupdate_mul = gimple_build_vector_from_val (&stmts,
- step_vectype,
- expr);
- up = gimple_build (&stmts, MULT_EXPR,
- step_vectype, vec_step, lupdate_mul);
- }
- else
- up = gimple_build (&init_stmts,
- MULT_EXPR, step_vectype,
- vec_step, lupdate_mul);
- }
- vec_def = gimple_convert (&stmts, step_vectype, induc_def);
- vec_def = gimple_build (&stmts,
- PLUS_EXPR, step_vectype, vec_def, up);
- vec_def = gimple_convert (&stmts, vectype, vec_def);
- insert_iv_increment (&incr_si, insert_after, stmts);
- add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
- UNKNOWN_LOCATION);
-
- if (init_node)
- vec_init = vect_get_slp_vect_def (init_node, ivn);
- if (!nested_in_vect_loop
- && step_mul
- && !integer_zerop (step_mul))
- {
- gcc_assert (invariant);
- vec_def = gimple_convert (&init_stmts, step_vectype, vec_init);
- up = gimple_build (&init_stmts, MULT_EXPR, step_vectype,
- vec_step, step_mul);
- vec_def = gimple_build (&init_stmts, PLUS_EXPR, step_vectype,
- vec_def, up);
- vec_init = gimple_convert (&init_stmts, vectype, vec_def);
- }
-
- /* Set the arguments of the phi node: */
- add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
-
- slp_node->push_vec_def (induction_phi);
- }
- if (!nested_in_vect_loop)
- {
- /* Fill up to the number of vectors we need for the whole group. */
- if (nunits.is_constant (&const_nunits))
- nivs = least_common_multiple (group_size,
- const_nunits) / const_nunits;
- else
- nivs = 1;
- vec_steps.reserve (nivs-ivn);
- for (; ivn < nivs; ++ivn)
- {
- slp_node->push_vec_def (SLP_TREE_VEC_DEFS (slp_node)[0]);
- vec_steps.quick_push (vec_steps[0]);
+ /* The number of steps to add to the initial values. */
+ unsigned mul_elt = (ivn*const_nunits + eltn) / group_size;
+ mul_elts.quick_push (SCALAR_FLOAT_TYPE_P (stept)
+ ? build_real_from_wide (stept, mul_elt,
+ UNSIGNED)
+ : build_int_cstu (stept, mul_elt));
}
+ vec_step = gimple_build_vector (&init_stmts, &step_elts);
+ step_mul = gimple_build_vector (&init_stmts, &mul_elts);
+ if (!init_node)
+ vec_init = gimple_build_vector (&init_stmts, &init_elts);
}
-
- /* Re-use IVs when we can. We are generating further vector
- stmts by adding VF' * stride to the IVs generated above. */
- if (ivn < nvects)
+ else
{
- if (nunits.is_constant (&const_nunits))
+ if (init_node)
+ ;
+ else if (INTEGRAL_TYPE_P (TREE_TYPE (steps[0])))
{
- unsigned vfp = (least_common_multiple (group_size, const_nunits)
- / group_size);
- lupdate_mul
- = build_vector_from_val (step_vectype,
- SCALAR_FLOAT_TYPE_P (stept)
- ? build_real_from_wide (stept,
- vfp, UNSIGNED)
- : build_int_cstu (stept, vfp));
+ new_name = gimple_convert (&init_stmts, stept, inits[0]);
+ /* Build the initial value directly as a VEC_SERIES_EXPR. */
+ vec_init = gimple_build (&init_stmts, VEC_SERIES_EXPR,
+ step_vectype, new_name, steps[0]);
+ if (!useless_type_conversion_p (vectype, step_vectype))
+ vec_init = gimple_build (&init_stmts, VIEW_CONVERT_EXPR,
+ vectype, vec_init);
}
else
{
- if (SCALAR_FLOAT_TYPE_P (stept))
- {
- tree tem = build_int_cst (integer_type_node, nunits);
- lupdate_mul = gimple_build (&init_stmts, FLOAT_EXPR,
- stept, tem);
- }
- else
- lupdate_mul = build_int_cst (stept, nunits);
- lupdate_mul = gimple_build_vector_from_val (&init_stmts,
- step_vectype,
- lupdate_mul);
- }
- for (; ivn < nvects; ++ivn)
- {
- gimple *iv
- = SSA_NAME_DEF_STMT (SLP_TREE_VEC_DEFS (slp_node)[ivn - nivs]);
- tree def = gimple_get_lhs (iv);
- if (ivn < 2*nivs)
- vec_steps[ivn - nivs]
- = gimple_build (&init_stmts, MULT_EXPR, step_vectype,
- vec_steps[ivn - nivs], lupdate_mul);
- gimple_seq stmts = NULL;
- def = gimple_convert (&stmts, step_vectype, def);
- def = gimple_build (&stmts, PLUS_EXPR, step_vectype,
- def, vec_steps[ivn % nivs]);
- def = gimple_convert (&stmts, vectype, def);
- if (gimple_code (iv) == GIMPLE_PHI)
- gsi_insert_seq_before (&si, stmts, GSI_SAME_STMT);
- else
- {
- gimple_stmt_iterator tgsi = gsi_for_stmt (iv);
- gsi_insert_seq_after (&tgsi, stmts, GSI_CONTINUE_LINKING);
- }
- slp_node->push_vec_def (def);
+ /* Build:
+ [base, base, base, ...]
+ + (vectype) [0, 1, 2, ...] * [step, step, step, ...]. */
+ gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (steps[0])));
+ gcc_assert (flag_associative_math);
+ gcc_assert (index_vectype != NULL_TREE);
+
+ tree index = build_index_vector (index_vectype, 0, 1);
+ new_name = gimple_convert (&init_stmts, TREE_TYPE (steps[0]),
+ inits[0]);
+ tree base_vec = gimple_build_vector_from_val (&init_stmts,
+ step_vectype,
+ new_name);
+ tree step_vec = gimple_build_vector_from_val (&init_stmts,
+ step_vectype,
+ steps[0]);
+ vec_init = gimple_build (&init_stmts, FLOAT_EXPR,
+ step_vectype, index);
+ vec_init = gimple_build (&init_stmts, MULT_EXPR,
+ step_vectype, vec_init, step_vec);
+ vec_init = gimple_build (&init_stmts, PLUS_EXPR,
+ step_vectype, vec_init, base_vec);
+ if (!useless_type_conversion_p (vectype, step_vectype))
+ vec_init = gimple_build (&init_stmts, VIEW_CONVERT_EXPR,
+ vectype, vec_init);
}
+ /* iv_loop is nested in the loop to be vectorized. Generate:
+ vec_step = [S, S, S, S] */
+ t = unshare_expr (steps[0]);
+ gcc_assert (CONSTANT_CLASS_P (t)
+ || TREE_CODE (t) == SSA_NAME);
+ vec_step = gimple_build_vector_from_val (&init_stmts,
+ step_vectype, t);
+ }
+ vec_steps.safe_push (vec_step);
+ if (peel_mul)
+ {
+ if (!step_mul)
+ step_mul = peel_mul;
+ else
+ step_mul = gimple_build (&init_stmts,
+ MINUS_EXPR, step_vectype,
+ step_mul, peel_mul);
}
- new_bb = gsi_insert_seq_on_edge_immediate (pe, init_stmts);
- gcc_assert (!new_bb);
+ /* Create the induction-phi that defines the induction-operand. */
+ vec_dest = vect_get_new_vect_var (vectype, vect_simple_var,
+ "vec_iv_");
+ induction_phi = create_phi_node (vec_dest, iv_loop->header);
+ induc_def = PHI_RESULT (induction_phi);
- return true;
- }
-
- tree init_expr = vect_phi_initial_value (phi);
-
- gimple_seq stmts = NULL;
- if (!nested_in_vect_loop)
- {
- /* Convert the initial value to the IV update type. */
- tree new_type = TREE_TYPE (step_expr);
- init_expr = gimple_convert (&stmts, new_type, init_expr);
-
- /* If we are using the loop mask to "peel" for alignment then we need
- to adjust the start value here. */
- tree skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
- if (skip_niters != NULL_TREE)
+ /* Create the iv update inside the loop */
+ tree up = vec_step;
+ if (lupdate_mul)
{
- if (FLOAT_TYPE_P (vectype))
- skip_niters = gimple_build (&stmts, FLOAT_EXPR, new_type,
- skip_niters);
- else
- skip_niters = gimple_convert (&stmts, new_type, skip_niters);
- tree skip_step = gimple_build (&stmts, MULT_EXPR, new_type,
- skip_niters, step_expr);
- init_expr = gimple_build (&stmts, MINUS_EXPR, new_type,
- init_expr, skip_step);
- }
- }
+ if (LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo))
+ {
+	      /* When we're using loop_len produced by SELECT_VL, the
+ non-final iterations are not always processing VF
+ elements. So vectorize induction variable instead of
- if (stmts)
- {
- new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
- gcc_assert (!new_bb);
- }
+ _21 = vect_vec_iv_.6_22 + { VF, ... };
- /* Create the vector that holds the initial_value of the induction. */
- if (nested_in_vect_loop)
- {
- /* iv_loop is nested in the loop to be vectorized. init_expr had already
- been created during vectorization of previous stmts. We obtain it
- from the STMT_VINFO_VEC_STMT of the defining stmt. */
- auto_vec<tree> vec_inits;
- vect_get_vec_defs_for_operand (loop_vinfo, stmt_info, 1,
- init_expr, &vec_inits);
- vec_init = vec_inits[0];
- /* If the initial value is not of proper type, convert it. */
- if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
- {
- new_stmt
- = gimple_build_assign (vect_get_new_ssa_name (vectype,
- vect_simple_var,
- "vec_iv_"),
- VIEW_CONVERT_EXPR,
- build1 (VIEW_CONVERT_EXPR, vectype,
- vec_init));
- vec_init = gimple_assign_lhs (new_stmt);
- new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
- new_stmt);
- gcc_assert (!new_bb);
- }
- }
- else
- {
- /* iv_loop is the loop to be vectorized. Create:
- vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
- stmts = NULL;
- new_name = gimple_convert (&stmts, TREE_TYPE (step_expr), init_expr);
+ We should generate:
- unsigned HOST_WIDE_INT const_nunits;
- if (nunits.is_constant (&const_nunits))
- {
- tree_vector_builder elts (step_vectype, const_nunits, 1);
- elts.quick_push (new_name);
- for (i = 1; i < const_nunits; i++)
- {
- /* Create: new_name_i = new_name + step_expr */
- new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name),
- new_name, step_expr);
- elts.quick_push (new_name);
+ _35 = .SELECT_VL (ivtmp_33, VF);
+ vect_cst__22 = [vec_duplicate_expr] _35;
+ _21 = vect_vec_iv_.6_22 + vect_cst__22; */
+ vec_loop_lens *lens = &LOOP_VINFO_LENS (loop_vinfo);
+ tree len = vect_get_loop_len (loop_vinfo, NULL, lens, 1,
+ vectype, 0, 0);
+ if (SCALAR_FLOAT_TYPE_P (stept))
+ expr = gimple_build (&stmts, FLOAT_EXPR, stept, len);
+ else
+ expr = gimple_convert (&stmts, stept, len);
+ lupdate_mul = gimple_build_vector_from_val (&stmts, step_vectype,
+ expr);
+ up = gimple_build (&stmts, MULT_EXPR,
+ step_vectype, vec_step, lupdate_mul);
}
- /* Create a vector from [new_name_0, new_name_1, ...,
- new_name_nunits-1] */
- vec_init = gimple_build_vector (&stmts, &elts);
- }
- else if (INTEGRAL_TYPE_P (TREE_TYPE (step_expr)))
- /* Build the initial value directly from a VEC_SERIES_EXPR. */
- vec_init = gimple_build (&stmts, VEC_SERIES_EXPR, step_vectype,
- new_name, step_expr);
- else
- {
- /* Build:
- [base, base, base, ...]
- + (vectype) [0, 1, 2, ...] * [step, step, step, ...]. */
- gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)));
- gcc_assert (flag_associative_math);
- gcc_assert (index_vectype != NULL_TREE);
-
- tree index = build_index_vector (index_vectype, 0, 1);
- tree base_vec = gimple_build_vector_from_val (&stmts, step_vectype,
- new_name);
- tree step_vec = gimple_build_vector_from_val (&stmts, step_vectype,
- step_expr);
- vec_init = gimple_build (&stmts, FLOAT_EXPR, step_vectype, index);
- vec_init = gimple_build (&stmts, MULT_EXPR, step_vectype,
- vec_init, step_vec);
- vec_init = gimple_build (&stmts, PLUS_EXPR, step_vectype,
- vec_init, base_vec);
- }
- vec_init = gimple_convert (&stmts, vectype, vec_init);
+ else
+ up = gimple_build (&init_stmts, MULT_EXPR, step_vectype,
+ vec_step, lupdate_mul);
+ }
+ vec_def = gimple_convert (&stmts, step_vectype, induc_def);
+ vec_def = gimple_build (&stmts, PLUS_EXPR, step_vectype, vec_def, up);
+ vec_def = gimple_convert (&stmts, vectype, vec_def);
+ insert_iv_increment (&incr_si, insert_after, stmts);
+ add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
+ UNKNOWN_LOCATION);
- if (stmts)
+ if (init_node)
+ vec_init = vect_get_slp_vect_def (init_node, ivn);
+ if (!nested_in_vect_loop
+ && step_mul
+ && !integer_zerop (step_mul))
{
- new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
- gcc_assert (!new_bb);
+ gcc_assert (invariant);
+ vec_def = gimple_convert (&init_stmts, step_vectype, vec_init);
+ up = gimple_build (&init_stmts, MULT_EXPR, step_vectype,
+ vec_step, step_mul);
+ vec_def = gimple_build (&init_stmts, PLUS_EXPR, step_vectype,
+ vec_def, up);
+ vec_init = gimple_convert (&init_stmts, vectype, vec_def);
}
- }
-
-
- /* Create the vector that holds the step of the induction. */
- gimple_stmt_iterator *step_iv_si = NULL;
- if (nested_in_vect_loop)
- /* iv_loop is nested in the loop to be vectorized. Generate:
- vec_step = [S, S, S, S] */
- new_name = step_expr;
- else if (LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo))
- {
- /* When we're using loop_len produced by SELEC_VL, the non-final
- iterations are not always processing VF elements. So vectorize
- induction variable instead of
- _21 = vect_vec_iv_.6_22 + { VF, ... };
+ /* Set the arguments of the phi node: */
+ add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
- We should generate:
-
- _35 = .SELECT_VL (ivtmp_33, VF);
- vect_cst__22 = [vec_duplicate_expr] _35;
- _21 = vect_vec_iv_.6_22 + vect_cst__22; */
- gcc_assert (!slp_node);
- gimple_seq seq = NULL;
- vec_loop_lens *lens = &LOOP_VINFO_LENS (loop_vinfo);
- tree len = vect_get_loop_len (loop_vinfo, NULL, lens, 1, vectype, 0, 0);
- expr = force_gimple_operand (fold_convert (TREE_TYPE (step_expr),
- unshare_expr (len)),
- &seq, true, NULL_TREE);
- new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr), expr,
- step_expr);
- gsi_insert_seq_before (&si, seq, GSI_SAME_STMT);
- step_iv_si = &si;
+ slp_node->push_vec_def (induction_phi);
}
- else
+ if (!nested_in_vect_loop)
{
- /* iv_loop is the loop to be vectorized. Generate:
- vec_step = [VF*S, VF*S, VF*S, VF*S] */
- gimple_seq seq = NULL;
- if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
- {
- expr = build_int_cst (integer_type_node, vf);
- expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
- }
+ /* Fill up to the number of vectors we need for the whole group. */
+ if (nunits.is_constant (&const_nunits))
+ nivs = least_common_multiple (group_size, const_nunits) / const_nunits;
else
- expr = build_int_cst (TREE_TYPE (step_expr), vf);
- new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
- expr, step_expr);
- if (seq)
+ nivs = 1;
+ vec_steps.reserve (nivs-ivn);
+ for (; ivn < nivs; ++ivn)
{
- new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
- gcc_assert (!new_bb);
+ slp_node->push_vec_def (SLP_TREE_VEC_DEFS (slp_node)[0]);
+ vec_steps.quick_push (vec_steps[0]);
}
}
- t = unshare_expr (new_name);
- gcc_assert (CONSTANT_CLASS_P (new_name)
- || TREE_CODE (new_name) == SSA_NAME);
- new_vec = build_vector_from_val (step_vectype, t);
- vec_step = vect_init_vector (loop_vinfo, stmt_info,
- new_vec, step_vectype, step_iv_si);
-
-
- /* Create the following def-use cycle:
- loop prolog:
- vec_init = ...
- vec_step = ...
- loop:
- vec_iv = PHI <vec_init, vec_loop>
- ...
- STMT
- ...
- vec_loop = vec_iv + vec_step; */
-
- /* Create the induction-phi that defines the induction-operand. */
- vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
- induction_phi = create_phi_node (vec_dest, iv_loop->header);
- induc_def = PHI_RESULT (induction_phi);
-
- /* Create the iv update inside the loop */
- stmts = NULL;
- vec_def = gimple_convert (&stmts, step_vectype, induc_def);
- vec_def = gimple_build (&stmts, PLUS_EXPR, step_vectype, vec_def, vec_step);
- vec_def = gimple_convert (&stmts, vectype, vec_def);
- gsi_insert_seq_before (&si, stmts, GSI_SAME_STMT);
- new_stmt = SSA_NAME_DEF_STMT (vec_def);
-
- /* Set the arguments of the phi node: */
- add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
- add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
- UNKNOWN_LOCATION);
-
- STMT_VINFO_VEC_STMTS (stmt_info).safe_push (induction_phi);
- *vec_stmt = induction_phi;
-
- /* In case that vectorization factor (VF) is bigger than the number
- of elements that we can fit in a vectype (nunits), we have to generate
- more than one vector stmt - i.e - we need to "unroll" the
- vector stmt by a factor VF/nunits. For more details see documentation
- in vectorizable_operation. */
-
- if (ncopies > 1)
+ /* Re-use IVs when we can. We are generating further vector
+ stmts by adding VF' * stride to the IVs generated above. */
+ if (ivn < nvects)
{
- gimple_seq seq = NULL;
- /* FORNOW. This restriction should be relaxed. */
- gcc_assert (!nested_in_vect_loop);
- /* We expect LOOP_VINFO_USING_SELECT_VL_P to be false if ncopies > 1. */
- gcc_assert (!LOOP_VINFO_USING_SELECT_VL_P (loop_vinfo));
-
- /* Create the vector that holds the step of the induction. */
- if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
+ if (nunits.is_constant (&const_nunits))
{
- expr = build_int_cst (integer_type_node, nunits);
- expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
+ unsigned vfp = (least_common_multiple (group_size, const_nunits)
+ / group_size);
+ lupdate_mul
+ = build_vector_from_val (step_vectype,
+ SCALAR_FLOAT_TYPE_P (stept)
+ ? build_real_from_wide (stept,
+ vfp, UNSIGNED)
+ : build_int_cstu (stept, vfp));
}
else
- expr = build_int_cst (TREE_TYPE (step_expr), nunits);
- new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
- expr, step_expr);
- if (seq)
{
- new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
- gcc_assert (!new_bb);
- }
-
- t = unshare_expr (new_name);
- gcc_assert (CONSTANT_CLASS_P (new_name)
- || TREE_CODE (new_name) == SSA_NAME);
- new_vec = build_vector_from_val (step_vectype, t);
- vec_step = vect_init_vector (loop_vinfo, stmt_info,
- new_vec, step_vectype, NULL);
-
- vec_def = induc_def;
- for (i = 1; i < ncopies + 1; i++)
- {
- /* vec_i = vec_prev + vec_step */
- gimple_seq stmts = NULL;
- vec_def = gimple_convert (&stmts, step_vectype, vec_def);
- vec_def = gimple_build (&stmts,
- PLUS_EXPR, step_vectype, vec_def, vec_step);
- vec_def = gimple_convert (&stmts, vectype, vec_def);
-
- gsi_insert_seq_before (&si, stmts, GSI_SAME_STMT);
- if (i < ncopies)
+ if (SCALAR_FLOAT_TYPE_P (stept))
{
- new_stmt = SSA_NAME_DEF_STMT (vec_def);
- STMT_VINFO_VEC_STMTS (stmt_info).safe_push (new_stmt);
+ tree tem = build_int_cst (integer_type_node, nunits);
+ lupdate_mul = gimple_build (&init_stmts, FLOAT_EXPR, stept, tem);
}
else
+ lupdate_mul = build_int_cst (stept, nunits);
+ lupdate_mul = gimple_build_vector_from_val (&init_stmts, step_vectype,
+ lupdate_mul);
+ }
+ for (; ivn < nvects; ++ivn)
+ {
+ gimple *iv
+ = SSA_NAME_DEF_STMT (SLP_TREE_VEC_DEFS (slp_node)[ivn - nivs]);
+ tree def = gimple_get_lhs (iv);
+ if (ivn < 2*nivs)
+ vec_steps[ivn - nivs]
+ = gimple_build (&init_stmts, MULT_EXPR, step_vectype,
+ vec_steps[ivn - nivs], lupdate_mul);
+ gimple_seq stmts = NULL;
+ def = gimple_convert (&stmts, step_vectype, def);
+ def = gimple_build (&stmts, PLUS_EXPR, step_vectype,
+ def, vec_steps[ivn % nivs]);
+ def = gimple_convert (&stmts, vectype, def);
+ if (gimple_code (iv) == GIMPLE_PHI)
+ gsi_insert_seq_before (&si, stmts, GSI_SAME_STMT);
+ else
{
- /* vec_1 = vec_iv + (VF/n * S)
- vec_2 = vec_1 + (VF/n * S)
- ...
- vec_n = vec_prev + (VF/n * S) = vec_iv + VF * S = vec_loop
-
- vec_n is used as vec_loop to save the large step register and
- related operations. */
- add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
- UNKNOWN_LOCATION);
+ gimple_stmt_iterator tgsi = gsi_for_stmt (iv);
+ gsi_insert_seq_after (&tgsi, stmts, GSI_CONTINUE_LINKING);
}
+ slp_node->push_vec_def (def);
}
}
- if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "transform induction: created def-use cycle: %G%G",
- (gimple *) induction_phi, SSA_NAME_DEF_STMT (vec_def));
+ new_bb = gsi_insert_seq_on_edge_immediate (pe, init_stmts);
+ gcc_assert (!new_bb);
return true;
}
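
The rewritten induction code above still builds the initial vector IV from the formula quoted in its comments, [base, base, ...] + [0, 1, 2, ...] * [step, step, ...], either element by element for constant nunits or via VEC_SERIES_EXPR / FLOAT_EXPR otherwise.  A per-lane C sketch of that formula; the names are placeholders, not vectorizer data structures:

/* Lane i of the initial vector IV starts i steps past the base.  */
static void
build_initial_iv (double *lane, int nunits, double base, double step)
{
  for (int i = 0; i < nunits; i++)
    lane[i] = base + (double) i * step;
}

In the simplest single-IV case each vector loop iteration then adds VF * step to every lane (the lupdate_mul factor above), so after j iterations lane i holds base + (j * VF + i) * step.
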
diff --git a/libgcobol/libgcobol.cc b/libgcobol/libgcobol.cc
index 2de87cb..56b1a7b 100644
--- a/libgcobol/libgcobol.cc
+++ b/libgcobol/libgcobol.cc
@@ -75,6 +75,11 @@
#include "exceptl.h"
+/* BSD extension. */
+#if !defined(LOG_PERROR)
+#define LOG_PERROR 0
+#endif
+
#if !defined (HAVE_STRFROMF32)
# if __FLT_MANT_DIG__ == 24 && __FLT_MAX_EXP__ == 128
static int
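
LOG_PERROR is a BSD extension to syslog(3) that echoes each logged message to stderr; the new guard above turns it into a harmless no-op flag on systems whose <syslog.h> does not define it.  A small, self-contained usage sketch with standard openlog/syslog calls, nothing libgcobol-specific:

#include <syslog.h>

#if !defined(LOG_PERROR)
#define LOG_PERROR 0   /* same fallback as above: the flag becomes a no-op */
#endif

int
main (void)
{
  /* Where the extension exists the message also appears on stderr;
     elsewhere it only goes to the system log.  */
  openlog ("demo", LOG_PID | LOG_PERROR, LOG_USER);
  syslog (LOG_INFO, "hello from %s", "demo");
  closelog ();
  return 0;
}
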
diff --git a/libgfortran/generated/findloc2_s1.c b/libgfortran/generated/findloc2_s1.c
index 0dcfcc5..eeea821 100644
--- a/libgfortran/generated/findloc2_s1.c
+++ b/libgfortran/generated/findloc2_s1.c
@@ -49,7 +49,7 @@ findloc2_s1 (gfc_array_s1 * const restrict array, const GFC_UINTEGER_1 * restric
if (back)
{
src = array->base_addr + (extent - 1) * sstride;
- for (i = extent; i >= 0; i--)
+ for (i = extent; i > 0; i--)
{
if (compare_string (len_array, (char *) src, len_value, (char *) value) == 0)
return i;
@@ -112,7 +112,7 @@ mfindloc2_s1 (gfc_array_s1 * const restrict array,
{
src = array->base_addr + (extent - 1) * sstride;
mbase += (extent - 1) * mstride;
- for (i = extent; i >= 0; i--)
+ for (i = extent; i > 0; i--)
{
if (*mbase && (compare_string (len_array, (char *) src, len_value, (char *) value) == 0))
return i;
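
The two hunks above (and the identical ones in findloc2_s4.c below) fix the backward-search loops: the result index is 1-based, so the loop must stop at i == 1, and the old "i >= 0" bound ran one extra iteration whose comparison read before the start of the array.  A stand-alone C sketch of the corrected pattern; it is illustrative, not the generated libgfortran code:

/* Backward linear search returning a 1-based position, 0 if absent.
   'extent' is the element count; note the "i > 0" bound.  */
static long
findloc_back (const int *base, long extent, int value)
{
  const int *src = base + (extent - 1);
  for (long i = extent; i > 0; i--)
    {
      if (*src == value)
        return i;
      src--;
    }
  return 0;   /* value not present */
}

With the old bound the final iteration dereferenced one element before the array, so the visible symptom was an out-of-bounds read rather than a wrong Fortran result.
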
diff --git a/libgfortran/generated/findloc2_s4.c b/libgfortran/generated/findloc2_s4.c
index 3ac0d00..a336e34 100644
--- a/libgfortran/generated/findloc2_s4.c
+++ b/libgfortran/generated/findloc2_s4.c
@@ -49,7 +49,7 @@ findloc2_s4 (gfc_array_s4 * const restrict array, const GFC_UINTEGER_4 * restric
if (back)
{
src = array->base_addr + (extent - 1) * sstride;
- for (i = extent; i >= 0; i--)
+ for (i = extent; i > 0; i--)
{
if (compare_string_char4 (len_array, src, len_value, value) == 0)
return i;
@@ -112,7 +112,7 @@ mfindloc2_s4 (gfc_array_s4 * const restrict array,
{
src = array->base_addr + (extent - 1) * sstride;
mbase += (extent - 1) * mstride;
- for (i = extent; i >= 0; i--)
+ for (i = extent; i > 0; i--)
{
if (*mbase && (compare_string_char4 (len_array, src, len_value, value) == 0))
return i;
diff --git a/libgfortran/generated/maxloc1_16_s1.c b/libgfortran/generated/maxloc1_16_s1.c
index cbab817..21ea81a 100644
--- a/libgfortran/generated/maxloc1_16_s1.c
+++ b/libgfortran/generated/maxloc1_16_s1.c
@@ -457,7 +457,7 @@ smaxloc1_16_s1 (gfc_array_i16 * const restrict retarray,
for (n = 0; n < dim; n++)
{
- extent[n] = GFC_DESCRIPTOR_EXTENT(array,n) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);
if (extent[n] <= 0)
extent[n] = 0;
@@ -465,8 +465,7 @@ smaxloc1_16_s1 (gfc_array_i16 * const restrict retarray,
for (n = dim; n < rank; n++)
{
- extent[n] =
- GFC_DESCRIPTOR_EXTENT(array,n + 1) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n + 1);
if (extent[n] <= 0)
extent[n] = 0;
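
In the smaxloc1_*/sminloc1_* wrappers (this file and the similar generated files below), the extents describe how many elements each dimension has; the old code scaled them by the character length, inflating the iteration space and the shape used for the result array.  A tiny sketch of the distinction, with hypothetical descriptor fields rather than libgfortran's real macros:

/* Illustrative only: extents count elements, independent of how many
   bytes each element occupies.  */
struct demo_desc { long extent[7]; long elem_size; };

static long
dim_elements (const struct demo_desc *d, int n)
{
  return d->extent[n];                  /* what the fixed code uses      */
}

static long
dim_bytes (const struct demo_desc *d, int n)
{
  return d->extent[n] * d->elem_size;   /* where a length factor belongs */
}
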
diff --git a/libgfortran/generated/maxloc1_16_s4.c b/libgfortran/generated/maxloc1_16_s4.c
index d7d8893..47e14c1 100644
--- a/libgfortran/generated/maxloc1_16_s4.c
+++ b/libgfortran/generated/maxloc1_16_s4.c
@@ -457,7 +457,7 @@ smaxloc1_16_s4 (gfc_array_i16 * const restrict retarray,
for (n = 0; n < dim; n++)
{
- extent[n] = GFC_DESCRIPTOR_EXTENT(array,n) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);
if (extent[n] <= 0)
extent[n] = 0;
@@ -465,8 +465,7 @@ smaxloc1_16_s4 (gfc_array_i16 * const restrict retarray,
for (n = dim; n < rank; n++)
{
- extent[n] =
- GFC_DESCRIPTOR_EXTENT(array,n + 1) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n + 1);
if (extent[n] <= 0)
extent[n] = 0;
diff --git a/libgfortran/generated/maxloc1_4_s1.c b/libgfortran/generated/maxloc1_4_s1.c
index 51740ee..66ee8d0 100644
--- a/libgfortran/generated/maxloc1_4_s1.c
+++ b/libgfortran/generated/maxloc1_4_s1.c
@@ -457,7 +457,7 @@ smaxloc1_4_s1 (gfc_array_i4 * const restrict retarray,
for (n = 0; n < dim; n++)
{
- extent[n] = GFC_DESCRIPTOR_EXTENT(array,n) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);
if (extent[n] <= 0)
extent[n] = 0;
@@ -465,8 +465,7 @@ smaxloc1_4_s1 (gfc_array_i4 * const restrict retarray,
for (n = dim; n < rank; n++)
{
- extent[n] =
- GFC_DESCRIPTOR_EXTENT(array,n + 1) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n + 1);
if (extent[n] <= 0)
extent[n] = 0;
diff --git a/libgfortran/generated/maxloc1_4_s4.c b/libgfortran/generated/maxloc1_4_s4.c
index cf04d6d..7d889c0 100644
--- a/libgfortran/generated/maxloc1_4_s4.c
+++ b/libgfortran/generated/maxloc1_4_s4.c
@@ -457,7 +457,7 @@ smaxloc1_4_s4 (gfc_array_i4 * const restrict retarray,
for (n = 0; n < dim; n++)
{
- extent[n] = GFC_DESCRIPTOR_EXTENT(array,n) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);
if (extent[n] <= 0)
extent[n] = 0;
@@ -465,8 +465,7 @@ smaxloc1_4_s4 (gfc_array_i4 * const restrict retarray,
for (n = dim; n < rank; n++)
{
- extent[n] =
- GFC_DESCRIPTOR_EXTENT(array,n + 1) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n + 1);
if (extent[n] <= 0)
extent[n] = 0;
diff --git a/libgfortran/generated/maxloc1_8_s1.c b/libgfortran/generated/maxloc1_8_s1.c
index a35e552..d4711e2 100644
--- a/libgfortran/generated/maxloc1_8_s1.c
+++ b/libgfortran/generated/maxloc1_8_s1.c
@@ -457,7 +457,7 @@ smaxloc1_8_s1 (gfc_array_i8 * const restrict retarray,
for (n = 0; n < dim; n++)
{
- extent[n] = GFC_DESCRIPTOR_EXTENT(array,n) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);
if (extent[n] <= 0)
extent[n] = 0;
@@ -465,8 +465,7 @@ smaxloc1_8_s1 (gfc_array_i8 * const restrict retarray,
for (n = dim; n < rank; n++)
{
- extent[n] =
- GFC_DESCRIPTOR_EXTENT(array,n + 1) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n + 1);
if (extent[n] <= 0)
extent[n] = 0;
diff --git a/libgfortran/generated/maxloc1_8_s4.c b/libgfortran/generated/maxloc1_8_s4.c
index e264779..dea360e 100644
--- a/libgfortran/generated/maxloc1_8_s4.c
+++ b/libgfortran/generated/maxloc1_8_s4.c
@@ -457,7 +457,7 @@ smaxloc1_8_s4 (gfc_array_i8 * const restrict retarray,
for (n = 0; n < dim; n++)
{
- extent[n] = GFC_DESCRIPTOR_EXTENT(array,n) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);
if (extent[n] <= 0)
extent[n] = 0;
@@ -465,8 +465,7 @@ smaxloc1_8_s4 (gfc_array_i8 * const restrict retarray,
for (n = dim; n < rank; n++)
{
- extent[n] =
- GFC_DESCRIPTOR_EXTENT(array,n + 1) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n + 1);
if (extent[n] <= 0)
extent[n] = 0;
diff --git a/libgfortran/generated/maxloc2_16_s1.c b/libgfortran/generated/maxloc2_16_s1.c
index 6e860ee..d38d422 100644
--- a/libgfortran/generated/maxloc2_16_s1.c
+++ b/libgfortran/generated/maxloc2_16_s1.c
@@ -152,8 +152,8 @@ GFC_INTEGER_16
smaxloc2_16_s1 (gfc_array_s1 * const restrict array,
GFC_LOGICAL_4 *mask, GFC_LOGICAL_4 back, gfc_charlen_type len)
{
- if (mask)
- return maxloc2_16_s1 (array, len, back);
+ if (mask == NULL || *mask)
+ return maxloc2_16_s1 (array, back, len);
else
return 0;
}
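
This wrapper (and the identical maxloc2/minloc2 variants below) takes a scalar mask.  The old code returned a real result only when a mask pointer happened to be passed, ignored the value of that mask, returned 0 when the mask was absent, and handed "len" and "back" to the callee in the wrong order.  The fix treats an absent or .true. mask as "search everything", a present .false. mask as "nothing selected", and passes the arguments in the callee's order.  A stand-alone C sketch of the corrected pattern; the types and the callee are placeholders, not the libgfortran ABI:

/* Scalar-mask wrapper: delegate unless a present mask is false.  */
typedef int logical4;
typedef long charlen_t;

extern long full_maxloc (const void *array, logical4 back, charlen_t len);

long
scalar_mask_maxloc (const void *array, const logical4 *mask,
                    logical4 back, charlen_t len)
{
  if (mask == NULL || *mask)
    return full_maxloc (array, back, len);   /* note: back before len */
  return 0;
}
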
diff --git a/libgfortran/generated/maxloc2_16_s4.c b/libgfortran/generated/maxloc2_16_s4.c
index e4ac04c..09fdbf8 100644
--- a/libgfortran/generated/maxloc2_16_s4.c
+++ b/libgfortran/generated/maxloc2_16_s4.c
@@ -152,8 +152,8 @@ GFC_INTEGER_16
smaxloc2_16_s4 (gfc_array_s4 * const restrict array,
GFC_LOGICAL_4 *mask, GFC_LOGICAL_4 back, gfc_charlen_type len)
{
- if (mask)
- return maxloc2_16_s4 (array, len, back);
+ if (mask == NULL || *mask)
+ return maxloc2_16_s4 (array, back, len);
else
return 0;
}
diff --git a/libgfortran/generated/maxloc2_4_s1.c b/libgfortran/generated/maxloc2_4_s1.c
index 78a5012..0804f59 100644
--- a/libgfortran/generated/maxloc2_4_s1.c
+++ b/libgfortran/generated/maxloc2_4_s1.c
@@ -152,8 +152,8 @@ GFC_INTEGER_4
smaxloc2_4_s1 (gfc_array_s1 * const restrict array,
GFC_LOGICAL_4 *mask, GFC_LOGICAL_4 back, gfc_charlen_type len)
{
- if (mask)
- return maxloc2_4_s1 (array, len, back);
+ if (mask == NULL || *mask)
+ return maxloc2_4_s1 (array, back, len);
else
return 0;
}
diff --git a/libgfortran/generated/maxloc2_4_s4.c b/libgfortran/generated/maxloc2_4_s4.c
index 399dab7..6dac06e 100644
--- a/libgfortran/generated/maxloc2_4_s4.c
+++ b/libgfortran/generated/maxloc2_4_s4.c
@@ -152,8 +152,8 @@ GFC_INTEGER_4
smaxloc2_4_s4 (gfc_array_s4 * const restrict array,
GFC_LOGICAL_4 *mask, GFC_LOGICAL_4 back, gfc_charlen_type len)
{
- if (mask)
- return maxloc2_4_s4 (array, len, back);
+ if (mask == NULL || *mask)
+ return maxloc2_4_s4 (array, back, len);
else
return 0;
}
diff --git a/libgfortran/generated/maxloc2_8_s1.c b/libgfortran/generated/maxloc2_8_s1.c
index 9e1d36f9..5ced3c6 100644
--- a/libgfortran/generated/maxloc2_8_s1.c
+++ b/libgfortran/generated/maxloc2_8_s1.c
@@ -152,8 +152,8 @@ GFC_INTEGER_8
smaxloc2_8_s1 (gfc_array_s1 * const restrict array,
GFC_LOGICAL_4 *mask, GFC_LOGICAL_4 back, gfc_charlen_type len)
{
- if (mask)
- return maxloc2_8_s1 (array, len, back);
+ if (mask == NULL || *mask)
+ return maxloc2_8_s1 (array, back, len);
else
return 0;
}
diff --git a/libgfortran/generated/maxloc2_8_s4.c b/libgfortran/generated/maxloc2_8_s4.c
index a44c6f6..78ae1be 100644
--- a/libgfortran/generated/maxloc2_8_s4.c
+++ b/libgfortran/generated/maxloc2_8_s4.c
@@ -152,8 +152,8 @@ GFC_INTEGER_8
smaxloc2_8_s4 (gfc_array_s4 * const restrict array,
GFC_LOGICAL_4 *mask, GFC_LOGICAL_4 back, gfc_charlen_type len)
{
- if (mask)
- return maxloc2_8_s4 (array, len, back);
+ if (mask == NULL || *mask)
+ return maxloc2_8_s4 (array, back, len);
else
return 0;
}
diff --git a/libgfortran/generated/minloc1_16_s1.c b/libgfortran/generated/minloc1_16_s1.c
index 8228009..b654608 100644
--- a/libgfortran/generated/minloc1_16_s1.c
+++ b/libgfortran/generated/minloc1_16_s1.c
@@ -457,7 +457,7 @@ sminloc1_16_s1 (gfc_array_i16 * const restrict retarray,
for (n = 0; n < dim; n++)
{
- extent[n] = GFC_DESCRIPTOR_EXTENT(array,n) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);
if (extent[n] <= 0)
extent[n] = 0;
@@ -465,8 +465,7 @@ sminloc1_16_s1 (gfc_array_i16 * const restrict retarray,
for (n = dim; n < rank; n++)
{
- extent[n] =
- GFC_DESCRIPTOR_EXTENT(array,n + 1) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n + 1);
if (extent[n] <= 0)
extent[n] = 0;
diff --git a/libgfortran/generated/minloc1_16_s4.c b/libgfortran/generated/minloc1_16_s4.c
index e40bf54..2e709a7 100644
--- a/libgfortran/generated/minloc1_16_s4.c
+++ b/libgfortran/generated/minloc1_16_s4.c
@@ -457,7 +457,7 @@ sminloc1_16_s4 (gfc_array_i16 * const restrict retarray,
for (n = 0; n < dim; n++)
{
- extent[n] = GFC_DESCRIPTOR_EXTENT(array,n) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);
if (extent[n] <= 0)
extent[n] = 0;
@@ -465,8 +465,7 @@ sminloc1_16_s4 (gfc_array_i16 * const restrict retarray,
for (n = dim; n < rank; n++)
{
- extent[n] =
- GFC_DESCRIPTOR_EXTENT(array,n + 1) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n + 1);
if (extent[n] <= 0)
extent[n] = 0;
diff --git a/libgfortran/generated/minloc1_4_s1.c b/libgfortran/generated/minloc1_4_s1.c
index 199d254..61dad55 100644
--- a/libgfortran/generated/minloc1_4_s1.c
+++ b/libgfortran/generated/minloc1_4_s1.c
@@ -457,7 +457,7 @@ sminloc1_4_s1 (gfc_array_i4 * const restrict retarray,
for (n = 0; n < dim; n++)
{
- extent[n] = GFC_DESCRIPTOR_EXTENT(array,n) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);
if (extent[n] <= 0)
extent[n] = 0;
@@ -465,8 +465,7 @@ sminloc1_4_s1 (gfc_array_i4 * const restrict retarray,
for (n = dim; n < rank; n++)
{
- extent[n] =
- GFC_DESCRIPTOR_EXTENT(array,n + 1) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n + 1);
if (extent[n] <= 0)
extent[n] = 0;
diff --git a/libgfortran/generated/minloc1_4_s4.c b/libgfortran/generated/minloc1_4_s4.c
index 1f0174b..49c25d0 100644
--- a/libgfortran/generated/minloc1_4_s4.c
+++ b/libgfortran/generated/minloc1_4_s4.c
@@ -457,7 +457,7 @@ sminloc1_4_s4 (gfc_array_i4 * const restrict retarray,
for (n = 0; n < dim; n++)
{
- extent[n] = GFC_DESCRIPTOR_EXTENT(array,n) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);
if (extent[n] <= 0)
extent[n] = 0;
@@ -465,8 +465,7 @@ sminloc1_4_s4 (gfc_array_i4 * const restrict retarray,
for (n = dim; n < rank; n++)
{
- extent[n] =
- GFC_DESCRIPTOR_EXTENT(array,n + 1) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n + 1);
if (extent[n] <= 0)
extent[n] = 0;
diff --git a/libgfortran/generated/minloc1_8_s1.c b/libgfortran/generated/minloc1_8_s1.c
index 39bdb9b..c0ac6e6 100644
--- a/libgfortran/generated/minloc1_8_s1.c
+++ b/libgfortran/generated/minloc1_8_s1.c
@@ -457,7 +457,7 @@ sminloc1_8_s1 (gfc_array_i8 * const restrict retarray,
for (n = 0; n < dim; n++)
{
- extent[n] = GFC_DESCRIPTOR_EXTENT(array,n) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);
if (extent[n] <= 0)
extent[n] = 0;
@@ -465,8 +465,7 @@ sminloc1_8_s1 (gfc_array_i8 * const restrict retarray,
for (n = dim; n < rank; n++)
{
- extent[n] =
- GFC_DESCRIPTOR_EXTENT(array,n + 1) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n + 1);
if (extent[n] <= 0)
extent[n] = 0;
diff --git a/libgfortran/generated/minloc1_8_s4.c b/libgfortran/generated/minloc1_8_s4.c
index ed74ac9..29624d0 100644
--- a/libgfortran/generated/minloc1_8_s4.c
+++ b/libgfortran/generated/minloc1_8_s4.c
@@ -457,7 +457,7 @@ sminloc1_8_s4 (gfc_array_i8 * const restrict retarray,
for (n = 0; n < dim; n++)
{
- extent[n] = GFC_DESCRIPTOR_EXTENT(array,n) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);
if (extent[n] <= 0)
extent[n] = 0;
@@ -465,8 +465,7 @@ sminloc1_8_s4 (gfc_array_i8 * const restrict retarray,
for (n = dim; n < rank; n++)
{
- extent[n] =
- GFC_DESCRIPTOR_EXTENT(array,n + 1) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n + 1);
if (extent[n] <= 0)
extent[n] = 0;
diff --git a/libgfortran/generated/minloc2_16_s1.c b/libgfortran/generated/minloc2_16_s1.c
index 6381ad6..9b4a92d 100644
--- a/libgfortran/generated/minloc2_16_s1.c
+++ b/libgfortran/generated/minloc2_16_s1.c
@@ -154,8 +154,8 @@ GFC_INTEGER_16
sminloc2_16_s1 (gfc_array_s1 * const restrict array,
GFC_LOGICAL_4 *mask, GFC_LOGICAL_4 back, gfc_charlen_type len)
{
- if (mask)
- return minloc2_16_s1 (array, len, back);
+ if (mask == NULL || *mask)
+ return minloc2_16_s1 (array, back, len);
else
return 0;
}
diff --git a/libgfortran/generated/minloc2_16_s4.c b/libgfortran/generated/minloc2_16_s4.c
index 11011b7..eac46fa 100644
--- a/libgfortran/generated/minloc2_16_s4.c
+++ b/libgfortran/generated/minloc2_16_s4.c
@@ -154,8 +154,8 @@ GFC_INTEGER_16
sminloc2_16_s4 (gfc_array_s4 * const restrict array,
GFC_LOGICAL_4 *mask, GFC_LOGICAL_4 back, gfc_charlen_type len)
{
- if (mask)
- return minloc2_16_s4 (array, len, back);
+ if (mask == NULL || *mask)
+ return minloc2_16_s4 (array, back, len);
else
return 0;
}
diff --git a/libgfortran/generated/minloc2_4_s1.c b/libgfortran/generated/minloc2_4_s1.c
index 631484a..bb22f6c 100644
--- a/libgfortran/generated/minloc2_4_s1.c
+++ b/libgfortran/generated/minloc2_4_s1.c
@@ -154,8 +154,8 @@ GFC_INTEGER_4
sminloc2_4_s1 (gfc_array_s1 * const restrict array,
GFC_LOGICAL_4 *mask, GFC_LOGICAL_4 back, gfc_charlen_type len)
{
- if (mask)
- return minloc2_4_s1 (array, len, back);
+ if (mask == NULL || *mask)
+ return minloc2_4_s1 (array, back, len);
else
return 0;
}
diff --git a/libgfortran/generated/minloc2_4_s4.c b/libgfortran/generated/minloc2_4_s4.c
index d606437..f3020d6 100644
--- a/libgfortran/generated/minloc2_4_s4.c
+++ b/libgfortran/generated/minloc2_4_s4.c
@@ -154,8 +154,8 @@ GFC_INTEGER_4
sminloc2_4_s4 (gfc_array_s4 * const restrict array,
GFC_LOGICAL_4 *mask, GFC_LOGICAL_4 back, gfc_charlen_type len)
{
- if (mask)
- return minloc2_4_s4 (array, len, back);
+ if (mask == NULL || *mask)
+ return minloc2_4_s4 (array, back, len);
else
return 0;
}
diff --git a/libgfortran/generated/minloc2_8_s1.c b/libgfortran/generated/minloc2_8_s1.c
index b02200b..04ec913 100644
--- a/libgfortran/generated/minloc2_8_s1.c
+++ b/libgfortran/generated/minloc2_8_s1.c
@@ -154,8 +154,8 @@ GFC_INTEGER_8
sminloc2_8_s1 (gfc_array_s1 * const restrict array,
GFC_LOGICAL_4 *mask, GFC_LOGICAL_4 back, gfc_charlen_type len)
{
- if (mask)
- return minloc2_8_s1 (array, len, back);
+ if (mask == NULL || *mask)
+ return minloc2_8_s1 (array, back, len);
else
return 0;
}
diff --git a/libgfortran/generated/minloc2_8_s4.c b/libgfortran/generated/minloc2_8_s4.c
index 9d33d13..fbb6d08 100644
--- a/libgfortran/generated/minloc2_8_s4.c
+++ b/libgfortran/generated/minloc2_8_s4.c
@@ -154,8 +154,8 @@ GFC_INTEGER_8
sminloc2_8_s4 (gfc_array_s4 * const restrict array,
GFC_LOGICAL_4 *mask, GFC_LOGICAL_4 back, gfc_charlen_type len)
{
- if (mask)
- return minloc2_8_s4 (array, len, back);
+ if (mask == NULL || *mask)
+ return minloc2_8_s4 (array, back, len);
else
return 0;
}
diff --git a/libgfortran/m4/ifindloc2.m4 b/libgfortran/m4/ifindloc2.m4
index c6f909a..d309d8b 100644
--- a/libgfortran/m4/ifindloc2.m4
+++ b/libgfortran/m4/ifindloc2.m4
@@ -41,7 +41,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
if (back)
{
src = array->base_addr + (extent - 1) * sstride;
- for (i = extent; i >= 0; i--)
+ for (i = extent; i > 0; i--)
{
if ('comparison`'`)
return i;
@@ -94,7 +94,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
{
src = array->base_addr + (extent - 1) * sstride;
mbase += (extent - 1) * mstride;
- for (i = extent; i >= 0; i--)
+ for (i = extent; i > 0; i--)
{
if (*mbase && ('comparison`'`))
return i;
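
The two ifindloc2.m4 hunks above tighten the backward search loop so the 1-based index i stops at 1 instead of 0; on the would-be i == 0 iteration the source pointer has already been stepped before the start of the data. A minimal C sketch of that pattern, with illustrative names rather than the generated FINDLOC code:

    /* Minimal sketch (illustrative, not the generated FINDLOC code):
       a backward scan that returns a 1-based position.  Allowing
       i == 0 would read src one element before the start of the
       array before the loop exits.  */
    #include <stddef.h>

    static ptrdiff_t
    findloc_back (const int *base, ptrdiff_t extent, ptrdiff_t sstride,
                  int value)
    {
      const int *src = base + (extent - 1) * sstride;
      for (ptrdiff_t i = extent; i > 0; i--)   /* not i >= 0 */
        {
          if (*src == value)
            return i;        /* 1-based position, as Fortran expects */
          src -= sstride;
        }
      return 0;              /* not found */
    }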
diff --git a/libgfortran/m4/ifunction-s.m4 b/libgfortran/m4/ifunction-s.m4
index 8275f65..22182e9 100644
--- a/libgfortran/m4/ifunction-s.m4
+++ b/libgfortran/m4/ifunction-s.m4
@@ -421,7 +421,7 @@ s'name`'rtype_qual`_'atype_code` ('rtype` * const restrict retarray,
for (n = 0; n < dim; n++)
{
- extent[n] = GFC_DESCRIPTOR_EXTENT(array,n) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n);
if (extent[n] <= 0)
extent[n] = 0;
@@ -429,8 +429,7 @@ s'name`'rtype_qual`_'atype_code` ('rtype` * const restrict retarray,
for (n = dim; n < rank; n++)
{
- extent[n] =
- GFC_DESCRIPTOR_EXTENT(array,n + 1) * string_len;
+ extent[n] = GFC_DESCRIPTOR_EXTENT(array,n + 1);
if (extent[n] <= 0)
extent[n] = 0;
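
In the ifunction-s.m4 change above (mirrored in the generated minloc1_*/maxloc1_* files earlier in this diff), the per-dimension extents are now taken from the descriptor as plain element counts, no longer scaled by the string length. The sketch below, under the assumption that string_len only determines the byte size of each character element, illustrates the relationship; the names are illustrative, not the libgfortran descriptor API.

    /* Minimal sketch, assuming string_len scales element size only:
       iterate over "extent" elements, each string_len bytes wide.
       Multiplying the element count by string_len would over-count
       the iterations by a factor of the string length.  */
    #include <stddef.h>

    static size_t
    count_nonblank (const char *data, ptrdiff_t extent, size_t string_len)
    {
      size_t nonblank = 0;
      for (ptrdiff_t n = 0; n < extent; n++)                  /* element count */
        {
          const char *elem = data + (size_t) n * string_len;  /* byte offset */
          for (size_t k = 0; k < string_len; k++)
            if (elem[k] != ' ')
              {
                nonblank++;
                break;
              }
        }
      return nonblank;
    }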
diff --git a/libgfortran/m4/maxloc2s.m4 b/libgfortran/m4/maxloc2s.m4
index 49ecae7..b6070b4 100644
--- a/libgfortran/m4/maxloc2s.m4
+++ b/libgfortran/m4/maxloc2s.m4
@@ -153,8 +153,8 @@ export_proto(s'name`'rtype_qual`_'atype_code`);
s'name`'rtype_qual`_'atype_code` ('atype` * const restrict array,
GFC_LOGICAL_4 *mask'back_arg`, gfc_charlen_type len)
{
- if (mask)
- return 'name`'rtype_qual`_'atype_code` (array, len, back);
+ if (mask == NULL || *mask)
+ return 'name`'rtype_qual`_'atype_code` (array, back, len);
else
return 0;
}
diff --git a/libgfortran/m4/minloc2s.m4 b/libgfortran/m4/minloc2s.m4
index 8e7b4ab..9524fc4 100644
--- a/libgfortran/m4/minloc2s.m4
+++ b/libgfortran/m4/minloc2s.m4
@@ -155,8 +155,8 @@ export_proto(s'name`'rtype_qual`_'atype_code`);
s'name`'rtype_qual`_'atype_code` ('atype` * const restrict array,
GFC_LOGICAL_4 *mask'back_arg`, gfc_charlen_type len)
{
- if (mask)
- return 'name`'rtype_qual`_'atype_code` (array, len, back);
+ if (mask == NULL || *mask)
+ return 'name`'rtype_qual`_'atype_code` (array, back, len);
else
return 0;
}
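
The maxloc2s.m4 and minloc2s.m4 hunks (and the generated maxloc2_*/minloc2_* files above) fix the scalar-MASK wrapper in two ways: an absent (NULL) or .TRUE. mask must fall through to the full reduction, with only a present .FALSE. mask yielding 0, and BACK must be forwarded ahead of the string length. A self-contained C sketch of that wrapper pattern follows; the function names and types are illustrative, not the exported libgfortran symbols.

    /* Minimal sketch of the scalar-MASK wrapper pattern: NULL or
       .TRUE. means "reduce the whole array", a present .FALSE. mask
       means the location result is 0.  The wrapper forwards BACK
       before the string length.  */
    #include <stddef.h>
    #include <string.h>

    typedef int logical4;

    /* Stand-in for the unmasked character MAXLOC: returns the 1-based
       index of the lexicographically largest element, 0 if none.  */
    static long
    maxloc_unmasked (const char *array, long nelems, logical4 back, size_t len)
    {
      long pos = 0;
      for (long i = 0; i < nelems; i++)
        {
          const char *elem = array + (size_t) i * len;
          int cmp = pos ? memcmp (elem, array + (size_t) (pos - 1) * len, len) : 1;
          if (cmp > 0 || (back && cmp == 0))
            pos = i + 1;     /* BACK=.TRUE. keeps the last occurrence */
        }
      return pos;
    }

    static long
    smaxloc_scalar_mask (const char *array, long nelems, const logical4 *mask,
                         logical4 back, size_t len)
    {
      if (mask == NULL || *mask)          /* no mask, or MASK=.TRUE.  */
        return maxloc_unmasked (array, nelems, back, len);   /* back, then len */
      return 0;                           /* MASK=.FALSE. -> result is 0 */
    }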
diff --git a/libiberty/regex.c b/libiberty/regex.c
index bc36f43..8337dea 100644
--- a/libiberty/regex.c
+++ b/libiberty/regex.c
@@ -3468,7 +3468,7 @@ PREFIX(regex_compile) (const char *ARG_PREFIX(pattern),
PATFETCH (c);
if ((c == '.' && *p == ']') || p == pend)
break;
- if (c1 < sizeof (str))
+ if (c1 < sizeof (str) - 1)
str[c1++] = c;
else
/* This is in any case an invalid class name. */
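
The regex.c hunk stops collecting a character-class name one byte earlier so that the buffer can still be NUL-terminated in bounds afterwards. A small C sketch of that bounded-copy pattern; the buffer name and the truncation behaviour are illustrative, not the regex_compile logic.

    /* Minimal sketch: when characters are accumulated into a fixed
       buffer that is NUL-terminated afterwards, the copy must stop at
       bufsize - 1 so the terminator still fits.  */
    #include <stdio.h>
    #include <string.h>

    static void
    collect_name (const char *p, char *buf, size_t bufsize)
    {
      size_t c1 = 0;
      while (*p && *p != ':')
        {
          if (c1 < bufsize - 1)      /* leave room for the trailing '\0' */
            buf[c1++] = *p;
          p++;
        }
      buf[c1] = '\0';                /* always in bounds now */
    }

    int
    main (void)
    {
      char str[8];
      collect_name ("alphabetic:]", str, sizeof str);   /* longer than the buffer */
      printf ("%s\n", str);          /* prints the truncated name "alphabe" */
      return 0;
    }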