-rw-r--r--  gcc/ChangeLog  41
-rw-r--r--  gcc/c-family/ChangeLog  9
-rw-r--r--  gcc/c-family/c-common.h  3
-rw-r--r--  gcc/c-family/c-omp.c  56
-rw-r--r--  gcc/c-family/c-pragma.c  1
-rw-r--r--  gcc/c-family/c-pragma.h  2
-rw-r--r--  gcc/c/ChangeLog  12
-rw-r--r--  gcc/c/c-parser.c  143
-rw-r--r--  gcc/c/c-typeck.c  1
-rw-r--r--  gcc/cp/ChangeLog  16
-rw-r--r--  gcc/cp/cp-tree.h  2
-rw-r--r--  gcc/cp/parser.c  165
-rw-r--r--  gcc/cp/pt.c  1
-rw-r--r--  gcc/cp/semantics.c  1
-rw-r--r--  gcc/gimplify.c  312
-rw-r--r--  gcc/omp-low.c  92
-rw-r--r--  gcc/testsuite/ChangeLog  14
-rw-r--r--  gcc/testsuite/c-c++-common/gomp/cancel-1.c  16
-rw-r--r--  gcc/testsuite/c-c++-common/gomp/clauses-1.c  133
-rw-r--r--  gcc/testsuite/c-c++-common/gomp/loop-1.c  271
-rw-r--r--  gcc/testsuite/c-c++-common/gomp/loop-2.c  294
-rw-r--r--  gcc/testsuite/c-c++-common/gomp/loop-3.c  145
-rw-r--r--  gcc/testsuite/c-c++-common/gomp/loop-4.c  46
-rw-r--r--  gcc/testsuite/c-c++-common/gomp/loop-5.c  56
-rw-r--r--  gcc/testsuite/c-c++-common/gomp/order-3.c  48
-rw-r--r--  gcc/testsuite/c-c++-common/gomp/simd-setjmp-1.c  68
-rw-r--r--  gcc/testsuite/c-c++-common/gomp/teams-2.c  44
-rw-r--r--  gcc/tree-core.h  10
-rw-r--r--  gcc/tree-pretty-print.c  23
-rw-r--r--  gcc/tree.c  3
-rw-r--r--  gcc/tree.def  4
-rw-r--r--  gcc/tree.h  19
-rw-r--r--  libgomp/ChangeLog  4
-rw-r--r--  libgomp/testsuite/libgomp.c-c++-common/loop-1.c  127
34 files changed, 2020 insertions(+), 162 deletions(-)
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 40f6db5..537799e 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,46 @@
2019-07-20 Jakub Jelinek <jakub@redhat.com>
+ * tree.def (OMP_LOOP): New tree code.
+ * tree-core.h (enum omp_clause_code): Add OMP_CLAUSE_BIND.
+ (enum omp_clause_bind_kind): New enum.
+ (struct tree_omp_clause): Add subcode.bind_kind.
+ * tree.h (OMP_LOOP_CHECK): Rename to ...
+ (OMP_LOOPING_CHECK): ... this.
+ (OMP_FOR_BODY, OMP_FOR_CLAUSES, OMP_FOR_INIT, OMP_FOR_COND,
+ OMP_FOR_INCR, OMP_FOR_PRE_BODY, OMP_FOR_ORIG_DECLS): Use
+ OMP_LOOPING_CHECK instead of OMP_LOOP_CHECK.
+ (OMP_CLAUSE_BIND_KIND): Define.
+ * tree.c (omp_clause_num_ops, omp_clause_code_name): Add
+ bind clause entries.
+ (walk_tree_1): Handle OMP_CLAUSE_BIND.
+ * tree-pretty-print.c (dump_omp_clause): Likewise.
+ (dump_generic_node): Handle OMP_LOOP.
+ * gimplify.c (enum omp_region_type): Add ORT_IMPLICIT_TARGET.
+ (in_omp_construct): New variable.
+ (is_gimple_stmt): Handle OMP_LOOP.
+ (gimplify_scan_omp_clauses): For lastprivate don't set
+ check_non_private if code == OMP_LOOP. For reduction clause
+ on OMP_LOOP combined with parallel or teams propagate as shared
+ on the combined construct. Handle OMP_CLAUSE_BIND.
+ (gimplify_adjust_omp_clauses): Handle OMP_CLAUSE_BIND.
+ (gimplify_omp_for): Pass OMP_LOOP instead of OMP_{FOR,DISTRIBUTE}
+ for constructs from a loop construct to gimplify_scan_omp_clauses.
+ Don't predetermine iterator linear on OMP_SIMD from loop construct.
+ (replace_reduction_placeholders, gimplify_omp_loop): New functions.
+ (gimplify_omp_workshare): Use ORT_IMPLICIT_TARGET instead of trying
+ to match the implicit ORT_TARGET construct around whole body.
+ Temporarily clear in_omp_construct when processing body.
+ (gimplify_expr): Handle OMP_LOOP. For OMP_MASTER, OMP_TASKGROUP
+ etc. temporarily set in_omp_construct when processing body.
+ (gimplify_body): Create ORT_IMPLICIT_TARGET instead of ORT_TARGET.
+ * omp-low.c (struct omp_context): Add loop_p.
+ (build_outer_var_ref): Treat ctx->loop_p similarly to simd construct
+ in that the original var might be private.
+ (scan_sharing_clauses): Handle OMP_CLAUSE_BIND.
+ (check_omp_nesting_restrictions): Adjust nesting restrictions for
+ addition of loop construct.
+ (scan_omp_1_stmt): Allow setjmp inside of loop construct.
+
* omp-low.c (lower_rec_input_clauses): Don't force simd arrays for
lastprivate non-addressable iterator of a collapse(1) simd.
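For readers unfamiliar with the construct the ChangeLog entry above introduces, here is a minimal stand-alone example (illustrative only, not part of the patch; the function and parameter names are invented) of the OpenMP 5.0 loop construct with the new bind clause:

  /* Illustrative example only -- not part of the patch.  A minimal use of
     the new OpenMP 5.0 "loop" construct with the "bind" clause.  */
  void
  scale_add (int n, float a, float *x, float *y)
  {
    #pragma omp loop bind(thread) order(concurrent)
    for (int i = 0; i < n; i++)
      y[i] += a * x[i];
  }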
diff --git a/gcc/c-family/ChangeLog b/gcc/c-family/ChangeLog
index d360759..e645254 100644
--- a/gcc/c-family/ChangeLog
+++ b/gcc/c-family/ChangeLog
@@ -1,3 +1,12 @@
+2019-07-20 Jakub Jelinek <jakub@redhat.com>
+
+ * c-pragma.h (enum pragma_kind): Add PRAGMA_OMP_LOOP.
+ (enum pragma_omp_clause): Add PRAGMA_OMP_CLAUSE_BIND.
+ * c-pragma.c (omp_pragmas_simd): Add PRAGMA_OMP_LOOP entry.
+ * c-common.h (enum c_omp_clause_split): Add C_OMP_CLAUSE_SPLIT_LOOP.
+ * c-omp.c (c_omp_split_clauses): Add support for 4 new combined
+ constructs with the loop construct.
+
2019-07-13 Jakub Jelinek <jakub@redhat.com>
PR c/91149
diff --git a/gcc/c-family/c-common.h b/gcc/c-family/c-common.h
index 5ac6e5e..117d729 100644
--- a/gcc/c-family/c-common.h
+++ b/gcc/c-family/c-common.h
@@ -1148,7 +1148,8 @@ enum c_omp_clause_split
C_OMP_CLAUSE_SPLIT_SIMD,
C_OMP_CLAUSE_SPLIT_COUNT,
C_OMP_CLAUSE_SPLIT_SECTIONS = C_OMP_CLAUSE_SPLIT_FOR,
- C_OMP_CLAUSE_SPLIT_TASKLOOP = C_OMP_CLAUSE_SPLIT_FOR
+ C_OMP_CLAUSE_SPLIT_TASKLOOP = C_OMP_CLAUSE_SPLIT_FOR,
+ C_OMP_CLAUSE_SPLIT_LOOP = C_OMP_CLAUSE_SPLIT_FOR
};
enum c_omp_region_type
diff --git a/gcc/c-family/c-omp.c b/gcc/c-family/c-omp.c
index dc59bd2..10f7c4e 100644
--- a/gcc/c-family/c-omp.c
+++ b/gcc/c-family/c-omp.c
@@ -1263,7 +1263,7 @@ c_oacc_split_loop_clauses (tree clauses, tree *not_loop_clauses,
}
/* This function attempts to split or duplicate clauses for OpenMP
- combined/composite constructs. Right now there are 26 different
+ combined/composite constructs. Right now there are 30 different
constructs. CODE is the innermost construct in the combined construct,
and MASK allows to determine which constructs are combined together,
as every construct has at least one clause that no other construct
@@ -1278,6 +1278,7 @@ c_oacc_split_loop_clauses (tree clauses, tree *not_loop_clauses,
#pragma omp master taskloop simd
#pragma omp parallel for
#pragma omp parallel for simd
+ #pragma omp parallel loop
#pragma omp parallel master
#pragma omp parallel master taskloop
#pragma omp parallel master taskloop simd
@@ -1285,17 +1286,20 @@ c_oacc_split_loop_clauses (tree clauses, tree *not_loop_clauses,
#pragma omp target parallel
#pragma omp target parallel for
#pragma omp target parallel for simd
+ #pragma omp target parallel loop
#pragma omp target teams
#pragma omp target teams distribute
#pragma omp target teams distribute parallel for
#pragma omp target teams distribute parallel for simd
#pragma omp target teams distribute simd
+ #pragma omp target teams loop
#pragma omp target simd
#pragma omp taskloop simd
#pragma omp teams distribute
#pragma omp teams distribute parallel for
#pragma omp teams distribute parallel for simd
- #pragma omp teams distribute simd */
+ #pragma omp teams distribute simd
+ #pragma omp teams loop */
void
c_omp_split_clauses (location_t loc, enum tree_code code,
@@ -1375,7 +1379,11 @@ c_omp_split_clauses (location_t loc, enum tree_code code,
case OMP_CLAUSE_PRIORITY:
s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
break;
- /* Duplicate this to all of taskloop, distribute, for and simd. */
+ case OMP_CLAUSE_BIND:
+ s = C_OMP_CLAUSE_SPLIT_LOOP;
+ break;
+ /* Duplicate this to all of taskloop, distribute, for, simd and
+ loop. */
case OMP_CLAUSE_COLLAPSE:
if (code == OMP_SIMD)
{
@@ -1418,6 +1426,8 @@ c_omp_split_clauses (location_t loc, enum tree_code code,
else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
!= 0)
s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
+ else if (code == OMP_LOOP)
+ s = C_OMP_CLAUSE_SPLIT_LOOP;
else
s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
break;
@@ -1435,12 +1445,13 @@ c_omp_split_clauses (location_t loc, enum tree_code code,
case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
case OMP_MASTER: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
case OMP_TASKLOOP: s = C_OMP_CLAUSE_SPLIT_TASKLOOP; break;
+ case OMP_LOOP: s = C_OMP_CLAUSE_SPLIT_LOOP; break;
default: gcc_unreachable ();
}
break;
/* Firstprivate clause is supported on all constructs but
- simd and master. Put it on the outermost of those and duplicate on
- teams and parallel. */
+ simd, master and loop. Put it on the outermost of those and
+ duplicate on teams and parallel. */
case OMP_CLAUSE_FIRSTPRIVATE:
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
!= 0)
@@ -1486,7 +1497,7 @@ c_omp_split_clauses (location_t loc, enum tree_code code,
s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
else
/* This must be
- #pragma omp parallel{, for{, simd}, sections}
+ #pragma omp parallel{, for{, simd}, sections,loop}
or
#pragma omp target parallel. */
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
@@ -1495,10 +1506,11 @@ c_omp_split_clauses (location_t loc, enum tree_code code,
!= 0)
{
/* This must be one of
- #pragma omp {,target }teams distribute
+ #pragma omp {,target }teams {distribute,loop}
#pragma omp target teams
#pragma omp {,target }teams distribute simd. */
gcc_assert (code == OMP_DISTRIBUTE
+ || code == OMP_LOOP
|| code == OMP_TEAMS
|| code == OMP_SIMD);
s = C_OMP_CLAUSE_SPLIT_TEAMS;
@@ -1526,9 +1538,9 @@ c_omp_split_clauses (location_t loc, enum tree_code code,
s = C_OMP_CLAUSE_SPLIT_FOR;
}
break;
- /* Lastprivate is allowed on distribute, for, sections, taskloop and
- simd. In parallel {for{, simd},sections} we actually want to put
- it on parallel rather than for or sections. */
+ /* Lastprivate is allowed on distribute, for, sections, taskloop, loop
+ and simd. In parallel {for{, simd},sections} we actually want to
+ put it on parallel rather than for or sections. */
case OMP_CLAUSE_LASTPRIVATE:
if (code == OMP_DISTRIBUTE)
{
@@ -1560,6 +1572,11 @@ c_omp_split_clauses (location_t loc, enum tree_code code,
s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
break;
}
+ if (code == OMP_LOOP)
+ {
+ s = C_OMP_CLAUSE_SPLIT_LOOP;
+ break;
+ }
gcc_assert (code == OMP_SIMD);
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
{
@@ -1632,7 +1649,7 @@ c_omp_split_clauses (location_t loc, enum tree_code code,
}
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
break;
- /* order clauses are allowed on for and simd. */
+ /* order clauses are allowed on for, simd and loop. */
case OMP_CLAUSE_ORDER:
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
{
@@ -1647,22 +1664,24 @@ c_omp_split_clauses (location_t loc, enum tree_code code,
else
s = C_OMP_CLAUSE_SPLIT_FOR;
}
+ else if (code == OMP_LOOP)
+ s = C_OMP_CLAUSE_SPLIT_LOOP;
else
s = C_OMP_CLAUSE_SPLIT_SIMD;
break;
- /* Reduction is allowed on simd, for, parallel, sections, taskloop
- and teams. Duplicate it on all of them, but omit on for or
+ /* Reduction is allowed on simd, for, parallel, sections, taskloop,
+ teams and loop. Duplicate it on all of them, but omit on for or
sections if parallel is present (unless inscan, in that case
- omit on parallel). If taskloop is combined with
+ omit on parallel). If taskloop or loop is combined with
parallel, omit it on parallel. */
case OMP_CLAUSE_REDUCTION:
if (OMP_CLAUSE_REDUCTION_TASK (clauses))
{
- if (code == OMP_SIMD /* || code == OMP_LOOP */)
+ if (code == OMP_SIMD || code == OMP_LOOP)
{
error_at (OMP_CLAUSE_LOCATION (clauses),
"invalid %<task%> reduction modifier on construct "
- "combined with %<simd%>" /* or %<loop%> */);
+ "combined with %<simd%> or %<loop%>");
OMP_CLAUSE_REDUCTION_TASK (clauses) = 0;
}
else if (code != OMP_SECTIONS
@@ -1739,6 +1758,8 @@ c_omp_split_clauses (location_t loc, enum tree_code code,
s = C_OMP_CLAUSE_SPLIT_PARALLEL;
else if (code == OMP_TASKLOOP)
s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
+ else if (code == OMP_LOOP)
+ s = C_OMP_CLAUSE_SPLIT_LOOP;
else if (code == OMP_SIMD)
{
if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
@@ -1930,7 +1951,8 @@ c_omp_split_clauses (location_t loc, enum tree_code code,
gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE);
if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) == 0
- && code != OMP_SECTIONS)
+ && code != OMP_SECTIONS
+ && code != OMP_LOOP)
gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_FOR] == NULL_TREE);
if (code != OMP_SIMD)
gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_SIMD] == NULL_TREE);
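As a rough illustration of the clause splitting above (not from the patch; names invented), two of the four new combined forms are shown below.  Per the new cases, bind, order, collapse and reduction are assigned C_OMP_CLAUSE_SPLIT_LOOP, while clauses such as num_threads stay on the parallel leaf:

  /* Illustrative only.  "parallel loop" and "teams loop" are two of the
     four new combined constructs handled above (the others are
     "target parallel loop" and "target teams loop").  */
  extern int r;

  void
  combined_forms (int n, int nth, int *v)
  {
    #pragma omp parallel loop num_threads(nth) reduction(+:r)
    for (int i = 0; i < n; i++)
      r += v[i];

    #pragma omp teams loop order(concurrent) collapse(1)
    for (int i = 0; i < n; i++)
      v[i] *= 2;
  }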
diff --git a/gcc/c-family/c-pragma.c b/gcc/c-family/c-pragma.c
index 9af713d..9fee84b 100644
--- a/gcc/c-family/c-pragma.c
+++ b/gcc/c-family/c-pragma.c
@@ -1318,6 +1318,7 @@ static const struct omp_pragma_def omp_pragmas_simd[] = {
{ "declare", PRAGMA_OMP_DECLARE },
{ "distribute", PRAGMA_OMP_DISTRIBUTE },
{ "for", PRAGMA_OMP_FOR },
+ { "loop", PRAGMA_OMP_LOOP },
{ "ordered", PRAGMA_OMP_ORDERED },
{ "parallel", PRAGMA_OMP_PARALLEL },
{ "scan", PRAGMA_OMP_SCAN },
diff --git a/gcc/c-family/c-pragma.h b/gcc/c-family/c-pragma.h
index 4239ada..e8a509f 100644
--- a/gcc/c-family/c-pragma.h
+++ b/gcc/c-family/c-pragma.h
@@ -52,6 +52,7 @@ enum pragma_kind {
PRAGMA_OMP_END_DECLARE_TARGET,
PRAGMA_OMP_FLUSH,
PRAGMA_OMP_FOR,
+ PRAGMA_OMP_LOOP,
PRAGMA_OMP_MASTER,
PRAGMA_OMP_ORDERED,
PRAGMA_OMP_PARALLEL,
@@ -84,6 +85,7 @@ enum pragma_omp_clause {
PRAGMA_OMP_CLAUSE_NONE = 0,
PRAGMA_OMP_CLAUSE_ALIGNED,
+ PRAGMA_OMP_CLAUSE_BIND,
PRAGMA_OMP_CLAUSE_COLLAPSE,
PRAGMA_OMP_CLAUSE_COPYIN,
PRAGMA_OMP_CLAUSE_COPYPRIVATE,
diff --git a/gcc/c/ChangeLog b/gcc/c/ChangeLog
index ee0c559..a75f0d8 100644
--- a/gcc/c/ChangeLog
+++ b/gcc/c/ChangeLog
@@ -1,3 +1,15 @@
+2019-07-20 Jakub Jelinek <jakub@redhat.com>
+
+ * c-parser.c (c_parser_omp_clause_name): Handle bind clause.
+ (c_parser_omp_clause_bind): New function.
+ (c_parser_omp_all_clauses): Handle PRAGMA_OMP_CLAUSE_BIND.
+ (OMP_LOOP_CLAUSE_MASK): Define.
+ (c_parser_omp_loop): New function.
+ (c_parser_omp_parallel, c_parser_omp_teams): Handle parsing of
+ loop combined with parallel or teams.
+ (c_parser_omp_construct): Handle PRAGMA_OMP_LOOP.
+ * c-typeck.c (c_finish_omp_clauses): Handle OMP_CLAUSE_BIND.
+
2019-07-18 Richard Sandiford <richard.sandiford@arm.com>
PR c/53633
diff --git a/gcc/c/c-parser.c b/gcc/c/c-parser.c
index 1f83c24..6721049 100644
--- a/gcc/c/c-parser.c
+++ b/gcc/c/c-parser.c
@@ -11688,6 +11688,10 @@ c_parser_omp_clause_name (c_parser *parser)
else if (!strcmp ("async", p))
result = PRAGMA_OACC_CLAUSE_ASYNC;
break;
+ case 'b':
+ if (!strcmp ("bind", p))
+ result = PRAGMA_OMP_CLAUSE_BIND;
+ break;
case 'c':
if (!strcmp ("collapse", p))
result = PRAGMA_OMP_CLAUSE_COLLAPSE;
@@ -13507,6 +13511,45 @@ c_parser_omp_clause_order (c_parser *parser, tree list)
}
+/* OpenMP 5.0:
+ bind ( teams | parallel | thread ) */
+
+static tree
+c_parser_omp_clause_bind (c_parser *parser, tree list)
+{
+ location_t loc = c_parser_peek_token (parser)->location;
+ tree c;
+ const char *p;
+ enum omp_clause_bind_kind kind = OMP_CLAUSE_BIND_THREAD;
+
+ matching_parens parens;
+ if (!parens.require_open (parser))
+ return list;
+ if (!c_parser_next_token_is (parser, CPP_NAME))
+ {
+ invalid:
+ c_parser_error (parser,
+ "expected %<teams%>, %<parallel%> or %<thread%>");
+ parens.skip_until_found_close (parser);
+ return list;
+ }
+ p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
+ if (strcmp (p, "teams") == 0)
+ kind = OMP_CLAUSE_BIND_TEAMS;
+ else if (strcmp (p, "parallel") == 0)
+ kind = OMP_CLAUSE_BIND_PARALLEL;
+ else if (strcmp (p, "thread") != 0)
+ goto invalid;
+ c_parser_consume_token (parser);
+ parens.skip_until_found_close (parser);
+ /* check_no_duplicate_clause (list, OMP_CLAUSE_BIND, "bind"); */
+ c = build_omp_clause (loc, OMP_CLAUSE_BIND);
+ OMP_CLAUSE_BIND_KIND (c) = kind;
+ OMP_CLAUSE_CHAIN (c) = list;
+ return c;
+}
+
+
/* OpenMP 2.5:
ordered
@@ -15066,6 +15109,10 @@ c_parser_omp_all_clauses (c_parser *parser, omp_clause_mask mask,
switch (c_kind)
{
+ case PRAGMA_OMP_CLAUSE_BIND:
+ clauses = c_parser_omp_clause_bind (parser, clauses);
+ c_name = "bind";
+ break;
case PRAGMA_OMP_CLAUSE_COLLAPSE:
clauses = c_parser_omp_clause_collapse (parser, clauses);
c_name = "collapse";
@@ -17248,6 +17295,46 @@ omp_split_clauses (location_t loc, enum tree_code code,
cclauses[i] = c_finish_omp_clauses (cclauses[i], C_ORT_OMP);
}
+/* OpenMP 5.0:
+ #pragma omp loop loop-clause[optseq] new-line
+ for-loop
+
+ LOC is the location of the #pragma token.
+*/
+
+#define OMP_LOOP_CLAUSE_MASK \
+ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_BIND) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDER))
+
+static tree
+c_parser_omp_loop (location_t loc, c_parser *parser,
+ char *p_name, omp_clause_mask mask, tree *cclauses,
+ bool *if_p)
+{
+ tree block, clauses, ret;
+
+ strcat (p_name, " loop");
+ mask |= OMP_LOOP_CLAUSE_MASK;
+
+ clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL);
+ if (cclauses)
+ {
+ omp_split_clauses (loc, OMP_LOOP, mask, clauses, cclauses);
+ clauses = cclauses[C_OMP_CLAUSE_SPLIT_LOOP];
+ }
+
+ block = c_begin_compound_stmt (true);
+ ret = c_parser_omp_for_loop (loc, parser, OMP_LOOP, clauses, cclauses, if_p);
+ block = c_end_compound_stmt (loc, block, true);
+ add_stmt (block);
+
+ return ret;
+}
+
/* OpenMP 4.0:
#pragma omp simd simd-clause[optseq] new-line
for-loop
@@ -17713,10 +17800,10 @@ c_parser_omp_parallel (location_t loc, c_parser *parser,
c_parser_skip_to_pragma_eol (parser);
return NULL_TREE;
}
- else if (cclauses == NULL && c_parser_next_token_is (parser, CPP_NAME))
+ else if (c_parser_next_token_is (parser, CPP_NAME))
{
const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
- if (strcmp (p, "master") == 0)
+ if (cclauses == NULL && strcmp (p, "master") == 0)
{
tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
cclauses = cclauses_buf;
@@ -17736,12 +17823,34 @@ c_parser_omp_parallel (location_t loc, c_parser *parser,
return ret;
return stmt;
}
+ else if (strcmp (p, "loop") == 0)
+ {
+ tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
+ if (cclauses == NULL)
+ cclauses = cclauses_buf;
+
+ c_parser_consume_token (parser);
+ if (!flag_openmp) /* flag_openmp_simd */
+ return c_parser_omp_loop (loc, parser, p_name, mask, cclauses,
+ if_p);
+ block = c_begin_omp_parallel ();
+ tree ret = c_parser_omp_loop (loc, parser, p_name, mask, cclauses,
+ if_p);
+ stmt
+ = c_finish_omp_parallel (loc,
+ cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL],
+ block);
+ if (ret == NULL_TREE)
+ return ret;
+ OMP_PARALLEL_COMBINED (stmt) = 1;
+ return stmt;
+ }
else if (!flag_openmp) /* flag_openmp_simd */
{
c_parser_skip_to_pragma_eol (parser, false);
return NULL_TREE;
}
- else if (strcmp (p, "sections") == 0)
+ else if (cclauses == NULL && strcmp (p, "sections") == 0)
{
tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
cclauses = cclauses_buf;
@@ -18121,6 +18230,30 @@ c_parser_omp_teams (location_t loc, c_parser *parser,
SET_EXPR_LOCATION (ret, loc);
return add_stmt (ret);
}
+ else if (strcmp (p, "loop") == 0)
+ {
+ tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
+ if (cclauses == NULL)
+ cclauses = cclauses_buf;
+
+ c_parser_consume_token (parser);
+ if (!flag_openmp) /* flag_openmp_simd */
+ return c_parser_omp_loop (loc, parser, p_name, mask, cclauses,
+ if_p);
+ block = c_begin_omp_parallel ();
+ ret = c_parser_omp_loop (loc, parser, p_name, mask, cclauses, if_p);
+ block = c_end_compound_stmt (loc, block, true);
+ if (ret == NULL)
+ return ret;
+ clauses = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
+ ret = make_node (OMP_TEAMS);
+ TREE_TYPE (ret) = void_type_node;
+ OMP_TEAMS_CLAUSES (ret) = clauses;
+ OMP_TEAMS_BODY (ret) = block;
+ OMP_TEAMS_COMBINED (ret) = 1;
+ SET_EXPR_LOCATION (ret, loc);
+ return add_stmt (ret);
+ }
}
if (!flag_openmp) /* flag_openmp_simd */
{
@@ -19670,6 +19803,10 @@ c_parser_omp_construct (c_parser *parser, bool *if_p)
strcpy (p_name, "#pragma omp");
stmt = c_parser_omp_for (loc, parser, p_name, mask, NULL, if_p);
break;
+ case PRAGMA_OMP_LOOP:
+ strcpy (p_name, "#pragma omp");
+ stmt = c_parser_omp_loop (loc, parser, p_name, mask, NULL, if_p);
+ break;
case PRAGMA_OMP_MASTER:
strcpy (p_name, "#pragma omp");
stmt = c_parser_omp_master (loc, parser, p_name, mask, NULL, if_p);
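A short usage sketch for the new bind-clause parsing (illustrative only; the function name is invented): c_parser_omp_clause_bind accepts exactly teams, parallel or thread, and anything else hits the new "expected %<teams%>, %<parallel%> or %<thread%>" diagnostic.

  /* Illustrative only.  The three accepted bind arguments, each in a
     context where that binding is meaningful.  */
  void
  bind_forms (int n, int *v)
  {
    #pragma omp loop bind(thread)
    for (int i = 0; i < n; i++)
      v[i]++;

    #pragma omp parallel
    {
      #pragma omp loop bind(parallel)
      for (int i = 0; i < n; i++)
        v[i]++;
    }

    #pragma omp teams
    {
      #pragma omp loop bind(teams)
      for (int i = 0; i < n; i++)
        v[i]++;
    }
  }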
diff --git a/gcc/c/c-typeck.c b/gcc/c/c-typeck.c
index e4ce03d..9a1a910 100644
--- a/gcc/c/c-typeck.c
+++ b/gcc/c/c-typeck.c
@@ -14674,6 +14674,7 @@ c_finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
case OMP_CLAUSE_SIMD:
case OMP_CLAUSE_HINT:
case OMP_CLAUSE_DEFAULTMAP:
+ case OMP_CLAUSE_BIND:
case OMP_CLAUSE_NUM_GANGS:
case OMP_CLAUSE_NUM_WORKERS:
case OMP_CLAUSE_VECTOR_LENGTH:
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index c1fc980..c776243 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,19 @@
+2019-07-20 Jakub Jelinek <jakub@redhat.com>
+
+ * cp-tree.h (OMP_FOR_GIMPLIFYING_P): Use OMP_LOOPING_CHECK
+ instead of OMP_LOOP_CHECK.
+ * parser.c (cp_parser_omp_clause_name): Handle bind clause.
+ (cp_parser_omp_clause_bind): New function.
+ (cp_parser_omp_all_clauses): Handle PRAGMA_OMP_CLAUSE_BIND.
+ (OMP_LOOP_CLAUSE_MASK): Define.
+ (cp_parser_omp_loop): New function.
+ (cp_parser_omp_parallel, cp_parser_omp_teams): Handle parsing of
+ loop combined with parallel or teams.
+ (cp_parser_omp_construct): Handle PRAGMA_OMP_LOOP.
+ (cp_parser_pragma): Likewise.
+ * pt.c (tsubst_expr): Handle OMP_LOOP.
+ * semantics.c (finish_omp_clauses): Handle OMP_CLAUSE_BIND.
+
2019-07-19 Jason Merrill <jason@redhat.com>
PR c++/90101 - dependent class non-type parameter.
diff --git a/gcc/cp/cp-tree.h b/gcc/cp/cp-tree.h
index 970296d..6068745 100644
--- a/gcc/cp/cp-tree.h
+++ b/gcc/cp/cp-tree.h
@@ -4923,7 +4923,7 @@ more_aggr_init_expr_args_p (const aggr_init_expr_arg_iterator *iter)
/* Used while gimplifying continue statements bound to OMP_FOR nodes. */
#define OMP_FOR_GIMPLIFYING_P(NODE) \
- (TREE_LANG_FLAG_0 (OMP_LOOP_CHECK (NODE)))
+ (TREE_LANG_FLAG_0 (OMP_LOOPING_CHECK (NODE)))
/* A language-specific token attached to the OpenMP data clauses to
hold code (or code fragments) related to ctors, dtors, and op=.
diff --git a/gcc/cp/parser.c b/gcc/cp/parser.c
index 1a5da1d..5c379aa 100644
--- a/gcc/cp/parser.c
+++ b/gcc/cp/parser.c
@@ -32409,6 +32409,10 @@ cp_parser_omp_clause_name (cp_parser *parser)
else if (!strcmp ("async", p))
result = PRAGMA_OACC_CLAUSE_ASYNC;
break;
+ case 'b':
+ if (!strcmp ("bind", p))
+ result = PRAGMA_OMP_CLAUSE_BIND;
+ break;
case 'c':
if (!strcmp ("collapse", p))
result = PRAGMA_OMP_CLAUSE_COLLAPSE;
@@ -33945,6 +33949,56 @@ cp_parser_omp_clause_order (cp_parser *parser, tree list, location_t location)
return list;
}
+/* OpenMP 5.0:
+ bind ( teams | parallel | thread ) */
+
+static tree
+cp_parser_omp_clause_bind (cp_parser *parser, tree list,
+ location_t location)
+{
+ tree c;
+ const char *p;
+ enum omp_clause_bind_kind kind = OMP_CLAUSE_BIND_THREAD;
+
+ matching_parens parens;
+ if (!parens.require_open (parser))
+ return list;
+
+ if (!cp_lexer_next_token_is (parser->lexer, CPP_NAME))
+ {
+ invalid:
+ cp_parser_error (parser,
+ "expected %<teams%>, %<parallel%> or %<thread%>");
+ goto out_err;
+ }
+ else
+ {
+ tree id = cp_lexer_peek_token (parser->lexer)->u.value;
+ p = IDENTIFIER_POINTER (id);
+ }
+ if (strcmp (p, "teams") == 0)
+ kind = OMP_CLAUSE_BIND_TEAMS;
+ else if (strcmp (p, "parallel") == 0)
+ kind = OMP_CLAUSE_BIND_PARALLEL;
+ else if (strcmp (p, "thread") != 0)
+ goto invalid;
+ cp_lexer_consume_token (parser->lexer);
+ if (!parens.require_close (parser))
+ goto out_err;
+
+ /* check_no_duplicate_clause (list, OMP_CLAUSE_BIND, "bind", location); */
+ c = build_omp_clause (location, OMP_CLAUSE_BIND);
+ OMP_CLAUSE_BIND_KIND (c) = kind;
+ OMP_CLAUSE_CHAIN (c) = list;
+ return c;
+
+ out_err:
+ cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
+ /*or_comma=*/false,
+ /*consume_paren=*/true);
+ return list;
+}
+
/* OpenMP 2.5:
ordered
@@ -35462,6 +35516,11 @@ cp_parser_omp_all_clauses (cp_parser *parser, omp_clause_mask mask,
switch (c_kind)
{
+ case PRAGMA_OMP_CLAUSE_BIND:
+ clauses = cp_parser_omp_clause_bind (parser, clauses,
+ token->location);
+ c_name = "bind";
+ break;
case PRAGMA_OMP_CLAUSE_COLLAPSE:
clauses = cp_parser_omp_clause_collapse (parser, clauses,
token->location);
@@ -37578,6 +37637,50 @@ cp_omp_split_clauses (location_t loc, enum tree_code code,
cclauses[i] = finish_omp_clauses (cclauses[i], C_ORT_OMP);
}
+/* OpenMP 5.0:
+ #pragma omp loop loop-clause[optseq] new-line
+ for-loop */
+
+#define OMP_LOOP_CLAUSE_MASK \
+ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_BIND) \
+ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDER))
+
+static tree
+cp_parser_omp_loop (cp_parser *parser, cp_token *pragma_tok,
+ char *p_name, omp_clause_mask mask, tree *cclauses,
+ bool *if_p)
+{
+ tree clauses, sb, ret;
+ unsigned int save;
+ location_t loc = cp_lexer_peek_token (parser->lexer)->location;
+
+ strcat (p_name, " loop");
+ mask |= OMP_LOOP_CLAUSE_MASK;
+
+ clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok,
+ cclauses == NULL);
+ if (cclauses)
+ {
+ cp_omp_split_clauses (loc, OMP_LOOP, mask, clauses, cclauses);
+ clauses = cclauses[C_OMP_CLAUSE_SPLIT_LOOP];
+ }
+
+ keep_next_level (true);
+ sb = begin_omp_structured_block ();
+ save = cp_parser_begin_omp_structured_block (parser);
+
+ ret = cp_parser_omp_for_loop (parser, OMP_LOOP, clauses, cclauses, if_p);
+
+ cp_parser_end_omp_structured_block (parser, save);
+ add_stmt (finish_omp_for_block (finish_omp_structured_block (sb), ret));
+
+ return ret;
+}
+
/* OpenMP 4.0:
#pragma omp simd simd-clause[optseq] new-line
for-loop */
@@ -38038,11 +38141,11 @@ cp_parser_omp_parallel (cp_parser *parser, cp_token *pragma_tok,
cp_parser_skip_to_pragma_eol (parser, pragma_tok);
return NULL_TREE;
}
- else if (cclauses == NULL && cp_lexer_next_token_is (parser->lexer, CPP_NAME))
+ else if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
{
tree id = cp_lexer_peek_token (parser->lexer)->u.value;
const char *p = IDENTIFIER_POINTER (id);
- if (strcmp (p, "master") == 0)
+ if (cclauses == NULL && strcmp (p, "master") == 0)
{
tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
cclauses = cclauses_buf;
@@ -38060,12 +38163,34 @@ cp_parser_omp_parallel (cp_parser *parser, cp_token *pragma_tok,
return ret;
return stmt;
}
+ else if (strcmp (p, "loop") == 0)
+ {
+ tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
+ if (cclauses == NULL)
+ cclauses = cclauses_buf;
+
+ cp_lexer_consume_token (parser->lexer);
+ if (!flag_openmp) /* flag_openmp_simd */
+ return cp_parser_omp_loop (parser, pragma_tok, p_name, mask,
+ cclauses, if_p);
+ block = begin_omp_parallel ();
+ save = cp_parser_begin_omp_structured_block (parser);
+ tree ret = cp_parser_omp_loop (parser, pragma_tok, p_name, mask,
+ cclauses, if_p);
+ cp_parser_end_omp_structured_block (parser, save);
+ stmt = finish_omp_parallel (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL],
+ block);
+ if (ret == NULL_TREE)
+ return ret;
+ OMP_PARALLEL_COMBINED (stmt) = 1;
+ return stmt;
+ }
else if (!flag_openmp) /* flag_openmp_simd */
{
cp_parser_skip_to_pragma_eol (parser, pragma_tok);
return NULL_TREE;
}
- else if (strcmp (p, "sections") == 0)
+ else if (cclauses == NULL && strcmp (p, "sections") == 0)
{
tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
cclauses = cclauses_buf;
@@ -38460,6 +38585,34 @@ cp_parser_omp_teams (cp_parser *parser, cp_token *pragma_tok,
SET_EXPR_LOCATION (ret, loc);
return add_stmt (ret);
}
+ else if (strcmp (p, "loop") == 0)
+ {
+ tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
+ if (cclauses == NULL)
+ cclauses = cclauses_buf;
+
+ cp_lexer_consume_token (parser->lexer);
+ if (!flag_openmp) /* flag_openmp_simd */
+ return cp_parser_omp_loop (parser, pragma_tok, p_name, mask,
+ cclauses, if_p);
+ keep_next_level (true);
+ sb = begin_omp_structured_block ();
+ save = cp_parser_begin_omp_structured_block (parser);
+ ret = cp_parser_omp_loop (parser, pragma_tok, p_name, mask,
+ cclauses, if_p);
+ cp_parser_end_omp_structured_block (parser, save);
+ tree body = finish_omp_structured_block (sb);
+ if (ret == NULL)
+ return ret;
+ clauses = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
+ ret = make_node (OMP_TEAMS);
+ TREE_TYPE (ret) = void_type_node;
+ OMP_TEAMS_CLAUSES (ret) = clauses;
+ OMP_TEAMS_BODY (ret) = body;
+ OMP_TEAMS_COMBINED (ret) = 1;
+ SET_EXPR_LOCATION (ret, loc);
+ return add_stmt (ret);
+ }
}
if (!flag_openmp) /* flag_openmp_simd */
{
@@ -40716,6 +40869,11 @@ cp_parser_omp_construct (cp_parser *parser, cp_token *pragma_tok, bool *if_p)
stmt = cp_parser_omp_for (parser, pragma_tok, p_name, mask, NULL,
if_p);
break;
+ case PRAGMA_OMP_LOOP:
+ strcpy (p_name, "#pragma omp");
+ stmt = cp_parser_omp_loop (parser, pragma_tok, p_name, mask, NULL,
+ if_p);
+ break;
case PRAGMA_OMP_MASTER:
strcpy (p_name, "#pragma omp");
stmt = cp_parser_omp_master (parser, pragma_tok, p_name, mask, NULL,
@@ -41352,6 +41510,7 @@ cp_parser_pragma (cp_parser *parser, enum pragma_context context, bool *if_p)
case PRAGMA_OMP_CRITICAL:
case PRAGMA_OMP_DISTRIBUTE:
case PRAGMA_OMP_FOR:
+ case PRAGMA_OMP_LOOP:
case PRAGMA_OMP_MASTER:
case PRAGMA_OMP_PARALLEL:
case PRAGMA_OMP_SECTIONS:
diff --git a/gcc/cp/pt.c b/gcc/cp/pt.c
index e433413..b6eda7e 100644
--- a/gcc/cp/pt.c
+++ b/gcc/cp/pt.c
@@ -17553,6 +17553,7 @@ tsubst_expr (tree t, tree args, tsubst_flags_t complain, tree in_decl,
break;
case OMP_FOR:
+ case OMP_LOOP:
case OMP_SIMD:
case OMP_DISTRIBUTE:
case OMP_TASKLOOP:
diff --git a/gcc/cp/semantics.c b/gcc/cp/semantics.c
index 1a21705..269092d 100644
--- a/gcc/cp/semantics.c
+++ b/gcc/cp/semantics.c
@@ -7550,6 +7550,7 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
case OMP_CLAUSE_THREADS:
case OMP_CLAUSE_SIMD:
case OMP_CLAUSE_DEFAULTMAP:
+ case OMP_CLAUSE_BIND:
case OMP_CLAUSE_AUTO:
case OMP_CLAUSE_INDEPENDENT:
case OMP_CLAUSE_SEQ:
diff --git a/gcc/gimplify.c b/gcc/gimplify.c
index 66df5c5..723897f 100644
--- a/gcc/gimplify.c
+++ b/gcc/gimplify.c
@@ -154,6 +154,7 @@ enum omp_region_type
/* Data region with offloading. */
ORT_TARGET = 0x80,
ORT_COMBINED_TARGET = ORT_TARGET | 1,
+ ORT_IMPLICIT_TARGET = ORT_TARGET | 2,
/* OpenACC variants. */
ORT_ACC = 0x100, /* A generic OpenACC region. */
@@ -228,6 +229,7 @@ struct gimplify_omp_ctx
static struct gimplify_ctx *gimplify_ctxp;
static struct gimplify_omp_ctx *gimplify_omp_ctxp;
+static bool in_omp_construct;
/* Forward declaration. */
static enum gimplify_status gimplify_compound_expr (tree *, gimple_seq *, bool);
@@ -5533,6 +5535,7 @@ is_gimple_stmt (tree t)
case OMP_FOR:
case OMP_SIMD:
case OMP_DISTRIBUTE:
+ case OMP_LOOP:
case OACC_LOOP:
case OMP_SCAN:
case OMP_SECTIONS:
@@ -8185,7 +8188,8 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
break;
}
flags = GOVD_LASTPRIVATE | GOVD_SEEN | GOVD_EXPLICIT;
- check_non_private = "lastprivate";
+ if (code != OMP_LOOP)
+ check_non_private = "lastprivate";
decl = OMP_CLAUSE_DECL (c);
if (error_operand_p (decl))
goto do_add;
@@ -9142,15 +9146,20 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
" or private in outer context", DECL_NAME (decl));
}
do_notice:
- if (((region_type & ORT_TASKLOOP) == ORT_TASKLOOP
- || (region_type == ORT_WORKSHARE
- && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
- && OMP_CLAUSE_REDUCTION_INSCAN (c)))
+ if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
+ || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
+ || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
&& outer_ctx
- && outer_ctx->region_type == ORT_COMBINED_PARALLEL
- && (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
- || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
- || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE))
+ && ((region_type & ORT_TASKLOOP) == ORT_TASKLOOP
+ || (region_type == ORT_WORKSHARE
+ && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
+ && (OMP_CLAUSE_REDUCTION_INSCAN (c)
+ || code == OMP_LOOP)))
+ && (outer_ctx->region_type == ORT_COMBINED_PARALLEL
+ || (code == OMP_LOOP
+ && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
+ && ((outer_ctx->region_type & ORT_COMBINED_TEAMS)
+ == ORT_COMBINED_TEAMS))))
{
splay_tree_node on
= splay_tree_lookup (outer_ctx->variables,
@@ -9274,6 +9283,7 @@ gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p,
case OMP_CLAUSE_NOGROUP:
case OMP_CLAUSE_THREADS:
case OMP_CLAUSE_SIMD:
+ case OMP_CLAUSE_BIND:
case OMP_CLAUSE_IF_PRESENT:
case OMP_CLAUSE_FINALIZE:
break;
@@ -10239,6 +10249,7 @@ gimplify_adjust_omp_clauses (gimple_seq *pre_p, gimple_seq body, tree *list_p,
case OMP_CLAUSE_HINT:
case OMP_CLAUSE_DEFAULTMAP:
case OMP_CLAUSE_ORDER:
+ case OMP_CLAUSE_BIND:
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_IS_DEVICE_PTR:
case OMP_CLAUSE_ASYNC:
@@ -10764,9 +10775,12 @@ gimplify_omp_for (tree *expr_p, gimple_seq *pre_p)
}
}
+ bool loop_p = (omp_find_clause (OMP_FOR_CLAUSES (for_stmt), OMP_CLAUSE_BIND)
+ != NULL_TREE);
if (TREE_CODE (for_stmt) != OMP_TASKLOOP)
gimplify_scan_omp_clauses (&OMP_FOR_CLAUSES (for_stmt), pre_p, ort,
- TREE_CODE (for_stmt));
+ loop_p && TREE_CODE (for_stmt) != OMP_SIMD
+ ? OMP_LOOP : TREE_CODE (for_stmt));
if (TREE_CODE (for_stmt) == OMP_DISTRIBUTE)
gimplify_omp_ctxp->distribute = true;
@@ -10997,7 +11011,7 @@ gimplify_omp_for (tree *expr_p, gimple_seq *pre_p)
n->value &= ~GOVD_LASTPRIVATE_CONDITIONAL;
}
}
- else if (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) == 1)
+ else if (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) == 1 && !loop_p)
{
c = build_omp_clause (input_location, OMP_CLAUSE_LINEAR);
OMP_CLAUSE_LINEAR_NO_COPYIN (c) = 1;
@@ -11740,6 +11754,259 @@ gimplify_omp_for (tree *expr_p, gimple_seq *pre_p)
return GS_ALL_DONE;
}
+/* Helper for gimplify_omp_loop, called through walk_tree. */
+
+static tree
+replace_reduction_placeholders (tree *tp, int *walk_subtrees, void *data)
+{
+ if (DECL_P (*tp))
+ {
+ tree *d = (tree *) data;
+ if (*tp == OMP_CLAUSE_REDUCTION_PLACEHOLDER (d[0]))
+ {
+ *tp = OMP_CLAUSE_REDUCTION_PLACEHOLDER (d[1]);
+ *walk_subtrees = 0;
+ }
+ else if (*tp == OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (d[0]))
+ {
+ *tp = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (d[1]);
+ *walk_subtrees = 0;
+ }
+ }
+ return NULL_TREE;
+}
+
+/* Gimplify the gross structure of an OMP_LOOP statement. */
+
+static enum gimplify_status
+gimplify_omp_loop (tree *expr_p, gimple_seq *pre_p)
+{
+ tree for_stmt = *expr_p;
+ tree clauses = OMP_FOR_CLAUSES (for_stmt);
+ struct gimplify_omp_ctx *octx = gimplify_omp_ctxp;
+ enum omp_clause_bind_kind kind = OMP_CLAUSE_BIND_THREAD;
+ int i;
+
+ /* If order is not present, the behavior is as if order(concurrent)
+ appeared. */
+ tree order = omp_find_clause (clauses, OMP_CLAUSE_ORDER);
+ if (order == NULL_TREE)
+ {
+ order = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_ORDER);
+ OMP_CLAUSE_CHAIN (order) = clauses;
+ OMP_FOR_CLAUSES (for_stmt) = clauses = order;
+ }
+
+ tree bind = omp_find_clause (clauses, OMP_CLAUSE_BIND);
+ if (bind == NULL_TREE)
+ {
+ if (!flag_openmp) /* flag_openmp_simd */
+ ;
+ else if (octx && (octx->region_type & ORT_TEAMS) != 0)
+ kind = OMP_CLAUSE_BIND_TEAMS;
+ else if (octx && (octx->region_type & ORT_PARALLEL) != 0)
+ kind = OMP_CLAUSE_BIND_PARALLEL;
+ else
+ {
+ for (; octx; octx = octx->outer_context)
+ {
+ if ((octx->region_type & ORT_ACC) != 0
+ || octx->region_type == ORT_NONE
+ || octx->region_type == ORT_IMPLICIT_TARGET)
+ continue;
+ break;
+ }
+ if (octx == NULL && !in_omp_construct)
+ error_at (EXPR_LOCATION (for_stmt),
+ "%<bind%> clause not specified on a %<loop%> "
+ "construct not nested inside another OpenMP construct");
+ }
+ bind = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_BIND);
+ OMP_CLAUSE_CHAIN (bind) = clauses;
+ OMP_CLAUSE_BIND_KIND (bind) = kind;
+ OMP_FOR_CLAUSES (for_stmt) = bind;
+ }
+ else
+ switch (OMP_CLAUSE_BIND_KIND (bind))
+ {
+ case OMP_CLAUSE_BIND_THREAD:
+ break;
+ case OMP_CLAUSE_BIND_PARALLEL:
+ if (!flag_openmp) /* flag_openmp_simd */
+ {
+ OMP_CLAUSE_BIND_KIND (bind) = OMP_CLAUSE_BIND_THREAD;
+ break;
+ }
+ for (; octx; octx = octx->outer_context)
+ if (octx->region_type == ORT_SIMD
+ && omp_find_clause (octx->clauses, OMP_CLAUSE_BIND) == NULL_TREE)
+ {
+ error_at (EXPR_LOCATION (for_stmt),
+ "%<bind(parallel)%> on a %<loop%> construct nested "
+ "inside %<simd%> construct");
+ OMP_CLAUSE_BIND_KIND (bind) = OMP_CLAUSE_BIND_THREAD;
+ break;
+ }
+ kind = OMP_CLAUSE_BIND_PARALLEL;
+ break;
+ case OMP_CLAUSE_BIND_TEAMS:
+ if (!flag_openmp) /* flag_openmp_simd */
+ {
+ OMP_CLAUSE_BIND_KIND (bind) = OMP_CLAUSE_BIND_THREAD;
+ break;
+ }
+ if ((octx
+ && octx->region_type != ORT_IMPLICIT_TARGET
+ && octx->region_type != ORT_NONE
+ && (octx->region_type & ORT_TEAMS) == 0)
+ || in_omp_construct)
+ {
+ error_at (EXPR_LOCATION (for_stmt),
+ "%<bind(teams)%> on a %<loop%> region not strictly "
+ "nested inside of a %<teams%> region");
+ OMP_CLAUSE_BIND_KIND (bind) = OMP_CLAUSE_BIND_THREAD;
+ break;
+ }
+ kind = OMP_CLAUSE_BIND_TEAMS;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ for (tree *pc = &OMP_FOR_CLAUSES (for_stmt); *pc; )
+ switch (OMP_CLAUSE_CODE (*pc))
+ {
+ case OMP_CLAUSE_REDUCTION:
+ if (OMP_CLAUSE_REDUCTION_INSCAN (*pc))
+ {
+ error_at (OMP_CLAUSE_LOCATION (*pc),
+ "%<inscan%> %<reduction%> clause on "
+ "%qs construct", "loop");
+ OMP_CLAUSE_REDUCTION_INSCAN (*pc) = 0;
+ }
+ if (OMP_CLAUSE_REDUCTION_TASK (*pc))
+ {
+ error_at (OMP_CLAUSE_LOCATION (*pc),
+ "invalid %<task%> reduction modifier on construct "
+ "other than %<parallel%>, %<for%> or %<sections%>");
+ OMP_CLAUSE_REDUCTION_TASK (*pc) = 0;
+ }
+ pc = &OMP_CLAUSE_CHAIN (*pc);
+ break;
+ case OMP_CLAUSE_LASTPRIVATE:
+ for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
+ {
+ tree t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
+ gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
+ if (OMP_CLAUSE_DECL (*pc) == TREE_OPERAND (t, 0))
+ break;
+ if (OMP_FOR_ORIG_DECLS (for_stmt)
+ && TREE_CODE (TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (for_stmt),
+ i)) == TREE_LIST
+ && TREE_PURPOSE (TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (for_stmt),
+ i)))
+ {
+ tree orig = TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (for_stmt), i);
+ if (OMP_CLAUSE_DECL (*pc) == TREE_PURPOSE (orig))
+ break;
+ }
+ }
+ if (i == TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)))
+ {
+ error_at (OMP_CLAUSE_LOCATION (*pc),
+ "%<lastprivate%> clause on a %<loop%> construct refers "
+ "to a variable %qD which is not the loop iterator",
+ OMP_CLAUSE_DECL (*pc));
+ *pc = OMP_CLAUSE_CHAIN (*pc);
+ break;
+ }
+ pc = &OMP_CLAUSE_CHAIN (*pc);
+ break;
+ default:
+ pc = &OMP_CLAUSE_CHAIN (*pc);
+ break;
+ }
+
+ TREE_SET_CODE (for_stmt, OMP_SIMD);
+
+ int last;
+ switch (kind)
+ {
+ case OMP_CLAUSE_BIND_THREAD: last = 0; break;
+ case OMP_CLAUSE_BIND_PARALLEL: last = 1; break;
+ case OMP_CLAUSE_BIND_TEAMS: last = 2; break;
+ }
+ for (int pass = 1; pass <= last; pass++)
+ {
+ if (pass == 2)
+ {
+ tree bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
+ append_to_statement_list (*expr_p, &BIND_EXPR_BODY (bind));
+ *expr_p = make_node (OMP_PARALLEL);
+ TREE_TYPE (*expr_p) = void_type_node;
+ OMP_PARALLEL_BODY (*expr_p) = bind;
+ OMP_PARALLEL_COMBINED (*expr_p) = 1;
+ SET_EXPR_LOCATION (*expr_p, EXPR_LOCATION (for_stmt));
+ }
+ tree t = make_node (pass == 2 ? OMP_DISTRIBUTE : OMP_FOR);
+ tree *pc = &OMP_FOR_CLAUSES (t);
+ TREE_TYPE (t) = void_type_node;
+ OMP_FOR_BODY (t) = *expr_p;
+ SET_EXPR_LOCATION (t, EXPR_LOCATION (for_stmt));
+ for (tree c = OMP_FOR_CLAUSES (for_stmt); c; c = OMP_CLAUSE_CHAIN (c))
+ switch (OMP_CLAUSE_CODE (c))
+ {
+ case OMP_CLAUSE_BIND:
+ case OMP_CLAUSE_ORDER:
+ case OMP_CLAUSE_COLLAPSE:
+ *pc = copy_node (c);
+ pc = &OMP_CLAUSE_CHAIN (*pc);
+ break;
+ case OMP_CLAUSE_PRIVATE:
+ /* Only needed on innermost. */
+ break;
+ case OMP_CLAUSE_LASTPRIVATE:
+ *pc = copy_node (c);
+ OMP_CLAUSE_LASTPRIVATE_STMT (*pc) = NULL_TREE;
+ TREE_TYPE (*pc) = unshare_expr (TREE_TYPE (c));
+ pc = &OMP_CLAUSE_CHAIN (*pc);
+ break;
+ case OMP_CLAUSE_REDUCTION:
+ *pc = copy_node (c);
+ OMP_CLAUSE_DECL (*pc) = unshare_expr (OMP_CLAUSE_DECL (c));
+ TREE_TYPE (*pc) = unshare_expr (TREE_TYPE (c));
+ OMP_CLAUSE_REDUCTION_INIT (*pc)
+ = unshare_expr (OMP_CLAUSE_REDUCTION_INIT (c));
+ OMP_CLAUSE_REDUCTION_MERGE (*pc)
+ = unshare_expr (OMP_CLAUSE_REDUCTION_MERGE (c));
+ if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (*pc))
+ {
+ OMP_CLAUSE_REDUCTION_PLACEHOLDER (*pc)
+ = copy_node (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c));
+ if (OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (*pc))
+ OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (*pc)
+ = copy_node (OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c));
+ tree nc = *pc;
+ tree data[2] = { c, nc };
+ walk_tree_without_duplicates (&OMP_CLAUSE_REDUCTION_INIT (nc),
+ replace_reduction_placeholders,
+ data);
+ walk_tree_without_duplicates (&OMP_CLAUSE_REDUCTION_MERGE (nc),
+ replace_reduction_placeholders,
+ data);
+ }
+ pc = &OMP_CLAUSE_CHAIN (*pc);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ *pc = NULL_TREE;
+ *expr_p = t;
+ }
+ return gimplify_omp_for (expr_p, pre_p);
+}
+
+
/* Helper function of optimize_target_teams, find OMP_TEAMS inside
of OMP_TARGET's body. */
@@ -11974,10 +12241,7 @@ gimplify_omp_workshare (tree *expr_p, gimple_seq *pre_p)
case OMP_TEAMS:
ort = OMP_TEAMS_COMBINED (expr) ? ORT_COMBINED_TEAMS : ORT_TEAMS;
if (gimplify_omp_ctxp == NULL
- || (gimplify_omp_ctxp->region_type == ORT_TARGET
- && gimplify_omp_ctxp->outer_context == NULL
- && lookup_attribute ("omp declare target",
- DECL_ATTRIBUTES (current_function_decl))))
+ || gimplify_omp_ctxp->region_type == ORT_IMPLICIT_TARGET)
ort = (enum omp_region_type) (ort | ORT_HOST_TEAMS);
break;
case OACC_HOST_DATA:
@@ -11986,6 +12250,10 @@ gimplify_omp_workshare (tree *expr_p, gimple_seq *pre_p)
default:
gcc_unreachable ();
}
+
+ bool save_in_omp_construct = in_omp_construct;
+ if ((ort & ORT_ACC) == 0)
+ in_omp_construct = false;
gimplify_scan_omp_clauses (&OMP_CLAUSES (expr), pre_p, ort,
TREE_CODE (expr));
if (TREE_CODE (expr) == OMP_TARGET)
@@ -12027,6 +12295,7 @@ gimplify_omp_workshare (tree *expr_p, gimple_seq *pre_p)
gimplify_and_add (OMP_BODY (expr), &body);
gimplify_adjust_omp_clauses (pre_p, body, &OMP_CLAUSES (expr),
TREE_CODE (expr));
+ in_omp_construct = save_in_omp_construct;
switch (TREE_CODE (expr))
{
@@ -13266,6 +13535,10 @@ gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
ret = gimplify_omp_for (expr_p, pre_p);
break;
+ case OMP_LOOP:
+ ret = gimplify_omp_loop (expr_p, pre_p);
+ break;
+
case OACC_CACHE:
gimplify_oacc_cache (expr_p, pre_p);
ret = GS_ALL_DONE;
@@ -13307,8 +13580,11 @@ gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
{
gimple_seq body = NULL;
gimple *g;
+ bool saved_in_omp_construct = in_omp_construct;
+ in_omp_construct = true;
gimplify_and_add (OMP_BODY (*expr_p), &body);
+ in_omp_construct = saved_in_omp_construct;
switch (TREE_CODE (*expr_p))
{
case OMP_SECTION:
@@ -13351,10 +13627,14 @@ gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
gimple_seq body = NULL;
tree *pclauses = &OMP_TASKGROUP_CLAUSES (*expr_p);
+ bool saved_in_omp_construct = in_omp_construct;
gimplify_scan_omp_clauses (pclauses, pre_p, ORT_TASKGROUP,
OMP_TASKGROUP);
gimplify_adjust_omp_clauses (pre_p, NULL, pclauses, OMP_TASKGROUP);
+
+ in_omp_construct = true;
gimplify_and_add (OMP_BODY (*expr_p), &body);
+ in_omp_construct = saved_in_omp_construct;
gimple_seq cleanup = NULL;
tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_END);
gimple *g = gimple_build_call (fn, 0);
@@ -13977,7 +14257,7 @@ gimplify_body (tree fndecl, bool do_parms)
{
gcc_assert (gimplify_omp_ctxp == NULL);
if (lookup_attribute ("omp declare target", DECL_ATTRIBUTES (fndecl)))
- gimplify_omp_ctxp = new_omp_context (ORT_TARGET);
+ gimplify_omp_ctxp = new_omp_context (ORT_IMPLICIT_TARGET);
}
/* Unshare most shared trees in the body and in that of any nested functions.
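In rough terms -- this is a reading of gimplify_omp_loop above, not wording from the patch -- the new lowering maps a loop construct onto existing constructs according to its binding.  The sketch below is illustrative and omits the clause copying:

  /* Illustrative only.  gimplify_omp_loop rewrites OMP_LOOP into OMP_SIMD
     and then, depending on the bind kind, wraps it in OMP_FOR and, for
     bind(teams), additionally in OMP_PARALLEL and OMP_DISTRIBUTE -- roughly:

       #pragma omp loop bind(thread)    ~>  #pragma omp simd
       #pragma omp loop bind(parallel)  ~>  #pragma omp for simd
       #pragma omp loop bind(teams)     ~>  #pragma omp distribute parallel for simd

     with order(concurrent) supplied implicitly when no order clause was
     written.  */
  void
  lowering_sketch (int n, int *v)
  {
    #pragma omp teams
    #pragma omp loop            /* bind(teams) is implied here.  */
    for (int i = 0; i < n; i++)
      v[i]++;
  }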
diff --git a/gcc/omp-low.c b/gcc/omp-low.c
index 03df07b..d8756c0 100644
--- a/gcc/omp-low.c
+++ b/gcc/omp-low.c
@@ -153,6 +153,9 @@ struct omp_context
/* True if there is order(concurrent) clause on the construct. */
bool order_concurrent;
+
+ /* True if there is bind clause on the construct (i.e. a loop construct). */
+ bool loop_p;
};
static splay_tree all_contexts;
@@ -581,6 +584,7 @@ build_outer_var_ref (tree var, omp_context *ctx,
}
else if ((gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
+ || ctx->loop_p
|| (code == OMP_CLAUSE_PRIVATE
&& (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
|| gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS
@@ -1397,6 +1401,10 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
ctx->order_concurrent = true;
break;
+ case OMP_CLAUSE_BIND:
+ ctx->loop_p = true;
+ break;
+
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_COLLAPSE:
@@ -1603,6 +1611,7 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
case OMP_CLAUSE_NOGROUP:
case OMP_CLAUSE_DEFAULTMAP:
case OMP_CLAUSE_ORDER:
+ case OMP_CLAUSE_BIND:
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_NONTEMPORAL:
case OMP_CLAUSE_ASYNC:
@@ -2675,7 +2684,8 @@ check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
&& gimple_code (ctx->outer->stmt) == GIMPLE_OMP_FOR)
ctx = ctx->outer;
if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
- && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
+ && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD
+ && !ctx->loop_p)
{
c = NULL_TREE;
if (ctx->order_concurrent
@@ -2684,8 +2694,8 @@ check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
|| gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE))
{
error_at (gimple_location (stmt),
- "OpenMP constructs other than %<parallel%> or"
- " %<simd%> may not be nested inside a region with"
+ "OpenMP constructs other than %<parallel%>, %<loop%>"
+ " or %<simd%> may not be nested inside a region with"
" the %<order(concurrent)%> clause");
return false;
}
@@ -2714,23 +2724,28 @@ check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
|| gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE
|| gimple_code (stmt) == GIMPLE_OMP_SCAN)
return true;
+ else if (gimple_code (stmt) == GIMPLE_OMP_FOR
+ && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
+ return true;
error_at (gimple_location (stmt),
- "OpenMP constructs other than %<#pragma omp ordered simd%>"
- " or %<#pragma omp atomic%> may not be nested inside"
- " %<simd%> region");
+ "OpenMP constructs other than "
+ "%<ordered simd%>, %<simd%>, %<loop%> or %<atomic%> may "
+ "not be nested inside %<simd%> region");
return false;
}
else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
{
if ((gimple_code (stmt) != GIMPLE_OMP_FOR
- || ((gimple_omp_for_kind (stmt) != GF_OMP_FOR_KIND_DISTRIBUTE)
- && (gimple_omp_for_kind (stmt) != GF_OMP_FOR_KIND_GRID_LOOP)))
+ || (gimple_omp_for_kind (stmt) != GF_OMP_FOR_KIND_DISTRIBUTE
+ && gimple_omp_for_kind (stmt) != GF_OMP_FOR_KIND_GRID_LOOP
+ && omp_find_clause (gimple_omp_for_clauses (stmt),
+ OMP_CLAUSE_BIND) == NULL_TREE))
&& gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
{
error_at (gimple_location (stmt),
- "only %<distribute%> or %<parallel%> regions are "
- "allowed to be strictly nested inside %<teams%> "
- "region");
+ "only %<distribute%>, %<parallel%> or %<loop%> "
+ "regions are allowed to be strictly nested inside "
+ "%<teams%> region");
return false;
}
}
@@ -2740,10 +2755,15 @@ check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
|| gimple_omp_for_kind (stmt) != GF_OMP_FOR_KIND_SIMD)
&& gimple_code (stmt) != GIMPLE_OMP_SCAN)
{
- error_at (gimple_location (stmt),
- "OpenMP constructs other than %<parallel%> or"
- " %<simd%> may not be nested inside a region with"
- " the %<order(concurrent)%> clause");
+ if (ctx->loop_p)
+ error_at (gimple_location (stmt),
+ "OpenMP constructs other than %<parallel%>, %<loop%> or "
+ "%<simd%> may not be nested inside a %<loop%> region");
+ else
+ error_at (gimple_location (stmt),
+ "OpenMP constructs other than %<parallel%>, %<loop%> or "
+ "%<simd%> may not be nested inside a region with "
+ "the %<order(concurrent)%> clause");
return false;
}
}
@@ -2766,6 +2786,11 @@ check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
/* We split taskloop into task and nested taskloop in it. */
if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_TASKLOOP)
return true;
+ /* For now, hope this will change and loop bind(parallel) will not
+ be allowed in lots of contexts. */
+ if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR
+ && omp_find_clause (gimple_omp_for_clauses (stmt), OMP_CLAUSE_BIND))
+ return true;
if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
{
bool ok = false;
@@ -2816,8 +2841,8 @@ check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
const char *construct
= (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
== BUILT_IN_GOMP_CANCEL)
- ? "#pragma omp cancel"
- : "#pragma omp cancellation point";
+ ? "cancel"
+ : "cancellation point";
if (ctx == NULL)
{
error_at (gimple_location (stmt), "orphaned %qs construct",
@@ -2830,7 +2855,7 @@ check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
{
case 1:
if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
- bad = "#pragma omp parallel";
+ bad = "parallel";
else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
== BUILT_IN_GOMP_CANCEL
&& !integer_zerop (gimple_call_arg (stmt, 1)))
@@ -2840,7 +2865,7 @@ check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
case 2:
if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
|| gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
- bad = "#pragma omp for";
+ bad = "for";
else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
== BUILT_IN_GOMP_CANCEL
&& !integer_zerop (gimple_call_arg (stmt, 1)))
@@ -2849,12 +2874,12 @@ check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
if (omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
OMP_CLAUSE_NOWAIT))
warning_at (gimple_location (stmt), 0,
- "%<#pragma omp cancel for%> inside "
+ "%<cancel for%> inside "
"%<nowait%> for construct");
if (omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
OMP_CLAUSE_ORDERED))
warning_at (gimple_location (stmt), 0,
- "%<#pragma omp cancel for%> inside "
+ "%<cancel for%> inside "
"%<ordered%> for construct");
}
kind = "for";
@@ -2862,7 +2887,7 @@ check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
case 4:
if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
&& gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
- bad = "#pragma omp sections";
+ bad = "sections";
else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
== BUILT_IN_GOMP_CANCEL
&& !integer_zerop (gimple_call_arg (stmt, 1)))
@@ -2874,7 +2899,7 @@ check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
(ctx->stmt),
OMP_CLAUSE_NOWAIT))
warning_at (gimple_location (stmt), 0,
- "%<#pragma omp cancel sections%> inside "
+ "%<cancel sections%> inside "
"%<nowait%> sections construct");
}
else
@@ -2887,7 +2912,7 @@ check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
(ctx->outer->stmt),
OMP_CLAUSE_NOWAIT))
warning_at (gimple_location (stmt), 0,
- "%<#pragma omp cancel sections%> inside "
+ "%<cancel sections%> inside "
"%<nowait%> sections construct");
}
}
@@ -2898,7 +2923,7 @@ check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
&& (!is_taskloop_ctx (ctx)
|| ctx->outer == NULL
|| !is_task_ctx (ctx->outer)))
- bad = "#pragma omp task";
+ bad = "task";
else
{
for (omp_context *octx = ctx->outer;
@@ -2976,14 +3001,14 @@ check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
return true;
error_at (gimple_location (stmt),
"barrier region may not be closely nested inside "
- "of work-sharing, %<critical%>, %<ordered%>, "
- "%<master%>, explicit %<task%> or %<taskloop%> "
- "region");
+ "of work-sharing, %<loop%>, %<critical%>, "
+ "%<ordered%>, %<master%>, explicit %<task%> or "
+ "%<taskloop%> region");
return false;
}
error_at (gimple_location (stmt),
"work-sharing region may not be closely nested inside "
- "of work-sharing, %<critical%>, %<ordered%>, "
+ "of work-sharing, %<loop%>, %<critical%>, %<ordered%>, "
"%<master%>, explicit %<task%> or %<taskloop%> region");
return false;
case GIMPLE_OMP_PARALLEL:
@@ -3012,8 +3037,8 @@ check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
case GIMPLE_OMP_TASK:
error_at (gimple_location (stmt),
"%<master%> region may not be closely nested inside "
- "of work-sharing, explicit %<task%> or %<taskloop%> "
- "region");
+ "of work-sharing, %<loop%>, explicit %<task%> or "
+ "%<taskloop%> region");
return false;
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TEAMS:
@@ -3497,11 +3522,12 @@ scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
if (ctx
&& gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD
- && setjmp_or_longjmp_p (fndecl))
+ && setjmp_or_longjmp_p (fndecl)
+ && !ctx->loop_p)
{
remove = true;
error_at (gimple_location (stmt),
- "setjmp/longjmp inside simd construct");
+ "setjmp/longjmp inside %<simd%> construct");
}
else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
switch (DECL_FUNCTION_CODE (fndecl))
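The scan_omp_1_stmt hunk above implements the "Allow setjmp inside of loop construct" ChangeLog item; a minimal sketch of what is now accepted (illustrative only, not a testcase from the patch):

  /* Illustrative only.  setjmp remains rejected inside a simd construct,
     but a loop construct -- even though it is lowered via simd -- is
     scanned with ctx->loop_p set and is no longer diagnosed.  */
  #include <setjmp.h>

  static jmp_buf env;

  void
  setjmp_in_loop (int n, int *v)
  {
    #pragma omp loop bind(thread)
    for (int i = 0; i < n; i++)
      if (setjmp (env) == 0)
        v[i]++;
  }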
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index a7c9426..adefdb9 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,5 +1,19 @@
2019-07-20 Jakub Jelinek <jakub@redhat.com>
+ * c-c++-common/gomp/cancel-1.c: Adjust expected diagnostic wording.
+ * c-c++-common/gomp/clauses-1.c (foo, baz, bar): Add order(concurrent)
+ clause where allowed. Add combined constructs with loop with all
+ possible clauses.
+ (qux): New function.
+ * c-c++-common/gomp/loop-1.c: New test.
+ * c-c++-common/gomp/loop-2.c: New test.
+ * c-c++-common/gomp/loop-3.c: New test.
+ * c-c++-common/gomp/loop-4.c: New test.
+ * c-c++-common/gomp/loop-5.c: New test.
+ * c-c++-common/gomp/order-3.c: Adjust expected diagnostic wording.
+ * c-c++-common/gomp/simd-setjmp-1.c: New test.
+ * c-c++-common/gomp/teams-2.c: Adjust expected diagnostic wording.
+
* gcc.dg/vect/vect-simd-16.c: New test.
2019-07-19 Jeff Law <law@redhat.com>
diff --git a/gcc/testsuite/c-c++-common/gomp/cancel-1.c b/gcc/testsuite/c-c++-common/gomp/cancel-1.c
index 03aedeb..5255dd3 100644
--- a/gcc/testsuite/c-c++-common/gomp/cancel-1.c
+++ b/gcc/testsuite/c-c++-common/gomp/cancel-1.c
@@ -336,14 +336,14 @@ f2 (void)
}
#pragma omp target teams
{
- #pragma omp cancel parallel /* { dg-error "only .distribute. or .parallel. regions are allowed to be strictly nested" } */
- #pragma omp cancel for /* { dg-error "only .distribute. or .parallel. regions are allowed to be strictly nested" } */
- #pragma omp cancel sections /* { dg-error "only .distribute. or .parallel. regions are allowed to be strictly nested" } */
- #pragma omp cancel taskgroup /* { dg-error "only .distribute. or .parallel. regions are allowed to be strictly nested" } */
- #pragma omp cancellation point parallel /* { dg-error "only .distribute. or .parallel. regions are allowed to be strictly nested" } */
- #pragma omp cancellation point for /* { dg-error "only .distribute. or .parallel. regions are allowed to be strictly nested" } */
- #pragma omp cancellation point sections /* { dg-error "only .distribute. or .parallel. regions are allowed to be strictly nested" } */
- #pragma omp cancellation point taskgroup /* { dg-error "only .distribute. or .parallel. regions are allowed to be strictly nested" } */
+ #pragma omp cancel parallel /* { dg-error "only .distribute., .parallel. or .loop. regions are allowed to be strictly nested" } */
+ #pragma omp cancel for /* { dg-error "only .distribute., .parallel. or .loop. regions are allowed to be strictly nested" } */
+ #pragma omp cancel sections /* { dg-error "only .distribute., .parallel. or .loop. regions are allowed to be strictly nested" } */
+ #pragma omp cancel taskgroup /* { dg-error "only .distribute., .parallel. or .loop. regions are allowed to be strictly nested" } */
+ #pragma omp cancellation point parallel /* { dg-error "only .distribute., .parallel. or .loop. regions are allowed to be strictly nested" } */
+ #pragma omp cancellation point for /* { dg-error "only .distribute., .parallel. or .loop. regions are allowed to be strictly nested" } */
+ #pragma omp cancellation point sections /* { dg-error "only .distribute., .parallel. or .loop. regions are allowed to be strictly nested" } */
+ #pragma omp cancellation point taskgroup /* { dg-error "only .distribute., .parallel. or .loop. regions are allowed to be strictly nested" } */
}
#pragma omp target teams distribute
for (i = 0; i < 10; i++)
diff --git a/gcc/testsuite/c-c++-common/gomp/clauses-1.c b/gcc/testsuite/c-c++-common/gomp/clauses-1.c
index 652270c..be42797 100644
--- a/gcc/testsuite/c-c++-common/gomp/clauses-1.c
+++ b/gcc/testsuite/c-c++-common/gomp/clauses-1.c
@@ -14,22 +14,32 @@ foo (int d, int m, int i1, int i2, int p, int *idp, int s,
#pragma omp distribute parallel for \
private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
- lastprivate (l) schedule(static, 4)
+ lastprivate (l) schedule(static, 4) order(concurrent)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp distribute parallel for simd \
private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
if (parallel: i2) if(simd: i1) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4) nontemporal(ntm) \
- safelen(8) simdlen(4) aligned(q: 32)
+ safelen(8) simdlen(4) aligned(q: 32) order(concurrent)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp distribute simd \
private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
- safelen(8) simdlen(4) aligned(q: 32) reduction(+:r) if(i1) nontemporal(ntm)
+ safelen(8) simdlen(4) aligned(q: 32) reduction(+:r) if(i1) nontemporal(ntm) \
+ order(concurrent)
for (int i = 0; i < 64; i++)
ll++;
}
+
+void
+qux (int p)
+{
+ #pragma omp loop bind(teams) order(concurrent) \
+ private (p) lastprivate (l) collapse(1) reduction(+:r)
+ for (l = 0; l < 64; ++l)
+ ll++;
+}
#pragma omp end declare target

void
@@ -39,21 +49,26 @@ baz (int d, int m, int i1, int i2, int p, int *idp, int s,
#pragma omp distribute parallel for \
private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
- lastprivate (l) schedule(static, 4) copyin(t)
+ lastprivate (l) schedule(static, 4) copyin(t) order(concurrent)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp distribute parallel for simd \
private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
if (parallel: i2) if(simd: i1) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4) nontemporal(ntm) \
- safelen(8) simdlen(4) aligned(q: 32) copyin(t)
+ safelen(8) simdlen(4) aligned(q: 32) copyin(t) order(concurrent)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp distribute simd \
private (p) firstprivate (f) collapse(1) dist_schedule(static, 16) \
- safelen(8) simdlen(4) aligned(q: 32) reduction(+:r) if(i1) nontemporal(ntm)
+ safelen(8) simdlen(4) aligned(q: 32) reduction(+:r) if(i1) nontemporal(ntm) \
+ order(concurrent)
for (int i = 0; i < 64; i++)
ll++;
+ #pragma omp loop bind(parallel) order(concurrent) \
+ private (p) lastprivate (l) collapse(1) reduction(+:r)
+ for (l = 0; l < 64; ++l)
+ ll++;
}

void
@@ -62,7 +77,7 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
{
#pragma omp for simd \
private (p) firstprivate (f) lastprivate (l) linear (ll:1) reduction(+:r) schedule(static, 4) collapse(1) nowait \
- safelen(8) simdlen(4) aligned(q: 32) nontemporal(ntm) if(i1)
+ safelen(8) simdlen(4) aligned(q: 32) nontemporal(ntm) if(i1) order(concurrent)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel for \
@@ -70,10 +85,15 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
lastprivate (l) linear (ll:1) ordered schedule(static, 4) collapse(1)
for (int i = 0; i < 64; i++)
ll++;
+ #pragma omp parallel for \
+ private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \
+ lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) order(concurrent)
+ for (int i = 0; i < 64; i++)
+ ll++;
#pragma omp parallel for simd \
private (p) firstprivate (f) if (i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) \
- safelen(8) simdlen(4) aligned(q: 32) nontemporal(ntm)
+ safelen(8) simdlen(4) aligned(q: 32) nontemporal(ntm) order(concurrent)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel sections \
@@ -96,11 +116,17 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
lastprivate (l) linear (ll:1) ordered schedule(static, 4) collapse(1) nowait depend(inout: dd[0])
for (int i = 0; i < 64; i++)
ll++;
+ #pragma omp target parallel for \
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
+ if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
+ lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) nowait depend(inout: dd[0]) order(concurrent)
+ for (int i = 0; i < 64; i++)
+ ll++;
#pragma omp target parallel for simd \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
lastprivate (l) linear (ll:1) schedule(static, 4) collapse(1) \
- safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0]) nontemporal(ntm) if (simd: i3)
+ safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0]) nontemporal(ntm) if (simd: i3) order(concurrent)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target teams \
@@ -118,7 +144,7 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) \
if (parallel: i2) num_threads (nth) proc_bind(spread) \
- lastprivate (l) schedule(static, 4) nowait depend(inout: dd[0])
+ lastprivate (l) schedule(static, 4) nowait depend(inout: dd[0]) order(concurrent)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target teams distribute parallel for simd \
@@ -126,39 +152,42 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) \
if (parallel: i2) num_threads (nth) proc_bind(spread) \
- lastprivate (l) schedule(static, 4) \
+ lastprivate (l) schedule(static, 4) order(concurrent) \
safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0]) nontemporal(ntm) if (simd: i3)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target teams distribute simd \
device(d) map (tofrom: m) if (i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
- collapse(1) dist_schedule(static, 16) \
+ collapse(1) dist_schedule(static, 16) order(concurrent) \
safelen(8) simdlen(4) aligned(q: 32) nowait depend(inout: dd[0]) nontemporal(ntm)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target simd \
device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
safelen(8) simdlen(4) lastprivate (l) linear(ll: 1) aligned(q: 32) reduction(+:r) \
- nowait depend(inout: dd[0]) nontemporal(ntm) if(simd:i3)
+ nowait depend(inout: dd[0]) nontemporal(ntm) if(simd:i3) order(concurrent)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp taskgroup task_reduction(+:r2)
#pragma omp taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) if(simd: i2) final(fi) mergeable priority (pp) \
- safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) in_reduction(+:r2) nontemporal(ntm)
+ safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) in_reduction(+:r2) nontemporal(ntm) \
+ order(concurrent)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp taskgroup task_reduction(+:r)
#pragma omp taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(i1) final(fi) mergeable nogroup priority (pp) \
- safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) in_reduction(+:r) nontemporal(ntm)
+ safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) in_reduction(+:r) nontemporal(ntm) \
+ order(concurrent)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp taskwait
#pragma omp taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) if(taskloop: i1) final(fi) priority (pp) \
- safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(+:r) if (simd: i3) nontemporal(ntm)
+ safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(+:r) if (simd: i3) nontemporal(ntm) \
+ order(concurrent)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target nowait depend(inout: dd[0])
@@ -172,7 +201,7 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) \
if (parallel: i2) num_threads (nth) proc_bind(spread) \
- lastprivate (l) schedule(static, 4)
+ lastprivate (l) schedule(static, 4) order(concurrent)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target
@@ -180,20 +209,20 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) \
if (parallel: i2) num_threads (nth) proc_bind(spread) \
- lastprivate (l) schedule(static, 4) \
+ lastprivate (l) schedule(static, 4) order(concurrent) \
safelen(8) simdlen(4) aligned(q: 32) if (simd: i3) nontemporal(ntm)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp target
#pragma omp teams distribute simd \
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
- collapse(1) dist_schedule(static, 16) \
+ collapse(1) dist_schedule(static, 16) order(concurrent) \
safelen(8) simdlen(4) aligned(q: 32) if(i3) nontemporal(ntm)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp teams distribute parallel for \
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
- collapse(1) dist_schedule(static, 16) \
+ collapse(1) dist_schedule(static, 16) order(concurrent) \
if (parallel: i2) num_threads (nth) proc_bind(spread) \
lastprivate (l) schedule(static, 4) copyin(t)
for (int i = 0; i < 64; i++)
@@ -202,13 +231,13 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
collapse(1) dist_schedule(static, 16) \
if (parallel: i2) num_threads (nth) proc_bind(spread) \
- lastprivate (l) schedule(static, 4) \
+ lastprivate (l) schedule(static, 4) order(concurrent) \
safelen(8) simdlen(4) aligned(q: 32) if (simd: i3) nontemporal(ntm) copyin(t)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp teams distribute simd \
private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
- collapse(1) dist_schedule(static, 16) \
+ collapse(1) dist_schedule(static, 16) order(concurrent) \
safelen(8) simdlen(4) aligned(q: 32) if(i3) nontemporal(ntm)
for (int i = 0; i < 64; i++)
ll++;
@@ -225,7 +254,8 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
#pragma omp taskgroup task_reduction (+:r2)
#pragma omp master taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) if(simd: i2) final(fi) mergeable priority (pp) \
- safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) in_reduction(+:r2) nontemporal(ntm)
+ safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) in_reduction(+:r2) nontemporal(ntm) \
+ order(concurrent)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel master taskloop \
@@ -235,7 +265,8 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
ll++;
#pragma omp parallel master taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) grainsize (g) collapse(1) untied if(taskloop: i1) if(simd: i2) final(fi) mergeable priority (pp) \
- safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) nontemporal(ntm) if (parallel: i2) num_threads (nth) proc_bind(spread) copyin(t)
+ safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) nontemporal(ntm) if (parallel: i2) num_threads (nth) proc_bind(spread) copyin(t) \
+ order(concurrent)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp taskgroup task_reduction (+:r2)
@@ -247,7 +278,8 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
#pragma omp taskgroup task_reduction (+:r2)
#pragma omp master taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp) \
- safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) in_reduction(+:r2) nontemporal(ntm)
+ safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) in_reduction(+:r2) nontemporal(ntm) \
+ order(concurrent)
for (int i = 0; i < 64; i++)
ll++;
#pragma omp parallel master taskloop \
@@ -257,7 +289,56 @@ bar (int d, int m, int i1, int i2, int i3, int p, int *idp, int s,
ll++;
#pragma omp parallel master taskloop simd \
private (p) firstprivate (f) lastprivate (l) shared (s) default(shared) num_tasks (nta) collapse(1) untied if(i1) final(fi) mergeable priority (pp) \
- safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) nontemporal(ntm) num_threads (nth) proc_bind(spread) copyin(t)
+ safelen(8) simdlen(4) linear(ll: 1) aligned(q: 32) reduction(default, +:r) nontemporal(ntm) num_threads (nth) proc_bind(spread) copyin(t) \
+ order(concurrent)
for (int i = 0; i < 64; i++)
ll++;
+ #pragma omp loop bind(thread) order(concurrent) \
+ private (p) lastprivate (l) collapse(1) reduction(+:r)
+ for (l = 0; l < 64; ++l)
+ ll++;
+ #pragma omp parallel loop \
+ private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \
+ lastprivate (l) collapse(1) bind(parallel) order(concurrent)
+ for (l = 0; l < 64; l++)
+ ll++;
+ #pragma omp parallel loop \
+ private (p) firstprivate (f) if (parallel: i2) default(shared) shared(s) copyin(t) reduction(+:r) num_threads (nth) proc_bind(spread) \
+ lastprivate (l) collapse(1)
+ for (l = 0; l < 64; l++)
+ ll++;
+ #pragma omp teams loop \
+ private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
+ collapse(1) lastprivate (l) bind(teams)
+ for (l = 0; l < 64; ++l)
+ ;
+ #pragma omp teams loop \
+ private(p) firstprivate (f) shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) \
+ collapse(1) lastprivate (l) order(concurrent)
+ for (l = 0; l < 64; ++l)
+ ;
+ #pragma omp target parallel loop \
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
+ if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
+ nowait depend(inout: dd[0]) lastprivate (l) bind(parallel) order(concurrent) collapse(1)
+ for (l = 0; l < 64; ++l)
+ ;
+ #pragma omp target parallel loop \
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
+ if (parallel: i2) default(shared) shared(s) reduction(+:r) num_threads (nth) proc_bind(spread) \
+ nowait depend(inout: dd[0]) lastprivate (l) order(concurrent) collapse(1)
+ for (l = 0; l < 64; ++l)
+ ;
+ #pragma omp target teams loop \
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
+ shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) nowait depend(inout: dd[0]) \
+ lastprivate (l) bind(teams) collapse(1)
+ for (l = 0; l < 64; ++l)
+ ;
+ #pragma omp target teams loop \
+ device(d) map (tofrom: m) if (target: i1) private (p) firstprivate (f) defaultmap(tofrom: scalar) is_device_ptr (idp) \
+ shared(s) default(shared) reduction(+:r) num_teams(nte) thread_limit(tl) nowait depend(inout: dd[0]) \
+ lastprivate (l) order(concurrent) collapse(1)
+ for (l = 0; l < 64; ++l)
+ ;
}
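
Distilled to essentials, the new 'loop' forms exercised above come down to the following sketch; it is illustrative only, not part of the patch, and loop_sketch is a placeholder name.

void
loop_sketch (int *a)
{
  int i;
  /* Orphaned loop construct: the bind clause selects the binding region,
     order(concurrent) asserts the iterations may run concurrently.  */
  #pragma omp loop bind(thread) order(concurrent)
  for (i = 0; i < 64; i++)
    a[i] = i;
  /* Combined with parallel.  */
  #pragma omp parallel loop bind(parallel) order(concurrent)
  for (i = 0; i < 64; i++)
    a[i] += i;
  /* Combined with teams.  */
  #pragma omp teams loop bind(teams)
  for (i = 0; i < 64; i++)
    a[i] += i;
}
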
diff --git a/gcc/testsuite/c-c++-common/gomp/loop-1.c b/gcc/testsuite/c-c++-common/gomp/loop-1.c
new file mode 100644
index 0000000..d2f943a
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/gomp/loop-1.c
@@ -0,0 +1,271 @@
+void foo (void);
+int v;
+#ifdef __cplusplus
+extern "C" {
+#endif
+int omp_get_thread_num (void);
+int omp_get_num_threads (void);
+int omp_target_is_present (const void *, int);
+int omp_get_cancellation (void);
+#ifdef __cplusplus
+}
+#endif
+
+void
+f1 (int *a)
+{
+ int i;
+ #pragma omp simd order(concurrent)
+ for (i = 0; i < 64; i++)
+ {
+ int j;
+ #pragma omp loop
+ for (j = 0; j < 64; j++)
+ a[64 * i + j] = i + j;
+ }
+}
+
+void
+f2 (int *a)
+{
+ int i;
+ #pragma omp for simd order(concurrent)
+ for (i = 0; i < 64; i++)
+ {
+ int j;
+ #pragma omp loop
+ for (j = 0; j < 64; j++)
+ a[64 * i + j] = i + j;
+ }
+}
+
+void
+f3 (int *a)
+{
+ int i;
+ #pragma omp for order(concurrent)
+ for (i = 0; i < 64; i++)
+ {
+ int j;
+ #pragma omp loop
+ for (j = 0; j < 64; j++)
+ a[64 * i + j] = i + j;
+ }
+}
+
+void
+f4 (int *a)
+{
+ int i;
+ #pragma omp loop order(concurrent) bind(parallel)
+ for (i = 0; i < 64; i++)
+ {
+ #pragma omp parallel
+ foo ();
+ }
+ #pragma omp loop order(concurrent) bind(parallel)
+ for (i = 0; i < 64; i++)
+ {
+ int j;
+ #pragma omp simd
+ for (j = 0; j < 64; j++)
+ a[64 * i + j] = i + j;
+ }
+ #pragma omp loop order(concurrent) bind(parallel)
+ for (i = 0; i < 64; i++)
+ {
+ int j;
+ #pragma omp loop
+ for (j = 0; j < 64; j++)
+ a[64 * i + j] = i + j;
+ }
+ #pragma omp loop order(concurrent) bind(parallel)
+ for (i = 0; i < 64; i++)
+ {
+ #pragma omp critical /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a 'loop' region" } */
+ foo ();
+ }
+ #pragma omp loop order(concurrent) bind(parallel)
+ for (i = 0; i < 64; i++)
+ {
+ #pragma omp ordered simd /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a 'loop' region" } */
+ foo ();
+ }
+ #pragma omp loop order(concurrent) bind(parallel)
+ for (i = 0; i < 64; i++)
+ {
+ #pragma omp atomic /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a 'loop' region" } */
+ v++;
+ }
+ #pragma omp loop order(concurrent) bind(parallel)
+ for (i = 0; i < 64; i++)
+ {
+ #pragma omp atomic read
+ a[i] = v; /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a 'loop' region" } */
+ }
+ #pragma omp loop order(concurrent) bind(parallel)
+ for (i = 0; i < 64; i++)
+ {
+ #pragma omp atomic write /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a 'loop' region" "" { target c++ } } */
+ v = a[i]; /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a 'loop' region" "" { target c } } */
+ }
+ #pragma omp loop order(concurrent) bind(parallel)
+ for (i = 0; i < 64; i++)
+ a[i] += omp_get_thread_num (); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_thread_num\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+ #pragma omp loop order(concurrent) bind(parallel)
+ for (i = 0; i < 64; i++)
+ a[i] += omp_get_num_threads (); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_num_threads\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+ #pragma omp loop order(concurrent) bind(parallel)
+ for (i = 0; i < 64; i++)
+ a[i] += omp_target_is_present (a + i, 0); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_target_is_present\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+ #pragma omp loop order(concurrent) bind(parallel)
+ for (i = 0; i < 64; i++)
+ a[i] += omp_get_cancellation (); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_cancellation\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+}
+
+void
+f5 (int *a)
+{
+ int i;
+ #pragma omp parallel
+ {
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ {
+ #pragma omp parallel
+ foo ();
+ }
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ {
+ int j;
+ #pragma omp simd
+ for (j = 0; j < 64; j++)
+ a[64 * i + j] = i + j;
+ }
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ {
+ int j;
+ #pragma omp loop
+ for (j = 0; j < 64; j++)
+ a[64 * i + j] = i + j;
+ }
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ {
+ #pragma omp critical /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a 'loop' region" } */
+ foo ();
+ }
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ {
+ #pragma omp ordered simd /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a 'loop' region" } */
+ foo ();
+ }
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ {
+ #pragma omp atomic /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a 'loop' region" } */
+ v++;
+ }
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ {
+ #pragma omp atomic read
+ a[i] = v; /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a 'loop' region" } */
+ }
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ {
+ #pragma omp atomic write /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a 'loop' region" "" { target c++ } } */
+ v = a[i]; /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a 'loop' region" "" { target c } } */
+ }
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ a[i] += omp_get_thread_num (); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_thread_num\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ a[i] += omp_get_num_threads (); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_num_threads\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ a[i] += omp_target_is_present (a + i, 0); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_target_is_present\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ a[i] += omp_get_cancellation (); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_cancellation\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+ }
+}
+
+void
+f6 (int *a)
+{
+ int i;
+ #pragma omp master
+ {
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ {
+ #pragma omp parallel
+ foo ();
+ }
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ {
+ int j;
+ #pragma omp simd
+ for (j = 0; j < 64; j++)
+ a[64 * i + j] = i + j;
+ }
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ {
+ int j;
+ #pragma omp loop
+ for (j = 0; j < 64; j++)
+ a[64 * i + j] = i + j;
+ }
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ {
+ #pragma omp critical /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a 'loop' region" } */
+ foo ();
+ }
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ {
+ #pragma omp ordered simd /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a 'loop' region" } */
+ foo ();
+ }
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ {
+ #pragma omp atomic /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a 'loop' region" } */
+ v++;
+ }
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ {
+ #pragma omp atomic read
+ a[i] = v; /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a 'loop' region" } */
+ }
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ {
+ #pragma omp atomic write /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a 'loop' region" "" { target c++ } } */
+ v = a[i]; /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a 'loop' region" "" { target c } } */
+ }
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ a[i] += omp_get_thread_num (); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_thread_num\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ a[i] += omp_get_num_threads (); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_num_threads\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ a[i] += omp_target_is_present (a + i, 0); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_target_is_present\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ a[i] += omp_get_cancellation (); /* { dg-error "OpenMP runtime API call '\[^\n\r]*omp_get_cancellation\[^\n\r]*' in a region with 'order\\(concurrent\\)' clause" } */
+ }
+}
+
diff --git a/gcc/testsuite/c-c++-common/gomp/loop-2.c b/gcc/testsuite/c-c++-common/gomp/loop-2.c
new file mode 100644
index 0000000..ce9b6c9
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/gomp/loop-2.c
@@ -0,0 +1,294 @@
+#ifdef __cplusplus
+extern "C" {
+#endif
+int omp_get_thread_num (void);
+#ifdef __cplusplus
+}
+#endif
+
+void
+f0 (int *a)
+{
+ int i;
+ #pragma omp loop bind(teams) order(concurrent)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+}
+
+void
+f1 (int *a)
+{
+ int i;
+ #pragma omp teams
+ {
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp teams
+ {
+ #pragma omp loop bind(teams)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp teams
+ {
+ #pragma omp loop bind(parallel)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp teams
+ {
+ #pragma omp loop lastprivate (i) bind(thread)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+}
+
+void
+f2 (int *a)
+{
+ int i;
+ #pragma omp loop bind(parallel) order(concurrent)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ #pragma omp parallel
+ {
+ #pragma omp loop private (i)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp parallel
+ {
+ #pragma omp loop lastprivate (i) bind(parallel)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp parallel
+ {
+ #pragma omp loop bind(thread)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp taskgroup
+ {
+ #pragma omp loop bind(parallel)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp teams
+ {
+ int j;
+ #pragma omp distribute
+ for (j = 0; j < 64; ++j)
+ {
+ #pragma omp loop bind(parallel)
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ }
+ }
+ #pragma omp target
+ {
+ #pragma omp loop bind(parallel)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+}
+
+void
+f3 (int *a)
+{
+ int i, j;
+ #pragma omp loop order ( concurrent )bind(thread)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ #pragma omp parallel num_threads (4)
+ {
+ int j = omp_get_thread_num ();
+ #pragma omp loop private (i) bind(thread)
+ for (i = 0; i < 64; i++)
+ a[j * 64 + i] = i;
+ }
+ #pragma omp critical
+ {
+ #pragma omp loop lastprivate (i)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp critical
+ {
+ #pragma omp loop bind(thread)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp master
+ {
+ #pragma omp loop private (i)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp master
+ {
+ #pragma omp loop bind(thread)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp sections
+ {
+ #pragma omp loop private (i)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp sections
+ {
+ #pragma omp loop bind(thread) lastprivate(i)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp single
+ {
+ #pragma omp loop private (i)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp single
+ {
+ #pragma omp loop bind(thread)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp task
+ {
+ #pragma omp loop private (i)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp task
+ {
+ #pragma omp loop bind(thread)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp taskgroup
+ {
+ #pragma omp loop private (i)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp taskgroup
+ {
+ #pragma omp loop bind(thread)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp teams
+ {
+ #pragma omp distribute
+ for (j = 0; j < 64; ++j)
+ {
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ #pragma omp loop bind(thread)
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ }
+ }
+ #pragma omp for
+ for (j = 0; j < 64; ++j)
+ {
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ #pragma omp loop bind(thread)
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ }
+ #pragma omp parallel
+ #pragma omp loop
+ for (j = 0; j < 64; ++j)
+ {
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ #pragma omp loop bind(thread)
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ }
+ #pragma omp loop bind(thread)
+ for (j = 0; j < 64; ++j)
+ {
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ #pragma omp loop bind(thread)
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ }
+ #pragma omp loop bind(parallel)
+ for (j = 0; j < 64; ++j)
+ {
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ #pragma omp loop bind(thread)
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ }
+ #pragma omp for ordered
+ for (j = 0; j < 64; ++j)
+ {
+ #pragma omp ordered
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ #pragma omp ordered threads
+ #pragma omp loop bind(thread)
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ }
+ #pragma omp simd
+ for (j = 0; j < 64; ++j)
+ {
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ #pragma omp loop bind(thread)
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ }
+ #pragma omp taskloop
+ for (j = 0; j < 64; ++j)
+ {
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ #pragma omp loop bind(thread)
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ }
+ #pragma omp target
+ {
+ #pragma omp loop
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ #pragma omp loop bind(thread)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+}
+
+void
+f4 (int *a)
+{
+ int i;
+ #pragma omp ordered
+ {
+ #pragma omp loop private (i)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ #pragma omp loop bind(thread)
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+}
diff --git a/gcc/testsuite/c-c++-common/gomp/loop-3.c b/gcc/testsuite/c-c++-common/gomp/loop-3.c
new file mode 100644
index 0000000..186b8cc
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/gomp/loop-3.c
@@ -0,0 +1,145 @@
+#ifdef __cplusplus
+extern "C" {
+#endif
+int omp_get_thread_num (void);
+#ifdef __cplusplus
+}
+#endif
+
+void
+f1 (int *a)
+{
+ int i;
+ #pragma omp loop /* { dg-error "'bind' clause not specified on a 'loop' construct not nested inside another OpenMP construct" } */
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+}
+
+void
+f2 (int *a)
+{
+ int i, j;
+ #pragma omp parallel num_threads (4)
+ {
+ int j = omp_get_thread_num ();
+ #pragma omp loop private (i) bind(teams) /* { dg-error "'bind\\(teams\\)' on a 'loop' region not strictly nested inside of a 'teams' region" } */
+ for (i = 0; i < 64; i++)
+ a[j * 64 + i] = i;
+ }
+ #pragma omp critical
+ {
+ #pragma omp loop lastprivate (i) bind(teams)/* { dg-error "'bind\\(teams\\)' on a 'loop' region not strictly nested inside of a 'teams' region" } */
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp master
+ {
+ #pragma omp loop bind(teams) /* { dg-error "'bind\\(teams\\)' on a 'loop' region not strictly nested inside of a 'teams' region" } */
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp sections
+ {
+ #pragma omp loop bind(teams) lastprivate(i) /* { dg-error "'bind\\(teams\\)' on a 'loop' region not strictly nested inside of a 'teams' region" } */
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp single
+ {
+ #pragma omp loop bind(teams) /* { dg-error "'bind\\(teams\\)' on a 'loop' region not strictly nested inside of a 'teams' region" } */
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp task
+ {
+ #pragma omp loop bind(teams) /* { dg-error "'bind\\(teams\\)' on a 'loop' region not strictly nested inside of a 'teams' region" } */
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp taskgroup
+ {
+ #pragma omp loop bind(teams) /* { dg-error "'bind\\(teams\\)' on a 'loop' region not strictly nested inside of a 'teams' region" } */
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+ #pragma omp teams
+ {
+ #pragma omp distribute
+ for (j = 0; j < 64; ++j)
+ {
+ #pragma omp loop bind(teams) /* { dg-error "'bind\\(teams\\)' on a 'loop' region not strictly nested inside of a 'teams' region" } */
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ }
+ }
+ #pragma omp for
+ for (j = 0; j < 64; ++j)
+ {
+ #pragma omp loop bind(teams) /* { dg-error "'bind\\(teams\\)' on a 'loop' region not strictly nested inside of a 'teams' region" } */
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ }
+ #pragma omp parallel
+ #pragma omp loop
+ for (j = 0; j < 64; ++j)
+ {
+ #pragma omp loop bind(teams) /* { dg-error "'bind\\(teams\\)' on a 'loop' region not strictly nested inside of a 'teams' region" } */
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ }
+ #pragma omp loop bind(thread)
+ for (j = 0; j < 64; ++j)
+ {
+ #pragma omp loop bind(teams) /* { dg-error "'bind\\(teams\\)' on a 'loop' region not strictly nested inside of a 'teams' region" } */
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ }
+ #pragma omp loop bind(parallel)
+ for (j = 0; j < 64; ++j)
+ {
+ #pragma omp loop bind(teams) /* { dg-error "'bind\\(teams\\)' on a 'loop' region not strictly nested inside of a 'teams' region" } */
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ }
+ #pragma omp for ordered
+ for (j = 0; j < 64; ++j)
+ {
+ #pragma omp ordered threads
+ #pragma omp loop bind(teams) /* { dg-error "'bind\\(teams\\)' on a 'loop' region not strictly nested inside of a 'teams' region" } */
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ }
+ #pragma omp simd
+ for (j = 0; j < 64; ++j)
+ {
+ #pragma omp loop bind(teams) /* { dg-error "'bind\\(teams\\)' on a 'loop' region not strictly nested inside of a 'teams' region" } */
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ }
+ #pragma omp taskloop
+ for (j = 0; j < 64; ++j)
+ {
+ #pragma omp loop bind(teams) /* { dg-error "'bind\\(teams\\)' on a 'loop' region not strictly nested inside of a 'teams' region" } */
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ }
+ #pragma omp target
+ {
+ #pragma omp loop bind(teams) /* { dg-error "'bind\\(teams\\)' on a 'loop' region not strictly nested inside of a 'teams' region" } */
+ for (i = 0; i < 64; i++)
+ a[i] = i;
+ }
+}
+
+void
+f3 (int *a)
+{
+ int i, j;
+ #pragma omp simd
+ for (j = 0; j < 64; j++)
+ {
+ #pragma omp loop bind(parallel) /* { dg-error "'bind\\(parallel\\)' on a 'loop' construct nested inside 'simd' construct" } */
+ for (i = 0; i < 64; i++)
+ a[64 * j + i] = i;
+ }
+}
diff --git a/gcc/testsuite/c-c++-common/gomp/loop-4.c b/gcc/testsuite/c-c++-common/gomp/loop-4.c
new file mode 100644
index 0000000..b77f8c9
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/gomp/loop-4.c
@@ -0,0 +1,46 @@
+int r, l;
+
+void
+f1 (int *a)
+{
+ int i;
+ #pragma omp master
+ {
+ #pragma omp loop bind /* { dg-error "expected" } */
+ for (i = 0; i < 64; ++i)
+ a[i] = i;
+ #pragma omp loop bind ) /* { dg-error "expected" } */
+ for (i = 0; i < 64; ++i)
+ a[i] = i;
+ #pragma omp loop bind ( /* { dg-error "expected" } */
+ for (i = 0; i < 64; ++i)
+ a[i] = i;
+ #pragma omp loop bind () /* { dg-error "expected" } */
+ for (i = 0; i < 64; ++i)
+ a[i] = i;
+ #pragma omp loop bind ( foobar ) /* { dg-error "expected" } */
+ for (i = 0; i < 64; ++i)
+ a[i] = i;
+ #pragma omp loop bind (default) /* { dg-error "expected" } */
+ for (i = 0; i < 64; ++i)
+ a[i] = i;
+ #pragma omp loop bind (parallel /* { dg-error "expected" } */
+ for (i = 0; i < 64; ++i)
+ a[i] = i;
+ }
+}
+
+void
+f2 (int *a)
+{
+ int i;
+ #pragma omp loop bind(parallel) reduction(task, +: r) /* { dg-error "invalid 'task' reduction modifier on construct other than 'parallel', 'for' or 'sections'" } */
+ for (i = 0; i < 64; ++i)
+ a[i] = i;
+ #pragma omp loop bind(thread) reduction(inscan, +: r) /* { dg-error "'inscan' 'reduction' clause on 'loop' construct" } */
+ for (i = 0; i < 64; ++i)
+ a[i] = i;
+ #pragma omp loop bind(parallel) lastprivate (l) /* { dg-error "'lastprivate' clause on a 'loop' construct refers to a variable 'l' which is not the loop iterator" } */
+ for (i = 0; i < 64; ++i)
+ l = i;
+}
diff --git a/gcc/testsuite/c-c++-common/gomp/loop-5.c b/gcc/testsuite/c-c++-common/gomp/loop-5.c
new file mode 100644
index 0000000..b9b2ad9
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/gomp/loop-5.c
@@ -0,0 +1,56 @@
+__attribute__((noipa)) int
+foo (int *a, int *r3)
+{
+ int r = 0, r2[2] = { 0, 0 }, i;
+ #pragma omp parallel loop default (none) reduction (+:r, r2[:2], r3[:2]) shared (a) lastprivate (i)
+ for (i = 0; i < 1024; i++)
+ {
+ r += a[i];
+ r2[0] += a[i];
+ r3[1] += a[i];
+ };
+ return r + r2[0] + r3[1] + i;
+}
+
+__attribute__((noipa)) int
+bar (int *a, int *r3)
+{
+ int r = 0, r2[2] = { 0, 0 }, i;
+ #pragma omp target parallel loop default (none) reduction (+:r, r2[0:2], r3[0:2]) shared (a) lastprivate (i)
+ for (i = 0; i < 1024; i++)
+ {
+ r += a[i];
+ r2[1] += a[i];
+ r3[0] += a[i];
+ }
+ return r + r2[1] + r3[0] + i;
+}
+
+__attribute__((noipa)) int
+baz (int *a, int *r3)
+{
+ int r = 0, r2[2] = { 0, 0 }, i;
+ #pragma omp teams loop default (none) reduction (+:r, r2[0:2], r3[1:1]) shared (a) lastprivate (i)
+ for (i = 0; i < 1024; i++)
+ {
+ r += a[i];
+ r2[0] += a[i];
+ r3[1] += a[i];
+ }
+ return r + r2[0] + r3[1] + i;
+}
+
+__attribute__((noipa)) int
+qux (int *a, int *r3)
+{
+ int r = 0, r2[2] = { 0, 0 }, i;
+ #pragma omp target teams loop default (none) reduction (+:r, r2[1:1], r3[0:2]) shared (a) lastprivate (i)
+ for (i = 0; i < 1024; i++)
+ {
+ r += a[i];
+ r2[1] += a[i];
+ r3[0] += a[i] - 1;
+ r3[1] += a[i];
+ }
+ return r + r2[1] + r3[0] + r3[1] + i;
+}
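
To make the expected reduction arithmetic concrete: with a[] filled with ones, r, r2[0] and r3[1] in foo each accumulate 1024, and lastprivate(i) leaves i at 1024, so foo should return 4096. A hypothetical driver appended to the functions above (not part of this compile-only test) could check that as follows.

int
main (void)
{
  static int a[1024], r3[2];	/* static, so zero-initialized.  */
  for (int i = 0; i < 1024; i++)
    a[i] = 1;
  /* r (1024) + r2[0] (1024) + r3[1] (1024) + i (1024) == 4096.  */
  return foo (a, r3) == 4096 ? 0 : 1;
}
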
diff --git a/gcc/testsuite/c-c++-common/gomp/order-3.c b/gcc/testsuite/c-c++-common/gomp/order-3.c
index 225d0cc..2d51bf3 100644
--- a/gcc/testsuite/c-c++-common/gomp/order-3.c
+++ b/gcc/testsuite/c-c++-common/gomp/order-3.c
@@ -18,46 +18,46 @@ f1 (int *a)
#pragma omp simd order(concurrent)
for (i = 0; i < 64; i++)
{
- #pragma omp parallel /* { dg-error "OpenMP constructs other than '#pragma omp ordered simd' or '#pragma omp atomic' may not be nested inside 'simd' region" } */
+ #pragma omp parallel /* { dg-error "OpenMP constructs other than 'ordered simd', 'simd', 'loop' or 'atomic' may not be nested inside 'simd' region" } */
foo ();
}
#pragma omp simd order(concurrent)
for (i = 0; i < 64; i++)
{
int j;
- #pragma omp simd /* { dg-error "OpenMP constructs other than '#pragma omp ordered simd' or '#pragma omp atomic' may not be nested inside 'simd' region" } */
+ #pragma omp simd
for (j = 0; j < 64; j++)
a[64 * i + j] = i + j;
}
#pragma omp simd order(concurrent)
for (i = 0; i < 64; i++)
{
- #pragma omp critical /* { dg-error "OpenMP constructs other than '#pragma omp ordered simd' or '#pragma omp atomic' may not be nested inside 'simd' region" } */
+ #pragma omp critical /* { dg-error "OpenMP constructs other than 'ordered simd', 'simd', 'loop' or 'atomic' may not be nested inside 'simd' region" } */
foo ();
}
#pragma omp simd order(concurrent)
for (i = 0; i < 64; i++)
{
- #pragma omp ordered simd /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+ #pragma omp ordered simd /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
foo ();
}
#pragma omp simd order(concurrent)
for (i = 0; i < 64; i++)
{
- #pragma omp atomic /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+ #pragma omp atomic /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
v++;
}
#pragma omp simd order(concurrent)
for (i = 0; i < 64; i++)
{
#pragma omp atomic read
- a[i] = v; /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+ a[i] = v; /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
}
#pragma omp simd order(concurrent)
for (i = 0; i < 64; i++)
{
- #pragma omp atomic write /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
- v = a[i]; /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
+ #pragma omp atomic write /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
+ v = a[i]; /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
}
#pragma omp simd order(concurrent)
for (i = 0; i < 64; i++)
@@ -80,46 +80,46 @@ f2 (int *a)
#pragma omp for simd order(concurrent)
for (i = 0; i < 64; i++)
{
- #pragma omp parallel /* { dg-error "OpenMP constructs other than '#pragma omp ordered simd' or '#pragma omp atomic' may not be nested inside 'simd' region" } */
+ #pragma omp parallel /* { dg-error "OpenMP constructs other than 'ordered simd', 'simd', 'loop' or 'atomic' may not be nested inside 'simd' region" } */
foo ();
}
#pragma omp for simd order(concurrent)
for (i = 0; i < 64; i++)
{
int j;
- #pragma omp simd /* { dg-error "OpenMP constructs other than '#pragma omp ordered simd' or '#pragma omp atomic' may not be nested inside 'simd' region" } */
+ #pragma omp simd
for (j = 0; j < 64; j++)
a[64 * i + j] = i + j;
}
#pragma omp for simd order(concurrent)
for (i = 0; i < 64; i++)
{
- #pragma omp critical /* { dg-error "OpenMP constructs other than '#pragma omp ordered simd' or '#pragma omp atomic' may not be nested inside 'simd' region" } */
+ #pragma omp critical /* { dg-error "OpenMP constructs other than 'ordered simd', 'simd', 'loop' or 'atomic' may not be nested inside 'simd' region" } */
foo ();
}
#pragma omp for simd order(concurrent)
for (i = 0; i < 64; i++)
{
- #pragma omp ordered simd /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+ #pragma omp ordered simd /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
foo ();
}
#pragma omp for simd order(concurrent)
for (i = 0; i < 64; i++)
{
- #pragma omp atomic /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+ #pragma omp atomic /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
v++;
}
#pragma omp for simd order(concurrent)
for (i = 0; i < 64; i++)
{
#pragma omp atomic read
- a[i] = v; /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+ a[i] = v; /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
}
#pragma omp for simd order(concurrent)
for (i = 0; i < 64; i++)
{
- #pragma omp atomic write /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
- v = a[i]; /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
+ #pragma omp atomic write /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
+ v = a[i]; /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
}
#pragma omp for simd order(concurrent)
for (i = 0; i < 64; i++)
@@ -156,44 +156,44 @@ f3 (int *a)
#pragma omp for order(concurrent)
for (i = 0; i < 64; i++)
{
- #pragma omp critical /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+ #pragma omp critical /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
foo ();
}
#pragma omp for order(concurrent)
for (i = 0; i < 64; i++)
{
- #pragma omp ordered simd /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+ #pragma omp ordered simd /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
foo ();
}
#pragma omp for order(concurrent)
for (i = 0; i < 64; i++)
{
- #pragma omp atomic /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+ #pragma omp atomic /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
v++;
}
#pragma omp for order(concurrent)
for (i = 0; i < 64; i++)
{
#pragma omp atomic read
- a[i] = v; /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+ a[i] = v; /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
}
#pragma omp for order(concurrent)
for (i = 0; i < 64; i++)
{
- #pragma omp atomic write /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
- v = a[i]; /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
+ #pragma omp atomic write /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c++ } } */
+ v = a[i]; /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" "" { target c } } */
}
#pragma omp for order(concurrent)
for (i = 0; i < 64; i++)
{
- #pragma omp task /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+ #pragma omp task /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
a[i]++;
}
#pragma omp for order(concurrent)
for (i = 0; i < 64; i++)
{
int j;
- #pragma omp taskloop /* { dg-error "OpenMP constructs other than 'parallel' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
+ #pragma omp taskloop /* { dg-error "OpenMP constructs other than 'parallel', 'loop' or 'simd' may not be nested inside a region with the 'order\\(concurrent\\)' clause" } */
for (j = 0; j < 64; j++)
a[64 * i + j] = i + j;
}
diff --git a/gcc/testsuite/c-c++-common/gomp/simd-setjmp-1.c b/gcc/testsuite/c-c++-common/gomp/simd-setjmp-1.c
new file mode 100644
index 0000000..453e001
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/gomp/simd-setjmp-1.c
@@ -0,0 +1,68 @@
+typedef long int jmp_buf[8];
+extern
+#ifdef __cplusplus
+"C"
+#endif
+int setjmp (jmp_buf);
+
+void
+foo (void)
+{
+ int i;
+ #pragma omp simd
+ for (i = 0; i < 64; i++)
+ {
+ jmp_buf buf;
+ setjmp (buf); /* { dg-error "setjmp/longjmp inside 'simd' construct" } */
+ }
+}
+
+void
+bar (void)
+{
+ int i;
+ #pragma omp loop bind(thread)
+ for (i = 0; i < 64; i++)
+ {
+ jmp_buf buf;
+ setjmp (buf);
+ }
+}
+
+#ifdef __cplusplus
+struct S
+{
+ static int setjmp (jmp_buf);
+};
+
+namespace N
+{
+ int setjmp (jmp_buf);
+}
+
+void
+baz (void)
+{
+ int i;
+ #pragma omp simd
+ for (i = 0; i < 64; i++)
+ {
+ jmp_buf buf;
+ S::setjmp (buf);
+ N::setjmp (buf);
+ }
+}
+
+void
+qux (void)
+{
+ int i;
+ #pragma omp loop bind(thread)
+ for (i = 0; i < 64; i++)
+ {
+ jmp_buf buf;
+ S::setjmp (buf);
+ N::setjmp (buf);
+ }
+}
+#endif
diff --git a/gcc/testsuite/c-c++-common/gomp/teams-2.c b/gcc/testsuite/c-c++-common/gomp/teams-2.c
index 011c284..85a5be7 100644
--- a/gcc/testsuite/c-c++-common/gomp/teams-2.c
+++ b/gcc/testsuite/c-c++-common/gomp/teams-2.c
@@ -10,7 +10,7 @@ foo (void)
}
#pragma omp teams
{
- #pragma omp teams /* { dg-error "only 'distribute' or 'parallel' regions are allowed to be strictly nested inside 'teams' region" } */
+ #pragma omp teams /* { dg-error "only 'distribute', 'parallel' or 'loop' regions are allowed to be strictly nested inside 'teams' region" } */
;
}
#pragma omp target
@@ -72,48 +72,48 @@ bar (void)
#pragma omp teams
{
int x, y, v = 4;
- #pragma omp target /* { dg-error "only 'distribute' or 'parallel' regions are allowed to be strictly nested inside 'teams' region" } */
+ #pragma omp target /* { dg-error "only 'distribute', 'parallel' or 'loop' regions are allowed to be strictly nested inside 'teams' region" } */
;
- #pragma omp target data map (to: v) /* { dg-error "only 'distribute' or 'parallel' regions are allowed to be strictly nested inside 'teams' region" } */
+ #pragma omp target data map (to: v) /* { dg-error "only 'distribute', 'parallel' or 'loop' regions are allowed to be strictly nested inside 'teams' region" } */
;
- #pragma omp for /* { dg-error "only 'distribute' or 'parallel' regions are allowed to be strictly nested inside 'teams' region" } */
+ #pragma omp for /* { dg-error "only 'distribute', 'parallel' or 'loop' regions are allowed to be strictly nested inside 'teams' region" } */
for (int i = 0; i < 64; ++i)
;
- #pragma omp simd /* { dg-error "only 'distribute' or 'parallel' regions are allowed to be strictly nested inside 'teams' region" } */
+ #pragma omp simd /* { dg-error "only 'distribute', 'parallel' or 'loop' regions are allowed to be strictly nested inside 'teams' region" } */
for (int i = 0; i < 64; ++i)
;
- #pragma omp for simd /* { dg-error "only 'distribute' or 'parallel' regions are allowed to be strictly nested inside 'teams' region" } */
+ #pragma omp for simd /* { dg-error "only 'distribute', 'parallel' or 'loop' regions are allowed to be strictly nested inside 'teams' region" } */
for (int i = 0; i < 64; ++i)
;
- #pragma omp single /* { dg-error "only 'distribute' or 'parallel' regions are allowed to be strictly nested inside 'teams' region" } */
+ #pragma omp single /* { dg-error "only 'distribute', 'parallel' or 'loop' regions are allowed to be strictly nested inside 'teams' region" } */
;
- #pragma omp master /* { dg-error "only 'distribute' or 'parallel' regions are allowed to be strictly nested inside 'teams' region" } */
+ #pragma omp master /* { dg-error "only 'distribute', 'parallel' or 'loop' regions are allowed to be strictly nested inside 'teams' region" } */
;
- #pragma omp sections /* { dg-error "only 'distribute' or 'parallel' regions are allowed to be strictly nested inside 'teams' region" } */
+ #pragma omp sections /* { dg-error "only 'distribute', 'parallel' or 'loop' regions are allowed to be strictly nested inside 'teams' region" } */
{
x = 1;
#pragma omp section
y = 2;
}
- #pragma omp critical /* { dg-error "only 'distribute' or 'parallel' regions are allowed to be strictly nested inside 'teams' region" } */
+ #pragma omp critical /* { dg-error "only 'distribute', 'parallel' or 'loop' regions are allowed to be strictly nested inside 'teams' region" } */
;
- #pragma omp target enter data map (to: v) /* { dg-error "only 'distribute' or 'parallel' regions are allowed to be strictly nested inside 'teams' region" } */
- #pragma omp target exit data map (from: v) /* { dg-error "only 'distribute' or 'parallel' regions are allowed to be strictly nested inside 'teams' region" } */
- #pragma omp cancel parallel /* { dg-error "only 'distribute' or 'parallel' regions are allowed to be strictly nested inside 'teams' region" } */
- #pragma omp cancellation point parallel /* { dg-error "only 'distribute' or 'parallel' regions are allowed to be strictly nested inside 'teams' region" } */
- #pragma omp barrier /* { dg-error "only 'distribute' or 'parallel' regions are allowed to be strictly nested inside 'teams' region" } */
- #pragma omp ordered /* { dg-error "only 'distribute' or 'parallel' regions are allowed to be strictly nested inside 'teams' region" } */
+ #pragma omp target enter data map (to: v) /* { dg-error "only 'distribute', 'parallel' or 'loop' regions are allowed to be strictly nested inside 'teams' region" } */
+ #pragma omp target exit data map (from: v) /* { dg-error "only 'distribute', 'parallel' or 'loop' regions are allowed to be strictly nested inside 'teams' region" } */
+ #pragma omp cancel parallel /* { dg-error "only 'distribute', 'parallel' or 'loop' regions are allowed to be strictly nested inside 'teams' region" } */
+ #pragma omp cancellation point parallel /* { dg-error "only 'distribute', 'parallel' or 'loop' regions are allowed to be strictly nested inside 'teams' region" } */
+ #pragma omp barrier /* { dg-error "only 'distribute', 'parallel' or 'loop' regions are allowed to be strictly nested inside 'teams' region" } */
+ #pragma omp ordered /* { dg-error "only 'distribute', 'parallel' or 'loop' regions are allowed to be strictly nested inside 'teams' region" } */
;
- #pragma omp task /* { dg-error "only 'distribute' or 'parallel' regions are allowed to be strictly nested inside 'teams' region" } */
+ #pragma omp task /* { dg-error "only 'distribute', 'parallel' or 'loop' regions are allowed to be strictly nested inside 'teams' region" } */
;
- #pragma omp taskloop /* { dg-error "only 'distribute' or 'parallel' regions are allowed to be strictly nested inside 'teams' region" } */
+ #pragma omp taskloop /* { dg-error "only 'distribute', 'parallel' or 'loop' regions are allowed to be strictly nested inside 'teams' region" } */
for (int i = 0; i < 64; ++i)
;
- #pragma omp atomic /* { dg-error "only 'distribute' or 'parallel' regions are allowed to be strictly nested inside 'teams' region" } */
+ #pragma omp atomic /* { dg-error "only 'distribute', 'parallel' or 'loop' regions are allowed to be strictly nested inside 'teams' region" } */
v++;
- #pragma omp taskgroup /* { dg-error "only 'distribute' or 'parallel' regions are allowed to be strictly nested inside 'teams' region" } */
+ #pragma omp taskgroup /* { dg-error "only 'distribute', 'parallel' or 'loop' regions are allowed to be strictly nested inside 'teams' region" } */
;
- #pragma omp taskwait /* { dg-error "only 'distribute' or 'parallel' regions are allowed to be strictly nested inside 'teams' region" } */
- #pragma omp taskyield /* { dg-error "only 'distribute' or 'parallel' regions are allowed to be strictly nested inside 'teams' region" } */
+ #pragma omp taskwait /* { dg-error "only 'distribute', 'parallel' or 'loop' regions are allowed to be strictly nested inside 'teams' region" } */
+ #pragma omp taskyield /* { dg-error "only 'distribute', 'parallel' or 'loop' regions are allowed to be strictly nested inside 'teams' region" } */
}
}
diff --git a/gcc/tree-core.h b/gcc/tree-core.h
index 8ac07e8..fa37a0dc 100644
--- a/gcc/tree-core.h
+++ b/gcc/tree-core.h
@@ -451,6 +451,9 @@ enum omp_clause_code {
/* OpenMP clause: order (concurrent). */
OMP_CLAUSE_ORDER,

+ /* OpenMP clause: bind (binding). */
+ OMP_CLAUSE_BIND,
+
/* Internally used only clause, holding SIMD uid. */
OMP_CLAUSE__SIMDUID_,

@@ -539,6 +542,12 @@
OMP_CLAUSE_DEFAULTMAP_MASK = 7 * (OMP_CLAUSE_DEFAULTMAP_CATEGORY_MASK + 1)
};

+enum omp_clause_bind_kind {
+ OMP_CLAUSE_BIND_TEAMS,
+ OMP_CLAUSE_BIND_PARALLEL,
+ OMP_CLAUSE_BIND_THREAD
+};
+
/* memory-order-clause on OpenMP atomic/flush constructs or
argument of atomic_default_mem_order clause. */
enum omp_memory_order {
@@ -1531,6 +1540,7 @@ struct GTY(()) tree_omp_clause {
enum omp_clause_linear_kind linear_kind;
enum tree_code if_modifier;
enum omp_clause_defaultmap_kind defaultmap_kind;
+ enum omp_clause_bind_kind bind_kind;
/* The dimension a OMP_CLAUSE__GRIDDIM_ clause of a gridified target
construct describes. */
unsigned int dimension;
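
With the clause code, the kind enum and the subcode field in place, a front end can materialize the clause the same way as other kind-carrying clauses. The helper below is a sketch only (add_bind_thread_clause is a made-up name); it assumes GCC's existing build_omp_clause and OMP_CLAUSE_CHAIN plus the OMP_CLAUSE_BIND_KIND accessor this patch adds to tree.h.

/* Sketch: build a bind(thread) clause and chain it onto CLAUSES.  */
static tree
add_bind_thread_clause (location_t loc, tree clauses)
{
  tree c = build_omp_clause (loc, OMP_CLAUSE_BIND);
  OMP_CLAUSE_BIND_KIND (c) = OMP_CLAUSE_BIND_THREAD;
  OMP_CLAUSE_CHAIN (c) = clauses;
  return c;
}
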
diff --git a/gcc/tree-pretty-print.c b/gcc/tree-pretty-print.c
index a75f97a..9bea132 100644
--- a/gcc/tree-pretty-print.c
+++ b/gcc/tree-pretty-print.c
@@ -1044,6 +1044,25 @@ dump_omp_clause (pretty_printer *pp, tree clause, int spc, dump_flags_t flags)
pp_string (pp, "order(concurrent)");
break;
+ case OMP_CLAUSE_BIND:
+ pp_string (pp, "bind(");
+ switch (OMP_CLAUSE_BIND_KIND (clause))
+ {
+ case OMP_CLAUSE_BIND_TEAMS:
+ pp_string (pp, "teams");
+ break;
+ case OMP_CLAUSE_BIND_PARALLEL:
+ pp_string (pp, "parallel");
+ break;
+ case OMP_CLAUSE_BIND_THREAD:
+ pp_string (pp, "thread");
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ pp_right_paren (pp);
+ break;
+
case OMP_CLAUSE__SIMDUID_:
pp_string (pp, "_simduid_(");
dump_generic_node (pp, OMP_CLAUSE__SIMDUID__DECL (clause),
@@ -3261,6 +3280,10 @@ dump_generic_node (pretty_printer *pp, tree node, int spc, dump_flags_t flags,
pp_string (pp, "#pragma omp taskloop");
goto dump_omp_loop;
+ case OMP_LOOP:
+ pp_string (pp, "#pragma omp loop");
+ goto dump_omp_loop;
+
case OACC_LOOP:
pp_string (pp, "#pragma acc loop");
goto dump_omp_loop;
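
Illustratively, with these pretty-printer entries a tree dump of a loop construct (e.g. via -fdump-tree-original; exact formatting may differ) would render the new construct and clause keywords along these lines:

#pragma omp loop bind(thread) order(concurrent)
  for (i = 0; i < 256; i++)
    ...
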
diff --git a/gcc/tree.c b/gcc/tree.c
index 751370b..8cf75f2 100644
--- a/gcc/tree.c
+++ b/gcc/tree.c
@@ -344,6 +344,7 @@ unsigned const char omp_clause_num_ops[] =
1, /* OMP_CLAUSE_HINT */
0, /* OMP_CLAUSE_DEFAULTMAP */
0, /* OMP_CLAUSE_ORDER */
+ 0, /* OMP_CLAUSE_BIND */
1, /* OMP_CLAUSE__SIMDUID_ */
0, /* OMP_CLAUSE__SIMT_ */
0, /* OMP_CLAUSE_INDEPENDENT */
@@ -426,6 +427,7 @@ const char * const omp_clause_code_name[] =
"hint",
"defaultmap",
"order",
+ "bind",
"_simduid_",
"_simt_",
"independent",
@@ -12343,6 +12345,7 @@ walk_tree_1 (tree *tp, walk_tree_fn func, void *data,
case OMP_CLAUSE_SIMD:
case OMP_CLAUSE_DEFAULTMAP:
case OMP_CLAUSE_ORDER:
+ case OMP_CLAUSE_BIND:
case OMP_CLAUSE_AUTO:
case OMP_CLAUSE_SEQ:
case OMP_CLAUSE_TILE:
diff --git a/gcc/tree.def b/gcc/tree.def
index d2e6938..4a22d94 100644
--- a/gcc/tree.def
+++ b/gcc/tree.def
@@ -1153,6 +1153,10 @@ DEFTREECODE (OMP_DISTRIBUTE, "omp_distribute", tcc_statement, 7)
Operands like for OMP_FOR. */
DEFTREECODE (OMP_TASKLOOP, "omp_taskloop", tcc_statement, 7)
+/* OpenMP - #pragma omp loop [clause1 ... clauseN]
+ Operands like for OMP_FOR. */
+DEFTREECODE (OMP_LOOP, "omp_loop", tcc_statement, 7)
+
/* OpenMP - #pragma acc loop [clause1 ... clauseN]
Operands like for OMP_FOR. */
DEFTREECODE (OACC_LOOP, "oacc_loop", tcc_statement, 7)
diff --git a/gcc/tree.h b/gcc/tree.h
index 992abd1..99d021e 100644
--- a/gcc/tree.h
+++ b/gcc/tree.h
@@ -1387,14 +1387,14 @@ class auto_suppress_location_wrappers
#define OMP_TASKREG_BODY(NODE) TREE_OPERAND (OMP_TASKREG_CHECK (NODE), 0)
#define OMP_TASKREG_CLAUSES(NODE) TREE_OPERAND (OMP_TASKREG_CHECK (NODE), 1)
-#define OMP_LOOP_CHECK(NODE) TREE_RANGE_CHECK (NODE, OMP_FOR, OACC_LOOP)
-#define OMP_FOR_BODY(NODE) TREE_OPERAND (OMP_LOOP_CHECK (NODE), 0)
-#define OMP_FOR_CLAUSES(NODE) TREE_OPERAND (OMP_LOOP_CHECK (NODE), 1)
-#define OMP_FOR_INIT(NODE) TREE_OPERAND (OMP_LOOP_CHECK (NODE), 2)
-#define OMP_FOR_COND(NODE) TREE_OPERAND (OMP_LOOP_CHECK (NODE), 3)
-#define OMP_FOR_INCR(NODE) TREE_OPERAND (OMP_LOOP_CHECK (NODE), 4)
-#define OMP_FOR_PRE_BODY(NODE) TREE_OPERAND (OMP_LOOP_CHECK (NODE), 5)
-#define OMP_FOR_ORIG_DECLS(NODE) TREE_OPERAND (OMP_LOOP_CHECK (NODE), 6)
+#define OMP_LOOPING_CHECK(NODE) TREE_RANGE_CHECK (NODE, OMP_FOR, OACC_LOOP)
+#define OMP_FOR_BODY(NODE) TREE_OPERAND (OMP_LOOPING_CHECK (NODE), 0)
+#define OMP_FOR_CLAUSES(NODE) TREE_OPERAND (OMP_LOOPING_CHECK (NODE), 1)
+#define OMP_FOR_INIT(NODE) TREE_OPERAND (OMP_LOOPING_CHECK (NODE), 2)
+#define OMP_FOR_COND(NODE) TREE_OPERAND (OMP_LOOPING_CHECK (NODE), 3)
+#define OMP_FOR_INCR(NODE) TREE_OPERAND (OMP_LOOPING_CHECK (NODE), 4)
+#define OMP_FOR_PRE_BODY(NODE) TREE_OPERAND (OMP_LOOPING_CHECK (NODE), 5)
+#define OMP_FOR_ORIG_DECLS(NODE) TREE_OPERAND (OMP_LOOPING_CHECK (NODE), 6)
#define OMP_SECTIONS_BODY(NODE) TREE_OPERAND (OMP_SECTIONS_CHECK (NODE), 0)
#define OMP_SECTIONS_CLAUSES(NODE) TREE_OPERAND (OMP_SECTIONS_CHECK (NODE), 1)
@@ -1742,6 +1742,9 @@ class auto_suppress_location_wrappers
(OMP_CLAUSE_DEFAULTMAP_KIND (NODE) \
= (enum omp_clause_defaultmap_kind) (CATEGORY | BEHAVIOR))
+#define OMP_CLAUSE_BIND_KIND(NODE) \
+ (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_BIND)->omp_clause.subcode.bind_kind)
+
#define OMP_CLAUSE_TILE_LIST(NODE) \
OMP_CLAUSE_OPERAND (OMP_CLAUSE_SUBCODE_CHECK (NODE, OMP_CLAUSE_TILE), 0)
#define OMP_CLAUSE_TILE_ITERVAR(NODE) \
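
OMP_CLAUSE_BIND_KIND follows the usual clause-subcode accessor pattern; a minimal sketch of a consumer (a hypothetical helper, mirroring the switch in dump_omp_clause above) that dispatches on the kind:

/* Hypothetical helper: map a bind clause to its keyword.  */
static const char *
omp_bind_kind_name (tree clause)
{
  switch (OMP_CLAUSE_BIND_KIND (clause))
    {
    case OMP_CLAUSE_BIND_TEAMS:
      return "teams";
    case OMP_CLAUSE_BIND_PARALLEL:
      return "parallel";
    case OMP_CLAUSE_BIND_THREAD:
      return "thread";
    default:
      gcc_unreachable ();
    }
}
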
diff --git a/libgomp/ChangeLog b/libgomp/ChangeLog
index 53125a5..547ce4e 100644
--- a/libgomp/ChangeLog
+++ b/libgomp/ChangeLog
@@ -1,3 +1,7 @@
+2019-07-20 Jakub Jelinek <jakub@redhat.com>
+
+ * testsuite/libgomp.c-c++-common/loop-1.c: New test.
+
2019-07-08 Jakub Jelinek <jakub@redhat.com>
* testsuite/libgomp.c++/scan-13.C: Replace xfail with target x86.
diff --git a/libgomp/testsuite/libgomp.c-c++-common/loop-1.c b/libgomp/testsuite/libgomp.c-c++-common/loop-1.c
new file mode 100644
index 0000000..de69608
--- /dev/null
+++ b/libgomp/testsuite/libgomp.c-c++-common/loop-1.c
@@ -0,0 +1,127 @@
+extern
+#ifdef __cplusplus
+"C"
+#endif
+void abort (void);
+#define N 256
+int r;
+
+void
+foo (int *a)
+{
+ int i, j;
+ #pragma omp loop bind(thread) order(concurrent) private (j) lastprivate (i) reduction(+:r) collapse(1)
+ for (i = 0; i < N; i++)
+ {
+ j = i - 2;
+ a[i] = j;
+ r += j;
+ }
+}
+
+void
+bar (int *a)
+{
+ int i, j;
+ #pragma omp loop bind(parallel) order(concurrent) private (j) lastprivate (i) reduction(+:r) collapse(1)
+ for (i = 0; i < N; i++)
+ {
+ j = i;
+ a[i] = j;
+ r += j;
+ }
+}
+
+void
+baz (int *a)
+{
+ int i, j;
+ #pragma omp loop bind(teams) order(concurrent) private (j) lastprivate (i) reduction(+:r)
+ for (i = 0; i < N; i++)
+ {
+ j = i + 2;
+ a[i] = j;
+ r += j;
+ }
+}
+
+int
+main ()
+{
+ int a[N], i, j;
+ foo (a);
+ for (i = 0; i < N; ++i)
+ if (a[i] != i - 2)
+ abort ();
+ else
+ a[i] = -35;
+ if (r != N * (N - 5) / 2)
+ abort ();
+ else
+ r = 0;
+ bar (a);
+ for (i = 0; i < N; ++i)
+ if (a[i] != i)
+ abort ();
+ else
+ a[i] = -35;
+ if (r != N * (N - 1) / 2)
+ abort ();
+ else
+ r = 0;
+ #pragma omp parallel loop private (j) lastprivate (i) reduction(+:r)
+ for (i = 0; i < N; i++)
+ {
+ j = i + 4;
+ a[i] = j;
+ r += j;
+ }
+ if (i != N)
+ abort ();
+ for (i = 0; i < N; ++i)
+ if (a[i] != i + 4)
+ abort ();
+ else
+ a[i] = -35;
+ if (r != N * (N + 7) / 2)
+ abort ();
+ else
+ r = 0;
+ #pragma omp parallel
+ bar (a);
+ for (i = 0; i < N; ++i)
+ if (a[i] != i)
+ abort ();
+ else
+ a[i] = -35;
+ if (r != N * (N - 1) / 2)
+ abort ();
+ else
+ r = 0;
+ #pragma omp teams
+ baz (a);
+ for (i = 0; i < N; ++i)
+ if (a[i] != i + 2)
+ abort ();
+ else
+ a[i] = -35;
+ if (r != N * (N + 3) / 2)
+ abort ();
+ else
+ r = 0;
+ #pragma omp teams loop order(concurrent) private (j) lastprivate (i) reduction(+:r) collapse(1)
+ for (i = 0; i < N; i++)
+ {
+ j = i - 4;
+ a[i] = j;
+ r += j;
+ }
+ if (i != N)
+ abort ();
+ for (i = 0; i < N; ++i)
+ if (a[i] != i - 4)
+ abort ();
+ if (r != N * (N - 9) / 2)
+ abort ();
+ return 0;
+}
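
The checksum constants in the test follow from summing j = i + k over i in [0, N):

  sum_{i=0}^{N-1} (i + k) = N*(N-1)/2 + k*N = N*(N - 1 + 2*k)/2

so k = -2, 0, +2, +4 and -4 yield the expected values N*(N-5)/2, N*(N-1)/2, N*(N+3)/2, N*(N+7)/2 and N*(N-9)/2 checked above.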