Diffstat (limited to 'gcc/omp-low.c')
-rw-r--r--  gcc/omp-low.c | 2600
1 file changed, 2312 insertions(+), 288 deletions(-)
diff --git a/gcc/omp-low.c b/gcc/omp-low.c
index 84b2357..26f0c35 100644
--- a/gcc/omp-low.c
+++ b/gcc/omp-low.c
@@ -90,6 +90,10 @@ typedef struct omp_context
construct. In the case of a parallel, this is in the child function. */
tree block_vars;
+ /* Label to which GOMP_cancel{,lation_point} and explicit and implicit
+ barriers should jump during the omplower pass. */
+ tree cancel_label;
+
/* What to do with variables with implicitly determined sharing
attributes. */
enum omp_clause_default_kind default_kind;
@@ -101,6 +105,9 @@ typedef struct omp_context
/* True if this parallel directive is nested within another. */
bool is_nested;
+
+ /* True if this construct can be cancelled. */
+ bool cancellable;
} omp_context;
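
These two fields back the OpenMP 4.0 cancellation support added throughout this patch: when a construct is cancellable, explicit and implicit barriers and the GOMP_cancel{,lation_point} calls branch to cancel_label. At the source level, the flag is set by user code along these lines (an illustrative sketch, not part of the patch; do_step is a hypothetical helper, and cancellation only takes effect at run time with OMP_CANCELLATION=true):

    extern int do_step (int i);   /* Hypothetical helper.  */

    void work (int n)
    {
    #pragma omp parallel
    #pragma omp for
      for (int i = 0; i < n; i++)
        {
          if (do_step (i) != 0)   /* Some failure condition.  */
            {
              /* Marks the enclosing for region cancellable.  */
    #pragma omp cancel for
            }
          /* Other threads observe the cancellation here.  */
    #pragma omp cancellation point for
        }
    }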
@@ -127,6 +134,7 @@ struct omp_for_data
static splay_tree all_contexts;
static int taskreg_nesting_level;
+static int target_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;
@@ -224,6 +232,8 @@ extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
struct omp_for_data_loop dummy_loop;
location_t loc = gimple_location (for_stmt);
bool simd = gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_SIMD;
+ bool distribute = gimple_omp_for_kind (for_stmt)
+ == GF_OMP_FOR_KIND_DISTRIBUTE;
fd->for_stmt = for_stmt;
fd->pre = NULL;
@@ -233,7 +243,8 @@ extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
else
fd->loops = &fd->loop;
- fd->have_nowait = fd->have_ordered = false;
+ fd->have_nowait = distribute || simd;
+ fd->have_ordered = false;
fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
fd->chunk_size = NULL_TREE;
collapse_iter = NULL;
@@ -249,9 +260,14 @@ extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
fd->have_ordered = true;
break;
case OMP_CLAUSE_SCHEDULE:
+ gcc_assert (!distribute);
fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
break;
+ case OMP_CLAUSE_DIST_SCHEDULE:
+ gcc_assert (distribute);
+ fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
+ break;
case OMP_CLAUSE_COLLAPSE:
if (fd->collapse > 1)
{
@@ -279,8 +295,7 @@ extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
/* We only need to compute a default chunk size for ordered
static loops and dynamic loops. */
if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
- || fd->have_ordered
- || fd->collapse > 1)
+ || fd->have_ordered)
fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
? integer_zero_node : integer_one_node;
}
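
extract_omp_for_data now also recognizes distribute loops, which are implicitly nowait and carry dist_schedule instead of schedule (hence the paired asserts above). A minimal source-level trigger, assuming an OpenMP 4.0 compiler (the chunk size 64 is arbitrary):

    void scale (float *x, int n)
    {
    #pragma omp target map(tofrom: x[0:n])
    #pragma omp teams
    #pragma omp distribute dist_schedule(static, 64)
      for (int i = 0; i < n; i++)
        x[i] *= 2.0f;
    }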
@@ -294,7 +309,6 @@ extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
else
loop = &dummy_loop;
-
loop->v = gimple_omp_for_index (for_stmt, i);
gcc_assert (SSA_VAR_P (loop->v));
gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
@@ -351,7 +365,9 @@ extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
gcc_unreachable ();
}
- if (simd)
+ if (simd
+ || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
+ && !fd->have_ordered))
{
if (fd->collapse == 1)
iter_type = TREE_TYPE (loop->v);
@@ -360,7 +376,7 @@ extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
< TYPE_PRECISION (TREE_TYPE (loop->v)))
iter_type
= build_nonstandard_integer_type
- (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
+ (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
}
else if (iter_type != long_long_unsigned_type_node)
{
@@ -459,7 +475,9 @@ extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
}
if (count
- && !simd)
+ && !simd
+ && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
+ || fd->have_ordered))
{
if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
iter_type = long_long_unsigned_type_node;
@@ -570,7 +588,7 @@ workshare_safe_to_combine_p (basic_block ws_entry_bb)
expanded. */
static vec<tree, va_gc> *
-get_ws_args_for (gimple ws_stmt)
+get_ws_args_for (gimple par_stmt, gimple ws_stmt)
{
tree t;
location_t loc = gimple_location (ws_stmt);
@@ -579,15 +597,31 @@ get_ws_args_for (gimple ws_stmt)
if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
{
struct omp_for_data fd;
+ tree n1, n2;
extract_omp_for_data (ws_stmt, &fd, NULL);
+ n1 = fd.loop.n1;
+ n2 = fd.loop.n2;
+
+ if (gimple_omp_for_combined_into_p (ws_stmt))
+ {
+ tree innerc
+ = find_omp_clause (gimple_omp_parallel_clauses (par_stmt),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ n1 = OMP_CLAUSE_DECL (innerc);
+ innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ n2 = OMP_CLAUSE_DECL (innerc);
+ }
vec_alloc (ws_args, 3 + (fd.chunk_size != 0));
- t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
+ t = fold_convert_loc (loc, long_integer_type_node, n1);
ws_args->quick_push (t);
- t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
+ t = fold_convert_loc (loc, long_integer_type_node, n2);
ws_args->quick_push (t);
t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
@@ -650,6 +684,7 @@ determine_parallel_type (struct omp_region *region)
|| (last_and_only_stmt (ws_entry_bb)
&& last_and_only_stmt (par_exit_bb))))
{
+ gimple par_stmt = last_stmt (par_entry_bb);
gimple ws_stmt = last_stmt (ws_entry_bb);
if (region->inner->type == GIMPLE_OMP_FOR)
@@ -677,7 +712,7 @@ determine_parallel_type (struct omp_region *region)
region->is_combined_parallel = true;
region->inner->is_combined_parallel = true;
- region->ws_args = get_ws_args_for (ws_stmt);
+ region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
}
}
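
get_ws_args_for now receives the parallel statement as well, so that for a loop combined into its parallel the n1/n2 bounds passed to the GOMP_parallel_loop_* entry points come from the _looptemp_ temporaries rather than the original expressions. The combined case is ordinary user code such as this sketch (whether _looptemp_ clauses are created depends on the construct and the bounds, so treat it as illustrative):

    void saxpy (float a, const float *x, float *y, int n)
    {
    #pragma omp parallel for schedule(static)
      for (int i = 0; i < n; i++)
        y[i] += a * x[i];
    }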
@@ -984,7 +1019,12 @@ install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
|| !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
type = TREE_TYPE (var);
- if (by_ref)
+ if (mask & 4)
+ {
+ gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
+ type = build_pointer_type (build_pointer_type (type));
+ }
+ else if (by_ref)
type = build_pointer_type (type);
else if ((mask & 3) == 1 && is_reference (var))
type = TREE_TYPE (type);
@@ -1421,6 +1461,9 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
break;
case OMP_CLAUSE_SHARED:
+ /* Ignore shared directives in teams construct. */
+ if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
+ break;
gcc_assert (is_taskreg_ctx (ctx));
decl = OMP_CLAUSE_DECL (c);
gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
@@ -1480,6 +1523,13 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
install_var_local (decl, ctx);
break;
+ case OMP_CLAUSE__LOOPTEMP_:
+ gcc_assert (is_parallel_ctx (ctx));
+ decl = OMP_CLAUSE_DECL (c);
+ install_var_field (decl, false, 3, ctx);
+ install_var_local (decl, ctx);
+ break;
+
case OMP_CLAUSE_COPYPRIVATE:
case OMP_CLAUSE_COPYIN:
decl = OMP_CLAUSE_DECL (c);
@@ -1494,19 +1544,113 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
case OMP_CLAUSE_FINAL:
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
+ case OMP_CLAUSE_NUM_TEAMS:
+ case OMP_CLAUSE_THREAD_LIMIT:
+ case OMP_CLAUSE_DEVICE:
case OMP_CLAUSE_SCHEDULE:
+ case OMP_CLAUSE_DIST_SCHEDULE:
+ case OMP_CLAUSE_DEPEND:
if (ctx->outer)
scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
break;
+ case OMP_CLAUSE_TO:
+ case OMP_CLAUSE_FROM:
+ case OMP_CLAUSE_MAP:
+ if (ctx->outer)
+ scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
+ decl = OMP_CLAUSE_DECL (c);
+ /* Global variables with "omp declare target" attribute
+ don't need to be copied, the receiver side will use them
+ directly. */
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
+ && DECL_P (decl)
+ && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
+ && lookup_attribute ("omp declare target",
+ DECL_ATTRIBUTES (decl)))
+ break;
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
+ && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER)
+ {
+ /* Ignore OMP_CLAUSE_MAP_POINTER kind for arrays in
+ #pragma omp target data, there is nothing to map for
+ those. */
+ if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA
+ && !POINTER_TYPE_P (TREE_TYPE (decl)))
+ break;
+ }
+ if (DECL_P (decl))
+ {
+ if (DECL_SIZE (decl)
+ && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
+ {
+ tree decl2 = DECL_VALUE_EXPR (decl);
+ gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
+ decl2 = TREE_OPERAND (decl2, 0);
+ gcc_assert (DECL_P (decl2));
+ install_var_field (decl2, true, 3, ctx);
+ install_var_local (decl2, ctx);
+ install_var_local (decl, ctx);
+ }
+ else
+ {
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
+ && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
+ && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
+ && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
+ install_var_field (decl, true, 7, ctx);
+ else
+ install_var_field (decl, true, 3, ctx);
+ if (gimple_omp_target_kind (ctx->stmt)
+ == GF_OMP_TARGET_KIND_REGION)
+ install_var_local (decl, ctx);
+ }
+ }
+ else
+ {
+ tree base = get_base_address (decl);
+ tree nc = OMP_CLAUSE_CHAIN (c);
+ if (DECL_P (base)
+ && nc != NULL_TREE
+ && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
+ && OMP_CLAUSE_DECL (nc) == base
+ && OMP_CLAUSE_MAP_KIND (nc) == OMP_CLAUSE_MAP_POINTER
+ && integer_zerop (OMP_CLAUSE_SIZE (nc)))
+ {
+ OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
+ OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
+ }
+ else
+ {
+ gcc_assert (!splay_tree_lookup (ctx->field_map,
+ (splay_tree_key) decl));
+ tree field
+ = build_decl (OMP_CLAUSE_LOCATION (c),
+ FIELD_DECL, NULL_TREE, ptr_type_node);
+ DECL_ALIGN (field) = TYPE_ALIGN (ptr_type_node);
+ insert_field_into_struct (ctx->record_type, field);
+ splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
+ (splay_tree_value) field);
+ }
+ }
+ break;
+
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_MERGEABLE:
+ case OMP_CLAUSE_PROC_BIND:
case OMP_CLAUSE_SAFELEN:
break;
+ case OMP_CLAUSE_ALIGNED:
+ decl = OMP_CLAUSE_DECL (c);
+ if (is_global_var (decl)
+ && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
+ install_var_local (decl, ctx);
+ break;
+
default:
gcc_unreachable ();
}
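
The new map/to/from scanning distinguishes plain variables from array sections: for a section based on a pointer, the front ends emit the section map followed by a zero-sized OMP_CLAUSE_MAP_POINTER for the base decl, which the code above pairs up and marks as a zero-bias array section. An illustrative trigger:

    void vadd (const double *a, const double *b, double *c, int n)
    {
      /* Each a[0:n] section on a pointer yields the section map plus
         an OMP_CLAUSE_MAP_POINTER for the pointer itself.  */
    #pragma omp target map(to: a[0:n], b[0:n]) map(from: c[0:n])
      for (int i = 0; i < n; i++)
        c[i] = a[i] + b[i];
    }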
@@ -1541,24 +1685,71 @@ scan_sharing_clauses (tree clauses, omp_context *ctx)
break;
case OMP_CLAUSE_SHARED:
+ /* Ignore shared directives in teams construct. */
+ if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
+ break;
decl = OMP_CLAUSE_DECL (c);
if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
fixup_remapped_decl (decl, ctx, false);
break;
+ case OMP_CLAUSE_MAP:
+ if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA)
+ break;
+ decl = OMP_CLAUSE_DECL (c);
+ if (DECL_P (decl)
+ && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
+ && lookup_attribute ("omp declare target",
+ DECL_ATTRIBUTES (decl)))
+ break;
+ if (DECL_P (decl))
+ {
+ if (OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
+ && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
+ && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
+ {
+ tree new_decl = lookup_decl (decl, ctx);
+ TREE_TYPE (new_decl)
+ = remap_type (TREE_TYPE (decl), &ctx->cb);
+ }
+ else if (DECL_SIZE (decl)
+ && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
+ {
+ tree decl2 = DECL_VALUE_EXPR (decl);
+ gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
+ decl2 = TREE_OPERAND (decl2, 0);
+ gcc_assert (DECL_P (decl2));
+ fixup_remapped_decl (decl2, ctx, false);
+ fixup_remapped_decl (decl, ctx, true);
+ }
+ else
+ fixup_remapped_decl (decl, ctx, false);
+ }
+ break;
+
case OMP_CLAUSE_COPYPRIVATE:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_DEFAULT:
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
+ case OMP_CLAUSE_NUM_TEAMS:
+ case OMP_CLAUSE_THREAD_LIMIT:
+ case OMP_CLAUSE_DEVICE:
case OMP_CLAUSE_SCHEDULE:
+ case OMP_CLAUSE_DIST_SCHEDULE:
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_FINAL:
case OMP_CLAUSE_MERGEABLE:
+ case OMP_CLAUSE_PROC_BIND:
case OMP_CLAUSE_SAFELEN:
+ case OMP_CLAUSE_ALIGNED:
+ case OMP_CLAUSE_DEPEND:
+ case OMP_CLAUSE__LOOPTEMP_:
+ case OMP_CLAUSE_TO:
+ case OMP_CLAUSE_FROM:
break;
default:
@@ -1623,6 +1814,26 @@ create_omp_child_function (omp_context *ctx, bool task_copy)
DECL_EXTERNAL (decl) = 0;
DECL_CONTEXT (decl) = NULL_TREE;
DECL_INITIAL (decl) = make_node (BLOCK);
+ bool target_p = false;
+ if (lookup_attribute ("omp declare target",
+ DECL_ATTRIBUTES (current_function_decl)))
+ target_p = true;
+ else
+ {
+ omp_context *octx;
+ for (octx = ctx; octx; octx = octx->outer)
+ if (gimple_code (octx->stmt) == GIMPLE_OMP_TARGET
+ && gimple_omp_target_kind (octx->stmt)
+ == GF_OMP_TARGET_KIND_REGION)
+ {
+ target_p = true;
+ break;
+ }
+ }
+ if (target_p)
+ DECL_ATTRIBUTES (decl)
+ = tree_cons (get_identifier ("omp declare target"),
+ NULL_TREE, DECL_ATTRIBUTES (decl));
t = build_decl (DECL_SOURCE_LOCATION (decl),
RESULT_DECL, NULL_TREE, void_type_node);
@@ -1664,6 +1875,35 @@ create_omp_child_function (omp_context *ctx, bool task_copy)
pop_cfun ();
}
+/* Callback for walk_gimple_seq. Check if a combined parallel
+ contains an OMP_FOR marked gimple_omp_for_combined_into_p. */
+
+static tree
+find_combined_for (gimple_stmt_iterator *gsi_p,
+ bool *handled_ops_p,
+ struct walk_stmt_info *wi)
+{
+ gimple stmt = gsi_stmt (*gsi_p);
+
+ *handled_ops_p = true;
+ switch (gimple_code (stmt))
+ {
+ WALK_SUBSTMTS;
+
+ case GIMPLE_OMP_FOR:
+ if (gimple_omp_for_combined_into_p (stmt)
+ && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
+ {
+ wi->info = stmt;
+ return integer_zero_node;
+ }
+ break;
+ default:
+ break;
+ }
+ return NULL;
+}
+
/* Scan an OpenMP parallel directive. */
static void
@@ -1684,6 +1924,40 @@ scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
return;
}
+ if (gimple_omp_parallel_combined_p (stmt))
+ {
+ gimple for_stmt;
+ struct walk_stmt_info wi;
+
+ memset (&wi, 0, sizeof (wi));
+ wi.val_only = true;
+ walk_gimple_seq (gimple_omp_body (stmt),
+ find_combined_for, NULL, &wi);
+ for_stmt = (gimple) wi.info;
+ if (for_stmt)
+ {
+ struct omp_for_data fd;
+ extract_omp_for_data (for_stmt, &fd, NULL);
+ /* We need two temporaries with fd.loop.v type (istart/iend)
+ and then (fd.collapse - 1) temporaries with the same
+ type for count2 ... countN-1 vars if not constant. */
+ size_t count = 2, i;
+ tree type = fd.iter_type;
+ if (fd.collapse > 1
+ && TREE_CODE (fd.loop.n2) != INTEGER_CST)
+ count += fd.collapse - 1;
+ for (i = 0; i < count; i++)
+ {
+ tree temp = create_tmp_var (type, NULL);
+ tree c = build_omp_clause (UNKNOWN_LOCATION,
+ OMP_CLAUSE__LOOPTEMP_);
+ OMP_CLAUSE_DECL (c) = temp;
+ OMP_CLAUSE_CHAIN (c) = gimple_omp_parallel_clauses (stmt);
+ gimple_omp_parallel_set_clauses (stmt, c);
+ }
+ }
+ }
+
ctx = new_omp_context (stmt, outer_ctx);
if (taskreg_nesting_level > 1)
ctx->is_nested = true;
@@ -1858,6 +2132,63 @@ scan_omp_single (gimple stmt, omp_context *outer_ctx)
layout_type (ctx->record_type);
}
+/* Scan an OpenMP target{, data, update} directive. */
+
+static void
+scan_omp_target (gimple stmt, omp_context *outer_ctx)
+{
+ omp_context *ctx;
+ tree name;
+ int kind = gimple_omp_target_kind (stmt);
+
+ ctx = new_omp_context (stmt, outer_ctx);
+ ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
+ ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
+ ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
+ name = create_tmp_var_name (".omp_data_t");
+ name = build_decl (gimple_location (stmt),
+ TYPE_DECL, name, ctx->record_type);
+ DECL_ARTIFICIAL (name) = 1;
+ DECL_NAMELESS (name) = 1;
+ TYPE_NAME (ctx->record_type) = name;
+ if (kind == GF_OMP_TARGET_KIND_REGION)
+ {
+ create_omp_child_function (ctx, false);
+ gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
+ }
+
+ scan_sharing_clauses (gimple_omp_target_clauses (stmt), ctx);
+ scan_omp (gimple_omp_body_ptr (stmt), ctx);
+
+ if (TYPE_FIELDS (ctx->record_type) == NULL)
+ ctx->record_type = ctx->receiver_decl = NULL;
+ else
+ {
+ TYPE_FIELDS (ctx->record_type)
+ = nreverse (TYPE_FIELDS (ctx->record_type));
+#ifdef ENABLE_CHECKING
+ tree field;
+ unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
+ for (field = TYPE_FIELDS (ctx->record_type);
+ field;
+ field = DECL_CHAIN (field))
+ gcc_assert (DECL_ALIGN (field) == align);
+#endif
+ layout_type (ctx->record_type);
+ if (kind == GF_OMP_TARGET_KIND_REGION)
+ fixup_child_record_type (ctx);
+ }
+}
+
+/* Scan an OpenMP teams directive. */
+
+static void
+scan_omp_teams (gimple stmt, omp_context *outer_ctx)
+{
+ omp_context *ctx = new_omp_context (stmt, outer_ctx);
+ scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
+ scan_omp (gimple_omp_body_ptr (stmt), ctx);
+}
/* Check OpenMP nesting restrictions. */
static bool
@@ -1872,16 +2203,149 @@ check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
"OpenMP constructs may not be nested inside simd region");
return false;
}
+ else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
+ {
+ if ((gimple_code (stmt) != GIMPLE_OMP_FOR
+ || (gimple_omp_for_kind (stmt)
+ != GF_OMP_FOR_KIND_DISTRIBUTE))
+ && gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
+ {
+ error_at (gimple_location (stmt),
+ "only distribute or parallel constructs are allowed to "
+ "be closely nested inside teams construct");
+ return false;
+ }
+ }
}
switch (gimple_code (stmt))
{
case GIMPLE_OMP_FOR:
if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_SIMD)
return true;
+ if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
+ {
+ if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
+ {
+ error_at (gimple_location (stmt),
+ "distribute construct must be closely nested inside "
+ "teams construct");
+ return false;
+ }
+ return true;
+ }
+ /* FALLTHRU */
+ case GIMPLE_CALL:
+ if (is_gimple_call (stmt)
+ && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
+ == BUILT_IN_GOMP_CANCEL
+ || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
+ == BUILT_IN_GOMP_CANCELLATION_POINT))
+ {
+ const char *bad = NULL;
+ const char *kind = NULL;
+ if (ctx == NULL)
+ {
+ error_at (gimple_location (stmt), "orphaned %qs construct",
+ DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
+ == BUILT_IN_GOMP_CANCEL
+ ? "#pragma omp cancel"
+ : "#pragma omp cancellation point");
+ return false;
+ }
+ switch (host_integerp (gimple_call_arg (stmt, 0), 0)
+ ? tree_low_cst (gimple_call_arg (stmt, 0), 0)
+ : 0)
+ {
+ case 1:
+ if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
+ bad = "#pragma omp parallel";
+ else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
+ == BUILT_IN_GOMP_CANCEL
+ && !integer_zerop (gimple_call_arg (stmt, 1)))
+ ctx->cancellable = true;
+ kind = "parallel";
+ break;
+ case 2:
+ if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
+ || gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
+ bad = "#pragma omp for";
+ else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
+ == BUILT_IN_GOMP_CANCEL
+ && !integer_zerop (gimple_call_arg (stmt, 1)))
+ {
+ ctx->cancellable = true;
+ if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
+ OMP_CLAUSE_NOWAIT))
+ warning_at (gimple_location (stmt), 0,
+ "%<#pragma omp cancel for%> inside "
+ "%<nowait%> for construct");
+ if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
+ OMP_CLAUSE_ORDERED))
+ warning_at (gimple_location (stmt), 0,
+ "%<#pragma omp cancel for%> inside "
+ "%<ordered%> for construct");
+ }
+ kind = "for";
+ break;
+ case 4:
+ if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
+ && gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
+ bad = "#pragma omp sections";
+ else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
+ == BUILT_IN_GOMP_CANCEL
+ && !integer_zerop (gimple_call_arg (stmt, 1)))
+ {
+ if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
+ {
+ ctx->cancellable = true;
+ if (find_omp_clause (gimple_omp_sections_clauses
+ (ctx->stmt),
+ OMP_CLAUSE_NOWAIT))
+ warning_at (gimple_location (stmt), 0,
+ "%<#pragma omp cancel sections%> inside "
+ "%<nowait%> sections construct");
+ }
+ else
+ {
+ gcc_assert (ctx->outer
+ && gimple_code (ctx->outer->stmt)
+ == GIMPLE_OMP_SECTIONS);
+ ctx->outer->cancellable = true;
+ if (find_omp_clause (gimple_omp_sections_clauses
+ (ctx->outer->stmt),
+ OMP_CLAUSE_NOWAIT))
+ warning_at (gimple_location (stmt), 0,
+ "%<#pragma omp cancel sections%> inside "
+ "%<nowait%> sections construct");
+ }
+ }
+ kind = "sections";
+ break;
+ case 8:
+ if (gimple_code (ctx->stmt) != GIMPLE_OMP_TASK)
+ bad = "#pragma omp task";
+ else
+ ctx->cancellable = true;
+ kind = "taskgroup";
+ break;
+ default:
+ error_at (gimple_location (stmt), "invalid arguments");
+ return false;
+ }
+ if (bad)
+ {
+ error_at (gimple_location (stmt),
+ "%<%s %s%> construct not closely nested inside of %qs",
+ DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
+ == BUILT_IN_GOMP_CANCEL
+ ? "#pragma omp cancel"
+ : "#pragma omp cancellation point", kind, bad);
+ return false;
+ }
+ }
/* FALLTHRU */
case GIMPLE_OMP_SECTIONS:
case GIMPLE_OMP_SINGLE:
- case GIMPLE_CALL:
for (; ctx != NULL; ctx = ctx->outer)
switch (gimple_code (ctx->stmt))
{
@@ -1891,8 +2355,12 @@ check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_TASK:
+ case GIMPLE_OMP_CRITICAL:
if (is_gimple_call (stmt))
{
+ if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
+ != BUILT_IN_GOMP_BARRIER)
+ return true;
error_at (gimple_location (stmt),
"barrier region may not be closely nested inside "
"of work-sharing, critical, ordered, master or "
@@ -1949,7 +2417,10 @@ check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
}
return true;
case GIMPLE_OMP_PARALLEL:
- return true;
+ error_at (gimple_location (stmt),
+ "ordered region must be closely nested inside "
+ "a loop region with an ordered clause");
+ return false;
default:
break;
}
@@ -1966,6 +2437,17 @@ check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
return false;
}
break;
+ case GIMPLE_OMP_TEAMS:
+ if (ctx == NULL
+ || gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
+ || gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION)
+ {
+ error_at (gimple_location (stmt),
+ "teams construct not closely nested inside of target "
+ "region");
+ return false;
+ }
+ break;
default:
break;
}
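
Taken together, these checks enforce the OpenMP 4.0 nesting rules: teams must be closely nested in a target region, and only distribute or parallel may be closely nested in teams. A conforming shape (illustrative):

    void init (float *x, int n)
    {
    #pragma omp target map(from: x[0:n])
    #pragma omp teams num_teams(8) thread_limit(128)
    #pragma omp distribute parallel for
      for (int i = 0; i < n; i++)
        x[i] = 0.0f;
    }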
@@ -2038,23 +2520,33 @@ scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
input_location = gimple_location (stmt);
/* Check the OpenMP nesting restrictions. */
- if (ctx != NULL)
- {
- bool remove = false;
- if (is_gimple_omp (stmt))
- remove = !check_omp_nesting_restrictions (stmt, ctx);
- else if (is_gimple_call (stmt))
- {
- tree fndecl = gimple_call_fndecl (stmt);
- if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
- && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
+ bool remove = false;
+ if (is_gimple_omp (stmt))
+ remove = !check_omp_nesting_restrictions (stmt, ctx);
+ else if (is_gimple_call (stmt))
+ {
+ tree fndecl = gimple_call_fndecl (stmt);
+ if (fndecl
+ && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ switch (DECL_FUNCTION_CODE (fndecl))
+ {
+ case BUILT_IN_GOMP_BARRIER:
+ case BUILT_IN_GOMP_CANCEL:
+ case BUILT_IN_GOMP_CANCELLATION_POINT:
+ case BUILT_IN_GOMP_TASKYIELD:
+ case BUILT_IN_GOMP_TASKWAIT:
+ case BUILT_IN_GOMP_TASKGROUP_START:
+ case BUILT_IN_GOMP_TASKGROUP_END:
remove = !check_omp_nesting_restrictions (stmt, ctx);
- }
- if (remove)
- {
- stmt = gimple_build_nop ();
- gsi_replace (gsi, stmt, false);
- }
+ break;
+ default:
+ break;
+ }
+ }
+ if (remove)
+ {
+ stmt = gimple_build_nop ();
+ gsi_replace (gsi, stmt, false);
}
*handled_ops_p = true;
@@ -2087,12 +2579,21 @@ scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
case GIMPLE_OMP_SECTION:
case GIMPLE_OMP_MASTER:
+ case GIMPLE_OMP_TASKGROUP:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_CRITICAL:
ctx = new_omp_context (stmt, ctx);
scan_omp (gimple_omp_body_ptr (stmt), ctx);
break;
+ case GIMPLE_OMP_TARGET:
+ scan_omp_target (stmt, ctx);
+ break;
+
+ case GIMPLE_OMP_TEAMS:
+ scan_omp_teams (stmt, ctx);
+ break;
+
case GIMPLE_BIND:
{
tree var;
@@ -2135,10 +2636,15 @@ scan_omp (gimple_seq *body_p, omp_context *ctx)
/* Build a call to GOMP_barrier. */
-static tree
-build_omp_barrier (void)
-{
- return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
+static gimple
+build_omp_barrier (tree lhs)
+{
+ tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
+ : BUILT_IN_GOMP_BARRIER);
+ gimple g = gimple_build_call (fndecl, 0);
+ if (lhs)
+ gimple_call_set_lhs (g, lhs);
+ return g;
}
/* If a context was created for STMT when it was scanned, return it. */
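
build_omp_barrier now returns the call statement and, when given an lhs, emits GOMP_barrier_cancel instead, whose boolean result tells each thread whether the region was cancelled so it can branch to ctx->cancel_label. A hand-written sketch of that lowered shape (the real output is GIMPLE, not C; only the GOMP_barrier_cancel prototype is the actual libgomp entry point):

    #include <stdbool.h>

    extern bool GOMP_barrier_cancel (void);  /* libgomp entry point.  */

    void barrier_in_cancellable_region (void)
    {
      if (GOMP_barrier_cancel ())
        goto cancel_label;        /* ctx->cancel_label */
      return;                     /* Normal fall through.  */
    cancel_label:
      ;                           /* Skip to the end of the region.  */
    }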
@@ -2299,6 +2805,49 @@ omp_reduction_init (tree clause, tree type)
}
}
+/* Return alignment to be assumed for var in CLAUSE, which should be
+ OMP_CLAUSE_ALIGNED. */
+
+static tree
+omp_clause_aligned_alignment (tree clause)
+{
+ if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
+ return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);
+
+ /* Otherwise return implementation defined alignment. */
+ unsigned int al = 1;
+ enum machine_mode mode, vmode;
+ int vs = targetm.vectorize.autovectorize_vector_sizes ();
+ if (vs)
+ vs = 1 << floor_log2 (vs);
+ static enum mode_class classes[]
+ = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
+ for (int i = 0; i < 4; i += 2)
+ for (mode = GET_CLASS_NARROWEST_MODE (classes[i]);
+ mode != VOIDmode;
+ mode = GET_MODE_WIDER_MODE (mode))
+ {
+ vmode = targetm.vectorize.preferred_simd_mode (mode);
+ if (GET_MODE_CLASS (vmode) != classes[i + 1])
+ continue;
+ while (vs
+ && GET_MODE_SIZE (vmode) < vs
+ && GET_MODE_2XWIDER_MODE (vmode) != VOIDmode)
+ vmode = GET_MODE_2XWIDER_MODE (vmode);
+
+ tree type = lang_hooks.types.type_for_mode (mode, 1);
+ if (type == NULL_TREE || TYPE_MODE (type) != mode)
+ continue;
+ type = build_vector_type (type, GET_MODE_SIZE (vmode)
+ / GET_MODE_SIZE (mode));
+ if (TYPE_MODE (type) != vmode)
+ continue;
+ if (TYPE_ALIGN_UNIT (type) > al)
+ al = TYPE_ALIGN_UNIT (type);
+ }
+ return build_int_cst (integer_type_node, al);
+}
+
/* Return maximum possible vectorization factor for the target. */
static int
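
omp_clause_aligned_alignment supplies the implementation-defined default used when an aligned clause names no alignment: roughly, the unit alignment of the widest preferred SIMD vector type the target supports. With an explicit argument the clause value is used directly, as in this sketch (32 is arbitrary; plain aligned(a, b) would get the computed default):

    void axpy (float *restrict a, const float *restrict b, int n)
    {
    #pragma omp simd aligned(a, b : 32)
      for (int i = 0; i < n; i++)
        a[i] += b[i];
    }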
@@ -2362,8 +2911,11 @@ lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
NULL_TREE, NULL_TREE);
lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
NULL_TREE, NULL_TREE);
- SET_DECL_VALUE_EXPR (new_var, lvar);
- DECL_HAS_VALUE_EXPR_P (new_var) = 1;
+ if (DECL_P (new_var))
+ {
+ SET_DECL_VALUE_EXPR (new_var, lvar);
+ DECL_HAS_VALUE_EXPR_P (new_var) = 1;
+ }
return true;
}
@@ -2374,11 +2926,12 @@ lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
static void
lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
- omp_context *ctx)
+ omp_context *ctx, struct omp_for_data *fd)
{
tree c, dtor, copyin_seq, x, ptr;
bool copyin_by_ref = false;
bool lastprivate_firstprivate = false;
+ bool reduction_omp_orig_ref = false;
int pass;
bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD);
@@ -2398,9 +2951,6 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_REDUCTION:
- if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
- max_vf = 1;
- /* FALLTHRU */
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_LASTPRIVATE:
@@ -2432,6 +2982,9 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
continue;
break;
case OMP_CLAUSE_SHARED:
+ /* Ignore shared directives in teams construct. */
+ if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
+ continue;
if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
{
gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
@@ -2439,9 +2992,16 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
}
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_COPYIN:
+ case OMP_CLAUSE_LINEAR:
+ break;
case OMP_CLAUSE_REDUCTION:
+ if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
+ reduction_omp_orig_ref = true;
break;
- case OMP_CLAUSE_LINEAR:
+ case OMP_CLAUSE__LOOPTEMP_:
+ /* Handle _looptemp_ clauses only on parallel. */
+ if (fd)
+ continue;
break;
case OMP_CLAUSE_LASTPRIVATE:
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
@@ -2451,6 +3011,42 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
continue;
}
break;
+ case OMP_CLAUSE_ALIGNED:
+ if (pass == 0)
+ continue;
+ var = OMP_CLAUSE_DECL (c);
+ if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
+ && !is_global_var (var))
+ {
+ new_var = maybe_lookup_decl (var, ctx);
+ if (new_var == NULL_TREE)
+ new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
+ x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
+ x = build_call_expr_loc (clause_loc, x, 2, new_var,
+ omp_clause_aligned_alignment (c));
+ x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
+ x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
+ gimplify_and_add (x, ilist);
+ }
+ else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
+ && is_global_var (var))
+ {
+ tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
+ new_var = lookup_decl (var, ctx);
+ t = maybe_lookup_decl_in_outer_ctx (var, ctx);
+ t = build_fold_addr_expr_loc (clause_loc, t);
+ t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
+ t = build_call_expr_loc (clause_loc, t2, 2, t,
+ omp_clause_aligned_alignment (c));
+ t = fold_convert_loc (clause_loc, ptype, t);
+ x = create_tmp_var (ptype, NULL);
+ t = build2 (MODIFY_EXPR, ptype, x, t);
+ gimplify_and_add (t, ilist);
+ t = build_simple_mem_ref_loc (clause_loc, x);
+ SET_DECL_VALUE_EXPR (new_var, t);
+ DECL_HAS_VALUE_EXPR_P (new_var) = 1;
+ }
+ continue;
default:
continue;
}
@@ -2502,10 +3098,7 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
allocate new backing storage for the new pointer
variable. This allows us to avoid changing all the
code that expects a pointer to something that expects
- a direct variable. Note that this doesn't apply to
- C++, since reference types are disallowed in data
- sharing clauses there, except for NRV optimized
- return values. */
+ a direct variable. */
if (pass == 0)
continue;
@@ -2550,6 +3143,9 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_SHARED:
+ /* Ignore shared directives in teams construct. */
+ if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
+ continue;
/* Shared global vars are just accessed directly. */
if (is_global_var (new_var))
break;
@@ -2588,19 +3184,20 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
else
x = NULL;
do_private:
- x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
+ tree nx;
+ nx = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
if (is_simd)
{
tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
- if ((TREE_ADDRESSABLE (new_var) || x || y
+ if ((TREE_ADDRESSABLE (new_var) || nx || y
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
&& lower_rec_simd_input_clauses (new_var, ctx, max_vf,
idx, lane, ivar, lvar))
{
- if (x)
+ if (nx)
x = lang_hooks.decls.omp_clause_default_ctor
(c, unshare_expr (ivar), x);
- if (x)
+ if (nx && x)
gimplify_and_add (x, &llist[0]);
if (y)
{
@@ -2617,8 +3214,8 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
break;
}
}
- if (x)
- gimplify_and_add (x, ilist);
+ if (nx)
+ gimplify_and_add (nx, ilist);
/* FALLTHRU */
do_dtor:
@@ -2661,6 +3258,44 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
x = build_outer_var_ref (var, ctx);
if (is_simd)
{
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
+ && gimple_omp_for_combined_into_p (ctx->stmt))
+ {
+ tree stept = POINTER_TYPE_P (TREE_TYPE (x))
+ ? sizetype : TREE_TYPE (x);
+ tree t = fold_convert (stept,
+ OMP_CLAUSE_LINEAR_STEP (c));
+ tree c = find_omp_clause (clauses,
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (c);
+ tree l = OMP_CLAUSE_DECL (c);
+ if (fd->collapse == 1)
+ {
+ tree n1 = fd->loop.n1;
+ tree step = fd->loop.step;
+ tree itype = TREE_TYPE (l);
+ if (POINTER_TYPE_P (itype))
+ itype = signed_type_for (itype);
+ l = fold_build2 (MINUS_EXPR, itype, l, n1);
+ if (TYPE_UNSIGNED (itype)
+ && fd->loop.cond_code == GT_EXPR)
+ l = fold_build2 (TRUNC_DIV_EXPR, itype,
+ fold_build1 (NEGATE_EXPR,
+ itype, l),
+ fold_build1 (NEGATE_EXPR,
+ itype, step));
+ else
+ l = fold_build2 (TRUNC_DIV_EXPR, itype, l, step);
+ }
+ t = fold_build2 (MULT_EXPR, stept,
+ fold_convert (stept, l), t);
+ if (POINTER_TYPE_P (TREE_TYPE (x)))
+ x = fold_build2 (POINTER_PLUS_EXPR,
+ TREE_TYPE (x), x, t);
+ else
+ x = fold_build2 (PLUS_EXPR, TREE_TYPE (x), x, t);
+ }
+
if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
|| TREE_ADDRESSABLE (new_var))
&& lower_rec_simd_input_clauses (new_var, ctx, max_vf,
@@ -2708,6 +3343,13 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
gimplify_and_add (x, ilist);
goto do_dtor;
+ case OMP_CLAUSE__LOOPTEMP_:
+ gcc_assert (is_parallel_ctx (ctx));
+ x = build_outer_var_ref (var, ctx);
+ x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
+ gimplify_and_add (x, ilist);
+ break;
+
case OMP_CLAUSE_COPYIN:
by_ref = use_pointer_for_field (var, NULL);
x = build_receiver_ref (var, by_ref, ctx);
@@ -2720,19 +3362,89 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
+ gimple tseq;
x = build_outer_var_ref (var, ctx);
- /* FIXME: Not handled yet. */
- gcc_assert (!is_simd);
- if (is_reference (var))
+ if (is_reference (var)
+ && !useless_type_conversion_p (TREE_TYPE (placeholder),
+ TREE_TYPE (x)))
x = build_fold_addr_expr_loc (clause_loc, x);
SET_DECL_VALUE_EXPR (placeholder, x);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
- lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
- gimple_seq_add_seq (ilist,
- OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
+ tree new_vard = new_var;
+ if (is_reference (var))
+ {
+ gcc_assert (TREE_CODE (new_var) == MEM_REF);
+ new_vard = TREE_OPERAND (new_var, 0);
+ gcc_assert (DECL_P (new_vard));
+ }
+ if (is_simd
+ && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
+ idx, lane, ivar, lvar))
+ {
+ if (new_vard == new_var)
+ {
+ gcc_assert (DECL_VALUE_EXPR (new_var) == lvar);
+ SET_DECL_VALUE_EXPR (new_var, ivar);
+ }
+ else
+ {
+ SET_DECL_VALUE_EXPR (new_vard,
+ build_fold_addr_expr (ivar));
+ DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
+ }
+ x = lang_hooks.decls.omp_clause_default_ctor
+ (c, unshare_expr (ivar),
+ build_outer_var_ref (var, ctx));
+ if (x)
+ gimplify_and_add (x, &llist[0]);
+ if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
+ {
+ tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
+ lower_omp (&tseq, ctx);
+ gimple_seq_add_seq (&llist[0], tseq);
+ }
+ OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
+ tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
+ lower_omp (&tseq, ctx);
+ gimple_seq_add_seq (&llist[1], tseq);
+ OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
+ DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
+ if (new_vard == new_var)
+ SET_DECL_VALUE_EXPR (new_var, lvar);
+ else
+ SET_DECL_VALUE_EXPR (new_vard,
+ build_fold_addr_expr (lvar));
+ x = lang_hooks.decls.omp_clause_dtor (c, ivar);
+ if (x)
+ {
+ tseq = NULL;
+ dtor = x;
+ gimplify_stmt (&dtor, &tseq);
+ gimple_seq_add_seq (&llist[1], tseq);
+ }
+ break;
+ }
+ x = lang_hooks.decls.omp_clause_default_ctor
+ (c, new_var, unshare_expr (x));
+ if (x)
+ gimplify_and_add (x, ilist);
+ if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
+ {
+ tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
+ lower_omp (&tseq, ctx);
+ gimple_seq_add_seq (ilist, tseq);
+ }
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
+ if (is_simd)
+ {
+ tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
+ lower_omp (&tseq, ctx);
+ gimple_seq_add_seq (dlist, tseq);
+ OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
+ }
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
+ goto do_dtor;
}
else
{
@@ -2835,14 +3547,15 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
master thread doesn't modify it before it is copied over in all
threads. Similarly for variables in both firstprivate and
lastprivate clauses we need to ensure the lastprivate copying
- happens after firstprivate copying in all threads. */
- if (copyin_by_ref || lastprivate_firstprivate)
+ happens after firstprivate copying in all threads. And similarly
+ for UDRs if the initializer expression refers to omp_orig. */
+ if (copyin_by_ref || lastprivate_firstprivate || reduction_omp_orig_ref)
{
/* Don't add any barrier for #pragma omp simd or
#pragma omp distribute. */
if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
|| gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
- gimplify_and_add (build_omp_barrier (), ilist);
+ gimple_seq_add_stmt (ilist, build_omp_barrier (NULL_TREE));
}
/* If max_vf is non-zero, then we can use only a vectorization factor
@@ -2871,7 +3584,7 @@ lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
static void
lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
- omp_context *ctx)
+ omp_context *ctx)
{
tree x, c, label = NULL, orig_clauses = clauses;
bool par_clauses = false;
@@ -3025,7 +3738,7 @@ lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
{
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
- /* Never use OMP_ATOMIC for array reductions. */
+ /* Never use OMP_ATOMIC for array reductions or UDRs. */
count = -1;
break;
}
@@ -3072,7 +3785,9 @@ lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
- if (is_reference (var))
+ if (is_reference (var)
+ && !useless_type_conversion_p (TREE_TYPE (placeholder),
+ TREE_TYPE (ref)))
ref = build_fold_addr_expr_loc (clause_loc, ref);
SET_DECL_VALUE_EXPR (placeholder, ref);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
@@ -3175,6 +3890,7 @@ lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_LASTPRIVATE:
case OMP_CLAUSE_REDUCTION:
+ case OMP_CLAUSE__LOOPTEMP_:
break;
default:
continue;
@@ -3195,6 +3911,7 @@ lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_COPYIN:
+ case OMP_CLAUSE__LOOPTEMP_:
do_in = true;
break;
@@ -3317,7 +4034,7 @@ static void
expand_parallel_call (struct omp_region *region, basic_block bb,
gimple entry_stmt, vec<tree, va_gc> *ws_args)
{
- tree t, t1, t2, val, cond, c, clauses;
+ tree t, t1, t2, val, cond, c, clauses, flags;
gimple_stmt_iterator gsi;
gimple stmt;
enum built_in_function start_ix;
@@ -3327,23 +4044,23 @@ expand_parallel_call (struct omp_region *region, basic_block bb,
clauses = gimple_omp_parallel_clauses (entry_stmt);
- /* Determine what flavor of GOMP_parallel_start we will be
+ /* Determine what flavor of GOMP_parallel we will be
emitting. */
- start_ix = BUILT_IN_GOMP_PARALLEL_START;
+ start_ix = BUILT_IN_GOMP_PARALLEL;
if (is_combined_parallel (region))
{
switch (region->inner->type)
{
case GIMPLE_OMP_FOR:
gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
- start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
+ start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC
+ (region->inner->sched_kind
== OMP_CLAUSE_SCHEDULE_RUNTIME
? 3 : region->inner->sched_kind));
start_ix = (enum built_in_function)start_ix2;
break;
case GIMPLE_OMP_SECTIONS:
- start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
+ start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
break;
default:
gcc_unreachable ();
@@ -3354,6 +4071,7 @@ expand_parallel_call (struct omp_region *region, basic_block bb,
and there is no conditional. */
cond = NULL_TREE;
val = build_int_cst (unsigned_type_node, 0);
+ flags = build_int_cst (unsigned_type_node, 0);
c = find_omp_clause (clauses, OMP_CLAUSE_IF);
if (c)
@@ -3368,6 +4086,10 @@ expand_parallel_call (struct omp_region *region, basic_block bb,
else
clause_loc = gimple_location (entry_stmt);
+ c = find_omp_clause (clauses, OMP_CLAUSE_PROC_BIND);
+ if (c)
+ flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));
+
/* Ensure 'val' is of the correct type. */
val = fold_convert_loc (clause_loc, unsigned_type_node, val);
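
The proc_bind kind is simply encoded into the new flags word and later pushed as the extra argument of GOMP_parallel. Source-level trigger (illustrative):

    void f (void)
    {
      /* OMP_CLAUSE_PROC_BIND_KIND (close) ends up in 'flags'.  */
    #pragma omp parallel proc_bind(close) num_threads(8)
      {
        /* Threads are placed near the parent thread's place.  */
      }
    }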
@@ -3459,34 +4181,19 @@ expand_parallel_call (struct omp_region *region, basic_block bb,
t1 = build_fold_addr_expr (t);
t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
- vec_alloc (args, 3 + vec_safe_length (ws_args));
+ vec_alloc (args, 4 + vec_safe_length (ws_args));
args->quick_push (t2);
args->quick_push (t1);
args->quick_push (val);
if (ws_args)
args->splice (*ws_args);
+ args->quick_push (flags);
t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
builtin_decl_explicit (start_ix), args);
force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
-
- t = gimple_omp_parallel_data_arg (entry_stmt);
- if (t == NULL)
- t = null_pointer_node;
- else
- t = build_fold_addr_expr (t);
- t = build_call_expr_loc (gimple_location (entry_stmt),
- gimple_omp_parallel_child_fn (entry_stmt), 1, t);
- force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
- false, GSI_CONTINUE_LINKING);
-
- t = build_call_expr_loc (gimple_location (entry_stmt),
- builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
- 0);
- force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
- false, GSI_CONTINUE_LINKING);
}
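
The old three-call sequence (GOMP_parallel_start, a direct call of the child function, GOMP_parallel_end) collapses into a single GOMP_parallel call that runs the body in every thread of the team, including the master, and then joins. A minimal hand-written equivalent against the libgomp ABI (compile with -fopenmp so libgomp is linked; treat this as a sketch, not generated code):

    #include <stdio.h>

    /* OpenMP 4.0 libgomp entry point used by GCC-generated code.  */
    extern void GOMP_parallel (void (*fn) (void *), void *data,
                               unsigned num_threads, unsigned flags);

    static void body (void *data)
    {
      (void) data;
      printf ("hello from one thread\n");
    }

    int main (void)
    {
      GOMP_parallel (body, (void *) 0, 4, 0);  /* 4 threads, flags 0.  */
      return 0;
    }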
@@ -3496,7 +4203,7 @@ expand_parallel_call (struct omp_region *region, basic_block bb,
static void
expand_task_call (basic_block bb, gimple entry_stmt)
{
- tree t, t1, t2, t3, flags, cond, c, c2, clauses;
+ tree t, t1, t2, t3, flags, cond, c, c2, clauses, depend;
gimple_stmt_iterator gsi;
location_t loc = gimple_location (entry_stmt);
@@ -3510,8 +4217,9 @@ expand_task_call (basic_block bb, gimple entry_stmt)
c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
+ depend = find_omp_clause (clauses, OMP_CLAUSE_DEPEND);
flags = build_int_cst (unsigned_type_node,
- (c ? 1 : 0) + (c2 ? 4 : 0));
+ (c ? 1 : 0) + (c2 ? 4 : 0) + (depend ? 8 : 0));
c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
if (c)
@@ -3522,6 +4230,10 @@ expand_task_call (basic_block bb, gimple entry_stmt)
build_int_cst (unsigned_type_node, 0));
flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
}
+ if (depend)
+ depend = OMP_CLAUSE_DECL (depend);
+ else
+ depend = build_int_cst (ptr_type_node, 0);
gsi = gsi_last_bb (bb);
t = gimple_omp_task_data_arg (entry_stmt);
@@ -3537,9 +4249,10 @@ expand_task_call (basic_block bb, gimple entry_stmt)
t3 = build_fold_addr_expr_loc (loc, t);
t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
- 7, t1, t2, t3,
+ 8, t1, t2, t3,
gimple_omp_task_arg_size (entry_stmt),
- gimple_omp_task_arg_align (entry_stmt), cond, flags);
+ gimple_omp_task_arg_align (entry_stmt), cond, flags,
+ depend);
force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
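
GOMP_task thus grows an eighth argument carrying the depend vector (or a null pointer), with 8 added to the flags word when depend clauses are present. The clause itself looks like this (illustrative):

    void pipeline (int *x)
    {
    #pragma omp parallel
    #pragma omp single
      {
    #pragma omp task depend(out: x[0])
        x[0] = 1;
    #pragma omp task depend(inout: x[0])
        x[0] += 2;   /* Guaranteed to run after the producer task.  */
      }
    }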
@@ -4064,7 +4777,9 @@ expand_omp_taskreg (struct omp_region *region)
count = count1 * count2 * count3;
Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
count = 0;
- and set ZERO_ITER_BB to that bb. */
+ and set ZERO_ITER_BB to that bb. If this isn't the outermost
+ of the combined loop constructs, just initialize the COUNTS array
+ from the _looptemp_ clauses. */
/* NOTE: It *could* be better to moosh all of the BBs together,
creating one larger BB with all the computation and the unexpected
@@ -4106,6 +4821,28 @@ expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
/* Collapsed loops need work for expansion into SSA form. */
gcc_assert (!gimple_in_ssa_p (cfun));
+ if (gimple_omp_for_combined_into_p (fd->for_stmt)
+ && TREE_CODE (fd->loop.n2) != INTEGER_CST)
+ {
+ /* First two _looptemp_ clauses are for istart/iend, counts[0]
+ isn't supposed to be handled, as the inner loop doesn't
+ use it. */
+ tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ for (i = 0; i < fd->collapse; i++)
+ {
+ innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ if (i)
+ counts[i] = OMP_CLAUSE_DECL (innerc);
+ else
+ counts[0] = NULL_TREE;
+ }
+ return;
+ }
+
for (i = 0; i < fd->collapse; i++)
{
tree itype = TREE_TYPE (fd->loops[i].v);
@@ -4209,13 +4946,49 @@ expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
V2 = N21 + (T % count2) * STEP2;
T = T / count2;
V1 = N11 + T * STEP1;
- if this loop doesn't have an inner loop construct combined with it. */
+ if this loop doesn't have an inner loop construct combined with it.
+ If it does have an inner loop construct combined with it and the
+ iteration count isn't known constant, store values from counts array
+ into its _looptemp_ temporaries instead. */
static void
expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
- tree *counts, tree startvar)
+ tree *counts, gimple inner_stmt, tree startvar)
{
int i;
+ if (gimple_omp_for_combined_p (fd->for_stmt))
+ {
+ /* If fd->loop.n2 is constant, then no propagation of the counts
+ is needed, they are constant. */
+ if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
+ return;
+
+ tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
+ ? gimple_omp_parallel_clauses (inner_stmt)
+ : gimple_omp_for_clauses (inner_stmt);
+ /* First two _looptemp_ clauses are for istart/iend, counts[0]
+ isn't supposed to be handled, as the inner loop doesn't
+ use it. */
+ tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ for (i = 0; i < fd->collapse; i++)
+ {
+ innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ if (i)
+ {
+ tree tem = OMP_CLAUSE_DECL (innerc);
+ tree t = fold_convert (TREE_TYPE (tem), counts[i]);
+ t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
+ false, GSI_CONTINUE_LINKING);
+ gimple stmt = gimple_build_assign (tem, t);
+ gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
+ }
+ }
+ return;
+ }
+
tree type = TREE_TYPE (fd->loop.v);
tree tem = create_tmp_reg (type, ".tem");
gimple stmt = gimple_build_assign (tem, startvar);
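
The recovery arithmetic in the comment above (V2 = N21 + (T % count2) * STEP2; T = T / count2; V1 = N11 + T * STEP1) is mixed-radix decomposition of the flattened counter. A standalone check with the same names, using zero bases and unit steps (illustrative):

    #include <stdio.h>

    int main (void)
    {
      const int count1 = 3, count2 = 4;
      const int N11 = 0, N21 = 0, STEP1 = 1, STEP2 = 1;
      for (int T = 0; T < count1 * count2; T++)
        {
          int V2 = N21 + (T % count2) * STEP2;  /* inner index */
          int V1 = N11 + (T / count2) * STEP1;  /* outer index */
          printf ("T=%2d -> V1=%d V2=%d\n", T, V1, V2);
        }
      return 0;
    }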
@@ -4368,6 +5141,10 @@ extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
If this is a combined omp parallel loop, instead of the call to
GOMP_loop_foo_start, we call GOMP_loop_foo_next.
+ If this is a gimple_omp_for_combined_p loop, then instead of assigning
+ V and iend in L0 we assign the first two _looptemp_ clause decls of the
+ inner GIMPLE_OMP_FOR, and the statements V += STEP; and
+ if (V cond iend) goto L1; else goto L2; are removed.
For collapsed loops, given parameters:
collapse(3)
@@ -4437,7 +5214,8 @@ static void
expand_omp_for_generic (struct omp_region *region,
struct omp_for_data *fd,
enum built_in_function start_fn,
- enum built_in_function next_fn)
+ enum built_in_function next_fn,
+ gimple inner_stmt)
{
tree type, istart0, iend0, iend;
tree t, vmain, vback, bias = NULL_TREE;
@@ -4552,6 +5330,17 @@ expand_omp_for_generic (struct omp_region *region,
t2 = fold_convert (fd->iter_type, fd->loop.step);
t1 = fd->loop.n2;
t0 = fd->loop.n1;
+ if (gimple_omp_for_combined_into_p (fd->for_stmt))
+ {
+ tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ t0 = OMP_CLAUSE_DECL (innerc);
+ innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ t1 = OMP_CLAUSE_DECL (innerc);
+ }
if (POINTER_TYPE_P (TREE_TYPE (t0))
&& TYPE_PRECISION (TREE_TYPE (t0))
!= TYPE_PRECISION (fd->iter_type))
@@ -4622,6 +5411,21 @@ expand_omp_for_generic (struct omp_region *region,
tree startvar = fd->loop.v;
tree endvar = NULL_TREE;
+ if (gimple_omp_for_combined_p (fd->for_stmt))
+ {
+ gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR
+ && gimple_omp_for_kind (inner_stmt)
+ == GF_OMP_FOR_KIND_SIMD);
+ tree innerc = find_omp_clause (gimple_omp_for_clauses (inner_stmt),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ startvar = OMP_CLAUSE_DECL (innerc);
+ innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ endvar = OMP_CLAUSE_DECL (innerc);
+ }
+
gsi = gsi_start_bb (l0_bb);
t = istart0;
if (bias)
@@ -4650,7 +5454,7 @@ expand_omp_for_generic (struct omp_region *region,
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
}
if (fd->collapse > 1)
- expand_omp_for_init_vars (fd, &gsi, counts, startvar);
+ expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
if (!broken_loop)
{
@@ -4662,8 +5466,7 @@ expand_omp_for_generic (struct omp_region *region,
vmain = gimple_omp_continue_control_use (stmt);
vback = gimple_omp_continue_control_def (stmt);
- /* OMP4 placeholder: if (!gimple_omp_for_combined_p (fd->for_stmt)). */
- if (1)
+ if (!gimple_omp_for_combined_p (fd->for_stmt))
{
if (POINTER_TYPE_P (type))
t = fold_build_pointer_plus (vmain, fd->loop.step);
@@ -4686,7 +5489,7 @@ expand_omp_for_generic (struct omp_region *region,
/* Remove GIMPLE_OMP_CONTINUE. */
gsi_remove (&gsi, true);
- if (fd->collapse > 1)
+ if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
/* Emit code to get the next parallel iteration in L2_BB. */
@@ -4708,9 +5511,13 @@ expand_omp_for_generic (struct omp_region *region,
gsi = gsi_last_bb (exit_bb);
if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
+ else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
+ t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
else
t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
stmt = gimple_build_call (t, 0);
+ if (gimple_omp_return_lhs (gsi_stmt (gsi)))
+ gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
gsi_remove (&gsi, true);
@@ -4738,9 +5545,11 @@ expand_omp_for_generic (struct omp_region *region,
if (current_loops)
add_bb_to_loop (l2_bb, cont_bb->loop_father);
e = find_edge (cont_bb, l1_bb);
- /* OMP4 placeholder for gimple_omp_for_combined_p (fd->for_stmt). */
- if (0)
- ;
+ if (gimple_omp_for_combined_p (fd->for_stmt))
+ {
+ remove_edge (e);
+ e = NULL;
+ }
else if (fd->collapse > 1)
{
remove_edge (e);
@@ -4774,8 +5583,7 @@ expand_omp_for_generic (struct omp_region *region,
outer_loop->latch = l2_bb;
add_loop (outer_loop, l0_bb->loop_father);
- /* OMP4 placeholder: if (!gimple_omp_for_combined_p (fd->for_stmt)). */
- if (1)
+ if (!gimple_omp_for_combined_p (fd->for_stmt))
{
struct loop *loop = alloc_loop ();
loop->header = l1_bb;
@@ -4825,16 +5633,22 @@ expand_omp_for_generic (struct omp_region *region,
static void
expand_omp_for_static_nochunk (struct omp_region *region,
- struct omp_for_data *fd)
+ struct omp_for_data *fd,
+ gimple inner_stmt)
{
tree n, q, s0, e0, e, t, tt, nthreads, threadid;
tree type, itype, vmain, vback;
basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
- basic_block body_bb, cont_bb;
+ basic_block body_bb, cont_bb, collapse_bb = NULL;
basic_block fin_bb;
gimple_stmt_iterator gsi;
gimple stmt;
edge ep;
+ enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
+ enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
+ bool broken_loop = region->cont == NULL;
+ tree *counts = NULL;
+ tree n1, n2, step;
itype = type = TREE_TYPE (fd->loop.v);
if (POINTER_TYPE_P (type))
@@ -4843,25 +5657,49 @@ expand_omp_for_static_nochunk (struct omp_region *region,
entry_bb = region->entry;
cont_bb = region->cont;
gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
- gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
+ fin_bb = BRANCH_EDGE (entry_bb)->dest;
+ gcc_assert (broken_loop
+ || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
body_bb = single_succ (seq_start_bb);
- gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
- gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
- fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
+ if (!broken_loop)
+ {
+ gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
+ gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
+ }
exit_bb = region->exit;
/* Iteration space partitioning goes in ENTRY_BB. */
gsi = gsi_last_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
- t = fold_binary (fd->loop.cond_code, boolean_type_node,
- fold_convert (type, fd->loop.n1),
- fold_convert (type, fd->loop.n2));
- if (TYPE_UNSIGNED (type)
+ if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
+ {
+ get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
+ get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
+ }
+
+ if (fd->collapse > 1)
+ {
+ int first_zero_iter = -1;
+ basic_block l2_dom_bb = NULL;
+
+ counts = XALLOCAVEC (tree, fd->collapse);
+ expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
+ fin_bb, first_zero_iter,
+ l2_dom_bb);
+ t = NULL_TREE;
+ }
+ else if (gimple_omp_for_combined_into_p (fd->for_stmt))
+ t = integer_one_node;
+ else
+ t = fold_binary (fd->loop.cond_code, boolean_type_node,
+ fold_convert (type, fd->loop.n1),
+ fold_convert (type, fd->loop.n2));
+ if (fd->collapse == 1
+ && TYPE_UNSIGNED (type)
&& (t == NULL_TREE || !integer_onep (t)))
{
- tree n1, n2;
n1 = fold_convert (type, unshare_expr (fd->loop.n1));
n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
true, GSI_SAME_STMT);
@@ -4899,36 +5737,47 @@ expand_omp_for_static_nochunk (struct omp_region *region,
gsi = gsi_last_bb (entry_bb);
}
- t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
+ t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
t = fold_convert (itype, t);
nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
true, GSI_SAME_STMT);
- t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
+ t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
t = fold_convert (itype, t);
threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
true, GSI_SAME_STMT);
- fd->loop.n1
- = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
- true, NULL_TREE, true, GSI_SAME_STMT);
- fd->loop.n2
- = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
- true, NULL_TREE, true, GSI_SAME_STMT);
- fd->loop.step
- = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
- true, NULL_TREE, true, GSI_SAME_STMT);
+ n1 = fd->loop.n1;
+ n2 = fd->loop.n2;
+ step = fd->loop.step;
+ if (gimple_omp_for_combined_into_p (fd->for_stmt))
+ {
+ tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ n1 = OMP_CLAUSE_DECL (innerc);
+ innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ n2 = OMP_CLAUSE_DECL (innerc);
+ }
+ n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
+ true, NULL_TREE, true, GSI_SAME_STMT);
+ n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
+ true, NULL_TREE, true, GSI_SAME_STMT);
+ step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
+ true, NULL_TREE, true, GSI_SAME_STMT);
t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
- t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
- t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
- t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
+ t = fold_build2 (PLUS_EXPR, itype, step, t);
+ t = fold_build2 (PLUS_EXPR, itype, t, n2);
+ t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
t = fold_build2 (TRUNC_DIV_EXPR, itype,
fold_build1 (NEGATE_EXPR, itype, t),
- fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
+ fold_build1 (NEGATE_EXPR, itype, step));
else
- t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
+ t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
t = fold_convert (itype, t);
n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
@@ -4976,58 +5825,96 @@ expand_omp_for_static_nochunk (struct omp_region *region,
/* Setup code for sequential iteration goes in SEQ_START_BB. */
gsi = gsi_start_bb (seq_start_bb);
+ tree startvar = fd->loop.v;
+ tree endvar = NULL_TREE;
+
+ if (gimple_omp_for_combined_p (fd->for_stmt))
+ {
+ tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
+ ? gimple_omp_parallel_clauses (inner_stmt)
+ : gimple_omp_for_clauses (inner_stmt);
+ tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ startvar = OMP_CLAUSE_DECL (innerc);
+ innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ endvar = OMP_CLAUSE_DECL (innerc);
+ }
t = fold_convert (itype, s0);
- t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
+ t = fold_build2 (MULT_EXPR, itype, t, step);
if (POINTER_TYPE_P (type))
- t = fold_build_pointer_plus (fd->loop.n1, t);
+ t = fold_build_pointer_plus (n1, t);
else
- t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
+ t = fold_build2 (PLUS_EXPR, type, t, n1);
+ t = fold_convert (TREE_TYPE (startvar), t);
t = force_gimple_operand_gsi (&gsi, t,
- DECL_P (fd->loop.v)
- && TREE_ADDRESSABLE (fd->loop.v),
+ DECL_P (startvar)
+ && TREE_ADDRESSABLE (startvar),
NULL_TREE, false, GSI_CONTINUE_LINKING);
- stmt = gimple_build_assign (fd->loop.v, t);
+ stmt = gimple_build_assign (startvar, t);
gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
t = fold_convert (itype, e0);
- t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
+ t = fold_build2 (MULT_EXPR, itype, t, step);
if (POINTER_TYPE_P (type))
- t = fold_build_pointer_plus (fd->loop.n1, t);
+ t = fold_build_pointer_plus (n1, t);
else
- t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
+ t = fold_build2 (PLUS_EXPR, type, t, n1);
+ t = fold_convert (TREE_TYPE (startvar), t);
e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
+ if (endvar)
+ {
+ stmt = gimple_build_assign (endvar, e);
+ gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
+ }
+ if (fd->collapse > 1)
+ expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
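
In other words, once the bookkeeping has produced a thread's logical subrange [s0, e0), the block above converts it back into loop-variable bounds: startvar = n1 + s0 * step and e = n1 + e0 * step, the latter stored into endvar for combined constructs. A hedged sketch of that mapping:

    #include <stdio.h>

    /* Hypothetical loop body, for illustration only.  */
    static void body (long v) { printf ("%ld\n", v); }

    /* Sketch of the seq_start_bb logic above: map the logical
       iteration subrange [s0, e0) back to loop-variable values.  */
    static void
    run_subrange (long n1, long step, long s0, long e0)
    {
      long start = n1 + s0 * step;   /* stored into startvar */
      long end   = n1 + e0 * step;   /* stored into endvar when combined */
      for (long v = start; v < end; v += step)
        body (v);
    }

    int
    main (void)
    {
      /* Thread 1 of 4 on "for (v = 0; v < 100; v += 2)" (50 iterations)
         owns logical iterations [13, 26), i.e. v = 26, 28, ..., 50.  */
      run_subrange (0, 2, 13, 26);
      return 0;
    }
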
- /* The code controlling the sequential loop replaces the
- GIMPLE_OMP_CONTINUE. */
- gsi = gsi_last_bb (cont_bb);
- stmt = gsi_stmt (gsi);
- gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
- vmain = gimple_omp_continue_control_use (stmt);
- vback = gimple_omp_continue_control_def (stmt);
+ if (!broken_loop)
+ {
+ /* The code controlling the sequential loop replaces the
+ GIMPLE_OMP_CONTINUE. */
+ gsi = gsi_last_bb (cont_bb);
+ stmt = gsi_stmt (gsi);
+ gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
+ vmain = gimple_omp_continue_control_use (stmt);
+ vback = gimple_omp_continue_control_def (stmt);
- if (POINTER_TYPE_P (type))
- t = fold_build_pointer_plus (vmain, fd->loop.step);
- else
- t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
- t = force_gimple_operand_gsi (&gsi, t,
- DECL_P (vback) && TREE_ADDRESSABLE (vback),
- NULL_TREE, true, GSI_SAME_STMT);
- stmt = gimple_build_assign (vback, t);
- gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
+ if (!gimple_omp_for_combined_p (fd->for_stmt))
+ {
+ if (POINTER_TYPE_P (type))
+ t = fold_build_pointer_plus (vmain, step);
+ else
+ t = fold_build2 (PLUS_EXPR, type, vmain, step);
+ t = force_gimple_operand_gsi (&gsi, t,
+ DECL_P (vback)
+ && TREE_ADDRESSABLE (vback),
+ NULL_TREE, true, GSI_SAME_STMT);
+ stmt = gimple_build_assign (vback, t);
+ gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
- t = build2 (fd->loop.cond_code, boolean_type_node,
- DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, e);
- gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
+ t = build2 (fd->loop.cond_code, boolean_type_node,
+ DECL_P (vback) && TREE_ADDRESSABLE (vback)
+ ? t : vback, e);
+ gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
+ }
- /* Remove the GIMPLE_OMP_CONTINUE statement. */
- gsi_remove (&gsi, true);
+ /* Remove the GIMPLE_OMP_CONTINUE statement. */
+ gsi_remove (&gsi, true);
+
+ if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
+ collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
+ }
/* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
gsi = gsi_last_bb (exit_bb);
if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
- force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
- false, GSI_SAME_STMT);
+ {
+ t = gimple_omp_return_lhs (gsi_stmt (gsi));
+ gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
+ }
gsi_remove (&gsi, true);
/* Connect all the blocks. */
@@ -5039,21 +5926,42 @@ expand_omp_for_static_nochunk (struct omp_region *region,
find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
- find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
- find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
+ if (!broken_loop)
+ {
+ ep = find_edge (cont_bb, body_bb);
+ if (gimple_omp_for_combined_p (fd->for_stmt))
+ {
+ remove_edge (ep);
+ ep = NULL;
+ }
+ else if (fd->collapse > 1)
+ {
+ remove_edge (ep);
+ ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
+ }
+ else
+ ep->flags = EDGE_TRUE_VALUE;
+ find_edge (cont_bb, fin_bb)->flags
+ = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
+ }
set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
+
set_immediate_dominator (CDI_DOMINATORS, body_bb,
recompute_dominator (CDI_DOMINATORS, body_bb));
set_immediate_dominator (CDI_DOMINATORS, fin_bb,
recompute_dominator (CDI_DOMINATORS, fin_bb));
- struct loop *loop = alloc_loop ();
- loop->header = body_bb;
- loop->latch = cont_bb;
- add_loop (loop, body_bb->loop_father);
+ if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
+ {
+ struct loop *loop = alloc_loop ();
+ loop->header = body_bb;
+ if (collapse_bb == NULL)
+ loop->latch = cont_bb;
+ add_loop (loop, body_bb->loop_father);
+ }
}
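
The s0/e0 values consumed above come from the static (no chunk) partition computed earlier in this function, in a hunk elided from this diff. Under that assumption, the partition is the familiar near-equal split, sketched below with illustrative names:

    /* Sketch of the static (no chunk) partition assumed above: n
       iterations split into nthreads nearly-equal blocks, the first
       n % nthreads threads getting one extra iteration.  Illustrative
       only; the actual computation lives in an elided hunk.  */
    static void
    static_partition (unsigned long n, unsigned long nthreads,
                      unsigned long tid, unsigned long *s0,
                      unsigned long *e0)
    {
      unsigned long q = n / nthreads;
      unsigned long r = n % nthreads;
      *s0 = q * tid + (tid < r ? tid : r);
      *e0 = *s0 + q + (tid < r ? 1 : 0);
    }
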
@@ -5096,16 +6004,22 @@ expand_omp_for_static_nochunk (struct omp_region *region,
*/
static void
-expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
+expand_omp_for_static_chunk (struct omp_region *region,
+ struct omp_for_data *fd, gimple inner_stmt)
{
tree n, s0, e0, e, t;
tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
tree type, itype, v_main, v_back, v_extra;
basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
- basic_block trip_update_bb, cont_bb, fin_bb;
+ basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb;
gimple_stmt_iterator si;
gimple stmt;
edge se;
+ enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
+ enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
+ bool broken_loop = region->cont == NULL;
+ tree *counts = NULL;
+ tree n1, n2, step;
itype = type = TREE_TYPE (fd->loop.v);
if (POINTER_TYPE_P (type))
@@ -5117,27 +6031,50 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
iter_part_bb = se->dest;
cont_bb = region->cont;
gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
- gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
- == FALLTHRU_EDGE (cont_bb)->dest);
+ fin_bb = BRANCH_EDGE (iter_part_bb)->dest;
+ gcc_assert (broken_loop
+ || fin_bb == FALLTHRU_EDGE (cont_bb)->dest);
seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
body_bb = single_succ (seq_start_bb);
- gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
- gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
- fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
- trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
+ if (!broken_loop)
+ {
+ gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
+ gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
+ trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
+ }
exit_bb = region->exit;
/* Trip and adjustment setup goes in ENTRY_BB. */
si = gsi_last_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
- t = fold_binary (fd->loop.cond_code, boolean_type_node,
- fold_convert (type, fd->loop.n1),
- fold_convert (type, fd->loop.n2));
- if (TYPE_UNSIGNED (type)
+ if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
+ {
+ get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
+ get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
+ }
+
+ if (fd->collapse > 1)
+ {
+ int first_zero_iter = -1;
+ basic_block l2_dom_bb = NULL;
+
+ counts = XALLOCAVEC (tree, fd->collapse);
+ expand_omp_for_init_counts (fd, &si, entry_bb, counts,
+ fin_bb, first_zero_iter,
+ l2_dom_bb);
+ t = NULL_TREE;
+ }
+ else if (gimple_omp_for_combined_into_p (fd->for_stmt))
+ t = integer_one_node;
+ else
+ t = fold_binary (fd->loop.cond_code, boolean_type_node,
+ fold_convert (type, fd->loop.n1),
+ fold_convert (type, fd->loop.n2));
+ if (fd->collapse == 1
+ && TYPE_UNSIGNED (type)
&& (t == NULL_TREE || !integer_onep (t)))
{
- tree n1, n2;
n1 = fold_convert (type, unshare_expr (fd->loop.n1));
n1 = force_gimple_operand_gsi (&si, n1, true, NULL_TREE,
true, GSI_SAME_STMT);
@@ -5175,39 +6112,50 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
si = gsi_last_bb (entry_bb);
}
- t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
+ t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
t = fold_convert (itype, t);
nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
true, GSI_SAME_STMT);
- t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
+ t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
t = fold_convert (itype, t);
threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
true, GSI_SAME_STMT);
- fd->loop.n1
- = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
- true, NULL_TREE, true, GSI_SAME_STMT);
- fd->loop.n2
- = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
- true, NULL_TREE, true, GSI_SAME_STMT);
- fd->loop.step
- = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
- true, NULL_TREE, true, GSI_SAME_STMT);
+ n1 = fd->loop.n1;
+ n2 = fd->loop.n2;
+ step = fd->loop.step;
+ if (gimple_omp_for_combined_into_p (fd->for_stmt))
+ {
+ tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ n1 = OMP_CLAUSE_DECL (innerc);
+ innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ n2 = OMP_CLAUSE_DECL (innerc);
+ }
+ n1 = force_gimple_operand_gsi (&si, fold_convert (type, n1),
+ true, NULL_TREE, true, GSI_SAME_STMT);
+ n2 = force_gimple_operand_gsi (&si, fold_convert (itype, n2),
+ true, NULL_TREE, true, GSI_SAME_STMT);
+ step = force_gimple_operand_gsi (&si, fold_convert (itype, step),
+ true, NULL_TREE, true, GSI_SAME_STMT);
fd->chunk_size
= force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
true, NULL_TREE, true, GSI_SAME_STMT);
t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
- t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
- t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
- t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
+ t = fold_build2 (PLUS_EXPR, itype, step, t);
+ t = fold_build2 (PLUS_EXPR, itype, t, n2);
+ t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
t = fold_build2 (TRUNC_DIV_EXPR, itype,
fold_build1 (NEGATE_EXPR, itype, t),
- fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
+ fold_build1 (NEGATE_EXPR, itype, step));
else
- t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
+ t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
t = fold_convert (itype, t);
n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
true, GSI_SAME_STMT);
@@ -5230,11 +6178,11 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
gsi_insert_before (&si, stmt, GSI_SAME_STMT);
t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
- t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
+ t = fold_build2 (MULT_EXPR, itype, t, step);
if (POINTER_TYPE_P (type))
- t = fold_build_pointer_plus (fd->loop.n1, t);
+ t = fold_build_pointer_plus (n1, t);
else
- t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
+ t = fold_build2 (PLUS_EXPR, type, t, n1);
v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
true, GSI_SAME_STMT);
@@ -5261,77 +6209,130 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
/* Setup code for sequential iteration goes in SEQ_START_BB. */
si = gsi_start_bb (seq_start_bb);
+ tree startvar = fd->loop.v;
+ tree endvar = NULL_TREE;
+
+ if (gimple_omp_for_combined_p (fd->for_stmt))
+ {
+ tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
+ ? gimple_omp_parallel_clauses (inner_stmt)
+ : gimple_omp_for_clauses (inner_stmt);
+ tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ startvar = OMP_CLAUSE_DECL (innerc);
+ innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ endvar = OMP_CLAUSE_DECL (innerc);
+ }
+
t = fold_convert (itype, s0);
- t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
+ t = fold_build2 (MULT_EXPR, itype, t, step);
if (POINTER_TYPE_P (type))
- t = fold_build_pointer_plus (fd->loop.n1, t);
+ t = fold_build_pointer_plus (n1, t);
else
- t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
+ t = fold_build2 (PLUS_EXPR, type, t, n1);
+ t = fold_convert (TREE_TYPE (startvar), t);
t = force_gimple_operand_gsi (&si, t,
- DECL_P (fd->loop.v)
- && TREE_ADDRESSABLE (fd->loop.v),
+ DECL_P (startvar)
+ && TREE_ADDRESSABLE (startvar),
NULL_TREE, false, GSI_CONTINUE_LINKING);
- stmt = gimple_build_assign (fd->loop.v, t);
+ stmt = gimple_build_assign (startvar, t);
gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
t = fold_convert (itype, e0);
- t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
+ t = fold_build2 (MULT_EXPR, itype, t, step);
if (POINTER_TYPE_P (type))
- t = fold_build_pointer_plus (fd->loop.n1, t);
+ t = fold_build_pointer_plus (n1, t);
else
- t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
+ t = fold_build2 (PLUS_EXPR, type, t, n1);
+ t = fold_convert (TREE_TYPE (startvar), t);
e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
false, GSI_CONTINUE_LINKING);
+ if (endvar)
+ {
+ stmt = gimple_build_assign (endvar, e);
+ gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
+ }
+ if (fd->collapse > 1)
+ expand_omp_for_init_vars (fd, &si, counts, inner_stmt, startvar);
- /* The code controlling the sequential loop goes in CONT_BB,
- replacing the GIMPLE_OMP_CONTINUE. */
- si = gsi_last_bb (cont_bb);
- stmt = gsi_stmt (si);
- gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
- v_main = gimple_omp_continue_control_use (stmt);
- v_back = gimple_omp_continue_control_def (stmt);
+ if (!broken_loop)
+ {
+ /* The code controlling the sequential loop goes in CONT_BB,
+ replacing the GIMPLE_OMP_CONTINUE. */
+ si = gsi_last_bb (cont_bb);
+ stmt = gsi_stmt (si);
+ gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
+ v_main = gimple_omp_continue_control_use (stmt);
+ v_back = gimple_omp_continue_control_def (stmt);
- if (POINTER_TYPE_P (type))
- t = fold_build_pointer_plus (v_main, fd->loop.step);
- else
- t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
- if (DECL_P (v_back) && TREE_ADDRESSABLE (v_back))
- t = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
- true, GSI_SAME_STMT);
- stmt = gimple_build_assign (v_back, t);
- gsi_insert_before (&si, stmt, GSI_SAME_STMT);
+ if (!gimple_omp_for_combined_p (fd->for_stmt))
+ {
+ if (POINTER_TYPE_P (type))
+ t = fold_build_pointer_plus (v_main, step);
+ else
+ t = fold_build2 (PLUS_EXPR, type, v_main, step);
+ if (DECL_P (v_back) && TREE_ADDRESSABLE (v_back))
+ t = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
+ true, GSI_SAME_STMT);
+ stmt = gimple_build_assign (v_back, t);
+ gsi_insert_before (&si, stmt, GSI_SAME_STMT);
- t = build2 (fd->loop.cond_code, boolean_type_node,
- DECL_P (v_back) && TREE_ADDRESSABLE (v_back)
- ? t : v_back, e);
- gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
+ t = build2 (fd->loop.cond_code, boolean_type_node,
+ DECL_P (v_back) && TREE_ADDRESSABLE (v_back)
+ ? t : v_back, e);
+ gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
+ }
- /* Remove GIMPLE_OMP_CONTINUE. */
- gsi_remove (&si, true);
+ /* Remove GIMPLE_OMP_CONTINUE. */
+ gsi_remove (&si, true);
- /* Trip update code goes into TRIP_UPDATE_BB. */
- si = gsi_start_bb (trip_update_bb);
+ if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
+ collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
- t = build_int_cst (itype, 1);
- t = build2 (PLUS_EXPR, itype, trip_main, t);
- stmt = gimple_build_assign (trip_back, t);
- gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
+ /* Trip update code goes into TRIP_UPDATE_BB. */
+ si = gsi_start_bb (trip_update_bb);
+
+ t = build_int_cst (itype, 1);
+ t = build2 (PLUS_EXPR, itype, trip_main, t);
+ stmt = gimple_build_assign (trip_back, t);
+ gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
+ }
/* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
si = gsi_last_bb (exit_bb);
if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
- force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
- false, GSI_SAME_STMT);
+ {
+ t = gimple_omp_return_lhs (gsi_stmt (si));
+ gsi_insert_after (&si, build_omp_barrier (t), GSI_SAME_STMT);
+ }
gsi_remove (&si, true);
/* Connect the new blocks. */
find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
- find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
- find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
+ if (!broken_loop)
+ {
+ se = find_edge (cont_bb, body_bb);
+ if (gimple_omp_for_combined_p (fd->for_stmt))
+ {
+ remove_edge (se);
+ se = NULL;
+ }
+ else if (fd->collapse > 1)
+ {
+ remove_edge (se);
+ se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
+ }
+ else
+ se->flags = EDGE_TRUE_VALUE;
+ find_edge (cont_bb, trip_update_bb)->flags
+ = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
- redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
+ redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
+ }
if (gimple_in_ssa_p (cfun))
{
@@ -5342,6 +6343,8 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
edge_var_map *vm;
size_t i;
+ gcc_assert (fd->collapse == 1 && !broken_loop);
+
/* When we redirect the edge from trip_update_bb to iter_part_bb, we
remove arguments of the phi nodes in fin_bb. We need to create
appropriate phi nodes in iter_part_bb instead. */
@@ -5391,7 +6394,8 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
UNKNOWN_LOCATION);
}
- set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
+ if (!broken_loop)
+ set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
recompute_dominator (CDI_DOMINATORS, iter_part_bb));
set_immediate_dominator (CDI_DOMINATORS, fin_bb,
@@ -5401,17 +6405,24 @@ expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
set_immediate_dominator (CDI_DOMINATORS, body_bb,
recompute_dominator (CDI_DOMINATORS, body_bb));
- struct loop *trip_loop = alloc_loop ();
- trip_loop->header = iter_part_bb;
- trip_loop->latch = trip_update_bb;
- add_loop (trip_loop, iter_part_bb->loop_father);
+ if (!broken_loop)
+ {
+ struct loop *trip_loop = alloc_loop ();
+ trip_loop->header = iter_part_bb;
+ trip_loop->latch = trip_update_bb;
+ add_loop (trip_loop, iter_part_bb->loop_father);
- struct loop *loop = alloc_loop ();
- loop->header = body_bb;
- loop->latch = cont_bb;
- add_loop (loop, trip_loop);
+ if (!gimple_omp_for_combined_p (fd->for_stmt))
+ {
+ struct loop *loop = alloc_loop ();
+ loop->header = body_bb;
+ loop->latch = cont_bb;
+ add_loop (loop, trip_loop);
+ }
+ }
}
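
The loop structure registered above (trip_loop with header iter_part_bb and latch trip_update_bb, plus an inner loop when the construct is not combined) corresponds roughly to the C shape below. The sketch is illustrative only and assumes the usual schedule(static, chunk) round-robin assignment of chunks to threads:

    #include <stdio.h>

    static void body (long v) { printf ("%ld\n", v); }

    /* Rough C shape of the CFG built above: an outer trip loop
       (header iter_part_bb, latch trip_update_bb) and an inner loop
       over one chunk (header body_bb, latch cont_bb).  */
    static void
    static_chunk (unsigned long n, long n1, long step,
                  unsigned long nthreads, unsigned long tid,
                  unsigned long chunk)
    {
      for (unsigned long trip = 0; ; trip++)
        {
          unsigned long s0 = (trip * nthreads + tid) * chunk;
          if (s0 >= n)
            break;                     /* edge to fin_bb */
          unsigned long e0 = s0 + chunk < n ? s0 + chunk : n;
          for (unsigned long i = s0; i < e0; i++)
            body (n1 + (long) i * step);
        }
    }

    int
    main (void)
    {
      static_chunk (10, 0, 1, 2, 0, 3); /* thread 0 of 2: 0-2, then 6-8 */
      return 0;
    }
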
+
/* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
loop. Given parameters:
@@ -5487,7 +6498,7 @@ expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
OMP_CLAUSE_SAFELEN);
tree simduid = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
OMP_CLAUSE__SIMDUID_);
- tree n2;
+ tree n1, n2;
type = TREE_TYPE (fd->loop.v);
entry_bb = region->entry;
@@ -5530,10 +6541,27 @@ expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
if (l2_dom_bb == NULL)
l2_dom_bb = l1_bb;
+ n1 = fd->loop.n1;
n2 = fd->loop.n2;
- if (0)
- /* Place holder for gimple_omp_for_combined_into_p() in
- the upcoming gomp-4_0-branch merge. */;
+ if (gimple_omp_for_combined_into_p (fd->for_stmt))
+ {
+ tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ n1 = OMP_CLAUSE_DECL (innerc);
+ innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
+ OMP_CLAUSE__LOOPTEMP_);
+ gcc_assert (innerc);
+ n2 = OMP_CLAUSE_DECL (innerc);
+ expand_omp_build_assign (&gsi, fd->loop.v,
+ fold_convert (type, n1));
+ if (fd->collapse > 1)
+ {
+ gsi_prev (&gsi);
+ expand_omp_for_init_vars (fd, &gsi, counts, NULL, n1);
+ gsi_next (&gsi);
+ }
+ }
else
{
expand_omp_build_assign (&gsi, fd->loop.v,
@@ -5706,7 +6734,7 @@ expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
/* Expand the OpenMP loop defined by REGION. */
static void
-expand_omp_for (struct omp_region *region)
+expand_omp_for (struct omp_region *region, gimple inner_stmt)
{
struct omp_for_data fd;
struct omp_for_data_loop *loops;
@@ -5736,14 +6764,12 @@ expand_omp_for (struct omp_region *region)
if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_SIMD)
expand_omp_simd (region, &fd);
else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
- && !fd.have_ordered
- && fd.collapse == 1
- && region->cont != NULL)
+ && !fd.have_ordered)
{
if (fd.chunk_size == NULL)
- expand_omp_for_static_nochunk (region, &fd);
+ expand_omp_for_static_nochunk (region, &fd, inner_stmt);
else
- expand_omp_for_static_chunk (region, &fd);
+ expand_omp_for_static_chunk (region, &fd, inner_stmt);
}
else
{
@@ -5768,7 +6794,7 @@ expand_omp_for (struct omp_region *region)
- (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
}
expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
- (enum built_in_function) next_ix);
+ (enum built_in_function) next_ix, inner_stmt);
}
if (gimple_in_ssa_p (cfun))
@@ -5983,9 +7009,13 @@ expand_omp_sections (struct omp_region *region)
si = gsi_last_bb (l2_bb);
if (gimple_omp_return_nowait_p (gsi_stmt (si)))
t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
+ else if (gimple_omp_return_lhs (gsi_stmt (si)))
+ t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL);
else
t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
stmt = gimple_build_call (t, 0);
+ if (gimple_omp_return_lhs (gsi_stmt (si)))
+ gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si)));
gsi_insert_after (&si, stmt, GSI_SAME_STMT);
gsi_remove (&si, true);
@@ -6001,26 +7031,21 @@ expand_omp_single (struct omp_region *region)
{
basic_block entry_bb, exit_bb;
gimple_stmt_iterator si;
- bool need_barrier = false;
entry_bb = region->entry;
exit_bb = region->exit;
si = gsi_last_bb (entry_bb);
- /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
- be removed. We need to ensure that the thread that entered the single
- does not exit before the data is copied out by the other threads. */
- if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
- OMP_CLAUSE_COPYPRIVATE))
- need_barrier = true;
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
gsi_remove (&si, true);
single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
si = gsi_last_bb (exit_bb);
- if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
- force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
- false, GSI_SAME_STMT);
+ if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
+ {
+ tree t = gimple_omp_return_lhs (gsi_stmt (si));
+ gsi_insert_after (&si, build_omp_barrier (t), GSI_SAME_STMT);
+ }
gsi_remove (&si, true);
single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}
@@ -6042,8 +7067,10 @@ expand_omp_synch (struct omp_region *region)
si = gsi_last_bb (entry_bb);
gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
|| gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
+ || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
|| gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
- || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
+ || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL
+ || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS);
gsi_remove (&si, true);
single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
@@ -6088,7 +7115,10 @@ expand_omp_atomic_load (basic_block load_bb, tree addr,
itype = TREE_TYPE (TREE_TYPE (decl));
call = build_call_expr_loc (loc, decl, 2, addr,
- build_int_cst (NULL, MEMMODEL_RELAXED));
+ build_int_cst (NULL,
+ gimple_omp_atomic_seq_cst_p (stmt)
+ ? MEMMODEL_SEQ_CST
+ : MEMMODEL_RELAXED));
if (!useless_type_conversion_p (type, itype))
call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
@@ -6160,7 +7190,10 @@ expand_omp_atomic_store (basic_block load_bb, tree addr,
if (!useless_type_conversion_p (itype, type))
stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
- build_int_cst (NULL, MEMMODEL_RELAXED));
+ build_int_cst (NULL,
+ gimple_omp_atomic_seq_cst_p (stmt)
+ ? MEMMODEL_SEQ_CST
+ : MEMMODEL_RELAXED));
if (exchange)
{
if (!useless_type_conversion_p (type, itype))
@@ -6201,6 +7234,7 @@ expand_omp_atomic_fetch_op (basic_block load_bb,
enum tree_code code;
bool need_old, need_new;
enum machine_mode imode;
+ bool seq_cst;
/* We expect to find the following sequences:
@@ -6226,6 +7260,7 @@ expand_omp_atomic_fetch_op (basic_block load_bb,
return false;
need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
+ seq_cst = gimple_omp_atomic_seq_cst_p (last_stmt (load_bb));
gcc_checking_assert (!need_old || !need_new);
if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
@@ -6292,7 +7327,9 @@ expand_omp_atomic_fetch_op (basic_block load_bb,
use the RELAXED memory model. */
call = build_call_expr_loc (loc, decl, 3, addr,
fold_convert_loc (loc, itype, rhs),
- build_int_cst (NULL, MEMMODEL_RELAXED));
+ build_int_cst (NULL,
+ seq_cst ? MEMMODEL_SEQ_CST
+ : MEMMODEL_RELAXED));
if (need_old || need_new)
{
@@ -6622,6 +7659,321 @@ expand_omp_atomic (struct omp_region *region)
}
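
The net effect of the seq_cst changes above, stated at the user level: the atomic load, store, and fetch-op builtins now receive MEMMODEL_SEQ_CST instead of MEMMODEL_RELAXED when the directive carries the seq_cst clause. In C11 terms this is roughly the difference below (an analogy, not the GIMPLE actually emitted):

    #include <stdatomic.h>

    /* Illustrative mapping of the memmodel argument chosen above.  */
    void
    add_relaxed (atomic_int *p, int v)   /* #pragma omp atomic */
    {
      atomic_fetch_add_explicit (p, v, memory_order_relaxed);
    }

    void
    add_seq_cst (atomic_int *p, int v)   /* #pragma omp atomic seq_cst */
    {
      atomic_fetch_add_explicit (p, v, memory_order_seq_cst);
    }
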
+/* Expand the OpenMP target{, data, update} directive starting at REGION. */
+
+static void
+expand_omp_target (struct omp_region *region)
+{
+ basic_block entry_bb, exit_bb, new_bb;
+ struct function *child_cfun = NULL;
+ tree child_fn = NULL_TREE, block, t;
+ gimple_stmt_iterator gsi;
+ gimple entry_stmt, stmt;
+ edge e;
+
+ entry_stmt = last_stmt (region->entry);
+ new_bb = region->entry;
+ int kind = gimple_omp_target_kind (entry_stmt);
+ if (kind == GF_OMP_TARGET_KIND_REGION)
+ {
+ child_fn = gimple_omp_target_child_fn (entry_stmt);
+ child_cfun = DECL_STRUCT_FUNCTION (child_fn);
+ }
+
+ entry_bb = region->entry;
+ exit_bb = region->exit;
+
+ if (kind == GF_OMP_TARGET_KIND_REGION)
+ {
+ unsigned srcidx, dstidx, num;
+
+      /* If the target region needs data sent from the parent
+	 function, then the very first statement (except possible
+	 tree profile counter updates) of the target region body
+	 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
+	 &.OMP_DATA_O is passed as an argument to the child function,
+	 we need to replace it with the argument as seen by the child
+	 function.
+
+	 In most cases, this will end up being the identity assignment
+	 .OMP_DATA_I = .OMP_DATA_I.  However, if the target region body
+	 had a function call that has been inlined, the original PARM_DECL
+	 .OMP_DATA_I may have been converted into a different local
+	 variable.  In that case, we need to keep the assignment.  */
+ if (gimple_omp_target_data_arg (entry_stmt))
+ {
+ basic_block entry_succ_bb = single_succ (entry_bb);
+ gimple_stmt_iterator gsi;
+ tree arg;
+ gimple tgtcopy_stmt = NULL;
+ tree sender
+ = TREE_VEC_ELT (gimple_omp_target_data_arg (entry_stmt), 0);
+
+ for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
+ {
+ gcc_assert (!gsi_end_p (gsi));
+ stmt = gsi_stmt (gsi);
+ if (gimple_code (stmt) != GIMPLE_ASSIGN)
+ continue;
+
+ if (gimple_num_ops (stmt) == 2)
+ {
+ tree arg = gimple_assign_rhs1 (stmt);
+
+ /* We're ignoring the subcode because we're
+ effectively doing a STRIP_NOPS. */
+
+ if (TREE_CODE (arg) == ADDR_EXPR
+ && TREE_OPERAND (arg, 0) == sender)
+ {
+ tgtcopy_stmt = stmt;
+ break;
+ }
+ }
+ }
+
+ gcc_assert (tgtcopy_stmt != NULL);
+ arg = DECL_ARGUMENTS (child_fn);
+
+ gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg);
+ gsi_remove (&gsi, true);
+ }
+
+ /* Declare local variables needed in CHILD_CFUN. */
+ block = DECL_INITIAL (child_fn);
+ BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
+      /* The gimplifier could record temporaries in the target block
+	 rather than in the containing function's local_decls chain,
+	 which would mean cgraph would miss finalizing them.  Do it now.  */
+ for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
+ if (TREE_CODE (t) == VAR_DECL
+ && TREE_STATIC (t)
+ && !DECL_EXTERNAL (t))
+ varpool_finalize_decl (t);
+ DECL_SAVED_TREE (child_fn) = NULL;
+ /* We'll create a CFG for child_fn, so no gimple body is needed. */
+ gimple_set_body (child_fn, NULL);
+ TREE_USED (block) = 1;
+
+ /* Reset DECL_CONTEXT on function arguments. */
+ for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
+ DECL_CONTEXT (t) = child_fn;
+
+ /* Split ENTRY_BB at GIMPLE_OMP_TARGET,
+ so that it can be moved to the child function. */
+ gsi = gsi_last_bb (entry_bb);
+ stmt = gsi_stmt (gsi);
+ gcc_assert (stmt && gimple_code (stmt) == GIMPLE_OMP_TARGET
+ && gimple_omp_target_kind (stmt)
+ == GF_OMP_TARGET_KIND_REGION);
+ gsi_remove (&gsi, true);
+ e = split_block (entry_bb, stmt);
+ entry_bb = e->dest;
+ single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
+
+ /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
+ if (exit_bb)
+ {
+ gsi = gsi_last_bb (exit_bb);
+ gcc_assert (!gsi_end_p (gsi)
+ && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
+ stmt = gimple_build_return (NULL);
+ gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
+ gsi_remove (&gsi, true);
+ }
+
+ /* Move the target region into CHILD_CFUN. */
+
+ block = gimple_block (entry_stmt);
+
+ new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
+ if (exit_bb)
+ single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
+      /* When the OMP expansion process cannot guarantee an up-to-date
+	 loop tree, arrange for the child function to fix up its loops.  */
+ if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
+ child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
+
+ /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
+ num = vec_safe_length (child_cfun->local_decls);
+ for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
+ {
+ t = (*child_cfun->local_decls)[srcidx];
+ if (DECL_CONTEXT (t) == cfun->decl)
+ continue;
+ if (srcidx != dstidx)
+ (*child_cfun->local_decls)[dstidx] = t;
+ dstidx++;
+ }
+ if (dstidx != num)
+ vec_safe_truncate (child_cfun->local_decls, dstidx);
+
+ /* Inform the callgraph about the new function. */
+ DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
+ cgraph_add_new_function (child_fn, true);
+
+ /* Fix the callgraph edges for child_cfun. Those for cfun will be
+ fixed in a following pass. */
+ push_cfun (child_cfun);
+ rebuild_cgraph_edges ();
+
+ /* Some EH regions might become dead, see PR34608. If
+ pass_cleanup_cfg isn't the first pass to happen with the
+ new child, these dead EH edges might cause problems.
+ Clean them up now. */
+ if (flag_exceptions)
+ {
+ basic_block bb;
+ bool changed = false;
+
+ FOR_EACH_BB (bb)
+ changed |= gimple_purge_dead_eh_edges (bb);
+ if (changed)
+ cleanup_tree_cfg ();
+ }
+ pop_cfun ();
+ }
+
+ /* Emit a library call to launch the target region, or do data
+ transfers. */
+ tree t1, t2, t3, t4, device, cond, c, clauses;
+ enum built_in_function start_ix;
+ location_t clause_loc;
+
+ clauses = gimple_omp_target_clauses (entry_stmt);
+
+ if (kind == GF_OMP_TARGET_KIND_REGION)
+ start_ix = BUILT_IN_GOMP_TARGET;
+ else if (kind == GF_OMP_TARGET_KIND_DATA)
+ start_ix = BUILT_IN_GOMP_TARGET_DATA;
+ else
+ start_ix = BUILT_IN_GOMP_TARGET_UPDATE;
+
+ /* By default, the value of DEVICE is -1 (let runtime library choose)
+ and there is no conditional. */
+ cond = NULL_TREE;
+ device = build_int_cst (integer_type_node, -1);
+
+ c = find_omp_clause (clauses, OMP_CLAUSE_IF);
+ if (c)
+ cond = OMP_CLAUSE_IF_EXPR (c);
+
+ c = find_omp_clause (clauses, OMP_CLAUSE_DEVICE);
+ if (c)
+ {
+ device = OMP_CLAUSE_DEVICE_ID (c);
+ clause_loc = OMP_CLAUSE_LOCATION (c);
+ }
+ else
+ clause_loc = gimple_location (entry_stmt);
+
+ /* Ensure 'device' is of the correct type. */
+ device = fold_convert_loc (clause_loc, integer_type_node, device);
+
+ /* If we found the clause 'if (cond)', build
+ (cond ? device : -2). */
+ if (cond)
+ {
+ cond = gimple_boolify (cond);
+
+ basic_block cond_bb, then_bb, else_bb;
+ edge e;
+ tree tmp_var;
+
+ tmp_var = create_tmp_var (TREE_TYPE (device), NULL);
+ if (kind != GF_OMP_TARGET_KIND_REGION)
+ {
+ gsi = gsi_last_bb (new_bb);
+ gsi_prev (&gsi);
+ e = split_block (new_bb, gsi_stmt (gsi));
+ }
+ else
+ e = split_block (new_bb, NULL);
+ cond_bb = e->src;
+ new_bb = e->dest;
+ remove_edge (e);
+
+ then_bb = create_empty_bb (cond_bb);
+ else_bb = create_empty_bb (then_bb);
+ set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
+ set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
+
+ stmt = gimple_build_cond_empty (cond);
+ gsi = gsi_last_bb (cond_bb);
+ gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
+
+ gsi = gsi_start_bb (then_bb);
+ stmt = gimple_build_assign (tmp_var, device);
+ gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
+
+ gsi = gsi_start_bb (else_bb);
+ stmt = gimple_build_assign (tmp_var,
+ build_int_cst (integer_type_node, -2));
+ gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
+
+ make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
+ make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
+ if (current_loops)
+ {
+ add_bb_to_loop (then_bb, cond_bb->loop_father);
+ add_bb_to_loop (else_bb, cond_bb->loop_father);
+ }
+ make_edge (then_bb, new_bb, EDGE_FALLTHRU);
+ make_edge (else_bb, new_bb, EDGE_FALLTHRU);
+
+ device = tmp_var;
+ }
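
The three-block CFG built above evaluates, in effect, the ternary from the comment: (cond ? device : -2). A sketch of the resulting device selection, assuming -2 requests host fallback in the runtime (the default -1 lets the runtime choose):

    /* Sketch of the condition/device selection built above.  */
    int
    select_device (int have_if_clause, int cond, int device_id)
    {
      if (!have_if_clause)
        return device_id;        /* -1 by default: runtime chooses */
      return cond ? device_id : -2;
    }
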
+
+ gsi = gsi_last_bb (new_bb);
+ t = gimple_omp_target_data_arg (entry_stmt);
+ if (t == NULL)
+ {
+ t1 = size_zero_node;
+ t2 = build_zero_cst (ptr_type_node);
+ t3 = t2;
+ t4 = t2;
+ }
+ else
+ {
+ t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1))));
+ t1 = size_binop (PLUS_EXPR, t1, size_int (1));
+ t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0));
+ t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1));
+ t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2));
+ }
+
+ gimple g;
+  /* FIXME: This will be the address of the
+     extern char __OPENMP_TARGET__[] __attribute__((visibility ("hidden")))
+     symbol, once the linker plugin is able to create it for us.  */
+ tree openmp_target = build_zero_cst (ptr_type_node);
+ if (kind == GF_OMP_TARGET_KIND_REGION)
+ {
+ tree fnaddr = build_fold_addr_expr (child_fn);
+ g = gimple_build_call (builtin_decl_explicit (start_ix), 7,
+ device, fnaddr, openmp_target, t1, t2, t3, t4);
+ }
+ else
+ g = gimple_build_call (builtin_decl_explicit (start_ix), 6,
+ device, openmp_target, t1, t2, t3, t4);
+ gimple_set_location (g, gimple_location (entry_stmt));
+ gsi_insert_before (&gsi, g, GSI_SAME_STMT);
+ if (kind != GF_OMP_TARGET_KIND_REGION)
+ {
+ g = gsi_stmt (gsi);
+ gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET);
+ gsi_remove (&gsi, true);
+ }
+ if (kind == GF_OMP_TARGET_KIND_DATA && region->exit)
+ {
+ gsi = gsi_last_bb (region->exit);
+ g = gsi_stmt (gsi);
+ gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN);
+ gsi_remove (&gsi, true);
+ }
+}
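
The launch call built above takes seven arguments for a target region and six for target data/update. The libgomp-side signatures below are a sketch inferred from those argument lists, not an authoritative declaration of the runtime API:

    #include <stddef.h>

    /* Assumed runtime entry points matching the calls built above.
       t1 is the map count, t2/t3/t4 point at the host address, size,
       and kind arrays created by the lowering pass.  */
    extern void GOMP_target (int device, void (*fn) (void *),
                             const void *openmp_target, size_t mapnum,
                             void **hostaddrs, size_t *sizes,
                             unsigned char *kinds);
    extern void GOMP_target_data (int device, const void *openmp_target,
                                  size_t mapnum, void **hostaddrs,
                                  size_t *sizes, unsigned char *kinds);
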
+
+
/* Expand the parallel region tree rooted at REGION. Expansion
proceeds in depth-first order. Innermost regions are expanded
first. This way, parallel regions that require a new function to
@@ -6634,12 +7986,17 @@ expand_omp (struct omp_region *region)
while (region)
{
location_t saved_location;
+ gimple inner_stmt = NULL;
/* First, determine whether this is a combined parallel+workshare
region. */
if (region->type == GIMPLE_OMP_PARALLEL)
determine_parallel_type (region);
+ if (region->type == GIMPLE_OMP_FOR
+ && gimple_omp_for_combined_p (last_stmt (region->entry)))
+ inner_stmt = last_stmt (region->inner->entry);
+
if (region->inner)
expand_omp (region->inner);
@@ -6655,7 +8012,7 @@ expand_omp (struct omp_region *region)
break;
case GIMPLE_OMP_FOR:
- expand_omp_for (region);
+ expand_omp_for (region, inner_stmt);
break;
case GIMPLE_OMP_SECTIONS:
@@ -6672,8 +8029,10 @@ expand_omp (struct omp_region *region)
break;
case GIMPLE_OMP_MASTER:
+ case GIMPLE_OMP_TASKGROUP:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_CRITICAL:
+ case GIMPLE_OMP_TEAMS:
expand_omp_synch (region);
break;
@@ -6681,6 +8040,10 @@ expand_omp (struct omp_region *region)
expand_omp_atomic (region);
break;
+ case GIMPLE_OMP_TARGET:
+ expand_omp_target (region);
+ break;
+
default:
gcc_unreachable ();
}
@@ -6745,6 +8108,9 @@ build_omp_regions_1 (basic_block bb, struct omp_region *parent,
GIMPLE_OMP_SECTIONS, and we do nothing for it. */
;
}
+ else if (code == GIMPLE_OMP_TARGET
+ && gimple_omp_target_kind (stmt) == GF_OMP_TARGET_KIND_UPDATE)
+ new_omp_region (bb, code, parent);
else
{
/* Otherwise, this directive becomes the parent for a new
@@ -6880,6 +8246,32 @@ make_pass_expand_omp (gcc::context *ctxt)
/* Routines to lower OpenMP directives into OMP-GIMPLE. */
+/* If CTX is a worksharing context inside of a cancellable parallel
+   region and it isn't nowait, add an lhs to its GIMPLE_OMP_RETURN
+   and a conditional branch to the parallel's cancel_label, to handle
+   cancellation in the implicit barrier.  */
+
+static void
+maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple_seq *body)
+{
+ gimple omp_return = gimple_seq_last_stmt (*body);
+ gcc_assert (gimple_code (omp_return) == GIMPLE_OMP_RETURN);
+ if (gimple_omp_return_nowait_p (omp_return))
+ return;
+ if (ctx->outer
+ && gimple_code (ctx->outer->stmt) == GIMPLE_OMP_PARALLEL
+ && ctx->outer->cancellable)
+ {
+ tree lhs = create_tmp_var (boolean_type_node, NULL);
+ gimple_omp_return_set_lhs (omp_return, lhs);
+ tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
+ gimple g = gimple_build_cond (NE_EXPR, lhs, boolean_false_node,
+ ctx->outer->cancel_label, fallthru_label);
+ gimple_seq_add_stmt (body, g);
+ gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
+ }
+}
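
In C-like pseudo code, the transformation this helper performs on the implicit barrier looks as follows; that GOMP_barrier_cancel returns nonzero on cancellation is an assumption of the sketch:

    /* Assumed cancellable-barrier entry point.  */
    extern _Bool GOMP_barrier_cancel (void);

    void
    worksharing_exit (void)
    {
      /* The lhs set on the GIMPLE_OMP_RETURN above.  */
      _Bool cancelled = GOMP_barrier_cancel ();
      if (cancelled)
        goto cancel_label;        /* ctx->outer->cancel_label */
      /* fallthru_label: normal continuation.  */
      return;
     cancel_label:
      /* The parallel's cancellation handling runs from here.  */
      return;
    }
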
+
/* Lower the OpenMP sections directive in the current statement in GSI_P.
CTX is the enclosing OMP context for the current statement. */
@@ -6899,7 +8291,7 @@ lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
dlist = NULL;
ilist = NULL;
lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
- &ilist, &dlist, ctx);
+ &ilist, &dlist, ctx, NULL);
new_body = gimple_omp_body (stmt);
gimple_omp_set_body (stmt, NULL);
@@ -6959,6 +8351,8 @@ lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
gimple_seq_add_stmt (&new_body, t);
gimple_seq_add_seq (&new_body, olist);
+ if (ctx->cancellable)
+ gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
gimple_seq_add_seq (&new_body, dlist);
new_body = maybe_catch_exception (new_body);
@@ -6967,6 +8361,7 @@ lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
(!!find_omp_clause (gimple_omp_sections_clauses (stmt),
OMP_CLAUSE_NOWAIT));
gimple_seq_add_stmt (&new_body, t);
+ maybe_add_implicit_barrier_cancel (ctx, &new_body);
gimple_bind_set_body (new_stmt, new_body);
}
@@ -7096,7 +8491,7 @@ lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree block;
gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
- gimple_seq bind_body, dlist;
+ gimple_seq bind_body, bind_body_tail = NULL, dlist;
struct gimplify_ctx gctx;
push_gimplify_context (&gctx);
@@ -7107,7 +8502,7 @@ lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
bind_body = NULL;
dlist = NULL;
lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
- &bind_body, &dlist, ctx);
+ &bind_body, &dlist, ctx, NULL);
lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
gimple_seq_add_stmt (&bind_body, single_stmt);
@@ -7126,7 +8521,17 @@ lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
t = gimple_build_omp_return
(!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
OMP_CLAUSE_NOWAIT));
- gimple_seq_add_stmt (&bind_body, t);
+ gimple_seq_add_stmt (&bind_body_tail, t);
+ maybe_add_implicit_barrier_cancel (ctx, &bind_body_tail);
+ if (ctx->record_type)
+ {
+ gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
+ tree clobber = build_constructor (ctx->record_type, NULL);
+ TREE_THIS_VOLATILE (clobber) = 1;
+ gsi_insert_after (&gsi, gimple_build_assign (ctx->sender_decl,
+ clobber), GSI_SAME_STMT);
+ }
+ gimple_seq_add_seq (&bind_body, bind_body_tail);
gimple_bind_set_body (bind, bind_body);
pop_gimplify_context (bind);
@@ -7180,6 +8585,33 @@ lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
}
+/* Expand code for an OpenMP taskgroup directive. */
+
+static void
+lower_omp_taskgroup (gimple_stmt_iterator *gsi_p, omp_context *ctx)
+{
+ gimple stmt = gsi_stmt (*gsi_p), bind, x;
+ tree block = make_node (BLOCK);
+
+ bind = gimple_build_bind (NULL, NULL, block);
+ gsi_replace (gsi_p, bind, true);
+ gimple_bind_add_stmt (bind, stmt);
+
+ x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START),
+ 0);
+ gimple_bind_add_stmt (bind, x);
+
+ lower_omp (gimple_omp_body_ptr (stmt), ctx);
+ gimple_bind_add_seq (bind, gimple_omp_body (stmt));
+ gimple_omp_set_body (stmt, NULL);
+
+ gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
+
+ gimple_bind_append_vars (bind, ctx->block_vars);
+ BLOCK_VARS (block) = ctx->block_vars;
+}
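
At the source level the lowering above brackets the body with a taskgroup-start call and a region-exit marker; a hedged C sketch follows (the matching GOMP_taskgroup_end is assumed to be emitted when the GIMPLE_OMP_RETURN is expanded):

    /* Assumed runtime names for illustration.  */
    extern void GOMP_taskgroup_start (void);
    extern void GOMP_taskgroup_end (void);

    void
    taskgroup_region (void (*body) (void))
    {
      GOMP_taskgroup_start ();
      body ();                 /* lowered taskgroup body */
      GOMP_taskgroup_end ();   /* assumed to come from the region exit */
    }
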
+
+
/* Expand code for an OpenMP ordered directive. */
static void
@@ -7372,7 +8804,7 @@ static void
lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree *rhs_p, block;
- struct omp_for_data fd;
+ struct omp_for_data fd, *fdp = NULL;
gimple stmt = gsi_stmt (*gsi_p), new_stmt;
gimple_seq omp_for_body, body, dlist;
size_t i;
@@ -7399,10 +8831,50 @@ lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
gimple_bind_append_vars (new_stmt, vars);
}
+ if (gimple_omp_for_combined_into_p (stmt))
+ {
+ extract_omp_for_data (stmt, &fd, NULL);
+ fdp = &fd;
+
+      /* We need two temporaries of fd.iter_type (istart/iend)
+	 and then (fd.collapse - 1) temporaries of the same
+	 type for the count2 ... countN-1 vars if not constant.  */
+ size_t count = 2;
+ tree type = fd.iter_type;
+ if (fd.collapse > 1
+ && TREE_CODE (fd.loop.n2) != INTEGER_CST)
+ count += fd.collapse - 1;
+ bool parallel_for = gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR;
+ tree outerc = NULL, *pc = gimple_omp_for_clauses_ptr (stmt);
+ tree clauses = *pc;
+ if (parallel_for)
+ outerc
+ = find_omp_clause (gimple_omp_parallel_clauses (ctx->outer->stmt),
+ OMP_CLAUSE__LOOPTEMP_);
+ for (i = 0; i < count; i++)
+ {
+ tree temp;
+ if (parallel_for)
+ {
+ gcc_assert (outerc);
+ temp = lookup_decl (OMP_CLAUSE_DECL (outerc), ctx->outer);
+ outerc = find_omp_clause (OMP_CLAUSE_CHAIN (outerc),
+ OMP_CLAUSE__LOOPTEMP_);
+ }
+ else
+ temp = create_tmp_var (type, NULL);
+ *pc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
+ OMP_CLAUSE_DECL (*pc) = temp;
+ pc = &OMP_CLAUSE_CHAIN (*pc);
+ }
+ *pc = clauses;
+ }
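
The temporary count chosen above is therefore 2 (istart/iend) plus collapse - 1 extra counters when the collapsed bound is not constant; as a tiny illustration:

    #include <stddef.h>

    /* Illustration of the _looptemp_ count logic above.  */
    size_t
    looptemp_count (int collapse, int n2_is_constant)
    {
      size_t count = 2;                       /* istart / iend */
      if (collapse > 1 && !n2_is_constant)
        count += (size_t) (collapse - 1);     /* count2 ... countN-1 */
      return count;
    }
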
+
/* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
dlist = NULL;
body = NULL;
- lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
+ lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx,
+ fdp);
gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
lower_omp (gimple_omp_body_ptr (stmt), ctx);
@@ -7442,13 +8914,17 @@ lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
/* After the loop, add exit clauses. */
lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
+
+ if (ctx->cancellable)
+ gimple_seq_add_stmt (&body, gimple_build_label (ctx->cancel_label));
+
gimple_seq_add_seq (&body, dlist);
body = maybe_catch_exception (body);
/* Region exit marker goes at the end of the loop body. */
gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
-
+ maybe_add_implicit_barrier_cancel (ctx, &body);
pop_gimplify_context (new_stmt);
gimple_bind_append_vars (new_stmt, ctx->block_vars);
@@ -7769,6 +9245,68 @@ create_task_copyfn (gimple task_stmt, omp_context *ctx)
pop_cfun ();
}
+static void
+lower_depend_clauses (gimple stmt, gimple_seq *iseq, gimple_seq *oseq)
+{
+ tree c, clauses;
+ gimple g;
+ size_t n_in = 0, n_out = 0, idx = 2, i;
+
+ clauses = find_omp_clause (gimple_omp_task_clauses (stmt),
+ OMP_CLAUSE_DEPEND);
+ gcc_assert (clauses);
+ for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
+ switch (OMP_CLAUSE_DEPEND_KIND (c))
+ {
+ case OMP_CLAUSE_DEPEND_IN:
+ n_in++;
+ break;
+ case OMP_CLAUSE_DEPEND_OUT:
+ case OMP_CLAUSE_DEPEND_INOUT:
+ n_out++;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ tree type = build_array_type_nelts (ptr_type_node, n_in + n_out + 2);
+ tree array = create_tmp_var (type, NULL);
+ tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
+ NULL_TREE);
+ g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_in + n_out));
+ gimple_seq_add_stmt (iseq, g);
+ r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
+ NULL_TREE);
+ g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_out));
+ gimple_seq_add_stmt (iseq, g);
+ for (i = 0; i < 2; i++)
+ {
+ if ((i ? n_in : n_out) == 0)
+ continue;
+ for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
+ && ((OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_IN) ^ i))
+ {
+ tree t = OMP_CLAUSE_DECL (c);
+ t = fold_convert (ptr_type_node, t);
+ gimplify_expr (&t, iseq, NULL, is_gimple_val, fb_rvalue);
+ r = build4 (ARRAY_REF, ptr_type_node, array, size_int (idx++),
+ NULL_TREE, NULL_TREE);
+ g = gimple_build_assign (r, t);
+ gimple_seq_add_stmt (iseq, g);
+ }
+ }
+ tree *p = gimple_omp_task_clauses_ptr (stmt);
+ c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
+ OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
+ OMP_CLAUSE_CHAIN (c) = *p;
+ *p = c;
+ tree clobber = build_constructor (type, NULL);
+ TREE_THIS_VOLATILE (clobber) = 1;
+ g = gimple_build_assign (array, clobber);
+ gimple_seq_add_stmt (oseq, g);
+}
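
The array built here has a fixed two-slot header of counts followed by the dependence addresses, out/inout entries first. For example, for depend(out: x) depend(in: y, z) the lowered array would look like the illustrative, host-level initializer below:

    #include <stddef.h>

    /* Layout produced by lower_depend_clauses for
         #pragma omp task depend(out: x) depend(in: y, z)
       (illustrative; the real array is built in GIMPLE):
         [0] total dependence count, [1] out/inout count,
         then out/inout addresses, then in addresses.  */
    static int x, y, z;
    void *depend_array[5] = {
      (void *) (size_t) 3,   /* n_in + n_out */
      (void *) (size_t) 1,   /* n_out */
      &x,                    /* out */
      &y, &z                 /* in */
    };
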
+
/* Lower the OpenMP parallel or task directive in the current statement
in GSI_P. CTX holds context information for the directive. */
@@ -7778,9 +9316,9 @@ lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
tree clauses;
tree child_fn, t;
gimple stmt = gsi_stmt (*gsi_p);
- gimple par_bind, bind;
- gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
- struct gimplify_ctx gctx;
+ gimple par_bind, bind, dep_bind = NULL;
+ gimple_seq par_body, olist, ilist, par_olist, par_rlist, par_ilist, new_body;
+ struct gimplify_ctx gctx, dep_gctx;
location_t loc = gimple_location (stmt);
clauses = gimple_omp_taskreg_clauses (stmt);
@@ -7800,6 +9338,16 @@ lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
if (ws_num == 1)
gimple_omp_parallel_set_combined_p (stmt, true);
}
+ gimple_seq dep_ilist = NULL;
+ gimple_seq dep_olist = NULL;
+ if (gimple_code (stmt) == GIMPLE_OMP_TASK
+ && find_omp_clause (clauses, OMP_CLAUSE_DEPEND))
+ {
+ push_gimplify_context (&dep_gctx);
+ dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
+ lower_depend_clauses (stmt, &dep_ilist, &dep_olist);
+ }
+
if (ctx->srecord_type)
create_task_copyfn (stmt, ctx);
@@ -7807,10 +9355,11 @@ lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
par_olist = NULL;
par_ilist = NULL;
- lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
+ par_rlist = NULL;
+ lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx, NULL);
lower_omp (&par_body, ctx);
if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
- lower_reduction_clauses (clauses, &par_olist, ctx);
+ lower_reduction_clauses (clauses, &par_rlist, ctx);
/* Declare all the variables created by mapping and the variables
declared in the scope of the parallel body. */
@@ -7832,6 +9381,14 @@ lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
lower_send_clauses (clauses, &ilist, &olist, ctx);
lower_send_shared_vars (&ilist, &olist, ctx);
+ if (ctx->record_type)
+ {
+ tree clobber = build_constructor (TREE_TYPE (ctx->sender_decl), NULL);
+ TREE_THIS_VOLATILE (clobber) = 1;
+ gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
+ clobber));
+ }
+
/* Once all the expansions are done, sequence all the different
fragments inside gimple_omp_body. */
@@ -7848,12 +9405,329 @@ lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
gimple_seq_add_seq (&new_body, par_ilist);
gimple_seq_add_seq (&new_body, par_body);
+ gimple_seq_add_seq (&new_body, par_rlist);
+ if (ctx->cancellable)
+ gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
gimple_seq_add_seq (&new_body, par_olist);
new_body = maybe_catch_exception (new_body);
gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
gimple_omp_set_body (stmt, new_body);
bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
+ gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
+ gimple_bind_add_seq (bind, ilist);
+ gimple_bind_add_stmt (bind, stmt);
+ gimple_bind_add_seq (bind, olist);
+
+ pop_gimplify_context (NULL);
+
+ if (dep_bind)
+ {
+ gimple_bind_add_seq (dep_bind, dep_ilist);
+ gimple_bind_add_stmt (dep_bind, bind);
+ gimple_bind_add_seq (dep_bind, dep_olist);
+ pop_gimplify_context (dep_bind);
+ }
+}
+
+/* Lower the OpenMP target directive in the current statement
+ in GSI_P. CTX holds context information for the directive. */
+
+static void
+lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
+{
+ tree clauses;
+ tree child_fn, t, c;
+ gimple stmt = gsi_stmt (*gsi_p);
+ gimple tgt_bind = NULL, bind;
+ gimple_seq tgt_body = NULL, olist, ilist, new_body;
+ struct gimplify_ctx gctx;
+ location_t loc = gimple_location (stmt);
+ int kind = gimple_omp_target_kind (stmt);
+ unsigned int map_cnt = 0;
+
+ clauses = gimple_omp_target_clauses (stmt);
+ if (kind == GF_OMP_TARGET_KIND_REGION)
+ {
+ tgt_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
+ tgt_body = gimple_bind_body (tgt_bind);
+ }
+ else if (kind == GF_OMP_TARGET_KIND_DATA)
+ tgt_body = gimple_omp_body (stmt);
+ child_fn = ctx->cb.dst_fn;
+
+ push_gimplify_context (&gctx);
+
+ for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
+ switch (OMP_CLAUSE_CODE (c))
+ {
+ tree var, x;
+
+ default:
+ break;
+ case OMP_CLAUSE_MAP:
+ case OMP_CLAUSE_TO:
+ case OMP_CLAUSE_FROM:
+ var = OMP_CLAUSE_DECL (c);
+ if (!DECL_P (var))
+ {
+ if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
+ || !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
+ map_cnt++;
+ continue;
+ }
+
+ if (DECL_SIZE (var)
+ && TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
+ {
+ tree var2 = DECL_VALUE_EXPR (var);
+ gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
+ var2 = TREE_OPERAND (var2, 0);
+ gcc_assert (DECL_P (var2));
+ var = var2;
+ }
+
+ if (!maybe_lookup_field (var, ctx))
+ continue;
+
+ if (kind == GF_OMP_TARGET_KIND_REGION)
+ {
+ x = build_receiver_ref (var, true, ctx);
+ tree new_var = lookup_decl (var, ctx);
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
+ && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
+ && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
+ && TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
+ x = build_simple_mem_ref (x);
+ SET_DECL_VALUE_EXPR (new_var, x);
+ DECL_HAS_VALUE_EXPR_P (new_var) = 1;
+ }
+ map_cnt++;
+ }
+
+ if (kind == GF_OMP_TARGET_KIND_REGION)
+ {
+ target_nesting_level++;
+ lower_omp (&tgt_body, ctx);
+ target_nesting_level--;
+ }
+ else if (kind == GF_OMP_TARGET_KIND_DATA)
+ lower_omp (&tgt_body, ctx);
+
+ if (kind == GF_OMP_TARGET_KIND_REGION)
+ {
+ /* Declare all the variables created by mapping and the variables
+ declared in the scope of the target body. */
+ record_vars_into (ctx->block_vars, child_fn);
+ record_vars_into (gimple_bind_vars (tgt_bind), child_fn);
+ }
+
+ olist = NULL;
+ ilist = NULL;
+ if (ctx->record_type)
+ {
+ ctx->sender_decl
+ = create_tmp_var (ctx->record_type, ".omp_data_arr");
+ DECL_NAMELESS (ctx->sender_decl) = 1;
+ TREE_ADDRESSABLE (ctx->sender_decl) = 1;
+ t = make_tree_vec (3);
+ TREE_VEC_ELT (t, 0) = ctx->sender_decl;
+ TREE_VEC_ELT (t, 1)
+ = create_tmp_var (build_array_type_nelts (size_type_node, map_cnt),
+ ".omp_data_sizes");
+ DECL_NAMELESS (TREE_VEC_ELT (t, 1)) = 1;
+ TREE_ADDRESSABLE (TREE_VEC_ELT (t, 1)) = 1;
+ TREE_STATIC (TREE_VEC_ELT (t, 1)) = 1;
+ TREE_VEC_ELT (t, 2)
+ = create_tmp_var (build_array_type_nelts (unsigned_char_type_node,
+ map_cnt),
+ ".omp_data_kinds");
+ DECL_NAMELESS (TREE_VEC_ELT (t, 2)) = 1;
+ TREE_ADDRESSABLE (TREE_VEC_ELT (t, 2)) = 1;
+ TREE_STATIC (TREE_VEC_ELT (t, 2)) = 1;
+ gimple_omp_target_set_data_arg (stmt, t);
+
+ vec<constructor_elt, va_gc> *vsize;
+ vec<constructor_elt, va_gc> *vkind;
+ vec_alloc (vsize, map_cnt);
+ vec_alloc (vkind, map_cnt);
+ unsigned int map_idx = 0;
+
+ for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
+ switch (OMP_CLAUSE_CODE (c))
+ {
+ tree ovar, nc;
+
+ default:
+ break;
+ case OMP_CLAUSE_MAP:
+ case OMP_CLAUSE_TO:
+ case OMP_CLAUSE_FROM:
+ nc = c;
+ ovar = OMP_CLAUSE_DECL (c);
+ if (!DECL_P (ovar))
+ {
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
+ && OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
+ {
+ gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c))
+ == get_base_address (ovar));
+ nc = OMP_CLAUSE_CHAIN (c);
+ ovar = OMP_CLAUSE_DECL (nc);
+ }
+ else
+ {
+ tree x = build_sender_ref (ovar, ctx);
+ tree v
+ = build_fold_addr_expr_with_type (ovar, ptr_type_node);
+ gimplify_assign (x, v, &ilist);
+ nc = NULL_TREE;
+ }
+ }
+ else
+ {
+ if (DECL_SIZE (ovar)
+ && TREE_CODE (DECL_SIZE (ovar)) != INTEGER_CST)
+ {
+ tree ovar2 = DECL_VALUE_EXPR (ovar);
+ gcc_assert (TREE_CODE (ovar2) == INDIRECT_REF);
+ ovar2 = TREE_OPERAND (ovar2, 0);
+ gcc_assert (DECL_P (ovar2));
+ ovar = ovar2;
+ }
+ if (!maybe_lookup_field (ovar, ctx))
+ continue;
+ }
+
+ if (nc)
+ {
+ tree var = lookup_decl_in_outer_ctx (ovar, ctx);
+ tree x = build_sender_ref (ovar, ctx);
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
+ && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
+ && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
+ && TREE_CODE (TREE_TYPE (ovar)) == ARRAY_TYPE)
+ {
+ gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
+ tree avar
+ = create_tmp_var (TREE_TYPE (TREE_TYPE (x)), NULL);
+ mark_addressable (avar);
+ gimplify_assign (avar, build_fold_addr_expr (var), &ilist);
+ avar = build_fold_addr_expr (avar);
+ gimplify_assign (x, avar, &ilist);
+ }
+ else if (is_gimple_reg (var))
+ {
+ gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
+ tree avar = create_tmp_var (TREE_TYPE (var), NULL);
+ mark_addressable (avar);
+ if (OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_ALLOC
+ && OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_FROM)
+ gimplify_assign (avar, var, &ilist);
+ avar = build_fold_addr_expr (avar);
+ gimplify_assign (x, avar, &ilist);
+ if ((OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_FROM
+ || OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_TOFROM)
+ && !TYPE_READONLY (TREE_TYPE (var)))
+ {
+ x = build_sender_ref (ovar, ctx);
+ x = build_simple_mem_ref (x);
+ gimplify_assign (var, x, &olist);
+ }
+ }
+ else
+ {
+ var = build_fold_addr_expr (var);
+ gimplify_assign (x, var, &ilist);
+ }
+ }
+ tree s = OMP_CLAUSE_SIZE (c);
+ if (s == NULL_TREE)
+ s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
+ s = fold_convert (size_type_node, s);
+ tree purpose = size_int (map_idx++);
+ CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
+ if (TREE_CODE (s) != INTEGER_CST)
+ TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
+
+ unsigned char tkind = 0;
+ switch (OMP_CLAUSE_CODE (c))
+ {
+ case OMP_CLAUSE_MAP:
+ tkind = OMP_CLAUSE_MAP_KIND (c);
+ break;
+ case OMP_CLAUSE_TO:
+ tkind = OMP_CLAUSE_MAP_TO;
+ break;
+ case OMP_CLAUSE_FROM:
+ tkind = OMP_CLAUSE_MAP_FROM;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ unsigned int talign = TYPE_ALIGN_UNIT (TREE_TYPE (ovar));
+ if (DECL_P (ovar) && DECL_ALIGN_UNIT (ovar) > talign)
+ talign = DECL_ALIGN_UNIT (ovar);
+ talign = ceil_log2 (talign);
+ tkind |= talign << 3;
+ CONSTRUCTOR_APPEND_ELT (vkind, purpose,
+ build_int_cst (unsigned_char_type_node,
+ tkind));
+ if (nc && nc != c)
+ c = nc;
+ }
+
+ gcc_assert (map_idx == map_cnt);
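
Each byte appended to .omp_data_kinds above packs the map kind into the low 3 bits and ceil_log2 of the alignment into the remaining bits (tkind |= talign << 3). A small sketch; the concrete kind value in the example is an assumption for illustration:

    /* Sketch of the kind-byte encoding built above.  */
    unsigned char
    encode_kind (unsigned char map_kind, unsigned int align_bytes)
    {
      unsigned int talign = 0;
      while ((1u << talign) < align_bytes)   /* ceil_log2 */
        talign++;
      return map_kind | (unsigned char) (talign << 3);
    }
    /* encode_kind (2, 8) == 2 | (3 << 3) == 26 for an 8-byte-aligned
       mapping whose kind value is assumed to be 2.  */
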
+
+ DECL_INITIAL (TREE_VEC_ELT (t, 1))
+ = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)), vsize);
+ DECL_INITIAL (TREE_VEC_ELT (t, 2))
+ = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 2)), vkind);
+ if (!TREE_STATIC (TREE_VEC_ELT (t, 1)))
+ {
+ gimple_seq initlist = NULL;
+ force_gimple_operand (build1 (DECL_EXPR, void_type_node,
+ TREE_VEC_ELT (t, 1)),
+ &initlist, true, NULL_TREE);
+ gimple_seq_add_seq (&ilist, initlist);
+ }
+
+ tree clobber = build_constructor (ctx->record_type, NULL);
+ TREE_THIS_VOLATILE (clobber) = 1;
+ gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
+ clobber));
+ }
+
+ /* Once all the expansions are done, sequence all the different
+ fragments inside gimple_omp_body. */
+
+ new_body = NULL;
+
+ if (ctx->record_type && kind == GF_OMP_TARGET_KIND_REGION)
+ {
+ t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
+ /* fixup_child_record_type might have changed receiver_decl's type. */
+ t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
+ gimple_seq_add_stmt (&new_body,
+ gimple_build_assign (ctx->receiver_decl, t));
+ }
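
In sketch form (the artificial decl names are illustrative), the hookup above
stores the address of the sender object into the receiver decl, so that every
reference the lowered region body makes through the receiver resolves to the
operands marshalled in ilist:

    .omp_data_i = (struct .omp_data_t *) &.omp_data_arr;
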
+
+ if (kind == GF_OMP_TARGET_KIND_REGION)
+ {
+ gimple_seq_add_seq (&new_body, tgt_body);
+ new_body = maybe_catch_exception (new_body);
+ }
+ else if (kind == GF_OMP_TARGET_KIND_DATA)
+ new_body = tgt_body;
+ if (kind != GF_OMP_TARGET_KIND_UPDATE)
+ {
+ gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
+ gimple_omp_set_body (stmt, new_body);
+ }
+
+ bind = gimple_build_bind (NULL, NULL,
+ tgt_bind ? gimple_bind_block (tgt_bind)
+ : NULL_TREE);
gsi_replace (gsi_p, bind, true);
gimple_bind_add_seq (bind, ilist);
gimple_bind_add_stmt (bind, stmt);
@@ -7862,6 +9736,72 @@ lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
pop_gimplify_context (NULL);
}
+/* Expand code for an OpenMP teams directive. */
+
+static void
+lower_omp_teams (gimple_stmt_iterator *gsi_p, omp_context *ctx)
+{
+ gimple teams_stmt = gsi_stmt (*gsi_p);
+ struct gimplify_ctx gctx;
+ push_gimplify_context (&gctx);
+
+ tree block = make_node (BLOCK);
+ gimple bind = gimple_build_bind (NULL, NULL, block);
+ gsi_replace (gsi_p, bind, true);
+ gimple_seq bind_body = NULL;
+ gimple_seq dlist = NULL;
+ gimple_seq olist = NULL;
+
+ tree num_teams = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
+ OMP_CLAUSE_NUM_TEAMS);
+ if (num_teams == NULL_TREE)
+ num_teams = build_int_cst (unsigned_type_node, 0);
+ else
+ {
+ num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
+ num_teams = fold_convert (unsigned_type_node, num_teams);
+ gimplify_expr (&num_teams, &bind_body, NULL, is_gimple_val, fb_rvalue);
+ }
+ tree thread_limit = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
+ OMP_CLAUSE_THREAD_LIMIT);
+ if (thread_limit == NULL_TREE)
+ thread_limit = build_int_cst (unsigned_type_node, 0);
+ else
+ {
+ thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
+ thread_limit = fold_convert (unsigned_type_node, thread_limit);
+ gimplify_expr (&thread_limit, &bind_body, NULL, is_gimple_val,
+ fb_rvalue);
+ }
+
+ lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt),
+ &bind_body, &dlist, ctx, NULL);
+ lower_omp (gimple_omp_body_ptr (teams_stmt), ctx);
+ lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt), &olist, ctx);
+ gimple_seq_add_stmt (&bind_body, teams_stmt);
+
+ location_t loc = gimple_location (teams_stmt);
+ tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TEAMS);
+ gimple call = gimple_build_call (decl, 2, num_teams, thread_limit);
+ gimple_set_location (call, loc);
+ gimple_seq_add_stmt (&bind_body, call);
+
+ gimple_seq_add_seq (&bind_body, gimple_omp_body (teams_stmt));
+ gimple_omp_set_body (teams_stmt, NULL);
+ gimple_seq_add_seq (&bind_body, olist);
+ gimple_seq_add_seq (&bind_body, dlist);
+ gimple_seq_add_stmt (&bind_body, gimple_build_omp_return (true));
+ gimple_bind_set_body (bind, bind_body);
+
+ pop_gimplify_context (bind);
+
+ gimple_bind_append_vars (bind, ctx->block_vars);
+ BLOCK_VARS (block) = ctx->block_vars;
+ if (BLOCK_VARS (block))
+ TREE_USED (block) = 1;
+}
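
A sketch of the net effect, with GOMP_teams taken to be the libgomp entry
point receiving the two clause values (an absent clause passes 0, leaving the
choice to the runtime):

    #pragma omp teams num_teams(8) thread_limit(4)
      body;

    /* lowers, roughly, to: */
    GOMP_teams (8, 4);
    body;
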
+
+
/* Callback for lower_omp_1. Return non-NULL if *tp needs to be
regimplified. If DATA is non-NULL, lower_omp_1 is outside
of OpenMP context, but with task_shared_vars set. */
@@ -7940,16 +9880,23 @@ lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
case GIMPLE_OMP_PARALLEL:
case GIMPLE_OMP_TASK:
ctx = maybe_lookup_ctx (stmt);
+ gcc_assert (ctx);
+ if (ctx->cancellable)
+ ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
lower_omp_taskreg (gsi_p, ctx);
break;
case GIMPLE_OMP_FOR:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
+ if (ctx->cancellable)
+ ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
lower_omp_for (gsi_p, ctx);
break;
case GIMPLE_OMP_SECTIONS:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
+ if (ctx->cancellable)
+ ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
lower_omp_sections (gsi_p, ctx);
break;
case GIMPLE_OMP_SINGLE:
@@ -7962,6 +9909,11 @@ lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
gcc_assert (ctx);
lower_omp_master (gsi_p, ctx);
break;
+ case GIMPLE_OMP_TASKGROUP:
+ ctx = maybe_lookup_ctx (stmt);
+ gcc_assert (ctx);
+ lower_omp_taskgroup (gsi_p, ctx);
+ break;
case GIMPLE_OMP_ORDERED:
ctx = maybe_lookup_ctx (stmt);
gcc_assert (ctx);
@@ -7978,6 +9930,66 @@ lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
gimple_regimplify_operands (stmt, gsi_p);
break;
+ case GIMPLE_OMP_TARGET:
+ ctx = maybe_lookup_ctx (stmt);
+ gcc_assert (ctx);
+ lower_omp_target (gsi_p, ctx);
+ break;
+ case GIMPLE_OMP_TEAMS:
+ ctx = maybe_lookup_ctx (stmt);
+ gcc_assert (ctx);
+ lower_omp_teams (gsi_p, ctx);
+ break;
+ case GIMPLE_CALL:
+ tree fndecl;
+ fndecl = gimple_call_fndecl (stmt);
+ if (fndecl
+ && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
+ switch (DECL_FUNCTION_CODE (fndecl))
+ {
+ case BUILT_IN_GOMP_BARRIER:
+ if (ctx == NULL)
+ break;
+ /* FALLTHRU */
+ case BUILT_IN_GOMP_CANCEL:
+ case BUILT_IN_GOMP_CANCELLATION_POINT:
+ omp_context *cctx;
+ cctx = ctx;
+ if (gimple_code (cctx->stmt) == GIMPLE_OMP_SECTION)
+ cctx = cctx->outer;
+ gcc_assert (gimple_call_lhs (stmt) == NULL_TREE);
+ if (!cctx->cancellable)
+ {
+ if (DECL_FUNCTION_CODE (fndecl)
+ == BUILT_IN_GOMP_CANCELLATION_POINT)
+ {
+ stmt = gimple_build_nop ();
+ gsi_replace (gsi_p, stmt, false);
+ }
+ break;
+ }
+ tree lhs;
+ lhs = create_tmp_var (boolean_type_node, NULL);
+ if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
+ {
+ fndecl = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL);
+ gimple_call_set_fndecl (stmt, fndecl);
+ gimple_call_set_fntype (stmt, TREE_TYPE (fndecl));
+ }
+ gimple_call_set_lhs (stmt, lhs);
+ tree fallthru_label;
+ fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
+ gimple g;
+ g = gimple_build_label (fallthru_label);
+ gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
+ g = gimple_build_cond (NE_EXPR, lhs, boolean_false_node,
+ cctx->cancel_label, fallthru_label);
+ gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
+ break;
+ default:
+ break;
+ }
+ /* FALLTHRU */
default:
if ((ctx || task_shared_vars)
&& walk_gimple_op (stmt, lower_omp_regimplify_p,
@@ -7994,6 +10006,12 @@ lower_omp (gimple_seq *body, omp_context *ctx)
gimple_stmt_iterator gsi;
for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
lower_omp_1 (&gsi, ctx);
+  /* Inside a target region we haven't called fold_stmt during
+     gimplification, because it can break code by adding decl references
+     that weren't in the source.  Call fold_stmt now.  */
+ if (target_nesting_level)
+ for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
+ fold_stmt (&gsi);
input_location = saved_location;
}
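
A sketch of the rewrite the GIMPLE_CALL case above performs inside a
cancellable construct (temporary and label names are illustrative):

    /* before */
    GOMP_barrier ();

    /* after */
    _cancelled = GOMP_barrier_cancel ();
    if (_cancelled != 0)
      goto <cancel_label>;   /* label created for the enclosing construct */
    <fallthru_label>:
    ...

GOMP_cancel and GOMP_cancellation_point get the same conditional branch on
their boolean result; a cancellation point in a non-cancellable construct is
replaced by a nop, and a barrier there is left unchanged.
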
@@ -8162,6 +10180,9 @@ diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_CRITICAL:
+ case GIMPLE_OMP_TARGET:
+ case GIMPLE_OMP_TEAMS:
+ case GIMPLE_OMP_TASKGROUP:
/* The minimal context here is just the current OMP construct. */
inner_context = stmt;
wi->info = inner_context;
@@ -8217,6 +10238,9 @@ diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
case GIMPLE_OMP_MASTER:
case GIMPLE_OMP_ORDERED:
case GIMPLE_OMP_CRITICAL:
+ case GIMPLE_OMP_TARGET:
+ case GIMPLE_OMP_TEAMS:
+ case GIMPLE_OMP_TASKGROUP:
wi->info = stmt;
walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
wi->info = context;