aboutsummaryrefslogtreecommitdiff
path: root/gcc/cp/semantics.cc
diff options
context:
space:
mode:
Diffstat (limited to 'gcc/cp/semantics.cc')
-rw-r--r--gcc/cp/semantics.cc1800
1 files changed, 1723 insertions, 77 deletions
diff --git a/gcc/cp/semantics.cc b/gcc/cp/semantics.cc
index a10ef34..32d4511 100644
--- a/gcc/cp/semantics.cc
+++ b/gcc/cp/semantics.cc
@@ -45,6 +45,7 @@ along with GCC; see the file COPYING3. If not see
#include "gomp-constants.h"
#include "predict.h"
#include "memmodel.h"
+#include "gimplify.h"
/* There routines provide a modular interface to perform many parsing
operations. They may therefore be used during actual parsing, or
@@ -4010,6 +4011,13 @@ finish_translation_unit (void)
"#pragma omp end declare target");
vec_safe_truncate (scope_chain->omp_declare_target_attribute, 0);
}
+ if (vec_safe_length (scope_chain->omp_declare_variant_attribute))
+ {
+ if (!errorcount)
+ error ("%<omp begin declare variant%> without corresponding "
+ "%<omp end declare variant%>");
+ vec_safe_truncate (scope_chain->omp_declare_variant_attribute, 0);
+ }
if (vec_safe_length (scope_chain->omp_begin_assumes))
{
if (!errorcount)
@@ -5906,14 +5914,17 @@ public:
<= FIRST_NON_ONE we diagnose non-contiguous arrays if low bound isn't
0 or length isn't the array domain max + 1, for > FIRST_NON_ONE we
can if MAYBE_ZERO_LEN is false. MAYBE_ZERO_LEN will be true in the above
- case though, as some lengths could be zero. */
+ case though, as some lengths could be zero.
+ NON_CONTIGUOUS will be true if this is an OpenACC non-contiguous array
+ section. */
static tree
handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types,
bool &maybe_zero_len, unsigned int &first_non_one,
- enum c_omp_region_type ort)
+ bool &non_contiguous, enum c_omp_region_type ort,
+ int *discontiguous)
{
- tree ret, low_bound, length, type;
+ tree ret, low_bound, length, stride, type;
bool openacc = (ort & C_ORT_ACC) != 0;
if (TREE_CODE (t) != OMP_ARRAY_SECTION)
{
@@ -5975,18 +5986,26 @@ handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types,
&& TREE_CODE (TREE_OPERAND (t, 0)) == FIELD_DECL)
TREE_OPERAND (t, 0) = omp_privatize_field (TREE_OPERAND (t, 0), false);
ret = handle_omp_array_sections_1 (c, TREE_OPERAND (t, 0), types,
- maybe_zero_len, first_non_one, ort);
+ maybe_zero_len, first_non_one,
+ non_contiguous, ort, discontiguous);
if (ret == error_mark_node || ret == NULL_TREE)
return ret;
- type = TREE_TYPE (ret);
+ if (TREE_CODE (ret) == OMP_ARRAY_SECTION)
+ type = TREE_TYPE (TREE_TYPE (TREE_OPERAND (ret, 0)));
+ else
+ type = TREE_TYPE (ret);
low_bound = TREE_OPERAND (t, 1);
length = TREE_OPERAND (t, 2);
+ stride = TREE_OPERAND (t, 3);
if ((low_bound && type_dependent_expression_p (low_bound))
- || (length && type_dependent_expression_p (length)))
+ || (length && type_dependent_expression_p (length))
+ || (stride && type_dependent_expression_p (stride)))
return NULL_TREE;
- if (low_bound == error_mark_node || length == error_mark_node)
+ if (low_bound == error_mark_node
+ || length == error_mark_node
+ || stride == error_mark_node)
return error_mark_node;
if (low_bound && !INTEGRAL_TYPE_P (TREE_TYPE (low_bound)))
@@ -6003,15 +6022,26 @@ handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types,
length);
return error_mark_node;
}
+ if (stride && !INTEGRAL_TYPE_P (TREE_TYPE (stride)))
+ {
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "stride %qE of array section does not have integral type",
+ stride);
+ return error_mark_node;
+ }
if (low_bound)
low_bound = mark_rvalue_use (low_bound);
if (length)
length = mark_rvalue_use (length);
+ if (stride)
+ stride = mark_rvalue_use (stride);
/* We need to reduce to real constant-values for checks below. */
if (length)
length = fold_simple (length);
if (low_bound)
low_bound = fold_simple (low_bound);
+ if (stride)
+ stride = fold_simple (stride);
if (low_bound
&& TREE_CODE (low_bound) == INTEGER_CST
&& TYPE_PRECISION (TREE_TYPE (low_bound))
@@ -6022,9 +6052,15 @@ handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types,
&& TYPE_PRECISION (TREE_TYPE (length))
> TYPE_PRECISION (sizetype))
length = fold_convert (sizetype, length);
+ if (stride
+ && TREE_CODE (stride) == INTEGER_CST
+ && TYPE_PRECISION (TREE_TYPE (stride))
+ > TYPE_PRECISION (sizetype))
+ stride = fold_convert (sizetype, stride);
if (low_bound == NULL_TREE)
low_bound = integer_zero_node;
-
+ if (stride == NULL_TREE)
+ stride = size_one_node;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_DETACH))
@@ -6143,12 +6179,29 @@ handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types,
}
if (length && TREE_CODE (length) == INTEGER_CST)
{
- if (tree_int_cst_lt (size, length))
+ tree slength = length;
+ if (stride && TREE_CODE (stride) == INTEGER_CST)
{
- error_at (OMP_CLAUSE_LOCATION (c),
- "length %qE above array section size "
- "in %qs clause", length,
- omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
+ slength = size_binop (MULT_EXPR,
+ fold_convert (sizetype, length),
+ fold_convert (sizetype, stride));
+ slength = size_binop (MINUS_EXPR,
+ slength,
+ fold_convert (sizetype, stride));
+ slength = size_binop (PLUS_EXPR, slength, size_one_node);
+ }
+ if (tree_int_cst_lt (size, slength))
+ {
+ if (stride)
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "length %qE with stride %qE above array "
+ "section size in %qs clause", length, stride,
+ omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
+ else
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "length %qE above array section size "
+ "in %qs clause", length,
+ omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
return error_mark_node;
}
if (TREE_CODE (low_bound) == INTEGER_CST)
@@ -6156,7 +6209,7 @@ handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types,
tree lbpluslen
= size_binop (PLUS_EXPR,
fold_convert (sizetype, low_bound),
- fold_convert (sizetype, length));
+ fold_convert (sizetype, slength));
if (TREE_CODE (lbpluslen) == INTEGER_CST
&& tree_int_cst_lt (size, lbpluslen))
{
@@ -6226,12 +6279,38 @@ handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types,
d = TREE_OPERAND (d, 0))
{
tree d_length = TREE_OPERAND (d, 2);
- if (d_length == NULL_TREE || !integer_onep (d_length))
+ tree d_stride = TREE_OPERAND (d, 3);
+ if (d_length == NULL_TREE
+ || !integer_onep (d_length)
+ || (d_stride && !integer_onep (d_stride)))
{
- error_at (OMP_CLAUSE_LOCATION (c),
- "array section is not contiguous in %qs clause",
- omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
- return error_mark_node;
+ if (openacc && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP)
+ {
+ while (TREE_CODE (d) == OMP_ARRAY_SECTION)
+ d = TREE_OPERAND (d, 0);
+ if (DECL_P (d))
+ {
+ /* Note that OpenACC does accept these kinds of
+ non-contiguous pointer-based arrays. */
+ non_contiguous = true;
+ break;
+ }
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "base-pointer expression in %qs clause not "
+ "supported for non-contiguous arrays",
+ omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
+ return error_mark_node;
+ }
+
+ if (discontiguous && *discontiguous)
+ *discontiguous = 2;
+ else
+ {
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "array section is not contiguous in %qs clause",
+ omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
+ return error_mark_node;
+ }
}
}
}
@@ -6243,7 +6322,7 @@ handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types,
return error_mark_node;
}
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
- types.safe_push (TREE_TYPE (ret));
+ types.safe_push (type);
/* We will need to evaluate lb more than once. */
tree lb = cp_save_expr (low_bound);
if (lb != low_bound)
@@ -6262,29 +6341,59 @@ handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types,
OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION);
- ret = grok_array_decl (OMP_CLAUSE_LOCATION (c), ret, low_bound, NULL,
- tf_warning_or_error);
+ /* NOTE: Stride/length are discarded for affinity/depend here. */
+ if (discontiguous
+ && *discontiguous
+ && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_AFFINITY
+ && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
+ ret = grok_omp_array_section (OMP_CLAUSE_LOCATION (c), ret, low_bound,
+ length, stride);
+ else
+ ret = grok_array_decl (OMP_CLAUSE_LOCATION (c), ret, low_bound, NULL,
+ tf_warning_or_error);
return ret;
}
-/* Handle array sections for clause C. */
+/* We built a reference to an array section, but it turns out we only need a
+ set of ARRAY_REFs to the lower bound. Rewrite the node. */
+
+static tree
+omp_array_section_low_bound (location_t loc, tree node)
+{
+ if (TREE_CODE (node) == OMP_ARRAY_SECTION)
+ {
+ tree low_bound = TREE_OPERAND (node, 1);
+ tree ret
+ = omp_array_section_low_bound (loc, TREE_OPERAND (node, 0));
+ return grok_array_decl (loc, ret, low_bound, NULL, tf_warning_or_error);
+ }
+
+ return node;
+}
+
+/* Handle array sections for clause C. On entry *DISCONTIGUOUS is 0 if array
+ section must be contiguous, 1 if it can be discontiguous, and in the latter
+ case it is set to 2 on exit if it is determined to be discontiguous during
+ the function's execution. PC points to the clause to be processed, and
+ *PNEXT to the last mapping node created, if passed as non-NULL. */
static bool
-handle_omp_array_sections (tree &c, enum c_omp_region_type ort)
+handle_omp_array_sections (tree *pc, tree **pnext, enum c_omp_region_type ort,
+ int *discontiguous, bool *strided = NULL)
{
+ tree c = *pc;
bool maybe_zero_len = false;
unsigned int first_non_one = 0;
+ bool non_contiguous = false;
auto_vec<tree, 10> types;
tree *tp = &OMP_CLAUSE_DECL (c);
if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_AFFINITY)
- && TREE_CODE (*tp) == TREE_LIST
- && TREE_PURPOSE (*tp)
- && TREE_CODE (TREE_PURPOSE (*tp)) == TREE_VEC)
+ && OMP_ITERATOR_DECL_P (*tp))
tp = &TREE_VALUE (*tp);
tree first = handle_omp_array_sections_1 (c, *tp, types,
maybe_zero_len, first_non_one,
- ort);
+ non_contiguous, ort, discontiguous);
if (first == error_mark_node)
return true;
if (first == NULL_TREE)
@@ -6319,12 +6428,15 @@ handle_omp_array_sections (tree &c, enum c_omp_region_type ort)
unsigned int num = types.length (), i;
tree t, side_effects = NULL_TREE, size = NULL_TREE;
tree condition = NULL_TREE;
+ tree ncarray_dims = NULL_TREE;
if (int_size_in_bytes (TREE_TYPE (first)) <= 0)
maybe_zero_len = true;
if (processing_template_decl && maybe_zero_len)
return false;
+ bool higher_discontiguous = false;
+
for (i = num, t = OMP_CLAUSE_DECL (c); i > 0;
t = TREE_OPERAND (t, 0))
{
@@ -6332,6 +6444,7 @@ handle_omp_array_sections (tree &c, enum c_omp_region_type ort)
tree low_bound = TREE_OPERAND (t, 1);
tree length = TREE_OPERAND (t, 2);
+ tree stride = TREE_OPERAND (t, 3);
i--;
if (low_bound
@@ -6344,12 +6457,66 @@ handle_omp_array_sections (tree &c, enum c_omp_region_type ort)
&& TYPE_PRECISION (TREE_TYPE (length))
> TYPE_PRECISION (sizetype))
length = fold_convert (sizetype, length);
+ if (stride
+ && TREE_CODE (stride) == INTEGER_CST
+ && TYPE_PRECISION (TREE_TYPE (stride))
+ > TYPE_PRECISION (sizetype))
+ stride = fold_convert (sizetype, stride);
if (low_bound == NULL_TREE)
low_bound = integer_zero_node;
+
+ if (non_contiguous)
+ {
+ ncarray_dims = tree_cons (low_bound, length, ncarray_dims);
+ continue;
+ }
+
+ if (stride == NULL_TREE)
+ stride = size_one_node;
+ if (strided && !integer_onep (stride))
+ *strided = true;
+ if (discontiguous && *discontiguous)
+ {
+ /* This condition is similar to the error check below, but
+ whereas that checks for a definitely-discontiguous array
+ section in order to report an error (where such a section is
+ illegal), here we instead need to know if the array section
+ *may be* discontiguous so we can handle that case
+ appropriately (i.e. for rectangular "target update"
+ operations). */
+ bool full_span = false;
+ if (length != NULL_TREE
+ && TREE_CODE (length) == INTEGER_CST
+ && TREE_CODE (types[i]) == ARRAY_TYPE
+ && TYPE_DOMAIN (types[i])
+ && TYPE_MAX_VALUE (TYPE_DOMAIN (types[i]))
+ && TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])))
+ == INTEGER_CST)
+ {
+ tree size;
+ size = size_binop (PLUS_EXPR,
+ TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])),
+ size_one_node);
+ if (tree_int_cst_equal (length, size))
+ full_span = true;
+ }
+
+ if (!integer_onep (stride)
+ || (higher_discontiguous
+ && (!integer_zerop (low_bound)
+ || !full_span)))
+ *discontiguous = 2;
+
+ if (!integer_onep (stride)
+ || !integer_zerop (low_bound)
+ || !full_span)
+ higher_discontiguous = true;
+ }
+
if (!maybe_zero_len && i > first_non_one)
{
if (integer_nonzerop (low_bound))
- goto do_warn_noncontiguous;
+ goto is_noncontiguous;
if (length != NULL_TREE
&& TREE_CODE (length) == INTEGER_CST
&& TYPE_DOMAIN (types[i])
@@ -6363,12 +6530,17 @@ handle_omp_array_sections (tree &c, enum c_omp_region_type ort)
size_one_node);
if (!tree_int_cst_equal (length, size))
{
- do_warn_noncontiguous:
- error_at (OMP_CLAUSE_LOCATION (c),
- "array section is not contiguous in %qs "
- "clause",
- omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
- return true;
+ is_noncontiguous:
+ if (discontiguous && *discontiguous)
+ *discontiguous = 2;
+ else
+ {
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "array section is not contiguous in %qs "
+ "clause",
+ omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
+ return true;
+ }
}
}
if (!processing_template_decl
@@ -6437,6 +6609,14 @@ handle_omp_array_sections (tree &c, enum c_omp_region_type ort)
}
if (!processing_template_decl)
{
+ if (non_contiguous)
+ {
+ int kind = OMP_CLAUSE_MAP_KIND (c);
+ OMP_CLAUSE_SET_MAP_KIND (c, kind | GOMP_MAP_NONCONTIG_ARRAY);
+ OMP_CLAUSE_DECL (c) = t;
+ OMP_CLAUSE_SIZE (c) = ncarray_dims;
+ return false;
+ }
if (side_effects)
size = build2 (COMPOUND_EXPR, sizetype, side_effects, size);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
@@ -6477,6 +6657,9 @@ handle_omp_array_sections (tree &c, enum c_omp_region_type ort)
OMP_CLAUSE_DECL (c) = t;
return false;
}
+ if (discontiguous && *discontiguous != 2)
+ first = omp_array_section_low_bound (OMP_CLAUSE_LOCATION (c),
+ first);
OMP_CLAUSE_DECL (c) = first;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_HAS_DEVICE_ADDR)
return false;
@@ -6488,9 +6671,6 @@ handle_omp_array_sections (tree &c, enum c_omp_region_type ort)
if (TREE_CODE (t) == FIELD_DECL)
t = finish_non_static_data_member (t, NULL_TREE, NULL_TREE);
- if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP)
- return false;
-
if (TREE_CODE (first) == INDIRECT_REF)
{
/* Detect and skip adding extra nodes for pointer-to-member
@@ -6517,6 +6697,10 @@ handle_omp_array_sections (tree &c, enum c_omp_region_type ort)
}
}
+ if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
+ && !(discontiguous && *discontiguous == 2))
+ return false;
+
/* FIRST represents the first item of data that we are mapping.
E.g. if we're mapping an array, FIRST might resemble
"foo.bar.myarray[0]". */
@@ -6528,23 +6712,28 @@ handle_omp_array_sections (tree &c, enum c_omp_region_type ort)
cp_omp_address_inspector ai (OMP_CLAUSE_LOCATION (c), t);
- tree nc = ai.expand_map_clause (c, first, addr_tokens, ort);
- if (nc != error_mark_node)
+ tree* npc = ai.expand_map_clause (pc, first, addr_tokens, ort);
+ if (npc != NULL)
{
using namespace omp_addr_tokenizer;
- if (ai.maybe_zero_length_array_section (c))
+ c = *pc;
+
+ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
+ && ai.maybe_zero_length_array_section (c))
OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION (c) = 1;
/* !!! If we're accessing a base decl via chained access
methods (e.g. multiple indirections), duplicate clause
detection won't work properly. Skip it in that case. */
- if ((addr_tokens[0]->type == STRUCTURE_BASE
+ if (pnext
+ && (addr_tokens[0]->type == STRUCTURE_BASE
|| addr_tokens[0]->type == ARRAY_BASE)
&& addr_tokens[0]->u.structure_base_kind == BASE_DECL
&& addr_tokens[1]->type == ACCESS_METHOD
&& omp_access_chain_p (addr_tokens, 1))
- c = nc;
+ /* NPC points to the last node in the new sequence. */
+ *pnext = npc;
return false;
}
@@ -6723,6 +6912,102 @@ omp_reduction_lookup (location_t loc, tree id, tree type, tree *baselinkp,
return id;
}
+/* Return identifier to look up for omp declare mapper. */
+
+tree
+omp_mapper_id (tree mapper_id, tree type)
+{
+ const char *p = NULL;
+ const char *m = NULL;
+
+ if (mapper_id == NULL_TREE)
+ p = "";
+ else if (TREE_CODE (mapper_id) == IDENTIFIER_NODE)
+ p = IDENTIFIER_POINTER (mapper_id);
+ else
+ return error_mark_node;
+
+ if (type != NULL_TREE)
+ m = mangle_type_string (TYPE_MAIN_VARIANT (type));
+
+ const char prefix[] = "omp declare mapper ";
+ size_t lenp = sizeof (prefix);
+ if (strncmp (p, prefix, lenp - 1) == 0)
+ lenp = 1;
+ size_t len = strlen (p);
+ size_t lenm = m ? strlen (m) + 1 : 0;
+ char *name = XALLOCAVEC (char, lenp + len + lenm);
+ memcpy (name, prefix, lenp - 1);
+ memcpy (name + lenp - 1, p, len + 1);
+ if (m)
+ {
+ name[lenp + len - 1] = '~';
+ memcpy (name + lenp + len, m, lenm);
+ }
+ return get_identifier (name);
+}
+
+tree
+cxx_omp_mapper_lookup (tree id, tree type)
+{
+ if (TREE_CODE (type) != RECORD_TYPE
+ && TREE_CODE (type) != UNION_TYPE)
+ return NULL_TREE;
+ id = omp_mapper_id (id, type);
+ return lookup_name (id);
+}
+
+tree
+cxx_omp_extract_mapper_directive (tree vardecl)
+{
+ gcc_assert (TREE_CODE (vardecl) == VAR_DECL);
+
+ /* Instantiate the decl if we haven't already. */
+ mark_used (vardecl);
+ tree body = DECL_INITIAL (vardecl);
+
+ if (TREE_CODE (body) == STATEMENT_LIST)
+ {
+ tree_stmt_iterator tsi = tsi_start (body);
+ gcc_assert (TREE_CODE (tsi_stmt (tsi)) == DECL_EXPR);
+ tsi_next (&tsi);
+ body = tsi_stmt (tsi);
+ }
+
+ gcc_assert (TREE_CODE (body) == OMP_DECLARE_MAPPER);
+
+ return body;
+}
+
+/* For now we can handle singleton OMP_ARRAY_SECTIONs with custom mappers, but
+ nothing more complicated. */
+
+tree
+cxx_omp_map_array_section (location_t loc, tree t)
+{
+ tree low = TREE_OPERAND (t, 1);
+ tree len = TREE_OPERAND (t, 2);
+
+ if (len && integer_onep (len))
+ {
+ t = TREE_OPERAND (t, 0);
+
+ if (!low)
+ low = integer_zero_node;
+
+ if (TREE_CODE (TREE_TYPE (t)) == REFERENCE_TYPE)
+ t = convert_from_reference (t);
+
+ if (TYPE_PTR_P (TREE_TYPE (t))
+ || TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
+ t = build_array_ref (loc, t, low);
+ else
+ t = error_mark_node;
+ }
+
+ return t;
+}
+
/* Helper function for cp_parser_omp_declare_reduction_exprs
and tsubst_omp_udr.
Remove CLEANUP_STMT for data (omp_priv variable).
@@ -6903,6 +7188,69 @@ cp_check_omp_declare_reduction (tree udr)
return true;
}
+
+static bool
+cp_oacc_reduction_defined_type_p (enum tree_code reduction_code, tree t)
+{
+ if (TREE_CODE (t) == INTEGER_TYPE)
+ return true;
+
+ if (FLOAT_TYPE_P (t) || TREE_CODE (t) == COMPLEX_TYPE)
+ switch (reduction_code)
+ {
+ case PLUS_EXPR:
+ case MULT_EXPR:
+ case MINUS_EXPR:
+ case TRUTH_ANDIF_EXPR:
+ case TRUTH_ORIF_EXPR:
+ return true;
+ case MIN_EXPR:
+ case MAX_EXPR:
+ return TREE_CODE (t) != COMPLEX_TYPE;
+ case BIT_AND_EXPR:
+ case BIT_XOR_EXPR:
+ case BIT_IOR_EXPR:
+ return false;
+ default:
+ gcc_unreachable ();
+ }
+
+ if (TREE_CODE (t) == ARRAY_TYPE)
+ return cp_oacc_reduction_defined_type_p (reduction_code, TREE_TYPE (t));
+
+ if (TREE_CODE (t) == RECORD_TYPE)
+ {
+ for (tree fld = TYPE_FIELDS (t); fld; fld = TREE_CHAIN (fld))
+ if (TREE_CODE (fld) == FIELD_DECL
+ && !cp_oacc_reduction_defined_type_p (reduction_code,
+ TREE_TYPE (fld)))
+ return false;
+ return true;
+ }
+
+ return false;
+}
+
+static const char *
+cp_oacc_reduction_code_name (enum tree_code reduction_code)
+{
+ switch (reduction_code)
+ {
+ case PLUS_EXPR: return "+";
+ case MULT_EXPR: return "*";
+ case MINUS_EXPR: return "-";
+ case TRUTH_ANDIF_EXPR: return "&&";
+ case TRUTH_ORIF_EXPR: return "||";
+ case MIN_EXPR: return "min";
+ case MAX_EXPR: return "max";
+ case BIT_AND_EXPR: return "&";
+ case BIT_XOR_EXPR: return "^";
+ case BIT_IOR_EXPR: return "|";
+ default:
+ gcc_unreachable ();
+ }
+}
+
/* Helper function of finish_omp_clauses. Clone STMT as if we were making
an inline call. But, remap
the OMP_DECL1 VAR_DECL (omp_out resp. omp_orig) to PLACEHOLDER
@@ -6947,7 +7295,8 @@ find_omp_placeholder_r (tree *tp, int *, void *data)
Return true if there is some error and the clause should be removed. */
static bool
-finish_omp_reduction_clause (tree c, bool *need_default_ctor, bool *need_dtor)
+finish_omp_reduction_clause (tree c, enum c_omp_region_type ort,
+ bool *need_default_ctor, bool *need_dtor)
{
tree t = OMP_CLAUSE_DECL (c);
bool predefined = false;
@@ -7048,6 +7397,20 @@ finish_omp_reduction_clause (tree c, bool *need_default_ctor, bool *need_dtor)
return false;
}
+ if (ort == C_ORT_ACC)
+ {
+ enum tree_code r_code = OMP_CLAUSE_REDUCTION_CODE (c);
+ if (!cp_oacc_reduction_defined_type_p (r_code, TREE_TYPE (t)))
+ {
+ const char *r_name = cp_oacc_reduction_code_name (r_code);
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "%qE has invalid type for %<reduction(%s)%>",
+ t, r_name);
+ return true;
+ }
+ return false;
+ }
+
tree id = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
type = TYPE_MAIN_VARIANT (type);
@@ -7193,9 +7556,11 @@ finish_omp_reduction_clause (tree c, bool *need_default_ctor, bool *need_dtor)
*need_dtor = true;
else
{
- error_at (OMP_CLAUSE_LOCATION (c),
- "user defined reduction not found for %qE",
- omp_clause_printable_decl (t));
+ /* There are no user-defined reductions for OpenACC (as of 2.6). */
+ if (ort & C_ORT_OMP)
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "user defined reduction not found for %qE",
+ omp_clause_printable_decl (t));
return true;
}
if (TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF)
@@ -7204,6 +7569,29 @@ finish_omp_reduction_clause (tree c, bool *need_default_ctor, bool *need_dtor)
return false;
}
+/* Check an instance of an "omp declare mapper" function. */
+
+bool
+cp_check_omp_declare_mapper (tree udm)
+{
+ tree type = TREE_TYPE (udm);
+ location_t loc = DECL_SOURCE_LOCATION (udm);
+
+ if (type == error_mark_node)
+ return false;
+
+ if (!processing_template_decl
+ && TREE_CODE (type) != RECORD_TYPE
+ && TREE_CODE (type) != UNION_TYPE)
+ {
+ error_at (loc, "%qT is not a struct, union or class type in "
+ "%<#pragma omp declare mapper%>", type);
+ return false;
+ }
+
+ return true;
+}
+
/* Called from finish_struct_1. linear(this) or linear(this:step)
clauses might not be finalized yet because the class has been incomplete
when parsing #pragma omp declare simd methods. Fix those up now. */
@@ -7286,10 +7674,10 @@ cp_omp_finish_iterators (tree iter)
bool ret = false;
for (tree it = iter; it; it = TREE_CHAIN (it))
{
- tree var = TREE_VEC_ELT (it, 0);
- tree begin = TREE_VEC_ELT (it, 1);
- tree end = TREE_VEC_ELT (it, 2);
- tree step = TREE_VEC_ELT (it, 3);
+ tree var = OMP_ITERATORS_VAR (it);
+ tree begin = OMP_ITERATORS_BEGIN (it);
+ tree end = OMP_ITERATORS_END (it);
+ tree step = OMP_ITERATORS_STEP (it);
tree orig_step;
tree type = TREE_TYPE (var);
location_t loc = DECL_SOURCE_LOCATION (var);
@@ -7387,10 +7775,10 @@ cp_omp_finish_iterators (tree iter)
tree it2;
for (it2 = TREE_CHAIN (it); it2; it2 = TREE_CHAIN (it2))
{
- tree var2 = TREE_VEC_ELT (it2, 0);
- tree begin2 = TREE_VEC_ELT (it2, 1);
- tree end2 = TREE_VEC_ELT (it2, 2);
- tree step2 = TREE_VEC_ELT (it2, 3);
+ tree var2 = OMP_ITERATORS_VAR (it2);
+ tree begin2 = OMP_ITERATORS_BEGIN (it2);
+ tree end2 = OMP_ITERATORS_END (it2);
+ tree step2 = OMP_ITERATORS_STEP (it2);
location_t loc2 = DECL_SOURCE_LOCATION (var2);
if (cp_walk_tree (&begin2, find_omp_placeholder_r, var, &pset))
{
@@ -7416,14 +7804,14 @@ cp_omp_finish_iterators (tree iter)
ret = true;
continue;
}
- TREE_VEC_ELT (it, 1) = begin;
- TREE_VEC_ELT (it, 2) = end;
+ OMP_ITERATORS_BEGIN (it) = begin;
+ OMP_ITERATORS_END (it) = end;
if (processing_template_decl)
- TREE_VEC_ELT (it, 3) = orig_step;
+ OMP_ITERATORS_STEP (it) = orig_step;
else
{
- TREE_VEC_ELT (it, 3) = step;
- TREE_VEC_ELT (it, 4) = orig_step;
+ OMP_ITERATORS_STEP (it) = step;
+ OMP_ITERATORS_ORIG_STEP (it) = orig_step;
}
}
return ret;
@@ -7534,6 +7922,731 @@ cp_finish_omp_init_prefer_type (tree pref_type)
}
return t;
}
+/* LIST is a TREE_LIST, its TREE_PURPOSE is not used here, but its (possibly
+ NULL_TREE) value is propagated to any new nodes derived from the node.
+ Its TREE_VALUE is a TREE_LIST representing an OpenMP parameter-list-item.
+ Its TREE_PURPOSE contains an expr storing the location of the item and
+ TREE_VALUE contains the representation of the item. It is a NOP_EXPR,
+ INTEGER_CST, PARM_DECL, or TREE_LIST, this function possibly mutates it, it
+ is not preserved. DECL is the function the clause/clauses that LIST is
+ specified in is applied to. PARM_COUNT is the number of parameters, unless
+ the function has a parameter pack, or if the function is variadic, then
+ PARM_COUNT is 0. Functions with an empty parameter list are not handled
+ here.
+
+ Finalize each element in LIST, diagnose non-unique elements, and mutate
+ the original element appending them to a new list. The finalized parameter
+ indices are 0 based in contrast to OpenMP specifying 1 based parameter
+ indices, that adjustment is done here. NOP_EXPR elements require adjustment
+ from a 1 based index to a 0 based index. INTEGER_CST elements are finalized
+ parameter indices, but are still used for diagnosing duplicate elements.
+ PARM_DECL elements are switched out for their corresponding 0 based index,
+ provided it can be determined. TREE_LIST represents a numeric range. If
+ the number of parameters is known and DECL is non-variadic, relative bounds
+ are folded into literal bounds. If both bounds are non-relative the numeric
+ range is expanded, replacing the TREE_LIST with N INTEGER_CST nodes for each
+ index in the numeric range. If DECL is variadic, numeric ranges with
+ a relative bound are represented the same as in c_parser_omp_parm_list
+ so gimplify.cc:modify_call_for_omp_dispatch can handle them the same way.
+
+ Returns TREE_LIST or error_mark_node if each elt was invalid. */
+
+tree
+finish_omp_parm_list (tree list, const_tree decl, const int parm_count)
+{
+ gcc_assert (list && decl);
+ const tree parms = DECL_ARGUMENTS (decl);
+ /* We assume that the this parameter is included in parms, make sure this is
+ still true. */
+ gcc_assert (!DECL_IOBJ_MEMBER_FUNCTION_P (decl)
+ || is_this_parameter (parms));
+ /* We expect at least one argument, unless the function is variadic, in which
+ case parm_count will be 0. */
+ gcc_assert (parm_count >= 0 && (parms || parm_count == 0));
+
+ hash_map<int_hash<int, -1, -2>, tree> specified_idxs;
+ /* If there are any nodes that were not able to be finalized and/or expanded
+ this gets set to true, this can occur even if we are fully instantiated
+ if the function is a variadic function. */
+ bool dependent = false;
+ tree new_list = NULL_TREE;
+ auto append_to_list = [chain = &new_list] (tree node) mutable
+ {
+ gcc_assert (*chain == NULL_TREE);
+ *chain = node;
+ chain = &TREE_CHAIN (*chain);
+ };
+
+ const int iobj_parm_adjustment = DECL_IOBJ_MEMBER_FUNCTION_P (decl) ? 1 : 0;
+
+ for (tree next, node = list; node; node = next)
+ {
+ /* Nodes are mutated and appended to new_list, retrieve its chain early
+ and remember it. */
+ next = TREE_CHAIN (node);
+ TREE_CHAIN (node) = NULL_TREE;
+ tree parm_list_item = TREE_VALUE (node);
+ switch (TREE_CODE (TREE_VALUE (parm_list_item)))
+ {
+ case NOP_EXPR:
+ {
+ /* cp_parser_omp_parm_list stores parameter index items in a
+ NOP so we know to modify them here. This solution is
+ imperfect, but there isn't time to do it differently. */
+ tree cst = TREE_OPERAND (TREE_VALUE (parm_list_item), 0);
+ /* Adjust the 1 based index to 0 based, potentially adjust for
+ the 'this' parameter. */
+ const int idx = tree_to_shwi (cst) - 1 + iobj_parm_adjustment;
+ TREE_VALUE (parm_list_item)
+ = build_int_cst (integer_type_node, idx);
+ gcc_fallthrough ();
+ }
+ case INTEGER_CST:
+ {
+ /* These are finished, just check for errors and append
+ them to the list.
+ Check everything, we might have new errors if we didn't know
+ how many parameters we had the first time around. */
+ const int idx = tree_to_shwi (TREE_VALUE (parm_list_item));
+ tree *first = specified_idxs.get (idx);
+ if (first)
+ {
+ error_at (EXPR_LOCATION (TREE_PURPOSE (parm_list_item)),
+ "OpenMP parameter list items must specify a "
+ "unique parameter");
+ inform (EXPR_LOCATION (TREE_PURPOSE (*first)),
+ "parameter previously specified here");
+ }
+ else if (parm_count != 0 && idx >= parm_count)
+ {
+ error_at (EXPR_LOCATION (TREE_PURPOSE (parm_list_item)),
+ "parameter list item index is out of range");
+ }
+ else
+ {
+ append_to_list (node);
+ if (specified_idxs.put (idx, parm_list_item))
+ gcc_unreachable ();
+ }
+ break;
+ }
+ case PARM_DECL:
+ {
+ const const_tree parm_to_find = TREE_VALUE (parm_list_item);
+ /* Indices are stored as 0 based, don't adjust for iobj func,
+ the this parameter is present in parms. */
+ int idx = 0;
+ const_tree parm = parms;
+ while (true)
+ {
+ /* We already confirmed that the parameter exists
+ in cp_parser_omp_parm_list, we should never be reaching
+ the end of this list. */
+ gcc_assert (parm);
+ /* Expansion of a parameter pack will change the index of
+ parameters that come after it, we will have to defer this
+ lookup until the fndecl has been substituted. */
+ if (DECL_PACK_P (parm))
+ {
+ gcc_assert (processing_template_decl);
+ /* As explained above, this only happens with a parameter
+ pack in the middle of a function, this slower lookup
+ is fine for such an edge case. */
+ const tree first = [&] ()
+ {
+ for (tree n = new_list; n; n = TREE_CHAIN (n))
+ {
+ tree item = TREE_VALUE (n);
+ if (TREE_CODE (TREE_VALUE (item)) == PARM_DECL
+ /* This comparison works because we make sure
+ to store the original node. */
+ && TREE_VALUE (item) == parm_to_find)
+ return item;
+ }
+ return NULL_TREE;
+ } (); /* IILE. */
+ if (first)
+ {
+ location_t loc
+ = EXPR_LOCATION (TREE_PURPOSE (parm_list_item));
+ error_at (loc,
+ "OpenMP parameter list items must specify "
+ "a unique parameter");
+ inform (EXPR_LOCATION (TREE_PURPOSE (first)),
+ "parameter previously specified here");
+ }
+ else
+ {
+ /* We need to process this again once the pack
+ blocking us has been expanded. */
+ dependent = true;
+ /* Make sure we use the original so the above
+ comparison works when we return here later.
+ This may no longer be required since we are
+ comparing the DECL_NAME of each below, but
+ regardless, use the original. */
+ append_to_list (node);
+ }
+ break;
+ }
+ /* Compare the identifier nodes so that the comparison works
+ even after the node has been substituted. */
+ if (DECL_NAME (parm) == DECL_NAME (parm_to_find))
+ {
+ tree *first = specified_idxs.get (idx);
+ if (first)
+ {
+ location_t loc
+ = EXPR_LOCATION (TREE_PURPOSE (parm_list_item));
+ error_at (loc,
+ "OpenMP parameter list items must specify "
+ "a unique parameter");
+ inform (EXPR_LOCATION (TREE_PURPOSE (*first)),
+ "parameter previously specified here");
+ }
+ else
+ {
+ TREE_VALUE (parm_list_item)
+ = build_int_cst (integer_type_node, idx);
+ append_to_list (node);
+ if (specified_idxs.put (idx, parm_list_item))
+ gcc_unreachable ();
+ }
+ break;
+ }
+ ++idx;
+ parm = DECL_CHAIN (parm);
+ }
+ break;
+ }
+ case TREE_LIST:
+ {
+ /* Mutates bound.
+ This is the final point where indices and ranges are adjusted
+ from OpenMP spec representation (1 based indices, inclusive
+ intervals [lb, ub]) to GCC's internal representation (0 based
+ indices, inclusive lb, exclusive ub [lb, ub)), this is
+ intended to match C++ semantics.
+ Care must be taken to ensure we do not make these adjustments
+ multiple times. */
+ auto do_bound = [&] (tree bound, const int correction)
+ {
+ gcc_assert (-1 <= correction && correction <= 1);
+ if (bound == error_mark_node)
+ return bound;
+
+ tree expr = TREE_VALUE (bound);
+ /* If we already have an integer_cst, the bound has already
+ been converted to a 0 based index. Do not strip location
+ wrappers, they might be a template parameter that got
+ substituted to an INTEGER_CST, but not been finalized
+ yet. */
+ if (TREE_CODE (expr) == INTEGER_CST)
+ return bound;
+
+ const location_t expr_loc = EXPR_LOCATION (expr);
+
+ if (type_dependent_expression_p (expr))
+ {
+ ATTR_IS_DEPENDENT (bound) = true;
+ return bound;
+ }
+ else if (!INTEGRAL_TYPE_P (TREE_TYPE (expr)))
+ {
+ if (TREE_PURPOSE (bound))
+ error_at (expr_loc, "logical offset of a bound must "
+ "be of type %<int%>");
+ else
+ error_at (expr_loc, "expression of a bound must be "
+ "of type %<int%>");
+ return error_mark_node;
+ }
+ else if (value_dependent_expression_p (expr))
+ {
+ ATTR_IS_DEPENDENT (bound) = true;
+ return bound;
+ }
+ /* EXPR is not dependent, get rid of any leftover location
+ wrappers. */
+ expr = tree_strip_any_location_wrapper (expr);
+ /* Unless we want some really good diagnostics, we don't need
+ to wrap expr with a location anymore. Additionally, if we
+ do that we need a new way of differentiating adjusted and
+ unadjusted expressions. */
+
+ /* Do we need to mark this as an rvalue use with
+ mark_rvalue_use as well?
+ We either need to strictly only accept expressions of type
+ int, or warn for conversions.
+ I'm pretty sure this should be manifestly
+ constant-evaluated. We require a constant here,
+ let fold_non_dependent_expr complain. */
+ expr = fold_non_dependent_expr (expr,
+ tf_warning_or_error,
+ true);
+ if (!TREE_CONSTANT (expr))
+ {
+ if (TREE_PURPOSE (bound))
+ error_at (expr_loc, "logical offset of a bound must "
+ "be a constant expression");
+ else
+ error_at (expr_loc, "expression of a bound must be a "
+ "constant expression");
+ return error_mark_node;
+ }
+
+ const int sgn = tree_int_cst_sgn (expr);
+ const int val = tree_to_shwi (expr);
+ /* Technically this can work with omp_num_args+expr but the
+ spec forbids it, we can support it later if we like. */
+ if (sgn < 0)
+ {
+ if (TREE_PURPOSE (bound))
+ error_at (expr_loc, "logical offset of a bound must "
+ "be non negative");
+ else
+ error_at (expr_loc, "expression of a bound must be "
+ "positive");
+ return error_mark_node;
+ }
+
+ const const_tree num_args_marker = TREE_PURPOSE (bound);
+ if (num_args_marker == NULL_TREE)
+ {
+ if (sgn != 1)
+ {
+ error_at (expr_loc, "expression of bound must be "
+ "positive");
+ return error_mark_node;
+ }
+ if (parm_count > 0 && val > parm_count)
+ {
+ /* FIXME: output omp_num_args and parm_count. */
+ error_at (expr_loc, "expression of bound is out "
+ "of range");
+ return error_mark_node;
+ }
+ TREE_VALUE (bound) = build_int_cst (integer_type_node,
+ val + correction);
+ return bound;
+ }
+ else if (num_args_marker
+ == get_identifier ("omp num args plus"))
+ {
+ if (sgn != 0)
+ {
+ error_at (expr_loc,
+ "logical offset must be equal to 0 in a "
+ "bound of the form "
+ "%<omp_num_args+logical-offset%>");
+ return error_mark_node;
+ }
+ TREE_PURPOSE (bound)
+ = get_identifier ("omp relative bound");
+ /* This expresses
+ omp_num_args + correction + logical offset,
+ the only valid value for logical offset is 0. */
+ TREE_VALUE (bound) = build_int_cst (integer_type_node,
+ correction + 0);
+ return bound;
+ }
+ else if (num_args_marker
+ == get_identifier ("omp num args minus"))
+ {
+ gcc_assert (sgn != -1);
+ TREE_PURPOSE (bound)
+ = get_identifier ("omp relative bound");
+ /* Don't invert correction, we are expressing
+ omp_num_args + correction - logical offset. */
+ TREE_VALUE (bound) = build_int_cst (integer_type_node,
+ correction + (-val));
+ return bound;
+ }
+ gcc_unreachable ();
+ };
+ /* Convert both to 0 based indices, upper bound
+ is stored one past the end. */
+ static constexpr int lb_adjustment = -1;
+ static constexpr int ub_adjustment = -1 + 1;
+
+ tree range = TREE_VALUE (parm_list_item);
+ tree lb = do_bound (TREE_PURPOSE (range),
+ lb_adjustment + iobj_parm_adjustment);
+ tree ub = do_bound (TREE_VALUE (range),
+ ub_adjustment + iobj_parm_adjustment);
+ gcc_assert (lb && ub);
+ /* If we know how many params there are for sure we can
+ change this bound to a literal. */
+ auto maybe_fold_relative_bound = [&] (tree bound)
+ {
+ if (bound == error_mark_node
+ || parm_count == 0
+ || !TREE_PURPOSE (bound))
+ return bound;
+ gcc_assert (TREE_PURPOSE (bound)
+ == get_identifier ("omp relative bound"));
+ const int value = tree_to_shwi (TREE_VALUE (bound));
+ gcc_assert (value <= 0 && parm_count >= 1);
+ /* Overflow is impossible. */
+ const int diff = parm_count + value;
+ if (diff < 0)
+ {
+ /* FIXME: output value of omp_num_args. */
+ error_at (EXPR_LOCATION (TREE_CHAIN (bound)),
+ "bound with logical offset evaluates to an "
+ "out of range index");
+ return error_mark_node;
+ }
+ gcc_assert (diff < INT_MAX);
+ TREE_PURPOSE (bound) = NULL_TREE;
+ TREE_VALUE (bound) = build_int_cst (integer_type_node, diff);
+ return bound;
+ };
+ lb = maybe_fold_relative_bound (lb);
+ ub = maybe_fold_relative_bound (ub);
+
+ gcc_assert (lb && ub);
+ const tree range_loc_wrapped = TREE_PURPOSE (parm_list_item);
+
+ auto append_one_idx = [&] (tree purpose, tree loc_expr, int idx)
+ {
+ tree *dupe = specified_idxs.get (idx);
+ gcc_assert (!dupe || *dupe);
+ if (dupe)
+ return *dupe;
+ tree cst = build_int_cst (integer_type_node, idx);
+ tree new_item = build_tree_list (loc_expr, cst);
+ append_to_list (build_tree_list (purpose, new_item));
+ if (specified_idxs.put (idx, parm_list_item))
+ gcc_unreachable ();
+ return NULL_TREE;
+ };
+
+ /* TODO: handle better lol. */
+ if (lb == error_mark_node || ub == error_mark_node)
+ continue;
+ /* Wait until both lb and ub are substituted before trying to
+ process any further, we are also done if both bounds are
+ relative. */
+ if ((TREE_PURPOSE (lb) && TREE_PURPOSE (ub))
+ || value_dependent_expression_p (TREE_VALUE (lb))
+ || value_dependent_expression_p (TREE_VALUE (ub)))
+ {
+ /* If we are instantiating and have unexpanded numeric ranges
+ then this function must be variadic, and thus it doesn't
+ make this parm list dependent.
+ This doesn't really matter since we are hijacking this
+ flag but it doesn't hurt to be technically correct. */
+ /* Early escape...? */
+ }
+ /* If both bounds are non relative, we can fully expand them. */
+ else if (!TREE_PURPOSE (lb) && !TREE_PURPOSE (ub))
+ {
+ const int lb_val = tree_to_shwi (TREE_VALUE (lb));
+ const int ub_val = tree_to_shwi (TREE_VALUE (ub));
+ /* Empty ranges are not allowed at this point. */
+ if (lb_val >= ub_val)
+ {
+ /* Note that the error message does not match the
+ condition as we altered ub to be one past the end. */
+ error_at (EXPR_LOCATION (range_loc_wrapped),
+ "numeric range lower bound must be less than "
+ "or equal to upper bound");
+ continue;
+ }
+ gcc_assert (lb_val >= 0 && ub_val > 0 && lb_val < ub_val);
+
+ for (int idx = lb_val; idx < ub_val; ++idx)
+ {
+ /* There might be something in PURPOSE that we want to
+ propagate when expanding. */
+ tree dupe = append_one_idx (TREE_PURPOSE (node),
+ range_loc_wrapped,
+ idx);
+ if (dupe)
+ {
+ const int omp_idx = idx + 1;
+ error_at (EXPR_LOCATION (range_loc_wrapped),
+ "expansion of numeric range specifies "
+ "non-unique index %d",
+ omp_idx);
+ inform (EXPR_LOCATION (TREE_PURPOSE (dupe)),
+ "parameter previously specified here");
+ }
+ }
+ /* The range is fully expanded, do not add it back to the
+ list. */
+ TREE_CHAIN (node) = NULL_TREE;
+ continue;
+ }
+ else if (!processing_template_decl)
+ {
+ /* Wait until we are fully instantiated to make this
+ transformation, expanding a bound with omp_num_args after
+ doing this will cause bugs.
+ We also potentially cause bugs if one gets expanded, gets
+ a partial expansion here, and then the other bound gets
+ expanded later. That case is probably fine but we should
+ avoid it anyway. */
+ gcc_assert (!TREE_PURPOSE (lb)
+ || TREE_PURPOSE (lb)
+ == get_identifier ("omp relative bound"));
+ gcc_assert (!TREE_PURPOSE (ub)
+ || TREE_PURPOSE (ub)
+ == get_identifier ("omp relative bound"));
+ /* At least one of lb and ub are NULL_TREE, and the other
+ is omp relative bound. */
+ gcc_assert (TREE_PURPOSE (lb) != TREE_PURPOSE (ub));
+ /* This only adds slight quality of life to diagnostics, it
+ isn't really worth it, but we need parity with the C front
+ end. Alternatively, handling empty numeric ranges could
+ have been removed from modify_call_for_omp_dispatch but
+ it's already there and it isn't that hard to add support
+ here. */
+ if (TREE_PURPOSE (ub))
+ {
+ /* The C front end goes a little further adding all
+ indices between lb and the last real parameter,
+ we aren't going to those efforts here though. */
+ gcc_assert (!TREE_PURPOSE (lb));
+ const int val = tree_to_shwi (TREE_VALUE (lb));
+ gcc_assert (val < INT_MAX);
+ /* We know the index in lb will always be specified. */
+ tree dupe = append_one_idx (TREE_PURPOSE (node),
+ range_loc_wrapped,
+ val);
+ if (dupe)
+ {
+ error_at (EXPR_LOCATION (range_loc_wrapped),
+ "lower bound of numeric range specifies "
+ "non-unique index %d",
+ val);
+ inform (EXPR_LOCATION (TREE_PURPOSE (dupe)),
+ "parameter previously specified here");
+ }
+ /* The value was added above, adjust lb to be ahead by
+ one so it's not added again in
+ modify_call_for_omp_dispatch. */
+ TREE_VALUE (lb) = build_int_cst (integer_type_node,
+ val + 1);
+ }
+ else
+ {
+ gcc_assert (TREE_PURPOSE (lb));
+ const int val = tree_to_shwi (TREE_VALUE (ub));
+ gcc_assert (val > 0);
+ /* We know the index in ub will always be specified. */
+ tree dupe = append_one_idx (TREE_PURPOSE (node),
+ range_loc_wrapped,
+ val);
+ if (dupe)
+ {
+ error_at (EXPR_LOCATION (range_loc_wrapped),
+ "upper bound of numeric range specifies "
+ "non-unique index %d", val);
+ inform (EXPR_LOCATION (TREE_PURPOSE (dupe)),
+ "parameter previously specified here");
+ }
+ /* The value was added above, adjust ub to be behind by
+ one so it's not added again in
+ modify_call_for_omp_dispatch. */
+ TREE_VALUE (ub) = build_int_cst (integer_type_node,
+ val - 1);
+ }
+ /* This is not a full expansion, just a partial one; we still
+ need to add the numeric range to the final list. */
+ }
+ dependent = processing_template_decl;
+ TREE_PURPOSE (range) = lb;
+ TREE_VALUE (range) = ub;
+ TREE_VALUE (parm_list_item) = range;
+ append_to_list (node);
+ break;
+ }
+ default:
+ gcc_unreachable ();
+ }
+ }
+ if (!new_list)
+ return error_mark_node;
+ /* Kinda a hack, hopefully temporary. */
+ ATTR_IS_DEPENDENT (new_list) = dependent;
+ return new_list;
+}
+
+/* LIST is a TREE_LIST representing an OpenMP parameter-list specified in an
+ adjust_args clause, or multiple concatenated parameter-lists each specified
+ in an adjust_args clause, each of which may have the same clause modifier,
+ or different clause modifiers. The clause modifier of the adjust_args
+ clause the parameter-list-item was specified in is stored in the
+ TREE_PURPOSE of each elt of LIST. DECL is the function decl the clauses
+ are applied to, PARM_COUNT is 0 if the number of parameters is unknown
+ or because the function is variadic, otherwise PARM_COUNT is the number of
+ parameters.
+
+ Check for and diagnose invalid parameter types for each item, remove them
+ from the list so errors are not diagnosed multiple times. Remove items with
+ the "nothing" modifier once everything is done.
+
+ Returns TREE_LIST or NULL_TREE if no items have errors, returns TREE_LIST
+ or error_mark_node if there were errors diagnosed. NULL_TREE is never
+ returned if an error was diagnosed. */
+
+tree
+finish_omp_adjust_args (tree list, const_tree decl, const int parm_count)
+{
+  gcc_assert (list && decl);
+  /* We need to keep track of this so we know whether we can remove items with
+     the "nothing" modifier.  */
+  bool has_dependent_list_items = false;
+
+  const const_tree need_device_ptr_id = get_identifier ("need_device_ptr");
+  const const_tree need_device_addr_id = get_identifier ("need_device_addr");
+  const const_tree nothing_id = get_identifier ("nothing");
+  /* In-place list-surgery helpers: KEEP_NODE advances the tail pointer past
+     N, REMOVE_NODE unlinks N from LIST via the tail pointer.  */
+  tree *prev_chain = &list;
+  auto keep_node = [&] (tree n) { prev_chain = &TREE_CHAIN (n); };
+  auto remove_node = [&] (tree n) { *prev_chain = TREE_CHAIN (n); };
+  for (tree n = list; n != NULL_TREE; n = TREE_CHAIN (n))
+    {
+      tree parm_list_item = TREE_VALUE (n);
+      if (TREE_CODE (TREE_VALUE (parm_list_item)) == PARM_DECL)
+	{
+	  /* We only encounter a PARM_DECL here if a parameter pack comes
+	     before it, it will have been replaced by an index by
+	     finish_omp_parm_list otherwise.  */
+	  gcc_assert (processing_template_decl);
+	  keep_node (n);
+	  has_dependent_list_items = true;
+	  continue;
+	}
+      /* Numeric range case.  */
+      if (TREE_CODE (TREE_VALUE (parm_list_item)) == TREE_LIST)
+	{
+	  /* These will have been expanded by finish_omp_parm_list unless we
+	     can't determine the number of parameters.  */
+	  gcc_assert (processing_template_decl || parm_count == 0);
+	  keep_node (n);
+	  has_dependent_list_items = true;
+	  continue;
+	}
+      gcc_assert (TREE_CODE (TREE_VALUE (parm_list_item)) == INTEGER_CST);
+
+      const const_tree n_modifier = TREE_PURPOSE (n);
+      gcc_assert (n_modifier == nothing_id
+		  || n_modifier == need_device_ptr_id
+		  || n_modifier == need_device_addr_id);
+      if (n_modifier == nothing_id)
+	{
+	  keep_node (n);
+	  continue;
+	}
+      const int idx = tree_to_shwi (TREE_VALUE (parm_list_item));
+
+      gcc_assert (idx >= 0 && (parm_count == 0 || idx < parm_count));
+      /* Look up the PARM_DECL for IDX; NULL_TREE if it can't be determined
+	 yet (no parms visible, an unexpanded pack precedes it, or IDX is
+	 past the known parms).  */
+      const const_tree parm_decl = [&] () -> const_tree
+	{
+	  const const_tree parms = DECL_ARGUMENTS (decl);
+	  gcc_assert (parms != NULL_TREE || parm_count == 0);
+	  if (parms == NULL_TREE
+	      || (parm_count != 0 && idx >= parm_count))
+	    return NULL_TREE;
+
+	  int cur_idx = 0;
+	  for (const_tree p = parms; p != NULL_TREE; p = DECL_CHAIN (p))
+	    {
+	      /* This kind of sucks, we really should be building a vec instead
+		 of traversing the list of parms each time.  */
+	      gcc_assert (parm_count == 0 || cur_idx < parm_count);
+	      if (DECL_PACK_P (p))
+		return NULL_TREE;
+	      if (cur_idx == idx)
+		return p;
+	      ++cur_idx;
+	    }
+	  return NULL_TREE;
+	} (); /* IILE. */
+      /* cp_parser_omp_parm_list handles out of range indices.  */
+      gcc_assert (parm_count == 0 || parm_decl);
+
+      if (!parm_decl)
+	has_dependent_list_items = true;
+      else if (n_modifier == need_device_ptr_id)
+	{
+	  /* OpenMP 6.0 (332:28-30)
+	     If the need_device_ptr adjust-op modifier is present, each list
+	     item that appears in the clause that refers to a specific named
+	     argument in the declaration of the function variant must be of
+	     pointer type or reference to pointer type.  */
+	  tree parm_type = TREE_TYPE (parm_decl);
+	  if (WILDCARD_TYPE_P (parm_type))
+	    /* Do nothing for now, it might become a pointer.  */;
+	  else if (TYPE_REF_P (parm_type)
+		   && WILDCARD_TYPE_P (TREE_TYPE (parm_type)))
+	    /* It might become a reference to a pointer.  */;
+	  else if (!TYPE_PTR_P (parm_type))
+	    {
+	      if (TYPE_REF_P (parm_type)
+		  && TYPE_PTR_P (TREE_TYPE (parm_type)))
+		/* The semantics for this are unclear, instead of supporting
+		   it incorrectly, just sorry.  */
+		sorry_at (DECL_SOURCE_LOCATION (parm_decl),
+			  "parameter with type reference to pointer in an "
+			  "%<adjust_args%> with the %<need_device_ptr%> "
+			  "modifier is not currently supported");
+	      else
+		error_at (DECL_SOURCE_LOCATION (parm_decl),
+			  "parameter specified in an %<adjust_args%> clause "
+			  "with the %<need_device_ptr%> modifier must be of "
+			  "pointer type");
+	      inform (EXPR_LOCATION (TREE_PURPOSE (parm_list_item)),
+		      "parameter specified here");
+	      remove_node (n);
+	      continue;
+	    }
+	}
+      else if (n_modifier == need_device_addr_id)
+	{
+	  /* OpenMP 6.0 (332:31-33)
+	     If the need_device_addr adjust-op modifier is present, each list
+	     item that appears in the clause must refer to an argument in the
+	     declaration of the function variant that has a reference type.  */
+	  tree parm_type = TREE_TYPE (parm_decl);
+	  if (WILDCARD_TYPE_P (parm_type))
+	    /* Do nothing for now, it might become a ref.  */;
+	  else if (!TYPE_REF_P (parm_type))
+	    {
+	      error_at (DECL_SOURCE_LOCATION (parm_decl),
+			"parameter specified in an %<adjust_args%> clause "
+			"with the %<need_device_addr%> modifier must be of "
+			"reference type");
+	      inform (EXPR_LOCATION (TREE_PURPOSE (parm_list_item)),
+		      "parameter specified here");
+	      remove_node (n);
+	      continue;
+	    }
+	}
+      /* If we get here there were no errors.  */
+      keep_node (n);
+    }
+
+  /* All items were removed due to errors.  */
+  if (!list)
+    return error_mark_node;
+  /* Dependent items must survive a later rescan, so keep "nothing" items
+     too in that case.  */
+  if (has_dependent_list_items)
+    return list;
+  /* We no longer need to keep items with the "nothing" modifier.  */
+  prev_chain = &list;
+  for (tree n = list; n != NULL_TREE; n = TREE_CHAIN (n))
+    {
+      if (TREE_PURPOSE (n) == nothing_id)
+	remove_node (n);
+      else
+	keep_node (n);
+    }
+  /* If all items had the "nothing" modifier, we might have NULL_TREE here,
+     but that isn't a problem.  */
+  return list;
+}
/* For all elements of CLAUSES, validate them vs OpenMP constraints.
Remove any elements from the list that are invalid. */
@@ -7603,7 +8716,14 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
/* We've reached the end of a list of expanded nodes. Reset the group
start pointer. */
if (c == grp_sentinel)
- grp_start_p = NULL;
+ {
+ if (grp_start_p
+ && OMP_CLAUSE_HAS_ITERATORS (*grp_start_p))
+ for (tree gc = *grp_start_p; gc != grp_sentinel;
+ gc = OMP_CLAUSE_CHAIN (gc))
+ OMP_CLAUSE_ITERATORS (gc) = OMP_CLAUSE_ITERATORS (*grp_start_p);
+ grp_start_p = NULL;
+ }
switch (OMP_CLAUSE_CODE (c))
{
@@ -7632,7 +8752,7 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) == OMP_ARRAY_SECTION)
{
- if (handle_omp_array_sections (c, ort))
+ if (handle_omp_array_sections (pc, NULL, ort, NULL))
{
remove = true;
break;
@@ -8677,14 +9797,107 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
break;
}
gcc_unreachable ();
+ case OMP_CLAUSE_USES_ALLOCATORS:
+ t = OMP_CLAUSE_USES_ALLOCATORS_ALLOCATOR (c);
+ if (TREE_CODE (t) == FIELD_DECL)
+ {
+ sorry_at (OMP_CLAUSE_LOCATION (c), "class members not yet "
+ "supported in %<uses_allocators%> clause");
+ remove = true;
+ break;
+ }
+ t = convert_from_reference (t);
+ if (TREE_CODE (TREE_TYPE (t)) != ENUMERAL_TYPE
+ || strcmp (IDENTIFIER_POINTER (TYPE_IDENTIFIER (TREE_TYPE (t))),
+ "omp_allocator_handle_t") != 0)
+ {
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "allocator must be of %<omp_allocator_handle_t%> type");
+ remove = true;
+ break;
+ }
+ if (TREE_CODE (t) == CONST_DECL)
+ {
+ /* Currently for pre-defined allocators in libgomp, we do not
+ require additional init/fini inside target regions, so discard
+ such clauses. */
+ remove = true;
+
+ if (strcmp (IDENTIFIER_POINTER (DECL_NAME (t)),
+ "omp_null_allocator") == 0)
+ {
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "%<omp_null_allocator%> cannot be used in "
+ "%<uses_allocators%> clause");
+ break;
+ }
+
+ if (OMP_CLAUSE_USES_ALLOCATORS_MEMSPACE (c)
+ || OMP_CLAUSE_USES_ALLOCATORS_TRAITS (c))
+ {
+ error_at (OMP_CLAUSE_LOCATION (c),
+ "modifiers cannot be used with pre-defined "
+ "allocators");
+ break;
+ }
+ }
+ t = OMP_CLAUSE_USES_ALLOCATORS_MEMSPACE (c);
+ if (t != NULL_TREE
+ && (TREE_CODE (t) != CONST_DECL
+ || TREE_CODE (TREE_TYPE (t)) != ENUMERAL_TYPE
+ || strcmp (IDENTIFIER_POINTER (TYPE_IDENTIFIER (TREE_TYPE (t))),
+ "omp_memspace_handle_t") != 0))
+ {
+ error_at (OMP_CLAUSE_LOCATION (c), "memspace modifier must be "
+ "constant enum of %<omp_memspace_handle_t%> type");
+ remove = true;
+ break;
+ }
+ t = OMP_CLAUSE_USES_ALLOCATORS_TRAITS (c);
+ if (t != NULL_TREE)
+ {
+ bool type_err = false;
+
+ if (TREE_CODE (TREE_TYPE (t)) != ARRAY_TYPE
+ || DECL_SIZE (t) == NULL_TREE)
+ type_err = true;
+ else
+ {
+ tree elem_t = TREE_TYPE (TREE_TYPE (t));
+ if (TREE_CODE (elem_t) != RECORD_TYPE
+ || strcmp (IDENTIFIER_POINTER (TYPE_IDENTIFIER (elem_t)),
+ "omp_alloctrait_t") != 0
+ || !TYPE_READONLY (elem_t))
+ type_err = true;
+ }
+ if (type_err)
+ {
+ error_at (OMP_CLAUSE_LOCATION (c), "traits array %qE must be of "
+ "%<const omp_alloctrait_t []%> type", t);
+ remove = true;
+ }
+ else
+ {
+ tree cst_val = decl_constant_value (t);
+ if (cst_val == t)
+ {
+ error_at (OMP_CLAUSE_LOCATION (c), "traits array must be "
+ "of constant values");
+
+ remove = true;
+ }
+ }
+ }
+ if (remove)
+ break;
+ pc = &OMP_CLAUSE_CHAIN (c);
+ continue;
case OMP_CLAUSE_DEPEND:
depend_clause = c;
/* FALLTHRU */
case OMP_CLAUSE_AFFINITY:
t = OMP_CLAUSE_DECL (c);
- if (TREE_CODE (t) == TREE_LIST
- && TREE_PURPOSE (t)
- && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
+ if (OMP_ITERATOR_DECL_P (t))
{
if (TREE_PURPOSE (t) != last_iterators)
last_iterators_remove
@@ -8699,7 +9912,8 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
if (TREE_CODE (t) == OMP_ARRAY_SECTION)
{
- if (handle_omp_array_sections (c, ort))
+ int discontiguous = 1;
+ if (handle_omp_array_sections (pc, NULL, ort, &discontiguous))
remove = true;
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
&& (OMP_CLAUSE_DEPEND_KIND (c)
@@ -8848,9 +10062,25 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
case OMP_CLAUSE_MAP:
if (OMP_CLAUSE_MAP_IMPLICIT (c) && !implicit_moved)
goto move_implicit;
+ if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_PUSH_MAPPER_NAME
+ || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POP_MAPPER_NAME)
+ {
+ remove = true;
+ break;
+ }
+ if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_GRID_DIM
+ || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_GRID_STRIDE)
+ break;
/* FALLTHRU */
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
+ if (OMP_CLAUSE_ITERATORS (c)
+ && cp_omp_finish_iterators (OMP_CLAUSE_ITERATORS (c)))
+ {
+ t = error_mark_node;
+ break;
+ }
+ /* FALLTHRU */
case OMP_CLAUSE__CACHE_:
{
using namespace omp_addr_tokenizer;
@@ -8861,11 +10091,25 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
{
grp_start_p = pc;
grp_sentinel = OMP_CLAUSE_CHAIN (c);
-
- if (handle_omp_array_sections (c, ort))
+ /* FIXME: Strided target updates not supported together with
+ iterators yet. */
+ int discontiguous
+ = (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO
+ || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FROM)
+ && !OMP_CLAUSE_ITERATORS (c);
+ bool strided = false;
+ tree *pnext = NULL;
+ if (handle_omp_array_sections (pc, &pnext, ort, &discontiguous,
+ &strided))
remove = true;
else
{
+ if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO
+ || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FROM)
+ && OMP_CLAUSE_ITERATORS (c) && strided)
+ sorry ("strided target updates with iterators");
+ /* We might have replaced the clause, so refresh C. */
+ c = *pc;
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) != OMP_ARRAY_SECTION
&& !type_dependent_expression_p (t)
@@ -8965,6 +10209,8 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
clauses, reset the OMP_CLAUSE_SIZE (representing a bias)
to zero here. */
OMP_CLAUSE_SIZE (c) = size_zero_node;
+ if (pnext)
+ c = *pnext;
break;
}
else if (type_dependent_expression_p (t))
@@ -9148,7 +10394,7 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
bitmap_set_bit (&map_firstprivate_head, DECL_UID (t));
else if (bitmap_bit_p (&map_head, DECL_UID (t))
&& !bitmap_bit_p (&map_field_head, DECL_UID (t))
- && ort != C_ORT_OMP
+ && ort != C_ORT_OMP && ort != C_ORT_OMP_TARGET
&& ort != C_ORT_OMP_EXIT_DATA)
{
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP)
@@ -9213,10 +10459,10 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
{
grp_start_p = pc;
grp_sentinel = OMP_CLAUSE_CHAIN (c);
- tree nc = ai.expand_map_clause (c, OMP_CLAUSE_DECL (c),
- addr_tokens, ort);
- if (nc != error_mark_node)
- c = nc;
+ tree *npc = ai.expand_map_clause (pc, OMP_CLAUSE_DECL (c),
+ addr_tokens, ort);
+ if (npc != NULL)
+ c = *npc;
}
}
break;
@@ -9454,10 +10700,11 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) == OMP_ARRAY_SECTION)
{
- if (handle_omp_array_sections (c, ort))
+ if (handle_omp_array_sections (pc, NULL, ort, NULL))
remove = true;
else
{
+ c = *pc;
t = OMP_CLAUSE_DECL (c);
while (TREE_CODE (t) == OMP_ARRAY_SECTION)
t = TREE_OPERAND (t, 0);
@@ -9754,6 +11001,11 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
pc = &OMP_CLAUSE_CHAIN (c);
}
+ if (grp_start_p
+ && OMP_CLAUSE_HAS_ITERATORS (*grp_start_p))
+ for (tree gc = *grp_start_p; gc; gc = OMP_CLAUSE_CHAIN (gc))
+ OMP_CLAUSE_ITERATORS (gc) = OMP_CLAUSE_ITERATORS (*grp_start_p);
+
if (reduction_seen < 0 && (ordered_seen || schedule_seen))
reduction_seen = -2;
@@ -9994,7 +11246,7 @@ finish_omp_clauses (tree clauses, enum c_omp_region_type ort)
if (processing_template_decl
&& !VAR_P (t) && TREE_CODE (t) != PARM_DECL)
break;
- if (finish_omp_reduction_clause (c, &need_default_ctor,
+ if (finish_omp_reduction_clause (c, ort, &need_default_ctor,
&need_dtor))
remove = true;
else
@@ -10467,6 +11719,8 @@ struct omp_target_walk_data
/* Local variables declared inside a BIND_EXPR, used to filter out such
variables when recording lambda_objects_accessed. */
hash_set<tree> local_decls;
+
+ omp_mapper_list<tree> *mappers;
};
/* Helper function of finish_omp_target_clauses, called via
@@ -10480,6 +11734,7 @@ finish_omp_target_clauses_r (tree *tp, int *walk_subtrees, void *ptr)
struct omp_target_walk_data *data = (struct omp_target_walk_data *) ptr;
tree current_object = data->current_object;
tree current_closure = data->current_closure;
+ omp_mapper_list<tree> *mlist = data->mappers;
/* References inside of these expression codes shouldn't incur any
form of mapping, so return early. */
@@ -10493,6 +11748,27 @@ finish_omp_target_clauses_r (tree *tp, int *walk_subtrees, void *ptr)
if (TREE_CODE (t) == OMP_CLAUSE)
return NULL_TREE;
+ if (!processing_template_decl)
+ {
+ tree aggr_type = NULL_TREE;
+
+ if (TREE_CODE (t) == COMPONENT_REF
+ && AGGREGATE_TYPE_P (TREE_TYPE (TREE_OPERAND (t, 0))))
+ aggr_type = TREE_TYPE (TREE_OPERAND (t, 0));
+ else if ((TREE_CODE (t) == VAR_DECL
+ || TREE_CODE (t) == PARM_DECL
+ || TREE_CODE (t) == RESULT_DECL)
+ && AGGREGATE_TYPE_P (TREE_TYPE (t)))
+ aggr_type = TREE_TYPE (t);
+
+ if (aggr_type)
+ {
+ tree mapper_fn = cxx_omp_mapper_lookup (NULL_TREE, aggr_type);
+ if (mapper_fn)
+ mlist->add_mapper (NULL_TREE, aggr_type, mapper_fn);
+ }
+ }
+
if (current_object)
{
tree this_expr = TREE_OPERAND (current_object, 0);
@@ -10595,10 +11871,48 @@ finish_omp_target_clauses (location_t loc, tree body, tree *clauses_ptr)
else
data.current_closure = NULL_TREE;
- cp_walk_tree_without_duplicates (&body, finish_omp_target_clauses_r, &data);
-
auto_vec<tree, 16> new_clauses;
+ if (!processing_template_decl)
+ {
+ hash_set<omp_name_type<tree> > seen_types;
+ auto_vec<tree> mapper_fns;
+ omp_mapper_list<tree> mlist (&seen_types, &mapper_fns);
+ data.mappers = &mlist;
+
+ cp_walk_tree_without_duplicates (&body, finish_omp_target_clauses_r,
+ &data);
+
+ unsigned int i;
+ tree mapper_fn;
+ FOR_EACH_VEC_ELT (mapper_fns, i, mapper_fn)
+ c_omp_find_nested_mappers (&mlist, mapper_fn);
+
+ FOR_EACH_VEC_ELT (mapper_fns, i, mapper_fn)
+ {
+ tree mapper = cxx_omp_extract_mapper_directive (mapper_fn);
+ if (mapper == error_mark_node)
+ continue;
+ tree mapper_name = OMP_DECLARE_MAPPER_ID (mapper);
+ tree decl = OMP_DECLARE_MAPPER_DECL (mapper);
+ if (BASELINK_P (mapper_fn))
+ mapper_fn = BASELINK_FUNCTIONS (mapper_fn);
+
+ tree c = build_omp_clause (loc, OMP_CLAUSE__MAPPER_BINDING_);
+ OMP_CLAUSE__MAPPER_BINDING__ID (c) = mapper_name;
+ OMP_CLAUSE__MAPPER_BINDING__DECL (c) = decl;
+ OMP_CLAUSE__MAPPER_BINDING__MAPPER (c) = mapper_fn;
+
+ new_clauses.safe_push (c);
+ }
+ }
+ else
+ {
+ data.mappers = NULL;
+ cp_walk_tree_without_duplicates (&body, finish_omp_target_clauses_r,
+ &data);
+ }
+
tree omp_target_this_expr = NULL_TREE;
tree *explicit_this_deref_map = NULL;
if (data.this_expr_accessed)
@@ -11847,6 +13161,299 @@ finish_omp_for_block (tree bind, tree omp_for)
return bind;
}
+/* Validate an OpenMP allocate directive, then add the ALLOC and ALIGN exprs
+   to the "omp allocate" attr of each decl found in VAR_LIST.  The value of
+   the attr is a TREE_LIST with ALLOC stored in its purpose member and ALIGN
+   stored in its value member.  ALLOC and ALIGN are exprs passed as arguments
+   to the allocator and align clauses of the directive.  VAR_LIST may be
+   NULL_TREE if there were errors during parsing.
+   #pragma omp allocate(VAR_LIST) allocator(ALLOC) align(ALIGN)
+
+   If processing_template_decl, a stmt of tree_code OMP_ALLOCATE is added to
+   the function instead.  LOC is used to initialize the node's location
+   member; this information is currently unused.  */
+
+void
+finish_omp_allocate (location_t loc, tree var_list, tree alloc, tree align)
+{
+  /* Common routine for modifying the "omp allocate" attribute.  This should
+     only be called once for each var, either after a diagnostic, or when we
+     are finished with the directive.  */
+  auto finalize_allocate_attr = [] (tree var, tree alloc, tree align)
+    {
+      gcc_assert (var != NULL_TREE && var != error_mark_node);
+
+      /* The attr was added in cp_parser_omp_allocate.  */
+      tree attr = lookup_attribute ("omp allocate", DECL_ATTRIBUTES (var));
+      gcc_assert (attr != NULL_TREE);
+
+      /* cp_parser_omp_allocate adds the location where the var was used as
+         an arg for diagnostics; it should still be untouched at this
+         point.  */
+      tree arg_loc = TREE_VALUE (attr);
+      gcc_assert (arg_loc != NULL_TREE && TREE_CODE (arg_loc) == NOP_EXPR);
+
+      /* We still need the location in case parsing hasn't finished yet; we
+         simply smuggle it through the chain member.  */
+      tree attr_value = tree_cons (alloc, align, arg_loc);
+      /* We can't modify the old "omp allocate" attr: substitution doesn't
+         know the attr is dependent so it isn't copied when substituting the
+         var.  We avoid making unnecessary copies by creating the final node
+         here.  */
+      DECL_ATTRIBUTES (var)
+        = tree_cons (get_identifier ("omp allocate"),
+                     attr_value,
+                     remove_attribute ("omp allocate",
+                                       DECL_ATTRIBUTES (var)));
+    };
+  /* The alloc/align clauses get marked with error_mark_node after an error
+     is reported to prevent duplicate diagnosis.  The same is done for the
+     var (TREE_PURPOSE) of any node that has an error; additionally the
+     "omp allocate" attr is marked so the middle end knows to skip it during
+     gimplification.  */
+
+  /* First pass: diagnose list items that can never be valid (reference
+     types, statics in implicit constexpr functions) and finalize their
+     attributes early.  */
+  for (tree vn = var_list; vn != NULL_TREE; vn = TREE_CHAIN (vn))
+    {
+      tree var = TREE_PURPOSE (vn);
+      bool var_has_error = false;
+      if (var == error_mark_node)
+        /* Early escape.  */;
+      else if (TYPE_REF_P (TREE_TYPE (var)))
+        {
+          auto_diagnostic_group d;
+          error_at (EXPR_LOCATION (TREE_VALUE (vn)),
+                    "variable %qD with reference type may not appear as a "
+                    "list item in an %<allocate%> directive", var);
+          inform (DECL_SOURCE_LOCATION (var), "%qD declared here", var);
+          var_has_error = true;
+        }
+      else if (TREE_STATIC (var) && var_in_maybe_constexpr_fn (var))
+        {
+          /* Unfortunately, until the first round of band-aids is applied to
+             make_rtl_for_nonlocal_decl we can't support static vars in
+             implicit constexpr functions in non-template contexts at all.
+             Technically, we could support cases inside templates, but it's
+             difficult to differentiate them here, and it would be confusing
+             to only allow the cases in templates.  */
+          auto_diagnostic_group d;
+          sorry_at (EXPR_LOCATION (TREE_VALUE (vn)),
+                    "static variable %qD is not supported in an %<allocate%> "
+                    "directive in an implicit constexpr function", var);
+          inform (DECL_SOURCE_LOCATION (var), "%qD declared here", var);
+          var_has_error = true;
+        }
+
+      if (var_has_error)
+        {
+          /* Mark the node so we don't need to lookup the attribute every
+             time we check if we need to skip a diagnostic.  */
+          TREE_PURPOSE (vn) = error_mark_node;
+          /* We won't have access to the var after it's cleared from the
+             node; finalize it early.
+             We avoid needing to handle error_mark_node in
+             varpool_node::finalize_decl if we make align a NULL_TREE.  */
+          finalize_allocate_attr (var, error_mark_node, NULL_TREE);
+        }
+    }
+  /* Unfortunately, we can't easily diagnose use of a parameter in the alloc
+     or align expr before instantiation.  For a type dependent expr
+     potential_constant_expression must return true even if the expr contains
+     a parameter.  The align and alloc clause's exprs must be of type
+     integer/omp_allocator_handle_t respectively; in theory these extra
+     constraints would let us diagnose some cases during parsing of a
+     template declaration.  The following case is invalid.
+       void f0(auto p) {
+         int a;
+         #pragma omp allocate(a) align(p)
+       }
+     We know that this can't be valid because expr p must be an integer type,
+     not an empty class type.  On the other hand...
+       constexpr int g(auto) { return 32; }
+       void f1(auto p) {
+         int a;
+         #pragma omp allocate(a) align(g (p))
+       }
+     This is valid code if p is an empty class type, so we can't just
+     disqualify an expression because it contains a local var or parameter.
+
+     In short, we don't jump through hoops to try to diagnose cases that are
+     possible to be proven ill-formed, such as with f1 above; we just
+     diagnose it upon instantiation.  Perhaps this can be revisited, but it
+     doesn't seem to be worth it.  It will complicate the error handling
+     code here, and has a risk of breaking valid code like the f1 case
+     above.
+
+     See PR91953 and r10-6416-g8fda2c274ac66d60c1dfc1349e9efb4e8c2a3580 for
+     more information.
+
+     There are also funny cases like const int that are considered constant
+     expressions which we have to accept for correctness, but that only
+     applies to variables, not parameters.  */
+  if (align && align != error_mark_node)
+    {
+      /* (OpenMP 5.1, 181:17-18) alignment is a constant positive integer
+         expression with a value that is a power of two.  */
+      location_t align_loc = EXPR_LOCATION (align);
+      if (!type_dependent_expression_p (align))
+        {
+          /* Might we want to support implicitly convertible to int?  Is
+             that forbidden by the spec?  */
+          if (!INTEGRAL_TYPE_P (TREE_TYPE (align)))
+            {
+              /* Just use the same error as the value checked error; there
+                 is little value in fragmenting the wording.  */
+              error_at (align_loc,
+                        "%<align%> clause argument needs to be positive "
+                        "constant power of two integer expression");
+              /* Don't repeat the error again.  */
+              align = error_mark_node;
+            }
+        }
+      if (align != error_mark_node && !value_dependent_expression_p (align))
+        {
+          align = fold_non_dependent_expr (align);
+          /* Relies on short-circuiting: tree_int_cst_sgn/integer_pow2p are
+             only reached when the folded expr is a constant.  */
+          if (!TREE_CONSTANT (align)
+              || tree_int_cst_sgn (align) != 1
+              || !integer_pow2p (align))
+            {
+              error_at (align_loc,
+                        "%<align%> clause argument needs to be positive "
+                        "constant power of two integer expression");
+              align = error_mark_node;
+            }
+        }
+    }
+
+  if (alloc == NULL_TREE)
+    {
+      /* No allocator clause: that is only an error for static list items,
+         which require one.  */
+      for (tree node = var_list; node != NULL_TREE; node = TREE_CHAIN (node))
+        {
+          tree var = TREE_PURPOSE (node);
+          if (var != error_mark_node && TREE_STATIC (var))
+            {
+              auto_diagnostic_group d;
+              error_at (EXPR_LOCATION (TREE_VALUE (node)),
+                        "%<allocator%> clause required for "
+                        "static variable %qD", var);
+              inform (DECL_SOURCE_LOCATION (var), "%qD declared here", var);
+              alloc = error_mark_node;
+            }
+        }
+    }
+  else if (alloc != error_mark_node)
+    {
+      location_t alloc_loc = EXPR_LOCATION (alloc);
+      if (!type_dependent_expression_p (alloc))
+        {
+          /* The allocator expr must have the enum type named
+             omp_allocator_handle_t.  */
+          tree orig_type = TYPE_MAIN_VARIANT (TREE_TYPE (alloc));
+          if (!INTEGRAL_TYPE_P (TREE_TYPE (alloc))
+              || TREE_CODE (orig_type) != ENUMERAL_TYPE
+              || TYPE_NAME (orig_type) == NULL_TREE
+              || (DECL_NAME (TYPE_NAME (orig_type))
+                  != get_identifier ("omp_allocator_handle_t")))
+            {
+              error_at (alloc_loc,
+                        "%<allocator%> clause expression has type "
+                        "%qT rather than %<omp_allocator_handle_t%>",
+                        TREE_TYPE (alloc));
+              alloc = error_mark_node;
+            }
+        }
+      if (alloc != error_mark_node && !value_dependent_expression_p (alloc))
+        {
+          /* It is unclear if this is required as fold_non_dependent_expr
+             appears to correctly return the original expr if it can't be
+             folded.  Additionally, should we be passing tf_none?  */
+          alloc = maybe_fold_non_dependent_expr (alloc);
+          const bool constant_predefined_allocator = [&] ()
+            {
+              if (!TREE_CONSTANT (alloc))
+                return false;
+              wi::tree_to_widest_ref alloc_value = wi::to_widest (alloc);
+              /* MAX is inclusive.  */
+              return (alloc_value >= 1
+                      && alloc_value <= GOMP_OMP_PREDEF_ALLOC_MAX)
+                     || (alloc_value >= GOMP_OMPX_PREDEF_ALLOC_MIN
+                         && alloc_value <= GOMP_OMPX_PREDEF_ALLOC_MAX);
+            } (); /* IILE.  */
+          if (!constant_predefined_allocator)
+            {
+              for (tree vn = var_list; vn != NULL_TREE; vn = TREE_CHAIN (vn))
+                {
+                  tree var = TREE_PURPOSE (vn);
+                  if (var != error_mark_node && TREE_STATIC (var))
+                    {
+                      auto_diagnostic_group d;
+                      /* Perhaps we should only report a single error and
+                         inform for each static var?  */
+                      error_at (alloc_loc,
+                                "%<allocator%> clause requires a predefined "
+                                "allocator as %qD is static", var);
+                      inform (DECL_SOURCE_LOCATION (var),
+                              "%qD declared here", var);
+                      alloc = error_mark_node;
+                    }
+                }
+            }
+        }
+    }
+  /* Return true iff PREDICATE holds for the var of any node in
+     VAR_LIST.  */
+  auto any_of_vars = [&var_list] (bool (*predicate)(tree))
+    {
+      for (tree vn = var_list; vn != NULL_TREE; vn = TREE_CHAIN (vn))
+        if (predicate (TREE_PURPOSE (vn)))
+          return true;
+      return false;
+    };
+
+  /* Even if there have been errors we still want to save our progress so we
+     don't miss any potential diagnostics.
+     Technically we don't have to do this if there were errors and alloc,
+     align, and all the vars are substituted, but it's more work to check
+     for that than to just add the stmt.  If it were viable to finalize
+     everything before instantiation is complete it might be worth it, but
+     we can't do that because substitution has to eagerly copy nodes.  */
+  if (processing_template_decl)
+    {
+      tree allocate_stmt = make_node (OMP_ALLOCATE);
+      /* Pretty sure we don't want side effects on this; it also probably
+         doesn't matter but lets avoid unnecessary noise.  */
+      TREE_SIDE_EFFECTS (allocate_stmt) = 0;
+      OMP_ALLOCATE_LOCATION (allocate_stmt) = loc;
+      OMP_ALLOCATE_VARS (allocate_stmt) = var_list;
+      OMP_ALLOCATE_ALLOCATOR (allocate_stmt) = alloc;
+      OMP_ALLOCATE_ALIGN (allocate_stmt) = align;
+      add_stmt (allocate_stmt);
+      return;
+    }
+  else if (alloc == error_mark_node || align == error_mark_node || !var_list
+           || any_of_vars ([] (tree var) { return var == error_mark_node; }))
+    {
+      /* The directive is fully instantiated; however, errors were
+         diagnosed.  We can't remove the "omp allocate" attr just in case we
+         are still parsing a function; instead, we mark it.  */
+      for (tree vn = var_list; vn != NULL_TREE; vn = TREE_CHAIN (vn))
+        {
+          tree var = TREE_PURPOSE (vn);
+          /* If the var decl is marked, it has already been finalized.  */
+          if (var != error_mark_node)
+            /* We avoid needing to handle error_mark_node in
+               varpool_node::finalize_decl if we make align a NULL_TREE.  */
+            finalize_allocate_attr (var, error_mark_node, NULL_TREE);
+        }
+      return;
+    }
+
+  /* We have no errors and everything is fully instantiated; we can finally
+     finish the attribute on each var_decl.  */
+  gcc_assert (!processing_template_decl
+              && alloc != error_mark_node
+              && align != error_mark_node
+              && var_list != NULL_TREE);
+
+  for (tree vn = var_list; vn != NULL_TREE; vn = TREE_CHAIN (vn))
+    finalize_allocate_attr (TREE_PURPOSE (vn), alloc, align);
+}
+
void
finish_omp_atomic (location_t loc, enum tree_code code, enum tree_code opcode,
tree lhs, tree rhs, tree v, tree lhs1, tree rhs1, tree r,
@@ -14053,6 +15660,45 @@ cp_build_bit_cast (location_t loc, tree type, tree arg,
return ret;
}
+/* Build an OpenMP array-shape cast of ARG to TYPE at location LOC.
+   TYPE is the target type written in the array-shape operator and ARG the
+   expression being cast; COMPLAIN carries the usual tsubst diagnostic
+   flags.  Returns a VIEW_CONVERT_EXPR in non-dependent contexts,
+   error_mark_node on error, or an OMP_ARRAYSHAPE_CAST_EXPR placeholder for
+   later substitution when TYPE or ARG is still dependent.  */
+
+tree
+cp_build_omp_arrayshape_cast (location_t loc, tree type, tree arg,
+                              tsubst_flags_t complain)
+{
+  if (error_operand_p (type))
+    return error_mark_node;
+
+  /* The target type must be complete unless it is still dependent.  */
+  if (!dependent_type_p (type)
+      && !complete_type_or_maybe_complain (type, NULL_TREE, complain))
+    return error_mark_node;
+
+  if (error_operand_p (arg))
+    return error_mark_node;
+
+  if (!type_dependent_expression_p (arg) && !dependent_type_p (type))
+    {
+      if (!trivially_copyable_p (TREE_TYPE (arg)))
+        {
+          error_at (cp_expr_loc_or_loc (arg, loc),
+                    "OpenMP array shape source type %qT "
+                    "is not trivially copyable", TREE_TYPE (arg));
+          return error_mark_node;
+        }
+
+      /* A pointer to multi-dimensional array conversion isn't normally
+         allowed, but we force it here for array shape operators by creating
+         the node directly.  We also want to avoid any overloaded conversions
+         the user might have defined, not that there are likely to be any.  */
+      return build1_loc (loc, VIEW_CONVERT_EXPR, type, arg);
+    }
+
+  /* Dependent case: build a placeholder to be processed at
+     instantiation time.  */
+  tree ret = build_min (OMP_ARRAYSHAPE_CAST_EXPR, type, arg);
+  SET_EXPR_LOCATION (ret, loc);
+
+  return ret;
+}
+
/* Diagnose invalid #pragma GCC unroll argument and adjust
it if needed. */