Diffstat (limited to 'libgfortran')
-rw-r--r--  libgfortran/ChangeLog    |   34
-rw-r--r--  libgfortran/caf/libcaf.h |   91
-rw-r--r--  libgfortran/caf/mpi.c    |    3
-rw-r--r--  libgfortran/caf/single.c | 1755
4 files changed, 1834 insertions, 49 deletions
diff --git a/libgfortran/ChangeLog b/libgfortran/ChangeLog
index 6454eae..cb12545 100644
--- a/libgfortran/ChangeLog
+++ b/libgfortran/ChangeLog
@@ -1,3 +1,37 @@
+2016-09-19 Andre Vehreschild <vehre@gcc.gnu.org>
+
+ * caf/libcaf.h: Add caf_reference_type.
+ * caf/mpi.c: Adapted signature of caf_register().
+ * caf/single.c (struct caf_single_token): Added to keep the pointer
+ to the memory registered and array descriptor.
+ (caf_internal_error): Added convenience interface.
+ (_gfortran_caf_register): Adapted to work with caf_single_token and
+ return memory in the array descriptor.
+ (_gfortran_caf_deregister): Same.
+ (assign_char1_from_char4): Fixed style.
+ (convert_type): Fixed incorrect conversion.
+ (_gfortran_caf_get): Adapted to work with caf_single_token.
+ (_gfortran_caf_send): Same.
+ (_gfortran_caf_sendget): Same.
+ (copy_data): Added to stop repeating it in all _by_ref functions.
+ (get_for_ref): Recursive getting of coarray data using a chain of
+ references.
+ (_gfortran_caf_get_by_ref): Driver for computing the memory needed for
+ the get and checking properties of the operation.
+ (send_by_ref): Same as get_for_ref but for sending data.
+ (_gfortran_caf_send_by_ref): Same as caf_get_by_ref, but for sending.
+ (_gfortran_caf_sendget_by_ref): Uses get_by_ref and send_by_ref to
+ implement sendget for reference chains.
+ (_gfortran_caf_atomic_define): Adapted to work with caf_single_token.
+ (_gfortran_caf_atomic_ref): Likewise.
+ (_gfortran_caf_atomic_cas): Likewise.
+ (_gfortran_caf_atomic_op): Likewise.
+ (_gfortran_caf_event_post): Likewise.
+ (_gfortran_caf_event_wait): Likewise.
+ (_gfortran_caf_event_query): Likewise.
+ (_gfortran_caf_lock): Likewise.
+ (_gfortran_caf_unlock): Likewise.
+
2016-09-09 Steven G. Kargl <kargl@gcc.gnu.org>
PR fortran/77507
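As the new ChangeLog entry above notes, caf_register () no longer hands the allocated memory back through its return value; the base address is now published via the passed array descriptor. A minimal usage sketch of the new convention (illustrative only, not part of the patch; the helper name register_example and the element count are placeholders, and libcaf.h is assumed to be included):

#include "libcaf.h"

/* Illustrative sketch, not part of the patch: register a 100-element coarray
   of doubles.  The base address comes back through the descriptor; the token
   identifies the coarray afterwards.  */
static double *
register_example (gfc_descriptor_t *desc, caf_token_t *token, int *stat)
{
  _gfortran_caf_register (100 * sizeof (double), CAF_REGTYPE_COARRAY_ALLOC,
                          token, desc, stat, NULL, 0);
  return (stat && *stat) ? NULL : (double *) GFC_DESCRIPTOR_DATA (desc);
}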
diff --git a/libgfortran/caf/libcaf.h b/libgfortran/caf/libcaf.h
index 863b5b4..aad0f62 100644
--- a/libgfortran/caf/libcaf.h
+++ b/libgfortran/caf/libcaf.h
@@ -90,6 +90,81 @@ typedef struct caf_vector_t {
}
caf_vector_t;
+typedef enum caf_ref_type_t {
+ /* Reference a component of a derived type, either a regular one or an
+ allocatable or pointer type. For regular ones, idx in caf_reference_t is
+ set to -1. */
+ CAF_REF_COMPONENT,
+ /* Reference an allocatable array. */
+ CAF_REF_ARRAY,
+ /* Reference a non-allocatable/non-pointer array. */
+ CAF_REF_STATIC_ARRAY
+} caf_ref_type_t;
+
+typedef enum caf_array_ref_t {
+ /* No array ref. This terminates the array ref. */
+ CAF_ARR_REF_NONE = 0,
+ /* Reference array elements given by a vector. Only for this mode
+ caf_reference_t.u.a.dim[i].v is valid. */
+ CAF_ARR_REF_VECTOR,
+ /* A full array ref (:). */
+ CAF_ARR_REF_FULL,
+ /* Reference a range of elements given by start, end and stride. */
+ CAF_ARR_REF_RANGE,
+ /* Only a single item is referenced given in the start member. */
+ CAF_ARR_REF_SINGLE,
+ /* An array ref of the kind (i:), where i is an arbitrary valid index in the
+ array. The index i is given in the start member. */
+ CAF_ARR_REF_OPEN_END,
+ /* An array ref of the kind (:i), where the lower bound of the array ref
+ is given by the remote side. The index i is given in the end member. */
+ CAF_ARR_REF_OPEN_START
+} caf_array_ref_t;
+
+/* References to remote components of a derived type. */
+typedef struct caf_reference_t {
+ /* A pointer to the next ref or NULL. */
+ struct caf_reference_t *next;
+ /* The type of the reference. */
+ /* caf_ref_type_t, replaced by int to allow specification in Fortran FE. */
+ int type;
+ /* The size of an item referenced in bytes, i.e., in an array ref this is
+ the factor by which to advance the array pointer to get to the next item.
+ For component refs this gives just the size of the element referenced. */
+ size_t item_size;
+ union {
+ struct {
+ /* The offset (in bytes) of the component in the derived type. */
+ ptrdiff_t offset;
+ /* The offset (in bytes) to the caf_token associated with this
+ component. NULL, when not allocatable/pointer ref. */
+ ptrdiff_t caf_token_offset;
+ } c;
+ struct {
+ /* The mode of the array ref. See CAF_ARR_REF_*. */
+ /* caf_array_ref_t, replaced by unsigned char to allow specification in
+ Fortran FE. */
+ unsigned char mode[GFC_MAX_DIMENSIONS];
+ /* The type of a static array. Unset for arrays with descriptors. */
+ int static_array_type;
+ /* Subscript refs (s) or vector refs (v). */
+ union {
+ struct {
+ /* The start and end boundary of the ref and the stride. */
+ index_type start, end, stride;
+ } s;
+ struct {
+ /* nvec entries of the integer kind given below, specifying the elements to reference. */
+ void *vector;
+ /* The number of entries in vector. */
+ size_t nvec;
+ /* The integer kind used for the elements in vector. */
+ int kind;
+ } v;
+ } dim[GFC_MAX_DIMENSIONS];
+ } a;
+ } u;
+} caf_reference_t;
void _gfortran_caf_init (int *, char ***);
void _gfortran_caf_finalize (void);
@@ -97,8 +172,8 @@ void _gfortran_caf_finalize (void);
int _gfortran_caf_this_image (int);
int _gfortran_caf_num_images (int, int);
-void *_gfortran_caf_register (size_t, caf_register_t, caf_token_t *, int *,
- char *, int);
+void _gfortran_caf_register (size_t, caf_register_t, caf_token_t *,
+ gfc_descriptor_t *, int *, char *, int);
void _gfortran_caf_deregister (caf_token_t *, int *, char *, int);
void _gfortran_caf_sync_all (int *, char *, int);
@@ -130,6 +205,18 @@ void _gfortran_caf_sendget (caf_token_t, size_t, int, gfc_descriptor_t *,
caf_vector_t *, caf_token_t, size_t, int,
gfc_descriptor_t *, caf_vector_t *, int, int, bool);
+void _gfortran_caf_get_by_ref (caf_token_t token, int image_idx,
+ gfc_descriptor_t *dst, caf_reference_t *refs, int dst_kind,
+ int src_kind, bool may_require_tmp, bool dst_reallocatable, int *stat);
+void _gfortran_caf_send_by_ref (caf_token_t token, int image_index,
+ gfc_descriptor_t *src, caf_reference_t *refs, int dst_kind,
+ int src_kind, bool may_require_tmp, bool dst_reallocatable, int *stat);
+void _gfortran_caf_sendget_by_ref (
+ caf_token_t dst_token, int dst_image_index, caf_reference_t *dst_refs,
+ caf_token_t src_token, int src_image_index, caf_reference_t *src_refs,
+ int dst_kind, int src_kind, bool may_require_tmp, int *dst_stat,
+ int *src_stat);
+
void _gfortran_caf_atomic_define (caf_token_t, size_t, int, void *, int *,
int, int);
void _gfortran_caf_atomic_ref (caf_token_t, size_t, int, void *, int *,
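The caf_reference_t chain introduced above describes a remote designator link by link: component refs carry byte offsets into the derived type, array refs carry one mode per dimension and are terminated by CAF_ARR_REF_NONE. A hedged sketch of how a front end might populate a two-link chain for an access of the form obj[img]%comp(:) (illustrative only and not part of the patch; the helper name build_chain, the offsets and the element size are placeholders, and libcaf.h is assumed to be included):

#include <stddef.h>
#include "libcaf.h"

/* Illustrative sketch, not part of the patch.  */
static void
build_chain (caf_reference_t *comp_ref, caf_reference_t *arr_ref,
             ptrdiff_t comp_offset, ptrdiff_t token_offset, size_t elem_size)
{
  /* Link 1: select the allocatable component inside the derived type.  */
  comp_ref->next = arr_ref;
  comp_ref->type = CAF_REF_COMPONENT;
  comp_ref->item_size = elem_size;
  comp_ref->u.c.offset = comp_offset;
  comp_ref->u.c.caf_token_offset = token_offset;

  /* Link 2: a full section (:) over the component's first dimension; the
     CAF_ARR_REF_NONE entry terminates the dimension list.  */
  arr_ref->next = NULL;
  arr_ref->type = CAF_REF_ARRAY;
  arr_ref->item_size = elem_size;
  arr_ref->u.a.mode[0] = CAF_ARR_REF_FULL;
  arr_ref->u.a.dim[0].s.stride = 1;
  arr_ref->u.a.mode[1] = CAF_ARR_REF_NONE;
}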
diff --git a/libgfortran/caf/mpi.c b/libgfortran/caf/mpi.c
index d7a21fe..1e9a477 100644
--- a/libgfortran/caf/mpi.c
+++ b/libgfortran/caf/mpi.c
@@ -131,7 +131,8 @@ _gfortran_caf_num_images (int distance __attribute__ ((unused)),
void *
_gfortran_caf_register (size_t size, caf_register_t type, caf_token_t *token,
- int *stat, char *errmsg, int errmsg_len)
+ int *stat, char *errmsg, int errmsg_len,
+ int num_alloc_comps __attribute__ ((unused)))
{
void *local;
int err;
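The single.c changes below repeatedly need the number of elements a section lb:ub:stride selects; the new COMPUTE_NUM_ITEMS macro encapsulates that arithmetic. A standalone sketch of the same computation (not part of the patch; the function name num_items is a placeholder), which may help when reading the loops that follow:

#include <stdlib.h>

/* Illustrative sketch, not part of the patch: number of elements selected by
   lb:ub:stride, or 0 for an empty section.  Take the span first, then a
   ceiling division by the absolute stride.  */
static long
num_items (long stride, long lb, long ub)
{
  long span = stride > 0 ? ub + 1 - lb : lb + 1 - ub;
  if (span <= 0 || stride == 0)
    return 0;
  return 1 + (span - 1) / labs (stride);
}

/* Example: num_items (2, 1, 10) == 5, i.e. the elements 1, 3, 5, 7, 9.  */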
diff --git a/libgfortran/caf/single.c b/libgfortran/caf/single.c
index 21916d3..c472446 100644
--- a/libgfortran/caf/single.c
+++ b/libgfortran/caf/single.c
@@ -33,8 +33,21 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
/* Define GFC_CAF_CHECK to enable run-time checking. */
/* #define GFC_CAF_CHECK 1 */
-typedef void* single_token_t;
-#define TOKEN(X) ((single_token_t) (X))
+struct caf_single_token
+{
+ /* The pointer to the memory registered. For arrays this is the data member
+ in the descriptor. For components it's the pure data pointer. */
+ void *memptr;
+ /* The descriptor when this token is associated to an allocatable array. */
+ gfc_descriptor_t *desc;
+ /* Set when the caf lib has allocated the memory in memptr and is responsible
+ for freeing it on deregister. */
+ bool owning_memory;
+};
+typedef struct caf_single_token *caf_single_token_t;
+
+#define TOKEN(X) ((caf_single_token_t) (X))
+#define MEMTOK(X) ((caf_single_token_t) (X))->memptr
/* Single-image implementation of the CAF library.
 Note: For performance reasons -fcoarray=single should be used
@@ -43,7 +56,6 @@ typedef void* single_token_t;
/* Global variables. */
caf_static_t *caf_static_list = NULL;
-
/* Keep in sync with mpi.c. */
static void
caf_runtime_error (const char *message, ...)
@@ -59,6 +71,30 @@ caf_runtime_error (const char *message, ...)
exit (EXIT_FAILURE);
}
+/* Error handling is similar every time. */
+static void
+caf_internal_error (const char *msg, int *stat, char *errmsg,
+ int errmsg_len, ...)
+{
+ va_list args;
+ va_start (args, errmsg_len);
+ if (stat)
+ {
+ *stat = 1;
+ if (errmsg_len > 0)
+ {
+ size_t len = vsnprintf (errmsg, errmsg_len, msg, args);
+ if ((size_t)errmsg_len > len)
+ memset (&errmsg[len], ' ', errmsg_len - len);
+ }
+ va_end (args);
+ return;
+ }
+ else
+ caf_runtime_error (msg, args);
+ va_end (args);
+}
+
+
void
_gfortran_caf_init (int *argc __attribute__ ((unused)),
char ***argv __attribute__ ((unused)))
@@ -94,11 +130,14 @@ _gfortran_caf_num_images (int distance __attribute__ ((unused)),
}
-void *
+void
_gfortran_caf_register (size_t size, caf_register_t type, caf_token_t *token,
- int *stat, char *errmsg, int errmsg_len)
+ gfc_descriptor_t *data, int *stat, char *errmsg,
+ int errmsg_len)
{
+ const char alloc_fail_msg[] = "Failed to allocate coarray";
void *local;
+ caf_single_token_t single_token;
if (type == CAF_REGTYPE_LOCK_STATIC || type == CAF_REGTYPE_LOCK_ALLOC
|| type == CAF_REGTYPE_CRITICAL || type == CAF_REGTYPE_EVENT_STATIC
@@ -106,29 +145,19 @@ _gfortran_caf_register (size_t size, caf_register_t type, caf_token_t *token,
local = calloc (size, sizeof (bool));
else
local = malloc (size);
- *token = malloc (sizeof (single_token_t));
+ *token = malloc (sizeof (struct caf_single_token));
- if (unlikely (local == NULL || token == NULL))
+ if (unlikely (local == NULL || *token == NULL))
{
- const char msg[] = "Failed to allocate coarray";
- if (stat)
- {
- *stat = 1;
- if (errmsg_len > 0)
- {
- int len = ((int) sizeof (msg) > errmsg_len) ? errmsg_len
- : (int) sizeof (msg);
- memcpy (errmsg, msg, len);
- if (errmsg_len > len)
- memset (&errmsg[len], ' ', errmsg_len-len);
- }
- return NULL;
- }
- else
- caf_runtime_error (msg);
+ caf_internal_error (alloc_fail_msg, stat, errmsg, errmsg_len);
+ return;
}
- *token = local;
+ single_token = TOKEN (*token);
+ single_token->memptr = local;
+ single_token->owning_memory = true;
+ single_token->desc = GFC_DESCRIPTOR_RANK (data) > 0 ? data : NULL;
+
if (stat)
*stat = 0;
@@ -142,7 +171,7 @@ _gfortran_caf_register (size_t size, caf_register_t type, caf_token_t *token,
tmp->token = *token;
caf_static_list = tmp;
}
- return local;
+ GFC_DESCRIPTOR_DATA (data) = local;
}
@@ -151,7 +180,12 @@ _gfortran_caf_deregister (caf_token_t *token, int *stat,
char *errmsg __attribute__ ((unused)),
int errmsg_len __attribute__ ((unused)))
{
- free (TOKEN(*token));
+ caf_single_token_t single_token = TOKEN (*token);
+
+ if (single_token->owning_memory && single_token->memptr)
+ free (single_token->memptr);
+
+ free (TOKEN (*token));
if (stat)
*stat = 0;
@@ -322,7 +356,7 @@ assign_char1_from_char4 (size_t dst_size, size_t src_size, unsigned char *dst,
for (i = 0; i < n; ++i)
dst[i] = src[i] > UINT8_MAX ? (unsigned char) '?' : (unsigned char) src[i];
if (dst_size > n)
- memset(&dst[n], ' ', dst_size - n);
+ memset (&dst[n], ' ', dst_size - n);
}
@@ -465,7 +499,7 @@ convert_type (void *dst, int dst_type, int dst_kind, void *src, int src_type,
}
else
goto error;
- break;
+ return;
case BT_REAL:
if (src_type == BT_INTEGER)
{
@@ -518,7 +552,7 @@ convert_type (void *dst, int dst_type, int dst_kind, void *src, int src_type,
else
goto error;
}
- break;
+ return;
case BT_COMPLEX:
if (src_type == BT_INTEGER)
{
@@ -573,7 +607,7 @@ convert_type (void *dst, int dst_type, int dst_kind, void *src, int src_type,
}
else
goto error;
- break;
+ return;
default:
goto error;
}
@@ -608,7 +642,7 @@ _gfortran_caf_get (caf_token_t token, size_t offset,
if (rank == 0)
{
- void *sr = (void *) ((char *) TOKEN (token) + offset);
+ void *sr = (void *) ((char *) MEMTOK (token) + offset);
if (GFC_DESCRIPTOR_TYPE (dest) == GFC_DESCRIPTOR_TYPE (src)
&& dst_kind == src_kind)
{
@@ -669,7 +703,7 @@ _gfortran_caf_get (caf_token_t token, size_t offset,
stride = src->dim[j]._stride;
}
array_offset_sr += (i / extent) * src->dim[rank-1]._stride;
- void *sr = (void *)((char *) TOKEN (token) + offset
+ void *sr = (void *)((char *) MEMTOK (token) + offset
+ array_offset_sr*GFC_DESCRIPTOR_SIZE (src));
memcpy ((void *) ((char *) tmp + array_offset_dst), sr, src_size);
array_offset_dst += src_size;
@@ -754,7 +788,7 @@ _gfortran_caf_get (caf_token_t token, size_t offset,
stride = src->dim[j]._stride;
}
array_offset_sr += (i / extent) * src->dim[rank-1]._stride;
- void *sr = (void *)((char *) TOKEN (token) + offset
+ void *sr = (void *)((char *) MEMTOK (token) + offset
+ array_offset_sr*GFC_DESCRIPTOR_SIZE (src));
if (GFC_DESCRIPTOR_TYPE (dest) == GFC_DESCRIPTOR_TYPE (src)
@@ -801,7 +835,7 @@ _gfortran_caf_send (caf_token_t token, size_t offset,
if (rank == 0)
{
- void *dst = (void *) ((char *) TOKEN (token) + offset);
+ void *dst = (void *) ((char *) MEMTOK (token) + offset);
if (GFC_DESCRIPTOR_TYPE (dest) == GFC_DESCRIPTOR_TYPE (src)
&& dst_kind == src_kind)
{
@@ -893,7 +927,7 @@ _gfortran_caf_send (caf_token_t token, size_t offset,
stride = dest->dim[j]._stride;
}
array_offset_dst += (i / extent) * dest->dim[rank-1]._stride;
- void *dst = (void *)((char *) TOKEN (token) + offset
+ void *dst = (void *)((char *) MEMTOK (token) + offset
+ array_offset_dst*GFC_DESCRIPTOR_SIZE (dest));
void *sr = tmp + array_offset_sr;
if (GFC_DESCRIPTOR_TYPE (dest) == GFC_DESCRIPTOR_TYPE (src)
@@ -941,7 +975,7 @@ _gfortran_caf_send (caf_token_t token, size_t offset,
stride = dest->dim[j]._stride;
}
array_offset_dst += (i / extent) * dest->dim[rank-1]._stride;
- void *dst = (void *)((char *) TOKEN (token) + offset
+ void *dst = (void *)((char *) MEMTOK (token) + offset
+ array_offset_dst*GFC_DESCRIPTOR_SIZE (dest));
void *sr;
if (GFC_DESCRIPTOR_RANK (src) != 0)
@@ -1004,13 +1038,1639 @@ _gfortran_caf_sendget (caf_token_t dst_token, size_t dst_offset,
/* For a single image, src->base_addr should be the same as src_token + offset
but to play safe, we do it properly. */
void *src_base = GFC_DESCRIPTOR_DATA (src);
- GFC_DESCRIPTOR_DATA (src) = (void *) ((char *) TOKEN (src_token) + src_offset);
+ GFC_DESCRIPTOR_DATA (src) = (void *) ((char *) MEMTOK (src_token)
+ + src_offset);
_gfortran_caf_send (dst_token, dst_offset, dst_image_index, dest, dst_vector,
src, dst_kind, src_kind, may_require_tmp, NULL);
GFC_DESCRIPTOR_DATA (src) = src_base;
}
+/* Emitted when a theoretically unreachable part is reached. */
+const char unreachable[] = "Fatal error: unreachable alternative found.\n";
+
+
+static void
+copy_data (void *ds, void *sr, int dst_type, int src_type,
+ int dst_kind, int src_kind, size_t dst_size, size_t src_size,
+ size_t num, int *stat)
+{
+ size_t k;
+ if (dst_type == src_type && dst_kind == src_kind)
+ {
+ memmove (ds, sr, (dst_size > src_size ? src_size : dst_size) * num);
+ if ((dst_type == BT_CHARACTER || src_type == BT_CHARACTER)
+ && dst_size > src_size)
+ {
+ if (dst_kind == 1)
+ memset ((void*)(char*) ds + src_size, ' ', dst_size-src_size);
+ else /* dst_kind == 4. */
+ for (k = src_size/4; k < dst_size/4; k++)
+ ((int32_t*) ds)[k] = (int32_t) ' ';
+ }
+ }
+ else if (dst_type == BT_CHARACTER && dst_kind == 1)
+ assign_char1_from_char4 (dst_size, src_size, ds, sr);
+ else if (dst_type == BT_CHARACTER)
+ assign_char4_from_char1 (dst_size, src_size, ds, sr);
+ else
+ for (k = 0; k < num; ++k)
+ {
+ convert_type (ds, dst_type, dst_kind, sr, src_type, src_kind, stat);
+ ds += dst_size;
+ sr += src_size;
+ }
+}
+
+
+#define COMPUTE_NUM_ITEMS(num, stride, lb, ub) \
+ do { \
+ index_type abs_stride = (stride) > 0 ? (stride) : -(stride); \
+ num = (stride) > 0 ? (ub) + 1 - (lb) : (lb) + 1 - (ub); \
+ if (num <= 0 || abs_stride < 1) return; \
+ num = (abs_stride > 1) ? (1 + (num - 1) / abs_stride) : num; \
+ } while (0)
+
+
+static void
+get_for_ref (caf_reference_t *ref, size_t *i, size_t *dst_index,
+ caf_single_token_t single_token, gfc_descriptor_t *dst,
+ gfc_descriptor_t *src, void *ds, void *sr,
+ int dst_kind, int src_kind, size_t dst_dim, size_t src_dim,
+ size_t num, int *stat)
+{
+ ptrdiff_t extent_src = 1, array_offset_src = 0, stride_src;
+ size_t next_dst_dim;
+
+ if (unlikely (ref == NULL))
+ /* Maybe we should issue an error here, because this case should not
+ occur. */
+ return;
+
+ if (ref->next == NULL)
+ {
+ size_t dst_size = GFC_DESCRIPTOR_SIZE (dst);
+ ptrdiff_t array_offset_dst = 0;
+ size_t dst_rank = GFC_DESCRIPTOR_RANK (dst);
+ int src_type = -1;
+
+ switch (ref->type)
+ {
+ case CAF_REF_COMPONENT:
+ /* Because the token is always registered after the component, its
+ offset is always greater than zero. */
+ if (ref->u.c.caf_token_offset > 0)
+ copy_data (ds, *(void **)(sr + ref->u.c.offset),
+ GFC_DESCRIPTOR_TYPE (dst), GFC_DESCRIPTOR_TYPE (dst),
+ dst_kind, src_kind, dst_size, ref->item_size, 1, stat);
+ else
+ copy_data (ds, sr + ref->u.c.offset,
+ GFC_DESCRIPTOR_TYPE (dst), GFC_DESCRIPTOR_TYPE (src),
+ dst_kind, src_kind, dst_size, ref->item_size, 1, stat);
+ ++(*i);
+ return;
+ case CAF_REF_STATIC_ARRAY:
+ src_type = ref->u.a.static_array_type;
+ /* Intentionally fall through. */
+ case CAF_REF_ARRAY:
+ if (ref->u.a.mode[src_dim] == CAF_ARR_REF_NONE)
+ {
+ for (size_t d = 0; d < dst_rank; ++d)
+ array_offset_dst += dst_index[d];
+ copy_data (ds + array_offset_dst * dst_size, sr,
+ GFC_DESCRIPTOR_TYPE (dst),
+ src_type == -1 ? GFC_DESCRIPTOR_TYPE (src) : src_type,
+ dst_kind, src_kind, dst_size, ref->item_size, num,
+ stat);
+ *i += num;
+ return;
+ }
+ break;
+ default:
+ caf_runtime_error (unreachable);
+ }
+ }
+
+ switch (ref->type)
+ {
+ case CAF_REF_COMPONENT:
+ if (ref->u.c.caf_token_offset > 0)
+ get_for_ref (ref->next, i, dst_index,
+ *(caf_single_token_t*)(sr + ref->u.c.caf_token_offset), dst,
+ (*(caf_single_token_t*)(sr + ref->u.c.caf_token_offset))->desc,
+ ds, sr + ref->u.c.offset, dst_kind, src_kind, dst_dim, 0,
+ 1, stat);
+ else
+ get_for_ref (ref->next, i, dst_index, single_token, dst,
+ (gfc_descriptor_t *)(sr + ref->u.c.offset), ds,
+ sr + ref->u.c.offset, dst_kind, src_kind, dst_dim, 0, 1,
+ stat);
+ return;
+ case CAF_REF_ARRAY:
+ if (ref->u.a.mode[src_dim] == CAF_ARR_REF_NONE)
+ {
+ get_for_ref (ref->next, i, dst_index, single_token, dst,
+ src, ds, sr, dst_kind, src_kind,
+ dst_dim, 0, 1, stat);
+ return;
+ }
+ /* Only on the leftmost index switch the data pointer to
+ the array's data pointer. */
+ if (src_dim == 0)
+ sr = GFC_DESCRIPTOR_DATA (src);
+ switch (ref->u.a.mode[src_dim])
+ {
+ case CAF_ARR_REF_VECTOR:
+ extent_src = GFC_DIMENSION_EXTENT (src->dim[src_dim]);
+ array_offset_src = 0;
+ dst_index[dst_dim] = 0;
+ for (size_t idx = 0; idx < ref->u.a.dim[src_dim].v.nvec;
+ ++idx)
+ {
+#define KINDCASE(kind, type) case kind: \
+ array_offset_src = (((index_type) \
+ ((type *)ref->u.a.dim[src_dim].v.vector)[idx]) \
+ - GFC_DIMENSION_LBOUND (src->dim[src_dim])) \
+ * GFC_DIMENSION_STRIDE (src->dim[src_dim]); \
+ break
+
+ switch (ref->u.a.dim[src_dim].v.kind)
+ {
+ KINDCASE (1, GFC_INTEGER_1);
+ KINDCASE (2, GFC_INTEGER_2);
+ KINDCASE (4, GFC_INTEGER_4);
+#ifdef HAVE_GFC_INTEGER_8
+ KINDCASE (8, GFC_INTEGER_8);
+#endif
+#ifdef HAVE_GFC_INTEGER_16
+ KINDCASE (16, GFC_INTEGER_16);
+#endif
+ default:
+ caf_runtime_error (unreachable);
+ return;
+ }
+#undef KINDCASE
+
+ get_for_ref (ref, i, dst_index, single_token, dst, src,
+ ds, sr + array_offset_src * ref->item_size,
+ dst_kind, src_kind, dst_dim + 1, src_dim + 1,
+ 1, stat);
+ dst_index[dst_dim]
+ += GFC_DIMENSION_STRIDE (dst->dim[dst_dim]);
+ }
+ return;
+ case CAF_ARR_REF_FULL:
+ COMPUTE_NUM_ITEMS (extent_src,
+ ref->u.a.dim[src_dim].s.stride,
+ GFC_DIMENSION_LBOUND (src->dim[src_dim]),
+ GFC_DIMENSION_UBOUND (src->dim[src_dim]));
+ stride_src = src->dim[src_dim]._stride
+ * ref->u.a.dim[src_dim].s.stride;
+ array_offset_src = 0;
+ dst_index[dst_dim] = 0;
+ for (index_type idx = 0; idx < extent_src;
+ ++idx, array_offset_src += stride_src)
+ {
+ get_for_ref (ref, i, dst_index, single_token, dst, src,
+ ds, sr + array_offset_src * ref->item_size,
+ dst_kind, src_kind, dst_dim + 1, src_dim + 1,
+ 1, stat);
+ dst_index[dst_dim]
+ += GFC_DIMENSION_STRIDE (dst->dim[dst_dim]);
+ }
+ return;
+ case CAF_ARR_REF_RANGE:
+ COMPUTE_NUM_ITEMS (extent_src,
+ ref->u.a.dim[src_dim].s.stride,
+ ref->u.a.dim[src_dim].s.start,
+ ref->u.a.dim[src_dim].s.end);
+ array_offset_src = (ref->u.a.dim[src_dim].s.start
+ - GFC_DIMENSION_LBOUND (src->dim[src_dim]))
+ * GFC_DIMENSION_STRIDE (src->dim[src_dim]);
+ stride_src = GFC_DIMENSION_STRIDE (src->dim[src_dim])
+ * ref->u.a.dim[src_dim].s.stride;
+ dst_index[dst_dim] = 0;
+ /* Increase dst_dim only when the src extent is greater than one
+ or src and dst extent are both one. Don't increase when the scalar
+ source is not present in the dst. */
+ next_dst_dim = extent_src > 1
+ || (GFC_DIMENSION_EXTENT (dst->dim[dst_dim]) == 1
+ && extent_src == 1) ? (dst_dim + 1) : dst_dim;
+ for (index_type idx = 0; idx < extent_src; ++idx)
+ {
+ get_for_ref (ref, i, dst_index, single_token, dst, src,
+ ds, sr + array_offset_src * ref->item_size,
+ dst_kind, src_kind, next_dst_dim, src_dim + 1,
+ 1, stat);
+ dst_index[dst_dim]
+ += GFC_DIMENSION_STRIDE (dst->dim[dst_dim]);
+ array_offset_src += stride_src;
+ }
+ return;
+ case CAF_ARR_REF_SINGLE:
+ array_offset_src = (ref->u.a.dim[src_dim].s.start
+ - src->dim[src_dim].lower_bound)
+ * GFC_DIMENSION_STRIDE (src->dim[src_dim]);
+ dst_index[dst_dim] = 0;
+ get_for_ref (ref, i, dst_index, single_token, dst, src, ds,
+ sr + array_offset_src * ref->item_size,
+ dst_kind, src_kind, dst_dim, src_dim + 1, 1,
+ stat);
+ return;
+ case CAF_ARR_REF_OPEN_END:
+ COMPUTE_NUM_ITEMS (extent_src,
+ ref->u.a.dim[src_dim].s.stride,
+ ref->u.a.dim[src_dim].s.start,
+ GFC_DIMENSION_UBOUND (src->dim[src_dim]));
+ stride_src = GFC_DIMENSION_STRIDE (src->dim[src_dim])
+ * ref->u.a.dim[src_dim].s.stride;
+ array_offset_src = (ref->u.a.dim[src_dim].s.start
+ - GFC_DIMENSION_LBOUND (src->dim[src_dim]))
+ * GFC_DIMENSION_STRIDE (src->dim[src_dim]);
+ dst_index[dst_dim] = 0;
+ for (index_type idx = 0; idx < extent_src; ++idx)
+ {
+ get_for_ref (ref, i, dst_index, single_token, dst, src,
+ ds, sr + array_offset_src * ref->item_size,
+ dst_kind, src_kind, dst_dim + 1, src_dim + 1,
+ 1, stat);
+ dst_index[dst_dim]
+ += GFC_DIMENSION_STRIDE (dst->dim[dst_dim]);
+ array_offset_src += stride_src;
+ }
+ return;
+ case CAF_ARR_REF_OPEN_START:
+ COMPUTE_NUM_ITEMS (extent_src,
+ ref->u.a.dim[src_dim].s.stride,
+ GFC_DIMENSION_LBOUND (src->dim[src_dim]),
+ ref->u.a.dim[src_dim].s.end);
+ stride_src = GFC_DIMENSION_STRIDE (src->dim[src_dim])
+ * ref->u.a.dim[src_dim].s.stride;
+ array_offset_src = 0;
+ dst_index[dst_dim] = 0;
+ for (index_type idx = 0; idx < extent_src; ++idx)
+ {
+ get_for_ref (ref, i, dst_index, single_token, dst, src,
+ ds, sr + array_offset_src * ref->item_size,
+ dst_kind, src_kind, dst_dim + 1, src_dim + 1,
+ 1, stat);
+ dst_index[dst_dim]
+ += GFC_DIMENSION_STRIDE (dst->dim[dst_dim]);
+ array_offset_src += stride_src;
+ }
+ return;
+ default:
+ caf_runtime_error (unreachable);
+ }
+ return;
+ case CAF_REF_STATIC_ARRAY:
+ if (ref->u.a.mode[src_dim] == CAF_ARR_REF_NONE)
+ {
+ get_for_ref (ref->next, i, dst_index, single_token, dst,
+ NULL, ds, sr, dst_kind, src_kind,
+ dst_dim, 0, 1, stat);
+ return;
+ }
+ switch (ref->u.a.mode[src_dim])
+ {
+ case CAF_ARR_REF_VECTOR:
+ array_offset_src = 0;
+ dst_index[dst_dim] = 0;
+ for (size_t idx = 0; idx < ref->u.a.dim[src_dim].v.nvec;
+ ++idx)
+ {
+#define KINDCASE(kind, type) case kind: \
+ array_offset_src = ((type *)ref->u.a.dim[src_dim].v.vector)[idx]; \
+ break
+
+ switch (ref->u.a.dim[src_dim].v.kind)
+ {
+ KINDCASE (1, GFC_INTEGER_1);
+ KINDCASE (2, GFC_INTEGER_2);
+ KINDCASE (4, GFC_INTEGER_4);
+#ifdef HAVE_GFC_INTEGER_8
+ KINDCASE (8, GFC_INTEGER_8);
+#endif
+#ifdef HAVE_GFC_INTEGER_16
+ KINDCASE (16, GFC_INTEGER_16);
+#endif
+ default:
+ caf_runtime_error (unreachable);
+ return;
+ }
+#undef KINDCASE
+
+ get_for_ref (ref, i, dst_index, single_token, dst, NULL,
+ ds, sr + array_offset_src * ref->item_size,
+ dst_kind, src_kind, dst_dim + 1, src_dim + 1,
+ 1, stat);
+ dst_index[dst_dim]
+ += GFC_DIMENSION_STRIDE (dst->dim[dst_dim]);
+ }
+ return;
+ case CAF_ARR_REF_FULL:
+ dst_index[dst_dim] = 0;
+ for (array_offset_src = 0 ;
+ array_offset_src <= ref->u.a.dim[src_dim].s.end;
+ array_offset_src += ref->u.a.dim[src_dim].s.stride)
+ {
+ get_for_ref (ref, i, dst_index, single_token, dst, NULL,
+ ds, sr + array_offset_src * ref->item_size,
+ dst_kind, src_kind, dst_dim + 1, src_dim + 1,
+ 1, stat);
+ dst_index[dst_dim]
+ += GFC_DIMENSION_STRIDE (dst->dim[dst_dim]);
+ }
+ return;
+ case CAF_ARR_REF_RANGE:
+ COMPUTE_NUM_ITEMS (extent_src,
+ ref->u.a.dim[src_dim].s.stride,
+ ref->u.a.dim[src_dim].s.start,
+ ref->u.a.dim[src_dim].s.end);
+ array_offset_src = ref->u.a.dim[src_dim].s.start;
+ dst_index[dst_dim] = 0;
+ for (index_type idx = 0; idx < extent_src; ++idx)
+ {
+ get_for_ref (ref, i, dst_index, single_token, dst, NULL,
+ ds, sr + array_offset_src * ref->item_size,
+ dst_kind, src_kind, dst_dim + 1, src_dim + 1,
+ 1, stat);
+ dst_index[dst_dim]
+ += GFC_DIMENSION_STRIDE (dst->dim[dst_dim]);
+ array_offset_src += ref->u.a.dim[src_dim].s.stride;
+ }
+ return;
+ case CAF_ARR_REF_SINGLE:
+ array_offset_src = ref->u.a.dim[src_dim].s.start;
+ get_for_ref (ref, i, dst_index, single_token, dst, NULL, ds,
+ sr + array_offset_src * ref->item_size,
+ dst_kind, src_kind, dst_dim, src_dim + 1, 1,
+ stat);
+ return;
+ /* The OPEN_* are mapped to a RANGE and therefore can not occur. */
+ case CAF_ARR_REF_OPEN_END:
+ case CAF_ARR_REF_OPEN_START:
+ default:
+ caf_runtime_error (unreachable);
+ }
+ return;
+ default:
+ caf_runtime_error (unreachable);
+ }
+}
+
+
+void
+_gfortran_caf_get_by_ref (caf_token_t token,
+ int image_index __attribute__ ((unused)),
+ gfc_descriptor_t *dst, caf_reference_t *refs,
+ int dst_kind, int src_kind,
+ bool may_require_tmp __attribute__ ((unused)),
+ bool dst_reallocatable, int *stat)
+{
+ const char vecrefunknownkind[] = "libcaf_single::caf_get_by_ref(): "
+ "unknown kind in vector-ref.\n";
+ const char unknownreftype[] = "libcaf_single::caf_get_by_ref(): "
+ "unknown reference type.\n";
+ const char unknownarrreftype[] = "libcaf_single::caf_get_by_ref(): "
+ "unknown array reference type.\n";
+ const char rankoutofrange[] = "libcaf_single::caf_get_by_ref(): "
+ "rank out of range.\n";
+ const char extentoutofrange[] = "libcaf_single::caf_get_by_ref(): "
+ "extent out of range.\n";
+ const char cannotallocdst[] = "libcaf_single::caf_get_by_ref(): "
+ "can not allocate memory.\n";
+ const char nonallocextentmismatch[] = "libcaf_single::caf_get_by_ref(): "
+ "extent of non-allocatable arrays mismatch (%lu != %lu).\n";
+ const char doublearrayref[] = "libcaf_single::caf_get_by_ref(): "
+ "two or more array part references are not supported.\n";
+ size_t size, i;
+ size_t dst_index[GFC_MAX_DIMENSIONS];
+ int dst_rank = GFC_DESCRIPTOR_RANK (dst);
+ int dst_cur_dim = 0;
+ size_t src_size;
+ caf_single_token_t single_token = TOKEN (token);
+ void *memptr = single_token->memptr;
+ gfc_descriptor_t *src = single_token->desc;
+ caf_reference_t *riter = refs;
+ long delta;
+ /* Reallocation of dst.data is needed (e.g., array too small). */
+ bool realloc_needed;
+ /* Reallocation of dst.data is required, because data is not allocated at
+ all. */
+ bool realloc_required;
+ bool extent_mismatch = false;
+ /* Set when the first non-scalar array reference is encountered. */
+ bool in_array_ref = false;
+ bool array_extent_fixed = false;
+ realloc_needed = realloc_required = GFC_DESCRIPTOR_DATA (dst) == NULL;
+
+ assert (!realloc_needed || (realloc_needed && dst_reallocatable));
+
+ if (stat)
+ *stat = 0;
+
+ /* Compute the size of the result. In the beginning size just counts the
+ number of elements. */
+ size = 1;
+ while (riter)
+ {
+ switch (riter->type)
+ {
+ case CAF_REF_COMPONENT:
+ if (riter->u.c.caf_token_offset)
+ {
+ single_token = *(caf_single_token_t*)
+ (memptr + riter->u.c.caf_token_offset);
+ memptr = single_token->memptr;
+ src = single_token->desc;
+ }
+ else
+ {
+ memptr += riter->u.c.offset;
+ src = (gfc_descriptor_t *)memptr;
+ }
+ break;
+ case CAF_REF_ARRAY:
+ for (i = 0; riter->u.a.mode[i] != CAF_ARR_REF_NONE; ++i)
+ {
+ switch (riter->u.a.mode[i])
+ {
+ case CAF_ARR_REF_VECTOR:
+ delta = riter->u.a.dim[i].v.nvec;
+#define KINDCASE(kind, type) case kind: \
+ memptr += (((index_type) \
+ ((type *)riter->u.a.dim[i].v.vector)[0]) \
+ - GFC_DIMENSION_LBOUND (src->dim[i])) \
+ * GFC_DIMENSION_STRIDE (src->dim[i]) \
+ * riter->item_size; \
+ break
+
+ switch (riter->u.a.dim[i].v.kind)
+ {
+ KINDCASE (1, GFC_INTEGER_1);
+ KINDCASE (2, GFC_INTEGER_2);
+ KINDCASE (4, GFC_INTEGER_4);
+#ifdef HAVE_GFC_INTEGER_8
+ KINDCASE (8, GFC_INTEGER_8);
+#endif
+#ifdef HAVE_GFC_INTEGER_16
+ KINDCASE (16, GFC_INTEGER_16);
+#endif
+ default:
+ caf_internal_error (vecrefunknownkind, stat, NULL, 0);
+ return;
+ }
+#undef KINDCASE
+ break;
+ case CAF_ARR_REF_FULL:
+ COMPUTE_NUM_ITEMS (delta,
+ riter->u.a.dim[i].s.stride,
+ GFC_DIMENSION_LBOUND (src->dim[i]),
+ GFC_DIMENSION_UBOUND (src->dim[i]));
+ /* The memptr stays unchanged when ref'ing the first element
+ in a dimension. */
+ break;
+ case CAF_ARR_REF_RANGE:
+ COMPUTE_NUM_ITEMS (delta,
+ riter->u.a.dim[i].s.stride,
+ riter->u.a.dim[i].s.start,
+ riter->u.a.dim[i].s.end);
+ memptr += (riter->u.a.dim[i].s.start
+ - GFC_DIMENSION_LBOUND (src->dim[i]))
+ * GFC_DIMENSION_STRIDE (src->dim[i])
+ * riter->item_size;
+ break;
+ case CAF_ARR_REF_SINGLE:
+ delta = 1;
+ memptr += (riter->u.a.dim[i].s.start
+ - GFC_DIMENSION_LBOUND (src->dim[i]))
+ * GFC_DIMENSION_STRIDE (src->dim[i])
+ * riter->item_size;
+ break;
+ case CAF_ARR_REF_OPEN_END:
+ COMPUTE_NUM_ITEMS (delta,
+ riter->u.a.dim[i].s.stride,
+ riter->u.a.dim[i].s.start,
+ GFC_DIMENSION_UBOUND (src->dim[i]));
+ memptr += (riter->u.a.dim[i].s.start
+ - GFC_DIMENSION_LBOUND (src->dim[i]))
+ * GFC_DIMENSION_STRIDE (src->dim[i])
+ * riter->item_size;
+ break;
+ case CAF_ARR_REF_OPEN_START:
+ COMPUTE_NUM_ITEMS (delta,
+ riter->u.a.dim[i].s.stride,
+ GFC_DIMENSION_LBOUND (src->dim[i]),
+ riter->u.a.dim[i].s.end);
+ /* The memptr stays unchanged when ref'ing the first element
+ in a dimension. */
+ break;
+ default:
+ caf_internal_error (unknownarrreftype, stat, NULL, 0);
+ return;
+ }
+ if (delta <= 0)
+ return;
+ /* Check the various properties of the destination array.
+ Is an array expected and present? */
+ if (delta > 1 && dst_rank == 0)
+ {
+ /* No, an array is required, but not provided. */
+ caf_internal_error (extentoutofrange, stat, NULL, 0);
+ return;
+ }
+ /* When dst is an array. */
+ if (dst_rank > 0)
+ {
+ /* Check that dst_cur_dim is valid for dst. Can be
+ superseded only by scalar data. */
+ if (dst_cur_dim >= dst_rank && delta != 1)
+ {
+ caf_internal_error (rankoutofrange, stat, NULL, 0);
+ return;
+ }
+ /* Do further checks, when the source is not scalar. */
+ else if (delta != 1)
+ {
+ /* Check that the extent is not scalar and we are not in
+ an array ref for the dst side. */
+ if (!in_array_ref)
+ {
+ /* Check that this is the non-scalar extent. */
+ if (!array_extent_fixed)
+ {
+ /* In an array extent now. */
+ in_array_ref = true;
+ /* Check that we haven't skipped any scalar
+ dimensions yet and that the dst is
+ compatible. */
+ if (i > 0
+ && dst_rank == GFC_DESCRIPTOR_RANK (src))
+ {
+ if (dst_reallocatable)
+ {
+ /* Dst is reallocatable, which means that
+ the bounds are not set. Set them. */
+ for (dst_cur_dim= 0; dst_cur_dim < (int)i;
+ ++dst_cur_dim)
+ GFC_DIMENSION_SET (dst->dim[dst_cur_dim],
+ 1, 1, 1);
+ }
+ else
+ dst_cur_dim = i;
+ }
+ /* Else keep our fingers crossed that there are enough
+ dimensional refs to come. Checked below. */
+ }
+ else
+ {
+ caf_internal_error (doublearrayref, stat, NULL,
+ 0);
+ return;
+ }
+ }
+ /* When the realloc is required, then no extent may have
+ been set. */
+ extent_mismatch = realloc_required
+ || GFC_DESCRIPTOR_EXTENT (dst, dst_cur_dim) != delta;
+ /* When it is already known that a realloc is needed or
+ the extent does not match the needed one. */
+ if (realloc_required || realloc_needed
+ || extent_mismatch)
+ {
+ /* Check whether dst is reallocatable. */
+ if (unlikely (!dst_reallocatable))
+ {
+ caf_internal_error (nonallocextentmismatch, stat,
+ NULL, 0, delta,
+ GFC_DESCRIPTOR_EXTENT (dst,
+ dst_cur_dim));
+ return;
+ }
+ /* Only report an error, when the extent needs to be
+ modified, which is not allowed. */
+ else if (!dst_reallocatable && extent_mismatch)
+ {
+ caf_internal_error (extentoutofrange, stat, NULL,
+ 0);
+ return;
+ }
+ realloc_needed = true;
+ }
+ /* Only change the extent when it does not match. This is
+ to prevent resetting given array bounds. */
+ if (extent_mismatch)
+ GFC_DIMENSION_SET (dst->dim[dst_cur_dim], 1, delta,
+ size);
+ }
+
+ /* Only increase the dim counter, when in an array ref. */
+ if (in_array_ref && dst_cur_dim < dst_rank)
+ ++dst_cur_dim;
+ }
+ size *= (index_type)delta;
+ }
+ if (in_array_ref)
+ {
+ array_extent_fixed = true;
+ in_array_ref = false;
+ /* Check if we got fewer dimensional refs than the rank of dst
+ expects. */
+ assert (dst_cur_dim == GFC_DESCRIPTOR_RANK (dst));
+ }
+ break;
+ case CAF_REF_STATIC_ARRAY:
+ for (i = 0; riter->u.a.mode[i] != CAF_ARR_REF_NONE; ++i)
+ {
+ switch (riter->u.a.mode[i])
+ {
+ case CAF_ARR_REF_VECTOR:
+ delta = riter->u.a.dim[i].v.nvec;
+#define KINDCASE(kind, type) case kind: \
+ memptr += ((type *)riter->u.a.dim[i].v.vector)[0] \
+ * riter->item_size; \
+ break
+
+ switch (riter->u.a.dim[i].v.kind)
+ {
+ KINDCASE (1, GFC_INTEGER_1);
+ KINDCASE (2, GFC_INTEGER_2);
+ KINDCASE (4, GFC_INTEGER_4);
+#ifdef HAVE_GFC_INTEGER_8
+ KINDCASE (8, GFC_INTEGER_8);
+#endif
+#ifdef HAVE_GFC_INTEGER_16
+ KINDCASE (16, GFC_INTEGER_16);
+#endif
+ default:
+ caf_internal_error (vecrefunknownkind, stat, NULL, 0);
+ return;
+ }
+#undef KINDCASE
+ break;
+ case CAF_ARR_REF_FULL:
+ delta = riter->u.a.dim[i].s.end / riter->u.a.dim[i].s.stride
+ + 1;
+ /* The memptr stays unchanged when ref'ing the first element
+ in a dimension. */
+ break;
+ case CAF_ARR_REF_RANGE:
+ COMPUTE_NUM_ITEMS (delta,
+ riter->u.a.dim[i].s.stride,
+ riter->u.a.dim[i].s.start,
+ riter->u.a.dim[i].s.end);
+ memptr += riter->u.a.dim[i].s.start
+ * riter->u.a.dim[i].s.stride
+ * riter->item_size;
+ break;
+ case CAF_ARR_REF_SINGLE:
+ delta = 1;
+ memptr += riter->u.a.dim[i].s.start
+ * riter->u.a.dim[i].s.stride
+ * riter->item_size;
+ break;
+ case CAF_ARR_REF_OPEN_END:
+ /* This and OPEN_START are mapped to a RANGE and therefore
+ can not occur here. */
+ case CAF_ARR_REF_OPEN_START:
+ default:
+ caf_internal_error (unknownarrreftype, stat, NULL, 0);
+ return;
+ }
+ if (delta <= 0)
+ return;
+ /* Check the various properties of the destination array.
+ Is an array expected and present? */
+ if (delta > 1 && dst_rank == 0)
+ {
+ /* No, an array is required, but not provided. */
+ caf_internal_error (extentoutofrange, stat, NULL, 0);
+ return;
+ }
+ /* When dst is an array. */
+ if (dst_rank > 0)
+ {
+ /* Check that dst_cur_dim is valid for dst. Can be
+ superseded only by scalar data. */
+ if (dst_cur_dim >= dst_rank && delta != 1)
+ {
+ caf_internal_error (rankoutofrange, stat, NULL, 0);
+ return;
+ }
+ /* Do further checks, when the source is not scalar. */
+ else if (delta != 1)
+ {
+ /* Check that the extent is not scalar and we are not in
+ an array ref for the dst side. */
+ if (!in_array_ref)
+ {
+ /* Check that this is the non-scalar extent. */
+ if (!array_extent_fixed)
+ {
+ /* In an array extent now. */
+ in_array_ref = true;
+ /* The dst is not reallocatable, so nothing more
+ to do than correct the dim counter. */
+ dst_cur_dim = i;
+ }
+ else
+ {
+ caf_internal_error (doublearrayref, stat, NULL,
+ 0);
+ return;
+ }
+ }
+ /* When the realloc is required, then no extent may have
+ been set. */
+ extent_mismatch = realloc_required
+ || GFC_DESCRIPTOR_EXTENT (dst, dst_cur_dim) != delta;
+ /* When it is already known that a realloc is needed or
+ the extent does not match the needed one. */
+ if (realloc_required || realloc_needed
+ || extent_mismatch)
+ {
+ /* Check whether dst is reallocatable. */
+ if (unlikely (!dst_reallocatable))
+ {
+ caf_internal_error (nonallocextentmismatch, stat,
+ NULL, 0, delta,
+ GFC_DESCRIPTOR_EXTENT (dst,
+ dst_cur_dim));
+ return;
+ }
+ /* Only report an error, when the extent needs to be
+ modified, which is not allowed. */
+ else if (!dst_reallocatable && extent_mismatch)
+ {
+ caf_internal_error (extentoutofrange, stat, NULL,
+ 0);
+ return;
+ }
+ realloc_needed = true;
+ }
+ /* Only change the extent when it does not match. This is
+ to prevent resetting given array bounds. */
+ if (extent_mismatch)
+ GFC_DIMENSION_SET (dst->dim[dst_cur_dim], 1, delta,
+ size);
+ }
+ /* Only increase the dim counter, when in an array ref. */
+ if (in_array_ref && dst_cur_dim < dst_rank)
+ ++dst_cur_dim;
+ }
+ size *= (index_type)delta;
+ }
+ if (in_array_ref)
+ {
+ array_extent_fixed = true;
+ in_array_ref = false;
+ /* Check if we got fewer dimensional refs than the rank of dst
+ expects. */
+ assert (dst_cur_dim == GFC_DESCRIPTOR_RANK (dst));
+ }
+ break;
+ default:
+ caf_internal_error (unknownreftype, stat, NULL, 0);
+ return;
+ }
+ src_size = riter->item_size;
+ riter = riter->next;
+ }
+ if (size == 0 || src_size == 0)
+ return;
+ /* Postcondition:
+ - size contains the number of elements to store in the destination array,
+ - src_size gives the size in bytes of each item in the destination array.
+ */
+
+ if (realloc_needed)
+ {
+ if (!array_extent_fixed)
+ {
+ assert (size == 1);
+ /* This can only happen when the result is scalar. */
+ for (dst_cur_dim = 0; dst_cur_dim < dst_rank; ++dst_cur_dim)
+ GFC_DIMENSION_SET (dst->dim[dst_cur_dim], 1, 1, 1);
+ }
+
+ GFC_DESCRIPTOR_DATA (dst) = malloc (size * GFC_DESCRIPTOR_SIZE (dst));
+ if (unlikely (GFC_DESCRIPTOR_DATA (dst) == NULL))
+ {
+ caf_internal_error (cannotallocdst, stat, NULL, 0);
+ return;
+ }
+ }
+
+ /* Reset the token. */
+ single_token = TOKEN (token);
+ memptr = single_token->memptr;
+ src = single_token->desc;
+ memset (dst_index, 0, sizeof (dst_index));
+ i = 0;
+ get_for_ref (refs, &i, dst_index, single_token, dst, src,
+ GFC_DESCRIPTOR_DATA (dst), memptr, dst_kind, src_kind, 0, 0,
+ 1, stat);
+}
+
+
+static void
+send_by_ref (caf_reference_t *ref, size_t *i, size_t *src_index,
+ caf_single_token_t single_token, gfc_descriptor_t *dst,
+ gfc_descriptor_t *src, void *ds, void *sr,
+ int dst_kind, int src_kind, size_t dst_dim, size_t src_dim,
+ size_t num, size_t size, int *stat)
+{
+ const char vecrefunknownkind[] = "libcaf_single::caf_send_by_ref(): "
+ "unknown kind in vector-ref.\n";
+ ptrdiff_t extent_dst = 1, array_offset_dst = 0, stride_dst;
+ const size_t src_rank = GFC_DESCRIPTOR_RANK (src);
+
+ if (unlikely (ref == NULL))
+ /* Maybe we should issue an error here, because this case should not
+ occur. */
+ return;
+
+ if (ref->next == NULL)
+ {
+ size_t src_size = GFC_DESCRIPTOR_SIZE (src);
+ ptrdiff_t array_offset_src = 0;
+ int dst_type = -1;
+
+ switch (ref->type)
+ {
+ case CAF_REF_COMPONENT:
+ if (ref->u.c.caf_token_offset > 0)
+ {
+ if (*(void**)(ds + ref->u.c.offset) == NULL)
+ {
+ /* Create a scalar temporary array descriptor. */
+ gfc_descriptor_t static_dst;
+ GFC_DESCRIPTOR_DATA (&static_dst) = NULL;
+ GFC_DESCRIPTOR_DTYPE (&static_dst)
+ = GFC_DESCRIPTOR_DTYPE (src);
+ /* The component may be allocated now, because it is a
+ scalar. */
+ single_token = *(caf_single_token_t*)
+ (ds + ref->u.c.caf_token_offset);
+ _gfortran_caf_register (ref->item_size,
+ CAF_REGTYPE_COARRAY_ALLOC,
+ (caf_token_t *)&single_token,
+ &static_dst, stat, NULL, 0);
+ /* In case of an error in the allocation, return. When stat is
+ NULL, then register_component() terminates on error. */
+ if (stat != NULL && *stat)
+ return;
+ /* Publish the allocated memory. */
+ *((void **)(ds + ref->u.c.offset))
+ = GFC_DESCRIPTOR_DATA (&static_dst);
+ ds = GFC_DESCRIPTOR_DATA (&static_dst);
+ /* Set the type from the src. */
+ dst_type = GFC_DESCRIPTOR_TYPE (src);
+ }
+ else
+ {
+ ds = GFC_DESCRIPTOR_DATA (dst);
+ dst_type = GFC_DESCRIPTOR_TYPE (dst);
+ }
+ copy_data (ds, sr, dst_type, GFC_DESCRIPTOR_TYPE (src),
+ dst_kind, src_kind, ref->item_size, src_size, 1, stat);
+ }
+ else
+ copy_data (ds + ref->u.c.offset, sr,
+ dst != NULL ? GFC_DESCRIPTOR_TYPE (dst)
+ : GFC_DESCRIPTOR_TYPE (src),
+ GFC_DESCRIPTOR_TYPE (src),
+ dst_kind, src_kind, ref->item_size, src_size, 1, stat);
+ ++(*i);
+ return;
+ case CAF_REF_STATIC_ARRAY:
+ dst_type = ref->u.a.static_array_type;
+ /* Intentionally fall through. */
+ case CAF_REF_ARRAY:
+ if (ref->u.a.mode[dst_dim] == CAF_ARR_REF_NONE)
+ {
+ if (src_rank > 0)
+ {
+ for (size_t d = 0; d < src_rank; ++d)
+ array_offset_src += src_index[d];
+ copy_data (ds, sr + array_offset_src * ref->item_size,
+ dst_type == -1 ? GFC_DESCRIPTOR_TYPE (dst)
+ : dst_type,
+ GFC_DESCRIPTOR_TYPE (src), dst_kind, src_kind,
+ ref->item_size, src_size, num, stat);
+ }
+ else
+ copy_data (ds, sr,
+ dst_type == -1 ? GFC_DESCRIPTOR_TYPE (dst)
+ : dst_type,
+ GFC_DESCRIPTOR_TYPE (src), dst_kind, src_kind,
+ ref->item_size, src_size, num, stat);
+ *i += num;
+ return;
+ }
+ break;
+ default:
+ caf_runtime_error (unreachable);
+ }
+ }
+
+ switch (ref->type)
+ {
+ case CAF_REF_COMPONENT:
+ if (ref->u.c.caf_token_offset > 0)
+ {
+ if (*(void**)(ds + ref->u.c.offset) == NULL)
+ {
+ /* This component refs an unallocated array. Non-arrays are
+ caught in the if (!ref->next) above. */
+ dst = (gfc_descriptor_t *)(ds + ref->u.c.offset);
+ /* Assume that the rank and the dimensions fit for copying src
+ to dst. */
+ GFC_DESCRIPTOR_DTYPE (dst) = GFC_DESCRIPTOR_DTYPE (src);
+ dst->offset = 0;
+ stride_dst = 1;
+ for (size_t d = 0; d < src_rank; ++d)
+ {
+ extent_dst = GFC_DIMENSION_EXTENT (src->dim[d]);
+ GFC_DIMENSION_LBOUND (dst->dim[d]) = 0;
+ GFC_DIMENSION_UBOUND (dst->dim[d]) = extent_dst - 1;
+ GFC_DIMENSION_STRIDE (dst->dim[d]) = stride_dst;
+ stride_dst *= extent_dst;
+ }
+ /* Null the data-pointer to make register_component allocate
+ its own memory. */
+ GFC_DESCRIPTOR_DATA (dst) = NULL;
+
+ /* The size of the array is given by size. */
+ _gfortran_caf_register (size * ref->item_size,
+ CAF_REGTYPE_COARRAY_ALLOC,
+ (void **)&single_token,
+ dst, stat, NULL, 0);
+ /* In case of an error in the allocation, return. When stat is
+ NULL, then register_component() terminates on error. */
+ if (stat != NULL && *stat)
+ return;
+ /* The memptr, descriptor and the token are set below. */
+ *(caf_single_token_t *)(ds + ref->u.c.caf_token_offset)
+ = single_token;
+ }
+ single_token = *(caf_single_token_t*)(ds + ref->u.c.caf_token_offset);
+ send_by_ref (ref->next, i, src_index, single_token,
+ single_token->desc, src, ds + ref->u.c.offset, sr,
+ dst_kind, src_kind, 0, src_dim, 1, size, stat);
+ }
+ else
+ send_by_ref (ref->next, i, src_index, single_token,
+ (gfc_descriptor_t *)(ds + ref->u.c.offset), src,
+ ds + ref->u.c.offset, sr, dst_kind, src_kind, 0, src_dim,
+ 1, size, stat);
+ return;
+ case CAF_REF_ARRAY:
+ if (ref->u.a.mode[dst_dim] == CAF_ARR_REF_NONE)
+ {
+ send_by_ref (ref->next, i, src_index, single_token,
+ (gfc_descriptor_t *)ds, src, ds, sr, dst_kind, src_kind,
+ 0, src_dim, 1, size, stat);
+ return;
+ }
+ /* Only on the leftmost index switch the data pointer to
+ the array's data pointer. And only for non-static arrays. */
+ if (dst_dim == 0 && ref->type != CAF_REF_STATIC_ARRAY)
+ ds = GFC_DESCRIPTOR_DATA (dst);
+ switch (ref->u.a.mode[dst_dim])
+ {
+ case CAF_ARR_REF_VECTOR:
+ array_offset_dst = 0;
+ src_index[src_dim] = 0;
+ for (size_t idx = 0; idx < ref->u.a.dim[dst_dim].v.nvec;
+ ++idx)
+ {
+#define KINDCASE(kind, type) case kind: \
+ array_offset_dst = (((index_type) \
+ ((type *)ref->u.a.dim[dst_dim].v.vector)[idx]) \
+ - GFC_DIMENSION_LBOUND (dst->dim[dst_dim])) \
+ * GFC_DIMENSION_STRIDE (dst->dim[dst_dim]); \
+ break
+
+ switch (ref->u.a.dim[dst_dim].v.kind)
+ {
+ KINDCASE (1, GFC_INTEGER_1);
+ KINDCASE (2, GFC_INTEGER_2);
+ KINDCASE (4, GFC_INTEGER_4);
+#ifdef HAVE_GFC_INTEGER_8
+ KINDCASE (8, GFC_INTEGER_8);
+#endif
+#ifdef HAVE_GFC_INTEGER_16
+ KINDCASE (16, GFC_INTEGER_16);
+#endif
+ default:
+ caf_internal_error (vecrefunknownkind, stat, NULL, 0);
+ return;
+ }
+#undef KINDCASE
+
+ send_by_ref (ref, i, src_index, single_token, dst, src,
+ ds + array_offset_dst * ref->item_size, sr,
+ dst_kind, src_kind, dst_dim + 1, src_dim + 1,
+ 1, size, stat);
+ if (src_rank > 0)
+ src_index[src_dim]
+ += GFC_DIMENSION_STRIDE (src->dim[src_dim]);
+ }
+ return;
+ case CAF_ARR_REF_FULL:
+ COMPUTE_NUM_ITEMS (extent_dst,
+ ref->u.a.dim[dst_dim].s.stride,
+ GFC_DIMENSION_LBOUND (dst->dim[dst_dim]),
+ GFC_DIMENSION_UBOUND (dst->dim[dst_dim]));
+ array_offset_dst = 0;
+ stride_dst = GFC_DIMENSION_STRIDE (dst->dim[dst_dim])
+ * ref->u.a.dim[dst_dim].s.stride;
+ src_index[src_dim] = 0;
+ for (index_type idx = 0; idx < extent_dst;
+ ++idx, array_offset_dst += stride_dst)
+ {
+ send_by_ref (ref, i, src_index, single_token, dst, src,
+ ds + array_offset_dst * ref->item_size, sr,
+ dst_kind, src_kind, dst_dim + 1, src_dim + 1,
+ 1, size, stat);
+ if (src_rank > 0)
+ src_index[src_dim]
+ += GFC_DIMENSION_STRIDE (src->dim[src_dim]);
+ }
+ return;
+ case CAF_ARR_REF_RANGE:
+ COMPUTE_NUM_ITEMS (extent_dst,
+ ref->u.a.dim[dst_dim].s.stride,
+ ref->u.a.dim[dst_dim].s.start,
+ ref->u.a.dim[dst_dim].s.end);
+ array_offset_dst = ref->u.a.dim[dst_dim].s.start
+ - GFC_DIMENSION_LBOUND (dst->dim[dst_dim]);
+ stride_dst = GFC_DIMENSION_STRIDE (dst->dim[dst_dim])
+ * ref->u.a.dim[dst_dim].s.stride;
+ src_index[src_dim] = 0;
+ for (index_type idx = 0; idx < extent_dst; ++idx)
+ {
+ send_by_ref (ref, i, src_index, single_token, dst, src,
+ ds + array_offset_dst * ref->item_size, sr,
+ dst_kind, src_kind, dst_dim + 1, src_dim + 1,
+ 1, size, stat);
+ if (src_rank > 0)
+ src_index[src_dim]
+ += GFC_DIMENSION_STRIDE (src->dim[src_dim]);
+ array_offset_dst += stride_dst;
+ }
+ return;
+ case CAF_ARR_REF_SINGLE:
+ array_offset_dst = (ref->u.a.dim[dst_dim].s.start
+ - GFC_DIMENSION_LBOUND (dst->dim[dst_dim]))
+ * GFC_DIMENSION_STRIDE (dst->dim[dst_dim]);
+ send_by_ref (ref, i, src_index, single_token, dst, src, ds
+ + array_offset_dst * ref->item_size, sr,
+ dst_kind, src_kind, dst_dim + 1, src_dim, 1,
+ size, stat);
+ return;
+ case CAF_ARR_REF_OPEN_END:
+ COMPUTE_NUM_ITEMS (extent_dst,
+ ref->u.a.dim[dst_dim].s.stride,
+ ref->u.a.dim[dst_dim].s.start,
+ GFC_DIMENSION_UBOUND (dst->dim[dst_dim]));
+ array_offset_dst = ref->u.a.dim[dst_dim].s.start
+ - GFC_DIMENSION_LBOUND (dst->dim[dst_dim]);
+ stride_dst = GFC_DIMENSION_STRIDE (dst->dim[dst_dim])
+ * ref->u.a.dim[dst_dim].s.stride;
+ src_index[src_dim] = 0;
+ for (index_type idx = 0; idx < extent_dst; ++idx)
+ {
+ send_by_ref (ref, i, src_index, single_token, dst, src,
+ ds + array_offset_dst * ref->item_size, sr,
+ dst_kind, src_kind, dst_dim + 1, src_dim + 1,
+ 1, size, stat);
+ if (src_rank > 0)
+ src_index[src_dim]
+ += GFC_DIMENSION_STRIDE (src->dim[src_dim]);
+ array_offset_dst += stride_dst;
+ }
+ return;
+ case CAF_ARR_REF_OPEN_START:
+ COMPUTE_NUM_ITEMS (extent_dst,
+ ref->u.a.dim[dst_dim].s.stride,
+ GFC_DIMENSION_LBOUND (dst->dim[dst_dim]),
+ ref->u.a.dim[dst_dim].s.end);
+ array_offset_dst = 0;
+ stride_dst = GFC_DIMENSION_STRIDE (dst->dim[dst_dim])
+ * ref->u.a.dim[dst_dim].s.stride;
+ src_index[src_dim] = 0;
+ for (index_type idx = 0; idx < extent_dst; ++idx)
+ {
+ send_by_ref (ref, i, src_index, single_token, dst, src,
+ ds + array_offset_dst * ref->item_size, sr,
+ dst_kind, src_kind, dst_dim + 1, src_dim + 1,
+ 1, size, stat);
+ if (src_rank > 0)
+ src_index[src_dim]
+ += GFC_DIMENSION_STRIDE (src->dim[src_dim]);
+ array_offset_dst += stride_dst;
+ }
+ return;
+ default:
+ caf_runtime_error (unreachable);
+ }
+ return;
+ case CAF_REF_STATIC_ARRAY:
+ if (ref->u.a.mode[dst_dim] == CAF_ARR_REF_NONE)
+ {
+ send_by_ref (ref->next, i, src_index, single_token, NULL,
+ src, ds, sr, dst_kind, src_kind,
+ 0, src_dim, 1, size, stat);
+ return;
+ }
+ switch (ref->u.a.mode[dst_dim])
+ {
+ case CAF_ARR_REF_VECTOR:
+ array_offset_dst = 0;
+ src_index[src_dim] = 0;
+ for (size_t idx = 0; idx < ref->u.a.dim[dst_dim].v.nvec;
+ ++idx)
+ {
+#define KINDCASE(kind, type) case kind: \
+ array_offset_dst = ((type *)ref->u.a.dim[dst_dim].v.vector)[idx]; \
+ break
+
+ switch (ref->u.a.dim[dst_dim].v.kind)
+ {
+ KINDCASE (1, GFC_INTEGER_1);
+ KINDCASE (2, GFC_INTEGER_2);
+ KINDCASE (4, GFC_INTEGER_4);
+#ifdef HAVE_GFC_INTEGER_8
+ KINDCASE (8, GFC_INTEGER_8);
+#endif
+#ifdef HAVE_GFC_INTEGER_16
+ KINDCASE (16, GFC_INTEGER_16);
+#endif
+ default:
+ caf_runtime_error (unreachable);
+ return;
+ }
+#undef KINDCASE
+
+ send_by_ref (ref, i, src_index, single_token, NULL, src,
+ ds + array_offset_dst * ref->item_size, sr,
+ dst_kind, src_kind, dst_dim + 1, src_dim + 1,
+ 1, size, stat);
+ src_index[src_dim]
+ += GFC_DIMENSION_STRIDE (src->dim[src_dim]);
+ }
+ return;
+ case CAF_ARR_REF_FULL:
+ src_index[src_dim] = 0;
+ for (array_offset_dst = 0 ;
+ array_offset_dst <= ref->u.a.dim[dst_dim].s.end;
+ array_offset_dst += ref->u.a.dim[dst_dim].s.stride)
+ {
+ send_by_ref (ref, i, src_index, single_token, NULL, src,
+ ds + array_offset_dst * ref->item_size, sr,
+ dst_kind, src_kind, dst_dim + 1, src_dim + 1,
+ 1, size, stat);
+ if (src_rank > 0)
+ src_index[src_dim]
+ += GFC_DIMENSION_STRIDE (src->dim[src_dim]);
+ }
+ return;
+ case CAF_ARR_REF_RANGE:
+ COMPUTE_NUM_ITEMS (extent_dst,
+ ref->u.a.dim[dst_dim].s.stride,
+ ref->u.a.dim[dst_dim].s.start,
+ ref->u.a.dim[dst_dim].s.end);
+ array_offset_dst = ref->u.a.dim[dst_dim].s.start;
+ src_index[src_dim] = 0;
+ for (index_type idx = 0; idx < extent_dst; ++idx)
+ {
+ send_by_ref (ref, i, src_index, single_token, NULL, src,
+ ds + array_offset_dst * ref->item_size, sr,
+ dst_kind, src_kind, dst_dim + 1, src_dim + 1,
+ 1, size, stat);
+ if (src_rank > 0)
+ src_index[src_dim]
+ += GFC_DIMENSION_STRIDE (src->dim[src_dim]);
+ array_offset_dst += ref->u.a.dim[dst_dim].s.stride;
+ }
+ return;
+ case CAF_ARR_REF_SINGLE:
+ array_offset_dst = ref->u.a.dim[dst_dim].s.start;
+ send_by_ref (ref, i, src_index, single_token, NULL, src,
+ ds + array_offset_dst * ref->item_size, sr,
+ dst_kind, src_kind, dst_dim + 1, src_dim, 1,
+ size, stat);
+ return;
+ /* The OPEN_* are mapped to a RANGE and therefore can not occur. */
+ case CAF_ARR_REF_OPEN_END:
+ case CAF_ARR_REF_OPEN_START:
+ default:
+ caf_runtime_error (unreachable);
+ }
+ return;
+ default:
+ caf_runtime_error (unreachable);
+ }
+}
+
+
+void
+_gfortran_caf_send_by_ref (caf_token_t token,
+ int image_index __attribute__ ((unused)),
+ gfc_descriptor_t *src, caf_reference_t *refs,
+ int dst_kind, int src_kind,
+ bool may_require_tmp __attribute__ ((unused)),
+ bool dst_reallocatable, int *stat)
+{
+ const char vecrefunknownkind[] = "libcaf_single::caf_get_by_ref(): "
+ "unknown kind in vector-ref.\n";
+ const char unknownreftype[] = "libcaf_single::caf_send_by_ref(): "
+ "unknown reference type.\n";
+ const char unknownarrreftype[] = "libcaf_single::caf_send_by_ref(): "
+ "unknown array reference type.\n";
+ const char rankoutofrange[] = "libcaf_single::caf_send_by_ref(): "
+ "rank out of range.\n";
+ const char realloconinnerref[] = "libcaf_single::caf_send_by_ref(): "
+ "reallocation of array followed by component ref not allowed.\n";
+ const char cannotallocdst[] = "libcaf_single::caf_send_by_ref(): "
+ "can not allocate memory.\n";
+ const char nonallocextentmismatch[] = "libcaf_single::caf_send_by_ref(): "
+ "extent of non-allocatable array mismatch.\n";
+ const char innercompref[] = "libcaf_single::caf_send_by_ref(): "
+ "inner unallocated component detected.\n";
+ size_t size, i;
+ size_t dst_index[GFC_MAX_DIMENSIONS];
+ int src_rank = GFC_DESCRIPTOR_RANK (src);
+ int src_cur_dim = 0;
+ size_t src_size;
+ caf_single_token_t single_token = TOKEN (token);
+ void *memptr = single_token->memptr;
+ gfc_descriptor_t *dst = single_token->desc;
+ caf_reference_t *riter = refs;
+ long delta;
+ bool extent_mismatch;
+ /* Note that the component is not allocated yet. */
+ index_type new_component_idx = -1;
+
+ if (stat)
+ *stat = 0;
+
+ /* Compute the size of the result. In the beginning size just counts the
+ number of elements. */
+ size = 1;
+ while (riter)
+ {
+ switch (riter->type)
+ {
+ case CAF_REF_COMPONENT:
+ if (unlikely (new_component_idx != -1))
+ {
+ /* Allocating a component in the middle of a component ref is not
+ supported. We don't know the type to allocate. */
+ caf_internal_error (innercompref, stat, NULL, 0);
+ return;
+ }
+ if (riter->u.c.caf_token_offset > 0)
+ {
+ /* Check whether the allocatable component is unallocated; then no
+ token is present either. The token's pointer is not cleared
+ when the structure is initialized. */
+ if (*(void**)(memptr + riter->u.c.offset) == NULL)
+ {
+ /* This component is not yet allocated. Check that it is
+ allocatable here. */
+ if (!dst_reallocatable)
+ {
+ caf_internal_error (cannotallocdst, stat, NULL, 0);
+ return;
+ }
+ single_token = NULL;
+ memptr = NULL;
+ dst = NULL;
+ break;
+ }
+ single_token = *(caf_single_token_t*)
+ (memptr + riter->u.c.caf_token_offset);
+ memptr += riter->u.c.offset;
+ dst = single_token->desc;
+ }
+ else
+ {
+ /* Regular component. */
+ memptr += riter->u.c.offset;
+ dst = (gfc_descriptor_t *)memptr;
+ }
+ break;
+ case CAF_REF_ARRAY:
+ if (dst != NULL)
+ memptr = GFC_DESCRIPTOR_DATA (dst);
+ else
+ dst = src;
+ /* When the dst array needs to be allocated, look at the
+ extent of the source array in the current dimension. */
+ for (i = 0; riter->u.a.mode[i] != CAF_ARR_REF_NONE; ++i)
+ {
+ switch (riter->u.a.mode[i])
+ {
+ case CAF_ARR_REF_VECTOR:
+ delta = riter->u.a.dim[i].v.nvec;
+#define KINDCASE(kind, type) case kind: \
+ memptr += (((index_type) \
+ ((type *)riter->u.a.dim[i].v.vector)[0]) \
+ - GFC_DIMENSION_LBOUND (dst->dim[i])) \
+ * GFC_DIMENSION_STRIDE (dst->dim[i]) \
+ * riter->item_size; \
+ break
+
+ switch (riter->u.a.dim[i].v.kind)
+ {
+ KINDCASE (1, GFC_INTEGER_1);
+ KINDCASE (2, GFC_INTEGER_2);
+ KINDCASE (4, GFC_INTEGER_4);
+#ifdef HAVE_GFC_INTEGER_8
+ KINDCASE (8, GFC_INTEGER_8);
+#endif
+#ifdef HAVE_GFC_INTEGER_16
+ KINDCASE (16, GFC_INTEGER_16);
+#endif
+ default:
+ caf_internal_error (vecrefunknownkind, stat, NULL, 0);
+ return;
+ }
+#undef KINDCASE
+ break;
+ case CAF_ARR_REF_FULL:
+ if (dst)
+ COMPUTE_NUM_ITEMS (delta,
+ riter->u.a.dim[i].s.stride,
+ GFC_DIMENSION_LBOUND (dst->dim[i]),
+ GFC_DIMENSION_UBOUND (dst->dim[i]));
+ else
+ COMPUTE_NUM_ITEMS (delta,
+ riter->u.a.dim[i].s.stride,
+ GFC_DIMENSION_LBOUND (src->dim[src_cur_dim]),
+ GFC_DIMENSION_UBOUND (src->dim[src_cur_dim]));
+ break;
+ case CAF_ARR_REF_RANGE:
+ COMPUTE_NUM_ITEMS (delta,
+ riter->u.a.dim[i].s.stride,
+ riter->u.a.dim[i].s.start,
+ riter->u.a.dim[i].s.end);
+ memptr += (riter->u.a.dim[i].s.start
+ - dst->dim[i].lower_bound)
+ * GFC_DIMENSION_STRIDE (dst->dim[i])
+ * riter->item_size;
+ break;
+ case CAF_ARR_REF_SINGLE:
+ delta = 1;
+ memptr += (riter->u.a.dim[i].s.start
+ - dst->dim[i].lower_bound)
+ * GFC_DIMENSION_STRIDE (dst->dim[i])
+ * riter->item_size;
+ break;
+ case CAF_ARR_REF_OPEN_END:
+ if (dst)
+ COMPUTE_NUM_ITEMS (delta,
+ riter->u.a.dim[i].s.stride,
+ riter->u.a.dim[i].s.start,
+ GFC_DIMENSION_UBOUND (dst->dim[i]));
+ else
+ COMPUTE_NUM_ITEMS (delta,
+ riter->u.a.dim[i].s.stride,
+ riter->u.a.dim[i].s.start,
+ GFC_DIMENSION_UBOUND (src->dim[src_cur_dim]));
+ memptr += (riter->u.a.dim[i].s.start
+ - dst->dim[i].lower_bound)
+ * GFC_DIMENSION_STRIDE (dst->dim[i])
+ * riter->item_size;
+ break;
+ case CAF_ARR_REF_OPEN_START:
+ if (dst)
+ COMPUTE_NUM_ITEMS (delta,
+ riter->u.a.dim[i].s.stride,
+ GFC_DIMENSION_LBOUND (dst->dim[i]),
+ riter->u.a.dim[i].s.end);
+ else
+ COMPUTE_NUM_ITEMS (delta,
+ riter->u.a.dim[i].s.stride,
+ GFC_DIMENSION_LBOUND (src->dim[src_cur_dim]),
+ riter->u.a.dim[i].s.end);
+ /* The memptr stays unchanged when ref'ing the first element
+ in a dimension. */
+ break;
+ default:
+ caf_internal_error (unknownarrreftype, stat, NULL, 0);
+ return;
+ }
+
+ if (delta <= 0)
+ return;
+ /* Check the various properties of the source array.
+ When src is an array. */
+ if (delta > 1 && src_rank > 0)
+ {
+ /* Check that src_cur_dim is valid for src. Can be
+ superseded only by scalar data. */
+ if (src_cur_dim >= src_rank)
+ {
+ caf_internal_error (rankoutofrange, stat, NULL, 0);
+ return;
+ }
+ /* Do further checks, when the source is not scalar. */
+ else
+ {
+ /* When a realloc is required, no extent may have been
+ set yet. */
+ extent_mismatch = memptr == NULL
+ || (dst
+ && GFC_DESCRIPTOR_EXTENT (dst, src_cur_dim)
+ != delta);
+ /* When it is already known that a realloc is needed, or
+ the extent does not match the needed one. */
+ if (extent_mismatch)
+ {
+ /* Check whether dst is reallocatable. */
+ if (unlikely (!dst_reallocatable))
+ {
+ caf_internal_error (nonallocextentmismatch, stat,
+ NULL, 0, delta,
+ GFC_DESCRIPTOR_EXTENT (dst,
+ src_cur_dim));
+ return;
+ }
+ /* A realloc is only allowed on the last reference in the
+ chain; report an error when an inner ref requires one. */
+ else if (riter->next != NULL)
+ {
+ caf_internal_error (realloconinnerref, stat, NULL,
+ 0);
+ return;
+ }
+ }
+ /* Only change the extent when it does not match. This is
+ to prevent resetting given array bounds. */
+ if (extent_mismatch)
+ GFC_DIMENSION_SET (dst->dim[src_cur_dim], 1, delta,
+ size);
+ }
+ /* Increase the dim-counter of the src only when the extent
+ matches. */
+ if (src_cur_dim < src_rank
+ && GFC_DESCRIPTOR_EXTENT (src, src_cur_dim) == delta)
+ ++src_cur_dim;
+ }
+ size *= (index_type)delta;
+ }
+ break;
+ case CAF_REF_STATIC_ARRAY:
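+ /* A static array has no array descriptor; start indexes and
+ strides come directly from the reference and are relative to
+ the array's first element. */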
+ for (i = 0; riter->u.a.mode[i] != CAF_ARR_REF_NONE; ++i)
+ {
+ switch (riter->u.a.mode[i])
+ {
+ case CAF_ARR_REF_VECTOR:
+ delta = riter->u.a.dim[i].v.nvec;
+#define KINDCASE(kind, type) case kind: \
+ memptr += ((type *)riter->u.a.dim[i].v.vector)[0] \
+ * riter->item_size; \
+ break
+
+ switch (riter->u.a.dim[i].v.kind)
+ {
+ KINDCASE (1, GFC_INTEGER_1);
+ KINDCASE (2, GFC_INTEGER_2);
+ KINDCASE (4, GFC_INTEGER_4);
+#ifdef HAVE_GFC_INTEGER_8
+ KINDCASE (8, GFC_INTEGER_8);
+#endif
+#ifdef HAVE_GFC_INTEGER_16
+ KINDCASE (16, GFC_INTEGER_16);
+#endif
+ default:
+ caf_internal_error (vecrefunknownkind, stat, NULL, 0);
+ return;
+ }
+#undef KINDCASE
+ break;
+ case CAF_ARR_REF_FULL:
+ delta = riter->u.a.dim[i].s.end / riter->u.a.dim[i].s.stride
+ + 1;
+ /* The memptr stays unchanged when ref'ing the first element
+ in a dimension. */
+ break;
+ case CAF_ARR_REF_RANGE:
+ COMPUTE_NUM_ITEMS (delta,
+ riter->u.a.dim[i].s.stride,
+ riter->u.a.dim[i].s.start,
+ riter->u.a.dim[i].s.end);
+ memptr += riter->u.a.dim[i].s.start
+ * riter->u.a.dim[i].s.stride
+ * riter->item_size;
+ break;
+ case CAF_ARR_REF_SINGLE:
+ delta = 1;
+ memptr += riter->u.a.dim[i].s.start
+ * riter->u.a.dim[i].s.stride
+ * riter->item_size;
+ break;
+ case CAF_ARR_REF_OPEN_END:
+ /* This and OPEN_START are mapped to a RANGE and therefore
+ cannot occur here. */
+ case CAF_ARR_REF_OPEN_START:
+ default:
+ caf_internal_error (unknownarrreftype, stat, NULL, 0);
+ return;
+ }
+ if (delta <= 0)
+ return;
+ /* Check the various properties of the source array, but
+ only when the source array is not scalar. */
+ if (delta > 1 && src_rank > 0)
+ {
+ /* Check that src_cur_dim is valid for src. Can be
+ superseded only by scalar data. */
+ if (src_cur_dim >= src_rank)
+ {
+ caf_internal_error (rankoutofrange, stat, NULL, 0);
+ return;
+ }
+ else
+ {
+ /* We cannot realloc dst here, because it is a fixed-size
+ array. */
+ extent_mismatch = GFC_DESCRIPTOR_EXTENT (src, src_cur_dim)
+ != delta;
+ /* When the extent does not match the needed one, we can
+ only stop here. */
+ if (extent_mismatch)
+ {
+ caf_internal_error (nonallocextentmismatch, stat,
+ NULL, 0, delta,
+ GFC_DESCRIPTOR_EXTENT (src,
+ src_cur_dim));
+ return;
+ }
+ }
+ ++src_cur_dim;
+ }
+ size *= (index_type)delta;
+ }
+ break;
+ default:
+ caf_internal_error (unknownreftype, stat, NULL, 0);
+ return;
+ }
+ src_size = riter->item_size;
+ riter = riter->next;
+ }
+ if (size == 0 || src_size == 0)
+ return;
+ /* Postcondition:
+ - size contains the number of elements to store in the destination array,
+ - src_size gives the size in bytes of each item in the destination array.
+ */
+
+ /* Reset to the initial token and descriptor and do the actual
+ transfer now that the required size is known. */
+ single_token = TOKEN (token);
+ memptr = single_token->memptr;
+ dst = single_token->desc;
+ memset (dst_index, 0, sizeof (dst_index));
+ i = 0;
+ send_by_ref (refs, &i, dst_index, single_token, dst, src,
+ memptr, GFC_DESCRIPTOR_DATA (src), dst_kind, src_kind, 0, 0,
+ 1, size, stat);
+ assert (i == size);
+}
+
+
+void
+_gfortran_caf_sendget_by_ref (caf_token_t dst_token, int dst_image_index,
+ caf_reference_t *dst_refs, caf_token_t src_token,
+ int src_image_index,
+ caf_reference_t *src_refs, int dst_kind,
+ int src_kind, bool may_require_tmp, int *dst_stat,
+ int *src_stat)
+{
+ gfc_array_void temp;
+
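+ /* Stage the transfer through a temporary: get the data described by
+ src_refs from the source image into temp, then send temp to the
+ destination described by dst_refs, freeing the buffer afterwards. */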
+ _gfortran_caf_get_by_ref (src_token, src_image_index, &temp, src_refs,
+ dst_kind, src_kind, may_require_tmp, true,
+ src_stat);
+
+ if (src_stat && *src_stat != 0)
+ return;
+
+ _gfortran_caf_send_by_ref (dst_token, dst_image_index, &temp, dst_refs,
+ dst_kind, src_kind, may_require_tmp, true,
+ dst_stat);
+ if (GFC_DESCRIPTOR_DATA (&temp))
+ free (GFC_DESCRIPTOR_DATA (&temp));
+}
+
+
void
_gfortran_caf_atomic_define (caf_token_t token, size_t offset,
int image_index __attribute__ ((unused)),
@@ -1019,7 +2679,7 @@ _gfortran_caf_atomic_define (caf_token_t token, size_t offset,
{
assert(kind == 4);
- uint32_t *atom = (uint32_t *) ((char *) TOKEN (token) + offset);
+ uint32_t *atom = (uint32_t *) ((char *) MEMTOK (token) + offset);
__atomic_store (atom, (uint32_t *) value, __ATOMIC_RELAXED);
@@ -1035,7 +2695,7 @@ _gfortran_caf_atomic_ref (caf_token_t token, size_t offset,
{
assert(kind == 4);
- uint32_t *atom = (uint32_t *) ((char *) TOKEN (token) + offset);
+ uint32_t *atom = (uint32_t *) ((char *) MEMTOK (token) + offset);
__atomic_load (atom, (uint32_t *) value, __ATOMIC_RELAXED);
@@ -1052,7 +2712,7 @@ _gfortran_caf_atomic_cas (caf_token_t token, size_t offset,
{
assert(kind == 4);
- uint32_t *atom = (uint32_t *) ((char *) TOKEN (token) + offset);
+ uint32_t *atom = (uint32_t *) ((char *) MEMTOK (token) + offset);
*(uint32_t *) old = *(uint32_t *) compare;
(void) __atomic_compare_exchange_n (atom, (uint32_t *) old,
@@ -1072,7 +2732,7 @@ _gfortran_caf_atomic_op (int op, caf_token_t token, size_t offset,
assert(kind == 4);
uint32_t res;
- uint32_t *atom = (uint32_t *) ((char *) TOKEN (token) + offset);
+ uint32_t *atom = (uint32_t *) ((char *) MEMTOK (token) + offset);
switch (op)
{
@@ -1106,7 +2766,8 @@ _gfortran_caf_event_post (caf_token_t token, size_t index,
int errmsg_len __attribute__ ((unused)))
{
uint32_t value = 1;
- uint32_t *event = (uint32_t *) ((char *) TOKEN (token) + index*sizeof(uint32_t));
+ uint32_t *event = (uint32_t *) ((char *) MEMTOK (token) + index
+ * sizeof (uint32_t));
__atomic_fetch_add (event, (uint32_t) value, __ATOMIC_RELAXED);
if(stat)
@@ -1119,7 +2780,8 @@ _gfortran_caf_event_wait (caf_token_t token, size_t index,
char *errmsg __attribute__ ((unused)),
int errmsg_len __attribute__ ((unused)))
{
- uint32_t *event = (uint32_t *) ((char *) TOKEN (token) + index*sizeof(uint32_t));
+ uint32_t *event = (uint32_t *) ((char *) MEMTOK (token) + index
+ * sizeof (uint32_t));
uint32_t value = (uint32_t)-until_count;
__atomic_fetch_add (event, (uint32_t) value, __ATOMIC_RELAXED);
@@ -1132,7 +2794,8 @@ _gfortran_caf_event_query (caf_token_t token, size_t index,
int image_index __attribute__ ((unused)),
int *count, int *stat)
{
- uint32_t *event = (uint32_t *) ((char *) TOKEN (token) + index*sizeof(uint32_t));
+ uint32_t *event = (uint32_t *) ((char *) MEMTOK (token) + index
+ * sizeof (uint32_t));
__atomic_load (event, (uint32_t *) count, __ATOMIC_RELAXED);
if(stat)
@@ -1145,7 +2808,7 @@ _gfortran_caf_lock (caf_token_t token, size_t index,
int *aquired_lock, int *stat, char *errmsg, int errmsg_len)
{
const char *msg = "Already locked";
- bool *lock = &((bool *) TOKEN (token))[index];
+ bool *lock = &((bool *) MEMTOK (token))[index];
if (!*lock)
{
@@ -1189,7 +2852,7 @@ _gfortran_caf_unlock (caf_token_t token, size_t index,
int *stat, char *errmsg, int errmsg_len)
{
const char *msg = "Variable is not locked";
- bool *lock = &((bool *) TOKEN (token))[index];
+ bool *lock = &((bool *) MEMTOK (token))[index];
if (*lock)
{