author     Thomas Koenig <tkoenig@gcc.gnu.org>    2008-03-23 22:19:19 +0000
committer  Thomas Koenig <tkoenig@gcc.gnu.org>    2008-03-23 22:19:19 +0000
commit     3478bba4660b29dc76202c955b709bf484dacf1f (patch)
tree       b07d6c628b5ec47255fa6723cf727cb89ace575e /libgfortran
parent     2ff8644d33a8b9f3ca7c7c870c011a07da852ab3 (diff)
re PR libfortran/32972 (performance of pack/unpack)
2008-03-23  Thomas Koenig  <tkoenig@gcc.gnu.org>

        PR libfortran/32972
        * Makefile.am: Add new variable, i_unpack_c, containing
        unpack_i1.c, unpack_i2.c, unpack_i4.c, unpack_i8.c,
        unpack_i16.c, unpack_r4.c, unpack_r8.c, unpack_r10.c,
        unpack_r16.c, unpack_c4.c, unpack_c8.c, unpack_c10.c
        and unpack_c16.c.
        Add i_unpack_c to gfor_built_src.
        Add rule to generate i_unpack_c from m4/unpack.m4.
        * Makefile.in: Regenerated.
        * libgfortran.h: Add prototypes for unpack0_i1, unpack0_i2,
        unpack0_i4, unpack0_i8, unpack0_i16, unpack0_r4, unpack0_r8,
        unpack0_r10, unpack0_r16, unpack0_c4, unpack0_c8, unpack0_c10,
        unpack0_c16, unpack1_i1, unpack1_i2, unpack1_i4, unpack1_i8,
        unpack1_i16, unpack1_r4, unpack1_r8, unpack1_r10, unpack1_r16,
        unpack1_c4, unpack1_c8, unpack1_c10 and unpack1_c16.
        * intrinsics/unpack_generic.c (unpack1): Add calls to specific
        unpack1 functions.
        (unpack0): Add calls to specific unpack0 functions.
        * m4/unpack.m4: New file.
        * generated/unpack_i1.c: New file.
        * generated/unpack_i2.c: New file.
        * generated/unpack_i4.c: New file.
        * generated/unpack_i8.c: New file.
        * generated/unpack_i16.c: New file.
        * generated/unpack_r4.c: New file.
        * generated/unpack_r8.c: New file.
        * generated/unpack_r10.c: New file.
        * generated/unpack_r16.c: New file.
        * generated/unpack_c4.c: New file.
        * generated/unpack_c8.c: New file.
        * generated/unpack_c10.c: New file.
        * generated/unpack_c16.c: New file.

2008-03-23  Thomas Koenig  <tkoenig@gcc.gnu.org>

        PR libfortran/32972
        * gfortran.dg/intrinsic_unpack_1.f90: New test case.
        * gfortran.dg/intrinsic_unpack_2.f90: New test case.
        * gfortran.dg/intrinsic_unpack_3.f90: New test case.

From-SVN: r133469
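The ChangeLog says the generic unpack1/unpack0 wrappers in intrinsics/unpack_generic.c now call these new kind-specific routines, but that hunk is not reproduced on this page. As a rough orientation only, the dispatch can be sketched as below. This is not the committed code: the GFC_DTYPE_TYPE_SIZE switch and GFC_DTYPE_* labels are assumed to match the pattern already used by the earlier pack_generic.c change, and unpack1_fallback is a hypothetical name for the pre-existing byte-based generic path.

/* Sketch only, not the unpack_generic.c hunk from this commit.  */
#include "libgfortran.h"

/* Hypothetical name for the original byte-copy implementation.  */
extern void unpack1_fallback (gfc_array_char *, const gfc_array_char *,
                              const gfc_array_l1 *, const gfc_array_char *);

void
unpack1_sketch (gfc_array_char *ret, const gfc_array_char *vector,
                const gfc_array_l1 *mask, const gfc_array_char *field)
{
  switch (GFC_DTYPE_TYPE_SIZE (ret))   /* assumed macro, see note above */
    {
    case GFC_DTYPE_INTEGER_4:
      unpack1_i4 ((gfc_array_i4 *) ret, (const gfc_array_i4 *) vector,
                  mask, (const gfc_array_i4 *) field);
      return;

#if defined (HAVE_GFC_COMPLEX_10)
    case GFC_DTYPE_COMPLEX_10:
      unpack1_c10 ((gfc_array_c10 *) ret, (const gfc_array_c10 *) vector,
                   mask, (const gfc_array_c10 *) field);
      return;
#endif

    /* ... one case per integer, real and complex kind listed above ... */

    default:
      /* Unknown or character type: keep the original generic path.  */
      unpack1_fallback (ret, vector, mask, field);
    }
}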
Diffstat (limited to 'libgfortran')
-rw-r--r--   libgfortran/ChangeLog                       35
-rw-r--r--   libgfortran/Makefile.am                     23
-rw-r--r--   libgfortran/Makefile.in                    169
-rw-r--r--   libgfortran/generated/unpack_c10.c         338
-rw-r--r--   libgfortran/generated/unpack_c16.c         338
-rw-r--r--   libgfortran/generated/unpack_c4.c          338
-rw-r--r--   libgfortran/generated/unpack_c8.c          338
-rw-r--r--   libgfortran/generated/unpack_i1.c          338
-rw-r--r--   libgfortran/generated/unpack_i16.c         338
-rw-r--r--   libgfortran/generated/unpack_i2.c          338
-rw-r--r--   libgfortran/generated/unpack_i4.c          338
-rw-r--r--   libgfortran/generated/unpack_i8.c          338
-rw-r--r--   libgfortran/generated/unpack_r10.c         338
-rw-r--r--   libgfortran/generated/unpack_r16.c         338
-rw-r--r--   libgfortran/generated/unpack_r4.c          338
-rw-r--r--   libgfortran/generated/unpack_r8.c          338
-rw-r--r--   libgfortran/intrinsics/unpack_generic.c    195
-rw-r--r--   libgfortran/libgfortran.h                  136
-rw-r--r--   libgfortran/m4/unpack.m4                   339
19 files changed, 5272 insertions, 19 deletions
diff --git a/libgfortran/ChangeLog b/libgfortran/ChangeLog
index 3ccccc9..6ac6dfe 100644
--- a/libgfortran/ChangeLog
+++ b/libgfortran/ChangeLog
@@ -1,3 +1,38 @@
+2008-03-23  Thomas Koenig  <tkoenig@gcc.gnu.org>
+
+ PR libfortran/32972
+ * Makefile.am: Add new variable, i_unpack_c, containing
+ unpack_i1.c, unpack_i2.c, unpack_i4.c, unpack_i8.c,
+ unpack_i16.c, unpack_r4.c, unpack_r8.c, unpack_r10.c,
+ unpack_r16.c, unpack_c4.c, unpack_c8.c, unpack_c10.c
+ and unpack_c16.c
+ Add i_unpack_c to gfor_built_src.
+ Add rule to generate i_unpack_c from m4/unpack.m4.
+ * Makefile.in: Regenerated.
+ * libgfortran.h: Add prototypes for unpack0_i1, unpack0_i2,
+ unpack0_i4, unpack0_i8, unpack0_i16, unpack0_r4, unpack0_r8,
+ unpack0_r10, unpack0_r16, unpack0_c4, unpack0_c8, unpack0_c10,
+ unpack0_c16, unpack1_i1, unpack1_i2, unpack1_i4, unpack1_i8,
+ unpack1_i16, unpack1_r4, unpack1_r8, unpack1_r10, unpack1_r16,
+ unpack1_c4, unpack1_c8, unpack1_c10 and unpack1_c16.
+ * intrinsics/unpack_generic.c (unpack1): Add calls to specific
+ unpack1 functions.
+ (unpack0): Add calls to specific unpack0 functions.
+ * m4/unpack.m4: New file.
+ * generated/unpack_i1.c: New file.
+ * generated/unpack_i2.c: New file.
+ * generated/unpack_i4.c: New file.
+ * generated/unpack_i8.c: New file.
+ * generated/unpack_i16.c: New file.
+ * generated/unpack_r4.c: New file.
+ * generated/unpack_r8.c: New file.
+ * generated/unpack_r10.c: New file.
+ * generated/unpack_r16.c: New file.
+ * generated/unpack_c4.c: New file.
+ * generated/unpack_c8.c: New file.
+ * generated/unpack_c10.c: New file.
+ * generated/unpack_c16.c: New file.
+
2008-03-22 Jerry DeLisle <jvdelisle@gcc.gnu.org>
PR libfortran/35632
diff --git a/libgfortran/Makefile.am b/libgfortran/Makefile.am
index 1706c6a..4e9655a6 100644
--- a/libgfortran/Makefile.am
+++ b/libgfortran/Makefile.am
@@ -491,6 +491,21 @@ $(srcdir)/generated/pack_c8.c \
$(srcdir)/generated/pack_c10.c \
$(srcdir)/generated/pack_c16.c
+i_unpack_c = \
+$(srcdir)/generated/unpack_i1.c \
+$(srcdir)/generated/unpack_i2.c \
+$(srcdir)/generated/unpack_i4.c \
+$(srcdir)/generated/unpack_i8.c \
+$(srcdir)/generated/unpack_i16.c \
+$(srcdir)/generated/unpack_r4.c \
+$(srcdir)/generated/unpack_r8.c \
+$(srcdir)/generated/unpack_r10.c \
+$(srcdir)/generated/unpack_r16.c \
+$(srcdir)/generated/unpack_c4.c \
+$(srcdir)/generated/unpack_c8.c \
+$(srcdir)/generated/unpack_c10.c \
+$(srcdir)/generated/unpack_c16.c
+
m4_files= m4/iparm.m4 m4/ifunction.m4 m4/iforeach.m4 m4/all.m4 \
m4/any.m4 m4/count.m4 m4/maxloc0.m4 m4/maxloc1.m4 m4/maxval.m4 \
m4/minloc0.m4 m4/minloc1.m4 m4/minval.m4 m4/product.m4 m4/sum.m4 \
@@ -499,7 +514,8 @@ m4_files= m4/iparm.m4 m4/ifunction.m4 m4/iforeach.m4 m4/all.m4 \
m4/specific.m4 m4/specific2.m4 m4/head.m4 m4/shape.m4 m4/reshape.m4 \
m4/transpose.m4 m4/eoshift1.m4 m4/eoshift3.m4 m4/exponent.m4 \
m4/fraction.m4 m4/nearest.m4 m4/set_exponent.m4 m4/pow.m4 \
- m4/misc_specifics.m4 m4/rrspacing.m4 m4/spacing.m4 m4/pack.m4
+ m4/misc_specifics.m4 m4/rrspacing.m4 m4/spacing.m4 m4/pack.m4 \
+ m4/unpack.m4
gfor_built_src= $(i_all_c) $(i_any_c) $(i_count_c) $(i_maxloc0_c) \
$(i_maxloc1_c) $(i_maxval_c) $(i_minloc0_c) $(i_minloc1_c) $(i_minval_c) \
@@ -507,7 +523,7 @@ gfor_built_src= $(i_all_c) $(i_any_c) $(i_count_c) $(i_maxloc0_c) \
$(i_matmul_c) $(i_matmull_c) $(i_transpose_c) $(i_shape_c) $(i_eoshift1_c) \
$(i_eoshift3_c) $(i_cshift1_c) $(i_reshape_c) $(in_pack_c) $(in_unpack_c) \
$(i_exponent_c) $(i_fraction_c) $(i_nearest_c) $(i_set_exponent_c) \
- $(i_pow_c) $(i_rrspacing_c) $(i_spacing_c) $(i_pack_c) \
+ $(i_pow_c) $(i_rrspacing_c) $(i_spacing_c) $(i_pack_c) $(i_unpack_c) \
selected_int_kind.inc selected_real_kind.inc kinds.h \
kinds.inc c99_protos.inc fpu-target.h
@@ -826,6 +842,9 @@ $(i_pow_c): m4/pow.m4 $(I_M4_DEPS)
$(i_pack_c): m4/pack.m4 $(I_M4_DEPS)
$(M4) -Dfile=$@ -I$(srcdir)/m4 pack.m4 > $@
+$(i_unpack_c): m4/unpack.m4 $(I_M4_DEPS)
+ $(M4) -Dfile=$@ -I$(srcdir)/m4 unpack.m4 > $@
+
$(gfor_built_specific_src): m4/specific.m4 m4/head.m4
$(M4) -Dfile=$@ -I$(srcdir)/m4 specific.m4 > $@
diff --git a/libgfortran/Makefile.in b/libgfortran/Makefile.in
index 286338b..fb6056f 100644
--- a/libgfortran/Makefile.in
+++ b/libgfortran/Makefile.in
@@ -370,7 +370,19 @@ am__libgfortran_la_SOURCES_DIST = runtime/backtrace.c \
$(srcdir)/generated/pack_r8.c $(srcdir)/generated/pack_r10.c \
$(srcdir)/generated/pack_r16.c $(srcdir)/generated/pack_c4.c \
$(srcdir)/generated/pack_c8.c $(srcdir)/generated/pack_c10.c \
- $(srcdir)/generated/pack_c16.c selected_int_kind.inc \
+ $(srcdir)/generated/pack_c16.c $(srcdir)/generated/unpack_i1.c \
+ $(srcdir)/generated/unpack_i2.c \
+ $(srcdir)/generated/unpack_i4.c \
+ $(srcdir)/generated/unpack_i8.c \
+ $(srcdir)/generated/unpack_i16.c \
+ $(srcdir)/generated/unpack_r4.c \
+ $(srcdir)/generated/unpack_r8.c \
+ $(srcdir)/generated/unpack_r10.c \
+ $(srcdir)/generated/unpack_r16.c \
+ $(srcdir)/generated/unpack_c4.c \
+ $(srcdir)/generated/unpack_c8.c \
+ $(srcdir)/generated/unpack_c10.c \
+ $(srcdir)/generated/unpack_c16.c selected_int_kind.inc \
selected_real_kind.inc kinds.h kinds.inc c99_protos.inc \
fpu-target.h io/close.c io/file_pos.c io/format.c io/inquire.c \
io/intrinsics.c io/list_read.c io/lock.c io/open.c io/read.c \
@@ -643,7 +655,11 @@ am__objects_29 = spacing_r4.lo spacing_r8.lo spacing_r10.lo \
am__objects_30 = pack_i1.lo pack_i2.lo pack_i4.lo pack_i8.lo \
pack_i16.lo pack_r4.lo pack_r8.lo pack_r10.lo pack_r16.lo \
pack_c4.lo pack_c8.lo pack_c10.lo pack_c16.lo
-am__objects_31 = $(am__objects_2) $(am__objects_3) $(am__objects_4) \
+am__objects_31 = unpack_i1.lo unpack_i2.lo unpack_i4.lo unpack_i8.lo \
+ unpack_i16.lo unpack_r4.lo unpack_r8.lo unpack_r10.lo \
+ unpack_r16.lo unpack_c4.lo unpack_c8.lo unpack_c10.lo \
+ unpack_c16.lo
+am__objects_32 = $(am__objects_2) $(am__objects_3) $(am__objects_4) \
$(am__objects_5) $(am__objects_6) $(am__objects_7) \
$(am__objects_8) $(am__objects_9) $(am__objects_10) \
$(am__objects_11) $(am__objects_12) $(am__objects_13) \
@@ -652,11 +668,11 @@ am__objects_31 = $(am__objects_2) $(am__objects_3) $(am__objects_4) \
$(am__objects_20) $(am__objects_21) $(am__objects_22) \
$(am__objects_23) $(am__objects_24) $(am__objects_25) \
$(am__objects_26) $(am__objects_27) $(am__objects_28) \
- $(am__objects_29) $(am__objects_30)
-am__objects_32 = close.lo file_pos.lo format.lo inquire.lo \
+ $(am__objects_29) $(am__objects_30) $(am__objects_31)
+am__objects_33 = close.lo file_pos.lo format.lo inquire.lo \
intrinsics.lo list_read.lo lock.lo open.lo read.lo \
size_from_kind.lo transfer.lo unit.lo unix.lo write.lo
-am__objects_33 = associated.lo abort.lo access.lo args.lo \
+am__objects_34 = associated.lo abort.lo access.lo args.lo \
c99_functions.lo chdir.lo chmod.lo clock.lo cpu_time.lo \
cshift0.lo ctime.lo date_and_time.lo dtime.lo env.lo \
eoshift0.lo eoshift2.lo erfc_scaled.lo etime.lo exit.lo \
@@ -670,8 +686,8 @@ am__objects_33 = associated.lo abort.lo access.lo args.lo \
system_clock.lo time.lo transpose_generic.lo umask.lo \
unlink.lo unpack_generic.lo in_pack_generic.lo \
in_unpack_generic.lo
-am__objects_34 =
-am__objects_35 = _abs_c4.lo _abs_c8.lo _abs_c10.lo _abs_c16.lo \
+am__objects_35 =
+am__objects_36 = _abs_c4.lo _abs_c8.lo _abs_c10.lo _abs_c16.lo \
_abs_i4.lo _abs_i8.lo _abs_i16.lo _abs_r4.lo _abs_r8.lo \
_abs_r10.lo _abs_r16.lo _aimag_c4.lo _aimag_c8.lo \
_aimag_c10.lo _aimag_c16.lo _exp_r4.lo _exp_r8.lo _exp_r10.lo \
@@ -695,18 +711,18 @@ am__objects_35 = _abs_c4.lo _abs_c8.lo _abs_c10.lo _abs_c16.lo \
_conjg_c4.lo _conjg_c8.lo _conjg_c10.lo _conjg_c16.lo \
_aint_r4.lo _aint_r8.lo _aint_r10.lo _aint_r16.lo _anint_r4.lo \
_anint_r8.lo _anint_r10.lo _anint_r16.lo
-am__objects_36 = _sign_i4.lo _sign_i8.lo _sign_i16.lo _sign_r4.lo \
+am__objects_37 = _sign_i4.lo _sign_i8.lo _sign_i16.lo _sign_r4.lo \
_sign_r8.lo _sign_r10.lo _sign_r16.lo _dim_i4.lo _dim_i8.lo \
_dim_i16.lo _dim_r4.lo _dim_r8.lo _dim_r10.lo _dim_r16.lo \
_atan2_r4.lo _atan2_r8.lo _atan2_r10.lo _atan2_r16.lo \
_mod_i4.lo _mod_i8.lo _mod_i16.lo _mod_r4.lo _mod_r8.lo \
_mod_r10.lo _mod_r16.lo
-am__objects_37 = misc_specifics.lo
-am__objects_38 = $(am__objects_35) $(am__objects_36) $(am__objects_37) \
+am__objects_38 = misc_specifics.lo
+am__objects_39 = $(am__objects_36) $(am__objects_37) $(am__objects_38) \
dprod_r8.lo f2c_specifics.lo
-am__objects_39 = $(am__objects_1) $(am__objects_31) $(am__objects_32) \
- $(am__objects_33) $(am__objects_34) $(am__objects_38)
-@onestep_FALSE@am_libgfortran_la_OBJECTS = $(am__objects_39)
+am__objects_40 = $(am__objects_1) $(am__objects_32) $(am__objects_33) \
+ $(am__objects_34) $(am__objects_35) $(am__objects_39)
+@onestep_FALSE@am_libgfortran_la_OBJECTS = $(am__objects_40)
@onestep_TRUE@am_libgfortran_la_OBJECTS = libgfortran_c.lo
libgfortran_la_OBJECTS = $(am_libgfortran_la_OBJECTS)
libgfortranbegin_la_LIBADD =
@@ -1355,6 +1371,21 @@ $(srcdir)/generated/pack_c8.c \
$(srcdir)/generated/pack_c10.c \
$(srcdir)/generated/pack_c16.c
+i_unpack_c = \
+$(srcdir)/generated/unpack_i1.c \
+$(srcdir)/generated/unpack_i2.c \
+$(srcdir)/generated/unpack_i4.c \
+$(srcdir)/generated/unpack_i8.c \
+$(srcdir)/generated/unpack_i16.c \
+$(srcdir)/generated/unpack_r4.c \
+$(srcdir)/generated/unpack_r8.c \
+$(srcdir)/generated/unpack_r10.c \
+$(srcdir)/generated/unpack_r16.c \
+$(srcdir)/generated/unpack_c4.c \
+$(srcdir)/generated/unpack_c8.c \
+$(srcdir)/generated/unpack_c10.c \
+$(srcdir)/generated/unpack_c16.c
+
m4_files = m4/iparm.m4 m4/ifunction.m4 m4/iforeach.m4 m4/all.m4 \
m4/any.m4 m4/count.m4 m4/maxloc0.m4 m4/maxloc1.m4 m4/maxval.m4 \
m4/minloc0.m4 m4/minloc1.m4 m4/minval.m4 m4/product.m4 m4/sum.m4 \
@@ -1363,7 +1394,8 @@ m4_files = m4/iparm.m4 m4/ifunction.m4 m4/iforeach.m4 m4/all.m4 \
m4/specific.m4 m4/specific2.m4 m4/head.m4 m4/shape.m4 m4/reshape.m4 \
m4/transpose.m4 m4/eoshift1.m4 m4/eoshift3.m4 m4/exponent.m4 \
m4/fraction.m4 m4/nearest.m4 m4/set_exponent.m4 m4/pow.m4 \
- m4/misc_specifics.m4 m4/rrspacing.m4 m4/spacing.m4 m4/pack.m4
+ m4/misc_specifics.m4 m4/rrspacing.m4 m4/spacing.m4 m4/pack.m4 \
+ m4/unpack.m4
gfor_built_src = $(i_all_c) $(i_any_c) $(i_count_c) $(i_maxloc0_c) \
$(i_maxloc1_c) $(i_maxval_c) $(i_minloc0_c) $(i_minloc1_c) $(i_minval_c) \
@@ -1371,7 +1403,7 @@ gfor_built_src = $(i_all_c) $(i_any_c) $(i_count_c) $(i_maxloc0_c) \
$(i_matmul_c) $(i_matmull_c) $(i_transpose_c) $(i_shape_c) $(i_eoshift1_c) \
$(i_eoshift3_c) $(i_cshift1_c) $(i_reshape_c) $(in_pack_c) $(in_unpack_c) \
$(i_exponent_c) $(i_fraction_c) $(i_nearest_c) $(i_set_exponent_c) \
- $(i_pow_c) $(i_rrspacing_c) $(i_spacing_c) $(i_pack_c) \
+ $(i_pow_c) $(i_rrspacing_c) $(i_spacing_c) $(i_pack_c) $(i_unpack_c) \
selected_int_kind.inc selected_real_kind.inc kinds.h \
kinds.inc c99_protos.inc fpu-target.h
@@ -2061,7 +2093,20 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/unit.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/unix.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/unlink.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/unpack_c10.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/unpack_c16.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/unpack_c4.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/unpack_c8.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/unpack_generic.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/unpack_i1.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/unpack_i16.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/unpack_i2.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/unpack_i4.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/unpack_i8.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/unpack_r10.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/unpack_r16.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/unpack_r4.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/unpack_r8.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/write.Plo@am__quote@
.F90.o:
@@ -4748,6 +4793,97 @@ pack_c16.lo: $(srcdir)/generated/pack_c16.c
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o pack_c16.lo `test -f '$(srcdir)/generated/pack_c16.c' || echo '$(srcdir)/'`$(srcdir)/generated/pack_c16.c
+unpack_i1.lo: $(srcdir)/generated/unpack_i1.c
+@am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT unpack_i1.lo -MD -MP -MF "$(DEPDIR)/unpack_i1.Tpo" -c -o unpack_i1.lo `test -f '$(srcdir)/generated/unpack_i1.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_i1.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/unpack_i1.Tpo" "$(DEPDIR)/unpack_i1.Plo"; else rm -f "$(DEPDIR)/unpack_i1.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$(srcdir)/generated/unpack_i1.c' object='unpack_i1.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o unpack_i1.lo `test -f '$(srcdir)/generated/unpack_i1.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_i1.c
+
+unpack_i2.lo: $(srcdir)/generated/unpack_i2.c
+@am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT unpack_i2.lo -MD -MP -MF "$(DEPDIR)/unpack_i2.Tpo" -c -o unpack_i2.lo `test -f '$(srcdir)/generated/unpack_i2.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_i2.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/unpack_i2.Tpo" "$(DEPDIR)/unpack_i2.Plo"; else rm -f "$(DEPDIR)/unpack_i2.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$(srcdir)/generated/unpack_i2.c' object='unpack_i2.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o unpack_i2.lo `test -f '$(srcdir)/generated/unpack_i2.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_i2.c
+
+unpack_i4.lo: $(srcdir)/generated/unpack_i4.c
+@am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT unpack_i4.lo -MD -MP -MF "$(DEPDIR)/unpack_i4.Tpo" -c -o unpack_i4.lo `test -f '$(srcdir)/generated/unpack_i4.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_i4.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/unpack_i4.Tpo" "$(DEPDIR)/unpack_i4.Plo"; else rm -f "$(DEPDIR)/unpack_i4.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$(srcdir)/generated/unpack_i4.c' object='unpack_i4.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o unpack_i4.lo `test -f '$(srcdir)/generated/unpack_i4.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_i4.c
+
+unpack_i8.lo: $(srcdir)/generated/unpack_i8.c
+@am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT unpack_i8.lo -MD -MP -MF "$(DEPDIR)/unpack_i8.Tpo" -c -o unpack_i8.lo `test -f '$(srcdir)/generated/unpack_i8.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_i8.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/unpack_i8.Tpo" "$(DEPDIR)/unpack_i8.Plo"; else rm -f "$(DEPDIR)/unpack_i8.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$(srcdir)/generated/unpack_i8.c' object='unpack_i8.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o unpack_i8.lo `test -f '$(srcdir)/generated/unpack_i8.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_i8.c
+
+unpack_i16.lo: $(srcdir)/generated/unpack_i16.c
+@am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT unpack_i16.lo -MD -MP -MF "$(DEPDIR)/unpack_i16.Tpo" -c -o unpack_i16.lo `test -f '$(srcdir)/generated/unpack_i16.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_i16.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/unpack_i16.Tpo" "$(DEPDIR)/unpack_i16.Plo"; else rm -f "$(DEPDIR)/unpack_i16.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$(srcdir)/generated/unpack_i16.c' object='unpack_i16.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o unpack_i16.lo `test -f '$(srcdir)/generated/unpack_i16.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_i16.c
+
+unpack_r4.lo: $(srcdir)/generated/unpack_r4.c
+@am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT unpack_r4.lo -MD -MP -MF "$(DEPDIR)/unpack_r4.Tpo" -c -o unpack_r4.lo `test -f '$(srcdir)/generated/unpack_r4.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_r4.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/unpack_r4.Tpo" "$(DEPDIR)/unpack_r4.Plo"; else rm -f "$(DEPDIR)/unpack_r4.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$(srcdir)/generated/unpack_r4.c' object='unpack_r4.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o unpack_r4.lo `test -f '$(srcdir)/generated/unpack_r4.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_r4.c
+
+unpack_r8.lo: $(srcdir)/generated/unpack_r8.c
+@am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT unpack_r8.lo -MD -MP -MF "$(DEPDIR)/unpack_r8.Tpo" -c -o unpack_r8.lo `test -f '$(srcdir)/generated/unpack_r8.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_r8.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/unpack_r8.Tpo" "$(DEPDIR)/unpack_r8.Plo"; else rm -f "$(DEPDIR)/unpack_r8.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$(srcdir)/generated/unpack_r8.c' object='unpack_r8.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o unpack_r8.lo `test -f '$(srcdir)/generated/unpack_r8.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_r8.c
+
+unpack_r10.lo: $(srcdir)/generated/unpack_r10.c
+@am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT unpack_r10.lo -MD -MP -MF "$(DEPDIR)/unpack_r10.Tpo" -c -o unpack_r10.lo `test -f '$(srcdir)/generated/unpack_r10.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_r10.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/unpack_r10.Tpo" "$(DEPDIR)/unpack_r10.Plo"; else rm -f "$(DEPDIR)/unpack_r10.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$(srcdir)/generated/unpack_r10.c' object='unpack_r10.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o unpack_r10.lo `test -f '$(srcdir)/generated/unpack_r10.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_r10.c
+
+unpack_r16.lo: $(srcdir)/generated/unpack_r16.c
+@am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT unpack_r16.lo -MD -MP -MF "$(DEPDIR)/unpack_r16.Tpo" -c -o unpack_r16.lo `test -f '$(srcdir)/generated/unpack_r16.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_r16.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/unpack_r16.Tpo" "$(DEPDIR)/unpack_r16.Plo"; else rm -f "$(DEPDIR)/unpack_r16.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$(srcdir)/generated/unpack_r16.c' object='unpack_r16.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o unpack_r16.lo `test -f '$(srcdir)/generated/unpack_r16.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_r16.c
+
+unpack_c4.lo: $(srcdir)/generated/unpack_c4.c
+@am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT unpack_c4.lo -MD -MP -MF "$(DEPDIR)/unpack_c4.Tpo" -c -o unpack_c4.lo `test -f '$(srcdir)/generated/unpack_c4.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_c4.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/unpack_c4.Tpo" "$(DEPDIR)/unpack_c4.Plo"; else rm -f "$(DEPDIR)/unpack_c4.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$(srcdir)/generated/unpack_c4.c' object='unpack_c4.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o unpack_c4.lo `test -f '$(srcdir)/generated/unpack_c4.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_c4.c
+
+unpack_c8.lo: $(srcdir)/generated/unpack_c8.c
+@am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT unpack_c8.lo -MD -MP -MF "$(DEPDIR)/unpack_c8.Tpo" -c -o unpack_c8.lo `test -f '$(srcdir)/generated/unpack_c8.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_c8.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/unpack_c8.Tpo" "$(DEPDIR)/unpack_c8.Plo"; else rm -f "$(DEPDIR)/unpack_c8.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$(srcdir)/generated/unpack_c8.c' object='unpack_c8.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o unpack_c8.lo `test -f '$(srcdir)/generated/unpack_c8.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_c8.c
+
+unpack_c10.lo: $(srcdir)/generated/unpack_c10.c
+@am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT unpack_c10.lo -MD -MP -MF "$(DEPDIR)/unpack_c10.Tpo" -c -o unpack_c10.lo `test -f '$(srcdir)/generated/unpack_c10.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_c10.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/unpack_c10.Tpo" "$(DEPDIR)/unpack_c10.Plo"; else rm -f "$(DEPDIR)/unpack_c10.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$(srcdir)/generated/unpack_c10.c' object='unpack_c10.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o unpack_c10.lo `test -f '$(srcdir)/generated/unpack_c10.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_c10.c
+
+unpack_c16.lo: $(srcdir)/generated/unpack_c16.c
+@am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT unpack_c16.lo -MD -MP -MF "$(DEPDIR)/unpack_c16.Tpo" -c -o unpack_c16.lo `test -f '$(srcdir)/generated/unpack_c16.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_c16.c; \
+@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/unpack_c16.Tpo" "$(DEPDIR)/unpack_c16.Plo"; else rm -f "$(DEPDIR)/unpack_c16.Tpo"; exit 1; fi
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='$(srcdir)/generated/unpack_c16.c' object='unpack_c16.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@ $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o unpack_c16.lo `test -f '$(srcdir)/generated/unpack_c16.c' || echo '$(srcdir)/'`$(srcdir)/generated/unpack_c16.c
+
close.lo: io/close.c
@am__fastdepCC_TRUE@ if $(LIBTOOL) --tag=CC --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT close.lo -MD -MP -MF "$(DEPDIR)/close.Tpo" -c -o close.lo `test -f 'io/close.c' || echo '$(srcdir)/'`io/close.c; \
@am__fastdepCC_TRUE@ then mv -f "$(DEPDIR)/close.Tpo" "$(DEPDIR)/close.Plo"; else rm -f "$(DEPDIR)/close.Tpo"; exit 1; fi
@@ -5702,6 +5838,9 @@ fpu-target.h: $(srcdir)/$(FPU_HOST_HEADER)
@MAINTAINER_MODE_TRUE@$(i_pack_c): m4/pack.m4 $(I_M4_DEPS)
@MAINTAINER_MODE_TRUE@ $(M4) -Dfile=$@ -I$(srcdir)/m4 pack.m4 > $@
+@MAINTAINER_MODE_TRUE@$(i_unpack_c): m4/unpack.m4 $(I_M4_DEPS)
+@MAINTAINER_MODE_TRUE@ $(M4) -Dfile=$@ -I$(srcdir)/m4 unpack.m4 > $@
+
@MAINTAINER_MODE_TRUE@$(gfor_built_specific_src): m4/specific.m4 m4/head.m4
@MAINTAINER_MODE_TRUE@ $(M4) -Dfile=$@ -I$(srcdir)/m4 specific.m4 > $@
diff --git a/libgfortran/generated/unpack_c10.c b/libgfortran/generated/unpack_c10.c
new file mode 100644
index 0000000..e6f3ecf
--- /dev/null
+++ b/libgfortran/generated/unpack_c10.c
@@ -0,0 +1,338 @@
+/* Specific implementation of the UNPACK intrinsic
+ Copyright 2008 Free Software Foundation, Inc.
+ Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on
+ unpack_generic.c by Paul Brook <paul@nowt.org>.
+
+This file is part of the GNU Fortran 95 runtime library (libgfortran).
+
+Libgfortran is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public
+License as published by the Free Software Foundation; either
+version 2 of the License, or (at your option) any later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+Ligbfortran is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public
+License along with libgfortran; see the file COPYING. If not,
+write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#include "libgfortran.h"
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+
+
+#if defined (HAVE_GFC_COMPLEX_10)
+
+void
+unpack0_c10 (gfc_array_c10 *ret, const gfc_array_c10 *vector,
+ const gfc_array_l1 *mask, const GFC_COMPLEX_10 *fptr)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_COMPLEX_10 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_COMPLEX_10 *vptr;
+ /* Value for field, this is constant. */
+ const GFC_COMPLEX_10 fval = *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+ /* Do not convert a NULL pointer as we use test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_COMPLEX_10));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = fval;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+void
+unpack1_c10 (gfc_array_c10 *ret, const gfc_array_c10 *vector,
+ const gfc_array_l1 *mask, const gfc_array_c10 *field)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_COMPLEX_10 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_COMPLEX_10 *vptr;
+ /* f.* indicates the field array. */
+ index_type fstride[GFC_MAX_DIMENSIONS];
+ index_type fstride0;
+ const GFC_COMPLEX_10 *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+ /* Do not convert a NULL pointer as we use test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_COMPLEX_10));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (fstride[0] == 0)
+ fstride[0] = 1;
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ fstride0 = fstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ fptr = field->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = *fptr;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ fptr += fstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ fptr -= fstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ fptr += fstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+#endif
+
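The two routines above (and their twelve siblings in the files that follow) all walk the result with the same counter-and-stride scheme: step dimension 0 for every element, and when a dimension's count reaches its extent, rewind that dimension and carry into the next one. The standalone demo below, with invented data, applies exactly that scheme to a plain 2x3 column-major int array to show what unpack0 computes; it is an illustration of the technique, not code from the patch.

#include <stdio.h>

#define RANK 2

int
main (void)
{
  /* A 2x3 result stored column-major, as Fortran would store it.  */
  int extent[RANK] = { 2, 3 };
  int stride[RANK] = { 1, 2 };
  int mask[6]   = { 1, 0, 0, 1, 1, 0 };
  int vector[3] = { 10, 20, 30 };
  int field     = -1;
  int ret[6];

  int count[RANK] = { 0, 0 };
  int roff = 0, moff = 0, v = 0;
  int done = 0;

  while (!done)
    {
      /* Take from VECTOR where MASK is true, otherwise from FIELD.  */
      ret[roff] = mask[moff] ? vector[v++] : field;

      /* Advance the innermost dimension; on overflow, rewind it and
         carry into the next dimension, just like the generated loop.  */
      roff += stride[0];
      moff += stride[0];
      count[0]++;
      for (int n = 0; count[n] == extent[n]; )
        {
          count[n] = 0;
          roff -= stride[n] * extent[n];
          moff -= stride[n] * extent[n];
          n++;
          if (n >= RANK)
            {
              done = 1;
              break;
            }
          count[n]++;
          roff += stride[n];
          moff += stride[n];
        }
    }

  for (int i = 0; i < 6; i++)
    printf ("%d ", ret[i]);   /* prints: 10 -1 -1 20 30 -1 */
  printf ("\n");
  return 0;
}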
diff --git a/libgfortran/generated/unpack_c16.c b/libgfortran/generated/unpack_c16.c
new file mode 100644
index 0000000..2d82a10
--- /dev/null
+++ b/libgfortran/generated/unpack_c16.c
@@ -0,0 +1,338 @@
+/* Specific implementation of the UNPACK intrinsic
+ Copyright 2008 Free Software Foundation, Inc.
+ Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on
+ unpack_generic.c by Paul Brook <paul@nowt.org>.
+
+This file is part of the GNU Fortran 95 runtime library (libgfortran).
+
+Libgfortran is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public
+License as published by the Free Software Foundation; either
+version 2 of the License, or (at your option) any later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+Ligbfortran is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public
+License along with libgfortran; see the file COPYING. If not,
+write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#include "libgfortran.h"
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+
+
+#if defined (HAVE_GFC_COMPLEX_16)
+
+void
+unpack0_c16 (gfc_array_c16 *ret, const gfc_array_c16 *vector,
+ const gfc_array_l1 *mask, const GFC_COMPLEX_16 *fptr)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_COMPLEX_16 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_COMPLEX_16 *vptr;
+ /* Value for field, this is constant. */
+ const GFC_COMPLEX_16 fval = *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+ /* Do not convert a NULL pointer as we use test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_COMPLEX_16));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = fval;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+void
+unpack1_c16 (gfc_array_c16 *ret, const gfc_array_c16 *vector,
+ const gfc_array_l1 *mask, const gfc_array_c16 *field)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_COMPLEX_16 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_COMPLEX_16 *vptr;
+ /* f.* indicates the field array. */
+ index_type fstride[GFC_MAX_DIMENSIONS];
+ index_type fstride0;
+ const GFC_COMPLEX_16 *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+ /* Do not convert a NULL pointer as we use test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_COMPLEX_16));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (fstride[0] == 0)
+ fstride[0] = 1;
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ fstride0 = fstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ fptr = field->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = *fptr;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ fptr += fstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ fptr -= fstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ fptr += fstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+#endif
+
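A detail shared by every variant in this patch is the mask handling: the stride of MASK is multiplied by mask_kind and the data pointer is rebased with GFOR_POINTER_TO_L1, so that a single GFC_LOGICAL_1 comparison works for any logical kind on either endianness. The macro's definition is not part of this diff; the standalone demo below, with invented data, only illustrates the idea of reading a wider logical through a one-byte view.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int
main (void)
{
  int32_t mask4[3] = { 1, 0, 1 };       /* a LOGICAL(KIND=4) mask */
  const int kind = sizeof mask4[0];

  /* Find which byte of a kind-4 logical holds its low-order bits.  */
  int32_t probe = 1;
  unsigned char bytes[4];
  memcpy (bytes, &probe, sizeof probe);
  int offset = bytes[0] ? 0 : kind - 1; /* 0 little endian, 3 big endian */

  /* Walk the mask through a byte pointer, stepping kind bytes per
     logical element -- the same effect as scaling mstride by mask_kind.  */
  const unsigned char *m1 = (const unsigned char *) mask4 + offset;
  for (int i = 0; i < 3; i++)
    printf ("%d ", m1[i * kind] != 0);  /* prints: 1 0 1 */
  printf ("\n");
  return 0;
}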
diff --git a/libgfortran/generated/unpack_c4.c b/libgfortran/generated/unpack_c4.c
new file mode 100644
index 0000000..472ce48
--- /dev/null
+++ b/libgfortran/generated/unpack_c4.c
@@ -0,0 +1,338 @@
+/* Specific implementation of the UNPACK intrinsic
+ Copyright 2008 Free Software Foundation, Inc.
+ Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on
+ unpack_generic.c by Paul Brook <paul@nowt.org>.
+
+This file is part of the GNU Fortran 95 runtime library (libgfortran).
+
+Libgfortran is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public
+License as published by the Free Software Foundation; either
+version 2 of the License, or (at your option) any later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+Ligbfortran is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public
+License along with libgfortran; see the file COPYING. If not,
+write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#include "libgfortran.h"
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+
+
+#if defined (HAVE_GFC_COMPLEX_4)
+
+void
+unpack0_c4 (gfc_array_c4 *ret, const gfc_array_c4 *vector,
+ const gfc_array_l1 *mask, const GFC_COMPLEX_4 *fptr)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_COMPLEX_4 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_COMPLEX_4 *vptr;
+ /* Value for field, this is constant. */
+ const GFC_COMPLEX_4 fval = *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+ /* Do not convert a NULL pointer as we use test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_COMPLEX_4));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = fval;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+void
+unpack1_c4 (gfc_array_c4 *ret, const gfc_array_c4 *vector,
+ const gfc_array_l1 *mask, const gfc_array_c4 *field)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_COMPLEX_4 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_COMPLEX_4 *vptr;
+ /* f.* indicates the field array. */
+ index_type fstride[GFC_MAX_DIMENSIONS];
+ index_type fstride0;
+ const GFC_COMPLEX_4 *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+ /* Do not convert a NULL pointer as we use test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_COMPLEX_4));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (fstride[0] == 0)
+ fstride[0] = 1;
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ fstride0 = fstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ fptr = field->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = *fptr;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ fptr += fstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ fptr -= fstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ fptr += fstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+#endif
+
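The "ret->data == NULL" branch in each routine derives the result's extents and column-major strides from MASK's bounds, flags the result as empty if any extent is non-positive, and allocates rs elements. The standalone demo below, with invented bounds, reproduces that arithmetic outside of libgfortran.

#include <stdio.h>

int
main (void)
{
  /* Pretend MASK was declared as mask(2:4, 1:2, 5:7); bounds invented.  */
  int lbound[3] = { 2, 1, 5 };
  int ubound[3] = { 4, 2, 7 };

  long rs = 1;                  /* running size, also the next stride */
  int empty = 0;
  for (int n = 0; n < 3; n++)
    {
      long extent = ubound[n] - lbound[n] + 1;
      printf ("dim %d: extent %ld stride %ld\n", n, extent, rs);
      empty = empty || extent <= 0;     /* zero extent => nothing to copy */
      rs *= extent;
    }
  printf ("%s, total elements %ld\n", empty ? "empty" : "non-empty", rs);
  /* prints strides 1, 3, 6 and a total of 18 elements */
  return 0;
}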
diff --git a/libgfortran/generated/unpack_c8.c b/libgfortran/generated/unpack_c8.c
new file mode 100644
index 0000000..62116b7
--- /dev/null
+++ b/libgfortran/generated/unpack_c8.c
@@ -0,0 +1,338 @@
+/* Specific implementation of the UNPACK intrinsic
+ Copyright 2008 Free Software Foundation, Inc.
+ Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on
+ unpack_generic.c by Paul Brook <paul@nowt.org>.
+
+This file is part of the GNU Fortran 95 runtime library (libgfortran).
+
+Libgfortran is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public
+License as published by the Free Software Foundation; either
+version 2 of the License, or (at your option) any later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+Ligbfortran is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public
+License along with libgfortran; see the file COPYING. If not,
+write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#include "libgfortran.h"
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+
+
+#if defined (HAVE_GFC_COMPLEX_8)
+
+void
+unpack0_c8 (gfc_array_c8 *ret, const gfc_array_c8 *vector,
+ const gfc_array_l1 *mask, const GFC_COMPLEX_8 *fptr)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_COMPLEX_8 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_COMPLEX_8 *vptr;
+ /* Value for field, this is constant. */
+ const GFC_COMPLEX_8 fval = *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+ /* Do not convert a NULL pointer as we use test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_COMPLEX_8));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = fval;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+void
+unpack1_c8 (gfc_array_c8 *ret, const gfc_array_c8 *vector,
+ const gfc_array_l1 *mask, const gfc_array_c8 *field)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_COMPLEX_8 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_COMPLEX_8 *vptr;
+ /* f.* indicates the field array. */
+ index_type fstride[GFC_MAX_DIMENSIONS];
+ index_type fstride0;
+ const GFC_COMPLEX_8 *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+ /* Do not convert a NULL pointer as we use test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_COMPLEX_8));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (fstride[0] == 0)
+ fstride[0] = 1;
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ fstride0 = fstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ fptr = field->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = *fptr;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ fptr += fstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ fptr -= fstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ fptr += fstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+#endif
+
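Each generated file exports two entry points: unpack1_* takes FIELD as an array, while unpack0_* takes a pointer to one FIELD value and broadcasts it wherever the mask is false. The flat rank-1 sketch below (plain C, no descriptors; the function names are invented for illustration) shows the UNPACK semantics that both loops implement.

#include <stdio.h>

/* UNPACK (VECTOR, MASK, FIELD) with FIELD given as an array: where the mask
   is true the next vector element is consumed, otherwise the corresponding
   field element is copied.  */
static void
unpack_field_array (int *ret, const int *vector, const unsigned char *mask,
                    const int *field, int n)
{
  int v = 0;
  for (int i = 0; i < n; i++)
    ret[i] = mask[i] ? vector[v++] : field[i];
}

/* The unpack0 variant: FIELD is a single scalar, broadcast to every
   position where the mask is false.  */
static void
unpack_field_scalar (int *ret, const int *vector, const unsigned char *mask,
                     int field, int n)
{
  int v = 0;
  for (int i = 0; i < n; i++)
    ret[i] = mask[i] ? vector[v++] : field;
}

int
main (void)
{
  int vector[] = { 1, 2, 3 };
  unsigned char mask[] = { 1, 0, 1, 0, 1 };
  int field[] = { 10, 20, 30, 40, 50 };
  int r1[5], r2[5];

  unpack_field_array (r1, vector, mask, field, 5);
  unpack_field_scalar (r2, vector, mask, 0, 5);

  for (int i = 0; i < 5; i++)
    printf ("%d %d\n", r1[i], r2[i]);   /* 1 1 / 20 0 / 2 2 / 40 0 / 3 3 */
  return 0;
}
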
diff --git a/libgfortran/generated/unpack_i1.c b/libgfortran/generated/unpack_i1.c
new file mode 100644
index 0000000..46a9d4e
--- /dev/null
+++ b/libgfortran/generated/unpack_i1.c
@@ -0,0 +1,338 @@
+/* Specific implementation of the UNPACK intrinsic
+ Copyright 2008 Free Software Foundation, Inc.
+ Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on
+ unpack_generic.c by Paul Brook <paul@nowt.org>.
+
+This file is part of the GNU Fortran 95 runtime library (libgfortran).
+
+Libgfortran is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public
+License as published by the Free Software Foundation; either
+version 2 of the License, or (at your option) any later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+Libgfortran is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public
+License along with libgfortran; see the file COPYING. If not,
+write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#include "libgfortran.h"
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+
+
+#if defined (HAVE_GFC_INTEGER_1)
+
+void
+unpack0_i1 (gfc_array_i1 *ret, const gfc_array_i1 *vector,
+ const gfc_array_l1 *mask, const GFC_INTEGER_1 *fptr)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_INTEGER_1 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_INTEGER_1 *vptr;
+ /* Value for field, this is constant. */
+ const GFC_INTEGER_1 fval = *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+ /* Do not convert a NULL pointer as we use test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_INTEGER_1));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = fval;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+void
+unpack1_i1 (gfc_array_i1 *ret, const gfc_array_i1 *vector,
+ const gfc_array_l1 *mask, const gfc_array_i1 *field)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_INTEGER_1 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_INTEGER_1 *vptr;
+ /* f.* indicates the field array. */
+ index_type fstride[GFC_MAX_DIMENSIONS];
+ index_type fstride0;
+ const GFC_INTEGER_1 *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+ /* Do not convert a NULL pointer as we use test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_INTEGER_1));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (fstride[0] == 0)
+ fstride[0] = 1;
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ fstride0 = fstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ fptr = field->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = *fptr;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ fptr += fstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ fptr -= fstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ fptr += fstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+#endif
+
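The mask is always traversed through a GFC_LOGICAL_1 pointer: mstride is scaled by the mask kind so each step jumps a whole logical element, and GFOR_POINTER_TO_L1 aims the pointer at the byte that carries the value. The sketch below illustrates that idea only; the byte offsets and the run-time endianness probe are assumptions for illustration, not the macro's actual definition in libgfortran.h.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Walk an array of 4-byte logicals through a byte pointer, inspecting only
   the byte that carries the 0/1 value; its position inside each element
   depends on byte order.  The real library hides this behind
   GFOR_POINTER_TO_L1 plus the mstride scaling.  */
static int
count_true (const int32_t *mask, int n)
{
  const unsigned char *p = (const unsigned char *) mask;
  int one = 1;
  int little_endian = *(unsigned char *) &one;   /* run-time endianness probe */
  ptrdiff_t offset = little_endian ? 0 : (ptrdiff_t) sizeof (int32_t) - 1;
  ptrdiff_t stride = sizeof (int32_t);           /* like mstride = stride * mask_kind */
  int trues = 0;
  int i;

  p += offset;
  for (i = 0; i < n; i++, p += stride)
    if (*p)
      trues++;
  return trues;
}

int
main (void)
{
  int32_t mask[] = { 1, 0, 1, 1, 0 };
  printf ("%d\n", count_true (mask, 5));   /* prints 3 */
  return 0;
}
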
diff --git a/libgfortran/generated/unpack_i16.c b/libgfortran/generated/unpack_i16.c
new file mode 100644
index 0000000..0fbd744
--- /dev/null
+++ b/libgfortran/generated/unpack_i16.c
@@ -0,0 +1,338 @@
+/* Specific implementation of the UNPACK intrinsic
+ Copyright 2008 Free Software Foundation, Inc.
+ Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on
+ unpack_generic.c by Paul Brook <paul@nowt.org>.
+
+This file is part of the GNU Fortran 95 runtime library (libgfortran).
+
+Libgfortran is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public
+License as published by the Free Software Foundation; either
+version 2 of the License, or (at your option) any later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+Libgfortran is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public
+License along with libgfortran; see the file COPYING. If not,
+write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#include "libgfortran.h"
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+
+
+#if defined (HAVE_GFC_INTEGER_16)
+
+void
+unpack0_i16 (gfc_array_i16 *ret, const gfc_array_i16 *vector,
+ const gfc_array_l1 *mask, const GFC_INTEGER_16 *fptr)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_INTEGER_16 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_INTEGER_16 *vptr;
+ /* Value for field, this is constant. */
+ const GFC_INTEGER_16 fval = *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+ /* Do not convert a NULL pointer as we use test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_INTEGER_16));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = fval;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+void
+unpack1_i16 (gfc_array_i16 *ret, const gfc_array_i16 *vector,
+ const gfc_array_l1 *mask, const gfc_array_i16 *field)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_INTEGER_16 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_INTEGER_16 *vptr;
+ /* f.* indicates the field array. */
+ index_type fstride[GFC_MAX_DIMENSIONS];
+ index_type fstride0;
+ const GFC_INTEGER_16 *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+ /* Do not convert a NULL pointer as we use test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_INTEGER_16));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (fstride[0] == 0)
+ fstride[0] = 1;
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ fstride0 = fstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ fptr = field->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = *fptr;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ fptr += fstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ fptr -= fstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ fptr += fstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+#endif
+
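When ret->data is NULL the routine builds the return descriptor itself: zero-based bounds taken from the mask's shape, column-major strides accumulated in rs, and a single allocation of rs elements. The same arithmetic is shown in isolation below with invented names (build_strides is not a library function).

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* Compute column-major strides for the given extents, as the generated
   routines do when they populate the return descriptor, and return the
   total number of elements to allocate.  */
static size_t
build_strides (const ptrdiff_t *extent, ptrdiff_t *stride, int dim)
{
  size_t rs = 1;
  int n;
  for (n = 0; n < dim; n++)
    {
      stride[n] = rs;      /* ret->dim[n].stride = rs */
      rs *= extent[n];     /* rs *= extent[n]         */
    }
  return rs;               /* elements to allocate    */
}

int
main (void)
{
  ptrdiff_t extent[] = { 4, 3, 2 };     /* shape taken from the mask */
  ptrdiff_t stride[3];
  size_t total = build_strides (extent, stride, 3);
  double *data = malloc (total * sizeof (double));

  printf ("strides: %td %td %td, elements: %zu\n",
          stride[0], stride[1], stride[2], total);   /* 1 4 12, 24 */
  free (data);
  return 0;
}
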
diff --git a/libgfortran/generated/unpack_i2.c b/libgfortran/generated/unpack_i2.c
new file mode 100644
index 0000000..096c7858
--- /dev/null
+++ b/libgfortran/generated/unpack_i2.c
@@ -0,0 +1,338 @@
+/* Specific implementation of the UNPACK intrinsic
+ Copyright 2008 Free Software Foundation, Inc.
+ Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on
+ unpack_generic.c by Paul Brook <paul@nowt.org>.
+
+This file is part of the GNU Fortran 95 runtime library (libgfortran).
+
+Libgfortran is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public
+License as published by the Free Software Foundation; either
+version 2 of the License, or (at your option) any later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+Libgfortran is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public
+License along with libgfortran; see the file COPYING. If not,
+write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#include "libgfortran.h"
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+
+
+#if defined (HAVE_GFC_INTEGER_2)
+
+void
+unpack0_i2 (gfc_array_i2 *ret, const gfc_array_i2 *vector,
+ const gfc_array_l1 *mask, const GFC_INTEGER_2 *fptr)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_INTEGER_2 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_INTEGER_2 *vptr;
+ /* Value for field, this is constant. */
+ const GFC_INTEGER_2 fval = *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+ /* Do not convert a NULL pointer as we use test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_INTEGER_2));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = fval;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+void
+unpack1_i2 (gfc_array_i2 *ret, const gfc_array_i2 *vector,
+ const gfc_array_l1 *mask, const gfc_array_i2 *field)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_INTEGER_2 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_INTEGER_2 *vptr;
+ /* f.* indicates the field array. */
+ index_type fstride[GFC_MAX_DIMENSIONS];
+ index_type fstride0;
+ const GFC_INTEGER_2 *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+ /* Do not convert a NULL pointer as we use test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_INTEGER_2));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (fstride[0] == 0)
+ fstride[0] = 1;
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ fstride0 = fstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ fptr = field->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = *fptr;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ fptr += fstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ fptr -= fstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ fptr += fstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+#endif
+
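Each routine also ORs "extent[n] <= 0" over all dimensions into the empty flag; if any extent is empty there is nothing to copy and the function returns before touching vector or mask. A minimal illustration of that guard, with an invented helper name:

#include <stdio.h>
#include <stddef.h>

/* The generated routines accumulate "extent <= 0" over all dimensions;
   one empty dimension makes the whole array empty and the copy loop is
   skipped entirely.  */
static int
array_is_empty (const ptrdiff_t *extent, int dim)
{
  int empty = 0;
  int n;
  for (n = 0; n < dim; n++)
    empty = empty || extent[n] <= 0;
  return empty;
}

int
main (void)
{
  ptrdiff_t shape1[] = { 3, 0, 5 };   /* zero-sized middle dimension */
  ptrdiff_t shape2[] = { 3, 2, 5 };
  printf ("%d %d\n", array_is_empty (shape1, 3),
          array_is_empty (shape2, 3));   /* prints 1 0 */
  return 0;
}
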
diff --git a/libgfortran/generated/unpack_i4.c b/libgfortran/generated/unpack_i4.c
new file mode 100644
index 0000000..08f197c
--- /dev/null
+++ b/libgfortran/generated/unpack_i4.c
@@ -0,0 +1,338 @@
+/* Specific implementation of the UNPACK intrinsic
+ Copyright 2008 Free Software Foundation, Inc.
+ Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on
+ unpack_generic.c by Paul Brook <paul@nowt.org>.
+
+This file is part of the GNU Fortran 95 runtime library (libgfortran).
+
+Libgfortran is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public
+License as published by the Free Software Foundation; either
+version 2 of the License, or (at your option) any later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+Libgfortran is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public
+License along with libgfortran; see the file COPYING. If not,
+write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#include "libgfortran.h"
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+
+
+#if defined (HAVE_GFC_INTEGER_4)
+
+void
+unpack0_i4 (gfc_array_i4 *ret, const gfc_array_i4 *vector,
+ const gfc_array_l1 *mask, const GFC_INTEGER_4 *fptr)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_INTEGER_4 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_INTEGER_4 *vptr;
+ /* Value for field, this is constant. */
+ const GFC_INTEGER_4 fval = *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+ /* Do not convert a NULL pointer as we use test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_INTEGER_4));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = fval;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+void
+unpack1_i4 (gfc_array_i4 *ret, const gfc_array_i4 *vector,
+ const gfc_array_l1 *mask, const gfc_array_i4 *field)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_INTEGER_4 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_INTEGER_4 *vptr;
+ /* f.* indicates the field array. */
+ index_type fstride[GFC_MAX_DIMENSIONS];
+ index_type fstride0;
+ const GFC_INTEGER_4 *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+ /* Do not convert a NULL pointer as we use test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_INTEGER_4));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (fstride[0] == 0)
+ fstride[0] = 1;
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ fstride0 = fstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ fptr = field->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = *fptr;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ fptr += fstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ fptr -= fstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ fptr += fstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+#endif
+
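The point of generating one file per type (PR libfortran/32972) is that the size-generic routine has to copy each element through a run-time size, which usually means a memcpy per element, while the specific code can use a typed assignment such as *rptr = *vptr that compiles to a single load and store. The sketch below contrasts the two copy styles; the function names are hypothetical and the typed version merely stands in for what the unpack_i4.c-style loop does.

#include <stdio.h>
#include <string.h>

/* Generic element copy: the element size is a run-time value, so every
   element goes through memcpy, as the size-generic unpack must do.  */
static void
copy_generic (void *dest, const void *src, int n, size_t size)
{
  char *d = dest;
  const char *s = src;
  int i;
  for (i = 0; i < n; i++)
    memcpy (d + i * size, s + i * size, size);
}

/* Specific element copy: the type is known at compile time, so the loop
   body is a single typed assignment.  */
static void
copy_typed (int *dest, const int *src, int n)
{
  int i;
  for (i = 0; i < n; i++)
    dest[i] = src[i];
}

int
main (void)
{
  int a[4] = { 1, 2, 3, 4 }, b[4], c[4];
  copy_generic (b, a, 4, sizeof (int));
  copy_typed (c, a, 4);
  printf ("%d %d\n", b[3], c[3]);   /* prints 4 4 */
  return 0;
}
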
diff --git a/libgfortran/generated/unpack_i8.c b/libgfortran/generated/unpack_i8.c
new file mode 100644
index 0000000..0847c1f
--- /dev/null
+++ b/libgfortran/generated/unpack_i8.c
@@ -0,0 +1,338 @@
+/* Specific implementation of the UNPACK intrinsic
+ Copyright 2008 Free Software Foundation, Inc.
+ Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on
+ unpack_generic.c by Paul Brook <paul@nowt.org>.
+
+This file is part of the GNU Fortran 95 runtime library (libgfortran).
+
+Libgfortran is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public
+License as published by the Free Software Foundation; either
+version 2 of the License, or (at your option) any later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+Libgfortran is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public
+License along with libgfortran; see the file COPYING. If not,
+write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#include "libgfortran.h"
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+
+
+#if defined (HAVE_GFC_INTEGER_8)
+
+void
+unpack0_i8 (gfc_array_i8 *ret, const gfc_array_i8 *vector,
+ const gfc_array_l1 *mask, const GFC_INTEGER_8 *fptr)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_INTEGER_8 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_INTEGER_8 *vptr;
+ /* Value for field, this is constant. */
+ const GFC_INTEGER_8 fval = *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+ /* Do not convert a NULL pointer as we use test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_INTEGER_8));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = fval;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+void
+unpack1_i8 (gfc_array_i8 *ret, const gfc_array_i8 *vector,
+ const gfc_array_l1 *mask, const gfc_array_i8 *field)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_INTEGER_8 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_INTEGER_8 *vptr;
+ /* f.* indicates the field array. */
+ index_type fstride[GFC_MAX_DIMENSIONS];
+ index_type fstride0;
+ const GFC_INTEGER_8 *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+ /* Do not convert a NULL pointer as we use test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_INTEGER_8));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (fstride[0] == 0)
+ fstride[0] = 1;
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ fstride0 = fstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ fptr = field->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = *fptr;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ fptr += fstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ fptr -= fstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ fptr += fstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+#endif
+
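unpack_generic.c, changed in the same commit, selects one of these specific routines from the element type and kind of the result and falls back to the byte-wise loop otherwise. The switch below is a hypothetical sketch of that dispatch pattern only, not the code in unpack_generic.c; the enum, tags and helpers are all invented.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Hypothetical type tags and copy helpers standing in for the real
   GFC_DTYPE machinery and the unpack1_i4 / unpack1_i8 entry points.  */
enum elem_type { ELEM_INT4, ELEM_INT8, ELEM_OTHER };

static void
copy_i4 (void *d, const void *s, int n)          /* stands in for unpack1_i4 */
{
  memcpy (d, s, (size_t) n * sizeof (int32_t));
}

static void
copy_i8 (void *d, const void *s, int n)          /* stands in for unpack1_i8 */
{
  memcpy (d, s, (size_t) n * sizeof (int64_t));
}

static void
copy_bytes (void *d, const void *s, int n, size_t size)   /* size-generic fallback */
{
  memcpy (d, s, (size_t) n * size);
}

static void
dispatch_copy (void *d, const void *s, int n, enum elem_type t, size_t size)
{
  switch (t)
    {
    case ELEM_INT4:
      copy_i4 (d, s, n);           /* specific routine: typed inner loop */
      break;
    case ELEM_INT8:
      copy_i8 (d, s, n);
      break;
    default:
      copy_bytes (d, s, n, size);  /* no specific version: byte-wise path */
      break;
    }
}

int
main (void)
{
  int64_t a[2] = { 7, 8 }, b[2] = { 0, 0 };
  dispatch_copy (b, a, 2, ELEM_INT8, sizeof (int64_t));
  printf ("%lld %lld\n", (long long) b[0], (long long) b[1]);   /* prints 7 8 */
  return 0;
}
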
diff --git a/libgfortran/generated/unpack_r10.c b/libgfortran/generated/unpack_r10.c
new file mode 100644
index 0000000..694d2c5
--- /dev/null
+++ b/libgfortran/generated/unpack_r10.c
@@ -0,0 +1,338 @@
+/* Specific implementation of the UNPACK intrinsic
+ Copyright 2008 Free Software Foundation, Inc.
+ Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on
+ unpack_generic.c by Paul Brook <paul@nowt.org>.
+
+This file is part of the GNU Fortran 95 runtime library (libgfortran).
+
+Libgfortran is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public
+License as published by the Free Software Foundation; either
+version 2 of the License, or (at your option) any later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combine
+executable.)
+
+Libgfortran is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public
+License along with libgfortran; see the file COPYING. If not,
+write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#include "libgfortran.h"
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+
+
+#if defined (HAVE_GFC_REAL_10)
+
+void
+unpack0_r10 (gfc_array_r10 *ret, const gfc_array_r10 *vector,
+ const gfc_array_l1 *mask, const GFC_REAL_10 *fptr)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_REAL_10 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_REAL_10 *vptr;
+ /* Value for field, this is constant. */
+ const GFC_REAL_10 fval = *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+ /* Do not convert a NULL pointer as we use test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_REAL_10));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = fval;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+void
+unpack1_r10 (gfc_array_r10 *ret, const gfc_array_r10 *vector,
+ const gfc_array_l1 *mask, const gfc_array_r10 *field)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_REAL_10 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_REAL_10 *vptr;
+ /* f.* indicates the field array. */
+ index_type fstride[GFC_MAX_DIMENSIONS];
+ index_type fstride0;
+ const GFC_REAL_10 *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+ /* Do not convert a NULL pointer as we use test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_REAL_10));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (fstride[0] == 0)
+ fstride[0] = 1;
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ fstride0 = fstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ fptr = field->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = *fptr;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ fptr += fstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ fptr -= fstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ fptr += fstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+#endif
+
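All of the generated kernels above (and below) traverse the result with the same counter-rollover scheme: the innermost dimension is walked with a flat pointer, and when count[n] reaches extent[n] the pointer is rewound by stride[n] * extent[n] and the carry moves to the next dimension. A minimal standalone sketch of just that traversal, for a hypothetical rank-2 column-major array (illustrative names only, not libgfortran code):

#include <stdio.h>

/* Walk a rank-2 array of the given extents using explicit strides and
   the same rollover logic as the generated unpack loops.  */
void
walk (const double *base, const int extent[2], const int stride[2])
{
  int count[2] = { 0, 0 };
  const double *p = base;
  int n;

  while (p)
    {
      printf ("%g\n", *p);
      /* Advance to the next element along dimension 0.  */
      p += stride[0];
      count[0]++;
      n = 0;
      while (count[n] == extent[n])
        {
          /* End of this dimension: rewind it and carry into the next.  */
          count[n] = 0;
          p -= stride[n] * extent[n];
          n++;
          if (n >= 2)
            {
              p = NULL;   /* Whole array visited.  */
              break;
            }
          count[n]++;
          p += stride[n];
        }
    }
}

int
main (void)
{
  double a[6] = { 1, 2, 3, 4, 5, 6 };   /* Column-major 3x2.  */
  int extent[2] = { 3, 2 };
  int stride[2] = { 1, 3 };
  walk (a, extent, stride);
  return 0;
}

Run on this 3x2 example it prints the six elements in column-major order, which is the order in which the unpack kernels consume MASK and fill the result.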
diff --git a/libgfortran/generated/unpack_r16.c b/libgfortran/generated/unpack_r16.c
new file mode 100644
index 0000000..65121c1
--- /dev/null
+++ b/libgfortran/generated/unpack_r16.c
@@ -0,0 +1,338 @@
+/* Specific implementation of the UNPACK intrinsic
+ Copyright 2008 Free Software Foundation, Inc.
+ Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on
+ unpack_generic.c by Paul Brook <paul@nowt.org>.
+
+This file is part of the GNU Fortran 95 runtime library (libgfortran).
+
+Libgfortran is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public
+License as published by the Free Software Foundation; either
+version 2 of the License, or (at your option) any later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combined
+executable.)
+
+Libgfortran is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public
+License along with libgfortran; see the file COPYING. If not,
+write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#include "libgfortran.h"
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+
+
+#if defined (HAVE_GFC_REAL_16)
+
+void
+unpack0_r16 (gfc_array_r16 *ret, const gfc_array_r16 *vector,
+ const gfc_array_l1 *mask, const GFC_REAL_16 *fptr)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_REAL_16 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_REAL_16 *vptr;
+ /* Value for field, this is constant. */
+ const GFC_REAL_16 fval = *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+      /* Do not convert a NULL pointer as we test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_REAL_16));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = fval;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+void
+unpack1_r16 (gfc_array_r16 *ret, const gfc_array_r16 *vector,
+ const gfc_array_l1 *mask, const gfc_array_r16 *field)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_REAL_16 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_REAL_16 *vptr;
+ /* f.* indicates the field array. */
+ index_type fstride[GFC_MAX_DIMENSIONS];
+ index_type fstride0;
+ const GFC_REAL_16 *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+      /* Do not convert a NULL pointer as we test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_REAL_16));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (fstride[0] == 0)
+ fstride[0] = 1;
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ fstride0 = fstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ fptr = field->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = *fptr;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ fptr += fstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ fptr -= fstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ fptr += fstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+#endif
+
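Each kernel also normalises the LOGICAL mask so that a single byte-test loop serves every logical kind: the mask stride is multiplied by mask_kind and the base pointer is adjusted with GFOR_POINTER_TO_L1 so that the tested byte is the one that actually carries the value. A rough standalone illustration of that idea (hypothetical helpers; the real macro in libgfortran.h differs in detail):

#include <stddef.h>

/* Conceptual sketch: view a LOGICAL(KIND=mask_kind) array as bytes and
   test one byte per element, stepping by mask_kind bytes.  On a
   big-endian target the significant byte sits at the end of each
   element, so the base pointer is offset accordingly.  This mirrors
   the idea behind GFOR_POINTER_TO_L1, not its actual definition.  */
const unsigned char *
logical_as_bytes (const void *mask, int mask_kind, int big_endian)
{
  const unsigned char *p = mask;
  return big_endian ? p + mask_kind - 1 : p;
}

/* Count .TRUE. elements of a rank-1 logical array of length n.  */
size_t
count_true (const void *mask, int mask_kind, size_t n, int big_endian)
{
  const unsigned char *mptr = logical_as_bytes (mask, mask_kind, big_endian);
  size_t i, trues = 0;

  for (i = 0; i < n; i++, mptr += mask_kind)
    if (*mptr)
      trues++;
  return trues;
}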
diff --git a/libgfortran/generated/unpack_r4.c b/libgfortran/generated/unpack_r4.c
new file mode 100644
index 0000000..b998318
--- /dev/null
+++ b/libgfortran/generated/unpack_r4.c
@@ -0,0 +1,338 @@
+/* Specific implementation of the UNPACK intrinsic
+ Copyright 2008 Free Software Foundation, Inc.
+ Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on
+ unpack_generic.c by Paul Brook <paul@nowt.org>.
+
+This file is part of the GNU Fortran 95 runtime library (libgfortran).
+
+Libgfortran is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public
+License as published by the Free Software Foundation; either
+version 2 of the License, or (at your option) any later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combined
+executable.)
+
+Libgfortran is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public
+License along with libgfortran; see the file COPYING. If not,
+write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#include "libgfortran.h"
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+
+
+#if defined (HAVE_GFC_REAL_4)
+
+void
+unpack0_r4 (gfc_array_r4 *ret, const gfc_array_r4 *vector,
+ const gfc_array_l1 *mask, const GFC_REAL_4 *fptr)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_REAL_4 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_REAL_4 *vptr;
+ /* Value for field, this is constant. */
+ const GFC_REAL_4 fval = *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+      /* Do not convert a NULL pointer as we test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_REAL_4));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = fval;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+void
+unpack1_r4 (gfc_array_r4 *ret, const gfc_array_r4 *vector,
+ const gfc_array_l1 *mask, const gfc_array_r4 *field)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_REAL_4 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_REAL_4 *vptr;
+ /* f.* indicates the field array. */
+ index_type fstride[GFC_MAX_DIMENSIONS];
+ index_type fstride0;
+ const GFC_REAL_4 *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+      /* Do not convert a NULL pointer as we test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_REAL_4));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (fstride[0] == 0)
+ fstride[0] = 1;
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ fstride0 = fstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ fptr = field->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = *fptr;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ fptr += fstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ fptr -= fstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ fptr += fstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+#endif
+
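When the caller hands over an unallocated result descriptor (ret->data == NULL), each kernel derives a contiguous, zero-based shape from MASK: every stride is the running product of the preceding extents, and the final product rs sizes the internal_malloc_size call. A simplified sketch of that shape/size computation with plain C types (hypothetical names, not the gfc_array_* descriptors):

#include <stddef.h>
#include <stdlib.h>

/* Simplified sketch: compute contiguous strides and the total element
   count for a rank-`dim' result whose shape is given by extents[],
   the way the generated unpack routines size their return array.  */
void *
alloc_contiguous (size_t elem_size, int dim, const ptrdiff_t *extents,
                  ptrdiff_t *strides)
{
  ptrdiff_t rs = 1;
  int n;

  for (n = 0; n < dim; n++)
    {
      strides[n] = rs;      /* Stride of dimension n, in elements.  */
      rs *= extents[n];     /* Running count of elements.  */
    }
  if (rs <= 0)
    return NULL;            /* Empty shape: nothing to allocate.  */
  return malloc ((size_t) rs * elem_size);
}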
diff --git a/libgfortran/generated/unpack_r8.c b/libgfortran/generated/unpack_r8.c
new file mode 100644
index 0000000..cccf759
--- /dev/null
+++ b/libgfortran/generated/unpack_r8.c
@@ -0,0 +1,338 @@
+/* Specific implementation of the UNPACK intrinsic
+ Copyright 2008 Free Software Foundation, Inc.
+ Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on
+ unpack_generic.c by Paul Brook <paul@nowt.org>.
+
+This file is part of the GNU Fortran 95 runtime library (libgfortran).
+
+Libgfortran is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public
+License as published by the Free Software Foundation; either
+version 2 of the License, or (at your option) any later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combined
+executable.)
+
+Libgfortran is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public
+License along with libgfortran; see the file COPYING. If not,
+write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#include "libgfortran.h"
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+
+
+#if defined (HAVE_GFC_REAL_8)
+
+void
+unpack0_r8 (gfc_array_r8 *ret, const gfc_array_r8 *vector,
+ const gfc_array_l1 *mask, const GFC_REAL_8 *fptr)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_REAL_8 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_REAL_8 *vptr;
+ /* Value for field, this is constant. */
+ const GFC_REAL_8 fval = *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+      /* Do not convert a NULL pointer as we test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_REAL_8));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = fval;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+void
+unpack1_r8 (gfc_array_r8 *ret, const gfc_array_r8 *vector,
+ const gfc_array_l1 *mask, const gfc_array_r8 *field)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ GFC_REAL_8 *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ GFC_REAL_8 *vptr;
+ /* f.* indicates the field array. */
+ index_type fstride[GFC_MAX_DIMENSIONS];
+ index_type fstride0;
+ const GFC_REAL_8 *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+      /* Do not convert a NULL pointer as we test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof (GFC_REAL_8));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (fstride[0] == 0)
+ fstride[0] = 1;
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ fstride0 = fstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ fptr = field->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = *fptr;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ fptr += fstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ fptr -= fstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ fptr += fstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+#endif
+
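The only difference between the two entry points per kind is where the .FALSE. positions are filled from: unpack0_* reads a scalar FIELD once into fval, while unpack1_* advances a FIELD pointer with its own strides. A tiny rank-1, unit-stride sketch of those UNPACK semantics (illustrative only):

#include <stddef.h>

/* Hedged sketch of UNPACK for rank 1 with unit strides: MASK true takes
   the next VECTOR element, MASK false takes FIELD, which is either a
   scalar (the unpack0_* case) or an element-wise array (unpack1_*).  */
void
unpack_rank1_sketch (double *ret, const double *vector,
                     const unsigned char *mask, size_t n,
                     const double *field, int field_is_scalar)
{
  size_t i, v = 0;

  for (i = 0; i < n; i++)
    ret[i] = mask[i] ? vector[v++]
                     : (field_is_scalar ? field[0] : field[i]);
}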
diff --git a/libgfortran/intrinsics/unpack_generic.c b/libgfortran/intrinsics/unpack_generic.c
index 05141ed..145dd35 100644
--- a/libgfortran/intrinsics/unpack_generic.c
+++ b/libgfortran/intrinsics/unpack_generic.c
@@ -196,8 +196,103 @@ void
unpack1 (gfc_array_char *ret, const gfc_array_char *vector,
const gfc_array_l1 *mask, const gfc_array_char *field)
{
- unpack_internal (ret, vector, mask, field,
- GFC_DESCRIPTOR_SIZE (vector),
+ int type;
+ index_type size;
+
+ type = GFC_DESCRIPTOR_TYPE (vector);
+ size = GFC_DESCRIPTOR_SIZE (vector);
+
+ switch(type)
+ {
+ case GFC_DTYPE_INTEGER:
+ case GFC_DTYPE_LOGICAL:
+ switch(size)
+ {
+ case sizeof (GFC_INTEGER_1):
+ unpack1_i1 ((gfc_array_i1 *) ret, (gfc_array_i1 *) vector,
+ mask, (gfc_array_i1 *) field);
+ return;
+
+ case sizeof (GFC_INTEGER_2):
+ unpack1_i2 ((gfc_array_i2 *) ret, (gfc_array_i2 *) vector,
+ mask, (gfc_array_i2 *) field);
+ return;
+
+ case sizeof (GFC_INTEGER_4):
+ unpack1_i4 ((gfc_array_i4 *) ret, (gfc_array_i4 *) vector,
+ mask, (gfc_array_i4 *) field);
+ return;
+
+ case sizeof (GFC_INTEGER_8):
+ unpack1_i8 ((gfc_array_i8 *) ret, (gfc_array_i8 *) vector,
+ mask, (gfc_array_i8 *) field);
+ return;
+
+#ifdef HAVE_GFC_INTEGER_16
+ case sizeof (GFC_INTEGER_16):
+ unpack1_i16 ((gfc_array_i16 *) ret, (gfc_array_i16 *) vector,
+ mask, (gfc_array_i16 *) field);
+ return;
+#endif
+ }
+ case GFC_DTYPE_REAL:
+ switch (size)
+ {
+ case sizeof (GFC_REAL_4):
+ unpack1_r4 ((gfc_array_r4 *) ret, (gfc_array_r4 *) vector,
+ mask, (gfc_array_r4 *) field);
+ return;
+
+ case sizeof (GFC_REAL_8):
+ unpack1_r8 ((gfc_array_r8 *) ret, (gfc_array_r8 *) vector,
+ mask, (gfc_array_r8 *) field);
+ return;
+
+#ifdef HAVE_GFC_REAL_10
+ case sizeof (GFC_REAL_10):
+ unpack1_r10 ((gfc_array_r10 *) ret, (gfc_array_r10 *) vector,
+ mask, (gfc_array_r10 *) field);
+ return;
+#endif
+
+#ifdef HAVE_GFC_REAL_16
+ case sizeof (GFC_REAL_16):
+ unpack1_r16 ((gfc_array_r16 *) ret, (gfc_array_r16 *) vector,
+ mask, (gfc_array_r16 *) field);
+ return;
+#endif
+ }
+
+ case GFC_DTYPE_COMPLEX:
+ switch (size)
+ {
+ case sizeof (GFC_COMPLEX_4):
+ unpack1_c4 ((gfc_array_c4 *) ret, (gfc_array_c4 *) vector,
+ mask, (gfc_array_c4 *) field);
+ return;
+
+ case sizeof (GFC_COMPLEX_8):
+ unpack1_c8 ((gfc_array_c8 *) ret, (gfc_array_c8 *) vector,
+ mask, (gfc_array_c8 *) field);
+ return;
+
+#ifdef HAVE_GFC_COMPLEX_10
+ case sizeof (GFC_COMPLEX_10):
+ unpack1_c10 ((gfc_array_c10 *) ret, (gfc_array_c10 *) vector,
+ mask, (gfc_array_c10 *) field);
+ return;
+#endif
+
+#ifdef HAVE_GFC_COMPLEX_16
+ case sizeof (GFC_COMPLEX_16):
+ unpack1_c16 ((gfc_array_c16 *) ret, (gfc_array_c16 *) vector,
+ mask, (gfc_array_c16 *) field);
+ return;
+#endif
+ }
+
+ }
+ unpack_internal (ret, vector, mask, field, size,
GFC_DESCRIPTOR_SIZE (field));
}
@@ -227,6 +322,102 @@ unpack0 (gfc_array_char *ret, const gfc_array_char *vector,
{
gfc_array_char tmp;
+ int type;
+ index_type size;
+
+ type = GFC_DESCRIPTOR_TYPE (vector);
+ size = GFC_DESCRIPTOR_SIZE (vector);
+
+ switch(type)
+ {
+ case GFC_DTYPE_INTEGER:
+ case GFC_DTYPE_LOGICAL:
+ switch(size)
+ {
+ case sizeof (GFC_INTEGER_1):
+ unpack0_i1 ((gfc_array_i1 *) ret, (gfc_array_i1 *) vector,
+ mask, (GFC_INTEGER_1 *) field);
+ return;
+
+ case sizeof (GFC_INTEGER_2):
+ unpack0_i2 ((gfc_array_i2 *) ret, (gfc_array_i2 *) vector,
+ mask, (GFC_INTEGER_2 *) field);
+ return;
+
+ case sizeof (GFC_INTEGER_4):
+ unpack0_i4 ((gfc_array_i4 *) ret, (gfc_array_i4 *) vector,
+ mask, (GFC_INTEGER_4 *) field);
+ return;
+
+ case sizeof (GFC_INTEGER_8):
+ unpack0_i8 ((gfc_array_i8 *) ret, (gfc_array_i8 *) vector,
+ mask, (GFC_INTEGER_8 *) field);
+ return;
+
+#ifdef HAVE_GFC_INTEGER_16
+ case sizeof (GFC_INTEGER_16):
+ unpack0_i16 ((gfc_array_i16 *) ret, (gfc_array_i16 *) vector,
+ mask, (GFC_INTEGER_16 *) field);
+ return;
+#endif
+ }
+
+ case GFC_DTYPE_REAL:
+ switch(size)
+ {
+ case sizeof (GFC_REAL_4):
+ unpack0_r4 ((gfc_array_r4 *) ret, (gfc_array_r4 *) vector,
+ mask, (GFC_REAL_4 *) field);
+ return;
+
+ case sizeof (GFC_REAL_8):
+ unpack0_r8 ((gfc_array_r8 *) ret, (gfc_array_r8*) vector,
+ mask, (GFC_REAL_8 *) field);
+ return;
+
+#ifdef HAVE_GFC_REAL_10
+ case sizeof (GFC_REAL_10):
+ unpack0_r10 ((gfc_array_r10 *) ret, (gfc_array_r10 *) vector,
+ mask, (GFC_REAL_10 *) field);
+ return;
+#endif
+
+#ifdef HAVE_GFC_REAL_16
+ case sizeof (GFC_REAL_16):
+ unpack0_r16 ((gfc_array_r16 *) ret, (gfc_array_r16 *) vector,
+ mask, (GFC_REAL_16 *) field);
+ return;
+#endif
+ }
+
+ case GFC_DTYPE_COMPLEX:
+ switch(size)
+ {
+ case sizeof (GFC_COMPLEX_4):
+ unpack0_c4 ((gfc_array_c4 *) ret, (gfc_array_c4 *) vector,
+ mask, (GFC_COMPLEX_4 *) field);
+ return;
+
+ case sizeof (GFC_COMPLEX_8):
+ unpack0_c8 ((gfc_array_c8 *) ret, (gfc_array_c8 *) vector,
+ mask, (GFC_COMPLEX_8 *) field);
+ return;
+
+#ifdef HAVE_GFC_COMPLEX_10
+ case sizeof (GFC_COMPLEX_10):
+ unpack0_c10 ((gfc_array_c10 *) ret, (gfc_array_c10 *) vector,
+ mask, (GFC_COMPLEX_10 *) field);
+ return;
+#endif
+
+#ifdef HAVE_GFC_COMPLEX_16
+ case sizeof (GFC_COMPLEX_16):
+ unpack0_c16 ((gfc_array_c16 *) ret, (gfc_array_c16 *) vector,
+ mask, (GFC_COMPLEX_16 *) field);
+ return;
+#endif
+ }
+ }
memset (&tmp, 0, sizeof (tmp));
tmp.dtype = 0;
tmp.data = field;
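With the specific kernels in place, the generic unpack1 and unpack0 entry points above dispatch on the descriptor's type and element size, and only reach the byte-copying unpack_internal path when no specific routine applies (character data, or an element size with no kernel). A simplified, self-contained schematic of that dispatch shape (the wider kinds guarded by HAVE_GFC_* are omitted, and the strings returned here are just labels, not the real calls):

#include <stddef.h>
#include <stdio.h>

enum dtype { DTYPE_INTEGER, DTYPE_LOGICAL, DTYPE_REAL, DTYPE_COMPLEX,
             DTYPE_CHARACTER };

/* Map (dtype, element size) to the name of the specific kernel that
   unpack1 would call, or NULL for the unpack_internal fallback.  */
const char *
select_unpack1 (enum dtype type, size_t size)
{
  switch (type)
    {
    case DTYPE_INTEGER:
    case DTYPE_LOGICAL:
      if (size == 1) return "unpack1_i1";
      if (size == 2) return "unpack1_i2";
      if (size == 4) return "unpack1_i4";
      if (size == 8) return "unpack1_i8";
      break;
    case DTYPE_REAL:
      if (size == 4) return "unpack1_r4";
      if (size == 8) return "unpack1_r8";
      break;
    case DTYPE_COMPLEX:
      if (size == 8)  return "unpack1_c4";   /* Two 4-byte parts.  */
      if (size == 16) return "unpack1_c8";
      break;
    default:
      break;
    }
  return NULL;   /* Fall back to the generic unpack_internal.  */
}

int
main (void)
{
  const char *fn = select_unpack1 (DTYPE_REAL, sizeof (double));
  printf ("%s\n", fn ? fn : "unpack_internal");
  return 0;
}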
diff --git a/libgfortran/libgfortran.h b/libgfortran/libgfortran.h
index a2b023b..9a1f643 100644
--- a/libgfortran/libgfortran.h
+++ b/libgfortran/libgfortran.h
@@ -774,6 +774,142 @@ extern void pack_c16 (gfc_array_c16 *, const gfc_array_c16 *,
internal_proto(pack_c16);
#endif
+/* Internal auxiliary functions for the unpack intrinsic. */
+
+extern void unpack0_i1 (gfc_array_i1 *, const gfc_array_i1 *,
+ const gfc_array_l1 *, const GFC_INTEGER_1 *);
+internal_proto(unpack0_i1);
+
+extern void unpack0_i2 (gfc_array_i2 *, const gfc_array_i2 *,
+ const gfc_array_l1 *, const GFC_INTEGER_2 *);
+internal_proto(unpack0_i2);
+
+extern void unpack0_i4 (gfc_array_i4 *, const gfc_array_i4 *,
+ const gfc_array_l1 *, const GFC_INTEGER_4 *);
+internal_proto(unpack0_i4);
+
+extern void unpack0_i8 (gfc_array_i8 *, const gfc_array_i8 *,
+ const gfc_array_l1 *, const GFC_INTEGER_8 *);
+internal_proto(unpack0_i8);
+
+#ifdef HAVE_GFC_INTEGER_16
+
+extern void unpack0_i16 (gfc_array_i16 *, const gfc_array_i16 *,
+ const gfc_array_l1 *, const GFC_INTEGER_16 *);
+internal_proto(unpack0_i16);
+
+#endif
+
+extern void unpack0_r4 (gfc_array_r4 *, const gfc_array_r4 *,
+ const gfc_array_l1 *, const GFC_REAL_4 *);
+internal_proto(unpack0_r4);
+
+extern void unpack0_r8 (gfc_array_r8 *, const gfc_array_r8 *,
+ const gfc_array_l1 *, const GFC_REAL_8 *);
+internal_proto(unpack0_r8);
+
+#ifdef HAVE_GFC_REAL_10
+
+extern void unpack0_r10 (gfc_array_r10 *, const gfc_array_r10 *,
+ const gfc_array_l1 *, const GFC_REAL_10 *);
+internal_proto(unpack0_r10);
+
+#endif
+
+#ifdef HAVE_GFC_REAL_16
+
+extern void unpack0_r16 (gfc_array_r16 *, const gfc_array_r16 *,
+ const gfc_array_l1 *, const GFC_REAL_16 *);
+internal_proto(unpack0_r16);
+
+#endif
+
+extern void unpack0_c4 (gfc_array_c4 *, const gfc_array_c4 *,
+ const gfc_array_l1 *, const GFC_COMPLEX_4 *);
+internal_proto(unpack0_c4);
+
+extern void unpack0_c8 (gfc_array_c8 *, const gfc_array_c8 *,
+ const gfc_array_l1 *, const GFC_COMPLEX_8 *);
+internal_proto(unpack0_c8);
+
+#ifdef HAVE_GFC_COMPLEX_10
+
+extern void unpack0_c10 (gfc_array_c10 *, const gfc_array_c10 *,
+ const gfc_array_l1 *mask, const GFC_COMPLEX_10 *);
+internal_proto(unpack0_c10);
+
+#endif
+
+#ifdef HAVE_GFC_COMPLEX_16
+
+extern void unpack0_c16 (gfc_array_c16 *, const gfc_array_c16 *,
+ const gfc_array_l1 *, const GFC_COMPLEX_16 *);
+internal_proto(unpack0_c16);
+
+#endif
+
+extern void unpack1_i1 (gfc_array_i1 *, const gfc_array_i1 *,
+ const gfc_array_l1 *, const gfc_array_i1 *);
+internal_proto(unpack1_i1);
+
+extern void unpack1_i2 (gfc_array_i2 *, const gfc_array_i2 *,
+ const gfc_array_l1 *, const gfc_array_i2 *);
+internal_proto(unpack1_i2);
+
+extern void unpack1_i4 (gfc_array_i4 *, const gfc_array_i4 *,
+ const gfc_array_l1 *, const gfc_array_i4 *);
+internal_proto(unpack1_i4);
+
+extern void unpack1_i8 (gfc_array_i8 *, const gfc_array_i8 *,
+ const gfc_array_l1 *, const gfc_array_i8 *);
+internal_proto(unpack1_i8);
+
+#ifdef HAVE_GFC_INTEGER_16
+extern void unpack1_i16 (gfc_array_i16 *, const gfc_array_i16 *,
+ const gfc_array_l1 *, const gfc_array_i16 *);
+internal_proto(unpack1_i16);
+#endif
+
+extern void unpack1_r4 (gfc_array_r4 *, const gfc_array_r4 *,
+ const gfc_array_l1 *, const gfc_array_r4 *);
+internal_proto(unpack1_r4);
+
+extern void unpack1_r8 (gfc_array_r8 *, const gfc_array_r8 *,
+ const gfc_array_l1 *, const gfc_array_r8 *);
+internal_proto(unpack1_r8);
+
+#ifdef HAVE_GFC_REAL_10
+extern void unpack1_r10 (gfc_array_r10 *, const gfc_array_r10 *,
+ const gfc_array_l1 *, const gfc_array_r10 *);
+internal_proto(unpack1_r10);
+#endif
+
+#ifdef HAVE_GFC_REAL_16
+extern void unpack1_r16 (gfc_array_r16 *, const gfc_array_r16 *,
+ const gfc_array_l1 *, const gfc_array_r16 *);
+internal_proto(unpack1_r16);
+#endif
+
+extern void unpack1_c4 (gfc_array_c4 *, const gfc_array_c4 *,
+ const gfc_array_l1 *, const gfc_array_c4 *);
+internal_proto(unpack1_c4);
+
+extern void unpack1_c8 (gfc_array_c8 *, const gfc_array_c8 *,
+ const gfc_array_l1 *, const gfc_array_c8 *);
+internal_proto(unpack1_c8);
+
+#ifdef HAVE_GFC_COMPLEX_10
+extern void unpack1_c10 (gfc_array_c10 *, const gfc_array_c10 *,
+ const gfc_array_l1 *, const gfc_array_c10 *);
+internal_proto(unpack1_c10);
+#endif
+
+#ifdef HAVE_GFC_COMPLEX_16
+extern void unpack1_c16 (gfc_array_c16 *, const gfc_array_c16 *,
+ const gfc_array_l1 *, const gfc_array_c16 *);
+internal_proto(unpack1_c16);
+#endif
+
/* string_intrinsics.c */
extern int compare_string (GFC_INTEGER_4, const char *,
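The new prototypes are all wrapped in internal_proto, which keeps these helpers out of libgfortran's public ABI. As a hedged sketch of the general technique (hidden symbol visibility for library-internal functions), assuming GCC; the actual internal_proto macro in libgfortran.h is more involved, and MY_INTERNAL_PROTO is a hypothetical stand-in:

/* Re-declare an already declared function with hidden visibility so it
   never appears in the shared library's exported symbol table.  */
#define MY_INTERNAL_PROTO(name) \
  extern __typeof (name) name __attribute__ ((visibility ("hidden")))

void helper (void);
MY_INTERNAL_PROTO (helper);

void
helper (void)
{
  /* ... library-internal work ... */
}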
diff --git a/libgfortran/m4/unpack.m4 b/libgfortran/m4/unpack.m4
new file mode 100644
index 0000000..2ad6841
--- /dev/null
+++ b/libgfortran/m4/unpack.m4
@@ -0,0 +1,339 @@
+`/* Specific implementation of the UNPACK intrinsic
+ Copyright 2008 Free Software Foundation, Inc.
+ Contributed by Thomas Koenig <tkoenig@gcc.gnu.org>, based on
+ unpack_generic.c by Paul Brook <paul@nowt.org>.
+
+This file is part of the GNU Fortran 95 runtime library (libgfortran).
+
+Libgfortran is free software; you can redistribute it and/or
+modify it under the terms of the GNU General Public
+License as published by the Free Software Foundation; either
+version 2 of the License, or (at your option) any later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file into combinations with other programs,
+and to distribute those combinations without any restriction coming
+from the use of this file. (The General Public License restrictions
+do apply in other respects; for example, they cover modification of
+the file, and distribution when not linked into a combined
+executable.)
+
+Libgfortran is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public
+License along with libgfortran; see the file COPYING. If not,
+write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301, USA. */
+
+#include "libgfortran.h"
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>'
+
+include(iparm.m4)dnl
+
+`#if defined (HAVE_'rtype_name`)
+
+void
+unpack0_'rtype_code` ('rtype` *ret, const 'rtype` *vector,
+ const gfc_array_l1 *mask, const 'rtype_name` *fptr)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ 'rtype_name` *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ 'rtype_name` *vptr;
+ /* Value for field, this is constant. */
+ const 'rtype_name` fval = *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+      /* Do not convert a NULL pointer as we test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof ('rtype_name`));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = fval;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+void
+unpack1_'rtype_code` ('rtype` *ret, const 'rtype` *vector,
+ const gfc_array_l1 *mask, const 'rtype` *field)
+{
+ /* r.* indicates the return array. */
+ index_type rstride[GFC_MAX_DIMENSIONS];
+ index_type rstride0;
+ index_type rs;
+ 'rtype_name` *rptr;
+ /* v.* indicates the vector array. */
+ index_type vstride0;
+ 'rtype_name` *vptr;
+ /* f.* indicates the field array. */
+ index_type fstride[GFC_MAX_DIMENSIONS];
+ index_type fstride0;
+ const 'rtype_name` *fptr;
+ /* m.* indicates the mask array. */
+ index_type mstride[GFC_MAX_DIMENSIONS];
+ index_type mstride0;
+ const GFC_LOGICAL_1 *mptr;
+
+ index_type count[GFC_MAX_DIMENSIONS];
+ index_type extent[GFC_MAX_DIMENSIONS];
+ index_type n;
+ index_type dim;
+
+ int empty;
+ int mask_kind;
+
+ empty = 0;
+
+ mptr = mask->data;
+
+ /* Use the same loop for all logical types, by using GFC_LOGICAL_1
+ and using shifting to address size and endian issues. */
+
+ mask_kind = GFC_DESCRIPTOR_SIZE (mask);
+
+ if (mask_kind == 1 || mask_kind == 2 || mask_kind == 4 || mask_kind == 8
+#ifdef HAVE_GFC_LOGICAL_16
+ || mask_kind == 16
+#endif
+ )
+ {
+      /* Do not convert a NULL pointer as we test for NULL below. */
+ if (mptr)
+ mptr = GFOR_POINTER_TO_L1 (mptr, mask_kind);
+ }
+ else
+ runtime_error ("Funny sized logical array");
+
+ if (ret->data == NULL)
+ {
+ /* The front end has signalled that we need to populate the
+ return array descriptor. */
+ dim = GFC_DESCRIPTOR_RANK (mask);
+ rs = 1;
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ ret->dim[n].stride = rs;
+ ret->dim[n].lbound = 0;
+ ret->dim[n].ubound = mask->dim[n].ubound - mask->dim[n].lbound;
+ extent[n] = ret->dim[n].ubound + 1;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ rs *= extent[n];
+ }
+ ret->offset = 0;
+ ret->data = internal_malloc_size (rs * sizeof ('rtype_name`));
+ }
+ else
+ {
+ dim = GFC_DESCRIPTOR_RANK (ret);
+ for (n = 0; n < dim; n++)
+ {
+ count[n] = 0;
+ extent[n] = ret->dim[n].ubound + 1 - ret->dim[n].lbound;
+ empty = empty || extent[n] <= 0;
+ rstride[n] = ret->dim[n].stride;
+ fstride[n] = field->dim[n].stride;
+ mstride[n] = mask->dim[n].stride * mask_kind;
+ }
+ if (rstride[0] == 0)
+ rstride[0] = 1;
+ }
+
+ if (empty)
+ return;
+
+ if (fstride[0] == 0)
+ fstride[0] = 1;
+ if (mstride[0] == 0)
+ mstride[0] = 1;
+
+ vstride0 = vector->dim[0].stride;
+ if (vstride0 == 0)
+ vstride0 = 1;
+ rstride0 = rstride[0];
+ fstride0 = fstride[0];
+ mstride0 = mstride[0];
+ rptr = ret->data;
+ fptr = field->data;
+ vptr = vector->data;
+
+ while (rptr)
+ {
+ if (*mptr)
+ {
+ /* From vector. */
+ *rptr = *vptr;
+ vptr += vstride0;
+ }
+ else
+ {
+ /* From field. */
+ *rptr = *fptr;
+ }
+ /* Advance to the next element. */
+ rptr += rstride0;
+ fptr += fstride0;
+ mptr += mstride0;
+ count[0]++;
+ n = 0;
+ while (count[n] == extent[n])
+ {
+ /* When we get to the end of a dimension, reset it and increment
+ the next dimension. */
+ count[n] = 0;
+ /* We could precalculate these products, but this is a less
+ frequently used path so probably not worth it. */
+ rptr -= rstride[n] * extent[n];
+ fptr -= fstride[n] * extent[n];
+ mptr -= mstride[n] * extent[n];
+ n++;
+ if (n >= dim)
+ {
+ /* Break out of the loop. */
+ rptr = NULL;
+ break;
+ }
+ else
+ {
+ count[n]++;
+ rptr += rstride[n];
+ fptr += fstride[n];
+ mptr += mstride[n];
+ }
+ }
+ }
+}
+
+#endif
+'