author     Jerry DeLisle <jvdelisle@gcc.gnu.org>   2016-11-15 23:03:00 +0000
committer  Jerry DeLisle <jvdelisle@gcc.gnu.org>   2016-11-15 23:03:00 +0000
commit     5d70ab07b65a3fda05fe3ea00434ab98a66e9272 (patch)
tree       2043e90433038a86fd6bc5e64566a9569951e672 /libgfortran
parent     fd0477ca84491b35875ab913f3b607fd33cb1258 (diff)
[multiple changes]
2016-11-15  Jerry DeLisle  <jvdelisle@gcc.gnu.org>
            Thomas Koenig  <tkoenig@gcc.gnu.org>

        PR libgfortran/51119
        * Makefile.am: Add new optimization flags matmul.
        * Makefile.in: Regenerate.
        * m4/matmul.m4: For the case of all strides = 1, implement a
        fast blocked matrix multiply.  Fix some whitespace.
        * generated/matmul_c10.c: Regenerate.
        * generated/matmul_c16.c: Regenerate.
        * generated/matmul_c4.c: Regenerate.
        * generated/matmul_c8.c: Regenerate.
        * generated/matmul_i1.c: Regenerate.
        * generated/matmul_i16.c: Regenerate.
        * generated/matmul_i2.c: Regenerate.
        * generated/matmul_i4.c: Regenerate.
        * generated/matmul_i8.c: Regenerate.
        * generated/matmul_r10.c: Regenerate.
        * generated/matmul_r16.c: Regenerate.
        * generated/matmul_r4.c: Regenerate.
        * generated/matmul_r8.c: Regenerate.

2016-11-15  Thomas Koenig  <tkoenig@gcc.gnu.org>

        PR libgfortran/51119
        * gfortran.dg/matmul_12.f90: New test case.

From-SVN: r242462
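For readers skimming the patch: the heart of the change is the blocked kernel added to m4/matmul.m4 for the case where all strides are 1, which is then regenerated into each matmul_*.c file. The sketch below only illustrates the blocking idea for a column-major double-precision multiply; it is not the library code. The real kernel uses 512/256 block sizes, packs blocks of A into a scratch buffer t1, and computes C with a 4x4 register tile plus cleanup loops.

    /* Minimal sketch of cache-blocked matmul for column-major operands.
       Illustration only; block sizes and register tiling differ from the
       generated libgfortran code.  */
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    #define BLK 256                    /* single block size for the sketch */

    /* C (m x n) = A (m x k) * B (k x n), all column-major. */
    static void
    matmul_blocked (double *c, const double *a, const double *b,
                    size_t m, size_t n, size_t k)
    {
      static double t1[BLK * BLK];     /* packed block of A, like t1 in the patch */

      memset (c, 0, m * n * sizeof (double));

      for (size_t jj = 0; jj < n; jj += BLK)
        for (size_t ll = 0; ll < k; ll += BLK)
          for (size_t ii = 0; ii < m; ii += BLK)
            {
              size_t jsec = n - jj < BLK ? n - jj : BLK;
              size_t lsec = k - ll < BLK ? k - ll : BLK;
              size_t isec = m - ii < BLK ? m - ii : BLK;

              /* Pack the A block so the innermost loop below is unit stride.  */
              for (size_t i = 0; i < isec; i++)
                for (size_t l = 0; l < lsec; l++)
                  t1[l + i * BLK] = a[(ii + i) + (ll + l) * m];

              /* Accumulate the packed A block times the B panel into C.  */
              for (size_t j = 0; j < jsec; j++)
                for (size_t i = 0; i < isec; i++)
                  {
                    double f = c[(ii + i) + (jj + j) * m];
                    for (size_t l = 0; l < lsec; l++)
                      f += t1[l + i * BLK] * b[(ll + l) + (jj + j) * k];
                    c[(ii + i) + (jj + j) * m] = f;
                  }
            }
    }

    int
    main (void)
    {
      /* Tiny smoke test: 2x2 column-major matrices.  Expected: 23 34 31 46.  */
      double a[4] = { 1, 2, 3, 4 };
      double b[4] = { 5, 6, 7, 8 };
      double c[4];
      matmul_blocked (c, a, b, 2, 2, 2);
      printf ("%g %g %g %g\n", c[0], c[1], c[2], c[3]);
      return 0;
    }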
Diffstat (limited to 'libgfortran')
-rw-r--r--  libgfortran/ChangeLog               |  22
-rw-r--r--  libgfortran/Makefile.am             |   2
-rw-r--r--  libgfortran/Makefile.in             |   2
-rw-r--r--  libgfortran/generated/matmul_c10.c  | 396
-rw-r--r--  libgfortran/generated/matmul_c16.c  | 396
-rw-r--r--  libgfortran/generated/matmul_c4.c   | 396
-rw-r--r--  libgfortran/generated/matmul_c8.c   | 396
-rw-r--r--  libgfortran/generated/matmul_i1.c   | 396
-rw-r--r--  libgfortran/generated/matmul_i16.c  | 396
-rw-r--r--  libgfortran/generated/matmul_i2.c   | 396
-rw-r--r--  libgfortran/generated/matmul_i4.c   | 396
-rw-r--r--  libgfortran/generated/matmul_i8.c   | 396
-rw-r--r--  libgfortran/generated/matmul_r10.c  | 396
-rw-r--r--  libgfortran/generated/matmul_r16.c  | 396
-rw-r--r--  libgfortran/generated/matmul_r4.c   | 396
-rw-r--r--  libgfortran/generated/matmul_r8.c   | 396
-rw-r--r--  libgfortran/m4/matmul.m4            | 400
17 files changed, 4436 insertions, 1138 deletions
diff --git a/libgfortran/ChangeLog b/libgfortran/ChangeLog
index da3b3e0..50305af 100644
--- a/libgfortran/ChangeLog
+++ b/libgfortran/ChangeLog
@@ -1,3 +1,25 @@
+2016-11-15 Jerry DeLisle <jvdelisle@gcc.gnu.org>
+ Thomas Koenig <tkoenig@gcc.gnu.org>
+
+ PR libgfortran/51119
+ * Makefile.am: Add new optimization flags matmul.
+ * Makefile.in: Regenerate.
+ * m4/matmul.m4: For the case of all strides = 1, implement a
+ fast blocked matrix multiply. Fix some whitespace.
+ * generated/matmul_c10.c: Regenerate.
+ * generated/matmul_c16.c: Regenerate.
+ * generated/matmul_c4.c: Regenerate.
+ * generated/matmul_c8.c: Regenerate.
+ * generated/matmul_i1.c: Regenerate.
+ * generated/matmul_i16.c: Regenerate.
+ * generated/matmul_i2.c: Regenerate.
+ * generated/matmul_i4.c: Regenerate.
+ * generated/matmul_i8.c: Regenerate.
+ * generated/matmul_r10.c: Regenerate.
+ * generated/matmul_r16.c: Regenerate.
+ * generated/matmul_r4.c: Regenerate.
+ * generated/matmul_r8.c: Regenerate.
+
2016-11-15 Matthias Klose <doko@ubuntu.com>
* configure: Regenerate.
diff --git a/libgfortran/Makefile.am b/libgfortran/Makefile.am
index 39d3e11..7f4002d 100644
--- a/libgfortran/Makefile.am
+++ b/libgfortran/Makefile.am
@@ -850,7 +850,7 @@ intrinsics/dprod_r8.f90 \
intrinsics/f2c_specifics.F90
# Turn on vectorization and loop unrolling for matmul.
-$(patsubst %.c,%.lo,$(notdir $(i_matmul_c))): AM_CFLAGS += -ftree-vectorize -funroll-loops
+$(patsubst %.c,%.lo,$(notdir $(i_matmul_c))): AM_CFLAGS += -ffast-math -fno-protect-parens -fstack-arrays -ftree-vectorize -funroll-loops --param max-unroll-times=4
# Logical matmul doesn't vectorize.
$(patsubst %.c,%.lo,$(notdir $(i_matmull_c))): AM_CFLAGS += -funroll-loops
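An aside, not part of the patch: the flags above are added only for the matmul_*.c objects (the $(i_matmul_c) list), not for the whole library. One reason -ffast-math matters here is that it implies -fassociative-math, which lets the compiler reassociate floating-point reductions; that in turn is what -ftree-vectorize needs to vectorize the accumulation loops of the new blocked kernel. A minimal example of such a reduction:

    /* Under strict IEEE semantics this sum must be evaluated in order, so
       the loop cannot be split into vectorized partial sums.  With
       -ffast-math the additions may be reassociated and the loop
       vectorized.  Illustration only; not code from the patch.  */
    double
    dot (const double * restrict a, const double * restrict b, int n)
    {
      double s = 0.0;
      for (int i = 0; i < n; i++)
        s += a[i] * b[i];
      return s;
    }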
diff --git a/libgfortran/Makefile.in b/libgfortran/Makefile.in
index 7ed080c..c1a37d7 100644
--- a/libgfortran/Makefile.in
+++ b/libgfortran/Makefile.in
@@ -5956,7 +5956,7 @@ uninstall-am: uninstall-cafexeclibLTLIBRARIES \
@LIBGFOR_USE_SYMVER_SUN_TRUE@@LIBGFOR_USE_SYMVER_TRUE@ > $@ || (rm -f $@ ; exit 1)
# Turn on vectorization and loop unrolling for matmul.
-$(patsubst %.c,%.lo,$(notdir $(i_matmul_c))): AM_CFLAGS += -ftree-vectorize -funroll-loops
+$(patsubst %.c,%.lo,$(notdir $(i_matmul_c))): AM_CFLAGS += -ffast-math -fno-protect-parens -fstack-arrays -ftree-vectorize -funroll-loops --param max-unroll-times=4
# Logical matmul doesn't vectorize.
$(patsubst %.c,%.lo,$(notdir $(i_matmull_c))): AM_CFLAGS += -funroll-loops
diff --git a/libgfortran/generated/matmul_c10.c b/libgfortran/generated/matmul_c10.c
index c9558184..c784a26 100644
--- a/libgfortran/generated/matmul_c10.c
+++ b/libgfortran/generated/matmul_c10.c
@@ -32,7 +32,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#if defined (HAVE_GFC_COMPLEX_10)
/* Prototype for the BLAS ?gemm subroutine, a pointer to which can be
- passed to us by the front-end, in which case we'll call it for large
+ passed to us by the front-end, in which case we call it for large
matrices. */
typedef void (*blas_call)(const char *, const char *, const int *, const int *,
@@ -99,7 +99,7 @@ matmul_c10 (gfc_array_c10 * const restrict retarray,
o One-dimensional argument B is implicitly treated as a column matrix
dimensioned [count, 1], so ycount=1.
- */
+*/
if (retarray->base_addr == NULL)
{
@@ -127,47 +127,47 @@ matmul_c10 (gfc_array_c10 * const restrict retarray,
= xmallocarray (size0 ((array_t *) retarray), sizeof (GFC_COMPLEX_10));
retarray->offset = 0;
}
- else if (unlikely (compile_options.bounds_check))
- {
- index_type ret_extent, arg_extent;
-
- if (GFC_DESCRIPTOR_RANK (a) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else if (GFC_DESCRIPTOR_RANK (b) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 1:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
-
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 2:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- }
+ else if (unlikely (compile_options.bounds_check))
+ {
+ index_type ret_extent, arg_extent;
+
+ if (GFC_DESCRIPTOR_RANK (a) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else if (GFC_DESCRIPTOR_RANK (b) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 1:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 2:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ }
if (GFC_DESCRIPTOR_RANK (retarray) == 1)
@@ -230,61 +230,294 @@ matmul_c10 (gfc_array_c10 * const restrict retarray,
bbase = b->base_addr;
dest = retarray->base_addr;
-
- /* Now that everything is set up, we're performing the multiplication
+ /* Now that everything is set up, we perform the multiplication
itself. */
#define POW3(x) (((float) (x)) * ((float) (x)) * ((float) (x)))
+#define min(a,b) ((a) <= (b) ? (a) : (b))
+#define max(a,b) ((a) >= (b) ? (a) : (b))
if (try_blas && rxstride == 1 && (axstride == 1 || aystride == 1)
&& (bxstride == 1 || bystride == 1)
&& (((float) xcount) * ((float) ycount) * ((float) count)
> POW3(blas_limit)))
- {
- const int m = xcount, n = ycount, k = count, ldc = rystride;
- const GFC_COMPLEX_10 one = 1, zero = 0;
- const int lda = (axstride == 1) ? aystride : axstride,
- ldb = (bxstride == 1) ? bystride : bxstride;
-
- if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
- {
- assert (gemm != NULL);
- gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m, &n, &k,
- &one, abase, &lda, bbase, &ldb, &zero, dest, &ldc, 1, 1);
- return;
- }
- }
-
- if (rxstride == 1 && axstride == 1 && bxstride == 1)
{
- const GFC_COMPLEX_10 * restrict bbase_y;
- GFC_COMPLEX_10 * restrict dest_y;
- const GFC_COMPLEX_10 * restrict abase_n;
- GFC_COMPLEX_10 bbase_yn;
+ const int m = xcount, n = ycount, k = count, ldc = rystride;
+ const GFC_COMPLEX_10 one = 1, zero = 0;
+ const int lda = (axstride == 1) ? aystride : axstride,
+ ldb = (bxstride == 1) ? bystride : bxstride;
- if (rystride == xcount)
- memset (dest, 0, (sizeof (GFC_COMPLEX_10) * xcount * ycount));
- else
+ if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
{
- for (y = 0; y < ycount; y++)
- for (x = 0; x < xcount; x++)
- dest[x + y*rystride] = (GFC_COMPLEX_10)0;
+ assert (gemm != NULL);
+ gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m,
+ &n, &k, &one, abase, &lda, bbase, &ldb, &zero, dest,
+ &ldc, 1, 1);
+ return;
}
+ }
- for (y = 0; y < ycount; y++)
+ if (rxstride == 1 && axstride == 1 && bxstride == 1)
+ {
+ /* This block of code implements a tuned matmul, derived from
+ Superscalar GEMM-based level 3 BLAS, Beta version 0.1
+
+ Bo Kagstrom and Per Ling
+ Department of Computing Science
+ Umea University
+ S-901 87 Umea, Sweden
+
+ from netlib.org, translated to C, and modified for matmul.m4. */
+
+ const GFC_COMPLEX_10 *a, *b;
+ GFC_COMPLEX_10 *c;
+ const index_type m = xcount, n = ycount, k = count;
+
+ /* System generated locals */
+ index_type a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset,
+ i1, i2, i3, i4, i5, i6;
+
+ /* Local variables */
+ GFC_COMPLEX_10 t1[65536], /* was [256][256] */
+ f11, f12, f21, f22, f31, f32, f41, f42,
+ f13, f14, f23, f24, f33, f34, f43, f44;
+ index_type i, j, l, ii, jj, ll;
+ index_type isec, jsec, lsec, uisec, ujsec, ulsec;
+
+ a = abase;
+ b = bbase;
+ c = retarray->base_addr;
+
+ /* Parameter adjustments */
+ c_dim1 = rystride;
+ c_offset = 1 + c_dim1;
+ c -= c_offset;
+ a_dim1 = aystride;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = bystride;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Early exit if possible */
+ if (m == 0 || n == 0 || k == 0)
+ return;
+
+ /* Empty c first. */
+ for (j=1; j<=n; j++)
+ for (i=1; i<=m; i++)
+ c[i + j * c_dim1] = (GFC_COMPLEX_10)0;
+
+ /* Start turning the crank. */
+ i1 = n;
+ for (jj = 1; jj <= i1; jj += 512)
{
- bbase_y = bbase + y*bystride;
- dest_y = dest + y*rystride;
- for (n = 0; n < count; n++)
+ /* Computing MIN */
+ i2 = 512;
+ i3 = n - jj + 1;
+ jsec = min(i2,i3);
+ ujsec = jsec - jsec % 4;
+ i2 = k;
+ for (ll = 1; ll <= i2; ll += 256)
{
- abase_n = abase + n*aystride;
- bbase_yn = bbase_y[n];
- for (x = 0; x < xcount; x++)
+ /* Computing MIN */
+ i3 = 256;
+ i4 = k - ll + 1;
+ lsec = min(i3,i4);
+ ulsec = lsec - lsec % 2;
+
+ i3 = m;
+ for (ii = 1; ii <= i3; ii += 256)
{
- dest_y[x] += abase_n[x] * bbase_yn;
+ /* Computing MIN */
+ i4 = 256;
+ i5 = m - ii + 1;
+ isec = min(i4,i5);
+ uisec = isec - isec % 2;
+ i4 = ll + ulsec - 1;
+ for (l = ll; l <= i4; l += 2)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 2)
+ {
+ t1[l - ll + 1 + ((i - ii + 1) << 8) - 257] =
+ a[i + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 1) << 8) - 257] =
+ a[i + (l + 1) * a_dim1];
+ t1[l - ll + 1 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + (l + 1) * a_dim1];
+ }
+ if (uisec < isec)
+ {
+ t1[l - ll + 1 + (isec << 8) - 257] =
+ a[ii + isec - 1 + l * a_dim1];
+ t1[l - ll + 2 + (isec << 8) - 257] =
+ a[ii + isec - 1 + (l + 1) * a_dim1];
+ }
+ }
+ if (ulsec < lsec)
+ {
+ i4 = ii + isec - 1;
+ for (i = ii; i<= i4; ++i)
+ {
+ t1[lsec + ((i - ii + 1) << 8) - 257] =
+ a[i + (ll + lsec - 1) * a_dim1];
+ }
+ }
+
+ uisec = isec - isec % 4;
+ i4 = jj + ujsec - 1;
+ for (j = jj; j <= i4; j += 4)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f22 = c[i + 1 + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f23 = c[i + 1 + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ f24 = c[i + 1 + (j + 3) * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ f32 = c[i + 2 + (j + 1) * c_dim1];
+ f42 = c[i + 3 + (j + 1) * c_dim1];
+ f33 = c[i + 2 + (j + 2) * c_dim1];
+ f43 = c[i + 3 + (j + 2) * c_dim1];
+ f34 = c[i + 2 + (j + 3) * c_dim1];
+ f44 = c[i + 3 + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f22 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f23 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f24 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + j * b_dim1];
+ f32 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f42 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f33 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f43 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f34 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f44 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + 1 + (j + 1) * c_dim1] = f22;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + 1 + (j + 2) * c_dim1] = f23;
+ c[i + (j + 3) * c_dim1] = f14;
+ c[i + 1 + (j + 3) * c_dim1] = f24;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ c[i + 2 + (j + 1) * c_dim1] = f32;
+ c[i + 3 + (j + 1) * c_dim1] = f42;
+ c[i + 2 + (j + 2) * c_dim1] = f33;
+ c[i + 3 + (j + 2) * c_dim1] = f43;
+ c[i + 2 + (j + 3) * c_dim1] = f34;
+ c[i + 3 + (j + 3) * c_dim1] = f44;
+ }
+ if (uisec < isec)
+ {
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + (j + 3) * c_dim1] = f14;
+ }
+ }
+ }
+ if (ujsec < jsec)
+ {
+ i4 = jj + jsec - 1;
+ for (j = jj + ujsec; j <= i4; ++j)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) -
+ 257] * b[l + j * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) -
+ 257] * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ }
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ }
+ }
+ }
}
}
}
+ return;
}
else if (rxstride == 1 && aystride == 1 && bxstride == 1)
{
@@ -334,7 +567,9 @@ matmul_c10 (gfc_array_c10 * const restrict retarray,
for (n = 0; n < count; n++)
for (x = 0; x < xcount; x++)
/* dest[x,y] += a[x,n] * b[n,y] */
- dest[x*rxstride + y*rystride] += abase[x*axstride + n*aystride] * bbase[n*bxstride + y*bystride];
+ dest[x*rxstride + y*rystride] +=
+ abase[x*axstride + n*aystride] *
+ bbase[n*bxstride + y*bystride];
}
else if (GFC_DESCRIPTOR_RANK (a) == 1)
{
@@ -372,5 +607,4 @@ matmul_c10 (gfc_array_c10 * const restrict retarray,
}
}
}
-
#endif
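A note on the index arithmetic in the block above (an aside, not part of the patch): t1 is declared as a flat array of 65536 elements but, as the /* was [256][256] */ comment says, it is addressed as a 1-based 256x256 Fortran-style array. Writing lp = l - ll + 1 and ip = i - ii + 1, the expression lp + (ip << 8) - 257 is simply the flattened 0-based index (lp - 1) + (ip - 1) * 256, so the packed copy of the A block is walked with stride 1 by the innermost l loop. A small check of that identity:

    /* Verify that the 1-based shift-and-offset index used in the generated
       code matches a plain 0-based [256][256] layout.  */
    #include <assert.h>

    int
    main (void)
    {
      for (int ip = 1; ip <= 256; ip++)
        for (int lp = 1; lp <= 256; lp++)
          assert (lp + (ip << 8) - 257 == (lp - 1) + (ip - 1) * 256);
      return 0;
    }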
diff --git a/libgfortran/generated/matmul_c16.c b/libgfortran/generated/matmul_c16.c
index 25fe56e..47e1bea 100644
--- a/libgfortran/generated/matmul_c16.c
+++ b/libgfortran/generated/matmul_c16.c
@@ -32,7 +32,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#if defined (HAVE_GFC_COMPLEX_16)
/* Prototype for the BLAS ?gemm subroutine, a pointer to which can be
- passed to us by the front-end, in which case we'll call it for large
+ passed to us by the front-end, in which case we call it for large
matrices. */
typedef void (*blas_call)(const char *, const char *, const int *, const int *,
@@ -99,7 +99,7 @@ matmul_c16 (gfc_array_c16 * const restrict retarray,
o One-dimensional argument B is implicitly treated as a column matrix
dimensioned [count, 1], so ycount=1.
- */
+*/
if (retarray->base_addr == NULL)
{
@@ -127,47 +127,47 @@ matmul_c16 (gfc_array_c16 * const restrict retarray,
= xmallocarray (size0 ((array_t *) retarray), sizeof (GFC_COMPLEX_16));
retarray->offset = 0;
}
- else if (unlikely (compile_options.bounds_check))
- {
- index_type ret_extent, arg_extent;
-
- if (GFC_DESCRIPTOR_RANK (a) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else if (GFC_DESCRIPTOR_RANK (b) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 1:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
-
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 2:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- }
+ else if (unlikely (compile_options.bounds_check))
+ {
+ index_type ret_extent, arg_extent;
+
+ if (GFC_DESCRIPTOR_RANK (a) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else if (GFC_DESCRIPTOR_RANK (b) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 1:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 2:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ }
if (GFC_DESCRIPTOR_RANK (retarray) == 1)
@@ -230,61 +230,294 @@ matmul_c16 (gfc_array_c16 * const restrict retarray,
bbase = b->base_addr;
dest = retarray->base_addr;
-
- /* Now that everything is set up, we're performing the multiplication
+ /* Now that everything is set up, we perform the multiplication
itself. */
#define POW3(x) (((float) (x)) * ((float) (x)) * ((float) (x)))
+#define min(a,b) ((a) <= (b) ? (a) : (b))
+#define max(a,b) ((a) >= (b) ? (a) : (b))
if (try_blas && rxstride == 1 && (axstride == 1 || aystride == 1)
&& (bxstride == 1 || bystride == 1)
&& (((float) xcount) * ((float) ycount) * ((float) count)
> POW3(blas_limit)))
- {
- const int m = xcount, n = ycount, k = count, ldc = rystride;
- const GFC_COMPLEX_16 one = 1, zero = 0;
- const int lda = (axstride == 1) ? aystride : axstride,
- ldb = (bxstride == 1) ? bystride : bxstride;
-
- if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
- {
- assert (gemm != NULL);
- gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m, &n, &k,
- &one, abase, &lda, bbase, &ldb, &zero, dest, &ldc, 1, 1);
- return;
- }
- }
-
- if (rxstride == 1 && axstride == 1 && bxstride == 1)
{
- const GFC_COMPLEX_16 * restrict bbase_y;
- GFC_COMPLEX_16 * restrict dest_y;
- const GFC_COMPLEX_16 * restrict abase_n;
- GFC_COMPLEX_16 bbase_yn;
+ const int m = xcount, n = ycount, k = count, ldc = rystride;
+ const GFC_COMPLEX_16 one = 1, zero = 0;
+ const int lda = (axstride == 1) ? aystride : axstride,
+ ldb = (bxstride == 1) ? bystride : bxstride;
- if (rystride == xcount)
- memset (dest, 0, (sizeof (GFC_COMPLEX_16) * xcount * ycount));
- else
+ if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
{
- for (y = 0; y < ycount; y++)
- for (x = 0; x < xcount; x++)
- dest[x + y*rystride] = (GFC_COMPLEX_16)0;
+ assert (gemm != NULL);
+ gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m,
+ &n, &k, &one, abase, &lda, bbase, &ldb, &zero, dest,
+ &ldc, 1, 1);
+ return;
}
+ }
- for (y = 0; y < ycount; y++)
+ if (rxstride == 1 && axstride == 1 && bxstride == 1)
+ {
+ /* This block of code implements a tuned matmul, derived from
+ Superscalar GEMM-based level 3 BLAS, Beta version 0.1
+
+ Bo Kagstrom and Per Ling
+ Department of Computing Science
+ Umea University
+ S-901 87 Umea, Sweden
+
+ from netlib.org, translated to C, and modified for matmul.m4. */
+
+ const GFC_COMPLEX_16 *a, *b;
+ GFC_COMPLEX_16 *c;
+ const index_type m = xcount, n = ycount, k = count;
+
+ /* System generated locals */
+ index_type a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset,
+ i1, i2, i3, i4, i5, i6;
+
+ /* Local variables */
+ GFC_COMPLEX_16 t1[65536], /* was [256][256] */
+ f11, f12, f21, f22, f31, f32, f41, f42,
+ f13, f14, f23, f24, f33, f34, f43, f44;
+ index_type i, j, l, ii, jj, ll;
+ index_type isec, jsec, lsec, uisec, ujsec, ulsec;
+
+ a = abase;
+ b = bbase;
+ c = retarray->base_addr;
+
+ /* Parameter adjustments */
+ c_dim1 = rystride;
+ c_offset = 1 + c_dim1;
+ c -= c_offset;
+ a_dim1 = aystride;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = bystride;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Early exit if possible */
+ if (m == 0 || n == 0 || k == 0)
+ return;
+
+ /* Empty c first. */
+ for (j=1; j<=n; j++)
+ for (i=1; i<=m; i++)
+ c[i + j * c_dim1] = (GFC_COMPLEX_16)0;
+
+ /* Start turning the crank. */
+ i1 = n;
+ for (jj = 1; jj <= i1; jj += 512)
{
- bbase_y = bbase + y*bystride;
- dest_y = dest + y*rystride;
- for (n = 0; n < count; n++)
+ /* Computing MIN */
+ i2 = 512;
+ i3 = n - jj + 1;
+ jsec = min(i2,i3);
+ ujsec = jsec - jsec % 4;
+ i2 = k;
+ for (ll = 1; ll <= i2; ll += 256)
{
- abase_n = abase + n*aystride;
- bbase_yn = bbase_y[n];
- for (x = 0; x < xcount; x++)
+ /* Computing MIN */
+ i3 = 256;
+ i4 = k - ll + 1;
+ lsec = min(i3,i4);
+ ulsec = lsec - lsec % 2;
+
+ i3 = m;
+ for (ii = 1; ii <= i3; ii += 256)
{
- dest_y[x] += abase_n[x] * bbase_yn;
+ /* Computing MIN */
+ i4 = 256;
+ i5 = m - ii + 1;
+ isec = min(i4,i5);
+ uisec = isec - isec % 2;
+ i4 = ll + ulsec - 1;
+ for (l = ll; l <= i4; l += 2)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 2)
+ {
+ t1[l - ll + 1 + ((i - ii + 1) << 8) - 257] =
+ a[i + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 1) << 8) - 257] =
+ a[i + (l + 1) * a_dim1];
+ t1[l - ll + 1 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + (l + 1) * a_dim1];
+ }
+ if (uisec < isec)
+ {
+ t1[l - ll + 1 + (isec << 8) - 257] =
+ a[ii + isec - 1 + l * a_dim1];
+ t1[l - ll + 2 + (isec << 8) - 257] =
+ a[ii + isec - 1 + (l + 1) * a_dim1];
+ }
+ }
+ if (ulsec < lsec)
+ {
+ i4 = ii + isec - 1;
+ for (i = ii; i<= i4; ++i)
+ {
+ t1[lsec + ((i - ii + 1) << 8) - 257] =
+ a[i + (ll + lsec - 1) * a_dim1];
+ }
+ }
+
+ uisec = isec - isec % 4;
+ i4 = jj + ujsec - 1;
+ for (j = jj; j <= i4; j += 4)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f22 = c[i + 1 + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f23 = c[i + 1 + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ f24 = c[i + 1 + (j + 3) * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ f32 = c[i + 2 + (j + 1) * c_dim1];
+ f42 = c[i + 3 + (j + 1) * c_dim1];
+ f33 = c[i + 2 + (j + 2) * c_dim1];
+ f43 = c[i + 3 + (j + 2) * c_dim1];
+ f34 = c[i + 2 + (j + 3) * c_dim1];
+ f44 = c[i + 3 + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f22 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f23 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f24 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + j * b_dim1];
+ f32 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f42 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f33 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f43 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f34 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f44 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + 1 + (j + 1) * c_dim1] = f22;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + 1 + (j + 2) * c_dim1] = f23;
+ c[i + (j + 3) * c_dim1] = f14;
+ c[i + 1 + (j + 3) * c_dim1] = f24;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ c[i + 2 + (j + 1) * c_dim1] = f32;
+ c[i + 3 + (j + 1) * c_dim1] = f42;
+ c[i + 2 + (j + 2) * c_dim1] = f33;
+ c[i + 3 + (j + 2) * c_dim1] = f43;
+ c[i + 2 + (j + 3) * c_dim1] = f34;
+ c[i + 3 + (j + 3) * c_dim1] = f44;
+ }
+ if (uisec < isec)
+ {
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + (j + 3) * c_dim1] = f14;
+ }
+ }
+ }
+ if (ujsec < jsec)
+ {
+ i4 = jj + jsec - 1;
+ for (j = jj + ujsec; j <= i4; ++j)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) -
+ 257] * b[l + j * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) -
+ 257] * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ }
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ }
+ }
+ }
}
}
}
+ return;
}
else if (rxstride == 1 && aystride == 1 && bxstride == 1)
{
@@ -334,7 +567,9 @@ matmul_c16 (gfc_array_c16 * const restrict retarray,
for (n = 0; n < count; n++)
for (x = 0; x < xcount; x++)
/* dest[x,y] += a[x,n] * b[n,y] */
- dest[x*rxstride + y*rystride] += abase[x*axstride + n*aystride] * bbase[n*bxstride + y*bystride];
+ dest[x*rxstride + y*rystride] +=
+ abase[x*axstride + n*aystride] *
+ bbase[n*bxstride + y*bystride];
}
else if (GFC_DESCRIPTOR_RANK (a) == 1)
{
@@ -372,5 +607,4 @@ matmul_c16 (gfc_array_c16 * const restrict retarray,
}
}
}
-
#endif
diff --git a/libgfortran/generated/matmul_c4.c b/libgfortran/generated/matmul_c4.c
index e9d2ed3..4eb1896 100644
--- a/libgfortran/generated/matmul_c4.c
+++ b/libgfortran/generated/matmul_c4.c
@@ -32,7 +32,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#if defined (HAVE_GFC_COMPLEX_4)
/* Prototype for the BLAS ?gemm subroutine, a pointer to which can be
- passed to us by the front-end, in which case we'll call it for large
+ passed to us by the front-end, in which case we call it for large
matrices. */
typedef void (*blas_call)(const char *, const char *, const int *, const int *,
@@ -99,7 +99,7 @@ matmul_c4 (gfc_array_c4 * const restrict retarray,
o One-dimensional argument B is implicitly treated as a column matrix
dimensioned [count, 1], so ycount=1.
- */
+*/
if (retarray->base_addr == NULL)
{
@@ -127,47 +127,47 @@ matmul_c4 (gfc_array_c4 * const restrict retarray,
= xmallocarray (size0 ((array_t *) retarray), sizeof (GFC_COMPLEX_4));
retarray->offset = 0;
}
- else if (unlikely (compile_options.bounds_check))
- {
- index_type ret_extent, arg_extent;
-
- if (GFC_DESCRIPTOR_RANK (a) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else if (GFC_DESCRIPTOR_RANK (b) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 1:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
-
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 2:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- }
+ else if (unlikely (compile_options.bounds_check))
+ {
+ index_type ret_extent, arg_extent;
+
+ if (GFC_DESCRIPTOR_RANK (a) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else if (GFC_DESCRIPTOR_RANK (b) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 1:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 2:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ }
if (GFC_DESCRIPTOR_RANK (retarray) == 1)
@@ -230,61 +230,294 @@ matmul_c4 (gfc_array_c4 * const restrict retarray,
bbase = b->base_addr;
dest = retarray->base_addr;
-
- /* Now that everything is set up, we're performing the multiplication
+ /* Now that everything is set up, we perform the multiplication
itself. */
#define POW3(x) (((float) (x)) * ((float) (x)) * ((float) (x)))
+#define min(a,b) ((a) <= (b) ? (a) : (b))
+#define max(a,b) ((a) >= (b) ? (a) : (b))
if (try_blas && rxstride == 1 && (axstride == 1 || aystride == 1)
&& (bxstride == 1 || bystride == 1)
&& (((float) xcount) * ((float) ycount) * ((float) count)
> POW3(blas_limit)))
- {
- const int m = xcount, n = ycount, k = count, ldc = rystride;
- const GFC_COMPLEX_4 one = 1, zero = 0;
- const int lda = (axstride == 1) ? aystride : axstride,
- ldb = (bxstride == 1) ? bystride : bxstride;
-
- if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
- {
- assert (gemm != NULL);
- gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m, &n, &k,
- &one, abase, &lda, bbase, &ldb, &zero, dest, &ldc, 1, 1);
- return;
- }
- }
-
- if (rxstride == 1 && axstride == 1 && bxstride == 1)
{
- const GFC_COMPLEX_4 * restrict bbase_y;
- GFC_COMPLEX_4 * restrict dest_y;
- const GFC_COMPLEX_4 * restrict abase_n;
- GFC_COMPLEX_4 bbase_yn;
+ const int m = xcount, n = ycount, k = count, ldc = rystride;
+ const GFC_COMPLEX_4 one = 1, zero = 0;
+ const int lda = (axstride == 1) ? aystride : axstride,
+ ldb = (bxstride == 1) ? bystride : bxstride;
- if (rystride == xcount)
- memset (dest, 0, (sizeof (GFC_COMPLEX_4) * xcount * ycount));
- else
+ if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
{
- for (y = 0; y < ycount; y++)
- for (x = 0; x < xcount; x++)
- dest[x + y*rystride] = (GFC_COMPLEX_4)0;
+ assert (gemm != NULL);
+ gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m,
+ &n, &k, &one, abase, &lda, bbase, &ldb, &zero, dest,
+ &ldc, 1, 1);
+ return;
}
+ }
- for (y = 0; y < ycount; y++)
+ if (rxstride == 1 && axstride == 1 && bxstride == 1)
+ {
+ /* This block of code implements a tuned matmul, derived from
+ Superscalar GEMM-based level 3 BLAS, Beta version 0.1
+
+ Bo Kagstrom and Per Ling
+ Department of Computing Science
+ Umea University
+ S-901 87 Umea, Sweden
+
+ from netlib.org, translated to C, and modified for matmul.m4. */
+
+ const GFC_COMPLEX_4 *a, *b;
+ GFC_COMPLEX_4 *c;
+ const index_type m = xcount, n = ycount, k = count;
+
+ /* System generated locals */
+ index_type a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset,
+ i1, i2, i3, i4, i5, i6;
+
+ /* Local variables */
+ GFC_COMPLEX_4 t1[65536], /* was [256][256] */
+ f11, f12, f21, f22, f31, f32, f41, f42,
+ f13, f14, f23, f24, f33, f34, f43, f44;
+ index_type i, j, l, ii, jj, ll;
+ index_type isec, jsec, lsec, uisec, ujsec, ulsec;
+
+ a = abase;
+ b = bbase;
+ c = retarray->base_addr;
+
+ /* Parameter adjustments */
+ c_dim1 = rystride;
+ c_offset = 1 + c_dim1;
+ c -= c_offset;
+ a_dim1 = aystride;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = bystride;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Early exit if possible */
+ if (m == 0 || n == 0 || k == 0)
+ return;
+
+ /* Empty c first. */
+ for (j=1; j<=n; j++)
+ for (i=1; i<=m; i++)
+ c[i + j * c_dim1] = (GFC_COMPLEX_4)0;
+
+ /* Start turning the crank. */
+ i1 = n;
+ for (jj = 1; jj <= i1; jj += 512)
{
- bbase_y = bbase + y*bystride;
- dest_y = dest + y*rystride;
- for (n = 0; n < count; n++)
+ /* Computing MIN */
+ i2 = 512;
+ i3 = n - jj + 1;
+ jsec = min(i2,i3);
+ ujsec = jsec - jsec % 4;
+ i2 = k;
+ for (ll = 1; ll <= i2; ll += 256)
{
- abase_n = abase + n*aystride;
- bbase_yn = bbase_y[n];
- for (x = 0; x < xcount; x++)
+ /* Computing MIN */
+ i3 = 256;
+ i4 = k - ll + 1;
+ lsec = min(i3,i4);
+ ulsec = lsec - lsec % 2;
+
+ i3 = m;
+ for (ii = 1; ii <= i3; ii += 256)
{
- dest_y[x] += abase_n[x] * bbase_yn;
+ /* Computing MIN */
+ i4 = 256;
+ i5 = m - ii + 1;
+ isec = min(i4,i5);
+ uisec = isec - isec % 2;
+ i4 = ll + ulsec - 1;
+ for (l = ll; l <= i4; l += 2)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 2)
+ {
+ t1[l - ll + 1 + ((i - ii + 1) << 8) - 257] =
+ a[i + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 1) << 8) - 257] =
+ a[i + (l + 1) * a_dim1];
+ t1[l - ll + 1 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + (l + 1) * a_dim1];
+ }
+ if (uisec < isec)
+ {
+ t1[l - ll + 1 + (isec << 8) - 257] =
+ a[ii + isec - 1 + l * a_dim1];
+ t1[l - ll + 2 + (isec << 8) - 257] =
+ a[ii + isec - 1 + (l + 1) * a_dim1];
+ }
+ }
+ if (ulsec < lsec)
+ {
+ i4 = ii + isec - 1;
+ for (i = ii; i<= i4; ++i)
+ {
+ t1[lsec + ((i - ii + 1) << 8) - 257] =
+ a[i + (ll + lsec - 1) * a_dim1];
+ }
+ }
+
+ uisec = isec - isec % 4;
+ i4 = jj + ujsec - 1;
+ for (j = jj; j <= i4; j += 4)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f22 = c[i + 1 + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f23 = c[i + 1 + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ f24 = c[i + 1 + (j + 3) * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ f32 = c[i + 2 + (j + 1) * c_dim1];
+ f42 = c[i + 3 + (j + 1) * c_dim1];
+ f33 = c[i + 2 + (j + 2) * c_dim1];
+ f43 = c[i + 3 + (j + 2) * c_dim1];
+ f34 = c[i + 2 + (j + 3) * c_dim1];
+ f44 = c[i + 3 + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f22 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f23 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f24 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + j * b_dim1];
+ f32 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f42 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f33 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f43 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f34 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f44 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + 1 + (j + 1) * c_dim1] = f22;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + 1 + (j + 2) * c_dim1] = f23;
+ c[i + (j + 3) * c_dim1] = f14;
+ c[i + 1 + (j + 3) * c_dim1] = f24;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ c[i + 2 + (j + 1) * c_dim1] = f32;
+ c[i + 3 + (j + 1) * c_dim1] = f42;
+ c[i + 2 + (j + 2) * c_dim1] = f33;
+ c[i + 3 + (j + 2) * c_dim1] = f43;
+ c[i + 2 + (j + 3) * c_dim1] = f34;
+ c[i + 3 + (j + 3) * c_dim1] = f44;
+ }
+ if (uisec < isec)
+ {
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + (j + 3) * c_dim1] = f14;
+ }
+ }
+ }
+ if (ujsec < jsec)
+ {
+ i4 = jj + jsec - 1;
+ for (j = jj + ujsec; j <= i4; ++j)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) -
+ 257] * b[l + j * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) -
+ 257] * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ }
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ }
+ }
+ }
}
}
}
+ return;
}
else if (rxstride == 1 && aystride == 1 && bxstride == 1)
{
@@ -334,7 +567,9 @@ matmul_c4 (gfc_array_c4 * const restrict retarray,
for (n = 0; n < count; n++)
for (x = 0; x < xcount; x++)
/* dest[x,y] += a[x,n] * b[n,y] */
- dest[x*rxstride + y*rystride] += abase[x*axstride + n*aystride] * bbase[n*bxstride + y*bystride];
+ dest[x*rxstride + y*rystride] +=
+ abase[x*axstride + n*aystride] *
+ bbase[n*bxstride + y*bystride];
}
else if (GFC_DESCRIPTOR_RANK (a) == 1)
{
@@ -372,5 +607,4 @@ matmul_c4 (gfc_array_c4 * const restrict retarray,
}
}
}
-
#endif
diff --git a/libgfortran/generated/matmul_c8.c b/libgfortran/generated/matmul_c8.c
index 8a127da..2321b9e 100644
--- a/libgfortran/generated/matmul_c8.c
+++ b/libgfortran/generated/matmul_c8.c
@@ -32,7 +32,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#if defined (HAVE_GFC_COMPLEX_8)
/* Prototype for the BLAS ?gemm subroutine, a pointer to which can be
- passed to us by the front-end, in which case we'll call it for large
+ passed to us by the front-end, in which case we call it for large
matrices. */
typedef void (*blas_call)(const char *, const char *, const int *, const int *,
@@ -99,7 +99,7 @@ matmul_c8 (gfc_array_c8 * const restrict retarray,
o One-dimensional argument B is implicitly treated as a column matrix
dimensioned [count, 1], so ycount=1.
- */
+*/
if (retarray->base_addr == NULL)
{
@@ -127,47 +127,47 @@ matmul_c8 (gfc_array_c8 * const restrict retarray,
= xmallocarray (size0 ((array_t *) retarray), sizeof (GFC_COMPLEX_8));
retarray->offset = 0;
}
- else if (unlikely (compile_options.bounds_check))
- {
- index_type ret_extent, arg_extent;
-
- if (GFC_DESCRIPTOR_RANK (a) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else if (GFC_DESCRIPTOR_RANK (b) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 1:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
-
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 2:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- }
+ else if (unlikely (compile_options.bounds_check))
+ {
+ index_type ret_extent, arg_extent;
+
+ if (GFC_DESCRIPTOR_RANK (a) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else if (GFC_DESCRIPTOR_RANK (b) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 1:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 2:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ }
if (GFC_DESCRIPTOR_RANK (retarray) == 1)
@@ -230,61 +230,294 @@ matmul_c8 (gfc_array_c8 * const restrict retarray,
bbase = b->base_addr;
dest = retarray->base_addr;
-
- /* Now that everything is set up, we're performing the multiplication
+ /* Now that everything is set up, we perform the multiplication
itself. */
#define POW3(x) (((float) (x)) * ((float) (x)) * ((float) (x)))
+#define min(a,b) ((a) <= (b) ? (a) : (b))
+#define max(a,b) ((a) >= (b) ? (a) : (b))
if (try_blas && rxstride == 1 && (axstride == 1 || aystride == 1)
&& (bxstride == 1 || bystride == 1)
&& (((float) xcount) * ((float) ycount) * ((float) count)
> POW3(blas_limit)))
- {
- const int m = xcount, n = ycount, k = count, ldc = rystride;
- const GFC_COMPLEX_8 one = 1, zero = 0;
- const int lda = (axstride == 1) ? aystride : axstride,
- ldb = (bxstride == 1) ? bystride : bxstride;
-
- if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
- {
- assert (gemm != NULL);
- gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m, &n, &k,
- &one, abase, &lda, bbase, &ldb, &zero, dest, &ldc, 1, 1);
- return;
- }
- }
-
- if (rxstride == 1 && axstride == 1 && bxstride == 1)
{
- const GFC_COMPLEX_8 * restrict bbase_y;
- GFC_COMPLEX_8 * restrict dest_y;
- const GFC_COMPLEX_8 * restrict abase_n;
- GFC_COMPLEX_8 bbase_yn;
+ const int m = xcount, n = ycount, k = count, ldc = rystride;
+ const GFC_COMPLEX_8 one = 1, zero = 0;
+ const int lda = (axstride == 1) ? aystride : axstride,
+ ldb = (bxstride == 1) ? bystride : bxstride;
- if (rystride == xcount)
- memset (dest, 0, (sizeof (GFC_COMPLEX_8) * xcount * ycount));
- else
+ if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
{
- for (y = 0; y < ycount; y++)
- for (x = 0; x < xcount; x++)
- dest[x + y*rystride] = (GFC_COMPLEX_8)0;
+ assert (gemm != NULL);
+ gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m,
+ &n, &k, &one, abase, &lda, bbase, &ldb, &zero, dest,
+ &ldc, 1, 1);
+ return;
}
+ }
- for (y = 0; y < ycount; y++)
+ if (rxstride == 1 && axstride == 1 && bxstride == 1)
+ {
+ /* This block of code implements a tuned matmul, derived from
+ Superscalar GEMM-based level 3 BLAS, Beta version 0.1
+
+ Bo Kagstrom and Per Ling
+ Department of Computing Science
+ Umea University
+ S-901 87 Umea, Sweden
+
+ from netlib.org, translated to C, and modified for matmul.m4. */
+
+ const GFC_COMPLEX_8 *a, *b;
+ GFC_COMPLEX_8 *c;
+ const index_type m = xcount, n = ycount, k = count;
+
+ /* System generated locals */
+ index_type a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset,
+ i1, i2, i3, i4, i5, i6;
+
+ /* Local variables */
+ GFC_COMPLEX_8 t1[65536], /* was [256][256] */
+ f11, f12, f21, f22, f31, f32, f41, f42,
+ f13, f14, f23, f24, f33, f34, f43, f44;
+ index_type i, j, l, ii, jj, ll;
+ index_type isec, jsec, lsec, uisec, ujsec, ulsec;
+
+ a = abase;
+ b = bbase;
+ c = retarray->base_addr;
+
+ /* Parameter adjustments */
+ c_dim1 = rystride;
+ c_offset = 1 + c_dim1;
+ c -= c_offset;
+ a_dim1 = aystride;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = bystride;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Early exit if possible */
+ if (m == 0 || n == 0 || k == 0)
+ return;
+
+ /* Empty c first. */
+ for (j=1; j<=n; j++)
+ for (i=1; i<=m; i++)
+ c[i + j * c_dim1] = (GFC_COMPLEX_8)0;
+
+ /* Start turning the crank. */
+ i1 = n;
+ for (jj = 1; jj <= i1; jj += 512)
{
- bbase_y = bbase + y*bystride;
- dest_y = dest + y*rystride;
- for (n = 0; n < count; n++)
+ /* Computing MIN */
+ i2 = 512;
+ i3 = n - jj + 1;
+ jsec = min(i2,i3);
+ ujsec = jsec - jsec % 4;
+ i2 = k;
+ for (ll = 1; ll <= i2; ll += 256)
{
- abase_n = abase + n*aystride;
- bbase_yn = bbase_y[n];
- for (x = 0; x < xcount; x++)
+ /* Computing MIN */
+ i3 = 256;
+ i4 = k - ll + 1;
+ lsec = min(i3,i4);
+ ulsec = lsec - lsec % 2;
+
+ i3 = m;
+ for (ii = 1; ii <= i3; ii += 256)
{
- dest_y[x] += abase_n[x] * bbase_yn;
+ /* Computing MIN */
+ i4 = 256;
+ i5 = m - ii + 1;
+ isec = min(i4,i5);
+ uisec = isec - isec % 2;
+ i4 = ll + ulsec - 1;
+ for (l = ll; l <= i4; l += 2)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 2)
+ {
+ t1[l - ll + 1 + ((i - ii + 1) << 8) - 257] =
+ a[i + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 1) << 8) - 257] =
+ a[i + (l + 1) * a_dim1];
+ t1[l - ll + 1 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + (l + 1) * a_dim1];
+ }
+ if (uisec < isec)
+ {
+ t1[l - ll + 1 + (isec << 8) - 257] =
+ a[ii + isec - 1 + l * a_dim1];
+ t1[l - ll + 2 + (isec << 8) - 257] =
+ a[ii + isec - 1 + (l + 1) * a_dim1];
+ }
+ }
+ if (ulsec < lsec)
+ {
+ i4 = ii + isec - 1;
+ for (i = ii; i<= i4; ++i)
+ {
+ t1[lsec + ((i - ii + 1) << 8) - 257] =
+ a[i + (ll + lsec - 1) * a_dim1];
+ }
+ }
+
+ uisec = isec - isec % 4;
+ i4 = jj + ujsec - 1;
+ for (j = jj; j <= i4; j += 4)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f22 = c[i + 1 + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f23 = c[i + 1 + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ f24 = c[i + 1 + (j + 3) * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ f32 = c[i + 2 + (j + 1) * c_dim1];
+ f42 = c[i + 3 + (j + 1) * c_dim1];
+ f33 = c[i + 2 + (j + 2) * c_dim1];
+ f43 = c[i + 3 + (j + 2) * c_dim1];
+ f34 = c[i + 2 + (j + 3) * c_dim1];
+ f44 = c[i + 3 + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f22 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f23 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f24 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + j * b_dim1];
+ f32 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f42 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f33 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f43 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f34 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f44 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + 1 + (j + 1) * c_dim1] = f22;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + 1 + (j + 2) * c_dim1] = f23;
+ c[i + (j + 3) * c_dim1] = f14;
+ c[i + 1 + (j + 3) * c_dim1] = f24;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ c[i + 2 + (j + 1) * c_dim1] = f32;
+ c[i + 3 + (j + 1) * c_dim1] = f42;
+ c[i + 2 + (j + 2) * c_dim1] = f33;
+ c[i + 3 + (j + 2) * c_dim1] = f43;
+ c[i + 2 + (j + 3) * c_dim1] = f34;
+ c[i + 3 + (j + 3) * c_dim1] = f44;
+ }
+ if (uisec < isec)
+ {
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + (j + 3) * c_dim1] = f14;
+ }
+ }
+ }
+ if (ujsec < jsec)
+ {
+ i4 = jj + jsec - 1;
+ for (j = jj + ujsec; j <= i4; ++j)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) -
+ 257] * b[l + j * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) -
+ 257] * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ }
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ }
+ }
+ }
}
}
}
+ return;
}
else if (rxstride == 1 && aystride == 1 && bxstride == 1)
{
@@ -334,7 +567,9 @@ matmul_c8 (gfc_array_c8 * const restrict retarray,
for (n = 0; n < count; n++)
for (x = 0; x < xcount; x++)
/* dest[x,y] += a[x,n] * b[n,y] */
- dest[x*rxstride + y*rystride] += abase[x*axstride + n*aystride] * bbase[n*bxstride + y*bystride];
+ dest[x*rxstride + y*rystride] +=
+ abase[x*axstride + n*aystride] *
+ bbase[n*bxstride + y*bystride];
}
else if (GFC_DESCRIPTOR_RANK (a) == 1)
{
@@ -372,5 +607,4 @@ matmul_c8 (gfc_array_c8 * const restrict retarray,
}
}
}
-
#endif
diff --git a/libgfortran/generated/matmul_i1.c b/libgfortran/generated/matmul_i1.c
index fdb3092..81c067b 100644
--- a/libgfortran/generated/matmul_i1.c
+++ b/libgfortran/generated/matmul_i1.c
@@ -32,7 +32,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#if defined (HAVE_GFC_INTEGER_1)
/* Prototype for the BLAS ?gemm subroutine, a pointer to which can be
- passed to us by the front-end, in which case we'll call it for large
+ passed to us by the front-end, in which case we call it for large
matrices. */
typedef void (*blas_call)(const char *, const char *, const int *, const int *,
@@ -99,7 +99,7 @@ matmul_i1 (gfc_array_i1 * const restrict retarray,
o One-dimensional argument B is implicitly treated as a column matrix
dimensioned [count, 1], so ycount=1.
- */
+*/
if (retarray->base_addr == NULL)
{
@@ -127,47 +127,47 @@ matmul_i1 (gfc_array_i1 * const restrict retarray,
= xmallocarray (size0 ((array_t *) retarray), sizeof (GFC_INTEGER_1));
retarray->offset = 0;
}
- else if (unlikely (compile_options.bounds_check))
- {
- index_type ret_extent, arg_extent;
-
- if (GFC_DESCRIPTOR_RANK (a) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else if (GFC_DESCRIPTOR_RANK (b) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 1:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
-
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 2:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- }
+ else if (unlikely (compile_options.bounds_check))
+ {
+ index_type ret_extent, arg_extent;
+
+ if (GFC_DESCRIPTOR_RANK (a) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else if (GFC_DESCRIPTOR_RANK (b) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 1:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 2:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ }
if (GFC_DESCRIPTOR_RANK (retarray) == 1)
@@ -230,61 +230,294 @@ matmul_i1 (gfc_array_i1 * const restrict retarray,
bbase = b->base_addr;
dest = retarray->base_addr;
-
- /* Now that everything is set up, we're performing the multiplication
+ /* Now that everything is set up, we perform the multiplication
itself. */
#define POW3(x) (((float) (x)) * ((float) (x)) * ((float) (x)))
+#define min(a,b) ((a) <= (b) ? (a) : (b))
+#define max(a,b) ((a) >= (b) ? (a) : (b))
if (try_blas && rxstride == 1 && (axstride == 1 || aystride == 1)
&& (bxstride == 1 || bystride == 1)
&& (((float) xcount) * ((float) ycount) * ((float) count)
> POW3(blas_limit)))
- {
- const int m = xcount, n = ycount, k = count, ldc = rystride;
- const GFC_INTEGER_1 one = 1, zero = 0;
- const int lda = (axstride == 1) ? aystride : axstride,
- ldb = (bxstride == 1) ? bystride : bxstride;
-
- if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
- {
- assert (gemm != NULL);
- gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m, &n, &k,
- &one, abase, &lda, bbase, &ldb, &zero, dest, &ldc, 1, 1);
- return;
- }
- }
-
- if (rxstride == 1 && axstride == 1 && bxstride == 1)
{
- const GFC_INTEGER_1 * restrict bbase_y;
- GFC_INTEGER_1 * restrict dest_y;
- const GFC_INTEGER_1 * restrict abase_n;
- GFC_INTEGER_1 bbase_yn;
+ const int m = xcount, n = ycount, k = count, ldc = rystride;
+ const GFC_INTEGER_1 one = 1, zero = 0;
+ const int lda = (axstride == 1) ? aystride : axstride,
+ ldb = (bxstride == 1) ? bystride : bxstride;
- if (rystride == xcount)
- memset (dest, 0, (sizeof (GFC_INTEGER_1) * xcount * ycount));
- else
+ if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
{
- for (y = 0; y < ycount; y++)
- for (x = 0; x < xcount; x++)
- dest[x + y*rystride] = (GFC_INTEGER_1)0;
+ assert (gemm != NULL);
+ gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m,
+ &n, &k, &one, abase, &lda, bbase, &ldb, &zero, dest,
+ &ldc, 1, 1);
+ return;
}
+ }
- for (y = 0; y < ycount; y++)
+ if (rxstride == 1 && axstride == 1 && bxstride == 1)
+ {
+ /* This block of code implements a tuned matmul, derived from
+ Superscalar GEMM-based level 3 BLAS, Beta version 0.1
+
+ Bo Kagstrom and Per Ling
+ Department of Computing Science
+ Umea University
+ S-901 87 Umea, Sweden
+
+ from netlib.org, translated to C, and modified for matmul.m4. */
+
+ const GFC_INTEGER_1 *a, *b;
+ GFC_INTEGER_1 *c;
+ const index_type m = xcount, n = ycount, k = count;
+
+ /* System generated locals */
+ index_type a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset,
+ i1, i2, i3, i4, i5, i6;
+
+ /* Local variables */
+ GFC_INTEGER_1 t1[65536], /* was [256][256] */
+ f11, f12, f21, f22, f31, f32, f41, f42,
+ f13, f14, f23, f24, f33, f34, f43, f44;
+ index_type i, j, l, ii, jj, ll;
+ index_type isec, jsec, lsec, uisec, ujsec, ulsec;
+
+ a = abase;
+ b = bbase;
+ c = retarray->base_addr;
+
+ /* Parameter adjustments */
+ c_dim1 = rystride;
+ c_offset = 1 + c_dim1;
+ c -= c_offset;
+ a_dim1 = aystride;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = bystride;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Early exit if possible */
+ if (m == 0 || n == 0 || k == 0)
+ return;
+
+ /* Empty c first. */
+ for (j=1; j<=n; j++)
+ for (i=1; i<=m; i++)
+ c[i + j * c_dim1] = (GFC_INTEGER_1)0;
+
+ /* Start turning the crank. */
+ i1 = n;
+ for (jj = 1; jj <= i1; jj += 512)
{
- bbase_y = bbase + y*bystride;
- dest_y = dest + y*rystride;
- for (n = 0; n < count; n++)
+ /* Computing MIN */
+ i2 = 512;
+ i3 = n - jj + 1;
+ jsec = min(i2,i3);
+ ujsec = jsec - jsec % 4;
+ i2 = k;
+ for (ll = 1; ll <= i2; ll += 256)
{
- abase_n = abase + n*aystride;
- bbase_yn = bbase_y[n];
- for (x = 0; x < xcount; x++)
+ /* Computing MIN */
+ i3 = 256;
+ i4 = k - ll + 1;
+ lsec = min(i3,i4);
+ ulsec = lsec - lsec % 2;
+
+ i3 = m;
+ for (ii = 1; ii <= i3; ii += 256)
{
- dest_y[x] += abase_n[x] * bbase_yn;
+ /* Computing MIN */
+ i4 = 256;
+ i5 = m - ii + 1;
+ isec = min(i4,i5);
+ uisec = isec - isec % 2;
+ i4 = ll + ulsec - 1;
+ for (l = ll; l <= i4; l += 2)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 2)
+ {
+ t1[l - ll + 1 + ((i - ii + 1) << 8) - 257] =
+ a[i + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 1) << 8) - 257] =
+ a[i + (l + 1) * a_dim1];
+ t1[l - ll + 1 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + (l + 1) * a_dim1];
+ }
+ if (uisec < isec)
+ {
+ t1[l - ll + 1 + (isec << 8) - 257] =
+ a[ii + isec - 1 + l * a_dim1];
+ t1[l - ll + 2 + (isec << 8) - 257] =
+ a[ii + isec - 1 + (l + 1) * a_dim1];
+ }
+ }
+ if (ulsec < lsec)
+ {
+ i4 = ii + isec - 1;
+ for (i = ii; i<= i4; ++i)
+ {
+ t1[lsec + ((i - ii + 1) << 8) - 257] =
+ a[i + (ll + lsec - 1) * a_dim1];
+ }
+ }
+
+ uisec = isec - isec % 4;
+ i4 = jj + ujsec - 1;
+ for (j = jj; j <= i4; j += 4)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f22 = c[i + 1 + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f23 = c[i + 1 + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ f24 = c[i + 1 + (j + 3) * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ f32 = c[i + 2 + (j + 1) * c_dim1];
+ f42 = c[i + 3 + (j + 1) * c_dim1];
+ f33 = c[i + 2 + (j + 2) * c_dim1];
+ f43 = c[i + 3 + (j + 2) * c_dim1];
+ f34 = c[i + 2 + (j + 3) * c_dim1];
+ f44 = c[i + 3 + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f22 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f23 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f24 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + j * b_dim1];
+ f32 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f42 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f33 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f43 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f34 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f44 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + 1 + (j + 1) * c_dim1] = f22;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + 1 + (j + 2) * c_dim1] = f23;
+ c[i + (j + 3) * c_dim1] = f14;
+ c[i + 1 + (j + 3) * c_dim1] = f24;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ c[i + 2 + (j + 1) * c_dim1] = f32;
+ c[i + 3 + (j + 1) * c_dim1] = f42;
+ c[i + 2 + (j + 2) * c_dim1] = f33;
+ c[i + 3 + (j + 2) * c_dim1] = f43;
+ c[i + 2 + (j + 3) * c_dim1] = f34;
+ c[i + 3 + (j + 3) * c_dim1] = f44;
+ }
+ if (uisec < isec)
+ {
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + (j + 3) * c_dim1] = f14;
+ }
+ }
+ }
+ if (ujsec < jsec)
+ {
+ i4 = jj + jsec - 1;
+ for (j = jj + ujsec; j <= i4; ++j)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) -
+ 257] * b[l + j * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) -
+ 257] * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ }
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ }
+ }
+ }
}
}
}
+ return;
}
else if (rxstride == 1 && aystride == 1 && bxstride == 1)
{
@@ -334,7 +567,9 @@ matmul_i1 (gfc_array_i1 * const restrict retarray,
for (n = 0; n < count; n++)
for (x = 0; x < xcount; x++)
/* dest[x,y] += a[x,n] * b[n,y] */
- dest[x*rxstride + y*rystride] += abase[x*axstride + n*aystride] * bbase[n*bxstride + y*bystride];
+ dest[x*rxstride + y*rystride] +=
+ abase[x*axstride + n*aystride] *
+ bbase[n*bxstride + y*bystride];
}
else if (GFC_DESCRIPTOR_RANK (a) == 1)
{
@@ -372,5 +607,4 @@ matmul_i1 (gfc_array_i1 * const restrict retarray,
}
}
}
-
#endif
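
Editorial note: the hunk above (repeated for every generated matmul_*.c kind) replaces the simple triple loop with the blocked kernel credited to Kagstrom and Ling. B is traversed in strips of 512 columns, A in 256 x 256 panels copied into the on-stack scratch array t1, and the innermost update works on a 4 x 4 register tile of C, with remainder loops for the edges. A minimal sketch of the same blocking idea, written against plain column-major double arrays with illustrative block and tile sizes (not the generated code itself), might look like this:

/* Illustrative sketch only: a column-major blocked matmul in the spirit of
   the generated kernel above.  NB and the 2x2 register tile are
   simplifications; the real code uses 512/256 blocks, a 4x4 tile and
   explicit remainder loops.  m, n, k are assumed to be multiples of NB.  */
#include <stddef.h>

#define NB 64   /* panel dimension of the packed A buffer */

/* C(m x n) += A(m x k) * B(k x n); all arrays column-major with leading
   dimensions m, k and m respectively.  The caller zeroes C first, just as
   the generated code does with its "Empty c first" loop.  */
static void
blocked_matmul (int m, int n, int k,
                const double *a, const double *b, double *c)
{
  double t1[NB * NB];   /* packed panel of A, like t1[] above */

  for (int jj = 0; jj < n; jj += NB)
    for (int ll = 0; ll < k; ll += NB)
      for (int ii = 0; ii < m; ii += NB)
        {
          /* Pack the NB x NB panel of A into t1, transposed so that the
             inner product below reads it with stride 1.  */
          for (int i = 0; i < NB; i++)
            for (int l = 0; l < NB; l++)
              t1[l + i * NB] = a[(ii + i) + (ll + l) * (size_t) m];

          /* 2x2 register tile: accumulate four C entries at once.  */
          for (int j = 0; j < NB; j += 2)
            for (int i = 0; i < NB; i += 2)
              {
                double f11 = c[(ii + i)     + (jj + j)     * (size_t) m];
                double f21 = c[(ii + i + 1) + (jj + j)     * (size_t) m];
                double f12 = c[(ii + i)     + (jj + j + 1) * (size_t) m];
                double f22 = c[(ii + i + 1) + (jj + j + 1) * (size_t) m];
                for (int l = 0; l < NB; l++)
                  {
                    double b1 = b[(ll + l) + (jj + j)     * (size_t) k];
                    double b2 = b[(ll + l) + (jj + j + 1) * (size_t) k];
                    f11 += t1[l + i * NB]       * b1;
                    f21 += t1[l + (i + 1) * NB] * b1;
                    f12 += t1[l + i * NB]       * b2;
                    f22 += t1[l + (i + 1) * NB] * b2;
                  }
                c[(ii + i)     + (jj + j)     * (size_t) m] = f11;
                c[(ii + i + 1) + (jj + j)     * (size_t) m] = f21;
                c[(ii + i)     + (jj + j + 1) * (size_t) m] = f12;
                c[(ii + i + 1) + (jj + j + 1) * (size_t) m] = f22;
              }
        }
}

Packing the A panel once per (ii, ll) block keeps the hot inner product reading t1 and a handful of B columns at stride 1, which is why the fast path above is guarded by the all-strides-equal-one condition.
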
diff --git a/libgfortran/generated/matmul_i16.c b/libgfortran/generated/matmul_i16.c
index 80eb63c..d1b1761 100644
--- a/libgfortran/generated/matmul_i16.c
+++ b/libgfortran/generated/matmul_i16.c
@@ -32,7 +32,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#if defined (HAVE_GFC_INTEGER_16)
/* Prototype for the BLAS ?gemm subroutine, a pointer to which can be
- passed to us by the front-end, in which case we'll call it for large
+ passed to us by the front-end, in which case we call it for large
matrices. */
typedef void (*blas_call)(const char *, const char *, const int *, const int *,
@@ -99,7 +99,7 @@ matmul_i16 (gfc_array_i16 * const restrict retarray,
o One-dimensional argument B is implicitly treated as a column matrix
dimensioned [count, 1], so ycount=1.
- */
+*/
if (retarray->base_addr == NULL)
{
@@ -127,47 +127,47 @@ matmul_i16 (gfc_array_i16 * const restrict retarray,
= xmallocarray (size0 ((array_t *) retarray), sizeof (GFC_INTEGER_16));
retarray->offset = 0;
}
- else if (unlikely (compile_options.bounds_check))
- {
- index_type ret_extent, arg_extent;
-
- if (GFC_DESCRIPTOR_RANK (a) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else if (GFC_DESCRIPTOR_RANK (b) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 1:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
-
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 2:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- }
+ else if (unlikely (compile_options.bounds_check))
+ {
+ index_type ret_extent, arg_extent;
+
+ if (GFC_DESCRIPTOR_RANK (a) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else if (GFC_DESCRIPTOR_RANK (b) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 1:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 2:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ }
if (GFC_DESCRIPTOR_RANK (retarray) == 1)
@@ -230,61 +230,294 @@ matmul_i16 (gfc_array_i16 * const restrict retarray,
bbase = b->base_addr;
dest = retarray->base_addr;
-
- /* Now that everything is set up, we're performing the multiplication
+ /* Now that everything is set up, we perform the multiplication
itself. */
#define POW3(x) (((float) (x)) * ((float) (x)) * ((float) (x)))
+#define min(a,b) ((a) <= (b) ? (a) : (b))
+#define max(a,b) ((a) >= (b) ? (a) : (b))
if (try_blas && rxstride == 1 && (axstride == 1 || aystride == 1)
&& (bxstride == 1 || bystride == 1)
&& (((float) xcount) * ((float) ycount) * ((float) count)
> POW3(blas_limit)))
- {
- const int m = xcount, n = ycount, k = count, ldc = rystride;
- const GFC_INTEGER_16 one = 1, zero = 0;
- const int lda = (axstride == 1) ? aystride : axstride,
- ldb = (bxstride == 1) ? bystride : bxstride;
-
- if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
- {
- assert (gemm != NULL);
- gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m, &n, &k,
- &one, abase, &lda, bbase, &ldb, &zero, dest, &ldc, 1, 1);
- return;
- }
- }
-
- if (rxstride == 1 && axstride == 1 && bxstride == 1)
{
- const GFC_INTEGER_16 * restrict bbase_y;
- GFC_INTEGER_16 * restrict dest_y;
- const GFC_INTEGER_16 * restrict abase_n;
- GFC_INTEGER_16 bbase_yn;
+ const int m = xcount, n = ycount, k = count, ldc = rystride;
+ const GFC_INTEGER_16 one = 1, zero = 0;
+ const int lda = (axstride == 1) ? aystride : axstride,
+ ldb = (bxstride == 1) ? bystride : bxstride;
- if (rystride == xcount)
- memset (dest, 0, (sizeof (GFC_INTEGER_16) * xcount * ycount));
- else
+ if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
{
- for (y = 0; y < ycount; y++)
- for (x = 0; x < xcount; x++)
- dest[x + y*rystride] = (GFC_INTEGER_16)0;
+ assert (gemm != NULL);
+ gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m,
+ &n, &k, &one, abase, &lda, bbase, &ldb, &zero, dest,
+ &ldc, 1, 1);
+ return;
}
+ }
- for (y = 0; y < ycount; y++)
+ if (rxstride == 1 && axstride == 1 && bxstride == 1)
+ {
+ /* This block of code implements a tuned matmul, derived from
+ Superscalar GEMM-based level 3 BLAS, Beta version 0.1
+
+ Bo Kagstrom and Per Ling
+ Department of Computing Science
+ Umea University
+ S-901 87 Umea, Sweden
+
+ from netlib.org, translated to C, and modified for matmul.m4. */
+
+ const GFC_INTEGER_16 *a, *b;
+ GFC_INTEGER_16 *c;
+ const index_type m = xcount, n = ycount, k = count;
+
+ /* System generated locals */
+ index_type a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset,
+ i1, i2, i3, i4, i5, i6;
+
+ /* Local variables */
+ GFC_INTEGER_16 t1[65536], /* was [256][256] */
+ f11, f12, f21, f22, f31, f32, f41, f42,
+ f13, f14, f23, f24, f33, f34, f43, f44;
+ index_type i, j, l, ii, jj, ll;
+ index_type isec, jsec, lsec, uisec, ujsec, ulsec;
+
+ a = abase;
+ b = bbase;
+ c = retarray->base_addr;
+
+ /* Parameter adjustments */
+ c_dim1 = rystride;
+ c_offset = 1 + c_dim1;
+ c -= c_offset;
+ a_dim1 = aystride;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = bystride;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Early exit if possible */
+ if (m == 0 || n == 0 || k == 0)
+ return;
+
+ /* Empty c first. */
+ for (j=1; j<=n; j++)
+ for (i=1; i<=m; i++)
+ c[i + j * c_dim1] = (GFC_INTEGER_16)0;
+
+ /* Start turning the crank. */
+ i1 = n;
+ for (jj = 1; jj <= i1; jj += 512)
{
- bbase_y = bbase + y*bystride;
- dest_y = dest + y*rystride;
- for (n = 0; n < count; n++)
+ /* Computing MIN */
+ i2 = 512;
+ i3 = n - jj + 1;
+ jsec = min(i2,i3);
+ ujsec = jsec - jsec % 4;
+ i2 = k;
+ for (ll = 1; ll <= i2; ll += 256)
{
- abase_n = abase + n*aystride;
- bbase_yn = bbase_y[n];
- for (x = 0; x < xcount; x++)
+ /* Computing MIN */
+ i3 = 256;
+ i4 = k - ll + 1;
+ lsec = min(i3,i4);
+ ulsec = lsec - lsec % 2;
+
+ i3 = m;
+ for (ii = 1; ii <= i3; ii += 256)
{
- dest_y[x] += abase_n[x] * bbase_yn;
+ /* Computing MIN */
+ i4 = 256;
+ i5 = m - ii + 1;
+ isec = min(i4,i5);
+ uisec = isec - isec % 2;
+ i4 = ll + ulsec - 1;
+ for (l = ll; l <= i4; l += 2)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 2)
+ {
+ t1[l - ll + 1 + ((i - ii + 1) << 8) - 257] =
+ a[i + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 1) << 8) - 257] =
+ a[i + (l + 1) * a_dim1];
+ t1[l - ll + 1 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + (l + 1) * a_dim1];
+ }
+ if (uisec < isec)
+ {
+ t1[l - ll + 1 + (isec << 8) - 257] =
+ a[ii + isec - 1 + l * a_dim1];
+ t1[l - ll + 2 + (isec << 8) - 257] =
+ a[ii + isec - 1 + (l + 1) * a_dim1];
+ }
+ }
+ if (ulsec < lsec)
+ {
+ i4 = ii + isec - 1;
+ for (i = ii; i<= i4; ++i)
+ {
+ t1[lsec + ((i - ii + 1) << 8) - 257] =
+ a[i + (ll + lsec - 1) * a_dim1];
+ }
+ }
+
+ uisec = isec - isec % 4;
+ i4 = jj + ujsec - 1;
+ for (j = jj; j <= i4; j += 4)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f22 = c[i + 1 + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f23 = c[i + 1 + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ f24 = c[i + 1 + (j + 3) * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ f32 = c[i + 2 + (j + 1) * c_dim1];
+ f42 = c[i + 3 + (j + 1) * c_dim1];
+ f33 = c[i + 2 + (j + 2) * c_dim1];
+ f43 = c[i + 3 + (j + 2) * c_dim1];
+ f34 = c[i + 2 + (j + 3) * c_dim1];
+ f44 = c[i + 3 + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f22 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f23 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f24 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + j * b_dim1];
+ f32 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f42 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f33 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f43 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f34 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f44 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + 1 + (j + 1) * c_dim1] = f22;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + 1 + (j + 2) * c_dim1] = f23;
+ c[i + (j + 3) * c_dim1] = f14;
+ c[i + 1 + (j + 3) * c_dim1] = f24;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ c[i + 2 + (j + 1) * c_dim1] = f32;
+ c[i + 3 + (j + 1) * c_dim1] = f42;
+ c[i + 2 + (j + 2) * c_dim1] = f33;
+ c[i + 3 + (j + 2) * c_dim1] = f43;
+ c[i + 2 + (j + 3) * c_dim1] = f34;
+ c[i + 3 + (j + 3) * c_dim1] = f44;
+ }
+ if (uisec < isec)
+ {
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + (j + 3) * c_dim1] = f14;
+ }
+ }
+ }
+ if (ujsec < jsec)
+ {
+ i4 = jj + jsec - 1;
+ for (j = jj + ujsec; j <= i4; ++j)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) -
+ 257] * b[l + j * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) -
+ 257] * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ }
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ }
+ }
+ }
}
}
}
+ return;
}
else if (rxstride == 1 && aystride == 1 && bxstride == 1)
{
@@ -334,7 +567,9 @@ matmul_i16 (gfc_array_i16 * const restrict retarray,
for (n = 0; n < count; n++)
for (x = 0; x < xcount; x++)
/* dest[x,y] += a[x,n] * b[n,y] */
- dest[x*rxstride + y*rystride] += abase[x*axstride + n*aystride] * bbase[n*bxstride + y*bystride];
+ dest[x*rxstride + y*rystride] +=
+ abase[x*axstride + n*aystride] *
+ bbase[n*bxstride + y*bystride];
}
else if (GFC_DESCRIPTOR_RANK (a) == 1)
{
@@ -372,5 +607,4 @@ matmul_i16 (gfc_array_i16 * const restrict retarray,
}
}
}
-
#endif
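
Editorial note: one recurring idiom in these kernels is the index expression t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]. It is simply a 1-based, column-major access into the 256 x 256 scratch array: element (r, c) lives at r + (c << 8) - 257, which equals (r - 1) + (c - 1) * 256 in 0-based terms. The following throwaway check (the helper name is made up for illustration) confirms the equivalence:

#include <assert.h>

/* t1 is declared as a flat array of 65536 elements but used as a 1-based
   256 x 256 column-major matrix; both index formulas below agree.  */
static int
t1_index (int r, int c)          /* 1-based row r, column c */
{
  return r + (c << 8) - 257;     /* form used in the generated code */
}

int
main (void)
{
  for (int c = 1; c <= 256; c++)
    for (int r = 1; r <= 256; r++)
      assert (t1_index (r, c) == (r - 1) + (c - 1) * 256);
  return 0;
}
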
diff --git a/libgfortran/generated/matmul_i2.c b/libgfortran/generated/matmul_i2.c
index 281a013..5a06fcc 100644
--- a/libgfortran/generated/matmul_i2.c
+++ b/libgfortran/generated/matmul_i2.c
@@ -32,7 +32,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#if defined (HAVE_GFC_INTEGER_2)
/* Prototype for the BLAS ?gemm subroutine, a pointer to which can be
- passed to us by the front-end, in which case we'll call it for large
+ passed to us by the front-end, in which case we call it for large
matrices. */
typedef void (*blas_call)(const char *, const char *, const int *, const int *,
@@ -99,7 +99,7 @@ matmul_i2 (gfc_array_i2 * const restrict retarray,
o One-dimensional argument B is implicitly treated as a column matrix
dimensioned [count, 1], so ycount=1.
- */
+*/
if (retarray->base_addr == NULL)
{
@@ -127,47 +127,47 @@ matmul_i2 (gfc_array_i2 * const restrict retarray,
= xmallocarray (size0 ((array_t *) retarray), sizeof (GFC_INTEGER_2));
retarray->offset = 0;
}
- else if (unlikely (compile_options.bounds_check))
- {
- index_type ret_extent, arg_extent;
-
- if (GFC_DESCRIPTOR_RANK (a) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else if (GFC_DESCRIPTOR_RANK (b) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 1:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
-
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 2:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- }
+ else if (unlikely (compile_options.bounds_check))
+ {
+ index_type ret_extent, arg_extent;
+
+ if (GFC_DESCRIPTOR_RANK (a) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else if (GFC_DESCRIPTOR_RANK (b) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 1:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 2:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ }
if (GFC_DESCRIPTOR_RANK (retarray) == 1)
@@ -230,61 +230,294 @@ matmul_i2 (gfc_array_i2 * const restrict retarray,
bbase = b->base_addr;
dest = retarray->base_addr;
-
- /* Now that everything is set up, we're performing the multiplication
+ /* Now that everything is set up, we perform the multiplication
itself. */
#define POW3(x) (((float) (x)) * ((float) (x)) * ((float) (x)))
+#define min(a,b) ((a) <= (b) ? (a) : (b))
+#define max(a,b) ((a) >= (b) ? (a) : (b))
if (try_blas && rxstride == 1 && (axstride == 1 || aystride == 1)
&& (bxstride == 1 || bystride == 1)
&& (((float) xcount) * ((float) ycount) * ((float) count)
> POW3(blas_limit)))
- {
- const int m = xcount, n = ycount, k = count, ldc = rystride;
- const GFC_INTEGER_2 one = 1, zero = 0;
- const int lda = (axstride == 1) ? aystride : axstride,
- ldb = (bxstride == 1) ? bystride : bxstride;
-
- if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
- {
- assert (gemm != NULL);
- gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m, &n, &k,
- &one, abase, &lda, bbase, &ldb, &zero, dest, &ldc, 1, 1);
- return;
- }
- }
-
- if (rxstride == 1 && axstride == 1 && bxstride == 1)
{
- const GFC_INTEGER_2 * restrict bbase_y;
- GFC_INTEGER_2 * restrict dest_y;
- const GFC_INTEGER_2 * restrict abase_n;
- GFC_INTEGER_2 bbase_yn;
+ const int m = xcount, n = ycount, k = count, ldc = rystride;
+ const GFC_INTEGER_2 one = 1, zero = 0;
+ const int lda = (axstride == 1) ? aystride : axstride,
+ ldb = (bxstride == 1) ? bystride : bxstride;
- if (rystride == xcount)
- memset (dest, 0, (sizeof (GFC_INTEGER_2) * xcount * ycount));
- else
+ if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
{
- for (y = 0; y < ycount; y++)
- for (x = 0; x < xcount; x++)
- dest[x + y*rystride] = (GFC_INTEGER_2)0;
+ assert (gemm != NULL);
+ gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m,
+ &n, &k, &one, abase, &lda, bbase, &ldb, &zero, dest,
+ &ldc, 1, 1);
+ return;
}
+ }
- for (y = 0; y < ycount; y++)
+ if (rxstride == 1 && axstride == 1 && bxstride == 1)
+ {
+ /* This block of code implements a tuned matmul, derived from
+ Superscalar GEMM-based level 3 BLAS, Beta version 0.1
+
+ Bo Kagstrom and Per Ling
+ Department of Computing Science
+ Umea University
+ S-901 87 Umea, Sweden
+
+ from netlib.org, translated to C, and modified for matmul.m4. */
+
+ const GFC_INTEGER_2 *a, *b;
+ GFC_INTEGER_2 *c;
+ const index_type m = xcount, n = ycount, k = count;
+
+ /* System generated locals */
+ index_type a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset,
+ i1, i2, i3, i4, i5, i6;
+
+ /* Local variables */
+ GFC_INTEGER_2 t1[65536], /* was [256][256] */
+ f11, f12, f21, f22, f31, f32, f41, f42,
+ f13, f14, f23, f24, f33, f34, f43, f44;
+ index_type i, j, l, ii, jj, ll;
+ index_type isec, jsec, lsec, uisec, ujsec, ulsec;
+
+ a = abase;
+ b = bbase;
+ c = retarray->base_addr;
+
+ /* Parameter adjustments */
+ c_dim1 = rystride;
+ c_offset = 1 + c_dim1;
+ c -= c_offset;
+ a_dim1 = aystride;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = bystride;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Early exit if possible */
+ if (m == 0 || n == 0 || k == 0)
+ return;
+
+ /* Empty c first. */
+ for (j=1; j<=n; j++)
+ for (i=1; i<=m; i++)
+ c[i + j * c_dim1] = (GFC_INTEGER_2)0;
+
+ /* Start turning the crank. */
+ i1 = n;
+ for (jj = 1; jj <= i1; jj += 512)
{
- bbase_y = bbase + y*bystride;
- dest_y = dest + y*rystride;
- for (n = 0; n < count; n++)
+ /* Computing MIN */
+ i2 = 512;
+ i3 = n - jj + 1;
+ jsec = min(i2,i3);
+ ujsec = jsec - jsec % 4;
+ i2 = k;
+ for (ll = 1; ll <= i2; ll += 256)
{
- abase_n = abase + n*aystride;
- bbase_yn = bbase_y[n];
- for (x = 0; x < xcount; x++)
+ /* Computing MIN */
+ i3 = 256;
+ i4 = k - ll + 1;
+ lsec = min(i3,i4);
+ ulsec = lsec - lsec % 2;
+
+ i3 = m;
+ for (ii = 1; ii <= i3; ii += 256)
{
- dest_y[x] += abase_n[x] * bbase_yn;
+ /* Computing MIN */
+ i4 = 256;
+ i5 = m - ii + 1;
+ isec = min(i4,i5);
+ uisec = isec - isec % 2;
+ i4 = ll + ulsec - 1;
+ for (l = ll; l <= i4; l += 2)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 2)
+ {
+ t1[l - ll + 1 + ((i - ii + 1) << 8) - 257] =
+ a[i + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 1) << 8) - 257] =
+ a[i + (l + 1) * a_dim1];
+ t1[l - ll + 1 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + (l + 1) * a_dim1];
+ }
+ if (uisec < isec)
+ {
+ t1[l - ll + 1 + (isec << 8) - 257] =
+ a[ii + isec - 1 + l * a_dim1];
+ t1[l - ll + 2 + (isec << 8) - 257] =
+ a[ii + isec - 1 + (l + 1) * a_dim1];
+ }
+ }
+ if (ulsec < lsec)
+ {
+ i4 = ii + isec - 1;
+ for (i = ii; i<= i4; ++i)
+ {
+ t1[lsec + ((i - ii + 1) << 8) - 257] =
+ a[i + (ll + lsec - 1) * a_dim1];
+ }
+ }
+
+ uisec = isec - isec % 4;
+ i4 = jj + ujsec - 1;
+ for (j = jj; j <= i4; j += 4)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f22 = c[i + 1 + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f23 = c[i + 1 + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ f24 = c[i + 1 + (j + 3) * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ f32 = c[i + 2 + (j + 1) * c_dim1];
+ f42 = c[i + 3 + (j + 1) * c_dim1];
+ f33 = c[i + 2 + (j + 2) * c_dim1];
+ f43 = c[i + 3 + (j + 2) * c_dim1];
+ f34 = c[i + 2 + (j + 3) * c_dim1];
+ f44 = c[i + 3 + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f22 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f23 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f24 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + j * b_dim1];
+ f32 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f42 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f33 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f43 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f34 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f44 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + 1 + (j + 1) * c_dim1] = f22;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + 1 + (j + 2) * c_dim1] = f23;
+ c[i + (j + 3) * c_dim1] = f14;
+ c[i + 1 + (j + 3) * c_dim1] = f24;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ c[i + 2 + (j + 1) * c_dim1] = f32;
+ c[i + 3 + (j + 1) * c_dim1] = f42;
+ c[i + 2 + (j + 2) * c_dim1] = f33;
+ c[i + 3 + (j + 2) * c_dim1] = f43;
+ c[i + 2 + (j + 3) * c_dim1] = f34;
+ c[i + 3 + (j + 3) * c_dim1] = f44;
+ }
+ if (uisec < isec)
+ {
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + (j + 3) * c_dim1] = f14;
+ }
+ }
+ }
+ if (ujsec < jsec)
+ {
+ i4 = jj + jsec - 1;
+ for (j = jj + ujsec; j <= i4; ++j)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) -
+ 257] * b[l + j * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) -
+ 257] * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ }
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ }
+ }
+ }
}
}
}
+ return;
}
else if (rxstride == 1 && aystride == 1 && bxstride == 1)
{
@@ -334,7 +567,9 @@ matmul_i2 (gfc_array_i2 * const restrict retarray,
for (n = 0; n < count; n++)
for (x = 0; x < xcount; x++)
/* dest[x,y] += a[x,n] * b[n,y] */
- dest[x*rxstride + y*rystride] += abase[x*axstride + n*aystride] * bbase[n*bxstride + y*bystride];
+ dest[x*rxstride + y*rystride] +=
+ abase[x*axstride + n*aystride] *
+ bbase[n*bxstride + y*bystride];
}
else if (GFC_DESCRIPTOR_RANK (a) == 1)
{
@@ -372,5 +607,4 @@ matmul_i2 (gfc_array_i2 * const restrict retarray,
}
}
}
-
#endif
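
Editorial note: the retained BLAS branch only fires when the front end passed in an external gemm (the -fexternal-blas path) and the matrices are large enough; the POW3(blas_limit) test compares the multiply count m*n*k against the cube of the -fblas-matmul-limit value. A sketch of just that dispatch decision, with a hypothetical wrapper name:

/* Sketch only: use_external_gemm is a made-up name, not part of
   libgfortran; try_blas and blas_limit correspond to the parameters the
   front end supplies under -fexternal-blas / -fblas-matmul-limit.  */
#include <stdbool.h>

static bool
use_external_gemm (long m, long n, long k, int blas_limit, bool try_blas)
{
  /* Same cutoff as the POW3 test above: hand the work to *GEMM only when
     the multiply-add count m*n*k exceeds blas_limit cubed.  */
  float work  = (float) m * (float) n * (float) k;
  float limit = (float) blas_limit * (float) blas_limit * (float) blas_limit;
  return try_blas && work > limit;
}
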
diff --git a/libgfortran/generated/matmul_i4.c b/libgfortran/generated/matmul_i4.c
index 2dc526d..aee8e4d 100644
--- a/libgfortran/generated/matmul_i4.c
+++ b/libgfortran/generated/matmul_i4.c
@@ -32,7 +32,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#if defined (HAVE_GFC_INTEGER_4)
/* Prototype for the BLAS ?gemm subroutine, a pointer to which can be
- passed to us by the front-end, in which case we'll call it for large
+ passed to us by the front-end, in which case we call it for large
matrices. */
typedef void (*blas_call)(const char *, const char *, const int *, const int *,
@@ -99,7 +99,7 @@ matmul_i4 (gfc_array_i4 * const restrict retarray,
o One-dimensional argument B is implicitly treated as a column matrix
dimensioned [count, 1], so ycount=1.
- */
+*/
if (retarray->base_addr == NULL)
{
@@ -127,47 +127,47 @@ matmul_i4 (gfc_array_i4 * const restrict retarray,
= xmallocarray (size0 ((array_t *) retarray), sizeof (GFC_INTEGER_4));
retarray->offset = 0;
}
- else if (unlikely (compile_options.bounds_check))
- {
- index_type ret_extent, arg_extent;
-
- if (GFC_DESCRIPTOR_RANK (a) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else if (GFC_DESCRIPTOR_RANK (b) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 1:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
-
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 2:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- }
+ else if (unlikely (compile_options.bounds_check))
+ {
+ index_type ret_extent, arg_extent;
+
+ if (GFC_DESCRIPTOR_RANK (a) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else if (GFC_DESCRIPTOR_RANK (b) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 1:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 2:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ }
if (GFC_DESCRIPTOR_RANK (retarray) == 1)
@@ -230,61 +230,294 @@ matmul_i4 (gfc_array_i4 * const restrict retarray,
bbase = b->base_addr;
dest = retarray->base_addr;
-
- /* Now that everything is set up, we're performing the multiplication
+ /* Now that everything is set up, we perform the multiplication
itself. */
#define POW3(x) (((float) (x)) * ((float) (x)) * ((float) (x)))
+#define min(a,b) ((a) <= (b) ? (a) : (b))
+#define max(a,b) ((a) >= (b) ? (a) : (b))
if (try_blas && rxstride == 1 && (axstride == 1 || aystride == 1)
&& (bxstride == 1 || bystride == 1)
&& (((float) xcount) * ((float) ycount) * ((float) count)
> POW3(blas_limit)))
- {
- const int m = xcount, n = ycount, k = count, ldc = rystride;
- const GFC_INTEGER_4 one = 1, zero = 0;
- const int lda = (axstride == 1) ? aystride : axstride,
- ldb = (bxstride == 1) ? bystride : bxstride;
-
- if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
- {
- assert (gemm != NULL);
- gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m, &n, &k,
- &one, abase, &lda, bbase, &ldb, &zero, dest, &ldc, 1, 1);
- return;
- }
- }
-
- if (rxstride == 1 && axstride == 1 && bxstride == 1)
{
- const GFC_INTEGER_4 * restrict bbase_y;
- GFC_INTEGER_4 * restrict dest_y;
- const GFC_INTEGER_4 * restrict abase_n;
- GFC_INTEGER_4 bbase_yn;
+ const int m = xcount, n = ycount, k = count, ldc = rystride;
+ const GFC_INTEGER_4 one = 1, zero = 0;
+ const int lda = (axstride == 1) ? aystride : axstride,
+ ldb = (bxstride == 1) ? bystride : bxstride;
- if (rystride == xcount)
- memset (dest, 0, (sizeof (GFC_INTEGER_4) * xcount * ycount));
- else
+ if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
{
- for (y = 0; y < ycount; y++)
- for (x = 0; x < xcount; x++)
- dest[x + y*rystride] = (GFC_INTEGER_4)0;
+ assert (gemm != NULL);
+ gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m,
+ &n, &k, &one, abase, &lda, bbase, &ldb, &zero, dest,
+ &ldc, 1, 1);
+ return;
}
+ }
- for (y = 0; y < ycount; y++)
+ if (rxstride == 1 && axstride == 1 && bxstride == 1)
+ {
+ /* This block of code implements a tuned matmul, derived from
+ Superscalar GEMM-based level 3 BLAS, Beta version 0.1
+
+ Bo Kagstrom and Per Ling
+ Department of Computing Science
+ Umea University
+ S-901 87 Umea, Sweden
+
+ from netlib.org, translated to C, and modified for matmul.m4. */
+
+ const GFC_INTEGER_4 *a, *b;
+ GFC_INTEGER_4 *c;
+ const index_type m = xcount, n = ycount, k = count;
+
+ /* System generated locals */
+ index_type a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset,
+ i1, i2, i3, i4, i5, i6;
+
+ /* Local variables */
+ GFC_INTEGER_4 t1[65536], /* was [256][256] */
+ f11, f12, f21, f22, f31, f32, f41, f42,
+ f13, f14, f23, f24, f33, f34, f43, f44;
+ index_type i, j, l, ii, jj, ll;
+ index_type isec, jsec, lsec, uisec, ujsec, ulsec;
+
+ a = abase;
+ b = bbase;
+ c = retarray->base_addr;
+
+ /* Parameter adjustments */
+ c_dim1 = rystride;
+ c_offset = 1 + c_dim1;
+ c -= c_offset;
+ a_dim1 = aystride;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = bystride;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Early exit if possible */
+ if (m == 0 || n == 0 || k == 0)
+ return;
+
+ /* Empty c first. */
+ for (j=1; j<=n; j++)
+ for (i=1; i<=m; i++)
+ c[i + j * c_dim1] = (GFC_INTEGER_4)0;
+
+ /* Start turning the crank. */
+ i1 = n;
+ for (jj = 1; jj <= i1; jj += 512)
{
- bbase_y = bbase + y*bystride;
- dest_y = dest + y*rystride;
- for (n = 0; n < count; n++)
+ /* Computing MIN */
+ i2 = 512;
+ i3 = n - jj + 1;
+ jsec = min(i2,i3);
+ ujsec = jsec - jsec % 4;
+ i2 = k;
+ for (ll = 1; ll <= i2; ll += 256)
{
- abase_n = abase + n*aystride;
- bbase_yn = bbase_y[n];
- for (x = 0; x < xcount; x++)
+ /* Computing MIN */
+ i3 = 256;
+ i4 = k - ll + 1;
+ lsec = min(i3,i4);
+ ulsec = lsec - lsec % 2;
+
+ i3 = m;
+ for (ii = 1; ii <= i3; ii += 256)
{
- dest_y[x] += abase_n[x] * bbase_yn;
+ /* Computing MIN */
+ i4 = 256;
+ i5 = m - ii + 1;
+ isec = min(i4,i5);
+ uisec = isec - isec % 2;
+ i4 = ll + ulsec - 1;
+ for (l = ll; l <= i4; l += 2)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 2)
+ {
+ t1[l - ll + 1 + ((i - ii + 1) << 8) - 257] =
+ a[i + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 1) << 8) - 257] =
+ a[i + (l + 1) * a_dim1];
+ t1[l - ll + 1 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + (l + 1) * a_dim1];
+ }
+ if (uisec < isec)
+ {
+ t1[l - ll + 1 + (isec << 8) - 257] =
+ a[ii + isec - 1 + l * a_dim1];
+ t1[l - ll + 2 + (isec << 8) - 257] =
+ a[ii + isec - 1 + (l + 1) * a_dim1];
+ }
+ }
+ if (ulsec < lsec)
+ {
+ i4 = ii + isec - 1;
+ for (i = ii; i<= i4; ++i)
+ {
+ t1[lsec + ((i - ii + 1) << 8) - 257] =
+ a[i + (ll + lsec - 1) * a_dim1];
+ }
+ }
+
+ uisec = isec - isec % 4;
+ i4 = jj + ujsec - 1;
+ for (j = jj; j <= i4; j += 4)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f22 = c[i + 1 + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f23 = c[i + 1 + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ f24 = c[i + 1 + (j + 3) * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ f32 = c[i + 2 + (j + 1) * c_dim1];
+ f42 = c[i + 3 + (j + 1) * c_dim1];
+ f33 = c[i + 2 + (j + 2) * c_dim1];
+ f43 = c[i + 3 + (j + 2) * c_dim1];
+ f34 = c[i + 2 + (j + 3) * c_dim1];
+ f44 = c[i + 3 + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f22 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f23 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f24 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + j * b_dim1];
+ f32 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f42 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f33 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f43 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f34 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f44 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + 1 + (j + 1) * c_dim1] = f22;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + 1 + (j + 2) * c_dim1] = f23;
+ c[i + (j + 3) * c_dim1] = f14;
+ c[i + 1 + (j + 3) * c_dim1] = f24;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ c[i + 2 + (j + 1) * c_dim1] = f32;
+ c[i + 3 + (j + 1) * c_dim1] = f42;
+ c[i + 2 + (j + 2) * c_dim1] = f33;
+ c[i + 3 + (j + 2) * c_dim1] = f43;
+ c[i + 2 + (j + 3) * c_dim1] = f34;
+ c[i + 3 + (j + 3) * c_dim1] = f44;
+ }
+ if (uisec < isec)
+ {
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + (j + 3) * c_dim1] = f14;
+ }
+ }
+ }
+ if (ujsec < jsec)
+ {
+ i4 = jj + jsec - 1;
+ for (j = jj + ujsec; j <= i4; ++j)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) -
+ 257] * b[l + j * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) -
+ 257] * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ }
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ }
+ }
+ }
}
}
}
+ return;
}
else if (rxstride == 1 && aystride == 1 && bxstride == 1)
{
@@ -334,7 +567,9 @@ matmul_i4 (gfc_array_i4 * const restrict retarray,
for (n = 0; n < count; n++)
for (x = 0; x < xcount; x++)
/* dest[x,y] += a[x,n] * b[n,y] */
- dest[x*rxstride + y*rystride] += abase[x*axstride + n*aystride] * bbase[n*bxstride + y*bystride];
+ dest[x*rxstride + y*rystride] +=
+ abase[x*axstride + n*aystride] *
+ bbase[n*bxstride + y*bystride];
}
else if (GFC_DESCRIPTOR_RANK (a) == 1)
{
@@ -372,5 +607,4 @@ matmul_i4 (gfc_array_i4 * const restrict retarray,
}
}
}
-
#endif
diff --git a/libgfortran/generated/matmul_i8.c b/libgfortran/generated/matmul_i8.c
index 0ff728d..902b284 100644
--- a/libgfortran/generated/matmul_i8.c
+++ b/libgfortran/generated/matmul_i8.c
@@ -32,7 +32,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#if defined (HAVE_GFC_INTEGER_8)
/* Prototype for the BLAS ?gemm subroutine, a pointer to which can be
- passed to us by the front-end, in which case we'll call it for large
+ passed to us by the front-end, in which case we call it for large
matrices. */
typedef void (*blas_call)(const char *, const char *, const int *, const int *,
@@ -99,7 +99,7 @@ matmul_i8 (gfc_array_i8 * const restrict retarray,
o One-dimensional argument B is implicitly treated as a column matrix
dimensioned [count, 1], so ycount=1.
- */
+*/
if (retarray->base_addr == NULL)
{
@@ -127,47 +127,47 @@ matmul_i8 (gfc_array_i8 * const restrict retarray,
= xmallocarray (size0 ((array_t *) retarray), sizeof (GFC_INTEGER_8));
retarray->offset = 0;
}
- else if (unlikely (compile_options.bounds_check))
- {
- index_type ret_extent, arg_extent;
-
- if (GFC_DESCRIPTOR_RANK (a) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else if (GFC_DESCRIPTOR_RANK (b) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 1:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
-
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 2:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- }
+ else if (unlikely (compile_options.bounds_check))
+ {
+ index_type ret_extent, arg_extent;
+
+ if (GFC_DESCRIPTOR_RANK (a) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else if (GFC_DESCRIPTOR_RANK (b) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 1:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 2:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ }
if (GFC_DESCRIPTOR_RANK (retarray) == 1)
@@ -230,61 +230,294 @@ matmul_i8 (gfc_array_i8 * const restrict retarray,
bbase = b->base_addr;
dest = retarray->base_addr;
-
- /* Now that everything is set up, we're performing the multiplication
+ /* Now that everything is set up, we perform the multiplication
itself. */
#define POW3(x) (((float) (x)) * ((float) (x)) * ((float) (x)))
+#define min(a,b) ((a) <= (b) ? (a) : (b))
+#define max(a,b) ((a) >= (b) ? (a) : (b))
if (try_blas && rxstride == 1 && (axstride == 1 || aystride == 1)
&& (bxstride == 1 || bystride == 1)
&& (((float) xcount) * ((float) ycount) * ((float) count)
> POW3(blas_limit)))
- {
- const int m = xcount, n = ycount, k = count, ldc = rystride;
- const GFC_INTEGER_8 one = 1, zero = 0;
- const int lda = (axstride == 1) ? aystride : axstride,
- ldb = (bxstride == 1) ? bystride : bxstride;
-
- if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
- {
- assert (gemm != NULL);
- gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m, &n, &k,
- &one, abase, &lda, bbase, &ldb, &zero, dest, &ldc, 1, 1);
- return;
- }
- }
-
- if (rxstride == 1 && axstride == 1 && bxstride == 1)
{
- const GFC_INTEGER_8 * restrict bbase_y;
- GFC_INTEGER_8 * restrict dest_y;
- const GFC_INTEGER_8 * restrict abase_n;
- GFC_INTEGER_8 bbase_yn;
+ const int m = xcount, n = ycount, k = count, ldc = rystride;
+ const GFC_INTEGER_8 one = 1, zero = 0;
+ const int lda = (axstride == 1) ? aystride : axstride,
+ ldb = (bxstride == 1) ? bystride : bxstride;
- if (rystride == xcount)
- memset (dest, 0, (sizeof (GFC_INTEGER_8) * xcount * ycount));
- else
+ if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
{
- for (y = 0; y < ycount; y++)
- for (x = 0; x < xcount; x++)
- dest[x + y*rystride] = (GFC_INTEGER_8)0;
+ assert (gemm != NULL);
+ gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m,
+ &n, &k, &one, abase, &lda, bbase, &ldb, &zero, dest,
+ &ldc, 1, 1);
+ return;
}
+ }
- for (y = 0; y < ycount; y++)
+ if (rxstride == 1 && axstride == 1 && bxstride == 1)
+ {
+ /* This block of code implements a tuned matmul, derived from
+ Superscalar GEMM-based level 3 BLAS, Beta version 0.1
+
+ Bo Kagstrom and Per Ling
+ Department of Computing Science
+ Umea University
+ S-901 87 Umea, Sweden
+
+ from netlib.org, translated to C, and modified for matmul.m4. */
+
+ const GFC_INTEGER_8 *a, *b;
+ GFC_INTEGER_8 *c;
+ const index_type m = xcount, n = ycount, k = count;
+
+ /* System generated locals */
+ index_type a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset,
+ i1, i2, i3, i4, i5, i6;
+
+ /* Local variables */
+ GFC_INTEGER_8 t1[65536], /* was [256][256] */
+ f11, f12, f21, f22, f31, f32, f41, f42,
+ f13, f14, f23, f24, f33, f34, f43, f44;
+ index_type i, j, l, ii, jj, ll;
+ index_type isec, jsec, lsec, uisec, ujsec, ulsec;
+
+ a = abase;
+ b = bbase;
+ c = retarray->base_addr;
+
+ /* Parameter adjustments */
+ c_dim1 = rystride;
+ c_offset = 1 + c_dim1;
+ c -= c_offset;
+ a_dim1 = aystride;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = bystride;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Early exit if possible */
+ if (m == 0 || n == 0 || k == 0)
+ return;
+
+ /* Empty c first. */
+ for (j=1; j<=n; j++)
+ for (i=1; i<=m; i++)
+ c[i + j * c_dim1] = (GFC_INTEGER_8)0;
+
+ /* Start turning the crank. */
+ i1 = n;
+ for (jj = 1; jj <= i1; jj += 512)
{
- bbase_y = bbase + y*bystride;
- dest_y = dest + y*rystride;
- for (n = 0; n < count; n++)
+ /* Computing MIN */
+ i2 = 512;
+ i3 = n - jj + 1;
+ jsec = min(i2,i3);
+ ujsec = jsec - jsec % 4;
+ i2 = k;
+ for (ll = 1; ll <= i2; ll += 256)
{
- abase_n = abase + n*aystride;
- bbase_yn = bbase_y[n];
- for (x = 0; x < xcount; x++)
+ /* Computing MIN */
+ i3 = 256;
+ i4 = k - ll + 1;
+ lsec = min(i3,i4);
+ ulsec = lsec - lsec % 2;
+
+ i3 = m;
+ for (ii = 1; ii <= i3; ii += 256)
{
- dest_y[x] += abase_n[x] * bbase_yn;
+ /* Computing MIN */
+ i4 = 256;
+ i5 = m - ii + 1;
+ isec = min(i4,i5);
+ uisec = isec - isec % 2;
+ i4 = ll + ulsec - 1;
+ for (l = ll; l <= i4; l += 2)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 2)
+ {
+ t1[l - ll + 1 + ((i - ii + 1) << 8) - 257] =
+ a[i + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 1) << 8) - 257] =
+ a[i + (l + 1) * a_dim1];
+ t1[l - ll + 1 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + (l + 1) * a_dim1];
+ }
+ if (uisec < isec)
+ {
+ t1[l - ll + 1 + (isec << 8) - 257] =
+ a[ii + isec - 1 + l * a_dim1];
+ t1[l - ll + 2 + (isec << 8) - 257] =
+ a[ii + isec - 1 + (l + 1) * a_dim1];
+ }
+ }
+ if (ulsec < lsec)
+ {
+ i4 = ii + isec - 1;
+ for (i = ii; i<= i4; ++i)
+ {
+ t1[lsec + ((i - ii + 1) << 8) - 257] =
+ a[i + (ll + lsec - 1) * a_dim1];
+ }
+ }
+
+ uisec = isec - isec % 4;
+ i4 = jj + ujsec - 1;
+ for (j = jj; j <= i4; j += 4)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f22 = c[i + 1 + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f23 = c[i + 1 + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ f24 = c[i + 1 + (j + 3) * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ f32 = c[i + 2 + (j + 1) * c_dim1];
+ f42 = c[i + 3 + (j + 1) * c_dim1];
+ f33 = c[i + 2 + (j + 2) * c_dim1];
+ f43 = c[i + 3 + (j + 2) * c_dim1];
+ f34 = c[i + 2 + (j + 3) * c_dim1];
+ f44 = c[i + 3 + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f22 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f23 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f24 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + j * b_dim1];
+ f32 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f42 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f33 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f43 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f34 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f44 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + 1 + (j + 1) * c_dim1] = f22;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + 1 + (j + 2) * c_dim1] = f23;
+ c[i + (j + 3) * c_dim1] = f14;
+ c[i + 1 + (j + 3) * c_dim1] = f24;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ c[i + 2 + (j + 1) * c_dim1] = f32;
+ c[i + 3 + (j + 1) * c_dim1] = f42;
+ c[i + 2 + (j + 2) * c_dim1] = f33;
+ c[i + 3 + (j + 2) * c_dim1] = f43;
+ c[i + 2 + (j + 3) * c_dim1] = f34;
+ c[i + 3 + (j + 3) * c_dim1] = f44;
+ }
+ if (uisec < isec)
+ {
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + (j + 3) * c_dim1] = f14;
+ }
+ }
+ }
+ if (ujsec < jsec)
+ {
+ i4 = jj + jsec - 1;
+ for (j = jj + ujsec; j <= i4; ++j)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) -
+ 257] * b[l + j * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) -
+ 257] * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ }
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ }
+ }
+ }
}
}
}
+ return;
}
else if (rxstride == 1 && aystride == 1 && bxstride == 1)
{
@@ -334,7 +567,9 @@ matmul_i8 (gfc_array_i8 * const restrict retarray,
for (n = 0; n < count; n++)
for (x = 0; x < xcount; x++)
/* dest[x,y] += a[x,n] * b[n,y] */
- dest[x*rxstride + y*rystride] += abase[x*axstride + n*aystride] * bbase[n*bxstride + y*bystride];
+ dest[x*rxstride + y*rystride] +=
+ abase[x*axstride + n*aystride] *
+ bbase[n*bxstride + y*bystride];
}
else if (GFC_DESCRIPTOR_RANK (a) == 1)
{
@@ -372,5 +607,4 @@ matmul_i8 (gfc_array_i8 * const restrict retarray,
}
}
}
-
#endif
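
The loop nest in the blocked path walks the columns of the result in 512-wide panels (jj), the inner dimension in 256-element strips (ll) and the rows in 256-element strips (ii); jsec, lsec and isec are the actual block sizes at the matrix edges, and ujsec, ulsec and uisec are the parts of each block divisible by the unroll factors, with scalar cleanup loops for the remainder. A stripped-down outline of that structure, with the packing and the 4x4 kernel reduced to comments (the function and its name are illustrative only; the block sizes follow the generated code):

#define min(a,b) ((a) <= (b) ? (a) : (b))

/* Outline of the blocking used by the generated matmul functions.  */
static void
blocked_outline (int m, int n, int k)
{
  int ii, jj, ll, isec, jsec, lsec;

  for (jj = 1; jj <= n; jj += 512)
    {
      jsec = min (512, n - jj + 1);          /* columns of C in this panel */
      for (ll = 1; ll <= k; ll += 256)
        {
          lsec = min (256, k - ll + 1);      /* inner length in this strip */
          for (ii = 1; ii <= m; ii += 256)
            {
              isec = min (256, m - ii + 1);  /* rows of C in this strip */
              /* 1. Pack the isec x lsec block of A into t1.  */
              /* 2. Run the 4x4 register-blocked kernel on the part of
                    the block divisible by 4.  */
              /* 3. Scalar cleanup for the remaining rows and columns.  */
              (void) jsec; (void) lsec; (void) isec;
            }
        }
    }
}
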
diff --git a/libgfortran/generated/matmul_r10.c b/libgfortran/generated/matmul_r10.c
index a34856f..8bb1e62 100644
--- a/libgfortran/generated/matmul_r10.c
+++ b/libgfortran/generated/matmul_r10.c
@@ -32,7 +32,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#if defined (HAVE_GFC_REAL_10)
/* Prototype for the BLAS ?gemm subroutine, a pointer to which can be
- passed to us by the front-end, in which case we'll call it for large
+ passed to us by the front-end, in which case we call it for large
matrices. */
typedef void (*blas_call)(const char *, const char *, const int *, const int *,
@@ -99,7 +99,7 @@ matmul_r10 (gfc_array_r10 * const restrict retarray,
o One-dimensional argument B is implicitly treated as a column matrix
dimensioned [count, 1], so ycount=1.
- */
+*/
if (retarray->base_addr == NULL)
{
@@ -127,47 +127,47 @@ matmul_r10 (gfc_array_r10 * const restrict retarray,
= xmallocarray (size0 ((array_t *) retarray), sizeof (GFC_REAL_10));
retarray->offset = 0;
}
- else if (unlikely (compile_options.bounds_check))
- {
- index_type ret_extent, arg_extent;
-
- if (GFC_DESCRIPTOR_RANK (a) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else if (GFC_DESCRIPTOR_RANK (b) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 1:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
-
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 2:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- }
+ else if (unlikely (compile_options.bounds_check))
+ {
+ index_type ret_extent, arg_extent;
+
+ if (GFC_DESCRIPTOR_RANK (a) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else if (GFC_DESCRIPTOR_RANK (b) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 1:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 2:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ }
if (GFC_DESCRIPTOR_RANK (retarray) == 1)
@@ -230,61 +230,294 @@ matmul_r10 (gfc_array_r10 * const restrict retarray,
bbase = b->base_addr;
dest = retarray->base_addr;
-
- /* Now that everything is set up, we're performing the multiplication
+ /* Now that everything is set up, we perform the multiplication
itself. */
#define POW3(x) (((float) (x)) * ((float) (x)) * ((float) (x)))
+#define min(a,b) ((a) <= (b) ? (a) : (b))
+#define max(a,b) ((a) >= (b) ? (a) : (b))
if (try_blas && rxstride == 1 && (axstride == 1 || aystride == 1)
&& (bxstride == 1 || bystride == 1)
&& (((float) xcount) * ((float) ycount) * ((float) count)
> POW3(blas_limit)))
- {
- const int m = xcount, n = ycount, k = count, ldc = rystride;
- const GFC_REAL_10 one = 1, zero = 0;
- const int lda = (axstride == 1) ? aystride : axstride,
- ldb = (bxstride == 1) ? bystride : bxstride;
-
- if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
- {
- assert (gemm != NULL);
- gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m, &n, &k,
- &one, abase, &lda, bbase, &ldb, &zero, dest, &ldc, 1, 1);
- return;
- }
- }
-
- if (rxstride == 1 && axstride == 1 && bxstride == 1)
{
- const GFC_REAL_10 * restrict bbase_y;
- GFC_REAL_10 * restrict dest_y;
- const GFC_REAL_10 * restrict abase_n;
- GFC_REAL_10 bbase_yn;
+ const int m = xcount, n = ycount, k = count, ldc = rystride;
+ const GFC_REAL_10 one = 1, zero = 0;
+ const int lda = (axstride == 1) ? aystride : axstride,
+ ldb = (bxstride == 1) ? bystride : bxstride;
- if (rystride == xcount)
- memset (dest, 0, (sizeof (GFC_REAL_10) * xcount * ycount));
- else
+ if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
{
- for (y = 0; y < ycount; y++)
- for (x = 0; x < xcount; x++)
- dest[x + y*rystride] = (GFC_REAL_10)0;
+ assert (gemm != NULL);
+ gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m,
+ &n, &k, &one, abase, &lda, bbase, &ldb, &zero, dest,
+ &ldc, 1, 1);
+ return;
}
+ }
- for (y = 0; y < ycount; y++)
+ if (rxstride == 1 && axstride == 1 && bxstride == 1)
+ {
+ /* This block of code implements a tuned matmul, derived from
+ Superscalar GEMM-based level 3 BLAS, Beta version 0.1
+
+ Bo Kagstrom and Per Ling
+ Department of Computing Science
+ Umea University
+ S-901 87 Umea, Sweden
+
+ from netlib.org, translated to C, and modified for matmul.m4. */
+
+ const GFC_REAL_10 *a, *b;
+ GFC_REAL_10 *c;
+ const index_type m = xcount, n = ycount, k = count;
+
+ /* System generated locals */
+ index_type a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset,
+ i1, i2, i3, i4, i5, i6;
+
+ /* Local variables */
+ GFC_REAL_10 t1[65536], /* was [256][256] */
+ f11, f12, f21, f22, f31, f32, f41, f42,
+ f13, f14, f23, f24, f33, f34, f43, f44;
+ index_type i, j, l, ii, jj, ll;
+ index_type isec, jsec, lsec, uisec, ujsec, ulsec;
+
+ a = abase;
+ b = bbase;
+ c = retarray->base_addr;
+
+ /* Parameter adjustments */
+ c_dim1 = rystride;
+ c_offset = 1 + c_dim1;
+ c -= c_offset;
+ a_dim1 = aystride;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = bystride;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Early exit if possible */
+ if (m == 0 || n == 0 || k == 0)
+ return;
+
+ /* Empty c first. */
+ for (j=1; j<=n; j++)
+ for (i=1; i<=m; i++)
+ c[i + j * c_dim1] = (GFC_REAL_10)0;
+
+ /* Start turning the crank. */
+ i1 = n;
+ for (jj = 1; jj <= i1; jj += 512)
{
- bbase_y = bbase + y*bystride;
- dest_y = dest + y*rystride;
- for (n = 0; n < count; n++)
+ /* Computing MIN */
+ i2 = 512;
+ i3 = n - jj + 1;
+ jsec = min(i2,i3);
+ ujsec = jsec - jsec % 4;
+ i2 = k;
+ for (ll = 1; ll <= i2; ll += 256)
{
- abase_n = abase + n*aystride;
- bbase_yn = bbase_y[n];
- for (x = 0; x < xcount; x++)
+ /* Computing MIN */
+ i3 = 256;
+ i4 = k - ll + 1;
+ lsec = min(i3,i4);
+ ulsec = lsec - lsec % 2;
+
+ i3 = m;
+ for (ii = 1; ii <= i3; ii += 256)
{
- dest_y[x] += abase_n[x] * bbase_yn;
+ /* Computing MIN */
+ i4 = 256;
+ i5 = m - ii + 1;
+ isec = min(i4,i5);
+ uisec = isec - isec % 2;
+ i4 = ll + ulsec - 1;
+ for (l = ll; l <= i4; l += 2)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 2)
+ {
+ t1[l - ll + 1 + ((i - ii + 1) << 8) - 257] =
+ a[i + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 1) << 8) - 257] =
+ a[i + (l + 1) * a_dim1];
+ t1[l - ll + 1 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + (l + 1) * a_dim1];
+ }
+ if (uisec < isec)
+ {
+ t1[l - ll + 1 + (isec << 8) - 257] =
+ a[ii + isec - 1 + l * a_dim1];
+ t1[l - ll + 2 + (isec << 8) - 257] =
+ a[ii + isec - 1 + (l + 1) * a_dim1];
+ }
+ }
+ if (ulsec < lsec)
+ {
+ i4 = ii + isec - 1;
+ for (i = ii; i<= i4; ++i)
+ {
+ t1[lsec + ((i - ii + 1) << 8) - 257] =
+ a[i + (ll + lsec - 1) * a_dim1];
+ }
+ }
+
+ uisec = isec - isec % 4;
+ i4 = jj + ujsec - 1;
+ for (j = jj; j <= i4; j += 4)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f22 = c[i + 1 + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f23 = c[i + 1 + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ f24 = c[i + 1 + (j + 3) * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ f32 = c[i + 2 + (j + 1) * c_dim1];
+ f42 = c[i + 3 + (j + 1) * c_dim1];
+ f33 = c[i + 2 + (j + 2) * c_dim1];
+ f43 = c[i + 3 + (j + 2) * c_dim1];
+ f34 = c[i + 2 + (j + 3) * c_dim1];
+ f44 = c[i + 3 + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f22 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f23 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f24 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + j * b_dim1];
+ f32 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f42 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f33 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f43 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f34 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f44 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + 1 + (j + 1) * c_dim1] = f22;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + 1 + (j + 2) * c_dim1] = f23;
+ c[i + (j + 3) * c_dim1] = f14;
+ c[i + 1 + (j + 3) * c_dim1] = f24;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ c[i + 2 + (j + 1) * c_dim1] = f32;
+ c[i + 3 + (j + 1) * c_dim1] = f42;
+ c[i + 2 + (j + 2) * c_dim1] = f33;
+ c[i + 3 + (j + 2) * c_dim1] = f43;
+ c[i + 2 + (j + 3) * c_dim1] = f34;
+ c[i + 3 + (j + 3) * c_dim1] = f44;
+ }
+ if (uisec < isec)
+ {
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + (j + 3) * c_dim1] = f14;
+ }
+ }
+ }
+ if (ujsec < jsec)
+ {
+ i4 = jj + jsec - 1;
+ for (j = jj + ujsec; j <= i4; ++j)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) -
+ 257] * b[l + j * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) -
+ 257] * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ }
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ }
+ }
+ }
}
}
}
+ return;
}
else if (rxstride == 1 && aystride == 1 && bxstride == 1)
{
@@ -334,7 +567,9 @@ matmul_r10 (gfc_array_r10 * const restrict retarray,
for (n = 0; n < count; n++)
for (x = 0; x < xcount; x++)
/* dest[x,y] += a[x,n] * b[n,y] */
- dest[x*rxstride + y*rystride] += abase[x*axstride + n*aystride] * bbase[n*bxstride + y*bystride];
+ dest[x*rxstride + y*rystride] +=
+ abase[x*axstride + n*aystride] *
+ bbase[n*bxstride + y*bystride];
}
else if (GFC_DESCRIPTOR_RANK (a) == 1)
{
@@ -372,5 +607,4 @@ matmul_r10 (gfc_array_r10 * const restrict retarray,
}
}
}
-
#endif
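
Before any of the in-library loops run, the function may hand the whole product to an external ?gemm: try_blas must have been set by the front end, rxstride must be 1, at least one stride of each of A and B must be 1 (so they are plain or transposed column-major matrices), and the product of the three extents must exceed blas_limit cubed, i.e. the geometric mean of the extents must exceed blas_limit. A condensed sketch of that dispatch, assuming the same locals as the generated functions (one and zero are of the element type; the trailing 1, 1 are the hidden character-length arguments of the Fortran gemm):

if (try_blas && rxstride == 1
    && (axstride == 1 || aystride == 1)
    && (bxstride == 1 || bystride == 1)
    && (float) xcount * (float) ycount * (float) count > POW3 (blas_limit))
  {
    const int m = xcount, n = ycount, k = count, ldc = rystride;
    const int lda = (axstride == 1) ? aystride : axstride;
    const int ldb = (bxstride == 1) ? bystride : bxstride;

    if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
      {
        /* "N" when the operand is stored untransposed, "T" otherwise.  */
        gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T",
              &m, &n, &k, &one, abase, &lda, bbase, &ldb, &zero,
              dest, &ldc, 1, 1);
        return;
      }
  }
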
diff --git a/libgfortran/generated/matmul_r16.c b/libgfortran/generated/matmul_r16.c
index d2f11bd..4ebd104 100644
--- a/libgfortran/generated/matmul_r16.c
+++ b/libgfortran/generated/matmul_r16.c
@@ -32,7 +32,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#if defined (HAVE_GFC_REAL_16)
/* Prototype for the BLAS ?gemm subroutine, a pointer to which can be
- passed to us by the front-end, in which case we'll call it for large
+ passed to us by the front-end, in which case we call it for large
matrices. */
typedef void (*blas_call)(const char *, const char *, const int *, const int *,
@@ -99,7 +99,7 @@ matmul_r16 (gfc_array_r16 * const restrict retarray,
o One-dimensional argument B is implicitly treated as a column matrix
dimensioned [count, 1], so ycount=1.
- */
+*/
if (retarray->base_addr == NULL)
{
@@ -127,47 +127,47 @@ matmul_r16 (gfc_array_r16 * const restrict retarray,
= xmallocarray (size0 ((array_t *) retarray), sizeof (GFC_REAL_16));
retarray->offset = 0;
}
- else if (unlikely (compile_options.bounds_check))
- {
- index_type ret_extent, arg_extent;
-
- if (GFC_DESCRIPTOR_RANK (a) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else if (GFC_DESCRIPTOR_RANK (b) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 1:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
-
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 2:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- }
+ else if (unlikely (compile_options.bounds_check))
+ {
+ index_type ret_extent, arg_extent;
+
+ if (GFC_DESCRIPTOR_RANK (a) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else if (GFC_DESCRIPTOR_RANK (b) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 1:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 2:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ }
if (GFC_DESCRIPTOR_RANK (retarray) == 1)
@@ -230,61 +230,294 @@ matmul_r16 (gfc_array_r16 * const restrict retarray,
bbase = b->base_addr;
dest = retarray->base_addr;
-
- /* Now that everything is set up, we're performing the multiplication
+ /* Now that everything is set up, we perform the multiplication
itself. */
#define POW3(x) (((float) (x)) * ((float) (x)) * ((float) (x)))
+#define min(a,b) ((a) <= (b) ? (a) : (b))
+#define max(a,b) ((a) >= (b) ? (a) : (b))
if (try_blas && rxstride == 1 && (axstride == 1 || aystride == 1)
&& (bxstride == 1 || bystride == 1)
&& (((float) xcount) * ((float) ycount) * ((float) count)
> POW3(blas_limit)))
- {
- const int m = xcount, n = ycount, k = count, ldc = rystride;
- const GFC_REAL_16 one = 1, zero = 0;
- const int lda = (axstride == 1) ? aystride : axstride,
- ldb = (bxstride == 1) ? bystride : bxstride;
-
- if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
- {
- assert (gemm != NULL);
- gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m, &n, &k,
- &one, abase, &lda, bbase, &ldb, &zero, dest, &ldc, 1, 1);
- return;
- }
- }
-
- if (rxstride == 1 && axstride == 1 && bxstride == 1)
{
- const GFC_REAL_16 * restrict bbase_y;
- GFC_REAL_16 * restrict dest_y;
- const GFC_REAL_16 * restrict abase_n;
- GFC_REAL_16 bbase_yn;
+ const int m = xcount, n = ycount, k = count, ldc = rystride;
+ const GFC_REAL_16 one = 1, zero = 0;
+ const int lda = (axstride == 1) ? aystride : axstride,
+ ldb = (bxstride == 1) ? bystride : bxstride;
- if (rystride == xcount)
- memset (dest, 0, (sizeof (GFC_REAL_16) * xcount * ycount));
- else
+ if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
{
- for (y = 0; y < ycount; y++)
- for (x = 0; x < xcount; x++)
- dest[x + y*rystride] = (GFC_REAL_16)0;
+ assert (gemm != NULL);
+ gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m,
+ &n, &k, &one, abase, &lda, bbase, &ldb, &zero, dest,
+ &ldc, 1, 1);
+ return;
}
+ }
- for (y = 0; y < ycount; y++)
+ if (rxstride == 1 && axstride == 1 && bxstride == 1)
+ {
+ /* This block of code implements a tuned matmul, derived from
+ Superscalar GEMM-based level 3 BLAS, Beta version 0.1
+
+ Bo Kagstrom and Per Ling
+ Department of Computing Science
+ Umea University
+ S-901 87 Umea, Sweden
+
+ from netlib.org, translated to C, and modified for matmul.m4. */
+
+ const GFC_REAL_16 *a, *b;
+ GFC_REAL_16 *c;
+ const index_type m = xcount, n = ycount, k = count;
+
+ /* System generated locals */
+ index_type a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset,
+ i1, i2, i3, i4, i5, i6;
+
+ /* Local variables */
+ GFC_REAL_16 t1[65536], /* was [256][256] */
+ f11, f12, f21, f22, f31, f32, f41, f42,
+ f13, f14, f23, f24, f33, f34, f43, f44;
+ index_type i, j, l, ii, jj, ll;
+ index_type isec, jsec, lsec, uisec, ujsec, ulsec;
+
+ a = abase;
+ b = bbase;
+ c = retarray->base_addr;
+
+ /* Parameter adjustments */
+ c_dim1 = rystride;
+ c_offset = 1 + c_dim1;
+ c -= c_offset;
+ a_dim1 = aystride;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = bystride;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Early exit if possible */
+ if (m == 0 || n == 0 || k == 0)
+ return;
+
+ /* Empty c first. */
+ for (j=1; j<=n; j++)
+ for (i=1; i<=m; i++)
+ c[i + j * c_dim1] = (GFC_REAL_16)0;
+
+ /* Start turning the crank. */
+ i1 = n;
+ for (jj = 1; jj <= i1; jj += 512)
{
- bbase_y = bbase + y*bystride;
- dest_y = dest + y*rystride;
- for (n = 0; n < count; n++)
+ /* Computing MIN */
+ i2 = 512;
+ i3 = n - jj + 1;
+ jsec = min(i2,i3);
+ ujsec = jsec - jsec % 4;
+ i2 = k;
+ for (ll = 1; ll <= i2; ll += 256)
{
- abase_n = abase + n*aystride;
- bbase_yn = bbase_y[n];
- for (x = 0; x < xcount; x++)
+ /* Computing MIN */
+ i3 = 256;
+ i4 = k - ll + 1;
+ lsec = min(i3,i4);
+ ulsec = lsec - lsec % 2;
+
+ i3 = m;
+ for (ii = 1; ii <= i3; ii += 256)
{
- dest_y[x] += abase_n[x] * bbase_yn;
+ /* Computing MIN */
+ i4 = 256;
+ i5 = m - ii + 1;
+ isec = min(i4,i5);
+ uisec = isec - isec % 2;
+ i4 = ll + ulsec - 1;
+ for (l = ll; l <= i4; l += 2)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 2)
+ {
+ t1[l - ll + 1 + ((i - ii + 1) << 8) - 257] =
+ a[i + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 1) << 8) - 257] =
+ a[i + (l + 1) * a_dim1];
+ t1[l - ll + 1 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + (l + 1) * a_dim1];
+ }
+ if (uisec < isec)
+ {
+ t1[l - ll + 1 + (isec << 8) - 257] =
+ a[ii + isec - 1 + l * a_dim1];
+ t1[l - ll + 2 + (isec << 8) - 257] =
+ a[ii + isec - 1 + (l + 1) * a_dim1];
+ }
+ }
+ if (ulsec < lsec)
+ {
+ i4 = ii + isec - 1;
+ for (i = ii; i<= i4; ++i)
+ {
+ t1[lsec + ((i - ii + 1) << 8) - 257] =
+ a[i + (ll + lsec - 1) * a_dim1];
+ }
+ }
+
+ uisec = isec - isec % 4;
+ i4 = jj + ujsec - 1;
+ for (j = jj; j <= i4; j += 4)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f22 = c[i + 1 + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f23 = c[i + 1 + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ f24 = c[i + 1 + (j + 3) * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ f32 = c[i + 2 + (j + 1) * c_dim1];
+ f42 = c[i + 3 + (j + 1) * c_dim1];
+ f33 = c[i + 2 + (j + 2) * c_dim1];
+ f43 = c[i + 3 + (j + 2) * c_dim1];
+ f34 = c[i + 2 + (j + 3) * c_dim1];
+ f44 = c[i + 3 + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f22 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f23 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f24 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + j * b_dim1];
+ f32 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f42 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f33 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f43 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f34 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f44 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + 1 + (j + 1) * c_dim1] = f22;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + 1 + (j + 2) * c_dim1] = f23;
+ c[i + (j + 3) * c_dim1] = f14;
+ c[i + 1 + (j + 3) * c_dim1] = f24;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ c[i + 2 + (j + 1) * c_dim1] = f32;
+ c[i + 3 + (j + 1) * c_dim1] = f42;
+ c[i + 2 + (j + 2) * c_dim1] = f33;
+ c[i + 3 + (j + 2) * c_dim1] = f43;
+ c[i + 2 + (j + 3) * c_dim1] = f34;
+ c[i + 3 + (j + 3) * c_dim1] = f44;
+ }
+ if (uisec < isec)
+ {
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + (j + 3) * c_dim1] = f14;
+ }
+ }
+ }
+ if (ujsec < jsec)
+ {
+ i4 = jj + jsec - 1;
+ for (j = jj + ujsec; j <= i4; ++j)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) -
+ 257] * b[l + j * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) -
+ 257] * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ }
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ }
+ }
+ }
}
}
}
+ return;
}
else if (rxstride == 1 && aystride == 1 && bxstride == 1)
{
@@ -334,7 +567,9 @@ matmul_r16 (gfc_array_r16 * const restrict retarray,
for (n = 0; n < count; n++)
for (x = 0; x < xcount; x++)
/* dest[x,y] += a[x,n] * b[n,y] */
- dest[x*rxstride + y*rystride] += abase[x*axstride + n*aystride] * bbase[n*bxstride + y*bystride];
+ dest[x*rxstride + y*rystride] +=
+ abase[x*axstride + n*aystride] *
+ bbase[n*bxstride + y*bystride];
}
else if (GFC_DESCRIPTOR_RANK (a) == 1)
{
@@ -372,5 +607,4 @@ matmul_r16 (gfc_array_r16 * const restrict retarray,
}
}
}
-
#endif
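
When neither the BLAS path nor the unit-stride blocked kernel applies, the functions fall back to the plain triple loop over the descriptor strides shown in the final hunks (dest[x,y] += a[x,n] * b[n,y]). A self-contained reference version of that loop, useful for checking the blocked kernel on small inputs; double stands in for the GFC_* element types and the function name is illustrative:

#include <stddef.h>

/* Naive strided matmul with the same stride convention as the library
   fallback: dest[x,y] += a[x,n] * b[n,y].  Illustrative only.  */
static void
ref_matmul (double *dest, const double *a, const double *b,
            ptrdiff_t xcount, ptrdiff_t ycount, ptrdiff_t count,
            ptrdiff_t rxstride, ptrdiff_t rystride,
            ptrdiff_t axstride, ptrdiff_t aystride,
            ptrdiff_t bxstride, ptrdiff_t bystride)
{
  ptrdiff_t x, y, n;

  for (y = 0; y < ycount; y++)
    for (x = 0; x < xcount; x++)
      dest[x * rxstride + y * rystride] = 0.0;

  for (y = 0; y < ycount; y++)
    for (n = 0; n < count; n++)
      for (x = 0; x < xcount; x++)
        dest[x * rxstride + y * rystride]
          += a[x * axstride + n * aystride] * b[n * bxstride + y * bystride];
}
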
diff --git a/libgfortran/generated/matmul_r4.c b/libgfortran/generated/matmul_r4.c
index ff3b93f..cf3ffa3 100644
--- a/libgfortran/generated/matmul_r4.c
+++ b/libgfortran/generated/matmul_r4.c
@@ -32,7 +32,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#if defined (HAVE_GFC_REAL_4)
/* Prototype for the BLAS ?gemm subroutine, a pointer to which can be
- passed to us by the front-end, in which case we'll call it for large
+ passed to us by the front-end, in which case we call it for large
matrices. */
typedef void (*blas_call)(const char *, const char *, const int *, const int *,
@@ -99,7 +99,7 @@ matmul_r4 (gfc_array_r4 * const restrict retarray,
o One-dimensional argument B is implicitly treated as a column matrix
dimensioned [count, 1], so ycount=1.
- */
+*/
if (retarray->base_addr == NULL)
{
@@ -127,47 +127,47 @@ matmul_r4 (gfc_array_r4 * const restrict retarray,
= xmallocarray (size0 ((array_t *) retarray), sizeof (GFC_REAL_4));
retarray->offset = 0;
}
- else if (unlikely (compile_options.bounds_check))
- {
- index_type ret_extent, arg_extent;
-
- if (GFC_DESCRIPTOR_RANK (a) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else if (GFC_DESCRIPTOR_RANK (b) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 1:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
-
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 2:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- }
+ else if (unlikely (compile_options.bounds_check))
+ {
+ index_type ret_extent, arg_extent;
+
+ if (GFC_DESCRIPTOR_RANK (a) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else if (GFC_DESCRIPTOR_RANK (b) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 1:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 2:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ }
if (GFC_DESCRIPTOR_RANK (retarray) == 1)
@@ -230,61 +230,294 @@ matmul_r4 (gfc_array_r4 * const restrict retarray,
bbase = b->base_addr;
dest = retarray->base_addr;
-
- /* Now that everything is set up, we're performing the multiplication
+ /* Now that everything is set up, we perform the multiplication
itself. */
#define POW3(x) (((float) (x)) * ((float) (x)) * ((float) (x)))
+#define min(a,b) ((a) <= (b) ? (a) : (b))
+#define max(a,b) ((a) >= (b) ? (a) : (b))
if (try_blas && rxstride == 1 && (axstride == 1 || aystride == 1)
&& (bxstride == 1 || bystride == 1)
&& (((float) xcount) * ((float) ycount) * ((float) count)
> POW3(blas_limit)))
- {
- const int m = xcount, n = ycount, k = count, ldc = rystride;
- const GFC_REAL_4 one = 1, zero = 0;
- const int lda = (axstride == 1) ? aystride : axstride,
- ldb = (bxstride == 1) ? bystride : bxstride;
-
- if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
- {
- assert (gemm != NULL);
- gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m, &n, &k,
- &one, abase, &lda, bbase, &ldb, &zero, dest, &ldc, 1, 1);
- return;
- }
- }
-
- if (rxstride == 1 && axstride == 1 && bxstride == 1)
{
- const GFC_REAL_4 * restrict bbase_y;
- GFC_REAL_4 * restrict dest_y;
- const GFC_REAL_4 * restrict abase_n;
- GFC_REAL_4 bbase_yn;
+ const int m = xcount, n = ycount, k = count, ldc = rystride;
+ const GFC_REAL_4 one = 1, zero = 0;
+ const int lda = (axstride == 1) ? aystride : axstride,
+ ldb = (bxstride == 1) ? bystride : bxstride;
- if (rystride == xcount)
- memset (dest, 0, (sizeof (GFC_REAL_4) * xcount * ycount));
- else
+ if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
{
- for (y = 0; y < ycount; y++)
- for (x = 0; x < xcount; x++)
- dest[x + y*rystride] = (GFC_REAL_4)0;
+ assert (gemm != NULL);
+ gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m,
+ &n, &k, &one, abase, &lda, bbase, &ldb, &zero, dest,
+ &ldc, 1, 1);
+ return;
}
+ }
- for (y = 0; y < ycount; y++)
+ if (rxstride == 1 && axstride == 1 && bxstride == 1)
+ {
+ /* This block of code implements a tuned matmul, derived from
+ Superscalar GEMM-based level 3 BLAS, Beta version 0.1
+
+ Bo Kagstrom and Per Ling
+ Department of Computing Science
+ Umea University
+ S-901 87 Umea, Sweden
+
+ from netlib.org, translated to C, and modified for matmul.m4. */
+
+ const GFC_REAL_4 *a, *b;
+ GFC_REAL_4 *c;
+ const index_type m = xcount, n = ycount, k = count;
+
+ /* System generated locals */
+ index_type a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset,
+ i1, i2, i3, i4, i5, i6;
+
+ /* Local variables */
+ GFC_REAL_4 t1[65536], /* was [256][256] */
+ f11, f12, f21, f22, f31, f32, f41, f42,
+ f13, f14, f23, f24, f33, f34, f43, f44;
+ index_type i, j, l, ii, jj, ll;
+ index_type isec, jsec, lsec, uisec, ujsec, ulsec;
+
+ a = abase;
+ b = bbase;
+ c = retarray->base_addr;
+
+ /* Parameter adjustments */
+ c_dim1 = rystride;
+ c_offset = 1 + c_dim1;
+ c -= c_offset;
+ a_dim1 = aystride;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = bystride;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Early exit if possible */
+ if (m == 0 || n == 0 || k == 0)
+ return;
+
+ /* Empty c first. */
+ for (j=1; j<=n; j++)
+ for (i=1; i<=m; i++)
+ c[i + j * c_dim1] = (GFC_REAL_4)0;
+
+ /* Start turning the crank. */
+ i1 = n;
+ for (jj = 1; jj <= i1; jj += 512)
{
- bbase_y = bbase + y*bystride;
- dest_y = dest + y*rystride;
- for (n = 0; n < count; n++)
+ /* Computing MIN */
+ i2 = 512;
+ i3 = n - jj + 1;
+ jsec = min(i2,i3);
+ ujsec = jsec - jsec % 4;
+ i2 = k;
+ for (ll = 1; ll <= i2; ll += 256)
{
- abase_n = abase + n*aystride;
- bbase_yn = bbase_y[n];
- for (x = 0; x < xcount; x++)
+ /* Computing MIN */
+ i3 = 256;
+ i4 = k - ll + 1;
+ lsec = min(i3,i4);
+ ulsec = lsec - lsec % 2;
+
+ i3 = m;
+ for (ii = 1; ii <= i3; ii += 256)
{
- dest_y[x] += abase_n[x] * bbase_yn;
+ /* Computing MIN */
+ i4 = 256;
+ i5 = m - ii + 1;
+ isec = min(i4,i5);
+ uisec = isec - isec % 2;
+ i4 = ll + ulsec - 1;
+ for (l = ll; l <= i4; l += 2)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 2)
+ {
+ t1[l - ll + 1 + ((i - ii + 1) << 8) - 257] =
+ a[i + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 1) << 8) - 257] =
+ a[i + (l + 1) * a_dim1];
+ t1[l - ll + 1 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + (l + 1) * a_dim1];
+ }
+ if (uisec < isec)
+ {
+ t1[l - ll + 1 + (isec << 8) - 257] =
+ a[ii + isec - 1 + l * a_dim1];
+ t1[l - ll + 2 + (isec << 8) - 257] =
+ a[ii + isec - 1 + (l + 1) * a_dim1];
+ }
+ }
+ if (ulsec < lsec)
+ {
+ i4 = ii + isec - 1;
+ for (i = ii; i<= i4; ++i)
+ {
+ t1[lsec + ((i - ii + 1) << 8) - 257] =
+ a[i + (ll + lsec - 1) * a_dim1];
+ }
+ }
+
+ uisec = isec - isec % 4;
+ i4 = jj + ujsec - 1;
+ for (j = jj; j <= i4; j += 4)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f22 = c[i + 1 + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f23 = c[i + 1 + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ f24 = c[i + 1 + (j + 3) * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ f32 = c[i + 2 + (j + 1) * c_dim1];
+ f42 = c[i + 3 + (j + 1) * c_dim1];
+ f33 = c[i + 2 + (j + 2) * c_dim1];
+ f43 = c[i + 3 + (j + 2) * c_dim1];
+ f34 = c[i + 2 + (j + 3) * c_dim1];
+ f44 = c[i + 3 + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f22 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f23 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f24 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + j * b_dim1];
+ f32 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f42 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f33 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f43 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f34 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f44 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + 1 + (j + 1) * c_dim1] = f22;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + 1 + (j + 2) * c_dim1] = f23;
+ c[i + (j + 3) * c_dim1] = f14;
+ c[i + 1 + (j + 3) * c_dim1] = f24;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ c[i + 2 + (j + 1) * c_dim1] = f32;
+ c[i + 3 + (j + 1) * c_dim1] = f42;
+ c[i + 2 + (j + 2) * c_dim1] = f33;
+ c[i + 3 + (j + 2) * c_dim1] = f43;
+ c[i + 2 + (j + 3) * c_dim1] = f34;
+ c[i + 3 + (j + 3) * c_dim1] = f44;
+ }
+ if (uisec < isec)
+ {
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + (j + 3) * c_dim1] = f14;
+ }
+ }
+ }
+ if (ujsec < jsec)
+ {
+ i4 = jj + jsec - 1;
+ for (j = jj + ujsec; j <= i4; ++j)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) -
+ 257] * b[l + j * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) -
+ 257] * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ }
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ }
+ }
+ }
}
}
}
+ return;
}
else if (rxstride == 1 && aystride == 1 && bxstride == 1)
{
@@ -334,7 +567,9 @@ matmul_r4 (gfc_array_r4 * const restrict retarray,
for (n = 0; n < count; n++)
for (x = 0; x < xcount; x++)
/* dest[x,y] += a[x,n] * b[n,y] */
- dest[x*rxstride + y*rystride] += abase[x*axstride + n*aystride] * bbase[n*bxstride + y*bystride];
+ dest[x*rxstride + y*rystride] +=
+ abase[x*axstride + n*aystride] *
+ bbase[n*bxstride + y*bystride];
}
else if (GFC_DESCRIPTOR_RANK (a) == 1)
{
@@ -372,5 +607,4 @@ matmul_r4 (gfc_array_r4 * const restrict retarray,
}
}
}
-
#endif
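
The large unrolled body of the kernel keeps a 4x4 block of C in the scalars f11..f44 so they stay in registers; each iteration of l adds four values from the packed A tile times four values of B into all sixteen accumulators, and the uisec/ujsec cleanup loops handle block edges that are not multiples of 4. The same update written with small arrays instead of sixteen named scalars (illustrative only; a_vals holds the four t1 entries for tile rows i..i+3 at this l, spaced 256 apart in t1, and b_vals the four values b[l + (j..j+3) * b_dim1]):

/* 4x4 register-blocked update, equivalent to the f11..f44 accumulators
   in the generated kernel: f[r][s] accumulates C(i+r, j+s).
   Illustrative only.  */
static void
kernel_4x4 (double f[4][4], const double a_vals[4], const double b_vals[4])
{
  int r, s;

  for (s = 0; s < 4; s++)       /* columns j, j+1, j+2, j+3 */
    for (r = 0; r < 4; r++)     /* rows    i, i+1, i+2, i+3 */
      f[r][s] += a_vals[r] * b_vals[s];
}
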
diff --git a/libgfortran/generated/matmul_r8.c b/libgfortran/generated/matmul_r8.c
index af805ee..9a70a23 100644
--- a/libgfortran/generated/matmul_r8.c
+++ b/libgfortran/generated/matmul_r8.c
@@ -32,7 +32,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#if defined (HAVE_GFC_REAL_8)
/* Prototype for the BLAS ?gemm subroutine, a pointer to which can be
- passed to us by the front-end, in which case we'll call it for large
+ passed to us by the front-end, in which case we call it for large
matrices. */
typedef void (*blas_call)(const char *, const char *, const int *, const int *,
@@ -99,7 +99,7 @@ matmul_r8 (gfc_array_r8 * const restrict retarray,
o One-dimensional argument B is implicitly treated as a column matrix
dimensioned [count, 1], so ycount=1.
- */
+*/
if (retarray->base_addr == NULL)
{
@@ -127,47 +127,47 @@ matmul_r8 (gfc_array_r8 * const restrict retarray,
= xmallocarray (size0 ((array_t *) retarray), sizeof (GFC_REAL_8));
retarray->offset = 0;
}
- else if (unlikely (compile_options.bounds_check))
- {
- index_type ret_extent, arg_extent;
-
- if (GFC_DESCRIPTOR_RANK (a) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else if (GFC_DESCRIPTOR_RANK (b) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 1:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
-
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 2:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- }
+ else if (unlikely (compile_options.bounds_check))
+ {
+ index_type ret_extent, arg_extent;
+
+ if (GFC_DESCRIPTOR_RANK (a) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else if (GFC_DESCRIPTOR_RANK (b) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 1:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 2:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ }
if (GFC_DESCRIPTOR_RANK (retarray) == 1)
@@ -230,61 +230,294 @@ matmul_r8 (gfc_array_r8 * const restrict retarray,
bbase = b->base_addr;
dest = retarray->base_addr;
-
- /* Now that everything is set up, we're performing the multiplication
+ /* Now that everything is set up, we perform the multiplication
itself. */
#define POW3(x) (((float) (x)) * ((float) (x)) * ((float) (x)))
+#define min(a,b) ((a) <= (b) ? (a) : (b))
+#define max(a,b) ((a) >= (b) ? (a) : (b))
if (try_blas && rxstride == 1 && (axstride == 1 || aystride == 1)
&& (bxstride == 1 || bystride == 1)
&& (((float) xcount) * ((float) ycount) * ((float) count)
> POW3(blas_limit)))
- {
- const int m = xcount, n = ycount, k = count, ldc = rystride;
- const GFC_REAL_8 one = 1, zero = 0;
- const int lda = (axstride == 1) ? aystride : axstride,
- ldb = (bxstride == 1) ? bystride : bxstride;
-
- if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
- {
- assert (gemm != NULL);
- gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m, &n, &k,
- &one, abase, &lda, bbase, &ldb, &zero, dest, &ldc, 1, 1);
- return;
- }
- }
-
- if (rxstride == 1 && axstride == 1 && bxstride == 1)
{
- const GFC_REAL_8 * restrict bbase_y;
- GFC_REAL_8 * restrict dest_y;
- const GFC_REAL_8 * restrict abase_n;
- GFC_REAL_8 bbase_yn;
+ const int m = xcount, n = ycount, k = count, ldc = rystride;
+ const GFC_REAL_8 one = 1, zero = 0;
+ const int lda = (axstride == 1) ? aystride : axstride,
+ ldb = (bxstride == 1) ? bystride : bxstride;
- if (rystride == xcount)
- memset (dest, 0, (sizeof (GFC_REAL_8) * xcount * ycount));
- else
+ if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
{
- for (y = 0; y < ycount; y++)
- for (x = 0; x < xcount; x++)
- dest[x + y*rystride] = (GFC_REAL_8)0;
+ assert (gemm != NULL);
+ gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m,
+ &n, &k, &one, abase, &lda, bbase, &ldb, &zero, dest,
+ &ldc, 1, 1);
+ return;
}
+ }
- for (y = 0; y < ycount; y++)
+ if (rxstride == 1 && axstride == 1 && bxstride == 1)
+ {
+ /* This block of code implements a tuned matmul, derived from
+ Superscalar GEMM-based level 3 BLAS, Beta version 0.1
+
+ Bo Kagstrom and Per Ling
+ Department of Computing Science
+ Umea University
+ S-901 87 Umea, Sweden
+
+ from netlib.org, translated to C, and modified for matmul.m4. */
+
+ const GFC_REAL_8 *a, *b;
+ GFC_REAL_8 *c;
+ const index_type m = xcount, n = ycount, k = count;
+
+ /* System generated locals */
+ index_type a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset,
+ i1, i2, i3, i4, i5, i6;
+
+ /* Local variables */
+ GFC_REAL_8 t1[65536], /* was [256][256] */
+ f11, f12, f21, f22, f31, f32, f41, f42,
+ f13, f14, f23, f24, f33, f34, f43, f44;
+ index_type i, j, l, ii, jj, ll;
+ index_type isec, jsec, lsec, uisec, ujsec, ulsec;
+
+ a = abase;
+ b = bbase;
+ c = retarray->base_addr;
+
+ /* Parameter adjustments */
+ c_dim1 = rystride;
+ c_offset = 1 + c_dim1;
+ c -= c_offset;
+ a_dim1 = aystride;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = bystride;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Early exit if possible */
+ if (m == 0 || n == 0 || k == 0)
+ return;
+
+ /* Empty c first. */
+ for (j=1; j<=n; j++)
+ for (i=1; i<=m; i++)
+ c[i + j * c_dim1] = (GFC_REAL_8)0;
+
+ /* Start turning the crank. */
+ i1 = n;
+ for (jj = 1; jj <= i1; jj += 512)
{
- bbase_y = bbase + y*bystride;
- dest_y = dest + y*rystride;
- for (n = 0; n < count; n++)
+ /* Computing MIN */
+ i2 = 512;
+ i3 = n - jj + 1;
+ jsec = min(i2,i3);
+ ujsec = jsec - jsec % 4;
+ i2 = k;
+ for (ll = 1; ll <= i2; ll += 256)
{
- abase_n = abase + n*aystride;
- bbase_yn = bbase_y[n];
- for (x = 0; x < xcount; x++)
+ /* Computing MIN */
+ i3 = 256;
+ i4 = k - ll + 1;
+ lsec = min(i3,i4);
+ ulsec = lsec - lsec % 2;
+
+ i3 = m;
+ for (ii = 1; ii <= i3; ii += 256)
{
- dest_y[x] += abase_n[x] * bbase_yn;
+ /* Computing MIN */
+ i4 = 256;
+ i5 = m - ii + 1;
+ isec = min(i4,i5);
+ uisec = isec - isec % 2;
+ i4 = ll + ulsec - 1;
+ for (l = ll; l <= i4; l += 2)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 2)
+ {
+ t1[l - ll + 1 + ((i - ii + 1) << 8) - 257] =
+ a[i + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 1) << 8) - 257] =
+ a[i + (l + 1) * a_dim1];
+ t1[l - ll + 1 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + (l + 1) * a_dim1];
+ }
+ if (uisec < isec)
+ {
+ t1[l - ll + 1 + (isec << 8) - 257] =
+ a[ii + isec - 1 + l * a_dim1];
+ t1[l - ll + 2 + (isec << 8) - 257] =
+ a[ii + isec - 1 + (l + 1) * a_dim1];
+ }
+ }
+ if (ulsec < lsec)
+ {
+ i4 = ii + isec - 1;
+ for (i = ii; i<= i4; ++i)
+ {
+ t1[lsec + ((i - ii + 1) << 8) - 257] =
+ a[i + (ll + lsec - 1) * a_dim1];
+ }
+ }
+
+ uisec = isec - isec % 4;
+ i4 = jj + ujsec - 1;
+ for (j = jj; j <= i4; j += 4)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f22 = c[i + 1 + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f23 = c[i + 1 + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ f24 = c[i + 1 + (j + 3) * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ f32 = c[i + 2 + (j + 1) * c_dim1];
+ f42 = c[i + 3 + (j + 1) * c_dim1];
+ f33 = c[i + 2 + (j + 2) * c_dim1];
+ f43 = c[i + 3 + (j + 2) * c_dim1];
+ f34 = c[i + 2 + (j + 3) * c_dim1];
+ f44 = c[i + 3 + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f22 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f23 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f24 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + j * b_dim1];
+ f32 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f42 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f33 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f43 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f34 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f44 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + 1 + (j + 1) * c_dim1] = f22;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + 1 + (j + 2) * c_dim1] = f23;
+ c[i + (j + 3) * c_dim1] = f14;
+ c[i + 1 + (j + 3) * c_dim1] = f24;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ c[i + 2 + (j + 1) * c_dim1] = f32;
+ c[i + 3 + (j + 1) * c_dim1] = f42;
+ c[i + 2 + (j + 2) * c_dim1] = f33;
+ c[i + 3 + (j + 2) * c_dim1] = f43;
+ c[i + 2 + (j + 3) * c_dim1] = f34;
+ c[i + 3 + (j + 3) * c_dim1] = f44;
+ }
+ if (uisec < isec)
+ {
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + (j + 3) * c_dim1] = f14;
+ }
+ }
+ }
+ if (ujsec < jsec)
+ {
+ i4 = jj + jsec - 1;
+ for (j = jj + ujsec; j <= i4; ++j)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) -
+ 257] * b[l + j * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) -
+ 257] * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ }
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ }
+ }
+ }
}
}
}
+ return;
}
else if (rxstride == 1 && aystride == 1 && bxstride == 1)
{
@@ -334,7 +567,9 @@ matmul_r8 (gfc_array_r8 * const restrict retarray,
for (n = 0; n < count; n++)
for (x = 0; x < xcount; x++)
/* dest[x,y] += a[x,n] * b[n,y] */
- dest[x*rxstride + y*rystride] += abase[x*axstride + n*aystride] * bbase[n*bxstride + y*bystride];
+ dest[x*rxstride + y*rystride] +=
+ abase[x*axstride + n*aystride] *
+ bbase[n*bxstride + y*bystride];
}
else if (GFC_DESCRIPTOR_RANK (a) == 1)
{
@@ -372,5 +607,4 @@ matmul_r8 (gfc_array_r8 * const restrict retarray,
}
}
}
-
#endif
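The blocked path added above follows the usual GEMM blocking scheme: the result is walked in 512-column by 256-row tiles, a 256x256 panel of A is copied transposed into a contiguous scratch buffer t1, and the packed panel is then multiplied against the matching rows of B with a 4x4 register-tiled inner kernel plus cleanup loops for the block remainders. The index expression t1[l - ll + 1 + ((i - ii + 1) << 8) - 257] is just 1-based addressing of that [256][256] buffer with the offsets folded into one constant; it reduces to t1[(l - ll) + 256 * (i - ii)]. Below is a minimal stand-alone sketch of the same blocking scheme, without the register tiling or the cleanup loops. The buffer layout and block sizes are the ones used in the patch; everything else (the function name, the unit-stride column-major calling convention, the static scratch buffer) is illustrative only, not code from the commit.

    #include <stddef.h>

    #define BLK_J 512   /* columns of C/B handled per outer pass */
    #define BLK_L 256   /* summation (inner) block size          */
    #define BLK_I 256   /* rows of C/A handled per pass          */

    /* Packed panel of A; laid out [BLK_I][BLK_L], l varying fastest,
       matching the t1 buffer in the patch.  */
    static double t1[BLK_I * BLK_L];

    /* Illustrative blocked multiply: c is m x n, a is m x k, b is k x n,
       all column-major with leading dimension equal to the row count.  */
    static void
    blocked_matmul (double *c, const double *a, const double *b,
                    size_t m, size_t n, size_t k)
    {
      size_t jj, ll, ii, i, j, l;

      for (i = 0; i < m * n; i++)
        c[i] = 0.0;

      for (jj = 0; jj < n; jj += BLK_J)
        {
          size_t jsec = n - jj < BLK_J ? n - jj : BLK_J;
          for (ll = 0; ll < k; ll += BLK_L)
            {
              size_t lsec = k - ll < BLK_L ? k - ll : BLK_L;
              for (ii = 0; ii < m; ii += BLK_I)
                {
                  size_t isec = m - ii < BLK_I ? m - ii : BLK_I;

                  /* Pack the isec x lsec block of A transposed, so the
                     dot product below walks t1 with stride 1.  */
                  for (l = 0; l < lsec; l++)
                    for (i = 0; i < isec; i++)
                      t1[l + i * BLK_L] = a[(ii + i) + (ll + l) * m];

                  /* Accumulate the packed panel into this tile of C.  */
                  for (j = 0; j < jsec; j++)
                    for (i = 0; i < isec; i++)
                      {
                        double f = c[(ii + i) + (jj + j) * m];
                        for (l = 0; l < lsec; l++)
                          f += t1[l + i * BLK_L]
                               * b[(ll + l) + (jj + j) * k];
                        c[(ii + i) + (jj + j) * m] = f;
                      }
                }
            }
        }
    }

The packing step is what the 4x4 kernel in the real code relies on: once the A panel sits contiguously in t1, the sixteen f11..f44 accumulators can each reuse a loaded element of t1 or b several times before it is evicted, which is where the speedup over the old single accumulator loop comes from.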
diff --git a/libgfortran/m4/matmul.m4 b/libgfortran/m4/matmul.m4
index 468615b..77ed440 100644
--- a/libgfortran/m4/matmul.m4
+++ b/libgfortran/m4/matmul.m4
@@ -33,7 +33,7 @@ include(iparm.m4)dnl
`#if defined (HAVE_'rtype_name`)
/* Prototype for the BLAS ?gemm subroutine, a pointer to which can be
- passed to us by the front-end, in which case we''`ll call it for large
+ passed to us by the front-end, in which case we call it for large
matrices. */
typedef void (*blas_call)(const char *, const char *, const int *, const int *,
@@ -100,7 +100,7 @@ matmul_'rtype_code` ('rtype` * const restrict retarray,
o One-dimensional argument B is implicitly treated as a column matrix
dimensioned [count, 1], so ycount=1.
- */
+*/
if (retarray->base_addr == NULL)
{
@@ -128,47 +128,47 @@ matmul_'rtype_code` ('rtype` * const restrict retarray,
= xmallocarray (size0 ((array_t *) retarray), sizeof ('rtype_name`));
retarray->offset = 0;
}
- else if (unlikely (compile_options.bounds_check))
- {
- index_type ret_extent, arg_extent;
-
- if (GFC_DESCRIPTOR_RANK (a) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else if (GFC_DESCRIPTOR_RANK (b) == 1)
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic: is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- else
- {
- arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 1:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
-
- arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
- ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
- if (arg_extent != ret_extent)
- runtime_error ("Incorrect extent in return array in"
- " MATMUL intrinsic for dimension 2:"
- " is %ld, should be %ld",
- (long int) ret_extent, (long int) arg_extent);
- }
- }
+ else if (unlikely (compile_options.bounds_check))
+ {
+ index_type ret_extent, arg_extent;
+
+ if (GFC_DESCRIPTOR_RANK (a) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else if (GFC_DESCRIPTOR_RANK (b) == 1)
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic: is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ else
+ {
+ arg_extent = GFC_DESCRIPTOR_EXTENT(a,0);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,0);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 1:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+
+ arg_extent = GFC_DESCRIPTOR_EXTENT(b,1);
+ ret_extent = GFC_DESCRIPTOR_EXTENT(retarray,1);
+ if (arg_extent != ret_extent)
+ runtime_error ("Incorrect extent in return array in"
+ " MATMUL intrinsic for dimension 2:"
+ " is %ld, should be %ld",
+ (long int) ret_extent, (long int) arg_extent);
+ }
+ }
'
sinclude(`matmul_asm_'rtype_code`.m4')dnl
`
@@ -232,61 +232,294 @@ sinclude(`matmul_asm_'rtype_code`.m4')dnl
bbase = b->base_addr;
dest = retarray->base_addr;
-
- /* Now that everything is set up, we''`re performing the multiplication
+ /* Now that everything is set up, we perform the multiplication
itself. */
#define POW3(x) (((float) (x)) * ((float) (x)) * ((float) (x)))
+#define min(a,b) ((a) <= (b) ? (a) : (b))
+#define max(a,b) ((a) >= (b) ? (a) : (b))
if (try_blas && rxstride == 1 && (axstride == 1 || aystride == 1)
&& (bxstride == 1 || bystride == 1)
&& (((float) xcount) * ((float) ycount) * ((float) count)
> POW3(blas_limit)))
- {
- const int m = xcount, n = ycount, k = count, ldc = rystride;
- const 'rtype_name` one = 1, zero = 0;
- const int lda = (axstride == 1) ? aystride : axstride,
- ldb = (bxstride == 1) ? bystride : bxstride;
-
- if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
- {
- assert (gemm != NULL);
- gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m, &n, &k,
- &one, abase, &lda, bbase, &ldb, &zero, dest, &ldc, 1, 1);
- return;
- }
- }
-
- if (rxstride == 1 && axstride == 1 && bxstride == 1)
{
- const 'rtype_name` * restrict bbase_y;
- 'rtype_name` * restrict dest_y;
- const 'rtype_name` * restrict abase_n;
- 'rtype_name` bbase_yn;
+ const int m = xcount, n = ycount, k = count, ldc = rystride;
+ const 'rtype_name` one = 1, zero = 0;
+ const int lda = (axstride == 1) ? aystride : axstride,
+ ldb = (bxstride == 1) ? bystride : bxstride;
- if (rystride == xcount)
- memset (dest, 0, (sizeof ('rtype_name`) * xcount * ycount));
- else
+ if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
{
- for (y = 0; y < ycount; y++)
- for (x = 0; x < xcount; x++)
- dest[x + y*rystride] = ('rtype_name`)0;
+ assert (gemm != NULL);
+ gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m,
+ &n, &k, &one, abase, &lda, bbase, &ldb, &zero, dest,
+ &ldc, 1, 1);
+ return;
}
+ }
- for (y = 0; y < ycount; y++)
+ if (rxstride == 1 && axstride == 1 && bxstride == 1)
+ {
+ /* This block of code implements a tuned matmul, derived from
+ Superscalar GEMM-based level 3 BLAS, Beta version 0.1
+
+ Bo Kagstrom and Per Ling
+ Department of Computing Science
+ Umea University
+ S-901 87 Umea, Sweden
+
+ from netlib.org, translated to C, and modified for matmul.m4. */
+
+ const 'rtype_name` *a, *b;
+ 'rtype_name` *c;
+ const index_type m = xcount, n = ycount, k = count;
+
+ /* System generated locals */
+ index_type a_dim1, a_offset, b_dim1, b_offset, c_dim1, c_offset,
+ i1, i2, i3, i4, i5, i6;
+
+ /* Local variables */
+ 'rtype_name` t1[65536], /* was [256][256] */
+ f11, f12, f21, f22, f31, f32, f41, f42,
+ f13, f14, f23, f24, f33, f34, f43, f44;
+ index_type i, j, l, ii, jj, ll;
+ index_type isec, jsec, lsec, uisec, ujsec, ulsec;
+
+ a = abase;
+ b = bbase;
+ c = retarray->base_addr;
+
+ /* Parameter adjustments */
+ c_dim1 = rystride;
+ c_offset = 1 + c_dim1;
+ c -= c_offset;
+ a_dim1 = aystride;
+ a_offset = 1 + a_dim1;
+ a -= a_offset;
+ b_dim1 = bystride;
+ b_offset = 1 + b_dim1;
+ b -= b_offset;
+
+ /* Early exit if possible */
+ if (m == 0 || n == 0 || k == 0)
+ return;
+
+ /* Empty c first. */
+ for (j=1; j<=n; j++)
+ for (i=1; i<=m; i++)
+ c[i + j * c_dim1] = ('rtype_name`)0;
+
+ /* Start turning the crank. */
+ i1 = n;
+ for (jj = 1; jj <= i1; jj += 512)
{
- bbase_y = bbase + y*bystride;
- dest_y = dest + y*rystride;
- for (n = 0; n < count; n++)
+ /* Computing MIN */
+ i2 = 512;
+ i3 = n - jj + 1;
+ jsec = min(i2,i3);
+ ujsec = jsec - jsec % 4;
+ i2 = k;
+ for (ll = 1; ll <= i2; ll += 256)
{
- abase_n = abase + n*aystride;
- bbase_yn = bbase_y[n];
- for (x = 0; x < xcount; x++)
+ /* Computing MIN */
+ i3 = 256;
+ i4 = k - ll + 1;
+ lsec = min(i3,i4);
+ ulsec = lsec - lsec % 2;
+
+ i3 = m;
+ for (ii = 1; ii <= i3; ii += 256)
{
- dest_y[x] += abase_n[x] * bbase_yn;
+ /* Computing MIN */
+ i4 = 256;
+ i5 = m - ii + 1;
+ isec = min(i4,i5);
+ uisec = isec - isec % 2;
+ i4 = ll + ulsec - 1;
+ for (l = ll; l <= i4; l += 2)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 2)
+ {
+ t1[l - ll + 1 + ((i - ii + 1) << 8) - 257] =
+ a[i + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 1) << 8) - 257] =
+ a[i + (l + 1) * a_dim1];
+ t1[l - ll + 1 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + l * a_dim1];
+ t1[l - ll + 2 + ((i - ii + 2) << 8) - 257] =
+ a[i + 1 + (l + 1) * a_dim1];
+ }
+ if (uisec < isec)
+ {
+ t1[l - ll + 1 + (isec << 8) - 257] =
+ a[ii + isec - 1 + l * a_dim1];
+ t1[l - ll + 2 + (isec << 8) - 257] =
+ a[ii + isec - 1 + (l + 1) * a_dim1];
+ }
+ }
+ if (ulsec < lsec)
+ {
+ i4 = ii + isec - 1;
+ for (i = ii; i<= i4; ++i)
+ {
+ t1[lsec + ((i - ii + 1) << 8) - 257] =
+ a[i + (ll + lsec - 1) * a_dim1];
+ }
+ }
+
+ uisec = isec - isec % 4;
+ i4 = jj + ujsec - 1;
+ for (j = jj; j <= i4; j += 4)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f22 = c[i + 1 + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f23 = c[i + 1 + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ f24 = c[i + 1 + (j + 3) * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ f32 = c[i + 2 + (j + 1) * c_dim1];
+ f42 = c[i + 3 + (j + 1) * c_dim1];
+ f33 = c[i + 2 + (j + 2) * c_dim1];
+ f43 = c[i + 3 + (j + 2) * c_dim1];
+ f34 = c[i + 2 + (j + 3) * c_dim1];
+ f44 = c[i + 3 + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f22 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f23 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f24 += t1[l - ll + 1 + ((i - ii + 2) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + j * b_dim1];
+ f32 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f42 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 1) * b_dim1];
+ f33 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f43 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 2) * b_dim1];
+ f34 += t1[l - ll + 1 + ((i - ii + 3) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ f44 += t1[l - ll + 1 + ((i - ii + 4) << 8) - 257]
+ * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + 1 + (j + 1) * c_dim1] = f22;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + 1 + (j + 2) * c_dim1] = f23;
+ c[i + (j + 3) * c_dim1] = f14;
+ c[i + 1 + (j + 3) * c_dim1] = f24;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ c[i + 2 + (j + 1) * c_dim1] = f32;
+ c[i + 3 + (j + 1) * c_dim1] = f42;
+ c[i + 2 + (j + 2) * c_dim1] = f33;
+ c[i + 3 + (j + 2) * c_dim1] = f43;
+ c[i + 2 + (j + 3) * c_dim1] = f34;
+ c[i + 3 + (j + 3) * c_dim1] = f44;
+ }
+ if (uisec < isec)
+ {
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ f12 = c[i + (j + 1) * c_dim1];
+ f13 = c[i + (j + 2) * c_dim1];
+ f14 = c[i + (j + 3) * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f12 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 1) * b_dim1];
+ f13 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 2) * b_dim1];
+ f14 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + (j + 3) * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + (j + 1) * c_dim1] = f12;
+ c[i + (j + 2) * c_dim1] = f13;
+ c[i + (j + 3) * c_dim1] = f14;
+ }
+ }
+ }
+ if (ujsec < jsec)
+ {
+ i4 = jj + jsec - 1;
+ for (j = jj + ujsec; j <= i4; ++j)
+ {
+ i5 = ii + uisec - 1;
+ for (i = ii; i <= i5; i += 4)
+ {
+ f11 = c[i + j * c_dim1];
+ f21 = c[i + 1 + j * c_dim1];
+ f31 = c[i + 2 + j * c_dim1];
+ f41 = c[i + 3 + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ f21 += t1[l - ll + 1 + ((i - ii + 2) << 8) -
+ 257] * b[l + j * b_dim1];
+ f31 += t1[l - ll + 1 + ((i - ii + 3) << 8) -
+ 257] * b[l + j * b_dim1];
+ f41 += t1[l - ll + 1 + ((i - ii + 4) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ c[i + 1 + j * c_dim1] = f21;
+ c[i + 2 + j * c_dim1] = f31;
+ c[i + 3 + j * c_dim1] = f41;
+ }
+ i5 = ii + isec - 1;
+ for (i = ii + uisec; i <= i5; ++i)
+ {
+ f11 = c[i + j * c_dim1];
+ i6 = ll + lsec - 1;
+ for (l = ll; l <= i6; ++l)
+ {
+ f11 += t1[l - ll + 1 + ((i - ii + 1) << 8) -
+ 257] * b[l + j * b_dim1];
+ }
+ c[i + j * c_dim1] = f11;
+ }
+ }
+ }
}
}
}
+ return;
}
else if (rxstride == 1 && aystride == 1 && bxstride == 1)
{
@@ -336,7 +569,9 @@ sinclude(`matmul_asm_'rtype_code`.m4')dnl
for (n = 0; n < count; n++)
for (x = 0; x < xcount; x++)
/* dest[x,y] += a[x,n] * b[n,y] */
- dest[x*rxstride + y*rystride] += abase[x*axstride + n*aystride] * bbase[n*bxstride + y*bystride];
+ dest[x*rxstride + y*rystride] +=
+ abase[x*axstride + n*aystride] *
+ bbase[n*bxstride + y*bystride];
}
else if (GFC_DESCRIPTOR_RANK (a) == 1)
{
@@ -373,6 +608,5 @@ sinclude(`matmul_asm_'rtype_code`.m4')dnl
}
}
}
-}
-
-#endif'
+}'
+#endif
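Both the generated file and the template keep the existing external-BLAS escape hatch ahead of the new blocked code: when the front end has supplied a ?gemm pointer (as with -fexternal-blas) and the estimated work xcount*ycount*count exceeds blas_limit cubed, the multiplication is handed to the BLAS routine instead, with each operand passed as "N" or "T" depending on which of its strides is 1. Stripped of the library-internal setup, the decision looks roughly like the sketch below; the typedef mirrors the visible blas_call prototype with GFC_REAL_8 spelled as double, and every other name is hypothetical.

    /* Sketch of the external-BLAS dispatch that runs before the blocked
       loops.  Returns 1 if the work was handed to *gemm, 0 otherwise.  */
    typedef void (*blas_call) (const char *, const char *, const int *,
                               const int *, const int *, const double *,
                               const double *, const int *, const double *,
                               const int *, const double *, double *,
                               const int *, int, int);

    static int
    try_external_gemm (double *dest, const double *abase, const double *bbase,
                       int xcount, int ycount, int count,
                       int axstride, int aystride,
                       int bxstride, int bystride, int rystride,
                       blas_call gemm, int blas_limit)
    {
      /* Estimate the work in float (large extents would overflow an int
         product) and only call out when it exceeds blas_limit cubed.  */
      float work = (float) xcount * (float) ycount * (float) count;
      float limit = (float) blas_limit * (float) blas_limit * (float) blas_limit;

      if (gemm != NULL
          && (axstride == 1 || aystride == 1)
          && (bxstride == 1 || bystride == 1)
          && work > limit)
        {
          const int m = xcount, n = ycount, k = count, ldc = rystride;
          const int lda = (axstride == 1) ? aystride : axstride;
          const int ldb = (bxstride == 1) ? bystride : bxstride;
          const double one = 1.0, zero = 0.0;

          if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
            {
              /* An operand whose row stride is 1 is already column-major
                 and goes in untransposed ("N"); otherwise it goes in
                 transposed ("T").  */
              gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T",
                    &m, &n, &k, &one, abase, &lda, bbase, &ldb,
                    &zero, dest, &ldc, 1, 1);
              return 1;
            }
        }
      return 0;   /* caller falls through to the blocked loops */
    }

Doing the size comparison on float casts rather than on index_type products presumably avoids overflow for very large extents; that part of the heuristic is unchanged by this patch, which only re-indents the BLAS branch and adds the blocked path after it.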