path: root/libjava/java/awt/Polygon.java
blob: 96c370aafc145e5e771c16a5174ce55df88b743e
/* Polygon.java -- class representing a polygon
   Copyright (C) 1999, 2002 Free Software Foundation, Inc.

This file is part of GNU Classpath.

GNU Classpath is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU Classpath is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Classpath; see the file COPYING.  If not, write to the
Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA.

Linking this library statically or dynamically with other modules is
making a combined work based on this library.  Thus, the terms and
conditions of the GNU General Public License cover the whole
combination.

As a special exception, the copyright holders of this library give you
permission to link this library with independent modules to produce an
executable, regardless of the license terms of these independent
modules, and to copy and distribute the resulting executable under
terms of your choice, provided that you also meet, for each linked
independent module, the terms and conditions of the license of that
module.  An independent module is a module which is not derived from
or based on this library.  If you modify this library, you may extend
this exception to your version of the library, but you are not
obligated to do so.  If you do not wish to do so, delete this
exception statement from your version. */


package java.awt;

import java.awt.geom.AffineTransform;
import java.awt.geom.PathIterator;
import java.awt.geom.Point2D;
import java.awt.geom.Rectangle2D;
import java.io.Serializable;

/**
 * This class represents a polygon, a closed, two-dimensional region in a
 * coordinate space. The region is bounded by an arbitrary number of line
 * segments, between (x,y) coordinate vertices. The polygon has even-odd
 * winding, meaning that a point is inside the shape if it crosses the
 * boundary an odd number of times on the way to infinity.
 *
 * <p>There are some public fields; if you mess with them in an inconsistent
 * manner, it is your own fault when you get NullPointerException,
 * ArrayIndexOutOfBoundsException, or invalid results. Also, this class is
 * not threadsafe.
 *
 * @author Aaron M. Renn <arenn@urbanophile.com>
 * @author Eric Blake <ebb9@email.byu.edu>
 * @since 1.0
 * @status updated to 1.4
 */
public class Polygon implements Shape, Serializable
{
  /**
   * Compatible with JDK 1.0+.
   */
  private static final long serialVersionUID = -6460061437900069969L;

  /**
   * The total number of endpoints.
   *
   * @serial the number of endpoints, possibly less than the array sizes
   */
  public int npoints;

  /**
   * The array of X coordinates of endpoints. This should not be null.
   *
   * @see #addPoint(int, int)
   * @serial the x coordinates
   */
  public int[] xpoints;

  /**
   * The array of Y coordinates of endpoints. This should not be null.
   *
   * @see #addPoint(int, int)
   * @serial the y coordinates
   */
  public int[] ypoints;

  /**
   * The bounding box of this polygon. This is lazily created and cached, so
   * it must be invalidated after changing points.
   *
   * @see #getBounds()
   * @serial the bounding box, or null
   */
  protected Rectangle bounds;

  /**
   * Cached flattened version - condense points and parallel lines, so the
   * result has area if there are >= 3 condensed vertices. flat[0] is the
   * number of condensed points, and (flat[odd], flat[odd+1]) form the
   * condensed points.
   *
   * @see #condense()
   * @see #contains(double, double)
   * @see #contains(double, double, double, double)
   */
  private transient int[] condensed;

  /**
   * Initializes an empty polygon.
   */
  public Polygon()
  {
    // Leave room for growth.
    xpoints = new int[4];
    ypoints = new int[4];
  }

  /**
   * Create a new polygon with the specified endpoints. The arrays are copied,
   * so that future modifications to the parameters do not affect the polygon.
   *
   * @param xpoints the array of X coordinates for this polygon
   * @param ypoints the array of Y coordinates for this polygon
   * @param npoints the total number of endpoints in this polygon
   * @throws NegativeArraySizeException if npoints is negative
   * @throws IndexOutOfBoundsException if npoints exceeds either array
   * @throws NullPointerException if xpoints or ypoints is null
   */
  public Polygon(int[] xpoints, int[] ypoints, int npoints)
  {
    this.xpoints = new int[npoints];
    this.ypoints = new int[npoints];
    System.arraycopy(xpoints, 0, this.xpoints, 0, npoints);
    System.arraycopy(ypoints, 0, this.ypoints, 0, npoints);
    this.npoints = npoints;
  }
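
  /* A minimal usage sketch for the constructors above; the triangle
     coordinates are arbitrary:

       int[] xs = { 0, 10, 5 };
       int[] ys = { 0, 0, 10 };
       Polygon triangle = new Polygon(xs, ys, 3);
       xs[0] = 100;                            // no effect, the arrays were copied
       boolean hit = triangle.contains(5, 3);  // true, (5,3) is inside
  */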

  /**
   * Reset the polygon to be empty. The arrays are left alone, to avoid object
   * allocation, but the number of points is set to 0, and all cached data
   * is discarded. If you are discarding a huge number of points, it may be
   * more efficient to just create a new Polygon.
   *
   * @see #invalidate()
   * @since 1.4
   */
  public void reset()
  {
    npoints = 0;
    invalidate();
  }

  /**
   * Invalidate or flush all cached data. After direct manipulation of the
   * public member fields, this is necessary to avoid inconsistent results
   * in methods like <code>contains</code>.
   *
   * @see #getBounds()
   * @since 1.4
   */
  public void invalidate()
  {
    bounds = null;
    condensed = null;
  }

  /**
   * Translates the polygon by adding the specified values to all X and Y
   * coordinates. This updates the bounding box, if it has been calculated.
   *
   * @param dx the amount to add to all X coordinates
   * @param dy the amount to add to all Y coordinates
   * @since 1.1
   */
  public void translate(int dx, int dy)
  {
    int i = npoints;
    while (--i >= 0)
      {
        xpoints[i] += dx;
        ypoints[i] += dy;
      }
    if (bounds != null)
      {
        bounds.x += dx;
        bounds.y += dy;
      }
    condensed = null;
  }

  /**
   * Adds the specified endpoint to the polygon. This updates the bounding
   * box, if it has been created.
   *
   * @param x the X coordinate of the point to add
   * @param y the Y coordinate of the point to add
   */
  public void addPoint(int x, int y)
  {
    if (npoints + 1 > xpoints.length)
      {
        int[] newx = new int[npoints + 1];
        System.arraycopy(xpoints, 0, newx, 0, npoints);
        xpoints = newx;
      }
    if (npoints + 1 > ypoints.length)
      {
        int[] newy = new int[npoints + 1];
        System.arraycopy(ypoints, 0, newy, 0, npoints);
        ypoints = newy;
      }
    xpoints[npoints] = x;
    ypoints[npoints] = y;
    npoints++;
    if (bounds != null)
      {
        if (npoints == 1)
          {
            bounds.x = x;
            bounds.y = y;
          }
        else
          {
            if (x < bounds.x)
              {
                bounds.width += bounds.x - x;
                bounds.x = x;
              }
            else if (x > bounds.x + bounds.width)
              bounds.width = x - bounds.x;
            if (y < bounds.y)
              {
                bounds.height += bounds.y - y;
                bounds.y = y;
              }
            else if (y > bounds.y + bounds.height)
              bounds.height = y - bounds.y;
          }
      }
    condensed = null;
  }
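
  /* Incremental construction sketch for addPoint; coordinates are arbitrary:

       Polygon p = new Polygon();  // default constructor leaves room for 4
       p.addPoint(0, 0);
       p.addPoint(10, 0);
       p.addPoint(5, 10);          // p.npoints is now 3
       p.getBounds();              // bounding box (0, 0, 10, 10)
       p.addPoint(5, 20);          // cached bounds grow in place to height 20
  */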

  /**
   * Returns the bounding box of this polygon. This is the smallest
   * rectangle with sides parallel to the X axis that will contain this
   * polygon.
   *
   * @return the bounding box for this polygon
   * @see #getBounds2D()
   * @since 1.1
   */
  public Rectangle getBounds()
  {
    return getBoundingBox ();
  }

  /**
   * Returns the bounding box of this polygon. This is the smallest
   * rectangle with sides parallel to the X axis that will contain this
   * polygon.
   *
   * @return the bounding box for this polygon
   * @see #getBounds2D()
   * @deprecated use {@link #getBounds()} instead
   */
  public Rectangle getBoundingBox()
  {
    if (bounds == null)
      {
        if (npoints == 0)
          return bounds = new Rectangle ();
        int i = npoints - 1;
        int minx = xpoints[i];
        int maxx = minx;
        int miny = ypoints[i];
        int maxy = miny;
        while (--i >= 0)
          {
            int x = xpoints[i];
            int y = ypoints[i];
            if (x < minx)
              minx = x;
            else if (x > maxx)
              maxx = x;
            if (y < miny)
              miny = y;
            else if (y > maxy)
              maxy = y;
          }
        bounds = new Rectangle (minx, miny, maxx - minx, maxy - miny);
      }
    return bounds;
  }
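
  /* Example for the computation above: the triangle (0,0), (10,0), (5,10)
     yields a bounding box with x = 0, y = 0, width = 10, height = 10, i.e.
     x = minx, y = miny, width = maxx - minx, height = maxy - miny.
  */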

  /**
   * Tests whether or not the specified point is inside this polygon.
   *
   * @param p the point to test
   * @return true if the point is inside this polygon
   * @throws NullPointerException if p is null
   * @see #contains(double, double)
   */
  public boolean contains(Point p)
  {
    return contains(p.getX(), p.getY());
  }

  /**
   * Tests whether or not the specified point is inside this polygon.
   *
   * @param x the X coordinate of the point to test
   * @param y the Y coordinate of the point to test
   * @return true if the point is inside this polygon
   * @see #contains(double, double)
   * @since 1.1
   */
  public boolean contains(int x, int y)
  {
    return contains((double) x, (double) y);
  }

  /**
   * Tests whether or not the specified point is inside this polygon.
   *
   * @param x the X coordinate of the point to test
   * @param y the Y coordinate of the point to test
   * @return true if the point is inside this polygon
   * @see #contains(double, double)
   * @deprecated use {@link #contains(int, int)} instead
   */
  public boolean inside(int x, int y)
  {
    return contains((double) x, (double) y);
  }

  /**
   * Returns a high-precision bounding box of this polygon. This is the
   * smallest rectangle with sides parallel to the X axis that will contain
   * this polygon.
   *
   * @return the bounding box for this polygon
   * @see #getBounds()
   * @since 1.2
   */
  public Rectangle2D getBounds2D()
  {
    // For polygons, the integer version is exact!
    return getBounds();
  }

  /**
   * Tests whether or not the specified point is inside this polygon.
   *
   * @param x the X coordinate of the point to test
   * @param y the Y coordinate of the point to test
   * @return true if the point is inside this polygon
   * @since 1.2
   */
  public boolean contains(double x, double y)
  {
    // First, the obvious bounds checks.
    if (! condense() || ! getBounds().contains(x, y))
      return false;
    // A point is contained if a ray to (-inf, y) crosses an odd number
    // of segments. This must obey the semantics of Shape when the point is
    // exactly on a segment or vertex: a point is inside only if the adjacent
    // point in the increasing x or y direction is also inside. Note that we
    // are guaranteed that the condensed polygon has area, and no consecutive
    // segments with identical slope.
    boolean inside = false;
    int limit = condensed[0];
    int curx = condensed[(limit << 1) - 1];
    int cury = condensed[limit << 1];
    for (int i = 1; i <= limit; i++)
      {
        int priorx = curx;
        int priory = cury;
        curx = condensed[(i << 1) - 1];
        cury = condensed[i << 1];
        if ((priorx > x && curx > x) // Left of segment, or NaN.
            || (priory > y && cury > y) // Below segment, or NaN.
            || (priory < y && cury < y)) // Above segment.
          continue;
        if (priory == cury) // Horizontal segment, y == cury == priory
          {
            if (priorx < x && curx < x) // Right of segment.
              {
                inside = ! inside;
                continue;
              }
            // Did we approach this segment from above or below?
            // This mess is necessary to obey rules of Shape.
            priory = condensed[((limit + i - 2) % limit) << 1];
            boolean above = priory > cury;
            if ((curx == x && (curx > priorx || above))
                || (priorx == x && (curx < priorx || ! above))
                || (curx > priorx && ! above) || above)
              inside = ! inside;
            continue;
          }
        if (priorx == x && priory == y) // On prior vertex.
          continue;
        if (priorx == curx // Vertical segment.
            || (priorx < x && curx < x)) // Right of segment.
          {
            inside = ! inside;
            continue;
          }
        // The point is inside the segment's bounding box, compare slopes.
        double leftx = curx > priorx ? priorx : curx;
        double lefty = curx > priorx ? priory : cury;
        double slopeseg = (double) (cury - priory) / (curx - priorx);
        double slopepoint = (double) (y - lefty) / (x - leftx);
        if ((slopeseg > 0 && slopeseg > slopepoint)
            || slopeseg < slopepoint)
          inside = ! inside;
      }
    return inside;
  }
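
  /* Worked example of the even-odd rule implemented above, using the square
     (0,0), (10,0), (10,10), (0,10): a ray cast toward negative x from (5,5)
     crosses one segment (the left edge), an odd count, so contains(5, 5) is
     true; a ray from (5,15) crosses no segment, an even count, so
     contains(5, 15) is false.
  */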

  /**
   * Tests whether or not the specified point is inside this polygon.
   *
   * @param p the point to test
   * @return true if the point is inside this polygon
   * @throws NullPointerException if p is null
   * @see #contains(double, double)
   * @since 1.2
   */
  public boolean contains(Point2D p)
  {
    return contains(p.getX(), p.getY());
  }

  /**
   * Test if a high-precision rectangle intersects the shape. This is true
   * if any point in the rectangle is in the shape. This implementation is
   * precise.
   *
   * @param x the x coordinate of the rectangle
   * @param y the y coordinate of the rectangle
   * @param w the width of the rectangle, treated as point if negative
   * @param h the height of the rectangle, treated as point if negative
   * @return true if the rectangle intersects this shape
   * @since 1.2
   */
  public boolean intersects(double x, double y, double w, double h)
  {
    // First, the obvious bounds checks.
    if (w <= 0 || h <= 0 || npoints == 0 ||
        ! getBounds().intersects(x, y, w, h))
      return false; // Disjoint bounds.
    if ((x <= bounds.x && x + w >= bounds.x + bounds.width
         && y <= bounds.y && y + h >= bounds.y + bounds.height)
        || contains(x, y))
      return true; // Rectangle contains the polygon, or one point matches.
    // If any vertex is in the rectangle, the two might intersect.
    int curx = 0;
    int cury = 0;
    for (int i = 0; i < npoints; i++)
      {
        curx = xpoints[i];
        cury = ypoints[i];
        if (curx >= x && curx < x + w && cury >= y && cury < y + h
            && contains(curx, cury)) // Boundary check necessary.
          return true;
      }
    // Finally, if at least one of the four bounding lines intersect any
    // segment of the polygon, return true. Be careful of the semantics of
    // Shape; coinciding lines do not necessarily return true.
    for (int i = 0; i < npoints; i++)
      {
        int priorx = curx;
        int priory = cury;
        curx = xpoints[i];
        cury = ypoints[i];
        if (priorx == curx) // Vertical segment.
          {
            if (curx < x || curx >= x + w) // Outside rectangle.
              continue;
            if ((cury >= y + h && priory <= y)
                || (cury <= y && priory >= y + h))
              return true; // Bisects rectangle.
            continue;
          }
        if (priory == cury) // Horizontal segment.
          {
            if (cury < y || cury >= y + h) // Outside rectangle.
              continue;
            if ((curx >= x + w && priorx <= x)
                || (curx <= x && priorx >= x + w))
              return true; // Bisects rectangle.
            continue;
          }
        // Slanted segment.
        double slope = (double) (cury - priory) / (curx - priorx);
        double intersect = slope * (x - curx) + cury;
        if (intersect > y && intersect < y + h) // Intersects left edge.
          return true;
        intersect = slope * (x + w - curx) + cury;
        if (intersect > y && intersect < y + h) // Intersects right edge.
          return true;
        intersect = (y - cury) / slope + curx;
        if (intersect > x && intersect < x + w) // Intersects bottom edge.
          return true;
        intersect = (y + h - cury) / slope + curx;
        if (intersect > x && intersect < x + w) // Intersects top edge.
          return true;
      }
    return false;
  }
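
  /* Example of the intersection test above, using the triangle (0,0),
     (10,0), (5,10): intersects(4, 4, 2, 2) is true because that rectangle
     lies entirely inside the triangle, while intersects(20, 20, 2, 2) is
     false because its bounds are disjoint from the polygon's bounds.
  */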

  /**
   * Test if a high-precision rectangle intersects the shape. This is true
   * if any point in the rectangle is in the shape. This implementation is
   * precise.
   *
   * @param r the rectangle
   * @return true if the rectangle intersects this shape
   * @throws NullPointerException if r is null
   * @see #intersects(double, double, double, double)
   * @since 1.2
   */
  public boolean intersects(Rectangle2D r)
  {
    return intersects(r.getX(), r.getY(), r.getWidth(), r.getHeight());
  }

  /**
   * Test if a high-precision rectangle lies completely in the shape. This is
   * true if all points in the rectangle are in the shape. This implementation
   * is precise.
   *
   * @param x the x coordinate of the rectangle
   * @param y the y coordinate of the rectangle
   * @param w the width of the rectangle, treated as point if negative
   * @param h the height of the rectangle, treated as point if negative
   * @return true if the rectangle is contained in this shape
   * @since 1.2
   */
  public boolean contains(double x, double y, double w, double h)
  {
    // First, the obvious bounds checks.
    if (w <= 0 || h <= 0 || ! contains(x, y)
        || ! bounds.contains(x, y, w, h))
      return false;
    // Now, if any of the four bounding lines intersects a polygon segment,
    // return false. The previous check had the side effect of setting
    // the condensed array, which we use. Be careful of the semantics of
    // Shape; coinciding lines do not necessarily return false.
    int limit = condensed[0];
    int curx = condensed[(limit << 1) - 1];
    int cury = condensed[limit << 1];
    for (int i = 1; i <= limit; i++)
      {
        int priorx = curx;
        int priory = cury;
        curx = condensed[(i << 1) - 1];
        cury = condensed[i << 1];
        if (curx > x && curx < x + w && cury > y && cury < y + h)
          return false; // Vertex is in rectangle.
        if (priorx == curx) // Vertical segment.
          {
            if (curx < x || curx > x + w) // Outside rectangle.
              continue;
            if ((cury >= y + h && priory <= y)
                || (cury <= y && priory >= y + h))
              return false; // Bisects rectangle.
            continue;
          }
        if (priory == cury) // Horizontal segment.
          {
            if (cury < y || cury > y + h) // Outside rectangle.
              continue;
            if ((curx >= x + w && priorx <= x)
                || (curx <= x && priorx >= x + w))
              return false; // Bisects rectangle.
            continue;
          }
        // Slanted segment.
        double slope = (double) (cury - priory) / (curx - priorx);
        double intersect = slope * (x - curx) + cury;
        if (intersect > y && intersect < y + h) // Intersects left edge.
          return false;
        intersect = slope * (x + w - curx) + cury;
        if (intersect > y && intersect < y + h) // Intersects right edge.
          return false;
        intersect = (y - cury) / slope + curx;
        if (intersect > x && intersect < x + w) // Intersects bottom edge.
          return false;
        intersect = (y + h - cury) / slope + curx;
        if (intersect > x && intersect < x + w) // Intersects top edge.
          return false;
      }
    return true;
  }
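
  /* Example of the containment test above, using the square (0,0), (10,0),
     (10,10), (0,10): contains(2, 2, 4, 4) is true because the rectangle lies
     entirely inside, while contains(8, 8, 5, 5) is false because the
     rectangle extends past the right and top edges.
  */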

  /**
   * Test if a high-precision rectangle lies completely in the shape. This is
   * true if all points in the rectangle are in the shape. This implementation
   * is precise.
   *
   * @param r the rectangle
   * @return true if the rectangle is contained in this shape
   * @throws NullPointerException if r is null
   * @see #contains(double, double, double, double)
   * @since 1.2
   */
  public boolean contains(Rectangle2D r)
  {
    return contains(r.getX(), r.getY(), r.getWidth(), r.getHeight());
  }

  /**
   * Return an iterator along the shape boundary. If the optional transform
   * is provided, the iterator is transformed accordingly. Each call returns
   * a new object, independent from others in use. This class is not
   * threadsafe to begin with, so the path iterator is not either.
   *
   * @param transform an optional transform to apply to the iterator
   * @return a new iterator over the boundary
   * @since 1.2
   */
  public PathIterator getPathIterator(final AffineTransform transform)
  {
    return new PathIterator()
    {
      /** The current vertex of iteration. */
      private int vertex;

      public int getWindingRule()
      {
        return WIND_EVEN_ODD;
      }

      public boolean isDone()
      {
        return vertex > npoints;
      }

      public void next()
      {
        vertex++;
      }

      public int currentSegment(float[] coords)
      {
        if (vertex >= npoints)
          return SEG_CLOSE;
        coords[0] = xpoints[vertex];
        coords[1] = ypoints[vertex];
        if (transform != null)
          transform.transform(coords, 0, coords, 0, 1);
        return vertex == 0 ? SEG_MOVETO : SEG_LINETO;
      }

      public int currentSegment(double[] coords)
      {
        if (vertex >= npoints)
          return SEG_CLOSE;
        coords[0] = xpoints[vertex];
        coords[1] = ypoints[vertex];
        if (transform != null)
          transform.transform(coords, 0, coords, 0, 1);
        return vertex == 0 ? SEG_MOVETO : SEG_LINETO;
      }
    };
  }
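
  /* A minimal sketch of walking the iterator returned above, where poly
     stands for any Polygon instance; the coords array must have room for
     six values to satisfy the general PathIterator contract, although a
     polygon only ever fills the first two:

       float[] coords = new float[6];
       for (PathIterator it = poly.getPathIterator(null); ! it.isDone();
            it.next())
         {
           int kind = it.currentSegment(coords);
           // SEG_MOVETO for the first vertex, SEG_LINETO for the others,
           // SEG_CLOSE once every vertex has been visited.
         }
  */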

  /**
   * Return an iterator along the flattened version of the shape boundary.
   * Since polygons are already flat, the flatness parameter is ignored, and
   * the resulting iterator only has SEG_MOVETO, SEG_LINETO and SEG_CLOSE
   * points. If the optional transform is provided, the iterator is
   * transformed accordingly. Each call returns a new object, independent
   * from others in use. This class is not threadsafe to begin with, so the
   * path iterator is not either.
   *
   * @param transform an optional transform to apply to the iterator
   * @param flatness the maximum distance for deviation from the real boundary
   * @return a new iterator over the boundary
   * @since 1.2
   */
  public PathIterator getPathIterator(AffineTransform transform,
                                      double flatness)
  {
    return getPathIterator(transform);
  }

  /**
   * Helper for contains, which caches a condensed version of the polygon.
   * This condenses all collinear points, so that consecutive segments in
   * the condensed version always have different slope.
   *
   * @return true if the condensed polygon has area
   * @see #condensed
   * @see #contains(double, double)
   */
  private boolean condense()
  {
    if (npoints <= 2)
      return false;
    if (condensed != null)
      return condensed[0] > 2;
    condensed = new int[npoints * 2 + 1];
    int curx = xpoints[npoints - 1];
    int cury = ypoints[npoints - 1];
    double curslope = Double.NaN;
    int count = 0;
  outer:
    for (int i = 0; i < npoints; i++)
      {
        int priorx = curx;
        int priory = cury;
        double priorslope = curslope;
        curx = xpoints[i];
        cury = ypoints[i];
        while (curx == priorx && cury == priory)
          {
            if (++i == npoints)
              break outer;
            curx = xpoints[i];
            cury = ypoints[i];
          }
        curslope = (curx == priorx ? Double.POSITIVE_INFINITY
                    : (double) (cury - priory) / (curx - priorx));
        if (priorslope == curslope)
          {
            if (count > 1 && condensed[(count << 1) - 3] == curx
                && condensed[(count << 1) - 2] == cury)
              {
                count--;
                continue;
              }
          }
        else
          count++;
        condensed[(count << 1) - 1] = curx;
        condensed[count << 1] = cury;
      }
    condensed[0] = count;
    return count > 2;
  }
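
  /* Illustration of the condensing above: for the vertex list (0,0), (5,0),
     (10,0), (10,10), the point (5,0) lies on the segment from (0,0) to
     (10,0), so it is dropped and the condensed polygon is the triangle
     (0,0), (10,0), (10,10); condense() then returns true because three
     vertices remain.
  */
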
} // class Polygon
path: root/gcc/fortran/trans-array.c
/* Array translation routines
   Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation,
   Inc.
   Contributed by Paul Brook <paul@nowt.org>
   and Steven Bosscher <s.bosscher@student.tudelft.nl>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

/* trans-array.c-- Various array related code, including scalarization,
                   allocation, initialization and other support routines.  */

/* How the scalarizer works.
   In gfortran, array expressions use the same core routines as scalar
   expressions.
   First, a Scalarization State (SS) chain is built.  This is done by walking
   the expression tree, and building a linear list of the terms in the
   expression.  As the tree is walked, scalar subexpressions are translated.

   The scalarization parameters are stored in a gfc_loopinfo structure.
   First the start and stride of each term is calculated by
   gfc_conv_ss_startstride.  During this process the expressions for the array
   descriptors and data pointers are also translated.

   If the expression is an assignment, we must then resolve any dependencies.
   In Fortran all the rhs values of an assignment must be evaluated before
   any assignments take place.  This can require a temporary array to store the
   values.  We also require a temporary when we are passing array expressions
   or vector subscripts as procedure parameters.

   Array sections are passed without copying to a temporary.  These use the
   scalarizer to determine the shape of the section.  The flag
   loop->array_parameter tells the scalarizer that the actual values and loop
   variables will not be required.

   The function gfc_conv_loop_setup generates the scalarization setup code.
   It determines the range of the scalarizing loop variables.  If a temporary
   is required, this is created and initialized.  Code for scalar expressions
   taken outside the loop is also generated at this time.  Next the offset and
   scaling required to translate from loop variables to array indices for each
   term is calculated.

   A call to gfc_start_scalarized_body marks the start of the scalarized
   expression.  This creates a scope and declares the loop variables.  Before
   calling this, gfc_mark_ss_chain_used must be used to indicate which terms
   will be used inside this loop.

   The scalar gfc_conv_* functions are then used to build the main body of the
   scalarization loop.  Scalarization loop variables and precalculated scalar
   values are automatically substituted.  Note that gfc_advance_se_ss_chain
   must be used, rather than changing the se->ss directly.

   For assignment expressions requiring a temporary two sub loops are
   generated.  The first stores the result of the expression in the temporary,
   the second copies it to the result.  A call to
   gfc_trans_scalarized_loop_boundary marks the end of the main loop code and
   the start of the copying loop.  The temporary may be less than full rank.

   Finally gfc_trans_scalarizing_loops is called to generate the implicit do
   loops.  The loops are added to the pre chain of the loopinfo.  The post
   chain may still contain cleanup code.

   After the loop code has been added into its parent scope gfc_cleanup_loop
   is called to free all the SS allocated by the scalarizer.  */
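
/* As a rough illustration of the flow described above, a rank-one
   assignment such as

     a(1:n) = b(1:n) * 2.0

   is scalarized into something like the following pseudo-code, ignoring
   offsets and strides:

     for (S = 0; S < n; S++)
       a.data[S] = b.data[S] * 2.0;

   with a temporary array inserted between evaluation and assignment when
   the lhs and rhs are found to overlap.  */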

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "tree-gimple.h"
#include "ggc.h"
#include "toplev.h"
#include "real.h"
#include "flags.h"
#include "gfortran.h"
#include "trans.h"
#include "trans-stmt.h"
#include "trans-types.h"
#include "trans-array.h"
#include "trans-const.h"
#include "dependency.h"

static gfc_ss *gfc_walk_subexpr (gfc_ss *, gfc_expr *);
static bool gfc_get_array_constructor_size (mpz_t *, gfc_constructor *);

/* The contents of this structure aren't actually used, just the address.  */
static gfc_ss gfc_ss_terminator_var;
gfc_ss * const gfc_ss_terminator = &gfc_ss_terminator_var;


static tree
gfc_array_dataptr_type (tree desc)
{
  return (GFC_TYPE_ARRAY_DATAPTR_TYPE (TREE_TYPE (desc)));
}


/* Build expressions to access the members of an array descriptor.
   It's surprisingly easy to mess up here, so never access
   an array descriptor by "brute force", always use these
   functions.  This also avoids problems if we change the format
   of an array descriptor.

   To understand these magic numbers, look at the comments
   before gfc_build_array_type() in trans-types.c.

   The code within these defines should be the only code which knows the format
   of an array descriptor.

   Any code just needing to obtain the bounds of an array should use
   gfc_conv_array_* rather than the following functions, as these will return
   known constant values, and work with arrays which do not have descriptors.

   Don't forget to #undef these!  */

#define DATA_FIELD 0
#define OFFSET_FIELD 1
#define DTYPE_FIELD 2
#define DIMENSION_FIELD 3

#define STRIDE_SUBFIELD 0
#define LBOUND_SUBFIELD 1
#define UBOUND_SUBFIELD 2

/* This provides READ-ONLY access to the data field.  The field itself
   doesn't have the proper type.  */

tree
gfc_conv_descriptor_data_get (tree desc)
{
  tree field, type, t;

  type = TREE_TYPE (desc);
  gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));

  field = TYPE_FIELDS (type);
  gcc_assert (DATA_FIELD == 0);

  t = build3 (COMPONENT_REF, TREE_TYPE (field), desc, field, NULL_TREE);
  t = fold_convert (GFC_TYPE_ARRAY_DATAPTR_TYPE (type), t);

  return t;
}

/* This provides WRITE access to the data field.  */

void
gfc_conv_descriptor_data_set (stmtblock_t *block, tree desc, tree value)
{
  tree field, type, t;

  type = TREE_TYPE (desc);
  gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));

  field = TYPE_FIELDS (type);
  gcc_assert (DATA_FIELD == 0);

  t = build3 (COMPONENT_REF, TREE_TYPE (field), desc, field, NULL_TREE);
  gfc_add_modify_expr (block, t, fold_convert (TREE_TYPE (field), value));
}


/* This provides address access to the data field.  This should only be
   used by array allocation, passing this on to the runtime.  */

tree
gfc_conv_descriptor_data_addr (tree desc)
{
  tree field, type, t;

  type = TREE_TYPE (desc);
  gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));

  field = TYPE_FIELDS (type);
  gcc_assert (DATA_FIELD == 0);

  t = build3 (COMPONENT_REF, TREE_TYPE (field), desc, field, NULL_TREE);
  return build_fold_addr_expr (t);
}

tree
gfc_conv_descriptor_offset (tree desc)
{
  tree type;
  tree field;

  type = TREE_TYPE (desc);
  gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));

  field = gfc_advance_chain (TYPE_FIELDS (type), OFFSET_FIELD);
  gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);

  return build3 (COMPONENT_REF, TREE_TYPE (field), desc, field, NULL_TREE);
}

tree
gfc_conv_descriptor_dtype (tree desc)
{
  tree field;
  tree type;

  type = TREE_TYPE (desc);
  gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));

  field = gfc_advance_chain (TYPE_FIELDS (type), DTYPE_FIELD);
  gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);

  return build3 (COMPONENT_REF, TREE_TYPE (field), desc, field, NULL_TREE);
}

static tree
gfc_conv_descriptor_dimension (tree desc, tree dim)
{
  tree field;
  tree type;
  tree tmp;

  type = TREE_TYPE (desc);
  gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));

  field = gfc_advance_chain (TYPE_FIELDS (type), DIMENSION_FIELD);
  gcc_assert (field != NULL_TREE
	  && TREE_CODE (TREE_TYPE (field)) == ARRAY_TYPE
	  && TREE_CODE (TREE_TYPE (TREE_TYPE (field))) == RECORD_TYPE);

  tmp = build3 (COMPONENT_REF, TREE_TYPE (field), desc, field, NULL_TREE);
  tmp = gfc_build_array_ref (tmp, dim);
  return tmp;
}

tree
gfc_conv_descriptor_stride (tree desc, tree dim)
{
  tree tmp;
  tree field;

  tmp = gfc_conv_descriptor_dimension (desc, dim);
  field = TYPE_FIELDS (TREE_TYPE (tmp));
  field = gfc_advance_chain (field, STRIDE_SUBFIELD);
  gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);

  tmp = build3 (COMPONENT_REF, TREE_TYPE (field), tmp, field, NULL_TREE);
  return tmp;
}

tree
gfc_conv_descriptor_lbound (tree desc, tree dim)
{
  tree tmp;
  tree field;

  tmp = gfc_conv_descriptor_dimension (desc, dim);
  field = TYPE_FIELDS (TREE_TYPE (tmp));
  field = gfc_advance_chain (field, LBOUND_SUBFIELD);
  gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);

  tmp = build3 (COMPONENT_REF, TREE_TYPE (field), tmp, field, NULL_TREE);
  return tmp;
}

tree
gfc_conv_descriptor_ubound (tree desc, tree dim)
{
  tree tmp;
  tree field;

  tmp = gfc_conv_descriptor_dimension (desc, dim);
  field = TYPE_FIELDS (TREE_TYPE (tmp));
  field = gfc_advance_chain (field, UBOUND_SUBFIELD);
  gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);

  tmp = build3 (COMPONENT_REF, TREE_TYPE (field), tmp, field, NULL_TREE);
  return tmp;
}


/* Build a null array descriptor constructor.  */

tree
gfc_build_null_descriptor (tree type)
{
  tree field;
  tree tmp;

  gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
  gcc_assert (DATA_FIELD == 0);
  field = TYPE_FIELDS (type);

  /* Set a NULL data pointer.  */
  tmp = build_constructor_single (type, field, null_pointer_node);
  TREE_CONSTANT (tmp) = 1;
  TREE_INVARIANT (tmp) = 1;
  /* All other fields are ignored.  */

  return tmp;
}


/* Cleanup those #defines.  */

#undef DATA_FIELD
#undef OFFSET_FIELD
#undef DTYPE_FIELD
#undef DIMENSION_FIELD
#undef STRIDE_SUBFIELD
#undef LBOUND_SUBFIELD
#undef UBOUND_SUBFIELD


/* Mark a SS chain as used.  Flags specifies in which loops the SS is used.
   flags & 1 = Main loop body.
   flags & 2 = temp copy loop.  */

void
gfc_mark_ss_chain_used (gfc_ss * ss, unsigned flags)
{
  for (; ss != gfc_ss_terminator; ss = ss->next)
    ss->useflags = flags;
}

static void gfc_free_ss (gfc_ss *);


/* Free a gfc_ss chain.  */

static void
gfc_free_ss_chain (gfc_ss * ss)
{
  gfc_ss *next;

  while (ss != gfc_ss_terminator)
    {
      gcc_assert (ss != NULL);
      next = ss->next;
      gfc_free_ss (ss);
      ss = next;
    }
}


/* Free a SS.  */

static void
gfc_free_ss (gfc_ss * ss)
{
  int n;

  switch (ss->type)
    {
    case GFC_SS_SECTION:
      for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
	{
	  if (ss->data.info.subscript[n])
	    gfc_free_ss_chain (ss->data.info.subscript[n]);
	}
      break;

    default:
      break;
    }

  gfc_free (ss);
}


/* Free all the SS associated with a loop.  */

void
gfc_cleanup_loop (gfc_loopinfo * loop)
{
  gfc_ss *ss;
  gfc_ss *next;

  ss = loop->ss;
  while (ss != gfc_ss_terminator)
    {
      gcc_assert (ss != NULL);
      next = ss->loop_chain;
      gfc_free_ss (ss);
      ss = next;
    }
}


/* Associate a SS chain with a loop.  */

void
gfc_add_ss_to_loop (gfc_loopinfo * loop, gfc_ss * head)
{
  gfc_ss *ss;

  if (head == gfc_ss_terminator)
    return;

  ss = head;
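  /* Thread the chain onto the loop: each SS's loop_chain points to its
     successor, and the last SS picks up whatever the loop already held,
     so the whole head..tail run is prepended to loop->ss below.  */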
  for (; ss && ss != gfc_ss_terminator; ss = ss->next)
    {
      if (ss->next == gfc_ss_terminator)
	ss->loop_chain = loop->ss;
      else
	ss->loop_chain = ss->next;
    }
  gcc_assert (ss == gfc_ss_terminator);
  loop->ss = head;
}


/* Generate an initializer for a static pointer or allocatable array.  */

void
gfc_trans_static_array_pointer (gfc_symbol * sym)
{
  tree type;

  gcc_assert (TREE_STATIC (sym->backend_decl));
  /* Just zero the data member.  */
  type = TREE_TYPE (sym->backend_decl);
  DECL_INITIAL (sym->backend_decl) = gfc_build_null_descriptor (type);
}


/* If the bounds of SE's loop have not yet been set, see if they can be
   determined from array spec AS, which is the array spec of a called
   function.  MAPPING maps the callee's dummy arguments to the values
   that the caller is passing.  Add any initialization and finalization
   code to SE.  */

void
gfc_set_loop_bounds_from_array_spec (gfc_interface_mapping * mapping,
				     gfc_se * se, gfc_array_spec * as)
{
  int n, dim;
  gfc_se tmpse;
  tree lower;
  tree upper;
  tree tmp;

  if (as && as->type == AS_EXPLICIT)
    for (dim = 0; dim < se->loop->dimen; dim++)
      {
	n = se->loop->order[dim];
	if (se->loop->to[n] == NULL_TREE)
	  {
	    /* Evaluate the lower bound.  */
	    gfc_init_se (&tmpse, NULL);
	    gfc_apply_interface_mapping (mapping, &tmpse, as->lower[dim]);
	    gfc_add_block_to_block (&se->pre, &tmpse.pre);
	    gfc_add_block_to_block (&se->post, &tmpse.post);
	    lower = tmpse.expr;

	    /* ...and the upper bound.  */
	    gfc_init_se (&tmpse, NULL);
	    gfc_apply_interface_mapping (mapping, &tmpse, as->upper[dim]);
	    gfc_add_block_to_block (&se->pre, &tmpse.pre);
	    gfc_add_block_to_block (&se->post, &tmpse.post);
	    upper = tmpse.expr;

	    /* Set the upper bound of the loop to UPPER - LOWER.  */
	    tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, upper, lower);
	    tmp = gfc_evaluate_now (tmp, &se->pre);
	    se->loop->to[n] = tmp;
	  }
      }
}


/* Generate code to allocate an array temporary, or create a variable to
   hold the data.  If size is NULL, zero the descriptor so that the
   callee will allocate the array.  If DEALLOC is true, also generate code to
   free the array afterwards.

   Initialization code is added to PRE and finalization code to POST.
   DYNAMIC is true if the caller may want to extend the array later
   using realloc.  This prevents us from putting the array on the stack.  */

static void
gfc_trans_allocate_array_storage (stmtblock_t * pre, stmtblock_t * post,
                                  gfc_ss_info * info, tree size, tree nelem,
                                  bool dynamic, bool dealloc)
{
  tree tmp;
  tree args;
  tree desc;
  bool onstack;

  desc = info->descriptor;
  info->offset = gfc_index_zero_node;
  if (size == NULL_TREE || integer_zerop (size))
    {
      /* A callee allocated array.  */
      gfc_conv_descriptor_data_set (pre, desc, null_pointer_node);
      onstack = FALSE;
    }
  else
    {
      /* Allocate the temporary.  */
      onstack = !dynamic && gfc_can_put_var_on_stack (size);

      if (onstack)
	{
	  /* Make a temporary variable to hold the data.  */
	  tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (nelem), nelem,
			     gfc_index_one_node);
	  tmp = build_range_type (gfc_array_index_type, gfc_index_zero_node,
				  tmp);
	  tmp = build_array_type (gfc_get_element_type (TREE_TYPE (desc)),
				  tmp);
	  tmp = gfc_create_var (tmp, "A");
	  tmp = build_fold_addr_expr (tmp);
	  gfc_conv_descriptor_data_set (pre, desc, tmp);
	}
      else
	{
	  /* Allocate memory to hold the data.  */
	  args = gfc_chainon_list (NULL_TREE, size);

	  if (gfc_index_integer_kind == 4)
	    tmp = gfor_fndecl_internal_malloc;
	  else if (gfc_index_integer_kind == 8)
	    tmp = gfor_fndecl_internal_malloc64;
	  else
	    gcc_unreachable ();
	  tmp = build_function_call_expr (tmp, args);
	  tmp = gfc_evaluate_now (tmp, pre);
	  gfc_conv_descriptor_data_set (pre, desc, tmp);
	}
    }
  info->data = gfc_conv_descriptor_data_get (desc);

  /* The offset is zero because we create temporaries with a zero
     lower bound.  */
  tmp = gfc_conv_descriptor_offset (desc);
  gfc_add_modify_expr (pre, tmp, gfc_index_zero_node);

  if (dealloc && !onstack)
    {
      /* Free the temporary.  */
      tmp = gfc_conv_descriptor_data_get (desc);
      tmp = fold_convert (pvoid_type_node, tmp);
      tmp = gfc_chainon_list (NULL_TREE, tmp);
      tmp = build_function_call_expr (gfor_fndecl_internal_free, tmp);
      gfc_add_expr_to_block (post, tmp);
    }
}


/* Generate code to create and initialize the descriptor for a temporary
   array.  This is used for both temporaries needed by the scalarizer, and
   functions returning arrays.  Adjusts the loop variables to be
   zero-based, and calculates the loop bounds for callee allocated arrays.
   Allocate the array unless it's callee allocated (we have a callee
   allocated array if 'callee_alloc' is true, or if loop->to[n] is
   NULL_TREE for any n).  Also fills in the descriptor, data and offset
   fields of info if known.  Returns the size of the array, or NULL for a
   callee allocated array.

   PRE, POST, DYNAMIC and DEALLOC are as for gfc_trans_allocate_array_storage.
 */

tree
gfc_trans_create_temp_array (stmtblock_t * pre, stmtblock_t * post,
			     gfc_loopinfo * loop, gfc_ss_info * info,
			     tree eltype, bool dynamic, bool dealloc,
			     bool callee_alloc, bool function)
{
  tree type;
  tree desc;
  tree tmp;
  tree size;
  tree nelem;
  tree cond;
  tree or_expr;
  tree thencase;
  tree elsecase;
  tree var;
  stmtblock_t thenblock;
  stmtblock_t elseblock;
  int n;
  int dim;

  gcc_assert (info->dimen > 0);
  /* Set the lower bound to zero.  */
  for (dim = 0; dim < info->dimen; dim++)
    {
      n = loop->order[dim];
      if (n < loop->temp_dim)
	gcc_assert (integer_zerop (loop->from[n]));
      else
	{
	  /* Callee allocated arrays may not have a known bound yet.  */
          if (loop->to[n])
              loop->to[n] = fold_build2 (MINUS_EXPR, gfc_array_index_type,
					 loop->to[n], loop->from[n]);
	  loop->from[n] = gfc_index_zero_node;
	}

      info->delta[dim] = gfc_index_zero_node;
      info->start[dim] = gfc_index_zero_node;
      info->stride[dim] = gfc_index_one_node;
      info->dim[dim] = dim;
    }

  /* Initialize the descriptor.  */
  type =
    gfc_get_array_type_bounds (eltype, info->dimen, loop->from, loop->to, 1);
  desc = gfc_create_var (type, "atmp");
  GFC_DECL_PACKED_ARRAY (desc) = 1;

  info->descriptor = desc;
  size = gfc_index_one_node;

  /* Fill in the array dtype.  */
  tmp = gfc_conv_descriptor_dtype (desc);
  gfc_add_modify_expr (pre, tmp, gfc_get_dtype (TREE_TYPE (desc)));

  /*
     Fill in the bounds and stride.  This is a packed array, so:

     size = 1;
     for (n = 0; n < rank; n++)
       {
	 stride[n] = size
	 delta = ubound[n] + 1 - lbound[n];
         size = size * delta;
       }
     size = size * sizeof(element);
  */

  or_expr = NULL_TREE;

  for (n = 0; n < info->dimen; n++)
    {
      if (loop->to[n] == NULL_TREE)
        {
	  /* For a callee allocated array express the loop bounds in terms
	     of the descriptor fields.  */
          tmp = build2 (MINUS_EXPR, gfc_array_index_type,
			gfc_conv_descriptor_ubound (desc, gfc_rank_cst[n]),
			gfc_conv_descriptor_lbound (desc, gfc_rank_cst[n]));
          loop->to[n] = tmp;
          size = NULL_TREE;
          continue;
        }
        
      /* Store the stride and bound components in the descriptor.  */
      tmp = gfc_conv_descriptor_stride (desc, gfc_rank_cst[n]);
      gfc_add_modify_expr (pre, tmp, size);

      tmp = gfc_conv_descriptor_lbound (desc, gfc_rank_cst[n]);
      gfc_add_modify_expr (pre, tmp, gfc_index_zero_node);

      tmp = gfc_conv_descriptor_ubound (desc, gfc_rank_cst[n]);
      gfc_add_modify_expr (pre, tmp, loop->to[n]);

      tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
			 loop->to[n], gfc_index_one_node);

      if (function)
	{
	  /* Check whether the size for this dimension is negative.  */
	  cond = fold_build2 (LE_EXPR, boolean_type_node, tmp,
			  gfc_index_zero_node);

	  cond = gfc_evaluate_now (cond, pre);

	  if (n == 0)
	    or_expr = cond;
	  else
	    or_expr = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, or_expr, cond);
	}
      size = fold_build2 (MULT_EXPR, gfc_array_index_type, size, tmp);
      size = gfc_evaluate_now (size, pre);
    }

  /* Get the size of the array.  */

  if (size && !callee_alloc)
    {
      if (function)
	{
	  var = gfc_create_var (TREE_TYPE (size), "size");
	  gfc_start_block (&thenblock);
	  gfc_add_modify_expr (&thenblock, var, gfc_index_zero_node);
	  thencase = gfc_finish_block (&thenblock);

	  gfc_start_block (&elseblock);
	  gfc_add_modify_expr (&elseblock, var, size);
	  elsecase = gfc_finish_block (&elseblock);
	  
	  tmp = gfc_evaluate_now (or_expr, pre);
	  tmp = build3_v (COND_EXPR, tmp, thencase, elsecase);
	  gfc_add_expr_to_block (pre, tmp);
	  nelem = var;
	  size = var;
	}
      else
	  nelem = size;

      size = fold_build2 (MULT_EXPR, gfc_array_index_type, size,
			  TYPE_SIZE_UNIT (gfc_get_element_type (type)));
    }
  else
    {
      nelem = size;
      size = NULL_TREE;
    }

  gfc_trans_allocate_array_storage (pre, post, info, size, nelem, dynamic,
			            dealloc);

  if (info->dimen > loop->temp_dim)
    loop->temp_dim = info->dimen;

  return size;
}


/* Generate code to transpose array EXPR by creating a new descriptor
   in which the dimension specifications have been reversed.  */
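
/* Illustration: for an expression such as TRANSPOSE (A) the new descriptor
   reuses A's data pointer and offset unchanged, while dest.dim[0] is copied
   from src.dim[1] and dest.dim[1] from src.dim[0], so element (i,j) of the
   result refers to element (j,i) of A without copying any data.  */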

void
gfc_conv_array_transpose (gfc_se * se, gfc_expr * expr)
{
  tree dest, src, dest_index, src_index;
  gfc_loopinfo *loop;
  gfc_ss_info *dest_info, *src_info;
  gfc_ss *dest_ss, *src_ss;
  gfc_se src_se;
  int n;

  loop = se->loop;

  src_ss = gfc_walk_expr (expr);
  dest_ss = se->ss;

  src_info = &src_ss->data.info;
  dest_info = &dest_ss->data.info;
  gcc_assert (dest_info->dimen == 2);
  gcc_assert (src_info->dimen == 2);

  /* Get a descriptor for EXPR.  */
  gfc_init_se (&src_se, NULL);
  gfc_conv_expr_descriptor (&src_se, expr, src_ss);
  gfc_add_block_to_block (&se->pre, &src_se.pre);
  gfc_add_block_to_block (&se->post, &src_se.post);
  src = src_se.expr;

  /* Allocate a new descriptor for the return value.  */
  dest = gfc_create_var (TREE_TYPE (src), "atmp");
  dest_info->descriptor = dest;
  se->expr = dest;

  /* Copy across the dtype field.  */
  gfc_add_modify_expr (&se->pre,
		       gfc_conv_descriptor_dtype (dest),
		       gfc_conv_descriptor_dtype (src));

  /* Copy the dimension information, renumbering dimension 1 to 0 and
     0 to 1.  */
  for (n = 0; n < 2; n++)
    {
      dest_info->delta[n] = gfc_index_zero_node;
      dest_info->start[n] = gfc_index_zero_node;
      dest_info->stride[n] = gfc_index_one_node;
      dest_info->dim[n] = n;

      dest_index = gfc_rank_cst[n];
      src_index = gfc_rank_cst[1 - n];

      gfc_add_modify_expr (&se->pre,
			   gfc_conv_descriptor_stride (dest, dest_index),
			   gfc_conv_descriptor_stride (src, src_index));

      gfc_add_modify_expr (&se->pre,
			   gfc_conv_descriptor_lbound (dest, dest_index),
			   gfc_conv_descriptor_lbound (src, src_index));

      gfc_add_modify_expr (&se->pre,
			   gfc_conv_descriptor_ubound (dest, dest_index),
			   gfc_conv_descriptor_ubound (src, src_index));

      if (!loop->to[n])
        {
	  gcc_assert (integer_zerop (loop->from[n]));
	  loop->to[n] = build2 (MINUS_EXPR, gfc_array_index_type,
				gfc_conv_descriptor_ubound (dest, dest_index),
				gfc_conv_descriptor_lbound (dest, dest_index));
        }
    }

  /* Copy the data pointer.  */
  dest_info->data = gfc_conv_descriptor_data_get (src);
  gfc_conv_descriptor_data_set (&se->pre, dest, dest_info->data);

  /* Copy the offset.  This is not changed by transposition: the top-left
     element is still at the same offset as before.  */
  dest_info->offset = gfc_conv_descriptor_offset (src);
  gfc_add_modify_expr (&se->pre,
		       gfc_conv_descriptor_offset (dest),
		       dest_info->offset);

  if (dest_info->dimen > loop->temp_dim)
    loop->temp_dim = dest_info->dimen;
}


/* Return the number of iterations in a loop that starts at START,
   ends at END, and has step STEP.  */
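
/* That is, MAX (FLOOR ((END - START) / STEP) + 1, 0), converted to
   gfc_array_index_type.  */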

static tree
gfc_get_iteration_count (tree start, tree end, tree step)
{
  tree tmp;
  tree type;

  type = TREE_TYPE (step);
  tmp = fold_build2 (MINUS_EXPR, type, end, start);
  tmp = fold_build2 (FLOOR_DIV_EXPR, type, tmp, step);
  tmp = fold_build2 (PLUS_EXPR, type, tmp, build_int_cst (type, 1));
  tmp = fold_build2 (MAX_EXPR, type, tmp, build_int_cst (type, 0));
  return fold_convert (gfc_array_index_type, tmp);
}


/* Extend the data in array DESC by EXTRA elements.  */
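
/* A sketch of the generated code, for a zero-based rank 1 temporary:

     desc.dim[0].ubound += EXTRA;
     desc.data = internal_realloc (desc.data,
				   (desc.dim[0].ubound + 1) * sizeof (element));
  */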

static void
gfc_grow_array (stmtblock_t * pblock, tree desc, tree extra)
{
  tree args;
  tree tmp;
  tree size;
  tree ubound;

  if (integer_zerop (extra))
    return;

  ubound = gfc_conv_descriptor_ubound (desc, gfc_rank_cst[0]);

  /* Add EXTRA to the upper bound.  */
  tmp = build2 (PLUS_EXPR, gfc_array_index_type, ubound, extra);
  gfc_add_modify_expr (pblock, ubound, tmp);

  /* Get the value of the current data pointer.  */
  tmp = gfc_conv_descriptor_data_get (desc);
  args = gfc_chainon_list (NULL_TREE, tmp);

  /* Calculate the new array size.  */
  size = TYPE_SIZE_UNIT (gfc_get_element_type (TREE_TYPE (desc)));
  tmp = build2 (PLUS_EXPR, gfc_array_index_type, ubound, gfc_index_one_node);
  tmp = build2 (MULT_EXPR, gfc_array_index_type, tmp, size);
  args = gfc_chainon_list (args, tmp);

  /* Pick the appropriate realloc function.  */
  if (gfc_index_integer_kind == 4)
    tmp = gfor_fndecl_internal_realloc;
  else if (gfc_index_integer_kind == 8)
    tmp = gfor_fndecl_internal_realloc64;
  else
    gcc_unreachable ();

  /* Set the new data pointer.  */
  tmp = build_function_call_expr (tmp, args);
  gfc_conv_descriptor_data_set (pblock, desc, tmp);
}


/* Return true if the bounds of iterator I can only be determined
   at run time.  */

static inline bool
gfc_iterator_has_dynamic_bounds (gfc_iterator * i)
{
  return (i->start->expr_type != EXPR_CONSTANT
	  || i->end->expr_type != EXPR_CONSTANT
	  || i->step->expr_type != EXPR_CONSTANT);
}


/* Split the size of constructor element EXPR into the sum of two terms,
   one of which can be determined at compile time and one of which must
   be calculated at run time.  Set *SIZE to the former and return true
   if the latter might be nonzero.  */

static bool
gfc_get_array_constructor_element_size (mpz_t * size, gfc_expr * expr)
{
  if (expr->expr_type == EXPR_ARRAY)
    return gfc_get_array_constructor_size (size, expr->value.constructor);
  else if (expr->rank > 0)
    {
      /* Calculate everything at run time.  */
      mpz_set_ui (*size, 0);
      return true;
    }
  else
    {
      /* A single element.  */
      mpz_set_ui (*size, 1);
      return false;
    }
}


/* Like gfc_get_array_constructor_element_size, but applied to the whole
   of array constructor C.  */
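
/* For example, (/ 1, 2, A /) where A is a rank 1 array variable has a
   static size of 2 plus a dynamic part for A, so *SIZE is set to 2 and
   true is returned.  */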

static bool
gfc_get_array_constructor_size (mpz_t * size, gfc_constructor * c)
{
  gfc_iterator *i;
  mpz_t val;
  mpz_t len;
  bool dynamic;

  mpz_set_ui (*size, 0);
  mpz_init (len);
  mpz_init (val);

  dynamic = false;
  for (; c; c = c->next)
    {
      i = c->iterator;
      if (i && gfc_iterator_has_dynamic_bounds (i))
	dynamic = true;
      else
	{
	  dynamic |= gfc_get_array_constructor_element_size (&len, c->expr);
	  if (i)
	    {
	      /* Multiply the static part of the element size by the
		 number of iterations.  */
	      mpz_sub (val, i->end->value.integer, i->start->value.integer);
	      mpz_fdiv_q (val, val, i->step->value.integer);
	      mpz_add_ui (val, val, 1);
	      if (mpz_sgn (val) > 0)
		mpz_mul (len, len, val);
	      else
		mpz_set_ui (len, 0);
	    }
	  mpz_add (*size, *size, len);
	}
    }
  mpz_clear (len);
  mpz_clear (val);
  return dynamic;
}


/* Make sure offset is a variable.  */

static void
gfc_put_offset_into_var (stmtblock_t * pblock, tree * poffset,
			 tree * offsetvar)
{
  /* We should have already created the offset variable.  We cannot
     create it here because we may be in an inner scope.  */
  gcc_assert (*offsetvar != NULL_TREE);
  gfc_add_modify_expr (pblock, *offsetvar, *poffset);
  *poffset = *offsetvar;
  TREE_USED (*offsetvar) = 1;
}


/* Assign an element of an array constructor.  */

static void
gfc_trans_array_ctor_element (stmtblock_t * pblock, tree desc,
			      tree offset, gfc_se * se, gfc_expr * expr)
{
  tree tmp;
  tree args;

  gfc_conv_expr (se, expr);

  /* Store the value.  */
  tmp = build_fold_indirect_ref (gfc_conv_descriptor_data_get (desc));
  tmp = gfc_build_array_ref (tmp, offset);
  if (expr->ts.type == BT_CHARACTER)
    {
      gfc_conv_string_parameter (se);
      if (POINTER_TYPE_P (TREE_TYPE (tmp)))
	{
	  /* The temporary is an array of pointers.  */
	  se->expr = fold_convert (TREE_TYPE (tmp), se->expr);
	  gfc_add_modify_expr (&se->pre, tmp, se->expr);
	}
      else
	{
	  /* The temporary is an array of string values.  */
	  tmp = gfc_build_addr_expr (pchar_type_node, tmp);
	  /* We know the temporary and the value will be the same length,
	     so we can use memcpy.  */
	  args = gfc_chainon_list (NULL_TREE, tmp);
	  args = gfc_chainon_list (args, se->expr);
	  args = gfc_chainon_list (args, se->string_length);
	  tmp = built_in_decls[BUILT_IN_MEMCPY];
	  tmp = build_function_call_expr (tmp, args);
	  gfc_add_expr_to_block (&se->pre, tmp);
	}
    }
  else
    {
      /* TODO: Should the frontend already have done this conversion?  */
      se->expr = fold_convert (TREE_TYPE (tmp), se->expr);
      gfc_add_modify_expr (&se->pre, tmp, se->expr);
    }

  gfc_add_block_to_block (pblock, &se->pre);
  gfc_add_block_to_block (pblock, &se->post);
}


/* Add the contents of an array to the constructor.  DYNAMIC is as for
   gfc_trans_array_constructor_value.  */
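
/* The subarray is scalarized with its own loop nest; each element is stored
   at offset *POFFSET, which is incremented inside the loop body, and the
   temporary is grown up-front when DYNAMIC is set.  */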

static void
gfc_trans_array_constructor_subarray (stmtblock_t * pblock,
				      tree type ATTRIBUTE_UNUSED,
				      tree desc, gfc_expr * expr,
				      tree * poffset, tree * offsetvar,
				      bool dynamic)
{
  gfc_se se;
  gfc_ss *ss;
  gfc_loopinfo loop;
  stmtblock_t body;
  tree tmp;
  tree size;
  int n;

  /* We need this to be a variable so we can increment it.  */
  gfc_put_offset_into_var (pblock, poffset, offsetvar);

  gfc_init_se (&se, NULL);

  /* Walk the array expression.  */
  ss = gfc_walk_expr (expr);
  gcc_assert (ss != gfc_ss_terminator);

  /* Initialize the scalarizer.  */
  gfc_init_loopinfo (&loop);
  gfc_add_ss_to_loop (&loop, ss);

  /* Initialize the loop.  */
  gfc_conv_ss_startstride (&loop);
  gfc_conv_loop_setup (&loop);

  /* Make sure the constructed array has room for the new data.  */
  if (dynamic)
    {
      /* Set SIZE to the total number of elements in the subarray.  */
      size = gfc_index_one_node;
      for (n = 0; n < loop.dimen; n++)
	{
	  tmp = gfc_get_iteration_count (loop.from[n], loop.to[n],
					 gfc_index_one_node);
	  size = fold_build2 (MULT_EXPR, gfc_array_index_type, size, tmp);
	}

      /* Grow the constructed array by SIZE elements.  */
      gfc_grow_array (&loop.pre, desc, size);
    }

  /* Make the loop body.  */
  gfc_mark_ss_chain_used (ss, 1);
  gfc_start_scalarized_body (&loop, &body);
  gfc_copy_loopinfo_to_se (&se, &loop);
  se.ss = ss;

  gfc_trans_array_ctor_element (&body, desc, *poffset, &se, expr);
  gcc_assert (se.ss == gfc_ss_terminator);

  /* Increment the offset.  */
  tmp = build2 (PLUS_EXPR, gfc_array_index_type, *poffset, gfc_index_one_node);
  gfc_add_modify_expr (&body, *poffset, tmp);

  /* Finish the loop.  */
  gfc_trans_scalarizing_loops (&loop, &body);
  gfc_add_block_to_block (&loop.pre, &loop.post);
  tmp = gfc_finish_block (&loop.pre);
  gfc_add_expr_to_block (pblock, tmp);

  gfc_cleanup_loop (&loop);
}


/* Assign the values to the elements of an array constructor.  DYNAMIC
   is true if descriptor DESC only contains enough data for the static
   size calculated by gfc_get_array_constructor_size.  When true, memory
   for the dynamic parts must be allocated using realloc.  */
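
/* Illustration: for (/ 1, 2, 3, 4, (i, i=1,n) /) the four leading constants
   are collected into a static initializer and copied in with a single
   memcpy, while the implied do-loop part grows the temporary (when DYNAMIC
   is set and the bounds are not known at compile time) and assigns one
   element per iteration.  */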

static void
gfc_trans_array_constructor_value (stmtblock_t * pblock, tree type,
				   tree desc, gfc_constructor * c,
				   tree * poffset, tree * offsetvar,
				   bool dynamic)
{
  tree tmp;
  stmtblock_t body;
  gfc_se se;
  mpz_t size;

  mpz_init (size);
  for (; c; c = c->next)
    {
      /* If this is an iterator or an array, the offset must be a variable.  */
      if ((c->iterator || c->expr->rank > 0) && INTEGER_CST_P (*poffset))
	gfc_put_offset_into_var (pblock, poffset, offsetvar);

      gfc_start_block (&body);

      if (c->expr->expr_type == EXPR_ARRAY)
	{
	  /* Array constructors can be nested.  */
	  gfc_trans_array_constructor_value (&body, type, desc,
					     c->expr->value.constructor,
					     poffset, offsetvar, dynamic);
	}
      else if (c->expr->rank > 0)
	{
	  gfc_trans_array_constructor_subarray (&body, type, desc, c->expr,
						poffset, offsetvar, dynamic);
	}
      else
	{
	  /* This code really upsets the gimplifier, so don't bother for now.  */
	  gfc_constructor *p;
	  HOST_WIDE_INT n;
	  HOST_WIDE_INT size;

	  p = c;
	  n = 0;
	  while (p && !(p->iterator || p->expr->expr_type != EXPR_CONSTANT))
	    {
	      p = p->next;
	      n++;
	    }
	  if (n < 4)
	    {
	      /* Scalar values.  */
	      gfc_init_se (&se, NULL);
	      gfc_trans_array_ctor_element (&body, desc, *poffset,
					    &se, c->expr);

	      *poffset = fold_build2 (PLUS_EXPR, gfc_array_index_type,
				      *poffset, gfc_index_one_node);
	    }
	  else
	    {
	      /* Collect multiple scalar constants into a constructor.  */
	      tree list;
	      tree init;
	      tree bound;
	      tree tmptype;

	      p = c;
	      list = NULL_TREE;
              /* Count the number of consecutive scalar constants.  */
	      while (p && !(p->iterator
			    || p->expr->expr_type != EXPR_CONSTANT))
		{
		  gfc_init_se (&se, NULL);
		  gfc_conv_constant (&se, p->expr);
		  if (p->expr->ts.type == BT_CHARACTER
		      && POINTER_TYPE_P (type))
		    {
		      /* For constant character array constructors we build
			 an array of pointers.  */
		      se.expr = gfc_build_addr_expr (pchar_type_node,
						     se.expr);
		    }
		    
		  list = tree_cons (NULL_TREE, se.expr, list);
		  c = p;
		  p = p->next;
		}

	      bound = build_int_cst (NULL_TREE, n - 1);
              /* Create an array type to hold them.  */
	      tmptype = build_range_type (gfc_array_index_type,
					  gfc_index_zero_node, bound);
	      tmptype = build_array_type (type, tmptype);

	      init = build_constructor_from_list (tmptype, nreverse (list));
	      TREE_CONSTANT (init) = 1;
	      TREE_INVARIANT (init) = 1;
	      TREE_STATIC (init) = 1;
	      /* Create a static variable to hold the data.  */
	      tmp = gfc_create_var (tmptype, "data");
	      TREE_STATIC (tmp) = 1;
	      TREE_CONSTANT (tmp) = 1;
	      TREE_INVARIANT (tmp) = 1;
	      DECL_INITIAL (tmp) = init;
	      init = tmp;

	      /* Use BUILT_IN_MEMCPY to assign the values.  */
	      tmp = gfc_conv_descriptor_data_get (desc);
	      tmp = build_fold_indirect_ref (tmp);
	      tmp = gfc_build_array_ref (tmp, *poffset);
	      tmp = build_fold_addr_expr (tmp);
	      init = build_fold_addr_expr (init);

	      size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (type));
	      bound = build_int_cst (NULL_TREE, n * size);
	      tmp = gfc_chainon_list (NULL_TREE, tmp);
	      tmp = gfc_chainon_list (tmp, init);
	      tmp = gfc_chainon_list (tmp, bound);
	      tmp = build_function_call_expr (built_in_decls[BUILT_IN_MEMCPY],
					     tmp);
	      gfc_add_expr_to_block (&body, tmp);

	      *poffset = fold_build2 (PLUS_EXPR, gfc_array_index_type,
				      *poffset, build_int_cst (NULL_TREE, n));
	    }
	  if (!INTEGER_CST_P (*poffset))
            {
              gfc_add_modify_expr (&body, *offsetvar, *poffset);
              *poffset = *offsetvar;
            }
	}

      /* The frontend should already have done any expansions possible
	 at compile-time.  */
      if (!c->iterator)
	{
	  /* Pass the code as is.  */
	  tmp = gfc_finish_block (&body);
	  gfc_add_expr_to_block (pblock, tmp);
	}
      else
	{
	  /* Build the implied do-loop.  */
	  tree cond;
	  tree end;
	  tree step;
	  tree loopvar;
	  tree exit_label;
	  tree loopbody;
	  tree tmp2;

	  loopbody = gfc_finish_block (&body);

	  gfc_init_se (&se, NULL);
	  gfc_conv_expr (&se, c->iterator->var);
	  gfc_add_block_to_block (pblock, &se.pre);
	  loopvar = se.expr;

	  /* Initialize the loop.  */
	  gfc_init_se (&se, NULL);
	  gfc_conv_expr_val (&se, c->iterator->start);
	  gfc_add_block_to_block (pblock, &se.pre);
	  gfc_add_modify_expr (pblock, loopvar, se.expr);

	  gfc_init_se (&se, NULL);
	  gfc_conv_expr_val (&se, c->iterator->end);
	  gfc_add_block_to_block (pblock, &se.pre);
	  end = gfc_evaluate_now (se.expr, pblock);

	  gfc_init_se (&se, NULL);
	  gfc_conv_expr_val (&se, c->iterator->step);
	  gfc_add_block_to_block (pblock, &se.pre);
	  step = gfc_evaluate_now (se.expr, pblock);

	  /* If this array expands dynamically, and the number of iterations
	     is not constant, we won't have allocated space for the static
	     part of C->EXPR's size.  Do that now.  */
	  if (dynamic && gfc_iterator_has_dynamic_bounds (c->iterator))
	    {
	      /* Get the number of iterations.  */
	      tmp = gfc_get_iteration_count (loopvar, end, step);

	      /* Get the static part of C->EXPR's size.  */
	      gfc_get_array_constructor_element_size (&size, c->expr);
	      tmp2 = gfc_conv_mpz_to_tree (size, gfc_index_integer_kind);

	      /* Grow the array by TMP * TMP2 elements.  */
	      tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, tmp, tmp2);
	      gfc_grow_array (pblock, desc, tmp);
	    }

	  /* Generate the loop body.  */
	  exit_label = gfc_build_label_decl (NULL_TREE);
	  gfc_start_block (&body);

	  /* Generate the exit condition.  Depending on the sign of
	     the step variable we have to generate the correct
	     comparison.  */
	  tmp = fold_build2 (GT_EXPR, boolean_type_node, step, 
			     build_int_cst (TREE_TYPE (step), 0));
	  cond = fold_build3 (COND_EXPR, boolean_type_node, tmp,
			      build2 (GT_EXPR, boolean_type_node,
				      loopvar, end),
			      build2 (LT_EXPR, boolean_type_node,
				      loopvar, end));
	  tmp = build1_v (GOTO_EXPR, exit_label);
	  TREE_USED (exit_label) = 1;
	  tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt ());
	  gfc_add_expr_to_block (&body, tmp);

	  /* The main loop body.  */
	  gfc_add_expr_to_block (&body, loopbody);

	  /* Increase loop variable by step.  */
	  tmp = build2 (PLUS_EXPR, TREE_TYPE (loopvar), loopvar, step);
	  gfc_add_modify_expr (&body, loopvar, tmp);

	  /* Finish the loop.  */
	  tmp = gfc_finish_block (&body);
	  tmp = build1_v (LOOP_EXPR, tmp);
	  gfc_add_expr_to_block (pblock, tmp);

	  /* Add the exit label.  */
	  tmp = build1_v (LABEL_EXPR, exit_label);
	  gfc_add_expr_to_block (pblock, tmp);
	}
    }
  mpz_clear (size);
}


/* Figure out the string length of a variable reference expression.
   Used by get_array_ctor_strlen.  */

static void
get_array_ctor_var_strlen (gfc_expr * expr, tree * len)
{
  gfc_ref *ref;
  gfc_typespec *ts;

  /* Don't bother if we already know the length is a constant.  */
  if (*len && INTEGER_CST_P (*len))
    return;

  ts = &expr->symtree->n.sym->ts;
  for (ref = expr->ref; ref; ref = ref->next)
    {
      switch (ref->type)
	{
	case REF_ARRAY:
	  /* Array references don't change the string length.  */
	  break;

	case REF_COMPONENT:
	  /* Use the length of the component.  */
	  ts = &ref->u.c.component->ts;
	  break;

	default:
	  /* TODO: Substrings are tricky because we can't evaluate the
	     expression more than once.  For now we just give up, and hope
	     we can figure it out elsewhere.  */
	  return;
	}
    }

  *len = ts->cl->backend_decl;
}


/* Figure out the string length of a character array constructor.
   Returns TRUE if all elements are character constants.  */
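
/* For example, (/ 'ab', 'cd' /) gives a constant length of 2, whereas a
   constructor containing a character variable takes that variable's
   declared length and is not considered constant.  */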

bool
get_array_ctor_strlen (gfc_constructor * c, tree * len)
{
  bool is_const;
  
  is_const = true;
  for (; c; c = c->next)
    {
      switch (c->expr->expr_type)
	{
	case EXPR_CONSTANT:
	  if (!(*len && INTEGER_CST_P (*len)))
	    *len = build_int_cstu (gfc_charlen_type_node,
				   c->expr->value.character.length);
	  break;

	case EXPR_ARRAY:
	  if (!get_array_ctor_strlen (c->expr->value.constructor, len))
	    is_const = false;
	  break;

	case EXPR_VARIABLE:
	  is_const = false;
	  get_array_ctor_var_strlen (c->expr, len);
	  break;

	default:
	  is_const = false;
	  /* TODO: For now we just ignore anything we don't know how to
	     handle, and hope we can figure it out a different way.  */
	  break;
	}
    }

  return is_const;
}


/* Array constructors are handled by constructing a temporary, then using that
   within the scalarization loop.  This is not optimal, but seems by far the
   simplest method.  */

static void
gfc_trans_array_constructor (gfc_loopinfo * loop, gfc_ss * ss)
{
  gfc_constructor *c;
  tree offset;
  tree offsetvar;
  tree desc;
  tree type;
  bool const_string;
  bool dynamic;

  ss->data.info.dimen = loop->dimen;

  c = ss->expr->value.constructor;
  if (ss->expr->ts.type == BT_CHARACTER)
    {
      const_string = get_array_ctor_strlen (c, &ss->string_length);
      if (!ss->string_length)
	gfc_todo_error ("complex character array constructors");

      type = gfc_get_character_type_len (ss->expr->ts.kind, ss->string_length);
      if (const_string)
	type = build_pointer_type (type);
    }
  else
    {
      const_string = true;
      type = gfc_typenode_for_spec (&ss->expr->ts);
    }

  /* See if the constructor determines the loop bounds.  */
  dynamic = false;
  if (loop->to[0] == NULL_TREE)
    {
      mpz_t size;

      /* We should have a 1-dimensional, zero-based loop.  */
      gcc_assert (loop->dimen == 1);
      gcc_assert (integer_zerop (loop->from[0]));

      /* Split the constructor size into a static part and a dynamic part.
	 Allocate the static size up-front and record whether the dynamic
	 size might be nonzero.  */
      mpz_init (size);
      dynamic = gfc_get_array_constructor_size (&size, c);
      mpz_sub_ui (size, size, 1);
      loop->to[0] = gfc_conv_mpz_to_tree (size, gfc_index_integer_kind);
      mpz_clear (size);
    }

  gfc_trans_create_temp_array (&loop->pre, &loop->post, loop, &ss->data.info,
			       type, dynamic, true, false, false);

  desc = ss->data.info.descriptor;
  offset = gfc_index_zero_node;
  offsetvar = gfc_create_var_np (gfc_array_index_type, "offset");
  TREE_USED (offsetvar) = 0;
  gfc_trans_array_constructor_value (&loop->pre, type, desc, c,
				     &offset, &offsetvar, dynamic);

  /* If the array grows dynamically, the upper bound of the loop variable
     is determined by the array's final upper bound.  */
  if (dynamic)
    loop->to[0] = gfc_conv_descriptor_ubound (desc, gfc_rank_cst[0]);

  if (TREE_USED (offsetvar))
    pushdecl (offsetvar);
  else
    gcc_assert (INTEGER_CST_P (offset));
#if 0
  /* Disable bound checking for now because it's probably broken.  */
  if (flag_bounds_check)
    {
      gcc_unreachable ();
    }
#endif
}


/* INFO describes a GFC_SS_SECTION in loop LOOP, and this function is
   called after evaluating all of INFO's vector dimensions.  Go through
   each such vector dimension and see if we can now fill in any missing
   loop bounds.  */
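
/* For example, for a section A(V) with vector subscript V, the loop for
   that dimension runs from 0 to UBOUND(V) - LBOUND(V) once V's descriptor
   is available, i.e. one iteration per element of V.  */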

static void
gfc_set_vector_loop_bounds (gfc_loopinfo * loop, gfc_ss_info * info)
{
  gfc_se se;
  tree tmp;
  tree desc;
  tree zero;
  int n;
  int dim;

  for (n = 0; n < loop->dimen; n++)
    {
      dim = info->dim[n];
      if (info->ref->u.ar.dimen_type[dim] == DIMEN_VECTOR
	  && loop->to[n] == NULL)
	{
	  /* Loop variable N indexes vector dimension DIM, and we don't
	     yet know the upper bound of loop variable N.  Set it to the
	     difference between the vector's upper and lower bounds.  */
	  gcc_assert (loop->from[n] == gfc_index_zero_node);
	  gcc_assert (info->subscript[dim]
		      && info->subscript[dim]->type == GFC_SS_VECTOR);

	  gfc_init_se (&se, NULL);
	  desc = info->subscript[dim]->data.info.descriptor;
	  zero = gfc_rank_cst[0];
	  tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
			     gfc_conv_descriptor_ubound (desc, zero),
			     gfc_conv_descriptor_lbound (desc, zero));
	  tmp = gfc_evaluate_now (tmp, &loop->pre);
	  loop->to[n] = tmp;
	}
    }
}


/* Add the pre and post chains for all the scalar expressions in a SS chain
   to loop.  This is called after the loop parameters have been calculated,
   but before the actual scalarizing loops.  */

static void
gfc_add_loop_ss_code (gfc_loopinfo * loop, gfc_ss * ss, bool subscript)
{
  gfc_se se;
  int n;

  /* TODO: This can generate bad code if there are ordering dependencies,
     e.g. a callee allocated function and an unknown size constructor.  */
  gcc_assert (ss != NULL);

  for (; ss != gfc_ss_terminator; ss = ss->loop_chain)
    {
      gcc_assert (ss);

      switch (ss->type)
	{
	case GFC_SS_SCALAR:
	  /* Scalar expression.  Evaluate this now.  This includes elemental
	     dimension indices, but not array section bounds.  */
	  gfc_init_se (&se, NULL);
          gfc_conv_expr (&se, ss->expr);
          gfc_add_block_to_block (&loop->pre, &se.pre);

          if (ss->expr->ts.type != BT_CHARACTER)
            {
              /* Move the evaluation of scalar expressions outside the
                 scalarization loop.  */
              if (subscript)
                se.expr = convert(gfc_array_index_type, se.expr);
              se.expr = gfc_evaluate_now (se.expr, &loop->pre);
              gfc_add_block_to_block (&loop->pre, &se.post);
            }
          else
            gfc_add_block_to_block (&loop->post, &se.post);

	  ss->data.scalar.expr = se.expr;
	  ss->string_length = se.string_length;
	  break;

	case GFC_SS_REFERENCE:
	  /* Scalar reference.  Evaluate this now.  */
	  gfc_init_se (&se, NULL);
	  gfc_conv_expr_reference (&se, ss->expr);
	  gfc_add_block_to_block (&loop->pre, &se.pre);
	  gfc_add_block_to_block (&loop->post, &se.post);

	  ss->data.scalar.expr = gfc_evaluate_now (se.expr, &loop->pre);
	  ss->string_length = se.string_length;
	  break;

	case GFC_SS_SECTION:
	  /* Add the expressions for scalar and vector subscripts.  */
	  for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
	    if (ss->data.info.subscript[n])
	      gfc_add_loop_ss_code (loop, ss->data.info.subscript[n], true);

	  gfc_set_vector_loop_bounds (loop, &ss->data.info);
	  break;

	case GFC_SS_VECTOR:
	  /* Get the vector's descriptor and store it in SS.  */
	  gfc_init_se (&se, NULL);
	  gfc_conv_expr_descriptor (&se, ss->expr, gfc_walk_expr (ss->expr));
	  gfc_add_block_to_block (&loop->pre, &se.pre);
	  gfc_add_block_to_block (&loop->post, &se.post);
	  ss->data.info.descriptor = se.expr;
	  break;

	case GFC_SS_INTRINSIC:
	  gfc_add_intrinsic_ss_code (loop, ss);
	  break;

	case GFC_SS_FUNCTION:
	  /* Array function return value.  We call the function and save its
	     result in a temporary for use inside the loop.  */
	  gfc_init_se (&se, NULL);
	  se.loop = loop;
	  se.ss = ss;
	  gfc_conv_expr (&se, ss->expr);
	  gfc_add_block_to_block (&loop->pre, &se.pre);
	  gfc_add_block_to_block (&loop->post, &se.post);
	  ss->string_length = se.string_length;
	  break;

	case GFC_SS_CONSTRUCTOR:
	  gfc_trans_array_constructor (loop, ss);
	  break;

        case GFC_SS_TEMP:
	case GFC_SS_COMPONENT:
          /* Do nothing.  These are handled elsewhere.  */
          break;

	default:
	  gcc_unreachable ();
	}
    }
}


/* Translate expressions for the descriptor and data pointer of a SS.  */
/*GCC ARRAYS*/

static void
gfc_conv_ss_descriptor (stmtblock_t * block, gfc_ss * ss, int base)
{
  gfc_se se;
  tree tmp;

  /* Get the descriptor for the array to be scalarized.  */
  gcc_assert (ss->expr->expr_type == EXPR_VARIABLE);
  gfc_init_se (&se, NULL);
  se.descriptor_only = 1;
  gfc_conv_expr_lhs (&se, ss->expr);
  gfc_add_block_to_block (block, &se.pre);
  ss->data.info.descriptor = se.expr;
  ss->string_length = se.string_length;

  if (base)
    {
      /* Also the data pointer.  */
      tmp = gfc_conv_array_data (se.expr);
      /* If this is a variable or address of a variable we use it directly.
         Otherwise we must evaluate it now to avoid breaking dependency
	 analysis by pulling the expressions for elemental array indices
	 inside the loop.  */
      if (!(DECL_P (tmp)
	    || (TREE_CODE (tmp) == ADDR_EXPR
		&& DECL_P (TREE_OPERAND (tmp, 0)))))
	tmp = gfc_evaluate_now (tmp, block);
      ss->data.info.data = tmp;

      tmp = gfc_conv_array_offset (se.expr);
      ss->data.info.offset = gfc_evaluate_now (tmp, block);
    }
}


/* Initialize a gfc_loopinfo structure.  */

void
gfc_init_loopinfo (gfc_loopinfo * loop)
{
  int n;

  memset (loop, 0, sizeof (gfc_loopinfo));
  gfc_init_block (&loop->pre);
  gfc_init_block (&loop->post);

  /* Initially scalarize in order.  */
  for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
    loop->order[n] = n;

  loop->ss = gfc_ss_terminator;
}


/* Copies the loop variable info to a gfc_se structure. Does not copy the SS
   chain.  */

void
gfc_copy_loopinfo_to_se (gfc_se * se, gfc_loopinfo * loop)
{
  se->loop = loop;
}


/* Return an expression for the data pointer of an array.  */

tree
gfc_conv_array_data (tree descriptor)
{
  tree type;

  type = TREE_TYPE (descriptor);
  if (GFC_ARRAY_TYPE_P (type))
    {
      if (TREE_CODE (type) == POINTER_TYPE)
        return descriptor;
      else
        {
          /* Descriptorless arrays.  */
	  return build_fold_addr_expr (descriptor);
        }
    }
  else
    return gfc_conv_descriptor_data_get (descriptor);
}


/* Return an expression for the base offset of an array.  */

tree
gfc_conv_array_offset (tree descriptor)
{
  tree type;

  type = TREE_TYPE (descriptor);
  if (GFC_ARRAY_TYPE_P (type))
    return GFC_TYPE_ARRAY_OFFSET (type);
  else
    return gfc_conv_descriptor_offset (descriptor);
}


/* Get an expression for the array stride.  */

tree
gfc_conv_array_stride (tree descriptor, int dim)
{
  tree tmp;
  tree type;

  type = TREE_TYPE (descriptor);

  /* For descriptorless arrays use the array size.  */
  tmp = GFC_TYPE_ARRAY_STRIDE (type, dim);
  if (tmp != NULL_TREE)
    return tmp;

  tmp = gfc_conv_descriptor_stride (descriptor, gfc_rank_cst[dim]);
  return tmp;
}


/* Like gfc_conv_array_stride, but for the lower bound.  */

tree
gfc_conv_array_lbound (tree descriptor, int dim)
{
  tree tmp;
  tree type;

  type = TREE_TYPE (descriptor);

  tmp = GFC_TYPE_ARRAY_LBOUND (type, dim);
  if (tmp != NULL_TREE)
    return tmp;

  tmp = gfc_conv_descriptor_lbound (descriptor, gfc_rank_cst[dim]);
  return tmp;
}


/* Like gfc_conv_array_stride, but for the upper bound.  */

tree
gfc_conv_array_ubound (tree descriptor, int dim)
{
  tree tmp;
  tree type;

  type = TREE_TYPE (descriptor);

  tmp = GFC_TYPE_ARRAY_UBOUND (type, dim);
  if (tmp != NULL_TREE)
    return tmp;

  /* This should only ever happen when passing an assumed shape array
     as an actual parameter.  The value will never be used.  */
  if (GFC_ARRAY_TYPE_P (TREE_TYPE (descriptor)))
    return gfc_index_zero_node;

  tmp = gfc_conv_descriptor_ubound (descriptor, gfc_rank_cst[dim]);
  return tmp;
}


/* Generate code to perform an array index bound check.  */
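
/* The generated check is roughly

     if (index < lbound || index > ubound)
       runtime_error ("... bound of dimension N exceeded");

   emitted as two separate tests, and the index itself is returned for use
   in the subsequent address calculation.  */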

static tree
gfc_trans_array_bound_check (gfc_se * se, tree descriptor, tree index, int n)
{
  tree fault;
  tree tmp;
  char *msg;

  if (!flag_bounds_check)
    return index;

  index = gfc_evaluate_now (index, &se->pre);

  /* Check lower bound.  */
  tmp = gfc_conv_array_lbound (descriptor, n);
  fault = fold_build2 (LT_EXPR, boolean_type_node, index, tmp);
  if (se->ss)
    asprintf (&msg, "%s for array '%s', lower bound of dimension %d exceeded",
	      gfc_msg_fault, se->ss->expr->symtree->name, n+1);
  else
    asprintf (&msg, "%s, lower bound of dimension %d exceeded",
	      gfc_msg_fault, n+1);
  gfc_trans_runtime_check (fault, msg, &se->pre,
			   (se->ss ? &se->ss->expr->where : NULL));
  gfc_free (msg);

  /* Check upper bound.  */
  tmp = gfc_conv_array_ubound (descriptor, n);
  fault = fold_build2 (GT_EXPR, boolean_type_node, index, tmp);
  if (se->ss)
    asprintf (&msg, "%s for array '%s', upper bound of dimension %d exceeded",
	      gfc_msg_fault, se->ss->expr->symtree->name, n+1);
  else
    asprintf (&msg, "%s, upper bound of dimension %d exceeded",
	      gfc_msg_fault, n+1);
  gfc_trans_runtime_check (fault, msg, &se->pre,
			   (se->ss ? &se->ss->expr->where : NULL));
  gfc_free (msg);

  return index;
}


/* Return the offset for an index.  Performs bound checking for elemental
   dimensions.  Single element references are processed separately.  */

static tree
gfc_conv_array_index_offset (gfc_se * se, gfc_ss_info * info, int dim, int i,
			     gfc_array_ref * ar, tree stride)
{
  tree index;
  tree desc;
  tree data;

  /* Get the index into the array for this dimension.  */
  if (ar)
    {
      gcc_assert (ar->type != AR_ELEMENT);
      switch (ar->dimen_type[dim])
	{
	case DIMEN_ELEMENT:
	  gcc_assert (i == -1);
	  /* Elemental dimension.  */
	  gcc_assert (info->subscript[dim]
		      && info->subscript[dim]->type == GFC_SS_SCALAR);
	  /* We've already translated this value outside the loop.  */
	  index = info->subscript[dim]->data.scalar.expr;

	  index =
	    gfc_trans_array_bound_check (se, info->descriptor, index, dim);
	  break;

	case DIMEN_VECTOR:
	  gcc_assert (info && se->loop);
	  gcc_assert (info->subscript[dim]
		      && info->subscript[dim]->type == GFC_SS_VECTOR);
	  desc = info->subscript[dim]->data.info.descriptor;

	  /* Get a zero-based index into the vector.  */
	  index = fold_build2 (MINUS_EXPR, gfc_array_index_type,
			       se->loop->loopvar[i], se->loop->from[i]);

	  /* Multiply the index by the stride.  */
	  index = fold_build2 (MULT_EXPR, gfc_array_index_type,
			       index, gfc_conv_array_stride (desc, 0));

	  /* Read the vector to get an index into info->descriptor.  */
	  data = build_fold_indirect_ref (gfc_conv_array_data (desc));
	  index = gfc_build_array_ref (data, index);
	  index = gfc_evaluate_now (index, &se->pre);

	  /* Do any bounds checking on the final info->descriptor index.  */
	  index = gfc_trans_array_bound_check (se, info->descriptor,
					       index, dim);
	  break;

	case DIMEN_RANGE:
	  /* Scalarized dimension.  */
	  gcc_assert (info && se->loop);

          /* Multiply the loop variable by the stride and delta.  */
	  index = se->loop->loopvar[i];
	  index = fold_build2 (MULT_EXPR, gfc_array_index_type, index,
			       info->stride[i]);
	  index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index,
			       info->delta[i]);
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      /* Temporary array or derived type component.  */
      gcc_assert (se->loop);
      index = se->loop->loopvar[se->loop->order[i]];
      if (!integer_zerop (info->delta[i]))
	index = fold_build2 (PLUS_EXPR, gfc_array_index_type,
			     index, info->delta[i]);
    }

  /* Multiply by the stride.  */
  index = fold_build2 (MULT_EXPR, gfc_array_index_type, index, stride);

  return index;
}


/* Build a scalarized reference to an array.  */

static void
gfc_conv_scalarized_array_ref (gfc_se * se, gfc_array_ref * ar)
{
  gfc_ss_info *info;
  tree index;
  tree tmp;
  int n;

  info = &se->ss->data.info;
  if (ar)
    n = se->loop->order[0];
  else
    n = 0;

  index = gfc_conv_array_index_offset (se, info, info->dim[n], n, ar,
				       info->stride0);
  /* Add the offset for this dimension to the stored offset for all other
     dimensions.  */
  index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index, info->offset);

  tmp = build_fold_indirect_ref (info->data);
  se->expr = gfc_build_array_ref (tmp, index);
}


/* Translate access of temporary array.  */

void
gfc_conv_tmp_array_ref (gfc_se * se)
{
  se->string_length = se->ss->string_length;
  gfc_conv_scalarized_array_ref (se, NULL);
}


/* Build an array reference.  se->expr already holds the array descriptor.
   This should be either a variable, indirect variable reference or component
   reference.  For arrays which do not have a descriptor, se->expr will be
   the data pointer.
   a(i, j, k) = base[offset + i * stride[0] + j * stride[1] + k * stride[2]].  */

void
gfc_conv_array_ref (gfc_se * se, gfc_array_ref * ar, gfc_symbol * sym,
		    locus * where)
{
  int n;
  tree index;
  tree tmp;
  tree stride;
  gfc_se indexse;

  /* Handle scalarized references separately.  */
  if (ar->type != AR_ELEMENT)
    {
      gfc_conv_scalarized_array_ref (se, ar);
      gfc_advance_se_ss_chain (se);
      return;
    }

  index = gfc_index_zero_node;

  /* Calculate the offsets from all the dimensions.  */
  for (n = 0; n < ar->dimen; n++)
    {
      /* Calculate the index for this dimension.  */
      gfc_init_se (&indexse, se);
      gfc_conv_expr_type (&indexse, ar->start[n], gfc_array_index_type);
      gfc_add_block_to_block (&se->pre, &indexse.pre);

      if (flag_bounds_check &&
	  ((ar->as->type != AS_ASSUMED_SIZE && !ar->as->cp_was_assumed)
	   || n < ar->dimen - 1))
	{
	  /* Check array bounds.  */
	  tree cond;
	  char *msg;

	  indexse.expr = gfc_evaluate_now (indexse.expr, &se->pre);

	  tmp = gfc_conv_array_lbound (se->expr, n);
	  cond = fold_build2 (LT_EXPR, boolean_type_node, 
			      indexse.expr, tmp);
	  asprintf (&msg, "%s for array '%s', "
	            "lower bound of dimension %d exceeded", gfc_msg_fault,
		    sym->name, n+1);
	  gfc_trans_runtime_check (cond, msg, &se->pre, where);
	  gfc_free (msg);

	  tmp = gfc_conv_array_ubound (se->expr, n);
	  cond = fold_build2 (GT_EXPR, boolean_type_node, 
			      indexse.expr, tmp);
	  asprintf (&msg, "%s for array '%s', "
	            "upper bound of dimension %d exceeded", gfc_msg_fault,
		    sym->name, n+1);
	  gfc_trans_runtime_check (cond, msg, &se->pre, where);
	  gfc_free (msg);
	}

      /* Multiply the index by the stride.  */
      stride = gfc_conv_array_stride (se->expr, n);
      tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, indexse.expr,
			 stride);

      /* And add it to the total.  */
      index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index, tmp);
    }

  tmp = gfc_conv_array_offset (se->expr);
  if (!integer_zerop (tmp))
    index = fold_build2 (PLUS_EXPR, gfc_array_index_type, index, tmp);
      
  /* Access the calculated element.  */
  tmp = gfc_conv_array_data (se->expr);
  tmp = build_fold_indirect_ref (tmp);
  se->expr = gfc_build_array_ref (tmp, index);
}


/* Generate the code to be executed immediately before entering a
   scalarization loop.  */

static void
gfc_trans_preloop_setup (gfc_loopinfo * loop, int dim, int flag,
			 stmtblock_t * pblock)
{
  tree index;
  tree stride;
  gfc_ss_info *info;
  gfc_ss *ss;
  gfc_se se;
  int i;

  /* This code will be executed before entering the scalarization loop
     for this dimension.  */
  for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
    {
      if ((ss->useflags & flag) == 0)
	continue;

      if (ss->type != GFC_SS_SECTION
	  && ss->type != GFC_SS_FUNCTION && ss->type != GFC_SS_CONSTRUCTOR
	  && ss->type != GFC_SS_COMPONENT)
	continue;

      info = &ss->data.info;

      if (dim >= info->dimen)
	continue;

      if (dim == info->dimen - 1)
	{
	  /* For the outermost loop calculate the offset due to any
	     elemental dimensions.  It will have been initialized with the
	     base offset of the array.  */
	  if (info->ref)
	    {
	      for (i = 0; i < info->ref->u.ar.dimen; i++)
		{
		  if (info->ref->u.ar.dimen_type[i] != DIMEN_ELEMENT)
		    continue;

		  gfc_init_se (&se, NULL);
		  se.loop = loop;
		  se.expr = info->descriptor;
		  stride = gfc_conv_array_stride (info->descriptor, i);
		  index = gfc_conv_array_index_offset (&se, info, i, -1,
						       &info->ref->u.ar,
						       stride);
		  gfc_add_block_to_block (pblock, &se.pre);

		  info->offset = fold_build2 (PLUS_EXPR, gfc_array_index_type,
					      info->offset, index);
		  info->offset = gfc_evaluate_now (info->offset, pblock);
		}

	      i = loop->order[0];
	      stride = gfc_conv_array_stride (info->descriptor, info->dim[i]);
	    }
	  else
	    stride = gfc_conv_array_stride (info->descriptor, 0);

	  /* Calculate the stride of the innermost loop.  Hopefully this will
	     allow the backend optimizers to do their stuff more effectively.  */
	  info->stride0 = gfc_evaluate_now (stride, pblock);
	}
      else
	{
	  /* Add the offset for the previous loop dimension.  */
	  gfc_array_ref *ar;

	  if (info->ref)
	    {
	      ar = &info->ref->u.ar;
	      i = loop->order[dim + 1];
	    }
	  else
	    {
	      ar = NULL;
	      i = dim + 1;
	    }

	  gfc_init_se (&se, NULL);
	  se.loop = loop;
	  se.expr = info->descriptor;
	  stride = gfc_conv_array_stride (info->descriptor, info->dim[i]);
	  index = gfc_conv_array_index_offset (&se, info, info->dim[i], i,
					       ar, stride);
	  gfc_add_block_to_block (pblock, &se.pre);
	  info->offset = fold_build2 (PLUS_EXPR, gfc_array_index_type,
				      info->offset, index);
	  info->offset = gfc_evaluate_now (info->offset, pblock);
	}

      /* Remember this offset for the second loop.  */
      if (dim == loop->temp_dim - 1)
        info->saved_offset = info->offset;
    }
}


/* Start a scalarized expression.  Creates a scope and declares loop
   variables.  */

void
gfc_start_scalarized_body (gfc_loopinfo * loop, stmtblock_t * pbody)
{
  int dim;
  int n;
  int flags;

  gcc_assert (!loop->array_parameter);

  for (dim = loop->dimen - 1; dim >= 0; dim--)
    {
      n = loop->order[dim];

      gfc_start_block (&loop->code[n]);

      /* Create the loop variable.  */
      loop->loopvar[n] = gfc_create_var (gfc_array_index_type, "S");

      if (dim < loop->temp_dim)
	flags = 3;
      else
	flags = 1;
      /* Calculate values that will be constant within this loop.  */
      gfc_trans_preloop_setup (loop, dim, flags, &loop->code[n]);
    }
  gfc_start_block (pbody);
}


/* Generates the actual loop code for a scalarization loop.  */
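
/* The generated code for loop variable S is roughly equivalent to

     S = from;
     for (;;)
       {
	 if (S > to)
	   goto exit_label;
	 <loop body>;
	 S = S + 1;
       }
     exit_label:;
  */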

static void
gfc_trans_scalarized_loop_end (gfc_loopinfo * loop, int n,
			       stmtblock_t * pbody)
{
  stmtblock_t block;
  tree cond;
  tree tmp;
  tree loopbody;
  tree exit_label;

  loopbody = gfc_finish_block (pbody);

  /* Initialize the loopvar.  */
  gfc_add_modify_expr (&loop->code[n], loop->loopvar[n], loop->from[n]);

  exit_label = gfc_build_label_decl (NULL_TREE);

  /* Generate the loop body.  */
  gfc_init_block (&block);

  /* The exit condition.  */
  cond = build2 (GT_EXPR, boolean_type_node, loop->loopvar[n], loop->to[n]);
  tmp = build1_v (GOTO_EXPR, exit_label);
  TREE_USED (exit_label) = 1;
  tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt ());
  gfc_add_expr_to_block (&block, tmp);

  /* The main body.  */
  gfc_add_expr_to_block (&block, loopbody);

  /* Increment the loopvar.  */
  tmp = build2 (PLUS_EXPR, gfc_array_index_type,
		loop->loopvar[n], gfc_index_one_node);
  gfc_add_modify_expr (&block, loop->loopvar[n], tmp);

  /* Build the loop.  */
  tmp = gfc_finish_block (&block);
  tmp = build1_v (LOOP_EXPR, tmp);
  gfc_add_expr_to_block (&loop->code[n], tmp);

  /* Add the exit label.  */
  tmp = build1_v (LABEL_EXPR, exit_label);
  gfc_add_expr_to_block (&loop->code[n], tmp);
}


/* Finishes and generates the loops for a scalarized expression.  */

void
gfc_trans_scalarizing_loops (gfc_loopinfo * loop, stmtblock_t * body)
{
  int dim;
  int n;
  gfc_ss *ss;
  stmtblock_t *pblock;
  tree tmp;

  pblock = body;
  /* Generate the loops.  */
  for (dim = 0; dim < loop->dimen; dim++)
    {
      n = loop->order[dim];
      gfc_trans_scalarized_loop_end (loop, n, pblock);
      loop->loopvar[n] = NULL_TREE;
      pblock = &loop->code[n];
    }

  tmp = gfc_finish_block (pblock);
  gfc_add_expr_to_block (&loop->pre, tmp);

  /* Clear all the used flags.  */
  for (ss = loop->ss; ss; ss = ss->loop_chain)
    ss->useflags = 0;
}


/* Finish the main body of a scalarized expression, and start the secondary
   copying body.  */

void
gfc_trans_scalarized_loop_boundary (gfc_loopinfo * loop, stmtblock_t * body)
{
  int dim;
  int n;
  stmtblock_t *pblock;
  gfc_ss *ss;

  pblock = body;
  /* We finish as many loops as are used by the temporary.  */
  for (dim = 0; dim < loop->temp_dim - 1; dim++)
    {
      n = loop->order[dim];
      gfc_trans_scalarized_loop_end (loop, n, pblock);
      loop->loopvar[n] = NULL_TREE;
      pblock = &loop->code[n];
    }

  /* We don't want to finish the outermost loop entirely.  */
  n = loop->order[loop->temp_dim - 1];
  gfc_trans_scalarized_loop_end (loop, n, pblock);

  /* Restore the initial offsets.  */
  for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
    {
      if ((ss->useflags & 2) == 0)
	continue;

      if (ss->type != GFC_SS_SECTION
	  && ss->type != GFC_SS_FUNCTION && ss->type != GFC_SS_CONSTRUCTOR
	  && ss->type != GFC_SS_COMPONENT)
	continue;

      ss->data.info.offset = ss->data.info.saved_offset;
    }

  /* Restart all the inner loops we just finished.  */
  for (dim = loop->temp_dim - 2; dim >= 0; dim--)
    {
      n = loop->order[dim];

      gfc_start_block (&loop->code[n]);

      loop->loopvar[n] = gfc_create_var (gfc_array_index_type, "Q");

      gfc_trans_preloop_setup (loop, dim, 2, &loop->code[n]);
    }

  /* Start a block for the secondary copying code.  */
  gfc_start_block (body);
}


/* Calculate the upper bound of an array section.  */

static tree
gfc_conv_section_upper_bound (gfc_ss * ss, int n, stmtblock_t * pblock)
{
  int dim;
  gfc_expr *end;
  tree desc;
  tree bound;
  gfc_se se;
  gfc_ss_info *info;

  gcc_assert (ss->type == GFC_SS_SECTION);

  info = &ss->data.info;
  dim = info->dim[n];

  if (info->ref->u.ar.dimen_type[dim] == DIMEN_VECTOR)
    /* We'll calculate the upper bound once we have access to the
       vector's descriptor.  */
    return NULL;

  gcc_assert (info->ref->u.ar.dimen_type[dim] == DIMEN_RANGE);
  desc = info->descriptor;
  end = info->ref->u.ar.end[dim];

  if (end)
    {
      /* The upper bound was specified.  */
      gfc_init_se (&se, NULL);
      gfc_conv_expr_type (&se, end, gfc_array_index_type);
      gfc_add_block_to_block (pblock, &se.pre);
      bound = se.expr;
    }
  else
    {
      /* No upper bound was specified, so use the bound of the array.  */
      bound = gfc_conv_array_ubound (desc, dim);
    }

  return bound;
}


/* Calculate the lower bound and stride of an array section.  */

static void
gfc_conv_section_startstride (gfc_loopinfo * loop, gfc_ss * ss, int n)
{
  gfc_expr *start;
  gfc_expr *stride;
  tree desc;
  gfc_se se;
  gfc_ss_info *info;
  int dim;

  gcc_assert (ss->type == GFC_SS_SECTION);

  info = &ss->data.info;
  dim = info->dim[n];

  if (info->ref->u.ar.dimen_type[dim] == DIMEN_VECTOR)
    {
      /* We use a zero-based index to access the vector.  */
      info->start[n] = gfc_index_zero_node;
      info->stride[n] = gfc_index_one_node;
      return;
    }

  gcc_assert (info->ref->u.ar.dimen_type[dim] == DIMEN_RANGE);
  desc = info->descriptor;
  start = info->ref->u.ar.start[dim];
  stride = info->ref->u.ar.stride[dim];

  /* Calculate the start of the range.  For vector subscripts this will
     be the range of the vector.  */
  if (start)
    {
      /* Specified section start.  */
      gfc_init_se (&se, NULL);
      gfc_conv_expr_type (&se, start, gfc_array_index_type);
      gfc_add_block_to_block (&loop->pre, &se.pre);
      info->start[n] = se.expr;
    }
  else
    {
      /* No lower bound specified so use the bound of the array.  */
      info->start[n] = gfc_conv_array_lbound (desc, dim);
    }
  info->start[n] = gfc_evaluate_now (info->start[n], &loop->pre);

  /* Calculate the stride.  */
  if (stride == NULL)
    info->stride[n] = gfc_index_one_node;
  else
    {
      gfc_init_se (&se, NULL);
      gfc_conv_expr_type (&se, stride, gfc_array_index_type);
      gfc_add_block_to_block (&loop->pre, &se.pre);
      info->stride[n] = gfc_evaluate_now (se.expr, &loop->pre);
    }
}


/* Calculates the range start and stride for a SS chain.  Also gets the
   descriptor and data pointer.  The range of vector subscripts is the size
   of the vector.  Array bounds are also checked.  */

void
gfc_conv_ss_startstride (gfc_loopinfo * loop)
{
  int n;
  tree tmp;
  gfc_ss *ss;
  tree desc;

  loop->dimen = 0;
  /* Determine the rank of the loop.  */
  for (ss = loop->ss;
       ss != gfc_ss_terminator && loop->dimen == 0; ss = ss->loop_chain)
    {
      switch (ss->type)
	{
	case GFC_SS_SECTION:
	case GFC_SS_CONSTRUCTOR:
	case GFC_SS_FUNCTION:
	case GFC_SS_COMPONENT:
	  loop->dimen = ss->data.info.dimen;
	  break;

	/* As usual, lbound and ubound are exceptions.  */
	case GFC_SS_INTRINSIC:
	  switch (ss->expr->value.function.isym->generic_id)
	    {
	    case GFC_ISYM_LBOUND:
	    case GFC_ISYM_UBOUND:
	      loop->dimen = ss->data.info.dimen;

	    default:
	      break;
	    }

	default:
	  break;
	}
    }

  if (loop->dimen == 0)
    gfc_todo_error ("Unable to determine rank of expression");


  /* Loop over all the SS in the chain.  */
  for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
    {
      if (ss->expr && ss->expr->shape && !ss->shape)
	ss->shape = ss->expr->shape;

      switch (ss->type)
	{
	case GFC_SS_SECTION:
	  /* Get the descriptor for the array.  */
	  gfc_conv_ss_descriptor (&loop->pre, ss, !loop->array_parameter);

	  for (n = 0; n < ss->data.info.dimen; n++)
	    gfc_conv_section_startstride (loop, ss, n);
	  break;

	case GFC_SS_INTRINSIC:
	  switch (ss->expr->value.function.isym->generic_id)
	    {
	    /* Fall through to supply start and stride.  */
	    case GFC_ISYM_LBOUND:
	    case GFC_ISYM_UBOUND:
	      break;
	    default:
	      continue;
	    }

	case GFC_SS_CONSTRUCTOR:
	case GFC_SS_FUNCTION:
	  for (n = 0; n < ss->data.info.dimen; n++)
	    {
	      ss->data.info.start[n] = gfc_index_zero_node;
	      ss->data.info.stride[n] = gfc_index_one_node;
	    }
	  break;

	default:
	  break;
	}
    }

  /* The rest is just runtime bound checking.  */
  if (flag_bounds_check)
    {
      stmtblock_t block;
      tree bound;
      tree end;
      tree size[GFC_MAX_DIMENSIONS];
      gfc_ss_info *info;
      char *msg;
      int dim;

      gfc_start_block (&block);

      for (n = 0; n < loop->dimen; n++)
	size[n] = NULL_TREE;

      for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
	{
	  if (ss->type != GFC_SS_SECTION)
	    continue;

	  /* TODO: range checking for mapped dimensions.  */
	  info = &ss->data.info;

	  /* This code only checks ranges.  Elemental and vector
	     dimensions are checked later.  */
	  for (n = 0; n < loop->dimen; n++)
	    {
	      dim = info->dim[n];
	      if (info->ref->u.ar.dimen_type[dim] != DIMEN_RANGE)
		continue;

	      desc = ss->data.info.descriptor;

	      /* Check lower bound.  */
	      bound = gfc_conv_array_lbound (desc, dim);
	      tmp = info->start[n];
	      tmp = fold_build2 (LT_EXPR, boolean_type_node, tmp, bound);
	      asprintf (&msg, "%s, lower bound of dimension %d of array '%s'"
			" exceeded", gfc_msg_bounds, n+1,
			ss->expr->symtree->name);
	      gfc_trans_runtime_check (tmp, msg, &block, &ss->expr->where);
	      gfc_free (msg);

	      /* Check the upper bound.  */
	      bound = gfc_conv_array_ubound (desc, dim);
	      end = gfc_conv_section_upper_bound (ss, n, &block);
	      tmp = fold_build2 (GT_EXPR, boolean_type_node, end, bound);
	      asprintf (&msg, "%s, upper bound of dimension %d of array '%s'"
			" exceeded", gfc_msg_bounds, n+1,
			ss->expr->symtree->name);
	      gfc_trans_runtime_check (tmp, msg, &block, &ss->expr->where);
	      gfc_free (msg);

	      /* Check the section sizes match.  */
	      tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, end,
				 info->start[n]);
	      tmp = fold_build2 (FLOOR_DIV_EXPR, gfc_array_index_type, tmp,
				 info->stride[n]);
	      /* We remember the size of the first section, and check all the
	         others against this.  */
	      if (size[n])
		{
		  tmp =
		    fold_build2 (NE_EXPR, boolean_type_node, tmp, size[n]);
		  asprintf (&msg, "%s, size mismatch for dimension %d "
			    "of array '%s'", gfc_msg_bounds, n+1,
			    ss->expr->symtree->name);
		  gfc_trans_runtime_check (tmp, msg, &block, &ss->expr->where);
		  gfc_free (msg);
		}
	      else
		size[n] = gfc_evaluate_now (tmp, &block);
	    }
	}

      tmp = gfc_finish_block (&block);
      gfc_add_expr_to_block (&loop->pre, tmp);
    }
}


/* Return true if the two SS could be aliased, i.e. both point to the same data
   object.  */
/* TODO: resolve aliases based on frontend expressions.  */

static int
gfc_could_be_alias (gfc_ss * lss, gfc_ss * rss)
{
  gfc_ref *lref;
  gfc_ref *rref;
  gfc_symbol *lsym;
  gfc_symbol *rsym;

  lsym = lss->expr->symtree->n.sym;
  rsym = rss->expr->symtree->n.sym;
  if (gfc_symbols_could_alias (lsym, rsym))
    return 1;

  if (rsym->ts.type != BT_DERIVED
      && lsym->ts.type != BT_DERIVED)
    return 0;

  /* For derived types we must check all the component types.  We can ignore
     array references as these will have the same base type as the previous
     component ref.  */
  for (lref = lss->expr->ref; lref != lss->data.info.ref; lref = lref->next)
    {
      if (lref->type != REF_COMPONENT)
	continue;

      if (gfc_symbols_could_alias (lref->u.c.sym, rsym))
	return 1;

      for (rref = rss->expr->ref; rref != rss->data.info.ref;
	   rref = rref->next)
	{
	  if (rref->type != REF_COMPONENT)
	    continue;

	  if (gfc_symbols_could_alias (lref->u.c.sym, rref->u.c.sym))
	    return 1;
	}
    }

  for (rref = rss->expr->ref; rref != rss->data.info.ref; rref = rref->next)
    {
      if (rref->type != REF_COMPONENT)
	break;

      if (gfc_symbols_could_alias (rref->u.c.sym, lsym))
	return 1;
    }

  return 0;
}


/* Resolve array data dependencies.  Creates a temporary if required.  */
/* TODO: Calc dependencies with gfc_expr rather than gfc_ss, and move to
   dependency.c.  */

void
gfc_conv_resolve_dependencies (gfc_loopinfo * loop, gfc_ss * dest,
			       gfc_ss * rss)
{
  gfc_ss *ss;
  gfc_ref *lref;
  gfc_ref *rref;
  gfc_ref *aref;
  int nDepend = 0;
  int temp_dim = 0;

  loop->temp_ss = NULL;
  aref = dest->data.info.ref;
  temp_dim = 0;

  for (ss = rss; ss != gfc_ss_terminator; ss = ss->next)
    {
      if (ss->type != GFC_SS_SECTION)
	continue;

      if (gfc_could_be_alias (dest, ss)
	    || gfc_are_equivalenced_arrays (dest->expr, ss->expr))
	{
	  nDepend = 1;
	  break;
	}

      if (dest->expr->symtree->n.sym == ss->expr->symtree->n.sym)
	{
	  lref = dest->expr->ref;
	  rref = ss->expr->ref;

	  nDepend = gfc_dep_resolver (lref, rref);
#if 0
	  /* TODO : loop shifting.  */
	  if (nDepend == 1)
	    {
	      /* Mark the dimensions for LOOP SHIFTING */
	      for (n = 0; n < loop->dimen; n++)
	        {
	          int dim = dest->data.info.dim[n];

		  if (lref->u.ar.dimen_type[dim] == DIMEN_VECTOR)
		    depends[n] = 2;
		  else if (! gfc_is_same_range (&lref->u.ar,
						&rref->u.ar, dim, 0))
		    depends[n] = 1;
	         }

	      /* Put all the dimensions with dependencies in the
		 innermost loops.  */
	      dim = 0;
	      for (n = 0; n < loop->dimen; n++)
		{
		  gcc_assert (loop->order[n] == n);
		  if (depends[n])
		  loop->order[dim++] = n;
		}
	      temp_dim = dim;
	      for (n = 0; n < loop->dimen; n++)
	        {
		  if (! depends[n])
		  loop->order[dim++] = n;
		}

	      gcc_assert (dim == loop->dimen);
	      break;
	    }
#endif
	}
    }

  if (nDepend == 1)
    {
      tree base_type = gfc_typenode_for_spec (&dest->expr->ts);
      if (GFC_ARRAY_TYPE_P (base_type)
	  || GFC_DESCRIPTOR_TYPE_P (base_type))
	base_type = gfc_get_element_type (base_type);
      loop->temp_ss = gfc_get_ss ();
      loop->temp_ss->type = GFC_SS_TEMP;
      loop->temp_ss->data.temp.type = base_type;
      loop->temp_ss->string_length = dest->string_length;
      loop->temp_ss->data.temp.dimen = loop->dimen;
      loop->temp_ss->next = gfc_ss_terminator;
      gfc_add_ss_to_loop (loop, loop->temp_ss);
    }
  else
    loop->temp_ss = NULL;
}


/* Initialize the scalarization loop.  Creates the loop variables, determines
   the range of each, and creates a temporary if required.  Calculates how to
   transform from loop variables to array indices for each expression.  Also
   generates code for scalar expressions which have been moved outside the
   loop.  */
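/* A worked example (illustrative only): for the section a(5:20:3) the
   scalarizer has start = 5, stride = 3 and an upper bound of 20.  Because
   the stride is not one, the loop is normalized below to run from 0 to
   (20 - 5) / 3 = 5 with delta = 5, and the array index is later rebuilt
   roughly as delta + loop_variable * stride.  */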

void
gfc_conv_loop_setup (gfc_loopinfo * loop)
{
  int n;
  int dim;
  gfc_ss_info *info;
  gfc_ss_info *specinfo;
  gfc_ss *ss;
  tree tmp;
  tree len;
  gfc_ss *loopspec[GFC_MAX_DIMENSIONS];
  bool dynamic[GFC_MAX_DIMENSIONS];
  gfc_constructor *c;
  mpz_t *cshape;
  mpz_t i;

  mpz_init (i);
  for (n = 0; n < loop->dimen; n++)
    {
      loopspec[n] = NULL;
      dynamic[n] = false;
      /* We use one SS term, and use that to determine the bounds of the
         loop for this dimension.  We try to pick the simplest term.  */
      for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
	{
	  if (ss->shape)
	    {
	      /* The frontend has worked out the size for us.  */
	      loopspec[n] = ss;
	      continue;
	    }

	  if (ss->type == GFC_SS_CONSTRUCTOR)
	    {
	      /* An unknown size constructor will always be rank one.
		 Higher rank constructors will either have known shape,
		 or still be wrapped in a call to reshape.  */
	      gcc_assert (loop->dimen == 1);

	      /* Always prefer to use the constructor bounds if the size
		 can be determined at compile time.  Prefer not to otherwise,
		 since the general case involves realloc, and it's better to
		 avoid that overhead if possible.  */
	      c = ss->expr->value.constructor;
	      dynamic[n] = gfc_get_array_constructor_size (&i, c);
	      if (!dynamic[n] || !loopspec[n])
		loopspec[n] = ss;
	      continue;
	    }

	  /* TODO: Pick the best bound if we have a choice between a
	     function and something else.  */
          if (ss->type == GFC_SS_FUNCTION)
            {
              loopspec[n] = ss;
              continue;
            }

	  if (ss->type != GFC_SS_SECTION)
	    continue;

	  if (loopspec[n])
	    specinfo = &loopspec[n]->data.info;
	  else
	    specinfo = NULL;
	  info = &ss->data.info;

	  if (!specinfo)
	    loopspec[n] = ss;
	  /* Criteria for choosing a loop specifier (most important first):
	     doesn't need realloc
	     stride of one
	     known stride
	     known lower bound
	     known upper bound
	   */
	  else if (loopspec[n]->type == GFC_SS_CONSTRUCTOR && dynamic[n])
	    loopspec[n] = ss;
	  else if (integer_onep (info->stride[n])
		   && !integer_onep (specinfo->stride[n]))
	    loopspec[n] = ss;
	  else if (INTEGER_CST_P (info->stride[n])
		   && !INTEGER_CST_P (specinfo->stride[n]))
	    loopspec[n] = ss;
	  else if (INTEGER_CST_P (info->start[n])
		   && !INTEGER_CST_P (specinfo->start[n]))
	    loopspec[n] = ss;
	  /* We don't work out the upper bound.
	     else if (INTEGER_CST_P (info->finish[n])
	     && ! INTEGER_CST_P (specinfo->finish[n]))
	     loopspec[n] = ss; */
	}

      if (!loopspec[n])
	gfc_todo_error ("Unable to find scalarization loop specifier");

      info = &loopspec[n]->data.info;

      /* Set the extents of this range.  */
      cshape = loopspec[n]->shape;
      if (cshape && INTEGER_CST_P (info->start[n])
	  && INTEGER_CST_P (info->stride[n]))
	{
	  loop->from[n] = info->start[n];
	  mpz_set (i, cshape[n]);
	  mpz_sub_ui (i, i, 1);
	  /* To = from + (size - 1) * stride.  */
	  tmp = gfc_conv_mpz_to_tree (i, gfc_index_integer_kind);
	  if (!integer_onep (info->stride[n]))
	    tmp = fold_build2 (MULT_EXPR, gfc_array_index_type,
			       tmp, info->stride[n]);
	  loop->to[n] = fold_build2 (PLUS_EXPR, gfc_array_index_type,
				     loop->from[n], tmp);
	}
      else
	{
	  loop->from[n] = info->start[n];
	  switch (loopspec[n]->type)
	    {
	    case GFC_SS_CONSTRUCTOR:
	      /* The upper bound is calculated when we expand the
		 constructor.  */
	      gcc_assert (loop->to[n] == NULL_TREE);
	      break;

	    case GFC_SS_SECTION:
	      loop->to[n] = gfc_conv_section_upper_bound (loopspec[n], n,
							  &loop->pre);
	      break;

            case GFC_SS_FUNCTION:
	      /* The loop bound will be set when we generate the call.  */
              gcc_assert (loop->to[n] == NULL_TREE);
              break;

	    default:
	      gcc_unreachable ();
	    }
	}

      /* Transform everything so we have a simple incrementing variable.  */
      if (integer_onep (info->stride[n]))
	info->delta[n] = gfc_index_zero_node;
      else
	{
	  /* Set the delta for this section.  */
	  info->delta[n] = gfc_evaluate_now (loop->from[n], &loop->pre);
	  /* The number of iterations is (end - start + step) / step.
	     With start = 0 this simplifies to
	       last = end / step;
	       for (i = 0; i <= last; i++) {...};  */
	  tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
			     loop->to[n], loop->from[n]);
	  tmp = fold_build2 (TRUNC_DIV_EXPR, gfc_array_index_type, 
			     tmp, info->stride[n]);
	  loop->to[n] = gfc_evaluate_now (tmp, &loop->pre);
	  /* Make the loop variable start at 0.  */
	  loop->from[n] = gfc_index_zero_node;
	}
    }

  /* Add all the scalar code that can be taken out of the loops.
     This may include calculating the loop bounds, so do it before
     allocating the temporary.  */
  gfc_add_loop_ss_code (loop, loop->ss, false);

  /* If we want a temporary then create it.  */
  if (loop->temp_ss != NULL)
    {
      gcc_assert (loop->temp_ss->type == GFC_SS_TEMP);
      tmp = loop->temp_ss->data.temp.type;
      len = loop->temp_ss->string_length;
      n = loop->temp_ss->data.temp.dimen;
      memset (&loop->temp_ss->data.info, 0, sizeof (gfc_ss_info));
      loop->temp_ss->type = GFC_SS_SECTION;
      loop->temp_ss->data.info.dimen = n;
      gfc_trans_create_temp_array (&loop->pre, &loop->post, loop,
				   &loop->temp_ss->data.info, tmp, false, true,
				   false, false);
    }

  for (n = 0; n < loop->temp_dim; n++)
    loopspec[loop->order[n]] = NULL;

  mpz_clear (i);

  /* For array parameters we don't have loop variables, so don't calculate the
     translations.  */
  if (loop->array_parameter)
    return;

  /* Calculate the translation from loop variables to array indices.  */
  for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
    {
      if (ss->type != GFC_SS_SECTION && ss->type != GFC_SS_COMPONENT)
	continue;

      info = &ss->data.info;

      for (n = 0; n < info->dimen; n++)
	{
	  dim = info->dim[n];

	  /* If we are specifying the range, the delta is already set.  */
	  if (loopspec[n] != ss)
	    {
	      /* Calculate the offset relative to the loop variable.
	         First multiply by the stride.  */
	      tmp = fold_build2 (MULT_EXPR, gfc_array_index_type,
				 loop->from[n], info->stride[n]);

	      /* Then subtract this from our starting value.  */
	      tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
				 info->start[n], tmp);

	      info->delta[n] = gfc_evaluate_now (tmp, &loop->pre);
	    }
	}
    }
}


/* Fills in an array descriptor and calculates the offset of the base.
   Returns the size of the array: the element count computed below scaled by
   the element size, or zero if any extent is non-positive.  The size will be
   a simple_val, i.e. a variable or a constant.
   {
    stride = 1;
    offset = 0;
    for (n = 0; n < rank; n++)
      {
        a.lbound[n] = specified_lower_bound;
        offset = offset - a.lbound[n] * stride;
        size = 1 - lbound;
        a.ubound[n] = specified_upper_bound;
        a.stride[n] = stride;
        size = ubound + size; //size = ubound + 1 - lbound
        stride = stride * size;
      }
    return (stride);
   }  */
/*GCC ARRAYS*/
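/* A worked example (illustrative only): for ALLOCATE (a(2:4,3)) the
   algorithm above gives
     dim 0: lbound = 2, ubound = 4, stride = 1, offset = -2, extent = 3
     dim 1: lbound = 1, ubound = 3, stride = 3, offset = -5, extent = 3
   so the element count is 9 and the returned size is 9 times the element
   size; element a(i,j) then lives at data[offset + i + 3*j].  */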

static tree
gfc_array_init_size (tree descriptor, int rank, tree * poffset,
		     gfc_expr ** lower, gfc_expr ** upper,
		     stmtblock_t * pblock)
{
  tree type;
  tree tmp;
  tree size;
  tree offset;
  tree stride;
  tree cond;
  tree or_expr;
  tree thencase;
  tree elsecase;
  tree var;
  stmtblock_t thenblock;
  stmtblock_t elseblock;
  gfc_expr *ubound;
  gfc_se se;
  int n;

  type = TREE_TYPE (descriptor);

  stride = gfc_index_one_node;
  offset = gfc_index_zero_node;

  /* Set the dtype.  */
  tmp = gfc_conv_descriptor_dtype (descriptor);
  gfc_add_modify_expr (pblock, tmp, gfc_get_dtype (TREE_TYPE (descriptor)));

  or_expr = NULL_TREE;

  for (n = 0; n < rank; n++)
    {
      /* We have 3 possibilities for determining the size of the array:
         lower == NULL    => lbound = 1, ubound = upper[n]
         upper[n] == NULL => lbound = 1, ubound = lower[n]
         upper[n] != NULL => lbound = lower[n], ubound = upper[n]  */
      ubound = upper[n];

      /* Set lower bound.  */
      gfc_init_se (&se, NULL);
      if (lower == NULL)
	se.expr = gfc_index_one_node;
      else
	{
	  gcc_assert (lower[n]);
          if (ubound)
            {
	      gfc_conv_expr_type (&se, lower[n], gfc_array_index_type);
	      gfc_add_block_to_block (pblock, &se.pre);
            }
          else
            {
              se.expr = gfc_index_one_node;
              ubound = lower[n];
            }
	}
      tmp = gfc_conv_descriptor_lbound (descriptor, gfc_rank_cst[n]);
      gfc_add_modify_expr (pblock, tmp, se.expr);

      /* Work out the offset for this component.  */
      tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, se.expr, stride);
      offset = fold_build2 (MINUS_EXPR, gfc_array_index_type, offset, tmp);

      /* Start the calculation for the size of this dimension.  */
      size = build2 (MINUS_EXPR, gfc_array_index_type,
		     gfc_index_one_node, se.expr);

      /* Set upper bound.  */
      gfc_init_se (&se, NULL);
      gcc_assert (ubound);
      gfc_conv_expr_type (&se, ubound, gfc_array_index_type);
      gfc_add_block_to_block (pblock, &se.pre);

      tmp = gfc_conv_descriptor_ubound (descriptor, gfc_rank_cst[n]);
      gfc_add_modify_expr (pblock, tmp, se.expr);

      /* Store the stride.  */
      tmp = gfc_conv_descriptor_stride (descriptor, gfc_rank_cst[n]);
      gfc_add_modify_expr (pblock, tmp, stride);

      /* Calculate the size of this dimension.  */
      size = fold_build2 (PLUS_EXPR, gfc_array_index_type, se.expr, size);

      /* Check whether the size for this dimension is zero or negative.  */
      cond = fold_build2 (LE_EXPR, boolean_type_node, size,
			  gfc_index_zero_node);
      if (n == 0)
	or_expr = cond;
      else
	or_expr = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, or_expr, cond);

      /* Multiply the stride by the number of elements in this dimension.  */
      stride = fold_build2 (MULT_EXPR, gfc_array_index_type, stride, size);
      stride = gfc_evaluate_now (stride, pblock);
    }

  /* The stride is the number of elements in the array, so multiply by the
     size of an element to get the total size.  */
  tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
  size = fold_build2 (MULT_EXPR, gfc_array_index_type, stride, tmp);

  if (poffset != NULL)
    {
      offset = gfc_evaluate_now (offset, pblock);
      *poffset = offset;
    }

  var = gfc_create_var (TREE_TYPE (size), "size");
  gfc_start_block (&thenblock);
  gfc_add_modify_expr (&thenblock, var, gfc_index_zero_node);
  thencase = gfc_finish_block (&thenblock);

  gfc_start_block (&elseblock);
  gfc_add_modify_expr (&elseblock, var, size);
  elsecase = gfc_finish_block (&elseblock);

  tmp = gfc_evaluate_now (or_expr, pblock);
  tmp = build3_v (COND_EXPR, tmp, thencase, elsecase);
  gfc_add_expr_to_block (pblock, tmp);

  return var;
}


/* Initializes the descriptor and generates a call to _gfor_allocate.  Does
   the work for an ALLOCATE statement.  */
/*GCC ARRAYS*/
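/* For example, ALLOCATE (a(n)) reaches here with an AR_ELEMENT reference
   whose start expression supplies the upper bound (the lower bound defaults
   to 1).  gfc_array_init_size then fills in a's descriptor, and the
   resulting byte count is passed to the library allocation routine together
   with the data pointer and the stat argument.  */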

bool
gfc_array_allocate (gfc_se * se, gfc_expr * expr, tree pstat)
{
  tree tmp;
  tree pointer;
  tree allocate;
  tree offset;
  tree size;
  gfc_expr **lower;
  gfc_expr **upper;
  gfc_ref *ref;
  int allocatable_array;
  int must_be_pointer;

  ref = expr->ref;

  /* In Fortran 95, derived type components cannot be allocatable, so in
     ALLOCATE (foo%bar(2)), bar must be a pointer component.
     We test this by checking for ref->next.
     An implementation of TR 15581 would need to change this.  */

  if (ref)
    must_be_pointer = ref->next != NULL;
  else
    must_be_pointer = 0;
  
  /* Find the last reference in the chain.  */
  while (ref && ref->next != NULL)
    {
      gcc_assert (ref->type != REF_ARRAY || ref->u.ar.type == AR_ELEMENT);
      ref = ref->next;
    }

  if (ref == NULL || ref->type != REF_ARRAY)
    return false;

  /* Figure out the size of the array.  */
  switch (ref->u.ar.type)
    {
    case AR_ELEMENT:
      lower = NULL;
      upper = ref->u.ar.start;
      break;

    case AR_FULL:
      gcc_assert (ref->u.ar.as->type == AS_EXPLICIT);

      lower = ref->u.ar.as->lower;
      upper = ref->u.ar.as->upper;
      break;

    case AR_SECTION:
      lower = ref->u.ar.start;
      upper = ref->u.ar.end;
      break;

    default:
      gcc_unreachable ();
      break;
    }

  size = gfc_array_init_size (se->expr, ref->u.ar.as->rank, &offset,
			      lower, upper, &se->pre);

  /* Allocate memory to store the data.  */
  tmp = gfc_conv_descriptor_data_addr (se->expr);
  pointer = gfc_evaluate_now (tmp, &se->pre);

  if (must_be_pointer)
    allocatable_array = 0;
  else
    allocatable_array = expr->symtree->n.sym->attr.allocatable;

  if (TYPE_PRECISION (gfc_array_index_type) == 32)
    {
      if (allocatable_array)
	allocate = gfor_fndecl_allocate_array;
      else
	allocate = gfor_fndecl_allocate;
    }
  else if (TYPE_PRECISION (gfc_array_index_type) == 64)
    {
      if (allocatable_array)
	allocate = gfor_fndecl_allocate64_array;
      else
	allocate = gfor_fndecl_allocate64;
    }
  else
    gcc_unreachable ();

  tmp = gfc_chainon_list (NULL_TREE, pointer);
  tmp = gfc_chainon_list (tmp, size);
  tmp = gfc_chainon_list (tmp, pstat);
  tmp = build_function_call_expr (allocate, tmp);
  gfc_add_expr_to_block (&se->pre, tmp);

  tmp = gfc_conv_descriptor_offset (se->expr);
  gfc_add_modify_expr (&se->pre, tmp, offset);

  return true;
}


/* Deallocate an array variable.  Also used when an allocated variable goes
   out of scope.  */
/*GCC ARRAYS*/

tree
gfc_array_deallocate (tree descriptor, tree pstat)
{
  tree var;
  tree tmp;
  stmtblock_t block;

  gfc_start_block (&block);
  /* Get a pointer to the data.  */
  tmp = gfc_conv_descriptor_data_addr (descriptor);
  var = gfc_evaluate_now (tmp, &block);

  /* Parameter is the address of the data component.  */
  tmp = gfc_chainon_list (NULL_TREE, var);
  tmp = gfc_chainon_list (tmp, pstat);
  tmp = build_function_call_expr (gfor_fndecl_deallocate, tmp);
  gfc_add_expr_to_block (&block, tmp);

  return gfc_finish_block (&block);
}


/* Create an array constructor from an initialization expression.
   We assume the frontend already did any expansions and conversions.  */
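/* For example, the initializer in
     integer :: a(4) = 7
   takes the EXPR_CONSTANT path below and replicates the value over all four
   elements, while
     integer :: a(3) = (/ 1, 2, 3 /)
   takes the EXPR_ARRAY path and emits one constructor element per value
   (repeated values are expressed with a RANGE_EXPR index).  */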

tree
gfc_conv_array_initializer (tree type, gfc_expr * expr)
{
  gfc_constructor *c;
  tree tmp;
  mpz_t maxval;
  gfc_se se;
  HOST_WIDE_INT hi;
  unsigned HOST_WIDE_INT lo;
  tree index, range;
  VEC(constructor_elt,gc) *v = NULL;

  switch (expr->expr_type)
    {
    case EXPR_CONSTANT:
    case EXPR_STRUCTURE:
      /* A single scalar or derived type value.  Create an array with all
         elements equal to that value.  */
      gfc_init_se (&se, NULL);
      
      if (expr->expr_type == EXPR_CONSTANT)
	gfc_conv_constant (&se, expr);
      else
	gfc_conv_structure (&se, expr, 1);

      tmp = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
      gcc_assert (tmp && INTEGER_CST_P (tmp));
      hi = TREE_INT_CST_HIGH (tmp);
      lo = TREE_INT_CST_LOW (tmp);
      lo++;
      if (lo == 0)
	hi++;
      /* This will probably eat buckets of memory for large arrays.  */
      while (hi != 0 || lo != 0)
        {
	  CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, se.expr);
          if (lo == 0)
            hi--;
          lo--;
        }
      break;

    case EXPR_ARRAY:
      /* Create a vector of all the elements.  */
      for (c = expr->value.constructor; c; c = c->next)
        {
          if (c->iterator)
            {
              /* Problems occur when we get something like
                 integer :: a(lots) = (/(i, i=1,lots)/)  */
              /* TODO: Unexpanded array initializers.  */
              internal_error
                ("Possible frontend bug: array constructor not expanded");
	    }
          if (mpz_cmp_si (c->n.offset, 0) != 0)
            index = gfc_conv_mpz_to_tree (c->n.offset, gfc_index_integer_kind);
          else
            index = NULL_TREE;
	  mpz_init (maxval);
          if (mpz_cmp_si (c->repeat, 0) != 0)
            {
              tree tmp1, tmp2;

              mpz_set (maxval, c->repeat);
              mpz_add (maxval, c->n.offset, maxval);
              mpz_sub_ui (maxval, maxval, 1);
              tmp2 = gfc_conv_mpz_to_tree (maxval, gfc_index_integer_kind);
              if (mpz_cmp_si (c->n.offset, 0) != 0)
                {
                  mpz_add_ui (maxval, c->n.offset, 1);
                  tmp1 = gfc_conv_mpz_to_tree (maxval, gfc_index_integer_kind);
                }
              else
                tmp1 = gfc_conv_mpz_to_tree (c->n.offset, gfc_index_integer_kind);

              range = build2 (RANGE_EXPR, integer_type_node, tmp1, tmp2);
            }
          else
            range = NULL;
	  mpz_clear (maxval);

          gfc_init_se (&se, NULL);
	  switch (c->expr->expr_type)
	    {
	    case EXPR_CONSTANT:
	      gfc_conv_constant (&se, c->expr);
              if (range == NULL_TREE)
		CONSTRUCTOR_APPEND_ELT (v, index, se.expr);
              else
                {
                  if (index != NULL_TREE)
		    CONSTRUCTOR_APPEND_ELT (v, index, se.expr);
		  CONSTRUCTOR_APPEND_ELT (v, range, se.expr);
                }
	      break;

	    case EXPR_STRUCTURE:
              gfc_conv_structure (&se, c->expr, 1);
	      CONSTRUCTOR_APPEND_ELT (v, index, se.expr);
	      break;

	    default:
	      gcc_unreachable ();
	    }
        }
      break;

    default:
      gcc_unreachable ();
    }

  /* Create a constructor from the list of elements.  */
  tmp = build_constructor (type, v);
  TREE_CONSTANT (tmp) = 1;
  TREE_INVARIANT (tmp) = 1;
  return tmp;
}


/* Generate code to evaluate non-constant array bounds.  Sets *poffset and
   returns the size (in elements) of the array.  */
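/* In other words (sketch): starting with stride(0) = 1, each dimension
   contributes
     offset -= lbound(dim) * stride(dim);
     stride(dim+1) = stride(dim) * (ubound(dim) + 1 - lbound(dim));
   and the value returned is the total element count.  */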

static tree
gfc_trans_array_bounds (tree type, gfc_symbol * sym, tree * poffset,
                        stmtblock_t * pblock)
{
  gfc_array_spec *as;
  tree size;
  tree stride;
  tree offset;
  tree ubound;
  tree lbound;
  tree tmp;
  gfc_se se;

  int dim;

  as = sym->as;

  size = gfc_index_one_node;
  offset = gfc_index_zero_node;
  for (dim = 0; dim < as->rank; dim++)
    {
      /* Evaluate non-constant array bound expressions.  */
      lbound = GFC_TYPE_ARRAY_LBOUND (type, dim);
      if (as->lower[dim] && !INTEGER_CST_P (lbound))
        {
          gfc_init_se (&se, NULL);
          gfc_conv_expr_type (&se, as->lower[dim], gfc_array_index_type);
          gfc_add_block_to_block (pblock, &se.pre);
          gfc_add_modify_expr (pblock, lbound, se.expr);
        }
      ubound = GFC_TYPE_ARRAY_UBOUND (type, dim);
      if (as->upper[dim] && !INTEGER_CST_P (ubound))
        {
          gfc_init_se (&se, NULL);
          gfc_conv_expr_type (&se, as->upper[dim], gfc_array_index_type);
          gfc_add_block_to_block (pblock, &se.pre);
          gfc_add_modify_expr (pblock, ubound, se.expr);
        }
      /* The offset of this dimension.  offset = offset - lbound * stride.  */
      tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, lbound, size);
      offset = fold_build2 (MINUS_EXPR, gfc_array_index_type, offset, tmp);

      /* The size of this dimension, and the stride of the next.  */
      if (dim + 1 < as->rank)
        stride = GFC_TYPE_ARRAY_STRIDE (type, dim + 1);
      else
	stride = GFC_TYPE_ARRAY_SIZE (type);

      if (ubound != NULL_TREE && !(stride && INTEGER_CST_P (stride)))
        {
          /* Calculate stride = size * (ubound + 1 - lbound).  */
          tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
			     gfc_index_one_node, lbound);
          tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type, ubound, tmp);
          tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, size, tmp);
          if (stride)
            gfc_add_modify_expr (pblock, stride, tmp);
          else
            stride = gfc_evaluate_now (tmp, pblock);
        }

      size = stride;
    }

  gfc_trans_vla_type_sizes (sym, pblock);

  *poffset = offset;
  return size;
}


/* Generate code to initialize/allocate an array variable.  */
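/* Roughly, for an automatic array such as WORK in
     subroutine sub (n)
       integer :: n
       real :: work(n)
   the bounds are evaluated on entry, storage is obtained from the runtime
   allocator and released again before the function returns; constant-size
   arrays whose storage is on the stack skip the allocation entirely.  */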

tree
gfc_trans_auto_array_allocation (tree decl, gfc_symbol * sym, tree fnbody)
{
  stmtblock_t block;
  tree type;
  tree tmp;
  tree fndecl;
  tree size;
  tree offset;
  bool onstack;

  gcc_assert (!(sym->attr.pointer || sym->attr.allocatable));

  /* Do nothing for USEd variables.  */
  if (sym->attr.use_assoc)
    return fnbody;

  type = TREE_TYPE (decl);
  gcc_assert (GFC_ARRAY_TYPE_P (type));
  onstack = TREE_CODE (type) != POINTER_TYPE;

  gfc_start_block (&block);

  /* Evaluate character string length.  */
  if (sym->ts.type == BT_CHARACTER
      && onstack && !INTEGER_CST_P (sym->ts.cl->backend_decl))
    {
      gfc_trans_init_string_length (sym->ts.cl, &block);

      gfc_trans_vla_type_sizes (sym, &block);

      /* Emit a DECL_EXPR for this variable, which will cause the
	 gimplifier to allocate storage, and all that good stuff.  */
      tmp = build1 (DECL_EXPR, TREE_TYPE (decl), decl);
      gfc_add_expr_to_block (&block, tmp);
    }

  if (onstack)
    {
      gfc_add_expr_to_block (&block, fnbody);
      return gfc_finish_block (&block);
    }

  type = TREE_TYPE (type);

  gcc_assert (!sym->attr.use_assoc);
  gcc_assert (!TREE_STATIC (decl));
  gcc_assert (!sym->module);

  if (sym->ts.type == BT_CHARACTER
      && !INTEGER_CST_P (sym->ts.cl->backend_decl))
    gfc_trans_init_string_length (sym->ts.cl, &block);

  size = gfc_trans_array_bounds (type, sym, &offset, &block);

  /* Don't actually allocate space for Cray Pointees.  */
  if (sym->attr.cray_pointee)
    {
      if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
	gfc_add_modify_expr (&block, GFC_TYPE_ARRAY_OFFSET (type), offset);
      gfc_add_expr_to_block (&block, fnbody);
      return gfc_finish_block (&block);
    }

  /* The size is the number of elements in the array, so multiply by the
     size of an element to get the total size.  */
  tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
  size = fold_build2 (MULT_EXPR, gfc_array_index_type, size, tmp);

  /* Allocate memory to hold the data.  */
  tmp = gfc_chainon_list (NULL_TREE, size);

  if (gfc_index_integer_kind == 4)
    fndecl = gfor_fndecl_internal_malloc;
  else if (gfc_index_integer_kind == 8)
    fndecl = gfor_fndecl_internal_malloc64;
  else
    gcc_unreachable ();
  tmp = build_function_call_expr (fndecl, tmp);
  tmp = fold (convert (TREE_TYPE (decl), tmp));
  gfc_add_modify_expr (&block, decl, tmp);

  /* Set offset of the array.  */
  if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
    gfc_add_modify_expr (&block, GFC_TYPE_ARRAY_OFFSET (type), offset);


  /* Automatic arrays should not have initializers.  */
  gcc_assert (!sym->value);

  gfc_add_expr_to_block (&block, fnbody);

  /* Free the temporary.  */
  tmp = convert (pvoid_type_node, decl);
  tmp = gfc_chainon_list (NULL_TREE, tmp);
  tmp = build_function_call_expr (gfor_fndecl_internal_free, tmp);
  gfc_add_expr_to_block (&block, tmp);

  return gfc_finish_block (&block);
}


/* Generate entry and exit code for g77 calling convention arrays.  */

tree
gfc_trans_g77_array (gfc_symbol * sym, tree body)
{
  tree parm;
  tree type;
  locus loc;
  tree offset;
  tree tmp;
  stmtblock_t block;

  gfc_get_backend_locus (&loc);
  gfc_set_backend_locus (&sym->declared_at);

  /* Descriptor type.  */
  parm = sym->backend_decl;
  type = TREE_TYPE (parm);
  gcc_assert (GFC_ARRAY_TYPE_P (type));

  gfc_start_block (&block);

  if (sym->ts.type == BT_CHARACTER
      && TREE_CODE (sym->ts.cl->backend_decl) == VAR_DECL)
    gfc_trans_init_string_length (sym->ts.cl, &block);

  /* Evaluate the bounds of the array.  */
  gfc_trans_array_bounds (type, sym, &offset, &block);

  /* Set the offset.  */
  if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
    gfc_add_modify_expr (&block, GFC_TYPE_ARRAY_OFFSET (type), offset);

  /* Set the pointer itself if we aren't using the parameter directly.  */
  if (TREE_CODE (parm) != PARM_DECL)
    {
      tmp = convert (TREE_TYPE (parm), GFC_DECL_SAVED_DESCRIPTOR (parm));
      gfc_add_modify_expr (&block, parm, tmp);
    }
  tmp = gfc_finish_block (&block);

  gfc_set_backend_locus (&loc);

  gfc_start_block (&block);
  /* Add the initialization code to the start of the function.  */
  gfc_add_expr_to_block (&block, tmp);
  gfc_add_expr_to_block (&block, body);

  return gfc_finish_block (&block);
}


/* Modify the descriptor of an array parameter so that it has the
   correct lower bound.  Also move the upper bound accordingly.
   If the array is not packed, it will be copied into a temporary.
   For each dimension we set the new lower and upper bounds.  Then we copy the
   stride and calculate the offset for this dimension.  We also work out
   what the stride of a packed array would be, and see if the two match.
   If the array needs repacking, we set the stride to the values we just
   calculated, recalculate the offset and copy the array data.
   Code is also added to copy the data back at the end of the function.  */
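/* For example, an assumed-shape dummy declared as
     real :: x(5:, :)
   keeps the extents from the caller's descriptor but is given the local
   lower bound 5 in the first dimension; the upper bound is shifted by the
   same amount, i.e. ubound = 5 + (dubound - dlbound).  */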

tree
gfc_trans_dummy_array_bias (gfc_symbol * sym, tree tmpdesc, tree body)
{
  tree size;
  tree type;
  tree offset;
  locus loc;
  stmtblock_t block;
  stmtblock_t cleanup;
  tree lbound;
  tree ubound;
  tree dubound;
  tree dlbound;
  tree dumdesc;
  tree tmp;
  tree stmt;
  tree stride, stride2;
  tree stmt_packed;
  tree stmt_unpacked;
  tree partial;
  gfc_se se;
  int n;
  int checkparm;
  int no_repack;
  bool optional_arg;

  /* Do nothing for pointer and allocatable arrays.  */
  if (sym->attr.pointer || sym->attr.allocatable)
    return body;

  if (sym->attr.dummy && gfc_is_nodesc_array (sym))
    return gfc_trans_g77_array (sym, body);

  gfc_get_backend_locus (&loc);
  gfc_set_backend_locus (&sym->declared_at);

  /* Descriptor type.  */
  type = TREE_TYPE (tmpdesc);
  gcc_assert (GFC_ARRAY_TYPE_P (type));
  dumdesc = GFC_DECL_SAVED_DESCRIPTOR (tmpdesc);
  dumdesc = build_fold_indirect_ref (dumdesc);
  gfc_start_block (&block);

  if (sym->ts.type == BT_CHARACTER
      && TREE_CODE (sym->ts.cl->backend_decl) == VAR_DECL)
    gfc_trans_init_string_length (sym->ts.cl, &block);

  checkparm = (sym->as->type == AS_EXPLICIT && flag_bounds_check);

  no_repack = !(GFC_DECL_PACKED_ARRAY (tmpdesc)
                || GFC_DECL_PARTIAL_PACKED_ARRAY (tmpdesc));

  if (GFC_DECL_PARTIAL_PACKED_ARRAY (tmpdesc))
    {
      /* For non-constant shape arrays we only check if the first dimension
         is contiguous.  Repacking higher dimensions wouldn't gain us
         anything as we still don't know the array stride.  */
      partial = gfc_create_var (boolean_type_node, "partial");
      TREE_USED (partial) = 1;
      tmp = gfc_conv_descriptor_stride (dumdesc, gfc_rank_cst[0]);
      tmp = fold_build2 (EQ_EXPR, boolean_type_node, tmp, gfc_index_one_node);
      gfc_add_modify_expr (&block, partial, tmp);
    }
  else
    {
      partial = NULL_TREE;
    }

  /* The naming of stmt_unpacked and stmt_packed may be counter-intuitive
     here, but I think it does the right thing.  */
  if (no_repack)
    {
      /* Set the first stride.  */
      stride = gfc_conv_descriptor_stride (dumdesc, gfc_rank_cst[0]);
      stride = gfc_evaluate_now (stride, &block);

      tmp = build2 (EQ_EXPR, boolean_type_node, stride, gfc_index_zero_node);
      tmp = build3 (COND_EXPR, gfc_array_index_type, tmp,
		    gfc_index_one_node, stride);
      stride = GFC_TYPE_ARRAY_STRIDE (type, 0);
      gfc_add_modify_expr (&block, stride, tmp);

      /* Allow the user to disable array repacking.  */
      stmt_unpacked = NULL_TREE;
    }
  else
    {
      gcc_assert (integer_onep (GFC_TYPE_ARRAY_STRIDE (type, 0)));
      /* A library call to repack the array if necessary.  */
      tmp = GFC_DECL_SAVED_DESCRIPTOR (tmpdesc);
      tmp = gfc_chainon_list (NULL_TREE, tmp);
      stmt_unpacked = build_function_call_expr (gfor_fndecl_in_pack, tmp);

      stride = gfc_index_one_node;
    }

  /* This is for the case where the array data is used directly without
     calling the repack function.  */
  if (no_repack || partial != NULL_TREE)
    stmt_packed = gfc_conv_descriptor_data_get (dumdesc);
  else
    stmt_packed = NULL_TREE;

  /* Assign the data pointer.  */
  if (stmt_packed != NULL_TREE && stmt_unpacked != NULL_TREE)
    {
      /* Don't repack unknown shape arrays when the first stride is 1.  */
      tmp = build3 (COND_EXPR, TREE_TYPE (stmt_packed), partial,
		    stmt_packed, stmt_unpacked);
    }
  else
    tmp = stmt_packed != NULL_TREE ? stmt_packed : stmt_unpacked;
  gfc_add_modify_expr (&block, tmpdesc, fold_convert (type, tmp));

  offset = gfc_index_zero_node;
  size = gfc_index_one_node;

  /* Evaluate the bounds of the array.  */
  for (n = 0; n < sym->as->rank; n++)
    {
      if (checkparm || !sym->as->upper[n])
	{
	  /* Get the bounds of the actual parameter.  */
	  dubound = gfc_conv_descriptor_ubound (dumdesc, gfc_rank_cst[n]);
	  dlbound = gfc_conv_descriptor_lbound (dumdesc, gfc_rank_cst[n]);
	}
      else
        {
	  dubound = NULL_TREE;
	  dlbound = NULL_TREE;
        }

      lbound = GFC_TYPE_ARRAY_LBOUND (type, n);
      if (!INTEGER_CST_P (lbound))
        {
          gfc_init_se (&se, NULL);
          gfc_conv_expr_type (&se, sym->as->lower[n],
                              gfc_array_index_type);
          gfc_add_block_to_block (&block, &se.pre);
          gfc_add_modify_expr (&block, lbound, se.expr);
        }

      ubound = GFC_TYPE_ARRAY_UBOUND (type, n);
      /* Set the desired upper bound.  */
      if (sym->as->upper[n])
	{
	  /* We know what we want the upper bound to be.  */
          if (!INTEGER_CST_P (ubound))
            {
	      gfc_init_se (&se, NULL);
	      gfc_conv_expr_type (&se, sym->as->upper[n],
                                  gfc_array_index_type);
	      gfc_add_block_to_block (&block, &se.pre);
              gfc_add_modify_expr (&block, ubound, se.expr);
            }

	  /* Check the sizes match.  */
	  if (checkparm)
	    {
	      /* Check (ubound(a) - lbound(a) == ubound(b) - lbound(b)).  */
	      char * msg;

	      tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
				 ubound, lbound);
              stride2 = build2 (MINUS_EXPR, gfc_array_index_type,
			       dubound, dlbound);
              tmp = fold_build2 (NE_EXPR, gfc_array_index_type, tmp, stride2);
	      asprintf (&msg, "%s for dimension %d of array '%s'",
			gfc_msg_bounds, n+1, sym->name);
	      gfc_trans_runtime_check (tmp, msg, &block, NULL);
	      gfc_free (msg);
	    }
	}
      else
	{
	  /* For assumed-shape arrays, move the upper bound by the same
	     amount as the lower bound.  */
          tmp = build2 (MINUS_EXPR, gfc_array_index_type, dubound, dlbound);
          tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type, tmp, lbound);
          gfc_add_modify_expr (&block, ubound, tmp);
	}
      /* The offset of this dimension.  offset = offset - lbound * stride.  */
      tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, lbound, stride);
      offset = fold_build2 (MINUS_EXPR, gfc_array_index_type, offset, tmp);

      /* The size of this dimension, and the stride of the next.  */
      if (n + 1 < sym->as->rank)
        {
          stride = GFC_TYPE_ARRAY_STRIDE (type, n + 1);

          if (no_repack || partial != NULL_TREE)
            {
              stmt_unpacked =
                gfc_conv_descriptor_stride (dumdesc, gfc_rank_cst[n+1]);
            }

          /* Figure out the stride if not a known constant.  */
          if (!INTEGER_CST_P (stride))
            {
              if (no_repack)
                stmt_packed = NULL_TREE;
              else
                {
                  /* Calculate stride = size * (ubound + 1 - lbound).  */
                  tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
				     gfc_index_one_node, lbound);
                  tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
				     ubound, tmp);
                  size = fold_build2 (MULT_EXPR, gfc_array_index_type,
				      size, tmp);
                  stmt_packed = size;
                }

              /* Assign the stride.  */
              if (stmt_packed != NULL_TREE && stmt_unpacked != NULL_TREE)
		tmp = build3 (COND_EXPR, gfc_array_index_type, partial,
			      stmt_unpacked, stmt_packed);
              else
                tmp = (stmt_packed != NULL_TREE) ? stmt_packed : stmt_unpacked;
              gfc_add_modify_expr (&block, stride, tmp);
            }
        }
      else
	{
	  stride = GFC_TYPE_ARRAY_SIZE (type);

	  if (stride && !INTEGER_CST_P (stride))
	    {
	      /* Calculate size = stride * (ubound + 1 - lbound).  */
	      tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
				 gfc_index_one_node, lbound);
	      tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type,
				 ubound, tmp);
	      tmp = fold_build2 (MULT_EXPR, gfc_array_index_type,
				 GFC_TYPE_ARRAY_STRIDE (type, n), tmp);
	      gfc_add_modify_expr (&block, stride, tmp);
	    }
	}
    }

  /* Set the offset.  */
  if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
    gfc_add_modify_expr (&block, GFC_TYPE_ARRAY_OFFSET (type), offset);

  gfc_trans_vla_type_sizes (sym, &block);

  stmt = gfc_finish_block (&block);

  gfc_start_block (&block);

  /* Only do the entry/initialization code if the arg is present.  */
  dumdesc = GFC_DECL_SAVED_DESCRIPTOR (tmpdesc);
  optional_arg = (sym->attr.optional
		  || (sym->ns->proc_name->attr.entry_master
		      && sym->attr.dummy));
  if (optional_arg)
    {
      tmp = gfc_conv_expr_present (sym);
      stmt = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt ());
    }
  gfc_add_expr_to_block (&block, stmt);

  /* Add the main function body.  */
  gfc_add_expr_to_block (&block, body);

  /* Cleanup code.  */
  if (!no_repack)
    {
      gfc_start_block (&cleanup);
      
      if (sym->attr.intent != INTENT_IN)
	{
	  /* Copy the data back.  */
	  tmp = gfc_chainon_list (NULL_TREE, dumdesc);
	  tmp = gfc_chainon_list (tmp, tmpdesc);
	  tmp = build_function_call_expr (gfor_fndecl_in_unpack, tmp);
	  gfc_add_expr_to_block (&cleanup, tmp);
	}

      /* Free the temporary.  */
      tmp = gfc_chainon_list (NULL_TREE, tmpdesc);
      tmp = build_function_call_expr (gfor_fndecl_internal_free, tmp);
      gfc_add_expr_to_block (&cleanup, tmp);

      stmt = gfc_finish_block (&cleanup);
	
      /* Only do the cleanup if the array was repacked.  */
      tmp = build_fold_indirect_ref (dumdesc);
      tmp = gfc_conv_descriptor_data_get (tmp);
      tmp = build2 (NE_EXPR, boolean_type_node, tmp, tmpdesc);
      stmt = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt ());

      if (optional_arg)
        {
          tmp = gfc_conv_expr_present (sym);
          stmt = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt ());
        }
      gfc_add_expr_to_block (&block, stmt);
    }
  /* We don't need to free any memory allocated by internal_pack as it will
     be freed at the end of the function by pop_context.  */
  return gfc_finish_block (&block);
}


/* Convert an array for passing as an actual argument.  Expressions and
   vector subscripts are evaluated and stored in a temporary, which is then
   passed.  For whole arrays the descriptor is passed.  For array sections
   a modified copy of the descriptor is passed, but using the original data.

   This function is also used for array pointer assignments, and there
   are three cases:

     - want_pointer && !se->direct_byref
	 EXPR is an actual argument.  On exit, se->expr contains a
	 pointer to the array descriptor.

     - !want_pointer && !se->direct_byref
	 EXPR is an actual argument to an intrinsic function or the
	 left-hand side of a pointer assignment.  On exit, se->expr
	 contains the descriptor for EXPR.

     - !want_pointer && se->direct_byref
	 EXPR is the right-hand side of a pointer assignment and
	 se->expr is the descriptor for the previously-evaluated
	 left-hand side.  The function creates an assignment from
	 EXPR to se->expr.  */
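/* For instance, passing the section a(2:10:2) as an actual argument takes
   the final branch below: a new descriptor "parm" is built whose data
   pointer addresses a(2), whose stride is twice that of A and whose bounds
   start at 1; no element data is copied.  */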

void
gfc_conv_expr_descriptor (gfc_se * se, gfc_expr * expr, gfc_ss * ss)
{
  gfc_loopinfo loop;
  gfc_ss *secss;
  gfc_ss_info *info;
  int need_tmp;
  int n;
  tree tmp;
  tree desc;
  stmtblock_t block;
  tree start;
  tree offset;
  int full;
  gfc_ref *ref;

  gcc_assert (ss != gfc_ss_terminator);

  /* TODO: Pass constant array constructors without a temporary.  */
  /* Special case things we know we can pass easily.  */
  switch (expr->expr_type)
    {
    case EXPR_VARIABLE:
      /* If we have a linear array section, we can pass it directly.
	 Otherwise we need to copy it into a temporary.  */

      /* Find the SS for the array section.  */
      secss = ss;
      while (secss != gfc_ss_terminator && secss->type != GFC_SS_SECTION)
	secss = secss->next;

      gcc_assert (secss != gfc_ss_terminator);
      info = &secss->data.info;

      /* Get the descriptor for the array.  */
      gfc_conv_ss_descriptor (&se->pre, secss, 0);
      desc = info->descriptor;

      need_tmp = gfc_ref_needs_temporary_p (expr->ref);
      if (need_tmp)
	full = 0;
      else if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
	{
	  /* Create a new descriptor if the array doesn't have one.  */
	  full = 0;
	}
      else if (info->ref->u.ar.type == AR_FULL)
	full = 1;
      else if (se->direct_byref)
	full = 0;
      else
	{
	  ref = info->ref;
	  gcc_assert (ref->u.ar.type == AR_SECTION);

	  full = 1;
	  for (n = 0; n < ref->u.ar.dimen; n++)
	    {
	      /* Detect passing the full array as a section.  This could do
	         even more checking, but it doesn't seem worth it.  */
	      if (ref->u.ar.start[n]
		  || ref->u.ar.end[n]
		  || (ref->u.ar.stride[n]
		      && !gfc_expr_is_one (ref->u.ar.stride[n], 0)))
		{
		  full = 0;
		  break;
		}
	    }
	}

      if (full)
	{
	  if (se->direct_byref)
	    {
	      /* Copy the descriptor for pointer assignments.  */
	      gfc_add_modify_expr (&se->pre, se->expr, desc);
	    }
	  else if (se->want_pointer)
	    {
	      /* We pass full arrays directly.  This means that pointers and
		 allocatable arrays should also work.  */
	      se->expr = build_fold_addr_expr (desc);
	    }
	  else
	    {
	      se->expr = desc;
	    }

	  if (expr->ts.type == BT_CHARACTER)
	    se->string_length = gfc_get_expr_charlen (expr);

	  return;
	}
      break;
      
    case EXPR_FUNCTION:
      /* A transformational function return value will be a temporary
	 array descriptor.  We still need to go through the scalarizer
	 to create the descriptor.  Elemental functions are handled as
	 arbitrary expressions, i.e. copied to a temporary.  */
      secss = ss;
      /* Look for the SS for this function.  */
      while (secss != gfc_ss_terminator
	     && (secss->type != GFC_SS_FUNCTION || secss->expr != expr))
      	secss = secss->next;

      if (se->direct_byref)
	{
	  gcc_assert (secss != gfc_ss_terminator);

	  /* For pointer assignments pass the descriptor directly.  */
	  se->ss = secss;
	  se->expr = build_fold_addr_expr (se->expr);
	  gfc_conv_expr (se, expr);
	  return;
	}

      if (secss == gfc_ss_terminator)
	{
	  /* Elemental function.  */
	  need_tmp = 1;
	  info = NULL;
	}
      else
	{
	  /* Transformational function.  */
	  info = &secss->data.info;
	  need_tmp = 0;
	}
      break;

    default:
      /* Something complicated.  Copy it into a temporary.  */
      need_tmp = 1;
      secss = NULL;
      info = NULL;
      break;
    }


  gfc_init_loopinfo (&loop);

  /* Associate the SS with the loop.  */
  gfc_add_ss_to_loop (&loop, ss);

  /* Tell the scalarizer not to bother creating loop variables, etc.  */
  if (!need_tmp)
    loop.array_parameter = 1;
  else
    /* The right-hand side of a pointer assignment mustn't use a temporary.  */
    gcc_assert (!se->direct_byref);

  /* Setup the scalarizing loops and bounds.  */
  gfc_conv_ss_startstride (&loop);

  if (need_tmp)
    {
      /* Tell the scalarizer to make a temporary.  */
      loop.temp_ss = gfc_get_ss ();
      loop.temp_ss->type = GFC_SS_TEMP;
      loop.temp_ss->next = gfc_ss_terminator;
      if (expr->ts.type == BT_CHARACTER)
	{
	  if (expr->ts.cl
	      && expr->ts.cl->length
	      && expr->ts.cl->length->expr_type == EXPR_CONSTANT)
	    {
	      expr->ts.cl->backend_decl
		= gfc_conv_mpz_to_tree (expr->ts.cl->length->value.integer,
					expr->ts.cl->length->ts.kind);
	      loop.temp_ss->data.temp.type
		= gfc_typenode_for_spec (&expr->ts);
	      loop.temp_ss->string_length
		= TYPE_SIZE_UNIT (loop.temp_ss->data.temp.type);
	    }
	  else
	    {
	      loop.temp_ss->data.temp.type
		= gfc_typenode_for_spec (&expr->ts);
	      loop.temp_ss->string_length = expr->ts.cl->backend_decl;
	    }
	  se->string_length = loop.temp_ss->string_length;
	}
      else
	{
	  loop.temp_ss->data.temp.type
	    = gfc_typenode_for_spec (&expr->ts);
	  loop.temp_ss->string_length = NULL;
	}
      loop.temp_ss->data.temp.dimen = loop.dimen;
      gfc_add_ss_to_loop (&loop, loop.temp_ss);
    }

  gfc_conv_loop_setup (&loop);

  if (need_tmp)
    {
      /* Copy into a temporary and pass that.  We don't need to copy the data
         back because expressions and vector subscripts must be INTENT_IN.  */
      /* TODO: Optimize passing function return values.  */
      gfc_se lse;
      gfc_se rse;

      /* Start the copying loops.  */
      gfc_mark_ss_chain_used (loop.temp_ss, 1);
      gfc_mark_ss_chain_used (ss, 1);
      gfc_start_scalarized_body (&loop, &block);

      /* Copy each data element.  */
      gfc_init_se (&lse, NULL);
      gfc_copy_loopinfo_to_se (&lse, &loop);
      gfc_init_se (&rse, NULL);
      gfc_copy_loopinfo_to_se (&rse, &loop);

      lse.ss = loop.temp_ss;
      rse.ss = ss;

      gfc_conv_scalarized_array_ref (&lse, NULL);
      if (expr->ts.type == BT_CHARACTER)
	{
	  gfc_conv_expr (&rse, expr);
	  if (POINTER_TYPE_P (TREE_TYPE (rse.expr)))
	    rse.expr = build_fold_indirect_ref (rse.expr);
	}
      else
        gfc_conv_expr_val (&rse, expr);

      gfc_add_block_to_block (&block, &rse.pre);
      gfc_add_block_to_block (&block, &lse.pre);

      gfc_add_modify_expr (&block, lse.expr, rse.expr);

      /* Finish the copying loops.  */
      gfc_trans_scalarizing_loops (&loop, &block);

      desc = loop.temp_ss->data.info.descriptor;

      gcc_assert (is_gimple_lvalue (desc));
    }
  else if (expr->expr_type == EXPR_FUNCTION)
    {
      desc = info->descriptor;
      se->string_length = ss->string_length;
    }
  else
    {
      /* We pass sections without copying to a temporary.  Make a new
	 descriptor and point it at the section we want.  The loop variable
	 limits will be the limits of the section.
	 A function may decide to repack the array to speed up access, but
	 we're not bothered about that here.  */
      int dim;
      tree parm;
      tree parmtype;
      tree stride;
      tree from;
      tree to;
      tree base;

      /* Set the string_length for a character array.  */
      if (expr->ts.type == BT_CHARACTER)
	se->string_length =  gfc_get_expr_charlen (expr);

      desc = info->descriptor;
      gcc_assert (secss && secss != gfc_ss_terminator);
      if (se->direct_byref)
	{
	  /* For pointer assignments we fill in the destination.  */
	  parm = se->expr;
	  parmtype = TREE_TYPE (parm);
	}
      else
	{
	  /* Otherwise make a new one.  */
	  parmtype = gfc_get_element_type (TREE_TYPE (desc));
	  parmtype = gfc_get_array_type_bounds (parmtype, loop.dimen,
						loop.from, loop.to, 0);
	  parm = gfc_create_var (parmtype, "parm");
	}

      offset = gfc_index_zero_node;
      dim = 0;

      /* The following can be somewhat confusing.  We have two
         descriptors, a new one and the original array.
         {parm, parmtype, dim} refer to the new one.
         {desc, type, n, secss, loop} refer to the original, which may be
         a descriptorless array.
         The bounds of the scalarization are the bounds of the section.
         We don't have to worry about numeric overflows when calculating
         the offsets because all elements are within the array data.  */

      /* Set the dtype.  */
      tmp = gfc_conv_descriptor_dtype (parm);
      gfc_add_modify_expr (&loop.pre, tmp, gfc_get_dtype (parmtype));

      if (se->direct_byref)
	base = gfc_index_zero_node;
      else
	base = NULL_TREE;

      for (n = 0; n < info->ref->u.ar.dimen; n++)
	{
	  stride = gfc_conv_array_stride (desc, n);

	  /* Work out the offset.  */
	  if (info->ref->u.ar.dimen_type[n] == DIMEN_ELEMENT)
	    {
	      gcc_assert (info->subscript[n]
		      && info->subscript[n]->type == GFC_SS_SCALAR);
	      start = info->subscript[n]->data.scalar.expr;
	    }
	  else
	    {
	      /* Check we haven't somehow got out of sync.  */
	      gcc_assert (info->dim[dim] == n);

	      /* Evaluate and remember the start of the section.  */
	      start = info->start[dim];
	      stride = gfc_evaluate_now (stride, &loop.pre);
	    }

	  tmp = gfc_conv_array_lbound (desc, n);
	  tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), start, tmp);

	  tmp = fold_build2 (MULT_EXPR, TREE_TYPE (tmp), tmp, stride);
	  offset = fold_build2 (PLUS_EXPR, TREE_TYPE (tmp), offset, tmp);

	  if (info->ref->u.ar.dimen_type[n] == DIMEN_ELEMENT)
	    {
	      /* For elemental dimensions, we only need the offset.  */
	      continue;
	    }

	  /* Vector subscripts need copying and are handled elsewhere.  */
	  gcc_assert (info->ref->u.ar.dimen_type[n] == DIMEN_RANGE);

	  /* Set the new lower bound.  */
	  from = loop.from[dim];
	  to = loop.to[dim];

	  /* If we have an array section or are assigning to a pointer,
	     make sure that the lower bound is 1.  References to the full
	     array should otherwise keep the original bounds.  */
	  if ((info->ref->u.ar.type != AR_FULL || se->direct_byref)
	      && !integer_onep (from))
	    {
	      tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type,
				 gfc_index_one_node, from);
	      to = fold_build2 (PLUS_EXPR, gfc_array_index_type, to, tmp);
	      from = gfc_index_one_node;
	    }
	  tmp = gfc_conv_descriptor_lbound (parm, gfc_rank_cst[dim]);
	  gfc_add_modify_expr (&loop.pre, tmp, from);

	  /* Set the new upper bound.  */
	  tmp = gfc_conv_descriptor_ubound (parm, gfc_rank_cst[dim]);
	  gfc_add_modify_expr (&loop.pre, tmp, to);

	  /* Multiply the stride by the section stride to get the
	     total stride.  */
	  stride = fold_build2 (MULT_EXPR, gfc_array_index_type,
				stride, info->stride[dim]);

	  if (se->direct_byref)
	    base = fold_build2 (MINUS_EXPR, TREE_TYPE (base),
				base, stride);

	  /* Store the new stride.  */
	  tmp = gfc_conv_descriptor_stride (parm, gfc_rank_cst[dim]);
	  gfc_add_modify_expr (&loop.pre, tmp, stride);

	  dim++;
	}

      if (se->data_not_needed)
	gfc_conv_descriptor_data_set (&loop.pre, parm, gfc_index_zero_node);
      else
	{
	  /* Point the data pointer at the first element in the section.  */
	  tmp = gfc_conv_array_data (desc);
	  tmp = build_fold_indirect_ref (tmp);
	  tmp = gfc_build_array_ref (tmp, offset);
	  offset = gfc_build_addr_expr (gfc_array_dataptr_type (desc), tmp);
	  gfc_conv_descriptor_data_set (&loop.pre, parm, offset);
	}

      if (se->direct_byref && !se->data_not_needed)
	{
	  /* Set the offset.  */
	  tmp = gfc_conv_descriptor_offset (parm);
	  gfc_add_modify_expr (&loop.pre, tmp, base);
	}
      else
	{
	  /* Only the callee knows what the correct offset is, so just set
	     it to zero here.  */
	  tmp = gfc_conv_descriptor_offset (parm);
	  gfc_add_modify_expr (&loop.pre, tmp, gfc_index_zero_node);
	}
      desc = parm;
    }

  if (!se->direct_byref)
    {
      /* Get a pointer to the new descriptor.  */
      if (se->want_pointer)
	se->expr = build_fold_addr_expr (desc);
      else
	se->expr = desc;
    }

  gfc_add_block_to_block (&se->pre, &loop.pre);
  gfc_add_block_to_block (&se->post, &loop.post);

  /* Cleanup the scalarizer.  */
  gfc_cleanup_loop (&loop);
}


/* Convert an array for passing as an actual parameter.  */
/* TODO: Optimize passing g77 arrays.  */
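/* For example, when calling an f77-style (g77) procedure with a whole
   contiguous array, the address of the array itself is passed.  For sections
   and other expressions the code below builds a descriptor, packs the data
   into a contiguous buffer with in_pack, passes that buffer, and afterwards
   copies it back with in_unpack and frees it (only if a copy was actually
   made).  */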

void
gfc_conv_array_parameter (gfc_se * se, gfc_expr * expr, gfc_ss * ss, int g77)
{
  tree ptr;
  tree desc;
  tree tmp;
  tree stmt;
  gfc_symbol *sym;
  stmtblock_t block;

  /* Pass the address of the array if it is not a pointer or assumed-shape
     array.  */
  if (expr->expr_type == EXPR_VARIABLE
       && expr->ref->u.ar.type == AR_FULL && g77)
    {
      sym = expr->symtree->n.sym;
      tmp = gfc_get_symbol_decl (sym);

      if (sym->ts.type == BT_CHARACTER)
	se->string_length = sym->ts.cl->backend_decl;
      if (!sym->attr.pointer && sym->as->type != AS_ASSUMED_SHAPE 
          && !sym->attr.allocatable)
        {
	  /* Some variables are declared directly, others are declared as
	     pointers and allocated on the heap.  */
          if (sym->attr.dummy || POINTER_TYPE_P (TREE_TYPE (tmp)))
            se->expr = tmp;
          else
	    se->expr = build_fold_addr_expr (tmp);
	  return;
        }
      if (sym->attr.allocatable)
        {
          se->expr = gfc_conv_array_data (tmp);
          return;
        }
    }

  se->want_pointer = 1;
  gfc_conv_expr_descriptor (se, expr, ss);

  if (g77)
    {
      desc = se->expr;
      /* Repack the array.  */
      tmp = gfc_chainon_list (NULL_TREE, desc);
      ptr = build_function_call_expr (gfor_fndecl_in_pack, tmp);
      ptr = gfc_evaluate_now (ptr, &se->pre);
      se->expr = ptr;

      gfc_start_block (&block);

      /* Copy the data back.  */
      tmp = gfc_chainon_list (NULL_TREE, desc);
      tmp = gfc_chainon_list (tmp, ptr);
      tmp = build_function_call_expr (gfor_fndecl_in_unpack, tmp);
      gfc_add_expr_to_block (&block, tmp);

      /* Free the temporary.  */
      tmp = convert (pvoid_type_node, ptr);
      tmp = gfc_chainon_list (NULL_TREE, tmp);
      tmp = build_function_call_expr (gfor_fndecl_internal_free, tmp);
      gfc_add_expr_to_block (&block, tmp);

      stmt = gfc_finish_block (&block);

      gfc_init_block (&block);
      /* Only if it was repacked.  This code needs to be executed before the
         loop cleanup code.  */
      tmp = build_fold_indirect_ref (desc);
      tmp = gfc_conv_array_data (tmp);
      tmp = build2 (NE_EXPR, boolean_type_node, ptr, tmp);
      tmp = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt ());

      gfc_add_expr_to_block (&block, tmp);
      gfc_add_block_to_block (&block, &se->post);

      gfc_init_block (&se->post);
      gfc_add_block_to_block (&se->post, &block);
    }
}


/* Generate code to deallocate an array, if it is allocated.  */

tree
gfc_trans_dealloc_allocated (tree descriptor)
{ 
  tree tmp;
  tree deallocate;
  stmtblock_t block;

  gfc_start_block (&block);
  deallocate = gfc_array_deallocate (descriptor, null_pointer_node);

  tmp = gfc_conv_descriptor_data_get (descriptor);
  tmp = build2 (NE_EXPR, boolean_type_node, tmp,
                build_int_cst (TREE_TYPE (tmp), 0));
  tmp = build3_v (COND_EXPR, tmp, deallocate, build_empty_stmt ());
  gfc_add_expr_to_block (&block, tmp);

  tmp = gfc_finish_block (&block);

  return tmp;
}


/* NULLIFY an allocatable/pointer array on function entry, free it on exit.  */

tree
gfc_trans_deferred_array (gfc_symbol * sym, tree body)
{
  tree type;
  tree tmp;
  tree descriptor;
  stmtblock_t fnblock;
  locus loc;

  /* Make sure the frontend gets these right.  */
  if (!(sym->attr.pointer || sym->attr.allocatable))
    fatal_error
      ("Possible frontend bug: Deferred array size without pointer or allocatable attribute.");

  gfc_init_block (&fnblock);

  gcc_assert (TREE_CODE (sym->backend_decl) == VAR_DECL
                || TREE_CODE (sym->backend_decl) == PARM_DECL);

  if (sym->ts.type == BT_CHARACTER
      && !INTEGER_CST_P (sym->ts.cl->backend_decl))
    {
      gfc_trans_init_string_length (sym->ts.cl, &fnblock);
      gfc_trans_vla_type_sizes (sym, &fnblock);
    }

  /* Dummy and use associated variables don't need anything special.  */
  if (sym->attr.dummy || sym->attr.use_assoc)
    {
      gfc_add_expr_to_block (&fnblock, body);

      return gfc_finish_block (&fnblock);
    }

  gfc_get_backend_locus (&loc);
  gfc_set_backend_locus (&sym->declared_at);
  descriptor = sym->backend_decl;

  if (TREE_STATIC (descriptor))
    {
      /* SAVEd variables are not freed on exit.  */
      gfc_trans_static_array_pointer (sym);
      return body;
    }

  /* Get the descriptor type.  */
  type = TREE_TYPE (sym->backend_decl);
  if (!GFC_DESCRIPTOR_TYPE_P (type))
    {
      /* If the backend_decl is not a descriptor, we must have a pointer
	 to one.  */
      descriptor = build_fold_indirect_ref (sym->backend_decl);
      type = TREE_TYPE (descriptor);
      gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
    }

  /* NULLIFY the data pointer.  */
  gfc_conv_descriptor_data_set (&fnblock, descriptor, null_pointer_node);

  gfc_add_expr_to_block (&fnblock, body);

  gfc_set_backend_locus (&loc);
  /* Allocatable arrays need to be freed when they go out of scope.  */
  if (sym->attr.allocatable)
    {
      tmp = gfc_trans_dealloc_allocated (sym->backend_decl);
      gfc_add_expr_to_block (&fnblock, tmp);
    }

  return gfc_finish_block (&fnblock);
}

/************ Expression Walking Functions ******************/

/* Walk a variable reference.

   Possible extension - multiple component subscripts.
    x(:,:) = foo%a(:)%b(:)
   Transforms to
    forall (i=..., j=...)
      x(i,j) = foo%a(j)%b(i)
    end forall
   This adds a fair amount of complexity because you need to deal with more
   than one ref.  Maybe handle in a similar manner to vector subscripts.
   Maybe not worth the effort.  */


static gfc_ss *
gfc_walk_variable_expr (gfc_ss * ss, gfc_expr * expr)
{
  gfc_ref *ref;
  gfc_array_ref *ar;
  gfc_ss *newss;
  gfc_ss *head;
  int n;

  for (ref = expr->ref; ref; ref = ref->next)
    if (ref->type == REF_ARRAY && ref->u.ar.type != AR_ELEMENT)
      break;

  for (; ref; ref = ref->next)
    {
      if (ref->type == REF_SUBSTRING)
	{
	  newss = gfc_get_ss ();
	  newss->type = GFC_SS_SCALAR;
	  newss->expr = ref->u.ss.start;
	  newss->next = ss;
	  ss = newss;

	  newss = gfc_get_ss ();
	  newss->type = GFC_SS_SCALAR;
	  newss->expr = ref->u.ss.end;
	  newss->next = ss;
	  ss = newss;
	}

      /* We're only interested in array sections from now on.  */
      if (ref->type != REF_ARRAY)
	continue;

      ar = &ref->u.ar;
      switch (ar->type)
	{
	case AR_ELEMENT:
	  for (n = 0; n < ar->dimen; n++)
	    {
	      newss = gfc_get_ss ();
	      newss->type = GFC_SS_SCALAR;
	      newss->expr = ar->start[n];
	      newss->next = ss;
	      ss = newss;
	    }
	  break;

	case AR_FULL:
	  newss = gfc_get_ss ();
	  newss->type = GFC_SS_SECTION;
	  newss->expr = expr;
	  newss->next = ss;
	  newss->data.info.dimen = ar->as->rank;
	  newss->data.info.ref = ref;

	  /* Make sure array is the same as array(:,:); this way
	     we don't need to special-case it all the time.  */
	  ar->dimen = ar->as->rank;
	  for (n = 0; n < ar->dimen; n++)
	    {
	      newss->data.info.dim[n] = n;
	      ar->dimen_type[n] = DIMEN_RANGE;

	      gcc_assert (ar->start[n] == NULL);
	      gcc_assert (ar->end[n] == NULL);
	      gcc_assert (ar->stride[n] == NULL);
	    }
	  ss = newss;
	  break;

	case AR_SECTION:
	  newss = gfc_get_ss ();
	  newss->type = GFC_SS_SECTION;
	  newss->expr = expr;
	  newss->next = ss;
	  newss->data.info.dimen = 0;
	  newss->data.info.ref = ref;

	  head = newss;

          /* We add SS chains for all the subscripts in the section.  */
	  for (n = 0; n < ar->dimen; n++)
	    {
	      gfc_ss *indexss;

	      switch (ar->dimen_type[n])
		{
		case DIMEN_ELEMENT:
		  /* Add SS for elemental (scalar) subscripts.  */
		  gcc_assert (ar->start[n]);
		  indexss = gfc_get_ss ();
		  indexss->type = GFC_SS_SCALAR;
		  indexss->expr = ar->start[n];
		  indexss->next = gfc_ss_terminator;
		  indexss->loop_chain = gfc_ss_terminator;
		  newss->data.info.subscript[n] = indexss;
		  break;

		case DIMEN_RANGE:
                  /* We don't add anything for sections, just remember this
                     dimension for later.  */
		  newss->data.info.dim[newss->data.info.dimen] = n;
		  newss->data.info.dimen++;
		  break;

		case DIMEN_VECTOR:
		  /* Create a GFC_SS_VECTOR index in which we can store
		     the vector's descriptor.  */
		  indexss = gfc_get_ss ();
		  indexss->type = GFC_SS_VECTOR;
		  indexss->expr = ar->start[n];
		  indexss->next = gfc_ss_terminator;
		  indexss->loop_chain = gfc_ss_terminator;
		  newss->data.info.subscript[n] = indexss;
		  newss->data.info.dim[newss->data.info.dimen] = n;
		  newss->data.info.dimen++;
		  break;

		default:
		  /* We should know what sort of section it is by now.  */
		  gcc_unreachable ();
		}
	    }
	  /* We should have at least one non-elemental dimension.  */
	  gcc_assert (newss->data.info.dimen > 0);
	  ss = newss;
	  break;

	default:
	  /* We should know what sort of array reference it is by now.  */
	  gcc_unreachable ();
	}

    }
  return ss;
}


/* Walk an expression operator. If only one operand of a binary expression is
   scalar, we must also add the scalar term to the SS chain.  */
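
/* For example (illustrative), walking A(:) + 1.0 yields a GFC_SS_SECTION
   for A from the first operand; the second operand is scalar, so a
   GFC_SS_SCALAR entry for the constant 1.0 is spliced into the chain and
   later evaluated once, outside the scalarization loop.  */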

static gfc_ss *
gfc_walk_op_expr (gfc_ss * ss, gfc_expr * expr)
{
  gfc_ss *head;
  gfc_ss *head2;
  gfc_ss *newss;

  head = gfc_walk_subexpr (ss, expr->value.op.op1);
  if (expr->value.op.op2 == NULL)
    head2 = head;
  else
    head2 = gfc_walk_subexpr (head, expr->value.op.op2);

  /* All operands are scalar.  Pass back and let the caller deal with it.  */
  if (head2 == ss)
    return head2;

  /* All operands require scalarization.  */
  if (head != ss && (expr->value.op.op2 == NULL || head2 != head))
    return head2;

  /* One of the operands needs scalarization, the other is scalar.
     Create a gfc_ss for the scalar expression.  */
  newss = gfc_get_ss ();
  newss->type = GFC_SS_SCALAR;
  if (head == ss)
    {
      /* First operand is scalar.  We build the chain in reverse order, so
         add the scalar SS after the second operand.  */
      head = head2;
      while (head && head->next != ss)
	head = head->next;
      /* Check we haven't somehow broken the chain.  */
      gcc_assert (head);
      newss->next = ss;
      head->next = newss;
      newss->expr = expr->value.op.op1;
    }
  else				/* head2 == head */
    {
      gcc_assert (head2 == head);
      /* Second operand is scalar.  */
      newss->next = head2;
      head2 = newss;
      newss->expr = expr->value.op.op2;
    }

  return head2;
}


/* Reverse a SS chain.  */
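
/* The expression walkers build SS chains by prepending new entries, so
   gfc_walk_expr below calls this to restore left-to-right order.  For
   example, a chain B -> A -> terminator becomes A -> B -> terminator.  */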

gfc_ss *
gfc_reverse_ss (gfc_ss * ss)
{
  gfc_ss *next;
  gfc_ss *head;

  gcc_assert (ss != NULL);

  head = gfc_ss_terminator;
  while (ss != gfc_ss_terminator)
    {
      next = ss->next;
      /* Check we didn't somehow break the chain.  */
      gcc_assert (next != NULL);
      ss->next = head;
      head = ss;
      ss = next;
    }

  return head;
}


/* Walk the arguments of an elemental function.  */
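
/* For example (illustrative), for an elemental call F (A(:), 0.0) the
   array argument is walked as usual, while the scalar argument 0.0 gets
   an SS entry of the requested TYPE (GFC_SS_REFERENCE when called from
   gfc_walk_function_expr below) so that its value is available inside
   the scalarization loop.  If every argument turns out to be scalar, the
   chain is freed again and SS is passed back unchanged.  */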

gfc_ss *
gfc_walk_elemental_function_args (gfc_ss * ss, gfc_actual_arglist *arg,
				  gfc_ss_type type)
{
  int scalar;
  gfc_ss *head;
  gfc_ss *tail;
  gfc_ss *newss;

  head = gfc_ss_terminator;
  tail = NULL;
  scalar = 1;
  for (; arg; arg = arg->next)
    {
      if (!arg->expr)
	continue;

      newss = gfc_walk_subexpr (head, arg->expr);
      if (newss == head)
	{
	  /* Scalar argument.  */
	  newss = gfc_get_ss ();
	  newss->type = type;
	  newss->expr = arg->expr;
	  newss->next = head;
	}
      else
	scalar = 0;

      head = newss;
      if (!tail)
        {
          tail = head;
          while (tail->next != gfc_ss_terminator)
            tail = tail->next;
        }
    }

  if (scalar)
    {
      /* If all the arguments are scalar we don't need the argument SS.  */
      gfc_free_ss_chain (head);
      /* Pass it back.  */
      return ss;
    }

  /* Add it onto the existing chain.  */
  tail->next = ss;
  return head;
}


/* Walk a function call.  Scalar functions are passed back, and taken out of
   scalarization loops.  For elemental functions we walk their arguments.
   The result of functions returning arrays is stored in a temporary outside
   the loop, so that the function is only called once.  Hence we do not need
   to walk their arguments.  */
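
/* For example (illustrative), a call to a non-elemental user function
   returning an array by reference gets a single GFC_SS_FUNCTION entry,
   whereas an elemental call such as F (A, B) has its arguments walked
   individually by gfc_walk_elemental_function_args above.  */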

static gfc_ss *
gfc_walk_function_expr (gfc_ss * ss, gfc_expr * expr)
{
  gfc_ss *newss;
  gfc_intrinsic_sym *isym;
  gfc_symbol *sym;

  isym = expr->value.function.isym;

  /* Handle intrinsic functions separately.  */
  if (isym)
    return gfc_walk_intrinsic_function (ss, expr, isym);

  sym = expr->value.function.esym;
  if (!sym)
    sym = expr->symtree->n.sym;

  /* A function that returns arrays.  */
  if (gfc_return_by_reference (sym) && sym->result->attr.dimension)
    {
      newss = gfc_get_ss ();
      newss->type = GFC_SS_FUNCTION;
      newss->expr = expr;
      newss->next = ss;
      newss->data.info.dimen = expr->rank;
      return newss;
    }

  /* Walk the parameters of an elemental function.  For now we always pass
     by reference.  */
  if (sym->attr.elemental)
    return gfc_walk_elemental_function_args (ss, expr->value.function.actual,
					     GFC_SS_REFERENCE);

  /* Scalar functions are OK as these are evaluated outside the scalarization
     loop.  Pass back and let the caller deal with it.  */
  return ss;
}


/* An array temporary is constructed for array constructors.  */
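
/* For example (illustrative), the constructor (/ 1, 2, 3 /) appearing in
   an array expression gets a GFC_SS_CONSTRUCTOR entry covering expr->rank
   dimensions, and the scalarizer evaluates it into the temporary mentioned
   above.  */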

static gfc_ss *
gfc_walk_array_constructor (gfc_ss * ss, gfc_expr * expr)
{
  gfc_ss *newss;
  int n;

  newss = gfc_get_ss ();
  newss->type = GFC_SS_CONSTRUCTOR;
  newss->expr = expr;
  newss->next = ss;
  newss->data.info.dimen = expr->rank;
  for (n = 0; n < expr->rank; n++)
    newss->data.info.dim[n] = n;

  return newss;
}


/* Walk an expression.  Add walked expressions to the head of the SS chain.
   A wholly scalar expression will not be added.  */

static gfc_ss *
gfc_walk_subexpr (gfc_ss * ss, gfc_expr * expr)
{
  gfc_ss *head;

  switch (expr->expr_type)
    {
    case EXPR_VARIABLE:
      head = gfc_walk_variable_expr (ss, expr);
      return head;

    case EXPR_OP:
      head = gfc_walk_op_expr (ss, expr);
      return head;

    case EXPR_FUNCTION:
      head = gfc_walk_function_expr (ss, expr);
      return head;

    case EXPR_CONSTANT:
    case EXPR_NULL:
    case EXPR_STRUCTURE:
      /* Pass back and let the caller deal with it.  */
      break;

    case EXPR_ARRAY:
      head = gfc_walk_array_constructor (ss, expr);
      return head;

    case EXPR_SUBSTRING:
      /* Pass back and let the caller deal with it.  */
      break;

    default:
      internal_error ("bad expression type during walk (%d)",
		      expr->expr_type);
    }
  return ss;
}


/* Entry point for expression walking.
   A return value of gfc_ss_terminator means the expression is wholly
   scalar.  It is up to the caller to take whatever action is necessary
   to translate it.  */
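
/* A typical caller dispatches on the result; roughly (a sketch, not a
   copy of any particular caller):

     ss = gfc_walk_expr (expr);
     if (ss == gfc_ss_terminator)
       translate the scalar expression directly (e.g. gfc_conv_expr);
     else
       add SS to a scalarization loop and translate element-wise.  */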

gfc_ss *
gfc_walk_expr (gfc_expr * expr)
{
  gfc_ss *res;

  res = gfc_walk_subexpr (gfc_ss_terminator, expr);
  return gfc_reverse_ss (res);
}