path: root/libjava/java/lang/Thread.java
/* Thread -- an independent thread of executable code
   Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
   Free Software Foundation

This file is part of GNU Classpath.

GNU Classpath is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GNU Classpath is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Classpath; see the file COPYING.  If not, write to the
Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA.

Linking this library statically or dynamically with other modules is
making a combined work based on this library.  Thus, the terms and
conditions of the GNU General Public License cover the whole
combination.

As a special exception, the copyright holders of this library give you
permission to link this library with independent modules to produce an
executable, regardless of the license terms of these independent
modules, and to copy and distribute the resulting executable under
terms of your choice, provided that you also meet, for each linked
independent module, the terms and conditions of the license of that
module.  An independent module is a module which is not derived from
or based on this library.  If you modify this library, you may extend
this exception to your version of the library, but you are not
obligated to do so.  If you do not wish to do so, delete this
exception statement from your version. */


package java.lang;

import gnu.gcj.RawData;
import gnu.gcj.RawDataManaged;
import gnu.java.util.WeakIdentityHashMap;
import java.util.Map;

/* Written using "Java Class Libraries", 2nd edition, ISBN 0-201-31002-3
 * "The Java Language Specification", ISBN 0-201-63451-1
 * plus online API docs for JDK 1.2 beta from http://www.javasoft.com.
 * Status:  Believed complete to version 1.4, with caveats. We do not
 *          implement the deprecated (and dangerous) stop, suspend, and resume
 *          methods. Security implementation is not complete.
 */

/**
 * Thread represents a single thread of execution in the VM. When an
 * application VM starts up, it creates a non-daemon Thread which calls the
 * main() method of a particular class.  There may be other Threads running,
 * such as the garbage collection thread.
 *
 * <p>Threads have names to identify them.  These names are not necessarily
 * unique. Every Thread has a priority, as well, which tells the VM which
 * Threads should get more running time. New threads inherit the priority
 * and daemon status of the parent thread, by default.
 *
 * <p>There are two methods of creating a Thread: you may subclass Thread and
 * implement the <code>run()</code> method, at which point you may start the
 * Thread by calling its <code>start()</code> method, or you may implement
 * <code>Runnable</code> in the class you want to use and then call new
 * <code>Thread(your_obj).start()</code>.
 *
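 * <p>For example, both styles can be used as in the following sketch
 * (the <code>Worker</code> and <code>Demo</code> class names are purely
 * illustrative):
 * <p><blockquote><pre>
 *     class Worker extends Thread {
 *         public void run() {
 *             System.out.println("running in " + getName());
 *         }
 *     }
 *     class Demo {
 *         public static void main(String[] args) {
 *             // Variant 1: subclass Thread and override run().
 *             new Worker().start();
 *             // Variant 2: hand a Runnable to a plain Thread.
 *             Runnable task = new Runnable() {
 *                 public void run() {
 *                     System.out.println("running in "
 *                                        + Thread.currentThread().getName());
 *                 }
 *             };
 *             new Thread(task).start();
 *         }
 *     }
 * </pre></blockquote>
 *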
 * <p>The virtual machine runs until all non-daemon threads have died (either
 * by returning from the run() method as invoked by start(), or by throwing
 * an uncaught exception); or until <code>System.exit</code> is called with
 * adequate permissions.
 *
 * <p>It is unclear at what point a Thread should be added to a ThreadGroup,
 * and at what point it should be removed. Should it be inserted when it
 * starts, or when it is created?  Should it be removed when it is suspended
 * or interrupted?  The only thing that is clear is that the Thread should be
 * removed when it is stopped.
 *
 * @author Tom Tromey
 * @author John Keiser
 * @author Eric Blake (ebb9@email.byu.edu)
 * @see Runnable
 * @see Runtime#exit(int)
 * @see #run()
 * @see #start()
 * @see ThreadLocal
 * @since 1.0
 * @status updated to 1.4
 */
public class Thread implements Runnable
{
  /** The minimum priority for a Thread. */
  public static final int MIN_PRIORITY = 1;

  /** The priority a Thread gets by default. */
  public static final int NORM_PRIORITY = 5;

  /** The maximum priority for a Thread. */
  public static final int MAX_PRIORITY = 10;

  /**
   * The group this thread belongs to. This is set to null by
   * ThreadGroup.removeThread when the thread dies.
   */
  ThreadGroup group;

  /** The Runnable whose run() is called, or null if this Thread itself is the target. */
  private Runnable runnable;

  /** The thread name, non-null. */
  String name;

  /** Whether the thread is a daemon. */
  private boolean daemon;

  /** The thread priority, 1 to 10. */
  private int priority;

  boolean interrupt_flag;
  private boolean alive_flag;
  private boolean startable_flag;

  /** The context classloader for this Thread. */
  private ClassLoader contextClassLoader;

  /** This thread's ID.  */
  private final long threadId;

  /** The next thread ID to use.  */
  private static long nextThreadId;

  /** The default exception handler.  */
  private static UncaughtExceptionHandler defaultHandler;

  /** Thread local storage. Package accessible for use by
    * InheritableThreadLocal.
    */
  WeakIdentityHashMap locals;

  /** The uncaught exception handler.  */
  UncaughtExceptionHandler exceptionHandler;

  // This describes the top-most interpreter frame for this thread.
  RawData interp_frame;

  // Our native data - points to an instance of struct natThread.
  private RawDataManaged data;

  /**
   * Allocates a new <code>Thread</code> object. This constructor has
   * the same effect as <code>Thread(null, null,</code>
   * <i>gname</i><code>)</code>, where <b><i>gname</i></b> is
   * a newly generated name. Automatically generated names are of the
   * form <code>"Thread-"+</code><i>n</i>, where <i>n</i> is an integer.
   * <p>
   * Threads created this way must have overridden their
   * <code>run()</code> method to actually do anything.  An example
   * illustrating this method being used follows:
   * <p><blockquote><pre>
   *     import java.lang.*;
   *
   *     class plain01 implements Runnable {
   *         String name;
   *         plain01() {
   *             name = null;
   *         }
   *         plain01(String s) {
   *             name = s;
   *         }
   *         public void run() {
   *             if (name == null)
   *                 System.out.println("A new thread created");
   *             else
   *                 System.out.println("A new thread with name " + name +
   *                                    " created");
   *         }
   *     }
   *     class threadtest01 {
   *         public static void main(String args[] ) {
   *             int failed = 0 ;
   *
   *             <b>Thread t1 = new Thread();</b>
   *             if (t1 != null)
   *                 System.out.println("new Thread() succeed");
   *             else {
   *                 System.out.println("new Thread() failed");
   *                 failed++;
   *             }
   *         }
   *     }
   * </pre></blockquote>
   *
   * @see     java.lang.Thread#Thread(java.lang.ThreadGroup,
   *          java.lang.Runnable, java.lang.String)
   */
  public Thread()
  {
    this(null, null, gen_name());
  }

  /**
   * Allocates a new <code>Thread</code> object. This constructor has
   * the same effect as <code>Thread(null, target,</code>
   * <i>gname</i><code>)</code>, where <i>gname</i> is
   * a newly generated name. Automatically generated names are of the
   * form <code>"Thread-"+</code><i>n</i>, where <i>n</i> is an integer.
   *
   * @param target the object whose <code>run</code> method is called.
   * @see java.lang.Thread#Thread(java.lang.ThreadGroup,
   *                              java.lang.Runnable, java.lang.String)
   */
  public Thread(Runnable target)
  {
    this(null, target, gen_name());
  }

  /**
   * Allocates a new <code>Thread</code> object. This constructor has
   * the same effect as <code>Thread(null, null, name)</code>.
   *
   * @param   name   the name of the new thread.
   * @see     java.lang.Thread#Thread(java.lang.ThreadGroup,
   *          java.lang.Runnable, java.lang.String)
   */
  public Thread(String name)
  {
    this(null, null, name);
  }

  /**
   * Allocates a new <code>Thread</code> object. This constructor has
   * the same effect as <code>Thread(group, target,</code>
   * <i>gname</i><code>)</code>, where <i>gname</i> is
   * a newly generated name. Automatically generated names are of the
   * form <code>"Thread-"+</code><i>n</i>, where <i>n</i> is an integer.
   *
   * @param group the group to put the Thread into
   * @param target the Runnable object to execute
   * @throws SecurityException if this thread cannot access <code>group</code>
   * @throws IllegalThreadStateException if group is destroyed
   * @see #Thread(ThreadGroup, Runnable, String)
   */
  public Thread(ThreadGroup group, Runnable target)
  {
    this(group, target, gen_name());
  }

  /**
   * Allocates a new <code>Thread</code> object. This constructor has
   * the same effect as <code>Thread(group, null, name)</code>
   *
   * @param group the group to put the Thread into
   * @param name the name for the Thread
   * @throws NullPointerException if name is null
   * @throws SecurityException if this thread cannot access <code>group</code>
   * @throws IllegalThreadStateException if group is destroyed
   * @see #Thread(ThreadGroup, Runnable, String)
   */
  public Thread(ThreadGroup group, String name)
  {
    this(group, null, name);
  }

  /**
   * Allocates a new <code>Thread</code> object. This constructor has
   * the same effect as <code>Thread(null, target, name)</code>.
   *
   * @param target the Runnable object to execute
   * @param name the name for the Thread
   * @throws NullPointerException if name is null
   * @see #Thread(ThreadGroup, Runnable, String)
   */
  public Thread(Runnable target, String name)
  {
    this(null, target, name);
  }

  /**
   * Allocate a new Thread object, with the specified ThreadGroup and name, and
   * using the specified Runnable object's <code>run()</code> method to
   * execute.  If the Runnable object is null, <code>this</code> (which is
   * a Runnable) is used instead.
   *
   * <p>If the ThreadGroup is null, the security manager is checked. If a
   * manager exists and returns a non-null object for
   * <code>getThreadGroup</code>, that group is used; otherwise the group
   * of the creating thread is used. Note that the security manager calls
   * <code>checkAccess</code> if the ThreadGroup is not null.
   *
   * <p>The new Thread will inherit its creator's priority and daemon status.
   * These can be changed with <code>setPriority</code> and
   * <code>setDaemon</code>.
   *
   * @param group the group to put the Thread into
   * @param target the Runnable object to execute
   * @param name the name for the Thread
   * @throws NullPointerException if name is null
   * @throws SecurityException if this thread cannot access <code>group</code>
   * @throws IllegalThreadStateException if group is destroyed
   * @see Runnable#run()
   * @see #run()
   * @see #setDaemon(boolean)
   * @see #setPriority(int)
   * @see SecurityManager#checkAccess(ThreadGroup)
   * @see ThreadGroup#checkAccess()
   */
  public Thread(ThreadGroup group, Runnable target, String name)
  {
    this(currentThread(), group, target, name);
  }

  /**
   * Allocate a new Thread object, as if by
   * <code>Thread(group, null, name)</code>, and give it the specified stack
   * size, in bytes. The stack size is <b>highly platform dependent</b>,
   * and the virtual machine is free to round up or down, or ignore it
   * completely.  A higher value might let you go longer before a
   * <code>StackOverflowError</code>, while a lower value might let you go
   * longer before an <code>OutOfMemoryError</code>.  Or, it may do absolutely
   * nothing! So be careful, and expect to need to tune this value if your
   * virtual machine even supports it.
   *
   * @param group the group to put the Thread into
   * @param target the Runnable object to execute
   * @param name the name for the Thread
   * @param size the stack size, in bytes; 0 to be ignored
   * @throws NullPointerException if name is null
   * @throws SecurityException if this thread cannot access <code>group</code>
   * @throws IllegalThreadStateException if group is destroyed
   * @since 1.4
   */
  public Thread(ThreadGroup group, Runnable target, String name, long size)
  {
    // Just ignore stackSize for now.
    this(currentThread(), group, target, name);
  }

  private Thread (Thread current, ThreadGroup g, Runnable r, String n)
  {
    // Make sure the current thread may create a new thread.
    checkAccess();
    
    // The Class Libraries book says ``threadName cannot be null''.  I
    // take this to mean NullPointerException.
    if (n == null)
      throw new NullPointerException ();
      
    if (g == null)
      {
	// If CURRENT is null, then we are bootstrapping the first thread. 
	// Use ThreadGroup.root, the main threadgroup.
	if (current == null)
	  group = ThreadGroup.root;
	else
	  group = current.getThreadGroup();
      }
    else
      group = g;

    data = null;
    interrupt_flag = false;
    alive_flag = false;
    startable_flag = true;

    synchronized (Thread.class)
      {
        this.threadId = nextThreadId++;
      }

    if (current != null)
      {
	group.checkAccess();

	daemon = current.isDaemon();
        int gmax = group.getMaxPriority();
	int pri = current.getPriority();
	priority = (gmax < pri ? gmax : pri);
	contextClassLoader = current.contextClassLoader;
	InheritableThreadLocal.newChildThread(this);
      }
    else
      {
	daemon = false;
	priority = NORM_PRIORITY;
      }

    name = n;
    group.addThread(this);
    runnable = r;

    initialize_native ();
  }

  /**
   * Get the number of active threads in the current Thread's ThreadGroup.
   * This implementation calls
   * <code>currentThread().getThreadGroup().activeCount()</code>.
   *
   * @return the number of active threads in the current ThreadGroup
   * @see ThreadGroup#activeCount()
   */
  public static int activeCount()
  {
    return currentThread().group.activeCount();
  }

  /**
   * Check whether the current Thread is allowed to modify this Thread. This
   * passes the check on to <code>SecurityManager.checkAccess(this)</code>.
   *
   * @throws SecurityException if the current Thread cannot modify this Thread
   * @see SecurityManager#checkAccess(Thread)
   */
  public final void checkAccess()
  {
    SecurityManager sm = System.getSecurityManager();
    if (sm != null)
      sm.checkAccess(this);
  }

  /**
   * Count the number of stack frames in this Thread.  The Thread in question
   * must be suspended when this occurs.
   *
   * @return the number of stack frames in this Thread
   * @throws IllegalThreadStateException if this Thread is not suspended
   * @deprecated pointless, since suspend is deprecated
   */
  public native int countStackFrames();

  /**
   * Get the currently executing Thread.
   *
   * @return the currently executing Thread
   */
  public static native Thread currentThread();

  /**
   * Originally intended to destroy this thread, this method was never
   * implemented by Sun. This implementation throws a NoSuchMethodError.
   */
  public void destroy()
  {
    throw new NoSuchMethodError();
  }
  
  /**
   * Print a stack trace of the current thread to stderr using the same
   * format as Throwable's printStackTrace() method.
   *
   * @see Throwable#printStackTrace()
   */
  public static void dumpStack()
  {
    (new Exception("Stack trace")).printStackTrace();
  }

  /**
   * Copy every active thread in the current Thread's ThreadGroup into the
   * array. Extra threads are silently ignored. This implementation calls
   * <code>getThreadGroup().enumerate(array)</code>, which may have a
   * security check, <code>checkAccess(group)</code>.
   *
   * @param array the array to place the Threads into
   * @return the number of Threads placed into the array
   * @throws NullPointerException if array is null
   * @throws SecurityException if you cannot access the ThreadGroup
   * @see ThreadGroup#enumerate(Thread[])
   * @see #activeCount()
   * @see SecurityManager#checkAccess(ThreadGroup)
   */
  public static int enumerate(Thread[] array)
  {
    return currentThread().group.enumerate(array);
  }
  
  /**
   * Get this Thread's name.
   *
   * @return this Thread's name
   */
  public final String getName()
  {
    return name;
  }

  /**
   * Get this Thread's priority.
   *
   * @return the Thread's priority
   */
  public final int getPriority()
  {
    return priority;
  }

  /**
   * Get the ThreadGroup this Thread belongs to. If the thread has died, this
   * returns null.
   *
   * @return this Thread's ThreadGroup
   */
  public final ThreadGroup getThreadGroup()
  {
    return group;
  }

  /**
   * Checks whether the current thread holds the monitor on a given object.
   * This allows you to do <code>assert Thread.holdsLock(obj)</code>.
   *
   * @param obj the object to test lock ownership on.
   * @return true if the current thread is currently synchronized on obj
   * @throws NullPointerException if obj is null
   * @since 1.4
   */
  public static native boolean holdsLock(Object obj);

  /**
   * Interrupt this Thread. First, there is a security check,
   * <code>checkAccess</code>. Then, depending on the current state of the
   * thread, various actions take place:
   *
   * <p>If the thread is waiting because of {@link #wait()},
   * {@link #sleep(long)}, or {@link #join()}, its <i>interrupt status</i>
   * will be cleared, and an InterruptedException will be thrown. Notice that
   * this case is only possible if an external thread called interrupt().
   *
   * <p>If the thread is blocked in an interruptible I/O operation, in
   * {@link java.nio.channels.InterruptibleChannel}, the <i>interrupt
   * status</i> will be set, and ClosedByInterruptException will be thrown.
   *
   * <p>If the thread is blocked on a {@link java.nio.channels.Selector}, the
   * <i>interrupt status</i> will be set, and the selection will return, with
   * a possible non-zero value, as though by the wakeup() method.
   *
   * <p>Otherwise, the interrupt status will be set.
   *
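   * <p>A typical way to consume an interrupt in a <code>run()</code> method
   * is sketched below (illustrative only, not part of this API):
   * <p><blockquote><pre>
   *     public void run() {
   *         try {
   *             while (!Thread.currentThread().isInterrupted()) {
   *                 Thread.sleep(100);   // periodic work goes here
   *             }
   *         } catch (InterruptedException e) {
   *             // sleep() cleared the interrupt status; restore it so that
   *             // callers further up the stack can still observe it.
   *             Thread.currentThread().interrupt();
   *         }
   *     }
   * </pre></blockquote>
   *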
   * @throws SecurityException if you cannot modify this Thread
   */
  public native void interrupt();

  /**
   * Determine whether the current Thread has been interrupted, and clear
   * the <i>interrupted status</i> in the process.
   *
   * @return whether the current Thread has been interrupted
   * @see #isInterrupted()
   */
  public static boolean interrupted()
  {
    return currentThread().isInterrupted(true);
  }

  /**
   * Determine whether this Thread has been interrupted, but leave
   * the <i>interrupted status</i> alone in the process.
   *
   * @return whether this Thread has been interrupted
   * @see #interrupted()
   */
  public boolean isInterrupted()
  {
    return interrupt_flag;
  }

  /**
   * Determine whether this Thread is alive. A thread which is alive has
   * started and not yet died.
   *
   * @return whether this Thread is alive
   */
  public final synchronized boolean isAlive()
  {
    return alive_flag;
  }

  /**
   * Tell whether this is a daemon Thread or not.
   *
   * @return whether this is a daemon Thread or not
   * @see #setDaemon(boolean)
   */
  public final boolean isDaemon()
  {
    return daemon;
  }

  /**
   * Wait forever for the Thread in question to die.
   *
   * @throws InterruptedException if the Thread is interrupted; its
   *         <i>interrupted status</i> will be cleared
   */
  public final void join() throws InterruptedException
  {
    join(0, 0);
  }

  /**
   * Wait the specified amount of time for the Thread in question to die.
   *
   * @param ms the number of milliseconds to wait, or 0 for forever
   * @throws InterruptedException if the Thread is interrupted; its
   *         <i>interrupted status</i> will be cleared
   */
  public final void join(long ms) throws InterruptedException
  {
    join(ms, 0);
  }

  /**
   * Wait the specified amount of time for the Thread in question to die.
   *
   * <p>Note that 1,000,000 nanoseconds == 1 millisecond, but most VMs do
   * not offer that fine a grain of timing resolution. Besides, there is
   * no guarantee that this thread can start up immediately when time expires,
   * because some other thread may be active.  So don't expect real-time
   * performance.
   *
   * @param ms the number of milliseconds to wait, or 0 for forever
   * @param ns the number of extra nanoseconds to sleep (0-999999)
   * @throws InterruptedException if the Thread is interrupted; its
   *         <i>interrupted status</i> will be cleared
   * @throws IllegalArgumentException if ns is invalid
   * @XXX A ThreadListener would be nice, to make this efficient.
   */
  public final native void join(long ms, int ns)
    throws InterruptedException;

  /**
   * Resume a suspended thread.
   *
   * @throws SecurityException if you cannot resume the Thread
   * @see #checkAccess()
   * @see #suspend()
   * @deprecated pointless, since suspend is deprecated
   */
  public final native void resume();

  private final native void finish_();

  /**
   * Determine whether this Thread has been interrupted. If clear_flag is
   * true and the <i>interrupted status</i> was set, the status is cleared.
   *
   * @param clear_flag whether to clear the <i>interrupted status</i> if set
   * @return whether this Thread has been interrupted
   * @see #interrupted()
   */
  private boolean isInterrupted(boolean clear_flag)
  {
    boolean r = interrupt_flag;
    if (clear_flag && r)
      {
	// Only clear the flag if we saw it as set. Otherwise this could 
	// potentially cause us to miss an interrupt in a race condition, 
	// because this method is not synchronized.
	interrupt_flag = false;
      }
    return r;
  }
  
  /**
   * The method of Thread that will be run if there is no Runnable object
   * associated with the Thread. Thread's implementation does nothing at all.
   *
   * @see #start()
   * @see #Thread(ThreadGroup, Runnable, String)
   */
  public void run()
  {
    if (runnable != null)
      runnable.run();
  }

  /**
   * Set the daemon status of this Thread.  If this is a daemon Thread, then
   * the VM may exit even if it is still running.  This may only be called
   * before the Thread starts running. There may be a security check,
   * <code>checkAccess</code>.
   *
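   * <p>For example (a sketch; <code>someRunnable</code> is a placeholder):
   * <p><blockquote><pre>
   *     Thread t = new Thread(someRunnable);
   *     t.setDaemon(true);   // must happen before start()
   *     t.start();
   * </pre></blockquote>
   *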
   * @param daemon whether this should be a daemon thread or not
   * @throws SecurityException if you cannot modify this Thread
   * @throws IllegalThreadStateException if the Thread is active
   * @see #isDaemon()
   * @see #checkAccess()
   */
  public final void setDaemon(boolean daemon)
  {
    if (!startable_flag)
      throw new IllegalThreadStateException();
    checkAccess();
    this.daemon = daemon;
  }

  /**
   * Returns the context classloader of this Thread. The context
   * classloader can be used by code that wants to load classes depending
   * on the current thread. Normally classes are loaded depending on
   * the classloader of the current class. There may be a security check
   * for <code>RuntimePermission("getClassLoader")</code> if the caller's
   * class loader is not null or an ancestor of this thread's context class
   * loader.
   *
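   * <p>For example, framework code often resolves a user-supplied class name
   * against the calling thread's context class loader (a sketch;
   * <code>className</code> is a placeholder):
   * <p><blockquote><pre>
   *     ClassLoader cl = Thread.currentThread().getContextClassLoader();
   *     Class c = Class.forName(className, true, cl);
   * </pre></blockquote>
   *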
   * @return the context class loader
   * @throws SecurityException when permission is denied
   * @see #setContextClassLoader(ClassLoader)
   * @since 1.2
   */
  public synchronized ClassLoader getContextClassLoader()
  {
    if (contextClassLoader == null)
      contextClassLoader = ClassLoader.getSystemClassLoader();

    SecurityManager sm = System.getSecurityManager();
    // FIXME: we can't currently find the caller's class loader.
    ClassLoader callers = null;
    if (sm != null && callers != null)
      {
	// See if the caller's class loader is the same as or an
	// ancestor of this thread's class loader.
	while (callers != null && callers != contextClassLoader)
	  {
	    // FIXME: should use some internal version of getParent
	    // that avoids security checks.
	    callers = callers.getParent();
	  }

	if (callers != contextClassLoader)
	  sm.checkPermission(new RuntimePermission("getClassLoader"));
      }

    return contextClassLoader;
  }

  /**
   * Sets the context classloader for this Thread. When not explicitly set,
   * the context classloader for a thread is the same as the context
   * classloader of the thread that created this thread. The first thread has
   * as context classloader the system classloader. There may be a security
   * check for <code>RuntimePermission("setContextClassLoader")</code>.
   *
   * @param classloader the new context class loader
   * @throws SecurityException when permission is denied
   * @see #getContextClassLoader()
   * @since 1.2
   */
  public synchronized void setContextClassLoader(ClassLoader classloader)
  {
    SecurityManager sm = System.getSecurityManager();
    if (sm != null)
      sm.checkPermission(new RuntimePermission("setContextClassLoader"));
    this.contextClassLoader = classloader;
  }

  /**
   * Set this Thread's name.  There may be a security check,
   * <code>checkAccess</code>.
   *
   * @param name the new name for this Thread
   * @throws NullPointerException if name is null
   * @throws SecurityException if you cannot modify this Thread
   */
  public final void setName(String name)
  {
    checkAccess();
    // The Class Libraries book says ``threadName cannot be null''.  I
    // take this to mean NullPointerException.
    if (name == null)
      throw new NullPointerException();
    this.name = name;
  }

  /**
   * Causes the currently executing thread object to temporarily pause
   * and allow other threads to execute.
   */
  public static native void yield();

  /**
   * Suspend the current Thread's execution for the specified amount of
   * time. The Thread will not lose any locks it has during this time. There
   * are no guarantees which thread will be next to run, but most VMs will
   * choose the highest priority thread that has been waiting longest.
   *
   * @param ms the number of milliseconds to sleep
   * @throws InterruptedException if the Thread is interrupted; its
   *         <i>interrupted status</i> will be cleared
   * @see #notify()
   * @see #wait(long)
   */
  public static void sleep(long ms) throws InterruptedException
  {
    sleep(ms, 0);
  }

  /**
   * Suspend the current Thread's execution for the specified amount of
   * time. The Thread will not lose any locks it has during this time. There
   * are no guarantees which thread will be next to run, but most VMs will
   * choose the highest priority thread that has been waiting longest.
   *
   * <p>Note that 1,000,000 nanoseconds == 1 millisecond, but most VMs do
   * not offer that fine a grain of timing resolution. Besides, there is
   * no guarantee that this thread can start up immediately when time expires,
   * because some other thread may be active.  So don't expect real-time
   * performance.
   *
   * @param ms the number of milliseconds to sleep
   * @param ns the number of extra nanoseconds to sleep (0-999999)
   * @throws InterruptedException if the Thread is interrupted; its
   *         <i>interrupted status</i> will be cleared
   * @throws IllegalArgumentException if ns is invalid
   * @see #notify()
   * @see #wait(long, int)
   */
  public static native void sleep(long timeout, int nanos)
    throws InterruptedException;

  /**
   * Start this Thread, calling the run() method of the Runnable this Thread
   * was created with, or else the run() method of the Thread itself. This
   * is the only way to start a new thread; calling run() yourself merely
   * executes it in the calling thread. The virtual machine will remove the
   * thread from its thread group when the run() method completes.
   *
   * @throws IllegalThreadStateException if the thread has already started
   * @see #run()
   */
  public native void start();

  /**
   * Cause this Thread to stop abnormally because of the throw of a ThreadDeath
   * error. If you stop a Thread that has not yet started, it will stop
   * immediately when it is actually started.
   *
   * <p>This is inherently unsafe, as it can interrupt synchronized blocks and
   * leave data in bad states.  Hence, there is a security check:
   * <code>checkAccess(this)</code>, plus another one if the current thread
   * is not this: <code>RuntimePermission("stopThread")</code>. If you must
   * catch a ThreadDeath, be sure to rethrow it after you have cleaned up.
   * ThreadDeath is the only exception which does not print a stack trace when
   * the thread dies.
   *
   * @throws SecurityException if you cannot stop the Thread
   * @see #interrupt()
   * @see #checkAccess()
   * @see #start()
   * @see ThreadDeath
   * @see ThreadGroup#uncaughtException(Thread, Throwable)
   * @see SecurityManager#checkAccess(Thread)
   * @see SecurityManager#checkPermission(Permission)
   * @deprecated unsafe operation, try not to use
   */
  public final void stop()
  {
    // Argument doesn't matter, because this is no longer
    // supported.
    stop(null);
  }

  /**
   * Cause this Thread to stop abnormally and throw the specified exception.
   * If you stop a Thread that has not yet started, it will stop immediately
   * when it is actually started. <b>WARNING:</b> This bypasses Java security,
   * and can throw a checked exception which the call stack is unprepared to
   * handle. Do not abuse this power.
   *
   * <p>This is inherently unsafe, as it can interrupt synchronized blocks and
   * leave data in bad states.  Hence, there is a security check:
   * <code>checkAccess(this)</code>, plus another one if the current thread
   * is not this: <code>RuntimePermission("stopThread")</code>. If you must
   * catch a ThreadDeath, be sure to rethrow it after you have cleaned up.
   * ThreadDeath is the only exception which does not print a stack trace when
   * the thread dies.
   *
   * @param t the Throwable to throw when the Thread dies
   * @throws SecurityException if you cannot stop the Thread
   * @throws NullPointerException in the calling thread, if t is null
   * @see #interrupt()
   * @see #checkAccess()
   * @see #start()
   * @see ThreadDeath
   * @see ThreadGroup#uncaughtException(Thread, Throwable)
   * @see SecurityManager#checkAccess(Thread)
   * @see SecurityManager#checkPermission(Permission)
   * @deprecated unsafe operation, try not to use
   */
  public final native void stop(Throwable t);

  /**
   * Suspend this Thread.  It will not come back, ever, unless it is resumed.
   *
   * <p>This is inherently unsafe, as the suspended thread still holds locks,
   * and can potentially deadlock your program.  Hence, there is a security
   * check: <code>checkAccess</code>.
   *
   * @throws SecurityException if you cannot suspend the Thread
   * @see #checkAccess()
   * @see #resume()
   * @deprecated unsafe operation, try not to use
   */
  public final native void suspend();

  /**
   * Set this Thread's priority. There may be a security check,
   * <code>checkAccess</code>, then the priority is set to the smaller of
   * priority and the ThreadGroup maximum priority.
   *
   * @param priority the new priority for this Thread
   * @throws IllegalArgumentException if priority is less than MIN_PRIORITY
   *         or greater than MAX_PRIORITY
   * @throws SecurityException if you cannot modify this Thread
   * @see #getPriority()
   * @see #checkAccess()
   * @see ThreadGroup#getMaxPriority()
   * @see #MIN_PRIORITY
   * @see #MAX_PRIORITY
   */
  public final native void setPriority(int newPriority);

  /**
   * Returns a string representation of this thread, including the
   * thread's name, priority, and thread group.
   *
   * @return a human-readable String representing this Thread
   */
  public String toString()
  {
    return ("Thread[" + name + "," + priority + ","
	    + (group == null ? "" : group.getName()) + "]");
  }

  private final native void initialize_native();

  private final native static String gen_name();

  /**
   * Returns the map used by ThreadLocal to store the thread local values.
   */
  static Map getThreadLocals()
  {
    Thread thread = currentThread();
    Map locals = thread.locals;
    if (locals == null)
      {
        locals = thread.locals = new WeakIdentityHashMap();
      }
    return locals;
  }

  /** 
   * Assigns the given <code>UncaughtExceptionHandler</code> to this
   * thread.  This will then be called if the thread terminates due
   * to an uncaught exception, pre-empting that of the
   * <code>ThreadGroup</code>.
   *
   * @param h the handler to use for this thread.
   * @throws SecurityException if the current thread can't modify this thread.
   * @since 1.5 
   */
  public void setUncaughtExceptionHandler(UncaughtExceptionHandler h)
  {
    SecurityManager sm = SecurityManager.current; // Be thread-safe.
    if (sm != null)
      sm.checkAccess(this);    
    exceptionHandler = h;
  }

  /** 
   * <p>
   * Returns the handler used when this thread terminates due to an
   * uncaught exception.  The handler used is determined by the following:
   * </p>
   * <ul>
   * <li>If this thread has its own handler, this is returned.</li>
   * <li>If not, then the handler of the thread's <code>ThreadGroup</code>
   * object is returned.</li>
   * <li>If both are unavailable, then <code>null</code> is returned
   *     (which can only happen once the thread has terminated, because it
   *      then no longer has an associated thread group).</li>
   * </ul>
   * 
   * @return the appropriate <code>UncaughtExceptionHandler</code> or
   *         <code>null</code> if one can't be obtained.
   * @since 1.5 
   */
  public UncaughtExceptionHandler getUncaughtExceptionHandler()
  {
    return exceptionHandler != null ? exceptionHandler : group;
  }

  /** 
   * <p>
   * Sets the default uncaught exception handler used when one isn't
   * provided by the thread or its associated <code>ThreadGroup</code>.
   * This exception handler is used when the thread itself does not
   * have an exception handler, and the thread's <code>ThreadGroup</code>
   * does not override this default mechanism with its own.  As the group
   * calls this handler by default, this exception handler should not defer
   * to that of the group, as it may lead to infinite recursion.
   * </p>
   * <p>
   * Uncaught exception handlers are used when a thread terminates due to
   * an uncaught exception.  Replacing this handler allows default code to
   * be put in place for all threads in order to handle this eventuality.
   * </p>
   *
   * @param h the new default uncaught exception handler to use.
   * @throws SecurityException if a security manager is present and
   *                           disallows the runtime permission
   *                           "setDefaultUncaughtExceptionHandler".
   * @since 1.5 
   */
  public static void 
    setDefaultUncaughtExceptionHandler(UncaughtExceptionHandler h)
  {
    SecurityManager sm = SecurityManager.current; // Be thread-safe.
    if (sm != null)
      sm.checkPermission(new RuntimePermission("setDefaultUncaughtExceptionHandler"));    
    defaultHandler = h;
  }

  /** 
   * Returns the handler used by default when a thread terminates
   * unexpectedly due to an exception, or <code>null</code> if one doesn't
   * exist.
   *
   * @return the default uncaught exception handler.
   * @since 1.5 
   */
  public static UncaughtExceptionHandler getDefaultUncaughtExceptionHandler()
  {
    return defaultHandler;
  }
  
  /** 
   * Returns the unique identifier for this thread.  This ID is generated
   * on thread creation, and may be reused after the thread dies.
   *
   * @return a positive long number representing the thread's ID.
   * @since 1.5 
   */
  public long getId()
  {
    return threadId;
  }

  /**
   * <p>
   * This interface is used to handle uncaught exceptions
   * which cause a <code>Thread</code> to terminate.  When
   * a thread, t, is about to terminate due to an uncaught
   * exception, the virtual machine looks for a class which
   * implements this interface, in order to supply it with
   * the dying thread and its uncaught exception.
   * </p>
   * <p>
   * The virtual machine makes two attempts to find an
   * appropriate handler for the uncaught exception, in
   * the following order:
   * </p>
   * <ol>
   * <li>
   * <code>t.getUncaughtExceptionHandler()</code> --
   * the dying thread is queried first for a handler
   * specific to that thread.
   * </li>
   * <li>
   * <code>t.getThreadGroup()</code> --
   * the thread group of the dying thread is used to
   * handle the exception.  If the thread group has
   * no special requirements for handling the exception,
   * it may simply forward it on to
   * <code>Thread.getDefaultUncaughtExceptionHandler()</code>,
   * the default handler, which is used as a last resort.
   * </li>
   * </ol>
   * <p>
   * The first handler found is the one used to handle
   * the uncaught exception.
   * </p>
   *
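   * <p>
   * For example, a process-wide logging handler could be installed as
   * follows (an illustrative sketch):
   * </p>
   * <blockquote><pre>
   *     Thread.setDefaultUncaughtExceptionHandler(
   *         new Thread.UncaughtExceptionHandler() {
   *             public void uncaughtException(Thread thr, Throwable exc) {
   *                 System.err.println("Thread " + thr.getName()
   *                                    + " died with " + exc);
   *             }
   *         });
   * </pre></blockquote>
   *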
   * @author Tom Tromey <tromey@redhat.com>
   * @author Andrew John Hughes <gnu_andrew@member.fsf.org>
   * @since 1.5
   * @see Thread#getUncaughtExceptionHandler()
   * @see Thread#setUncaughtExceptionHandler(java.lang.Thread.UncaughtExceptionHandler)
   * @see Thread#getDefaultUncaughtExceptionHandler()
   * @see
   * Thread#setDefaultUncaughtExceptionHandler(java.lang.Thread.UncaughtExceptionHandler)
   */
  public interface UncaughtExceptionHandler
  {
    /**
     * Invoked by the virtual machine with the dying thread
     * and the uncaught exception.  Any exceptions thrown
     * by this method are simply ignored by the virtual
     * machine.
     *
     * @param thr the dying thread.
     * @param exc the uncaught exception.
     */
    void uncaughtException(Thread thr, Throwable exc);
  }
}
/* Convert function calls to rtl insns, for GNU C compiler.
   Copyright (C) 1989-2022 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "memmodel.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "varasm.h"
#include "internal-fn.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "expr.h"
#include "output.h"
#include "langhooks.h"
#include "except.h"
#include "dbgcnt.h"
#include "rtl-iter.h"
#include "tree-vrp.h"
#include "tree-ssanames.h"
#include "intl.h"
#include "stringpool.h"
#include "hash-map.h"
#include "hash-traits.h"
#include "attribs.h"
#include "builtins.h"
#include "gimple-fold.h"
#include "attr-fnspec.h"
#include "value-query.h"
#include "tree-pretty-print.h"

/* Like PREFERRED_STACK_BOUNDARY but in units of bytes, not bits.  */
#define STACK_BYTES (PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)

/* Data structure and subroutines used within expand_call.  */

struct arg_data
{
  /* Tree node for this argument.  */
  tree tree_value;
  /* Mode for value; TYPE_MODE unless promoted.  */
  machine_mode mode;
  /* Current RTL value for argument, or 0 if it isn't precomputed.  */
  rtx value;
  /* Initially-computed RTL value for argument; only for const functions.  */
  rtx initial_value;
  /* Register to pass this argument in, 0 if passed on stack, or a
     PARALLEL if the arg is to be copied into multiple non-contiguous
     registers.  */
  rtx reg;
  /* Register to pass this argument in when generating tail call sequence.
     This is not the same register as for normal calls on machines with
     register windows.  */
  rtx tail_call_reg;
  /* If REG is a PARALLEL, this is a copy of VALUE pulled into the correct
     form for emit_group_move.  */
  rtx parallel_value;
  /* If REG was promoted from the actual mode of the argument expression,
     indicates whether the promotion is sign- or zero-extended.  */
  int unsignedp;
  /* Number of bytes to put in registers.  0 means put the whole arg
     in registers.  Also 0 if not passed in registers.  */
  int partial;
  /* Nonzero if argument must be passed on stack.
     Note that some arguments may be passed on the stack
     even though pass_on_stack is zero, just because FUNCTION_ARG says so.
     pass_on_stack identifies arguments that *cannot* go in registers.  */
  int pass_on_stack;
  /* Some fields packaged up for locate_and_pad_parm.  */
  struct locate_and_pad_arg_data locate;
  /* Location on the stack at which parameter should be stored.  The store
     has already been done if STACK == VALUE.  */
  rtx stack;
  /* Location on the stack of the start of this argument slot.  This can
     differ from STACK if this arg pads downward.  This location is known
     to be aligned to TARGET_FUNCTION_ARG_BOUNDARY.  */
  rtx stack_slot;
  /* Place that this stack area has been saved, if needed.  */
  rtx save_area;
  /* If an argument's alignment does not permit direct copying into registers,
     copy in smaller-sized pieces into pseudos.  These are stored in a
     block pointed to by this field.  The next field says how many
     word-sized pseudos we made.  */
  rtx *aligned_regs;
  int n_aligned_regs;
};

/* A vector of one char per byte of stack space.  A byte is nonzero if
   the corresponding stack location has been used.
   This vector is used to prevent a function call within an argument from
   clobbering any stack already set up.  */
static char *stack_usage_map;

/* Size of STACK_USAGE_MAP.  */
static unsigned int highest_outgoing_arg_in_use;

/* Assume that any stack location at this byte index is used,
   without checking the contents of stack_usage_map.  */
static unsigned HOST_WIDE_INT stack_usage_watermark = HOST_WIDE_INT_M1U;

/* A bitmap of virtual-incoming stack space.  A bit is set if the corresponding
   stack location's tail call argument has already been stored into the stack.
   This bitmap is used to prevent sibling call optimization if the function
   tries to use the parent's incoming argument slots when they have already
   been overwritten with tail call arguments.  */
static sbitmap stored_args_map;

/* Assume that any virtual-incoming location at this byte index has been
   stored, without checking the contents of stored_args_map.  */
static unsigned HOST_WIDE_INT stored_args_watermark;

/* stack_arg_under_construction is nonzero when an argument may be
   initialized with a constructor call (including a C function that
   returns a BLKmode struct) and expand_call must take special action
   to make sure the object being constructed does not overlap the
   argument list for the constructor call.  */
static int stack_arg_under_construction;

static void precompute_register_parameters (int, struct arg_data *, int *);
static int store_one_arg (struct arg_data *, rtx, int, int, int);
static void store_unaligned_arguments_into_pseudos (struct arg_data *, int);
static int finalize_must_preallocate (int, int, struct arg_data *,
				      struct args_size *);
static void precompute_arguments (int, struct arg_data *);
static void compute_argument_addresses (struct arg_data *, rtx, int);
static rtx rtx_for_function_call (tree, tree);
static void load_register_parameters (struct arg_data *, int, rtx *, int,
				      int, int *);
static int special_function_p (const_tree, int);
static int check_sibcall_argument_overlap_1 (rtx);
static int check_sibcall_argument_overlap (rtx_insn *, struct arg_data *, int);

static tree split_complex_types (tree);

#ifdef REG_PARM_STACK_SPACE
static rtx save_fixed_argument_area (int, rtx, int *, int *);
static void restore_fixed_argument_area (rtx, rtx, int, int);
#endif

/* Return true if bytes [LOWER_BOUND, UPPER_BOUND) of the outgoing
   stack region might already be in use.  */

static bool
stack_region_maybe_used_p (poly_uint64 lower_bound, poly_uint64 upper_bound,
			   unsigned int reg_parm_stack_space)
{
  unsigned HOST_WIDE_INT const_lower, const_upper;
  const_lower = constant_lower_bound (lower_bound);
  if (!upper_bound.is_constant (&const_upper))
    const_upper = HOST_WIDE_INT_M1U;

  if (const_upper > stack_usage_watermark)
    return true;

  /* Don't worry about things in the fixed argument area;
     it has already been saved.  */
  const_lower = MAX (const_lower, reg_parm_stack_space);
  const_upper = MIN (const_upper, highest_outgoing_arg_in_use);
  for (unsigned HOST_WIDE_INT i = const_lower; i < const_upper; ++i)
    if (stack_usage_map[i])
      return true;
  return false;
}

/* Record that bytes [LOWER_BOUND, UPPER_BOUND) of the outgoing
   stack region are now in use.  */

static void
mark_stack_region_used (poly_uint64 lower_bound, poly_uint64 upper_bound)
{
  unsigned HOST_WIDE_INT const_lower, const_upper;
  const_lower = constant_lower_bound (lower_bound);
  if (upper_bound.is_constant (&const_upper)
      && const_upper <= highest_outgoing_arg_in_use)
    for (unsigned HOST_WIDE_INT i = const_lower; i < const_upper; ++i)
      stack_usage_map[i] = 1;
  else
    stack_usage_watermark = MIN (stack_usage_watermark, const_lower);
}
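
/* A rough sketch of how the two routines above cooperate: when the bounds
   are compile-time constants inside the tracked area,
   mark_stack_region_used sets individual bytes of stack_usage_map and
   stack_region_maybe_used_p later scans them; when a bound is not a
   compile-time constant, mark_stack_region_used instead lowers
   stack_usage_watermark, and every byte at or above the watermark is
   conservatively assumed to be in use.  */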

/* Force FUNEXP into a form suitable for the address of a CALL,
   and return that as an rtx.  Also load the static chain register
   if FNDECL is a nested function.

   CALL_FUSAGE points to a variable holding the prospective
   CALL_INSN_FUNCTION_USAGE information.  */

rtx
prepare_call_address (tree fndecl_or_type, rtx funexp, rtx static_chain_value,
		      rtx *call_fusage, int reg_parm_seen, int flags)
{
  /* Make a valid memory address and copy constants through pseudo-regs,
     but not for a constant address if -fno-function-cse.  */
  if (GET_CODE (funexp) != SYMBOL_REF)
    {
      /* If it's an indirect call by descriptor, generate code to perform
	 runtime identification of the pointer and load the descriptor.  */
      if ((flags & ECF_BY_DESCRIPTOR) && !flag_trampolines)
	{
	  const int bit_val = targetm.calls.custom_function_descriptors;
	  rtx call_lab = gen_label_rtx ();

	  gcc_assert (fndecl_or_type && TYPE_P (fndecl_or_type));
	  fndecl_or_type
	    = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL, NULL_TREE,
			  fndecl_or_type);
	  DECL_STATIC_CHAIN (fndecl_or_type) = 1;
	  rtx chain = targetm.calls.static_chain (fndecl_or_type, false);

	  if (GET_MODE (funexp) != Pmode)
	    funexp = convert_memory_address (Pmode, funexp);

	  /* Avoid long live ranges around function calls.  */
	  funexp = copy_to_mode_reg (Pmode, funexp);

	  if (REG_P (chain))
	    emit_insn (gen_rtx_CLOBBER (VOIDmode, chain));

	  /* Emit the runtime identification pattern.  */
	  rtx mask = gen_rtx_AND (Pmode, funexp, GEN_INT (bit_val));
	  emit_cmp_and_jump_insns (mask, const0_rtx, EQ, NULL_RTX, Pmode, 1,
				   call_lab);

	  /* Statically predict the branch as very likely taken.  */
	  rtx_insn *insn = get_last_insn ();
	  if (JUMP_P (insn))
	    predict_insn_def (insn, PRED_BUILTIN_EXPECT, TAKEN);

	  /* Load the descriptor.  */
	  rtx mem = gen_rtx_MEM (ptr_mode,
				 plus_constant (Pmode, funexp, - bit_val));
	  MEM_NOTRAP_P (mem) = 1;
	  mem = convert_memory_address (Pmode, mem);
	  emit_move_insn (chain, mem);

	  mem = gen_rtx_MEM (ptr_mode,
			     plus_constant (Pmode, funexp,
					    POINTER_SIZE / BITS_PER_UNIT
					      - bit_val));
	  MEM_NOTRAP_P (mem) = 1;
	  mem = convert_memory_address (Pmode, mem);
	  emit_move_insn (funexp, mem);

	  emit_label (call_lab);

	  if (REG_P (chain))
	    {
	      use_reg (call_fusage, chain);
	      STATIC_CHAIN_REG_P (chain) = 1;
	    }

	  /* Make sure we're not going to be overwritten below.  */
	  gcc_assert (!static_chain_value);
	}

      /* If we are using registers for parameters, force the
	 function address into a register now.  */
      funexp = ((reg_parm_seen
		 && targetm.small_register_classes_for_mode_p (FUNCTION_MODE))
		 ? force_not_mem (memory_address (FUNCTION_MODE, funexp))
		 : memory_address (FUNCTION_MODE, funexp));
    }
  else
    {
      /* funexp could be a SYMBOL_REF that represents a function pointer of
	 ptr_mode.  In that case, it should be converted into address mode
	 (Pmode) to be a valid address for the memory rtx pattern.  See
	 PR 64971.  */
      if (GET_MODE (funexp) != Pmode)
	funexp = convert_memory_address (Pmode, funexp);

      if (!(flags & ECF_SIBCALL))
	{
	  if (!NO_FUNCTION_CSE && optimize && ! flag_no_function_cse)
	    funexp = force_reg (Pmode, funexp);
	}
    }

  if (static_chain_value != 0
      && (TREE_CODE (fndecl_or_type) != FUNCTION_DECL
	  || DECL_STATIC_CHAIN (fndecl_or_type)))
    {
      rtx chain;

      chain = targetm.calls.static_chain (fndecl_or_type, false);
      static_chain_value = convert_memory_address (Pmode, static_chain_value);

      emit_move_insn (chain, static_chain_value);
      if (REG_P (chain))
	{
	  use_reg (call_fusage, chain);
	  STATIC_CHAIN_REG_P (chain) = 1;
	}
    }

  return funexp;
}
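
/* Roughly how this is used by expand_call: the callee address is first
   obtained with rtx_for_function_call (below), then passed through
   prepare_call_address to legitimize it and load any static chain, and
   the result is finally handed to emit_call_1 together with the
   accumulated CALL_FUSAGE list.  */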

/* Generate instructions to call function FUNEXP,
   and optionally pop the results.
   The CALL_INSN is the first insn generated.

   FNDECL is the declaration node of the function.  This is given to the
   hook TARGET_RETURN_POPS_ARGS to determine whether this function pops
   its own args.

   FUNTYPE is the data type of the function.  This is given to the hook
   TARGET_RETURN_POPS_ARGS to determine whether this function pops its
   own args.  We used to allow an identifier for library functions, but
   that doesn't work when the return type is an aggregate type and the
   calling convention says that the pointer to this aggregate is to be
   popped by the callee.

   STACK_SIZE is the number of bytes of arguments on the stack,
   ROUNDED_STACK_SIZE is that number rounded up to
   PREFERRED_STACK_BOUNDARY; zero if the size is variable.  This is
   both to put into the call insn and to generate explicit popping
   code if necessary.

   STRUCT_VALUE_SIZE is the number of bytes wanted in a structure value.
   It is zero if this call doesn't want a structure value.

   NEXT_ARG_REG is the rtx that results from executing
     targetm.calls.function_arg (&args_so_far,
				 function_arg_info::end_marker ());
   just after all the args have had their registers assigned.
   This could be whatever you like, but normally it is the first
   arg-register beyond those used for args in this call,
   or 0 if all the arg-registers are used in this call.
   It is passed on to `gen_call' so you can put this info in the call insn.

   VALREG is a hard register in which a value is returned,
   or 0 if the call does not return a value.

   OLD_INHIBIT_DEFER_POP is the value that `inhibit_defer_pop' had before
   the args to this call were processed.
   We restore `inhibit_defer_pop' to that value.

   CALL_FUSAGE is either empty or an EXPR_LIST of USE expressions that
   denote registers used by the called function.  */

static void
emit_call_1 (rtx funexp, tree fntree ATTRIBUTE_UNUSED, tree fndecl ATTRIBUTE_UNUSED,
	     tree funtype ATTRIBUTE_UNUSED,
	     poly_int64 stack_size ATTRIBUTE_UNUSED,
	     poly_int64 rounded_stack_size,
	     poly_int64 struct_value_size ATTRIBUTE_UNUSED,
	     rtx next_arg_reg ATTRIBUTE_UNUSED, rtx valreg,
	     int old_inhibit_defer_pop, rtx call_fusage, int ecf_flags,
	     cumulative_args_t args_so_far ATTRIBUTE_UNUSED)
{
  rtx rounded_stack_size_rtx = gen_int_mode (rounded_stack_size, Pmode);
  rtx call, funmem, pat;
  int already_popped = 0;
  poly_int64 n_popped = 0;

  /* Sibling call patterns never pop arguments (no sibcall(_value)_pop
     patterns exist).  Any popping that the callee does on return will
     be from our caller's frame rather than ours.  */
  if (!(ecf_flags & ECF_SIBCALL))
    {
      n_popped += targetm.calls.return_pops_args (fndecl, funtype, stack_size);

#ifdef CALL_POPS_ARGS
      n_popped += CALL_POPS_ARGS (*get_cumulative_args (args_so_far));
#endif
    }

  /* Ensure address is valid.  SYMBOL_REF is already valid, so no need,
     and we don't want to load it into a register as an optimization,
     because prepare_call_address already did it if it should be done.  */
  if (GET_CODE (funexp) != SYMBOL_REF)
    funexp = memory_address (FUNCTION_MODE, funexp);

  funmem = gen_rtx_MEM (FUNCTION_MODE, funexp);
  if (fndecl && TREE_CODE (fndecl) == FUNCTION_DECL)
    {
      tree t = fndecl;

      /* Although a built-in FUNCTION_DECL and its non-__builtin
	 counterpart compare equal and get a shared mem_attrs, they
	 produce different dump output in compare-debug compilations:
	 one compilation may garbage-collect an entry and then add a
	 different (but equivalent) entry, while the other doesn't run
	 the garbage collector at the same spot and so keeps sharing
	 the mem_attrs with the equivalent entry.  */
      if (DECL_BUILT_IN_CLASS (t) == BUILT_IN_NORMAL)
	{
	  tree t2 = builtin_decl_explicit (DECL_FUNCTION_CODE (t));
	  if (t2)
	    t = t2;
	}

      set_mem_expr (funmem, t);
    }
  else if (fntree)
    set_mem_expr (funmem, build_simple_mem_ref (CALL_EXPR_FN (fntree)));

  if (ecf_flags & ECF_SIBCALL)
    {
      if (valreg)
	pat = targetm.gen_sibcall_value (valreg, funmem,
					 rounded_stack_size_rtx,
					 next_arg_reg, NULL_RTX);
      else
	pat = targetm.gen_sibcall (funmem, rounded_stack_size_rtx,
				   next_arg_reg,
				   gen_int_mode (struct_value_size, Pmode));
    }
  /* If the target has "call" or "call_value" insns, then prefer them
     if no arguments are actually popped.  If the target does not have
     "call" or "call_value" insns, then we must use the popping versions
     even if the call has no arguments to pop.  */
  else if (maybe_ne (n_popped, 0)
	   || !(valreg
		? targetm.have_call_value ()
		: targetm.have_call ()))
    {
      rtx n_pop = gen_int_mode (n_popped, Pmode);

      /* If this subroutine pops its own args, record that in the call insn
	 if possible, for the sake of frame pointer elimination.  */

      if (valreg)
	pat = targetm.gen_call_value_pop (valreg, funmem,
					  rounded_stack_size_rtx,
					  next_arg_reg, n_pop);
      else
	pat = targetm.gen_call_pop (funmem, rounded_stack_size_rtx,
				    next_arg_reg, n_pop);

      already_popped = 1;
    }
  else
    {
      if (valreg)
	pat = targetm.gen_call_value (valreg, funmem, rounded_stack_size_rtx,
				      next_arg_reg, NULL_RTX);
      else
	pat = targetm.gen_call (funmem, rounded_stack_size_rtx, next_arg_reg,
				gen_int_mode (struct_value_size, Pmode));
    }
  emit_insn (pat);

  /* Find the call we just emitted.  */
  rtx_call_insn *call_insn = last_call_insn ();

  /* Some targets create a fresh MEM instead of reusing the one provided
     above.  Set its MEM_EXPR.  */
  call = get_call_rtx_from (call_insn);
  if (call
      && MEM_EXPR (XEXP (call, 0)) == NULL_TREE
      && MEM_EXPR (funmem) != NULL_TREE)
    set_mem_expr (XEXP (call, 0), MEM_EXPR (funmem));

  /* Put the register usage information there.  */
  add_function_usage_to (call_insn, call_fusage);

  /* If this is a const call, then set the insn's unchanging bit.  */
  if (ecf_flags & ECF_CONST)
    RTL_CONST_CALL_P (call_insn) = 1;

  /* If this is a pure call, then set the insn's unchanging bit.  */
  if (ecf_flags & ECF_PURE)
    RTL_PURE_CALL_P (call_insn) = 1;

  /* If this is a const or pure call that may loop forever, mark the insn
     accordingly.  */
  if (ecf_flags & ECF_LOOPING_CONST_OR_PURE)
    RTL_LOOPING_CONST_OR_PURE_CALL_P (call_insn) = 1;

  /* Create a nothrow REG_EH_REGION note, if needed.  */
  make_reg_eh_region_note (call_insn, ecf_flags, 0);

  if (ecf_flags & ECF_NORETURN)
    add_reg_note (call_insn, REG_NORETURN, const0_rtx);

  if (ecf_flags & ECF_RETURNS_TWICE)
    {
      add_reg_note (call_insn, REG_SETJMP, const0_rtx);
      cfun->calls_setjmp = 1;
    }

  SIBLING_CALL_P (call_insn) = ((ecf_flags & ECF_SIBCALL) != 0);

  /* Restore this now, so that we do defer pops for this call's args
     if the context of the call as a whole permits.  */
  inhibit_defer_pop = old_inhibit_defer_pop;

  if (maybe_ne (n_popped, 0))
    {
      if (!already_popped)
	CALL_INSN_FUNCTION_USAGE (call_insn)
	  = gen_rtx_EXPR_LIST (VOIDmode,
			       gen_rtx_CLOBBER (VOIDmode, stack_pointer_rtx),
			       CALL_INSN_FUNCTION_USAGE (call_insn));
      rounded_stack_size -= n_popped;
      rounded_stack_size_rtx = gen_int_mode (rounded_stack_size, Pmode);
      stack_pointer_delta -= n_popped;

      add_args_size_note (call_insn, stack_pointer_delta);

      /* If popping is needed, stack realignment must use DRAP.  */
      if (SUPPORTS_STACK_ALIGNMENT)
        crtl->need_drap = true;
    }
  /* For noreturn calls when not accumulating outgoing args force
     REG_ARGS_SIZE note to prevent crossjumping of calls with different
     args sizes.  */
  else if (!ACCUMULATE_OUTGOING_ARGS && (ecf_flags & ECF_NORETURN) != 0)
    add_args_size_note (call_insn, stack_pointer_delta);

  if (!ACCUMULATE_OUTGOING_ARGS)
    {
      /* If returning from the subroutine does not automatically pop the args,
	 we need an instruction to pop them sooner or later.
	 Perhaps do it now; perhaps just record how much space to pop later.

	 If returning from the subroutine does pop the args, indicate that the
	 stack pointer will be changed.  */

      if (maybe_ne (rounded_stack_size, 0))
	{
	  if (ecf_flags & ECF_NORETURN)
	    /* Just pretend we did the pop.  */
	    stack_pointer_delta -= rounded_stack_size;
	  else if (flag_defer_pop && inhibit_defer_pop == 0
	      && ! (ecf_flags & (ECF_CONST | ECF_PURE)))
	    pending_stack_adjust += rounded_stack_size;
	  else
	    adjust_stack (rounded_stack_size_rtx);
	}
    }
  /* When we accumulate outgoing args, we must avoid any stack manipulations.
     Restore the stack pointer to its original value now.  Usually
     ACCUMULATE_OUTGOING_ARGS targets don't get here, but there are exceptions.
     On i386, ACCUMULATE_OUTGOING_ARGS can be enabled on demand, and
     popping variants of functions exist as well.

     ??? We could optimize similarly to defer_pop above, but it is
     probably not worthwhile.

     ??? It will be worthwhile to enable combine_stack_adjustments even for
     such machines.  */
  else if (maybe_ne (n_popped, 0))
    anti_adjust_stack (gen_int_mode (n_popped, Pmode));
}
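
/* For example, on a target whose calling convention makes the callee pop
   its own arguments (such as x86 stdcall), targetm.calls.return_pops_args
   returns a nonzero byte count; emit_call_1 then uses the *_pop call
   patterns, reduces stack_pointer_delta by the popped amount and records
   the result in a REG_ARGS_SIZE note.  */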

/* Determine if the function identified by FNDECL is one with
   special properties we wish to know about.  Modify FLAGS accordingly.

   For example, if the function might return more than one time (setjmp), then
   set ECF_RETURNS_TWICE.

   Set ECF_MAY_BE_ALLOCA for any memory allocation function that might allocate
   space from the stack, such as alloca.  */

static int
special_function_p (const_tree fndecl, int flags)
{
  tree name_decl = DECL_NAME (fndecl);

  if (maybe_special_function_p (fndecl)
      && IDENTIFIER_LENGTH (name_decl) <= 11)
    {
      const char *name = IDENTIFIER_POINTER (name_decl);
      const char *tname = name;

      /* We assume that alloca will always be called by name.  It
	 makes no sense to pass it as a pointer-to-function to
	 anything that does not understand its behavior.  */
      if (IDENTIFIER_LENGTH (name_decl) == 6
	  && name[0] == 'a'
	  && ! strcmp (name, "alloca"))
	flags |= ECF_MAY_BE_ALLOCA;

      /* Disregard prefix _ or __.  */
      if (name[0] == '_')
	{
	  if (name[1] == '_')
	    tname += 2;
	  else
	    tname += 1;
	}

      /* ECF_RETURNS_TWICE is safe even for -ffreestanding.  */
      if (! strcmp (tname, "setjmp")
	  || ! strcmp (tname, "sigsetjmp")
	  || ! strcmp (name, "savectx")
	  || ! strcmp (name, "vfork")
	  || ! strcmp (name, "getcontext"))
	flags |= ECF_RETURNS_TWICE;
    }

  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
      && ALLOCA_FUNCTION_CODE_P (DECL_FUNCTION_CODE (fndecl)))
    flags |= ECF_MAY_BE_ALLOCA;

  return flags;
}
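
/* For example, because one or two leading underscores are stripped before
   the comparison, _setjmp and __sigsetjmp are recognized just like setjmp
   and sigsetjmp (provided they pass the quick length filter above) and get
   ECF_RETURNS_TWICE, whereas alloca is matched only by its exact name.  */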

/* Return fnspec for DECL.  */

static attr_fnspec
decl_fnspec (tree fndecl)
{
  tree attr;
  tree type = TREE_TYPE (fndecl);
  if (type)
    {
      attr = lookup_attribute ("fn spec", TYPE_ATTRIBUTES (type));
      if (attr)
	{
	  return TREE_VALUE (TREE_VALUE (attr));
	}
    }
  if (fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
    return builtin_fnspec (fndecl);
  return "";
}

/* Similar to special_function_p; return a set of ERF_ flags for the
   function FNDECL.  */
static int
decl_return_flags (tree fndecl)
{
  attr_fnspec fnspec = decl_fnspec (fndecl);

  unsigned int arg;
  if (fnspec.returns_arg (&arg))
    return ERF_RETURNS_ARG | arg;

  if (fnspec.returns_noalias_p ())
    return ERF_NOALIAS;
  return 0;
}
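
/* For example, a fnspec saying that the function returns its first
   argument (as memcpy-like builtins do) yields ERF_RETURNS_ARG combined
   with the index of that argument, while a fnspec marking the return
   value as a fresh, non-aliased allocation (as malloc-like builtins do)
   yields ERF_NOALIAS.  */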

/* Return nonzero when FNDECL represents a call to setjmp.  */

int
setjmp_call_p (const_tree fndecl)
{
  if (DECL_IS_RETURNS_TWICE (fndecl))
    return ECF_RETURNS_TWICE;
  return special_function_p (fndecl, 0) & ECF_RETURNS_TWICE;
}


/* Return true if STMT may be an alloca call.  */

bool
gimple_maybe_alloca_call_p (const gimple *stmt)
{
  tree fndecl;

  if (!is_gimple_call (stmt))
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && (special_function_p (fndecl, 0) & ECF_MAY_BE_ALLOCA))
    return true;

  return false;
}

/* Return true if STMT is a builtin alloca call.  */

bool
gimple_alloca_call_p (const gimple *stmt)
{
  tree fndecl;

  if (!is_gimple_call (stmt))
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      CASE_BUILT_IN_ALLOCA:
	return gimple_call_num_args (stmt) > 0;
      default:
	break;
      }

  return false;
}

/* Return true when EXP is a call to a built-in alloca function.  */

bool
alloca_call_p (const_tree exp)
{
  tree fndecl;
  if (TREE_CODE (exp) == CALL_EXPR
      && (fndecl = get_callee_fndecl (exp))
      && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      CASE_BUILT_IN_ALLOCA:
        return true;
      default:
	break;
      }

  return false;
}

/* Return TRUE if FNDECL is either a TM builtin or a TM cloned
   function.  Return FALSE otherwise.  */

static bool
is_tm_builtin (const_tree fndecl)
{
  if (fndecl == NULL)
    return false;

  if (decl_is_tm_clone (fndecl))
    return true;

  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      switch (DECL_FUNCTION_CODE (fndecl))
	{
	case BUILT_IN_TM_COMMIT:
	case BUILT_IN_TM_COMMIT_EH:
	case BUILT_IN_TM_ABORT:
	case BUILT_IN_TM_IRREVOCABLE:
	case BUILT_IN_TM_GETTMCLONE_IRR:
	case BUILT_IN_TM_MEMCPY:
	case BUILT_IN_TM_MEMMOVE:
	case BUILT_IN_TM_MEMSET:
	CASE_BUILT_IN_TM_STORE (1):
	CASE_BUILT_IN_TM_STORE (2):
	CASE_BUILT_IN_TM_STORE (4):
	CASE_BUILT_IN_TM_STORE (8):
	CASE_BUILT_IN_TM_STORE (FLOAT):
	CASE_BUILT_IN_TM_STORE (DOUBLE):
	CASE_BUILT_IN_TM_STORE (LDOUBLE):
	CASE_BUILT_IN_TM_STORE (M64):
	CASE_BUILT_IN_TM_STORE (M128):
	CASE_BUILT_IN_TM_STORE (M256):
	CASE_BUILT_IN_TM_LOAD (1):
	CASE_BUILT_IN_TM_LOAD (2):
	CASE_BUILT_IN_TM_LOAD (4):
	CASE_BUILT_IN_TM_LOAD (8):
	CASE_BUILT_IN_TM_LOAD (FLOAT):
	CASE_BUILT_IN_TM_LOAD (DOUBLE):
	CASE_BUILT_IN_TM_LOAD (LDOUBLE):
	CASE_BUILT_IN_TM_LOAD (M64):
	CASE_BUILT_IN_TM_LOAD (M128):
	CASE_BUILT_IN_TM_LOAD (M256):
	case BUILT_IN_TM_LOG:
	case BUILT_IN_TM_LOG_1:
	case BUILT_IN_TM_LOG_2:
	case BUILT_IN_TM_LOG_4:
	case BUILT_IN_TM_LOG_8:
	case BUILT_IN_TM_LOG_FLOAT:
	case BUILT_IN_TM_LOG_DOUBLE:
	case BUILT_IN_TM_LOG_LDOUBLE:
	case BUILT_IN_TM_LOG_M64:
	case BUILT_IN_TM_LOG_M128:
	case BUILT_IN_TM_LOG_M256:
	  return true;
	default:
	  break;
	}
    }
  return false;
}

/* Detect flags (function attributes) from the function decl or type node.  */

int
flags_from_decl_or_type (const_tree exp)
{
  int flags = 0;

  if (DECL_P (exp))
    {
      /* The function exp may have the `malloc' attribute.  */
      if (DECL_IS_MALLOC (exp))
	flags |= ECF_MALLOC;

      /* The function exp may have the `returns_twice' attribute.  */
      if (DECL_IS_RETURNS_TWICE (exp))
	flags |= ECF_RETURNS_TWICE;

      /* Process the pure and const attributes.  */
      if (TREE_READONLY (exp))
	flags |= ECF_CONST;
      if (DECL_PURE_P (exp))
	flags |= ECF_PURE;
      if (DECL_LOOPING_CONST_OR_PURE_P (exp))
	flags |= ECF_LOOPING_CONST_OR_PURE;

      if (DECL_IS_NOVOPS (exp))
	flags |= ECF_NOVOPS;
      if (lookup_attribute ("leaf", DECL_ATTRIBUTES (exp)))
	flags |= ECF_LEAF;
      if (lookup_attribute ("cold", DECL_ATTRIBUTES (exp)))
	flags |= ECF_COLD;

      if (TREE_NOTHROW (exp))
	flags |= ECF_NOTHROW;

      if (flag_tm)
	{
	  if (is_tm_builtin (exp))
	    flags |= ECF_TM_BUILTIN;
	  else if ((flags & (ECF_CONST|ECF_NOVOPS)) != 0
		   || lookup_attribute ("transaction_pure",
					TYPE_ATTRIBUTES (TREE_TYPE (exp))))
	    flags |= ECF_TM_PURE;
	}

      flags = special_function_p (exp, flags);
    }
  else if (TYPE_P (exp))
    {
      if (TYPE_READONLY (exp))
	flags |= ECF_CONST;

      if (flag_tm
	  && ((flags & ECF_CONST) != 0
	      || lookup_attribute ("transaction_pure", TYPE_ATTRIBUTES (exp))))
	flags |= ECF_TM_PURE;
    }
  else
    gcc_unreachable ();

  if (TREE_THIS_VOLATILE (exp))
    {
      flags |= ECF_NORETURN;
      if (flags & (ECF_CONST|ECF_PURE))
	flags |= ECF_LOOPING_CONST_OR_PURE;
    }

  return flags;
}
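
/* For example, a function declared with both the const and noreturn
   attributes ends up with ECF_CONST, ECF_NORETURN and
   ECF_LOOPING_CONST_OR_PURE: noreturn is reflected in TREE_THIS_VOLATILE,
   and a const call that never returns has to be treated as potentially
   looping forever.  */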

/* Detect flags from a CALL_EXPR.  */

int
call_expr_flags (const_tree t)
{
  int flags;
  tree decl = get_callee_fndecl (t);

  if (decl)
    flags = flags_from_decl_or_type (decl);
  else if (CALL_EXPR_FN (t) == NULL_TREE)
    flags = internal_fn_flags (CALL_EXPR_IFN (t));
  else
    {
      tree type = TREE_TYPE (CALL_EXPR_FN (t));
      if (type && TREE_CODE (type) == POINTER_TYPE)
	flags = flags_from_decl_or_type (TREE_TYPE (type));
      else
	flags = 0;
      if (CALL_EXPR_BY_DESCRIPTOR (t))
	flags |= ECF_BY_DESCRIPTOR;
    }

  return flags;
}

/* Return true if ARG should be passed by invisible reference.  */

bool
pass_by_reference (CUMULATIVE_ARGS *ca, function_arg_info arg)
{
  if (tree type = arg.type)
    {
      /* If this type contains non-trivial constructors, then it is
	 forbidden for the middle-end to create any new copies.  */
      if (TREE_ADDRESSABLE (type))
	return true;

      /* GCC post 3.4 passes *all* variable sized types by reference.  */
      if (!TYPE_SIZE (type) || !poly_int_tree_p (TYPE_SIZE (type)))
	return true;

      /* If a record type should be passed the same as its first (and only)
	 member, use the type and mode of that member.  */
      if (TREE_CODE (type) == RECORD_TYPE && TYPE_TRANSPARENT_AGGR (type))
	{
	  arg.type = TREE_TYPE (first_field (type));
	  arg.mode = TYPE_MODE (arg.type);
	}
    }

  return targetm.calls.pass_by_reference (pack_cumulative_args (ca), arg);
}
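
/* For example, a C++ class type with a non-trivial copy constructor or
   destructor is marked TREE_ADDRESSABLE, so it is always passed by
   reference here regardless of the target hook; the same holds for
   variable-sized types.  */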

/* Return true if TYPE should be passed by reference when passed to
   the "..." arguments of a function.  */

bool
pass_va_arg_by_reference (tree type)
{
  return pass_by_reference (NULL, function_arg_info (type, /*named=*/false));
}

/* Decide whether ARG, which occurs in the state described by CA,
   should be passed by reference.  Return true if so and update
   ARG accordingly.  */

bool
apply_pass_by_reference_rules (CUMULATIVE_ARGS *ca, function_arg_info &arg)
{
  if (pass_by_reference (ca, arg))
    {
      arg.type = build_pointer_type (arg.type);
      arg.mode = TYPE_MODE (arg.type);
      arg.pass_by_reference = true;
      return true;
    }
  return false;
}

/* Return true if ARG, which is passed by reference, should be callee
   copied instead of caller copied.  */

bool
reference_callee_copied (CUMULATIVE_ARGS *ca, const function_arg_info &arg)
{
  if (arg.type && TREE_ADDRESSABLE (arg.type))
    return false;
  return targetm.calls.callee_copies (pack_cumulative_args (ca), arg);
}


/* Precompute all register parameters as described by ARGS, storing values
   into fields within the ARGS array.

   NUM_ACTUALS indicates the total number of elements in the ARGS array.

   Set REG_PARM_SEEN if we encounter a register parameter.  */

static void
precompute_register_parameters (int num_actuals, struct arg_data *args,
				int *reg_parm_seen)
{
  int i;

  *reg_parm_seen = 0;

  for (i = 0; i < num_actuals; i++)
    if (args[i].reg != 0 && ! args[i].pass_on_stack)
      {
	*reg_parm_seen = 1;

	if (args[i].value == 0)
	  {
	    push_temp_slots ();
	    args[i].value = expand_normal (args[i].tree_value);
	    preserve_temp_slots (args[i].value);
	    pop_temp_slots ();
	  }

	/* If we are to promote the function arg to a wider mode,
	   do it now.  */

	if (args[i].mode != TYPE_MODE (TREE_TYPE (args[i].tree_value)))
	  args[i].value
	    = convert_modes (args[i].mode,
			     TYPE_MODE (TREE_TYPE (args[i].tree_value)),
			     args[i].value, args[i].unsignedp);

	/* If the value is a non-legitimate constant, force it into a
	   pseudo now.  TLS symbols sometimes need a call to resolve.  */
	if (CONSTANT_P (args[i].value)
	    && (!targetm.legitimate_constant_p (args[i].mode, args[i].value)
		|| targetm.precompute_tls_p (args[i].mode, args[i].value)))
	  args[i].value = force_reg (args[i].mode, args[i].value);

	/* If we're going to have to load the value by parts, pull the
	   parts into pseudos.  The part extraction process can involve
	   non-trivial computation.  */
	if (GET_CODE (args[i].reg) == PARALLEL)
	  {
	    tree type = TREE_TYPE (args[i].tree_value);
	    args[i].parallel_value
	      = emit_group_load_into_temps (args[i].reg, args[i].value,
					    type, int_size_in_bytes (type));
	  }

	/* If the value is expensive, and we are inside an appropriately
	   short loop, put the value into a pseudo and then put the pseudo
	   into the hard reg.

	   For small register classes, also do this if this call uses
	   register parameters.  This is to avoid reload conflicts while
	   loading the parameter registers.  */

	else if ((! (REG_P (args[i].value)
		     || (GET_CODE (args[i].value) == SUBREG
			 && REG_P (SUBREG_REG (args[i].value)))))
		 && args[i].mode != BLKmode
		 && (set_src_cost (args[i].value, args[i].mode,
				   optimize_insn_for_speed_p ())
		     > COSTS_N_INSNS (1))
		 && ((*reg_parm_seen
		      && targetm.small_register_classes_for_mode_p (args[i].mode))
		     || optimize))
	  args[i].value = copy_to_mode_reg (args[i].mode, args[i].value);
      }
}

#ifdef REG_PARM_STACK_SPACE

  /* The argument list is the property of the called routine, which
     may clobber it.  If the fixed area has been used for previous
     parameters, we must save and restore it.  */

static rtx
save_fixed_argument_area (int reg_parm_stack_space, rtx argblock, int *low_to_save, int *high_to_save)
{
  unsigned int low;
  unsigned int high;

  /* Compute the boundary of the area that needs to be saved, if any.  */
  high = reg_parm_stack_space;
  if (ARGS_GROW_DOWNWARD)
    high += 1;

  if (high > highest_outgoing_arg_in_use)
    high = highest_outgoing_arg_in_use;

  for (low = 0; low < high; low++)
    if (stack_usage_map[low] != 0 || low >= stack_usage_watermark)
      {
	int num_to_save;
	machine_mode save_mode;
	int delta;
	rtx addr;
	rtx stack_area;
	rtx save_area;

	while (stack_usage_map[--high] == 0)
	  ;

	*low_to_save = low;
	*high_to_save = high;

	num_to_save = high - low + 1;

	/* If we don't have the required alignment, must do this
	   in BLKmode.  */
	scalar_int_mode imode;
	if (int_mode_for_size (num_to_save * BITS_PER_UNIT, 1).exists (&imode)
	    && (low & (MIN (GET_MODE_SIZE (imode),
			    BIGGEST_ALIGNMENT / UNITS_PER_WORD) - 1)) == 0)
	  save_mode = imode;
	else
	  save_mode = BLKmode;

	if (ARGS_GROW_DOWNWARD)
	  delta = -high;
	else
	  delta = low;

	addr = plus_constant (Pmode, argblock, delta);
	stack_area = gen_rtx_MEM (save_mode, memory_address (save_mode, addr));

	set_mem_align (stack_area, PARM_BOUNDARY);
	if (save_mode == BLKmode)
	  {
	    save_area = assign_stack_temp (BLKmode, num_to_save);
	    emit_block_move (validize_mem (save_area), stack_area,
			     GEN_INT (num_to_save), BLOCK_OP_CALL_PARM);
	  }
	else
	  {
	    save_area = gen_reg_rtx (save_mode);
	    emit_move_insn (save_area, stack_area);
	  }

	return save_area;
      }

  return NULL_RTX;
}

static void
restore_fixed_argument_area (rtx save_area, rtx argblock, int high_to_save, int low_to_save)
{
  machine_mode save_mode = GET_MODE (save_area);
  int delta;
  rtx addr, stack_area;

  if (ARGS_GROW_DOWNWARD)
    delta = -high_to_save;
  else
    delta = low_to_save;

  addr = plus_constant (Pmode, argblock, delta);
  stack_area = gen_rtx_MEM (save_mode, memory_address (save_mode, addr));
  set_mem_align (stack_area, PARM_BOUNDARY);

  if (save_mode != BLKmode)
    emit_move_insn (stack_area, save_area);
  else
    emit_block_move (stack_area, validize_mem (save_area),
		     GEN_INT (high_to_save - low_to_save + 1),
		     BLOCK_OP_CALL_PARM);
}
#endif /* REG_PARM_STACK_SPACE */
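
/* Sketch of the intended pairing, as used by expand_call: when the fixed
   REG_PARM_STACK_SPACE area already holds live data, the caller saves it
   with save_fixed_argument_area before storing new outgoing arguments
   there and puts it back with restore_fixed_argument_area once the call
   has been emitted, using the LOW_TO_SAVE/HIGH_TO_SAVE bounds recorded by
   the save.  */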

/* If any elements in ARGS refer to parameters that are to be passed in
   registers, but not in memory, and whose alignment does not permit a
   direct copy into registers, copy the values into a group of pseudos
   which we will later copy into the appropriate hard registers.

   Pseudos for each unaligned argument will be stored into the array
   args[argnum].aligned_regs.  The caller is responsible for deallocating
   the aligned_regs array if it is nonzero.  */

static void
store_unaligned_arguments_into_pseudos (struct arg_data *args, int num_actuals)
{
  int i, j;

  for (i = 0; i < num_actuals; i++)
    if (args[i].reg != 0 && ! args[i].pass_on_stack
	&& GET_CODE (args[i].reg) != PARALLEL
	&& args[i].mode == BLKmode
	&& MEM_P (args[i].value)
	&& (MEM_ALIGN (args[i].value)
	    < (unsigned int) MIN (BIGGEST_ALIGNMENT, BITS_PER_WORD)))
      {
	int bytes = int_size_in_bytes (TREE_TYPE (args[i].tree_value));
	int endian_correction = 0;

	if (args[i].partial)
	  {
	    gcc_assert (args[i].partial % UNITS_PER_WORD == 0);
	    args[i].n_aligned_regs = args[i].partial / UNITS_PER_WORD;
	  }
	else
	  {
	    args[i].n_aligned_regs
	      = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
	  }

	args[i].aligned_regs = XNEWVEC (rtx, args[i].n_aligned_regs);

	/* Structures smaller than a word are normally aligned to the
	   least significant byte.  On a BYTES_BIG_ENDIAN machine,
	   this means we must skip the empty high order bytes when
	   calculating the bit offset.  */
	if (bytes < UNITS_PER_WORD
#ifdef BLOCK_REG_PADDING
	    && (BLOCK_REG_PADDING (args[i].mode,
				   TREE_TYPE (args[i].tree_value), 1)
		== PAD_DOWNWARD)
#else
	    && BYTES_BIG_ENDIAN
#endif
	    )
	  endian_correction = BITS_PER_WORD - bytes * BITS_PER_UNIT;

	for (j = 0; j < args[i].n_aligned_regs; j++)
	  {
	    rtx reg = gen_reg_rtx (word_mode);
	    rtx word = operand_subword_force (args[i].value, j, BLKmode);
	    int bitsize = MIN (bytes * BITS_PER_UNIT, BITS_PER_WORD);

	    args[i].aligned_regs[j] = reg;
	    word = extract_bit_field (word, bitsize, 0, 1, NULL_RTX,
				      word_mode, word_mode, false, NULL);

	    /* There is no need to restrict this code to loading items
	       in TYPE_ALIGN sized hunks.  The bitfield instructions can
	       load up entire word sized registers efficiently.

	       ??? This may not be needed anymore.
	       We used to emit a clobber here, but that doesn't let later
	       passes optimize the instructions we emit.  By storing 0 into
	       the register, later passes know that the first AND, which
	       zeroes out the bitfield being set in the register, is
	       unnecessary.  The store of 0 will be deleted, as will at
	       least the first AND.  */

	    emit_move_insn (reg, const0_rtx);

	    bytes -= bitsize / BITS_PER_UNIT;
	    store_bit_field (reg, bitsize, endian_correction, 0, 0,
			     word_mode, word, false);
	  }
      }
}
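
/* For example, a 3-byte BLKmode argument on a 32-bit big-endian target
   (without BLOCK_REG_PADDING) gets a single word-sized pseudo:
   endian_correction is 32 - 24 = 8, so the 24 significant bits are stored
   at bit offset 8, skipping the word's most significant byte, which stays
   zero from the initial store of const0_rtx.  */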

/* Issue an error if CALL_EXPR was flagged as requiring
   tail-call optimization.  */

void
maybe_complain_about_tail_call (tree call_expr, const char *reason)
{
  gcc_assert (TREE_CODE (call_expr) == CALL_EXPR);
  if (!CALL_EXPR_MUST_TAIL_CALL (call_expr))
    return;

  error_at (EXPR_LOCATION (call_expr), "cannot tail-call: %s", reason);
}

/* Fill in ARGS_SIZE and ARGS array based on the parameters found in
   CALL_EXPR EXP.

   NUM_ACTUALS is the total number of parameters.

   N_NAMED_ARGS is the total number of named arguments.

   STRUCT_VALUE_ADDR_VALUE is the implicit argument for a struct return
   value, or null.

   FNDECL is the tree code for the target of this call (if known)

   ARGS_SO_FAR holds state needed by the target to know where to place
   the next argument.

   REG_PARM_STACK_SPACE is the number of bytes of stack space reserved
   for arguments which are passed in registers.

   OLD_STACK_LEVEL is a pointer to an rtx which holds the old stack level
   and may be modified by this routine.

   OLD_PENDING_ADJ, MUST_PREALLOCATE and FLAGS are pointers to integer
   flags which may be modified by this routine.

   MAY_TAILCALL is cleared if we encounter an invisible pass-by-reference
   that requires allocation of stack space.

   CALL_FROM_THUNK_P is true if this call is the jump from a thunk to
   the thunked-to function.  */

static void
initialize_argument_information (int num_actuals ATTRIBUTE_UNUSED,
				 struct arg_data *args,
				 struct args_size *args_size,
				 int n_named_args ATTRIBUTE_UNUSED,
				 tree exp, tree struct_value_addr_value,
				 tree fndecl, tree fntype,
				 cumulative_args_t args_so_far,
				 int reg_parm_stack_space,
				 rtx *old_stack_level,
				 poly_int64_pod *old_pending_adj,
				 int *must_preallocate, int *ecf_flags,
				 bool *may_tailcall, bool call_from_thunk_p)
{
  CUMULATIVE_ARGS *args_so_far_pnt = get_cumulative_args (args_so_far);
  location_t loc = EXPR_LOCATION (exp);

  /* Count arg position in order args appear.  */
  int argpos;

  int i;

  args_size->constant = 0;
  args_size->var = 0;

  /* In this loop, we consider args in the order they are written.
     We fill up ARGS from the back.  */

  i = num_actuals - 1;
  {
    int j = i;
    call_expr_arg_iterator iter;
    tree arg;

    if (struct_value_addr_value)
      {
	args[j].tree_value = struct_value_addr_value;
	j--;
      }
    argpos = 0;
    FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
      {
	tree argtype = TREE_TYPE (arg);

	if (targetm.calls.split_complex_arg
	    && argtype
	    && TREE_CODE (argtype) == COMPLEX_TYPE
	    && targetm.calls.split_complex_arg (argtype))
	  {
	    tree subtype = TREE_TYPE (argtype);
	    args[j].tree_value = build1 (REALPART_EXPR, subtype, arg);
	    j--;
	    args[j].tree_value = build1 (IMAGPART_EXPR, subtype, arg);
	  }
	else
	  args[j].tree_value = arg;
	j--;
	argpos++;
      }
  }

  /* I counts args in order (to be) pushed; ARGPOS counts in order written.  */
  for (argpos = 0; argpos < num_actuals; i--, argpos++)
    {
      tree type = TREE_TYPE (args[i].tree_value);
      int unsignedp;

      /* Replace erroneous argument with constant zero.  */
      if (type == error_mark_node || !COMPLETE_TYPE_P (type))
	args[i].tree_value = integer_zero_node, type = integer_type_node;

      /* If TYPE is a transparent union or record, pass things the way
	 we would pass the first field of the union or record.  We have
	 already verified that the modes are the same.  */
      if (RECORD_OR_UNION_TYPE_P (type) && TYPE_TRANSPARENT_AGGR (type))
	type = TREE_TYPE (first_field (type));

      /* Decide where to pass this arg.

	 args[i].reg is nonzero if all or part is passed in registers.

	 args[i].partial is nonzero if part but not all is passed in registers,
	 and the exact value says how many bytes are passed in registers.

	 args[i].pass_on_stack is nonzero if the argument must at least be
	 computed on the stack.  It may then be loaded back into registers
	 if args[i].reg is nonzero.

	 These decisions are driven by the FUNCTION_... macros and must agree
	 with those made by function.cc.  */

      /* See if this argument should be passed by invisible reference.  */
      function_arg_info arg (type, argpos < n_named_args);
      if (pass_by_reference (args_so_far_pnt, arg))
	{
	  const bool callee_copies
	    = reference_callee_copied (args_so_far_pnt, arg);
	  tree base;

	  /* If we're compiling a thunk, pass directly the address of an object
	     already in memory, instead of making a copy.  Likewise if we want
	     to make the copy in the callee instead of the caller.  */
	  if ((call_from_thunk_p || callee_copies)
	      && TREE_CODE (args[i].tree_value) != WITH_SIZE_EXPR
	      && ((base = get_base_address (args[i].tree_value)), true)
	      && TREE_CODE (base) != SSA_NAME
	      && (!DECL_P (base) || MEM_P (DECL_RTL (base))))
	    {
	      /* We may have turned the parameter value into an SSA name.
		 Go back to the original parameter so we can take the
		 address.  */
	      if (TREE_CODE (args[i].tree_value) == SSA_NAME)
		{
		  gcc_assert (SSA_NAME_IS_DEFAULT_DEF (args[i].tree_value));
		  args[i].tree_value = SSA_NAME_VAR (args[i].tree_value);
		  gcc_assert (TREE_CODE (args[i].tree_value) == PARM_DECL);
		}
	      /* Argument setup code may have copied the value to a
		 register.  We revert that optimization now because the
		 tail call code must use the original location.  */
	      if (TREE_CODE (args[i].tree_value) == PARM_DECL
		  && !MEM_P (DECL_RTL (args[i].tree_value))
		  && DECL_INCOMING_RTL (args[i].tree_value)
		  && MEM_P (DECL_INCOMING_RTL (args[i].tree_value)))
		set_decl_rtl (args[i].tree_value,
			      DECL_INCOMING_RTL (args[i].tree_value));

	      mark_addressable (args[i].tree_value);

	      /* We can't use sibcalls if a callee-copied argument is
		 stored in the current function's frame.  */
	      if (!call_from_thunk_p && DECL_P (base) && !TREE_STATIC (base))
		{
		  *may_tailcall = false;
		  maybe_complain_about_tail_call (exp,
						  "a callee-copied argument is"
						  " stored in the current"
						  " function's frame");
		}

	      args[i].tree_value = build_fold_addr_expr_loc (loc,
							 args[i].tree_value);
	      type = TREE_TYPE (args[i].tree_value);

	      if (*ecf_flags & ECF_CONST)
		*ecf_flags &= ~(ECF_CONST | ECF_LOOPING_CONST_OR_PURE);
	    }
	  else
	    {
	      /* We make a copy of the object and pass the address to the
		 function being called.  */
	      rtx copy;

	      if (!COMPLETE_TYPE_P (type)
		  || TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST
		  || (flag_stack_check == GENERIC_STACK_CHECK
		      && compare_tree_int (TYPE_SIZE_UNIT (type),
					   STACK_CHECK_MAX_VAR_SIZE) > 0))
		{
		  /* This is a variable-sized object.  Make space on the stack
		     for it.  */
		  rtx size_rtx = expr_size (args[i].tree_value);

		  if (*old_stack_level == 0)
		    {
		      emit_stack_save (SAVE_BLOCK, old_stack_level);
		      *old_pending_adj = pending_stack_adjust;
		      pending_stack_adjust = 0;
		    }

		  /* We can pass TRUE as the 4th argument because we just
		     saved the stack pointer and will restore it right after
		     the call.  */
		  copy = allocate_dynamic_stack_space (size_rtx,
						       TYPE_ALIGN (type),
						       TYPE_ALIGN (type),
						       max_int_size_in_bytes
						       (type),
						       true);
		  copy = gen_rtx_MEM (BLKmode, copy);
		  set_mem_attributes (copy, type, 1);
		}
	      else
		copy = assign_temp (type, 1, 0);

	      store_expr (args[i].tree_value, copy, 0, false, false);

	      /* Just change the const function to pure and then let
		 the next test clear the pure based on
		 callee_copies.  */
	      if (*ecf_flags & ECF_CONST)
		{
		  *ecf_flags &= ~ECF_CONST;
		  *ecf_flags |= ECF_PURE;
		}

	      if (!callee_copies && *ecf_flags & ECF_PURE)
		*ecf_flags &= ~(ECF_PURE | ECF_LOOPING_CONST_OR_PURE);

	      args[i].tree_value
		= build_fold_addr_expr_loc (loc, make_tree (type, copy));
	      type = TREE_TYPE (args[i].tree_value);
	      *may_tailcall = false;
	      maybe_complain_about_tail_call (exp,
					      "argument must be passed"
					      " by copying");
	    }
	  arg.pass_by_reference = true;
	}

      unsignedp = TYPE_UNSIGNED (type);
      arg.type = type;
      arg.mode
	= promote_function_mode (type, TYPE_MODE (type), &unsignedp,
				 fndecl ? TREE_TYPE (fndecl) : fntype, 0);

      args[i].unsignedp = unsignedp;
      args[i].mode = arg.mode;

      targetm.calls.warn_parameter_passing_abi (args_so_far, type);

      args[i].reg = targetm.calls.function_arg (args_so_far, arg);

      /* If this is a sibling call and the machine has register windows, the
	 register window has to be unwound before calling the routine, so
	 arguments have to go into the incoming registers.  */
      if (targetm.calls.function_incoming_arg != targetm.calls.function_arg)
	args[i].tail_call_reg
	  = targetm.calls.function_incoming_arg (args_so_far, arg);
      else
	args[i].tail_call_reg = args[i].reg;

      if (args[i].reg)
	args[i].partial = targetm.calls.arg_partial_bytes (args_so_far, arg);

      args[i].pass_on_stack = targetm.calls.must_pass_in_stack (arg);

      /* If FUNCTION_ARG returned a (parallel [(expr_list (nil) ...) ...]),
	 it means that we are to pass this arg in the register(s) designated
	 by the PARALLEL, but also to pass it in the stack.  */
      if (args[i].reg && GET_CODE (args[i].reg) == PARALLEL
	  && XEXP (XVECEXP (args[i].reg, 0, 0), 0) == 0)
	args[i].pass_on_stack = 1;

      /* If this is an addressable type, we must preallocate the stack
	 since we must evaluate the object into its final location.

	 If this is to be passed in both registers and the stack, it is simpler
	 to preallocate.  */
      if (TREE_ADDRESSABLE (type)
	  || (args[i].pass_on_stack && args[i].reg != 0))
	*must_preallocate = 1;

      /* Compute the stack-size of this argument.  */
      if (args[i].reg == 0 || args[i].partial != 0
	       || reg_parm_stack_space > 0
	       || args[i].pass_on_stack)
	locate_and_pad_parm (arg.mode, type,
#ifdef STACK_PARMS_IN_REG_PARM_AREA
			     1,
#else
			     args[i].reg != 0,
#endif
			     reg_parm_stack_space,
			     args[i].pass_on_stack ? 0 : args[i].partial,
			     fndecl, args_size, &args[i].locate);
#ifdef BLOCK_REG_PADDING
      else
	/* The argument is passed entirely in registers.  See at which
	   end it should be padded.  */
	args[i].locate.where_pad =
	  BLOCK_REG_PADDING (arg.mode, type,
			     int_size_in_bytes (type) <= UNITS_PER_WORD);
#endif

      /* Update ARGS_SIZE, the total stack space for args so far.  */

      args_size->constant += args[i].locate.size.constant;
      if (args[i].locate.size.var)
	ADD_PARM_SIZE (*args_size, args[i].locate.size.var);

      /* Increment ARGS_SO_FAR, which has info about which arg-registers
	 have been used, etc.  */

      /* ??? Traditionally we've passed TYPE_MODE here, instead of the
	 promoted_mode used for function_arg above.  However, the
	 corresponding handling of incoming arguments in function.cc
	 does pass the promoted mode.  */
      arg.mode = TYPE_MODE (type);
      targetm.calls.function_arg_advance (args_so_far, arg);
    }
}

/* Update ARGS_SIZE to contain the total size for the argument block.
   Return the original constant component of the argument block's size.

   REG_PARM_STACK_SPACE holds the number of bytes of stack space reserved
   for arguments passed in registers.  */

static poly_int64
compute_argument_block_size (int reg_parm_stack_space,
			     struct args_size *args_size,
			     tree fndecl ATTRIBUTE_UNUSED,
			     tree fntype ATTRIBUTE_UNUSED,
			     int preferred_stack_boundary ATTRIBUTE_UNUSED)
{
  poly_int64 unadjusted_args_size = args_size->constant;

  /* For accumulate outgoing args mode we don't need to align, since the frame
     will be already aligned.  Align to STACK_BOUNDARY in order to prevent
     backends from generating misaligned frame sizes.  */
  if (ACCUMULATE_OUTGOING_ARGS && preferred_stack_boundary > STACK_BOUNDARY)
    preferred_stack_boundary = STACK_BOUNDARY;

  /* Compute the actual size of the argument block required.  The variable
     and constant sizes must be combined, the size may have to be rounded,
     and there may be a minimum required size.  */

  if (args_size->var)
    {
      args_size->var = ARGS_SIZE_TREE (*args_size);
      args_size->constant = 0;

      preferred_stack_boundary /= BITS_PER_UNIT;
      if (preferred_stack_boundary > 1)
	{
	  /* We don't handle this case yet.  To handle it correctly we have
	     to add the delta, round and subtract the delta.
	     Currently no machine description requires this support.  */
	  gcc_assert (multiple_p (stack_pointer_delta,
				  preferred_stack_boundary));
	  args_size->var = round_up (args_size->var, preferred_stack_boundary);
	}

      if (reg_parm_stack_space > 0)
	{
	  args_size->var
	    = size_binop (MAX_EXPR, args_size->var,
			  ssize_int (reg_parm_stack_space));

	  /* The area corresponding to register parameters is not to count in
	     the size of the block we need.  So make the adjustment.  */
	  if (! OUTGOING_REG_PARM_STACK_SPACE ((!fndecl ? fntype : TREE_TYPE (fndecl))))
	    args_size->var
	      = size_binop (MINUS_EXPR, args_size->var,
			    ssize_int (reg_parm_stack_space));
	}
    }
  else
    {
      preferred_stack_boundary /= BITS_PER_UNIT;
      if (preferred_stack_boundary < 1)
	preferred_stack_boundary = 1;
      args_size->constant = (aligned_upper_bound (args_size->constant
						  + stack_pointer_delta,
						  preferred_stack_boundary)
			     - stack_pointer_delta);

      args_size->constant = upper_bound (args_size->constant,
					 reg_parm_stack_space);

      if (! OUTGOING_REG_PARM_STACK_SPACE ((!fndecl ? fntype : TREE_TYPE (fndecl))))
	args_size->constant -= reg_parm_stack_space;
    }
  return unadjusted_args_size;
}
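
/* Worked example, assuming no variable-sized component: with a 16-byte
   preferred stack boundary, a stack_pointer_delta of 0 and no reserved
   register-parameter area, 20 bytes of arguments are rounded up to a
   32-byte block, while the returned unadjusted size stays 20.  */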

/* Precompute parameters as needed for a function call.

   FLAGS is mask of ECF_* constants.

   NUM_ACTUALS is the number of arguments.

   ARGS is an array containing information for each argument; this
   routine fills in the INITIAL_VALUE and VALUE fields for each
   precomputed argument.  */

static void
precompute_arguments (int num_actuals, struct arg_data *args)
{
  int i;

  /* If this is a libcall, then precompute all arguments so that we do not
     get extraneous instructions emitted as part of the libcall sequence.  */

  /* If we preallocated the stack space, and some arguments must be passed
     on the stack, then we must precompute any parameter which contains a
     function call which will store arguments on the stack.
     Otherwise, evaluating the parameter may clobber previous parameters
     which have already been stored into the stack.  (we have code to avoid
     such a case by saving the outgoing stack arguments, but it results in
     worse code)  */
  if (!ACCUMULATE_OUTGOING_ARGS)
    return;

  for (i = 0; i < num_actuals; i++)
    {
      tree type;
      machine_mode mode;

      if (TREE_CODE (args[i].tree_value) != CALL_EXPR)
	continue;

      /* If this is an addressable type, we cannot pre-evaluate it.  */
      type = TREE_TYPE (args[i].tree_value);
      gcc_assert (!TREE_ADDRESSABLE (type));

      args[i].initial_value = args[i].value
	= expand_normal (args[i].tree_value);

      mode = TYPE_MODE (type);
      if (mode != args[i].mode)
	{
	  int unsignedp = args[i].unsignedp;
	  args[i].value
	    = convert_modes (args[i].mode, mode,
			     args[i].value, args[i].unsignedp);

	  /* CSE will replace this only if it contains args[i].value
	     pseudo, so convert it down to the declared mode using
	     a SUBREG.  */
	  if (REG_P (args[i].value)
	      && GET_MODE_CLASS (args[i].mode) == MODE_INT
	      && promote_mode (type, mode, &unsignedp) != args[i].mode)
	    {
	      args[i].initial_value
		= gen_lowpart_SUBREG (mode, args[i].value);
	      SUBREG_PROMOTED_VAR_P (args[i].initial_value) = 1;
	      SUBREG_PROMOTED_SET (args[i].initial_value, args[i].unsignedp);
	    }
	}
    }
}

/* Given the current state of MUST_PREALLOCATE and information about
   arguments to a function call in NUM_ACTUALS, ARGS and ARGS_SIZE,
   compute and return the final value for MUST_PREALLOCATE.  */

static int
finalize_must_preallocate (int must_preallocate, int num_actuals,
			   struct arg_data *args, struct args_size *args_size)
{
  /* See if we have or want to preallocate stack space.

     If we would have to push a partially-in-regs parm
     before other stack parms, preallocate stack space instead.

     If the size of some parm is not a multiple of the required stack
     alignment, we must preallocate.

     If the total size of arguments that would otherwise create a copy in
     a temporary (such as a CALL) is more than half the total argument list
     size, preallocation is faster.

     Another reason to preallocate is if we have a machine (like the m88k)
     where stack alignment is required to be maintained between every
     pair of insns, not just when the call is made.  However, we assume here
     that such machines either do not have push insns (and hence preallocation
     would occur anyway) or the problem is taken care of with
     PUSH_ROUNDING.  */

  if (! must_preallocate)
    {
      int partial_seen = 0;
      poly_int64 copy_to_evaluate_size = 0;
      int i;

      for (i = 0; i < num_actuals && ! must_preallocate; i++)
	{
	  if (args[i].partial > 0 && ! args[i].pass_on_stack)
	    partial_seen = 1;
	  else if (partial_seen && args[i].reg == 0)
	    must_preallocate = 1;

	  if (TYPE_MODE (TREE_TYPE (args[i].tree_value)) == BLKmode
	      && (TREE_CODE (args[i].tree_value) == CALL_EXPR
		  || TREE_CODE (args[i].tree_value) == TARGET_EXPR
		  || TREE_CODE (args[i].tree_value) == COND_EXPR
		  || TREE_ADDRESSABLE (TREE_TYPE (args[i].tree_value))))
	    copy_to_evaluate_size
	      += int_size_in_bytes (TREE_TYPE (args[i].tree_value));
	}

      if (maybe_ne (args_size->constant, 0)
	  && maybe_ge (copy_to_evaluate_size * 2, args_size->constant))
	must_preallocate = 1;
    }
  return must_preallocate;
}

/* If we preallocated stack space, compute the address of each argument
   and store it into the ARGS array.

   We need not ensure it is a valid memory address here; it will be
   validized when it is used.

   ARGBLOCK is an rtx for the address of the outgoing arguments.  */

static void
compute_argument_addresses (struct arg_data *args, rtx argblock, int num_actuals)
{
  if (argblock)
    {
      rtx arg_reg = argblock;
      int i;
      poly_int64 arg_offset = 0;

      if (GET_CODE (argblock) == PLUS)
	{
	  arg_reg = XEXP (argblock, 0);
	  arg_offset = rtx_to_poly_int64 (XEXP (argblock, 1));
	}

      for (i = 0; i < num_actuals; i++)
	{
	  rtx offset = ARGS_SIZE_RTX (args[i].locate.offset);
	  rtx slot_offset = ARGS_SIZE_RTX (args[i].locate.slot_offset);
	  rtx addr;
	  unsigned int align, boundary;
	  poly_uint64 units_on_stack = 0;
	  machine_mode partial_mode = VOIDmode;

	  /* Skip this parm if it will not be passed on the stack.  */
	  if (! args[i].pass_on_stack
	      && args[i].reg != 0
	      && args[i].partial == 0)
	    continue;

	  if (TYPE_EMPTY_P (TREE_TYPE (args[i].tree_value)))
	    continue;

	  addr = simplify_gen_binary (PLUS, Pmode, arg_reg, offset);
	  addr = plus_constant (Pmode, addr, arg_offset);

	  if (args[i].partial != 0)
	    {
	      /* Only part of the parameter is being passed on the stack.
		 Generate a simple memory reference of the correct size.  */
	      units_on_stack = args[i].locate.size.constant;
	      poly_uint64 bits_on_stack = units_on_stack * BITS_PER_UNIT;
	      partial_mode = int_mode_for_size (bits_on_stack, 1).else_blk ();
	      args[i].stack = gen_rtx_MEM (partial_mode, addr);
	      set_mem_size (args[i].stack, units_on_stack);
	    }
	  else
	    {
	      args[i].stack = gen_rtx_MEM (args[i].mode, addr);
	      set_mem_attributes (args[i].stack,
				  TREE_TYPE (args[i].tree_value), 1);
	    }
	  align = BITS_PER_UNIT;
	  boundary = args[i].locate.boundary;
	  poly_int64 offset_val;
	  if (args[i].locate.where_pad != PAD_DOWNWARD)
	    align = boundary;
	  else if (poly_int_rtx_p (offset, &offset_val))
	    {
	      align = least_bit_hwi (boundary);
	      unsigned int offset_align
		= known_alignment (offset_val) * BITS_PER_UNIT;
	      if (offset_align != 0)
		align = MIN (align, offset_align);
	    }
	  set_mem_align (args[i].stack, align);

	  addr = simplify_gen_binary (PLUS, Pmode, arg_reg, slot_offset);
	  addr = plus_constant (Pmode, addr, arg_offset);

	  if (args[i].partial != 0)
	    {
	      /* Only part of the parameter is being passed on the stack.
		 Generate a simple memory reference of the correct size.  */
	      args[i].stack_slot = gen_rtx_MEM (partial_mode, addr);
	      set_mem_size (args[i].stack_slot, units_on_stack);
	    }
	  else
	    {
	      args[i].stack_slot = gen_rtx_MEM (args[i].mode, addr);
	      set_mem_attributes (args[i].stack_slot,
				  TREE_TYPE (args[i].tree_value), 1);
	    }
	  set_mem_align (args[i].stack_slot, args[i].locate.boundary);

	  /* Function incoming arguments may overlap with sibling call
	     outgoing arguments and we cannot allow reordering of reads
	     from function arguments with stores to outgoing arguments
	     of sibling calls.  */
	  set_mem_alias_set (args[i].stack, 0);
	  set_mem_alias_set (args[i].stack_slot, 0);
	}
    }
}

/* Given a FNDECL and EXP, return an rtx suitable for use as a target address
   in a call instruction.

   FNDECL is the tree node for the target function.  For an indirect call
   FNDECL will be NULL_TREE.

   ADDR is the operand 0 of CALL_EXPR for this call.  */

static rtx
rtx_for_function_call (tree fndecl, tree addr)
{
  rtx funexp;

  /* Get the function to call, in the form of RTL.  */
  if (fndecl)
    {
      if (!TREE_USED (fndecl) && fndecl != current_function_decl)
	TREE_USED (fndecl) = 1;

      /* Get a SYMBOL_REF rtx for the function address.  */
      funexp = XEXP (DECL_RTL (fndecl), 0);
    }
  else
    /* Generate an rtx (probably a pseudo-register) for the address.  */
    {
      push_temp_slots ();
      funexp = expand_normal (addr);
      pop_temp_slots ();	/* FUNEXP can't be BLKmode.  */
    }
  return funexp;
}

/* Return the static chain for this function, if any.  */

rtx
rtx_for_static_chain (const_tree fndecl_or_type, bool incoming_p)
{
  if (DECL_P (fndecl_or_type) && !DECL_STATIC_CHAIN (fndecl_or_type))
    return NULL;

  return targetm.calls.static_chain (fndecl_or_type, incoming_p);
}

/* Internal state for internal_arg_pointer_based_exp and its helpers.  */
static struct
{
  /* Last insn that has been scanned by internal_arg_pointer_based_exp_scan,
     or NULL_RTX if none has been scanned yet.  */
  rtx_insn *scan_start;
  /* Vector indexed by REGNO - FIRST_PSEUDO_REGISTER, recording if a pseudo is
     based on crtl->args.internal_arg_pointer.  The element is NULL_RTX if the
     pseudo isn't based on it, a CONST_INT offset if the pseudo is based on
     it with a fixed offset, or PC if the offset is variable or unknown.  */
  vec<rtx> cache;
} internal_arg_pointer_exp_state;

static rtx internal_arg_pointer_based_exp (const_rtx, bool);

/* Helper function for internal_arg_pointer_based_exp.  Scan insns in
   the tail call sequence, starting with first insn that hasn't been
   scanned yet, and note for each pseudo on the LHS whether it is based
   on crtl->args.internal_arg_pointer or not, and what offset from that
   pointer it has.  */

static void
internal_arg_pointer_based_exp_scan (void)
{
  rtx_insn *insn, *scan_start = internal_arg_pointer_exp_state.scan_start;

  if (scan_start == NULL_RTX)
    insn = get_insns ();
  else
    insn = NEXT_INSN (scan_start);

  while (insn)
    {
      rtx set = single_set (insn);
      if (set && REG_P (SET_DEST (set)) && !HARD_REGISTER_P (SET_DEST (set)))
	{
	  rtx val = NULL_RTX;
	  unsigned int idx = REGNO (SET_DEST (set)) - FIRST_PSEUDO_REGISTER;
	  /* Punt on pseudos set multiple times.  */
	  if (idx < internal_arg_pointer_exp_state.cache.length ()
	      && (internal_arg_pointer_exp_state.cache[idx]
		  != NULL_RTX))
	    val = pc_rtx;
	  else
	    val = internal_arg_pointer_based_exp (SET_SRC (set), false);
	  if (val != NULL_RTX)
	    {
	      if (idx >= internal_arg_pointer_exp_state.cache.length ())
		internal_arg_pointer_exp_state.cache
		  .safe_grow_cleared (idx + 1, true);
	      internal_arg_pointer_exp_state.cache[idx] = val;
	    }
	}
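      /* Remember the last insn scanned, so that a later call can resume
	 from the insn that follows it.  */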
      if (NEXT_INSN (insn) == NULL_RTX)
	scan_start = insn;
      insn = NEXT_INSN (insn);
    }

  internal_arg_pointer_exp_state.scan_start = scan_start;
}

/* Compute whether RTL is based on crtl->args.internal_arg_pointer.  Return
   NULL_RTX if RTL isn't based on it, a CONST_INT offset if RTL is based on
   it with a fixed offset, or PC if it is based on it with a variable or
   unknown offset.  TOPLEVEL is true if the function is invoked at the
   topmost level.  */

static rtx
internal_arg_pointer_based_exp (const_rtx rtl, bool toplevel)
{
  if (CONSTANT_P (rtl))
    return NULL_RTX;

  if (rtl == crtl->args.internal_arg_pointer)
    return const0_rtx;

  if (REG_P (rtl) && HARD_REGISTER_P (rtl))
    return NULL_RTX;

  poly_int64 offset;
  if (GET_CODE (rtl) == PLUS && poly_int_rtx_p (XEXP (rtl, 1), &offset))
    {
      rtx val = internal_arg_pointer_based_exp (XEXP (rtl, 0), toplevel);
      if (val == NULL_RTX || val == pc_rtx)
	return val;
      return plus_constant (Pmode, val, offset);
    }

  /* When called at the topmost level, scan pseudo assignments in between the
     last scanned instruction in the tail call sequence and the latest insn
     in that sequence.  */
  if (toplevel)
    internal_arg_pointer_based_exp_scan ();

  if (REG_P (rtl))
    {
      unsigned int idx = REGNO (rtl) - FIRST_PSEUDO_REGISTER;
      if (idx < internal_arg_pointer_exp_state.cache.length ())
	return internal_arg_pointer_exp_state.cache[idx];

      return NULL_RTX;
    }

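  /* For anything more complex, look at each sub-rtx: if any register used
     outside of a MEM address is based on the internal arg pointer, the
     whole expression may be too, with an unknown offset.  A value loaded
     from memory is never itself treated as based on the internal arg
     pointer, so MEM sub-rtxes are skipped.  */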
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, rtl, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && internal_arg_pointer_based_exp (x, false) != NULL_RTX)
	return pc_rtx;
      if (MEM_P (x))
	iter.skip_subrtxes ();
    }

  return NULL_RTX;
}

/* Return true if SIZE bytes starting from address ADDR might overlap an
   already-clobbered argument area.  This function is used to determine
   whether we should give up on a sibcall.  */

static bool
mem_might_overlap_already_clobbered_arg_p (rtx addr, poly_uint64 size)
{
  poly_int64 i;
  unsigned HOST_WIDE_INT start, end;
  rtx val;

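  /* If no stores of tail call arguments have been recorded yet, there is
     nothing to overlap with.  */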
  if (bitmap_empty_p (stored_args_map)
      && stored_args_watermark == HOST_WIDE_INT_M1U)
    return false;
  val = internal_arg_pointer_based_exp (addr, true);
  if (val == NULL_RTX)
    return false;
  else if (!poly_int_rtx_p (val, &i))
    return true;

  if (known_eq (size, 0U))
    return false;

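  /* Adjust I so that it counts bytes in the same way that stored_args_map
     and stored_args_watermark do.  */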
  if (STACK_GROWS_DOWNWARD)
    i -= crtl->args.pretend_args_size;
  else
    i += crtl->args.pretend_args_size;

  if (ARGS_GROW_DOWNWARD)
    i = -i - size;

  /* We can ignore any references to the function's pretend args,
     which at this point would manifest as negative values of I.  */
  if (known_le (i, 0) && known_le (size, poly_uint64 (-i)))
    return false;

  start = maybe_lt (i, 0) ? 0 : constant_lower_bound (i);
  if (!(i + size).is_constant (&end))
    end = HOST_WIDE_INT_M1U;

  if (end > stored_args_watermark)
    return true;

  end = MIN (end, SBITMAP_SIZE (stored_args_map));
  for (unsigned HOST_WIDE_INT k = start; k < end; ++k)
    if (bitmap_bit_p (stored_args_map, k))
      return true;

  return false;
}

/* Do the register loads required for any wholly-register parms or any
   parms which are passed both on the stack and in a register.  Their
   expressions were already evaluated.

   Mark all register-parms as living through the call, putting these USE
   insns in the CALL_INSN_FUNCTION_USAGE field.

   When IS_SIBCALL is nonzero, perform the check_sibcall_argument_overlap
   checks, setting *SIBCALL_FAILURE if appropriate.  */

static void
load_register_parameters (struct arg_data *args, int num_actuals,
			  rtx *call_fusage, int flags, int is_sibcall,
			  int *sibcall_failure)
{
  int i, j;

  for (i = 0; i < num_actuals; i++)
    {
      rtx reg = ((flags & ECF_SIBCALL)
		 ? args[i].tail_call_reg : args[i].reg);
      if (reg)
	{
	  int partial = args[i].partial;
	  int nregs;
	  poly_int64 size = 0;
	  HOST_WIDE_INT const_size = 0;
	  rtx_insn *before_arg = get_last_insn ();
	  tree type = TREE_TYPE (args[i].tree_value);
	  if (RECORD_OR_UNION_TYPE_P (type) && TYPE_TRANSPARENT_AGGR (type))
	    type = TREE_TYPE (first_field (type));
	  /* Set non-negative if we must move a word at a time, even if
	     just one word (e.g., partial == 4 && mode == DFmode).  Set
	     to -1 if we just use a normal move insn.  This value can be
	     zero if the argument is a zero size structure.  */
	  nregs = -1;
	  if (GET_CODE (reg) == PARALLEL)
	    ;
	  else if (partial)
	    {
	      gcc_assert (partial % UNITS_PER_WORD == 0);
	      nregs = partial / UNITS_PER_WORD;
	    }
	  else if (TYPE_MODE (type) == BLKmode)
	    {
	      /* Variable-sized parameters should be described by a
		 PARALLEL instead.  */
	      const_size = int_size_in_bytes (type);
	      gcc_assert (const_size >= 0);
	      nregs = (const_size + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
	      size = const_size;
	    }
	  else
	    size = GET_MODE_SIZE (args[i].mode);

	  /* Handle calls that pass values in multiple non-contiguous
	     locations.  The Irix 6 ABI has examples of this.  */

	  if (GET_CODE (reg) == PARALLEL)
	    emit_group_move (reg, args[i].parallel_value);

	  /* In the simple case, just do the move.  For a normal partial
	     argument, store_one_arg has already loaded the register for us.
	     In all other cases, load the register(s) from memory.  */

	  else if (nregs == -1)
	    {
	      emit_move_insn (reg, args[i].value);
#ifdef BLOCK_REG_PADDING
	      /* Handle the case where we have a value that needs shifting
		 up to the msb, e.g. a QImode value that we are padding
		 upward on a BYTES_BIG_ENDIAN machine.  */
	      if (args[i].locate.where_pad
		  == (BYTES_BIG_ENDIAN ? PAD_UPWARD : PAD_DOWNWARD))
		{
		  gcc_checking_assert (ordered_p (size, UNITS_PER_WORD));
		  if (maybe_lt (size, UNITS_PER_WORD))
		    {
		      rtx x;
		      poly_int64 shift
			= (UNITS_PER_WORD - size) * BITS_PER_UNIT;

		      /* Assigning REG here rather than a temp makes
			 CALL_FUSAGE report the whole reg as used.
			 Strictly speaking, the call only uses SIZE
			 bytes at the msb end, but it doesn't seem worth
			 generating rtl to say that.  */
		      reg = gen_rtx_REG (word_mode, REGNO (reg));
		      x = expand_shift (LSHIFT_EXPR, word_mode,
					reg, shift, reg, 1);
		      if (x != reg)
			emit_move_insn (reg, x);
		    }
		}
#endif
	    }

	  /* If we have pre-computed the values to put in the registers in
	     the case of non-aligned structures, copy them in now.  */

	  else if (args[i].n_aligned_regs != 0)
	    for (j = 0; j < args[i].n_aligned_regs; j++)
	      emit_move_insn (gen_rtx_REG (word_mode, REGNO (reg) + j),
			      args[i].aligned_regs[j]);

	  else if (partial == 0 || args[i].pass_on_stack)
	    {
	      /* SIZE and CONST_SIZE are 0 for partial arguments and
		 the size of a BLKmode type otherwise.  */
	      gcc_checking_assert (known_eq (size, const_size));
	      rtx mem = validize_mem (copy_rtx (args[i].value));

	      /* Check for overlap with the already-clobbered argument area,
		 provided that this argument has a non-zero size.  */
	      if (is_sibcall
		  && const_size != 0
		  && (mem_might_overlap_already_clobbered_arg_p
		      (XEXP (args[i].value, 0), const_size)))
		*sibcall_failure = 1;

	      if (const_size % UNITS_PER_WORD == 0
		  || MEM_ALIGN (mem) % BITS_PER_WORD == 0)
		move_block_to_reg (REGNO (reg), mem, nregs, args[i].mode);
	      else
		{
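		  /* CONST_SIZE is not a multiple of the word size and MEM
		     is not word-aligned, so reading the last word in one go
		     could touch bytes beyond the end of the argument.  Move
		     all but the last register normally, then extract just
		     the remaining bits for the final register.  */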
		  if (nregs > 1)
		    move_block_to_reg (REGNO (reg), mem, nregs - 1,
				       args[i].mode);
		  rtx dest = gen_rtx_REG (word_mode, REGNO (reg) + nregs - 1);
		  unsigned int bitoff = (nregs - 1) * BITS_PER_WORD;
		  unsigned int bitsize = const_size * BITS_PER_UNIT - bitoff;
		  rtx x = extract_bit_field (mem, bitsize, bitoff, 1, dest,
					     word_mode, word_mode, false,
					     NULL);
		  if (BYTES_BIG_ENDIAN)
		    x = expand_shift (LSHIFT_EXPR, word_mode, x,
				      BITS_PER_WORD - bitsize, dest, 1);
		  if (x != dest)
		    emit_move_insn (dest, x);
		}

	      /* Handle a BLKmode that needs shifting.  */
	      if (nregs == 1 && const_size < UNITS_PER_WORD
#ifdef BLOCK_REG_PADDING
		  && args[i].locate.where_pad == PAD_DOWNWARD
#else
		  && BYTES_BIG_ENDIAN
#endif
		  )
		{
		  rtx dest = gen_rtx_REG (word_mode, REGNO (reg));
		  int shift = (UNITS_PER_WORD - const_size) * BITS_PER_UNIT;
		  enum tree_code dir = (BYTES_BIG_ENDIAN
					? RSHIFT_EXPR : LSHIFT_EXPR);
		  rtx x;

		  x = expand_shift (dir, word_mode, dest, shift, dest, 1);
		  if (x != dest)
		    emit_move_insn (dest, x);
		}
	    }

	  /* When a parameter is a block, and perhaps in other cases, it is
	     possible that it did a load from an argument slot that was
	     already clobbered.  */
	  if (is_sibcall
	      && check_sibcall_argument_overlap (before_arg, &args[i], 0))
	    *sibcall_failure = 1;

	  /* Handle calls that pass values in multiple non-contiguous
	     locations.  The Irix 6 ABI has examples of this.  */
	  if (GET_CODE (reg) == PARALLEL)
	    use_group_regs (call_fusage, reg);
	  else if (nregs == -1)
	    use_reg_mode (call_fusage, reg, TYPE_MODE (type));
	  else if (nregs > 0)
	    use_regs (call_fusage, REGNO (reg), nregs);
	}
    }
}

/* We need to pop PENDING_STACK_ADJUST bytes.  But, if the arguments
   wouldn't fill up an even multiple of PREFERRED_UNIT_STACK_BOUNDARY
   bytes, then we would need to push some additional bytes to pad the
   arguments.  So, we try to compute a stack pointer adjustment that will
   leave the stack under-aligned by UNADJUSTED_ARGS_SIZE
   bytes.  Then, when the arguments are pushed the stack will be perfectly
   aligned.

   Return true if this optimization is possible, storing the adjustment
   in ADJUSTMENT_OUT and setting ARGS_SIZE->CONSTANT to the number of
   bytes that should be popped after the call.  */
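/* A purely illustrative example: with a 16-byte stack boundary, an aligned
   stack pointer, UNADJUSTED_ARGS_SIZE of 8 and PENDING_STACK_ADJUST of 20,
   we would pop only 8 bytes now; pushing the 8 bytes of arguments then
   restores 16-byte alignment, and ARGS_SIZE->CONSTANT becomes 20 (the 12
   pending bytes left unpopped plus the 8 bytes of arguments), to be popped
   after the call.  */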

static bool
combine_pending_stack_adjustment_and_call (poly_int64_pod *adjustment_out,
					   poly_int64 unadjusted_args_size,
					   struct args_size *args_size,
					   unsigned int preferred_unit_stack_boundary)
{
  /* The number of bytes to pop so that the stack will be
     under-aligned by UNADJUSTED_ARGS_SIZE bytes.  */
  poly_int64 adjustment;
  /* The alignment of the stack after the arguments are pushed, if we
     just pushed the arguments without adjusting the stack here.  */
  unsigned HOST_WIDE_INT unadjusted_alignment;

  if (!known_misalignment (stack_pointer_delta + unadjusted_args_size,
			   preferred_unit_stack_boundary,
			   &unadjusted_alignment))
    return false;

  /* We want to get rid of as many of the PENDING_STACK_ADJUST bytes
     as possible -- leaving just enough left to cancel out the
     UNADJUSTED_ALIGNMENT.  In other words, we want to ensure that the
     PENDING_STACK_ADJUST is non-negative, and congruent to
     -UNADJUSTED_ALIGNMENT modulo the PREFERRED_UNIT_STACK_BOUNDARY.  */

  /* Begin by trying to pop all the bytes.  */
  unsigned HOST_WIDE_INT tmp_misalignment;
  if (!known_misalignment (pending_stack_adjust,
			   preferred_unit_stack_boundary,
			   &tmp_misalignment))
    return false;
  unadjusted_alignment -= tmp_misalignment;
  adjustment = pending_stack_adjust;
  /* Push enough additional bytes that the stack will be aligned
     after the arguments are pushed.  */
  if (preferred_unit_stack_boundary > 1 && unadjusted_alignment)
    adjustment -= preferred_unit_stack_boundary - unadjusted_alignment;

  /* We need to know whether the adjusted argument size
     (UNADJUSTED_ARGS_SIZE - ADJUSTMENT) constitutes an allocation
     or a deallocation.  */
  if (!ordered_p (adjustment, unadjusted_args_size))
    return false;

  /* Now set ARGS_SIZE->CONSTANT so that we pop the right number of
     bytes after the call.  The right number is the entire
     PENDING_STACK_ADJUST less our ADJUSTMENT plus the amount required
     by the arguments in the first place.  */
  args_size->constant
    = pending_stack_adjust - adjustment + unadjusted_args_size;

  *adjustment_out = adjustment;
  return true;
}

/* Scan expression X to check whether it dereferences any argument slots
   that we have already clobbered with tail call arguments (as noted in the
   stored_args_map bitmap).
   Return nonzero if X dereferences such an argument slot, zero
   otherwise.  */

static int
check_sibcall_argument_overlap_1 (rtx x)
{
  RTX_CODE code;
  int i, j;
  const char *fmt;

  if (x == NULL_RTX)
    return 0;

  code = GET_CODE (x);

  /* We need not check the operands of the CALL expression itself.  */
  if (code == CALL)
    return 0;

  if (code == MEM)
    return (mem_might_overlap_already_clobbered_arg_p
	    (XEXP (x, 0), GET_MODE_SIZE (GET_MODE (x))));

  /* Scan all subexpressions.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; i < GET_RTX_LENGTH (code); i++, fmt++)
    {
      if (*fmt == 'e')
	{
	  if (check_sibcall_argument_overlap_1 (XEXP (x, i)))
	    return 1;
	}
      else if (*fmt == 'E')
	{
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (check_sibcall_argument_overlap_1 (XVECEXP (x, i, j)))
	      return 1;
	}
    }
  return 0;
}

/* Scan the sequence after INSN to check whether it dereferences any argument
   slots that we have already clobbered with tail call arguments (as noted in
   the stored_args_map bitmap).  If MARK_STORED_ARGS_MAP is nonzero, add ARG's
   stack slots to the stored_args_map bitmap afterwards (when ARG is a
   register, MARK_STORED_ARGS_MAP should be 0).  Return nonzero if the
   sequence after INSN dereferences such argument slots, zero otherwise.  */

static int
check_sibcall_argument_overlap (rtx_insn *insn, struct arg_data *arg,
				int mark_stored_args_map)
{
  poly_uint64 low, high;
  unsigned HOST_WIDE_INT const_low, const_high;

  if (insn == NULL_RTX)
    insn = get_insns ();
  else
    insn = NEXT_INSN (insn);

  for (; insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
	&& check_sibcall_argument_overlap_1 (PATTERN (insn)))
      break;

  if (mark_stored_args_map)
    {
      if (ARGS_GROW_DOWNWARD)
	low = -arg->locate.slot_offset.constant - arg->locate.size.constant;
      else
	low = arg->locate.slot_offset.constant;
      high = low + arg->locate.size.constant;

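      /* If the slot's upper bound is a compile-time constant, mark each of
	 its bytes in stored_args_map; otherwise lower stored_args_watermark,
	 above which mem_might_overlap_already_clobbered_arg_p treats every
	 access as a possible overlap.  */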
      const_low = constant_lower_bound (low);
      if (high.is_constant (&const_high))
	for (unsigned HOST_WIDE_INT i = const_low; i < const_high; ++i)
	  bitmap_set_bit (stored_args_map, i);
      else
	stored_args_watermark = MIN (stored_args_watermark, const_low);
    }
  return insn != NULL_RTX;
}

/* Given that a function returns a value of mode MODE at the most
   significant end of hard register VALUE, shift VALUE left or right
   as specified by LEFT_P.  Return true if some action was needed.  */

bool
shift_return_value (machine_mode mode, bool left_p, rtx value)
{
  gcc_assert (REG_P (value) && HARD_REGISTER_P (value));
  machine_mode value_mode = GET_MODE (value);
  poly_int64 shift = GET_MODE_BITSIZE (value_mode) - GET_MODE_BITSIZE (mode);
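  /* E.g. an SImode value returned at the most significant end of a DImode
     hard register gives a 32-bit shift.  */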

  if (known_eq (shift, 0))
    return false;

  /* Use ashr rather than lshr for right shifts.  This is for the benefit
     of the MIPS port, which requires SImode values to be sign-extended
     when stored in 64-bit registers.  */
  if (!force_expand_binop (value_mode, left_p ? ashl_optab : ashr_optab,
			   value, gen_int_shift_amount (value_mode, shift),
			   value, 1, OPTAB_WIDEN))
    gcc_unreachable ();
  return true;
}

/* If X is a likely-spilled register value, copy it to a pseudo
   register and return that register.  Return X otherwise.  */

static rtx
avoid_likely_spilled_reg (rtx x)
{
  rtx new_rtx;

  if (REG_P (x)
      && HARD_REGISTER_P (x)
      && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (x))))
    {
      /* Make sure that we generate a REG rather than a CONCAT.
	 Moves into CONCATs can need nontrivial instructions,
	 and the whole point of this function is to avoid
	 using the hard register directly in such a situation.  */
      generating_concat_p = 0;
      new_rtx = gen_reg_rtx (GET_MODE (x));
      generating_concat_p = 1;
      emit_move_insn (new_rtx, x);
      return new_rtx;
    }
  return x;
}

/* Helper function for expand_call.
   Return false if EXP is not implementable as a sibling call.  */

static bool
can_implement_as_sibling_call_p (tree exp,
				 rtx structure_value_addr,
				 tree funtype,
				 tree fndecl,
				 int flags,
				 tree addr,
				 const args_size &args_size)
{
  if (!targetm.have_sibcall_epilogue ())
    {
      maybe_complain_about_tail_call
	(exp,
	 "machine description does not have"
	 " a sibcall_epilogue instruction pattern");
      return false;
    }

  /* Doing sibling call optimization needs some work, since
     structure_value_addr can be allocated on the stack.
     It does not seem worth the effort since few optimizable
     sibling calls will return a structure.  */
  if (structure_value_addr != NULL_RTX)
    {
      maybe_complain_about_tail_call (exp, "callee returns a structure");
      return false;
    }

  /* Check whether the target is able to optimize the call
     into a sibcall.  */
  if (!targetm.function_ok_for_sibcall (fndecl, exp))
    {
      maybe_complain_about_tail_call (exp,
				      "target is not able to optimize the"
				      " call into a sibling call");
      return false;
    }

  /* Functions that do not return exactly once may not be sibcall
     optimized.  */
  if (flags & ECF_RETURNS_TWICE)
    {
      maybe_complain_about_tail_call (exp, "callee returns twice");
      return false;
    }
  if (flags & ECF_NORETURN)
    {
      maybe_complain_about_tail_call (exp, "callee does not return");
      return false;
    }

  if (TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (addr))))
    {
      maybe_complain_about_tail_call (exp, "volatile function type");
      return false;
    }

  /* If the called function is nested in the current one, it might access
     some of the caller's arguments, but could clobber them beforehand if
     the argument areas are shared.  */
  if (fndecl && decl_function_context (fndecl) == current_function_decl)
    {
      maybe_complain_about_tail_call (exp, "nested function");
      return false;
    }

  /* If this function requires more stack slots than the current
     function, we cannot change it into a sibling call.
     crtl->args.pretend_args_size is not part of the
     stack allocated by our caller.  */
  if (maybe_gt (args_size.constant,
		crtl->args.size - crtl->args.pretend_args_size))
    {
      maybe_complain_about_tail_call (exp,
				      "callee required more stack slots"
				      " than the caller");
      return false;
    }

  /* If the callee pops its own arguments, then it must pop exactly
     the same number of arguments as the current function.  */
  if (maybe_ne (targetm.calls.return_pops_args (fndecl, funtype,
						args_size.constant),
		targetm.calls.return_pops_args (current_function_decl,
						TREE_TYPE
						(current_function_decl),
						crtl->args.size)))
    {
      maybe_complain_about_tail_call (exp,
				      "inconsistent number of"
				      " popped arguments");
      return false;
    }

  if (!lang_hooks.decls.ok_for_sibcall (fndecl))
    {
      maybe_complain_about_tail_call (exp, "frontend does not support"
					    " sibling call");
      return false;
    }

  /* All checks passed.  */
  return true;
}

/* Update the stack alignment when a parameter is passed on the stack, since
   the outgoing parameter requires extra alignment on the calling function's
   side.  */

static void
update_stack_alignment_for_call (struct locate_and_pad_arg_data *locate)
{
  if (crtl->stack_alignment_needed < locate->boundary)
    crtl->stack_alignment_needed = locate->boundary;
  if (crtl->preferred_stack_boundary < locate->boundary)
    crtl->preferred_stack_boundary = locate->boundary;
}

/* Generate all the code for a CALL_EXPR EXP
   and return an rtx for its value.
   Store the value in TARGET (specified as an rtx) if convenient.
   If the value is stored in TARGET then TARGET is returned.
   If IGNORE is nonzero, then we ignore the value of the function call.  */

rtx
expand_call (tree exp, rtx target, int ignore)
{
  /* Nonzero if we are currently expanding a call.  */
  static int currently_expanding_call = 0;

  /* RTX for the function to be called.  */
  rtx funexp;
  /* Sequence of insns to perform a normal "call".  */
  rtx_insn *normal_call_insns = NULL;
  /* Sequence of insns to perform a tail "call".  */
  rtx_insn *tail_call_insns = NULL;
  /* Data type of the function.  */
  tree funtype;
  tree type_arg_types;
  tree rettype;
  /* Declaration of the function being called,
     or 0 if the function is computed (not known by name).  */
  tree fndecl = 0;
  /* The type of the function being called.  */
  tree fntype;
  bool try_tail_call = CALL_EXPR_TAILCALL (exp);
  bool must_tail_call = CALL_EXPR_MUST_TAIL_CALL (exp);
  int pass;

  /* Register in which non-BLKmode value will be returned,
     or 0 if no value or if value is BLKmode.  */
  rtx valreg;
  /* Address where we should return a BLKmode value;
     0 if value not BLKmode.  */
  rtx structure_value_addr = 0;
  /* Nonzero if that address is being passed by treating it as
     an extra, implicit first parameter.  Otherwise,
     it is passed by being copied directly into struct_value_rtx.  */
  int structure_value_addr_parm = 0;
  /* Holds the value of implicit argument for the struct value.  */
  tree structure_value_addr_value = NULL_TREE;
  /* Size of aggregate value wanted, or zero if none wanted
     or if we are using the non-reentrant PCC calling convention
     or expecting the value in registers.  */
  poly_int64 struct_value_size = 0;
  /* Nonzero if called function returns an aggregate in memory PCC style,
     by returning the address of where to find it.  */
  int pcc_struct_value = 0;
  rtx struct_value = 0;

  /* Number of actual parameters in this call, including struct value addr.  */
  int num_actuals;
  /* Number of named args.  Args after this are anonymous ones
     and they must all go on the stack.  */
  int n_named_args;
  /* Number of complex actual arguments that need to be split.  */
  int num_complex_actuals = 0;

  /* Vector of information about each argument.
     Arguments are numbered in the order they will be pushed,
     not the order they are written.  */
  struct arg_data *args;

  /* Total size in bytes of all the stack-parms scanned so far.  */
  struct args_size args_size;
  struct args_size adjusted_args_size;
  /* Size of arguments before any adjustments (such as rounding).  */
  poly_int64 unadjusted_args_size;
  /* Data on reg parms scanned so far.  */
  CUMULATIVE_ARGS args_so_far_v;
  cumulative_args_t args_so_far;