# Makefile for the libiberty library.
# Originally written by K. Richard Pixley <rich@cygnus.com>.
#
# Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
# 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 Free Software
# Foundation
#
# This file is part of the libiberty library.
# Libiberty is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# Libiberty is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with libiberty; see the file COPYING.LIB.  If not,
# write to the Free Software Foundation, Inc., 51 Franklin Street - Fifth Floor,
# Boston, MA 02110-1301, USA.

libiberty_topdir = @libiberty_topdir@
srcdir = @srcdir@

prefix = @prefix@

exec_prefix = @exec_prefix@
bindir = @bindir@
libdir = @libdir@
includedir = @includedir@
target_header_dir = @target_header_dir@

SHELL = @SHELL@

# Multilib support variables.
MULTISRCTOP =
MULTIBUILDTOP =
MULTIDIRS =
MULTISUBDIR =
MULTIDO = true
MULTICLEAN = true

INSTALL = @INSTALL@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_DATA = @INSTALL_DATA@
mkinstalldirs = $(SHELL) $(libiberty_topdir)/mkinstalldirs

# Some compilers can't handle cc -c blah.c -o foo/blah.o.
OUTPUT_OPTION = @OUTPUT_OPTION@
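# What configure substitutes here is, in typical setups, either "-o $@"
# (when the compiler supports it) or the empty string; the value below
# is only an illustration, not taken from this tree:
#   OUTPUT_OPTION = -o $@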

AR = @AR@
AR_FLAGS = rc

CC = @CC@
CFLAGS = @CFLAGS@
CPPFLAGS = @CPPFLAGS@
RANLIB = @RANLIB@
MAKEINFO = @MAKEINFO@
PERL = @PERL@

PICFLAG = @PICFLAG@

MAKEOVERRIDES =

TARGETLIB = ./libiberty.a
TESTLIB = ./testlib.a

LIBOBJS = @LIBOBJS@

# A configuration can specify extra .o files that should be included,
# even if they are in libc. (Perhaps the libc version is buggy.)
EXTRA_OFILES = 
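# A purely hypothetical example: a host whose libc asprintf is known to
# be buggy could force the libiberty replacement in with
#   EXTRA_OFILES = ./asprintf.o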

# Flags to pass to a recursive make.
FLAGS_TO_PASS = \
	"AR=$(AR)" \
	"AR_FLAGS=$(AR_FLAGS)" \
	"CC=$(CC)" \
	"CFLAGS=$(CFLAGS)" \
	"CPPFLAGS=$(CPPFLAGS)" \
	"DESTDIR=$(DESTDIR)" \
	"EXTRA_OFILES=$(EXTRA_OFILES)" \
	"HDEFINES=$(HDEFINES)" \
	"INSTALL=$(INSTALL)" \
	"INSTALL_DATA=$(INSTALL_DATA)" \
	"INSTALL_PROGRAM=$(INSTALL_PROGRAM)" \
	"LDFLAGS=$(LDFLAGS)" \
	"LOADLIBES=$(LOADLIBES)" \
	"RANLIB=$(RANLIB)" \
	"SHELL=$(SHELL)" \
	"prefix=$(prefix)" \
	"exec_prefix=$(exec_prefix)" \
	"libdir=$(libdir)" \
	"libsubdir=$(libsubdir)" \
	"tooldir=$(tooldir)"

# Subdirectories to recurse into.  We need to override this during cleaning.
SUBDIRS = testsuite

# FIXME: add @BUILD_INFO@ once we're sure it works for everyone.
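# Note on the recipe below: the "@:" no-op mentions $(MAKE), the usual
# GNU make idiom for marking the rule as recursive (it then also runs
# under "make -n" and cooperates with the -j jobserver).  $(MULTIDO),
# which defaults to "true" above, re-runs "make all" in each multilib
# subdirectory when a multilib fragment overrides it.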
all: stamp-picdir $(TARGETLIB) required-list all-subdir
	@: $(MAKE) ; $(MULTIDO) $(FLAGS_TO_PASS) multi-do DO=all

.PHONY: check installcheck
check: check-subdir
installcheck: installcheck-subdir

@host_makefile_frag@

INCDIR=$(srcdir)/$(MULTISRCTOP)../include

COMPILE.c = $(CC) -c @DEFS@ $(CFLAGS) $(CPPFLAGS) -I. -I$(INCDIR) $(HDEFINES) @ac_libiberty_warn_cflags@

# Just to make sure we don't use a built-in rule with VPATH
.c.o:
	false

# NOTE: If you add new files to the library, add them to this list
# (alphabetical), and add them to REQUIRED_OFILES, or
# CONFIGURED_OFILES and funcs in configure.ac.  Also run "make maint-deps"
# to build the new rules.
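# Worked example (the file name is hypothetical): after adding frob.c,
# list it in CFILES below; add ./frob.o to REQUIRED_OFILES if it must
# always be in the library, or to CONFIGURED_OFILES plus "frob" in the
# funcs list of configure.ac if it is a replacement function; then run
# "make maint-deps" to regenerate the dependency rules at the bottom
# of this file.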
CFILES = alloca.c argv.c asprintf.c atexit.c				\
	basename.c bcmp.c bcopy.c bsearch.c bzero.c			\
	calloc.c choose-temp.c clock.c concat.c cp-demangle.c		\
	 cp-demint.c cplus-dem.c crc32.c				\
	dyn-string.c							\
	fdmatch.c ffs.c fibheap.c filename_cmp.c floatformat.c		\
	fnmatch.c fopen_unlocked.c					\
	getcwd.c getopt.c getopt1.c getpagesize.c getpwd.c getruntime.c	\
	gettimeofday.c							\
	hashtab.c hex.c							\
	index.c insque.c						\
	lbasename.c							\
	lrealpath.c							\
	make-relative-prefix.c						\
	make-temp-file.c md5.c memchr.c memcmp.c memcpy.c memmem.c	\
	 memmove.c mempcpy.c memset.c mkstemps.c			\
	objalloc.c obstack.c						\
	partition.c pexecute.c						\
	 pex-common.c pex-djgpp.c pex-msdos.c pex-one.c			\
	 pex-unix.c pex-win32.c						\
	physmem.c putenv.c						\
	random.c regex.c rename.c rindex.c				\
	safe-ctype.c setenv.c sha1.c sigsetmask.c snprintf.c sort.c	\
	 spaces.c splay-tree.c stpcpy.c stpncpy.c strcasecmp.c		\
	 strchr.c strdup.c strerror.c strncasecmp.c strncmp.c		\
	 strrchr.c strsignal.c strstr.c strtod.c strtol.c strtoul.c	\
	 strndup.c strverscmp.c						\
	tmpnam.c							\
	unlink-if-ordinary.c						\
	vasprintf.c vfork.c vfprintf.c vprintf.c vsnprintf.c vsprintf.c	\
	waitpid.c							\
	xatexit.c xexit.c xmalloc.c xmemdup.c xstrdup.c xstrerror.c	\
	 xstrndup.c

# These are always included in the library.  The files on the first
# line below are listed first, ordered by compile time (longest first),
# to optimize parallel builds.
REQUIRED_OFILES =							\
	./regex.o ./cplus-dem.o ./cp-demangle.o ./md5.o ./sha1.o	\
	./alloca.o ./argv.o						\
	./choose-temp.o ./concat.o ./cp-demint.o ./crc32.o		\
	./dyn-string.o							\
	./fdmatch.o ./fibheap.o ./filename_cmp.o ./floatformat.o	\
	./fnmatch.o ./fopen_unlocked.o					\
	./getopt.o ./getopt1.o ./getpwd.o ./getruntime.o		\
	./hashtab.o ./hex.o						\
	./lbasename.o ./lrealpath.o					\
	./make-relative-prefix.o ./make-temp-file.o			\
	./objalloc.o ./obstack.o					\
	./partition.o ./pexecute.o ./physmem.o				\
	./pex-common.o ./pex-one.o @pexecute@				\
	./safe-ctype.o ./sort.o ./spaces.o ./splay-tree.o ./strerror.o	\
	 ./strsignal.o							\
	./unlink-if-ordinary.o						\
	./xatexit.o ./xexit.o ./xmalloc.o ./xmemdup.o ./xstrdup.o	\
	 ./xstrerror.o ./xstrndup.o

# These are all the objects that configure may add to the library via
# $funcs or EXTRA_OFILES.  This list exists here only for "make
# maint-missing" and "make check".
CONFIGURED_OFILES = ./asprintf.o ./atexit.o				\
	./basename.o ./bcmp.o ./bcopy.o ./bsearch.o ./bzero.o		\
	./calloc.o ./clock.o ./copysign.o				\
	./_doprnt.o							\
	./ffs.o								\
	./getcwd.o ./getpagesize.o ./gettimeofday.o			\
	./index.o ./insque.o						\
	./memchr.o ./memcmp.o ./memcpy.o ./memmem.o ./memmove.o		\
	 ./mempcpy.o ./memset.o ./mkstemps.o				\
	./pex-djgpp.o ./pex-msdos.o					\
	 ./pex-unix.o ./pex-win32.o					\
	 ./putenv.o							\
	./random.o ./rename.o ./rindex.o				\
	./setenv.o ./sigsetmask.o ./snprintf.o ./stpcpy.o ./stpncpy.o	\
	 ./strcasecmp.o ./strchr.o ./strdup.o ./strncasecmp.o		\
	 ./strncmp.o ./strndup.o ./strrchr.o ./strstr.o			\
	 ./strtod.o ./strtol.o ./strtoul.o ./strverscmp.o		\
	./tmpnam.o							\
	./vasprintf.o ./vfork.o ./vfprintf.o ./vprintf.o ./vsnprintf.o	\
	 ./vsprintf.o							\
	./waitpid.o

# These files are installed if the library has been configured to do so.
INSTALLED_HEADERS =                                                     \
	$(INCDIR)/ansidecl.h                                            \
	$(INCDIR)/demangle.h                                            \
	$(INCDIR)/dyn-string.h                                          \
	$(INCDIR)/fibheap.h                                             \
	$(INCDIR)/floatformat.h                                         \
	$(INCDIR)/hashtab.h                                             \
	$(INCDIR)/libiberty.h                                           \
	$(INCDIR)/objalloc.h                                            \
	$(INCDIR)/partition.h                                           \
	$(INCDIR)/safe-ctype.h                                          \
	$(INCDIR)/sort.h                                                \
	$(INCDIR)/splay-tree.h

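# Build the static archive; when PICFLAG is non-empty, also build a
# parallel archive from the PIC objects that the per-file rules below
# leave in pic/.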
$(TARGETLIB): $(REQUIRED_OFILES) $(EXTRA_OFILES) $(LIBOBJS)
	-rm -f $(TARGETLIB) pic/$(TARGETLIB)
	$(AR) $(AR_FLAGS) $(TARGETLIB) \
	  $(REQUIRED_OFILES) $(EXTRA_OFILES) $(LIBOBJS)
	$(RANLIB) $(TARGETLIB)
	if [ x"$(PICFLAG)" != x ]; then \
	  cd pic; \
	  $(AR) $(AR_FLAGS) $(TARGETLIB) \
	    $(REQUIRED_OFILES) $(EXTRA_OFILES) $(LIBOBJS); \
	  $(RANLIB) $(TARGETLIB); \
	  cd ..; \
	else true; fi

$(TESTLIB): $(REQUIRED_OFILES) $(CONFIGURED_OFILES)
	-rm -f $(TESTLIB)
	$(AR) $(AR_FLAGS) $(TESTLIB) \
	  $(REQUIRED_OFILES) $(CONFIGURED_OFILES)
	$(RANLIB) $(TESTLIB)

info: libiberty.info info-subdir
install-info: install-info-subdir
clean-info: clean-info-subdir
dvi: libiberty.dvi dvi-subdir

LIBIBERTY_PDFFILES = libiberty.pdf

pdf: $(LIBIBERTY_PDFFILES) pdf-subdir

.PHONY: install-pdf

pdf__strip_dir = `echo $$p | sed -e 's|^.*/||'`;
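# (The sed invocation above strips any leading directory components
# from $$p, leaving just the file name; html__strip_dir further down
# does the same for the HTML targets.)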

install-pdf: $(LIBIBERTY_PDFFILES)
	@$(NORMAL_INSTALL)
	test -z "$(pdfdir)" || $(mkinstalldirs) "$(DESTDIR)$(pdfdir)"
	@list='$(LIBIBERTY_PDFFILES)'; for p in $$list; do \
	  if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
	  f=$(pdf__strip_dir) \
	  echo " $(INSTALL_DATA) '$$d$$p' '$(DESTDIR)$(pdfdir)/$$f'"; \
	  $(INSTALL_DATA) "$$d$$p" "$(DESTDIR)$(pdfdir)/$$f"; \
	done

# html, install-html targets
HTMLS = libiberty.html

html: $(HTMLS)

.PHONY: install-html install-html-am

NORMAL_INSTALL = :
mkdir_p = mkdir -p --
 
html__strip_dir = `echo $$p | sed -e 's|^.*/||'`;

install-html: install-html-am

install-html-am: $(HTMLS)
	@$(NORMAL_INSTALL)
	test -z "$(htmldir)" || $(mkdir_p) "$(DESTDIR)$(htmldir)"
	@list='$(HTMLS)'; for p in $$list; do \
	  if test -f "$$p" || test -d "$$p"; then d=""; else d="$(srcdir)/"; fi; \
	  f=$(html__strip_dir) \
	  if test -d "$$d$$p"; then \
	    echo " $(mkdir_p) '$(DESTDIR)$(htmldir)/$$f'"; \
	    $(mkdir_p) "$(DESTDIR)$(htmldir)/$$f" || exit 1; \
	    echo " $(INSTALL_DATA) '$$d$$p'/* '$(DESTDIR)$(htmldir)/$$f'"; \
	    $(INSTALL_DATA) "$$d$$p"/* "$(DESTDIR)$(htmldir)/$$f"; \
	  else \
	    echo " $(INSTALL_DATA) '$$d$$p' '$(DESTDIR)$(htmldir)/$$f'"; \
	    $(INSTALL_DATA) "$$d$$p" "$(DESTDIR)$(htmldir)/$$f"; \
	  fi; \
	done

TEXISRC = \
	$(srcdir)/libiberty.texi \
	$(srcdir)/copying-lib.texi \
	$(srcdir)/obstacks.texi \
	$(srcdir)/functions.texi

# Additional files that have texi snippets that need to be collected
# and sorted.  Some are here because the sources are imported from
# elsewhere.  Others represent headers in ../include.
TEXIFILES = fnmatch.txh pexecute.txh

libiberty.info : $(srcdir)/libiberty.texi $(TEXISRC)
	$(MAKEINFO) -I$(srcdir) $(srcdir)/libiberty.texi

libiberty.dvi : $(srcdir)/libiberty.texi $(TEXISRC)
	texi2dvi $(srcdir)/libiberty.texi

libiberty.pdf : $(srcdir)/libiberty.texi $(TEXISRC)
	texi2pdf $(srcdir)/libiberty.texi

libiberty.html : $(srcdir)/libiberty.texi $(TEXISRC)
	$(MAKEINFO) --no-split --html -I$(srcdir) -o $@ $<

@MAINT@$(srcdir)/functions.texi : stamp-functions
@MAINT@	@true

@MAINT@stamp-functions : $(CFILES:%=$(srcdir)/%) $(TEXIFILES:%=$(srcdir)/%) $(srcdir)/gather-docs Makefile
@MAINT@@HAVE_PERL@	$(PERL) $(srcdir)/gather-docs $(srcdir) $(srcdir)/functions.texi $(CFILES) $(TEXIFILES)
@MAINT@	echo stamp > stamp-functions

INSTALL_DEST = @INSTALL_DEST@
install: install_to_$(INSTALL_DEST) install-subdir

# This is tricky.  Even though CC in the Makefile contains
# multilib-specific flags, it's overridden by FLAGS_TO_PASS from the
# default multilib, so we have to take CFLAGS into account as well,
# since it will be passed the multilib flags.
MULTIOSDIR = `$(CC) $(CFLAGS) -print-multi-os-directory`
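# As an illustration (a general observation about GCC, not a guarantee
# made by this file): "gcc -m64 -print-multi-os-directory" typically
# prints "../lib64" on bi-arch GNU/Linux hosts, and "." is printed for
# the default multilib.
#
# The install rules below deliberately install the archive under the
# temporary name $(TARGETLIB)n (i.e. libiberty.an), run ranlib on it,
# and only then mv it into place, so an existing libiberty.a is never
# seen half-written.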
install_to_libdir: all
	${mkinstalldirs} $(DESTDIR)$(libdir)/$(MULTIOSDIR)
	$(INSTALL_DATA) $(TARGETLIB) $(DESTDIR)$(libdir)/$(MULTIOSDIR)/$(TARGETLIB)n
	( cd $(DESTDIR)$(libdir)/$(MULTIOSDIR) ; chmod 644 $(TARGETLIB)n ;$(RANLIB) $(TARGETLIB)n )
	mv -f $(DESTDIR)$(libdir)/$(MULTIOSDIR)/$(TARGETLIB)n $(DESTDIR)$(libdir)/$(MULTIOSDIR)/$(TARGETLIB)
	if test -n "${target_header_dir}"; then \
	  case "${target_header_dir}" in \
	    /*)    thd=${target_header_dir};; \
	    *)     thd=${includedir}/${target_header_dir};; \
	  esac; \
	  ${mkinstalldirs} $(DESTDIR)$${thd}; \
	  for h in ${INSTALLED_HEADERS}; do \
	    ${INSTALL_DATA} $$h $(DESTDIR)$${thd}; \
	  done; \
	fi
	@$(MULTIDO) $(FLAGS_TO_PASS) multi-do DO=install

install_to_tooldir: all
	${mkinstalldirs} $(DESTDIR)$(tooldir)/lib/$(MULTIOSDIR)
	$(INSTALL_DATA) $(TARGETLIB) $(DESTDIR)$(tooldir)/lib/$(MULTIOSDIR)/$(TARGETLIB)n
	( cd $(DESTDIR)$(tooldir)/lib/$(MULTIOSDIR) ; chmod 644 $(TARGETLIB)n; $(RANLIB) $(TARGETLIB)n )
	mv -f $(DESTDIR)$(tooldir)/lib/$(MULTIOSDIR)/$(TARGETLIB)n $(DESTDIR)$(tooldir)/lib/$(MULTIOSDIR)/$(TARGETLIB)
	@$(MULTIDO) $(FLAGS_TO_PASS) multi-do DO=install

# required-list was used when building a shared bfd/opcodes/libiberty
# library.  I don't know if it is used by anything currently.
required-list: Makefile
	echo $(REQUIRED_OFILES) > required-list

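# stamp-picdir records that the pic/ object directory has been created
# (it is only made when PICFLAG is set).  Since every object below
# depends on the stamp, the directory exists before any PIC compile
# runs.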
stamp-picdir:
	if [ x"$(PICFLAG)" != x ] && [ ! -d pic ]; then \
	  mkdir pic; \
	else true; fi
	touch stamp-picdir

.PHONY: all etags tags ls clean stage1 stage2

etags tags: TAGS etags-subdir

TAGS: $(CFILES)
	etags `for i in $(CFILES); do echo $(srcdir)/$$i ; done`

# The standalone demangler (c++filt) has been moved to binutils.
# But make this target work anyway for demangler hacking.
demangle: $(ALL) $(srcdir)/cp-demangle.c
	@echo "The standalone demangler, now named c++filt, is"
	@echo "a part of binutils."
	$(CC) @DEFS@ $(CFLAGS) $(CPPFLAGS) -I. -I$(INCDIR) $(HDEFINES) \
	  $(srcdir)/cp-demangle.c -DSTANDALONE_DEMANGLER $(TARGETLIB) -o $@
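# Usage sketch: "make demangle" compiles cp-demangle.c with
# -DSTANDALONE_DEMANGLER (which exposes the file's built-in main)
# against $(TARGETLIB), leaving a ./demangle binary for local testing.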

ls:
	@echo Makefile $(CFILES)

# Various targets for maintainers.

maint-missing :
	@$(PERL) $(srcdir)/maint-tool -s $(srcdir) missing $(CFILES) $(REQUIRED_OFILES) $(CONFIGURED_OFILES)

maint-buildall : $(REQUIRED_OFILES) $(CONFIGURED_OFILES)
	@true

maint-undoc : $(srcdir)/functions.texi
	@$(PERL) $(srcdir)/maint-tool -s $(srcdir) undoc

maint-deps :
	@$(PERL) $(srcdir)/maint-tool -s $(srcdir) deps $(INCDIR)

# Need to deal with profiled libraries, too.

# Cleaning has to be done carefully to ensure that we don't clean our SUBDIRS
# multiple times, hence our explicit recursion with an empty SUBDIRS.
mostlyclean: mostlyclean-subdir
	-rm -rf *.o pic core errs \#* *.E a.out
	-rm -f errors dummy config.h stamp-*
	-rm -f $(CONFIG_H) stamp-picdir
	-rm -f libiberty.aux libiberty.cp libiberty.cps libiberty.fn libiberty.ky
	-rm -f libiberty.log libiberty.tmp libiberty.tps libiberty.pg
	-rm -f libiberty.pgs libiberty.toc libiberty.tp libiberty.tpl libiberty.vr
	-rm -f libtexi.stamp
	@$(MULTICLEAN) multi-clean DO=mostlyclean
clean: clean-subdir
	$(MAKE) SUBDIRS="" mostlyclean
	-rm -f *.a required-list tmpmulti.out
	-rm -f libiberty.dvi libiberty.pdf libiberty.info* libiberty.html
	@$(MULTICLEAN) multi-clean DO=clean
distclean: distclean-subdir
	$(MAKE) SUBDIRS="" clean
	@$(MULTICLEAN) multi-clean DO=distclean
	-rm -f *~ Makefile config.cache config.status xhost-mkfrag TAGS multilib.out
	-rm -f config.log
	-rmdir testsuite 2>/dev/null
maintainer-clean realclean: maintainer-clean-subdir
	$(MAKE) SUBDIRS="" distclean

force:

Makefile: $(srcdir)/Makefile.in config.status
	CONFIG_FILES=Makefile CONFIG_HEADERS= $(SHELL) ./config.status

# Depending on Makefile makes sure that config.status has been re-run
# if needed.  This prevents problems with parallel builds.
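# (This is the usual autoconf stamp idiom: config.status normally
# leaves config.h untouched when its contents would not change, while
# stamp-h is always refreshed, so dependents of config.h are not
# rebuilt needlessly.)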
config.h: stamp-h ; @true
stamp-h: $(srcdir)/config.in config.status Makefile
	CONFIG_FILES= CONFIG_HEADERS=config.h:$(srcdir)/config.in $(SHELL) ./config.status

config.status: $(srcdir)/configure
	$(SHELL) ./config.status --recheck

AUTOCONF = autoconf
configure_deps = $(srcdir)/aclocal.m4 \
	$(srcdir)/../config/acx.m4 \
	$(srcdir)/../config/no-executables.m4 \
	$(srcdir)/../config/override.m4 \
	$(srcdir)/../config/warnings.m4

$(srcdir)/configure: @MAINT@ $(srcdir)/configure.ac $(configure_deps)
	cd $(srcdir) && $(AUTOCONF)

# Depending on config.h makes sure that config.status has been re-run
# if needed.  This prevents problems with parallel builds, in case
# subdirectories need to run config.status also.
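# Each foo-subdir target below strips the "-subdir" suffix with sed
# and runs "make foo" in every directory listed in $(SUBDIRS); for
# example, check-subdir runs "make check" in testsuite.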
all-subdir check-subdir installcheck-subdir info-subdir	\
install-info-subdir clean-info-subdir dvi-subdir pdf-subdir install-subdir	\
etags-subdir mostlyclean-subdir clean-subdir distclean-subdir \
maintainer-clean-subdir: config.h
	@subdirs='$(SUBDIRS)'; \
	target=`echo $@ | sed -e 's/-subdir//'`; \
	for dir in $$subdirs ; do \
	  cd $$dir && $(MAKE) $(FLAGS_TO_PASS) $$target; \
	done

$(REQUIRED_OFILES) $(EXTRA_OFILES) $(LIBOBJS): stamp-picdir
$(CONFIGURED_OFILES): stamp-picdir

# Don't export variables to the environment, in order to not confuse
# configure.
.NOEXPORT:

# The dependencies in the remainder of this file are automatically
# generated by "make maint-deps".  Manual edits will be lost.
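# Every rule below follows the same pattern: if PICFLAG is non-empty,
# the source is first compiled with $(PICFLAG) into pic/, and then it
# is compiled normally into the current directory; only the dependency
# lists differ from file to file.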

./_doprnt.o: $(srcdir)/_doprnt.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/safe-ctype.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/_doprnt.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/_doprnt.c $(OUTPUT_OPTION)

./alloca.o: $(srcdir)/alloca.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/alloca.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/alloca.c $(OUTPUT_OPTION)

./argv.o: $(srcdir)/argv.c config.h $(INCDIR)/ansidecl.h $(INCDIR)/libiberty.h \
	$(INCDIR)/safe-ctype.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/argv.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/argv.c $(OUTPUT_OPTION)

./asprintf.o: $(srcdir)/asprintf.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/asprintf.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/asprintf.c $(OUTPUT_OPTION)

./atexit.o: $(srcdir)/atexit.c config.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/atexit.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/atexit.c $(OUTPUT_OPTION)

./basename.o: $(srcdir)/basename.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h $(INCDIR)/safe-ctype.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/basename.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/basename.c $(OUTPUT_OPTION)

./bcmp.o: $(srcdir)/bcmp.c
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/bcmp.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/bcmp.c $(OUTPUT_OPTION)

./bcopy.o: $(srcdir)/bcopy.c
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/bcopy.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/bcopy.c $(OUTPUT_OPTION)

./bsearch.o: $(srcdir)/bsearch.c config.h $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/bsearch.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/bsearch.c $(OUTPUT_OPTION)

./bzero.o: $(srcdir)/bzero.c
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/bzero.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/bzero.c $(OUTPUT_OPTION)

./calloc.o: $(srcdir)/calloc.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/calloc.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/calloc.c $(OUTPUT_OPTION)

./choose-temp.o: $(srcdir)/choose-temp.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/choose-temp.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/choose-temp.c $(OUTPUT_OPTION)

./clock.o: $(srcdir)/clock.c config.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/clock.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/clock.c $(OUTPUT_OPTION)

./concat.o: $(srcdir)/concat.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/concat.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/concat.c $(OUTPUT_OPTION)

./copysign.o: $(srcdir)/copysign.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/copysign.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/copysign.c $(OUTPUT_OPTION)

./cp-demangle.o: $(srcdir)/cp-demangle.c config.h $(INCDIR)/ansidecl.h \
	$(srcdir)/cp-demangle.h $(INCDIR)/demangle.h \
	$(INCDIR)/dyn-string.h $(INCDIR)/getopt.h $(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/cp-demangle.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/cp-demangle.c $(OUTPUT_OPTION)

./cp-demint.o: $(srcdir)/cp-demint.c config.h $(INCDIR)/ansidecl.h \
	$(srcdir)/cp-demangle.h $(INCDIR)/demangle.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/cp-demint.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/cp-demint.c $(OUTPUT_OPTION)

./cplus-dem.o: $(srcdir)/cplus-dem.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/demangle.h $(INCDIR)/libiberty.h \
	$(INCDIR)/safe-ctype.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/cplus-dem.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/cplus-dem.c $(OUTPUT_OPTION)

./crc32.o: $(srcdir)/crc32.c config.h $(INCDIR)/ansidecl.h $(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/crc32.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/crc32.c $(OUTPUT_OPTION)

./dyn-string.o: $(srcdir)/dyn-string.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/dyn-string.h $(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/dyn-string.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/dyn-string.c $(OUTPUT_OPTION)

./fdmatch.o: $(srcdir)/fdmatch.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/fdmatch.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/fdmatch.c $(OUTPUT_OPTION)

./ffs.o: $(srcdir)/ffs.c
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/ffs.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/ffs.c $(OUTPUT_OPTION)

./fibheap.o: $(srcdir)/fibheap.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/fibheap.h $(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/fibheap.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/fibheap.c $(OUTPUT_OPTION)

./filename_cmp.o: $(srcdir)/filename_cmp.c config.h $(INCDIR)/filenames.h \
	$(INCDIR)/safe-ctype.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/filename_cmp.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/filename_cmp.c $(OUTPUT_OPTION)

./floatformat.o: $(srcdir)/floatformat.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/floatformat.h $(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/floatformat.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/floatformat.c $(OUTPUT_OPTION)

./fnmatch.o: $(srcdir)/fnmatch.c config.h $(INCDIR)/fnmatch.h \
	$(INCDIR)/safe-ctype.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/fnmatch.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/fnmatch.c $(OUTPUT_OPTION)

./fopen_unlocked.o: $(srcdir)/fopen_unlocked.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/fopen_unlocked.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/fopen_unlocked.c $(OUTPUT_OPTION)

./getcwd.o: $(srcdir)/getcwd.c config.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/getcwd.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/getcwd.c $(OUTPUT_OPTION)

./getopt.o: $(srcdir)/getopt.c config.h $(INCDIR)/ansidecl.h $(INCDIR)/getopt.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/getopt.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/getopt.c $(OUTPUT_OPTION)

./getopt1.o: $(srcdir)/getopt1.c config.h $(INCDIR)/getopt.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/getopt1.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/getopt1.c $(OUTPUT_OPTION)

./getpagesize.o: $(srcdir)/getpagesize.c config.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/getpagesize.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/getpagesize.c $(OUTPUT_OPTION)

./getpwd.o: $(srcdir)/getpwd.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/getpwd.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/getpwd.c $(OUTPUT_OPTION)

./getruntime.o: $(srcdir)/getruntime.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/getruntime.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/getruntime.c $(OUTPUT_OPTION)

./gettimeofday.o: $(srcdir)/gettimeofday.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/gettimeofday.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/gettimeofday.c $(OUTPUT_OPTION)

./hashtab.o: $(srcdir)/hashtab.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/hashtab.h $(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/hashtab.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/hashtab.c $(OUTPUT_OPTION)

./hex.o: $(srcdir)/hex.c config.h $(INCDIR)/ansidecl.h $(INCDIR)/libiberty.h \
	$(INCDIR)/safe-ctype.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/hex.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/hex.c $(OUTPUT_OPTION)

./index.o: $(srcdir)/index.c
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/index.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/index.c $(OUTPUT_OPTION)

./insque.o: $(srcdir)/insque.c
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/insque.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/insque.c $(OUTPUT_OPTION)

./lbasename.o: $(srcdir)/lbasename.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/filenames.h $(INCDIR)/libiberty.h \
	$(INCDIR)/safe-ctype.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/lbasename.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/lbasename.c $(OUTPUT_OPTION)

./lrealpath.o: $(srcdir)/lrealpath.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/lrealpath.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/lrealpath.c $(OUTPUT_OPTION)

./make-relative-prefix.o: $(srcdir)/make-relative-prefix.c config.h \
	$(INCDIR)/ansidecl.h $(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/make-relative-prefix.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/make-relative-prefix.c $(OUTPUT_OPTION)

./make-temp-file.o: $(srcdir)/make-temp-file.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/make-temp-file.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/make-temp-file.c $(OUTPUT_OPTION)

./md5.o: $(srcdir)/md5.c config.h $(INCDIR)/ansidecl.h $(INCDIR)/md5.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/md5.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/md5.c $(OUTPUT_OPTION)

./memchr.o: $(srcdir)/memchr.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/memchr.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/memchr.c $(OUTPUT_OPTION)

./memcmp.o: $(srcdir)/memcmp.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/memcmp.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/memcmp.c $(OUTPUT_OPTION)

./memcpy.o: $(srcdir)/memcpy.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/memcpy.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/memcpy.c $(OUTPUT_OPTION)

./memmem.o: $(srcdir)/memmem.c config.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/memmem.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/memmem.c $(OUTPUT_OPTION)

./memmove.o: $(srcdir)/memmove.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/memmove.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/memmove.c $(OUTPUT_OPTION)

./mempcpy.o: $(srcdir)/mempcpy.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/mempcpy.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/mempcpy.c $(OUTPUT_OPTION)

./memset.o: $(srcdir)/memset.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/memset.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/memset.c $(OUTPUT_OPTION)

./mkstemps.o: $(srcdir)/mkstemps.c config.h $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/mkstemps.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/mkstemps.c $(OUTPUT_OPTION)

./msdos.o: $(srcdir)/msdos.c
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/msdos.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/msdos.c $(OUTPUT_OPTION)

./objalloc.o: $(srcdir)/objalloc.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/objalloc.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/objalloc.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/objalloc.c $(OUTPUT_OPTION)

./obstack.o: $(srcdir)/obstack.c config.h $(INCDIR)/obstack.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/obstack.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/obstack.c $(OUTPUT_OPTION)

./partition.o: $(srcdir)/partition.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h $(INCDIR)/partition.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/partition.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/partition.c $(OUTPUT_OPTION)

./pex-common.o: $(srcdir)/pex-common.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h $(srcdir)/pex-common.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/pex-common.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/pex-common.c $(OUTPUT_OPTION)

./pex-djgpp.o: $(srcdir)/pex-djgpp.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h $(srcdir)/pex-common.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/pex-djgpp.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/pex-djgpp.c $(OUTPUT_OPTION)

./pex-msdos.o: $(srcdir)/pex-msdos.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h $(srcdir)/pex-common.h \
	$(INCDIR)/safe-ctype.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/pex-msdos.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/pex-msdos.c $(OUTPUT_OPTION)

./pex-one.o: $(srcdir)/pex-one.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/pex-one.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/pex-one.c $(OUTPUT_OPTION)

./pex-unix.o: $(srcdir)/pex-unix.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h $(srcdir)/pex-common.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/pex-unix.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/pex-unix.c $(OUTPUT_OPTION)

./pex-win32.o: $(srcdir)/pex-win32.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h $(srcdir)/pex-common.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/pex-win32.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/pex-win32.c $(OUTPUT_OPTION)

./pexecute.o: $(srcdir)/pexecute.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/pexecute.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/pexecute.c $(OUTPUT_OPTION)

./physmem.o: $(srcdir)/physmem.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/physmem.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/physmem.c $(OUTPUT_OPTION)

./putenv.o: $(srcdir)/putenv.c config.h $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/putenv.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/putenv.c $(OUTPUT_OPTION)

./random.o: $(srcdir)/random.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/random.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/random.c $(OUTPUT_OPTION)

./regex.o: $(srcdir)/regex.c config.h $(INCDIR)/ansidecl.h $(INCDIR)/xregex.h \
	$(INCDIR)/xregex2.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/regex.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/regex.c $(OUTPUT_OPTION)

./rename.o: $(srcdir)/rename.c config.h $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/rename.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/rename.c $(OUTPUT_OPTION)

./rindex.o: $(srcdir)/rindex.c
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/rindex.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/rindex.c $(OUTPUT_OPTION)

./safe-ctype.o: $(srcdir)/safe-ctype.c $(INCDIR)/ansidecl.h \
	$(INCDIR)/safe-ctype.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/safe-ctype.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/safe-ctype.c $(OUTPUT_OPTION)

./setenv.o: $(srcdir)/setenv.c config.h $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/setenv.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/setenv.c $(OUTPUT_OPTION)

./sha1.o: $(srcdir)/sha1.c config.h $(INCDIR)/ansidecl.h $(INCDIR)/sha1.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/sha1.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/sha1.c $(OUTPUT_OPTION)

./sigsetmask.o: $(srcdir)/sigsetmask.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/sigsetmask.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/sigsetmask.c $(OUTPUT_OPTION)

./snprintf.o: $(srcdir)/snprintf.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/snprintf.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/snprintf.c $(OUTPUT_OPTION)

./sort.o: $(srcdir)/sort.c config.h $(INCDIR)/ansidecl.h $(INCDIR)/libiberty.h \
	$(INCDIR)/sort.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/sort.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/sort.c $(OUTPUT_OPTION)

./spaces.o: $(srcdir)/spaces.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/spaces.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/spaces.c $(OUTPUT_OPTION)

./splay-tree.o: $(srcdir)/splay-tree.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h $(INCDIR)/splay-tree.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/splay-tree.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/splay-tree.c $(OUTPUT_OPTION)

./stpcpy.o: $(srcdir)/stpcpy.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/stpcpy.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/stpcpy.c $(OUTPUT_OPTION)

./stpncpy.o: $(srcdir)/stpncpy.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/stpncpy.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/stpncpy.c $(OUTPUT_OPTION)

./strcasecmp.o: $(srcdir)/strcasecmp.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/strcasecmp.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/strcasecmp.c $(OUTPUT_OPTION)

./strchr.o: $(srcdir)/strchr.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/strchr.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/strchr.c $(OUTPUT_OPTION)

./strdup.o: $(srcdir)/strdup.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/strdup.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/strdup.c $(OUTPUT_OPTION)

./strerror.o: $(srcdir)/strerror.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/strerror.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/strerror.c $(OUTPUT_OPTION)

./strncasecmp.o: $(srcdir)/strncasecmp.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/strncasecmp.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/strncasecmp.c $(OUTPUT_OPTION)

./strncmp.o: $(srcdir)/strncmp.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/strncmp.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/strncmp.c $(OUTPUT_OPTION)

./strndup.o: $(srcdir)/strndup.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/strndup.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/strndup.c $(OUTPUT_OPTION)

./strrchr.o: $(srcdir)/strrchr.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/strrchr.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/strrchr.c $(OUTPUT_OPTION)

./strsignal.o: $(srcdir)/strsignal.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/strsignal.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/strsignal.c $(OUTPUT_OPTION)

./strstr.o: $(srcdir)/strstr.c
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/strstr.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/strstr.c $(OUTPUT_OPTION)

./strtod.o: $(srcdir)/strtod.c $(INCDIR)/ansidecl.h $(INCDIR)/safe-ctype.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/strtod.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/strtod.c $(OUTPUT_OPTION)

./strtol.o: $(srcdir)/strtol.c config.h $(INCDIR)/safe-ctype.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/strtol.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/strtol.c $(OUTPUT_OPTION)

./strtoul.o: $(srcdir)/strtoul.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/safe-ctype.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/strtoul.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/strtoul.c $(OUTPUT_OPTION)

./strverscmp.o: $(srcdir)/strverscmp.c $(INCDIR)/ansidecl.h $(INCDIR)/libiberty.h \
	$(INCDIR)/safe-ctype.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/strverscmp.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/strverscmp.c $(OUTPUT_OPTION)

./tmpnam.o: $(srcdir)/tmpnam.c
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/tmpnam.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/tmpnam.c $(OUTPUT_OPTION)

./unlink-if-ordinary.o: $(srcdir)/unlink-if-ordinary.c config.h \
	$(INCDIR)/ansidecl.h $(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/unlink-if-ordinary.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/unlink-if-ordinary.c $(OUTPUT_OPTION)

./vasprintf.o: $(srcdir)/vasprintf.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/vasprintf.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/vasprintf.c $(OUTPUT_OPTION)

./vfork.o: $(srcdir)/vfork.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/vfork.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/vfork.c $(OUTPUT_OPTION)

./vfprintf.o: $(srcdir)/vfprintf.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/vfprintf.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/vfprintf.c $(OUTPUT_OPTION)

./vprintf.o: $(srcdir)/vprintf.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/vprintf.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/vprintf.c $(OUTPUT_OPTION)

./vsnprintf.o: $(srcdir)/vsnprintf.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/vsnprintf.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/vsnprintf.c $(OUTPUT_OPTION)

./vsprintf.o: $(srcdir)/vsprintf.c $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/vsprintf.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/vsprintf.c $(OUTPUT_OPTION)

./waitpid.o: $(srcdir)/waitpid.c config.h $(INCDIR)/ansidecl.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/waitpid.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/waitpid.c $(OUTPUT_OPTION)

./xatexit.o: $(srcdir)/xatexit.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/xatexit.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/xatexit.c $(OUTPUT_OPTION)

./xexit.o: $(srcdir)/xexit.c config.h $(INCDIR)/ansidecl.h $(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/xexit.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/xexit.c $(OUTPUT_OPTION)

./xmalloc.o: $(srcdir)/xmalloc.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/xmalloc.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/xmalloc.c $(OUTPUT_OPTION)

./xmemdup.o: $(srcdir)/xmemdup.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/xmemdup.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/xmemdup.c $(OUTPUT_OPTION)

./xstrdup.o: $(srcdir)/xstrdup.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/xstrdup.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/xstrdup.c $(OUTPUT_OPTION)

./xstrerror.o: $(srcdir)/xstrerror.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/xstrerror.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/xstrerror.c $(OUTPUT_OPTION)

./xstrndup.o: $(srcdir)/xstrndup.c config.h $(INCDIR)/ansidecl.h \
	$(INCDIR)/libiberty.h
	if [ x"$(PICFLAG)" != x ]; then \
	  $(COMPILE.c) $(PICFLAG) $(srcdir)/xstrndup.c -o pic/$@; \
	else true; fi
	$(COMPILE.c) $(srcdir)/xstrndup.c $(OUTPUT_OPTION)

8223 8224 8225 8226 8227 8228 8229 8230 8231 8232 8233 8234 8235 8236 8237 8238 8239 8240 8241 8242 8243 8244 8245 8246 8247 8248 8249 8250 8251 8252 8253 8254 8255 8256 8257 8258 8259 8260 8261 8262 8263 8264 8265 8266 8267 8268 8269 8270 8271 8272 8273 8274 8275 8276 8277 8278 8279 8280 8281 8282 8283 8284 8285 8286 8287 8288 8289 8290 8291 8292 8293 8294 8295 8296 8297 8298 8299 8300 8301 8302 8303 8304 8305 8306 8307 8308 8309 8310 8311 8312 8313 8314 8315 8316 8317 8318 8319 8320 8321 8322 8323 8324 8325 8326 8327 8328 8329 8330 8331 8332 8333 8334 8335 8336 8337 8338 8339 8340 8341 8342 8343 8344 8345 8346 8347 8348 8349 8350 8351 8352 8353 8354 8355 8356 8357 8358 8359 8360 8361 8362 8363 8364 8365 8366 8367 8368 8369 8370 8371 8372 8373 8374 8375 8376 8377 8378 8379 8380 8381 8382 8383 8384 8385 8386 8387 8388 8389 8390 8391 8392 8393 8394 8395 8396 8397 8398 8399 8400 8401 8402 8403 8404 8405 8406 8407 8408 8409 8410 8411 8412 8413 8414 8415 8416 8417 8418 8419 8420 8421 8422 8423 8424 8425 8426 8427 8428 8429 8430 8431 8432 8433 8434 8435 8436 8437 8438 8439 8440 8441 8442 8443 8444 8445 8446 8447 8448 8449 8450 8451 8452 8453 8454 8455 8456 8457 8458 8459 8460 8461 8462 8463 8464 8465 8466 8467 8468 8469 8470 8471 8472 8473 8474 8475 8476 8477 8478 8479 8480 8481 8482 8483 8484 8485 8486 8487 8488 8489 8490 8491 8492 8493 8494 8495 8496 8497 8498 8499 8500 8501 8502 8503 8504 8505 8506 8507 8508 8509 8510 8511 8512 8513 8514 8515 8516 8517 8518 8519 8520 8521 8522 8523 8524 8525 8526 8527 8528 8529 8530 8531 8532 8533 8534 8535 8536 8537 8538 8539 8540 8541 8542 8543 8544 8545 8546 8547 8548 8549 8550 8551 8552 8553 8554 8555 8556 8557 8558 8559 8560 8561 8562 8563 8564 8565 8566 8567 8568 8569 8570 8571 8572 8573 8574 8575 8576 8577 8578 8579 8580 8581 8582 8583 8584 8585 8586 8587 8588 8589 8590 8591 8592 8593 8594 8595 8596 8597 8598 8599 8600 8601 8602 8603 8604 8605 8606 8607 8608 8609 8610 8611 8612 8613 8614 8615 8616 8617 8618 8619 8620 8621 8622 8623 8624 8625 8626 8627 8628 8629 8630 8631 8632 8633 8634 8635 8636 8637 8638 8639 8640 8641 8642 8643 8644 8645 8646 8647 8648 8649 8650 8651 8652 8653 8654 8655 8656 8657 8658 8659 8660 8661 8662 8663 8664 8665 8666 8667 8668 8669 8670 8671 8672 8673 8674 8675 8676 8677 8678 8679 8680 8681 8682 8683 8684 8685 8686 8687 8688 8689 8690 8691 8692 8693 8694 8695 8696 8697 8698 8699 8700 8701 8702 8703 8704 8705 8706 8707 8708 8709 8710 8711 8712 8713 8714 8715 8716 8717 8718 8719 8720 8721 8722 8723 8724 8725 8726 8727 8728 8729 8730 8731 8732 8733 8734 8735 8736 8737 8738 8739 8740 8741 8742 8743 8744 8745 8746 8747 8748 8749 8750 8751 8752 8753 8754 8755 8756 8757 8758 8759 8760 8761 8762 8763 8764 8765 8766 8767 8768 8769 8770 8771 8772 8773 8774 8775 8776 8777 8778 8779 8780 8781 8782 8783 8784 8785 8786 8787 8788 8789 8790 8791 8792 8793 8794 8795 8796 8797 8798 8799 8800 8801 8802 8803 8804 8805 8806 8807 8808 8809 8810 8811 8812 8813 8814 8815 8816 8817 8818 8819 8820 8821 8822 8823 8824 8825 8826 8827 8828 8829 8830 8831 8832 8833 8834 8835 8836 8837 8838 8839 8840 8841 8842 8843 8844 8845 8846 8847 8848 8849 8850 8851 8852 8853 8854 8855 8856 8857 8858 8859 8860 8861 8862 8863 8864 8865 8866 8867 8868 8869 8870 8871 8872 8873 8874 8875 8876 8877 8878 8879 8880 8881 8882 8883 8884 8885 8886 8887 8888 8889 8890 8891 8892 8893 8894 8895 8896 8897 8898 8899 8900 8901 8902 8903 8904 8905 8906 8907 8908 8909 8910 8911 8912 8913 8914 8915 8916 8917 8918 8919 8920 8921 8922 8923 8924 8925 8926 8927 8928 8929 8930 8931 8932 8933 
8934 8935 8936 8937 8938 8939 8940 8941 8942 8943 8944 8945 8946 8947 8948 8949 8950 8951 8952 8953 8954 8955 8956 8957 8958 8959 8960 8961 8962 8963 8964 8965 8966 8967 8968 8969 8970 8971 8972 8973 8974 8975 8976 8977 8978 8979 8980 8981 8982 8983 8984 8985 8986 8987 8988 8989 8990 8991 8992 8993 8994 8995 8996 8997 8998 8999 9000 9001 9002 9003 9004 9005 9006 9007 9008 9009 9010 9011 9012 9013 9014 9015 9016 9017 9018 9019 9020 9021 9022 9023 9024 9025 9026 9027 9028 9029 9030 9031 9032 9033 9034 9035 9036 9037 9038 9039 9040 9041 9042 9043 9044 9045 9046 9047 9048 9049 9050 9051 9052 9053 9054 9055 9056 9057 9058 9059 9060 9061 9062 9063 9064 9065 9066 9067 9068 9069 9070 9071 9072 9073 9074 9075 9076 9077 9078 9079 9080 9081 9082 9083 9084 9085 9086 9087 9088 9089 9090 9091 9092 9093 9094 9095 9096 9097 9098 9099 9100 9101 9102 9103 9104 9105 9106 9107 9108 9109 9110 9111 9112 9113 9114 9115 9116 9117 9118 9119 9120 9121 9122 9123 9124 9125 9126 9127 9128 9129 9130 9131 9132 9133 9134 9135 9136 9137 9138 9139 9140 9141 9142 9143 9144 9145 9146 9147 9148 9149 9150 9151 9152 9153 9154 9155 9156 9157 9158 9159 9160 9161 9162 9163 9164 9165 9166 9167 9168 9169 9170 9171 9172 9173 9174 9175 9176 9177 9178 9179 9180 9181 9182 9183 9184 9185 9186 9187 9188 9189 9190 9191 9192 9193 9194 9195 9196 9197 9198 9199 9200 9201 9202 9203 9204 9205 9206 9207 9208 9209 9210 9211 9212 9213 9214 9215 9216 9217 9218 9219 9220 9221 9222 9223 9224 9225 9226 9227 9228 9229 9230 9231 9232 9233 9234 9235 9236 9237 9238 9239 9240 9241 9242 9243 9244 9245 9246 9247 9248 9249 9250 9251 9252 9253 9254 9255 9256 9257 9258 9259 9260 9261 9262 9263 9264 9265 9266 9267 9268 9269 9270 9271 9272 9273 9274 9275 9276 9277 9278 9279 9280 9281 9282 9283 9284 9285 9286 9287 9288 9289 9290 9291 9292 9293 9294 9295 9296 9297 9298 9299 9300 9301 9302 9303 9304 9305 9306 9307 9308 9309 9310 9311 9312 9313 9314 9315 9316 9317 9318 9319 9320 9321 9322 9323 9324 9325 9326 9327 9328 9329 9330 9331 9332 9333 9334 9335 9336 9337 9338 9339 9340 9341 9342 9343 9344 9345 9346 9347 9348 9349 9350 9351 9352 9353 9354 9355 9356 9357 9358 9359 9360 9361 9362 9363 9364 9365 9366 9367 9368 9369 9370 9371 9372 9373 9374 9375 9376 9377 9378 9379 9380 9381 9382 9383 9384 9385 9386 9387 9388 9389 9390 9391 9392 9393 9394 9395 9396 9397 9398 9399 9400 9401 9402 9403 9404 9405 9406 9407 9408 9409 9410 9411 9412 9413 9414 9415 9416 9417 9418 9419 9420 9421 9422 9423 9424 9425 9426 9427 9428 9429 9430 9431 9432 9433 9434 9435 9436 9437 9438 9439 9440 9441 9442 9443 9444 9445 9446 9447 9448 9449 9450 9451 9452 9453 9454 9455 9456 9457 9458 9459 9460 9461 9462 9463 9464 9465 9466 9467 9468 9469 9470 9471 9472 9473 9474 9475 9476 9477 9478 9479 9480 9481 9482 9483 9484 9485 9486 9487 9488 9489 9490 9491 9492 9493 9494 9495 9496 9497 9498 9499 9500 9501 9502 9503 9504 9505 9506 9507 9508 9509 9510 9511 9512 9513 9514 9515 9516 9517 9518 9519 9520 9521 9522 9523 9524 9525 9526 9527 9528 9529 9530 9531 9532 9533 9534 9535 9536 9537 9538 9539 9540 9541 9542
/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "output.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "integrate.h"
#include "function.h"
#include "toplev.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"

/* Return nonzero if there is a bypass for the output of 
   OUT_INSN and the fp store IN_INSN.  */
int
hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
	  && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
  

#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif

static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static bool pa_handle_option (size_t, const char *, int);
static int hppa_address_cost (rtx);
static bool hppa_rtx_costs (rtx, int, int, int *);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static int forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx hppa_builtin_saveregs (void);
static tree hppa_gimplify_va_arg_expr (tree, tree, tree *, tree *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
#ifdef HPUX_LONG_DOUBLE_LIBRARY
static void pa_hpux_init_libfuncs (void);
#endif
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				  tree, bool);
static int pa_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
				 tree, bool);
static struct machine_function * pa_init_machine_status (void);
static enum reg_class pa_secondary_reload (bool, rtx, enum reg_class,
					   enum machine_mode,
					   secondary_reload_info *);


/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx hppa_compare_op0, hppa_compare_op1;
enum cmp_type hppa_branch_type;

/* Which cpu we are scheduling for.  */
enum processor_type pa_cpu = TARGET_SCHED_DEFAULT;

/* The UNIX standard to use for predefines and linking.  */
int flag_pa_unix = TARGET_HPUX_11_11 ? 1998 : TARGET_HPUX_10_10 ? 1995 : 1993;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct deferred_plabel GTY(())
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;


/* Initialize the GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION pa_handle_option

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#ifdef HPUX_LONG_DOUBLE_LIBRARY
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
#endif

#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_tls_referenced_p

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

struct gcc_target targetm = TARGET_INITIALIZER;

/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */
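  /* For example, "-mfixed-range=fr4-fr31" fixes the single range
     fr4..fr31, and "-mfixed-range=fr4-fr15,fr20-fr31" fixes two
     disjoint ranges.  */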

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
	{
	  warning (0, "value of -mfixed-range must have form REG1-REG2");
	  return;
	}
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
	{
	  warning (0, "unknown register name: %s", str);
	  return;
	}

      last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning (0, "unknown register name: %s", dash + 1);
	  return;
	}

      *dash = '-';

      if (first > last)
	{
	  warning (0, "%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}

/* Implement TARGET_HANDLE_OPTION.  */

static bool
pa_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mnosnake:
    case OPT_mpa_risc_1_0:
    case OPT_march_1_0:
      target_flags &= ~(MASK_PA_11 | MASK_PA_20);
      return true;

    case OPT_msnake:
    case OPT_mpa_risc_1_1:
    case OPT_march_1_1:
      target_flags &= ~MASK_PA_20;
      target_flags |= MASK_PA_11;
      return true;

    case OPT_mpa_risc_2_0:
    case OPT_march_2_0:
      target_flags |= MASK_PA_11 | MASK_PA_20;
      return true;

    case OPT_mschedule_:
      if (strcmp (arg, "8000") == 0)
	pa_cpu = PROCESSOR_8000;
      else if (strcmp (arg, "7100") == 0)
	pa_cpu = PROCESSOR_7100;
      else if (strcmp (arg, "700") == 0)
	pa_cpu = PROCESSOR_700;
      else if (strcmp (arg, "7100LC") == 0)
	pa_cpu = PROCESSOR_7100LC;
      else if (strcmp (arg, "7200") == 0)
	pa_cpu = PROCESSOR_7200;
      else if (strcmp (arg, "7300") == 0)
	pa_cpu = PROCESSOR_7300;
      else
	return false;
      return true;

    case OPT_mfixed_range_:
      fix_range (arg);
      return true;

#if TARGET_HPUX
    case OPT_munix_93:
      flag_pa_unix = 1993;
      return true;
#endif

#if TARGET_HPUX_10_10
    case OPT_munix_95:
      flag_pa_unix = 1995;
      return true;
#endif

#if TARGET_HPUX_11_11
    case OPT_munix_98:
      flag_pa_unix = 1998;
      return true;
#endif

    default:
      return true;
    }
}

void
override_options (void)
{
  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (! USING_SJLJ_EXCEPTIONS && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
   {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
   }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}

static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] =
    built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
  implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED]
    = implicit_built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
#endif
#if TARGET_HPUX_11
  if (built_in_decls [BUILT_IN_FINITE])
    set_user_assembler_name (built_in_decls [BUILT_IN_FINITE], "_Isfinite");
  if (built_in_decls [BUILT_IN_FINITEF])
    set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF], "_Isfinitef");
#endif
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_alloc_cleared (sizeof (machine_function));
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
symbolic_expression_p (rtx x)
{

  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return (symbolic_operand (x, VOIDmode));
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
cint_ok_for_move (HOST_WIDE_INT ival)
{
  /* OK if ldo, ldil, or zdepi can be used.  */
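  /* For example, 4660 (0x1234) fits in 14 bits, so ldo works;
     0x12345000 has its low 11 bits clear, so ldil works; and
     0x00f00000 is a contiguous run of ones, so zdepi works.  */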
  return (VAL_14_BITS_P (ival)
	  || ldil_cint_p (ival)
	  || zdepi_cint_p (ival));
}

/* Return truth value of whether OP can be used as an operand in an
   adddi3 insn.  */
int
adddi3_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_INT
	      && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op))));
}

/* True iff the operand OP can be used as the destination operand of
   an integer store.  This also implies the operand could be used as
   the source operand of an integer load.  Symbolic, lo_sum and indexed
   memory operands are not allowed.  We accept reloading pseudos and
   other memory operands.  */
int
integer_store_memory_operand (rtx op, enum machine_mode mode)
{
  return ((reload_in_progress
	   && REG_P (op)
	   && REGNO (op) >= FIRST_PSEUDO_REGISTER
	   && reg_renumber [REGNO (op)] < 0)
	  || (GET_CODE (op) == MEM
	      && (reload_in_progress || memory_address_p (mode, XEXP (op, 0)))
	      && !symbolic_memory_operand (op, VOIDmode)
	      && !IS_LO_SUM_DLT_ADDR_P (XEXP (op, 0))
	      && !IS_INDEX_ADDR_P (XEXP (op, 0))));
}

/* True iff ldil can be used to load this CONST_INT.  The least
   significant 11 bits of the value must be zero and the value must
   not change sign when extended from 32 to 64 bits.  */
int
ldil_cint_p (HOST_WIDE_INT ival)
{
  HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);
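  /* X isolates the low 11 bits and the sign-extension bits (bit 31
     and up); ldil works iff the low bits are zero and the upper bits
     are all zero or all one.  E.g., ival == 0x12345800 gives x == 0,
     so ldil can load it; ival == 0x123455ff leaves x == 0x5ff, so it
     can't.  */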

  return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
}

/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
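  /* E.g., x == 0x78 (binary 1111000): lsb_mask == 0x8 and
     t == ((0x7 + 0x8) & ~0x7) == 0x8, a power of two, so 0x78 is
     accepted.  */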
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}

/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
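  /* E.g., for the original mask ~0x3c0 (the 1..10..01..1 pattern),
     ~mask == 0x3c0; adding its lowest set bit 0x40 gives 0x400, a
     power of two, so the mask is accepted.  */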
  return (mask & (mask - 1)) == 0;
}

/* True iff depi can be used to compute (reg | MASK).  */
int
ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
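  /* E.g., mask == 0x1f8, a contiguous run of ones: adding its lowest
     set bit 0x8 gives 0x200, a power of two, so it is accepted.  */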
  return (mask & (mask - 1)) == 0;
}

/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      /* We do not want to go through the movXX expanders here since that
	 would create recursion.

	 Nor do we really want to call a generator for a named pattern
	 since that requires multiple patterns if we want to support
	 multiple word sizes.

	 So instead we just emit the raw set, which avoids the movXX
	 expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      emit_insn (gen_rtx_SET (VOIDmode, reg, orig));
      current_function_uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
	 result.  This allows the sequence to be deleted when the final
	 result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
		 ? reg : gen_reg_rtx (Pmode));

      emit_move_insn (tmp_reg,
		      gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
				    gen_rtx_HIGH (word_mode, orig)));
      pic_ref
	= gen_const_mem (Pmode,
		         gen_rtx_LO_SUM (Pmode, tmp_reg,
				         gen_rtx_UNSPEC (Pmode,
						         gen_rtvec (1, orig),
						         UNSPEC_DLTIND14R)));

      current_function_uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
      
      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	{
	  if (INT_14_BITS (orig))
	    return plus_constant (base, INTVAL (orig));
	  orig = force_reg (Pmode, orig);
	}
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}

static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
		  	   LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}

static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;
  enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);

  switch (model) 
    {
      case TLS_MODEL_GLOBAL_DYNAMIC:
	tmp = gen_reg_rtx (Pmode);
	if (flag_pic)
	  emit_insn (gen_tgd_load_pic (tmp, addr));
	else
	  emit_insn (gen_tgd_load (tmp, addr));
	ret = hppa_tls_call (tmp);
	break;

      case TLS_MODEL_LOCAL_DYNAMIC:
	ret = gen_reg_rtx (Pmode);
	tmp = gen_reg_rtx (Pmode);
	start_sequence ();
	if (flag_pic)
	  emit_insn (gen_tld_load_pic (tmp, addr));
	else
	  emit_insn (gen_tld_load (tmp, addr));
	t1 = hppa_tls_call (tmp);
	insn = get_insns ();
	end_sequence ();
	t2 = gen_reg_rtx (Pmode);
	emit_libcall_block (insn, t2, t1, 
			    gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
				            UNSPEC_TLSLDBASE));
	emit_insn (gen_tld_offset_load (ret, addr, t2));
	break;

      case TLS_MODEL_INITIAL_EXEC:
	tp = gen_reg_rtx (Pmode);
	tmp = gen_reg_rtx (Pmode);
	ret = gen_reg_rtx (Pmode);
	emit_insn (gen_tp_load (tp));
	if (flag_pic)
	  emit_insn (gen_tie_load_pic (tmp, addr));
	else
	  emit_insn (gen_tie_load (tmp, addr));
	emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
	break;

      case TLS_MODEL_LOCAL_EXEC:
	tp = gen_reg_rtx (Pmode);
	ret = gen_reg_rtx (Pmode);
	emit_insn (gen_tp_load (tp));
	emit_insn (gen_tle_load (ret, addr, tp));
	break;

      default:
	gcc_unreachable ();
    }

  return ret;
}

/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE and WIN are passed so that this macro can use
   GO_IF_LEGITIMATE_ADDRESS.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)


   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */

rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (PA_SYMBOL_REF_TLS_P (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
	  || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	      ? (TARGET_PA_20 ? 0x3fff : 0x1f) : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
	 are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~ mask) + mask + 1;
      else
	newoffset = (offset & ~ mask);
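
      /* For instance, with the MODE_INT mask 0x3fff, an offset of
	 0x17ff0 has (offset & mask) == 0x3ff0 >= 0x2000, so we round
	 up: newoffset == 0x18000.  The final address then becomes
	 (X + 0x18000) + (0x17ff0 - 0x18000), a displacement of -0x10,
	 which easily fits in 14 bits.  */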

      /* If the newoffset will not fit in 14 bits (ldo), then
	 handling this would take 4 or 5 instructions (2 to load
	 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
	 add the new offset and the SYMBOL_REF.)  Combine cannot
	 handle 4->2 or 5->2 combinations, so do not create
	 them.  */
      if (! VAL_14_BITS_P (newoffset)
	  && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  rtx const_part = plus_constant (XEXP (x, 0), newoffset);
	  rtx tmp_reg
	    = force_reg (Pmode,
			 gen_rtx_HIGH (Pmode, const_part));
	  ptr_reg
	    = force_reg (Pmode,
			 gen_rtx_LO_SUM (Pmode,
					 tmp_reg, const_part));
	}
      else
	{
	  if (! VAL_14_BITS_P (newoffset))
	    int_part = force_reg (Pmode, GEN_INT (newoffset));
	  else
	    int_part = GEN_INT (newoffset);

	  ptr_reg = force_reg (Pmode,
			       gen_rtx_PLUS (Pmode,
					     force_reg (Pmode, XEXP (x, 0)),
					     int_part));
	}
      return plus_constant (ptr_reg, offset - newoffset);
    }

  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
	  || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
        reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
					     gen_rtx_MULT (Pmode,
							   reg2,
							   GEN_INT (val)),
					     reg1));
    }

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {

      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx, orig_base;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
	 then emit_move_sequence will turn on REG_POINTER so we'll know
	 it's a base register below.  */
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
	  && REG_POINTER (reg1))
	{
	  base = reg1;
	  orig_base = XEXP (XEXP (x, 0), 1);
	  idx = gen_rtx_PLUS (Pmode,
			      gen_rtx_MULT (Pmode,
					    XEXP (XEXP (XEXP (x, 0), 0), 0),
					    XEXP (XEXP (XEXP (x, 0), 0), 1)),
			      XEXP (x, 1));
	}
      else if (GET_CODE (reg2) == REG
	       && REG_POINTER (reg2))
	{
	  base = reg2;
	  orig_base = XEXP (x, 1);
	  idx = XEXP (x, 0);
	}

      if (base == 0)
	return orig;

      /* If the index adds a large constant, try to scale the
	 constant so that it can be loaded with only one insn.  */
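      /* E.g., a scale of 8 with the constant 32760: 32760 % 8 == 0
	 and 32760 / 8 == 4095 fits in 14 bits, so we add 4095 to the
	 index register and keep the scaled index.  */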
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
			    / INTVAL (XEXP (XEXP (idx, 0), 1)))
	  && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
	{
	  /* Divide the CONST_INT by the scale factor, then add it to A.  */
	  int val = INTVAL (XEXP (idx, 1));

	  val /= INTVAL (XEXP (XEXP (idx, 0), 1));
	  reg1 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg1) != REG)
	    reg1 = force_reg (Pmode, force_operand (reg1, 0));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

	  /* We can now generate a simple scaled indexed address.  */
	  return
	    force_reg
	      (Pmode, gen_rtx_PLUS (Pmode,
				    gen_rtx_MULT (Pmode, reg1,
						  XEXP (XEXP (idx, 0), 1)),
				    base));
	}

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && INTVAL (XEXP (idx, 1)) <= 4096
	  && INTVAL (XEXP (idx, 1)) >= -4096)
	{
	  int val = INTVAL (XEXP (XEXP (idx, 0), 1));
	  rtx reg1, reg2;

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

	  reg2 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg2) != CONST_INT)
	    reg2 = force_reg (Pmode, force_operand (reg2, 0));

	  return force_reg (Pmode, gen_rtx_PLUS (Pmode,
						 gen_rtx_MULT (Pmode,
							       reg2,
							       GEN_INT (val)),
						 reg1));
	}

      /* Get the index into a register, then add the base + index and
	 return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_MULT (Pmode, reg1,
						    XEXP (XEXP (idx, 0), 1)),
				      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));

    }

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* See if this looks like
		(plus (mult (reg) (shadd_const))
		      (const (plus (symbol_ref) (const_int))))

	     where const_int is small.  In that case the const
	     expression is a valid pointer for indexing.

	     If const_int is big but can be divided evenly by shadd_const,
	     divide it and add the quotient to (reg).  This allows more
	     scaled indexed addresses.  */
	  if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
	      && GET_CODE (XEXP (x, 0)) == MULT
	      && GET_CODE (XEXP (y, 1)) == CONST_INT
	      && INTVAL (XEXP (y, 1)) >= -4096
	      && INTVAL (XEXP (y, 1)) <= 4095
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      int val = INTVAL (XEXP (XEXP (x, 0), 1));
	      rtx reg1, reg2;

	      reg1 = XEXP (x, 1);
	      if (GET_CODE (reg1) != REG)
		reg1 = force_reg (Pmode, force_operand (reg1, 0));

	      reg2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (reg2) != REG)
	        reg2 = force_reg (Pmode, force_operand (reg2, 0));

	      return force_reg (Pmode,
				gen_rtx_PLUS (Pmode,
					      gen_rtx_MULT (Pmode,
							    reg2,
							    GEN_INT (val)),
					      reg1));
	    }
	  else if ((mode == DFmode || mode == SFmode)
		   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
		   && GET_CODE (XEXP (x, 0)) == MULT
		   && GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
		   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
		   && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      regx1
		= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
					     / INTVAL (XEXP (XEXP (x, 0), 1))));
	      regx2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (regx2) != REG)
		regx2 = force_reg (Pmode, force_operand (regx2, 0));
	      regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
							regx2, regx1));
	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_MULT (Pmode, regx2,
						       XEXP (XEXP (x, 0), 1)),
					 force_reg (Pmode, XEXP (y, 0))));
	    }
	  else if (GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) >= -4096
		   && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      /* This is safe because of the guard page at the
		 beginning and end of the data space.  Just
		 return the original address.  */
	      return orig;
	    }
	  else
	    {
	      /* Doesn't look like one we can optimize.  */
	      regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	      regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	      regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	      regx1 = force_reg (Pmode,
				 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
						 regx1, regy2));
	      return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	    }
	}
    }

  return orig;
}

/* For the HPPA, REG, REG+CONST and LO_SUM addresses cost 1, a HIGH
   costs 2, and everything else (including symbolic constants and
   PIC addresses, which are very expensive) costs 4.

   It is no coincidence that this has the same structure
   as GO_IF_LEGITIMATE_ADDRESS.  */

static int
hppa_address_cost (rtx X)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
	*total = 0;
      else if (INT_14_BITS (x))
	*total = 1;
      else
	*total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
	  && outer_code != SET)
	*total = 0;
      else
        *total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
        *total = COSTS_N_INSNS (3);
      else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	*total = COSTS_N_INSNS (8);
      else
	*total = COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (14);
	  return true;
	}
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else
        *total = COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}

/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Return 1 if *X is a thread-local symbol.  */

static int
pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return PA_SYMBOL_REF_TLS_P (*x);
}

/* Return 1 if X contains a thread-local symbol.  */

bool
pa_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
}

/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      /* This is only safe up to the beginning of life analysis.  */
      gcc_assert (!no_new_pseudos);

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
			       copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand1) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = replace_equiv_address (operand0, tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = replace_equiv_address (operand1, tem);

  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
  if (scratch_reg
      && fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
				 XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p ((GET_MODE_SIZE (mode) == 4
				      ? SFmode : DFmode),
				     XEXP (XEXP (operand1, 0), 0))))))
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
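      /* E.g., (plus (reg) (const_int 0x5000)): 20480 is outside the
	 14-bit signed displacement range, so the address is computed
	 into SCRATCH_REG first.  */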
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
			      replace_equiv_address (operand1, scratch_reg)));
      return 1;
    }
  else if (scratch_reg
	   && fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& !memory_address_p ((GET_MODE_SIZE (mode) == 4
					? SFmode : DFmode),
				       XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p ((GET_MODE_SIZE (mode) == 4
					  ? SFmode : DFmode),
			   		 XEXP (XEXP (operand0, 0), 0)))))
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
								        0)),
						       Pmode,
						       XEXP (XEXP (operand0, 0),
								   0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode,
			      replace_equiv_address (operand0, scratch_reg),
			      operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     Use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and a register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (scratch_reg
	   && CONSTANT_P (operand1)
	   && fp_reg_operand (operand0, mode))
    {
      rtx const_mem, xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      const_mem = force_const_mem (mode, operand1);
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (const_mem, 0);
      emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
			      replace_equiv_address (const_mem, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory, FP register, or with a constant.  */
  else if (scratch_reg
	   && GET_CODE (operand0) == REG
	   && REGNO (operand0) < FIRST_PSEUDO_REGISTER
	   && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
	   && (GET_CODE (operand1) == MEM
	       || GET_CODE (operand1) == CONST_INT
	       || (GET_CODE (operand1) == REG
		   && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1))))))
    {
      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (GET_CODE (operand1) == MEM
	  && !memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  /* We are reloading the address into the scratch register, so we
	     want to make sure the scratch register is a full register.  */
	  scratch_reg = force_mode (word_mode, scratch_reg);

	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
								        0)),
						       Pmode,
						       XEXP (XEXP (operand1, 0),
						       0),
						       scratch_reg));

	  /* Now we are going to load the scratch register from memory,
	     we want to load it in the same width as the original MEM,
	     which must be the same as the width of the ultimate destination,
	     OPERAND0.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg,
			  replace_equiv_address (operand1, scratch_reg));
	}
      else
	{
	  /* We want to load the scratch register using the same mode as
	     the ultimate destination.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg, operand1);
	}

      /* And emit the insn to set the ultimate destination.  We know that
	 the scratch register has the same mode as the destination at this
	 point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
  /* Handle the most common case: storing into a register.  */
  else if (register_operand (operand0, mode))
    {
      if (register_operand (operand1, mode)
	  || (GET_CODE (operand1) == CONST_INT
	      && cint_ok_for_move (INTVAL (operand1)))
	  || (operand1 == CONST0_RTX (mode))
	  || (GET_CODE (operand1) == HIGH
	      && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
	  /* Only `general_operands' can come here, so MEM is ok.  */
	  || GET_CODE (operand1) == MEM)
	{
	  /* Various sets are created during RTL generation which don't
	     have the REG_POINTER flag correctly set.  After the CSE pass,
	     instruction recognition can fail if we don't consistently
	     set this flag when performing register copies.  This should
	     also improve the opportunities for creating insns that use
	     unscaled indexing.  */
	  if (REG_P (operand0) && REG_P (operand1))
	    {
	      if (REG_POINTER (operand1)
		  && !REG_POINTER (operand0)
		  && !HARD_REGISTER_P (operand0))
		copy_reg_pointer (operand0, operand1);
	      else if (REG_POINTER (operand0)
		       && !REG_POINTER (operand1)
		       && !HARD_REGISTER_P (operand1))
		copy_reg_pointer (operand1, operand0);
	    }
	  
	  /* When MEMs are broken out, the REG_POINTER flag doesn't
	     get set.  In some cases, we can set the REG_POINTER flag
	     from the declaration for the MEM.  */
	  if (REG_P (operand0)
	      && GET_CODE (operand1) == MEM
	      && !REG_POINTER (operand0))
	    {
	      tree decl = MEM_EXPR (operand1);

	      /* Set the register pointer flag and register alignment
		 if the declaration for this memory reference is a
		 pointer type.  Fortran indirect argument references
		 are ignored.  */
	      if (decl
		  && !(flag_argument_noalias > 1
		       && TREE_CODE (decl) == INDIRECT_REF
		       && TREE_CODE (TREE_OPERAND (decl, 0)) == PARM_DECL))
		{
		  tree type;

		  /* If this is a COMPONENT_REF, use the FIELD_DECL from
		     tree operand 1.  */
		  if (TREE_CODE (decl) == COMPONENT_REF)
		    decl = TREE_OPERAND (decl, 1);

		  type = TREE_TYPE (decl);
		  if (TREE_CODE (type) == ARRAY_TYPE)
		    type = get_inner_array_type (type);

		  if (POINTER_TYPE_P (type))
		    {
		      int align;

		      type = TREE_TYPE (type);
		      /* Using TYPE_ALIGN_OK is rather conservative as
			 only the Ada front end actually sets it.  */
		      align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
			       : BITS_PER_UNIT);
		      mark_reg_pointer (operand0, align);
		    }
		}
	    }

	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
    }
  else if (GET_CODE (operand0) == MEM)
    {
      if (mode == DFmode && operand1 == CONST0_RTX (mode)
	  && !(reload_in_progress || reload_completed))
	{
	  rtx temp = gen_reg_rtx (DFmode);

	  emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
	  return 1;
	}
      if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
	{
	  /* Run this case quickly.  */
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
      if (! (reload_in_progress || reload_completed))
	{
	  operands[0] = validize_mem (operand0);
	  operands[1] = operand1 = force_reg (mode, operand1);
	}
    }

  /* Simplify the source if we need to.
     Note we do have to handle function labels here, even though we do
     not consider them legitimate constants.  Loop optimizations can
     call emit_move_xxx with one as a source.  */
  if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
      || function_label_operand (operand1, mode)
      || (GET_CODE (operand1) == HIGH
	  && symbolic_operand (XEXP (operand1, 0), mode)))
    {
      int ishighonly = 0;

      if (GET_CODE (operand1) == HIGH)
	{
	  ishighonly = 1;
	  operand1 = XEXP (operand1, 0);
	}
      if (symbolic_operand (operand1, mode))
	{
	  /* Argh.  The assembler and linker can't handle arithmetic
	     involving plabels.

	     So we force the plabel into memory, load operand0 from
	     the memory location, then add in the constant part.  */
	  if ((GET_CODE (operand1) == CONST
	       && GET_CODE (XEXP (operand1, 0)) == PLUS
	       && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode))
	      || function_label_operand (operand1, mode))
	    {
	      rtx temp, const_part;

	      /* Figure out what (if any) scratch register to use.  */
	      if (reload_in_progress || reload_completed)
		{
		  scratch_reg = scratch_reg ? scratch_reg : operand0;
		  /* SCRATCH_REG will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  scratch_reg = force_mode (word_mode, scratch_reg);
		}
	      else if (flag_pic)
		scratch_reg = gen_reg_rtx (Pmode);

	      if (GET_CODE (operand1) == CONST)
		{
		  /* Save away the constant part of the expression.  */
		  const_part = XEXP (XEXP (operand1, 0), 1);
		  gcc_assert (GET_CODE (const_part) == CONST_INT);

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
		}
	      else
		{
		  /* No constant part.  */
		  const_part = NULL_RTX;

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, operand1);
		}


	      /* Get the address of the memory location.  PIC-ify it if
		 necessary.  */
	      temp = XEXP (temp, 0);
	      if (flag_pic)
		temp = legitimize_pic_address (temp, mode, scratch_reg);

	      /* Put the address of the memory location into our destination
		 register.  */
	      operands[1] = temp;
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* Now load from the memory location into our destination
		 register.  */
	      operands[1] = gen_rtx_MEM (Pmode, operands[0]);
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* And add back in the constant part.  */
	      if (const_part != NULL_RTX)
		expand_inc (operand0, const_part);

	      return 1;
	    }

	  if (flag_pic)
	    {
	      rtx temp;

	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (Pmode);

	      /* (const (plus (symbol) (const_int))) must be forced to
		 memory during/after reload if the const_int will not fit
		 in 14 bits.  */
	      if (GET_CODE (operand1) == CONST
		       && GET_CODE (XEXP (operand1, 0)) == PLUS
		       && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
		       && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
		       && (reload_completed || reload_in_progress)
		       && flag_pic)
		{
		  rtx const_mem = force_const_mem (mode, operand1);
		  operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
							mode, temp);
		  operands[1] = replace_equiv_address (const_mem, operands[1]);
		  emit_move_sequence (operands, mode, temp);
		}
	      else
		{
		  operands[1] = legitimize_pic_address (operand1, mode, temp);
		  if (REG_P (operand0) && REG_P (operands[1]))
		    copy_reg_pointer (operand0, operands[1]);
		  emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
		}
	    }
	  /* On the HPPA, references to data space are supposed to use dp,
	     register 27, but showing it in the RTL inhibits various cse
	     and loop optimizations.  */
	  else
	    {
	      rtx temp, set;

	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (mode);

	      /* Loading a SYMBOL_REF into a register makes that register
		 safe to be used as the base in an indexed address.

		 Don't mark hard registers though.  That loses.  */
	      if (GET_CODE (operand0) == REG
		  && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (operand0, BITS_PER_UNIT);
	      if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (temp, BITS_PER_UNIT);

	      if (ishighonly)
		set = gen_rtx_SET (mode, operand0, temp);
	      else
		set = gen_rtx_SET (VOIDmode,
				   operand0,
				   gen_rtx_LO_SUM (mode, temp, operand1));

	      emit_insn (gen_rtx_SET (VOIDmode,
				      temp,
				      gen_rtx_HIGH (mode, operand1)));
	      emit_insn (set);

	    }
	  return 1;
	}
      else if (pa_tls_referenced_p (operand1))
	{
	  rtx tmp = operand1;
	  rtx addend = NULL;

	  if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
	    {
	      addend = XEXP (XEXP (tmp, 0), 1);
	      tmp = XEXP (XEXP (tmp, 0), 0);
	    }

	  gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
	  tmp = legitimize_tls_address (tmp);
	  if (addend)
	    {
	      tmp = gen_rtx_PLUS (mode, tmp, addend);
	      tmp = force_operand (tmp, operands[0]);
	    }
	  operands[1] = tmp;
	}
      else if (GET_CODE (operand1) != CONST_INT
	       || !cint_ok_for_move (INTVAL (operand1)))
	{
	  rtx insn, temp;
	  rtx op1 = operand1;
	  HOST_WIDE_INT value = 0;
	  HOST_WIDE_INT insv = 0;
	  int insert = 0;

	  if (GET_CODE (operand1) == CONST_INT)
	    value = INTVAL (operand1);

	  if (TARGET_64BIT
	      && GET_CODE (operand1) == CONST_INT
	      && HOST_BITS_PER_WIDE_INT > 32
	      && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
	    {
	      HOST_WIDE_INT nval;

	      /* Extract the low order 32 bits of the value and sign extend.
		 If the new value is the same as the original value, we can
		 use the original value as-is.  If the new value is
		 different, we use it and insert the most-significant 32 bits
		 of the original value into the final result.  */
	      nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
		      ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
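	      /* The XOR/subtract idiom above sign-extends bit 31 of
		 the low-order word through the upper bits.  For
		 example (an illustrative value, not from the sources),
		 VALUE 0x1ffffffff has a low word of 0xffffffff, so
		 NVAL is -1; VALUE != NVAL, so INSV gets the high word
		 (1) and the insertion code below patches the upper
		 32 bits.  */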
	      if (value != nval)
		{
#if HOST_BITS_PER_WIDE_INT > 32
		  insv = value >= 0 ? value >> 32 : ~(~value >> 32);
#endif
		  insert = 1;
		  value = nval;
		  operand1 = GEN_INT (nval);
		}
	    }

	  if (reload_in_progress || reload_completed)
	    temp = scratch_reg ? scratch_reg : operand0;
	  else
	    temp = gen_reg_rtx (mode);

	  /* We don't directly split DImode constants on 32-bit targets
	     because PLUS uses an 11-bit immediate and the insn sequence
	     generated is not as efficient as the one using HIGH/LO_SUM.  */
	  if (GET_CODE (operand1) == CONST_INT
	      && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && !insert)
	    {
	      /* Directly break constant into high and low parts.  This
		 provides better optimization opportunities because various
		 passes recognize constants split with PLUS but not LO_SUM.
		 We use a 14-bit signed low part except when the addition
		 of 0x4000 to the high part might change the sign of the
		 high part.  */
	      HOST_WIDE_INT low = value & 0x3fff;
	      HOST_WIDE_INT high = value & ~ 0x3fff;

	      if (low >= 0x2000)
		{
		  if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
		    high += 0x2000;
		  else
		    high += 0x4000;
		}

	      low = value - high;
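	      /* For example (an illustrative value, not from the
		 sources), VALUE 0x12347fff gives LOW 0x3fff and HIGH
		 0x12344000; LOW >= 0x2000, so HIGH is bumped to
		 0x12348000 and LOW recomputed as -1, which fits the
		 14-bit signed displacement of ldo.  */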

	      emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
	      operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
	    }
	  else
	    {
	      emit_insn (gen_rtx_SET (VOIDmode, temp,
				      gen_rtx_HIGH (mode, operand1)));
	      operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
	    }

	  insn = emit_move_insn (operands[0], operands[1]);

	  /* Now insert the most significant 32 bits of the value
	     into the register.  When we don't have a second register
	     available, it could take up to nine instructions to load
	     a 64-bit integer constant.  Prior to reload, we force
	     constants that would take more than three instructions
	     to load to the constant pool.  During and after reload,
	     we have to handle all possible values.  */
	  if (insert)
	    {
	      /* Use a HIGH/LO_SUM/INSV sequence if we have a second
		 register and the value to be inserted is outside the
		 range that can be loaded with three depdi instructions.  */
	      if (temp != operand0 && (insv >= 16384 || insv < -16384))
		{
		  operand1 = GEN_INT (insv);

		  emit_insn (gen_rtx_SET (VOIDmode, temp,
					  gen_rtx_HIGH (mode, operand1)));
		  emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
		  emit_insn (gen_insv (operand0, GEN_INT (32),
				       const0_rtx, temp));
		}
	      else
		{
		  int len = 5, pos = 27;

		  /* Insert the bits using the depdi instruction.  */
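		  /* depdi deposits a sign-extended 5-bit immediate
		     into a field, so walk the upper word in 5-bit
		     chunks from bit 27 down, widening each deposit
		     over any adjacent higher-order bits that match the
		     chunk's sign; the hardware's sign extension then
		     reproduces those bits for free.  */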
		  while (pos >= 0)
		    {
		      HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
		      HOST_WIDE_INT sign = v5 < 0;

		      /* Left extend the insertion.  */
		      insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
		      while (pos > 0 && (insv & 1) == sign)
			{
			  insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
			  len += 1;
			  pos -= 1;
			}

		      emit_insn (gen_insv (operand0, GEN_INT (len),
					   GEN_INT (pos), GEN_INT (v5)));

		      len = pos > 0 && pos < 5 ? pos : 5;
		      pos -= len;
		    }
		}
	    }

	  set_unique_reg_note (insn, REG_EQUAL, op1);

	  return 1;
	}
    }
  /* Now have insn-emit do whatever it normally does.  */
  return 0;
}

/* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
   it will need a link/runtime reloc).  */

int
reloc_needed (tree exp)
{
  int reloc = 0;

  switch (TREE_CODE (exp))
    {
    case ADDR_EXPR:
      return 1;

    case PLUS_EXPR:
    case MINUS_EXPR:
      reloc = reloc_needed (TREE_OPERAND (exp, 0));
      reloc |= reloc_needed (TREE_OPERAND (exp, 1));
      break;

    case NOP_EXPR:
    case CONVERT_EXPR:
    case NON_LVALUE_EXPR:
      reloc = reloc_needed (TREE_OPERAND (exp, 0));
      break;

    case CONSTRUCTOR:
      {
	tree value;
	unsigned HOST_WIDE_INT ix;

	FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
	  if (value)
	    reloc |= reloc_needed (value);
      }
      break;

    case ERROR_MARK:
      break;

    default:
      break;
    }
  return reloc;
}

/* Does operand (which is a symbolic_operand) live in text space?
   If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info,
   will be true.  */

int
read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (operand) == CONST)
    operand = XEXP (XEXP (operand, 0), 0);
  if (flag_pic)
    {
      if (GET_CODE (operand) == SYMBOL_REF)
	return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand);
    }
  else
    {
      if (GET_CODE (operand) == SYMBOL_REF)
	return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand);
    }
  return 1;
}


/* Return the best assembler insn template
   for moving operands[1] into operands[0] as a fullword.  */
const char *
singlemove_string (rtx *operands)
{
  HOST_WIDE_INT intval;

  if (GET_CODE (operands[0]) == MEM)
    return "stw %r1,%0";
  if (GET_CODE (operands[1]) == MEM)
    return "ldw %1,%0";
  if (GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      long i;
      REAL_VALUE_TYPE d;

      gcc_assert (GET_MODE (operands[1]) == SFmode);

      /* Translate the CONST_DOUBLE to a CONST_INT with the same target
	 bit pattern.  */
      REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
      REAL_VALUE_TO_TARGET_SINGLE (d, i);

      operands[1] = GEN_INT (i);
      /* Fall through to CONST_INT case.  */
    }
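  /* Pick the cheapest sequence for an integer constant: ldi for
     14-bit immediates, a single ldil when the low 11 bits are zero,
     one zdepi when the constant is a shifted, sign-extended 5-bit
     field, and an ldil/ldo pair otherwise.  */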
  if (GET_CODE (operands[1]) == CONST_INT)
    {
      intval = INTVAL (operands[1]);

      if (VAL_14_BITS_P (intval))
	return "ldi %1,%0";
      else if ((intval & 0x7ff) == 0)
	return "ldil L'%1,%0";
      else if (zdepi_cint_p (intval))
	return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
      else
	return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
    }
  return "copy %1,%0";
}


/* Compute position (in OP[1]) and width (in OP[2])
   useful for copying IMM to a register using the zdepi
   instruction.  Store the immediate value to insert in OP[0].  */
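/* For example (an illustrative value, not from the sources), IMM
   0x3c00 has its least significant set bit at position 10 and a 4-bit
   run of ones, so this sets OP[0] = 0xf, OP[1] = 21 and OP[2] = 4;
   zdepi 15,21,4 then rebuilds the constant.  */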
static void
compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
{
  int lsb, len;

  /* Find the least significant set bit in IMM.  */
  for (lsb = 0; lsb < 32; lsb++)
    {
      if ((imm & 1) != 0)
        break;
      imm >>= 1;
    }

  /* Choose variants based on *sign* of the 5-bit field.  */
  if ((imm & 0x10) == 0)
    len = (lsb <= 28) ? 4 : 32 - lsb;
  else
    {
      /* Find the width of the bitstring in IMM.  */
      for (len = 5; len < 32; len++)
	{
	  if ((imm & (1 << len)) == 0)
	    break;
	}

      /* Sign extend IMM as a 5-bit value.  */
      imm = (imm & 0xf) - 0x10;
    }

  op[0] = imm;
  op[1] = 31 - lsb;
  op[2] = len;
}

/* Compute position (in OP[1]) and width (in OP[2])
   useful for copying IMM to a register using the depdi,z
   instruction.  Store the immediate value to insert in OP[0].  */
void
compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
{
  HOST_WIDE_INT lsb, len;

  /* Find the least significant set bit in IMM.  */
  for (lsb = 0; lsb < HOST_BITS_PER_WIDE_INT; lsb++)
    {
      if ((imm & 1) != 0)
        break;
      imm >>= 1;
    }

  /* Choose variants based on *sign* of the 5-bit field.  */
  if ((imm & 0x10) == 0)
    len = ((lsb <= HOST_BITS_PER_WIDE_INT - 4)
	   ? 4 : HOST_BITS_PER_WIDE_INT - lsb);
  else
    {
      /* Find the width of the bitstring in IMM.  */
      for (len = 5; len < HOST_BITS_PER_WIDE_INT; len++)
	{
	  if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
	    break;
	}

      /* Sign extend IMM as a 5-bit value.  */
      imm = (imm & 0xf) - 0x10;
    }

  op[0] = imm;
  op[1] = 63 - lsb;
  op[2] = len;
}

/* Output assembler code to perform a doubleword move insn
   with operands OPERANDS.  */

const char *
output_move_double (rtx *operands)
{
  enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
  rtx latehalf[2];
  rtx addreg0 = 0, addreg1 = 0;

  /* First classify both operands.  */

  if (REG_P (operands[0]))
    optype0 = REGOP;
  else if (offsettable_memref_p (operands[0]))
    optype0 = OFFSOP;
  else if (GET_CODE (operands[0]) == MEM)
    optype0 = MEMOP;
  else
    optype0 = RNDOP;

  if (REG_P (operands[1]))
    optype1 = REGOP;
  else if (CONSTANT_P (operands[1]))
    optype1 = CNSTOP;
  else if (offsettable_memref_p (operands[1]))
    optype1 = OFFSOP;
  else if (GET_CODE (operands[1]) == MEM)
    optype1 = MEMOP;
  else
    optype1 = RNDOP;

  /* Check for the cases that the operand constraints are not
     supposed to allow.  */
  gcc_assert (optype0 == REGOP || optype1 == REGOP);

  /* Handle copies between general and floating registers.  */

  if (optype0 == REGOP && optype1 == REGOP
      && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
    {
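      /* There is no direct general <-> floating register copy insn,
	 so bounce the value through the frame marker scratch area
	 below the stack pointer.  */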
      if (FP_REG_P (operands[0]))
	{
	  output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
	  output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
	  return "{fldds|fldd} -16(%%sp),%0";
	}
      else
	{
	  output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
	  output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
	  return "{ldws|ldw} -12(%%sp),%R0";
	}
    }

  /* Handle auto-decrementing and auto-incrementing loads and stores
     specifically, since the structure of the function doesn't work
     for them without major modification.  Do this better once the
     port understands PA's general inc/dec addressing.
     (This was written by tege.  Chide him if it doesn't work.)  */

  if (optype0 == MEMOP)
    {
      /* We have to output the address syntax ourselves, since print_operand
	 doesn't deal with the addresses we want to use.  Fix this later.  */

      rtx addr = XEXP (operands[0], 0);
      if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
	{
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);

	  operands[0] = XEXP (addr, 0);
	  gcc_assert (GET_CODE (operands[1]) == REG
		      && GET_CODE (operands[0]) == REG);

	  gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
	  
	  /* No overlap between high target register and address
	     register.  (We do this in a non-obvious way to
	     save a register file writeback)  */
	  if (GET_CODE (addr) == POST_INC)
	    return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
	  return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
	}
      else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
	{
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);

	  operands[0] = XEXP (addr, 0);
	  gcc_assert (GET_CODE (operands[1]) == REG
		      && GET_CODE (operands[0]) == REG);
	  
	  gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
	  /* No overlap between high target register and address
	     register.  (We do this in a non-obvious way to save a
	     register file writeback)  */
	  if (GET_CODE (addr) == PRE_INC)
	    return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
	  return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
	}
    }
  if (optype1 == MEMOP)
    {
      /* We have to output the address syntax ourselves, since print_operand
	 doesn't deal with the addresses we want to use.  Fix this later.  */

      rtx addr = XEXP (operands[1], 0);
      if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
	{
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

	  operands[1] = XEXP (addr, 0);
	  gcc_assert (GET_CODE (operands[0]) == REG
		      && GET_CODE (operands[1]) == REG);

	  if (!reg_overlap_mentioned_p (high_reg, addr))
	    {
	      /* No overlap between high target register and address
		 register.  (We do this in a non-obvious way to
		 save a register file writeback)  */
	      if (GET_CODE (addr) == POST_INC)
		return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
	      return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
	    }
	  else
	    {
	      /* This is an undefined situation.  We should load into the
		 address register *and* update that register.  Probably
		 we don't need to handle this at all.  */
	      if (GET_CODE (addr) == POST_INC)
		return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
	      return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
	    }
	}
      else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
	{
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

	  operands[1] = XEXP (addr, 0);
	  gcc_assert (GET_CODE (operands[0]) == REG
		      && GET_CODE (operands[1]) == REG);

	  if (!reg_overlap_mentioned_p (high_reg, addr))
	    {
	      /* No overlap between high target register and address
		 register.  (We do this in a non-obvious way to
		 save a register file writeback)  */
	      if (GET_CODE (addr) == PRE_INC)
		return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
	      return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
	    }
	  else
	    {
	      /* This is an undefined situation.  We should load into the
		 address register *and* update that register.  Probably
		 we don't need to handle this at all.  */
	      if (GET_CODE (addr) == PRE_INC)
		return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
	      return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
	    }
	}
      else if (GET_CODE (addr) == PLUS
	       && GET_CODE (XEXP (addr, 0)) == MULT)
	{
	  rtx xoperands[4];
	  rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

	  if (!reg_overlap_mentioned_p (high_reg, addr))
	    {
	      xoperands[0] = high_reg;
	      xoperands[1] = XEXP (addr, 1);
	      xoperands[2] = XEXP (XEXP (addr, 0), 0);
	      xoperands[3] = XEXP (XEXP (addr, 0), 1);
	      output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
			       xoperands);
	      return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
	    }
	  else
	    {
	      xoperands[0] = high_reg;
	      xoperands[1] = XEXP (addr, 1);
	      xoperands[2] = XEXP (XEXP (addr, 0), 0);
	      xoperands[3] = XEXP (XEXP (addr, 0), 1);
	      output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
			       xoperands);
	      return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
	    }
	}
    }

  /* If an operand is an unoffsettable memory ref, find a register
     we can increment temporarily to make it refer to the second word.  */

  if (optype0 == MEMOP)
    addreg0 = find_addr_reg (XEXP (operands[0], 0));

  if (optype1 == MEMOP)
    addreg1 = find_addr_reg (XEXP (operands[1], 0));

  /* Ok, we can do one word at a time.
     Normally we do the low-numbered word first.

     Set up in LATEHALF the operands to use for the high-numbered
     word, and in some cases alter the operands in OPERANDS to be
     suitable for the low-numbered word.  */

  if (optype0 == REGOP)
    latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
  else if (optype0 == OFFSOP)
    latehalf[0] = adjust_address (operands[0], SImode, 4);
  else
    latehalf[0] = operands[0];

  if (optype1 == REGOP)
    latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
  else if (optype1 == OFFSOP)
    latehalf[1] = adjust_address (operands[1], SImode, 4);
  else if (optype1 == CNSTOP)
    split_double (operands[1], &operands[1], &latehalf[1]);
  else
    latehalf[1] = operands[1];

  /* If the first move would clobber the source of the second one,
     do them in the other order.

     This can happen in two cases:

	mem -> register where the first half of the destination register
 	is the same register used in the memory's address.  Reload
	can create such insns.

	mem in this case will be either register indirect or register
	indirect plus a valid offset.

	register -> register move where REGNO(dst) == REGNO(src) + 1.
	Someone (Tim/Tege?) claimed this can happen for parameter loads.

     Handle mem -> register case first.  */
  if (optype0 == REGOP
      && (optype1 == MEMOP || optype1 == OFFSOP)
      && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
			    operands[1], 0))
    {
      /* Do the late half first.  */
      if (addreg1)
	output_asm_insn ("ldo 4(%0),%0", &addreg1);
      output_asm_insn (singlemove_string (latehalf), latehalf);

      /* Then clobber.  */
      if (addreg1)
	output_asm_insn ("ldo -4(%0),%0", &addreg1);
      return singlemove_string (operands);
    }

  /* Now handle register -> register case.  */
  if (optype0 == REGOP && optype1 == REGOP
      && REGNO (operands[0]) == REGNO (operands[1]) + 1)
    {
      output_asm_insn (singlemove_string (latehalf), latehalf);
      return singlemove_string (operands);
    }

  /* Normal case: do the two words, low-numbered first.  */

  output_asm_insn (singlemove_string (operands), operands);

  /* Make any unoffsettable addresses point at high-numbered word.  */
  if (addreg0)
    output_asm_insn ("ldo 4(%0),%0", &addreg0);
  if (addreg1)
    output_asm_insn ("ldo 4(%0),%0", &addreg1);

  /* Do that word.  */
  output_asm_insn (singlemove_string (latehalf), latehalf);

  /* Undo the adds we just did.  */
  if (addreg0)
    output_asm_insn ("ldo -4(%0),%0", &addreg0);
  if (addreg1)
    output_asm_insn ("ldo -4(%0),%0", &addreg1);

  return "";
}

const char *
output_fp_move_double (rtx *operands)
{
  if (FP_REG_P (operands[0]))
    {
      if (FP_REG_P (operands[1])
	  || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
	output_asm_insn ("fcpy,dbl %f1,%0", operands);
      else
	output_asm_insn ("fldd%F1 %1,%0", operands);
    }
  else if (FP_REG_P (operands[1]))
    {
      output_asm_insn ("fstd%F0 %1,%0", operands);
    }
  else
    {
      rtx xoperands[2];
      
      gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
      
      /* This is a pain.  You have to be prepared to deal with an
	 arbitrary address here, including pre/post increment/decrement,
	 so avoid this in the MD.  */
      gcc_assert (GET_CODE (operands[0]) == REG);
      
      xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      xoperands[0] = operands[0];
      output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
    }
  return "";
}

/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.  */

static rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG)
	addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG)
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
	addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
	addr = XEXP (addr, 0);
      else
	gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG);
  return addr;
}

/* Emit code to perform a block move.

   OPERANDS[0] is the destination pointer as a REG, clobbered.
   OPERANDS[1] is the source pointer as a REG, clobbered.
   OPERANDS[2] is a register for temporary storage.
   OPERANDS[3] is a register for temporary storage.
   OPERANDS[4] is the size as a CONST_INT.
   OPERANDS[5] is the alignment safe to use, as a CONST_INT.
   OPERANDS[6] is another temporary register.  */

const char *
output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
{
  int align = INTVAL (operands[5]);
  unsigned long n_bytes = INTVAL (operands[4]);

  /* We can't move more than a word at a time because the PA
     has no integer move insns wider than a word.  (Could use fp mem ops?)  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* Note that we know each loop below will execute at least twice
     (else we would have open-coded the copy).  */
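  /* For example (an illustrative size, not from the sources), with
     ALIGN 4 and N_BYTES 20 the loop below runs twice, copying 8 bytes
     per iteration (the store after the addib executes in its delay
     slot), and the residual code copies the remaining 4 bytes.  */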
  switch (align)
    {
      case 8:
	/* Pre-adjust the loop counter.  */
	operands[4] = GEN_INT (n_bytes - 16);
	output_asm_insn ("ldi %4,%2", operands);

	/* Copying loop.  */
	output_asm_insn ("ldd,ma 8(%1),%3", operands);
	output_asm_insn ("ldd,ma 8(%1),%6", operands);
	output_asm_insn ("std,ma %3,8(%0)", operands);
	output_asm_insn ("addib,>= -16,%2,.-12", operands);
	output_asm_insn ("std,ma %6,8(%0)", operands);

	/* Handle the residual.  There could be up to 15 bytes of
	   residual to copy!  */
	if (n_bytes % 16 != 0)
	  {
	    operands[4] = GEN_INT (n_bytes % 8);
	    if (n_bytes % 16 >= 8)
	      output_asm_insn ("ldd,ma 8(%1),%3", operands);
	    if (n_bytes % 8 != 0)
	      output_asm_insn ("ldd 0(%1),%6", operands);
	    if (n_bytes % 16 >= 8)
	      output_asm_insn ("std,ma %3,8(%0)", operands);
	    if (n_bytes % 8 != 0)
	      output_asm_insn ("stdby,e %6,%4(%0)", operands);
	  }
	return "";

      case 4:
	/* Pre-adjust the loop counter.  */
	operands[4] = GEN_INT (n_bytes - 8);
	output_asm_insn ("ldi %4,%2", operands);

	/* Copying loop.  */
	output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
	output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
	output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
	output_asm_insn ("addib,>= -8,%2,.-12", operands);
	output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);

	/* Handle the residual.  There could be up to 7 bytes of
	   residual to copy!  */
	if (n_bytes % 8 != 0)
	  {
	    operands[4] = GEN_INT (n_bytes % 4);
	    if (n_bytes % 8 >= 4)
	      output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
	    if (n_bytes % 4 != 0)
	      output_asm_insn ("ldw 0(%1),%6", operands);
	    if (n_bytes % 8 >= 4)
	      output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
	    if (n_bytes % 4 != 0)
	      output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
	  }
	return "";

      case 2:
	/* Pre-adjust the loop counter.  */
	operands[4] = GEN_INT (n_bytes - 4);
	output_asm_insn ("ldi %4,%2", operands);

	/* Copying loop.  */
	output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
	output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
	output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
	output_asm_insn ("addib,>= -4,%2,.-12", operands);
	output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);

	/* Handle the residual.  */
	if (n_bytes % 4 != 0)
	  {
	    if (n_bytes % 4 >= 2)
	      output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
	    if (n_bytes % 2 != 0)
	      output_asm_insn ("ldb 0(%1),%6", operands);
	    if (n_bytes % 4 >= 2)
	      output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
	    if (n_bytes % 2 != 0)
	      output_asm_insn ("stb %6,0(%0)", operands);
	  }
	return "";

      case 1:
	/* Pre-adjust the loop counter.  */
	operands[4] = GEN_INT (n_bytes - 2);
	output_asm_insn ("ldi %4,%2", operands);

	/* Copying loop.  */
	output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
	output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
	output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
	output_asm_insn ("addib,>= -2,%2,.-12", operands);
	output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);

	/* Handle the residual.  */
	if (n_bytes % 2 != 0)
	  {
	    output_asm_insn ("ldb 0(%1),%3", operands);
	    output_asm_insn ("stb %3,0(%0)", operands);
	  }
	return "";

      default:
	gcc_unreachable ();
    }
}

/* Count the number of insns necessary to handle this block move.

   Basic structure is the same as output_block_move, except that we
   count insns rather than emit them.  */

static int
compute_movmem_length (rtx insn)
{
  rtx pat = PATTERN (insn);
  unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
  unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
  unsigned int n_insns = 0;

  /* We can't move more than a word at a time because the PA
     has no integer move insns wider than a word.  (Could use fp mem ops?)  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* The basic copying loop.  */
  n_insns = 6;

  /* Residuals.  */
  if (n_bytes % (2 * align) != 0)
    {
      if ((n_bytes % (2 * align)) >= align)
	n_insns += 2;

      if ((n_bytes % align) != 0)
	n_insns += 2;
    }

  /* Lengths are expressed in bytes now; each insn is 4 bytes.  */
  return n_insns * 4;
}

/* Emit code to perform a block clear.

   OPERANDS[0] is the destination pointer as a REG, clobbered.
   OPERANDS[1] is a register for temporary storage.
   OPERANDS[2] is the size as a CONST_INT.
   OPERANDS[3] is the alignment safe to use, as a CONST_INT.  */

const char *
output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
{
  int align = INTVAL (operands[3]);
  unsigned long n_bytes = INTVAL (operands[2]);

  /* We can't clear more than a word at a time because the PA
     has no integer store insns wider than a word.  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* Note that we know each loop below will execute at least twice
     (else we would have open-coded the copy).  */
  switch (align)
    {
      case 8:
	/* Pre-adjust the loop counter.  */
	operands[2] = GEN_INT (n_bytes - 16);
	output_asm_insn ("ldi %2,%1", operands);

	/* Loop.  */
	output_asm_insn ("std,ma %%r0,8(%0)", operands);
	output_asm_insn ("addib,>= -16,%1,.-4", operands);
	output_asm_insn ("std,ma %%r0,8(%0)", operands);

	/* Handle the residual.  There could be up to 15 bytes of
	   residual to clear!  */
	if (n_bytes % 16 != 0)
	  {
	    operands[2] = GEN_INT (n_bytes % 8);
	    if (n_bytes % 16 >= 8)
	      output_asm_insn ("std,ma %%r0,8(%0)", operands);
	    if (n_bytes % 8 != 0)
	      output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
	  }
	return "";

      case 4:
	/* Pre-adjust the loop counter.  */
	operands[2] = GEN_INT (n_bytes - 8);
	output_asm_insn ("ldi %2,%1", operands);

	/* Loop.  */
	output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
	output_asm_insn ("addib,>= -8,%1,.-4", operands);
	output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);

	/* Handle the residual.  There could be up to 7 bytes of
	   residual to clear!  */
	if (n_bytes % 8 != 0)
	  {
	    operands[2] = GEN_INT (n_bytes % 4);
	    if (n_bytes % 8 >= 4)
	      output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
	    if (n_bytes % 4 != 0)
	      output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
	  }
	return "";

      case 2:
	/* Pre-adjust the loop counter.  */
	operands[2] = GEN_INT (n_bytes - 4);
	output_asm_insn ("ldi %2,%1", operands);

	/* Loop.  */
	output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
	output_asm_insn ("addib,>= -4,%1,.-4", operands);
	output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);

	/* Handle the residual.  */
	if (n_bytes % 4 != 0)
	  {
	    if (n_bytes % 4 >= 2)
	      output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
	    if (n_bytes % 2 != 0)
	      output_asm_insn ("stb %%r0,0(%0)", operands);
	  }
	return "";

      case 1:
	/* Pre-adjust the loop counter.  */
	operands[2] = GEN_INT (n_bytes - 2);
	output_asm_insn ("ldi %2,%1", operands);

	/* Loop.  */
	output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
	output_asm_insn ("addib,>= -2,%1,.-4", operands);
	output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);

	/* Handle the residual.  */
	if (n_bytes % 2 != 0)
	  output_asm_insn ("stb %%r0,0(%0)", operands);

	return "";

      default:
	gcc_unreachable ();
    }
}

/* Count the number of insns necessary to handle this block clear.

   Basic structure is the same as output_block_clear, except that we
   count insns rather than emit them.  */

static int
compute_clrmem_length (rtx insn)
{
  rtx pat = PATTERN (insn);
  unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
  unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
  unsigned int n_insns = 0;

  /* We can't clear more than a word at a time because the PA
     has no integer store insns wider than a word.  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* The basic loop.  */
  n_insns = 4;

  /* Residuals.  */
  if (n_bytes % (2 * align) != 0)
    {
      if ((n_bytes % (2 * align)) >= align)
	n_insns++;

      if ((n_bytes % align) != 0)
	n_insns++;
    }

  /* Lengths are expressed in bytes now; each insn is 4 bytes.  */
  return n_insns * 4;
}


/* Return a string to perform a bitwise-and of operands[1] with operands[2]
   storing the result in operands[0].  */
const char *
output_and (rtx *operands)
{
  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
    {
      unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
      int ls0, ls1, ms0, p, len;

      for (ls0 = 0; ls0 < 32; ls0++)
	if ((mask & (1 << ls0)) == 0)
	  break;

      for (ls1 = ls0; ls1 < 32; ls1++)
	if ((mask & (1 << ls1)) != 0)
	  break;

      for (ms0 = ls1; ms0 < 32; ms0++)
	if ((mask & (1 << ms0)) == 0)
	  break;

      /* The loops above leave LS0 as the lowest clear bit, LS1 the
	 next set bit above it, and MS0 the next clear bit above that.
	 MS0 == 32 means the mask is either a block of low-order ones
	 (the extract case below) or ones-zeros-ones (the deposit case,
	 which clears the middle field).  */
      gcc_assert (ms0 == 32);

      if (ls1 == 32)
	{
	  len = ls0;

	  gcc_assert (len);

	  operands[2] = GEN_INT (len);
	  return "{extru|extrw,u} %1,31,%2,%0";
	}
      else
	{
	  /* We could use this `depi' for the case above as well, but `depi'
	     requires one more register file access than an `extru'.  */

	  p = 31 - ls0;
	  len = ls1 - ls0;

	  operands[2] = GEN_INT (p);
	  operands[3] = GEN_INT (len);
	  return "{depi|depwi} 0,%2,%3,%0";
	}
    }
  else
    return "and %1,%2,%0";
}

/* Return a string to perform a bitwise-and of operands[1] with operands[2]
   storing the result in operands[0].  */
const char *
output_64bit_and (rtx *operands)
{
  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
    {
      unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
      int ls0, ls1, ms0, p, len;

      for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
	if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
	  break;

      for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
	if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
	  break;

      for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
	if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
	  break;

      gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);

      if (ls1 == HOST_BITS_PER_WIDE_INT)
	{
	  len = ls0;

	  gcc_assert (len);

	  operands[2] = GEN_INT (len);
	  return "extrd,u %1,63,%2,%0";
	}
      else
	{
	  /* We could use `depdi' for the case above as well, but `depdi'
	     requires one more register file access than an `extrd,u'.  */

	  p = 63 - ls0;
	  len = ls1 - ls0;

	  operands[2] = GEN_INT (p);
	  operands[3] = GEN_INT (len);
	  return "depdi 0,%2,%3,%0";
	}
    }
  else
    return "and %1,%2,%0";
}

/* Return a string to perform a bitwise-or of operands[1] with operands[2]
   storing the result in operands[0].  */
const char *
output_ior (rtx *operands)
{
  unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
  int bs0, bs1, p, len;

  if (INTVAL (operands[2]) == 0)
    return "copy %1,%0";

  for (bs0 = 0; bs0 < 32; bs0++)
    if ((mask & (1 << bs0)) != 0)
      break;

  for (bs1 = bs0; bs1 < 32; bs1++)
    if ((mask & (1 << bs1)) == 0)
      break;

  /* The scans leave BS0 as the lowest set bit and BS1 as the next
     clear bit above it; check that no set bits remain above BS1,
     since depi can only set a single contiguous field.  */
  gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);

  p = 31 - bs0;
  len = bs1 - bs0;

  operands[2] = GEN_INT (p);
  operands[3] = GEN_INT (len);
  return "{depi|depwi} -1,%2,%3,%0";
}

/* Return a string to perform a bitwise-or of operands[1] with operands[2]
   storing the result in operands[0].  */
const char *
output_64bit_ior (rtx *operands)
{
  unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
  int bs0, bs1, p, len;

  if (INTVAL (operands[2]) == 0)
    return "copy %1,%0";

  for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
    if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
      break;

  for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
    if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
      break;

  gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
	      || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);

  p = 63 - bs0;
  len = bs1 - bs0;

  operands[2] = GEN_INT (p);
  operands[3] = GEN_INT (len);
  return "depdi -1,%2,%3,%0";
}

/* Target hook for assembling integer objects.  This code handles
   aligned SI and DI integers specially since function references
   must be preceded by P%.  */

static bool
pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
  if (size == UNITS_PER_WORD
      && aligned_p
      && function_label_operand (x, VOIDmode))
    {
      fputs (size == 8? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
      output_addr_const (asm_out_file, x);
      fputc ('\n', asm_out_file);
      return true;
    }
  return default_assemble_integer (x, size, aligned_p);
}

/* Output an ascii string.  */
void
output_ascii (FILE *file, const char *p, int size)
{
  int i;
  int chars_output;
  unsigned char partial_output[16];	/* Max space 4 chars can occupy.  */

  /* The HP assembler can only take strings of 256 characters at one
     time.  This is a limitation on input line length, *not* the
     length of the string.  Sigh.  Even worse, it seems that the
     restriction is in number of input characters (see \xnn &
     \whatever).  So we have to do this very carefully.  */

  fputs ("\t.STRING \"", file);

  chars_output = 0;
  for (i = 0; i < size; i += 4)
    {
      int co = 0;
      int io = 0;
      for (io = 0, co = 0; io < MIN (4, size - i); io++)
	{
	  register unsigned int c = (unsigned char) p[i + io];

	  if (c == '\"' || c == '\\')
	    partial_output[co++] = '\\';
	  if (c >= ' ' && c < 0177)
	    partial_output[co++] = c;
	  else
	    {
	      unsigned int hexd;
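	      /* Nonprinting character: emit it as a two-digit
		 lowercase hex escape, \xNN.  */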
	      partial_output[co++] = '\\';
	      partial_output[co++] = 'x';
	      hexd = c / 16 + '0';
	      if (hexd > '9')
		hexd -= '9' - 'a' + 1;
	      partial_output[co++] = hexd;
	      hexd = c % 16 + '0';
	      if (hexd > '9')
		hexd -= '9' - 'a' + 1;
	      partial_output[co++] = hexd;
	    }
	}
      if (chars_output + co > 243)
	{
	  fputs ("\"\n\t.STRING \"", file);
	  chars_output = 0;
	}
      fwrite (partial_output, 1, (size_t) co, file);
      chars_output += co;
      co = 0;
    }
  fputs ("\"\n", file);
}

/* Try to rewrite floating point comparisons & branches to avoid
   useless add,tr insns.

   CHECK_NOTES is nonzero if we should examine REG_DEAD notes
   to see if FPCC is dead.  CHECK_NOTES is nonzero for the
   first attempt to remove useless add,tr insns.  It is zero
   for the second pass as reorg sometimes leaves bogus REG_DEAD
   notes lying around.

   When CHECK_NOTES is zero we can only eliminate add,tr insns
   when there's a 1:1 correspondence between fcmp and ftest/fbranch
   instructions.  */
static void
remove_useless_addtr_insns (int check_notes)
{
  rtx insn;
  static int pass = 0;

  /* This is fairly cheap, so always run it when optimizing.  */
  if (optimize > 0)
    {
      int fcmp_count = 0;
      int fbranch_count = 0;

      /* Walk all the insns in this function looking for fcmp & fbranch
	 instructions.  Keep track of how many of each we find.  */
      for (insn = get_insns (); insn; insn = next_insn (insn))
	{
	  rtx tmp;

	  /* Ignore anything that isn't an INSN or a JUMP_INSN.  */
	  if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
	    continue;

	  tmp = PATTERN (insn);

	  /* It must be a set.  */
	  if (GET_CODE (tmp) != SET)
	    continue;

	  /* If the destination is CCFP, then we've found an fcmp insn.  */
	  tmp = SET_DEST (tmp);
	  if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
	    {
	      fcmp_count++;
	      continue;
	    }

	  tmp = PATTERN (insn);
	  /* If this is an fbranch instruction, bump the fbranch counter.  */
	  if (GET_CODE (tmp) == SET
	      && SET_DEST (tmp) == pc_rtx
	      && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
	      && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
	      && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
	      && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
	    {
	      fbranch_count++;
	      continue;
	    }
	}


      /* Find all floating point compare + branch insns.  If possible,
	 reverse the comparison & the branch to avoid add,tr insns.  */
      for (insn = get_insns (); insn; insn = next_insn (insn))
	{
	  rtx tmp, next;

	  /* Ignore anything that isn't an INSN.  */
	  if (GET_CODE (insn) != INSN)
	    continue;

	  tmp = PATTERN (insn);

	  /* It must be a set.  */
	  if (GET_CODE (tmp) != SET)
	    continue;

	  /* The destination must be CCFP, which is register zero.  */
	  tmp = SET_DEST (tmp);
	  if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
	    continue;

	  /* INSN should be a set of CCFP.

	     See if the result of this insn is used in a reversed FP
	     conditional branch.  If so, reverse our condition and
	     the branch.  Doing so avoids useless add,tr insns.  */
	  next = next_insn (insn);
	  while (next)
	    {
	      /* Jumps, calls and labels stop our search.  */
	      if (GET_CODE (next) == JUMP_INSN
		  || GET_CODE (next) == CALL_INSN
		  || GET_CODE (next) == CODE_LABEL)
		break;

	      /* As does another fcmp insn.  */
	      if (GET_CODE (next) == INSN
		  && GET_CODE (PATTERN (next)) == SET
		  && GET_CODE (SET_DEST (PATTERN (next))) == REG
		  && REGNO (SET_DEST (PATTERN (next))) == 0)
		break;

	      next = next_insn (next);
	    }

	  /* Is NEXT a branch?  */
	  if (next
	      && GET_CODE (next) == JUMP_INSN)
	    {
	      rtx pattern = PATTERN (next);

	      /* If it is a reversed fp conditional branch (e.g. uses add,tr)
		 and CCFP dies, then reverse our conditional and the branch
		 to avoid the add,tr.  */
	      if (GET_CODE (pattern) == SET
		  && SET_DEST (pattern) == pc_rtx
		  && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
		  && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
		  && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
		  && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
		  && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
		  && (fcmp_count == fbranch_count
		      || (check_notes
			  && find_regno_note (next, REG_DEAD, 0))))
		{
		  /* Reverse the branch.  */
		  tmp = XEXP (SET_SRC (pattern), 1);
		  XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
		  XEXP (SET_SRC (pattern), 2) = tmp;
		  INSN_CODE (next) = -1;

		  /* Reverse our condition.  */
		  tmp = PATTERN (insn);
		  PUT_CODE (XEXP (tmp, 1),
			    (reverse_condition_maybe_unordered
			     (GET_CODE (XEXP (tmp, 1)))));
		}
	    }
	}
    }

  pass = !pass;

}

/* You may have trouble believing this, but this is the 32-bit HP-PA
   stack layout.  Wow.

   Offset		Contents

   Variable arguments	(optional; any number may be allocated)

   SP-(4*(N+9))		arg word N
   	:		    :
      SP-56		arg word 5
      SP-52		arg word 4

   Fixed arguments	(must be allocated; may remain unused)

      SP-48		arg word 3
      SP-44		arg word 2
      SP-40		arg word 1
      SP-36		arg word 0

   Frame Marker

      SP-32		External Data Pointer (DP)
      SP-28		External sr4
      SP-24		External/stub RP (RP')
      SP-20		Current RP
      SP-16		Static Link
      SP-12		Clean up
      SP-8		Calling Stub RP (RP'')
      SP-4		Previous SP

   Top of Frame

      SP-0		Stack Pointer (points to next available address)

*/

/* This function saves registers as follows.  Registers marked with ' are
   this function's registers (as opposed to the previous function's).
   If a frame_pointer isn't needed, r4 is saved as a general register;
   the space for the frame pointer is still allocated, though, to keep
   things simple.


   Top of Frame

       SP (FP')		Previous FP
       SP + 4		Alignment filler (sigh)
       SP + 8		Space for locals reserved here.
       .
       .
       .
       SP + n		All call-saved registers used.
       .
       .
       .
       SP + o		All call-saved fp registers used.
       .
       .
       .
       SP + p (SP')	points to next available address.

*/

/* Global variables set by output_function_prologue().  */
/* Size of frame.  Need to know this to emit return insns from
   leaf procedures.  */
static HOST_WIDE_INT actual_fsize, local_fsize;
static int save_fregs;

/* Emit RTL to store REG at the memory location specified by BASE+DISP.
   Handle case where DISP > 8k by using the add_high_const patterns.

   Note in the DISP > 8k case, we will leave the high part of the address
   in %r1.  There is code in hppa_expand_{prologue,epilogue} that knows this.  */

static void
store_reg (int reg, HOST_WIDE_INT disp, int base)
{
  rtx insn, dest, src, basereg;

  src = gen_rtx_REG (word_mode, reg);
  basereg = gen_rtx_REG (Pmode, base);
  if (VAL_14_BITS_P (disp))
    {
      dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
      insn = emit_move_insn (dest, src);
    }
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
      if (DO_FRAME_NOTES)
	{
	  REG_NOTES (insn)
	    = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
		gen_rtx_SET (VOIDmode, tmpreg,
			     gen_rtx_PLUS (Pmode, basereg, delta)),
                REG_NOTES (insn));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      dest = gen_rtx_MEM (word_mode, tmpreg);
      insn = emit_move_insn (dest, src);
    }
  else
    {
      rtx delta = GEN_INT (disp);
      rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, high);
      dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
      insn = emit_move_insn (dest, src);
      if (DO_FRAME_NOTES)
	{
	  REG_NOTES (insn)
	    = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
		gen_rtx_SET (VOIDmode,
			     gen_rtx_MEM (word_mode,
					  gen_rtx_PLUS (word_mode, basereg,
							delta)),
                             src),
                REG_NOTES (insn));
	}
    }

  if (DO_FRAME_NOTES)
    RTX_FRAME_RELATED_P (insn) = 1;
}

/* Emit RTL to store REG at the memory location specified by BASE and then
   add MOD to BASE.  MOD must be <= 8k.  */

static void
store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
{
  rtx insn, basereg, srcreg, delta;

  gcc_assert (VAL_14_BITS_P (mod));

  basereg = gen_rtx_REG (Pmode, base);
  srcreg = gen_rtx_REG (word_mode, reg);
  delta = GEN_INT (mod);

  insn = emit_insn (gen_post_store (basereg, srcreg, delta));
  if (DO_FRAME_NOTES)
    {
      RTX_FRAME_RELATED_P (insn) = 1;

      /* RTX_FRAME_RELATED_P must be set on each frame related set
	 in a parallel with more than one element.  */
      RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
      RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
    }
}

/* Emit RTL to set REG to the value specified by BASE+DISP.  Handle case
   where DISP > 8k by using the add_high_const patterns.  NOTE indicates
   whether to add a frame note or not.

   In the DISP > 8k case, we leave the high part of the address in %r1.
   There is code in hppa_expand_{prologue,epilogue} that knows about this.  */

static void
set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
{
  rtx insn;

  if (VAL_14_BITS_P (disp))
    {
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
			     plus_constant (gen_rtx_REG (Pmode, base), disp));
    }
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx basereg = gen_rtx_REG (Pmode, base);
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
			     gen_rtx_PLUS (Pmode, tmpreg, basereg));
      if (DO_FRAME_NOTES)
	REG_NOTES (insn)
	  = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
	      gen_rtx_SET (VOIDmode, tmpreg,
			   gen_rtx_PLUS (Pmode, basereg, delta)),
	      REG_NOTES (insn));
    }
  else
    {
      rtx basereg = gen_rtx_REG (Pmode, base);
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg,
		      gen_rtx_PLUS (Pmode, basereg,
				    gen_rtx_HIGH (Pmode, delta)));
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
			     gen_rtx_LO_SUM (Pmode, tmpreg, delta));
    }

  if (DO_FRAME_NOTES && note)
    RTX_FRAME_RELATED_P (insn) = 1;
}

HOST_WIDE_INT
compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
{
  int freg_saved = 0;
  int i, j;

  /* The code in hppa_expand_prologue and hppa_expand_epilogue must
     be consistent with the rounding and size calculation done here.
     Change them at the same time.  */

  /* We do our own stack alignment.  First, round the size of the
     stack locals up to a word boundary.  */
  size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);

  /* Space for previous frame pointer + filler.  If any frame is
     allocated, we need to add in the STARTING_FRAME_OFFSET.  We
     waste some space here for the sake of HP compatibility.  The
     first slot is only used when the frame pointer is needed.  */
  if (size || frame_pointer_needed)
    size += STARTING_FRAME_OFFSET;
  
  /* If the current function calls __builtin_eh_return, then we need
     to allocate stack space for registers that will hold data for
     the exception handler.  */
  if (DO_FRAME_NOTES && current_function_calls_eh_return)
    {
      unsigned int i;

      for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
	continue;
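      /* The empty loop above merely counts the EH data registers.  */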
      size += i * UNITS_PER_WORD;
    }

  /* Account for space used by the callee general register saves.  */
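  /* %r3 is excluded when it is the frame pointer; it is saved in the
     first frame slot counted above.  */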
  for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
    if (regs_ever_live[i])
      size += UNITS_PER_WORD;

  /* Account for space used by the callee floating point register saves.  */
  for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
    if (regs_ever_live[i]
	|| (!TARGET_64BIT && regs_ever_live[i + 1]))
      {
	freg_saved = 1;

	/* We always save both halves of the FP register, so always
	   increment the frame size by 8 bytes.  */
	size += 8;
      }

  /* If any of the floating registers are saved, account for the
     alignment needed for the floating point register save block.  */
  if (freg_saved)
    {
      size = (size + 7) & ~7;
      if (fregs_live)
	*fregs_live = 1;
    }

  /* The various ABIs include space for the outgoing parameters in the
     size of the current function's stack frame.  We don't need to align
     for the outgoing arguments as their alignment is set by the final
     rounding for the frame as a whole.  */
  size += current_function_outgoing_args_size;

  /* Allocate space for the fixed frame marker.  This space must be
     allocated for any function that makes calls or allocates
     stack space.  */
  if (!current_function_is_leaf || size)
    size += TARGET_64BIT ? 48 : 32;

  /* Finally, round to the preferred stack boundary.  */
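  /* On 32-bit PA the preferred boundary is normally 64 bytes (512
     bits), so this usually yields a multiple of 64; the exact value
     comes from PREFERRED_STACK_BOUNDARY.  */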
  return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
	  & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
}

/* Generate the assembly code for function entry.  FILE is a stdio
   stream to output the code to.  SIZE is a HOST_WIDE_INT: how many units of
   temporary storage to allocate.

   Refer to the array `regs_ever_live' to determine which registers to
   save; `regs_ever_live[I]' is nonzero if register number I is ever
   used in the function.  This function is responsible for knowing
   which registers should not be saved even if used.  */

/* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
   of memory.  If any fpu reg is used in the function, we allocate
   such a block here, at the bottom of the frame, just in case it's needed.

   If this function is a leaf procedure, then we may choose not
   to do a "save" insn.  The decision about whether or not
   to do this is made in regclass.c.  */

static void
pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  /* The function's label and associated .PROC must never be
     separated and must be output *after* any profiling declarations
     to avoid changing spaces/subspaces within a procedure.  */
  ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
  fputs ("\t.PROC\n", file);

  /* hppa_expand_prologue does the dirty work now.  We just need
     to output the assembler directives which denote the start
     of a function.  */
  fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
  if (regs_ever_live[2])
    fputs (",CALLS,SAVE_RP", file);
  else
    fputs (",NO_CALLS", file);

  /* The SAVE_SP flag is used to indicate that register %r3 is stored
     at the beginning of the frame and that it is used as the frame
     pointer for the frame.  We do this because our current frame
     layout doesn't conform to that specified in the HP runtime
     documentation and we need a way to indicate to programs such as
     GDB where %r3 is saved.  The SAVE_SP flag was chosen because it
     isn't used by HP compilers but is supported by the assembler.
     However, SAVE_SP is supposed to indicate that the previous stack
     pointer has been saved in the frame marker.  */
  if (frame_pointer_needed)
    fputs (",SAVE_SP", file);

  /* Pass on information about the number of callee register saves
     performed in the prologue.

     The compiler is supposed to pass the highest register number
     saved, the assembler then has to adjust that number before
     entering it into the unwind descriptor (to account for any
     caller saved registers with lower register numbers than the
     first callee saved register).  */
  if (gr_saved)
    fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);

  if (fr_saved)
    fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);

  fputs ("\n\t.ENTRY\n", file);

  remove_useless_addtr_insns (0);
}

void
hppa_expand_prologue (void)
{
  int merge_sp_adjust_with_store = 0;
  HOST_WIDE_INT size = get_frame_size ();
  HOST_WIDE_INT offset;
  int i;
  rtx insn, tmpreg;

  gr_saved = 0;
  fr_saved = 0;
  save_fregs = 0;

  /* Compute total size for frame pointer, filler, locals and rounding to
     the next word boundary.  Similar code appears in compute_frame_size
     and must be changed in tandem with this code.  */
  local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
  if (local_fsize || frame_pointer_needed)
    local_fsize += STARTING_FRAME_OFFSET;

  actual_fsize = compute_frame_size (size, &save_fregs);

  /* Compute a few things we will use often.  */
  tmpreg = gen_rtx_REG (word_mode, 1);

  /* Save RP first.  The calling conventions manual states RP will
     always be stored into the caller's frame at sp - 20 or sp - 16
     depending on which ABI is in use.  */
  if (regs_ever_live[2] || current_function_calls_eh_return)
    store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);

  /* Allocate the local frame and set up the frame pointer if needed.  */
  if (actual_fsize != 0)
    {
      if (frame_pointer_needed)
	{
	  /* Copy the old frame pointer temporarily into %r1.  Set up the
	     new stack pointer, then store away the saved old frame pointer
	     into the stack at sp and at the same time update the stack
	     pointer by actual_fsize bytes.  There are two versions:
	     the first handles small (< 8k) frames, the second handles
	     large (>= 8k) frames.  */
	  insn = emit_move_insn (tmpreg, frame_pointer_rtx);
	  if (DO_FRAME_NOTES)
	    RTX_FRAME_RELATED_P (insn) = 1;

	  insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
	  if (DO_FRAME_NOTES)
	    RTX_FRAME_RELATED_P (insn) = 1;

	  if (VAL_14_BITS_P (actual_fsize))
	    store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
	  else
	    {
	      /* It is incorrect to store the saved frame pointer at *sp,
		 then increment sp (writes beyond the current stack boundary).

		 So instead use stwm to store at *sp and post-increment the
		 stack pointer as an atomic operation.  Then increment sp to
		 finish allocating the new frame.  */
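	      /* 8192 - 64 (= 8128) appears to be chosen as the largest
		 multiple of the 64-byte stack alignment that still fits
		 in a signed 14-bit displacement (maximum 8191).  */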
	      HOST_WIDE_INT adjust1 = 8192 - 64;
	      HOST_WIDE_INT adjust2 = actual_fsize - adjust1;

	      store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
	      set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
			      adjust2, 1);
	    }

	  /* We set SAVE_SP in frames that need a frame pointer.  Thus,
	     we need to store the previous stack pointer (frame pointer)
	     into the frame marker on targets that use the HP unwind
	     library.  This allows the HP unwind library to be used to
	     unwind GCC frames.  However, we are not fully compatible
	     with the HP library because our frame layout differs from
	     that specified in the HP runtime specification.

	     We don't want a frame note on this instruction as the frame
	     marker moves during dynamic stack allocation.

	     This instruction also serves as a blockage to prevent
	     register spills from being scheduled before the stack
	     pointer is raised.  This is necessary as we store
	     registers using the frame pointer as a base register,
	     and the frame pointer is set before sp is raised.  */
	  if (TARGET_HPUX_UNWIND_LIBRARY)
	    {
	      rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
				       GEN_INT (TARGET_64BIT ? -8 : -4));

	      emit_move_insn (gen_rtx_MEM (word_mode, addr),
			      frame_pointer_rtx);
	    }
	  else
	    emit_insn (gen_blockage ());
	}
      /* No frame pointer needed.  */
      else
	{
	  /* In some cases we can perform the first callee register save
	     and allocate the stack frame at the same time.  If so, just
	     make a note of it and defer allocating the frame until saving
	     the callee registers.  */
	  if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
	    merge_sp_adjust_with_store = 1;
	  /* Cannot optimize.  Adjust the stack frame by actual_fsize
	     bytes.  */
	  else
	    set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
			    actual_fsize, 1);
	}
    }

  /* Normal register save.

     Do not save the frame pointer in the frame_pointer_needed case.  It
     was done earlier.  */
  if (frame_pointer_needed)
    {
      offset = local_fsize;

      /* Saving the EH return data registers in the frame is the simplest
	 way to get the frame unwind information emitted.  We put them
	 just before the general registers.  */
      if (DO_FRAME_NOTES && current_function_calls_eh_return)
	{
	  unsigned int i, regno;

	  for (i = 0; ; ++i)
	    {
	      regno = EH_RETURN_DATA_REGNO (i);
	      if (regno == INVALID_REGNUM)
		break;

	      store_reg (regno, offset, FRAME_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}

      for (i = 18; i >= 4; i--)
	if (regs_ever_live[i] && ! call_used_regs[i])
	  {
	    store_reg (i, offset, FRAME_POINTER_REGNUM);
	    offset += UNITS_PER_WORD;
	    gr_saved++;
	  }
      /* Account for %r3 which is saved in a special place.  */
      gr_saved++;
    }
  /* No frame pointer needed.  */
  else
    {
      offset = local_fsize - actual_fsize;
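      /* If the sp adjustment was emitted above, OFFSET addresses the
	 save area as a negative displacement from the new sp; if it was
	 deferred, the first store_reg_modify below both stores and
	 allocates.  */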

      /* Saving the EH return data registers in the frame is the simplest
         way to get the frame unwind information emitted.  */
      if (DO_FRAME_NOTES && current_function_calls_eh_return)
	{
	  unsigned int i, regno;

	  for (i = 0; ; ++i)
	    {
	      regno = EH_RETURN_DATA_REGNO (i);
	      if (regno == INVALID_REGNUM)
		break;

	      /* If merge_sp_adjust_with_store is nonzero, then we can
		 optimize the first save.  */
	      if (merge_sp_adjust_with_store)
		{
		  store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
		  merge_sp_adjust_with_store = 0;
		}
	      else
		store_reg (regno, offset, STACK_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}

      for (i = 18; i >= 3; i--)
	if (regs_ever_live[i] && ! call_used_regs[i])
	  {
	    /* If merge_sp_adjust_with_store is nonzero, then we can
	       optimize the first GR save.  */
	    if (merge_sp_adjust_with_store)
	      {
		store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
		merge_sp_adjust_with_store = 0;
	      }
	    else
	      store_reg (i, offset, STACK_POINTER_REGNUM);
	    offset += UNITS_PER_WORD;
	    gr_saved++;
	  }

      /* If we wanted to merge the SP adjustment with a GR save, but we never
	 did any GR saves, then just emit the adjustment here.  */
      if (merge_sp_adjust_with_store)
	set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
			actual_fsize, 1);
    }

  /* The hppa calling conventions say that %r19, the pic offset
     register, is saved at sp - 32 (in this function's frame)
     when generating PIC code.  FIXME:  What is the correct thing
     to do for functions which make no calls and allocate no
     frame?  Do we need to allocate a frame, or can we just omit
     the save?  For now we'll just omit the save.
     
     We don't want a note on this insn as the frame marker can
     move if there is a dynamic stack allocation.  */
  if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
    {
      rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));

      emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
    }

  /* Align pointer properly (doubleword boundary).  */
  offset = (offset + 7) & ~7;

  /* Floating point register store.  */
  if (save_fregs)
    {
      rtx base;

      /* First set %r1 to the start of the FP register save area, based
	 off either the frame or the stack pointer.  */
      if (frame_pointer_needed)
	{
	  set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
	  base = frame_pointer_rtx;
	}
      else
	{
	  set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
	  base = stack_pointer_rtx;
	}

      /* Now actually save the FP registers.  */
      for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
	{
	  if (regs_ever_live[i]
	      || (! TARGET_64BIT && regs_ever_live[i + 1]))
	    {
	      rtx addr, insn, reg;
	      addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
	      reg = gen_rtx_REG (DFmode, i);
	      insn = emit_move_insn (addr, reg);
	      if (DO_FRAME_NOTES)
		{
		  RTX_FRAME_RELATED_P (insn) = 1;
		  if (TARGET_64BIT)
		    {
		      rtx mem = gen_rtx_MEM (DFmode,
					     plus_constant (base, offset));
		      REG_NOTES (insn)
			= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
					     gen_rtx_SET (VOIDmode, mem, reg),
					     REG_NOTES (insn));
		    }
		  else
		    {
		      rtx meml = gen_rtx_MEM (SFmode,
					      plus_constant (base, offset));
		      rtx memr = gen_rtx_MEM (SFmode,
					      plus_constant (base, offset + 4));
		      rtx regl = gen_rtx_REG (SFmode, i);
		      rtx regr = gen_rtx_REG (SFmode, i + 1);
		      rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
		      rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
		      rtvec vec;

		      RTX_FRAME_RELATED_P (setl) = 1;
		      RTX_FRAME_RELATED_P (setr) = 1;
		      vec = gen_rtvec (2, setl, setr);
		      REG_NOTES (insn)
			= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
					     gen_rtx_SEQUENCE (VOIDmode, vec),
					     REG_NOTES (insn));
		    }
		}
	      offset += GET_MODE_SIZE (DFmode);
	      fr_saved++;
	    }
	}
    }
}

/* Emit RTL to load REG from the memory location specified by BASE+DISP.
   Handle case where DISP > 8k by using the add_high_const patterns.  */

static void
load_reg (int reg, HOST_WIDE_INT disp, int base)
{
  rtx dest = gen_rtx_REG (word_mode, reg);
  rtx basereg = gen_rtx_REG (Pmode, base);
  rtx src;

  if (VAL_14_BITS_P (disp))
    src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
    {
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
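      /* When indexed addressing is disabled, form the sum in %r1 so
	 that the memory address is a single base register rather than
	 the (reg + reg) index form used otherwise.  */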
      if (TARGET_DISABLE_INDEXING)
	{
	  emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
	  src = gen_rtx_MEM (word_mode, tmpreg);
	}
      else
	src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
    }
  else
    {
      rtx delta = GEN_INT (disp);
      rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, high);
      src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
    }

  emit_move_insn (dest, src);
}

/* Update the total code bytes output to the text section.  */

static void
update_total_code_bytes (int nbytes)
{
  if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
      && !IN_NAMED_SECTION_P (cfun->decl))
    {
      if (INSN_ADDRESSES_SET_P ())
	{
	  unsigned long old_total = total_code_bytes;

	  total_code_bytes += nbytes;

	  /* Be prepared to handle overflows: unsigned wraparound makes
	     the new total smaller than the old one.  */
	  if (old_total > total_code_bytes)
	    total_code_bytes = -1;
	}
      else
	total_code_bytes = -1;
    }
}

/* This function generates the assembly code for function exit.
   Args are as for pa_output_function_prologue ().

   The function epilogue should not depend on the current stack
   pointer!  It should use the frame pointer only.  This is mandatory
   because of alloca; we also take advantage of it to omit stack
   adjustments before returning.  */

static void
pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  rtx insn = get_last_insn ();

  last_address = 0;

  /* hppa_expand_epilogue does the dirty work now.  We just need
     to output the assembler directives which denote the end
     of a function.

     To make debuggers happy, emit a nop if the epilogue was completely
     eliminated due to a volatile call as the last insn in the
     current function.  That way the return address (in %r2) will
     always point to a valid instruction in the current function.  */

  /* Get the last real insn.  */
  if (GET_CODE (insn) == NOTE)
    insn = prev_real_insn (insn);

  /* If it is a sequence, then look inside.  */
  if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
    insn = XVECEXP (PATTERN (insn), 0, 0);

  /* If insn is a CALL_INSN, then it must be a call to a volatile
     function (otherwise there would be epilogue insns).  */
  if (insn && GET_CODE (insn) == CALL_INSN)
    {
      fputs ("\tnop\n", file);
      last_address += 4;
    }

  fputs ("\t.EXIT\n\t.PROCEND\n", file);

  if (TARGET_SOM && TARGET_GAS)
    {
      /* We're done with this subspace except possibly for some additional
	 debug information.  Forget that we are in this subspace to ensure
	 that the next function is output in its own subspace.  */
      in_section = NULL;
      cfun->machine->in_nsubspa = 2;
    }

  if (INSN_ADDRESSES_SET_P ())
    {
      insn = get_last_nonnote_insn ();
      last_address += INSN_ADDRESSES (INSN_UID (insn));
      if (INSN_P (insn))
	last_address += insn_default_length (insn);
      last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
		      & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
    }

  /* Finally, update the total number of code bytes output so far.  */
  update_total_code_bytes (last_address);
}

void
hppa_expand_epilogue (void)
{
  rtx tmpreg;
  HOST_WIDE_INT offset;
  HOST_WIDE_INT ret_off = 0;
  int i;
  int merge_sp_adjust_with_load = 0;

  /* We will use this often.  */
  tmpreg = gen_rtx_REG (word_mode, 1);

  /* Try to restore RP early to avoid load/use interlocks when
     RP gets used in the return (bv) instruction.  This appears to still
     be necessary even when we schedule the prologue and epilogue.  */
  if (regs_ever_live[2] || current_function_calls_eh_return)
    {
      ret_off = TARGET_64BIT ? -16 : -20;
      if (frame_pointer_needed)
	{
	  load_reg (2, ret_off, FRAME_POINTER_REGNUM);
	  ret_off = 0;
	}
      else
	{
	  /* No frame pointer; restore RP now only if its save slot is
	     within reach of a 14-bit displacement from sp.  */
	  if (VAL_14_BITS_P (ret_off - actual_fsize))
	    {
	      load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
	      ret_off = 0;
	    }
	}
    }

  /* General register restores.  */
  if (frame_pointer_needed)
    {
      offset = local_fsize;

      /* If the current function calls __builtin_eh_return, then we need
         to restore the saved EH data registers.  */
      if (DO_FRAME_NOTES && current_function_calls_eh_return)
	{
	  unsigned int i, regno;

	  for (i = 0; ; ++i)
	    {
	      regno = EH_RETURN_DATA_REGNO (i);
	      if (regno == INVALID_REGNUM)
		break;

	      load_reg (regno, offset, FRAME_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}

      for (i = 18; i >= 4; i--)
	if (regs_ever_live[i] && ! call_used_regs[i])
	  {
	    load_reg (i, offset, FRAME_POINTER_REGNUM);
	    offset += UNITS_PER_WORD;
	  }
    }
  else
    {
      offset = local_fsize - actual_fsize;

      /* If the current function calls __builtin_eh_return, then we need
         to restore the saved EH data registers.  */
      if (DO_FRAME_NOTES && current_function_calls_eh_return)
	{
	  unsigned int i, regno;

	  for (i = 0; ; ++i)
	    {
	      regno = EH_RETURN_DATA_REGNO (i);
	      if (regno == INVALID_REGNUM)
		break;

	      /* Only the first load can be merged.
		 merge_sp_adjust_with_load records the register whose
		 restore will be merged with the sp adjustment.  */
	      if (merge_sp_adjust_with_load == 0
		  && local_fsize == 0
		  && VAL_14_BITS_P (-actual_fsize))
	        merge_sp_adjust_with_load = regno;
	      else
		load_reg (regno, offset, STACK_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}

      for (i = 18; i >= 3; i--)
	{
	  if (regs_ever_live[i] && ! call_used_regs[i])
	    {
	      /* Only the first load can be merged.
		 merge_sp_adjust_with_load records the register whose
		 restore will be merged with the sp adjustment.  */
	      if (merge_sp_adjust_with_load == 0
		  && local_fsize == 0
		  && VAL_14_BITS_P (-actual_fsize))
	        merge_sp_adjust_with_load = i;
	      else
		load_reg (i, offset, STACK_POINTER_REGNUM);
	      offset += UNITS_PER_WORD;
	    }
	}
    }

  /* Align pointer properly (doubleword boundary).  */
  offset = (offset + 7) & ~7;

  /* FP register restores.  */
  if (save_fregs)
    {
      /* Point %r1 at the start of the FP register save area.  */
      if (frame_pointer_needed)
	set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
      else
	set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);

      /* Actually do the restores now.  */
      for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
	if (regs_ever_live[i]
	    || (! TARGET_64BIT && regs_ever_live[i + 1]))
	  {
	    rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
	    rtx dest = gen_rtx_REG (DFmode, i);
	    emit_move_insn (dest, src);
	  }
    }

  /* Emit a blockage insn here to keep these insns from being moved to
     an earlier spot in the epilogue, or into the main instruction stream.

     This is necessary as we must not cut the stack back before all the
     restores are finished.  */
  emit_insn (gen_blockage ());

  /* Reset stack pointer (and possibly frame pointer).  The stack
     pointer is initially set to fp + 64 to avoid a race condition.  */
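  /* The PA stack grows upward, so memory at or above sp may be
     clobbered asynchronously (by a signal handler, for instance).
     Keeping sp at fp + 64 protects the saved frame pointer at sp - 64
     until the pre-modify load below lowers sp and restores the old
     frame pointer in a single operation.  */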
  if (frame_pointer_needed)
    {
      rtx delta = GEN_INT (-64);

      set_reg_plus_d (STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM, 64, 0);
      emit_insn (gen_pre_load (frame_pointer_rtx, stack_pointer_rtx, delta));
    }
  /* If we were deferring a callee register restore, do it now.  */
  else if (merge_sp_adjust_with_load)
    {
      rtx delta = GEN_INT (-actual_fsize);
      rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);

      emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
    }
  else if (actual_fsize != 0)
    set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
		    - actual_fsize, 0);

  /* If we haven't restored %r2 yet (no frame pointer, and a stack
     frame greater than 8k), do so now.  */
  if (ret_off != 0)
    load_reg (2, ret_off, STACK_POINTER_REGNUM);

  if (DO_FRAME_NOTES && current_function_calls_eh_return)
    {
      rtx sa = EH_RETURN_STACKADJ_RTX;

      emit_insn (gen_blockage ());
      emit_insn (TARGET_64BIT
		 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
		 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
    }
}

rtx
hppa_pic_save_rtx (void)
{
  return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
}

#ifndef NO_DEFERRED_PROFILE_COUNTERS
#define NO_DEFERRED_PROFILE_COUNTERS 0
#endif


/* Vector of funcdef numbers.  */
static VEC(int,heap) *funcdef_nos;

/* Output deferred profile counters.  */
static void
output_deferred_profile_counters (void)
{
  unsigned int i;
  int align, n;

  if (VEC_empty (int, funcdef_nos))
    return;

  switch_to_section (data_section);
  align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
  ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));

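  /* Emit one zero-initialized, long-sized counter for each recorded
     funcdef number, each preceded by its internal "LP" label.  */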
  for (i = 0; VEC_iterate (int, funcdef_nos, i, n); i++)
    {
      targetm.asm_out.internal_label (asm_out_file, "LP", n);
      assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
    }

  VEC_free (int, heap, funcdef_nos);
}

void
hppa_profile_hook (int label_no)
{
  /* We use SImode for the address of the function in both 32 and
     64-bit code to avoid having to provide DImode versions of the
     lcla2 and load_offset_label_address insn patterns.  */
  rtx reg = gen_reg_rtx (SImode);
  rtx label_rtx = gen_label_rtx ();
  rtx begin_label_rtx, call_insn;
  char begin_label_name[16];

  ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
			       label_no);
  begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));

  if (TARGET_64BIT)
    emit_move_insn (arg_pointer_rtx,
		    gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
				  GEN_INT (64)));

  emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));

  /* The address of the function is loaded into %r25 with an instruction-
     relative sequence that avoids the use of relocations.  The sequence
     is split so that the load_offset_label_address instruction can
     occupy the delay slot of the call to _mcount.  */
  if (TARGET_PA_20)
    emit_insn (gen_lcla2 (reg, label_rtx));
  else
    emit_insn (gen_lcla1 (reg, label_rtx));

  emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25), 
					    reg, begin_label_rtx, label_rtx));

#if !NO_DEFERRED_PROFILE_COUNTERS
  {
    rtx count_label_rtx, addr, r24;
    char count_label_name[16];

    VEC_safe_push (int, heap, funcdef_nos, label_no);
    ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
    count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));

    addr = force_reg (Pmode, count_label_rtx);
    r24 = gen_rtx_REG (Pmode, 24);
    emit_move_insn (r24, addr);

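    /* The size operand (24 or 12 bytes) appears to count the three
       register arguments %r24, %r25 and %r26.  */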
    call_insn =
      emit_call_insn (gen_call (gen_rtx_MEM (Pmode, 
					     gen_rtx_SYMBOL_REF (Pmode, 
								 "_mcount")),
				GEN_INT (TARGET_64BIT ? 24 : 12)));

    use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
  }
#else

  call_insn =
    emit_call_insn (gen_call (gen_rtx_MEM (Pmode, 
					   gen_rtx_SYMBOL_REF (Pmode, 
							       "_mcount")),
			      GEN_INT (TARGET_64BIT ? 16 : 8)));

#endif

  use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
  use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));

  /* Indicate the _mcount call cannot throw, nor will it execute a
     non-local goto.  */
  REG_NOTES (call_insn)
    = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx, REG_NOTES (call_insn));
}

/* Fetch the return address for the frame COUNT steps up from
   the current frame, after the prologue.  FRAMEADDR is the
   frame pointer of the COUNT frame.

   We want to ignore any export stub remnants here.  To handle this,
   we examine the code at the return address, and if it is an export
   stub, we return a memory rtx for the stub return address stored
   at frame-24.

   The value returned is used in two different ways:

	1. To find a function's caller.

	2. To change the return address for a function.

   This function handles most instances of case 1; however, it will
   fail if there are two levels of stubs to execute on the return
   path.  The only way I believe that can happen is if the return value
   needs a parameter relocation, which never happens for C code.

   This function handles most instances of case 2; however, it will
   fail if we did not originally have stub code on the return path
   but will need stub code on the new return path.  This can happen if
   the caller & callee are both in the main program, but the new
   return location is in a shared library.  */

rtx
return_addr_rtx (int count, rtx frameaddr)
{
  rtx label;
  rtx rp;
  rtx saved_rp;
  rtx ins;

  if (count != 0)
    return NULL_RTX;

  rp = get_hard_reg_initial_val (Pmode, 2);

  if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
    return rp;

  saved_rp = gen_reg_rtx (Pmode);
  emit_move_insn (saved_rp, rp);

  /* Get pointer to the instruction stream.  We have to mask out the
     privilege level from the two low order bits of the return address
     pointer here so that ins will point to the start of the first
     instruction that would have been executed if we returned.  */
  ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
  label = gen_label_rtx ();

  /* Check the instruction stream at the normal return address for the
     export stub:

	0x4bc23fd1 | stub+8:   ldw -18(sr0,sp),rp
	0x004010a1 | stub+12:  ldsid (sr0,rp),r1
	0x00011820 | stub+16:  mtsp r1,sr0
	0xe0400002 | stub+20:  be,n 0(sr0,rp)

     If it is an export stub, then our return address is really at
     -24[frameaddr].  */

  emit_cmp_insn (gen_rtx_MEM (SImode, ins), GEN_INT (0x4bc23fd1), NE,
		 NULL_RTX, SImode, 1);